code stringlengths 22 1.05M | apis listlengths 1 3.31k | extract_api stringlengths 75 3.25M |
|---|---|---|
from torch.autograd import Variable
import torch
import torch.nn as nn
import torch.nn.functional as F
class LSTM(nn.Module):
def __init__(self, nin, hidden_size):
super(LSTM, self).__init__()
if torch.cuda.is_available():
self.linear_f = nn.Linear(nin + hidden_size, hidden_size).cuda()
self.linear_i = nn.Linear(nin + hidden_size, hidden_size).cuda()
self.linear_ctilde = nn.Linear(nin + hidden_size, hidden_size).cuda()
self.linear_o = nn.Linear(nin + hidden_size, hidden_size).cuda()
else:
self.linear_f = nn.Linear(nin + hidden_size, hidden_size)
self.linear_i = nn.Linear(nin + hidden_size, hidden_size)
self.linear_ctilde = nn.Linear(nin + hidden_size, hidden_size)
self.linear_o = nn.Linear(nin + hidden_size, hidden_size)
self.hidden_size = hidden_size
self.init_weights()
def forward(self, x, mask):
hidden, c = self.init_hidden(x.size(1))
def step(emb, hid, c_t_old, mask_cur):
combined = torch.cat((hid, emb), 1)
f = F.sigmoid(self.linear_f(combined))
i = F.sigmoid(self.linear_i(combined))
o = F.sigmoid(self.linear_o(combined))
c_tilde = F.tanh(self.linear_ctilde(combined))
c_t = f * c_t_old + i * c_tilde
c_t = mask_cur[:, None] * c_t + (1. - mask_cur)[:, None] * c_t_old
hid_new = o * F.tanh(c_t)
hid_new = mask_cur[:, None] * hid_new + (1. - mask_cur)[:, None] * hid
return hid_new, c_t, i
h_hist = []
i_hist = []
for i in range(x.size(0)):
hidden, c, i = step(x[i].squeeze(), hidden, c, mask[i])
h_hist.append(hidden[None, :, :])
i_hist.append(i[None, :, :])
return torch.cat(h_hist), torch.cat(i_hist)
def init_hidden(self, bat_size):
if torch.cuda.is_available():
h0 = Variable(torch.zeros(bat_size, self.hidden_size).cuda())
c0 = Variable(torch.zeros(bat_size, self.hidden_size).cuda())
else:
h0 = Variable(torch.zeros(bat_size, self.hidden_size))
c0 = Variable(torch.zeros(bat_size, self.hidden_size))
return h0, c0
def init_weights(self):
initrange = 0.1
lin_layers = [self.linear_f, self.linear_i, self.linear_ctilde, self.linear_o]
for layer in lin_layers:
layer.weight.data.uniform_(-initrange, initrange)
layer.bias.data.fill_(0)
| [
"torch.nn.functional.tanh",
"torch.cuda.is_available",
"torch.nn.Linear",
"torch.zeros",
"torch.cat"
] | [((216, 241), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (239, 241), False, 'import torch\n'), ((1929, 1954), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (1952, 1954), False, 'import torch\n'), ((599, 640), 'torch.nn.Linear', 'nn.Linear', (['(nin + hidden_size)', 'hidden_size'], {}), '(nin + hidden_size, hidden_size)\n', (608, 640), True, 'import torch.nn as nn\n'), ((669, 710), 'torch.nn.Linear', 'nn.Linear', (['(nin + hidden_size)', 'hidden_size'], {}), '(nin + hidden_size, hidden_size)\n', (678, 710), True, 'import torch.nn as nn\n'), ((744, 785), 'torch.nn.Linear', 'nn.Linear', (['(nin + hidden_size)', 'hidden_size'], {}), '(nin + hidden_size, hidden_size)\n', (753, 785), True, 'import torch.nn as nn\n'), ((814, 855), 'torch.nn.Linear', 'nn.Linear', (['(nin + hidden_size)', 'hidden_size'], {}), '(nin + hidden_size, hidden_size)\n', (823, 855), True, 'import torch.nn as nn\n'), ((1076, 1100), 'torch.cat', 'torch.cat', (['(hid, emb)', '(1)'], {}), '((hid, emb), 1)\n', (1085, 1100), False, 'import torch\n'), ((1843, 1860), 'torch.cat', 'torch.cat', (['h_hist'], {}), '(h_hist)\n', (1852, 1860), False, 'import torch\n'), ((1862, 1879), 'torch.cat', 'torch.cat', (['i_hist'], {}), '(i_hist)\n', (1871, 1879), False, 'import torch\n'), ((1465, 1476), 'torch.nn.functional.tanh', 'F.tanh', (['c_t'], {}), '(c_t)\n', (1471, 1476), True, 'import torch.nn.functional as F\n'), ((2144, 2183), 'torch.zeros', 'torch.zeros', (['bat_size', 'self.hidden_size'], {}), '(bat_size, self.hidden_size)\n', (2155, 2183), False, 'import torch\n'), ((2211, 2250), 'torch.zeros', 'torch.zeros', (['bat_size', 'self.hidden_size'], {}), '(bat_size, self.hidden_size)\n', (2222, 2250), False, 'import torch\n'), ((271, 312), 'torch.nn.Linear', 'nn.Linear', (['(nin + hidden_size)', 'hidden_size'], {}), '(nin + hidden_size, hidden_size)\n', (280, 312), True, 'import torch.nn as nn\n'), ((348, 389), 'torch.nn.Linear', 'nn.Linear', (['(nin + 
hidden_size)', 'hidden_size'], {}), '(nin + hidden_size, hidden_size)\n', (357, 389), True, 'import torch.nn as nn\n'), ((430, 471), 'torch.nn.Linear', 'nn.Linear', (['(nin + hidden_size)', 'hidden_size'], {}), '(nin + hidden_size, hidden_size)\n', (439, 471), True, 'import torch.nn as nn\n'), ((507, 548), 'torch.nn.Linear', 'nn.Linear', (['(nin + hidden_size)', 'hidden_size'], {}), '(nin + hidden_size, hidden_size)\n', (516, 548), True, 'import torch.nn as nn\n'), ((1982, 2021), 'torch.zeros', 'torch.zeros', (['bat_size', 'self.hidden_size'], {}), '(bat_size, self.hidden_size)\n', (1993, 2021), False, 'import torch\n'), ((2056, 2095), 'torch.zeros', 'torch.zeros', (['bat_size', 'self.hidden_size'], {}), '(bat_size, self.hidden_size)\n', (2067, 2095), False, 'import torch\n')] |
import torch
from torch import nn
from torch.utils.data import DataLoader
from tqdm.notebook import tqdm
from torch.optim import Adam
import torch.nn.functional as F
from voice_verification.src.models.vggvox import VGGVox
from voice_verification.src.losses.contrastive_loss import ContrastiveLoss
from datasets import VoiceContrastiveDataset
import config
def get_loader():
train_dataset = VoiceContrastiveDataset(data_path='')
test_dataset = VoiceContrastiveDataset(data_path='')
train_loader = DataLoader(train_dataset, batch_size=config.batch_size)
test_loader = DataLoader(test_dataset, batch_size=config.batch_size)
return train_loader, test_loader
def train():
train_loader, test_loader = get_loader()
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
model = VGGVox(n_classes=400).to(device)
model.fc8 = nn.Linear(1024, 8)
criterion = ContrastiveLoss().to(device)
optimizer = Adam(model.parameters(), lr=config.lr)
train_epoch_iterator = tqdm(train_loader,
desc="Training (Step X) (loss=X.X)",
bar_format="{l_bar}{r_bar}",
dynamic_ncols=True)
best_accuracy = 0
for epoch in range(config.n_epochs):
model.train()
train_batch_losses = []
print(f'Epoch {epoch + 1}: ')
for input_tensor1, input_tensor2, labels in train_epoch_iterator:
input_tensor1 = input_tensor1.to(device)
input_tensor2 = input_tensor2.to(device)
labels = labels.to(device)
output_tensor1, output_tensor2 = model(input_tensor1), model(input_tensor2)
loss = criterion(output_tensor1, output_tensor2, labels)
optimizer.zero_grad()
loss.backward()
optimizer.step()
train_epoch_iterator.set_description(f"loss: {loss.item()}")
train_batch_losses.append(loss.item())
print('Train loss: ', sum(train_batch_losses) / len(train_batch_losses))
n_true = 0
n_sample = 0
model.eval()
for input_tensor1, input_tensor2, labels in test_loader:
input_tensor1 = input_tensor1.to(device)
input_tensor2 = input_tensor2.to(device)
labels = labels.to(device)
with torch.no_grad():
output_tensor1, output_tensor2 = model(input_tensor1), model(input_tensor2)
loss = criterion(output_tensor1, output_tensor2, labels)
print(loss.item())
euclidean_distance = F.pairwise_distance(output_tensor1, output_tensor2, keepdim=True)
predicts = euclidean_distance
predicts = predicts < 0.5
predicts = 1 - predicts.view(-1).int()
n_true += sum(predicts == labels)
n_sample += predicts.shape[0]
accuracy = n_true / n_sample
print("Validation accuracy: ", accuracy)
if accuracy > best_accuracy:
torch.save(model.state_dict(), 'checkpoints/model.pth')
if __name__ == '__main__':
train()
pass
| [
"voice_verification.src.models.vggvox.VGGVox",
"voice_verification.src.losses.contrastive_loss.ContrastiveLoss",
"datasets.VoiceContrastiveDataset",
"torch.cuda.is_available",
"torch.nn.Linear",
"torch.utils.data.DataLoader",
"torch.no_grad",
"tqdm.notebook.tqdm",
"torch.nn.functional.pairwise_dista... | [((398, 435), 'datasets.VoiceContrastiveDataset', 'VoiceContrastiveDataset', ([], {'data_path': '""""""'}), "(data_path='')\n", (421, 435), False, 'from datasets import VoiceContrastiveDataset\n'), ((455, 492), 'datasets.VoiceContrastiveDataset', 'VoiceContrastiveDataset', ([], {'data_path': '""""""'}), "(data_path='')\n", (478, 492), False, 'from datasets import VoiceContrastiveDataset\n'), ((513, 568), 'torch.utils.data.DataLoader', 'DataLoader', (['train_dataset'], {'batch_size': 'config.batch_size'}), '(train_dataset, batch_size=config.batch_size)\n', (523, 568), False, 'from torch.utils.data import DataLoader\n'), ((587, 641), 'torch.utils.data.DataLoader', 'DataLoader', (['test_dataset'], {'batch_size': 'config.batch_size'}), '(test_dataset, batch_size=config.batch_size)\n', (597, 641), False, 'from torch.utils.data import DataLoader\n'), ((877, 895), 'torch.nn.Linear', 'nn.Linear', (['(1024)', '(8)'], {}), '(1024, 8)\n', (886, 895), False, 'from torch import nn\n'), ((1025, 1134), 'tqdm.notebook.tqdm', 'tqdm', (['train_loader'], {'desc': '"""Training (Step X) (loss=X.X)"""', 'bar_format': '"""{l_bar}{r_bar}"""', 'dynamic_ncols': '(True)'}), "(train_loader, desc='Training (Step X) (loss=X.X)', bar_format=\n '{l_bar}{r_bar}', dynamic_ncols=True)\n", (1029, 1134), False, 'from tqdm.notebook import tqdm\n'), ((777, 802), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (800, 802), False, 'import torch\n'), ((828, 849), 'voice_verification.src.models.vggvox.VGGVox', 'VGGVox', ([], {'n_classes': '(400)'}), '(n_classes=400)\n', (834, 849), False, 'from voice_verification.src.models.vggvox import VGGVox\n'), ((913, 930), 'voice_verification.src.losses.contrastive_loss.ContrastiveLoss', 'ContrastiveLoss', ([], {}), '()\n', (928, 930), False, 'from voice_verification.src.losses.contrastive_loss import ContrastiveLoss\n'), ((2350, 2365), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (2363, 2365), 
False, 'import torch\n'), ((2604, 2669), 'torch.nn.functional.pairwise_distance', 'F.pairwise_distance', (['output_tensor1', 'output_tensor2'], {'keepdim': '(True)'}), '(output_tensor1, output_tensor2, keepdim=True)\n', (2623, 2669), True, 'import torch.nn.functional as F\n')] |
import sqlite3
import json
import sys
import os
class Database(object):
# Contructior opens database
def __init__(self, databasePath = "app/data.db"):
self.databasePath = databasePath
self.select = ""
self.froms = ""
self.joins = ""
self.wheres = ""
self.groupBy = ""
self.orderBy = ""
def open_conn(self):
# api route
dirpath = os.path.dirname(os.path.realpath(__file__)) + "/../../"
pathlength = len(os.getcwd())
if(pathlength != 1):
skipchars = pathlength + 1
else:
skipchars = 1
self.databaseRelativeLocation = dirpath[skipchars:] + self.databasePath
self.conn = sqlite3.connect(self.databaseRelativeLocation)
# this file test use route
self.c = self.conn.cursor()
self.conn.row_factory = sqlite3.Row
def close_conn(self):
self.conn.commit()
self.conn.close()
# Create releavant tables
# Only use if you know what you are doing!!!
def create_table(self):
tables = ["address", "user_address", "product", "product_aroma", "wishes", "account", "favorites", "orders", "order_details"]
for table in tables:
query = "DROP TABLE IF EXISTS " + table
self.raw_querry(query)
# print("tables deleted")
queryFile = open('app/api/createdb.sql', 'r')
querrys = queryFile.read()
queryFile.close()
querrys = querrys.split(';')
for querry in querrys:
try:
self.raw_querry(querry)
except (sqlite3.OperationalError, msg):
print ("command skipped: ", msg)
# Gets json and inserts values into databse
def insert_coffee(self):
self.open_conn()
data = open("app/products.json", 'r')
jsonData = json.load(data)
data.close()
# return jsonData
for item in jsonData:
# insert coffees
querry = 'insert into product (product_id, name, description, price, roast_level, origin) \
values ({}, "{}", "{}", {}, "{}", "{}")'.format(str(item["ID"]), item["Name"], item["Description"], str(float(item["Price"])), item["Roast"], item["Origin"])
self.c.execute(querry)
# insert connection between aromas and coffees
for aroma in item["Aromas"]:
if aroma == 'Nutty':
querry = 'INSERT INTO product_aroma(product_id, aroma_name) VALUES ({}, "{}")'.format(item["ID"], aroma)
if aroma == 'Fruity':
querry = 'INSERT INTO product_aroma(product_id, aroma_name) VALUES ({}, "{}")'.format(item["ID"], aroma)
if aroma == 'Spicy':
querry = 'INSERT INTO product_aroma(product_id, aroma_name) VALUES ({}, "{}")'.format(item["ID"], aroma)
if aroma == 'Chocolate':
querry = 'INSERT INTO product_aroma(product_id, aroma_name) VALUES ({}, "{}")'.format(item["ID"], aroma)
self.c.execute(querry)
self.close_conn()
# Drop all tables, Create new table and fill them
def reset_database(self):
self.create_table()
self.insert_coffee()
def raw_get_querry(self, querry):
try:
self.open_conn()
self.c.execute(querry)
result = self.c.fetchall()
names = [description[0] for description in self.c.description]
final = []
for elem in result:
tempdict = {}
for value in range(0, len(elem)):
tempdict[names[value]] = elem[value]
final.append(tempdict)
self.close_conn()
except:
final = sys.exc_info()
return final
def raw_get_one_querry(self, querry):
try:
self.open_conn()
self.c.execute(querry)
result = self.c.fetchone()
names = [description[0] for description in self.c.description]
final = {}
if result != None:
for i in range(0, len(result)):
final[names[i]] = result[i]
self.close_conn()
except:
final = sys.exc_info()
return final
# executes given query
# returns number of rows affected by query or last rowid
def raw_querry(self, querry, rowcount = True):
try:
self.open_conn()
self.c.execute(querry)
if rowcount:
result = self.c.rowcount
else:
result = self.c.lastrowid
self.close_conn()
except:
result = sys.exc_info()
print(result)
print("raw query error")
print(querry)
return result
# rest values for querry
def reset_querry(self):
self.select = ""
self.froms = ""
self.joins = ""
self.wheres = ""
self.groupBy = ""
self.orderBy = ""
# Build querry form components
# This funtion makes us of any argumetns passed to where(), join()
def get_all(self, table, select = "*"):
self.select = select
self.froms = table
querry = "SELECT " + self.select +" \n"
querry += "FROM " + self.froms + " \n"
if self.joins != "":
querry += self.joins
if self.wheres != "":
querry += self.wheres
if self.groupBy != "":
querry += self.groupBy
if self.orderBy != "":
querry += self.orderBy
result = self.raw_get_querry(querry)
self.reset_querry()
return result
# Build querry form components
# This funtion makes us of any argumetns passed to where(), join()
def get_one(self, table, select = "*"):
self.select = select
self.froms = table
querry = "SELECT " + self.select +" \n"
querry += "FROM " + self.froms + " \n"
if self.joins != "":
querry += self.joins
if self.wheres != "":
querry += self.wheres
if self.groupBy != "":
querry += self.groupBy
if self.orderBy != "":
querry += self.orderBy
result = self.raw_get_one_querry(querry)
self.reset_querry()
return result
# Add joins to querry
def join(self, table, condition, type = "INNER"):
self.joins += type + " JOIN " + table +" ON " + condition +"\n"
# add conditions to querry
def where(self, column, value, comparator = "="):
if isinstance(value, str):
value = "'" + value + "'"
if isinstance(value, int):
value = str(value)
if self.wheres == "":
self.wheres += "WHERE "
else:
self.wheres += " AND "
self.wheres += column +" "+ comparator +" "+ value +" \n"
# table; string, table name
# values; dictonary {'columnname':'value'}, columnames and value
# return; last rowid
def insert(self, table, values):
columns = ""
value = ""
for key in values:
if columns == "":
columns += key
else:
columns += ', ' + key
if value == "":
if isinstance(values[key], str):
value += '"' + str(values[key]) + '"'
else:
value += str(values[key])
else:
if isinstance(values[key], str):
value += ', "' + str(values[key]) + '"'
else:
value += ', ' + str(values[key])
querry = 'INSERT INTO {}({}) VALUES ({})'.format(table, columns, value)
return self.raw_querry(querry, False)
# table; string, table name
# values; dictonary (eg {'columnname':'value'}), columnames and value
# this function also makes use of any arguments passed to where()
# return;
def update(self, table, values):
updates = ''
for key in values:
if updates == '':
updates += key + ' = '
else:
updates += ', ' + key + ' = '
if isinstance(values[key], str):
updates += '"' + values[key] +'"'
elif isinstance(values[key], int):
updates += "'" + str(values[key]) + "'"
else:
updates += values[key]
querry = "UPDATE {}\n SET {} \n".format(table, updates)
if self.wheres != '':
querry += self.wheres
result = self.raw_querry(querry)
self.reset_querry()
return result
# table: string, table name
# this function also uses the arguments passed to where()
# return; number of rows affected
def delete(self, table):
querry = 'DELETE FROM {} \n'.format(table)
if self.wheres != '':
querry += self.wheres
self.reset_querry()
return self.raw_querry(querry)
# column: string, column you want to group by
# This function adds a group by argument to your query
def group_by(self, column):
if self.groupBy == "":
self.groupBy += "GROUP BY " + column
else:
self.groupBy += " , "+ column
def order_by(self, column):
if self.orderBy == "":
self.orderBy += "ORDER BY " + column
else:
self.orderBy += " , "+ column
# Converts given to json.
def to_jsonarray(self, array):
return json.dumps(array, ensure_ascii=False, sort_keys=True)
| [
"sqlite3.connect",
"json.dumps",
"os.getcwd",
"os.path.realpath",
"sys.exc_info",
"json.load"
] | [((615, 661), 'sqlite3.connect', 'sqlite3.connect', (['self.databaseRelativeLocation'], {}), '(self.databaseRelativeLocation)\n', (630, 661), False, 'import sqlite3\n'), ((1585, 1600), 'json.load', 'json.load', (['data'], {}), '(data)\n', (1594, 1600), False, 'import json\n'), ((7815, 7868), 'json.dumps', 'json.dumps', (['array'], {'ensure_ascii': '(False)', 'sort_keys': '(True)'}), '(array, ensure_ascii=False, sort_keys=True)\n', (7825, 7868), False, 'import json\n'), ((429, 440), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (438, 440), False, 'import os\n'), ((370, 396), 'os.path.realpath', 'os.path.realpath', (['__file__'], {}), '(__file__)\n', (386, 396), False, 'import os\n'), ((3152, 3166), 'sys.exc_info', 'sys.exc_info', ([], {}), '()\n', (3164, 3166), False, 'import sys\n'), ((3518, 3532), 'sys.exc_info', 'sys.exc_info', ([], {}), '()\n', (3530, 3532), False, 'import sys\n'), ((3859, 3873), 'sys.exc_info', 'sys.exc_info', ([], {}), '()\n', (3871, 3873), False, 'import sys\n')] |
from keras.models import Model, load_model
from keras.preprocessing.image import ImageDataGenerator
from keras.optimizers import SGD
import numpy as np
import os
def preprocess_input(x):
x /= 127.5
x -= 1.
return x
test_gen = ImageDataGenerator(
preprocessing_function=preprocess_input
)
test_generator = test_gen.flow_from_directory(
"./data/test", (224, 224), shuffle=False)
models = ['./backup/resnet_model_epoch45.h5']
for model_name in models:
model = load_model(os.path.join(model_name))
print("{} --> {}".format(model_name,model.evaluate_generator(test_generator)))
| [
"os.path.join",
"keras.preprocessing.image.ImageDataGenerator"
] | [((242, 301), 'keras.preprocessing.image.ImageDataGenerator', 'ImageDataGenerator', ([], {'preprocessing_function': 'preprocess_input'}), '(preprocessing_function=preprocess_input)\n', (260, 301), False, 'from keras.preprocessing.image import ImageDataGenerator\n'), ((499, 523), 'os.path.join', 'os.path.join', (['model_name'], {}), '(model_name)\n', (511, 523), False, 'import os\n')] |
import os
import sys
import cv2
import numpy as np
class Equirectangular:
def __init__(self, img):
self._img = img
#self._img = cv2.imread(img_name, cv2.IMREAD_COLOR)
[self._height, self._width, _] = self._img.shape
print(self._img.shape)
def GetPerspective(self, FOV, THETA, PHI, height, width):
#
# THETA is left/right angle, PHI is up/down angle, both in degree
#
equ_h = self._height
equ_w = self._width
equ_cx = (equ_w - 1) / 2.0
equ_cy = (equ_h - 1) / 2.0
wFOV = FOV
hFOV = float(height) / width * wFOV
w_len = np.tan(np.radians(wFOV / 2.0))
h_len = np.tan(np.radians(hFOV / 2.0))
x_map = np.ones([height, width], np.float32)
y_map = np.tile(np.linspace(-w_len, w_len,width), [height,1])
z_map = -np.tile(np.linspace(-h_len, h_len,height), [width,1]).T
D = np.sqrt(x_map**2 + y_map**2 + z_map**2)
xyz = np.stack((x_map,y_map,z_map),axis=2)/np.repeat(D[:, :, np.newaxis], 3, axis=2)
y_axis = np.array([0.0, 1.0, 0.0], np.float32)
z_axis = np.array([0.0, 0.0, 1.0], np.float32)
[R1, _] = cv2.Rodrigues(z_axis * np.radians(THETA))
[R2, _] = cv2.Rodrigues(np.dot(R1, y_axis) * np.radians(-PHI))
xyz = xyz.reshape([height * width, 3]).T
xyz = np.dot(R1, xyz)
xyz = np.dot(R2, xyz).T
lat = np.arcsin(xyz[:, 2])
lon = np.arctan2(xyz[:, 1] , xyz[:, 0])
lon = lon.reshape([height, width]) / np.pi * 180
lat = -lat.reshape([height, width]) / np.pi * 180
lon = lon / 180 * equ_cx + equ_cx
lat = lat / 90 * equ_cy + equ_cy
persp = cv2.remap(self._img, lon.astype(np.float32), lat.astype(np.float32), cv2.INTER_CUBIC, borderMode=cv2.BORDER_WRAP)
return persp
| [
"numpy.radians",
"numpy.sqrt",
"numpy.ones",
"numpy.repeat",
"numpy.arcsin",
"numpy.array",
"numpy.dot",
"numpy.linspace",
"numpy.arctan2",
"numpy.stack"
] | [((743, 779), 'numpy.ones', 'np.ones', (['[height, width]', 'np.float32'], {}), '([height, width], np.float32)\n', (750, 779), True, 'import numpy as np\n'), ((936, 981), 'numpy.sqrt', 'np.sqrt', (['(x_map ** 2 + y_map ** 2 + z_map ** 2)'], {}), '(x_map ** 2 + y_map ** 2 + z_map ** 2)\n', (943, 981), True, 'import numpy as np\n'), ((1095, 1132), 'numpy.array', 'np.array', (['[0.0, 1.0, 0.0]', 'np.float32'], {}), '([0.0, 1.0, 0.0], np.float32)\n', (1103, 1132), True, 'import numpy as np\n'), ((1150, 1187), 'numpy.array', 'np.array', (['[0.0, 0.0, 1.0]', 'np.float32'], {}), '([0.0, 0.0, 1.0], np.float32)\n', (1158, 1187), True, 'import numpy as np\n'), ((1383, 1398), 'numpy.dot', 'np.dot', (['R1', 'xyz'], {}), '(R1, xyz)\n', (1389, 1398), True, 'import numpy as np\n'), ((1445, 1465), 'numpy.arcsin', 'np.arcsin', (['xyz[:, 2]'], {}), '(xyz[:, 2])\n', (1454, 1465), True, 'import numpy as np\n'), ((1480, 1512), 'numpy.arctan2', 'np.arctan2', (['xyz[:, 1]', 'xyz[:, 0]'], {}), '(xyz[:, 1], xyz[:, 0])\n', (1490, 1512), True, 'import numpy as np\n'), ((654, 676), 'numpy.radians', 'np.radians', (['(wFOV / 2.0)'], {}), '(wFOV / 2.0)\n', (664, 676), True, 'import numpy as np\n'), ((701, 723), 'numpy.radians', 'np.radians', (['(hFOV / 2.0)'], {}), '(hFOV / 2.0)\n', (711, 723), True, 'import numpy as np\n'), ((804, 837), 'numpy.linspace', 'np.linspace', (['(-w_len)', 'w_len', 'width'], {}), '(-w_len, w_len, width)\n', (815, 837), True, 'import numpy as np\n'), ((990, 1029), 'numpy.stack', 'np.stack', (['(x_map, y_map, z_map)'], {'axis': '(2)'}), '((x_map, y_map, z_map), axis=2)\n', (998, 1029), True, 'import numpy as np\n'), ((1027, 1068), 'numpy.repeat', 'np.repeat', (['D[:, :, np.newaxis]', '(3)'], {'axis': '(2)'}), '(D[:, :, np.newaxis], 3, axis=2)\n', (1036, 1068), True, 'import numpy as np\n'), ((1413, 1428), 'numpy.dot', 'np.dot', (['R2', 'xyz'], {}), '(R2, xyz)\n', (1419, 1428), True, 'import numpy as np\n'), ((1229, 1246), 'numpy.radians', 'np.radians', (['THETA'], 
{}), '(THETA)\n', (1239, 1246), True, 'import numpy as np\n'), ((1280, 1298), 'numpy.dot', 'np.dot', (['R1', 'y_axis'], {}), '(R1, y_axis)\n', (1286, 1298), True, 'import numpy as np\n'), ((1301, 1317), 'numpy.radians', 'np.radians', (['(-PHI)'], {}), '(-PHI)\n', (1311, 1317), True, 'import numpy as np\n'), ((875, 909), 'numpy.linspace', 'np.linspace', (['(-h_len)', 'h_len', 'height'], {}), '(-h_len, h_len, height)\n', (886, 909), True, 'import numpy as np\n')] |
"""
This module contains decorators that can be used in model implementation to relate them to each other.
For example, getting dies from a wafer.
"""
from functools import wraps
from typing import Callable, Type
from .mongomodel import MongoModel
def forward_link_one(model_get: Callable[[], Type[MongoModel]]):
"""
Links to a single instance of another model type that is defined in the code after this model
:param model_get: Method that returns the model name of the model that is not yet defined
:return: Decorator that links to a single instance of another model type
"""
def decorator(func: Callable[[MongoModel], dict]):
@wraps(func)
def wrap(self):
return model_get().find_one(query=func(self))
return wrap
return decorator
def link_one(model: Type[MongoModel]):
"""
Links to a single instance of another model type
:param model: The model type to link to
:return: Decorator that links to a single instance of another model type
"""
return forward_link_one(lambda: model)
def forward_link_many(model_get: Callable[[], Type[MongoModel]]):
"""
Links to multiple instances of another model type that is defined in the code after this model
:param model_get: Method that returns the model name of the model that is not yet defined
:return: Decorator that links to multiple instances of another model type
"""
def decorator(func: Callable[[MongoModel], dict]):
@wraps(func)
def wrap(self):
return model_get().find(query=func(self))
return wrap
return decorator
def link_many(model: Type[MongoModel]):
"""
Links to multiple instances of another model type
:param model: The model type to link to
:return: Decorator that links to multiple instances of another model type
"""
return forward_link_many(lambda: model)
| [
"functools.wraps"
] | [((665, 676), 'functools.wraps', 'wraps', (['func'], {}), '(func)\n', (670, 676), False, 'from functools import wraps\n'), ((1495, 1506), 'functools.wraps', 'wraps', (['func'], {}), '(func)\n', (1500, 1506), False, 'from functools import wraps\n')] |
import os
import streamlit as st
import uuid
import streamlit.components.v1 as components
from hydralit_components import IS_RELEASE
if IS_RELEASE:
absolute_path = os.path.dirname(os.path.abspath(__file__))
build_path = os.path.join(absolute_path, "frontend/build")
_component_func = components.declare_component("info_card", path=build_path)
else:
_component_func = components.declare_component("info_card", url="http://localhost:3000")
def info_card(title=None, content=None,sentiment=None,bar_value=None,theme_override=None,key=None):
"""
Creates an info card with a title, content text, display icon and an optional progress like bar, all with custom color and formatting.
Fully suports Font Awesome and Bootstrap icons on or off line.
Parameters
-------------
title: str
The title to use for the info card.
content: str
The text content to use on the info card.
sentiment: str (default None)
An automatic way to color the info card as using a sentiment,for example there are 3 options ('good', 'bad, 'neutral'), you can also specify these details using the theme_override parameter.
'good'
{'bgcolor': '#EFF8F7','title_color': '#2A4657','content_color': 'green','progress_color': 'green','icon_color': 'green', 'icon': 'fa fa-check-circle'}
'bad'
{'bgcolor': '#FFF0F0','title_color': '#2A4657','content_color': 'red','progress_color': 'red','icon_color': 'red', 'icon': 'fa fa-times-circle'}
'neutral'
{'bgcolor': '#fcf8e5','title_color': '#2A4657','content_color': 'orange','progress_color': 'orange','icon_color': 'orange', 'icon': 'fa fa-question-circle'}
bar_value: int (default None)
If a value between 0-100, if specifed, a horizontal progress like bar will appear at the bottom of the info card.
key:
A unique key or name for this component
theme_override: dict
Override the Streamlit theme applied to the card
{'bgcolor': '#EFF8F7','title_color': '#2A4657','content_color': 'green','progress_color': 'green','icon_color': 'green', 'icon': 'fa fa-check-circle'}
Returns
---------
None
"""
if theme_override is None:
if sentiment == 'good':
theme_override = {'bgcolor': '#EFF8F7','title_color': '#2A4657','content_color': 'green','progress_color': 'green','icon_color': 'green', 'icon': 'fa fa-check-circle'}
elif sentiment == 'bad':
theme_override = {'bgcolor': '#FFF0F0','title_color': '#2A4657','content_color': 'red','progress_color': 'red','icon_color': 'red', 'icon': 'fa fa-times-circle'}
elif sentiment == 'neutral':
theme_override = {'bgcolor': '#fcf8e5','title_color': '#2A4657','content_color': 'orange','progress_color': 'orange','icon_color': 'orange', 'icon': 'fa fa-question-circle'}
else:
theme_override = None #{'bgcolor': None,'title_color': None,'content_color': None,'icon_color': None, 'icon': None}
component_value = _component_func(title=title, content=content,bar_value=bar_value, key=key,theme_override=theme_override)
return component_value | [
"os.path.abspath",
"streamlit.components.v1.declare_component",
"os.path.join"
] | [((230, 275), 'os.path.join', 'os.path.join', (['absolute_path', '"""frontend/build"""'], {}), "(absolute_path, 'frontend/build')\n", (242, 275), False, 'import os\n'), ((298, 356), 'streamlit.components.v1.declare_component', 'components.declare_component', (['"""info_card"""'], {'path': 'build_path'}), "('info_card', path=build_path)\n", (326, 356), True, 'import streamlit.components.v1 as components\n'), ((385, 455), 'streamlit.components.v1.declare_component', 'components.declare_component', (['"""info_card"""'], {'url': '"""http://localhost:3000"""'}), "('info_card', url='http://localhost:3000')\n", (413, 455), True, 'import streamlit.components.v1 as components\n'), ((186, 211), 'os.path.abspath', 'os.path.abspath', (['__file__'], {}), '(__file__)\n', (201, 211), False, 'import os\n')] |
import os
MAIN_DIRECTORY = os.path.dirname(os.path.realpath(__file__))
| [
"os.path.realpath"
] | [((44, 70), 'os.path.realpath', 'os.path.realpath', (['__file__'], {}), '(__file__)\n', (60, 70), False, 'import os\n')] |
import unittest
from typing import Any, Mapping, Optional, Type
import torch
from kgm.data import KnowledgeGraphAlignmentDataset, get_erdos_renyi
from kgm.data.knowledge_graph import sub_graph_alignment
from kgm.models import GCNAlign, get_matching_model_by_name
from kgm.models.matching.base import KGMatchingModel
from kgm.utils.common import kwargs_or_empty
def _random_tensor(num_nodes: int, num_edges: int) -> torch.LongTensor:
return torch.randint(num_nodes, size=(2, num_edges))
def _get_cycle_edge_tensor(num_nodes: int) -> torch.LongTensor:
source = torch.arange(num_nodes)
target = torch.cat([source[-1:], source[:-1]], dim=0)
return torch.stack([source, target], dim=0)
class _KGMatchingTests:
num_entities: int = 33
num_relations: int = 2
embedding_dim: int = 7
model_cls: Type[KGMatchingModel]
model_kwargs: Optional[Mapping[str, Any]] = None
model: KGMatchingModel
dataset: KnowledgeGraphAlignmentDataset
def setUp(self) -> None:
self.dataset = sub_graph_alignment(
graph=get_erdos_renyi(
num_entities=self.num_entities,
num_relations=self.num_relations,
p=.5,
),
)
self.model = self.model_cls(
num_nodes=self.dataset.num_nodes,
embedding_dim=self.embedding_dim,
**(kwargs_or_empty(kwargs=self.model_kwargs))
)
def test_name_resolution(self):
name = self.model_cls.__name__.lower()
model_cls = get_matching_model_by_name(name=name)
assert model_cls == self.model_cls
def test_forward(self):
self.model.set_edge_tensors_(edge_tensors=self.dataset.edge_tensors)
enriched_embeddings = self.model()
assert len(enriched_embeddings) == len(self.dataset.edge_tensors)
for side, size in self.dataset.num_nodes.items():
assert enriched_embeddings[side].shape == (size, self.embedding_dim)
def test_to(self):
device = torch.device('cuda') if torch.cuda.is_available() else torch.device('cpu')
self.model.set_edge_tensors_(edge_tensors=self.dataset.edge_tensors)
# send to device
model_on_device = self.model.to(device=device)
assert model_on_device is not None
# check that all attributes reside on device
for p in self.model.parameters(recurse=True):
assert p.device == device
def test_reset_parameters(self):
self.model.reset_parameters()
class GCNAlignTests(_KGMatchingTests, unittest.TestCase):
    """Run the shared KG matching model tests against GCNAlign."""
    model_cls = GCNAlign
if __name__ == '__main__':
    # Run the tests in this module via unittest's CLI entry point.
    unittest.main()
| [
"kgm.data.get_erdos_renyi",
"kgm.models.get_matching_model_by_name",
"torch.stack",
"kgm.utils.common.kwargs_or_empty",
"torch.randint",
"torch.arange",
"torch.cuda.is_available",
"unittest.main",
"torch.cat",
"torch.device"
] | [((448, 493), 'torch.randint', 'torch.randint', (['num_nodes'], {'size': '(2, num_edges)'}), '(num_nodes, size=(2, num_edges))\n', (461, 493), False, 'import torch\n'), ((573, 596), 'torch.arange', 'torch.arange', (['num_nodes'], {}), '(num_nodes)\n', (585, 596), False, 'import torch\n'), ((610, 654), 'torch.cat', 'torch.cat', (['[source[-1:], source[:-1]]'], {'dim': '(0)'}), '([source[-1:], source[:-1]], dim=0)\n', (619, 654), False, 'import torch\n'), ((666, 702), 'torch.stack', 'torch.stack', (['[source, target]'], {'dim': '(0)'}), '([source, target], dim=0)\n', (677, 702), False, 'import torch\n'), ((2629, 2644), 'unittest.main', 'unittest.main', ([], {}), '()\n', (2642, 2644), False, 'import unittest\n'), ((1527, 1564), 'kgm.models.get_matching_model_by_name', 'get_matching_model_by_name', ([], {'name': 'name'}), '(name=name)\n', (1553, 1564), False, 'from kgm.models import GCNAlign, get_matching_model_by_name\n'), ((2036, 2061), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (2059, 2061), False, 'import torch\n'), ((2012, 2032), 'torch.device', 'torch.device', (['"""cuda"""'], {}), "('cuda')\n", (2024, 2032), False, 'import torch\n'), ((2067, 2086), 'torch.device', 'torch.device', (['"""cpu"""'], {}), "('cpu')\n", (2079, 2086), False, 'import torch\n'), ((1064, 1157), 'kgm.data.get_erdos_renyi', 'get_erdos_renyi', ([], {'num_entities': 'self.num_entities', 'num_relations': 'self.num_relations', 'p': '(0.5)'}), '(num_entities=self.num_entities, num_relations=self.\n num_relations, p=0.5)\n', (1079, 1157), False, 'from kgm.data import KnowledgeGraphAlignmentDataset, get_erdos_renyi\n'), ((1370, 1411), 'kgm.utils.common.kwargs_or_empty', 'kwargs_or_empty', ([], {'kwargs': 'self.model_kwargs'}), '(kwargs=self.model_kwargs)\n', (1385, 1411), False, 'from kgm.utils.common import kwargs_or_empty\n')] |
import os
import unittest
import pandas as pd
from ai4good.runner.facade import Facade
from ai4good.models.model_registry import get_models, create_params
class InitialiseParameters(unittest.TestCase):
    """Tests that compartmental-model parameters initialise correctly."""
    def setUp(self) -> None:
        # Build a facade and the compartmental model wired to its parameter store.
        self.facade = Facade.simple()
        self.mdl = get_models()['compartmental-model'](self.facade.ps)
    def test_cm_category(self):
        # Parameters built from the first bundled profile for camp 'Moria'.
        params = create_params(self.facade.ps, 'compartmental-model',
                               self.facade.ps.get_profiles('compartmental-model')[0],
                               'Moria', None)
        # Every calculated category has a matching 'C'-prefixed change category.
        self.assertCountEqual(params.change_in_categories, ['C' + x for x in params.calculated_categories])
        self.assertEqual(len(params.categories), 2*len(params.calculated_categories) + 1)
        self.assertEqual(params.control_dict['better_hygiene']['value'],
                         params.better_hygiene)
    def test_cm_custom_profile(self):
        # Parameters built from a custom CSV profile must reflect its values.
        custom_profile_df = pd.read_csv(os.path.join(os.path.dirname(__file__), 'resources/profile.csv'))
        params = create_params(self.facade.ps, 'compartmental-model',
                               custom_profile_df, 'Moria', None)
        # ICU capacity from the profile is normalised by total population.
        self.assertEqual(params.control_dict['ICU_capacity']['value'],
                         int(custom_profile_df[custom_profile_df['Parameter'] == 'ICU_capacity']['Value'])/params.population)
        self.assertEqual(params.control_dict['better_hygiene']['value'],
                         float(custom_profile_df[custom_profile_df['Parameter'] == 'better_hygiene']['Value']))
        self.assertIsNotNone(params.control_dict['remove_symptomatic']['timing'])
        self.assertIsNotNone(params.control_dict['remove_high_risk']['n_categories_removed'])
        self.assertEqual(params.control_dict['numberOfIterations'], 2)
        self.assertEqual(params.control_dict['t_sim'], 200)
| [
"os.path.dirname",
"ai4good.models.model_registry.get_models",
"ai4good.models.model_registry.create_params",
"ai4good.runner.facade.Facade.simple"
] | [((255, 270), 'ai4good.runner.facade.Facade.simple', 'Facade.simple', ([], {}), '()\n', (268, 270), False, 'from ai4good.runner.facade import Facade\n'), ((1059, 1149), 'ai4good.models.model_registry.create_params', 'create_params', (['self.facade.ps', '"""compartmental-model"""', 'custom_profile_df', '"""Moria"""', 'None'], {}), "(self.facade.ps, 'compartmental-model', custom_profile_df,\n 'Moria', None)\n", (1072, 1149), False, 'from ai4good.models.model_registry import get_models, create_params\n'), ((290, 302), 'ai4good.models.model_registry.get_models', 'get_models', ([], {}), '()\n', (300, 302), False, 'from ai4good.models.model_registry import get_models, create_params\n'), ((989, 1014), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (1004, 1014), False, 'import os\n')] |
import numpy as np
try:
import matplotlib.pyplot as plt
except ImportError: # mpl is optional
pass
def compareplot(comp_df, insample_dev=True, se=True, dse=True, ax=None,
                plot_kwargs=None):
    """
    Model comparison summary plot in the style of the one used in the book
    Statistical Rethinking by <NAME>.
    Parameters
    ----------
    comp_df: DataFrame
        the result of the `pm.compare()` function
    insample_dev : bool
        plot the in-sample deviance, that is the value of the IC without the
        penalization given by the effective number of parameters (pIC).
        Defaults to True
    se : bool
        plot the standard error of the IC estimate. Defaults to True
    dse : bool
        plot standard error of the difference in IC between each model and the
        top-ranked model. Defaults to True
    plot_kwargs : dict
        Optional arguments for plot elements. Currently accepts 'color_ic',
        'marker_ic', 'color_insample_dev', 'marker_insample_dev', 'color_dse',
        'marker_dse', 'ls_min_ic' 'color_ls_min_ic', 'fontsize'
    ax : axes
        Matplotlib axes. Defaults to None
    Returns
    -------
    ax : matplotlib axes
    """
    if ax is None:
        _, ax = plt.subplots()
    if plot_kwargs is None:
        plot_kwargs = {}
    # Two y positions per model (a model row plus an in-between row used for
    # the dSE error bars); `step` is the (negative) spacing between ticks.
    yticks_pos, step = np.linspace(0, -1, (comp_df.shape[0] * 2) - 1,
                                   retstep=True)
    # Shift the in-between rows half a step so dSE bars sit between models.
    yticks_pos[1::2] = yticks_pos[1::2] + step / 2
    yticks_labels = [''] * len(yticks_pos)
    ic = 'WAIC'
    # Fall back to LOO when the comparison table was built with that IC.
    if ic not in comp_df.columns:
        ic = 'LOO'
    if dse:
        yticks_labels[0] = comp_df.index[0]
        yticks_labels[2::2] = comp_df.index[1:]
        ax.set_yticks(yticks_pos)
        # dSE bars for every model except the top-ranked one (its dSE is 0).
        ax.errorbar(x=comp_df[ic].iloc[1:],
                    y=yticks_pos[1::2],
                    xerr=comp_df.dSE[1:],
                    color=plot_kwargs.get('color_dse', 'grey'),
                    fmt=plot_kwargs.get('marker_dse', '^'))
    else:
        yticks_labels = comp_df.index
        ax.set_yticks(yticks_pos[::2])
    if se:
        ax.errorbar(x=comp_df[ic],
                    y=yticks_pos[::2],
                    xerr=comp_df.SE,
                    color=plot_kwargs.get('color_ic', 'k'),
                    fmt=plot_kwargs.get('marker_ic', 'o'),
                    mfc='None',
                    mew=1)
    else:
        ax.plot(comp_df[ic],
                yticks_pos[::2],
                color=plot_kwargs.get('color_ic', 'k'),
                marker=plot_kwargs.get('marker_ic', 'o'),
                mfc='None',
                mew=1,
                lw=0)
    if insample_dev:
        # In-sample deviance = IC minus twice the effective-parameter penalty.
        ax.plot(comp_df[ic] - (2 * comp_df['p'+ic]),
                yticks_pos[::2],
                color=plot_kwargs.get('color_insample_dev', 'k'),
                marker=plot_kwargs.get('marker_insample_dev', 'o'),
                lw=0)
    # Vertical guide line at the best (lowest) IC value.
    ax.axvline(comp_df[ic].iloc[0],
               ls=plot_kwargs.get('ls_min_ic', '--'),
               color=plot_kwargs.get('color_ls_min_ic', 'grey'))
    ax.set_xlabel('Deviance', fontsize=plot_kwargs.get('fontsize', 14))
    ax.set_yticklabels(yticks_labels)
    ax.set_ylim(-1 + step, 0 - step)
    return ax
| [
"numpy.linspace",
"matplotlib.pyplot.subplots"
] | [((1348, 1406), 'numpy.linspace', 'np.linspace', (['(0)', '(-1)', '(comp_df.shape[0] * 2 - 1)'], {'retstep': '(True)'}), '(0, -1, comp_df.shape[0] * 2 - 1, retstep=True)\n', (1359, 1406), True, 'import numpy as np\n'), ((1255, 1269), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {}), '()\n', (1267, 1269), True, 'import matplotlib.pyplot as plt\n')] |
#!/usr/bin/env python3
import binascii
import itertools
from Crypto.Cipher import AES
HASH_LEN = 32  # bytes per hash value — not referenced below, TODO confirm use
DATA_LEN = 204  # number of data positions processed by main()'s loop
KEY_LEN = 42  # length of the key being reconstructed
FIB_OFFSET = 4919  # offset into the Fibonacci sequence used to derive key positions
def fib_seq(n):
    """Return the Fibonacci sequence [0, 1, 1, 2, ...] extended up to length n.

    Always contains at least the two seed values, even for n < 2.
    """
    seq = [0, 1]
    while len(seq) < n:
        seq.append(seq[-1] + seq[-2])
    return seq
def gen_keys():
    """Enumerate all 65536 candidate 32-byte AES keys built by repeating a 2-byte pattern."""
    return [
        bytes([hi, lo]) * 16
        for hi, lo in itertools.product(range(256), range(256))
    ]
def valid_hash(h):
    """Return True iff every byte of *h* decodes to an alphanumeric character."""
    for byte in h:
        if not chr(byte).isalnum():
            return False
    return True
def is_printable(c):
    """Return True when byte value *c* is printable ASCII (space through '~').

    Fix: the upper bound is 126 ('~'); 127 is DEL, a control character,
    and was previously (incorrectly) accepted.
    """
    return 32 <= c <= 126
def print_key(key):
    """Print the key on one line, rendering still-unknown bytes (-1) as '?'."""
    rendered = ''.join('?' if b == -1 else chr(b) for b in key)
    print(rendered)
def main():
    """Recover the repeating 2-byte AES-ECB key from the hex blocks in hash.txt."""
    # Precompute enough Fibonacci numbers to index every key/data position.
    FIB = fib_seq(KEY_LEN + DATA_LEN + FIB_OFFSET)
    KEY = [-1 for _ in range(KEY_LEN)]  # -1 marks a still-unknown key byte
    with open('hash.txt', 'rb') as f:
        hash_blob = f.read().rstrip(b'\n')
    possible_keys = gen_keys()
    block_num = 0
    i = 0
    while i < DATA_LEN:
        # Each 64 hex characters decrypt to one 32-byte block.
        hash_block = hash_blob[block_num * 64:(block_num + 1) * 64]
        enc_data_block = binascii.unhexlify(hash_block)
        block_num += 1
        # Each block pins down two key bytes at Fibonacci-derived positions.
        key1_pos = (i + FIB[FIB_OFFSET + i]) % KEY_LEN
        i += 1
        key2_pos = (i + FIB[FIB_OFFSET + i]) % KEY_LEN
        i += 1
        for possible_key in possible_keys:
            # Brute-force the 2-byte repeating key: a correct guess yields an
            # all-alphanumeric plaintext (a valid hash string).
            pt = AES.new(possible_key, AES.MODE_ECB).decrypt(enc_data_block)
            if valid_hash(pt):
                KEY[key1_pos] = possible_key[0]
                KEY[key2_pos] = possible_key[1]
                print_key(KEY)
                break
if __name__ == '__main__':
    # Script entry point: reconstruct and print the key from hash.txt.
    main()
| [
"Crypto.Cipher.AES.new",
"binascii.unhexlify"
] | [((1020, 1050), 'binascii.unhexlify', 'binascii.unhexlify', (['hash_block'], {}), '(hash_block)\n', (1038, 1050), False, 'import binascii\n'), ((1277, 1312), 'Crypto.Cipher.AES.new', 'AES.new', (['possible_key', 'AES.MODE_ECB'], {}), '(possible_key, AES.MODE_ECB)\n', (1284, 1312), False, 'from Crypto.Cipher import AES\n')] |
# Generated by Django 2.2.3 on 2019-07-09 17:10
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Adds an optional 'category' foreign key to the Board model."""
    dependencies = [
        ('boards', '0001_initial'),
    ]
    operations = [
        migrations.AddField(
            model_name='board',
            name='category',
            # NOTE(review): parent_link=True is unusual on a plain ForeignKey
            # (normally used with OneToOneField for MTI) — confirm intent.
            field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, parent_link=True, to='boards.Category'),
        ),
    ]
| [
"django.db.models.ForeignKey"
] | [((356, 486), 'django.db.models.ForeignKey', 'models.ForeignKey', ([], {'blank': '(True)', 'null': '(True)', 'on_delete': 'django.db.models.deletion.CASCADE', 'parent_link': '(True)', 'to': '"""boards.Category"""'}), "(blank=True, null=True, on_delete=django.db.models.\n deletion.CASCADE, parent_link=True, to='boards.Category')\n", (373, 486), False, 'from django.db import migrations, models\n')] |
import numpy as np
import re
from nltk import Tree
from nltk import induce_pcfg
from nltk import Nonterminal
from nltk.parse.generate import generate
epsilon = 1e-20  # tiny additive constant used to avoid taking log(0)
class corpus:
    """Stores all sentence forms in the data, bucketed into six frequency levels.

    Level 1 is the most restrictive (only very frequent forms); level 6
    accepts every form. A form that qualifies for level k also appears in
    every level above k.
    """

    # Minimum frequency required for a sentence type to enter each level;
    # level 6 accepts everything. Replaces the previous duplicated if-cascade.
    LEVEL_THRESHOLDS = {1: 500, 2: 300, 3: 100, 4: 50, 5: 10, 6: 0}

    def __init__(self):
        # one dict of {sentence_form: frequency} per level 1..6
        self.sentence_forms = {level: {} for level in range(1, 7)}
        self.corp = []

    def sort_sentence_types(self, types):
        """Assign each sentence type to every level whose threshold it meets.

        types: mapping of sentence form -> frequency. Trailing newlines are
        stripped from the form because they should not count as terminals.
        """
        for t, freq in types.items():
            key = t.rstrip("\n")
            for level, threshold in self.LEVEL_THRESHOLDS.items():
                if freq >= threshold:
                    self.sentence_forms[level][key] = freq
# Labels for the three grammar classes compared in this analysis.
FREE = "Free"
PRG = "Regular"
PCFG = "Context Free"
def geometric(n, p):
    """Geometric distribution pmf: P(N = n) = p * (1 - p)**(n - 1)."""
    tail = np.power(1.0 - p, n - 1, dtype=np.float64)
    return p * tail
def compute_prior(G, corpus, n, level, flag=False): # flag for NLTK
    """Log prior of grammar G under geometric priors on its size.

    G: mapping of productions to probabilities (ignored when flag=True).
    corpus: corpus object, or a list of productions when flag=True.
    n: number of non-terminals in G.
    level: corpus level used for vocabulary size (ignored when flag=True).
    flag: True for NLTK-style inputs (corpus is itself the production list).
    """
    # P : number of productions for grammar G
    # n: number of non terminals for grammar G
    # V: Vocabulary size = # num non terminals + # num terminals = len(corpus[level])
    productions = None
    if flag:
        productions = corpus
    else:
        productions = G
    P = len(productions)
    V = None
    if flag:
        V = len(corpus)
    else:
        V = len(corpus.sentence_forms[level])
    # geometric priors over counts; epsilon guards against log(0)
    prob_P = np.log(geometric(P, 0.5)+epsilon)
    prob_n = np.log(geometric(n, 0.5)+epsilon)
    log_prior = prob_P + prob_n
    for i in range(P):
        if flag:
            N_i = len(productions[i])
        else:
            N_i = len(list(productions.keys())[i])# num symbols for production i
        prob_N_i = geometric(N_i, 0.5)
        # each symbol of production i is drawn uniformly from the vocabulary
        log_prior -= (N_i * np.log(V))
        # NOTE(review): prob_N_i is added as a raw probability while every
        # other term is a log-probability — possibly should be
        # np.log(prob_N_i); kept as-is to preserve behaviour.
        log_prior += prob_N_i
    return log_prior
def compute_log_likelihood(corpus, G, T, level, flag=False):
    """Log likelihood of the corpus under grammar G.

    corpus: corpus object, or a raw sentence list when flag=True.
    G: mapping of productions to probabilities.
    T, level: grammar type label and corpus level (level unused when flag=True).
    """
    # k: number of unique sentence types in corpus
    log_likelihood = 0
    D = None
    k = None
    if flag:
        k = len(corpus)
        D = corpus
    else:
        D = corpus.corp # sentence forms at specified level in corpus
        k = len(D) # get num diff sentence forms at given level
    productions = G
    for i in range(k):
        sl = None
        if flag:
            # NOTE(review): this recomputes the likelihood of the same first
            # 50 items on every iteration — possibly intended to use D[i];
            # confirm against the original experiment.
            sl = compute_sentence_likelihood_nltk(productions, D[:50])
        else:
            sentence_i = D[i].split(" ")
            sl = compute_sentence_likelihood(sentence_i, productions)
        # zero-likelihood sentences are skipped to avoid log(0)
        if sl != 0:
            log_likelihood += np.log(sl)
    return log_likelihood
def compute_sentence_likelihood(S_i, productions):
    """Sum the probabilities of every production 'A->B' whose symbol pair
    occurs as adjacent tokens in the sentence S_i (all parses summed)."""
    total = 0
    for production, weight in productions.items():
        # productions are encoded as "lhs->rhs" (exactly two symbols)
        parts = production.split("->")
        lhs = parts[0]
        rhs = parts[1]
        for idx in range(len(S_i) - 1):
            if S_i[idx] == lhs and S_i[idx + 1] == rhs:
                total += weight
    return total
def compute_sentence_likelihood_nltk(G, productions):
    """Accumulate log-probabilities of grammar rules 'A -> B' (NLTK format)
    whose symbol pair occurs as adjacent tokens in the token sequence."""
    log_prob = 0
    tokens = productions
    for rule, weight in G.items():
        # NLTK rules are rendered as "lhs -> rhs"
        parts = rule.split(" -> ")
        lhs = parts[0]
        rhs = parts[1]
        for idx, tok in enumerate(tokens[:-1]):
            if tok == lhs and tokens[idx + 1] == rhs:
                log_prob += np.log(weight)
    return log_prob
def compute_log_posterior(log_prior, log_likelihood):
    """Unnormalised log posterior: prior + likelihood + a uniform log(1/3)
    prior over the three grammar classes."""
    log_class_prior = np.log(1.0 / 3.0)
    return log_prior + log_likelihood + log_class_prior
def test_functions(adam_levelk, k):
    """Compute (log prior, log likelihood, log posterior) for corpus level k.

    adam_levelk: mapping of sentence form -> frequency for that level.
    NOTE(review): relies on the module-level globals `data` and `PCFG`,
    which only exist after the __main__ block has run — confirm call order.
    """
    terminal_pattern = "[.?!]"
    levelk_terminal = 0
    # count forms containing sentence-final punctuation (treated as terminals)
    for j in adam_levelk.keys():
        terminal = re.search(terminal_pattern, j)
        if terminal:
            levelk_terminal += 1
    # #turn grammar into probabilities
    total = sum(adam_levelk.values())
    adam_levelk_probabilities = {}
    for j in adam_levelk.keys():
        adam_levelk_probabilities[j] = adam_levelk[j]/total
    levelk_nonterminal = (len(adam_levelk) - levelk_terminal)
    prior = compute_prior(adam_levelk_probabilities, data, levelk_nonterminal, k)
    likelihood = compute_log_likelihood(data, adam_levelk_probabilities, PCFG, k)
    logpost = compute_log_posterior(prior, likelihood)
    return prior, likelihood, logpost
import os
directory = "Adam/"  # folder containing the CHILDES 'Adam' transcript files
people = ["*MOT", "*URS", "*RIC", "*COL", "*CHI"]  # speaker tags; all but the last are matched in read_and_return
def read_and_return(directory):
    """Parse every transcript file in *directory*.

    Returns (speakers, struct): per-file lists of adult-speaker utterance
    lines and of the %mor morphological lines that follow them.

    Fix: the open() call previously hardcoded the "Adam/" prefix instead of
    using the `directory` parameter; os.path.join(directory, ...) is used now
    (identical behaviour for the default directory).
    """
    speakers = {}
    struct = {}
    append_next = False
    for file_path in os.listdir(directory):
        with open(os.path.join(directory, file_path), "r") as f:
            speakers[file_path] = []
            struct[file_path] = []
            for line in f:
                split = line.split(" ")
                if append_next and split[0][:4] == "%mor":
                    # morphological tier belonging to the previous adult utterance
                    content = split[0].split("\t")[-1]
                    struct[file_path].append(content.split(" "))
                elif split[0][:4] in people[:-1]:
                    # adult speaker line (the child, last in `people`, is skipped)
                    speakers[file_path].append(split)
                    append_next = True
                else:
                    append_next = False
    return speakers, struct
def loadTrees(path):
    """Read a parse-tree file and return the NLTK Trees it contains.

    Entries are separated by blank lines and flattened to single-line
    strings before parsing. Only entries containing 'R' are kept —
    presumably a root-marker filter; TODO confirm.
    """
    with open (path, 'r') as f:
        data = f.read().split("\n\n")
    flattened_data = []
    for i in range(len(data)):
        #flatten it and strip extra whitespace
        flattened_data.append(" ".join(data[i].replace("\n", "").split()))
    tree = []
    # the last two entries are skipped (trailing splits at end of file)
    for i, s in enumerate(flattened_data[:-2]):
        if "R" in s:
            tree.append(Tree.fromstring(s))
    return tree
def productionsFromTrees(trees):
    """Concatenate the production rules of every parse tree, in order."""
    return [
        production
        for tree in trees
        for production in tree.productions()
    ]
def inducePCFGFromProductions(productions):
    """Induce a probabilistic CFG rooted at the non-terminal S via NLTK."""
    S = Nonterminal('S')
    grammar = induce_pcfg(S, productions)
    return grammar
if __name__ == "__main__":
    speakers, struct = read_and_return(directory) # this function was used before perfors sent his data
    corp = []
    types = {}
    # Build the corpus of POS sentence forms and count adjacent-token
    # ("bigram production") frequencies.
    for fp in struct:
        for segments in struct[fp]:
            t = ""
            for s in segments[:-1]:
                # strip morphological annotations: keep the bare POS tag
                token = s.split("|")[0].split(":")[0]
                if ("#" in token):
                    token = token.split("#")[1]
                t += token + " "
            corp.append(t[:-1])
            splitter = t.split(" ")[:-1]
            for i in range(len(splitter)):
                if (i < (len(splitter) - 1)):
                    tok = splitter[i] + "->" + splitter[i+1]
                    if tok in types:
                        types[tok] += 1
                    else:
                        types[tok] = 1
    # Bucket the counted forms into the six frequency levels.
    data = corpus()
    data.sort_sentence_types(types)
    data.corp = corp
    adam_level1 = data.sentence_forms[1]
    adam_level2 = data.sentence_forms[2]
    adam_level3 = data.sentence_forms[3]
    adam_level4 = data.sentence_forms[4]
    adam_level5 = data.sentence_forms[5]
    adam_level6 = data.sentence_forms[6]
    # Report prior/likelihood/posterior for each frequency level.
    print("FREQUENCY WEIGHTED CFG")
    for i in range(6):
        print("----------------")
        print("LEVEL " + str(i+1))
        prior, likelihood, logpost = test_functions(data.sentence_forms[i+1], i+1)
        print("Log Prior: " + str(prior))
        print("Log Likelihood: " + str(likelihood))
        print("Log Posterior: " + str(logpost))
    # Induce a PCFG from the parsed Brown/Adam trees and score it too.
    trees = loadTrees("Parsetree/brown-adam.parsed")
    productions = productionsFromTrees(trees)
    nltkgrammar = inducePCFGFromProductions(productions)
    # Parse NLTK's string rendering back into a {rule: probability} dict.
    grammarToParse = str(nltkgrammar).split("\n")
    finalGrammar = []
    grammarDict = {}
    for g in grammarToParse:
        finalGrammar.append(g[4:])
    for fg in finalGrammar[1:]:
        gg = fg.split("[")
        rule = gg[0][:-1]
        value = gg[1][:-1]
        grammarDict[rule] = float(value)
    # Count rules containing sentence-final punctuation (terminal rules).
    terminal_pattern = "[.?!]"
    terminal_sum = 0
    for j in grammarDict.keys():
        terminal = re.search(terminal_pattern, j)
        if terminal:
            terminal_sum += 1
    print("PROBABALISTIC PCFG")
    prior = compute_prior(grammarDict, productions, terminal_sum, 0, True)
    print("Log Prior: " + str(prior))
    likelihood = compute_log_likelihood(productions, grammarDict, PCFG, 0, True)
    print("Log Likelihood: " + str(likelihood))
    logpost = compute_log_posterior(prior, likelihood)
    print("Log Posterior: " + str(logpost))
| [
"os.listdir",
"numpy.power",
"nltk.Nonterminal",
"numpy.log",
"nltk.Tree.fromstring",
"nltk.induce_pcfg",
"re.search"
] | [((5787, 5808), 'os.listdir', 'os.listdir', (['directory'], {}), '(directory)\n', (5797, 5808), False, 'import os\n'), ((7044, 7060), 'nltk.Nonterminal', 'Nonterminal', (['"""S"""'], {}), "('S')\n", (7055, 7060), False, 'from nltk import Nonterminal\n'), ((7075, 7102), 'nltk.induce_pcfg', 'induce_pcfg', (['S', 'productions'], {}), '(S, productions)\n', (7086, 7102), False, 'from nltk import induce_pcfg\n'), ((2104, 2146), 'numpy.power', 'np.power', (['(1.0 - p)', '(n - 1)'], {'dtype': 'np.float64'}), '(1.0 - p, n - 1, dtype=np.float64)\n', (2112, 2146), True, 'import numpy as np\n'), ((4807, 4824), 'numpy.log', 'np.log', (['(1.0 / 3.0)'], {}), '(1.0 / 3.0)\n', (4813, 4824), True, 'import numpy as np\n'), ((4972, 5002), 're.search', 're.search', (['terminal_pattern', 'j'], {}), '(terminal_pattern, j)\n', (4981, 5002), False, 'import re\n'), ((9249, 9279), 're.search', 're.search', (['terminal_pattern', 'j'], {}), '(terminal_pattern, j)\n', (9258, 9279), False, 'import re\n'), ((2992, 3001), 'numpy.log', 'np.log', (['V'], {}), '(V)\n', (2998, 3001), True, 'import numpy as np\n'), ((3747, 3757), 'numpy.log', 'np.log', (['sl'], {}), '(sl)\n', (3753, 3757), True, 'import numpy as np\n'), ((4682, 4694), 'numpy.log', 'np.log', (['G[p]'], {}), '(G[p])\n', (4688, 4694), True, 'import numpy as np\n'), ((6811, 6829), 'nltk.Tree.fromstring', 'Tree.fromstring', (['s'], {}), '(s)\n', (6826, 6829), False, 'from nltk import Tree\n')] |
from elasticsearch import helpers, Elasticsearch
import csv
def read_mapping():
    """Read books-mapping.json and return it as a single-line JSON string.

    On any read error the exception is printed and None is returned
    (best-effort behaviour; callers must tolerate None).
    """
    try:
        with open('books-mapping.json', 'r') as file:
            mapping = file.read().replace('\n', '')
            return mapping
    except Exception as e:
        print(e)
es = Elasticsearch()  # connect to the default local Elasticsearch instance
index_name = 'books'
mapping = read_mapping()
try:
    # Create the index (ignore 400 = already exists) and bulk-load the CSV rows.
    es.indices.create(index=index_name, ignore=400, body=mapping)
    with open('../data/books.csv') as f:
        reader = csv.DictReader(f)
        helpers.bulk(es, reader, index=index_name) #, doc_type='my-type'
        print("Loading done successfully!")
except Exception as e:
    print(e)
| [
"elasticsearch.helpers.bulk",
"elasticsearch.Elasticsearch",
"csv.DictReader"
] | [((271, 286), 'elasticsearch.Elasticsearch', 'Elasticsearch', ([], {}), '()\n', (284, 286), False, 'from elasticsearch import helpers, Elasticsearch\n'), ((463, 480), 'csv.DictReader', 'csv.DictReader', (['f'], {}), '(f)\n', (477, 480), False, 'import csv\n'), ((489, 531), 'elasticsearch.helpers.bulk', 'helpers.bulk', (['es', 'reader'], {'index': 'index_name'}), '(es, reader, index=index_name)\n', (501, 531), False, 'from elasticsearch import helpers, Elasticsearch\n')] |
import bpy
import numpy as np
from PIL import Image
class CarModelViewToImage():
    """Render a car model in Blender to colour and depth images from
    configurable camera positions (single views or a 360-degree sweep).

    Call init() once with scene/viewport information, then use
    get_single_image() / get_surround_image().
    """

    def init(self, info):
        """
        Set up scene size, model scale, lights, camera and compositor nodes.

        info: {
            "car_width" : float,
            "car_length": float,
            "viewport_width" : float,
            "viewport_height" : float,
            "image_folder" : string
        }
        """
        # get base information
        self.car_width_ = info["car_width"]
        self.car_length_ = info["car_length"]
        self.viewport_width_ = info["viewport_width"]
        self.viewport_height_ = info["viewport_height"]
        self.image_folder_ = info["image_folder"]
        # scene extents are derived from the car length
        self.scene_length_ = self.car_length_ * 2
        self.scene_height_ = self.car_length_
        bpy.context.scene.render.resolution_x = self.viewport_width_
        bpy.context.scene.render.resolution_y = self.viewport_height_
        bpy.context.scene.render.filepath = self.image_folder_
        # grab the model and its lights from the scene
        self.car_ = bpy.data.objects["car"]
        self.light_ctr_ = [bpy.data.objects["left_light"],
                           bpy.data.objects["right_light"], bpy.data.objects["top_light"]]
        # move model and lights so the car sits at the origin
        offset = self.car_.location.copy()
        self.car_.location -= offset
        for l in self.light_ctr_:
            l.location -= offset
        # rescale the car (and lights with it) so its longest dimension
        # equals the requested car_length
        car_length_now = max(self.car_.dimensions)
        scale_size = self.car_length_ / car_length_now
        self.car_.scale *= scale_size
        for l in self.light_ctr_:
            l.location *= scale_size
            l.scale *= scale_size
        # set camera
        bpy.ops.object.camera_add()
        self.camera_ = bpy.data.objects["Camera"]
        # set camera base info: 90-degree FOV, clip range spanning the scene
        self.camera_.data.lens_unit = "FOV"
        self.camera_.data.angle = np.radians(90)
        self.camera_.data.clip_start = 0.1
        self.camera_.data.clip_end = self.scene_length_ * 2
        # constrain the camera to always track the car
        bpy.ops.object.constraint_add(type="TRACK_TO")
        bpy.context.object.constraints["Track To"].up_axis = 'UP_Y'
        bpy.context.object.constraints["Track To"].track_axis = 'TRACK_NEGATIVE_Z'
        bpy.context.object.constraints["Track To"].target = self.car_
        bpy.context.object.constraints["Track To"].use_target_z = True
        # set up compositor nodes so colour and depth renders can be grabbed
        self.scene_ = bpy.context.scene
        self.scene_.use_nodes = True
        self.tree_ = self.scene_.node_tree
        self.links_ = self.tree_.links
        # clear default nodes
        for n in self.tree_.nodes:
            self.tree_.nodes.remove(n)
        self.render_layer_ = self.tree_.nodes.new('CompositorNodeRLayers')
        self.viewer_image_ = self.tree_.nodes.new('CompositorNodeViewer')
        self.viewer_image_.use_alpha = False

    def set_camera_pos(self, x, y, z=None):
        # convert normalised coordinates (x, y in -1..1, z in 0..1) to scene units
        real_x = np.clip(x, -1, 1) * self.scene_length_
        real_y = np.clip(y, -1, 1) * self.scene_length_
        self.camera_.location[0] = real_x
        self.camera_.location[1] = real_y
        if(z != None):
            real_z = np.clip(z, 0, 1) * self.scene_height_
            self.camera_.location[2] = real_z

    def render_image(self, img_name, folder_path):
        """
        Render and save the colour image plus a binary depth mask
        (the depth image is saved as 'z' + img_name).
        """
        filepath = folder_path + img_name
        filepath_depth = folder_path + "z" + img_name
        # color
        self.links_.clear()
        self.links_.new(self.render_layer_.outputs[0], self.viewer_image_.inputs[0])
        bpy.ops.render.render()
        bpy.data.images[0].save_render(filepath)
        # depth
        self.links_.clear()
        self.links_.new(self.render_layer_.outputs["Depth"], self.viewer_image_.inputs[0])
        bpy.ops.render.render()
        pixels = bpy.data.images['Viewer Node'].pixels
        pixels = np.array(pixels)[::4][::-1]  # RGBA -> take one channel, reversed
        # background pixels carry a huge depth value: map hits to 255, misses to 0
        pixels[pixels < 10000000000.0] = 255
        pixels[pixels >= 10000000000.0] = 0
        pix = pixels.astype(dtype=np.uint8).reshape((self.viewport_height_, self.viewport_width_))
        img = Image.fromarray(pix)
        img = img.transpose(Image.FLIP_LEFT_RIGHT)
        img.save(filepath_depth)

    def get_single_image(self, x, y, z, img_name, folder_path=""):
        """
        Render one view of the car.

        x, y, z: camera position as scene fractions; x, y in -1..1, z in 0..1
        img_name: output file name
        folder_path: output folder; defaults to self.image_folder_ when empty
        """
        # position the camera and make it the active render camera
        self.set_camera_pos(x, y, z)
        bpy.context.scene.camera = self.camera_
        bpy.ops.render.render()
        # BUGFIX: previously the image was also saved unconditionally with the
        # raw (possibly empty) folder_path before this branch, producing a
        # duplicate render/save; now it is saved exactly once.
        if(folder_path != ""):
            self.render_image(img_name, folder_path)
        else:
            self.render_image(img_name, self.image_folder_)

    def get_surround_image(self, xy, z, rotate_stride, folder_path = ""):
        """
        Render a 360-degree sweep around the car, one image per stride angle.

        xy, z: camera position as scene fractions (xy in -1..1, z in 0..1)
        rotate_stride: step angle in degrees between renders
        folder_path: output folder; defaults to self.image_folder_ when empty
        """
        def set_camera_pos(angle, camera_to_origin_length):
            # place the camera on a circle of the given radius at `angle` degrees
            self.camera_.location[0] = camera_to_origin_length * np.cos(np.radians(angle))
            self.camera_.location[1] = camera_to_origin_length * np.sin(np.radians(angle))
        # rotation bookkeeping
        bpy.context.scene.camera = self.camera_
        self.stride_ = rotate_stride
        self.stride_radians_ = np.radians(rotate_stride)
        # set camera parameters; the sweep radius follows from the xy/z fractions
        self.set_camera_pos(xy, 0, z)
        real_xy = self.scene_length_ * np.clip(xy, -1, 1)
        real_z = self.scene_height_ * np.clip(z, 0, 1)
        camera_length = np.sqrt(real_xy**2 + real_z**2)
        for i in range(0, 360, rotate_stride):
            img_name = str(i) + ".jpg"
            set_camera_pos(i, camera_length)
            bpy.context.scene.camera = self.camera_
            bpy.ops.render.render()
            if(folder_path != ""):
                self.render_image(img_name, folder_path)
            else:
                self.render_image(img_name, self.image_folder_)
if __name__ == '__main__':
    # Example configuration: a 30x50 car rendered at 1280x720.
    info = {
        "car_width" : 30,
        "car_length": 50,
        "viewport_width" : 1280,
        "viewport_height" : 720,
        "image_folder" : "E:/company/MyWork/Workspace/CPU_3D/resources/Huake8296/car_image/single/"
    }
    car_view = CarModelViewToImage()
    car_view.init(info)
    #car_view.get_single_image(0, 0, 1, "top_view.jpg")# have a bug
    #car_view.get_surround_image(-0.6, 0.4, 90)
    # Render eight views around the car (axis-aligned and diagonal).
    car_view.get_single_image(0, -0.6, 0.6, "view_front.jpg")
    car_view.get_single_image(0, 0.6, 0.6, "view_back.jpg")
    car_view.get_single_image(0.6, 0, 0.6, "view_left.jpg")
    car_view.get_single_image(-0.6, 0, 0.6, "view_right.jpg")
    car_view.get_single_image(0.6, -0.6, 0.6, "view_left_front.jpg")
    car_view.get_single_image(0.6, 0.6, 0.6, "view_left_back.jpg")
    car_view.get_single_image(-0.6, -0.6, 0.6, "view_right_front.jpg")
    # NOTE(review): right_back reuses (0.6, -0.6), the same coordinates as
    # left_front — likely should be (-0.6, 0.6); confirm intended views.
    car_view.get_single_image(0.6, -0.6, 0.6, "view_right_back.jpg")
| [
"numpy.radians",
"numpy.clip",
"bpy.ops.object.camera_add",
"PIL.Image.fromarray",
"numpy.sqrt",
"bpy.ops.object.constraint_add",
"numpy.array",
"bpy.ops.render.render"
] | [((2180, 2207), 'bpy.ops.object.camera_add', 'bpy.ops.object.camera_add', ([], {}), '()\n', (2205, 2207), False, 'import bpy\n'), ((2367, 2381), 'numpy.radians', 'np.radians', (['(90)'], {}), '(90)\n', (2377, 2381), True, 'import numpy as np\n'), ((2526, 2572), 'bpy.ops.object.constraint_add', 'bpy.ops.object.constraint_add', ([], {'type': '"""TRACK_TO"""'}), "(type='TRACK_TO')\n", (2555, 2572), False, 'import bpy\n'), ((4068, 4091), 'bpy.ops.render.render', 'bpy.ops.render.render', ([], {}), '()\n', (4089, 4091), False, 'import bpy\n'), ((4379, 4402), 'bpy.ops.render.render', 'bpy.ops.render.render', ([], {}), '()\n', (4400, 4402), False, 'import bpy\n'), ((4732, 4752), 'PIL.Image.fromarray', 'Image.fromarray', (['pix'], {}), '(pix)\n', (4747, 4752), False, 'from PIL import Image\n'), ((5163, 5186), 'bpy.ops.render.render', 'bpy.ops.render.render', ([], {}), '()\n', (5184, 5186), False, 'import bpy\n'), ((5990, 6015), 'numpy.radians', 'np.radians', (['rotate_stride'], {}), '(rotate_stride)\n', (6000, 6015), True, 'import numpy as np\n'), ((6232, 6267), 'numpy.sqrt', 'np.sqrt', (['(real_xy ** 2 + real_z ** 2)'], {}), '(real_xy ** 2 + real_z ** 2)\n', (6239, 6267), True, 'import numpy as np\n'), ((3432, 3449), 'numpy.clip', 'np.clip', (['x', '(-1)', '(1)'], {}), '(x, -1, 1)\n', (3439, 3449), True, 'import numpy as np\n'), ((3488, 3505), 'numpy.clip', 'np.clip', (['y', '(-1)', '(1)'], {}), '(y, -1, 1)\n', (3495, 3505), True, 'import numpy as np\n'), ((6134, 6152), 'numpy.clip', 'np.clip', (['xy', '(-1)', '(1)'], {}), '(xy, -1, 1)\n', (6141, 6152), True, 'import numpy as np\n'), ((6191, 6207), 'numpy.clip', 'np.clip', (['z', '(0)', '(1)'], {}), '(z, 0, 1)\n', (6198, 6207), True, 'import numpy as np\n'), ((6470, 6493), 'bpy.ops.render.render', 'bpy.ops.render.render', ([], {}), '()\n', (6491, 6493), False, 'import bpy\n'), ((3656, 3672), 'numpy.clip', 'np.clip', (['z', '(0)', '(1)'], {}), '(z, 0, 1)\n', (3663, 3672), True, 'import numpy as np\n'), ((4484, 4500), 
'numpy.array', 'np.array', (['pixels'], {}), '(pixels)\n', (4492, 4500), True, 'import numpy as np\n'), ((5741, 5758), 'numpy.radians', 'np.radians', (['angle'], {}), '(angle)\n', (5751, 5758), True, 'import numpy as np\n'), ((5832, 5849), 'numpy.radians', 'np.radians', (['angle'], {}), '(angle)\n', (5842, 5849), True, 'import numpy as np\n')] |
import os
from os.path import exists
from os.path import join
from os.path import splitext
from subprocess import check_call
from typing import Dict
from typing import List
from typing import Mapping
from typing import Optional
from .compat import is_posix
from .exc import CommandError
def open_in_editor(
    filename: str, environ: Optional[Dict[str, str]] = None
) -> None:
    """Launch a text editor on *filename*.

    The editor is taken from the ``EDITOR``/``VISUAL`` environment
    variables when set; otherwise a list of commonly installed editors is
    tried. Any failure (including no editor found) is wrapped in
    :py:exc:`CommandError`.

    :param filename: Passed verbatim to the editor command.
    :param environ: Optional drop-in replacement for ``os.environ``,
        mainly for testing.
    """
    env = environ if environ is not None else os.environ
    try:
        check_call([_find_editor(env), filename])
    except Exception as exc:
        raise CommandError("Error executing editor (%s)" % (exc,)) from exc
def _find_editor(environ: Mapping[str, str]) -> str:
    """Pick an editor command, honouring EDITOR/VISUAL before the defaults."""
    candidates = _default_editors()
    for index, variable in enumerate(("EDITOR", "VISUAL")):
        choice = environ.get(variable)
        if choice is None:
            continue
        if exists(choice):
            return choice
        # a bare command name: try it on PATH before the built-in defaults
        if os.sep not in choice:
            candidates.insert(index, choice)
    for name in candidates:
        located = _find_executable(name, environ)
        if located is not None:
            return located
    raise OSError(
        "No suitable editor found. Please set the "
        '"EDITOR" or "VISUAL" environment variables'
    )
def _find_executable(
    candidate: str, environ: Mapping[str, str]
) -> Optional[str]:
    """Resolve *candidate* to an absolute path by searching PATH.

    ``check_call`` needs an absolute location on some platforms, so the
    PATH walk is done here; on Windows an ``.exe`` suffix is appended
    when missing.
    """
    if not is_posix and splitext(candidate)[1] != ".exe":
        candidate += ".exe"
    search_dirs = environ.get("PATH", "").split(os.pathsep)
    for directory in search_dirs:
        full_path = join(directory, candidate)
        if exists(full_path):
            return full_path
    return None
def _default_editors() -> List[str]:
    """Commonly installed editors to try when EDITOR/VISUAL are unset,
    in preference order (nano/vim on POSIX; GUI editors on Windows)."""
    if is_posix:
        return ["sensible-editor", "editor", "nano", "vim", "code"]
    return ["code.exe", "notepad++.exe", "notepad.exe"]
| [
"os.path.exists",
"os.path.splitext",
"os.path.join",
"subprocess.check_call"
] | [((952, 982), 'subprocess.check_call', 'check_call', (['[editor, filename]'], {}), '([editor, filename])\n', (962, 982), False, 'from subprocess import check_call\n'), ((2103, 2124), 'os.path.join', 'join', (['path', 'candidate'], {}), '(path, candidate)\n', (2107, 2124), False, 'from os.path import join\n'), ((2136, 2149), 'os.path.exists', 'exists', (['value'], {}), '(value)\n', (2142, 2149), False, 'from os.path import exists\n'), ((1311, 1330), 'os.path.exists', 'exists', (['user_choice'], {}), '(user_choice)\n', (1317, 1330), False, 'from os.path import exists\n'), ((1966, 1985), 'os.path.splitext', 'splitext', (['candidate'], {}), '(candidate)\n', (1974, 1985), False, 'from os.path import splitext\n')] |
from copy import deepcopy
from uuid import UUID
from django.db import transaction
from django.db.models import F
from django.http import JsonResponse
from django.utils import timezone
from django.utils.timezone import now
from rest_framework import status
from rest_framework.exceptions import PermissionDenied, ValidationError, ParseError
from rest_framework.generics import ListCreateAPIView, RetrieveUpdateDestroyAPIView, UpdateAPIView
from rest_framework.views import APIView
from api.applications import constants
from api.applications.creators import validate_application_ready_for_submission, _validate_agree_to_declaration
from api.applications.enums import ContractType
from api.applications.helpers import (
get_application_create_serializer,
get_application_view_serializer,
get_application_update_serializer,
validate_and_create_goods_on_licence,
auto_match_sanctions,
)
from api.applications.libraries.application_helpers import (
optional_str_to_bool,
can_status_be_set_by_exporter_user,
can_status_be_set_by_gov_user,
create_submitted_audit,
)
from api.applications.libraries.case_status_helpers import submit_application
from api.applications.libraries.edit_applications import (
save_and_audit_have_you_been_informed_ref,
set_case_flags_on_submitted_standard_or_open_application,
)
from api.applications.libraries.get_applications import get_application
from api.applications.libraries.goods_on_applications import add_goods_flags_to_submitted_application
from api.applications.libraries.licence import get_default_duration
from api.applications.models import (
BaseApplication,
HmrcQuery,
SiteOnApplication,
GoodOnApplication,
CountryOnApplication,
ExternalLocationOnApplication,
PartyOnApplication,
F680ClearanceApplication,
)
from api.applications.serializers.exhibition_clearance import ExhibitionClearanceDetailSerializer
from api.applications.serializers.generic_application import (
GenericApplicationListSerializer,
GenericApplicationCopySerializer,
)
from api.audit_trail import service as audit_trail_service
from api.audit_trail.enums import AuditType
from api.cases.enums import AdviceLevel, AdviceType, CaseTypeSubTypeEnum, CaseTypeEnum
from api.cases.generated_documents.models import GeneratedCaseDocument
from api.cases.generated_documents.helpers import auto_generate_case_document
from api.cases.helpers import can_set_status
from api.cases.libraries.get_flags import get_flags
from api.cases.service import get_destinations
from api.cases.tasks import get_application_target_sla
from api.core.authentication import ExporterAuthentication, SharedAuthentication, GovAuthentication
from api.core.constants import ExporterPermissions, GovPermissions, AutoGeneratedDocuments
from api.core.decorators import (
application_in_state,
authorised_to_view_application,
allowed_application_types,
)
from api.core.helpers import convert_date_to_string, str_to_bool
from api.core.permissions import assert_user_has_permission
from api.flags.enums import FlagStatuses, SystemFlags
from api.flags.models import Flag
from api.goods.serializers import GoodCreateSerializer
from api.goodstype.models import GoodsType
from api.licences.enums import LicenceStatus
from api.licences.helpers import get_licence_reference_code, update_licence_status
from api.licences.models import Licence
from api.licences.serializers.create_licence import LicenceCreateSerializer
from lite_content.lite_api import strings
from api.organisations.enums import OrganisationType
from api.organisations.libraries.get_organisation import get_request_user_organisation, get_request_user_organisation_id
from api.organisations.models import Site
from api.staticdata.f680_clearance_types.enums import F680ClearanceTypeEnum
from api.staticdata.statuses.enums import CaseStatusEnum
from api.staticdata.statuses.libraries.case_status_validate import is_case_status_draft
from api.staticdata.statuses.libraries.get_case_status import get_case_status_by_status
from api.users.libraries.notifications import get_case_notifications
from api.users.models import ExporterUser
from api.workflow.automation import run_routing_rules
from api.workflow.flagging_rules_automation import apply_flagging_rules_to_case
class ApplicationList(ListCreateAPIView):
    """List an exporter's applications and create new draft applications."""
    authentication_classes = (ExporterAuthentication,)
    serializer_class = GenericApplicationListSerializer
    def get_queryset(self):
        """
        Return the applications visible to the requesting exporter user.

        Supports an optional ``?submitted=<bool>`` filter. HMRC organisations
        see HmrcQuery records; all others see BaseApplication records.
        Applications tied only to sites the user cannot access are excluded,
        as are HMRC-type cases.
        """
        try:
            submitted = optional_str_to_bool(self.request.GET.get("submitted"))
        except ValueError as e:
            # NOTE(review): returning a JsonResponse from get_queryset is
            # unusual — DRF expects a queryset here; confirm callers cope.
            return JsonResponse(data={"errors": str(e)}, status=status.HTTP_400_BAD_REQUEST)
        organisation = get_request_user_organisation(self.request)
        if organisation.type == OrganisationType.HMRC:
            if submitted is None:
                applications = HmrcQuery.objects.filter(hmrc_organisation=organisation)
            elif submitted:
                applications = HmrcQuery.objects.submitted(hmrc_organisation=organisation)
            else:
                applications = HmrcQuery.objects.drafts(hmrc_organisation=organisation)
        else:
            if submitted is None:
                applications = BaseApplication.objects.filter(organisation=organisation)
            elif submitted:
                applications = BaseApplication.objects.submitted(organisation)
            else:
                applications = BaseApplication.objects.drafts(organisation)
        # Hide applications attached to sites the requesting user cannot see.
        users_sites = Site.objects.get_by_user_and_organisation(self.request.user.exporteruser, organisation)
        disallowed_applications = SiteOnApplication.objects.exclude(site__id__in=users_sites).values_list(
            "application", flat=True
        )
        applications = applications.exclude(id__in=disallowed_applications).exclude(
            case_type_id=CaseTypeEnum.HMRC.id
        )
        return applications.prefetch_related("status", "case_type").select_subclasses()
    def get_paginated_response(self, data):
        # Annotate each page of results with the user's case notifications.
        data = get_case_notifications(data, self.request)
        return super().get_paginated_response(data)
    def post(self, request, **kwargs):
        """
        Create a new draft application of the requested application type.

        Raises ValidationError when no application_type is supplied; returns
        201 with the new application's id on success.
        """
        data = request.data
        if not data.get("application_type"):
            raise ValidationError({"application_type": [strings.Applications.Generic.SELECT_AN_APPLICATION_TYPE]})
        case_type = data.pop("application_type", None)
        serializer = get_application_create_serializer(case_type)
        serializer = serializer(
            data=data,
            case_type_id=CaseTypeEnum.reference_to_id(case_type),
            context=get_request_user_organisation(request),
        )
        if serializer.is_valid(raise_exception=True):
            application = serializer.save()
            return JsonResponse(data={"id": application.id}, status=status.HTTP_201_CREATED)
class ApplicationExisting(APIView):
    """
    Report whether the requesting organisation already has applications:
    HMRC organisations - whether any submitted queries exist
    all other organisations - whether any applications/drafts exist
    """
    authentication_classes = (ExporterAuthentication,)
    def get(self, request):
        org = get_request_user_organisation(request)
        if org.type == "hmrc":
            submitted_queries = HmrcQuery.objects.submitted(hmrc_organisation=org)
            return JsonResponse(data={"queries": submitted_queries.exists()})
        existing = BaseApplication.objects.filter(organisation=org).exists()
        return JsonResponse(data={"applications": existing})
class ApplicationDetail(RetrieveUpdateDestroyAPIView):
    """Retrieve, update (minor/major edit rules apply) or delete a draft application."""
    authentication_classes = (ExporterAuthentication,)
    @authorised_to_view_application(ExporterUser)
    def get(self, request, pk):
        """
        Retrieve an application instance, serialized for the requesting
        exporter; open applications additionally include destinations.
        """
        application = get_application(pk)
        serializer = get_application_view_serializer(application)
        data = serializer(
            application,
            context={
                "user_type": request.user.type,
                "exporter_user": request.user.exporteruser,
                "organisation_id": get_request_user_organisation_id(request),
            },
        ).data
        if application.case_type.sub_type == CaseTypeSubTypeEnum.OPEN:
            data["destinations"] = get_destinations(application.id, user_type=request.user.type)
        return JsonResponse(data=data, status=status.HTTP_200_OK)
    @authorised_to_view_application(ExporterUser)
    @application_in_state(is_editable=True)
    def put(self, request, pk):
        """
        Update an application instance.

        Certain fields (clearance level, F680 types, F680 additional
        information) may only be changed during a major edit; name and
        F680 type changes are written to the audit trail.
        """
        application = get_application(pk)
        serializer = get_application_update_serializer(application)
        case = application.get_case()
        data = request.data.copy()
        serializer = serializer(application, data=data, context=get_request_user_organisation(request), partial=True)
        # Prevent minor edits of the clearance level
        if not application.is_major_editable() and request.data.get("clearance_level"):
            return JsonResponse(
                data={"errors": {"clearance_level": [strings.Applications.Generic.NOT_POSSIBLE_ON_MINOR_EDIT]}},
                status=status.HTTP_400_BAD_REQUEST,
            )
        # Prevent minor edits of the f680 clearance types
        if not application.is_major_editable() and request.data.get("types"):
            return JsonResponse(
                data={"errors": {"types": [strings.Applications.Generic.NOT_POSSIBLE_ON_MINOR_EDIT]}},
                status=status.HTTP_400_BAD_REQUEST,
            )
        # Prevent minor edits of additional_information
        if not application.is_major_editable() and any(
            [request.data.get(field) for field in constants.F680.ADDITIONAL_INFORMATION_FIELDS]
        ):
            return JsonResponse(
                data={"errors": {"Additional details": [strings.Applications.Generic.NOT_POSSIBLE_ON_MINOR_EDIT]}},
                status=status.HTTP_400_BAD_REQUEST,
            )
        if not serializer.is_valid():
            return JsonResponse(data={"errors": serializer.errors}, status=status.HTTP_400_BAD_REQUEST)
        # HMRC queries have no audit requirements — save and return immediately.
        if application.case_type.sub_type == CaseTypeSubTypeEnum.HMRC:
            serializer.save()
            return JsonResponse(data={}, status=status.HTTP_200_OK)
        # Audit block: name changes are recorded against the case.
        if request.data.get("name"):
            old_name = application.name
            serializer.save()
            # NOTE(review): this audit uses request.user.exporteruser while the
            # F680 audit below uses request.user — confirm which is intended.
            audit_trail_service.create(
                actor=request.user.exporteruser,
                verb=AuditType.UPDATED_APPLICATION_NAME,
                target=case,
                payload={"old_name": old_name, "new_name": serializer.data.get("name")},
            )
            return JsonResponse(data={}, status=status.HTTP_200_OK)
        if request.data.get("clearance_level"):
            serializer.save()
            return JsonResponse(data={}, status=status.HTTP_200_OK)
        # Audit block: F680 clearance-type changes are recorded when the set changes.
        if application.case_type.sub_type == CaseTypeSubTypeEnum.F680:
            if request.data.get("types"):
                old_types = [
                    F680ClearanceTypeEnum.get_text(type) for type in application.types.values_list("name", flat=True)
                ]
                new_types = [F680ClearanceTypeEnum.get_text(type) for type in request.data.get("types")]
                serializer.save()
                if set(old_types) != set(new_types):
                    audit_trail_service.create(
                        actor=request.user,
                        verb=AuditType.UPDATE_APPLICATION_F680_CLEARANCE_TYPES,
                        target=case,
                        payload={"old_types": old_types, "new_types": new_types},
                    )
            return JsonResponse(data={}, status=status.HTTP_200_OK)
        else:
            serializer.save()
        if application.case_type.sub_type == CaseTypeSubTypeEnum.STANDARD:
            save_and_audit_have_you_been_informed_ref(request, application, serializer)
        return JsonResponse(data={}, status=status.HTTP_200_OK)
    @authorised_to_view_application(ExporterUser)
    def delete(self, request, pk):
        """
        Delete an application; only permitted while it is still a draft.
        """
        application = get_application(pk)
        if not is_case_status_draft(application.status.status):
            return JsonResponse(
                data={"errors": strings.Applications.Generic.DELETE_SUBMITTED_APPLICATION_ERROR},
                status=status.HTTP_400_BAD_REQUEST,
            )
        application.delete()
        return JsonResponse(
            data={"status": strings.Applications.Generic.DELETE_DRAFT_APPLICATION}, status=status.HTTP_200_OK
        )
class ApplicationSubmission(APIView):
    """Submit a draft application, moving it out of draft and into the case workflow."""
    authentication_classes = (ExporterAuthentication,)
    @transaction.atomic
    @application_in_state(is_major_editable=True)
    @authorised_to_view_application(ExporterUser)
    def put(self, request, pk):
        """
        Submit a draft application which will set its submitted_at datetime and status before creating a case
        Depending on the application subtype, this will also submit the declaration of the licence
        """
        application = get_application(pk)
        old_status = application.status.status
        if application.case_type.sub_type != CaseTypeSubTypeEnum.HMRC:
            assert_user_has_permission(
                request.user.exporteruser, ExporterPermissions.SUBMIT_LICENCE_APPLICATION, application.organisation
            )
        errors = validate_application_ready_for_submission(application)
        if errors:
            return JsonResponse(data={"errors": errors}, status=status.HTTP_400_BAD_REQUEST)
        # Queries are completed directly when submit is clicked on the task list
        # HMRC are completed when submit is clicked on the summary page (page after task list)
        # Applications are completed when submit is clicked on the declaration page (page after summary page)
        # BUG FIX: the original condition read `CaseTypeSubTypeEnum.HMRC and ...`,
        # which is always truthy (it is the enum member itself, not a comparison),
        # so ANY application posting "submit_hmrc" took this fast path and skipped
        # declaration validation, flagging rules and routing. Compare the
        # application's own sub_type instead.
        if application.case_type.sub_type in [CaseTypeSubTypeEnum.EUA, CaseTypeSubTypeEnum.GOODS] or (
            application.case_type.sub_type == CaseTypeSubTypeEnum.HMRC and request.data.get("submit_hmrc")
        ):
            application.submitted_by = request.user.exporteruser
            create_submitted_audit(request, application, old_status)
            submit_application(application)
            if request.data.get("submit_hmrc"):
                auto_generate_case_document(
                    "application_form",
                    application,
                    AutoGeneratedDocuments.APPLICATION_FORM,
                    request.build_absolute_uri(),
                )
        elif application.case_type.sub_type in [
            CaseTypeSubTypeEnum.STANDARD,
            CaseTypeSubTypeEnum.OPEN,
            CaseTypeSubTypeEnum.F680,
            CaseTypeSubTypeEnum.GIFTING,
            CaseTypeSubTypeEnum.EXHIBITION,
        ]:
            if request.data.get("submit_declaration"):
                errors = _validate_agree_to_declaration(request, errors)
                if errors:
                    return JsonResponse(data={"errors": errors}, status=status.HTTP_400_BAD_REQUEST)
                # If a valid declaration is provided, save the application
                application.submitted_by = request.user.exporteruser
                application.agreed_to_foi = request.data.get("agreed_to_foi")
                application.foi_reason = request.data.get("foi_reason", "")
                submit_application(application)
                if application.case_type.sub_type in [CaseTypeSubTypeEnum.STANDARD, CaseTypeSubTypeEnum.OPEN]:
                    set_case_flags_on_submitted_standard_or_open_application(application)
                add_goods_flags_to_submitted_application(application)
                apply_flagging_rules_to_case(application)
                create_submitted_audit(request, application, old_status)
                auto_generate_case_document(
                    "application_form",
                    application,
                    AutoGeneratedDocuments.APPLICATION_FORM,
                    request.build_absolute_uri(),
                )
                run_routing_rules(application)
                # Set the sites on this application as used so their name/site records located at are no longer editable
                sites_on_application = SiteOnApplication.objects.filter(application=application)
                Site.objects.filter(id__in=sites_on_application.values_list("site_id", flat=True)).update(
                    is_used_on_application=True
                )
        if application.case_type.sub_type in [
            CaseTypeSubTypeEnum.STANDARD,
            CaseTypeSubTypeEnum.OPEN,
            CaseTypeSubTypeEnum.HMRC,
        ]:
            # Enforcement check flag is added once only.
            if UUID(SystemFlags.ENFORCEMENT_CHECK_REQUIRED) not in application.flags.values_list("id", flat=True):
                application.flags.add(SystemFlags.ENFORCEMENT_CHECK_REQUIRED)
        if application.case_type.sub_type in [CaseTypeSubTypeEnum.STANDARD, CaseTypeSubTypeEnum.OPEN]:
            auto_match_sanctions(application)
        # If the user hasn't visited the optional goods to country mapping page, then no goods to country mappings will
        # have been saved before this point. So save mappings for all goods to all countries, which is the default
        if (
            application.case_type.sub_type == CaseTypeSubTypeEnum.OPEN
            and GoodsType.objects.filter(application=application, countries__isnull=True).exists()
        ):
            countries_on_application = CountryOnApplication.objects.filter(application=application).values_list(
                "country", flat=True
            )
            for goods_type in GoodsType.objects.filter(application=application, countries__isnull=True):
                goods_type.countries.set(countries_on_application)
        # Serialize for the response message
        serializer = get_application_view_serializer(application)
        serializer = serializer(application, context={"user_type": request.user.type})
        application_data = serializer.data
        if application.case_type.sub_type == CaseTypeSubTypeEnum.OPEN:
            application_data["destinations"] = get_destinations(application.id, user_type=request.user.type)
        data = {"application": {"reference_code": application.reference_code, **application_data}}
        if application.reference_code:
            data["reference_code"] = application.reference_code
        return JsonResponse(data=data, status=status.HTTP_200_OK)
class ApplicationManageStatus(APIView):
    """Change an application's case status, enforcing exporter/gov permission rules."""
    authentication_classes = (SharedAuthentication,)
    @transaction.atomic
    def put(self, request, pk):
        """
        Set a new status on the application.

        Finalised cannot be set here; exporter users may only act on their own
        organisation's applications and only transition to exporter-settable
        statuses, while gov users may additionally update licence status.
        The change is audited and routing rules re-run when the status changed.
        """
        application = get_application(pk)
        is_licence_application = application.case_type.sub_type != CaseTypeSubTypeEnum.EXHIBITION
        # Copy so the status string can be replaced by its pk without mutating request.data.
        data = deepcopy(request.data)
        if data["status"] == CaseStatusEnum.FINALISED:
            return JsonResponse(
                data={"errors": [strings.Applications.Generic.Finalise.Error.SET_FINALISED]},
                status=status.HTTP_400_BAD_REQUEST,
            )
        if not can_set_status(application, data["status"]):
            raise ValidationError({"status": [strings.Statuses.BAD_STATUS]})
        if hasattr(request.user, "exporteruser"):
            if get_request_user_organisation_id(request) != application.organisation.id:
                raise PermissionDenied()
            if not can_status_be_set_by_exporter_user(application.status.status, data["status"]):
                return JsonResponse(
                    data={"errors": [strings.Applications.Generic.Finalise.Error.EXPORTER_SET_STATUS]},
                    status=status.HTTP_400_BAD_REQUEST,
                )
        else:
            if not can_status_be_set_by_gov_user(
                request.user.govuser, application.status.status, data["status"], is_licence_application
            ):
                return JsonResponse(
                    data={"errors": [strings.Applications.Generic.Finalise.Error.GOV_SET_STATUS]},
                    status=status.HTTP_400_BAD_REQUEST,
                )
            update_licence_status(application, data["status"])
        case_status = get_case_status_by_status(data["status"])
        data["status"] = str(case_status.pk)
        old_status = application.status
        serializer = get_application_update_serializer(application)
        serializer = serializer(application, data=data, partial=True)
        if not serializer.is_valid():
            return JsonResponse(data={"errors": serializer.errors}, status=status.HTTP_400_BAD_REQUEST)
        application = serializer.save()
        if CaseStatusEnum.is_terminal(old_status.status) and not CaseStatusEnum.is_terminal(application.status.status):
            # we reapply flagging rules if the status is reopened from a terminal state
            apply_flagging_rules_to_case(application)
        audit_trail_service.create(
            actor=request.user,
            verb=AuditType.UPDATED_STATUS,
            target=application.get_case(),
            payload={
                "status": {
                    "new": CaseStatusEnum.get_text(case_status.status),
                    "old": CaseStatusEnum.get_text(old_status.status),
                },
                "additional_text": data.get("note"),
            },
        )
        # Case routing rules
        if old_status != application.status:
            run_routing_rules(case=application, keep_status=True)
        data = get_application_view_serializer(application)(application, context={"user_type": request.user.type}).data
        if application.case_type.sub_type == CaseTypeSubTypeEnum.OPEN:
            data["destinations"] = get_destinations(application.id, user_type=request.user.type)
        return JsonResponse(data={"data": data}, status=status.HTTP_200_OK,)
class ApplicationFinaliseView(APIView):
    """Finalise an application: list goods awaiting licence values and create/update the draft licence."""
    authentication_classes = (GovAuthentication,)
    def get(self, request, pk):
        """
        Return the goods on this application that received final approve /
        proviso / NLR advice, with their advice attached, so licenced
        quantities and values can be set.
        """
        approved_goods_on_application = (
            GoodOnApplication.objects.filter(
                application_id=pk,
                good__advice__level=AdviceLevel.FINAL,
                good__advice__type__in=[AdviceType.APPROVE, AdviceType.PROVISO, AdviceType.NO_LICENCE_REQUIRED],
                good__advice__case_id=pk,
                good__advice__good_id__isnull=False,
            )
            .annotate(
                advice_type=F("good__advice__type"),
                advice_text=F("good__advice__text"),
                advice_proviso=F("good__advice__proviso"),
            )
            .distinct()
        )
        good_on_applications_with_advice = [
            {
                "id": str(goa.id),
                "good": GoodCreateSerializer(goa.good).data,
                "unit": goa.unit,
                "quantity": goa.quantity,
                "control_list_entries": [
                    {"rating": item.rating, "text": item.text} for item in goa.control_list_entries.all()
                ],
                "is_good_controlled": goa.is_good_controlled,
                "value": goa.value,
                "advice": {
                    "type": AdviceType.as_representation(goa.advice_type),
                    "text": goa.advice_text,
                    "proviso": goa.advice_proviso,
                },
            }
            for goa in approved_goods_on_application
        ]
        return JsonResponse({"goods": good_on_applications_with_advice})
    @transaction.atomic  # noqa
    def put(self, request, pk):
        """
        Finalise an application.

        Validates gov-user permission and blocking flags; refusals/NLRs
        return immediately, while approvals/provisos create or update a
        draft Licence (duration, start date, goods-on-licence values).
        """
        application = get_application(pk)
        # Check permissions
        is_mod_clearance = application.case_type.sub_type in CaseTypeSubTypeEnum.mod
        if not can_status_be_set_by_gov_user(
            request.user.govuser, application.status.status, CaseStatusEnum.FINALISED, is_mod_clearance
        ):
            return JsonResponse(
                data={"errors": [strings.Applications.Generic.Finalise.Error.SET_FINALISED]},
                status=status.HTTP_400_BAD_REQUEST,
            )
        licence_data = request.data.copy()
        action = licence_data.get("action")
        if not action:
            return JsonResponse(
                data={"errors": [strings.Applications.Finalise.Error.NO_ACTION_GIVEN]},
                status=status.HTTP_400_BAD_REQUEST,
            )
        # Check if any blocking flags are on the case
        blocking_flags = (
            get_flags(application.get_case())
            .filter(status=FlagStatuses.ACTIVE, blocks_finalising=True)
            .order_by("name")
            .values_list("name", flat=True)
        )
        if blocking_flags:
            raise PermissionDenied(
                [f"{strings.Applications.Finalise.Error.BLOCKING_FLAGS}{','.join(list(blocking_flags))}"]
            )
        # Refusals & NLRs
        if action in [AdviceType.REFUSE, AdviceType.NO_LICENCE_REQUIRED]:
            return JsonResponse(data={"application": str(application.id)}, status=status.HTTP_200_OK)
        # Approvals & Provisos
        else:
            try:
                active_licence = Licence.objects.get_active_licence(application)
                default_licence_duration = active_licence.duration
            except Licence.DoesNotExist:
                default_licence_duration = get_default_duration(application)
            licence_data["duration"] = licence_data.get("duration", default_licence_duration)
            # Check change default duration permission
            if licence_data["duration"] != default_licence_duration and not request.user.govuser.has_permission(
                GovPermissions.MANAGE_LICENCE_DURATION
            ):
                raise PermissionDenied([strings.Applications.Finalise.Error.SET_DURATION_PERMISSION])
            # Validate date
            try:
                start_date = timezone.datetime(
                    year=int(licence_data["year"]), month=int(licence_data["month"]), day=int(licence_data["day"])
                )
            except (KeyError, ValueError):
                raise ParseError({"start_date": [strings.Applications.Finalise.Error.INVALID_DATE]})
            # Delete existing draft if one exists
            try:
                licence = Licence.objects.get_draft_licence(application)
            except Licence.DoesNotExist:
                licence = None
            licence_data["start_date"] = start_date.strftime("%Y-%m-%d")
            if licence:
                # Update Draft Licence object
                licence_serializer = LicenceCreateSerializer(instance=licence, data=licence_data, partial=True)
            else:
                # Create Draft Licence object
                licence_data["case"] = application.id
                licence_data["status"] = LicenceStatus.DRAFT
                licence_data["reference_code"] = get_licence_reference_code(application.reference_code)
                licence_serializer = LicenceCreateSerializer(data=licence_data)
            if not licence_serializer.is_valid():
                raise ParseError(licence_serializer.errors)
            licence = licence_serializer.save()
            # Delete draft licence document that may now be invalid
            GeneratedCaseDocument.objects.filter(
                case_id=pk, advice_type=AdviceType.APPROVE, visible_to_exporter=False
            ).delete()
            # Only validate & save GoodsOnLicence (quantities & values) for Standard applications
            if application.case_type.sub_type == CaseTypeSubTypeEnum.STANDARD:
                errors = validate_and_create_goods_on_licence(pk, licence.id, request.data)
                if errors:
                    raise ParseError(errors)
            return JsonResponse(data=LicenceCreateSerializer(licence).data, status=status.HTTP_200_OK)
class ApplicationDurationView(APIView):
    authentication_classes = (GovAuthentication,)
    def get(self, request, pk):
        """Return the default licence duration for the given application."""
        default_duration = get_default_duration(get_application(pk))
        return JsonResponse(data={"licence_duration": default_duration}, status=status.HTTP_200_OK)
class ApplicationCopy(APIView):
    """Deep-copy an existing application (and its related records) into a new draft."""
    authentication_classes = (ExporterAuthentication,)
    @transaction.atomic
    def post(self, request, pk):
        """
        Copy an application.

        We fetch the application and remove its relation to itself on the
        database, which allows us to keep most of the data in relation to the
        application intact. Related rows (goods, sites, countries, parties,
        goods types, F680 types) are duplicated onto the new draft.
        """
        self.old_application_id = pk
        old_application = get_application(pk)
        data = request.data
        serializer = GenericApplicationCopySerializer(
            data=data, context={"application_type": old_application.case_type}
        )
        if not serializer.is_valid():
            return JsonResponse(data={"errors": serializer.errors}, status=status.HTTP_400_BAD_REQUEST)
        # Deepcopy so new_application is not a pointer to old_application
        # (if not deepcopied, any changes done on one applies to the other)
        self.new_application = deepcopy(old_application)
        if self.new_application.case_type.sub_type == CaseTypeSubTypeEnum.F680:
            for field in constants.F680.ADDITIONAL_INFORMATION_FIELDS:
                setattr(self.new_application, field, None)
        # Clear references to parent objects, and current application instance object
        self.strip_id_for_application_copy()
        # Replace the reference and have you been informed (if required) with users answer. Also sets some defaults
        self.new_application.name = request.data["name"]
        if (
            self.new_application.case_type.sub_type == CaseTypeSubTypeEnum.STANDARD
            and not self.new_application.case_type.id == CaseTypeEnum.SICL.id
        ):
            self.new_application.have_you_been_informed = request.data.get("have_you_been_informed")
            self.new_application.reference_number_on_information_form = request.data.get(
                "reference_number_on_information_form"
            )
        self.new_application.status = get_case_status_by_status(CaseStatusEnum.DRAFT)
        self.new_application.copy_of_id = self.old_application_id
        # Remove SLA data
        self.new_application.sla_days = 0
        self.new_application.sla_remaining_days = get_application_target_sla(self.new_application.case_type.sub_type)
        self.new_application.last_closed_at = None
        self.new_application.sla_updated_at = None
        # Remove data that should not be copied
        self.remove_data_from_application_copy()
        # Need to save here to create the pk/id for relationships
        self.new_application.save()
        # Create new foreign key connection using data from old application (this is for tables pointing to the case)
        self.create_foreign_relations_for_new_application()
        self.duplicate_goodstypes_for_new_application()
        # Get all parties connected to the application and produce a copy (and replace reference for each one)
        self.duplicate_parties_on_new_application()
        # Get all f680 clearance types
        self.duplicate_f680_clearance_types()
        # Remove usage & licenced quantity/ value
        self.new_application.goods_type.update(usage=0)
        # Save
        self.new_application.created_at = now()
        self.new_application.save()
        return JsonResponse(data={"data": self.new_application.id}, status=status.HTTP_201_CREATED)
    def strip_id_for_application_copy(self):
        """
        The current object id and pk need removed, and the pointers, otherwise save() will determine the object exists.
        """
        self.new_application.pk = None
        self.new_application.id = None
        self.new_application.case_ptr = None
        self.new_application.base_application_ptr = None
    def remove_data_from_application_copy(self):
        """
        Removes data of fields that are stored on the case model, and we wish not to copy.
        """
        set_none = [
            "case_officer",
            "reference_code",
            "submitted_at",
            "licence_duration",
            "is_informed_wmd",
            "informed_wmd_ref",
            "is_suspected_wmd",
            "suspected_wmd_ref",
            "is_military_end_use_controls",
            "military_end_use_controls_ref",
            "is_eu_military",
            "is_compliant_limitations_eu",
            "compliant_limitations_eu_ref",
            "is_shipped_waybill_or_lading",
            "non_waybill_or_lading_route_details",
            "intended_end_use",
            "temp_export_details",
            "is_temp_direct_control",
            "temp_direct_control_details",
            "proposed_return_date",
        ]
        for attribute in set_none:
            setattr(self.new_application, attribute, None)
    def duplicate_parties_on_new_application(self):
        """
        Generates a copy of each party, and recreates any old application Party relations using the new copied party.
        Deleted parties are not copied over.
        """
        party_on_old_application = PartyOnApplication.objects.filter(
            application_id=self.old_application_id, deleted_at__isnull=True
        )
        for old_party_on_app in party_on_old_application:
            # Clearing pk/id makes save() insert a fresh row.
            old_party_on_app.pk = None
            old_party_on_app.id = None
            # copy party
            old_party_id = old_party_on_app.party.id
            party = old_party_on_app.party
            party.id = None
            party.pk = None
            if not party.copy_of:
                # Record lineage back to the original party (first copy only).
                party.copy_of_id = old_party_id
            party.created_at = now()
            party.save()
            old_party_on_app.party = party
            old_party_on_app.application = self.new_application
            old_party_on_app.created_at = now()
            old_party_on_app.save()
    def create_foreign_relations_for_new_application(self):
        """
        Recreates any connections from foreign tables existing on the current application,
        we wish to move to the new application.
        """
        # This is the super set of all many to many related objects for ALL application types.
        # The loop below caters for the possibility that any of the relationships are not relevant to the current
        # application type
        relationships = [
            GoodOnApplication,
            SiteOnApplication,
            CountryOnApplication,
            ExternalLocationOnApplication,
        ]
        for relation in relationships:
            old_application_relation_results = relation.objects.filter(application_id=self.old_application_id).all()
            for result in old_application_relation_results:
                result.pk = None
                result.id = None
                result.application = self.new_application
                # Some models listed above are not inheriting timestampable models,
                # as such we need to ensure created_at exists
                if getattr(result, "created_at", False):
                    result.created_at = now()
                result.save()
                if relation == CountryOnApplication:
                    if result.contract_types:
                        # Re-attach the contract-type flags on the copied row.
                        result.flags.set(
                            [
                                Flag.objects.get(name=ContractType.get_flag_name(contract_type))
                                for contract_type in result.contract_types
                            ]
                        )
    def duplicate_goodstypes_for_new_application(self):
        """
        Creates a duplicate GoodsType and attaches it to the new application if applicable.
        """
        # GoodsType has more logic than in "create_foreign_relations_for_new_application",
        # such as listing the countries on the goodstype, and flags as such it is seperated.
        for good in GoodsType.objects.filter(application_id=self.old_application_id).all():
            # Capture M2M values before clearing the pk, since set() needs the new row.
            old_good_countries = list(good.countries.all())
            old_good_flags = list(good.flags.all())
            old_good_control_list_entries = list(good.control_list_entries.all())
            good.pk = None
            good.id = None
            good.application = self.new_application
            good.created_at = now()
            good.save()
            good.countries.set(old_good_countries)
            good.flags.set(old_good_flags)
            good.control_list_entries.set(old_good_control_list_entries)
    def duplicate_f680_clearance_types(self):
        # Only F680 applications carry clearance types to copy across.
        if self.new_application.case_type.sub_type == CaseTypeSubTypeEnum.F680:
            self.new_application.types.set(
                list(
                    F680ClearanceApplication.objects.get(id=self.old_application_id).types.values_list("id", flat=True)
                )
            )
class ExhibitionDetails(ListCreateAPIView):
    """Set exhibition clearance details, auditing each changed field."""
    authentication_classes = (ExporterAuthentication,)
    queryset = BaseApplication.objects.all()
    serializer = ExhibitionClearanceDetailSerializer
    @application_in_state(is_major_editable=True)
    @authorised_to_view_application(ExporterUser)
    def post(self, request, pk):
        """
        Update the exhibition details (title, dates, reason for clearance) on
        an application; each field that actually changed gets its own audit
        trail entry.
        """
        application = get_application(pk)
        serializer = self.serializer(instance=application, data=request.data)
        if serializer.is_valid():
            # Snapshot current values before save() so changes can be audited.
            old_title = application.title
            old_first_exhibition_date = application.first_exhibition_date
            old_required_by_date = application.required_by_date
            old_reason_for_clearance = application.reason_for_clearance
            case = application.get_case()
            serializer.save()
            validated_data = serializer.validated_data
            if validated_data["title"] != old_title:
                audit_trail_service.create(
                    actor=request.user,
                    verb=AuditType.UPDATED_EXHIBITION_DETAILS_TITLE,
                    target=case,
                    payload={"old_title": old_title, "new_title": validated_data["title"],},
                )
            if validated_data["first_exhibition_date"] != old_first_exhibition_date:
                audit_trail_service.create(
                    actor=request.user,
                    verb=AuditType.UPDATED_EXHIBITION_DETAILS_START_DATE,
                    target=application.get_case(),
                    payload={
                        "old_first_exhibition_date": convert_date_to_string(old_first_exhibition_date)
                        if old_first_exhibition_date
                        else "",
                        "new_first_exhibition_date": convert_date_to_string(validated_data["first_exhibition_date"]),
                    },
                )
            if validated_data["required_by_date"] != old_required_by_date:
                audit_trail_service.create(
                    actor=request.user,
                    verb=AuditType.UPDATED_EXHIBITION_DETAILS_REQUIRED_BY_DATE,
                    target=application.get_case(),
                    payload={
                        "old_required_by_date": convert_date_to_string(old_required_by_date)
                        if old_required_by_date
                        else "",
                        "new_required_by_date": convert_date_to_string(validated_data["required_by_date"]),
                    },
                )
            if validated_data.get("reason_for_clearance") != old_reason_for_clearance:
                audit_trail_service.create(
                    actor=request.user,
                    verb=AuditType.UPDATED_EXHIBITION_DETAILS_REASON_FOR_CLEARANCE,
                    target=application.get_case(),
                    payload={
                        "old_reason_for_clearance": old_reason_for_clearance,
                        "new_reason_for_clearance": validated_data["reason_for_clearance"],
                    },
                )
            return JsonResponse(data={"application": serializer.data}, status=status.HTTP_200_OK)
        return JsonResponse(data={"errors": serializer.errors}, status=status.HTTP_400_BAD_REQUEST)
class ApplicationRouteOfGoods(UpdateAPIView):
    """Exporter endpoint for editing the route-of-goods section of an application."""

    authentication_classes = (ExporterAuthentication,)

    @authorised_to_view_application(ExporterUser)
    @application_in_state(is_major_editable=True)
    @allowed_application_types([CaseTypeSubTypeEnum.OPEN, CaseTypeSubTypeEnum.STANDARD])
    def put(self, request, pk):
        """ Update an application instance with route of goods data. """
        application = get_application(pk)
        update_serializer_class = get_application_update_serializer(application)
        case = application.get_case()
        incoming = request.data.copy()

        update_serializer = update_serializer_class(
            application, data=incoming, context=get_request_user_organisation(request), partial=True
        )
        # Guard clause: reject invalid payloads before touching audit history.
        if not update_serializer.is_valid():
            return JsonResponse(data={"errors": update_serializer.errors}, status=status.HTTP_400_BAD_REQUEST)

        # Audit a change to the shipped-by-waybill-or-lading answer.
        old_answer = application.is_shipped_waybill_or_lading
        answer_now = str_to_bool(incoming.get("is_shipped_waybill_or_lading"))
        if old_answer != answer_now:
            self.add_audit_entry(request, case, "is shipped waybill or lading", old_answer, answer_now)

        # Free-text route details only apply when the answer is "no"; audit any change to them.
        if not answer_now:
            old_details = application.non_waybill_or_lading_route_details
            details_now = incoming.get("non_waybill_or_lading_route_details")
            if old_details != details_now:
                self.add_audit_entry(
                    request, case, "non_waybill_or_lading_route_details", old_details, details_now
                )

        # Audit entries above are written before persisting the new values.
        update_serializer.save()
        return JsonResponse(data={}, status=status.HTTP_200_OK)

    @staticmethod
    def add_audit_entry(request, case, field, previous_value, new_value):
        """Record an UPDATED_ROUTE_OF_GOODS audit event against the case."""
        audit_payload = {
            "route_of_goods_field": field,
            "previous_value": previous_value,
            "new_value": new_value,
        }
        audit_trail_service.create(
            actor=request.user,
            verb=AuditType.UPDATED_ROUTE_OF_GOODS,
            target=case,
            payload=audit_payload,
        )
| [
"api.applications.models.F680ClearanceApplication.objects.get",
"api.organisations.libraries.get_organisation.get_request_user_organisation",
"api.goods.serializers.GoodCreateSerializer",
"api.cases.enums.CaseTypeEnum.reference_to_id",
"api.applications.libraries.case_status_helpers.submit_application",
"... | [((7945, 7989), 'api.core.decorators.authorised_to_view_application', 'authorised_to_view_application', (['ExporterUser'], {}), '(ExporterUser)\n', (7975, 7989), False, 'from api.core.decorators import application_in_state, authorised_to_view_application, allowed_application_types\n'), ((8727, 8771), 'api.core.decorators.authorised_to_view_application', 'authorised_to_view_application', (['ExporterUser'], {}), '(ExporterUser)\n', (8757, 8771), False, 'from api.core.decorators import application_in_state, authorised_to_view_application, allowed_application_types\n'), ((8777, 8815), 'api.core.decorators.application_in_state', 'application_in_state', ([], {'is_editable': '(True)'}), '(is_editable=True)\n', (8797, 8815), False, 'from api.core.decorators import application_in_state, authorised_to_view_application, allowed_application_types\n'), ((12445, 12489), 'api.core.decorators.authorised_to_view_application', 'authorised_to_view_application', (['ExporterUser'], {}), '(ExporterUser)\n', (12475, 12489), False, 'from api.core.decorators import application_in_state, authorised_to_view_application, allowed_application_types\n'), ((13234, 13278), 'api.core.decorators.application_in_state', 'application_in_state', ([], {'is_major_editable': '(True)'}), '(is_major_editable=True)\n', (13254, 13278), False, 'from api.core.decorators import application_in_state, authorised_to_view_application, allowed_application_types\n'), ((13284, 13328), 'api.core.decorators.authorised_to_view_application', 'authorised_to_view_application', (['ExporterUser'], {}), '(ExporterUser)\n', (13314, 13328), False, 'from api.core.decorators import application_in_state, authorised_to_view_application, allowed_application_types\n'), ((37782, 37811), 'api.applications.models.BaseApplication.objects.all', 'BaseApplication.objects.all', ([], {}), '()\n', (37809, 37811), False, 'from api.applications.models import BaseApplication, HmrcQuery, SiteOnApplication, GoodOnApplication, 
CountryOnApplication, ExternalLocationOnApplication, PartyOnApplication, F680ClearanceApplication\n'), ((37871, 37915), 'api.core.decorators.application_in_state', 'application_in_state', ([], {'is_major_editable': '(True)'}), '(is_major_editable=True)\n', (37891, 37915), False, 'from api.core.decorators import application_in_state, authorised_to_view_application, allowed_application_types\n'), ((37921, 37965), 'api.core.decorators.authorised_to_view_application', 'authorised_to_view_application', (['ExporterUser'], {}), '(ExporterUser)\n', (37951, 37965), False, 'from api.core.decorators import application_in_state, authorised_to_view_application, allowed_application_types\n'), ((41057, 41101), 'api.core.decorators.authorised_to_view_application', 'authorised_to_view_application', (['ExporterUser'], {}), '(ExporterUser)\n', (41087, 41101), False, 'from api.core.decorators import application_in_state, authorised_to_view_application, allowed_application_types\n'), ((41107, 41151), 'api.core.decorators.application_in_state', 'application_in_state', ([], {'is_major_editable': '(True)'}), '(is_major_editable=True)\n', (41127, 41151), False, 'from api.core.decorators import application_in_state, authorised_to_view_application, allowed_application_types\n'), ((41157, 41245), 'api.core.decorators.allowed_application_types', 'allowed_application_types', (['[CaseTypeSubTypeEnum.OPEN, CaseTypeSubTypeEnum.STANDARD]'], {}), '([CaseTypeSubTypeEnum.OPEN, CaseTypeSubTypeEnum.\n STANDARD])\n', (41182, 41245), False, 'from api.core.decorators import application_in_state, authorised_to_view_application, allowed_application_types\n'), ((4779, 4822), 'api.organisations.libraries.get_organisation.get_request_user_organisation', 'get_request_user_organisation', (['self.request'], {}), '(self.request)\n', (4808, 4822), False, 'from api.organisations.libraries.get_organisation import get_request_user_organisation, get_request_user_organisation_id\n'), ((6147, 6189), 
'api.users.libraries.notifications.get_case_notifications', 'get_case_notifications', (['data', 'self.request'], {}), '(data, self.request)\n', (6169, 6189), False, 'from api.users.libraries.notifications import get_case_notifications\n'), ((6603, 6647), 'api.applications.helpers.get_application_create_serializer', 'get_application_create_serializer', (['case_type'], {}), '(case_type)\n', (6636, 6647), False, 'from api.applications.helpers import get_application_create_serializer, get_application_view_serializer, get_application_update_serializer, validate_and_create_goods_on_licence, auto_match_sanctions\n'), ((7404, 7442), 'api.organisations.libraries.get_organisation.get_request_user_organisation', 'get_request_user_organisation', (['request'], {}), '(request)\n', (7433, 7442), False, 'from api.organisations.libraries.get_organisation import get_request_user_organisation, get_request_user_organisation_id\n'), ((8109, 8128), 'api.applications.libraries.get_applications.get_application', 'get_application', (['pk'], {}), '(pk)\n', (8124, 8128), False, 'from api.applications.libraries.get_applications import get_application\n'), ((8150, 8194), 'api.applications.helpers.get_application_view_serializer', 'get_application_view_serializer', (['application'], {}), '(application)\n', (8181, 8194), False, 'from api.applications.helpers import get_application_create_serializer, get_application_view_serializer, get_application_update_serializer, validate_and_create_goods_on_licence, auto_match_sanctions\n'), ((8670, 8720), 'django.http.JsonResponse', 'JsonResponse', ([], {'data': 'data', 'status': 'status.HTTP_200_OK'}), '(data=data, status=status.HTTP_200_OK)\n', (8682, 8720), False, 'from django.http import JsonResponse\n'), ((8933, 8952), 'api.applications.libraries.get_applications.get_application', 'get_application', (['pk'], {}), '(pk)\n', (8948, 8952), False, 'from api.applications.libraries.get_applications import get_application\n'), ((8974, 9020), 
'api.applications.helpers.get_application_update_serializer', 'get_application_update_serializer', (['application'], {}), '(application)\n', (9007, 9020), False, 'from api.applications.helpers import get_application_create_serializer, get_application_view_serializer, get_application_update_serializer, validate_and_create_goods_on_licence, auto_match_sanctions\n'), ((12390, 12438), 'django.http.JsonResponse', 'JsonResponse', ([], {'data': '{}', 'status': 'status.HTTP_200_OK'}), '(data={}, status=status.HTTP_200_OK)\n', (12402, 12438), False, 'from django.http import JsonResponse\n'), ((12649, 12668), 'api.applications.libraries.get_applications.get_application', 'get_application', (['pk'], {}), '(pk)\n', (12664, 12668), False, 'from api.applications.libraries.get_applications import get_application\n'), ((12975, 13091), 'django.http.JsonResponse', 'JsonResponse', ([], {'data': "{'status': strings.Applications.Generic.DELETE_DRAFT_APPLICATION}", 'status': 'status.HTTP_200_OK'}), "(data={'status': strings.Applications.Generic.\n DELETE_DRAFT_APPLICATION}, status=status.HTTP_200_OK)\n", (12987, 13091), False, 'from django.http import JsonResponse\n'), ((13616, 13635), 'api.applications.libraries.get_applications.get_application', 'get_application', (['pk'], {}), '(pk)\n', (13631, 13635), False, 'from api.applications.libraries.get_applications import get_application\n'), ((13943, 13997), 'api.applications.creators.validate_application_ready_for_submission', 'validate_application_ready_for_submission', (['application'], {}), '(application)\n', (13984, 13997), False, 'from api.applications.creators import validate_application_ready_for_submission, _validate_agree_to_declaration\n'), ((18369, 18413), 'api.applications.helpers.get_application_view_serializer', 'get_application_view_serializer', (['application'], {}), '(application)\n', (18400, 18413), False, 'from api.applications.helpers import get_application_create_serializer, get_application_view_serializer, 
get_application_update_serializer, validate_and_create_goods_on_licence, auto_match_sanctions\n'), ((18946, 18996), 'django.http.JsonResponse', 'JsonResponse', ([], {'data': 'data', 'status': 'status.HTTP_200_OK'}), '(data=data, status=status.HTTP_200_OK)\n', (18958, 18996), False, 'from django.http import JsonResponse\n'), ((19171, 19190), 'api.applications.libraries.get_applications.get_application', 'get_application', (['pk'], {}), '(pk)\n', (19186, 19190), False, 'from api.applications.libraries.get_applications import get_application\n'), ((19305, 19327), 'copy.deepcopy', 'deepcopy', (['request.data'], {}), '(request.data)\n', (19313, 19327), False, 'from copy import deepcopy\n'), ((20612, 20662), 'api.licences.helpers.update_licence_status', 'update_licence_status', (['application', "data['status']"], {}), "(application, data['status'])\n", (20633, 20662), False, 'from api.licences.helpers import get_licence_reference_code, update_licence_status\n'), ((20686, 20727), 'api.staticdata.statuses.libraries.get_case_status.get_case_status_by_status', 'get_case_status_by_status', (["data['status']"], {}), "(data['status'])\n", (20711, 20727), False, 'from api.staticdata.statuses.libraries.get_case_status import get_case_status_by_status\n'), ((20835, 20881), 'api.applications.helpers.get_application_update_serializer', 'get_application_update_serializer', (['application'], {}), '(application)\n', (20868, 20881), False, 'from api.applications.helpers import get_application_create_serializer, get_application_view_serializer, get_application_update_serializer, validate_and_create_goods_on_licence, auto_match_sanctions\n'), ((22291, 22351), 'django.http.JsonResponse', 'JsonResponse', ([], {'data': "{'data': data}", 'status': 'status.HTTP_200_OK'}), "(data={'data': data}, status=status.HTTP_200_OK)\n", (22303, 22351), False, 'from django.http import JsonResponse\n'), ((24006, 24063), 'django.http.JsonResponse', 'JsonResponse', (["{'goods': 
good_on_applications_with_advice}"], {}), "({'goods': good_on_applications_with_advice})\n", (24018, 24063), False, 'from django.http import JsonResponse\n'), ((24207, 24226), 'api.applications.libraries.get_applications.get_application', 'get_application', (['pk'], {}), '(pk)\n', (24222, 24226), False, 'from api.applications.libraries.get_applications import get_application\n'), ((28692, 28711), 'api.applications.libraries.get_applications.get_application', 'get_application', (['pk'], {}), '(pk)\n', (28707, 28711), False, 'from api.applications.libraries.get_applications import get_application\n'), ((28732, 28765), 'api.applications.libraries.licence.get_default_duration', 'get_default_duration', (['application'], {}), '(application)\n', (28752, 28765), False, 'from api.applications.libraries.licence import get_default_duration\n'), ((28782, 28858), 'django.http.JsonResponse', 'JsonResponse', ([], {'data': "{'licence_duration': duration}", 'status': 'status.HTTP_200_OK'}), "(data={'licence_duration': duration}, status=status.HTTP_200_OK)\n", (28794, 28858), False, 'from django.http import JsonResponse\n'), ((29310, 29329), 'api.applications.libraries.get_applications.get_application', 'get_application', (['pk'], {}), '(pk)\n', (29325, 29329), False, 'from api.applications.libraries.get_applications import get_application\n'), ((29381, 29485), 'api.applications.serializers.generic_application.GenericApplicationCopySerializer', 'GenericApplicationCopySerializer', ([], {'data': 'data', 'context': "{'application_type': old_application.case_type}"}), "(data=data, context={'application_type':\n old_application.case_type})\n", (29413, 29485), False, 'from api.applications.serializers.generic_application import GenericApplicationListSerializer, GenericApplicationCopySerializer\n'), ((29829, 29854), 'copy.deepcopy', 'deepcopy', (['old_application'], {}), '(old_application)\n', (29837, 29854), False, 'from copy import deepcopy\n'), ((30856, 30903), 
'api.staticdata.statuses.libraries.get_case_status.get_case_status_by_status', 'get_case_status_by_status', (['CaseStatusEnum.DRAFT'], {}), '(CaseStatusEnum.DRAFT)\n', (30881, 30903), False, 'from api.staticdata.statuses.libraries.get_case_status import get_case_status_by_status\n'), ((31089, 31156), 'api.cases.tasks.get_application_target_sla', 'get_application_target_sla', (['self.new_application.case_type.sub_type'], {}), '(self.new_application.case_type.sub_type)\n', (31115, 31156), False, 'from api.cases.tasks import get_application_target_sla\n'), ((32110, 32115), 'django.utils.timezone.now', 'now', ([], {}), '()\n', (32113, 32115), False, 'from django.utils.timezone import now\n'), ((32167, 32256), 'django.http.JsonResponse', 'JsonResponse', ([], {'data': "{'data': self.new_application.id}", 'status': 'status.HTTP_201_CREATED'}), "(data={'data': self.new_application.id}, status=status.\n HTTP_201_CREATED)\n", (32179, 32256), False, 'from django.http import JsonResponse\n'), ((33916, 34018), 'api.applications.models.PartyOnApplication.objects.filter', 'PartyOnApplication.objects.filter', ([], {'application_id': 'self.old_application_id', 'deleted_at__isnull': '(True)'}), '(application_id=self.old_application_id,\n deleted_at__isnull=True)\n', (33949, 34018), False, 'from api.applications.models import BaseApplication, HmrcQuery, SiteOnApplication, GoodOnApplication, CountryOnApplication, ExternalLocationOnApplication, PartyOnApplication, F680ClearanceApplication\n'), ((38021, 38040), 'api.applications.libraries.get_applications.get_application', 'get_application', (['pk'], {}), '(pk)\n', (38036, 38040), False, 'from api.applications.libraries.get_applications import get_application\n'), ((40863, 40952), 'django.http.JsonResponse', 'JsonResponse', ([], {'data': "{'errors': serializer.errors}", 'status': 'status.HTTP_400_BAD_REQUEST'}), "(data={'errors': serializer.errors}, status=status.\n HTTP_400_BAD_REQUEST)\n", (40875, 40952), False, 'from django.http 
import JsonResponse\n'), ((41369, 41388), 'api.applications.libraries.get_applications.get_application', 'get_application', (['pk'], {}), '(pk)\n', (41384, 41388), False, 'from api.applications.libraries.get_applications import get_application\n'), ((41410, 41456), 'api.applications.helpers.get_application_update_serializer', 'get_application_update_serializer', (['application'], {}), '(application)\n', (41443, 41456), False, 'from api.applications.helpers import get_application_create_serializer, get_application_view_serializer, get_application_update_serializer, validate_and_create_goods_on_licence, auto_match_sanctions\n'), ((42518, 42566), 'django.http.JsonResponse', 'JsonResponse', ([], {'data': '{}', 'status': 'status.HTTP_200_OK'}), '(data={}, status=status.HTTP_200_OK)\n', (42530, 42566), False, 'from django.http import JsonResponse\n'), ((42668, 42874), 'api.audit_trail.service.create', 'audit_trail_service.create', ([], {'actor': 'request.user', 'verb': 'AuditType.UPDATED_ROUTE_OF_GOODS', 'target': 'case', 'payload': "{'route_of_goods_field': field, 'previous_value': previous_value,\n 'new_value': new_value}"}), "(actor=request.user, verb=AuditType.\n UPDATED_ROUTE_OF_GOODS, target=case, payload={'route_of_goods_field':\n field, 'previous_value': previous_value, 'new_value': new_value})\n", (42694, 42874), True, 'from api.audit_trail import service as audit_trail_service\n'), ((5591, 5682), 'api.organisations.models.Site.objects.get_by_user_and_organisation', 'Site.objects.get_by_user_and_organisation', (['self.request.user.exporteruser', 'organisation'], {}), '(self.request.user.exporteruser,\n organisation)\n', (5632, 5682), False, 'from api.organisations.models import Site\n'), ((6430, 6531), 'rest_framework.exceptions.ValidationError', 'ValidationError', (["{'application_type': [strings.Applications.Generic.SELECT_AN_APPLICATION_TYPE]}"], {}), "({'application_type': [strings.Applications.Generic.\n SELECT_AN_APPLICATION_TYPE]})\n", (6445, 6531), 
False, 'from rest_framework.exceptions import PermissionDenied, ValidationError, ParseError\n'), ((6957, 7030), 'django.http.JsonResponse', 'JsonResponse', ([], {'data': "{'id': application.id}", 'status': 'status.HTTP_201_CREATED'}), "(data={'id': application.id}, status=status.HTTP_201_CREATED)\n", (6969, 7030), False, 'from django.http import JsonResponse\n'), ((7597, 7640), 'django.http.JsonResponse', 'JsonResponse', ([], {'data': "{'queries': has_queries}"}), "(data={'queries': has_queries})\n", (7609, 7640), False, 'from django.http import JsonResponse\n'), ((7772, 7825), 'django.http.JsonResponse', 'JsonResponse', ([], {'data': "{'applications': has_applications}"}), "(data={'applications': has_applications})\n", (7784, 7825), False, 'from django.http import JsonResponse\n'), ((8592, 8653), 'api.cases.service.get_destinations', 'get_destinations', (['application.id'], {'user_type': 'request.user.type'}), '(application.id, user_type=request.user.type)\n', (8608, 8653), False, 'from api.cases.service import get_destinations\n'), ((9373, 9523), 'django.http.JsonResponse', 'JsonResponse', ([], {'data': "{'errors': {'clearance_level': [strings.Applications.Generic.\n NOT_POSSIBLE_ON_MINOR_EDIT]}}", 'status': 'status.HTTP_400_BAD_REQUEST'}), "(data={'errors': {'clearance_level': [strings.Applications.\n Generic.NOT_POSSIBLE_ON_MINOR_EDIT]}}, status=status.HTTP_400_BAD_REQUEST)\n", (9385, 9523), False, 'from django.http import JsonResponse\n'), ((9722, 9862), 'django.http.JsonResponse', 'JsonResponse', ([], {'data': "{'errors': {'types': [strings.Applications.Generic.NOT_POSSIBLE_ON_MINOR_EDIT]}\n }", 'status': 'status.HTTP_400_BAD_REQUEST'}), "(data={'errors': {'types': [strings.Applications.Generic.\n NOT_POSSIBLE_ON_MINOR_EDIT]}}, status=status.HTTP_400_BAD_REQUEST)\n", (9734, 9862), False, 'from django.http import JsonResponse\n'), ((10144, 10297), 'django.http.JsonResponse', 'JsonResponse', ([], {'data': "{'errors': {'Additional details': 
[strings.Applications.Generic.\n NOT_POSSIBLE_ON_MINOR_EDIT]}}", 'status': 'status.HTTP_400_BAD_REQUEST'}), "(data={'errors': {'Additional details': [strings.Applications.\n Generic.NOT_POSSIBLE_ON_MINOR_EDIT]}}, status=status.HTTP_400_BAD_REQUEST)\n", (10156, 10297), False, 'from django.http import JsonResponse\n'), ((10398, 10487), 'django.http.JsonResponse', 'JsonResponse', ([], {'data': "{'errors': serializer.errors}", 'status': 'status.HTTP_400_BAD_REQUEST'}), "(data={'errors': serializer.errors}, status=status.\n HTTP_400_BAD_REQUEST)\n", (10410, 10487), False, 'from django.http import JsonResponse\n'), ((10604, 10652), 'django.http.JsonResponse', 'JsonResponse', ([], {'data': '{}', 'status': 'status.HTTP_200_OK'}), '(data={}, status=status.HTTP_200_OK)\n', (10616, 10652), False, 'from django.http import JsonResponse\n'), ((11082, 11130), 'django.http.JsonResponse', 'JsonResponse', ([], {'data': '{}', 'status': 'status.HTTP_200_OK'}), '(data={}, status=status.HTTP_200_OK)\n', (11094, 11130), False, 'from django.http import JsonResponse\n'), ((11229, 11277), 'django.http.JsonResponse', 'JsonResponse', ([], {'data': '{}', 'status': 'status.HTTP_200_OK'}), '(data={}, status=status.HTTP_200_OK)\n', (11241, 11277), False, 'from django.http import JsonResponse\n'), ((12298, 12373), 'api.applications.libraries.edit_applications.save_and_audit_have_you_been_informed_ref', 'save_and_audit_have_you_been_informed_ref', (['request', 'application', 'serializer'], {}), '(request, application, serializer)\n', (12339, 12373), False, 'from api.applications.libraries.edit_applications import save_and_audit_have_you_been_informed_ref, set_case_flags_on_submitted_standard_or_open_application\n'), ((12685, 12732), 'api.staticdata.statuses.libraries.case_status_validate.is_case_status_draft', 'is_case_status_draft', (['application.status.status'], {}), '(application.status.status)\n', (12705, 12732), False, 'from api.staticdata.statuses.libraries.case_status_validate import 
is_case_status_draft\n'), ((12753, 12888), 'django.http.JsonResponse', 'JsonResponse', ([], {'data': "{'errors': strings.Applications.Generic.DELETE_SUBMITTED_APPLICATION_ERROR}", 'status': 'status.HTTP_400_BAD_REQUEST'}), "(data={'errors': strings.Applications.Generic.\n DELETE_SUBMITTED_APPLICATION_ERROR}, status=status.HTTP_400_BAD_REQUEST)\n", (12765, 12888), False, 'from django.http import JsonResponse\n'), ((13767, 13899), 'api.core.permissions.assert_user_has_permission', 'assert_user_has_permission', (['request.user.exporteruser', 'ExporterPermissions.SUBMIT_LICENCE_APPLICATION', 'application.organisation'], {}), '(request.user.exporteruser, ExporterPermissions.\n SUBMIT_LICENCE_APPLICATION, application.organisation)\n', (13793, 13899), False, 'from api.core.permissions import assert_user_has_permission\n'), ((14036, 14109), 'django.http.JsonResponse', 'JsonResponse', ([], {'data': "{'errors': errors}", 'status': 'status.HTTP_400_BAD_REQUEST'}), "(data={'errors': errors}, status=status.HTTP_400_BAD_REQUEST)\n", (14048, 14109), False, 'from django.http import JsonResponse\n'), ((14662, 14718), 'api.applications.libraries.application_helpers.create_submitted_audit', 'create_submitted_audit', (['request', 'application', 'old_status'], {}), '(request, application, old_status)\n', (14684, 14718), False, 'from api.applications.libraries.application_helpers import optional_str_to_bool, can_status_be_set_by_exporter_user, can_status_be_set_by_gov_user, create_submitted_audit\n'), ((14731, 14762), 'api.applications.libraries.case_status_helpers.submit_application', 'submit_application', (['application'], {}), '(application)\n', (14749, 14762), False, 'from api.applications.libraries.case_status_helpers import submit_application\n'), ((17501, 17534), 'api.applications.helpers.auto_match_sanctions', 'auto_match_sanctions', (['application'], {}), '(application)\n', (17521, 17534), False, 'from api.applications.helpers import get_application_create_serializer, 
get_application_view_serializer, get_application_update_serializer, validate_and_create_goods_on_licence, auto_match_sanctions\n'), ((18160, 18233), 'api.goodstype.models.GoodsType.objects.filter', 'GoodsType.objects.filter', ([], {'application': 'application', 'countries__isnull': '(True)'}), '(application=application, countries__isnull=True)\n', (18184, 18233), False, 'from api.goodstype.models import GoodsType\n'), ((18664, 18725), 'api.cases.service.get_destinations', 'get_destinations', (['application.id'], {'user_type': 'request.user.type'}), '(application.id, user_type=request.user.type)\n', (18680, 18725), False, 'from api.cases.service import get_destinations\n'), ((19403, 19534), 'django.http.JsonResponse', 'JsonResponse', ([], {'data': "{'errors': [strings.Applications.Generic.Finalise.Error.SET_FINALISED]}", 'status': 'status.HTTP_400_BAD_REQUEST'}), "(data={'errors': [strings.Applications.Generic.Finalise.Error.\n SET_FINALISED]}, status=status.HTTP_400_BAD_REQUEST)\n", (19415, 19534), False, 'from django.http import JsonResponse\n'), ((19593, 19636), 'api.cases.helpers.can_set_status', 'can_set_status', (['application', "data['status']"], {}), "(application, data['status'])\n", (19607, 19636), False, 'from api.cases.helpers import can_set_status\n'), ((19656, 19714), 'rest_framework.exceptions.ValidationError', 'ValidationError', (["{'status': [strings.Statuses.BAD_STATUS]}"], {}), "({'status': [strings.Statuses.BAD_STATUS]})\n", (19671, 19714), False, 'from rest_framework.exceptions import PermissionDenied, ValidationError, ParseError\n'), ((21010, 21099), 'django.http.JsonResponse', 'JsonResponse', ([], {'data': "{'errors': serializer.errors}", 'status': 'status.HTTP_400_BAD_REQUEST'}), "(data={'errors': serializer.errors}, status=status.\n HTTP_400_BAD_REQUEST)\n", (21022, 21099), False, 'from django.http import JsonResponse\n'), ((21148, 21193), 'api.staticdata.statuses.enums.CaseStatusEnum.is_terminal', 'CaseStatusEnum.is_terminal', 
(['old_status.status'], {}), '(old_status.status)\n', (21174, 21193), False, 'from api.staticdata.statuses.enums import CaseStatusEnum\n'), ((21357, 21398), 'api.workflow.flagging_rules_automation.apply_flagging_rules_to_case', 'apply_flagging_rules_to_case', (['application'], {}), '(application)\n', (21385, 21398), False, 'from api.workflow.flagging_rules_automation import apply_flagging_rules_to_case\n'), ((21931, 21984), 'api.workflow.automation.run_routing_rules', 'run_routing_rules', ([], {'case': 'application', 'keep_status': '(True)'}), '(case=application, keep_status=True)\n', (21948, 21984), False, 'from api.workflow.automation import run_routing_rules\n'), ((22213, 22274), 'api.cases.service.get_destinations', 'get_destinations', (['application.id'], {'user_type': 'request.user.type'}), '(application.id, user_type=request.user.type)\n', (22229, 22274), False, 'from api.cases.service import get_destinations\n'), ((24356, 24483), 'api.applications.libraries.application_helpers.can_status_be_set_by_gov_user', 'can_status_be_set_by_gov_user', (['request.user.govuser', 'application.status.status', 'CaseStatusEnum.FINALISED', 'is_mod_clearance'], {}), '(request.user.govuser, application.status.\n status, CaseStatusEnum.FINALISED, is_mod_clearance)\n', (24385, 24483), False, 'from api.applications.libraries.application_helpers import optional_str_to_bool, can_status_be_set_by_exporter_user, can_status_be_set_by_gov_user, create_submitted_audit\n'), ((24521, 24652), 'django.http.JsonResponse', 'JsonResponse', ([], {'data': "{'errors': [strings.Applications.Generic.Finalise.Error.SET_FINALISED]}", 'status': 'status.HTTP_400_BAD_REQUEST'}), "(data={'errors': [strings.Applications.Generic.Finalise.Error.\n SET_FINALISED]}, status=status.HTTP_400_BAD_REQUEST)\n", (24533, 24652), False, 'from django.http import JsonResponse\n'), ((24826, 24951), 'django.http.JsonResponse', 'JsonResponse', ([], {'data': "{'errors': 
[strings.Applications.Finalise.Error.NO_ACTION_GIVEN]}", 'status': 'status.HTTP_400_BAD_REQUEST'}), "(data={'errors': [strings.Applications.Finalise.Error.\n NO_ACTION_GIVEN]}, status=status.HTTP_400_BAD_REQUEST)\n", (24838, 24951), False, 'from django.http import JsonResponse\n'), ((29562, 29651), 'django.http.JsonResponse', 'JsonResponse', ([], {'data': "{'errors': serializer.errors}", 'status': 'status.HTTP_400_BAD_REQUEST'}), "(data={'errors': serializer.errors}, status=status.\n HTTP_400_BAD_REQUEST)\n", (29574, 29651), False, 'from django.http import JsonResponse\n'), ((34464, 34469), 'django.utils.timezone.now', 'now', ([], {}), '()\n', (34467, 34469), False, 'from django.utils.timezone import now\n'), ((34645, 34650), 'django.utils.timezone.now', 'now', ([], {}), '()\n', (34648, 34650), False, 'from django.utils.timezone import now\n'), ((37124, 37129), 'django.utils.timezone.now', 'now', ([], {}), '()\n', (37127, 37129), False, 'from django.utils.timezone import now\n'), ((40768, 40846), 'django.http.JsonResponse', 'JsonResponse', ([], {'data': "{'application': serializer.data}", 'status': 'status.HTTP_200_OK'}), "(data={'application': serializer.data}, status=status.HTTP_200_OK)\n", (40780, 40846), False, 'from django.http import JsonResponse\n'), ((41706, 41795), 'django.http.JsonResponse', 'JsonResponse', ([], {'data': "{'errors': serializer.errors}", 'status': 'status.HTTP_400_BAD_REQUEST'}), "(data={'errors': serializer.errors}, status=status.\n HTTP_400_BAD_REQUEST)\n", (41718, 41795), False, 'from django.http import JsonResponse\n'), ((4944, 5000), 'api.applications.models.HmrcQuery.objects.filter', 'HmrcQuery.objects.filter', ([], {'hmrc_organisation': 'organisation'}), '(hmrc_organisation=organisation)\n', (4968, 5000), False, 'from api.applications.models import BaseApplication, HmrcQuery, SiteOnApplication, GoodOnApplication, CountryOnApplication, ExternalLocationOnApplication, PartyOnApplication, F680ClearanceApplication\n'), ((5305, 5362), 
'api.applications.models.BaseApplication.objects.filter', 'BaseApplication.objects.filter', ([], {'organisation': 'organisation'}), '(organisation=organisation)\n', (5335, 5362), False, 'from api.applications.models import BaseApplication, HmrcQuery, SiteOnApplication, GoodOnApplication, CountryOnApplication, ExternalLocationOnApplication, PartyOnApplication, F680ClearanceApplication\n'), ((6729, 6768), 'api.cases.enums.CaseTypeEnum.reference_to_id', 'CaseTypeEnum.reference_to_id', (['case_type'], {}), '(case_type)\n', (6757, 6768), False, 'from api.cases.enums import AdviceLevel, AdviceType, CaseTypeSubTypeEnum, CaseTypeEnum\n'), ((6790, 6828), 'api.organisations.libraries.get_organisation.get_request_user_organisation', 'get_request_user_organisation', (['request'], {}), '(request)\n', (6819, 6828), False, 'from api.organisations.libraries.get_organisation import get_request_user_organisation, get_request_user_organisation_id\n'), ((9158, 9196), 'api.organisations.libraries.get_organisation.get_request_user_organisation', 'get_request_user_organisation', (['request'], {}), '(request)\n', (9187, 9196), False, 'from api.organisations.libraries.get_organisation import get_request_user_organisation, get_request_user_organisation_id\n'), ((12109, 12157), 'django.http.JsonResponse', 'JsonResponse', ([], {'data': '{}', 'status': 'status.HTTP_200_OK'}), '(data={}, status=status.HTTP_200_OK)\n', (12121, 12157), False, 'from django.http import JsonResponse\n'), ((17207, 17251), 'uuid.UUID', 'UUID', (['SystemFlags.ENFORCEMENT_CHECK_REQUIRED'], {}), '(SystemFlags.ENFORCEMENT_CHECK_REQUIRED)\n', (17211, 17251), False, 'from uuid import UUID\n'), ((19781, 19822), 'api.organisations.libraries.get_organisation.get_request_user_organisation_id', 'get_request_user_organisation_id', (['request'], {}), '(request)\n', (19813, 19822), False, 'from api.organisations.libraries.get_organisation import get_request_user_organisation, get_request_user_organisation_id\n'), ((19877, 19895), 
'rest_framework.exceptions.PermissionDenied', 'PermissionDenied', ([], {}), '()\n', (19893, 19895), False, 'from rest_framework.exceptions import PermissionDenied, ValidationError, ParseError\n'), ((19916, 19993), 'api.applications.libraries.application_helpers.can_status_be_set_by_exporter_user', 'can_status_be_set_by_exporter_user', (['application.status.status', "data['status']"], {}), "(application.status.status, data['status'])\n", (19950, 19993), False, 'from api.applications.libraries.application_helpers import optional_str_to_bool, can_status_be_set_by_exporter_user, can_status_be_set_by_gov_user, create_submitted_audit\n'), ((20018, 20155), 'django.http.JsonResponse', 'JsonResponse', ([], {'data': "{'errors': [strings.Applications.Generic.Finalise.Error.EXPORTER_SET_STATUS]}", 'status': 'status.HTTP_400_BAD_REQUEST'}), "(data={'errors': [strings.Applications.Generic.Finalise.Error.\n EXPORTER_SET_STATUS]}, status=status.HTTP_400_BAD_REQUEST)\n", (20030, 20155), False, 'from django.http import JsonResponse\n'), ((20243, 20366), 'api.applications.libraries.application_helpers.can_status_be_set_by_gov_user', 'can_status_be_set_by_gov_user', (['request.user.govuser', 'application.status.status', "data['status']", 'is_licence_application'], {}), "(request.user.govuser, application.status.\n status, data['status'], is_licence_application)\n", (20272, 20366), False, 'from api.applications.libraries.application_helpers import optional_str_to_bool, can_status_be_set_by_exporter_user, can_status_be_set_by_gov_user, create_submitted_audit\n'), ((20416, 20548), 'django.http.JsonResponse', 'JsonResponse', ([], {'data': "{'errors': [strings.Applications.Generic.Finalise.Error.GOV_SET_STATUS]}", 'status': 'status.HTTP_400_BAD_REQUEST'}), "(data={'errors': [strings.Applications.Generic.Finalise.Error.\n GOV_SET_STATUS]}, status=status.HTTP_400_BAD_REQUEST)\n", (20428, 20548), False, 'from django.http import JsonResponse\n'), ((21202, 21255), 
'api.staticdata.statuses.enums.CaseStatusEnum.is_terminal', 'CaseStatusEnum.is_terminal', (['application.status.status'], {}), '(application.status.status)\n', (21228, 21255), False, 'from api.staticdata.statuses.enums import CaseStatusEnum\n'), ((22001, 22045), 'api.applications.helpers.get_application_view_serializer', 'get_application_view_serializer', (['application'], {}), '(application)\n', (22032, 22045), False, 'from api.applications.helpers import get_application_create_serializer, get_application_view_serializer, get_application_update_serializer, validate_and_create_goods_on_licence, auto_match_sanctions\n'), ((25760, 25807), 'api.licences.models.Licence.objects.get_active_licence', 'Licence.objects.get_active_licence', (['application'], {}), '(application)\n', (25794, 25807), False, 'from api.licences.models import Licence\n'), ((26349, 26428), 'rest_framework.exceptions.PermissionDenied', 'PermissionDenied', (['[strings.Applications.Finalise.Error.SET_DURATION_PERMISSION]'], {}), '([strings.Applications.Finalise.Error.SET_DURATION_PERMISSION])\n', (26365, 26428), False, 'from rest_framework.exceptions import PermissionDenied, ValidationError, ParseError\n'), ((26894, 26940), 'api.licences.models.Licence.objects.get_draft_licence', 'Licence.objects.get_draft_licence', (['application'], {}), '(application)\n', (26927, 26940), False, 'from api.licences.models import Licence\n'), ((27195, 27269), 'api.licences.serializers.create_licence.LicenceCreateSerializer', 'LicenceCreateSerializer', ([], {'instance': 'licence', 'data': 'licence_data', 'partial': '(True)'}), '(instance=licence, data=licence_data, partial=True)\n', (27218, 27269), False, 'from api.licences.serializers.create_licence import LicenceCreateSerializer\n'), ((27498, 27552), 'api.licences.helpers.get_licence_reference_code', 'get_licence_reference_code', (['application.reference_code'], {}), '(application.reference_code)\n', (27524, 27552), False, 'from api.licences.helpers import 
get_licence_reference_code, update_licence_status\n'), ((27590, 27632), 'api.licences.serializers.create_licence.LicenceCreateSerializer', 'LicenceCreateSerializer', ([], {'data': 'licence_data'}), '(data=licence_data)\n', (27613, 27632), False, 'from api.licences.serializers.create_licence import LicenceCreateSerializer\n'), ((27706, 27743), 'rest_framework.exceptions.ParseError', 'ParseError', (['licence_serializer.errors'], {}), '(licence_serializer.errors)\n', (27716, 27743), False, 'from rest_framework.exceptions import PermissionDenied, ValidationError, ParseError\n'), ((28224, 28290), 'api.applications.helpers.validate_and_create_goods_on_licence', 'validate_and_create_goods_on_licence', (['pk', 'licence.id', 'request.data'], {}), '(pk, licence.id, request.data)\n', (28260, 28290), False, 'from api.applications.helpers import get_application_create_serializer, get_application_view_serializer, get_application_update_serializer, validate_and_create_goods_on_licence, auto_match_sanctions\n'), ((36722, 36786), 'api.goodstype.models.GoodsType.objects.filter', 'GoodsType.objects.filter', ([], {'application_id': 'self.old_application_id'}), '(application_id=self.old_application_id)\n', (36746, 36786), False, 'from api.goodstype.models import GoodsType\n'), ((38602, 38791), 'api.audit_trail.service.create', 'audit_trail_service.create', ([], {'actor': 'request.user', 'verb': 'AuditType.UPDATED_EXHIBITION_DETAILS_TITLE', 'target': 'case', 'payload': "{'old_title': old_title, 'new_title': validated_data['title']}"}), "(actor=request.user, verb=AuditType.\n UPDATED_EXHIBITION_DETAILS_TITLE, target=case, payload={'old_title':\n old_title, 'new_title': validated_data['title']})\n", (38628, 38791), True, 'from api.audit_trail import service as audit_trail_service\n'), ((41595, 41633), 'api.organisations.libraries.get_organisation.get_request_user_organisation', 'get_request_user_organisation', (['request'], {}), '(request)\n', (41624, 41633), False, 'from 
api.organisations.libraries.get_organisation import get_request_user_organisation, get_request_user_organisation_id\n'), ((5060, 5119), 'api.applications.models.HmrcQuery.objects.submitted', 'HmrcQuery.objects.submitted', ([], {'hmrc_organisation': 'organisation'}), '(hmrc_organisation=organisation)\n', (5087, 5119), False, 'from api.applications.models import BaseApplication, HmrcQuery, SiteOnApplication, GoodOnApplication, CountryOnApplication, ExternalLocationOnApplication, PartyOnApplication, F680ClearanceApplication\n'), ((5169, 5225), 'api.applications.models.HmrcQuery.objects.drafts', 'HmrcQuery.objects.drafts', ([], {'hmrc_organisation': 'organisation'}), '(hmrc_organisation=organisation)\n', (5193, 5225), False, 'from api.applications.models import BaseApplication, HmrcQuery, SiteOnApplication, GoodOnApplication, CountryOnApplication, ExternalLocationOnApplication, PartyOnApplication, F680ClearanceApplication\n'), ((5422, 5469), 'api.applications.models.BaseApplication.objects.submitted', 'BaseApplication.objects.submitted', (['organisation'], {}), '(organisation)\n', (5455, 5469), False, 'from api.applications.models import BaseApplication, HmrcQuery, SiteOnApplication, GoodOnApplication, CountryOnApplication, ExternalLocationOnApplication, PartyOnApplication, F680ClearanceApplication\n'), ((5519, 5563), 'api.applications.models.BaseApplication.objects.drafts', 'BaseApplication.objects.drafts', (['organisation'], {}), '(organisation)\n', (5549, 5563), False, 'from api.applications.models import BaseApplication, HmrcQuery, SiteOnApplication, GoodOnApplication, CountryOnApplication, ExternalLocationOnApplication, PartyOnApplication, F680ClearanceApplication\n'), ((5717, 5776), 'api.applications.models.SiteOnApplication.objects.exclude', 'SiteOnApplication.objects.exclude', ([], {'site__id__in': 'users_sites'}), '(site__id__in=users_sites)\n', (5750, 5776), False, 'from api.applications.models import BaseApplication, HmrcQuery, SiteOnApplication, 
GoodOnApplication, CountryOnApplication, ExternalLocationOnApplication, PartyOnApplication, F680ClearanceApplication\n'), ((7509, 7568), 'api.applications.models.HmrcQuery.objects.submitted', 'HmrcQuery.objects.submitted', ([], {'hmrc_organisation': 'organisation'}), '(hmrc_organisation=organisation)\n', (7536, 7568), False, 'from api.applications.models import BaseApplication, HmrcQuery, SiteOnApplication, GoodOnApplication, CountryOnApplication, ExternalLocationOnApplication, PartyOnApplication, F680ClearanceApplication\n'), ((7686, 7743), 'api.applications.models.BaseApplication.objects.filter', 'BaseApplication.objects.filter', ([], {'organisation': 'organisation'}), '(organisation=organisation)\n', (7716, 7743), False, 'from api.applications.models import BaseApplication, HmrcQuery, SiteOnApplication, GoodOnApplication, CountryOnApplication, ExternalLocationOnApplication, PartyOnApplication, F680ClearanceApplication\n'), ((11464, 11500), 'api.staticdata.f680_clearance_types.enums.F680ClearanceTypeEnum.get_text', 'F680ClearanceTypeEnum.get_text', (['type'], {}), '(type)\n', (11494, 11500), False, 'from api.staticdata.f680_clearance_types.enums import F680ClearanceTypeEnum\n'), ((11609, 11645), 'api.staticdata.f680_clearance_types.enums.F680ClearanceTypeEnum.get_text', 'F680ClearanceTypeEnum.get_text', (['type'], {}), '(type)\n', (11639, 11645), False, 'from api.staticdata.f680_clearance_types.enums import F680ClearanceTypeEnum\n'), ((11793, 11976), 'api.audit_trail.service.create', 'audit_trail_service.create', ([], {'actor': 'request.user', 'verb': 'AuditType.UPDATE_APPLICATION_F680_CLEARANCE_TYPES', 'target': 'case', 'payload': "{'old_types': old_types, 'new_types': new_types}"}), "(actor=request.user, verb=AuditType.\n UPDATE_APPLICATION_F680_CLEARANCE_TYPES, target=case, payload={\n 'old_types': old_types, 'new_types': new_types})\n", (11819, 11976), True, 'from api.audit_trail import service as audit_trail_service\n'), ((15402, 15449), 
'api.applications.creators._validate_agree_to_declaration', '_validate_agree_to_declaration', (['request', 'errors'], {}), '(request, errors)\n', (15432, 15449), False, 'from api.applications.creators import validate_application_ready_for_submission, _validate_agree_to_declaration\n'), ((15893, 15924), 'api.applications.libraries.case_status_helpers.submit_application', 'submit_application', (['application'], {}), '(application)\n', (15911, 15924), False, 'from api.applications.libraries.case_status_helpers import submit_application\n'), ((16144, 16197), 'api.applications.libraries.goods_on_applications.add_goods_flags_to_submitted_application', 'add_goods_flags_to_submitted_application', (['application'], {}), '(application)\n', (16184, 16197), False, 'from api.applications.libraries.goods_on_applications import add_goods_flags_to_submitted_application\n'), ((16214, 16255), 'api.workflow.flagging_rules_automation.apply_flagging_rules_to_case', 'apply_flagging_rules_to_case', (['application'], {}), '(application)\n', (16242, 16255), False, 'from api.workflow.flagging_rules_automation import apply_flagging_rules_to_case\n'), ((16272, 16328), 'api.applications.libraries.application_helpers.create_submitted_audit', 'create_submitted_audit', (['request', 'application', 'old_status'], {}), '(request, application, old_status)\n', (16294, 16328), False, 'from api.applications.libraries.application_helpers import optional_str_to_bool, can_status_be_set_by_exporter_user, can_status_be_set_by_gov_user, create_submitted_audit\n'), ((16592, 16622), 'api.workflow.automation.run_routing_rules', 'run_routing_rules', (['application'], {}), '(application)\n', (16609, 16622), False, 'from api.workflow.automation import run_routing_rules\n'), ((16784, 16841), 'api.applications.models.SiteOnApplication.objects.filter', 'SiteOnApplication.objects.filter', ([], {'application': 'application'}), '(application=application)\n', (16816, 16841), False, 'from api.applications.models import 
BaseApplication, HmrcQuery, SiteOnApplication, GoodOnApplication, CountryOnApplication, ExternalLocationOnApplication, PartyOnApplication, F680ClearanceApplication\n'), ((17871, 17944), 'api.goodstype.models.GoodsType.objects.filter', 'GoodsType.objects.filter', ([], {'application': 'application', 'countries__isnull': '(True)'}), '(application=application, countries__isnull=True)\n', (17895, 17944), False, 'from api.goodstype.models import GoodsType\n'), ((18004, 18064), 'api.applications.models.CountryOnApplication.objects.filter', 'CountryOnApplication.objects.filter', ([], {'application': 'application'}), '(application=application)\n', (18039, 18064), False, 'from api.applications.models import BaseApplication, HmrcQuery, SiteOnApplication, GoodOnApplication, CountryOnApplication, ExternalLocationOnApplication, PartyOnApplication, F680ClearanceApplication\n'), ((23317, 23347), 'api.goods.serializers.GoodCreateSerializer', 'GoodCreateSerializer', (['goa.good'], {}), '(goa.good)\n', (23337, 23347), False, 'from api.goods.serializers import GoodCreateSerializer\n'), ((23751, 23796), 'api.cases.enums.AdviceType.as_representation', 'AdviceType.as_representation', (['goa.advice_type'], {}), '(goa.advice_type)\n', (23779, 23796), False, 'from api.cases.enums import AdviceLevel, AdviceType, CaseTypeSubTypeEnum, CaseTypeEnum\n'), ((25959, 25992), 'api.applications.libraries.licence.get_default_duration', 'get_default_duration', (['application'], {}), '(application)\n', (25979, 25992), False, 'from api.applications.libraries.licence import get_default_duration\n'), ((26721, 26799), 'rest_framework.exceptions.ParseError', 'ParseError', (["{'start_date': [strings.Applications.Finalise.Error.INVALID_DATE]}"], {}), "({'start_date': [strings.Applications.Finalise.Error.INVALID_DATE]})\n", (26731, 26799), False, 'from rest_framework.exceptions import PermissionDenied, ValidationError, ParseError\n'), ((27874, 27986), 
'api.cases.generated_documents.models.GeneratedCaseDocument.objects.filter', 'GeneratedCaseDocument.objects.filter', ([], {'case_id': 'pk', 'advice_type': 'AdviceType.APPROVE', 'visible_to_exporter': '(False)'}), '(case_id=pk, advice_type=AdviceType.\n APPROVE, visible_to_exporter=False)\n', (27910, 27986), False, 'from api.cases.generated_documents.models import GeneratedCaseDocument\n'), ((28344, 28362), 'rest_framework.exceptions.ParseError', 'ParseError', (['errors'], {}), '(errors)\n', (28354, 28362), False, 'from rest_framework.exceptions import PermissionDenied, ValidationError, ParseError\n'), ((35909, 35914), 'django.utils.timezone.now', 'now', ([], {}), '()\n', (35912, 35914), False, 'from django.utils.timezone import now\n'), ((8412, 8453), 'api.organisations.libraries.get_organisation.get_request_user_organisation_id', 'get_request_user_organisation_id', (['request'], {}), '(request)\n', (8444, 8453), False, 'from api.organisations.libraries.get_organisation import get_request_user_organisation, get_request_user_organisation_id\n'), ((15504, 15577), 'django.http.JsonResponse', 'JsonResponse', ([], {'data': "{'errors': errors}", 'status': 'status.HTTP_400_BAD_REQUEST'}), "(data={'errors': errors}, status=status.HTTP_400_BAD_REQUEST)\n", (15516, 15577), False, 'from django.http import JsonResponse\n'), ((16057, 16126), 'api.applications.libraries.edit_applications.set_case_flags_on_submitted_standard_or_open_application', 'set_case_flags_on_submitted_standard_or_open_application', (['application'], {}), '(application)\n', (16113, 16126), False, 'from api.applications.libraries.edit_applications import save_and_audit_have_you_been_informed_ref, set_case_flags_on_submitted_standard_or_open_application\n'), ((21631, 21674), 'api.staticdata.statuses.enums.CaseStatusEnum.get_text', 'CaseStatusEnum.get_text', (['case_status.status'], {}), '(case_status.status)\n', (21654, 21674), False, 'from api.staticdata.statuses.enums import CaseStatusEnum\n'), ((21703, 
21745), 'api.staticdata.statuses.enums.CaseStatusEnum.get_text', 'CaseStatusEnum.get_text', (['old_status.status'], {}), '(old_status.status)\n', (21726, 21745), False, 'from api.staticdata.statuses.enums import CaseStatusEnum\n'), ((22616, 22879), 'api.applications.models.GoodOnApplication.objects.filter', 'GoodOnApplication.objects.filter', ([], {'application_id': 'pk', 'good__advice__level': 'AdviceLevel.FINAL', 'good__advice__type__in': '[AdviceType.APPROVE, AdviceType.PROVISO, AdviceType.NO_LICENCE_REQUIRED]', 'good__advice__case_id': 'pk', 'good__advice__good_id__isnull': '(False)'}), '(application_id=pk, good__advice__level=\n AdviceLevel.FINAL, good__advice__type__in=[AdviceType.APPROVE,\n AdviceType.PROVISO, AdviceType.NO_LICENCE_REQUIRED],\n good__advice__case_id=pk, good__advice__good_id__isnull=False)\n', (22648, 22879), False, 'from api.applications.models import BaseApplication, HmrcQuery, SiteOnApplication, GoodOnApplication, CountryOnApplication, ExternalLocationOnApplication, PartyOnApplication, F680ClearanceApplication\n'), ((23013, 23036), 'django.db.models.F', 'F', (['"""good__advice__type"""'], {}), "('good__advice__type')\n", (23014, 23036), False, 'from django.db.models import F\n'), ((23066, 23089), 'django.db.models.F', 'F', (['"""good__advice__text"""'], {}), "('good__advice__text')\n", (23067, 23089), False, 'from django.db.models import F\n'), ((23122, 23148), 'django.db.models.F', 'F', (['"""good__advice__proviso"""'], {}), "('good__advice__proviso')\n", (23123, 23148), False, 'from django.db.models import F\n'), ((28401, 28433), 'api.licences.serializers.create_licence.LicenceCreateSerializer', 'LicenceCreateSerializer', (['licence'], {}), '(licence)\n', (28424, 28433), False, 'from api.licences.serializers.create_licence import LicenceCreateSerializer\n'), ((39450, 39513), 'api.core.helpers.convert_date_to_string', 'convert_date_to_string', (["validated_data['first_exhibition_date']"], {}), 
"(validated_data['first_exhibition_date'])\n", (39472, 39513), False, 'from api.core.helpers import convert_date_to_string, str_to_bool\n'), ((40099, 40157), 'api.core.helpers.convert_date_to_string', 'convert_date_to_string', (["validated_data['required_by_date']"], {}), "(validated_data['required_by_date'])\n", (40121, 40157), False, 'from api.core.helpers import convert_date_to_string, str_to_bool\n'), ((37534, 37598), 'api.applications.models.F680ClearanceApplication.objects.get', 'F680ClearanceApplication.objects.get', ([], {'id': 'self.old_application_id'}), '(id=self.old_application_id)\n', (37570, 37598), False, 'from api.applications.models import BaseApplication, HmrcQuery, SiteOnApplication, GoodOnApplication, CountryOnApplication, ExternalLocationOnApplication, PartyOnApplication, F680ClearanceApplication\n'), ((39261, 39310), 'api.core.helpers.convert_date_to_string', 'convert_date_to_string', (['old_first_exhibition_date'], {}), '(old_first_exhibition_date)\n', (39283, 39310), False, 'from api.core.helpers import convert_date_to_string, str_to_bool\n'), ((39925, 39969), 'api.core.helpers.convert_date_to_string', 'convert_date_to_string', (['old_required_by_date'], {}), '(old_required_by_date)\n', (39947, 39969), False, 'from api.core.helpers import convert_date_to_string, str_to_bool\n'), ((36171, 36212), 'api.applications.enums.ContractType.get_flag_name', 'ContractType.get_flag_name', (['contract_type'], {}), '(contract_type)\n', (36197, 36212), False, 'from api.applications.enums import ContractType\n')] |
from blessings import Terminal
term = Terminal()
class Writer(object):
    """
    ---------------------------------------------------------------------------
    File-like adapter that prints at one fixed spot on the screen.
    Glue between blessings (cursor positioning) and progressbar, which
    only requires an object exposing ``write``.
    ---------------------------------------------------------------------------
    """
    def __init__(self, location):
        """
        -----------------------------------------------------------------------
        Input: location - tuple of ints (x, y); the terminal position at
               which every subsequent write() will appear
        -----------------------------------------------------------------------
        """
        self.location = location

    def write(self, string):
        """Print ``string`` at the position fixed at construction time."""
        position = self.location
        with term.location(*position):
            print(string)
| [
"blessings.Terminal"
] | [((39, 49), 'blessings.Terminal', 'Terminal', ([], {}), '()\n', (47, 49), False, 'from blessings import Terminal\n')] |
"""
Segment Info Merging Wrapper Script
- Takes an id mapping across segments as input
- Merges the segment info dataframes into one for the entire dataset
"""
import synaptor as s
import argparse
parser = argparse.ArgumentParser()

# Inputs & Outputs
parser.add_argument("storagestr")
parser.add_argument("hashval", type=int)
parser.add_argument("--aux_storagestr", default=None)
parser.add_argument("--timing_tag", default=None)
parser.add_argument("--szthresh", type=int, default=None)


args = parser.parse_args()
# Normalise both storage strings before handing them to the task
args.storagestr = s.io.parse_storagestr(args.storagestr)
args.aux_storagestr = s.io.parse_storagestr(args.aux_storagestr)
print(vars(args))


# Argument names intentionally match the task's keyword parameters,
# so the whole namespace can be splatted straight into the call.
s.proc.tasks_w_io.merge_seginfo_task(**vars(args))
| [
"synaptor.io.parse_storagestr",
"argparse.ArgumentParser"
] | [((209, 234), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (232, 234), False, 'import argparse\n'), ((540, 578), 'synaptor.io.parse_storagestr', 's.io.parse_storagestr', (['args.storagestr'], {}), '(args.storagestr)\n', (561, 578), True, 'import synaptor as s\n'), ((601, 643), 'synaptor.io.parse_storagestr', 's.io.parse_storagestr', (['args.aux_storagestr'], {}), '(args.aux_storagestr)\n', (622, 643), True, 'import synaptor as s\n')] |
# Copyright (c) 2021, Intel Corporation
#
# SPDX-License-Identifier: BSD-3-Clause
import subprocess
import os
import time
from . import tools
from syslog import syslog
from util import util
class CommonPlatform:
    """Shared TSN platform setup/teardown for talker and listener runs.

    ``setup()`` applies the configuration steps in order; every reversible
    step appends an undo callable to ``self.teardown_list`` and
    ``teardown()`` replays those callables in reverse order.
    Subclasses must implement ``_enable_interface_optimisations`` and
    ``_get_irq_name``.
    """

    def __init__(self, configuration):
        self.conf = configuration
        self.teardown_list = []   # undo callables, replayed in reverse by teardown()
        self.vlan_if = 'tsn_vlan'
        self.talker_ip = '169.254.10.10'
        self.listener_ip = '169.254.10.11'
        self.experiment_port = 2000

    def _log_platform(self):
        """Log NIC, kernel and linuxptp details for traceability."""
        self._log_interface()
        self._log_kernel()
        self._log_linux_ptp()

    def setup(self):
        """Bring the platform to a TSN-ready state.

        Order matters: _log_interface() (called via _log_platform) sets
        self.interface, which every later step relies on.
        """
        self._log_platform()
        self._enable_interface()
        self._enable_interface_optimisations()
        self._set_rx_irq_affinity()
        self._enable_vlan()
        self._set_queuing_discipline()
        self._accept_multicast_addr()
        self._setup_rx_filters()
        self._start_ptp4l()
        self._wait_ptp4l_stabilise()
        self._configure_utc_offset()
        self._start_phc2sys()
        # TODO maybe run check_clocks here to ensure everything
        # is ok before proceeding?

    def teardown(self):
        """Undo the setup steps in reverse registration order."""
        for teardown_step in reversed(self.teardown_list):
            teardown_step()

    # Validation steps
    def _log_interface(self):
        """Resolve the TSN interface from config and log its controller."""
        self.interface = self._get_configuration_key(
            'System Setup', 'TSN Interface')
        ethtool_output = tools.EthTool(self.interface).get_driver_info()
        bus_info = ethtool_output['bus-info']
        controller_name = util.run(['lspci', '-s', bus_info]).stdout
        syslog(f'NIC under test: {controller_name} ')

    def _log_kernel(self):
        """Log the running kernel version and its command line."""
        output = util.run(['uname', '-a']).stdout
        syslog(f'Kernel under test: {output}')
        kernel_cmdline = open('/proc/cmdline', 'r').read()
        syslog(f'Kernel command line: {kernel_cmdline}')

    def _log_linux_ptp(self):
        """Log the linuxptp (ptp4l) version in use."""
        version = util.run(['ptp4l', '-v']).stdout
        syslog(f'Linuxptp version: {version}')

    # Setup Steps
    def _enable_interface(self):
        """Bring the TSN interface up if it is currently down."""
        ip_command = tools.IP(self.interface)
        # NOTE(review): this EthTool result is discarded — confirm whether
        # instantiation has a needed side effect or the call can be removed.
        tools.EthTool(self.interface)
        (self.mac, state) = ip_command.get_interface_info()
        if state == 'DOWN':
            self.teardown_list.append(ip_command.set_interface_up())

    def _enable_interface_optimisations(self):
        """Platform hook: apply NIC-specific optimisations."""
        raise NotImplementedError('Must implement _enable_interface_optimisations()')

    def _set_rx_irq_affinity(self):
        """Pin the Rx IRQ of the TSN queue to a CPU mask (listener only)."""
        mode = self._get_configuration_key('General Setup', 'Mode')
        if mode.lower() != 'listener':
            return

        hw_queue = self._get_configuration_key('Listener Setup',
                                               'TSN Hardware Queue')
        irq_smp_affinity_mask = self._get_configuration_key('Listener Setup',
                                                            'Rx IRQ SMP Affinity Mask')
        if irq_smp_affinity_mask is not None:
            irq_command = tools.IRQ(self._get_irq_name() + str(hw_queue))
            self.teardown_list.append(irq_command.set_irq_smp_affinity(irq_smp_affinity_mask))

    def _set_queuing_discipline(self):
        """Install the tc qdisc profile selected in the configuration.

        Profile lines may use the placeholders $iface, $tsn_hw_queue,
        $tsn_vlan_prio and $other_hw_queue.
        """
        qdisc_profile = self._get_configuration_key('General Setup',
                                                    'Qdisc profile')
        tsn_hw_queue = self._get_configuration_key('Listener Setup',
                                                   'TSN Hardware Queue')
        other_hw_queue = self._get_configuration_key('Listener Setup',
                                                     'Other Hardware Queue')
        vlan_priority = self._get_configuration_key('General Setup',
                                                    'VLAN Priority')
        if qdisc_profile is None:
            print("No qdisc profile is being set")
            return

        # First, clean up current qdiscs for interface
        cmd = ['tc', 'qdisc', 'delete', 'dev', self.interface, 'parent',
               'root']
        subprocess.run(cmd)

        commands = self._get_configuration_key('Qdiscs profiles',
                                               qdisc_profile)
        for line in commands:
            line = line.replace('$iface', self.interface)
            line = line.replace('$tsn_hw_queue', str(tsn_hw_queue))
            line = line.replace('$tsn_vlan_prio', str(vlan_priority))
            line = line.replace('$other_hw_queue', str(other_hw_queue))
            cmd = ['tc'] + line.split()
            subprocess.run(cmd)

    def _setup_rx_filters(self):
        """Hook for AF_XDP Rx filter setup; a no-op for other socket types."""
        socket_type = self._get_configuration_key('Test Setup', 'Socket Type')
        if socket_type == 'AF_XDP':
            raise NotImplementedError('Must implement _setup_rx_filters()')

    def _enable_vlan(self):
        """Create the TSN VLAN interface and assign it the role-specific IP."""
        ip_command = tools.IP(self.interface)
        self.teardown_list.append(ip_command.add_vlan())

        vlan_ip_command = tools.IP(self.vlan_if)
        mode = self._get_configuration_key('General Setup', 'Mode')
        if mode.lower() == 'talker':
            vlan_ip_command.set_interface_ip_address(self.talker_ip)
        elif mode.lower() == 'listener':
            vlan_ip_command.set_interface_ip_address(self.listener_ip)
        else:
            raise KeyError(
                'Invalid "General Setup: Mode:" value in the configuration '
                'file')
        self.teardown_list.append(vlan_ip_command.set_interface_up())

    def _start_ptp4l(self):
        """Launch ptp4l on the TSN interface; terminated again on teardown."""
        ptp_conf_file = os.path.expanduser(
            self._get_configuration_key('System Setup', 'PTP Conf'))
        ptp = subprocess.Popen(
            ['ptp4l', '-i', self.interface, '-f', ptp_conf_file,
             '--step_threshold=1', '-l', '6', '--hwts_filter', 'full'],
            stdout=subprocess.PIPE, stderr=subprocess.PIPE)
        self.teardown_list.append(lambda: ptp.terminate())

    def _configure_utc_offset(self):
        """Push grandmaster settings (incl. UTC offset 37 s) via pmc."""
        util.run(['pmc', '-u', '-b', '0', '-t', '1',
                  'SET GRANDMASTER_SETTINGS_NP clockClass 248 '
                  'clockAccuracy 0xfe offsetScaledLogVariance 0xffff '
                  'currentUtcOffset 37 leap61 0 leap59 0 '
                  'currentUtcOffsetValid 1 ptpTimescale 1 timeTraceable 1 '
                  'frequencyTraceable 0 timeSource 0xa0'])

    def _start_phc2sys(self):
        """Launch phc2sys syncing CLOCK_REALTIME to the NIC's PHC."""
        phc = subprocess.Popen(['phc2sys', '-s', self.interface, '-c',
                                'CLOCK_REALTIME', '--step_threshold=1',
                                '--transportSpecific=1', '-w'],
                               stdout=subprocess.PIPE, stderr=subprocess.PIPE)
        self.teardown_list.append(lambda: phc.terminate())

    # supporting methods
    def _get_configuration_key(self, *args):
        """Look up a (possibly nested) key path in the configuration."""
        return util.get_configuration_key(self.conf, *args)

    def _wait_ptp4l_stabilise(self):
        """Poll pmc once per second until the port reaches SLAVE or MASTER."""
        keep_waiting = True
        cmd = ['pmc', '-u', '-b', '0', '-t', '1', 'GET PORT_DATA_SET']
        print('Waiting ptp4l stabilise...')
        while keep_waiting:
            time.sleep(1)
            cp = util.run(cmd)
            lines = cp.stdout.splitlines()
            for line in lines:
                if 'portState' in line:
                    state = line.split()[1]
                    if state.upper() in ['SLAVE', 'MASTER']:
                        keep_waiting = False
                        break

    def _accept_multicast_addr(self):
        """Subscribe the interface to the configured destination MAC."""
        ip_command = tools.IP(self.interface)
        dest_mac_addr = self._get_configuration_key('Talker Setup',
                                                    'Destination MAC Address')
        self.teardown_list.append(ip_command.add_multicast_address(dest_mac_addr))

    def _get_irq_name(self):
        """Platform hook: IRQ name prefix consumed by _set_rx_irq_affinity."""
        # Bug fix: the original *returned* the NotImplementedError instance
        # instead of raising it, so a subclass that forgot to override this
        # silently concatenated an exception object into the IRQ name.
        raise NotImplementedError('Must implement _get_irq_name()')
| [
"util.util.run",
"subprocess.Popen",
"subprocess.run",
"time.sleep",
"syslog.syslog",
"util.util.get_configuration_key"
] | [((1588, 1633), 'syslog.syslog', 'syslog', (['f"""NIC under test: {controller_name} """'], {}), "(f'NIC under test: {controller_name} ')\n", (1594, 1633), False, 'from syslog import syslog\n'), ((1720, 1758), 'syslog.syslog', 'syslog', (['f"""Kernel under test: {output}"""'], {}), "(f'Kernel under test: {output}')\n", (1726, 1758), False, 'from syslog import syslog\n'), ((1826, 1874), 'syslog.syslog', 'syslog', (['f"""Kernel command line: {kernel_cmdline}"""'], {}), "(f'Kernel command line: {kernel_cmdline}')\n", (1832, 1874), False, 'from syslog import syslog\n'), ((1965, 2003), 'syslog.syslog', 'syslog', (['f"""Linuxptp version: {version}"""'], {}), "(f'Linuxptp version: {version}')\n", (1971, 2003), False, 'from syslog import syslog\n'), ((3983, 4002), 'subprocess.run', 'subprocess.run', (['cmd'], {}), '(cmd)\n', (3997, 4002), False, 'import subprocess\n'), ((5564, 5749), 'subprocess.Popen', 'subprocess.Popen', (["['ptp4l', '-i', self.interface, '-f', ptp_conf_file, '--step_threshold=1',\n '-l', '6', '--hwts_filter', 'full']"], {'stdout': 'subprocess.PIPE', 'stderr': 'subprocess.PIPE'}), "(['ptp4l', '-i', self.interface, '-f', ptp_conf_file,\n '--step_threshold=1', '-l', '6', '--hwts_filter', 'full'], stdout=\n subprocess.PIPE, stderr=subprocess.PIPE)\n", (5580, 5749), False, 'import subprocess\n'), ((5884, 6164), 'util.util.run', 'util.run', (["['pmc', '-u', '-b', '0', '-t', '1',\n 'SET GRANDMASTER_SETTINGS_NP clockClass 248 clockAccuracy 0xfe offsetScaledLogVariance 0xffff currentUtcOffset 37 leap61 0 leap59 0 currentUtcOffsetValid 1 ptpTimescale 1 timeTraceable 1 frequencyTraceable 0 timeSource 0xa0'\n ]"], {}), "(['pmc', '-u', '-b', '0', '-t', '1',\n 'SET GRANDMASTER_SETTINGS_NP clockClass 248 clockAccuracy 0xfe offsetScaledLogVariance 0xffff currentUtcOffset 37 leap61 0 leap59 0 currentUtcOffsetValid 1 ptpTimescale 1 timeTraceable 1 frequencyTraceable 0 timeSource 0xa0'\n ])\n", (5892, 6164), False, 'from util import util\n'), ((6303, 6488), 
'subprocess.Popen', 'subprocess.Popen', (["['phc2sys', '-s', self.interface, '-c', 'CLOCK_REALTIME',\n '--step_threshold=1', '--transportSpecific=1', '-w']"], {'stdout': 'subprocess.PIPE', 'stderr': 'subprocess.PIPE'}), "(['phc2sys', '-s', self.interface, '-c', 'CLOCK_REALTIME',\n '--step_threshold=1', '--transportSpecific=1', '-w'], stdout=subprocess\n .PIPE, stderr=subprocess.PIPE)\n", (6319, 6488), False, 'import subprocess\n'), ((6720, 6764), 'util.util.get_configuration_key', 'util.get_configuration_key', (['self.conf', '*args'], {}), '(self.conf, *args)\n', (6746, 6764), False, 'from util import util\n'), ((1537, 1572), 'util.util.run', 'util.run', (["['lspci', '-s', bus_info]"], {}), "(['lspci', '-s', bus_info])\n", (1545, 1572), False, 'from util import util\n'), ((1679, 1704), 'util.util.run', 'util.run', (["['uname', '-a']"], {}), "(['uname', '-a'])\n", (1687, 1704), False, 'from util import util\n'), ((1924, 1949), 'util.util.run', 'util.run', (["['ptp4l', '-v']"], {}), "(['ptp4l', '-v'])\n", (1932, 1949), False, 'from util import util\n'), ((4482, 4501), 'subprocess.run', 'subprocess.run', (['cmd'], {}), '(cmd)\n', (4496, 4501), False, 'import subprocess\n'), ((6986, 6999), 'time.sleep', 'time.sleep', (['(1)'], {}), '(1)\n', (6996, 6999), False, 'import time\n'), ((7018, 7031), 'util.util.run', 'util.run', (['cmd'], {}), '(cmd)\n', (7026, 7031), False, 'from util import util\n')] |
import socket
import random
from onionBrowser import *
ab = onionBrowser(proxies = [],\
user_agents=[('User-agents','superSecretBrowser')])
sock=socket.socket(socket.AF_INET,socket.SOCK_DGRAM)
bytes = random._urandom(1024)
ip = input('Target IP: ')
port = input('Target Port: ')
port = int(port)
sent = 1
while 1:
ab.anonymize()
sock.sendto(bytes,(ip,port))
sent = sent+1
print("Sent %s amount of packets to %s at port %s." %(sent,ip,port))
| [
"random._urandom",
"socket.socket"
] | [((155, 203), 'socket.socket', 'socket.socket', (['socket.AF_INET', 'socket.SOCK_DGRAM'], {}), '(socket.AF_INET, socket.SOCK_DGRAM)\n', (168, 203), False, 'import socket\n'), ((211, 232), 'random._urandom', 'random._urandom', (['(1024)'], {}), '(1024)\n', (226, 232), False, 'import random\n')] |
import torch
import arcsim
import gc
import time
import json
import sys
import gc
import os
import numpy as np
import matplotlib.pyplot as plt
from datetime import datetime
now = datetime.now()
timestamp = datetime.now().strftime('%Y-%m-%d_%H:%M:%S')

# Simulation steps per epoch and number of training epochs
#steps = 30
#epochs= 10
steps = 40
epochs= 20

# Cloth vertex ids used as control handles; alternatives kept for experiments
#handles = [25, 60, 30, 54] # corners
handles = [6,16,25,30,54,60,69,70] # side verts + 2 corners
#handles = [6,16,25,30,54,60,69,70,14,23,48] # side verts + inner side verts + 2 corners
#handles = [24,25,52,53,54,71] # corners but more

losses = []
# Learnable parameters: one 3-component velocity increment per handle per step
param_g = torch.zeros([steps, len(handles)*3],dtype=torch.float64, requires_grad=True)

out_path = 'default_out'
os.mkdir(out_path)

with open('conf/rigidcloth/fold_starts/fold_start.json','r') as f:
	config = json.load(f)
def save_config(config, file):
	"""Serialize `config` to `file` as JSON."""
	with open(file, 'w') as out_handle:
		json.dump(config, out_handle)
# Persist the exact configuration used for this run alongside its outputs.
save_config(config, out_path+'/conf.json')

torch.set_num_threads(16)
# NOTE(review): scalev appears unused in the rest of this script.
scalev=1
def reset_sim(sim, epoch):
	"""(Re)initialise the ARCSim physics state for one training epoch.

	The first 20 epochs each get their own output directory; later
	epochs share a single 'out' directory.  `sim` is unused here and is
	kept only for call-site symmetry.
	"""
	conf = out_path + '/conf.json'
	dest = out_path + ('/out%d' % epoch if epoch < 20 else '/out')
	arcsim.init_physics(conf, dest, False)
def get_target_mesh():
	"""Load the target (folded) cloth state and return its node positions.

	Side effect: sets the module-level global ``node_number`` consumed by
	``get_loss``.  Returns a torch tensor stacking each node's position
	(presumably node_number x 3, since node.x is a position — confirm).
	"""
	sim = arcsim.get_sim()
	arcsim.init_physics('conf/rigidcloth/fold_targets/half_fold.json',out_path+'/target',False)
	# Alternative fold targets kept for experimentation:
	#arcsim.init_physics('conf/rigidcloth/fold_targets/sides_in.json',out_path+'/target',False)
	#arcsim.init_physics('conf/rigidcloth/fold_targets/diag_quarters.json',out_path+'/target',False)
	global node_number
	node_number = len(sim.cloths[0].mesh.nodes)
	ref = [sim.cloths[0].mesh.nodes[i].x.numpy() for i in range(node_number)]
	ref = torch.from_numpy(np.vstack(ref))
	return ref
def get_loss(sim, ref):
	"""Mean squared node-position error against the target mesh `ref`,
	plus an L2 penalty (weight 0.001) on the control parameters
	``param_g``.  Uses the module globals ``param_g`` and ``node_number``.
	"""
	nodes = sim.cloths[0].mesh.nodes
	print("VERTS", ref.shape[0], len(nodes))
	squared_errors = [torch.norm(ref[i] - nodes[i].x) ** 2
	                  for i in range(ref.shape[0])]
	loss = sum(squared_errors) / node_number
	regularisation = torch.norm(param_g, p=2) * 0.001
	return loss + regularisation
def run_sim(steps,sim,ref):
	"""Advance the simulation `steps` steps, injecting the learnable
	velocity increments from the global ``param_g`` at the global
	``handles`` vertices, then score the result against `ref`.
	"""
	# sim.obstacles[2].curr_state_mesh.dummy_node.x = param_g[1]
	print("step")
	for step in range(steps):
		print(step)
		for i in range(len(handles)):
			# 3 velocity components per handle for this time step
			inc_v = param_g[step,3*i:3*i+3]
			sim.cloths[0].mesh.nodes[handles[i]].v += inc_v
			del inc_v
		arcsim.sim_step()
	loss = get_loss(sim,ref)
	return loss
#@profile
def do_train(cur_step,optimizer,scheduler,sim):
	"""Run one training session: per epoch, reset and simulate the cloth,
	backpropagate the loss through the simulator, and take one optimiser
	step, until the global ``epochs`` count is reached.

	NOTE(review): progress is written to the module-level file handle
	``f`` opened by the driver block at the bottom of the script;
	``scheduler`` is currently unused (its step call is commented out).
	"""
	epoch = 0
	ref = get_target_mesh()
	while True:
		reset_sim(sim, epoch)
		st = time.time()
		loss = run_sim(steps, sim,ref)
		en0 = time.time()
		optimizer.zero_grad()

		loss.backward()

		en1 = time.time()
		print("=======================================")
		f.write('epoch {}: loss={} \n'.format(epoch, loss.data))
		print('epoch {}: loss={} \n'.format(epoch, loss.data))

		print('forward time={}'.format(en0-st))
		print('backward time={}'.format(en1-en0))

		optimizer.step()
		print("Num cloth meshes", len(sim.cloths))
		#arcsim.delete_mesh(sim.cloths[0].mesh)
		#scheduler.step(epoch)
		losses.append(loss)
		if epoch>=epochs:
			break
		epoch = epoch + 1
	# break
def visualize_loss(losses, dir_name):
	"""Plot the training loss history and save it as <dir_name>/loss.jpg."""
	plt.plot(losses)
	plt.title('losses')
	plt.xlabel('epochs')
	plt.ylabel('losses')
	out_file = dir_name + '/' + 'loss.jpg'
	plt.savefig(out_file)
# Training driver: one SGD run, logged to a line-buffered text file.
with open(out_path+('/log%s.txt'%timestamp),'w',buffering=1) as f:
	tot_step = 1
	sim=arcsim.get_sim()
	# reset_sim(sim)
	lr = 10
	momentum = 0.4
	f.write('lr={} momentum={}\n'.format(lr,momentum))
	optimizer = torch.optim.SGD([{'params':param_g,'lr':lr}],momentum=momentum)
	# NOTE(review): the scheduler is created but never stepped inside do_train.
	scheduler = torch.optim.lr_scheduler.CosineAnnealingWarmRestarts(optimizer,10,2,eta_min=0.0001)
	for cur_step in range(tot_step):
		do_train(cur_step,optimizer,scheduler,sim)
	#visualize_loss(losses,default_dir)
	visualize_loss(losses,out_path)

print("done")
| [
"torch.optim.SGD",
"matplotlib.pyplot.savefig",
"torch.optim.lr_scheduler.CosineAnnealingWarmRestarts",
"arcsim.get_sim",
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.plot",
"torch.set_num_threads",
"datetime.datetime.now",
"torch.norm",
"os.mkdir",
"numpy.vstack"... | [((180, 194), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (192, 194), False, 'from datetime import datetime\n'), ((659, 677), 'os.mkdir', 'os.mkdir', (['out_path'], {}), '(out_path)\n', (667, 677), False, 'import os\n'), ((909, 934), 'torch.set_num_threads', 'torch.set_num_threads', (['(16)'], {}), '(16)\n', (930, 934), False, 'import torch\n'), ((758, 770), 'json.load', 'json.load', (['f'], {}), '(f)\n', (767, 770), False, 'import json\n'), ((1192, 1208), 'arcsim.get_sim', 'arcsim.get_sim', ([], {}), '()\n', (1206, 1208), False, 'import arcsim\n'), ((1213, 1312), 'arcsim.init_physics', 'arcsim.init_physics', (['"""conf/rigidcloth/fold_targets/half_fold.json"""', "(out_path + '/target')", '(False)'], {}), "('conf/rigidcloth/fold_targets/half_fold.json', out_path +\n '/target', False)\n", (1232, 1312), False, 'import arcsim\n'), ((3328, 3344), 'matplotlib.pyplot.plot', 'plt.plot', (['losses'], {}), '(losses)\n', (3336, 3344), True, 'import matplotlib.pyplot as plt\n'), ((3349, 3368), 'matplotlib.pyplot.title', 'plt.title', (['"""losses"""'], {}), "('losses')\n", (3358, 3368), True, 'import matplotlib.pyplot as plt\n'), ((3373, 3393), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""epochs"""'], {}), "('epochs')\n", (3383, 3393), True, 'import matplotlib.pyplot as plt\n'), ((3398, 3418), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""losses"""'], {}), "('losses')\n", (3408, 3418), True, 'import matplotlib.pyplot as plt\n'), ((3423, 3463), 'matplotlib.pyplot.savefig', 'plt.savefig', (["(dir_name + '/' + 'loss.jpg')"], {}), "(dir_name + '/' + 'loss.jpg')\n", (3434, 3463), True, 'import matplotlib.pyplot as plt\n'), ((3553, 3569), 'arcsim.get_sim', 'arcsim.get_sim', ([], {}), '()\n', (3567, 3569), False, 'import arcsim\n'), ((3693, 3760), 'torch.optim.SGD', 'torch.optim.SGD', (["[{'params': param_g, 'lr': lr}]"], {'momentum': 'momentum'}), "([{'params': param_g, 'lr': lr}], momentum=momentum)\n", (3708, 3760), False, 'import torch\n'), 
((3773, 3863), 'torch.optim.lr_scheduler.CosineAnnealingWarmRestarts', 'torch.optim.lr_scheduler.CosineAnnealingWarmRestarts', (['optimizer', '(10)', '(2)'], {'eta_min': '(0.0001)'}), '(optimizer, 10, 2,\n eta_min=0.0001)\n', (3825, 3863), False, 'import torch\n'), ((207, 221), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (219, 221), False, 'from datetime import datetime\n'), ((842, 862), 'json.dump', 'json.dump', (['config', 'f'], {}), '(config, f)\n', (851, 862), False, 'import json\n'), ((1001, 1086), 'arcsim.init_physics', 'arcsim.init_physics', (["(out_path + '/conf.json')", "(out_path + '/out%d' % epoch)", '(False)'], {}), "(out_path + '/conf.json', out_path + '/out%d' % epoch, False\n )\n", (1020, 1086), False, 'import arcsim\n'), ((1093, 1163), 'arcsim.init_physics', 'arcsim.init_physics', (["(out_path + '/conf.json')", "(out_path + '/out')", '(False)'], {}), "(out_path + '/conf.json', out_path + '/out', False)\n", (1112, 1163), False, 'import arcsim\n'), ((1678, 1692), 'numpy.vstack', 'np.vstack', (['ref'], {}), '(ref)\n', (1687, 1692), True, 'import numpy as np\n'), ((1744, 1768), 'torch.norm', 'torch.norm', (['param_g'], {'p': '(2)'}), '(param_g, p=2)\n', (1754, 1768), False, 'import torch\n'), ((2346, 2363), 'arcsim.sim_step', 'arcsim.sim_step', ([], {}), '()\n', (2361, 2363), False, 'import arcsim\n'), ((2569, 2580), 'time.time', 'time.time', ([], {}), '()\n', (2578, 2580), False, 'import time\n'), ((2634, 2645), 'time.time', 'time.time', ([], {}), '()\n', (2643, 2645), False, 'import time\n'), ((2716, 2727), 'time.time', 'time.time', ([], {}), '()\n', (2725, 2727), False, 'import time\n'), ((1903, 1953), 'torch.norm', 'torch.norm', (['(ref[i] - sim.cloths[0].mesh.nodes[i].x)'], {}), '(ref[i] - sim.cloths[0].mesh.nodes[i].x)\n', (1913, 1953), False, 'import torch\n')] |
import fire
def main():
    """Command line interface (no subcommands wired up yet)."""
    return None
if __name__ == "__main__":
fire.Fire(main)
| [
"fire.Fire"
] | [((102, 117), 'fire.Fire', 'fire.Fire', (['main'], {}), '(main)\n', (111, 117), False, 'import fire\n')] |
from django.contrib import admin
from django.urls import path
import manhole.views
# Route table for the manhole app:
#   /<client>            -> script view (per-client script endpoint)
#   /<client>/<ordering> -> output view (per-client, per-ordering output)
urlpatterns = [
    path('<str:client>', manhole.views.script, name='script'),
    path('<str:client>/<int:ordering>', manhole.views.output, name='output')
]
| [
"django.urls.path"
] | [((104, 161), 'django.urls.path', 'path', (['"""<str:client>"""', 'manhole.views.script'], {'name': '"""script"""'}), "('<str:client>', manhole.views.script, name='script')\n", (108, 161), False, 'from django.urls import path\n'), ((167, 239), 'django.urls.path', 'path', (['"""<str:client>/<int:ordering>"""', 'manhole.views.output'], {'name': '"""output"""'}), "('<str:client>/<int:ordering>', manhole.views.output, name='output')\n", (171, 239), False, 'from django.urls import path\n')] |
import salome
import SMESH
from salome.geom import geomBuilder
from salome.smesh import smeshBuilder
import sys
import math
import numpy as np
from numpy.linalg import norm
from numpy.random import uniform
from pathlib import Path
from auxiliaryFunctions import clusteringAlgorithm
from auxiliaryFunctions import getTranslationalRiskAngleRefAxis
from itertools import product
import os
salome.salome_init()
geompy = geomBuilder.New()
smesh = smeshBuilder.New()
def smallestLineOnFace(face):
    """Return the shortest edge of `face` as a geompy line.

    Only the vertex pairs (0,1), (0,2), (0,3) are compared — assumes a
    4-vertex rectangular face (as produced by MakeFaceObjHW); TODO
    confirm for other face types.
    """
    bndVertices_Slm = geompy.ExtractShapes(face, geompy.ShapeType["VERTEX"], True)
    indexList = [(0,1), (0,2), (0,3)]
    distances = [geompy.MinDistance(bndVertices_Slm[i], bndVertices_Slm[j]) for i,j in indexList]
    index = distances.index(min(distances))
    p1 = bndVertices_Slm[indexList[index][0]]
    p2 = bndVertices_Slm[indexList[index][1]]
    line = geompy.MakeLineTwoPnt(p1,p2)
    return line
class Line:
    """A line segment between two 3D points, mirrored as a Salome edge.

    ``origin`` and ``dest`` are coordinate triples; ``geom`` is the
    corresponding geompy edge object.
    """
    def __init__(self, Point1, Point2):
        self.origin = Point1
        self.dest = Point2
        v1 = geompy.MakeVertex(*list(Point1))
        v2 = geompy.MakeVertex(*list(Point2))
        self.geom = geompy.MakeLineTwoPnt(v1, v2)
    def addToStudy(self, name = 'Line'):
        """Register the edge in the Salome study under `name`."""
        geompy.addToStudy(self.geom, name)
    def extendLine(self, multiplier):
        """Extend the edge in place and update ``dest`` accordingly."""
        self.geom = geompy.ExtendEdge(self.geom, 0, multiplier)
        # Obtain the Salome vertexes: New Entity-Explode-SubShapes Selection
        [v1, v2] = geompy.ExtractShapes(self.geom, geompy.ShapeType["VERTEX"], True)
        v1coords = geompy.PointCoordinates(v1)
        v2coords = geompy.PointCoordinates(v2)
        # ExtractShapes gives no ordering guarantee: keep as `dest` the
        # endpoint that is not the (unchanged) origin.
        self.dest = v2coords if np.allclose(v1coords, self.origin) else v1coords
    def translateOrigin(self, newOrigin):
        '''
        Given a line defined by its origin and destination, translate it
        parallel to itself so that it starts at newOrigin; returns the
        translated Line (this instance is untouched).
        '''
        vector = np.array(newOrigin) - np.array(self.origin)
        point1 = newOrigin
        point2 = np.array(self.dest) + vector
        translatedLine = Line(point1, point2) # Line object at the new position
        return translatedLine
    def translateLineToCoords(self, coordsList):
        """Return one translated copy of this line per origin in coordsList."""
        translatedLines = [self.translateOrigin(coords) for coords in coordsList]
        return translatedLines
    def intersectsWith(self, femesh):
        """True if this line touches any face of `femesh`.

        NOTE: extends this line in place far enough to cross the whole
        mesh; tolerance is derived from the mesh's smallest element size.
        """
        size = femesh.getSize()
        center = femesh.getCenter()
        multiplier = norm(center - self.origin) + size
        self.extendLine(multiplier)
        elementSize = femesh.getMinElementSize()
        #tolerance = np.sqrt(2)*elementSize*1.1 # diagonal*factor
        tolerance = elementSize*0.1 # diagonal*factor
        smeshType = SMESH.FACE
        smeshMethod = SMESH.FT_LyingOnGeom
        aCriteria = [smesh.GetCriterion(smeshType, smeshMethod,
                                        SMESH.FT_Undefined, self.geom,
                                        SMESH.FT_Undefined, SMESH.FT_LogicalOR,
                                        tolerance),]
        aFilter = smesh.GetFilterFromCriteria(aCriteria)
        aFilter.SetMesh(femesh.mesh.GetMesh())
        holesMesh = femesh.mesh.GroupOnFilter(smeshType, 'tangentGroup', aFilter)
        return not holesMesh.IsEmpty()
    def getAngleWithVector(self, vector):
        """Angle in degrees between this line's direction and `vector`."""
        lineVector = np.array(self.dest - self.origin)
        angle = np.degrees(np.arccos(lineVector.dot(vector)/(norm(lineVector)*norm(vector))))
        return angle
class ScatteringPrism:
    """Prism-shaped scattering region for debris trajectories.

    The prism apex sits at ``origin``; its opening is bounded by the
    fwd/aft spread angles (within the rotating plane) and by the
    translational risk angle interval (out of that plane).
    """
    def __init__(self, prismParameters, translationalRiskAngle):
        self.origin = prismParameters['origin']
        self.fwdAngle = prismParameters['fwdAngle']
        self.aftAngle = prismParameters['aftAngle']
        self.fwdAngleR = np.radians(self.fwdAngle)
        self.aftAngleR = np.radians(self.aftAngle)
        self.orientation3D = prismParameters['orientation3D']
        self.rotatingPlane = prismParameters['rotatingPlane']
        assert (self.rotatingPlane in ('XY', 'XZ', 'YZ')), 'rotatingPlane must be XY, XZ or YZ'
        self.cobM = self._COBMatrix()
        # local axis z is always the reference axis for the translational risk angle
        zlocal = self.cobM[:,2]
        # Check if local axis z has the same sign compared to refAxisCode
        refAxisCode = getTranslationalRiskAngleRefAxis(self.orientation3D, self.rotatingPlane)
        if refAxisCode == 'X':
            axis = np.array([1.0, 0.0, 0.0])
        elif refAxisCode == 'Y':
            axis = np.array([0.0, 1.0, 0.0])
        elif refAxisCode == 'Z':
            axis = np.array([0.0, 0.0, 1.0])
        # different sign implies 180 degrees
        self.translationalRiskAngle_lb = translationalRiskAngle[0]
        self.translationalRiskAngle_ub = translationalRiskAngle[1]
        #print('axis=',axis)
        #print('zlocal=',zlocal)
        if axis.dot(zlocal) < 0:
            self.translationalRiskAngle_lb *= -1
            self.translationalRiskAngle_ub *= -1
    def generateDebris(self, nImpactLines, shapes, velocities):
        '''
        Given a prism defined by its angles and orientation, and the
        number of possible impacts, generate that many random impact
        lines inside the prism and attach one Debris (with its shape,
        velocity and associated line) to each.
        '''
        assert nImpactLines == len(shapes) == len(velocities), 'arrays lenght must be equal to nImpactLines'
        lines = self.getRandomLines(nImpactLines)
        debrisList = [Debris(line, shape, velocity) for line, shape, velocity in zip(lines, shapes, velocities)]
        return debrisList
    def getRandomLines(self, numLines):
        """Sample `numLines` lines uniformly within the prism's angle ranges."""
        betas = uniform(self.aftAngle, self.fwdAngle, numLines)
        thetas = uniform(self.translationalRiskAngle_lb, self.translationalRiskAngle_ub, numLines)
        lines = [self._getLineInsidePrism(beta, theta) for beta, theta in zip(betas, thetas)]
        return lines
    def _getLineInsidePrism(self, beta, theta):
        # beta, theta in degrees
        pointPrism = self._getPointInsidePrism_global(beta, theta)
        line = Line(self.origin, pointPrism)
        return line
    def _getPointInsidePrism_global(self, beta, theta):
        '''
        The local interior vector is mapped to global coordinates as a
        free vector, then anchored at the prism vertex:
        POINT (prism vertex, global frame) +
        VECTOR (interior to the prism, global coordinates) =
        POINT interior to the prism in the global frame
        Parameters:
            beta: angle inside spread risk angle (degrees)
            theta: angle inside translational risk angle (degrees)
        '''
        localVector = self._getPointInsidePrism_local(beta, theta)
        globalVector = self.cobM.dot(localVector)
        pointInsidePrismGlobal = self.origin + globalVector
        return pointInsidePrismGlobal
    def _getPointInsidePrism_local(self, beta, theta):
        """Interior point in local coordinates for angles beta/theta (degrees)."""
        betaR = np.radians(beta)
        thetaR = np.radians(theta)
        h = 1
        x = h
        y = h*np.tan(betaR)
        z = h*np.tan(thetaR)
        if abs(theta) > 90: # Change sign of x and z in 2nd and 3rd quadrant
            x*= -1
            z*= -1
        return [x, y, z]
    def _COBMatrix(self):
        """Change-of-basis matrix whose columns are the local x, y, z axes."""
        x = self.orientation3D
        if self.rotatingPlane == 'XY':
            y = [0.0, 0.0, 1.0]
        elif self.rotatingPlane == 'XZ':
            y = [0.0, 1.0, 0.0]
        elif self.rotatingPlane == 'YZ':
            y = [1.0, 0.0, 0.0]
        z = np.cross(x, y)
        x, y, z = [v/norm(v) for v in (x, y, z)]
        cobM = np.column_stack([x, y, z])
        return cobM
class Debris:
    '''
    Define a piece of debris with shape and velocity properties and the
    associated line inside the cone
    '''
    def __init__(self, line, shape, velocity):
        self.line = line
        self.shape = shape
        self.velocity = velocity
        self.origin = line.origin
        self._getDebrisGeom()
    def _getDebrisGeom(self):
        """Build the debris face: an a-by-b rectangle on the line, with a
        random roll (0..180 deg about the line) and pitch (-45..45 deg
        about its shortest edge through the origin)."""
        angleRoll = float(uniform(0.0, 180.0, 1))
        anglePitch = float(uniform(-45.0, 45.0, 1))
        debris0 = geompy.MakeFaceObjHW(self.line.geom, self.shape['a'], self.shape['b'])
        debris1 = geompy.Rotate(debris0, self.line.geom, angleRoll*np.pi/180.0, theCopy=True)
        line = smallestLineOnFace(debris1)
        middlePoints_Slm = geompy.MakeVertexOnCurve(line, 0.5, True)
        origin_Slm = geompy.MakeVertex(*self.origin)
        # Pitch axis: the shortest edge translated so its midpoint sits at the origin
        axis = geompy.MakeTranslationTwoPoints(line,middlePoints_Slm, origin_Slm)
        debris2 = geompy.Rotate(debris1, axis, anglePitch*np.pi/180.0, theCopy=True)
        #geompy.addToStudy(debris0, 'debris0')
        #geompy.addToStudy(debris1, 'debris1')
        geompy.addToStudy(debris2, 'debris2')
        self.geom = debris2
    def generateDebrisMesh(self, elementSize):
        """Mesh the debris face with quadrangles of roughly `elementSize`."""
        self.mesh = smesh.Mesh(self.geom)
        Regular_1D = self.mesh.Segment()
        size = Regular_1D.LocalLength(elementSize, None, 1e-07)
        Quadrangle_2D = self.mesh.Quadrangle(algo=smeshBuilder.QUADRANGLE)
        isDone = self.mesh.Compute()
    def getNodeCoordinates(self):
        """Return the xyz coordinates of every node of the debris mesh."""
        nodesId = self.mesh.GetNodesId()
        debrisNodesCoords = [self.mesh.GetNodeXYZ(id) for id in nodesId]
        return debrisNodesCoords
class FEMesh:
    def __init__(self, NameFile):
        """Load the mesh stored in ``<NameFile>.med`` under the current
        working directory and keep it as ``self.mesh``."""
        self.NameFile = NameFile
        medFile = NameFile + '.med'
        path = Path.cwd() / medFile
        assert path.exists(), '%s does not exists' % str(path)
        ([self.mesh], status) = smesh.CreateMeshesFromMED(str(path))
        assert status == SMESH.DRS_OK, 'Invalid Mesh'
def getnElements(self):
return self.mesh.NbElements()
def getElementsId(self):
return self.mesh.GetElementsId()
def getElementsCoG(self, elements):
elementsCoG = [self.mesh.BaryCenter(element) for element in elements]
return np.array(elementsCoG)
def _getBoundingBox(self):
box = np.array(self.mesh.BoundingBox())
minvalues = box[:3] # hasta el 3
maxvalues = box[3:] # del 3 hacia delante no incluido
return minvalues, maxvalues
def getSize(self):
minvalues, maxvalues = self._getBoundingBox()
size = norm(maxvalues - minvalues)
return size
def getCenter(self):
minvalues, maxvalues = self._getBoundingBox()
center = (maxvalues + minvalues)/2
return center
    def getTranslationalRiskAngle(self, origin, orientation3D, rotatingPlane):
        """Return the two signed angles (degrees) between `orientation3D`
        and the lines from `origin` tangent to the mesh on either bound.

        The sign convention follows the reference axis returned by
        getTranslationalRiskAngleRefAxis: negative when the tangent point
        lies below `origin` along that axis.
        """
        boundVertices = self._getBoundVertices(origin, orientation3D, rotatingPlane)
        origin = np.array(origin)
        p0 = np.array(boundVertices['bnd_1_near'])
        p1 = np.array(boundVertices['bnd_1_far'])
        tangentLine_1, tangent_point_1 = self._getTangentToMesh(origin,p0,p1)
        angle_1 = tangentLine_1.getAngleWithVector(orientation3D)
        p0 = np.array(boundVertices['bnd_2_near'])
        p1 = np.array(boundVertices['bnd_2_far'])
        tangentLine_2, tangent_point_2 = self._getTangentToMesh(origin,p0,p1)
        angle_2 = tangentLine_2.getAngleWithVector(orientation3D)
        tangentLine_1.addToStudy('tangentLine_1')
        tangentLine_2.addToStudy('tangentLine_2')
        tangent_point_1 = np.array(tangent_point_1)
        tangent_point_2 = np.array(tangent_point_2)
        refAxisCode = getTranslationalRiskAngleRefAxis(orientation3D, rotatingPlane)
        axisDict = {'X': 0, 'Y': 1, 'Z': 2}
        comp = axisDict[refAxisCode]
        if tangent_point_1[comp] < origin[comp]: angle_1 = - angle_1
        if tangent_point_2[comp] < origin[comp]: angle_2 = - angle_2
        return angle_1, angle_2
    def _getBoundVertices(self, origin, orientation3D, rotatingPlane):
        """Return the four bracket points used by the tangent search.

        Cuts the mesh bounding box with the two planes bounding the
        translational risk region, intersects the cuts with the plane
        through `origin` normal to the rotating plane, and returns the
        resulting segment endpoints sorted by distance to `origin`:
        keys 'bnd_{1,2}_near' and 'bnd_{1,2}_far'.
        """
        if rotatingPlane == 'XY':
            nVRotatingPlane = [0.0, 0.0, 1.0]
        elif rotatingPlane == 'XZ':
            nVRotatingPlane = [0.0, 1.0, 0.0]
        elif rotatingPlane == 'YZ':
            nVRotatingPlane = [1.0, 0.0, 0.0]
        nVRotatingPlane_Slm = geompy.MakeVectorDXDYDZ(*nVRotatingPlane)
        # normal vector to bound faces of translational risk angle
        nVBoundFaces = np.cross(orientation3D, nVRotatingPlane)
        nVBoundFaces_Slm = geompy.MakeVectorDXDYDZ(*nVBoundFaces)
        #minimum and maximum values of the bounding box
        minvalues, maxvalues = self._getBoundingBox()
        vertex_1_Slm = geompy.MakeVertex(*minvalues) # each component to each argument
        vertex_2_Slm = geompy.MakeVertex(*maxvalues)
        # planes that contain bound faces
        bndPlane_1_Slm = geompy.MakePlane(vertex_1_Slm, nVBoundFaces_Slm, 2*self.getSize())
        bndPlane_2_Slm = geompy.MakePlane(vertex_2_Slm, nVBoundFaces_Slm, 2*self.getSize())
        box = geompy.MakeBoxTwoPnt(vertex_1_Slm, vertex_2_Slm)
        intersection1 = geompy.MakeSection(box, bndPlane_1_Slm, True) # box planar section
        intersection2 = geompy.MakeSection(box, bndPlane_2_Slm, True) # box planar section
        origin_Slm = geompy.MakeVertex(*origin)
        planeInOrientation3D_Slm = geompy.MakePlane(origin_Slm, nVRotatingPlane_Slm, 4*self.getSize())
        bndLine_1_Slm = geompy.MakeSection(intersection1, planeInOrientation3D_Slm, True) # box planar section
        bndLine_2_Slm = geompy.MakeSection(intersection2, planeInOrientation3D_Slm, True) # box planar section
        bndVertices_1_Slm = geompy.ExtractShapes(bndLine_1_Slm, geompy.ShapeType["VERTEX"], True)
        bndVertices_2_Slm = geompy.ExtractShapes(bndLine_2_Slm, geompy.ShapeType["VERTEX"], True)
        bndVertices_1 = [geompy.PointCoordinates(v) for v in bndVertices_1_Slm]
        bndVertices_2 = [geompy.PointCoordinates(v) for v in bndVertices_2_Slm]
        def distToorigin(coords):
            # sort key: Euclidean distance from `origin`
            dist = norm(np.array(coords) - np.array(origin))
            return dist
        bndVertices_1.sort(key=distToorigin)
        bndVertices_2.sort(key=distToorigin)
        bndVertices = {'bnd_1_near': bndVertices_1[0],
                       'bnd_1_far' : bndVertices_1[1],
                       'bnd_2_near': bndVertices_2[0],
                       'bnd_2_far' : bndVertices_2[1]
                       }
        return bndVertices
    def _getTangentToMesh(self, origin, lb, ub):
        """Bisection search for the line through *origin* tangent to the mesh.

        *lb* and *ub* are endpoint coordinates such that exactly one of
        the lines origin->lb / origin->ub intersects the mesh; the
        bracket is halved until its width drops below the tolerance.

        Returns:
            (line, new_point): the tangent Line object and its endpoint.
        """
        dist = 1.0
        tol = 0.01  # convergence tolerance on the bracket width
        while dist > tol:
            line_lb = Line(origin, lb)
            intersects_lb = line_lb.intersectsWith(self)
            line_ub = Line(origin, ub)
            intersects_ub = line_ub.intersectsWith(self)
            # Midpoint of the current bracket.
            new_point = (lb+ub)/2
            line = Line(origin, new_point)
            intersects_new_point = line.intersectsWith(self)
            # Keep the half-interval that still brackets the tangency point.
            if intersects_new_point & intersects_lb:
                lb = new_point
            elif intersects_new_point & intersects_ub:
                ub = new_point
            elif (not intersects_new_point) & intersects_lb:
                ub = new_point
            elif (not intersects_new_point) & intersects_ub:
                lb = new_point
            dist = norm(ub - lb)
        line = Line(origin, new_point)
        return line, new_point
def getMinElementSize(self):
minArea = self.mesh.GetMinMax(SMESH.FT_Area)[0]
minSize = np.sqrt(4*minArea/np.pi)
return minSize
    def getAdjacentElementMesh(self, elementId, coplanarAngle=5):
        """Return a copied mesh of the faces coplanar with *elementId*.

        Uses Salome's FT_CoplanarFaces criterion: faces whose orientation
        deviates less than *coplanarAngle* degrees from the given element
        are grouped and copied into a new mesh named 'meshHole'.
        """
        aCriteria = smesh.GetCriterion(SMESH.FACE, SMESH.FT_CoplanarFaces,
                                       SMESH.FT_Undefined, elementId,
                                       SMESH.FT_Undefined, SMESH.FT_Undefined,
                                       coplanarAngle)
        aFilter = smesh.GetFilterFromCriteria([aCriteria])
        aFilter.SetMesh(self.mesh.GetMesh())
        sub_hole = self.mesh.GroupOnFilter(SMESH.FACE, 'Hole', aFilter)
        sub_mesh = smesh.CopyMesh(sub_hole, 'meshHole', 0, 1)
        return sub_mesh
def getHoleMeshFromIds(self, ids):
ids = str(ids).strip('[]') # '3,4,5' Remove characters
sub_mesh2D = self.getMeshFromRangeOfIds(ids, 2)
sub_mesh1D = self.getMeshFromRangeOfIds(ids, 1)
sub_mesh = self.getHoleMeshKeepingOriginalIds(sub_mesh2D, sub_mesh1D, 'meshHole')
hole = Hole(sub_mesh) # instancia de Hole que tiene esa malla asociada
return hole
    def getMeshFromRangeOfIds(self, ids, dim):
        """Copy the elements of this mesh whose ids lie in *ids*.

        Args:
            ids: comma-separated id string, e.g. '3,4,5'.
            dim: 2 selects faces, 1 selects edges.

        Returns:
            a new Salome mesh ('meshHole<dim>D') with the selected elements.
        """
        assert dim in (1,2), 'dim must be 1 or 2'
        smeshType = SMESH.FACE if dim == 2 else SMESH.EDGE
        aCriteria = smesh.GetCriterion(smeshType,SMESH.FT_RangeOfIds,
                                       SMESH.FT_Undefined,ids)
        aFilter = smesh.GetFilterFromCriteria([aCriteria])
        aFilter.SetMesh(self.mesh.GetMesh())
        sub_hole = self.mesh.GroupOnFilter(smeshType, 'Hole%iD' %dim, aFilter)
        sub_mesh = smesh.CopyMesh(sub_hole, 'meshHole%iD' %dim, 0, 1)
        return sub_mesh
    def getMeshFromGroupOfLines(self, debrisLines, dim, tolerance):
        """Collect the fuselage elements hit by any of the debris lines.

        Args:
            debrisLines: list of Line objects (one per debris mesh node).
            dim: 2 selects faces, 1 selects edges.
            tolerance: geometric tolerance for the lying/belonging test.

        Returns:
            a copied mesh holding every element of self.fuselage touched
            by at least one line (criteria are combined with logical OR).

        Requires self.selectionMethod: 'OneNode' selects elements with at
        least one node on the line (FT_LyingOnGeom), otherwise all nodes
        must belong to the line (FT_BelongToGeom).
        """
        assert dim in (1,2), 'dim must be 1 or 2'
        smeshType = SMESH.FACE if dim == 2 else SMESH.EDGE
        assert hasattr(self, 'selectionMethod'), 'FEMesh needs attribute selectionMethod defined to use getMeshFromGroupOfLines'
        smeshMethod = SMESH.FT_LyingOnGeom if self.selectionMethod == 'OneNode' else SMESH.FT_BelongToGeom
        aCriteria = [smesh.GetCriterion(smeshType, smeshMethod,
                                        SMESH.FT_Undefined, line.geom,
                                        SMESH.FT_Undefined, SMESH.FT_LogicalOR,
                                        tolerance) for line in debrisLines]
        aFilter = smesh.GetFilterFromCriteria(aCriteria)
        aFilter.SetMesh(self.fuselage.mesh.GetMesh())
        holesMesh = self.fuselage.mesh.GroupOnFilter(smeshType, 'groupedHolesFromDebris%iD_%s' %(dim, HolesFromDebris.Id), aFilter)
        # Copied mesh containing every element intersected by the debris lines.
        mesh = smesh.CopyMesh(holesMesh, 'meshHolesFromDebris%iD_%s' %(dim, HolesFromDebris.Id), 0, 1)
        return mesh
def getHoleMeshKeepingOriginalIds(self, sub_mesh2D, sub_mesh1D, meshName):
ids2D = sub_mesh2D.GetElementsId()
ids1D = sub_mesh1D.GetElementsId()
idsAll = self.fuselage.getElementsId()
idsToRemove = [i for i in idsAll if i not in ids2D+ids1D]
sub_mesh = smesh.CopyMesh(self.fuselage.mesh, meshName)
sub_mesh.RemoveElements(idsToRemove)
return sub_mesh
    def generateDamagedConfig(self, debrisList, damageCriteria, selectionMethod='AllNodes'):
        '''
        Given a list of debris objects:
        - extends each debris impact line so it spans the whole fuselage,
        - intersects the lines with the mesh (done inside DamagedConfig),
        - removes the intersected element groups from the mesh.

        Args:
            debrisList: list of debris objects exposing .origin and .line.
            damageCriteria: DamageCriteria instance (velocity threshold/transfer).
            selectionMethod: 'AllNodes' or 'OneNode' element selection.

        Returns:
            a DamagedConfig built from this fuselage and the debris list.
        '''
        assert selectionMethod in ('AllNodes', 'OneNode'), 'Selection Method must be AllNodes or OneNode'
        size = self.getSize()
        center = self.getCenter()
        # Extension length per debris: distance from the fuselage centre to the
        # debris origin plus the fuselage size, so the line crosses the mesh.
        multiplier = [norm(center - debris.origin) + size for debris in debrisList]
        for mult, debris in zip(multiplier, debrisList):
            debris.line.extendLine(mult)
            #debris.line.addToStudy()
        # self is the fuselage object; self.mesh is its Salome mesh.
        damagedConfiguration = DamagedConfig(self, debrisList, damageCriteria, selectionMethod)
        return damagedConfiguration
def exportMesh(self, name='damagedConfig.med'):
try:
path = Path.cwd() / name
self.mesh.ExportMED(str(path), auto_groups=0, minor=40, overwrite=1, meshPart=None, autoDimension=1)
pass
except:
print('ExportMED() failed. Invalid file name?')
def addToStudy(self, name='fuselage'):
smesh.SetName(self.mesh.GetMesh(), name)
class Hole(FEMesh):
    """FEMesh subclass wrapping the Salome mesh of a single hole."""
    def __init__(self, mesh):
        """mesh: Salome mesh associated with this hole."""
        self.mesh = mesh
class HolesFromDebris(FEMesh):
    """Holes produced on the fuselage by a single debris object.

    Intersects the debris trajectory lines with the fuselage mesh, groups
    the hit elements into separate holes (one per impact) and applies the
    damage criteria to keep only the holes that actually pierce the skin.
    """
    # Class variable to use as counter
    Id = 0
    def __init__(self, fuselage, debris, damageCriteria, selectionMethod='AllNodes'):
        self.fuselage = fuselage
        self.debris = debris
        self.damageCriteria = damageCriteria
        self.selectionMethod = selectionMethod
        self.isempty = False
        # Reference the class variable
        HolesFromDebris.Id += 1
        # List of Hole objects, one per separate impact of this debris; filled
        # recursively/accumulated by _getGroupedHoles().
        self.groupedHoles = []
        self._getGroupedHoles()
        # damageCriteria is a DamageCriteria instance holding the velocity
        # curves and threshold; apply() checks, hole by hole, whether the
        # fuselage is pierced. Pierced holes are kept in damagedHoles.
        self.damagedHoles = self.damageCriteria.apply(self.groupedHoles, self.debris.velocity)
    def _getMeshFromDebris(self):
        # Mesh the debris, translate its impact line to every debris node and
        # collect the fuselage elements hit by those lines (2D and 1D).
        elementSize = self.fuselage.getMinElementSize()
        tolerance = np.sqrt(2)*elementSize*1.1 # diagonal*factor
        self.debris.generateDebrisMesh(elementSize)
        debrisNodesCoords = self.debris.getNodeCoordinates() # list with coordinates
        debrisLines = self.debris.line.translateLineToCoords(debrisNodesCoords) # general function. list with line objects
        #for line in debrisLines: line.addToStudy('ExtendedLine-%s' % HolesFromDebris.Id)
        mesh2D = self.getMeshFromGroupOfLines(debrisLines, 2, tolerance)
        mesh1D = self.getMeshFromGroupOfLines(debrisLines, 1, tolerance)
        meshName = 'meshHolesFromDebris_%s' % HolesFromDebris.Id
        self.mesh = self.getHoleMeshKeepingOriginalIds(mesh2D, mesh1D, meshName)
    def _separateHolesOnImpacts(self, elements): # first call: self.getElementsId() yields the ids of the mesh of all holes coming from the same debris
        if elements == []:
            self.isempty = True # if elements is empty, there is no intersection or hole
        else:
            # Cluster hit elements by centre of gravity: one cluster per impact.
            elementsCoG = self.getElementsCoG(elements)
            clusteredElements = clusteringAlgorithm(elements, elementsCoG)
            print('clusteredElements lenght',len(clusteredElements))
            self.groupedHoles = [self.getHoleMeshFromIds(cluster) for cluster in clusteredElements] # list of hole objects
    def _sortGroupedHoles(self):
        # Sort the holes by distance of their centre to the debris origin,
        # i.e. in the order the debris would hit them.
        def distanceToOrigin(hole):
            return norm(hole.getCenter()-self.debris.origin)
        self.groupedHoles.sort(key=distanceToOrigin)
    def _getGroupedHoles(self):
        # Build self.groupedHoles: mesh the debris intersections, split them
        # into individual holes and sort them by impact order.
        self._getMeshFromDebris()
        elements = self.getElementsId()
        self._separateHolesOnImpacts(elements)
        if self.groupedHoles == []:
            # If groupedHoles is empty add an empty mesh
            self.groupedHoles.append(Hole(self.mesh)) # Hole instances wrapping that (empty) mesh
        self._sortGroupedHoles()
class DamageCriteria:
    """Velocity-based damage criterion.

    A hole is considered damaging while the debris velocity stays above
    *vThreshold*; after each hole the velocity is updated with *ftransfer*
    (modelling the energy lost while piercing the previous skin).
    """
    def __init__(self, vThreshold, ftransfer):
        self.vThreshold = vThreshold
        self.ftransfer = ftransfer

    def apply(self, groupedHoles, velocity):
        """Return the holes reached while the transferred velocity still
        exceeds the threshold, in traversal order."""
        damaged = []
        current = velocity
        for hole in groupedHoles:
            if current > self.vThreshold:
                damaged.append(hole)
            # Velocity remaining after traversing this hole.
            current = self.ftransfer(current)
        return damaged
class DamagedConfig(FEMesh):
    """Fuselage mesh after removing every element damaged by the debris."""
    def __init__(self, fuselage, debrisList, damageCriteria, selectionMethod='AllNodes'):
        self.fuselage = fuselage # FEMesh object
        self.debrisList = debrisList
        self.damageCriteria = damageCriteria
        self.selectionMethod = selectionMethod
        self._intersectLinesAndCutHoles()
    def _intersectLinesAndCutHoles(self):
        '''
        Applies HolesFromDebris: for each debris, the elements belonging to
        the same hole are grouped together (groupedHoles is a list of Hole
        instances, each holding the mesh of one hole; there is one such
        list per impact line). The damaged element ids are written to
        medIds.txt and removed from a copy of the fuselage mesh.
        '''
        self.holesFromDebrisList = [HolesFromDebris(self.fuselage, debris, self.damageCriteria, self.selectionMethod)
                                    for debris in self.debrisList]
        elementsToRemove = []
        for holesFromDebris in self.holesFromDebrisList:
            for hole in holesFromDebris.damagedHoles:
                elementsToRemove += hole.getElementsId()
        np.savetxt('medIds.txt', elementsToRemove, fmt='%d')
        self.mesh = smesh.CopyMesh(self.fuselage.mesh, 'DamagedMesh')
        self.mesh.RemoveElements(elementsToRemove)
    def addToStudy(self):
        super().addToStudy(name='damagedMesh') # call the parent method
        # Items of debrisList carry Line objects ready to be registered.
        for debris in self.debrisList: debris.line.addToStudy()
| [
"numpy.radians",
"salome.salome_init",
"numpy.allclose",
"numpy.sqrt",
"numpy.cross",
"numpy.tan",
"salome.geom.geomBuilder.New",
"pathlib.Path.cwd",
"numpy.linalg.norm",
"numpy.column_stack",
"numpy.array",
"numpy.savetxt",
"numpy.random.uniform",
"auxiliaryFunctions.clusteringAlgorithm",... | [((392, 412), 'salome.salome_init', 'salome.salome_init', ([], {}), '()\n', (410, 412), False, 'import salome\n'), ((422, 439), 'salome.geom.geomBuilder.New', 'geomBuilder.New', ([], {}), '()\n', (437, 439), False, 'from salome.geom import geomBuilder\n'), ((448, 466), 'salome.smesh.smeshBuilder.New', 'smeshBuilder.New', ([], {}), '()\n', (464, 466), False, 'from salome.smesh import smeshBuilder\n'), ((3289, 3322), 'numpy.array', 'np.array', (['(self.dest - self.origin)'], {}), '(self.dest - self.origin)\n', (3297, 3322), True, 'import numpy as np\n'), ((3709, 3734), 'numpy.radians', 'np.radians', (['self.fwdAngle'], {}), '(self.fwdAngle)\n', (3719, 3734), True, 'import numpy as np\n'), ((3760, 3785), 'numpy.radians', 'np.radians', (['self.aftAngle'], {}), '(self.aftAngle)\n', (3770, 3785), True, 'import numpy as np\n'), ((4265, 4337), 'auxiliaryFunctions.getTranslationalRiskAngleRefAxis', 'getTranslationalRiskAngleRefAxis', (['self.orientation3D', 'self.rotatingPlane'], {}), '(self.orientation3D, self.rotatingPlane)\n', (4297, 4337), False, 'from auxiliaryFunctions import getTranslationalRiskAngleRefAxis\n'), ((5697, 5744), 'numpy.random.uniform', 'uniform', (['self.aftAngle', 'self.fwdAngle', 'numLines'], {}), '(self.aftAngle, self.fwdAngle, numLines)\n', (5704, 5744), False, 'from numpy.random import uniform\n'), ((5762, 5847), 'numpy.random.uniform', 'uniform', (['self.translationalRiskAngle_lb', 'self.translationalRiskAngle_ub', 'numLines'], {}), '(self.translationalRiskAngle_lb, self.translationalRiskAngle_ub,\n numLines)\n', (5769, 5847), False, 'from numpy.random import uniform\n'), ((7018, 7034), 'numpy.radians', 'np.radians', (['beta'], {}), '(beta)\n', (7028, 7034), True, 'import numpy as np\n'), ((7052, 7069), 'numpy.radians', 'np.radians', (['theta'], {}), '(theta)\n', (7062, 7069), True, 'import numpy as np\n'), ((7590, 7604), 'numpy.cross', 'np.cross', (['x', 'y'], {}), '(x, y)\n', (7598, 7604), True, 
'import numpy as np\n'), ((7671, 7697), 'numpy.column_stack', 'np.column_stack', (['[x, y, z]'], {}), '([x, y, z])\n', (7686, 7697), True, 'import numpy as np\n'), ((9994, 10015), 'numpy.array', 'np.array', (['elementsCoG'], {}), '(elementsCoG)\n', (10002, 10015), True, 'import numpy as np\n'), ((10333, 10360), 'numpy.linalg.norm', 'norm', (['(maxvalues - minvalues)'], {}), '(maxvalues - minvalues)\n', (10337, 10360), False, 'from numpy.linalg import norm\n'), ((10713, 10729), 'numpy.array', 'np.array', (['origin'], {}), '(origin)\n', (10721, 10729), True, 'import numpy as np\n'), ((10744, 10781), 'numpy.array', 'np.array', (["boundVertices['bnd_1_near']"], {}), "(boundVertices['bnd_1_near'])\n", (10752, 10781), True, 'import numpy as np\n'), ((10795, 10831), 'numpy.array', 'np.array', (["boundVertices['bnd_1_far']"], {}), "(boundVertices['bnd_1_far'])\n", (10803, 10831), True, 'import numpy as np\n'), ((10990, 11027), 'numpy.array', 'np.array', (["boundVertices['bnd_2_near']"], {}), "(boundVertices['bnd_2_near'])\n", (10998, 11027), True, 'import numpy as np\n'), ((11041, 11077), 'numpy.array', 'np.array', (["boundVertices['bnd_2_far']"], {}), "(boundVertices['bnd_2_far'])\n", (11049, 11077), True, 'import numpy as np\n'), ((11350, 11375), 'numpy.array', 'np.array', (['tangent_point_1'], {}), '(tangent_point_1)\n', (11358, 11375), True, 'import numpy as np\n'), ((11402, 11427), 'numpy.array', 'np.array', (['tangent_point_2'], {}), '(tangent_point_2)\n', (11410, 11427), True, 'import numpy as np\n'), ((11451, 11513), 'auxiliaryFunctions.getTranslationalRiskAngleRefAxis', 'getTranslationalRiskAngleRefAxis', (['orientation3D', 'rotatingPlane'], {}), '(orientation3D, rotatingPlane)\n', (11483, 11513), False, 'from auxiliaryFunctions import getTranslationalRiskAngleRefAxis\n'), ((12251, 12291), 'numpy.cross', 'np.cross', (['orientation3D', 'nVRotatingPlane'], {}), '(orientation3D, nVRotatingPlane)\n', (12259, 12291), True, 'import numpy as np\n'), ((15347, 15375), 
'numpy.sqrt', 'np.sqrt', (['(4 * minArea / np.pi)'], {}), '(4 * minArea / np.pi)\n', (15354, 15375), True, 'import numpy as np\n'), ((24660, 24712), 'numpy.savetxt', 'np.savetxt', (['"""medIds.txt"""', 'elementsToRemove'], {'fmt': '"""%d"""'}), "('medIds.txt', elementsToRemove, fmt='%d')\n", (24670, 24712), True, 'import numpy as np\n'), ((1642, 1676), 'numpy.allclose', 'np.allclose', (['v1coords', 'self.origin'], {}), '(v1coords, self.origin)\n', (1653, 1676), True, 'import numpy as np\n'), ((1884, 1903), 'numpy.array', 'np.array', (['newOrigin'], {}), '(newOrigin)\n', (1892, 1903), True, 'import numpy as np\n'), ((1906, 1927), 'numpy.array', 'np.array', (['self.origin'], {}), '(self.origin)\n', (1914, 1927), True, 'import numpy as np\n'), ((1972, 1991), 'numpy.array', 'np.array', (['self.dest'], {}), '(self.dest)\n', (1980, 1991), True, 'import numpy as np\n'), ((2409, 2435), 'numpy.linalg.norm', 'norm', (['(center - self.origin)'], {}), '(center - self.origin)\n', (2413, 2435), False, 'from numpy.linalg import norm\n'), ((4389, 4414), 'numpy.array', 'np.array', (['[1.0, 0.0, 0.0]'], {}), '([1.0, 0.0, 0.0])\n', (4397, 4414), True, 'import numpy as np\n'), ((7114, 7127), 'numpy.tan', 'np.tan', (['betaR'], {}), '(betaR)\n', (7120, 7127), True, 'import numpy as np\n'), ((7142, 7156), 'numpy.tan', 'np.tan', (['thetaR'], {}), '(thetaR)\n', (7148, 7156), True, 'import numpy as np\n'), ((8116, 8138), 'numpy.random.uniform', 'uniform', (['(0.0)', '(180.0)', '(1)'], {}), '(0.0, 180.0, 1)\n', (8123, 8138), False, 'from numpy.random import uniform\n'), ((8167, 8190), 'numpy.random.uniform', 'uniform', (['(-45.0)', '(45.0)', '(1)'], {}), '(-45.0, 45.0, 1)\n', (8174, 8190), False, 'from numpy.random import uniform\n'), ((9510, 9520), 'pathlib.Path.cwd', 'Path.cwd', ([], {}), '()\n', (9518, 9520), False, 'from pathlib import Path\n'), ((15151, 15164), 'numpy.linalg.norm', 'norm', (['(ub - lb)'], {}), '(ub - lb)\n', (15155, 15164), False, 'from numpy.linalg import norm\n'), 
((22222, 22264), 'auxiliaryFunctions.clusteringAlgorithm', 'clusteringAlgorithm', (['elements', 'elementsCoG'], {}), '(elements, elementsCoG)\n', (22241, 22264), False, 'from auxiliaryFunctions import clusteringAlgorithm\n'), ((4467, 4492), 'numpy.array', 'np.array', (['[0.0, 1.0, 0.0]'], {}), '([0.0, 1.0, 0.0])\n', (4475, 4492), True, 'import numpy as np\n'), ((7627, 7634), 'numpy.linalg.norm', 'norm', (['v'], {}), '(v)\n', (7631, 7634), False, 'from numpy.linalg import norm\n'), ((19109, 19137), 'numpy.linalg.norm', 'norm', (['(center - debris.origin)'], {}), '(center - debris.origin)\n', (19113, 19137), False, 'from numpy.linalg import norm\n'), ((19602, 19612), 'pathlib.Path.cwd', 'Path.cwd', ([], {}), '()\n', (19610, 19612), False, 'from pathlib import Path\n'), ((21125, 21135), 'numpy.sqrt', 'np.sqrt', (['(2)'], {}), '(2)\n', (21132, 21135), True, 'import numpy as np\n'), ((4545, 4570), 'numpy.array', 'np.array', (['[0.0, 0.0, 1.0]'], {}), '([0.0, 0.0, 1.0])\n', (4553, 4570), True, 'import numpy as np\n'), ((13895, 13911), 'numpy.array', 'np.array', (['coords'], {}), '(coords)\n', (13903, 13911), True, 'import numpy as np\n'), ((13914, 13930), 'numpy.array', 'np.array', (['origin'], {}), '(origin)\n', (13922, 13930), True, 'import numpy as np\n'), ((3385, 3401), 'numpy.linalg.norm', 'norm', (['lineVector'], {}), '(lineVector)\n', (3389, 3401), False, 'from numpy.linalg import norm\n'), ((3402, 3414), 'numpy.linalg.norm', 'norm', (['vector'], {}), '(vector)\n', (3406, 3414), False, 'from numpy.linalg import norm\n')] |
"""
.. module:: Augmentation
:platform: Unix, Windows
:synopsis: A useful module indeed.
.. moduleauthor:: <NAME> <<EMAIL>>
"""
import numpy as np
import random
from nltk.corpus import wordnet
import collections
import math
#import nltk
#nltk.download('wordnet')
class Augmentation:
    r"""
    This is the class to do data augmentation.

    Args:
        documents (:obj:`list`, optional, defaults to []):
            A list of documents.
        labels (:obj:`list`, optional, defaults to []):
            A list of labels.
        strategy (:obj:`string`, optional, defaults to 'daia'):
            Augmentation strategy: 'eda', 'uda' or 'pyramid'.
        verbose (:obj:`bool`, optional, defaults to True):
            Print progress information.

    Example::

        from Manteia.Augmentation import Augmentation
        documents=['a text','text b']
        labels=['a','b']
        Augmentation(documents,labels)

    Attributes:
        documents_augmented, labels_augmented: set when a known strategy runs.
    """
    def __init__(self, documents=[], labels=[], strategy='daia', verbose=True):
        self.documents = documents
        self.labels = labels
        self.verbose = verbose
        if verbose:
            print('Augmentation %s.' % strategy)
        if strategy == 'eda':
            self.documents_augmented, self.labels_augmented = eda(self.documents, self.labels)
        if strategy == 'uda':
            # BUG FIX: the 'uda' strategy previously called eda() by mistake.
            self.documents_augmented, self.labels_augmented = uda(self.documents, self.labels)
        if strategy == 'pyramid':
            # BUG FIX: pyramid() requires a split level; previously it was
            # called without one, raising TypeError. Use the mid-range 3.
            self.documents_augmented, self.labels_augmented = pyramid(self.documents, self.labels, 3)

    def test(self):
        return "Mantéïa Augmentation."
def uda(documents, labels):
    """Unsupervised Data Augmentation: TF-IDF based word replacement.

    Corpus statistics are computed once over *documents*; every document
    is then rewritten by a TfIdfWordRep operator (token_prob = 0.9).

    Returns:
        (documents_augmented, labels_augmented) lists, truncated to the
        shorter of the two inputs.
    """
    replace_op = TfIdfWordRep(0.9, get_data_stats(documents))
    augmented_docs = []
    augmented_labels = []
    for text, label in zip(documents, labels):
        augmented_docs.append(replace_op(text))
        augmented_labels.append(label)
    return augmented_docs, augmented_labels
#https://github.com/google-research/uda/blob/master/text/augmentation/word_level_augment.py
def pyramid(documents, labels, level=3):
    r"""
    Pyramid augmentation: split every document into consecutive chunks.

    Args:
        documents: list of texts.
        labels: list of labels, paired with *documents*.
        level (int, defaults to 3): splitting depth, clamped to [2, 5].
            BUG FIX: the parameter previously had no default, although
            callers (Augmentation) invoked pyramid(documents, labels).

    Returns:
        (documents_augmented, labels_augmented)
    """
    documents_augmented = []
    labels_augmented = []
    # Clamp the splitting level to the supported range.
    if level < 2: level = 2
    if level > 5: level = 5
    for text, label in zip(documents, labels):
        text_list, label_list = split_text(text, label, level)
        documents_augmented = documents_augmented + text_list
        labels_augmented = labels_augmented + label_list
    return documents_augmented, labels_augmented
def get_data_stats(texts):
    """Compute per-word IDF and accumulated TF-IDF scores over *texts*.

    Words are obtained by splitting each text on single spaces.

    Returns:
        dict {"idf": {word: idf}, "tf_idf": {word: score}} where tf_idf
        sums the TF-IDF contribution of every occurrence in the corpus.
    """
    # IDF: number of documents each word appears in.
    doc_freq = collections.defaultdict(int)
    for text in texts:
        for word in set(text.split(' ')):
            doc_freq[word] += 1
    idf = {word: math.log(len(texts) * 1. / freq)
           for word, freq in doc_freq.items()}
    # TF-IDF accumulated over every occurrence in the corpus.
    scores = collections.defaultdict(float)
    for text in texts:
        words = text.split(' ')
        for word in words:
            scores[word] += 1. / len(words) * idf[word]
    return {
        "idf": idf,
        "tf_idf": dict(scores),
    }
class EfficientRandomGen(object):
    """Base class that draws many random numbers in one batch and hands
    them out one at a time, refilling the batch when it is exhausted."""

    def reset_random_prob(self):
        """Draw a fresh batch of uniform randoms and reset the read pointer."""
        batch = 100000
        self.random_prob_cache = np.random.random(size=(batch,))
        self.random_prob_ptr = batch - 1

    def get_random_prob(self):
        """Return one cached uniform random number in [0, 1)."""
        prob = self.random_prob_cache[self.random_prob_ptr]
        self.random_prob_ptr -= 1
        if self.random_prob_ptr == -1:
            # Cache exhausted: draw a fresh batch.
            self.reset_random_prob()
        return prob

    def get_random_token(self):
        """Return one token from the pre-sampled token list (built by the
        subclass via reset_token_list), resampling when it runs out."""
        tok = self.token_list[self.token_ptr]
        self.token_ptr -= 1
        if self.token_ptr == -1:
            self.reset_token_list()
        return tok
class TfIdfWordRep(EfficientRandomGen):
	"""TF-IDF Based Word Replacement.

	Words with a low TF-IDF score (uninformative words) are replaced with
	higher probability; replacement tokens are pre-sampled proportionally
	to (max_tf_idf - tf_idf), so uninformative tokens are drawn more often.
	"""
	def __init__(self, token_prob, data_stats):
		"""token_prob: target average fraction of tokens replaced per sentence.
		data_stats: dict with 'idf' and 'tf_idf' maps (see get_data_stats)."""
		super(TfIdfWordRep, self).__init__()
		self.token_prob = token_prob
		self.data_stats = data_stats
		self.idf = data_stats["idf"]
		self.tf_idf = data_stats["tf_idf"]
		tf_idf_items = data_stats["tf_idf"].items()
		# Sort words by decreasing TF-IDF score.
		tf_idf_items = sorted(tf_idf_items, key=lambda item: -item[1])
		self.tf_idf_keys = []
		self.tf_idf_values = []
		for key, value in tf_idf_items:
			self.tf_idf_keys += [key]
			self.tf_idf_values += [value]
		# Sampling distribution over replacement tokens: proportional to
		# (max - tf_idf), normalized to sum to 1.
		self.normalized_tf_idf = np.array(self.tf_idf_values)
		self.normalized_tf_idf = (self.normalized_tf_idf.max()
								  - self.normalized_tf_idf)
		self.normalized_tf_idf = (self.normalized_tf_idf
								  / self.normalized_tf_idf.sum())
		self.reset_token_list()
		self.reset_random_prob()
	def get_replace_prob(self, all_words):
		"""Compute the probability of replacing tokens in a sentence."""
		cur_tf_idf = collections.defaultdict(int)
		for word in all_words:
			cur_tf_idf[word] += 1. / len(all_words) * self.idf[word]
		replace_prob = []
		for word in all_words:
			replace_prob += [cur_tf_idf[word]]
		# Low TF-IDF words get higher replacement probability; rescaled so the
		# expected number of replaced tokens is token_prob * len(all_words).
		replace_prob = np.array(replace_prob)
		replace_prob = np.max(replace_prob) - replace_prob
		replace_prob = (replace_prob / replace_prob.sum() *
						self.token_prob * len(all_words))
		return replace_prob
	def __call__(self, example):
		"""Return *example* with some tokens replaced by sampled tokens."""
		all_words = example.split(' ')
		replace_prob = self.get_replace_prob(all_words)
		all_words = self.replace_tokens(
			all_words,
			replace_prob[:len(all_words)]
		)
		return " ".join(all_words)
	def replace_tokens(self, word_list, replace_prob):
		"""Replace tokens in a sentence."""
		for i in range(len(word_list)):
			if self.get_random_prob() < replace_prob[i]:
				word_list[i] = self.get_random_token()
		return word_list
	def reset_token_list(self):
		"""Pre-sample a fresh list of replacement tokens from the
		normalized TF-IDF distribution."""
		cache_len = len(self.tf_idf_keys)
		token_list_idx = np.random.choice(
			cache_len, (cache_len,), p=self.normalized_tf_idf)
		self.token_list = []
		for idx in token_list_idx:
			self.token_list += [self.tf_idf_keys[idx]]
		self.token_ptr = len(self.token_list) - 1
		#print("sampled token list: {:s}".format(" ".join(self.token_list)))
def eda(documents, labels):
    """Apply EDA (Easy Data Augmentation) to every (document, label) pair.

    Each document yields several variants (random swap, random deletion,
    synonym replacement, random insertion); the label is duplicated once
    per variant.

    Returns:
        (documents_augmented, labels_augmented)
    """
    augmented_docs = []
    augmented_labels = []
    for document, label in zip(documents, labels):
        doc_variants, label_variants = eda_text(document, label)
        augmented_docs += doc_variants
        augmented_labels += label_variants
    return augmented_docs, augmented_labels
def eda_text(text, label):
    """Generate EDA variants of *text*: random swap, random deletion,
    WordNet synonym replacement and random insertion.

    Returns:
        (text_list, label_list) with one copy of *label* per variant.
        Swap and deletion always produce a variant; the two WordNet-based
        variants are produced only when a differing synonym is found.
    """
    text_list, label_list = [], []
    # --- Random Swap: exchange two random word positions.
    word_list_1 = text.split(' ')
    idx_1 = random.randint(0, len(word_list_1)-1)
    idx_2 = random.randint(0, len(word_list_1)-1)
    word_list_1[idx_1], word_list_1[idx_2] = word_list_1[idx_2], word_list_1[idx_1]
    text_list = [' '.join(word_list_1)]
    label_list = [label]
    # --- Random Deletion: remove one random word.
    word_list_2 = text.split(' ')
    idx_3 = random.randint(0, len(word_list_2)-1)
    # BUG FIX: previously deleted index idx_1 (from the swap step) instead
    # of the freshly drawn idx_3.
    del word_list_2[idx_3]
    text_list.append(' '.join(word_list_2))
    label_list.append(label)
    # --- Synonym Replacement: replace one random word by a WordNet synonym.
    word_list_3 = text.split(' ')
    idx_4 = random.randint(0, len(word_list_3)-1)
    if len(wordnet.synsets(word_list_3[idx_4])) > 0:
        idx_synonym = random.randint(0, len(wordnet.synsets(word_list_3[idx_4]))-1)
        synonym = wordnet.synsets(word_list_3[idx_4])[idx_synonym].lemma_names()[0]
        if synonym != word_list_3[idx_4]:
            word_list_3[idx_4] = synonym
            # BUG FIX: previously appended the deletion variant (word_list_2)
            # instead of the synonym-replaced text.
            text_list.append(' '.join(word_list_3))
            label_list.append(label)
    # --- Random Insertion: insert a synonym of a random word at a random spot.
    word_list_4 = text.split(' ')
    idx_5 = random.randint(0, len(word_list_4)-1)
    idx_6 = random.randint(0, len(word_list_4)-1)
    if len(wordnet.synsets(word_list_4[idx_5])) > 0:
        idx_synonym = random.randint(0, len(wordnet.synsets(word_list_4[idx_5]))-1)
        synonym = wordnet.synsets(word_list_4[idx_5])[idx_synonym].lemma_names()[0]
        if synonym != word_list_4[idx_5]:
            word_list_4.insert(idx_6, synonym)
            # BUG FIX: previously appended word_list_2 instead of the
            # insertion variant.
            text_list.append(' '.join(word_list_4))
            label_list.append(label)
    return text_list, label_list
def split_text(text, label, level=3):
    """Split *text* into fragments for pyramid augmentation.

    For level >= 1 a 5%-trimmed copy of the text is produced; for each
    k in 2..min(level, 5) the text is additionally cut into k consecutive
    near-equal chunks.

    Returns:
        (text_list, label_list) with one copy of *label* per fragment.
    """
    fragments = []
    n = len(text)
    if level >= 1:
        # Trim 5% off both ends.
        fragments.append(text[int(0.05 * n):int(0.95 * n)])
    for parts in range(2, min(level, 5) + 1):
        step = int(n / parts)
        # Cut points 0, step, 2*step, ..., with the remainder in the last chunk.
        cuts = [step * i for i in range(parts)] + [n]
        fragments.extend(text[cuts[i]:cuts[i + 1]] for i in range(parts))
    return fragments, [label] * len(fragments)
| [
"numpy.random.random",
"numpy.random.choice",
"numpy.max",
"numpy.array",
"collections.defaultdict",
"nltk.corpus.wordnet.synsets"
] | [((2556, 2584), 'collections.defaultdict', 'collections.defaultdict', (['int'], {}), '(int)\n', (2579, 2584), False, 'import collections\n'), ((3466, 3501), 'numpy.random.random', 'np.random.random', ([], {'size': '(cache_len,)'}), '(size=(cache_len,))\n', (3482, 3501), True, 'import numpy as np\n'), ((4591, 4619), 'numpy.array', 'np.array', (['self.tf_idf_values'], {}), '(self.tf_idf_values)\n', (4599, 4619), True, 'import numpy as np\n'), ((5035, 5063), 'collections.defaultdict', 'collections.defaultdict', (['int'], {}), '(int)\n', (5058, 5063), False, 'import collections\n'), ((5263, 5285), 'numpy.array', 'np.array', (['replace_prob'], {}), '(replace_prob)\n', (5271, 5285), True, 'import numpy as np\n'), ((6071, 6138), 'numpy.random.choice', 'np.random.choice', (['cache_len', '(cache_len,)'], {'p': 'self.normalized_tf_idf'}), '(cache_len, (cache_len,), p=self.normalized_tf_idf)\n', (6087, 6138), True, 'import numpy as np\n'), ((5305, 5325), 'numpy.max', 'np.max', (['replace_prob'], {}), '(replace_prob)\n', (5311, 5325), True, 'import numpy as np\n'), ((7362, 7397), 'nltk.corpus.wordnet.synsets', 'wordnet.synsets', (['word_list_3[idx_4]'], {}), '(word_list_3[idx_4])\n', (7377, 7397), False, 'from nltk.corpus import wordnet\n'), ((7851, 7886), 'nltk.corpus.wordnet.synsets', 'wordnet.synsets', (['word_list_4[idx_5]'], {}), '(word_list_4[idx_5])\n', (7866, 7886), False, 'from nltk.corpus import wordnet\n'), ((7438, 7473), 'nltk.corpus.wordnet.synsets', 'wordnet.synsets', (['word_list_3[idx_4]'], {}), '(word_list_3[idx_4])\n', (7453, 7473), False, 'from nltk.corpus import wordnet\n'), ((7927, 7962), 'nltk.corpus.wordnet.synsets', 'wordnet.synsets', (['word_list_4[idx_5]'], {}), '(word_list_4[idx_5])\n', (7942, 7962), False, 'from nltk.corpus import wordnet\n'), ((7491, 7526), 'nltk.corpus.wordnet.synsets', 'wordnet.synsets', (['word_list_3[idx_4]'], {}), '(word_list_3[idx_4])\n', (7506, 7526), False, 'from nltk.corpus import wordnet\n'), ((7980, 8015), 
'nltk.corpus.wordnet.synsets', 'wordnet.synsets', (['word_list_4[idx_5]'], {}), '(word_list_4[idx_5])\n', (7995, 8015), False, 'from nltk.corpus import wordnet\n')] |
"""
THIS CODE IS UNDER THE BSD 2-Clause LICENSE. YOU CAN FIND THE COMPLETE
FILE AT THE SOURCE DIRECTORY.
Copyright (C) 2017 <NAME> - All rights reserved
@author : <EMAIL>
Publication:
A Novel Unsupervised Analysis of Electrophysiological
Signals Reveals New Sleep Sub-stages in Mice
*****************************************************************************
Class implementing the mean-covariance Restricted Boltzmann Machine (mcRBM)
by <NAME>.
It is based on the original code with minor modifications according to
the needs of our experiments.
Refer to:
"<NAME>, <NAME>, "Modeling Pixel Means and Covariances Using
Factorized Third-Order Boltzmann Machines", CVPR 2010"
You can find the original code at
http://www.cs.toronto.edu/~ranzato/publications/mcRBM/code/mcRBM_04May2010.zip
COPYRIGHT of the original code has been included in the currect directory.
<vkatsageorgiou@vassia-PC>
"""
import sys
import numpy as np
import os
import cudamat as cmt
import _pickle as cPickle
import matplotlib.pyplot as plt
import shutil
from numpy.random import RandomState
from scipy.io import loadmat, savemat
from configparser import *
from datetime import datetime
import sys
sys.path.insert(0, '../dataPreprocessing/')
from dataPreproc import DataPreproc
class mcRBM:
    def __init__(self, refDir, expConfigFilename, modelConfigFilename, gpuId):
        """Set up the mcRBM experiment from its configuration files.

        Args:
            refDir: directory containing all the configuration files.
            expConfigFilename: experiment config file name, relative to refDir.
            modelConfigFilename: model config file name, relative to refDir.
            gpuId: id (int-convertible) of the GPU to use for computation.
        """
        # directory containing all the configuration files for the experiment
        self.refDir = refDir
        # file with configuration details for the launched experiment
        self.expConfigFilename = refDir + '/' + expConfigFilename
        # file with configuration details for the model to be trained
        self.modelConfigFilename = refDir + '/' + modelConfigFilename
        # data pre-processing object
        self.dpp = DataPreproc()
        # loading details from configuration files
        self.loadExpConfig()
        self.loadModelConfig()
        # id of the GPU which will be used for computation
        self.gpuId = int(gpuId)
    def loadExpConfig(self):
        '''
        Load the configuration details for the experiment and the data
        pre-processing flags, and create the experiment save directory.
        '''
        config = ConfigParser()
        config.read(self.expConfigFilename)
        # Random seeds for reproducibility.
        self.npRandSeed = config.getint('PARAMETERS','npRandSeed')
        self.npRandState = config.getint('PARAMETERS','npRandState')
        # Dataset / experiment locations and identifiers.
        self.dataDir = config.get('EXP_DETAILS','dsetDir')
        self.expsDir = config.get('EXP_DETAILS','expsDir')
        self.expName = config.get('EXP_DETAILS','expID')
        self.dSetName = config.get('EXP_DETAILS','dSetName')
        # Data pre-processing flags (log-transform, mean subtraction,
        # scaling, PCA, whitening, rescaling).
        self.logFlag = config.getboolean('EXP_DETAILS','logFlag')
        self.meanSubtructionFlag = config.getboolean('EXP_DETAILS','meanSubtructionFlag')
        self.scaleFlag = config.getboolean('EXP_DETAILS','scaleFlag')
        self.scaling = config.get('EXP_DETAILS','scaling')
        self.doPCA = config.getboolean('EXP_DETAILS','doPCA')
        self.whitenFlag = config.getboolean('EXP_DETAILS','whitenFlag')
        self.rescaleFlag = config.getboolean('EXP_DETAILS','rescaleFlag')
        self.rescaling = config.get('EXP_DETAILS','rescaling')
        self.dataFilename = self.dataDir + self.dSetName
        self.saveDir = self.expsDir + self.expName
        # Create the output directory if it does not exist yet.
        if not os.path.exists(self.saveDir):
            os.makedirs(self.saveDir)
        #shutil.copy2(self.expConfigFilename, self.saveDir)
        #shutil.copy2(self.modelConfigFilename, self.saveDir)
    def loadModelConfig(self):
        '''
        Load the configuration details for the model to be trained:
        training schedule, model sizes, optimizer and HMC sampler settings.
        '''
        config = ConfigParser()
        config.read(self.modelConfigFilename)
        self.verbose = config.getint('VERBOSITY','verbose')
        # Training schedule.
        self.num_epochs = config.getint('MAIN_PARAMETER_SETTING','num_epochs')
        self.batch_size = config.getint('MAIN_PARAMETER_SETTING','batch_size')
        # Epoch at which FH training starts / weight decay kicks in.
        self.startFH = config.getint('MAIN_PARAMETER_SETTING','startFH')
        self.startwd = config.getint('MAIN_PARAMETER_SETTING','startwd')
        self.doPCD = config.getint('MAIN_PARAMETER_SETTING','doPCD')
        # model parameters
        self.num_fac = config.getint('MODEL_PARAMETER_SETTING','num_fac')
        self.num_hid_cov = config.getint('MODEL_PARAMETER_SETTING','num_hid_cov')
        self.num_hid_mean = config.getint('MODEL_PARAMETER_SETTING','num_hid_mean')
        self.apply_mask = config.getint('MODEL_PARAMETER_SETTING','apply_mask')
        # Optimizer parameters (learning rate and final weight cost).
        self.epsilon = config.getfloat('OPTIMIZER_PARAMETERS','epsilon')
        self.weightcost_final = config.getfloat('OPTIMIZER_PARAMETERS','weightcost_final')
        # Hybrid Monte Carlo sampler parameters.
        self.hmc_step_nr = config.getint('HMC_PARAMETERS','hmc_step_nr')
        self.hmc_target_ave_rej = config.getfloat('HMC_PARAMETERS','hmc_target_ave_rej')
#-- Data Loading function:
def loadData(self):
'''
Function loading the data
'''
# Create save folder
if not os.path.exists(self.saveDir + '/dataDetails/'):
os.makedirs(self.saveDir + '/dataDetails/')
# load data file:
if self.dataFilename.split('.')[1] == 'npz':
dLoad = np.load(self.dataFilename)
elif self.dataFilename.split('.') == 'mat':
dLoad = loadmat(self.dataFilename)
else:
print("error! Unrecognized data file")
self.d = dLoad['d']
self.obsKeys = dLoad['epochsLinked']
self.epochTime = dLoad['epochTime']
"""
If you want to keep only EEG features, uncomment next line.
"""
#self.d = self.d[:, :self.d.shape[1]-1]
self.d = np.array(self.d, dtype=np.float32)
self.obsKeys = np.array(self.obsKeys, dtype=np.float32)
print("initial size: ", self.d.shape)
#print("FrameIDs : ", self.obsKeys, "of shape : ", self.obsKeys.shape)
with open (self.saveDir + '/dataDetails/' + 'initialData.txt','w') as f:
f.write("\n Modeling: %s " % self.dataFilename)
f.write("\n Dataset size: %s " % str(self.d.shape))
f.write("\n Dataset type: %s " % str(self.d.dtype))
f.write("\n \n d_min: %s " % str(np.min(self.d, axis=0)))
f.write("\n \n d_max: %s " % str(np.max(self.d, axis=0)))
f.write("\n \n d_mean: %s " % str(np.mean(self.d, axis=0)))
f.write("\n \n d_std: %s " % str(np.std(self.d, axis=0)))
f.close()
    # Function taken from the original mcRBM code
    def compute_energy_mcRBM(self, data,normdata,vel,energy,VF,FH,bias_cov,bias_vis,w_mean,bias_mean,t1,t2,t6,feat,featsq,feat_mean,length,lengthsq,normcoeff,small,num_vis):
        '''
        Compute, entirely in-place on the GPU, the HMC energy of each sample:
        potential energy (covariance + mean hidden contributions, visible-bias
        term, quadratic regularization) plus the kinetic energy of ``vel``.

        All arguments are cudamat buffers used as scratch space and overwritten;
        the per-sample result accumulates into ``energy`` (1 x P). Shape key in
        the inline comments: D = num visible dims, H = num factors / mean
        hiddens, O = num covariance hiddens, P = batch size.
        '''
        # normalize input data vectors
        data.mult(data, target = t6) # DxP (nr input dims x nr samples)
        t6.sum(axis = 0, target = lengthsq) # 1xP
        lengthsq.mult(0.5, target = energy) # energy of quadratic regularization term
        lengthsq.mult(1./num_vis) # normalize by number of components (like std)
        lengthsq.add(small) # small prevents division by 0
        cmt.sqrt(lengthsq, target = length)
        length.reciprocal(target = normcoeff) # 1xP
        data.mult_by_row(normcoeff, target = normdata) # normalized data
        ## potential
        # covariance contribution: -sum log(1 + exp(-0.5 FH^T (VF^T x)^2 + bias_cov))
        cmt.dot(VF.T, normdata, target = feat) # HxP (nr factors x nr samples)
        feat.mult(feat, target = featsq) # HxP
        cmt.dot(FH.T,featsq, target = t1) # OxP (nr cov hiddens x nr samples)
        t1.mult(-0.5)
        t1.add_col_vec(bias_cov) # OxP
        cmt.exp(t1) # OxP
        t1.add(1, target = t2) # OxP
        cmt.log(t2)
        t2.mult(-1)
        energy.add_sums(t2, axis=0)
        # mean contribution: -sum log(1 + exp(w_mean^T x + bias_mean))
        cmt.dot(w_mean.T, data, target = feat_mean) # HxP (nr mean hiddens x nr samples)
        feat_mean.add_col_vec(bias_mean) # HxP
        cmt.exp(feat_mean)
        feat_mean.add(1)
        cmt.log(feat_mean)
        feat_mean.mult(-1)
        energy.add_sums(feat_mean, axis=0)
        # visible bias term
        data.mult_by_col(bias_vis, target = t6)
        t6.mult(-1) # DxP
        energy.add_sums(t6, axis=0) # 1xP
        # kinetic energy: 0.5 * ||vel||^2 per sample
        vel.mult(vel, target = t6)
        energy.add_sums(t6, axis = 0, mult = .5)
    # Same as the previous function; needed only if the energy has to be
    # computed and stored to monitor the training process.
    def compute_energy_mcRBM_visual(self, data,normdata,energy,VF,FH,bias_cov,bias_vis,w_mean,bias_mean,t1,t2,t6,feat,featsq,feat_mean,length,lengthsq,normcoeff,small,num_vis):
        '''
        Variant of ``compute_energy_mcRBM`` without a velocity argument, used
        to log the model energy during training. All buffers are cudamat
        scratch matrices, overwritten in-place; the per-sample energies
        accumulate into ``energy`` (1 x P).

        NOTE(review): the final "kinetic" block squares ``data`` (there is no
        velocity here), adding 0.5*||x||^2 per sample — kept as in the
        original code.
        '''
        # normalize input data vectors
        data.mult(data, target = t6) # DxP (nr input dims x nr samples)
        t6.sum(axis = 0, target = lengthsq) # 1xP
        lengthsq.mult(0.5, target = energy) # energy of quadratic regularization term
        lengthsq.mult(1./num_vis) # normalize by number of components (like std)
        lengthsq.add(small) # small prevents division by 0
        cmt.sqrt(lengthsq, target = length)
        length.reciprocal(target = normcoeff) # 1xP
        data.mult_by_row(normcoeff, target = normdata) # normalized data
        ## potential
        # covariance contribution
        cmt.dot(VF.T, normdata, target = feat) # HxP (nr factors x nr samples)
        feat.mult(feat, target = featsq) # HxP
        cmt.dot(FH.T,featsq, target = t1) # OxP (nr cov hiddens x nr samples)
        t1.mult(-0.5)
        t1.add_col_vec(bias_cov) # OxP
        cmt.exp(t1) # OxP
        t1.add(1, target = t2) # OxP
        cmt.log(t2)
        t2.mult(-1)
        energy.add_sums(t2, axis=0)
        # mean contribution
        cmt.dot(w_mean.T, data, target = feat_mean) # HxP (nr mean hiddens x nr samples)
        feat_mean.add_col_vec(bias_mean) # HxP
        cmt.exp(feat_mean)
        feat_mean.add(1)
        cmt.log(feat_mean)
        feat_mean.mult(-1)
        energy.add_sums(feat_mean, axis=0)
        # visible bias term
        data.mult_by_col(bias_vis, target = t6)
        t6.mult(-1) # DxP
        energy.add_sums(t6, axis=0) # 1xP
        # "kinetic" term (squares data — see NOTE in docstring)
        data.mult(data, target = t6)
        energy.add_sums(t6, axis = 0, mult = .5)
    # Function taken from the original mcRBM code
    #################################################################
    # compute the derivative of the free energy at a given input
    def compute_gradient_mcRBM(self, data,normdata,VF,FH,bias_cov,bias_vis,w_mean,bias_mean,t1,t2,t3,t4,t6,feat,featsq,feat_mean,gradient,normgradient,length,lengthsq,normcoeff,small,num_vis):
        '''
        Compute d(free energy)/d(data) in-place on the GPU, writing the
        result into ``gradient`` (D x P). All other cudamat buffers are
        scratch space and are overwritten. The statement order matters:
        buffers (t1..t6, feat, featsq, ...) are deliberately reused.
        '''
        # normalize input data
        data.mult(data, target = t6) # DxP
        t6.sum(axis = 0, target = lengthsq) # 1xP
        lengthsq.mult(1./num_vis) # normalize by number of components (like std)
        lengthsq.add(small)
        cmt.sqrt(lengthsq, target = length)
        length.reciprocal(target = normcoeff) # 1xP
        data.mult_by_row(normcoeff, target = normdata) # normalized data
        # covariance part: sigmoid-gated backprop through the factored term
        cmt.dot(VF.T, normdata, target = feat) # HxP
        feat.mult(feat, target = featsq) # HxP
        cmt.dot(FH.T,featsq, target = t1) # OxP
        t1.mult(-.5)
        t1.add_col_vec(bias_cov) # OxP
        t1.apply_sigmoid(target = t2) # OxP
        cmt.dot(FH,t2, target = t3) # HxP
        t3.mult(feat)
        cmt.dot(VF, t3, target = normgradient) # VxP
        # final bprop through normalization
        length.mult(lengthsq, target = normcoeff)
        normcoeff.reciprocal() # 1xP
        normgradient.mult(data, target = gradient) # VxP
        gradient.sum(axis = 0, target = t4) # 1xP
        t4.mult(-1./num_vis)
        data.mult_by_row(t4, target = gradient)
        normgradient.mult_by_row(lengthsq, target = t6)
        gradient.add(t6)
        gradient.mult_by_row(normcoeff)
        # add quadratic term gradient
        gradient.add(data)
        # add visible bias term
        gradient.add_col_mult(bias_vis, -1)
        # add MEAN contribution to gradient
        cmt.dot(w_mean.T, data, target = feat_mean) # HxP
        feat_mean.add_col_vec(bias_mean) # HxP
        feat_mean.apply_sigmoid() # HxP
        gradient.subtract_dot(w_mean,feat_mean) # VxP
    # Function taken from the original mcRBM code
    ############################################################3
    # Hybrid Monte Carlo sampler
    def draw_HMC_samples(self, data,negdata,normdata,vel,gradient,normgradient,new_energy,old_energy,VF,FH,bias_cov,bias_vis,w_mean,bias_mean,hmc_step,hmc_step_nr,hmc_ave_rej,hmc_target_ave_rej,t1,t2,t3,t4,t5,t6,t7,thresh,feat,featsq,batch_size,feat_mean,length,lengthsq,normcoeff,small,num_vis):
        '''
        Draw approximate model samples via Hybrid Monte Carlo: leap-frog
        integration of ``hmc_step_nr`` steps starting from ``data``, followed
        by per-sample Metropolis rejection. ``negdata`` is updated in-place
        with the accepted samples (rejected columns are reset to ``data``).

        Returns the adapted ``(hmc_step, hmc_ave_rej)``: the step size is
        nudged up/down to keep the running rejection rate near
        ``hmc_target_ave_rej``. All matrix arguments are cudamat scratch
        buffers that get overwritten.
        '''
        # fresh Gaussian momenta; start the chain at the data
        vel.fill_with_randn()
        negdata.assign(data)
        self.compute_energy_mcRBM(negdata,normdata,vel,old_energy,VF,FH,bias_cov,bias_vis,w_mean,bias_mean,t1,t2,t6,feat,featsq,feat_mean,length,lengthsq,normcoeff,small,num_vis)
        self.compute_gradient_mcRBM(negdata,normdata,VF,FH,bias_cov,bias_vis,w_mean,bias_mean,t1,t2,t3,t4,t6,feat,featsq,feat_mean,gradient,normgradient,length,lengthsq,normcoeff,small,num_vis)
        # half step
        vel.add_mult(gradient, -0.5*hmc_step)
        negdata.add_mult(vel,hmc_step)
        # full leap-frog steps
        for ss in range(hmc_step_nr - 1):
            ## re-evaluate the gradient
            self.compute_gradient_mcRBM(negdata,normdata,VF,FH,bias_cov,bias_vis,w_mean,bias_mean,t1,t2,t3,t4,t6,feat,featsq,feat_mean,gradient,normgradient,length,lengthsq,normcoeff,small,num_vis)
            # update variables
            vel.add_mult(gradient, -hmc_step)
            negdata.add_mult(vel,hmc_step)
        # final half-step
        self.compute_gradient_mcRBM(negdata,normdata,VF,FH,bias_cov,bias_vis,w_mean,bias_mean,t1,t2,t3,t4,t6,feat,featsq,feat_mean,gradient,normgradient,length,lengthsq,normcoeff,small,num_vis)
        vel.add_mult(gradient, -0.5*hmc_step)
        # compute new energy
        self.compute_energy_mcRBM(negdata,normdata,vel,new_energy,VF,FH,bias_cov,bias_vis,w_mean,bias_mean,t1,t2,t6,feat,featsq,feat_mean,length,lengthsq,normcoeff,small,num_vis)
        # rejection: accept column i with prob min(1, exp(old - new))
        old_energy.subtract(new_energy, target = thresh)
        cmt.exp(thresh)
        t4.fill_with_rand()
        t4.less_than(thresh)
        # update negdata and rejection rate
        t4.mult(-1)
        t4.add(1) # now 1's detect rejections
        t4.sum(axis = 1, target = t5)
        t5.copy_to_host()
        rej = t5.numpy_array[0,0]/batch_size
        # rejected columns: restore the original data sample
        data.mult_by_row(t4, target = t6)
        negdata.mult_by_row(t4, target = t7)
        negdata.subtract(t7)
        negdata.add(t6)
        # exponential moving average of the rejection rate, then adapt step
        hmc_ave_rej = 0.9*hmc_ave_rej + 0.1*rej
        if hmc_ave_rej < hmc_target_ave_rej:
            hmc_step = min(hmc_step*1.01,0.25)
        else:
            hmc_step = max(hmc_step*0.99,.001)
        return hmc_step, hmc_ave_rej
def saveLsq(self):
'''
Function saving the sum of the square of the data
(needed for training as well as for post-analysis)
'''
d = self.d.astype(np.float32)
dsq = np.square(d)
lsq = np.sum(dsq, axis=0)
with open( self.refDir + 'lsqComplete.pkl', 'wb') as pklFile:
cPickle.dump(lsq, pklFile)
    def train(self):
        '''
        Main train function : modified version of the original train function.

        Additions : GPU selection (useful for multi-GPU machines);
        saving the sum of the square of the data for post-processing;
        visible data are saved; data samples are permuted for training;
        weights are saved every 100 training epochs; training energy is
        visualized every 100 training epochs.

        NOTE : the anneal-learning-rate schedule used in the initial code
        is NOT used here!

        Side effects: changes the working directory to ``self.saveDir``;
        writes 'lsqComplete.pkl', 'visData.npz'/'visData.txt',
        'terminal.txt', plots under 'plots/energy/', weight snapshots under
        'weights/', final weight/energy .mat files, and a 'done' marker.
        Deletes ``self.d``, ``self.epochTime``, ``self.obsKeys`` once copied
        to the GPU. Training stops early if a file named 'stop_now' exists
        in the experiment folder.
        '''
        #plt.ion()
        f1 = plt.figure()
        ax1 = f1.add_subplot(111)
        #ax2 = f1.add_subplot(122)
        #plt.show()
        # bind this process to the configured GPU and init cudamat's RNG
        cmt.cuda_set_device(self.gpuId)
        cmt.cublas_init()
        cmt.CUDAMatrix.init_random(1)
        np.random.seed(self.npRandSeed)
        prng = RandomState(self.npRandState)
        ################################################################
        ##################### CHANGE PATH ##############################
        # Move to current experiment path:
        os.chdir(self.saveDir)
        # Get current path:
        os.getcwd()
        self.plotsDir = 'plots'
        #self.probabilitiesDir = 'p_all'
        if not os.path.isdir(self.plotsDir):
            os.makedirs(self.plotsDir)
        if not os.path.isdir(self.plotsDir + '/energy'):
            os.makedirs(self.plotsDir + '/energy')
        #if not os.path.isdir(self.probabilitiesDir):
        #    os.makedirs(self.probabilitiesDir)
        if not os.path.isdir('weights'):
            os.makedirs('weights')
        d = self.d.astype(np.float32)
        print("visible size: ", d.shape)
        # column-wise sum of squares, saved for post-analysis
        dsq = np.square(d)
        lsq = np.sum(dsq, axis=0)
        with open('lsqComplete.pkl', 'wb') as pklFile:
            cPickle.dump(lsq, pklFile)
        del dsq, lsq
        # Save visible data :
        visData = d
        np.savez('visData.npz', data=d, obsKeys=self.obsKeys, epochTime=self.epochTime)
        with open ('visData.txt','w') as f:
            f.write("\n Dataset : %s" %(self.dataFilename))
            f.write("\n visData size: %s " % str(visData.shape))
            f.write("\n visData type: %s " % str(visData.dtype))
            f.write("\n \n visData Range: %s " % str(np.max(visData, axis=0)-np.min(visData, axis=0)))
            f.write("\n \n visData min: %s " % str(np.min(visData, axis=0)))
            f.write("\n \n visData max: %s " % str(np.max(visData, axis=0)))
            f.write("\n \n visData mean: %s " % str(np.mean(visData, axis=0)))
            f.write("\n \n visData std: %s " % str(np.std(visData, axis=0)))
            f.close()
        del visData #if not needed for computing the latent states
        # shuffle samples before batching
        permIdx = prng.permutation(d.shape[0])
        d = d[permIdx,:]
        #subsetting train and test datasets
        #trainPerc = 0.7
        #trainSampNum = int(np.ceil(trainPerc*d.shape[0]))
        #trainSampNum = int(np.floor(trainSampNum/self.batch_size)*self.batch_size)
        #testSampNum = int(d.shape[0]-trainSampNum-1)
        # The test dataset is not used at the moment, it can be used as
        # a validation set to check for overfitting. To use it, uncomment
        # all the variables with 'test' in their name
        #~ d_test = d[trainSampNum+1:,:]
        #d = d[:trainSampNum,:]
        #obsKeys = self.obsKeys[:trainSampNum]
        totnumcases = d.shape[0]
        num_vis = d.shape[1]
        num_batches = int(totnumcases/self.batch_size)
        print("num_batches: ", num_batches)
        dev_dat = cmt.CUDAMatrix(d.T) # VxP
        #~ test_dat = cmt.CUDAMatrix(d_test.T)
        # free host copies once the data lives on the GPU
        del d, self.d, self.epochTime, self.obsKeys
        # training parameters (as in the original code by Ranzato)
        epsilon = self.epsilon
        epsilonVF = 2*epsilon
        epsilonFH = 0.02*epsilon
        epsilonb = 0.02*epsilon
        epsilonw_mean = 0.2*epsilon
        epsilonb_mean = 0.1*epsilon
        weightcost_final = self.weightcost_final
        # HMC setting
        hmc_step_nr = self.hmc_step_nr
        hmc_step = 0.01
        hmc_target_ave_rej = self.hmc_target_ave_rej
        hmc_ave_rej = hmc_target_ave_rej
        # initialize weights
        VF = cmt.CUDAMatrix(np.array(0.02 * prng.randn(num_vis, self.num_fac), dtype=np.float32, order='F')) # VxH
        if self.apply_mask == 0:
            FH = cmt.CUDAMatrix( np.array( np.eye(self.num_fac,self.num_hid_cov), dtype=np.float32, order='F') ) # HxO
        else:
            dd = loadmat('your_FHinit_mask_file.mat') # see CVPR2010paper_material/topo2D_3x3_stride2_576filt.mat for an example
            FH = cmt.CUDAMatrix( np.array( dd["FH"], dtype=np.float32, order='F') )
        bias_cov = cmt.CUDAMatrix( np.array(2.0*np.ones((self.num_hid_cov, 1)), dtype=np.float32, order='F') )
        bias_vis = cmt.CUDAMatrix( np.array(np.zeros((num_vis, 1)), dtype=np.float32, order='F') )
        w_mean = cmt.CUDAMatrix( np.array( 0.05 * prng.randn(num_vis, self.num_hid_mean), dtype=np.float32, order='F') ) # VxH
        bias_mean = cmt.CUDAMatrix( np.array( -2.0*np.ones((self.num_hid_mean,1)), dtype=np.float32, order='F') )
        # initialize variables to store derivatives
        VFinc = cmt.CUDAMatrix( np.array(np.zeros((num_vis, self.num_fac)), dtype=np.float32, order='F'))
        FHinc = cmt.CUDAMatrix( np.array(np.zeros((self.num_fac, self.num_hid_cov)), dtype=np.float32, order='F'))
        bias_covinc = cmt.CUDAMatrix( np.array(np.zeros((self.num_hid_cov, 1)), dtype=np.float32, order='F'))
        bias_visinc = cmt.CUDAMatrix( np.array(np.zeros((num_vis, 1)), dtype=np.float32, order='F'))
        w_meaninc = cmt.CUDAMatrix( np.array(np.zeros((num_vis, self.num_hid_mean)), dtype=np.float32, order='F'))
        bias_meaninc = cmt.CUDAMatrix( np.array(np.zeros((self.num_hid_mean, 1)), dtype=np.float32, order='F'))
        # initialize temporary storage
        data = cmt.CUDAMatrix( np.array(np.empty((num_vis, self.batch_size)), dtype=np.float32, order='F')) # VxP
        normdata = cmt.CUDAMatrix( np.array(np.empty((num_vis, self.batch_size)), dtype=np.float32, order='F')) # VxP
        negdataini = cmt.CUDAMatrix( np.array(np.empty((num_vis, self.batch_size)), dtype=np.float32, order='F')) # VxP
        feat = cmt.CUDAMatrix( np.array(np.empty((self.num_fac, self.batch_size)), dtype=np.float32, order='F'))
        featsq = cmt.CUDAMatrix( np.array(np.empty((self.num_fac, self.batch_size)), dtype=np.float32, order='F'))
        negdata = cmt.CUDAMatrix( np.array(prng.randn(num_vis, self.batch_size), dtype=np.float32, order='F'))
        old_energy = cmt.CUDAMatrix( np.array(np.zeros((1, self.batch_size)), dtype=np.float32, order='F'))
        new_energy = cmt.CUDAMatrix( np.array(np.zeros((1, self.batch_size)), dtype=np.float32, order='F'))
        energy = cmt.CUDAMatrix( np.array(np.zeros((1, self.batch_size)), dtype=np.float32, order='F'))
        gradient = cmt.CUDAMatrix( np.array(np.empty((num_vis, self.batch_size)), dtype=np.float32, order='F')) # VxP
        normgradient = cmt.CUDAMatrix( np.array(np.empty((num_vis, self.batch_size)), dtype=np.float32, order='F')) # VxP
        thresh = cmt.CUDAMatrix( np.array(np.zeros((1, self.batch_size)), dtype=np.float32, order='F'))
        feat_mean = cmt.CUDAMatrix( np.array(np.empty((self.num_hid_mean, self.batch_size)), dtype=np.float32, order='F'))
        vel = cmt.CUDAMatrix( np.array(prng.randn(num_vis, self.batch_size), dtype=np.float32, order='F'))
        length = cmt.CUDAMatrix( np.array(np.zeros((1, self.batch_size)), dtype=np.float32, order='F')) # 1xP
        lengthsq = cmt.CUDAMatrix( np.array(np.zeros((1, self.batch_size)), dtype=np.float32, order='F')) # 1xP
        normcoeff = cmt.CUDAMatrix( np.array(np.zeros((1, self.batch_size)), dtype=np.float32, order='F')) # 1xP
        # commented to avoid computing the energy on test data
        #~ data_test = cmt.CUDAMatrix( np.array(np.empty((num_vis, testSampNum)), dtype=np.float32, order='F')) # Vxtest_batch
        #~ normdata_test = cmt.CUDAMatrix( np.array(np.empty((num_vis, testSampNum)), dtype=np.float32, order='F')) # Vxtest_batch
        #~ length_test = cmt.CUDAMatrix( np.array(np.zeros((1, testSampNum)), dtype=np.float32, order='F')) # 1xtest_batch
        #~ lengthsq_test = cmt.CUDAMatrix( np.array(np.zeros((1, testSampNum)), dtype=np.float32, order='F')) # 1xtest_batch
        #~ normcoeff_test = cmt.CUDAMatrix( np.array(np.zeros((1, testSampNum)), dtype=np.float32, order='F')) # 1xtest_batch
        #~ vel_test = cmt.CUDAMatrix( np.array(prng.randn(num_vis, testSampNum), dtype=np.float32, order='F'))
        #~ feat_test = cmt.CUDAMatrix( np.array(np.empty((self.num_fac, testSampNum)), dtype=np.float32, order='F'))
        #~ featsq_test = cmt.CUDAMatrix( np.array(np.empty((self.num_fac, testSampNum)), dtype=np.float32, order='F'))
        #~ feat_mean_test = cmt.CUDAMatrix( np.array(np.empty((self.num_hid_mean, testSampNum)), dtype=np.float32, order='F'))
        #~ energy_test = cmt.CUDAMatrix( np.array(np.zeros((1, testSampNum)), dtype=np.float32, order='F'))
        if self.apply_mask==1: # this used to constrain very large FH matrices only allowing to change values in a neighborhood
            dd = loadmat('your_FHinit_mask_file.mat')
            mask = cmt.CUDAMatrix( np.array(dd["mask"], dtype=np.float32, order='F'))
        normVF = 1
        small = 0.5
        # other temporary vars
        t1 = cmt.CUDAMatrix( np.array(np.empty((self.num_hid_cov, self.batch_size)), dtype=np.float32, order='F'))
        t2 = cmt.CUDAMatrix( np.array(np.empty((self.num_hid_cov, self.batch_size)), dtype=np.float32, order='F'))
        t3 = cmt.CUDAMatrix( np.array(np.empty((self.num_fac, self.batch_size)), dtype=np.float32, order='F'))
        t4 = cmt.CUDAMatrix( np.array(np.empty((1,self.batch_size)), dtype=np.float32, order='F'))
        t5 = cmt.CUDAMatrix( np.array(np.empty((1,1)), dtype=np.float32, order='F'))
        t6 = cmt.CUDAMatrix( np.array(np.empty((num_vis, self.batch_size)), dtype=np.float32, order='F'))
        t7 = cmt.CUDAMatrix( np.array(np.empty((num_vis, self.batch_size)), dtype=np.float32, order='F'))
        t8 = cmt.CUDAMatrix( np.array(np.empty((num_vis, self.num_fac)), dtype=np.float32, order='F'))
        t9 = cmt.CUDAMatrix( np.array(np.zeros((self.num_fac, self.num_hid_cov)), dtype=np.float32, order='F'))
        t10 = cmt.CUDAMatrix( np.array(np.empty((1,self.num_fac)), dtype=np.float32, order='F'))
        t11 = cmt.CUDAMatrix( np.array(np.empty((1,self.num_hid_cov)), dtype=np.float32, order='F'))
        # commented to avoid computing the energy on test data
        #~ t1_test = cmt.CUDAMatrix( np.array(np.empty((self.num_hid_cov, testSampNum)), dtype=np.float32, order='F'))
        #~ t2_test = cmt.CUDAMatrix( np.array(np.empty((self.num_hid_cov, testSampNum)), dtype=np.float32, order='F'))
        #~ t3_test = cmt.CUDAMatrix( np.array(np.empty((self.num_fac, testSampNum)), dtype=np.float32, order='F'))
        #~ t4_test = cmt.CUDAMatrix( np.array(np.empty((1,testSampNum)), dtype=np.float32, order='F'))
        #~ t5_test = cmt.CUDAMatrix( np.array(np.empty((1,1)), dtype=np.float32, order='F'))
        #~ t6_test = cmt.CUDAMatrix( np.array(np.empty((num_vis, testSampNum)), dtype=np.float32, order='F'))
        # per-epoch energy statistics (logged and plotted below)
        meanEnergy = np.zeros(self.num_epochs)
        minEnergy = np.zeros(self.num_epochs)
        maxEnergy = np.zeros(self.num_epochs)
        #~ meanEnergy_test = np.zeros(self.num_epochs)
        #~ minEnergy_test = np.zeros(self.num_epochs)
        #~ maxEnergy_test = np.zeros(self.num_epochs)
        # start training
        for epoch in range(self.num_epochs):
            print ("Epoch " + str(epoch))
            # anneal learning rates as found in the original code -
            # uncomment if you wish to use annealing!
            #~ epsilonVFc = epsilonVF/max(1,epoch/20)
            #~ epsilonFHc = epsilonFH/max(1,epoch/20)
            #~ epsilonbc = epsilonb/max(1,epoch/20)
            #~ epsilonw_meanc = epsilonw_mean/max(1,epoch/20)
            #~ epsilonb_meanc = epsilonb_mean/max(1,epoch/20)
            # no annealing is used in our experiments because learning
            # was stopping too early
            epsilonVFc = epsilonVF
            epsilonFHc = epsilonFH
            epsilonbc = epsilonb
            epsilonw_meanc = epsilonw_mean
            epsilonb_meanc = epsilonb_mean
            weightcost = weightcost_final
            # delay FH updates / weight decay until the configured epochs
            if epoch <= self.startFH:
                epsilonFHc = 0
            if epoch <= self.startwd:
                weightcost = 0
            # commented to avoid computing the energy on test data
            #~ data_test = test_dat
            #~ data_test.mult(data_test, target = t6_test) # DxP
            #~ t6_test.sum(axis = 0, target = lengthsq_test) # 1xP
            #~ lengthsq_test.mult(1./num_vis) # normalize by number of components (like std)
            #~ lengthsq_test.add(small) # small avoids division by 0
            #~ cmt.sqrt(lengthsq_test, target = length_test)
            #~ length_test.reciprocal(target = normcoeff_test) # 1xP
            #~ data_test.mult_by_row(normcoeff_test, target = normdata_test) # normalized data
            for batch in range(num_batches):
                # get current minibatch
                data = dev_dat.slice(batch*self.batch_size,(batch + 1)*self.batch_size) # DxP (nr dims x nr samples)
                # normalize input data
                data.mult(data, target = t6) # DxP
                t6.sum(axis = 0, target = lengthsq) # 1xP
                lengthsq.mult(1./num_vis) # normalize by number of components (like std)
                lengthsq.add(small) # small avoids division by 0
                cmt.sqrt(lengthsq, target = length)
                length.reciprocal(target = normcoeff) # 1xP
                data.mult_by_row(normcoeff, target = normdata) # normalized data
                ## compute positive sample derivatives
                # covariance part
                cmt.dot(VF.T, normdata, target = feat) # HxP (nr facs x nr samples)
                feat.mult(feat, target = featsq) # HxP
                cmt.dot(FH.T,featsq, target = t1) # OxP (nr cov hiddens x nr samples)
                t1.mult(-0.5)
                t1.add_col_vec(bias_cov) # OxP
                t1.apply_sigmoid(target = t2) # OxP
                cmt.dot(featsq, t2.T, target = FHinc) # HxO
                cmt.dot(FH,t2, target = t3) # HxP
                t3.mult(feat)
                cmt.dot(normdata, t3.T, target = VFinc) # VxH
                t2.sum(axis = 1, target = bias_covinc)
                bias_covinc.mult(-1)
                # visible bias
                data.sum(axis = 1, target = bias_visinc)
                bias_visinc.mult(-1)
                # mean part
                cmt.dot(w_mean.T, data, target = feat_mean) # HxP (nr mean hiddens x nr samples)
                feat_mean.add_col_vec(bias_mean) # HxP
                feat_mean.apply_sigmoid() # HxP
                feat_mean.mult(-1)
                cmt.dot(data, feat_mean.T, target = w_meaninc)
                feat_mean.sum(axis = 1, target = bias_meaninc)
                # HMC sampling: draw an approximate sample from the model
                if self.doPCD == 0: # CD-1 (set negative data to current training samples)
                    hmc_step, hmc_ave_rej = self.draw_HMC_samples(data,negdata,normdata,vel,gradient,normgradient,new_energy,old_energy,VF,FH,bias_cov,bias_vis,w_mean,bias_mean,hmc_step,hmc_step_nr,hmc_ave_rej,hmc_target_ave_rej,t1,t2,t3,t4,t5,t6,t7,thresh,feat,featsq,self.batch_size,feat_mean,length,lengthsq,normcoeff,small,num_vis)
                else: # PCD-1 (use previous negative data as starting point for chain)
                    negdataini.assign(negdata)
                    hmc_step, hmc_ave_rej = self.draw_HMC_samples(negdataini,negdata,normdata,vel,gradient,normgradient,new_energy,old_energy,VF,FH,bias_cov,bias_vis,w_mean,bias_mean,hmc_step,hmc_step_nr,hmc_ave_rej,hmc_target_ave_rej,t1,t2,t3,t4,t5,t6,t7,thresh,feat,featsq,self.batch_size,feat_mean,length,lengthsq,normcoeff,small,num_vis)
                # compute derivatives at the negative samples
                # normalize input data
                negdata.mult(negdata, target = t6) # DxP
                t6.sum(axis = 0, target = lengthsq) # 1xP
                lengthsq.mult(1./num_vis) # normalize by number of components (like std)
                lengthsq.add(small)
                cmt.sqrt(lengthsq, target = length)
                length.reciprocal(target = normcoeff) # 1xP
                negdata.mult_by_row(normcoeff, target = normdata) # normalized data
                # covariance part
                cmt.dot(VF.T, normdata, target = feat) # HxP
                feat.mult(feat, target = featsq) # HxP
                cmt.dot(FH.T,featsq, target = t1) # OxP
                t1.mult(-0.5)
                t1.add_col_vec(bias_cov) # OxP
                t1.apply_sigmoid(target = t2) # OxP
                FHinc.subtract_dot(featsq, t2.T) # HxO
                FHinc.mult(0.5)
                cmt.dot(FH,t2, target = t3) # HxP
                t3.mult(feat)
                VFinc.subtract_dot(normdata, t3.T) # VxH
                bias_covinc.add_sums(t2, axis = 1)
                # visible bias
                bias_visinc.add_sums(negdata, axis = 1)
                # mean part
                cmt.dot(w_mean.T, negdata, target = feat_mean) # HxP
                feat_mean.add_col_vec(bias_mean) # HxP
                feat_mean.apply_sigmoid() # HxP
                w_meaninc.add_dot(negdata, feat_mean.T)
                bias_meaninc.add_sums(feat_mean, axis = 1)
                # update parameters
                VFinc.add_mult(VF.sign(), weightcost) # L1 regularization
                VF.add_mult(VFinc, -epsilonVFc/self.batch_size)
                # normalize columns of VF: normalize by running average of their norm
                VF.mult(VF, target = t8)
                t8.sum(axis = 0, target = t10)
                cmt.sqrt(t10)
                t10.sum(axis=1,target = t5)
                t5.copy_to_host()
                normVF = .95*normVF + (.05/self.num_fac) * t5.numpy_array[0,0] # estimate norm
                t10.reciprocal()
                VF.mult_by_row(t10)
                VF.mult(normVF)
                bias_cov.add_mult(bias_covinc, -epsilonbc/self.batch_size)
                bias_vis.add_mult(bias_visinc, -epsilonbc/self.batch_size)
                if epoch > self.startFH:
                    FHinc.add_mult(FH.sign(), weightcost) # L1 regularization
                    FH.add_mult(FHinc, -epsilonFHc/self.batch_size) # update
                    # set to 0 negative entries in FH
                    FH.greater_than(0, target = t9)
                    FH.mult(t9)
                    if self.apply_mask==1:
                        FH.mult(mask)
                    # normalize columns of FH: L1 norm set to 1 in each column
                    FH.sum(axis = 0, target = t11)
                    t11.reciprocal()
                    FH.mult_by_row(t11)
                w_meaninc.add_mult(w_mean.sign(),weightcost)
                w_mean.add_mult(w_meaninc, -epsilonw_meanc/self.batch_size)
                bias_mean.add_mult(bias_meaninc, -epsilonb_meanc/self.batch_size)
                if self.verbose == 1:
                    print( "VF: " + '%3.2e' % VF.euclid_norm() + ", DVF: " + '%3.2e' % (VFinc.euclid_norm()*(epsilonVFc/self.batch_size)) + ", FH: " + '%3.2e' % FH.euclid_norm() + ", DFH: " + '%3.2e' % (FHinc.euclid_norm()*(epsilonFHc/self.batch_size)) + ", bias_cov: " + '%3.2e' % bias_cov.euclid_norm() + ", Dbias_cov: " + '%3.2e' % (bias_covinc.euclid_norm()*(epsilonbc/self.batch_size)) + ", bias_vis: " + '%3.2e' % bias_vis.euclid_norm() + ", Dbias_vis: " + '%3.2e' % (bias_visinc.euclid_norm()*(epsilonbc/self.batch_size)) + ", wm: " + '%3.2e' % w_mean.euclid_norm() + ", Dwm: " + '%3.2e' % (w_meaninc.euclid_norm()*(epsilonw_meanc/self.batch_size)) + ", bm: " + '%3.2e' % bias_mean.euclid_norm() + ", Dbm: " + '%3.2e' % (bias_meaninc.euclid_norm()*(epsilonb_meanc/self.batch_size)) + ", step: " + '%3.2e' % hmc_step + ", rej: " + '%3.2e' % hmc_ave_rej )
            # append a per-epoch summary line of all parameter norms
            with open ('terminal.txt','a') as f:
                f.write('\n' + "epoch: %s" % str(epoch) + ", VF: " + '%3.2e' % VF.euclid_norm() + ", DVF: " + '%3.2e' % (VFinc.euclid_norm()*(epsilonVFc/self.batch_size)) + ", FH: " + '%3.2e' % FH.euclid_norm() + ", DFH: " + '%3.2e' % (FHinc.euclid_norm()*(epsilonFHc/self.batch_size)) + ", bias_cov: " + '%3.2e' % bias_cov.euclid_norm() + ", Dbias_cov: " + '%3.2e' % (bias_covinc.euclid_norm()*(epsilonbc/self.batch_size)) + ", bias_vis: " + '%3.2e' % bias_vis.euclid_norm() + ", Dbias_vis: " + '%3.2e' % (bias_visinc.euclid_norm()*(epsilonbc/self.batch_size)) + ", wm: " + '%3.2e' % w_mean.euclid_norm() + ", Dwm: " + '%3.2e' % (w_meaninc.euclid_norm()*(epsilonw_meanc/self.batch_size)) + ", bm: " + '%3.2e' % bias_mean.euclid_norm() + ", Dbm: " + '%3.2e' % (bias_meaninc.euclid_norm()*(epsilonb_meanc/self.batch_size)) + ", step: " + '%3.2e' % hmc_step + ", rej: " + '%3.2e' % hmc_ave_rej )
            sys.stdout.flush()
            # compute and record the energy on the (last batch of) training data
            self.compute_energy_mcRBM_visual(data,normdata,energy,VF,FH,bias_cov,bias_vis,w_mean,bias_mean,t1,t2,t6,feat,featsq,feat_mean,length,lengthsq,normcoeff,small,num_vis)
            energy.copy_to_host()
            meanEnergy[epoch] = np.mean(energy.numpy_array)
            minEnergy[epoch] = np.min(energy.numpy_array)
            maxEnergy[epoch] = np.max(energy.numpy_array)
            # commented to avoid computing the energy on test data
            #~ self.compute_energy_mcRBM_visual(data_test,normdata_test,energy_test,VF,FH,bias_cov,bias_vis,w_mean,bias_mean,t1_test,t2_test,t6_test,feat_test,featsq_test,feat_mean_test,length_test,lengthsq_test,normcoeff_test,small,num_vis)
            #~ energy_test.copy_to_host()
            #~ meanEnergy_test[epoch] = np.mean(energy_test.numpy_array)
            #~ minEnergy_test[epoch] = np.min(energy_test.numpy_array)
            #~ maxEnergy_test[epoch] = np.max(energy_test.numpy_array)
            ax1.cla()
            ax1.plot(range(epoch), meanEnergy[0:epoch])
            ax1.plot(range(epoch), maxEnergy[0:epoch])
            ax1.plot(range(epoch), minEnergy[0:epoch])
            if np.mod(epoch,100) == 0:
                #f1.savefig(output_folder + str(epoch)+'_'+'fig.png')
                f1.savefig(self.plotsDir + '/energy/energyAt_%s.png' %str(epoch))
            # back-up every once in a while
            if np.mod(epoch,100) == 0:
                VF.copy_to_host()
                FH.copy_to_host()
                bias_cov.copy_to_host()
                w_mean.copy_to_host()
                bias_mean.copy_to_host()
                bias_vis.copy_to_host()
                savemat("./weights/ws_temp%s" %str(epoch), {'VF':VF.numpy_array,'FH':FH.numpy_array,'bias_cov': bias_cov.numpy_array, 'bias_vis': bias_vis.numpy_array,'w_mean': w_mean.numpy_array, 'bias_mean': bias_mean.numpy_array, 'epoch':epoch})
                # uncomment if computing the energy in order to store its evolution throghout training
                #~ savemat(self.refDir + '/' + "training_energy_" + str(self.num_fac) + "_cov" + str(self.num_hid_cov) + "_mean" + str(self.num_hid_mean), {'meanEnergy':meanEnergy,'meanEnergy_test':meanEnergy_test,'maxEnergy': maxEnergy, 'maxEnergy_test': maxEnergy_test, 'minEnergy': minEnergy, 'minEnergy_test': minEnergy_test, 'epoch':epoch})
                #savemat("training_energy_" + str(self.num_fac) + "_cov" + str(self.num_hid_cov) + "_mean" + str(self.num_hid_mean), {'meanEnergy':meanEnergy, 'maxEnergy': maxEnergy, 'minEnergy': minEnergy, 'epoch':epoch})
            # in order to stop the training gracefully, create an empty file
            # named 'stop_now' in the folder containing the experiment
            # configuration file
            if os.path.isfile('stop_now'):
                break
        # final back-up
        VF.copy_to_host()
        FH.copy_to_host()
        bias_cov.copy_to_host()
        bias_vis.copy_to_host()
        w_mean.copy_to_host()
        bias_mean.copy_to_host()
        savemat("ws_fac%s" %str(self.num_fac) + "_cov%s" %str(self.num_hid_cov) + "_mean%s" %str(self.num_hid_mean), {'VF':VF.numpy_array,'FH':FH.numpy_array,'bias_cov': bias_cov.numpy_array, 'bias_vis': bias_vis.numpy_array, 'w_mean': w_mean.numpy_array, 'bias_mean': bias_mean.numpy_array, 'epoch':epoch})
        # uncomment if computing the energy in order to store its evolution throghout training
        #~ savemat(self.refDir + '/' + "training_energy_" + str(self.num_fac) + "_cov" + str(self.num_hid_cov) + "_mean" + str(self.num_hid_mean), {'meanEnergy':meanEnergy,'meanEnergy_test':meanEnergy_test,'maxEnergy': maxEnergy, 'maxEnergy_test': maxEnergy_test, 'minEnergy': minEnergy, 'minEnergy_test': minEnergy_test, 'epoch':epoch})
        savemat("training_energy_" + str(self.num_fac) + "_cov" + str(self.num_hid_cov) + "_mean" + str(self.num_hid_mean), {'meanEnergy':meanEnergy, 'maxEnergy': maxEnergy, 'minEnergy': minEnergy, 'epoch':epoch})
        # Compute states if desired:
        # normalise data for covariance hidden:
        #dsq = np.square(visData)
        #lsq = np.sum(dsq, axis=0)
        #lsq /= visData.shape[1]
        #lsq += np.spacing(1)
        #l = np.sqrt(lsq)
        #normD = visData/l
        #logisticArg_c = (-0.5*np.dot(FH.numpy_array.T, np.square(np.dot(VF.numpy_array.T, normD.T))) + bias_cov.numpy_array).T
        #p_hc = logisticFunc(logisticArg_c)
        #logisticArg_m = np.dot(visData, w_mean.numpy_array) + bias_mean.numpy_array.T
        #p_hm = logisticFunc(logisticArg_m)
        #p_all = np.concatenate((p_hc, p_hm), axis=1)
        #savemat(self.probabilitiesDir + '/pAll_%i.mat' % epoch, mdict={'p_all':p_all})
        # drop a 'done' marker with a timestamp so external scripts can detect completion
        with open('done', 'w') as doneFile:
            doneFile.write(datetime.strftime(datetime.now(), '%d/%m/%Y %H:%M:%S'))
        #doneFile.closed
| [
"cudamat.cublas_init",
"sys.path.insert",
"_pickle.dump",
"scipy.io.loadmat",
"numpy.array",
"cudamat.dot",
"numpy.mod",
"numpy.random.RandomState",
"os.path.exists",
"numpy.savez",
"numpy.mean",
"cudamat.CUDAMatrix",
"numpy.max",
"cudamat.log",
"os.path.isdir",
"numpy.empty",
"numpy... | [((1300, 1343), 'sys.path.insert', 'sys.path.insert', (['(0)', '"""../dataPreprocessing/"""'], {}), "(0, '../dataPreprocessing/')\n", (1315, 1343), False, 'import sys\n'), ((1913, 1926), 'dataPreproc.DataPreproc', 'DataPreproc', ([], {}), '()\n', (1924, 1926), False, 'from dataPreproc import DataPreproc\n'), ((5883, 5917), 'numpy.array', 'np.array', (['self.d'], {'dtype': 'np.float32'}), '(self.d, dtype=np.float32)\n', (5891, 5917), True, 'import numpy as np\n'), ((5941, 5981), 'numpy.array', 'np.array', (['self.obsKeys'], {'dtype': 'np.float32'}), '(self.obsKeys, dtype=np.float32)\n', (5949, 5981), True, 'import numpy as np\n'), ((7326, 7359), 'cudamat.sqrt', 'cmt.sqrt', (['lengthsq'], {'target': 'length'}), '(lengthsq, target=length)\n', (7334, 7359), True, 'import cudamat as cmt\n'), ((7555, 7591), 'cudamat.dot', 'cmt.dot', (['VF.T', 'normdata'], {'target': 'feat'}), '(VF.T, normdata, target=feat)\n', (7562, 7591), True, 'import cudamat as cmt\n'), ((7683, 7715), 'cudamat.dot', 'cmt.dot', (['FH.T', 'featsq'], {'target': 't1'}), '(FH.T, featsq, target=t1)\n', (7690, 7715), True, 'import cudamat as cmt\n'), ((7822, 7833), 'cudamat.exp', 'cmt.exp', (['t1'], {}), '(t1)\n', (7829, 7833), True, 'import cudamat as cmt\n'), ((7885, 7896), 'cudamat.log', 'cmt.log', (['t2'], {}), '(t2)\n', (7892, 7896), True, 'import cudamat as cmt\n'), ((7989, 8030), 'cudamat.dot', 'cmt.dot', (['w_mean.T', 'data'], {'target': 'feat_mean'}), '(w_mean.T, data, target=feat_mean)\n', (7996, 8030), True, 'import cudamat as cmt\n'), ((8125, 8143), 'cudamat.exp', 'cmt.exp', (['feat_mean'], {}), '(feat_mean)\n', (8132, 8143), True, 'import cudamat as cmt\n'), ((8178, 8196), 'cudamat.log', 'cmt.log', (['feat_mean'], {}), '(feat_mean)\n', (8185, 8196), True, 'import cudamat as cmt\n'), ((9224, 9257), 'cudamat.sqrt', 'cmt.sqrt', (['lengthsq'], {'target': 'length'}), '(lengthsq, target=length)\n', (9232, 9257), True, 'import cudamat as cmt\n'), ((9453, 9489), 'cudamat.dot', 'cmt.dot', 
(['VF.T', 'normdata'], {'target': 'feat'}), '(VF.T, normdata, target=feat)\n', (9460, 9489), True, 'import cudamat as cmt\n'), ((9581, 9613), 'cudamat.dot', 'cmt.dot', (['FH.T', 'featsq'], {'target': 't1'}), '(FH.T, featsq, target=t1)\n', (9588, 9613), True, 'import cudamat as cmt\n'), ((9720, 9731), 'cudamat.exp', 'cmt.exp', (['t1'], {}), '(t1)\n', (9727, 9731), True, 'import cudamat as cmt\n'), ((9783, 9794), 'cudamat.log', 'cmt.log', (['t2'], {}), '(t2)\n', (9790, 9794), True, 'import cudamat as cmt\n'), ((9887, 9928), 'cudamat.dot', 'cmt.dot', (['w_mean.T', 'data'], {'target': 'feat_mean'}), '(w_mean.T, data, target=feat_mean)\n', (9894, 9928), True, 'import cudamat as cmt\n'), ((10023, 10041), 'cudamat.exp', 'cmt.exp', (['feat_mean'], {}), '(feat_mean)\n', (10030, 10041), True, 'import cudamat as cmt\n'), ((10076, 10094), 'cudamat.log', 'cmt.log', (['feat_mean'], {}), '(feat_mean)\n', (10083, 10094), True, 'import cudamat as cmt\n'), ((11025, 11058), 'cudamat.sqrt', 'cmt.sqrt', (['lengthsq'], {'target': 'length'}), '(lengthsq, target=length)\n', (11033, 11058), True, 'import cudamat as cmt\n'), ((11198, 11234), 'cudamat.dot', 'cmt.dot', (['VF.T', 'normdata'], {'target': 'feat'}), '(VF.T, normdata, target=feat)\n', (11205, 11234), True, 'import cudamat as cmt\n'), ((11301, 11333), 'cudamat.dot', 'cmt.dot', (['FH.T', 'featsq'], {'target': 't1'}), '(FH.T, featsq, target=t1)\n', (11308, 11333), True, 'import cudamat as cmt\n'), ((11453, 11479), 'cudamat.dot', 'cmt.dot', (['FH', 't2'], {'target': 't3'}), '(FH, t2, target=t3)\n', (11460, 11479), True, 'import cudamat as cmt\n'), ((11517, 11553), 'cudamat.dot', 'cmt.dot', (['VF', 't3'], {'target': 'normgradient'}), '(VF, t3, target=normgradient)\n', (11524, 11553), True, 'import cudamat as cmt\n'), ((12191, 12232), 'cudamat.dot', 'cmt.dot', (['w_mean.T', 'data'], {'target': 'feat_mean'}), '(w_mean.T, data, target=feat_mean)\n', (12198, 12232), True, 'import cudamat as cmt\n'), ((14347, 14362), 'cudamat.exp', 
'cmt.exp', (['thresh'], {}), '(thresh)\n', (14354, 14362), True, 'import cudamat as cmt\n'), ((15247, 15259), 'numpy.square', 'np.square', (['d'], {}), '(d)\n', (15256, 15259), True, 'import numpy as np\n'), ((15274, 15293), 'numpy.sum', 'np.sum', (['dsq'], {'axis': '(0)'}), '(dsq, axis=0)\n', (15280, 15293), True, 'import numpy as np\n'), ((16029, 16041), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (16039, 16041), True, 'import matplotlib.pyplot as plt\n'), ((16148, 16179), 'cudamat.cuda_set_device', 'cmt.cuda_set_device', (['self.gpuId'], {}), '(self.gpuId)\n', (16167, 16179), True, 'import cudamat as cmt\n'), ((16188, 16205), 'cudamat.cublas_init', 'cmt.cublas_init', ([], {}), '()\n', (16203, 16205), True, 'import cudamat as cmt\n'), ((16214, 16243), 'cudamat.CUDAMatrix.init_random', 'cmt.CUDAMatrix.init_random', (['(1)'], {}), '(1)\n', (16240, 16243), True, 'import cudamat as cmt\n'), ((16261, 16292), 'numpy.random.seed', 'np.random.seed', (['self.npRandSeed'], {}), '(self.npRandSeed)\n', (16275, 16292), True, 'import numpy as np\n'), ((16309, 16338), 'numpy.random.RandomState', 'RandomState', (['self.npRandState'], {}), '(self.npRandState)\n', (16320, 16338), False, 'from numpy.random import RandomState\n'), ((16545, 16567), 'os.chdir', 'os.chdir', (['self.saveDir'], {}), '(self.saveDir)\n', (16553, 16567), False, 'import os\n'), ((16604, 16615), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (16613, 16615), False, 'import os\n'), ((17195, 17207), 'numpy.square', 'np.square', (['d'], {}), '(d)\n', (17204, 17207), True, 'import numpy as np\n'), ((17222, 17241), 'numpy.sum', 'np.sum', (['dsq'], {'axis': '(0)'}), '(dsq, axis=0)\n', (17228, 17241), True, 'import numpy as np\n'), ((17433, 17512), 'numpy.savez', 'np.savez', (['"""visData.npz"""'], {'data': 'd', 'obsKeys': 'self.obsKeys', 'epochTime': 'self.epochTime'}), "('visData.npz', data=d, obsKeys=self.obsKeys, epochTime=self.epochTime)\n", (17441, 17512), True, 'import numpy as np\n'), ((19172, 
19191), 'cudamat.CUDAMatrix', 'cmt.CUDAMatrix', (['d.T'], {}), '(d.T)\n', (19186, 19191), True, 'import cudamat as cmt\n'), ((27006, 27031), 'numpy.zeros', 'np.zeros', (['self.num_epochs'], {}), '(self.num_epochs)\n', (27014, 27031), True, 'import numpy as np\n'), ((27052, 27077), 'numpy.zeros', 'np.zeros', (['self.num_epochs'], {}), '(self.num_epochs)\n', (27060, 27077), True, 'import numpy as np\n'), ((27098, 27123), 'numpy.zeros', 'np.zeros', (['self.num_epochs'], {}), '(self.num_epochs)\n', (27106, 27123), True, 'import numpy as np\n'), ((3485, 3513), 'os.path.exists', 'os.path.exists', (['self.saveDir'], {}), '(self.saveDir)\n', (3499, 3513), False, 'import os\n'), ((3527, 3552), 'os.makedirs', 'os.makedirs', (['self.saveDir'], {}), '(self.saveDir)\n', (3538, 3552), False, 'import os\n'), ((5180, 5226), 'os.path.exists', 'os.path.exists', (["(self.saveDir + '/dataDetails/')"], {}), "(self.saveDir + '/dataDetails/')\n", (5194, 5226), False, 'import os\n'), ((5240, 5283), 'os.makedirs', 'os.makedirs', (["(self.saveDir + '/dataDetails/')"], {}), "(self.saveDir + '/dataDetails/')\n", (5251, 5283), False, 'import os\n'), ((5392, 5418), 'numpy.load', 'np.load', (['self.dataFilename'], {}), '(self.dataFilename)\n', (5399, 5418), True, 'import numpy as np\n'), ((15376, 15402), '_pickle.dump', 'cPickle.dump', (['lsq', 'pklFile'], {}), '(lsq, pklFile)\n', (15388, 15402), True, 'import _pickle as cPickle\n'), ((16721, 16749), 'os.path.isdir', 'os.path.isdir', (['self.plotsDir'], {}), '(self.plotsDir)\n', (16734, 16749), False, 'import os\n'), ((16763, 16789), 'os.makedirs', 'os.makedirs', (['self.plotsDir'], {}), '(self.plotsDir)\n', (16774, 16789), False, 'import os\n'), ((16805, 16845), 'os.path.isdir', 'os.path.isdir', (["(self.plotsDir + '/energy')"], {}), "(self.plotsDir + '/energy')\n", (16818, 16845), False, 'import os\n'), ((16859, 16897), 'os.makedirs', 'os.makedirs', (["(self.plotsDir + '/energy')"], {}), "(self.plotsDir + '/energy')\n", (16870, 16897), False, 
'import os\n'), ((17023, 17047), 'os.path.isdir', 'os.path.isdir', (['"""weights"""'], {}), "('weights')\n", (17036, 17047), False, 'import os\n'), ((17061, 17083), 'os.makedirs', 'os.makedirs', (['"""weights"""'], {}), "('weights')\n", (17072, 17083), False, 'import os\n'), ((17309, 17335), '_pickle.dump', 'cPickle.dump', (['lsq', 'pklFile'], {}), '(lsq, pklFile)\n', (17321, 17335), True, 'import _pickle as cPickle\n'), ((20143, 20179), 'scipy.io.loadmat', 'loadmat', (['"""your_FHinit_mask_file.mat"""'], {}), "('your_FHinit_mask_file.mat')\n", (20150, 20179), False, 'from scipy.io import loadmat, savemat\n'), ((24904, 24940), 'scipy.io.loadmat', 'loadmat', (['"""your_FHinit_mask_file.mat"""'], {}), "('your_FHinit_mask_file.mat')\n", (24911, 24940), False, 'from scipy.io import loadmat, savemat\n'), ((37408, 37435), 'numpy.mean', 'np.mean', (['energy.numpy_array'], {}), '(energy.numpy_array)\n', (37415, 37435), True, 'import numpy as np\n'), ((37467, 37493), 'numpy.min', 'np.min', (['energy.numpy_array'], {}), '(energy.numpy_array)\n', (37473, 37493), True, 'import numpy as np\n'), ((37525, 37551), 'numpy.max', 'np.max', (['energy.numpy_array'], {}), '(energy.numpy_array)\n', (37531, 37551), True, 'import numpy as np\n'), ((40029, 40055), 'os.path.isfile', 'os.path.isfile', (['"""stop_now"""'], {}), "('stop_now')\n", (40043, 40055), False, 'import os\n'), ((5491, 5517), 'scipy.io.loadmat', 'loadmat', (['self.dataFilename'], {}), '(self.dataFilename)\n', (5498, 5517), False, 'from scipy.io import loadmat, savemat\n'), ((20288, 20335), 'numpy.array', 'np.array', (["dd['FH']"], {'dtype': 'np.float32', 'order': '"""F"""'}), "(dd['FH'], dtype=np.float32, order='F')\n", (20296, 20335), True, 'import numpy as np\n'), ((20495, 20517), 'numpy.zeros', 'np.zeros', (['(num_vis, 1)'], {}), '((num_vis, 1))\n', (20503, 20517), True, 'import numpy as np\n'), ((20886, 20919), 'numpy.zeros', 'np.zeros', (['(num_vis, self.num_fac)'], {}), '((num_vis, self.num_fac))\n', (20894, 
20919), True, 'import numpy as np\n'), ((20992, 21034), 'numpy.zeros', 'np.zeros', (['(self.num_fac, self.num_hid_cov)'], {}), '((self.num_fac, self.num_hid_cov))\n', (21000, 21034), True, 'import numpy as np\n'), ((21113, 21144), 'numpy.zeros', 'np.zeros', (['(self.num_hid_cov, 1)'], {}), '((self.num_hid_cov, 1))\n', (21121, 21144), True, 'import numpy as np\n'), ((21223, 21245), 'numpy.zeros', 'np.zeros', (['(num_vis, 1)'], {}), '((num_vis, 1))\n', (21231, 21245), True, 'import numpy as np\n'), ((21322, 21360), 'numpy.zeros', 'np.zeros', (['(num_vis, self.num_hid_mean)'], {}), '((num_vis, self.num_hid_mean))\n', (21330, 21360), True, 'import numpy as np\n'), ((21440, 21472), 'numpy.zeros', 'np.zeros', (['(self.num_hid_mean, 1)'], {}), '((self.num_hid_mean, 1))\n', (21448, 21472), True, 'import numpy as np\n'), ((21584, 21620), 'numpy.empty', 'np.empty', (['(num_vis, self.batch_size)'], {}), '((num_vis, self.batch_size))\n', (21592, 21620), True, 'import numpy as np\n'), ((21702, 21738), 'numpy.empty', 'np.empty', (['(num_vis, self.batch_size)'], {}), '((num_vis, self.batch_size))\n', (21710, 21738), True, 'import numpy as np\n'), ((21822, 21858), 'numpy.empty', 'np.empty', (['(num_vis, self.batch_size)'], {}), '((num_vis, self.batch_size))\n', (21830, 21858), True, 'import numpy as np\n'), ((21936, 21977), 'numpy.empty', 'np.empty', (['(self.num_fac, self.batch_size)'], {}), '((self.num_fac, self.batch_size))\n', (21944, 21977), True, 'import numpy as np\n'), ((22051, 22092), 'numpy.empty', 'np.empty', (['(self.num_fac, self.batch_size)'], {}), '((self.num_fac, self.batch_size))\n', (22059, 22092), True, 'import numpy as np\n'), ((22281, 22311), 'numpy.zeros', 'np.zeros', (['(1, self.batch_size)'], {}), '((1, self.batch_size))\n', (22289, 22311), True, 'import numpy as np\n'), ((22389, 22419), 'numpy.zeros', 'np.zeros', (['(1, self.batch_size)'], {}), '((1, self.batch_size))\n', (22397, 22419), True, 'import numpy as np\n'), ((22493, 22523), 'numpy.zeros', 
'np.zeros', (['(1, self.batch_size)'], {}), '((1, self.batch_size))\n', (22501, 22523), True, 'import numpy as np\n'), ((22599, 22635), 'numpy.empty', 'np.empty', (['(num_vis, self.batch_size)'], {}), '((num_vis, self.batch_size))\n', (22607, 22635), True, 'import numpy as np\n'), ((22721, 22757), 'numpy.empty', 'np.empty', (['(num_vis, self.batch_size)'], {}), '((num_vis, self.batch_size))\n', (22729, 22757), True, 'import numpy as np\n'), ((22837, 22867), 'numpy.zeros', 'np.zeros', (['(1, self.batch_size)'], {}), '((1, self.batch_size))\n', (22845, 22867), True, 'import numpy as np\n'), ((22944, 22990), 'numpy.empty', 'np.empty', (['(self.num_hid_mean, self.batch_size)'], {}), '((self.num_hid_mean, self.batch_size))\n', (22952, 22990), True, 'import numpy as np\n'), ((23171, 23201), 'numpy.zeros', 'np.zeros', (['(1, self.batch_size)'], {}), '((1, self.batch_size))\n', (23179, 23201), True, 'import numpy as np\n'), ((23283, 23313), 'numpy.zeros', 'np.zeros', (['(1, self.batch_size)'], {}), '((1, self.batch_size))\n', (23291, 23313), True, 'import numpy as np\n'), ((23396, 23426), 'numpy.zeros', 'np.zeros', (['(1, self.batch_size)'], {}), '((1, self.batch_size))\n', (23404, 23426), True, 'import numpy as np\n'), ((24977, 25026), 'numpy.array', 'np.array', (["dd['mask']"], {'dtype': 'np.float32', 'order': '"""F"""'}), "(dd['mask'], dtype=np.float32, order='F')\n", (24985, 25026), True, 'import numpy as np\n'), ((25149, 25194), 'numpy.empty', 'np.empty', (['(self.num_hid_cov, self.batch_size)'], {}), '((self.num_hid_cov, self.batch_size))\n', (25157, 25194), True, 'import numpy as np\n'), ((25264, 25309), 'numpy.empty', 'np.empty', (['(self.num_hid_cov, self.batch_size)'], {}), '((self.num_hid_cov, self.batch_size))\n', (25272, 25309), True, 'import numpy as np\n'), ((25379, 25420), 'numpy.empty', 'np.empty', (['(self.num_fac, self.batch_size)'], {}), '((self.num_fac, self.batch_size))\n', (25387, 25420), True, 'import numpy as np\n'), ((25490, 25520), 'numpy.empty', 
'np.empty', (['(1, self.batch_size)'], {}), '((1, self.batch_size))\n', (25498, 25520), True, 'import numpy as np\n'), ((25589, 25605), 'numpy.empty', 'np.empty', (['(1, 1)'], {}), '((1, 1))\n', (25597, 25605), True, 'import numpy as np\n'), ((25674, 25710), 'numpy.empty', 'np.empty', (['(num_vis, self.batch_size)'], {}), '((num_vis, self.batch_size))\n', (25682, 25710), True, 'import numpy as np\n'), ((25780, 25816), 'numpy.empty', 'np.empty', (['(num_vis, self.batch_size)'], {}), '((num_vis, self.batch_size))\n', (25788, 25816), True, 'import numpy as np\n'), ((25886, 25919), 'numpy.empty', 'np.empty', (['(num_vis, self.num_fac)'], {}), '((num_vis, self.num_fac))\n', (25894, 25919), True, 'import numpy as np\n'), ((25989, 26031), 'numpy.zeros', 'np.zeros', (['(self.num_fac, self.num_hid_cov)'], {}), '((self.num_fac, self.num_hid_cov))\n', (25997, 26031), True, 'import numpy as np\n'), ((26102, 26129), 'numpy.empty', 'np.empty', (['(1, self.num_fac)'], {}), '((1, self.num_fac))\n', (26110, 26129), True, 'import numpy as np\n'), ((26199, 26230), 'numpy.empty', 'np.empty', (['(1, self.num_hid_cov)'], {}), '((1, self.num_hid_cov))\n', (26207, 26230), True, 'import numpy as np\n'), ((29532, 29565), 'cudamat.sqrt', 'cmt.sqrt', (['lengthsq'], {'target': 'length'}), '(lengthsq, target=length)\n', (29540, 29565), True, 'import cudamat as cmt\n'), ((29815, 29851), 'cudamat.dot', 'cmt.dot', (['VF.T', 'normdata'], {'target': 'feat'}), '(VF.T, normdata, target=feat)\n', (29822, 29851), True, 'import cudamat as cmt\n'), ((29956, 29988), 'cudamat.dot', 'cmt.dot', (['FH.T', 'featsq'], {'target': 't1'}), '(FH.T, featsq, target=t1)\n', (29963, 29988), True, 'import cudamat as cmt\n'), ((30171, 30206), 'cudamat.dot', 'cmt.dot', (['featsq', 't2.T'], {'target': 'FHinc'}), '(featsq, t2.T, target=FHinc)\n', (30178, 30206), True, 'import cudamat as cmt\n'), ((30231, 30257), 'cudamat.dot', 'cmt.dot', (['FH', 't2'], {'target': 't3'}), '(FH, t2, target=t3)\n', (30238, 30257), True, 'import 
cudamat as cmt\n'), ((30311, 30348), 'cudamat.dot', 'cmt.dot', (['normdata', 't3.T'], {'target': 'VFinc'}), '(normdata, t3.T, target=VFinc)\n', (30318, 30348), True, 'import cudamat as cmt\n'), ((30620, 30661), 'cudamat.dot', 'cmt.dot', (['w_mean.T', 'data'], {'target': 'feat_mean'}), '(w_mean.T, data, target=feat_mean)\n', (30627, 30661), True, 'import cudamat as cmt\n'), ((30855, 30899), 'cudamat.dot', 'cmt.dot', (['data', 'feat_mean.T'], {'target': 'w_meaninc'}), '(data, feat_mean.T, target=w_meaninc)\n', (30862, 30899), True, 'import cudamat as cmt\n'), ((32337, 32370), 'cudamat.sqrt', 'cmt.sqrt', (['lengthsq'], {'target': 'length'}), '(lengthsq, target=length)\n', (32345, 32370), True, 'import cudamat as cmt\n'), ((32568, 32604), 'cudamat.dot', 'cmt.dot', (['VF.T', 'normdata'], {'target': 'feat'}), '(VF.T, normdata, target=feat)\n', (32575, 32604), True, 'import cudamat as cmt\n'), ((32687, 32719), 'cudamat.dot', 'cmt.dot', (['FH.T', 'featsq'], {'target': 't1'}), '(FH.T, featsq, target=t1)\n', (32694, 32719), True, 'import cudamat as cmt\n'), ((32959, 32985), 'cudamat.dot', 'cmt.dot', (['FH', 't2'], {'target': 't3'}), '(FH, t2, target=t3)\n', (32966, 32985), True, 'import cudamat as cmt\n'), ((33262, 33306), 'cudamat.dot', 'cmt.dot', (['w_mean.T', 'negdata'], {'target': 'feat_mean'}), '(w_mean.T, negdata, target=feat_mean)\n', (33269, 33306), True, 'import cudamat as cmt\n'), ((33900, 33913), 'cudamat.sqrt', 'cmt.sqrt', (['t10'], {}), '(t10)\n', (33908, 33913), True, 'import cudamat as cmt\n'), ((37061, 37079), 'sys.stdout.flush', 'sys.stdout.flush', ([], {}), '()\n', (37077, 37079), False, 'import sys\n'), ((38360, 38378), 'numpy.mod', 'np.mod', (['epoch', '(100)'], {}), '(epoch, 100)\n', (38366, 38378), True, 'import numpy as np\n'), ((38613, 38631), 'numpy.mod', 'np.mod', (['epoch', '(100)'], {}), '(epoch, 100)\n', (38619, 38631), True, 'import numpy as np\n'), ((20035, 20073), 'numpy.eye', 'np.eye', (['self.num_fac', 'self.num_hid_cov'], {}), 
'(self.num_fac, self.num_hid_cov)\n', (20041, 20073), True, 'import numpy as np\n'), ((20388, 20418), 'numpy.ones', 'np.ones', (['(self.num_hid_cov, 1)'], {}), '((self.num_hid_cov, 1))\n', (20395, 20418), True, 'import numpy as np\n'), ((20728, 20759), 'numpy.ones', 'np.ones', (['(self.num_hid_mean, 1)'], {}), '((self.num_hid_mean, 1))\n', (20735, 20759), True, 'import numpy as np\n'), ((42101, 42115), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (42113, 42115), False, 'from datetime import datetime\n'), ((6438, 6460), 'numpy.min', 'np.min', (['self.d'], {'axis': '(0)'}), '(self.d, axis=0)\n', (6444, 6460), True, 'import numpy as np\n'), ((6508, 6530), 'numpy.max', 'np.max', (['self.d'], {'axis': '(0)'}), '(self.d, axis=0)\n', (6514, 6530), True, 'import numpy as np\n'), ((6579, 6602), 'numpy.mean', 'np.mean', (['self.d'], {'axis': '(0)'}), '(self.d, axis=0)\n', (6586, 6602), True, 'import numpy as np\n'), ((6650, 6672), 'numpy.std', 'np.std', (['self.d'], {'axis': '(0)'}), '(self.d, axis=0)\n', (6656, 6672), True, 'import numpy as np\n'), ((17910, 17933), 'numpy.min', 'np.min', (['visData'], {'axis': '(0)'}), '(visData, axis=0)\n', (17916, 17933), True, 'import numpy as np\n'), ((17987, 18010), 'numpy.max', 'np.max', (['visData'], {'axis': '(0)'}), '(visData, axis=0)\n', (17993, 18010), True, 'import numpy as np\n'), ((18065, 18089), 'numpy.mean', 'np.mean', (['visData'], {'axis': '(0)'}), '(visData, axis=0)\n', (18072, 18089), True, 'import numpy as np\n'), ((18143, 18166), 'numpy.std', 'np.std', (['visData'], {'axis': '(0)'}), '(visData, axis=0)\n', (18149, 18166), True, 'import numpy as np\n'), ((17809, 17832), 'numpy.max', 'np.max', (['visData'], {'axis': '(0)'}), '(visData, axis=0)\n', (17815, 17832), True, 'import numpy as np\n'), ((17833, 17856), 'numpy.min', 'np.min', (['visData'], {'axis': '(0)'}), '(visData, axis=0)\n', (17839, 17856), True, 'import numpy as np\n')] |
"""Report generator for the error command.
TODO: move reporting functionality out of the ErrorEstimator class.
"""
from itertools import repeat
from atropos.commands.reports import BaseReportGenerator
from atropos.io import open_output
from atropos.commands.legacy_report import Printer, TitlePrinter
class ReportGenerator(BaseReportGenerator):
    """Report generator that adds a plain-text ('txt') output format on top
    of the template-based formats handled by the base class."""

    def generate_text_report(self, fmt, summary, outfile, **kwargs):
        """Write *summary* to *outfile*; 'txt' is rendered locally, every
        other format is delegated to the base class's template machinery."""
        if fmt != 'txt':
            super().generate_from_template(fmt, summary, outfile, **kwargs)
            return
        with open_output(outfile, context_wrapper=True) as out:
            generate_reports(out, summary)
def generate_reports(outstream, summary):
    """Write one error-rate report per input to *outstream*, followed by a
    length-weighted overall error rate when more than one input was analyzed.
    """
    err_info = summary['errorrate']
    input_names = summary['input']['input_names'] or repeat(None)
    rates = err_info['estimate']
    printer = Printer(outstream)
    title_printer = TitlePrinter(outstream)
    idx = 0
    entries = zip(rates, err_info['details'], input_names)
    for idx, (rate, detail, label) in enumerate(entries, 1):
        generate_estimator_report(
            outstream, idx, rate, detail, printer, title_printer, label)
    if idx > 1:
        printer.newline()
        title_printer("Overall", level=0)
        lengths = err_info['total_len']
        # Overall rate is the mean of the per-input rates weighted by the
        # total number of bases in each input.
        weighted_sum = sum(rate * n for rate, n in zip(rates, lengths))
        overall_err = weighted_sum / sum(lengths)
        print("Error rate: {:.2%}".format(overall_err), file=outstream)
def generate_estimator_report(
        outstream, input_idx, estimate, details, _print, _print_title,
        input_name=None):
    """Write the error-rate report for a single input.

    Prints a titled section with the (optional) file name and the overall
    error estimate; when *details* is provided, also prints the per-read
    standard error and the per-cycle (cycle, error, stderr) rows indented.
    """
    indented = Printer(outstream, indent=' ')
    _print.newline()
    _print_title("Input {}".format(input_idx), level=0)
    if input_name:
        _print("File: {}".format(input_name))
    _print("Error rate: {:.2%}".format(estimate))
    if not details:
        return
    _print("Details:\n")
    indented("StdErr: {:.2%}".format(details['per_read']['standard error']))
    indented("Per-cycle rates:")
    for cycle_stats in details['per_cycle']:
        indented(
            "Cycle: {}, Error: {:.2%}, StdErr: {:.2%}".format(*cycle_stats),
            indent=2)
| [
"atropos.commands.legacy_report.TitlePrinter",
"atropos.io.open_output",
"atropos.commands.legacy_report.Printer",
"itertools.repeat"
] | [((816, 834), 'atropos.commands.legacy_report.Printer', 'Printer', (['outstream'], {}), '(outstream)\n', (823, 834), False, 'from atropos.commands.legacy_report import Printer, TitlePrinter\n'), ((854, 877), 'atropos.commands.legacy_report.TitlePrinter', 'TitlePrinter', (['outstream'], {}), '(outstream)\n', (866, 877), False, 'from atropos.commands.legacy_report import Printer, TitlePrinter\n'), ((1656, 1687), 'atropos.commands.legacy_report.Printer', 'Printer', (['outstream'], {'indent': '""" """'}), "(outstream, indent=' ')\n", (1663, 1687), False, 'from atropos.commands.legacy_report import Printer, TitlePrinter\n'), ((736, 748), 'itertools.repeat', 'repeat', (['None'], {}), '(None)\n', (742, 748), False, 'from itertools import repeat\n'), ((458, 500), 'atropos.io.open_output', 'open_output', (['outfile'], {'context_wrapper': '(True)'}), '(outfile, context_wrapper=True)\n', (469, 500), False, 'from atropos.io import open_output\n')] |
# from flask import Flask, render_template, flash, redirect, url_for, session, request, logging
# from wtforms import Form, StringField, TextAreaField, PasswordField, validators
# from functools import wraps
import requests
import json
import pandas as pd
import platform
import shutil
import datetime
from module import delivermsg_to_num
import time
from module.Apple_ID.all_id import *
import random
import math
# If shutdown_US = 1, the US products are skipped entirely.
shutdown_US = 0
# Set to 1 to enable test mode for the regular (non-US) products:
# only the product/country configured below is scraped.
nonus_test = 0
if nonus_test == 1:
    product_test = 'iPhone 13 mini'
    country_test = 'Tw'
else:
    product_test = ''
    country_test = ''
if platform.system() == "Windows":
    # Local (development) machine
    path = 'static/data/Data.csv'
else:
    # AWS host
    path = "/home/cathaylife04/smartphone/iphone11/static/data/Data.csv"
    # path = 'static/data/Data.csv'
# Previously scraped rows, reloaded as dicts so fresh results can be
# appended to them before writing the CSV back out.
Data = pd.read_csv(path)
Old_Data = Data.to_dict('records')
# Browser-like request headers sent with every Apple Store request.
headers = {
    'sec-ch-ua':'"Chromium";v="94", "Google Chrome";v="94", ";Not A Brand";v="99"',
    'sec-ch-ua-mobile':'?0',
    'sec-ch-ua-platform':"Windows",
    'Accept':'text/html,application/xhtml+xml,application/xml;q=0.9,image/avif,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.9',
    'User-Agent':'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/91.0.4472.164 Safari/537.36',
    'upgrade-insecure-requests': '1'
}
# Per-storefront-language suffix phrases to strip from delivery messages;
# the whole list is passed into replacestring().
bagofwords = [' – 送料無料',' — Free',' – 免額外付費',' — 免額外付費']
failnum = 0
def replacestring(x, bagofwords):
    """Return *x* with the first matching phrase from *bagofwords* stripped.

    Phrases are tested in order; as soon as one occurs in *x*, every
    occurrence of that phrase is removed and the result returned.  If no
    phrase matches, *x* is returned unchanged.
    """
    matched = next((phrase for phrase in bagofwords if phrase in x), None)
    return x if matched is None else x.replace(matched, '')
# Fetch one model's fulfillment message, retrying until the anti-scraping
# protection lets the request through.  Takes d (accumulated product info
# dict) and url (fulfillment-messages URL); returns d.
def anti_scrapping(d, url):
    """Populate *d* with delivery info for the global ``Model`` fetched from *url*.

    Retries the request indefinitely; every failed attempt (blocked request,
    malformed JSON, missing key) is counted and logged before sleeping and
    trying again.  On success sets:

    * ``d['Deliver']`` -- the delivery message with the per-language suffix
      phrases (``bagofwords``) stripped by ``replacestring``;
    * ``d['Day']`` -- the value computed by the country-specific converter
      in ``delivermsg_to_num`` from the message and timestamp.
    """
    # BUG FIX: failnum was re-initialized at the top of every loop iteration,
    # so the printed retry count was always 1; initialize it once instead.
    failnum = 0
    while True:
        try:
            print(url)
            s = requests.Session()
            r = s.post(url, headers=headers)
            # print(s.cookies)
            response = json.loads(r.text)
            deliever_msg = response['body']['content']['deliveryMessage'][Model]['deliveryOptionMessages'][0]['displayName']
            deliever_msg = replacestring(deliever_msg, bagofwords)
            d['Deliver'] = deliever_msg
            # Dispatch to the country-specific converter by attribute name
            # (e.g. delivermsg_to_num.Tw); equivalent to the old exec()-built
            # statement but without dynamic code execution.
            d['Day'] = getattr(delivermsg_to_num, d['Country'])(d['Deliver'], d['TimeStemp'])
            print(d)
            time.sleep(1)
        # Blocked by anti-scraping (or response malformed): wait and retry.
        except Exception as e:
            failnum = failnum + 1
            print('被反爬蟲', str(e), '嘗試次數', failnum)
            time.sleep(2)
            continue
        # Request succeeded: leave the retry loop.
        else:
            break
    return d
res = []
# Models whose requests were blocked by anti-scraping (currently unused).
url_us_fail_list = []
# The US storefront is scraped separately because its country/URL dictionary
# layout differs from the other storefronts.
count = 0
if shutdown_US != 1:
    for Model in Model_Us:
        # count = count +1
        # if count % 30 == 0 :
        #     time.sleep(5)
        # if Product_Us_R[Model] == 'IpadPro':
        # print(Model)
        d = {} # fresh record for this model
        d['Country'] = 'Us'
        d['TimeStemp'] = datetime.datetime.today().strftime("%Y-%m-%d")
        d['Product'] = Product_Us_R[Model]
        # try:
        # AirPodPro has neither Size nor Color options, so it is handled on
        # its own; every other product carries Color and Size keys.
        if Product_Us_R[Model] == 'AirPodPro':
            url = 'https://www.apple.com/shop/fulfillment-messages?parts.0=%s&little=true' % ( Model )
            d = anti_scrapping(d,url)
            res.append(d)
        # AppleWatch and iPad product lines.
        elif Product_Us_R[Model][0:10] == 'AppleWatch' or Product_Us_R[Model][0:4] == 'iPad':
            # AppleWatch6 / AppleWatchSE need two part numbers in the URL,
            # so they are handled separately.
            if Product_Us_R[Model] == 'AppleWatch6' or Product_Us_R[Model] == 'AppleWatchSE':
                url = f'https://www.apple.com/shop/fulfillment-messages?parts.0=Z0YQ&option.0='+ Model +'&little=true'
                d['Celluar'] = Celluar_R[Model]
                d['Size'] = Size_R[Model]
                d = anti_scrapping(d,url)
            else:
                url = f'https://www.apple.com/shop/fulfillment-messages?parts.0='+ Model + '&little=true'
                d = anti_scrapping(d,url)
            # iPads additionally carry a Colors field.
            if Product_Us_R[Model][0:4] == 'iPad':
                d['Colors'] = Color_R[Model[0:5]]
            # Everything except AppleWatch6/SE: fill in cellular and size
            # info from the 5-character model prefix.
            if 'Celluar' not in d.keys():
                d['Celluar'] = Celluar_R[Model[0:5]]
                d['Size'] = Size_R[Model[0:5]]
            # d['Days'] = delivermsg_to_num.Us(d['Deliver'],d['TimeStemp'])
            # d["Days"] = eval('delivermsg_to_num.'+d['Country']+'(d["Deliver"],d["TimeStemp"])')
            res.append(d)
        # Regular product lines (iPhone etc.):
        else:
            # NOTE(review): the original comment says models without a Size
            # entry (e.g. discontinued 256GB options) are skipped -- confirm
            # that Size_R lookups cannot raise KeyError here.
            url = 'https://www.apple.com/shop/fulfillment-messages?mt=regular&parts.0=%s&little=true' % ( Model )
            d['Colors'] = Color_R[Model[0:5]]
            d['Size'] = Size_R[Model[0:5]]
            d = anti_scrapping(d,url)
            res.append(d)
        # print(url)
        # f = random.uniform(1, 1.5)
        # time.sleep(math.floor(f * 10) / 10.0)
        # r = requests.get(url)
        # response = json.loads(r.text)
# Regular (non-US) storefronts: iterate product line -> country -> model.
# When nonus_test is enabled, only the configured test product/country is
# scraped; otherwise everything in `countries` is processed.
for Product in countries:
    # Run this product line when it is the test target, or always when
    # test mode is off.
    if (Product == product_test) or (nonus_test == 0):
        for Country in countries[Product]:
            if (Country == country_test) or (nonus_test == 0):
                # Inner loop over the model numbers for this product/country.
                for Model in countries[Product][Country]:
                    # if Product_R[Model] == 'IpadPro':
                    d = {}  # fresh record for this model
                    d['Country'] = Country
                    d['Product'] = Product_R[Model]
                    d['TimeStemp'] = datetime.datetime.today().strftime("%Y-%m-%d")
                    # AirPodPro has neither Size nor Color options, so it is
                    # handled on its own.
                    if Product_R[Model] == 'AirPodPro':
                        url = 'https://www.apple.com/%s/shop/fulfillment-messages?parts.0=%s&little=true' % (d['Country'].lower(), Model)
                        d = anti_scrapping(d,url)
                        res.append(d)
                    # AppleWatch and iPad product lines.
                    elif Product_R[Model][0:10] == 'AppleWatch' or Product_R[Model][0:4] == 'iPad':
                        # AppleWatch6 / AppleWatchSE need two part numbers in
                        # the URL, so they are requested directly here.
                        if Product_R[Model] == 'AppleWatch6' or Product_R[Model] == 'AppleWatchSE':
                            url = f'https://www.apple.com/'+ d['Country'].lower() +'/shop/fulfillment-messages?parts.0=Z0YQ&option.0='+ Model +'&little=true'
                            r = requests.get(url)
                            response = json.loads(r.text)
                            deliver_string = response['body']['content']['deliveryMessage']['Z0YQ']['deliveryOptionMessages'][0]['displayName']
                            # BUG FIX: replacestring takes the phrase list as a
                            # single argument; unpacking it with *bagofwords
                            # passed 5 positional args to a 2-parameter function
                            # and raised TypeError.
                            d['Deliver'] = replacestring(deliver_string, bagofwords)
                            d['Celluar'] = Celluar_R[Model]
                            d['Size'] = Size_R[Model]
                            # NOTE(review): unlike the anti_scrapping() path,
                            # this branch never computes d['Day'] -- confirm
                            # whether that is intentional.
                        else:
                            url = 'https://www.apple.com/%s/shop/fulfillment-messages?parts.0=%s&little=true' % (d['Country'].lower(), Model)
                            d = anti_scrapping(d,url)
                        # iPads additionally carry a Colors field.
                        if Product_R[Model][0:4] == 'iPad':
                            d['Colors'] = Color_R[Model[0:5]]
                        # Everything except AppleWatch6/SE: fill in cellular
                        # and size info from the 5-character model prefix.
                        if 'Celluar' not in d:
                            d['Celluar'] = Celluar_R[Model[0:5]]
                            d['Size'] = Size_R[Model[0:5]]
                        res.append(d)
                    # Regular product lines (iPhone etc.).
                    else:
                        url = 'https://www.apple.com/%s/shop/fulfillment-messages?mt=regular&parts.0=%s&little=true' % (d['Country'].lower(), Model)
                        d['Colors'] = Color_R[Model[0:5]]
                        d['Size'] = Size_R[Model[0:5]]
                        d = anti_scrapping(d,url)
                        res.append(d)
# Merge the freshly scraped rows with the previously saved ones, drop exact
# duplicate rows, and persist the combined table back to the CSV on disk.
newres = res + Old_Data
df = pd.DataFrame(newres).drop_duplicates()
df.to_csv(path,encoding='utf_8_sig', index=False)
print("ok") | [
"json.loads",
"requests.Session",
"pandas.read_csv",
"time.sleep",
"requests.get",
"datetime.datetime.today",
"platform.system",
"pandas.DataFrame"
] | [((831, 848), 'pandas.read_csv', 'pd.read_csv', (['path'], {}), '(path)\n', (842, 848), True, 'import pandas as pd\n'), ((8500, 8520), 'pandas.DataFrame', 'pd.DataFrame', (['newres'], {}), '(newres)\n', (8512, 8520), True, 'import pandas as pd\n'), ((625, 642), 'platform.system', 'platform.system', ([], {}), '()\n', (640, 642), False, 'import platform\n'), ((1791, 1809), 'requests.Session', 'requests.Session', ([], {}), '()\n', (1807, 1809), False, 'import requests\n'), ((1910, 1928), 'json.loads', 'json.loads', (['r.text'], {}), '(r.text)\n', (1920, 1928), False, 'import json\n'), ((2289, 2302), 'time.sleep', 'time.sleep', (['(1)'], {}), '(1)\n', (2299, 2302), False, 'import time\n'), ((2547, 2560), 'time.sleep', 'time.sleep', (['(2)'], {}), '(2)\n', (2557, 2560), False, 'import time\n'), ((3022, 3047), 'datetime.datetime.today', 'datetime.datetime.today', ([], {}), '()\n', (3045, 3047), False, 'import datetime\n'), ((5832, 5857), 'datetime.datetime.today', 'datetime.datetime.today', ([], {}), '()\n', (5855, 5857), False, 'import datetime\n'), ((6767, 6784), 'requests.get', 'requests.get', (['url'], {}), '(url)\n', (6779, 6784), False, 'import requests\n'), ((6824, 6842), 'json.loads', 'json.loads', (['r.text'], {}), '(r.text)\n', (6834, 6842), False, 'import json\n')] |
from parsing import get_diet
import discord , asyncio , datetime , sys , os
import parsing
def main():
    """Run the NCC school-meal Discord bot; when the client stops, re-exec the process.

    Fixes over the previous revision:
    - `message.channel.send(message.channel, embed=...)` passed the channel as the
      message *content*; the positional argument is removed.
    - `client.wait_for(..., timeout=...)` raises ``asyncio.TimeoutError`` instead of
      returning None, so the timeout is now handled with try/except.
    - the user-supplied date is parsed with ``datetime.strptime`` instead of ``eval``
      (eval on untrusted chat input was a code-injection hole).
    - the duplicated `!버전` / `!버젼` branches are merged.
    """
    client = discord.Client()
    TOKEN = "<KEY>"
    # Help text for the !도움말 command.
    # (fixed copy-paste typo: "!내일 급식" previously described itself as 오늘 급식)
    Command_list = (
        "```css\n"
        "[NCC_bot Command List]\n"
        "!도움말 - 도움말\n"
        "!버전 - 버전 정보\n"
        "!현재 시각 - 현재 시각을 알려줌\n"
        "!급식 - 오늘 급식\n"
        "!내일 급식 - 내일 급식\n"
        "!어제 급식 - 어제 급식\n"
        "!내일모래 급식 - 내일모래 급식\n"
        "!그저께 급식 - 그저께 급식\n"
        "!식단 - 원하는 날의 급식 식단\n"
        "```"
    )
    # Instructions for the compact date format expected by !식단.
    meal_notice = (
        "```css\n"
        "[-] 2019년 5월 2일 인 경우 19052 로 보낼 것.\n"
        "[-] 2019년 10월 1일 인 경우 19101 로 보낼 것.\n"
        "[-] 2020년 12월 7일 인 경우 20127 로 보낼 것.\n"
        "[-] 2020년 5월 27일 인 경우 200527 로 보낼 것.\n"
        "```"
    )

    @client.event
    async def on_ready():
        print('Logged in as')
        print(client.user.name)
        print(client.user.id)
        print('---------')
        activity = discord.Game(name="!도움말을 눌러 도움말 확인하기")
        await client.change_presence(status=discord.Status.online, activity=activity)

    # NOTE: this is a plain helper coroutine, not a gateway event, so the
    # previous (meaningless) @client.event decorator was dropped.
    async def print_get_meal(local_date, local_weekday, message):
        """Fetch lunch (2) and dinner (3) for local_date and send them as embeds."""
        l_diet = get_diet(2, local_date, local_weekday)
        d_diet = get_diet(3, local_date, local_weekday)
        # get_diet returns a 1-char string when there is no meal for that slot.
        if len(l_diet) == 1:
            embed = discord.Embed(title="No Meal", description="급식이 없습니다.", color=0x00ff00)
            await message.channel.send("급식이 없습니다", embed=embed)
        elif len(d_diet) == 1:
            lunch = local_date + " 중식\n" + l_diet
            embed = discord.Embed(title="Lunch", description=lunch, color=0x00ff00)
            await message.channel.send("급식 정보입니다", embed=embed)
        else:
            lunch = local_date + " 중식\n" + l_diet
            dinner = local_date + " 석식\n" + d_diet
            embed = discord.Embed(title="Lunch", description=lunch, color=0x00ff00)
            await message.channel.send("급식 정보입니다", embed=embed)
            embed = discord.Embed(title="Dinner", description=dinner, color=0x00ff00)
            await message.channel.send("급식 정보입니다", embed=embed)

    async def _send_meal_offset(message, day_offset):
        """Send the meal embeds for today + day_offset days."""
        f_dt = datetime.datetime.today() + datetime.timedelta(days=day_offset)
        meal_date = f_dt.strftime("%Y.%m.%d")
        whatday = f_dt.weekday()
        await print_get_meal(meal_date, whatday, message)

    @client.event
    async def on_message(message):
        if message.content.startswith('!도움말'):
            await message.channel.send(Command_list)
        elif message.content.startswith(('!버전', '!버젼')):
            # Both spellings share one handler (previously duplicated verbatim).
            embed = discord.Embed(title="Bot Version", description="updated", color=0x00ff00)
            embed.add_field(name="Version", value="2.0.0", inline=False)
            await message.channel.send("버전 정보입니다", embed=embed)
        elif message.content.startswith('!현재 시각'):
            dt = datetime.datetime.now()
            # Round-trip through unicode-escape so the Korean strftime format
            # survives on platforms with a non-UTF-8 default locale.
            local_time = dt.strftime("%Y년 %m월 %d일 %H시 %M분 %S초".encode('unicode-escape').decode()).encode().decode('unicode-escape')
            embed = discord.Embed(title="Local Time", description=local_time, color=0x00ff00)
            await message.channel.send("현재 시각 정보입니다", embed=embed)
        elif message.content.startswith('!급식'):
            await _send_meal_offset(message, 0)
        elif message.content.startswith('!내일 급식'):
            await _send_meal_offset(message, 1)
        elif message.content.startswith('!어제 급식'):
            await _send_meal_offset(message, -1)
        elif message.content.startswith('!그저께 급식'):
            await _send_meal_offset(message, -2)
        elif message.content.startswith('!내일모래 급식'):
            await _send_meal_offset(message, 2)
        elif message.content.startswith('!식단'):
            request = meal_notice + '\n' + '날짜를 보내주세요...'
            request_e = discord.Embed(title="날짜를 보내주세요!", description=request, color=0xcceeff)
            await message.channel.send(embed=request_e)
            # NOTE(review): no check= predicate, so the *next* message from
            # anyone in any channel is consumed, as in the original code.
            try:
                meal_date = await client.wait_for('message', timeout=15.0)
            except asyncio.TimeoutError:
                # wait_for raises on timeout; it never returns None.
                longtimemsg = discord.Embed(title="In 15sec", description='15초내로 입력해주세요. 다시시도 : !식단', color=0xff0000)
                await message.channel.send(embed=longtimemsg)
                return
            meal_date = str(meal_date.content)  # e.g. 171121 or 19052
            meal_date = '20' + meal_date[:2] + '.' + meal_date[2:4] + '.' + meal_date[4:6]  # 2017.11.21
            try:
                # strptime accepts non-zero-padded day values, so the old
                # single-digit-day string surgery (and the eval) is unnecessary.
                whatday = datetime.datetime.strptime(meal_date, "%Y.%m.%d").weekday()
            except ValueError:
                warnning = discord.Embed(title="Plz Retry", description='올바른 값으로 다시 시도하세요 : !식단', color=0xff0000)
                await message.channel.send(embed=warnning)
                return
            await print_get_meal(meal_date, whatday, message)

    client.run(TOKEN)
    # client.run() returned (e.g. the gateway connection died): re-exec this
    # script so the bot restarts automatically.
    executable = sys.executable
    args = sys.argv[:]
    args.insert(0, sys.executable)
    print("Respawning")
    os.execvp(executable, args)


if __name__ == '__main__':
    main()
| [
"os.execvp",
"discord.Game",
"datetime.datetime.now",
"datetime.datetime.today",
"discord.Client",
"datetime.timedelta",
"discord.Embed",
"parsing.get_diet"
] | [((118, 134), 'discord.Client', 'discord.Client', ([], {}), '()\n', (132, 134), False, 'import discord, asyncio, datetime, sys, os\n'), ((6401, 6428), 'os.execvp', 'os.execvp', (['executable', 'args'], {}), '(executable, args)\n', (6410, 6428), False, 'import discord, asyncio, datetime, sys, os\n'), ((1209, 1247), 'discord.Game', 'discord.Game', ([], {'name': '"""!도움말을 눌러 도움말 확인하기"""'}), "(name='!도움말을 눌러 도움말 확인하기')\n", (1221, 1247), False, 'import discord, asyncio, datetime, sys, os\n'), ((1436, 1474), 'parsing.get_diet', 'get_diet', (['(2)', 'local_date', 'local_weekday'], {}), '(2, local_date, local_weekday)\n', (1444, 1474), False, 'from parsing import get_diet\n'), ((1492, 1530), 'parsing.get_diet', 'get_diet', (['(3)', 'local_date', 'local_weekday'], {}), '(3, local_date, local_weekday)\n', (1500, 1530), False, 'from parsing import get_diet\n'), ((1581, 1649), 'discord.Embed', 'discord.Embed', ([], {'title': '"""No Meal"""', 'description': '"""급식이 없습니다."""', 'color': '(65280)'}), "(title='No Meal', description='급식이 없습니다.', color=65280)\n", (1594, 1649), False, 'import discord, asyncio, datetime, sys, os\n'), ((1818, 1878), 'discord.Embed', 'discord.Embed', ([], {'title': '"""Lunch"""', 'description': 'lunch', 'color': '(65280)'}), "(title='Lunch', description=lunch, color=65280)\n", (1831, 1878), False, 'import discord, asyncio, datetime, sys, os\n'), ((2080, 2140), 'discord.Embed', 'discord.Embed', ([], {'title': '"""Lunch"""', 'description': 'lunch', 'color': '(65280)'}), "(title='Lunch', description=lunch, color=65280)\n", (2093, 2140), False, 'import discord, asyncio, datetime, sys, os\n'), ((2228, 2290), 'discord.Embed', 'discord.Embed', ([], {'title': '"""Dinner"""', 'description': 'dinner', 'color': '(65280)'}), "(title='Dinner', description=dinner, color=65280)\n", (2241, 2290), False, 'import discord, asyncio, datetime, sys, os\n'), ((2581, 2651), 'discord.Embed', 'discord.Embed', ([], {'title': '"""Bot Version"""', 'description': '"""updated"""', 
'color': '(65280)'}), "(title='Bot Version', description='updated', color=65280)\n", (2594, 2651), False, 'import discord, asyncio, datetime, sys, os\n'), ((2861, 2931), 'discord.Embed', 'discord.Embed', ([], {'title': '"""Bot Version"""', 'description': '"""updated"""', 'color': '(65280)'}), "(title='Bot Version', description='updated', color=65280)\n", (2874, 2931), False, 'import discord, asyncio, datetime, sys, os\n'), ((3141, 3164), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (3162, 3164), False, 'import discord, asyncio, datetime, sys, os\n'), ((3317, 3387), 'discord.Embed', 'discord.Embed', ([], {'title': '"""Local Time"""', 'description': 'local_time', 'color': '(65280)'}), "(title='Local Time', description=local_time, color=65280)\n", (3330, 3387), False, 'import discord, asyncio, datetime, sys, os\n'), ((3526, 3551), 'datetime.datetime.today', 'datetime.datetime.today', ([], {}), '()\n', (3549, 3551), False, 'import discord, asyncio, datetime, sys, os\n'), ((3554, 3580), 'datetime.timedelta', 'datetime.timedelta', ([], {'days': '(0)'}), '(days=0)\n', (3572, 3580), False, 'import discord, asyncio, datetime, sys, os\n'), ((3801, 3826), 'datetime.datetime.today', 'datetime.datetime.today', ([], {}), '()\n', (3824, 3826), False, 'import discord, asyncio, datetime, sys, os\n'), ((3829, 3855), 'datetime.timedelta', 'datetime.timedelta', ([], {'days': '(1)'}), '(days=1)\n', (3847, 3855), False, 'import discord, asyncio, datetime, sys, os\n'), ((4076, 4101), 'datetime.datetime.today', 'datetime.datetime.today', ([], {}), '()\n', (4099, 4101), False, 'import discord, asyncio, datetime, sys, os\n'), ((4104, 4131), 'datetime.timedelta', 'datetime.timedelta', ([], {'days': '(-1)'}), '(days=-1)\n', (4122, 4131), False, 'import discord, asyncio, datetime, sys, os\n'), ((4353, 4378), 'datetime.datetime.today', 'datetime.datetime.today', ([], {}), '()\n', (4376, 4378), False, 'import discord, asyncio, datetime, sys, os\n'), ((4381, 4408), 
'datetime.timedelta', 'datetime.timedelta', ([], {'days': '(-2)'}), '(days=-2)\n', (4399, 4408), False, 'import discord, asyncio, datetime, sys, os\n'), ((4631, 4656), 'datetime.datetime.today', 'datetime.datetime.today', ([], {}), '()\n', (4654, 4656), False, 'import discord, asyncio, datetime, sys, os\n'), ((4659, 4685), 'datetime.timedelta', 'datetime.timedelta', ([], {'days': '(2)'}), '(days=2)\n', (4677, 4685), False, 'import discord, asyncio, datetime, sys, os\n'), ((4966, 5036), 'discord.Embed', 'discord.Embed', ([], {'title': '"""날짜를 보내주세요!"""', 'description': 'request', 'color': '(13430527)'}), "(title='날짜를 보내주세요!', description=request, color=13430527)\n", (4979, 5036), False, 'import discord, asyncio, datetime, sys, os\n'), ((5269, 5360), 'discord.Embed', 'discord.Embed', ([], {'title': '"""In 15sec"""', 'description': '"""15초내로 입력해주세요. 다시시도 : !식단"""', 'color': '(16711680)'}), "(title='In 15sec', description='15초내로 입력해주세요. 다시시도 : !식단',\n color=16711680)\n", (5282, 5360), False, 'import discord, asyncio, datetime, sys, os\n'), ((5951, 6041), 'discord.Embed', 'discord.Embed', ([], {'title': '"""Plz Retry"""', 'description': '"""올바른 값으로 다시 시도하세요 : !식단"""', 'color': '(16711680)'}), "(title='Plz Retry', description='올바른 값으로 다시 시도하세요 : !식단',\n color=16711680)\n", (5964, 6041), False, 'import discord, asyncio, datetime, sys, os\n')] |
import boto3, json, time, os, logging, botocore, uuid
from crhelper import CfnResource
from botocore.exceptions import ClientError
# Module-level wiring shared by the create/update/delete handlers below.
logger = logging.getLogger()
logger.setLevel(logging.INFO)
# Silence the very chatty AWS SDK loggers.
logging.getLogger('boto3').setLevel(logging.CRITICAL)
logging.getLogger('botocore').setLevel(logging.CRITICAL)
session = boto3.Session()
# crhelper resource that routes CloudFormation custom-resource lifecycle
# events to the decorated create/update/delete functions.
helper = CfnResource(json_logging=False, log_level='INFO', boto_level='CRITICAL', sleep_on_delete=15)
@helper.create
@helper.update
# This module performs the following:
# 1. attempt to create the stack set if one does not exist
# 2. attempt to deploy stack set instances to the target accounts
def create(event, context):
    """CloudFormation create/update handler (via crhelper).

    Ensures the New Relic StackSet exists in the management account; when the
    StackSet is first created and seed accounts are configured, queues
    stack-instance deployment via SNS. Stores the stack set name in
    ``helper.Data['result']`` on success.
    """
    logger.info(json.dumps(event))
    try:
        firstLaunch = False
        stackSetName = os.environ['stackSetName']
        stackSetUrl = os.environ['stackSetUrl']
        newRelicAccId = os.environ['newRelicAccId']
        newRelicSecret = os.environ['newRelicSecret']
        newRelicStackSNS = os.environ['newRelicStackSNS']
        # ARN format: arn:aws:lambda:<region>:<account-id>:... -> [3]=region, [4]=account
        managementAccountId = context.invoked_function_arn.split(":")[4]
        cloudFormationClient = session.client('cloudformation')
        regionName = context.invoked_function_arn.split(":")[3]
        # Raises if the stack set does not exist yet, which drives the
        # creation path in the except branch below.
        cloudFormationClient.describe_stack_set(StackSetName=stackSetName)
        logger.info('Stack set {} already exist'.format(stackSetName))
        helper.Data.update({"result": stackSetName})
    except Exception as describeException:
        # NOTE(review): this branch assumes the exception came from
        # describe_stack_set; if an earlier os.environ lookup failed instead,
        # the names used below would be unbound.
        logger.info('Stack set {} does not exist, creating it now.'.format(stackSetName))
        cloudFormationClient.create_stack_set(
            StackSetName=stackSetName,
            Description='Adds in New Relic integration to your aws accounts. Launch as Stack Set in your Control Tower landing zone management account.',
            TemplateURL=stackSetUrl,
            Parameters=[
                {
                    'ParameterKey': 'NewRelicAccountNumber',
                    'ParameterValue': newRelicAccId,
                    'UsePreviousValue': False,
                    'ResolvedValue': 'string'
                }
            ],
            Capabilities=[
                'CAPABILITY_NAMED_IAM'
            ],
            AdministrationRoleARN='arn:aws:iam::' + managementAccountId + ':role/service-role/AWSControlTowerStackSetRole',
            ExecutionRoleName='AWSControlTowerExecution')
        try:
            # Confirm the stack set is now visible before queuing instances.
            result = cloudFormationClient.describe_stack_set(StackSetName=stackSetName)
            firstLaunch = True
            logger.info('StackSet {} deployed'.format(stackSetName))
        except cloudFormationClient.exceptions.StackSetNotFoundException as describeException:
            logger.error('Exception getting new stack set, {}'.format(describeException))
            raise describeException
        try:
            # Only seed instances on first launch and when accounts were given.
            if firstLaunch and len(os.environ['seedAccounts']) > 0 :
                logger.info("New accounts : {}".format(os.environ['seedAccounts']))
                accountList = os.environ['seedAccounts'].split(",")
                snsClient = session.client('sns')
                messageBody = {}
                messageBody[stackSetName] = { 'target_accounts': accountList, 'target_regions': [regionName] }
                try:
                    # Stack-instance creation is asynchronous: another Lambda
                    # subscribed to this topic performs the deployment.
                    snsResponse = snsClient.publish(
                        TopicArn=newRelicStackSNS,
                        Message = json.dumps(messageBody))
                    logger.info("Queued for stackset instance creation: {}".format(snsResponse))
                except Exception as snsException:
                    # Best-effort: log and continue so stack creation still succeeds.
                    logger.error("Failed to send queue for stackset instance creation: {}".format(snsException))
            else:
                logger.info("No additional StackSet instances requested")
        except Exception as create_exception:
            logger.error('Exception creating stack instance with {}'.format(create_exception))
            raise create_exception
        helper.Data.update({"result": stackSetName})
    # To return an error to cloudformation you raise an exception:
    if not helper.Data.get("result"):
        raise ValueError("Error occured during solution onboarding")
    return None  # crhelper generates the physical resource id when None is returned
@helper.delete
# This module performs the following:
# 1. attempt to delete the stack set instances
# 2. attempt to delete the stack set
def delete(event, context):
    """CloudFormation delete handler (via crhelper).

    Enumerates every instance of the StackSet, deletes them, waits (bounded by
    the remaining Lambda time budget) for the delete operation to finish, then
    deletes the StackSet itself. All failures are logged and swallowed so the
    custom-resource delete never blocks stack teardown.
    """
    logger.info("Delete StackSet Instances")
    # Budget the wait on the remaining Lambda execution time (seconds),
    # leaving a small safety margin.
    deleteWaitTime = (int(context.get_remaining_time_in_millis()) - 100)/1000
    deleteSleepTime = 30
    try:
        stackSetName = os.environ['stackSetName']
        stackSetUrl = os.environ['stackSetUrl']
        managementAccountId = context.invoked_function_arn.split(":")[4]
        cloudFormationClient = session.client('cloudformation')
        regionName = context.invoked_function_arn.split(":")[3]
        cloudFormationClient.describe_stack_set(StackSetName=stackSetName)
        logger.info('Stack set {} exist'.format(stackSetName))
        # Collect every (account, region) pair that has a stack instance.
        paginator = cloudFormationClient.get_paginator('list_stack_instances')
        pageIterator = paginator.paginate(StackSetName= stackSetName)
        stackSetList = []
        accountList = []
        regionList = []
        for page in pageIterator:
            if 'Summaries' in page:
                stackSetList.extend(page['Summaries'])
        for instance in stackSetList:
            accountList.append(instance['Account'])
            regionList.append(instance['Region'])
        # De-duplicate before issuing the bulk delete.
        regionList = list(set(regionList))
        accountList = list(set(accountList))
        logger.info("StackSet instances found in region(s): {}".format(regionList))
        logger.info("StackSet instances found in account(s): {}".format(accountList))
        try:
            if len(accountList) > 0:
                response = cloudFormationClient.delete_stack_instances(
                    StackSetName=stackSetName,
                    Accounts=accountList,
                    Regions=regionList,
                    RetainStacks=False)
                logger.info(response)
                status = cloudFormationClient.describe_stack_set_operation(
                    StackSetName=stackSetName,
                    OperationId=response['OperationId'])
                # Poll until the operation leaves RUNNING or the time budget runs out.
                while status['StackSetOperation']['Status'] == 'RUNNING' and deleteWaitTime>0:
                    time.sleep(deleteSleepTime)
                    deleteWaitTime=deleteWaitTime-deleteSleepTime
                    status = cloudFormationClient.describe_stack_set_operation(
                        StackSetName=stackSetName,
                        OperationId=response['OperationId'])
                    logger.info("StackSet instance delete status {}".format(status))
            try:
                # Delete the StackSet itself; fails if instances still remain.
                response = cloudFormationClient.delete_stack_set(StackSetName=stackSetName)
                logger.info("StackSet template delete status {}".format(response))
            except Exception as stackSetException:
                logger.warning("Problem occured while deleting, StackSet still exist : {}".format(stackSetException))
        except Exception as describeException:
            logger.error(describeException)
    except Exception as describeException:
        # Nothing to delete (stack set missing) or lookup failed; log and move on.
        logger.error(describeException)
        return None
    return None
def lambda_handler(event, context):
    """Lambda entry point: route CloudFormation custom-resource events to crhelper.

    Events without a 'RequestType' key (non-CloudFormation invocations) are
    logged and ignored; any failure during dispatch is reported back to
    CloudFormation via ``helper.init_failure``.
    """
    logger.info(json.dumps(event))
    try:
        if 'RequestType' in event: helper(event, context)
    except Exception as e:
        helper.init_failure(e)
"logging.getLogger",
"boto3.Session",
"json.dumps",
"time.sleep",
"crhelper.CfnResource"
] | [((141, 160), 'logging.getLogger', 'logging.getLogger', ([], {}), '()\n', (158, 160), False, 'import boto3, json, time, os, logging, botocore, uuid\n'), ((312, 327), 'boto3.Session', 'boto3.Session', ([], {}), '()\n', (325, 327), False, 'import boto3, json, time, os, logging, botocore, uuid\n'), ((338, 434), 'crhelper.CfnResource', 'CfnResource', ([], {'json_logging': '(False)', 'log_level': '"""INFO"""', 'boto_level': '"""CRITICAL"""', 'sleep_on_delete': '(15)'}), "(json_logging=False, log_level='INFO', boto_level='CRITICAL',\n sleep_on_delete=15)\n", (349, 434), False, 'from crhelper import CfnResource\n'), ((191, 217), 'logging.getLogger', 'logging.getLogger', (['"""boto3"""'], {}), "('boto3')\n", (208, 217), False, 'import boto3, json, time, os, logging, botocore, uuid\n'), ((245, 274), 'logging.getLogger', 'logging.getLogger', (['"""botocore"""'], {}), "('botocore')\n", (262, 274), False, 'import boto3, json, time, os, logging, botocore, uuid\n'), ((657, 674), 'json.dumps', 'json.dumps', (['event'], {}), '(event)\n', (667, 674), False, 'import boto3, json, time, os, logging, botocore, uuid\n'), ((7449, 7466), 'json.dumps', 'json.dumps', (['event'], {}), '(event)\n', (7459, 7466), False, 'import boto3, json, time, os, logging, botocore, uuid\n'), ((6399, 6426), 'time.sleep', 'time.sleep', (['deleteSleepTime'], {}), '(deleteSleepTime)\n', (6409, 6426), False, 'import boto3, json, time, os, logging, botocore, uuid\n'), ((3371, 3394), 'json.dumps', 'json.dumps', (['messageBody'], {}), '(messageBody)\n', (3381, 3394), False, 'import boto3, json, time, os, logging, botocore, uuid\n')] |
"""Extend ``sys.path`` so modules under ``../python_modules`` can be imported."""
import os
import sys

# Absolute directory containing this file, with symlinks resolved.
BASE_DIR = os.path.dirname(os.path.realpath(__file__))

# Make the sibling python_modules directory importable.
_MODULES_PATH = os.path.join(BASE_DIR, "../python_modules")
sys.path.append(_MODULES_PATH)
| [
"os.path.realpath",
"os.path.join"
] | [((78, 104), 'os.path.realpath', 'os.path.realpath', (['__file__'], {}), '(__file__)\n', (94, 104), False, 'import os\n'), ((122, 165), 'os.path.join', 'os.path.join', (['BASE_DIR', '"""../python_modules"""'], {}), "(BASE_DIR, '../python_modules')\n", (134, 165), False, 'import os\n')] |
import json
import os
from datetime import datetime
import boto3
from aws_lambda_powertools.logging import Logger
logger = Logger()
@logger.inject_lambda_context
def main(event, context):
    """Fan DynamoDB stream records out to the default EventBridge bus.

    Each stream record becomes one PutEvents entry whose DetailType is the
    stream event name (INSERT/MODIFY/REMOVE) and whose detail carries the
    '<type>' prefixes of the pk/sk keys plus the raw record.

    Fixes over the previous revision:
    - PutEvents accepts at most 10 entries per call, so entries are batched.
    - an empty Entries list (no records) is no longer sent (the API rejects it).
    - FailedEntryCount is aggregated across batches.
    """
    records = event.get("Records", [])
    stream_label = os.environ["STREAM_LABEL"]
    logger.info(
        {"record_count": len(records), "stream": stream_label,}
    )

    entries = [_build_entry(record, stream_label) for record in records]
    if not entries:
        logger.info({"num_entries": 0, "failed_entries": 0})
        return

    client = boto3.client("events")
    failed_count = 0
    # EventBridge PutEvents hard limit: 10 entries per request.
    for start in range(0, len(entries), 10):
        response = client.put_events(Entries=entries[start:start + 10])
        failed_count += response["FailedEntryCount"]
    logger.debug(entries)
    logger.info(
        {"num_entries": len(records), "failed_entries": failed_count,}
    )
    return


def _build_entry(record, stream_label):
    """Map one DynamoDB stream record to an EventBridge PutEvents entry."""
    keys = record.get("dynamodb").get("Keys")
    pk = keys["pk"]["S"]
    sk = keys["sk"]["S"]
    # pk and sk are '<type>#...' strings; partition() keeps the whole key when
    # the '#' separator is missing (find() == -1 silently dropped the last char).
    pk_type = pk.partition("#")[0]
    sk_type = sk.partition("#")[0]
    event_name = record["eventName"]
    logger.info(
        {
            "pk": pk,
            "pk_type": pk_type,
            "sk": sk,
            "sk_type": sk_type,
            "event_name": event_name,
        }
    )
    return {
        "Source": stream_label,
        "Resources": [],
        "DetailType": event_name,
        "Detail": json.dumps(
            {"pk_type": pk_type, "sk_type": sk_type, "record": record}
        ),
        "EventBusName": "default",
    }
| [
"json.dumps",
"boto3.client",
"aws_lambda_powertools.logging.Logger"
] | [((125, 133), 'aws_lambda_powertools.logging.Logger', 'Logger', ([], {}), '()\n', (131, 133), False, 'from aws_lambda_powertools.logging import Logger\n'), ((1308, 1330), 'boto3.client', 'boto3.client', (['"""events"""'], {}), "('events')\n", (1320, 1330), False, 'import boto3\n'), ((1113, 1183), 'json.dumps', 'json.dumps', (["{'pk_type': pk_type, 'sk_type': sk_type, 'record': record}"], {}), "({'pk_type': pk_type, 'sk_type': sk_type, 'record': record})\n", (1123, 1183), False, 'import json\n')] |
import cv2
import matplotlib.pyplot as plt
import time
from picamera.array import PiRGBArray as pi_rgb
from picamera import PiCamera as picam
confidence_threshold = 0.45  # Threshold to detect object
font = cv2.FONT_HERSHEY_COMPLEX  # Label font for drawn annotations
color = [255, 255, 255]  # BGR white for boxes and text
height = 320  # Capture frame height (pixels)
width = 640  # Capture frame width (pixels)
# NOTE(review): this constant is shadowed by the focal_length() function
# defined below, so it is effectively unused.
focal_length = 500
class PiCam:
    """Raspberry Pi camera configured for the object-detection capture loop."""

    def __init__(self):
        """Initialise the camera at module resolution/framerate and allocate the capture buffer."""
        self.cam = picam()
        self.cam.resolution = (width, height)
        self.cam.framerate = 30
        # BUG FIX: PiRGBArray must receive the PiCamera *instance*, not the
        # PiCamera class itself (`picam`), otherwise capture is broken.
        self.raw_cap = pi_rgb(self.cam, size=self.cam.resolution)
        # Give the sensor a moment to warm up before the first frame.
        time.sleep(0.1)
def focal_length(measured_distance, real_width, width_in_rf_image):
    """Estimate the camera focal length from a reference image.

    Pinhole model: F = (P * D) / W, where P is the object's width in pixels in
    the reference image, D the known distance, and W the real object width.
    """
    pixels_times_distance = width_in_rf_image * measured_distance
    return pixels_times_distance / real_width
def distance_finder(foc_len, real_face_width, face_width_in_frame):
    """Estimate the distance to a face: D = (W * F) / P (pinhole model)."""
    scaled_width = real_face_width * foc_len
    return scaled_width / face_width_in_frame
camera = PiCam()
# Class labels for the SSD MobileNet COCO model, one name per line.
classFile = 'coco.names'
with open(classFile, 'rt') as f:
    classNames = f.read().rstrip('\n').split('\n')
configPath = 'ssd_mobilenet_v3_large_coco_2020_01_14.pbtxt'
weightsPath = 'frozen_inference_graph.pb'
# Configure the OpenCV DNN detection model (input preprocessing maps
# pixel values into [-1, 1] and swaps BGR -> RGB).
net = cv2.dnn_DetectionModel(weightsPath, configPath)
net.setInputSize(320, 320)
net.setInputScale(1.0 / 127.5)
net.setInputMean((127.5, 127.5, 127.5))
net.setInputSwapRB(True)
# Stream frames from the Pi camera, run detection, and draw annotations.
for frame in camera.cam.capture_continuous(camera.raw_cap, format="bgr", use_video_port=True):
    img = frame.array
    class_ids, confidences, boundary_boxes = net.detect(img, confThreshold=confidence_threshold)
    if len(class_ids) != 0:
        for classId, confidence, box in zip(class_ids.flatten(), confidences.flatten(), boundary_boxes):
            cv2.rectangle(img, box, color=color, thickness=2)
            # COCO class ids are 1-based; classNames is 0-based.
            cv2.putText(img, classNames[classId - 1].upper(), (box[0] + 10, box[1] + 30), font, 1, color, 2)
            cv2.putText(img, str(round(confidence * 100, 2)), (box[0] + 200, box[1] + 30), font, 1, color, 2)
    cv2.imshow("IEE3061 IoT", img)
    # Reset the capture buffer so the next frame can be written into it.
    camera.raw_cap.truncate(0)
    key = cv2.waitKey(1)
    # Exit on ESC.
    if key == 27:
        break
cv2.destroyAllWindows()
| [
"cv2.rectangle",
"picamera.PiCamera",
"time.sleep",
"cv2.imshow",
"cv2.dnn_DetectionModel",
"cv2.destroyAllWindows",
"picamera.array.PiRGBArray",
"cv2.waitKey"
] | [((1122, 1169), 'cv2.dnn_DetectionModel', 'cv2.dnn_DetectionModel', (['weightsPath', 'configPath'], {}), '(weightsPath, configPath)\n', (1144, 1169), False, 'import cv2\n'), ((2068, 2091), 'cv2.destroyAllWindows', 'cv2.destroyAllWindows', ([], {}), '()\n', (2089, 2091), False, 'import cv2\n'), ((1944, 1974), 'cv2.imshow', 'cv2.imshow', (['"""IEE3061 IoT"""', 'img'], {}), "('IEE3061 IoT', img)\n", (1954, 1974), False, 'import cv2\n'), ((2018, 2032), 'cv2.waitKey', 'cv2.waitKey', (['(1)'], {}), '(1)\n', (2029, 2032), False, 'import cv2\n'), ((371, 378), 'picamera.PiCamera', 'picam', ([], {}), '()\n', (376, 378), True, 'from picamera import PiCamera as picam\n'), ((483, 522), 'picamera.array.PiRGBArray', 'pi_rgb', (['picam'], {'size': 'self.cam.resolution'}), '(picam, size=self.cam.resolution)\n', (489, 522), True, 'from picamera.array import PiRGBArray as pi_rgb\n'), ((532, 547), 'time.sleep', 'time.sleep', (['(0.1)'], {}), '(0.1)\n', (542, 547), False, 'import time\n'), ((1666, 1715), 'cv2.rectangle', 'cv2.rectangle', (['img', 'box'], {'color': 'color', 'thickness': '(2)'}), '(img, box, color=color, thickness=2)\n', (1679, 1715), False, 'import cv2\n')] |
from os.path import join
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
from scipy.stats import zscore
from sklearn.decomposition import PCA
import pandas as pd
from itertools import combinations
# Load helper function(s) for interacting with CTF dataset
from ctf_dataset.load import create_wrapped_dataset
base_dir = '/mnt/bucket/labs/hasson/snastase/social-ctf'
data_dir = join(base_dir, 'data')
# Create wrapped CTF dataset
wrap_f = create_wrapped_dataset(data_dir, output_dataset_name="virtual.hdf5")
n_lstms = 512
n_repeats = 8
n_players = 4
map_id = 0
# Get matchups with all same agents (e.g. AA vs AA): a matchup qualifies
# when every player's agent_id equals the first player's agent_id.
agent_ids = wrap_f['map/matchup/repeat/player/agent_id'][0, :, :, :, 0]
matchup_ids = np.all(agent_ids[:, 0, :] ==
                     agent_ids[:, 0, 0][:, np.newaxis], axis=1)
n_matchups = np.sum(matchup_ids) # 0, 34, 49, 54
# Extract LSTMs for one map and matchup; tanh squashes the raw cell
# states into [-1, 1] before any further analysis.
lstms_matched = np.tanh(wrap_f['map/matchup/repeat/player/time/lstm'][
    map_id, matchup_ids, ...].astype(np.float32))
print("Loaded LSTMs for within-population matchups")
# Loop through matchups, repeats, and players to compute PCA
# (full-rank: k equals the number of LSTM units).
k = n_lstms
lstm_pca = {}
for m in np.arange(n_matchups):
    lstm_pca[m] = {}
    for r in np.arange(n_repeats):
        lstm_pca[m][r] = {}
        for p in np.arange(n_players):
            lstm_pca[m][r][p] = {}
            pca = PCA(n_components=k)
            # Units are z-scored across time before PCA; the commented
            # lines are previously-tried alternative preprocessings.
            transformed = pca.fit_transform(
                #zscore(lstms_matched[m, r, p], axis=0))
                #np.tanh(lstms_matched[m, r, p]))
                zscore(lstms_matched[m, r, p], axis=0))
            lstm_pca[m][r][p]['transformed'] = transformed
            lstm_pca[m][r][p]['pca'] = pca
            print(f"Finished running PCA for matchup {m}, "
                  f"repeat {r}, player {p}")
np.save('results/pca_lstm_tanh-z_results.npy', lstm_pca)
# Convert PCA outputs to long (tidy) dictionary for plotting with seaborn
lstm_pca_long = {'population': [], 'repeat': [], 'player': [],
                 'variance explained': [], 'dimension': []}
pops = {0: 'A', 1: 'B', 2: 'C', 3: 'D'}
for m in np.arange(n_matchups):
    for r in np.arange(n_repeats):
        for p in np.arange(n_players):
            # One row per PCA dimension (1-indexed) per player/repeat.
            for k, v in enumerate(lstm_pca[m][r][p][
                'pca'].explained_variance_ratio_):
                lstm_pca_long['population'].append(pops[m])
                lstm_pca_long['repeat'].append(r)
                lstm_pca_long['player'].append(p)
                lstm_pca_long['variance explained'].append(v)
                lstm_pca_long['dimension'].append(k + 1)
lstm_pca_long = pd.DataFrame(lstm_pca_long)
# Only plot the first max_k dimensions (scree plot per population).
max_k = 30
lstm_pca_trunc = lstm_pca_long[lstm_pca_long['dimension'] <= max_k]
sns.set(font_scale=1.2, style='white')
sns.relplot(data=lstm_pca_trunc, x='dimension',
            y='variance explained', hue='repeat',
            col='population', col_wrap=2,
            kind='line')
# Compute number of components required for percentage variance
percents = [.5, .75, .9, .95, .99]
percents_vaf = np.zeros((n_matchups, n_repeats, n_players, len(percents)))
for m in np.arange(n_matchups):
    for r in np.arange(n_repeats):
        for p in np.arange(n_players):
            for i, perc in enumerate(percents):
                # Smallest k whose cumulative variance ratio reaches `perc`.
                k = np.sum(np.cumsum(
                    lstm_pca[m][r][p][
                        'pca'].explained_variance_ratio_) <= perc) + 1
                percents_vaf[m, r, p, i] = k
# Report the median and range across repeats/players for each population.
for m in np.arange(n_matchups):
    for i, perc in enumerate(percents):
        median = int(np.median(percents_vaf[m, ..., i]))
        # Renamed from `min`/`max` so the builtins are not shadowed for
        # the remainder of this module-level script.
        vaf_min = int(np.amin(percents_vaf[m, ..., i]))
        vaf_max = int(np.amax(percents_vaf[m, ..., i]))
        print(f"Population {pops[m]}: {median} dimensions "
              f"for {perc} variance (range: {vaf_min}-{vaf_max})")
    print('\n')
# Stack pairs of players (feature-wise) and compute joint PCA
pairs = list(combinations(np.arange(n_players), 2))
n_pairs = len(pairs)
# Joint space has twice the units of a single player.
k = n_lstms * 2
# Pair indices into `pairs`: (0,1) and (2,3) are teammates (cooperative);
# all cross-team pairs are competitive.
coop_ids, comp_ids = [0, 5], [1, 2, 3, 4]
lstm_pair_pca = {}
for m in np.arange(n_matchups):
    lstm_pair_pca[m] = {}
    for r in np.arange(n_repeats):
        lstm_pair_pca[m][r] = {}
        for p, pair in enumerate(pairs):
            lstm_pair_pca[m][r][p] = {}
            stack_lstm = np.hstack((lstms_matched[m, r, pair[0]],
                                     lstms_matched[m, r, pair[1]]))
            pca = PCA(n_components=k)
            transformed = pca.fit_transform(
                zscore(stack_lstm, axis=0))
            lstm_pair_pca[m][r][p]['transformed'] = transformed
            lstm_pair_pca[m][r][p]['pca'] = pca
            print(f"Finished running PCA for matchup {m}, "
                  f"repeat {r}, pair {pair}")
np.save('results/pair-pca_lstm_tanh-z_results.npy', lstm_pair_pca)
# Convert pair-PCA outputs to long (tidy) dictionary for plotting
lstm_pair_pca_long = {'population': [], 'repeat': [], 'pair': [],
                      'variance explained': [], 'dimension': [],
                      'type': []}
pops = {0: 'A', 1: 'B', 2: 'C', 3: 'D'}
# Label each pair index as cooperative (teammates) or competitive.
pair_type = {c:('cooperative' if c in coop_ids else 'competitive')
             for c in np.arange(n_pairs)}
for m in np.arange(n_matchups):
    for r in np.arange(n_repeats):
        for p in np.arange(n_pairs):
            for k, v in enumerate(lstm_pair_pca[m][r][p][
                'pca'].explained_variance_ratio_):
                lstm_pair_pca_long['population'].append(pops[m])
                lstm_pair_pca_long['repeat'].append(r)
                lstm_pair_pca_long['pair'].append(p)
                lstm_pair_pca_long['variance explained'].append(v)
                lstm_pair_pca_long['dimension'].append(k + 1)
                lstm_pair_pca_long['type'].append(pair_type[p])
lstm_pair_pca_long = pd.DataFrame(lstm_pair_pca_long)
# Scree plot of the first max_k joint dimensions, split by pair type.
max_k = 10
lstm_pair_pca_trunc = lstm_pair_pca_long[
    lstm_pair_pca_long['dimension'] <= max_k]
sns.set(font_scale=1.2, style='white')
sns.relplot(data=lstm_pair_pca_trunc, x='dimension',
            y='variance explained', hue='type',
            col='population', col_wrap=2, linewidth=3,
            kind='line')
# Compute number of components required for percentage variance
percents = [.5, .75, .9, .95, .99]
percents_vaf = np.zeros((n_matchups, n_repeats, n_pairs, len(percents)))
for m in np.arange(n_matchups):
    for r in np.arange(n_repeats):
        for p in np.arange(n_pairs):
            for i, perc in enumerate(percents):
                # Smallest k whose cumulative variance ratio reaches `perc`.
                k = np.sum(np.cumsum(
                    lstm_pair_pca[m][r][p][
                        'pca'].explained_variance_ratio_) <= perc) + 1
                percents_vaf[m, r, p, i] = k
# Report summaries separately for cooperative vs. competitive pairs.
for m in np.arange(n_matchups):
    # Renamed loop variable from `type` (shadowed the builtin) and the
    # `min`/`max` locals below likewise; the printed output is unchanged.
    for pair_label, c in zip(['cooperative', 'competitive'],
                             [coop_ids, comp_ids]):
        for i, perc in enumerate(percents):
            median = int(np.median(percents_vaf[m, :, c, i]))
            vaf_min = int(np.amin(percents_vaf[m, :, c, i]))
            vaf_max = int(np.amax(percents_vaf[m, :, c, i]))
            print(f"Population {pops[m]} {pair_label}: {median} dimensions "
                  f"for {perc} variance (range: {vaf_min}-{vaf_max})")
        print('\n')
# Stack across all repeats and players (time-wise) and run one PCA per matchup
k = n_lstms
lstm_stack_pca = {}
for m in np.arange(n_matchups):
    lstm_stack_pca[m] = {}
    stack_lstm = []
    for r in np.arange(n_repeats):
        for p in np.arange(n_players):
            # z-score each player/repeat before stacking along time.
            stack_lstm.append(zscore(lstms_matched[m, r, p],
                                     axis=0))
    stack_lstm = np.vstack(stack_lstm)
    pca = PCA(n_components=k)
    transformed = pca.fit_transform(stack_lstm)
    lstm_stack_pca[m]['transformed'] = transformed
    lstm_stack_pca[m]['pca'] = pca
    print(f"Finished running stacked PCA for matchup {m}")
np.save('results/stack-pca_lstm_tanh-z_results.npy', lstm_stack_pca)
# Convert stacked-PCA outputs to long (tidy) dictionary for plotting
lstm_stack_pca_long = {'population': [], 'variance explained': [],
                       'dimension': []}
pops = {0: 'A', 1: 'B', 2: 'C', 3: 'D'}
for m in np.arange(n_matchups):
    for k, v in enumerate(lstm_stack_pca[m][
        'pca'].explained_variance_ratio_):
        lstm_stack_pca_long['population'].append(pops[m])
        lstm_stack_pca_long['variance explained'].append(v)
        lstm_stack_pca_long['dimension'].append(k + 1)
lstm_stack_pca_long = pd.DataFrame(lstm_stack_pca_long)
# Scree plot of the first max_k dimensions per population.
max_k = 8
lstm_stack_pca_trunc = lstm_stack_pca_long[
    lstm_stack_pca_long['dimension'] <= max_k]
sns.set(font_scale=1.2, style='white')
sns.lineplot(data=lstm_stack_pca_trunc, x='dimension',
             y='variance explained', hue='population',
             linewidth=3)
# Compute number of components required for percentage variance
percents = [.5, .75, .9, .95, .99]
percents_vaf = np.zeros((n_matchups, len(percents)))
for m in np.arange(n_matchups):
    for i, perc in enumerate(percents):
        # Smallest k whose cumulative variance ratio reaches `perc`.
        k = np.sum(np.cumsum(
            lstm_stack_pca[m][
                'pca'].explained_variance_ratio_) <= perc) + 1
        percents_vaf[m, i] = k
for m in np.arange(n_matchups):
    for i, perc in enumerate(percents):
        # One value per matchup here, so the median is just that value.
        median = int(np.median(percents_vaf[m, i]))
        print(f"Population {pops[m]}: {median} dimensions "
              f"for {perc} variance")
    print('\n')
# Create reduced-dimension version of data (e.g. k = 100)
k = 100
lstm_pca_reduce = []
for m in np.arange(n_matchups):
    stack_lstm = []
    for r in np.arange(n_repeats):
        for p in np.arange(n_players):
            stack_lstm.append(zscore(lstms_matched[m, r, p],
                                     axis=0))
    stack_lstm = np.vstack(stack_lstm)
    pca = PCA(n_components=k)
    transformed = pca.fit_transform(stack_lstm)
    percent_vaf = np.sum(pca.explained_variance_ratio_)
    # Un-stack PCA-transformed arrays for repeats, players
    # (the hard-coded 8 and 4 are n_repeats and n_players).
    unstack_lstm = np.stack(np.split(np.stack(
        np.split(transformed, 8), axis=0), 4, axis=1), axis=1)
    lstm_pca_reduce.append(unstack_lstm)
    print(f"Finished running stacked PCA for matchup {m}")
    print(f"Proportion variance at for matchup {m} at k = {k}: "
          f"{percent_vaf:.3f}")
lstm_pca_reduce = np.stack(lstm_pca_reduce, axis=0)
np.save(f'results/lstms_tanh-z_pca-k{k}.npy', lstm_pca_reduce)
## Compute correlations for PC in comparison to game variable
from features import get_features
from scipy.stats import pearsonr
# Load pre-saved PCA-reduced LSTMs (matchup x repeat x player x time x k)
k = 100
lstm_pca = np.load(f'results/lstms_tanh-z_pca-k{k}.npy')
# Exclude degenerate (all-zero) features from analysis
feature_set = ['position', 'health', 'events']
all_features, labels = get_features(wrap_f, feature_set=feature_set, map_id=map_id,
                                    matchup_id=matchup_ids, player_id=slice(None),
                                    repeat_id=slice(None))
features_exclude = []
for label in labels:
    features = all_features[..., np.array(labels) == label]
    # NOTE(review): np.nonzero returns index arrays, so this sums *indices*,
    # not a count of nonzero entries (np.count_nonzero would be the count);
    # it is 0 iff all entries are zero only when no nonzero sits at index 0.
    n_nonzeros = np.sum(np.nonzero(features))
    print(f'checking {label} for all nonzeros; found {n_nonzeros} nonzeros')
    if n_nonzeros == 0:
        features_exclude.append(label)
        print(f'excluding {label}')
labels = [l for l in labels if l not in features_exclude]
# Correlate each game variable's time course with each PC's time course
pca_corrs = {}
for game_var in labels:
    features = all_features[..., np.array(labels) == game_var]
    # NOTE(review): indexing uses the *filtered* labels against the
    # unfiltered all_features array, so the boolean mask dimensions may
    # no longer line up after the exclusion step above — verify.
    feature_shape = features.shape[:-2]
    pca_corrs[game_var] = np.full(feature_shape + (k,), np.nan)
    for matchup_id in np.arange(n_matchups):
        for repeat_id in np.arange(n_repeats):
            for player_id in np.arange(n_players):
                for pc_id in np.arange(k):
                    # pearsonr returns (r, p-value); keep only r.
                    pc_corr = pearsonr(features[matchup_id, repeat_id, player_id, :, 0],
                                       lstm_pca[matchup_id, repeat_id, player_id,
                                                :, pc_id])[0]
                    pca_corrs[game_var][matchup_id, repeat_id, player_id, pc_id] = pc_corr
    print(f"finished pca correlations w/ {game_var}")
# Save dictionary of per-variable correlation arrays
np.save(f'results/lstm_pca-k{k}_feature_correlations.npy', pca_corrs)
## Plot
# Bug fix: np.save wraps a dict in a 0-d object array, so np.load returns an
# ndarray that can be neither iterated nor indexed by key; .item() unwraps it
# back into the original dict.
pca_corrs = np.load('results/lstm_pca-k100_feature_correlations.npy',
                    allow_pickle=True).item()
# Summarize PCA corrs across players and repeats (nan-aware mean over the
# repeat and player axes).
pca_corr_means = []
for game_var in pca_corrs:
    pca_corr_means.append(np.nanmean(pca_corrs[game_var], axis=(1, 2)))
pca_corr_means = np.stack(pca_corr_means, 1)
assert pca_corr_means.shape[1] == len(labels)
pc_id = 2  # NOTE(review): leftover; immediately shadowed by the loop below
for pc_id in np.arange(1, 10):
    plt.matshow(pca_corr_means[..., pc_id], cmap='RdBu_r')
    plt.yticks([0, 1, 2, 3], ['A', 'B', 'C', 'D'])
    plt.xticks(np.arange(pca_corr_means.shape[1]), labels, rotation=90)
    plt.title(f'PCA Feature Correlations for PC{pc_id}')
    plt.colorbar()
| [
"numpy.hstack",
"numpy.nanmean",
"numpy.array",
"numpy.cumsum",
"scipy.stats.pearsonr",
"numpy.save",
"numpy.arange",
"seaborn.set",
"sklearn.decomposition.PCA",
"numpy.stack",
"matplotlib.pyplot.yticks",
"numpy.vstack",
"pandas.DataFrame",
"numpy.amin",
"seaborn.lineplot",
"scipy.stat... | [((403, 425), 'os.path.join', 'join', (['base_dir', '"""data"""'], {}), "(base_dir, 'data')\n", (407, 425), False, 'from os.path import join\n'), ((465, 533), 'ctf_dataset.load.create_wrapped_dataset', 'create_wrapped_dataset', (['data_dir'], {'output_dataset_name': '"""virtual.hdf5"""'}), "(data_dir, output_dataset_name='virtual.hdf5')\n", (487, 533), False, 'from ctf_dataset.load import create_wrapped_dataset\n'), ((727, 798), 'numpy.all', 'np.all', (['(agent_ids[:, 0, :] == agent_ids[:, 0, 0][:, np.newaxis])'], {'axis': '(1)'}), '(agent_ids[:, 0, :] == agent_ids[:, 0, 0][:, np.newaxis], axis=1)\n', (733, 798), True, 'import numpy as np\n'), ((834, 853), 'numpy.sum', 'np.sum', (['matchup_ids'], {}), '(matchup_ids)\n', (840, 853), True, 'import numpy as np\n'), ((1185, 1206), 'numpy.arange', 'np.arange', (['n_matchups'], {}), '(n_matchups)\n', (1194, 1206), True, 'import numpy as np\n'), ((1820, 1876), 'numpy.save', 'np.save', (['"""results/pca_lstm_tanh-z_results.npy"""', 'lstm_pca'], {}), "('results/pca_lstm_tanh-z_results.npy', lstm_pca)\n", (1827, 1876), True, 'import numpy as np\n'), ((2106, 2127), 'numpy.arange', 'np.arange', (['n_matchups'], {}), '(n_matchups)\n', (2115, 2127), True, 'import numpy as np\n'), ((2603, 2630), 'pandas.DataFrame', 'pd.DataFrame', (['lstm_pca_long'], {}), '(lstm_pca_long)\n', (2615, 2630), True, 'import pandas as pd\n'), ((2728, 2766), 'seaborn.set', 'sns.set', ([], {'font_scale': '(1.2)', 'style': '"""white"""'}), "(font_scale=1.2, style='white')\n", (2735, 2766), True, 'import seaborn as sns\n'), ((2767, 2900), 'seaborn.relplot', 'sns.relplot', ([], {'data': 'lstm_pca_trunc', 'x': '"""dimension"""', 'y': '"""variance explained"""', 'hue': '"""repeat"""', 'col': '"""population"""', 'col_wrap': '(2)', 'kind': '"""line"""'}), "(data=lstm_pca_trunc, x='dimension', y='variance explained', hue\n ='repeat', col='population', col_wrap=2, kind='line')\n", (2778, 2900), True, 'import seaborn as sns\n'), ((3117, 3138), 
'numpy.arange', 'np.arange', (['n_matchups'], {}), '(n_matchups)\n', (3126, 3138), True, 'import numpy as np\n'), ((3470, 3491), 'numpy.arange', 'np.arange', (['n_matchups'], {}), '(n_matchups)\n', (3479, 3491), True, 'import numpy as np\n'), ((4040, 4061), 'numpy.arange', 'np.arange', (['n_matchups'], {}), '(n_matchups)\n', (4049, 4061), True, 'import numpy as np\n'), ((4729, 4795), 'numpy.save', 'np.save', (['"""results/pair-pca_lstm_tanh-z_results.npy"""', 'lstm_pair_pca'], {}), "('results/pair-pca_lstm_tanh-z_results.npy', lstm_pair_pca)\n", (4736, 4795), True, 'import numpy as np\n'), ((5176, 5197), 'numpy.arange', 'np.arange', (['n_matchups'], {}), '(n_matchups)\n', (5185, 5197), True, 'import numpy as np\n'), ((5768, 5800), 'pandas.DataFrame', 'pd.DataFrame', (['lstm_pair_pca_long'], {}), '(lstm_pair_pca_long)\n', (5780, 5800), True, 'import pandas as pd\n'), ((5918, 5956), 'seaborn.set', 'sns.set', ([], {'font_scale': '(1.2)', 'style': '"""white"""'}), "(font_scale=1.2, style='white')\n", (5925, 5956), True, 'import seaborn as sns\n'), ((5957, 6105), 'seaborn.relplot', 'sns.relplot', ([], {'data': 'lstm_pair_pca_trunc', 'x': '"""dimension"""', 'y': '"""variance explained"""', 'hue': '"""type"""', 'col': '"""population"""', 'col_wrap': '(2)', 'linewidth': '(3)', 'kind': '"""line"""'}), "(data=lstm_pair_pca_trunc, x='dimension', y='variance explained',\n hue='type', col='population', col_wrap=2, linewidth=3, kind='line')\n", (5968, 6105), True, 'import seaborn as sns\n'), ((6321, 6342), 'numpy.arange', 'np.arange', (['n_matchups'], {}), '(n_matchups)\n', (6330, 6342), True, 'import numpy as np\n'), ((6677, 6698), 'numpy.arange', 'np.arange', (['n_matchups'], {}), '(n_matchups)\n', (6686, 6698), True, 'import numpy as np\n'), ((7258, 7279), 'numpy.arange', 'np.arange', (['n_matchups'], {}), '(n_matchups)\n', (7267, 7279), True, 'import numpy as np\n'), ((7804, 7872), 'numpy.save', 'np.save', (['"""results/stack-pca_lstm_tanh-z_results.npy"""', 
'lstm_stack_pca'], {}), "('results/stack-pca_lstm_tanh-z_results.npy', lstm_stack_pca)\n", (7811, 7872), True, 'import numpy as np\n'), ((8086, 8107), 'numpy.arange', 'np.arange', (['n_matchups'], {}), '(n_matchups)\n', (8095, 8107), True, 'import numpy as np\n'), ((8393, 8426), 'pandas.DataFrame', 'pd.DataFrame', (['lstm_stack_pca_long'], {}), '(lstm_stack_pca_long)\n', (8405, 8426), True, 'import pandas as pd\n'), ((8546, 8584), 'seaborn.set', 'sns.set', ([], {'font_scale': '(1.2)', 'style': '"""white"""'}), "(font_scale=1.2, style='white')\n", (8553, 8584), True, 'import seaborn as sns\n'), ((8585, 8699), 'seaborn.lineplot', 'sns.lineplot', ([], {'data': 'lstm_stack_pca_trunc', 'x': '"""dimension"""', 'y': '"""variance explained"""', 'hue': '"""population"""', 'linewidth': '(3)'}), "(data=lstm_stack_pca_trunc, x='dimension', y=\n 'variance explained', hue='population', linewidth=3)\n", (8597, 8699), True, 'import seaborn as sns\n'), ((8884, 8905), 'numpy.arange', 'np.arange', (['n_matchups'], {}), '(n_matchups)\n', (8893, 8905), True, 'import numpy as np\n'), ((9117, 9138), 'numpy.arange', 'np.arange', (['n_matchups'], {}), '(n_matchups)\n', (9126, 9138), True, 'import numpy as np\n'), ((9445, 9466), 'numpy.arange', 'np.arange', (['n_matchups'], {}), '(n_matchups)\n', (9454, 9466), True, 'import numpy as np\n'), ((10265, 10298), 'numpy.stack', 'np.stack', (['lstm_pca_reduce'], {'axis': '(0)'}), '(lstm_pca_reduce, axis=0)\n', (10273, 10298), True, 'import numpy as np\n'), ((10300, 10362), 'numpy.save', 'np.save', (['f"""results/lstms_tanh-z_pca-k{k}.npy"""', 'lstm_pca_reduce'], {}), "(f'results/lstms_tanh-z_pca-k{k}.npy', lstm_pca_reduce)\n", (10307, 10362), True, 'import numpy as np\n'), ((10540, 10585), 'numpy.load', 'np.load', (['f"""results/lstms_tanh-z_pca-k{k}.npy"""'], {}), "(f'results/lstms_tanh-z_pca-k{k}.npy')\n", (10547, 10585), True, 'import numpy as np\n'), ((12313, 12382), 'numpy.save', 'np.save', 
(['f"""results/lstm_pca-k{k}_feature_correlations.npy"""', 'pca_corrs'], {}), "(f'results/lstm_pca-k{k}_feature_correlations.npy', pca_corrs)\n", (12320, 12382), True, 'import numpy as np\n'), ((12405, 12481), 'numpy.load', 'np.load', (['"""results/lstm_pca-k100_feature_correlations.npy"""'], {'allow_pickle': '(True)'}), "('results/lstm_pca-k100_feature_correlations.npy', allow_pickle=True)\n", (12412, 12481), True, 'import numpy as np\n'), ((12670, 12697), 'numpy.stack', 'np.stack', (['pca_corr_means', '(1)'], {}), '(pca_corr_means, 1)\n', (12678, 12697), True, 'import numpy as np\n'), ((12770, 12786), 'numpy.arange', 'np.arange', (['(1)', '(10)'], {}), '(1, 10)\n', (12779, 12786), True, 'import numpy as np\n'), ((1242, 1262), 'numpy.arange', 'np.arange', (['n_repeats'], {}), '(n_repeats)\n', (1251, 1262), True, 'import numpy as np\n'), ((2142, 2162), 'numpy.arange', 'np.arange', (['n_repeats'], {}), '(n_repeats)\n', (2151, 2162), True, 'import numpy as np\n'), ((3153, 3173), 'numpy.arange', 'np.arange', (['n_repeats'], {}), '(n_repeats)\n', (3162, 3173), True, 'import numpy as np\n'), ((4102, 4122), 'numpy.arange', 'np.arange', (['n_repeats'], {}), '(n_repeats)\n', (4111, 4122), True, 'import numpy as np\n'), ((5212, 5232), 'numpy.arange', 'np.arange', (['n_repeats'], {}), '(n_repeats)\n', (5221, 5232), True, 'import numpy as np\n'), ((6357, 6377), 'numpy.arange', 'np.arange', (['n_repeats'], {}), '(n_repeats)\n', (6366, 6377), True, 'import numpy as np\n'), ((7346, 7366), 'numpy.arange', 'np.arange', (['n_repeats'], {}), '(n_repeats)\n', (7355, 7366), True, 'import numpy as np\n'), ((7545, 7566), 'numpy.vstack', 'np.vstack', (['stack_lstm'], {}), '(stack_lstm)\n', (7554, 7566), True, 'import numpy as np\n'), ((7577, 7596), 'sklearn.decomposition.PCA', 'PCA', ([], {'n_components': 'k'}), '(n_components=k)\n', (7580, 7596), False, 'from sklearn.decomposition import PCA\n'), ((9506, 9526), 'numpy.arange', 'np.arange', (['n_repeats'], {}), '(n_repeats)\n', (9515, 
9526), True, 'import numpy as np\n'), ((9705, 9726), 'numpy.vstack', 'np.vstack', (['stack_lstm'], {}), '(stack_lstm)\n', (9714, 9726), True, 'import numpy as np\n'), ((9737, 9756), 'sklearn.decomposition.PCA', 'PCA', ([], {'n_components': 'k'}), '(n_components=k)\n', (9740, 9756), False, 'from sklearn.decomposition import PCA\n'), ((9828, 9865), 'numpy.sum', 'np.sum', (['pca.explained_variance_ratio_'], {}), '(pca.explained_variance_ratio_)\n', (9834, 9865), True, 'import numpy as np\n'), ((11681, 11718), 'numpy.full', 'np.full', (['(feature_shape + (k,))', 'np.nan'], {}), '(feature_shape + (k,), np.nan)\n', (11688, 11718), True, 'import numpy as np\n'), ((11743, 11764), 'numpy.arange', 'np.arange', (['n_matchups'], {}), '(n_matchups)\n', (11752, 11764), True, 'import numpy as np\n'), ((12791, 12845), 'matplotlib.pyplot.matshow', 'plt.matshow', (['pca_corr_means[..., pc_id]'], {'cmap': '"""RdBu_r"""'}), "(pca_corr_means[..., pc_id], cmap='RdBu_r')\n", (12802, 12845), True, 'import matplotlib.pyplot as plt\n'), ((12850, 12896), 'matplotlib.pyplot.yticks', 'plt.yticks', (['[0, 1, 2, 3]', "['A', 'B', 'C', 'D']"], {}), "([0, 1, 2, 3], ['A', 'B', 'C', 'D'])\n", (12860, 12896), True, 'import matplotlib.pyplot as plt\n'), ((12971, 13023), 'matplotlib.pyplot.title', 'plt.title', (['f"""PCA Feature Correlations for PC{pc_id}"""'], {}), "(f'PCA Feature Correlations for PC{pc_id}')\n", (12980, 13023), True, 'import matplotlib.pyplot as plt\n'), ((13028, 13042), 'matplotlib.pyplot.colorbar', 'plt.colorbar', ([], {}), '()\n', (13040, 13042), True, 'import matplotlib.pyplot as plt\n'), ((1309, 1329), 'numpy.arange', 'np.arange', (['n_players'], {}), '(n_players)\n', (1318, 1329), True, 'import numpy as np\n'), ((2181, 2201), 'numpy.arange', 'np.arange', (['n_players'], {}), '(n_players)\n', (2190, 2201), True, 'import numpy as np\n'), ((3192, 3212), 'numpy.arange', 'np.arange', (['n_players'], {}), '(n_players)\n', (3201, 3212), True, 'import numpy as np\n'), ((3904, 3924), 
'numpy.arange', 'np.arange', (['n_players'], {}), '(n_players)\n', (3913, 3924), True, 'import numpy as np\n'), ((5146, 5164), 'numpy.arange', 'np.arange', (['n_pairs'], {}), '(n_pairs)\n', (5155, 5164), True, 'import numpy as np\n'), ((5251, 5269), 'numpy.arange', 'np.arange', (['n_pairs'], {}), '(n_pairs)\n', (5260, 5269), True, 'import numpy as np\n'), ((6396, 6414), 'numpy.arange', 'np.arange', (['n_pairs'], {}), '(n_pairs)\n', (6405, 6414), True, 'import numpy as np\n'), ((7386, 7406), 'numpy.arange', 'np.arange', (['n_players'], {}), '(n_players)\n', (7395, 7406), True, 'import numpy as np\n'), ((9546, 9566), 'numpy.arange', 'np.arange', (['n_players'], {}), '(n_players)\n', (9555, 9566), True, 'import numpy as np\n'), ((11035, 11055), 'numpy.nonzero', 'np.nonzero', (['features'], {}), '(features)\n', (11045, 11055), True, 'import numpy as np\n'), ((11791, 11811), 'numpy.arange', 'np.arange', (['n_repeats'], {}), '(n_repeats)\n', (11800, 11811), True, 'import numpy as np\n'), ((12606, 12650), 'numpy.nanmean', 'np.nanmean', (['pca_corrs[game_var]'], {'axis': '(1, 2)'}), '(pca_corrs[game_var], axis=(1, 2))\n', (12616, 12650), True, 'import numpy as np\n'), ((12909, 12943), 'numpy.arange', 'np.arange', (['pca_corr_means.shape[1]'], {}), '(pca_corr_means.shape[1])\n', (12918, 12943), True, 'import numpy as np\n'), ((1384, 1403), 'sklearn.decomposition.PCA', 'PCA', ([], {'n_components': 'k'}), '(n_components=k)\n', (1387, 1403), False, 'from sklearn.decomposition import PCA\n'), ((3554, 3588), 'numpy.median', 'np.median', (['percents_vaf[m, ..., i]'], {}), '(percents_vaf[m, ..., i])\n', (3563, 3588), True, 'import numpy as np\n'), ((3608, 3640), 'numpy.amin', 'np.amin', (['percents_vaf[m, ..., i]'], {}), '(percents_vaf[m, ..., i])\n', (3615, 3640), True, 'import numpy as np\n'), ((3660, 3692), 'numpy.amax', 'np.amax', (['percents_vaf[m, ..., i]'], {}), '(percents_vaf[m, ..., i])\n', (3667, 3692), True, 'import numpy as np\n'), ((4263, 4334), 'numpy.hstack', 
'np.hstack', (['(lstms_matched[m, r, pair[0]], lstms_matched[m, r, pair[1]])'], {}), '((lstms_matched[m, r, pair[0]], lstms_matched[m, r, pair[1]]))\n', (4272, 4334), True, 'import numpy as np\n'), ((4389, 4408), 'sklearn.decomposition.PCA', 'PCA', ([], {'n_components': 'k'}), '(n_components=k)\n', (4392, 4408), False, 'from sklearn.decomposition import PCA\n'), ((9201, 9230), 'numpy.median', 'np.median', (['percents_vaf[m, i]'], {}), '(percents_vaf[m, i])\n', (9210, 9230), True, 'import numpy as np\n'), ((11845, 11865), 'numpy.arange', 'np.arange', (['n_players'], {}), '(n_players)\n', (11854, 11865), True, 'import numpy as np\n'), ((1572, 1610), 'scipy.stats.zscore', 'zscore', (['lstms_matched[m, r, p]'], {'axis': '(0)'}), '(lstms_matched[m, r, p], axis=0)\n', (1578, 1610), False, 'from scipy.stats import zscore\n'), ((4470, 4496), 'scipy.stats.zscore', 'zscore', (['stack_lstm'], {'axis': '(0)'}), '(stack_lstm, axis=0)\n', (4476, 4496), False, 'from scipy.stats import zscore\n'), ((6870, 6905), 'numpy.median', 'np.median', (['percents_vaf[m, :, c, i]'], {}), '(percents_vaf[m, :, c, i])\n', (6879, 6905), True, 'import numpy as np\n'), ((6929, 6962), 'numpy.amin', 'np.amin', (['percents_vaf[m, :, c, i]'], {}), '(percents_vaf[m, :, c, i])\n', (6936, 6962), True, 'import numpy as np\n'), ((6986, 7019), 'numpy.amax', 'np.amax', (['percents_vaf[m, :, c, i]'], {}), '(percents_vaf[m, :, c, i])\n', (6993, 7019), True, 'import numpy as np\n'), ((7438, 7476), 'scipy.stats.zscore', 'zscore', (['lstms_matched[m, r, p]'], {'axis': '(0)'}), '(lstms_matched[m, r, p], axis=0)\n', (7444, 7476), False, 'from scipy.stats import zscore\n'), ((9598, 9636), 'scipy.stats.zscore', 'zscore', (['lstms_matched[m, r, p]'], {'axis': '(0)'}), '(lstms_matched[m, r, p], axis=0)\n', (9604, 9636), False, 'from scipy.stats import zscore\n'), ((9985, 10009), 'numpy.split', 'np.split', (['transformed', '(8)'], {}), '(transformed, 8)\n', (9993, 10009), True, 'import numpy as np\n'), ((10984, 11000), 
'numpy.array', 'np.array', (['labels'], {}), '(labels)\n', (10992, 11000), True, 'import numpy as np\n'), ((11467, 11483), 'numpy.array', 'np.array', (['labels'], {}), '(labels)\n', (11475, 11483), True, 'import numpy as np\n'), ((11897, 11909), 'numpy.arange', 'np.arange', (['k'], {}), '(k)\n', (11906, 11909), True, 'import numpy as np\n'), ((8966, 9027), 'numpy.cumsum', 'np.cumsum', (["lstm_stack_pca[m]['pca'].explained_variance_ratio_"], {}), "(lstm_stack_pca[m]['pca'].explained_variance_ratio_)\n", (8975, 9027), True, 'import numpy as np\n'), ((11941, 12058), 'scipy.stats.pearsonr', 'pearsonr', (['features[matchup_id, repeat_id, player_id, :, 0]', 'lstm_pca[matchup_id, repeat_id, player_id, :, pc_id]'], {}), '(features[matchup_id, repeat_id, player_id, :, 0], lstm_pca[\n matchup_id, repeat_id, player_id, :, pc_id])\n', (11949, 12058), False, 'from scipy.stats import pearsonr\n'), ((3289, 3350), 'numpy.cumsum', 'np.cumsum', (["lstm_pca[m][r][p]['pca'].explained_variance_ratio_"], {}), "(lstm_pca[m][r][p]['pca'].explained_variance_ratio_)\n", (3298, 3350), True, 'import numpy as np\n'), ((6491, 6557), 'numpy.cumsum', 'np.cumsum', (["lstm_pair_pca[m][r][p]['pca'].explained_variance_ratio_"], {}), "(lstm_pair_pca[m][r][p]['pca'].explained_variance_ratio_)\n", (6500, 6557), True, 'import numpy as np\n')] |
from flask_wtf import FlaskForm
from wtforms import *
from wtforms.validators import InputRequired
class LoginForm(FlaskForm):
    """Login form with required username and password fields."""
    # Fix: TextField was a deprecated alias removed in WTForms 3.0;
    # StringField is the drop-in replacement with identical behavior.
    username = StringField('username', validators=[InputRequired()])
    password = PasswordField('password', validators=[InputRequired()])
"wtforms.validators.InputRequired"
] | [((183, 198), 'wtforms.validators.InputRequired', 'InputRequired', ([], {}), '()\n', (196, 198), False, 'from wtforms.validators import InputRequired\n'), ((250, 265), 'wtforms.validators.InputRequired', 'InputRequired', ([], {}), '()\n', (263, 265), False, 'from wtforms.validators import InputRequired\n')] |
import time
from memory import Memory
from constfile.constkey import *
from constfile.ddpgqueue import fifo, model_queue
from ddpg import DDPG
from utils.configsupport import config
from utils.logsupport import log
class train(object):
    """Training-loop driver: drains experience from a queue into replay
    memory, updates the DDPG model, and publishes refreshed weights."""

    def __init__(self):
        # Build the agent and restore any previously saved weights.
        self.model = DDPG()
        self.model.load_weights()
        self.memory = Memory()  # replay buffer
        self.queue = fifo  # inbound experience queue
        self.model_queue = model_queue  # outbound (actor, critic) queue
        self.warmup = config.get(MODEL_WARMUP)  # min samples before training
        # NOTE(review): time.time() measures seconds while this is a config
        # value * 1000 -- confirm the intended unit of MODEL_SAVE_FREQ.
        self.modelsavefreq = config.get(MODEL_SAVE_FREQ) * 1000

    def queue_pop(self):
        """Move one queued batch of experiences into replay memory."""
        if not self.queue.empty():
            data = self.queue.get()
            self.memory.append_experiences(data)

    def train_model(self):
        """Run the blocking training loop forever.

        Waits until the replay buffer holds at least ``warmup`` samples, then
        repeatedly updates the policy, periodically saving the model and
        publishing the latest actor/critic when ``model_queue`` is empty.
        """
        # Bug fix: `start` was reset at the top of every loop iteration, so
        # `end - start` never exceeded `modelsavefreq` and the periodic model
        # save never fired. The timer must persist across iterations.
        start = time.time()
        while True:
            if len(self.memory) < self.warmup:
                log.warn("not enough samples[{}] in replay buffer!".format(len(self.memory)))
                time.sleep(10)
                start = time.time()
            else:
                end = time.time()
                if end - start > self.modelsavefreq:
                    self.model.save_model()
                    start = end
                self.model.update_policy(self.memory)
                actor, critic = self.model.get_model()
                if self.model_queue.empty():
                    self.model_queue.put((actor, critic))

    def draw_loss(self):
        """Placeholder for loss visualization (not yet implemented)."""
        actor_loss, critic_loss = self.model.get_loss()
        pass
| [
"time.sleep",
"utils.configsupport.config.get",
"ddpg.DDPG",
"memory.Memory",
"time.time"
] | [((284, 290), 'ddpg.DDPG', 'DDPG', ([], {}), '()\n', (288, 290), False, 'from ddpg import DDPG\n'), ((347, 355), 'memory.Memory', 'Memory', ([], {}), '()\n', (353, 355), False, 'from memory import Memory\n'), ((443, 467), 'utils.configsupport.config.get', 'config.get', (['MODEL_WARMUP'], {}), '(MODEL_WARMUP)\n', (453, 467), False, 'from utils.configsupport import config\n'), ((497, 524), 'utils.configsupport.config.get', 'config.get', (['MODEL_SAVE_FREQ'], {}), '(MODEL_SAVE_FREQ)\n', (507, 524), False, 'from utils.configsupport import config\n'), ((747, 758), 'time.time', 'time.time', ([], {}), '()\n', (756, 758), False, 'import time\n'), ((916, 930), 'time.sleep', 'time.sleep', (['(10)'], {}), '(10)\n', (926, 930), False, 'import time\n'), ((955, 966), 'time.time', 'time.time', ([], {}), '()\n', (964, 966), False, 'import time\n'), ((1007, 1018), 'time.time', 'time.time', ([], {}), '()\n', (1016, 1018), False, 'import time\n')] |
import json
import os
import time
from copy import deepcopy
import TransportMaps.Distributions as dist
import TransportMaps.Likelihoods as like
from typing import List, Dict
from matplotlib import pyplot as plt
from factors.Factors import Factor, ExplicitPriorFactor, ImplicitPriorFactor, \
LikelihoodFactor, BinaryFactorMixture, KWayFactor
from sampler.NestedSampling import GlobalNestedSampler
from sampler.SimulationBasedSampler import SimulationBasedSampler
from slam.Variables import Variable, VariableType
from slam.FactorGraph import FactorGraph
from slam.BayesTree import BayesTree, BayesTreeNode
import numpy as np
from sampler.sampler_utils import JointFactor
from utils.Functions import sort_pair_lists
from utils.Visualization import plot_2d_samples
from utils.Functions import sample_dict_to_array, array_order_to_dict
class SolverArgs:
    """Configuration bundle for factor-graph solvers.

    Collects graph/tree construction options and sampling budgets in one
    object so a solver can be configured with a single argument.
    """

    def __init__(self,
                 elimination_method: str = "natural",
                 posterior_sample_num: int = 500,
                 local_sample_num: int = 500,
                 store_clique_samples: bool = False,
                 local_sampling_method="direct",
                 adaptive_posterior_sampling=None,
                 *args, **kwargs
                 ):
        # Heuristic used for the variable-elimination ordering.
        self.elimination_method = elimination_method
        # Number of joint posterior samples drawn from the Bayes tree.
        self.posterior_sample_num = posterior_sample_num
        # Whether per-clique samples are kept around after fitting.
        self.store_clique_samples = store_clique_samples
        # Strategy and budget for clique-local sampling.
        self.local_sampling_method = local_sampling_method
        self.local_sample_num = local_sample_num
        # Optional adaptive posterior-sampling configuration (None = off).
        self.adaptive_posterior_sampling = adaptive_posterior_sampling

    def jsonStr(self):
        """Serialize the current settings as a JSON string."""
        return json.dumps(vars(self))
class CliqueSeparatorFactor(ImplicitPriorFactor):
    """Implicit prior factor encoding the marginal over a clique separator."""

    def sample(self, num_samples: int, **kwargs):
        """Draw ``num_samples`` samples from the separator density.

        Abstract hook: concrete subclasses supply the density model.
        """
        # Bug fix: the original *returned* a NotImplementedError instance
        # instead of raising it, silently handing callers an exception object.
        raise NotImplementedError("implementation depends on density models")
class ConditionalSampler:
    """Interface for models that can sample some variables conditioned on
    observed values of others."""

    def conditional_sample_given_observation(self, conditional_dim,
                                             obs_samples=None,
                                             sample_number=None):
        """Return samples of dimension ``conditional_dim``.

        With ``sample_number`` given, samples of the first
        ``conditional_dim`` variables are returned. With ``obs_samples``
        given (shape = (sample num, dim)), samples of the first
        ``conditional_dim`` variables *after* the observed dimensions are
        returned. Dimensions refer to the vectorized points on manifolds,
        not the manifold dimension itself.
        """
        raise NotImplementedError("Implementation depends on density estimation method.")
class FactorGraphSolver:
"""
This is the abstract class of factor graph solvers.
It mainly works as:
1. the interface for users to define and solve factor graphs.
2. the maintainer of factor graphs and Bayes tree for incremental inference
3. fitting probabilistic models to the working part of factor graph and Bayes tree
4. inference (sampling) on the entire Bayes tree
The derived class may reply on different probabilistic modeling approaches.
"""
    def __init__(self, args: SolverArgs):
        """
        Initialize an empty solver: fresh physical/working graphs, no Bayes
        trees, and empty per-clique caches.

        Parameters
        ----------
        args : SolverArgs
            Solver configuration; includes the elimination-ordering heuristic.
            TODO: this can be a dynamic parameter when updating Bayes tree
        """
        self._args = args
        self._physical_graph = FactorGraph()  # all nodes/factors added so far
        self._working_graph = FactorGraph()  # sub-graph affected by new factors
        self._physical_bayes_tree = None  # tree over the physical graph
        self._working_bayes_tree = None  # tree over the working graph
        self._conditional_couplings = {}  # map from Bayes tree clique to flows
        self._implicit_factors = {}  # map from Bayes tree clique to factor
        self._samples = {}  # map from variable to samples
        self._new_nodes = []  # variables staged but not yet in the graphs
        self._new_factors = []  # factors staged but not yet in the graphs
        self._clique_samples = {}  # map from Bayes tree clique to samples
        self._clique_true_obs = {}  # map from Bayes tree clique to observations which augments flow models
        self._clique_density_model = {}  # map from Bayes tree clique to flow model
        # map from Bayes tree clique to variable pattern; (Separator,Frontal) in reverse elimination order
        self._clique_variable_pattern = {}
        self._elimination_ordering = []  # current variable elimination order
        self._reverse_ordering_map = {}  # var -> index in reversed ordering
        self._temp_training_loss = {}  # scratch: per-clique training losses
    def set_args(self, args: SolverArgs):
        """Replace the solver configuration (abstract hook)."""
        raise NotImplementedError("Implementation depends on probabilistic modeling approaches.")
    @property
    def elimination_method(self) -> str:
        """Configured elimination-ordering heuristic."""
        return self._args.elimination_method
    @property
    def elimination_ordering(self) -> List[Variable]:
        """Current variable elimination ordering."""
        return self._elimination_ordering
    @property
    def physical_vars(self) -> List[Variable]:
        """Variables already committed to the physical graph."""
        return self._physical_graph.vars
    @property
    def new_vars(self) -> List[Variable]:
        """Variables staged but not yet committed."""
        return self._new_nodes
    @property
    def working_vars(self) -> List[Variable]:
        """Variables in the (affected) working graph."""
        return self._working_graph.vars
    @property
    def physical_factors(self) -> List[Factor]:
        """Factors already committed to the physical graph."""
        return self._physical_graph.factors
    @property
    def new_factors(self) -> List[Factor]:
        """Factors staged but not yet committed."""
        return self._new_factors
    @property
    def working_factors(self) -> List[Factor]:
        """Factors in the (affected) working graph."""
        return self._working_graph.factors
    @property
    def working_factor_graph(self) -> FactorGraph:
        """The working (affected) factor graph."""
        return self._working_graph
    @property
    def physical_factor_graph(self) -> FactorGraph:
        """The full physical factor graph."""
        return self._physical_graph
    @property
    def working_bayes_tree(self) -> BayesTree:
        """Bayes tree built over the working graph."""
        return self._working_bayes_tree
    @property
    def physical_bayes_tree(self) -> BayesTree:
        """Bayes tree built over the physical graph."""
        return self._physical_bayes_tree
def generate_natural_ordering(self) -> None:
"""
Generate the ordering by which nodes are added
"""
self._elimination_ordering = self._physical_graph.vars + self._new_nodes
def generate_pose_first_ordering(self) -> None:
"""
Generate the ordering by which nodes are added and lmk eliminated later
"""
natural_order = self._physical_graph.vars + self._new_nodes
pose_list = []
lmk_list = []
for node in natural_order:
if node._type == VariableType.Landmark:
lmk_list.append(node)
else:
pose_list.append(node)
self._elimination_ordering = pose_list + lmk_list
    def generate_ccolamd_ordering(self) -> None:
        """
        Use CCOLAMD on the working graph to order its variables, keeping the
        physical-only variables in their existing order in front. The most
        recent pose in the working graph is constrained to be eliminated last.
        """
        # Physical-only variables keep their current relative order.
        physical_graph_ordering = [var for var in self._elimination_ordering if var not in self._working_graph.vars]
        # CCOLAMD ordering over the working graph; the latest pose is forced
        # to the end of the ordering.
        working_graph_ordering = self._working_graph.analyze_elimination_ordering(
            method="ccolamd", last_vars=
            [[var for var in self._working_graph.vars if
              var.type == VariableType.Pose][-1]])
        self._elimination_ordering = physical_graph_ordering + working_graph_ordering
def generate_ordering(self) -> None:
"""
Generate the ordering by which Bayes tree should be generated
"""
if self._args.elimination_method == "natural":
self.generate_natural_ordering()
elif self._args.elimination_method == "ccolamd":
self.generate_ccolamd_ordering()
elif self._args.elimination_method == "pose_first":
self.generate_pose_first_ordering()
self._reverse_ordering_map = {
var: index for index, var in
enumerate(self._elimination_ordering[::-1])}
# TODO: Add other ordering methods
def add_node(self, var: Variable = None, name: str = None,
dim: int = None) -> "FactorGraphSolver":
"""
Add a new node
The node has not been added to the physical or current factor graphs
:param var:
:param name: used only when variable is not specified
:param dim: used only when variable is not specified
:return: the current problem
"""
if var:
self._new_nodes.append(var)
else:
self._new_nodes.append(Variable(name, dim))
return self
    def add_factor(self, factor: Factor) -> "FactorGraphSolver":
        """
        Stage a factor for insertion.
        The factor joins the physical/working graphs only on the next call
        to update_physical_and_working_graphs.
        :param factor: factor to stage
        :return: the current problem
        """
        self._new_factors.append(factor)
        return self
    def add_prior_factor(self, vars: List[Variable],
                         distribution: dist.Distribution) -> "FactorGraphSolver":
        """
        Stage an explicit prior factor over the specified variables.
        The factor joins the physical/working graphs only on the next call
        to update_physical_and_working_graphs.
        :param vars: variables the prior is defined over
        :param distribution: the prior distribution
        :return: the current problem
        """
        self._new_factors.append(ExplicitPriorFactor(
            vars=vars, distribution=distribution))
        return self
    def add_likelihood_factor(self, vars: List[Variable],
                              likelihood: like.LikelihoodBase) -> "FactorGraphSolver":
        """
        Stage a likelihood factor over the specified variables.
        The factor joins the physical/working graphs only on the next call
        to update_physical_and_working_graphs.
        :param vars: variables the likelihood is defined over
        :param likelihood: log-likelihood model
        :return: the current problem
        """
        self._new_factors.append(LikelihoodFactor(
            vars=vars, log_likelihood=likelihood))
        return self
    def update_physical_and_working_graphs(self, timer: List[float] = None, device: str = "cpu"
                                           ) -> "FactorGraphSolver":
        """
        Add all new nodes and factors into the physical factor graph,
        retrieve the working factor graph, update Bayes trees.
        :param timer: if given, elapsed wall-clock seconds are appended to it
        :param device: device passed through to recycled density models
        :return: the current problem
        """
        start = time.time()
        # Determine the affected variables in the physical Bayes tree
        old_nodes = set(self.physical_vars)
        # NOTE(review): set.union(*[...]) raises TypeError when _new_factors
        # is empty -- confirm callers always stage at least one factor.
        nodes_of_new_factors = set.union(*[set(factor.vars) for
                                           factor in self._new_factors])
        old_nodes_of_new_factors = set.intersection(old_nodes,
                                                    nodes_of_new_factors)
        # Get the working factor graph
        if self._physical_bayes_tree:  # if not first step, get sub graph
            # Detach sub-trees touched by the new factors; the untouched rest
            # of the physical tree is reattached at the end.
            affected_nodes, sub_bayes_trees = \
                self._physical_bayes_tree. \
                    get_affected_vars_and_partial_bayes_trees(
                    vars=old_nodes_of_new_factors)
            self._working_graph = self._physical_graph.get_sub_factor_graph_with_prior(
                variables=affected_nodes,
                sub_trees=sub_bayes_trees,
                clique_prior_dict=self._implicit_factors)
        else:
            sub_bayes_trees = set()
        for node in self._new_nodes:
            self._working_graph.add_node(node)
        for factor in self._new_factors:
            self._working_graph.add_factor(factor)
        # Get the working Bayes tree (restricted to working-graph variables)
        old_ordering = self._elimination_ordering
        self.generate_ordering()
        self._working_bayes_tree = self._working_graph.get_bayes_tree(
            ordering=[var for var in self._elimination_ordering
                      if var in set(self.working_vars)])
        # Update the physical factor graph
        for node in self._new_nodes:
            self._physical_graph.add_node(node)
        for factor in self._new_factors:
            self._physical_graph.add_factor(factor)
        # Update the physical Bayesian tree
        self._physical_bayes_tree = self._working_bayes_tree.__copy__()
        self._physical_bayes_tree.append_child_bayes_trees(sub_bayes_trees)
        # Delete legacy conditional samplers in the old tree and
        # convert the density model w/o separator at leaves to density model w/ separator.
        cliques_to_delete = set()
        for old_clique in set(self._clique_density_model.keys()).difference(self._physical_bayes_tree.clique_nodes):
            for new_clique in self._working_bayes_tree.clique_nodes:
                if old_clique.vars == new_clique.vars and [var for var in old_ordering if var in old_clique.vars] == \
                        [var for var in self._elimination_ordering if var in new_clique.vars]:
                    # This clique was the root in the old tree but is leaf in the new tree.
                    # If the ordering of variables remains the same, its density model can be re-used.
                    # Update the clique to density model dict
                    self._clique_true_obs[new_clique] = self._clique_true_obs[old_clique]
                    if old_clique in self._clique_variable_pattern:
                        self._clique_variable_pattern[new_clique] = self._clique_variable_pattern[old_clique]
                    if old_clique in self._clique_samples:
                        self._clique_samples[new_clique] = self._clique_samples[old_clique]
                    self._clique_density_model[new_clique] = \
                        self.root_clique_density_model_to_leaf(old_clique, new_clique, device)
                    # since new clique will be skipped, related factors shall be eliminated beforehand.
                    # TODO: update _clique_density_model.keys() in which some clique parents change
                    # TODO: this currently has no impact on results
                    # TODO: if we store all models or clique-depend values on cliques, this issue will disappear
                    new_separator_factor = None
                    if new_clique.separator:
                        # extract new factor over separator
                        separator_var_list = sorted(new_clique.separator, key=lambda x: self._reverse_ordering_map[x])
                        new_separator_factor = \
                            self.clique_density_to_separator_factor(separator_var_list,
                                                                    self._clique_density_model[new_clique],
                                                                    self._clique_true_obs[old_clique])
                        self._implicit_factors[new_clique] = new_separator_factor
                    self._working_graph = self._working_graph.eliminate_clique_variables(clique=new_clique,
                                                                                         new_factor=new_separator_factor)
                    break
            cliques_to_delete.add(old_clique)
        for old_clique in cliques_to_delete:
            # Drop cached state tied to cliques that no longer exist.
            del self._clique_density_model[old_clique]
            del self._clique_true_obs[old_clique]
            if old_clique in self._clique_variable_pattern:
                del self._clique_variable_pattern[old_clique]
            if old_clique in self._clique_samples:
                del self._clique_samples[old_clique]
        # Clear all newly added variables and factors
        self._new_nodes = []
        self._new_factors = []
        end = time.time()
        if timer is not None:
            timer.append(end - start)
        return self
def root_clique_density_model_to_leaf(self,
old_clique: BayesTreeNode,
new_clique: BayesTreeNode,
device) -> "ConditionalSampler":
"""
when old clique and new clique have same variables but different division of frontal and separator vars,
recycle the density model in the old clique and convert it to that in the new clique.
"""
raise NotImplementedError("Implementation depends on probabilistic modeling")
def clique_density_to_separator_factor(self,
separator_var_list: List[Variable],
density_model,
true_obs: np.ndarray) -> CliqueSeparatorFactor:
"""
extract marginal of separator variables from clique density as separator factor
"""
raise NotImplementedError("Implementation depends on probabilistic modeling")
def incremental_inference(self,
timer: List[float] = None,
clique_dim_timer: List[List[float]] = None,
*args, **kwargs
):
self.fit_tree_density_models(timer=timer,
clique_dim_timer=clique_dim_timer,
*args, **kwargs)
if self._args.adaptive_posterior_sampling is None:
self._samples = self.sample_posterior(timer=timer, *args, **kwargs)
else:
self._samples = self.adaptive_posterior(timer=timer, *args, **kwargs)
return self._samples
def fit_clique_density_model(self,
clique,
samples,
var_ordering,
timer,
*args, **kwargs) -> "ConditionalSampler":
raise NotImplementedError("Implementation depends on probabilistic modeling.")
def adaptive_posterior(self, timer: List[float] = None, *args, **kwargs
) -> Dict[Variable, np.ndarray]:
"""
Generate samples for all variables
"""
raise NotADirectoryError("implementation depends on density models.")
    def fit_tree_density_models(self,
                                timer: List[float] = None,
                                clique_dim_timer: List[List[float]] = None,
                                *args, **kwargs):
        """
        By the order of Bayes tree, perform local sampling and training
        on all cliques.

        For every clique without a fitted density model: draw local training
        samples, fit a conditional density model, derive a factor over the
        separator (if any), and eliminate the clique's variables from the
        working graph.

        :param timer: if given, per-clique sampling durations are appended
        :param clique_dim_timer: if given, [clique.dim, elapsed time] pairs are
            appended for every visited clique
        :return: None; results are stored on self (_clique_density_model,
            _clique_true_obs, _implicit_factors, _working_graph)
        """
        self._temp_training_loss = {}
        # Cliques are popped from the end of the ordering stack.
        clique_ordering = self._working_bayes_tree.clique_ordering()
        total_clique_num = len(clique_ordering)
        clique_cnt = 1
        before_clique_time = time.time()
        while clique_ordering:
            start_clique_time = time.time()
            clique = clique_ordering.pop()
            # Skip cliques whose density model already exists (e.g. re-used
            # from a previous incremental update); only report timing.
            if clique in self._clique_density_model:
                end_clique_time = time.time()
                print(f"\tTime for clique {clique_cnt}/{total_clique_num}: " + str(
                    end_clique_time - start_clique_time) + " sec, "
                      "total time elapsed: " + str(
                    end_clique_time - before_clique_time) + " sec")
                clique_cnt += 1
                if (clique_dim_timer is not None):
                    clique_dim_timer.append([clique.dim, end_clique_time - before_clique_time])
                continue
            # local sampling: draw training data over this clique's variables
            sampler_start = time.time()
            local_samples, sample_var_ordering, true_obs = \
                self.clique_training_sampler(clique,
                                             num_samples=self._args.local_sample_num,
                                             method=self._args.local_sampling_method)
            sampler_end = time.time()
            if timer is not None:
                timer.append(sampler_end - sampler_start)
            self._clique_true_obs[clique] = true_obs
            if self._args.store_clique_samples:
                self._clique_samples[clique] = local_samples
            # Fit the conditional density model on the local samples.
            local_density_model = \
                self.fit_clique_density_model(clique=clique,
                                              samples=local_samples,
                                              var_ordering=sample_var_ordering,
                                              timer=timer)
            self._clique_density_model[clique] = local_density_model
            new_separator_factor = None
            if clique.separator:
                # extract new factor over separator
                separator_list = sorted(clique.separator,
                                        key=lambda x:
                                        self._reverse_ordering_map[x])
                new_separator_factor = self.clique_density_to_separator_factor(separator_list,
                                                                               local_density_model,
                                                                               true_obs)
                self._implicit_factors[clique] = new_separator_factor
            # Eliminate the clique's variables from the working graph; the
            # separator factor (None for the root clique) replaces them.
            self._working_graph = self._working_graph.eliminate_clique_variables(clique=clique,
                                                                                 new_factor=new_separator_factor)
            end_clique_time = time.time()
            print(f"\tTime for clique {clique_cnt}/{total_clique_num}: " + str(
                end_clique_time - start_clique_time) + " sec, "
                  "total time elapsed: " + str(
                end_clique_time - before_clique_time) + " sec" + ", clique_dim is " + str(clique.dim))
            if (clique_dim_timer is not None):
                clique_dim_timer.append([clique.dim, end_clique_time - before_clique_time])
            clique_cnt += 1
def clique_training_sampler(self, clique: BayesTreeNode, num_samples: int, method: str):
r""" This function returns training samples, simulated variables, and unused observations
"""
graph = self._working_graph.get_clique_factor_graph(clique)
variable_pattern = \
self._working_bayes_tree.clique_variable_pattern(clique)
if method == "direct":
sampler = SimulationBasedSampler(factors=graph.factors, vars=variable_pattern)
samples, var_list, unused_obs = sampler.sample(num_samples)
elif method == "nested" or method == "dynamic nested":
ns_sampler = GlobalNestedSampler(nodes=variable_pattern, factors=graph.factors)
samples = ns_sampler.sample(live_points=num_samples, sampling_method=method)
var_list = variable_pattern
unused_obs = np.array([])
else:
raise ValueError("Unknown sampling method.")
return samples, var_list, unused_obs
    def sample_posterior(self, timer: List[float] = None, *args, **kwargs
                         ) -> Dict[Variable, np.ndarray]:
        """
        Generate samples for all variables.

        Traverses the Bayes tree from the root: each clique's frontal
        variables are sampled from its density model, conditioned on the
        clique's true observations and the already-sampled separator
        variables (which belong to ancestor cliques).

        :param timer: if given, the total sampling duration is appended to it
        :return: dict mapping each Variable to an array of samples with
            var.dim columns
        """
        num_samples = self._args.posterior_sample_num
        start = time.time()
        # Depth-first traversal starting at the root clique.
        stack = [self._physical_bayes_tree.root]
        samples = {}
        while stack:
            # Retrieve the working clique
            clique = stack.pop()
            # Local sampling
            frontal_list = sorted(clique.frontal,
                                  key=lambda x: self._reverse_ordering_map[x])
            separator_list = sorted(clique.separator,
                                    key=lambda x: self._reverse_ordering_map[x])
            clique_density_model = self._clique_density_model[clique]
            obs = self._clique_true_obs[clique]
            # Build the conditioning matrix: true observations first, then the
            # samples of every separator variable (already produced by parents).
            aug_separator_samples = np.zeros(shape=(num_samples, 0))
            if len(obs) != 0:
                aug_separator_samples = np.tile(obs, (num_samples, 1))
            for var in separator_list:
                aug_separator_samples = np.hstack((aug_separator_samples,
                                                   samples[var]))
            if aug_separator_samples.shape[1] != 0:
                frontal_samples = clique_density_model. \
                    conditional_sample_given_observation(conditional_dim=clique.frontal_dim,
                                                         obs_samples=aug_separator_samples)
            else:  # the root clique: nothing to condition on
                frontal_samples = clique_density_model. \
                    conditional_sample_given_observation(conditional_dim=clique.frontal_dim,
                                                         sample_number=num_samples)
            # Dispatch samples: slice the frontal sample matrix per variable.
            cur_index = 0
            for var in frontal_list:
                samples[var] = frontal_samples[:,
                               cur_index: cur_index + var.dim]
                cur_index += var.dim
            if clique.children:
                for child in clique.children:
                    stack.append(child)
        end = time.time()
        if timer is not None:
            timer.append(end - start)
        return samples
def plot2d_posterior(self, title: str = None, xlim=None, ylim=None,
marker_size: float = 1, if_legend: bool = False):
# xlim and ylim are tuples
vars = self._elimination_ordering
# list(self._samples.keys())
len_var = len(vars)
for i in range(len_var):
cur_sample = self._samples[vars[i]]
plt.scatter(cur_sample[:, 0], cur_sample[:, 1], marker=".",
s=marker_size)
if xlim is not None:
plt.xlim(xlim)
if ylim is not None:
plt.ylim(ylim)
if if_legend:
plt.legend([var.name for var in vars])
plt.xlabel('x (m)')
plt.ylabel('y (m)')
if title is not None:
plt.title(title)
fig_handle = plt.gcf()
plt.show()
return fig_handle
def results(self):
return list(self._samples.values()), list(self._samples.keys())
def plot2d_mean_points(self, title: str = None, xlim=None, ylim=None,
if_legend: bool = False):
# xlim and ylim are tuples
vars = self._elimination_ordering
# list(self._samples.keys())
len_var = len(vars)
x_list = []
y_list = []
for i in range(len_var):
cur_sample = self._samples[vars[i]]
x = np.mean(cur_sample[:, 0])
y = np.mean(cur_sample[:, 1])
x_list.append(x)
y_list.append(y)
if xlim is not None:
plt.xlim(xlim)
if ylim is not None:
plt.ylim(ylim)
plt.plot(x_list, y_list)
if if_legend:
plt.legend([var.name for var in vars])
plt.xlabel('x (m)')
plt.ylabel('y (m)')
if title is not None:
plt.title(title)
fig_handle = plt.gcf()
plt.show()
return fig_handle
def plot2d_mean_rbt_only(self, title: str = None, xlim=None, ylim=None,
if_legend: bool = False, fname=None, front_size=None, show_plot=False, **kwargs):
# xlim and ylim are tuples
vars = self._elimination_ordering
# list(self._samples.keys())
len_var = len(vars)
x_list = []
y_list = []
lmk_list = []
for i in range(len_var):
if vars[i]._type == VariableType.Landmark:
lmk_list.append(vars[i])
else:
cur_sample = self._samples[vars[i]]
x = np.mean(cur_sample[:, 0])
y = np.mean(cur_sample[:, 1])
x_list.append(x)
y_list.append(y)
if xlim is not None:
plt.xlim(xlim)
if ylim is not None:
plt.ylim(ylim)
plt.plot(x_list, y_list)
for var in lmk_list:
cur_sample = self._samples[var]
plt.scatter(cur_sample[:, 0], cur_sample[:, 1], label=var.name)
if if_legend:
if front_size is not None:
plt.legend()
else:
plt.legend(fontsize=front_size)
if front_size is not None:
plt.xlabel('x (m)', fontsize=front_size)
plt.ylabel('y (m)', fontsize=front_size)
else:
plt.xlabel('x (m)')
plt.ylabel('y (m)')
if title is not None:
if front_size is not None:
plt.title(title, fontsize=front_size)
else:
plt.title(title)
fig_handle = plt.gcf()
if fname is not None:
plt.savefig(fname)
if show_plot:
plt.show()
return fig_handle
def plot2d_MAP_rbt_only(self, title: str = None, xlim=None, ylim=None,
if_legend: bool = False, fname=None, front_size=None):
# xlim and ylim are tuples
vars = self._elimination_ordering
jf = JointFactor(self.physical_factors, vars)
# list(self._samples.keys())
all_sample = sample_dict_to_array(self._samples, vars)
log_pdf = jf.log_pdf(all_sample)
max_idx = np.argmax(log_pdf)
map_sample = all_sample[max_idx:max_idx+1]
map_sample_dict = array_order_to_dict(map_sample, vars)
len_var = len(vars)
x_list = []
y_list = []
lmk_list = []
for i in range(len_var):
if vars[i]._type == VariableType.Landmark:
lmk_list.append(vars[i])
else:
cur_sample = map_sample_dict[vars[i]]
x = np.mean(cur_sample[:, 0])
y = np.mean(cur_sample[:, 1])
x_list.append(x)
y_list.append(y)
if xlim is not None:
plt.xlim(xlim)
if ylim is not None:
plt.ylim(ylim)
plt.plot(x_list, y_list)
for var in lmk_list:
cur_sample = map_sample_dict[var]
plt.scatter(cur_sample[:, 0], cur_sample[:, 1], label=var.name)
if if_legend:
if front_size is not None:
plt.legend()
else:
plt.legend(fontsize=front_size)
if front_size is not None:
plt.xlabel('x (m)', fontsize=front_size)
plt.ylabel('y (m)', fontsize=front_size)
else:
plt.xlabel('x (m)')
plt.ylabel('y (m)')
if title is not None:
if front_size is not None:
plt.title(title, fontsize=front_size)
else:
plt.title(title)
fig_handle = plt.gcf()
if fname is not None:
plt.savefig(fname)
plt.show()
return fig_handle
def plot2d_mean_poses(self, title: str = None, xlim=None, ylim=None,
width: float = 0.05, if_legend: bool = False):
# xlim and ylim are tuples
vars = self._elimination_ordering
# list(self._samples.keys())
len_var = len(vars)
x_list = []
y_list = []
for i in range(len_var):
cur_sample = self._samples[vars[i]]
x = np.mean(cur_sample[:, 0])
y = np.mean(cur_sample[:, 1])
x_list.append(x)
y_list.append(y)
# th_mean = circmean(cur_sample[:,2])
# dx, dy = np.cos(th_mean), np.sin(th_mean)
# plt.arrow(x-dx/2, y-dy/2, dx, dy,
# head_width=4*width,
# width=0.05)
if xlim is not None:
plt.xlim(xlim)
if ylim is not None:
plt.ylim(ylim)
plt.plot(x_list, y_list)
if if_legend:
plt.legend([var.name for var in vars])
plt.xlabel('x (m)')
plt.ylabel('y (m)')
if title is not None:
plt.title(title)
fig_handle = plt.gcf()
plt.show()
return fig_handle
    def plot_factor_graph(self):
        """Placeholder: visualization of the factor graph is not implemented yet."""
        pass
    def plot_bayes_tree(self):
        """Placeholder: visualization of the Bayes tree is not implemented yet."""
        pass
def run_incrementally(case_dir: str, solver: FactorGraphSolver, nodes_factors_by_step, truth=None, traj_plot=False,
                      plot_args=None, check_root_transform=False) -> None:
    """
    Drive ``solver`` incrementally over batches of nodes and factors, dumping
    per-step artifacts (posterior samples, orderings, timing, figures) into a
    newly created run directory under ``case_dir``.

    :param case_dir: case directory; a fresh ``run<k>`` subdirectory is created
    :param solver: the incremental factor-graph solver
    :param nodes_factors_by_step: sequence of (nodes, factors) pairs, one per step
    :param truth: optional ground truth {variable: pose} used in trajectory plots
    :param plot_args: optional dict of extra kwargs for the plotting helpers;
        may contain "show_plot" (falsy to suppress interactive plt.show())
    :param traj_plot: if True, draw full sample trajectories at every step
    :param check_root_transform: if True, dump the root clique's flow-transform
        data and a diagnostic plot at every step
    """
    # BUGFIX: plot_args defaults to None but was used unconditionally with
    # `in` and `**`, which raised TypeError for default callers; normalize it
    # to an empty dict once here (backward compatible for dict callers).
    if plot_args is None:
        plot_args = {}
    # Create a fresh run directory run1, run2, ... under the case directory.
    run_count = 1
    while os.path.exists(f"{case_dir}/run{run_count}"):
        run_count += 1
    os.mkdir(f"{case_dir}/run{run_count}")
    run_dir = f"{case_dir}/run{run_count}"
    print("create run dir: " + run_dir)
    # Record solver parameters for reproducibility.
    params = solver._args.jsonStr()
    print(params)
    with open(f"{run_dir}/parameters", "w+") as file:
        file.write(params)
    num_batches = len(nodes_factors_by_step)
    observed_nodes = []
    step_timer = []
    step_list = []
    posterior_sampling_timer = []
    fitting_timer = []
    # For every mixture factor, track its hypothesis weights over the steps.
    mixture_factor2weights = {}
    show_plot = bool(plot_args.get("show_plot", True))
    for i in range(num_batches):
        # Feed this batch of nodes/factors into the solver.
        step_nodes, step_factors = nodes_factors_by_step[i]
        for node in step_nodes:
            solver.add_node(node)
        for factor in step_factors:
            solver.add_factor(factor)
            if isinstance(factor, BinaryFactorMixture):
                mixture_factor2weights[factor] = []
        observed_nodes += step_nodes
        step_list.append(i)
        step_file_prefix = f"{run_dir}/step{i}"
        detailed_timer = []
        clique_dim_timer = []
        # Incremental graph update + inference, timed per step.
        start = time.time()
        solver.update_physical_and_working_graphs(timer=detailed_timer)
        cur_sample = solver.incremental_inference(timer=detailed_timer, clique_dim_timer=clique_dim_timer)
        end = time.time()
        step_timer.append(end - start)
        print(f"step {i}/{num_batches} time: {step_timer[-1]} sec, "
              f"total time: {sum(step_timer)}")
        # Persist per-step diagnostics.
        with open(f"{step_file_prefix}_ordering", "w+") as file:
            file.write(" ".join([var.name for var in solver.elimination_ordering]))
        with open(f"{step_file_prefix}_split_timing", "w+") as file:
            file.write(" ".join([str(t) for t in detailed_timer]))
        with open(f"{step_file_prefix}_step_training_loss", "w+") as file:
            file.write(json.dumps(solver._temp_training_loss))
        # detailed_timer layout: [graph update, fitting..., posterior sampling].
        posterior_sampling_timer.append(detailed_timer[-1])
        fitting_timer.append(sum(detailed_timer[1:-1]))
        # Save the posterior samples of all variables as one matrix.
        X = np.hstack([cur_sample[var] for var in solver.elimination_ordering])
        np.savetxt(fname=step_file_prefix, X=X)
        # check transformation
        if check_root_transform:
            root_clique = solver.physical_bayes_tree.root
            root_clique_model = solver._clique_density_model[root_clique]
            y = root_clique_model.prior.sample((3000,))
            tx = deepcopy(y)
            if hasattr(root_clique_model, "flows"):
                # Map prior samples back through the flow stack in reverse.
                for f in root_clique_model.flows[::-1]:
                    tx = f.inverse_given_separator(tx, None)
            y = y.detach().numpy()
            tx = tx.detach().numpy()
            np.savetxt(fname=step_file_prefix + '_root_normal_data', X=y)
            np.savetxt(fname=step_file_prefix + '_root_transformed', X=tx)
            plt.figure()
            x_sort, tx_sort = sort_pair_lists(tx[:, 0], y[:, 0])
            plt.plot(x_sort, tx_sort)
            plt.ylabel("T(x)")
            plt.xlabel("x")
            plt.savefig(f"{step_file_prefix}_transform.png", bbox_inches="tight")
            if show_plot:
                plt.show()
            plt.close()
        # clique dim and timing
        np.savetxt(fname=step_file_prefix + '_dim_time', X=np.array(clique_dim_timer))
        if traj_plot:
            plot_2d_samples(samples_mapping=cur_sample,
                            equal_axis=True,
                            truth={variable: pose for variable, pose in
                                   truth.items() if variable in solver.physical_vars},
                            truth_factors={factor for factor in solver.physical_factors if
                                           set(factor.vars).issubset(solver.physical_vars)},
                            title=f'Step {i}',
                            plot_all_meas=False,
                            plot_meas_give_pose=[var for var in step_nodes if var.type == VariableType.Pose],
                            rbt_traj_no_samples=True,
                            truth_R2=True,
                            truth_SE2=False,
                            truth_odometry_color='k',
                            truth_landmark_markersize=10,
                            truth_landmark_marker='x',
                            file_name=f"{step_file_prefix}.png",
                            **plot_args)
        else:
            plot_2d_samples(samples_mapping=cur_sample,
                            equal_axis=True,
                            truth={variable: pose for variable, pose in
                                   truth.items() if variable in solver.physical_vars},
                            truth_factors={factor for factor in solver.physical_factors if
                                           set(factor.vars).issubset(solver.physical_vars)},
                            file_name=f"{step_file_prefix}.png", title=f'Step {i}',
                            **plot_args)
        solver.plot2d_mean_rbt_only(title=f"step {i} posterior", if_legend=False, fname=f"{step_file_prefix}.png", **plot_args)
        # solver.plot2d_MAP_rbt_only(title=f"step {i} posterior", if_legend=False, fname=f"{step_file_prefix}.png")
        # Rewrite cumulative timing files every step so progress is preserved
        # even if the run is interrupted.
        with open(f"{run_dir}/step_timing", "w+") as file:
            file.write(" ".join(str(t) for t in step_timer))
        with open(f"{run_dir}/step_list", "w+") as file:
            file.write(" ".join(str(s) for s in step_list))
        with open(f"{run_dir}/posterior_sampling_timer", "w+") as file:
            file.write(" ".join(str(t) for t in posterior_sampling_timer))
        with open(f"{run_dir}/fitting_timer", "w+") as file:
            file.write(" ".join(str(t) for t in fitting_timer))
        # Summary timing plot (x axis: number of key poses = 5 * (step + 1)).
        plt.figure()
        plt.plot(np.array(step_list)*5+5, step_timer, 'go-', label='Total')
        plt.plot(np.array(step_list)*5+5, posterior_sampling_timer, 'ro-', label='Posterior sampling')
        plt.plot(np.array(step_list)*5+5, fitting_timer, 'bd-', label='Learning NF')
        plt.ylabel(f"Time (sec)")
        plt.xlabel(f"Key poses")
        plt.legend()
        plt.savefig(f"{run_dir}/step_timing.png", bbox_inches="tight")
        if show_plot:
            plt.show()
        plt.close()
        if mixture_factor2weights:
            # write updated hypothesis weights
            with open(run_dir + f'/step{i}.hypoweights', 'w+') as hypo_file:
                plt.figure()
                for factor, weights in mixture_factor2weights.items():
                    hypo_weights = factor.posterior_weights(cur_sample)
                    line = ' '.join([var.name for var in factor.vars]) + ' : ' + ','.join(
                        [str(w) for w in hypo_weights])
                    hypo_file.write(line + '\n')
                    weights.append(hypo_weights)
                    for i_w in range(len(hypo_weights)):
                        plt.plot(np.arange(i + 1 - len(weights), i + 1), np.array(weights)[:, i_w], '-o',
                                 label=f"H{i_w}at{factor.observer_var.name}" if not isinstance(factor, KWayFactor) else
                                 f"{factor.observer_var.name} to {factor.observed_vars[i_w].name}")
            plt.legend()
            plt.xlabel('Step')
            plt.ylabel('Hypothesis weights')
            plt.savefig(run_dir + f'/step{i}_hypoweights.png', dpi=300)
            if show_plot:
                plt.show()
            plt.close()
| [
"matplotlib.pyplot.ylabel",
"numpy.hstack",
"sampler.sampler_utils.JointFactor",
"numpy.array",
"utils.Functions.sample_dict_to_array",
"copy.deepcopy",
"sampler.SimulationBasedSampler.SimulationBasedSampler",
"slam.Variables.Variable",
"os.path.exists",
"numpy.mean",
"matplotlib.pyplot.xlabel",... | [((32008, 32052), 'os.path.exists', 'os.path.exists', (['f"""{case_dir}/run{run_count}"""'], {}), "(f'{case_dir}/run{run_count}')\n", (32022, 32052), False, 'import os\n'), ((32081, 32119), 'os.mkdir', 'os.mkdir', (['f"""{case_dir}/run{run_count}"""'], {}), "(f'{case_dir}/run{run_count}')\n", (32089, 32119), False, 'import os\n'), ((1672, 1697), 'json.dumps', 'json.dumps', (['self.__dict__'], {}), '(self.__dict__)\n', (1682, 1697), False, 'import json\n'), ((3514, 3527), 'slam.FactorGraph.FactorGraph', 'FactorGraph', ([], {}), '()\n', (3525, 3527), False, 'from slam.FactorGraph import FactorGraph\n'), ((3558, 3571), 'slam.FactorGraph.FactorGraph', 'FactorGraph', ([], {}), '()\n', (3569, 3571), False, 'from slam.FactorGraph import FactorGraph\n'), ((9948, 9959), 'time.time', 'time.time', ([], {}), '()\n', (9957, 9959), False, 'import time\n'), ((15198, 15209), 'time.time', 'time.time', ([], {}), '()\n', (15207, 15209), False, 'import time\n'), ((18262, 18273), 'time.time', 'time.time', ([], {}), '()\n', (18271, 18273), False, 'import time\n'), ((22714, 22725), 'time.time', 'time.time', ([], {}), '()\n', (22723, 22725), False, 'import time\n'), ((24602, 24613), 'time.time', 'time.time', ([], {}), '()\n', (24611, 24613), False, 'import time\n'), ((25398, 25417), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""x (m)"""'], {}), "('x (m)')\n", (25408, 25417), True, 'from matplotlib import pyplot as plt\n'), ((25426, 25445), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""y (m)"""'], {}), "('y (m)')\n", (25436, 25445), True, 'from matplotlib import pyplot as plt\n'), ((25526, 25535), 'matplotlib.pyplot.gcf', 'plt.gcf', ([], {}), '()\n', (25533, 25535), True, 'from matplotlib import pyplot as plt\n'), ((25544, 25554), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (25552, 25554), True, 'from matplotlib import pyplot as plt\n'), ((26346, 26370), 'matplotlib.pyplot.plot', 'plt.plot', (['x_list', 'y_list'], {}), '(x_list, y_list)\n', 
(26354, 26370), True, 'from matplotlib import pyplot as plt\n'), ((26452, 26471), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""x (m)"""'], {}), "('x (m)')\n", (26462, 26471), True, 'from matplotlib import pyplot as plt\n'), ((26480, 26499), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""y (m)"""'], {}), "('y (m)')\n", (26490, 26499), True, 'from matplotlib import pyplot as plt\n'), ((26580, 26589), 'matplotlib.pyplot.gcf', 'plt.gcf', ([], {}), '()\n', (26587, 26589), True, 'from matplotlib import pyplot as plt\n'), ((26598, 26608), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (26606, 26608), True, 'from matplotlib import pyplot as plt\n'), ((27536, 27560), 'matplotlib.pyplot.plot', 'plt.plot', (['x_list', 'y_list'], {}), '(x_list, y_list)\n', (27544, 27560), True, 'from matplotlib import pyplot as plt\n'), ((28282, 28291), 'matplotlib.pyplot.gcf', 'plt.gcf', ([], {}), '()\n', (28289, 28291), True, 'from matplotlib import pyplot as plt\n'), ((28674, 28714), 'sampler.sampler_utils.JointFactor', 'JointFactor', (['self.physical_factors', 'vars'], {}), '(self.physical_factors, vars)\n', (28685, 28714), False, 'from sampler.sampler_utils import JointFactor\n'), ((28773, 28814), 'utils.Functions.sample_dict_to_array', 'sample_dict_to_array', (['self._samples', 'vars'], {}), '(self._samples, vars)\n', (28793, 28814), False, 'from utils.Functions import sample_dict_to_array, array_order_to_dict\n'), ((28883, 28901), 'numpy.argmax', 'np.argmax', (['log_pdf'], {}), '(log_pdf)\n', (28892, 28901), True, 'import numpy as np\n'), ((28981, 29018), 'utils.Functions.array_order_to_dict', 'array_order_to_dict', (['map_sample', 'vars'], {}), '(map_sample, vars)\n', (29000, 29018), False, 'from utils.Functions import sample_dict_to_array, array_order_to_dict\n'), ((29620, 29644), 'matplotlib.pyplot.plot', 'plt.plot', (['x_list', 'y_list'], {}), '(x_list, y_list)\n', (29628, 29644), True, 'from matplotlib import pyplot as plt\n'), ((30368, 30377), 'matplotlib.pyplot.gcf', 
'plt.gcf', ([], {}), '()\n', (30375, 30377), True, 'from matplotlib import pyplot as plt\n'), ((30447, 30457), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (30455, 30457), True, 'from matplotlib import pyplot as plt\n'), ((31406, 31430), 'matplotlib.pyplot.plot', 'plt.plot', (['x_list', 'y_list'], {}), '(x_list, y_list)\n', (31414, 31430), True, 'from matplotlib import pyplot as plt\n'), ((31512, 31531), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""x (m)"""'], {}), "('x (m)')\n", (31522, 31531), True, 'from matplotlib import pyplot as plt\n'), ((31540, 31559), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""y (m)"""'], {}), "('y (m)')\n", (31550, 31559), True, 'from matplotlib import pyplot as plt\n'), ((31640, 31649), 'matplotlib.pyplot.gcf', 'plt.gcf', ([], {}), '()\n', (31647, 31649), True, 'from matplotlib import pyplot as plt\n'), ((31658, 31668), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (31666, 31668), True, 'from matplotlib import pyplot as plt\n'), ((33187, 33198), 'time.time', 'time.time', ([], {}), '()\n', (33196, 33198), False, 'import time\n'), ((33392, 33403), 'time.time', 'time.time', ([], {}), '()\n', (33401, 33403), False, 'import time\n'), ((33966, 34004), 'json.dumps', 'json.dumps', (['solver._temp_training_loss'], {}), '(solver._temp_training_loss)\n', (33976, 34004), False, 'import json\n'), ((34195, 34262), 'numpy.hstack', 'np.hstack', (['[cur_sample[var] for var in solver.elimination_ordering]'], {}), '([cur_sample[var] for var in solver.elimination_ordering])\n', (34204, 34262), True, 'import numpy as np\n'), ((34271, 34310), 'numpy.savetxt', 'np.savetxt', ([], {'fname': 'step_file_prefix', 'X': 'X'}), '(fname=step_file_prefix, X=X)\n', (34281, 34310), True, 'import numpy as np\n'), ((37916, 37928), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (37926, 37928), True, 'from matplotlib import pyplot as plt\n'), ((38201, 38226), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['f"""Time (sec)"""'], {}), 
"(f'Time (sec)')\n", (38211, 38226), True, 'from matplotlib import pyplot as plt\n'), ((38235, 38259), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['f"""Key poses"""'], {}), "(f'Key poses')\n", (38245, 38259), True, 'from matplotlib import pyplot as plt\n'), ((38268, 38280), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (38278, 38280), True, 'from matplotlib import pyplot as plt\n'), ((38289, 38351), 'matplotlib.pyplot.savefig', 'plt.savefig', (['f"""{run_dir}/step_timing.png"""'], {'bbox_inches': '"""tight"""'}), "(f'{run_dir}/step_timing.png', bbox_inches='tight')\n", (38300, 38351), True, 'from matplotlib import pyplot as plt\n'), ((38393, 38404), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (38402, 38404), True, 'from matplotlib import pyplot as plt\n'), ((8980, 9037), 'factors.Factors.ExplicitPriorFactor', 'ExplicitPriorFactor', ([], {'vars': 'vars', 'distribution': 'distribution'}), '(vars=vars, distribution=distribution)\n', (8999, 9037), False, 'from factors.Factors import Factor, ExplicitPriorFactor, ImplicitPriorFactor, LikelihoodFactor, BinaryFactorMixture, KWayFactor\n'), ((9484, 9538), 'factors.Factors.LikelihoodFactor', 'LikelihoodFactor', ([], {'vars': 'vars', 'log_likelihood': 'likelihood'}), '(vars=vars, log_likelihood=likelihood)\n', (9500, 9538), False, 'from factors.Factors import Factor, ExplicitPriorFactor, ImplicitPriorFactor, LikelihoodFactor, BinaryFactorMixture, KWayFactor\n'), ((18337, 18348), 'time.time', 'time.time', ([], {}), '()\n', (18346, 18348), False, 'import time\n'), ((19062, 19073), 'time.time', 'time.time', ([], {}), '()\n', (19071, 19073), False, 'import time\n'), ((19386, 19397), 'time.time', 'time.time', ([], {}), '()\n', (19395, 19397), False, 'import time\n'), ((20931, 20942), 'time.time', 'time.time', ([], {}), '()\n', (20940, 20942), False, 'import time\n'), ((21865, 21933), 'sampler.SimulationBasedSampler.SimulationBasedSampler', 'SimulationBasedSampler', ([], {'factors': 'graph.factors', 
'vars': 'variable_pattern'}), '(factors=graph.factors, vars=variable_pattern)\n', (21887, 21933), False, 'from sampler.SimulationBasedSampler import SimulationBasedSampler\n'), ((23343, 23375), 'numpy.zeros', 'np.zeros', ([], {'shape': '(num_samples, 0)'}), '(shape=(num_samples, 0))\n', (23351, 23375), True, 'import numpy as np\n'), ((25090, 25164), 'matplotlib.pyplot.scatter', 'plt.scatter', (['cur_sample[:, 0]', 'cur_sample[:, 1]'], {'marker': '"""."""', 's': 'marker_size'}), "(cur_sample[:, 0], cur_sample[:, 1], marker='.', s=marker_size)\n", (25101, 25164), True, 'from matplotlib import pyplot as plt\n'), ((25351, 25389), 'matplotlib.pyplot.legend', 'plt.legend', (['[var.name for var in vars]'], {}), '([var.name for var in vars])\n', (25361, 25389), True, 'from matplotlib import pyplot as plt\n'), ((25488, 25504), 'matplotlib.pyplot.title', 'plt.title', (['title'], {}), '(title)\n', (25497, 25504), True, 'from matplotlib import pyplot as plt\n'), ((26084, 26109), 'numpy.mean', 'np.mean', (['cur_sample[:, 0]'], {}), '(cur_sample[:, 0])\n', (26091, 26109), True, 'import numpy as np\n'), ((26126, 26151), 'numpy.mean', 'np.mean', (['cur_sample[:, 1]'], {}), '(cur_sample[:, 1])\n', (26133, 26151), True, 'import numpy as np\n'), ((26405, 26443), 'matplotlib.pyplot.legend', 'plt.legend', (['[var.name for var in vars]'], {}), '([var.name for var in vars])\n', (26415, 26443), True, 'from matplotlib import pyplot as plt\n'), ((26542, 26558), 'matplotlib.pyplot.title', 'plt.title', (['title'], {}), '(title)\n', (26551, 26558), True, 'from matplotlib import pyplot as plt\n'), ((27646, 27709), 'matplotlib.pyplot.scatter', 'plt.scatter', (['cur_sample[:, 0]', 'cur_sample[:, 1]'], {'label': 'var.name'}), '(cur_sample[:, 0], cur_sample[:, 1], label=var.name)\n', (27657, 27709), True, 'from matplotlib import pyplot as plt\n'), ((27914, 27954), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""x (m)"""'], {'fontsize': 'front_size'}), "('x (m)', fontsize=front_size)\n", (27924, 
27954), True, 'from matplotlib import pyplot as plt\n'), ((27967, 28007), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""y (m)"""'], {'fontsize': 'front_size'}), "('y (m)', fontsize=front_size)\n", (27977, 28007), True, 'from matplotlib import pyplot as plt\n'), ((28034, 28053), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""x (m)"""'], {}), "('x (m)')\n", (28044, 28053), True, 'from matplotlib import pyplot as plt\n'), ((28066, 28085), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""y (m)"""'], {}), "('y (m)')\n", (28076, 28085), True, 'from matplotlib import pyplot as plt\n'), ((28334, 28352), 'matplotlib.pyplot.savefig', 'plt.savefig', (['fname'], {}), '(fname)\n', (28345, 28352), True, 'from matplotlib import pyplot as plt\n'), ((28387, 28397), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (28395, 28397), True, 'from matplotlib import pyplot as plt\n'), ((29732, 29795), 'matplotlib.pyplot.scatter', 'plt.scatter', (['cur_sample[:, 0]', 'cur_sample[:, 1]'], {'label': 'var.name'}), '(cur_sample[:, 0], cur_sample[:, 1], label=var.name)\n', (29743, 29795), True, 'from matplotlib import pyplot as plt\n'), ((30000, 30040), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""x (m)"""'], {'fontsize': 'front_size'}), "('x (m)', fontsize=front_size)\n", (30010, 30040), True, 'from matplotlib import pyplot as plt\n'), ((30053, 30093), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""y (m)"""'], {'fontsize': 'front_size'}), "('y (m)', fontsize=front_size)\n", (30063, 30093), True, 'from matplotlib import pyplot as plt\n'), ((30120, 30139), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""x (m)"""'], {}), "('x (m)')\n", (30130, 30139), True, 'from matplotlib import pyplot as plt\n'), ((30152, 30171), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""y (m)"""'], {}), "('y (m)')\n", (30162, 30171), True, 'from matplotlib import pyplot as plt\n'), ((30420, 30438), 'matplotlib.pyplot.savefig', 'plt.savefig', (['fname'], {}), '(fname)\n', (30431, 30438), True, 'from 
matplotlib import pyplot as plt\n'), ((30910, 30935), 'numpy.mean', 'np.mean', (['cur_sample[:, 0]'], {}), '(cur_sample[:, 0])\n', (30917, 30935), True, 'import numpy as np\n'), ((30952, 30977), 'numpy.mean', 'np.mean', (['cur_sample[:, 1]'], {}), '(cur_sample[:, 1])\n', (30959, 30977), True, 'import numpy as np\n'), ((31465, 31503), 'matplotlib.pyplot.legend', 'plt.legend', (['[var.name for var in vars]'], {}), '([var.name for var in vars])\n', (31475, 31503), True, 'from matplotlib import pyplot as plt\n'), ((31602, 31618), 'matplotlib.pyplot.title', 'plt.title', (['title'], {}), '(title)\n', (31611, 31618), True, 'from matplotlib import pyplot as plt\n'), ((34581, 34592), 'copy.deepcopy', 'deepcopy', (['y'], {}), '(y)\n', (34589, 34592), False, 'from copy import deepcopy\n'), ((34846, 34907), 'numpy.savetxt', 'np.savetxt', ([], {'fname': "(step_file_prefix + '_root_normal_data')", 'X': 'y'}), "(fname=step_file_prefix + '_root_normal_data', X=y)\n", (34856, 34907), True, 'import numpy as np\n'), ((34920, 34982), 'numpy.savetxt', 'np.savetxt', ([], {'fname': "(step_file_prefix + '_root_transformed')", 'X': 'tx'}), "(fname=step_file_prefix + '_root_transformed', X=tx)\n", (34930, 34982), True, 'import numpy as np\n'), ((34996, 35008), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (35006, 35008), True, 'from matplotlib import pyplot as plt\n'), ((35039, 35073), 'utils.Functions.sort_pair_lists', 'sort_pair_lists', (['tx[:, 0]', 'y[:, 0]'], {}), '(tx[:, 0], y[:, 0])\n', (35054, 35073), False, 'from utils.Functions import sort_pair_lists\n'), ((35084, 35109), 'matplotlib.pyplot.plot', 'plt.plot', (['x_sort', 'tx_sort'], {}), '(x_sort, tx_sort)\n', (35092, 35109), True, 'from matplotlib import pyplot as plt\n'), ((35122, 35140), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""T(x)"""'], {}), "('T(x)')\n", (35132, 35140), True, 'from matplotlib import pyplot as plt\n'), ((35153, 35168), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""x"""'], {}), 
"('x')\n", (35163, 35168), True, 'from matplotlib import pyplot as plt\n'), ((35181, 35250), 'matplotlib.pyplot.savefig', 'plt.savefig', (['f"""{step_file_prefix}_transform.png"""'], {'bbox_inches': '"""tight"""'}), "(f'{step_file_prefix}_transform.png', bbox_inches='tight')\n", (35192, 35250), True, 'from matplotlib import pyplot as plt\n'), ((35300, 35311), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (35309, 35311), True, 'from matplotlib import pyplot as plt\n'), ((38374, 38384), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (38382, 38384), True, 'from matplotlib import pyplot as plt\n'), ((38570, 38582), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (38580, 38582), True, 'from matplotlib import pyplot as plt\n'), ((39361, 39373), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (39371, 39373), True, 'from matplotlib import pyplot as plt\n'), ((39386, 39404), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Step"""'], {}), "('Step')\n", (39396, 39404), True, 'from matplotlib import pyplot as plt\n'), ((39417, 39449), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Hypothesis weights"""'], {}), "('Hypothesis weights')\n", (39427, 39449), True, 'from matplotlib import pyplot as plt\n'), ((39462, 39521), 'matplotlib.pyplot.savefig', 'plt.savefig', (["(run_dir + f'/step{i}_hypoweights.png')"], {'dpi': '(300)'}), "(run_dir + f'/step{i}_hypoweights.png', dpi=300)\n", (39473, 39521), True, 'from matplotlib import pyplot as plt\n'), ((39571, 39582), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (39580, 39582), True, 'from matplotlib import pyplot as plt\n'), ((8209, 8228), 'slam.Variables.Variable', 'Variable', (['name', 'dim'], {}), '(name, dim)\n', (8217, 8228), False, 'from slam.Variables import Variable, VariableType\n'), ((18479, 18490), 'time.time', 'time.time', ([], {}), '()\n', (18488, 18490), False, 'import time\n'), ((22094, 22160), 'sampler.NestedSampling.GlobalNestedSampler', 'GlobalNestedSampler', 
([], {'nodes': 'variable_pattern', 'factors': 'graph.factors'}), '(nodes=variable_pattern, factors=graph.factors)\n', (22113, 22160), False, 'from sampler.NestedSampling import GlobalNestedSampler\n'), ((22315, 22327), 'numpy.array', 'np.array', (['[]'], {}), '([])\n', (22323, 22327), True, 'import numpy as np\n'), ((23447, 23477), 'numpy.tile', 'np.tile', (['obs', '(num_samples, 1)'], {}), '(obs, (num_samples, 1))\n', (23454, 23477), True, 'import numpy as np\n'), ((23557, 23605), 'numpy.hstack', 'np.hstack', (['(aug_separator_samples, samples[var])'], {}), '((aug_separator_samples, samples[var]))\n', (23566, 23605), True, 'import numpy as np\n'), ((25238, 25252), 'matplotlib.pyplot.xlim', 'plt.xlim', (['xlim'], {}), '(xlim)\n', (25246, 25252), True, 'from matplotlib import pyplot as plt\n'), ((25302, 25316), 'matplotlib.pyplot.ylim', 'plt.ylim', (['ylim'], {}), '(ylim)\n', (25310, 25316), True, 'from matplotlib import pyplot as plt\n'), ((26259, 26273), 'matplotlib.pyplot.xlim', 'plt.xlim', (['xlim'], {}), '(xlim)\n', (26267, 26273), True, 'from matplotlib import pyplot as plt\n'), ((26323, 26337), 'matplotlib.pyplot.ylim', 'plt.ylim', (['ylim'], {}), '(ylim)\n', (26331, 26337), True, 'from matplotlib import pyplot as plt\n'), ((27246, 27271), 'numpy.mean', 'np.mean', (['cur_sample[:, 0]'], {}), '(cur_sample[:, 0])\n', (27253, 27271), True, 'import numpy as np\n'), ((27292, 27317), 'numpy.mean', 'np.mean', (['cur_sample[:, 1]'], {}), '(cur_sample[:, 1])\n', (27299, 27317), True, 'import numpy as np\n'), ((27787, 27799), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (27797, 27799), True, 'from matplotlib import pyplot as plt\n'), ((27834, 27865), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'fontsize': 'front_size'}), '(fontsize=front_size)\n', (27844, 27865), True, 'from matplotlib import pyplot as plt\n'), ((28172, 28209), 'matplotlib.pyplot.title', 'plt.title', (['title'], {'fontsize': 'front_size'}), '(title, fontsize=front_size)\n', (28181, 
28209), True, 'from matplotlib import pyplot as plt\n'), ((28244, 28260), 'matplotlib.pyplot.title', 'plt.title', (['title'], {}), '(title)\n', (28253, 28260), True, 'from matplotlib import pyplot as plt\n'), ((29330, 29355), 'numpy.mean', 'np.mean', (['cur_sample[:, 0]'], {}), '(cur_sample[:, 0])\n', (29337, 29355), True, 'import numpy as np\n'), ((29376, 29401), 'numpy.mean', 'np.mean', (['cur_sample[:, 1]'], {}), '(cur_sample[:, 1])\n', (29383, 29401), True, 'import numpy as np\n'), ((29873, 29885), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (29883, 29885), True, 'from matplotlib import pyplot as plt\n'), ((29920, 29951), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'fontsize': 'front_size'}), '(fontsize=front_size)\n', (29930, 29951), True, 'from matplotlib import pyplot as plt\n'), ((30258, 30295), 'matplotlib.pyplot.title', 'plt.title', (['title'], {'fontsize': 'front_size'}), '(title, fontsize=front_size)\n', (30267, 30295), True, 'from matplotlib import pyplot as plt\n'), ((30330, 30346), 'matplotlib.pyplot.title', 'plt.title', (['title'], {}), '(title)\n', (30339, 30346), True, 'from matplotlib import pyplot as plt\n'), ((31319, 31333), 'matplotlib.pyplot.xlim', 'plt.xlim', (['xlim'], {}), '(xlim)\n', (31327, 31333), True, 'from matplotlib import pyplot as plt\n'), ((31383, 31397), 'matplotlib.pyplot.ylim', 'plt.ylim', (['ylim'], {}), '(ylim)\n', (31391, 31397), True, 'from matplotlib import pyplot as plt\n'), ((35277, 35287), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (35285, 35287), True, 'from matplotlib import pyplot as plt\n'), ((35404, 35430), 'numpy.array', 'np.array', (['clique_dim_timer'], {}), '(clique_dim_timer)\n', (35412, 35430), True, 'import numpy as np\n'), ((39548, 39558), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (39556, 39558), True, 'from matplotlib import pyplot as plt\n'), ((27441, 27455), 'matplotlib.pyplot.xlim', 'plt.xlim', (['xlim'], {}), '(xlim)\n', (27449, 27455), True, 'from 
matplotlib import pyplot as plt\n'), ((27513, 27527), 'matplotlib.pyplot.ylim', 'plt.ylim', (['ylim'], {}), '(ylim)\n', (27521, 27527), True, 'from matplotlib import pyplot as plt\n'), ((29525, 29539), 'matplotlib.pyplot.xlim', 'plt.xlim', (['xlim'], {}), '(xlim)\n', (29533, 29539), True, 'from matplotlib import pyplot as plt\n'), ((29597, 29611), 'matplotlib.pyplot.ylim', 'plt.ylim', (['ylim'], {}), '(ylim)\n', (29605, 29611), True, 'from matplotlib import pyplot as plt\n'), ((37946, 37965), 'numpy.array', 'np.array', (['step_list'], {}), '(step_list)\n', (37954, 37965), True, 'import numpy as np\n'), ((38022, 38041), 'numpy.array', 'np.array', (['step_list'], {}), '(step_list)\n', (38030, 38041), True, 'import numpy as np\n'), ((38125, 38144), 'numpy.array', 'np.array', (['step_list'], {}), '(step_list)\n', (38133, 38144), True, 'import numpy as np\n'), ((39074, 39091), 'numpy.array', 'np.array', (['weights'], {}), '(weights)\n', (39082, 39091), True, 'import numpy as np\n')] |
#!/usr/bin/env python
# coding: utf-8
import time
#https://stackoverflow.com/questions/714063/importing-modules-from-parent-folder
import sys
sys.path.insert(0,'../gym')
import numpy as np
from support import *
from model import *
def run_exper(model, steps, get_features, pre_proc_features):
    """Run a trained policy in the rover simulator for `steps` steps, rendering each move.

    Args:
        model: trained policy exposing `predict` (returns per-action scores).
        steps: number of environment steps to execute.
        get_features: feature-extraction callable (kept for interface
            compatibility with callers; not used directly here).
        pre_proc_features: scaler exposing `fit_transform` for the feature vector.
    """
    from environment import SIMULATOR
    # initializing our environment
    my_sim = SIMULATOR()
    # beginning of an episode
    state_temp = my_sim.reset()
    observation = my_sim.state_to_tensor(state_temp)
    r_tup, e_tup, rover_poss = [], [], []
    # main loop
    prev_input = None
    total_moves = 0
    MAX_MOVES = 25  # force an episode reset after this many moves
    for i in range(steps):
        total_moves += 1
        start = time.perf_counter()
        cur_input = observation
        # Flatten the current frame; on the very first step there is no
        # previous frame, so fall back to a zero vector of the raw length.
        # NOTE: `np.float` was removed in NumPy 1.24 -- use the builtin float.
        x = cur_input.astype(float).ravel() if prev_input is not None else np.zeros(70)
        x = x[10:80] if prev_input is not None else x
        # Strip the border cells of the grid: drop every 10th entry, then
        # every 9th entry of what remains.
        x = np.array([x[j] for j in range(len(x)) if not (j % 10 == 0)])
        x = np.array([x[j] for j in range(len(x)) if not ((j - 8) % 9 == 0)])
        x, rov_pos = get_rover_pos(x, r_tup, e_tup, rover_poss)
        x = np.array(x)
        rover_poss.append(rov_pos)
        x_t = pre_proc_features.fit_transform(x.reshape(-1, 1))
        x_t = x_t.reshape(1, INPUT_SIZE)[0]
        print("Shape = ", x_t.shape)
        prev_input = cur_input
        # forward the policy network and sample action according to the proba distribution
        proba = model.predict(np.expand_dims(x_t, axis=1).T)
        end = time.perf_counter()
        action = proba[0].argmax()
        print("Time taken = ", end - start)
        # run one step
        state_temp, reward, done, r_tup, e_tup = my_sim.step(action)
        observation = my_sim.state_to_tensor(state_temp)
        my_sim.render()
        time.sleep(1)
        if total_moves == MAX_MOVES:
            total_moves = 0
            done = True
        # if episode is over, reset to beginning
        if done:
            state_temp = my_sim.reset()
            observation = my_sim.state_to_tensor(state_temp)
            my_sim.render()
            rover_poss = []
if __name__ == '__main__':
    import argparse
    parser = argparse.ArgumentParser()
    parser.add_argument('--data_type', default='sparse',
                        type=str, help='Choose between encoded or sparse')
    parser.add_argument('--n_steps', default=30,
                        type=int, help='Choose a number.')
    parser.add_argument('--demo_file', default='',
                        type=str, help='File for demo.')
    args = parser.parse_args()
    data_type = args.data_type
    steps = args.n_steps
    latest_file = args.demo_file
    model = get_model(data_type)
    get_features, pre_proc_features = get_pre_proc_info(data_type)
    # No checkpoint named on the command line: fall back to the newest one.
    if not latest_file:
        latest_file = get_latest_file()
    # Only run when a checkpoint with the expected prefix was found.
    if latest_file is not None and latest_file.startswith("rock_my_model"):
        print("===>", latest_file)
        model.load_weights(latest_file)
    else:
        print("Model not found: Exiting...")
        sys.exit(0)
    run_exper(model, steps, get_features, pre_proc_features)
| [
"sys.path.insert",
"argparse.ArgumentParser",
"time.perf_counter",
"time.sleep",
"environment.SIMULATOR",
"numpy.array",
"numpy.zeros",
"numpy.expand_dims",
"sys.exit"
] | [((144, 172), 'sys.path.insert', 'sys.path.insert', (['(0)', '"""../gym"""'], {}), "(0, '../gym')\n", (159, 172), False, 'import sys\n'), ((384, 395), 'environment.SIMULATOR', 'SIMULATOR', ([], {}), '()\n', (393, 395), False, 'from environment import SIMULATOR\n'), ((2414, 2439), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (2437, 2439), False, 'import argparse\n'), ((703, 722), 'time.perf_counter', 'time.perf_counter', ([], {}), '()\n', (720, 722), False, 'import time\n'), ((1127, 1138), 'numpy.array', 'np.array', (['x'], {}), '(x)\n', (1135, 1138), True, 'import numpy as np\n'), ((1741, 1760), 'time.perf_counter', 'time.perf_counter', ([], {}), '()\n', (1758, 1760), False, 'import time\n'), ((2021, 2034), 'time.sleep', 'time.sleep', (['(1)'], {}), '(1)\n', (2031, 2034), False, 'import time\n'), ((3269, 3280), 'sys.exit', 'sys.exit', (['(0)'], {}), '(0)\n', (3277, 3280), False, 'import sys\n'), ((833, 845), 'numpy.zeros', 'np.zeros', (['(70)'], {}), '(70)\n', (841, 845), True, 'import numpy as np\n'), ((1696, 1723), 'numpy.expand_dims', 'np.expand_dims', (['x_t'], {'axis': '(1)'}), '(x_t, axis=1)\n', (1710, 1723), True, 'import numpy as np\n')] |
import sys

# Fast line reads (competitive-programming idiom).
input = sys.stdin.readline
sys.setrecursionlimit(10 ** 7)

# First line: grid height and width (the width is parsed but unused).
h, w = (int(tok) for tok in input().split())

# Echo each of the h rows twice, doubling the picture vertically.
for _ in range(h):
    row = input().strip()
    print(row, row, sep='\n')
"sys.setrecursionlimit"
] | [((38, 68), 'sys.setrecursionlimit', 'sys.setrecursionlimit', (['(10 ** 7)'], {}), '(10 ** 7)\n', (59, 68), False, 'import sys\n')] |
# generated by 'clang2py'
# flags '-c -d -l ftd2xx64.dll ftd2xx.h -vvv -o _ftd2xx64.py'
# -*- coding: utf-8 -*-
#
# TARGET arch is: []
# WORD_SIZE is: 4
# POINTER_SIZE is: 8
# LONGDOUBLE_SIZE is: 8
#
import ctypes
# if local wordsize is same as target, keep ctypes pointer function.
# When the local word size matches the 64-bit target, the native ctypes
# pointer type is usable directly; otherwise emulate 64-bit-target pointers.
if ctypes.sizeof(ctypes.c_void_p) == 8:
    POINTER_T = ctypes.POINTER
else:
    # required to access _ctypes
    import _ctypes
    # Emulate a pointer class using the appropriate integer base type.
    # The new class should have:
    #   ['__module__', 'from_param', '_type_', '__dict__', '__weakref__', '__doc__']
    # but each pointee must map to a unique class instance, so that
    # if A == B then POINTER_T(A) == POINTER_T(B).
    ctypes._pointer_t_type_cache = {}

    def POINTER_T(pointee):
        """Return a fake pointer class for `pointee` (one cached class per type).

        The returned class is an unsigned-integer carrier that records the
        pointee type but cannot be dereferenced in this process.
        """
        # specific case for c_void_p
        if pointee is None:  # VOID pointer type. c_void_p.
            pointee = type(None)  # ctypes.c_void_p # ctypes.c_ulong
            clsname = 'c_void'
        else:
            clsname = pointee.__name__
        if clsname in ctypes._pointer_t_type_cache:
            return ctypes._pointer_t_type_cache[clsname]

        # make template
        class _T(_ctypes._SimpleCData,):
            _type_ = 'L'
            _subtype_ = pointee

            def _sub_addr_(self):
                return self.value

            def __repr__(self):
                return '%s(%d)' % (clsname, self.value)

            def contents(self):
                raise TypeError('This is not a ctypes pointer.')

            def __init__(self, **args):
                raise TypeError('This is not a ctypes pointer. It is not instanciable.')

        _class = type('LP_%d_%s' % (8, clsname), (_T,), {})
        ctypes._pointer_t_type_cache[clsname] = _class
        return _class
# Fixed-width helper types used by the generated bindings below.
c_int128 = ctypes.c_ubyte*16
c_uint128 = c_int128
void = None
if ctypes.sizeof(ctypes.c_longdouble) == 8:
    c_long_double_t = ctypes.c_longdouble
else:
    c_long_double_t = ctypes.c_ubyte*8
_libraries = {}
# NOTE(review): the DLL is loaded at import time; importing this module fails
# if ftd2xx64.dll is not on the DLL search path.
_libraries['ftd2xx64.dll'] = ctypes.CDLL('ftd2xx64.dll')
# Win32 typedef aliases referenced by the FT_* prototypes below.
PULONG = POINTER_T(ctypes.c_uint32)
PUCHAR = POINTER_T(ctypes.c_ubyte)
DWORD = ctypes.c_uint32
BOOL = ctypes.c_int32
WORD = ctypes.c_uint16
LPWORD = POINTER_T(ctypes.c_uint16)
LPLONG = POINTER_T(ctypes.c_int32)
LPDWORD = POINTER_T(ctypes.c_uint32)
LPVOID = POINTER_T(None)
ULONG = ctypes.c_uint32
UCHAR = ctypes.c_ubyte
USHORT = ctypes.c_uint16
class struct__SECURITY_ATTRIBUTES(ctypes.Structure):
    # Win32 SECURITY_ATTRIBUTES. The PADDING_* members reproduce the 64-bit
    # alignment observed by the generator; do not reorder or remove fields.
    _pack_ = True # source:False
    _fields_ = [
    ('nLength', ctypes.c_uint32),
    ('PADDING_0', ctypes.c_ubyte * 4),
    ('lpSecurityDescriptor', POINTER_T(None)),
    ('bInheritHandle', ctypes.c_int32),
    ('PADDING_1', ctypes.c_ubyte * 4),
     ]
LPSECURITY_ATTRIBUTES = POINTER_T(struct__SECURITY_ATTRIBUTES)
# Win32 OVERLAPPED and its anonymous union/struct members. Fields are
# assigned after the class statements because the types reference each other.
class struct__OVERLAPPED(ctypes.Structure):
    pass
class union__OVERLAPPED_0(ctypes.Union):
    pass
class struct__OVERLAPPED_0_0(ctypes.Structure):
    _pack_ = True # source:False
    _fields_ = [
    ('Offset', ctypes.c_uint32),
    ('OffsetHigh', ctypes.c_uint32),
     ]
union__OVERLAPPED_0._pack_ = True # source:False
union__OVERLAPPED_0._fields_ = [
    ('_0', struct__OVERLAPPED_0_0),
    ('Pointer', POINTER_T(None)),
]
struct__OVERLAPPED._pack_ = True # source:False
struct__OVERLAPPED._fields_ = [
    ('Internal', ctypes.c_uint64),
    ('InternalHigh', ctypes.c_uint64),
    ('_2', union__OVERLAPPED_0),
    ('hEvent', POINTER_T(None)),
]
LPOVERLAPPED = POINTER_T(struct__OVERLAPPED)
# Opaque handle/pointer typedefs used throughout the D2XX API.
PVOID = POINTER_T(None)
PCHAR = POINTER_T(ctypes.c_char)
LPCTSTR = POINTER_T(ctypes.c_char)
HANDLE = POINTER_T(None)
FT_HANDLE = POINTER_T(None)
# Return-status type for every FT_* call; values are the FT_OK enum below.
FT_STATUS = ctypes.c_uint32
# values for enumeration 'c__Ea_FT_OK'
FT_OK = 0
FT_INVALID_HANDLE = 1
FT_DEVICE_NOT_FOUND = 2
FT_DEVICE_NOT_OPENED = 3
FT_IO_ERROR = 4
FT_INSUFFICIENT_RESOURCES = 5
FT_INVALID_PARAMETER = 6
FT_INVALID_BAUD_RATE = 7
FT_DEVICE_NOT_OPENED_FOR_ERASE = 8
FT_DEVICE_NOT_OPENED_FOR_WRITE = 9
FT_FAILED_TO_WRITE_DEVICE = 10
FT_EEPROM_READ_FAILED = 11
FT_EEPROM_WRITE_FAILED = 12
FT_EEPROM_ERASE_FAILED = 13
FT_EEPROM_NOT_PRESENT = 14
FT_EEPROM_NOT_PROGRAMMED = 15
FT_INVALID_ARGS = 16
FT_NOT_SUPPORTED = 17
FT_OTHER_ERROR = 18
FT_DEVICE_LIST_NOT_READY = 19
c__Ea_FT_OK = ctypes.c_int # enum
# Callback type for FT_SetEventNotification-style event handlers.
PFT_EVENT_HANDLER = POINTER_T(ctypes.CFUNCTYPE(None, ctypes.c_uint32, ctypes.c_uint32))
# Device-model identifier; values are the FT_DEVICE_BM enum below.
FT_DEVICE = ctypes.c_uint32
# values for enumeration 'c__Ea_FT_DEVICE_BM'
FT_DEVICE_BM = 0
FT_DEVICE_AM = 1
FT_DEVICE_100AX = 2
FT_DEVICE_UNKNOWN = 3
FT_DEVICE_2232C = 4
FT_DEVICE_232R = 5
FT_DEVICE_2232H = 6
FT_DEVICE_4232H = 7
FT_DEVICE_232H = 8
FT_DEVICE_X_SERIES = 9
FT_DEVICE_4222H_0 = 10
FT_DEVICE_4222H_1_2 = 11
FT_DEVICE_4222H_3 = 12
FT_DEVICE_4222_PROG = 13
FT_DEVICE_900 = 14
FT_DEVICE_930 = 15
FT_DEVICE_UMFTPD3A = 16
c__Ea_FT_DEVICE_BM = ctypes.c_int # enum
# --- Generated FT_* prototype declarations ----------------------------------
# Each entry binds a DLL export and declares its restype/argtypes so ctypes
# marshals arguments correctly; the __doc__ strings record the original C
# signature and its ftd2xx.h line. Do not edit by hand: regenerate instead.
FT_Open = _libraries['ftd2xx64.dll'].FT_Open
FT_Open.restype = FT_STATUS
# FT_Open(deviceNumber, pHandle)
FT_Open.argtypes = [ctypes.c_int32, POINTER_T(POINTER_T(None))]
FT_Open.__doc__ = \
    """FT_STATUS FT_Open(c_int32 deviceNumber, LP_LP_None pHandle)
ftd2xx.h:334"""
FT_OpenEx = _libraries['ftd2xx64.dll'].FT_OpenEx
FT_OpenEx.restype = FT_STATUS
# FT_OpenEx(pArg1, Flags, pHandle)
FT_OpenEx.argtypes = [PVOID, DWORD, POINTER_T(POINTER_T(None))]
FT_OpenEx.__doc__ = \
    """FT_STATUS FT_OpenEx(PVOID pArg1, DWORD Flags, LP_LP_None pHandle)
ftd2xx.h:340"""
FT_ListDevices = _libraries['ftd2xx64.dll'].FT_ListDevices
FT_ListDevices.restype = FT_STATUS
# FT_ListDevices(pArg1, pArg2, Flags)
FT_ListDevices.argtypes = [PVOID, PVOID, DWORD]
FT_ListDevices.__doc__ = \
    """FT_STATUS FT_ListDevices(PVOID pArg1, PVOID pArg2, DWORD Flags)
ftd2xx.h:347"""
FT_Close = _libraries['ftd2xx64.dll'].FT_Close
FT_Close.restype = FT_STATUS
# FT_Close(ftHandle)
FT_Close.argtypes = [FT_HANDLE]
FT_Close.__doc__ = \
    """FT_STATUS FT_Close(FT_HANDLE ftHandle)
ftd2xx.h:354"""
FT_Read = _libraries['ftd2xx64.dll'].FT_Read
FT_Read.restype = FT_STATUS
# FT_Read(ftHandle, lpBuffer, dwBytesToRead, lpBytesReturned)
FT_Read.argtypes = [FT_HANDLE, LPVOID, DWORD, LPDWORD]
FT_Read.__doc__ = \
    """FT_STATUS FT_Read(FT_HANDLE ftHandle, LPVOID lpBuffer, DWORD dwBytesToRead, LPDWORD lpBytesReturned)
ftd2xx.h:359"""
FT_Write = _libraries['ftd2xx64.dll'].FT_Write
FT_Write.restype = FT_STATUS
# FT_Write(ftHandle, lpBuffer, dwBytesToWrite, lpBytesWritten)
FT_Write.argtypes = [FT_HANDLE, LPVOID, DWORD, LPDWORD]
FT_Write.__doc__ = \
    """FT_STATUS FT_Write(FT_HANDLE ftHandle, LPVOID lpBuffer, DWORD dwBytesToWrite, LPDWORD lpBytesWritten)
ftd2xx.h:367"""
FT_IoCtl = _libraries['ftd2xx64.dll'].FT_IoCtl
FT_IoCtl.restype = FT_STATUS
# FT_IoCtl(ftHandle, dwIoControlCode, lpInBuf, nInBufSize, lpOutBuf, nOutBufSize, lpBytesReturned, lpOverlapped)
FT_IoCtl.argtypes = [FT_HANDLE, DWORD, LPVOID, DWORD, LPVOID, DWORD, LPDWORD, LPOVERLAPPED]
FT_IoCtl.__doc__ = \
    """FT_STATUS FT_IoCtl(FT_HANDLE ftHandle, DWORD dwIoControlCode, LPVOID lpInBuf, DWORD nInBufSize, LPVOID lpOutBuf, DWORD nOutBufSize, LPDWORD lpBytesReturned, LPOVERLAPPED lpOverlapped)
ftd2xx.h:375"""
FT_SetBaudRate = _libraries['ftd2xx64.dll'].FT_SetBaudRate
FT_SetBaudRate.restype = FT_STATUS
# FT_SetBaudRate(ftHandle, BaudRate)
FT_SetBaudRate.argtypes = [FT_HANDLE, ULONG]
FT_SetBaudRate.__doc__ = \
    """FT_STATUS FT_SetBaudRate(FT_HANDLE ftHandle, ULONG BaudRate)
ftd2xx.h:387"""
FT_SetDivisor = _libraries['ftd2xx64.dll'].FT_SetDivisor
FT_SetDivisor.restype = FT_STATUS
# FT_SetDivisor(ftHandle, Divisor)
FT_SetDivisor.argtypes = [FT_HANDLE, USHORT]
FT_SetDivisor.__doc__ = \
    """FT_STATUS FT_SetDivisor(FT_HANDLE ftHandle, USHORT Divisor)
ftd2xx.h:393"""
FT_SetDataCharacteristics = _libraries['ftd2xx64.dll'].FT_SetDataCharacteristics
FT_SetDataCharacteristics.restype = FT_STATUS
# FT_SetDataCharacteristics(ftHandle, WordLength, StopBits, Parity)
FT_SetDataCharacteristics.argtypes = [FT_HANDLE, UCHAR, UCHAR, UCHAR]
FT_SetDataCharacteristics.__doc__ = \
    """FT_STATUS FT_SetDataCharacteristics(FT_HANDLE ftHandle, UCHAR WordLength, UCHAR StopBits, UCHAR Parity)
ftd2xx.h:399"""
FT_SetFlowControl = _libraries['ftd2xx64.dll'].FT_SetFlowControl
FT_SetFlowControl.restype = FT_STATUS
# FT_SetFlowControl(ftHandle, FlowControl, XonChar, XoffChar)
FT_SetFlowControl.argtypes = [FT_HANDLE, USHORT, UCHAR, UCHAR]
FT_SetFlowControl.__doc__ = \
    """FT_STATUS FT_SetFlowControl(FT_HANDLE ftHandle, USHORT FlowControl, UCHAR XonChar, UCHAR XoffChar)
ftd2xx.h:407"""
FT_ResetDevice = _libraries['ftd2xx64.dll'].FT_ResetDevice
FT_ResetDevice.restype = FT_STATUS
# FT_ResetDevice(ftHandle)
FT_ResetDevice.argtypes = [FT_HANDLE]
FT_ResetDevice.__doc__ = \
    """FT_STATUS FT_ResetDevice(FT_HANDLE ftHandle)
ftd2xx.h:415"""
FT_SetDtr = _libraries['ftd2xx64.dll'].FT_SetDtr
FT_SetDtr.restype = FT_STATUS
# FT_SetDtr(ftHandle)
FT_SetDtr.argtypes = [FT_HANDLE]
FT_SetDtr.__doc__ = \
    """FT_STATUS FT_SetDtr(FT_HANDLE ftHandle)
ftd2xx.h:420"""
FT_ClrDtr = _libraries['ftd2xx64.dll'].FT_ClrDtr
FT_ClrDtr.restype = FT_STATUS
# FT_ClrDtr(ftHandle)
FT_ClrDtr.argtypes = [FT_HANDLE]
FT_ClrDtr.__doc__ = \
    """FT_STATUS FT_ClrDtr(FT_HANDLE ftHandle)
ftd2xx.h:425"""
FT_SetRts = _libraries['ftd2xx64.dll'].FT_SetRts
FT_SetRts.restype = FT_STATUS
# FT_SetRts(ftHandle)
FT_SetRts.argtypes = [FT_HANDLE]
FT_SetRts.__doc__ = \
    """FT_STATUS FT_SetRts(FT_HANDLE ftHandle)
ftd2xx.h:430"""
FT_ClrRts = _libraries['ftd2xx64.dll'].FT_ClrRts
FT_ClrRts.restype = FT_STATUS
# FT_ClrRts(ftHandle)
FT_ClrRts.argtypes = [FT_HANDLE]
FT_ClrRts.__doc__ = \
    """FT_STATUS FT_ClrRts(FT_HANDLE ftHandle)
ftd2xx.h:435"""
FT_GetModemStatus = _libraries['ftd2xx64.dll'].FT_GetModemStatus
FT_GetModemStatus.restype = FT_STATUS
# FT_GetModemStatus(ftHandle, pModemStatus)
FT_GetModemStatus.argtypes = [FT_HANDLE, POINTER_T(ctypes.c_uint32)]
FT_GetModemStatus.__doc__ = \
    """FT_STATUS FT_GetModemStatus(FT_HANDLE ftHandle, LP_c_uint32 pModemStatus)
ftd2xx.h:440"""
FT_SetChars = _libraries['ftd2xx64.dll'].FT_SetChars
FT_SetChars.restype = FT_STATUS
# FT_SetChars(ftHandle, EventChar, EventCharEnabled, ErrorChar, ErrorCharEnabled)
FT_SetChars.argtypes = [FT_HANDLE, UCHAR, UCHAR, UCHAR, UCHAR]
FT_SetChars.__doc__ = \
    """FT_STATUS FT_SetChars(FT_HANDLE ftHandle, UCHAR EventChar, UCHAR EventCharEnabled, UCHAR ErrorChar, UCHAR ErrorCharEnabled)
ftd2xx.h:446"""
FT_Purge = _libraries['ftd2xx64.dll'].FT_Purge
FT_Purge.restype = FT_STATUS
# FT_Purge(ftHandle, Mask)
FT_Purge.argtypes = [FT_HANDLE, ULONG]
FT_Purge.__doc__ = \
    """FT_STATUS FT_Purge(FT_HANDLE ftHandle, ULONG Mask)
ftd2xx.h:455"""
FT_SetTimeouts = _libraries['ftd2xx64.dll'].FT_SetTimeouts
FT_SetTimeouts.restype = FT_STATUS
# FT_SetTimeouts(ftHandle, ReadTimeout, WriteTimeout)
FT_SetTimeouts.argtypes = [FT_HANDLE, ULONG, ULONG]
FT_SetTimeouts.__doc__ = \
    """FT_STATUS FT_SetTimeouts(FT_HANDLE ftHandle, ULONG ReadTimeout, ULONG WriteTimeout)
ftd2xx.h:461"""
FT_GetQueueStatus = _libraries['ftd2xx64.dll'].FT_GetQueueStatus
FT_GetQueueStatus.restype = FT_STATUS
# FT_GetQueueStatus(ftHandle, dwRxBytes)
FT_GetQueueStatus.argtypes = [FT_HANDLE, POINTER_T(ctypes.c_uint32)]
FT_GetQueueStatus.__doc__ = \
    """FT_STATUS FT_GetQueueStatus(FT_HANDLE ftHandle, LP_c_uint32 dwRxBytes)
ftd2xx.h:468"""
FT_SetEventNotification = _libraries['ftd2xx64.dll'].FT_SetEventNotification
FT_SetEventNotification.restype = FT_STATUS
# FT_SetEventNotification(ftHandle, Mask, Param)
FT_SetEventNotification.argtypes = [FT_HANDLE, DWORD, PVOID]
FT_SetEventNotification.__doc__ = \
    """FT_STATUS FT_SetEventNotification(FT_HANDLE ftHandle, DWORD Mask, PVOID Param)
ftd2xx.h:474"""
FT_GetStatus = _libraries['ftd2xx64.dll'].FT_GetStatus
FT_GetStatus.restype = FT_STATUS
# FT_GetStatus(ftHandle, dwRxBytes, dwTxBytes, dwEventDWord)
FT_GetStatus.argtypes = [FT_HANDLE, POINTER_T(ctypes.c_uint32), POINTER_T(ctypes.c_uint32), POINTER_T(ctypes.c_uint32)]
FT_GetStatus.__doc__ = \
    """FT_STATUS FT_GetStatus(FT_HANDLE ftHandle, LP_c_uint32 dwRxBytes, LP_c_uint32 dwTxBytes, LP_c_uint32 dwEventDWord)
ftd2xx.h:481"""
FT_SetBreakOn = _libraries['ftd2xx64.dll'].FT_SetBreakOn
FT_SetBreakOn.restype = FT_STATUS
# FT_SetBreakOn(ftHandle)
FT_SetBreakOn.argtypes = [FT_HANDLE]
FT_SetBreakOn.__doc__ = \
    """FT_STATUS FT_SetBreakOn(FT_HANDLE ftHandle)
ftd2xx.h:489"""
FT_SetBreakOff = _libraries['ftd2xx64.dll'].FT_SetBreakOff
FT_SetBreakOff.restype = FT_STATUS
# FT_SetBreakOff(ftHandle)
FT_SetBreakOff.argtypes = [FT_HANDLE]
FT_SetBreakOff.__doc__ = \
    """FT_STATUS FT_SetBreakOff(FT_HANDLE ftHandle)
ftd2xx.h:494"""
FT_SetWaitMask = _libraries['ftd2xx64.dll'].FT_SetWaitMask
FT_SetWaitMask.restype = FT_STATUS
# FT_SetWaitMask(ftHandle, Mask)
FT_SetWaitMask.argtypes = [FT_HANDLE, DWORD]
FT_SetWaitMask.__doc__ = \
    """FT_STATUS FT_SetWaitMask(FT_HANDLE ftHandle, DWORD Mask)
ftd2xx.h:499"""
FT_WaitOnMask = _libraries['ftd2xx64.dll'].FT_WaitOnMask
FT_WaitOnMask.restype = FT_STATUS
# FT_WaitOnMask(ftHandle, Mask)
FT_WaitOnMask.argtypes = [FT_HANDLE, POINTER_T(ctypes.c_uint32)]
FT_WaitOnMask.__doc__ = \
    """FT_STATUS FT_WaitOnMask(FT_HANDLE ftHandle, LP_c_uint32 Mask)
ftd2xx.h:505"""
FT_GetEventStatus = _libraries['ftd2xx64.dll'].FT_GetEventStatus
FT_GetEventStatus.restype = FT_STATUS
# FT_GetEventStatus(ftHandle, dwEventDWord)
FT_GetEventStatus.argtypes = [FT_HANDLE, POINTER_T(ctypes.c_uint32)]
FT_GetEventStatus.__doc__ = \
    """FT_STATUS FT_GetEventStatus(FT_HANDLE ftHandle, LP_c_uint32 dwEventDWord)
ftd2xx.h:511"""
FT_ReadEE = _libraries['ftd2xx64.dll'].FT_ReadEE
FT_ReadEE.restype = FT_STATUS
# FT_ReadEE(ftHandle, dwWordOffset, lpwValue)
FT_ReadEE.argtypes = [FT_HANDLE, DWORD, LPWORD]
FT_ReadEE.__doc__ = \
    """FT_STATUS FT_ReadEE(FT_HANDLE ftHandle, DWORD dwWordOffset, LPWORD lpwValue)
ftd2xx.h:517"""
FT_WriteEE = _libraries['ftd2xx64.dll'].FT_WriteEE
FT_WriteEE.restype = FT_STATUS
# FT_WriteEE(ftHandle, dwWordOffset, wValue)
FT_WriteEE.argtypes = [FT_HANDLE, DWORD, WORD]
FT_WriteEE.__doc__ = \
    """FT_STATUS FT_WriteEE(FT_HANDLE ftHandle, DWORD dwWordOffset, WORD wValue)
ftd2xx.h:524"""
FT_EraseEE = _libraries['ftd2xx64.dll'].FT_EraseEE
FT_EraseEE.restype = FT_STATUS
# FT_EraseEE(ftHandle)
FT_EraseEE.argtypes = [FT_HANDLE]
FT_EraseEE.__doc__ = \
    """FT_STATUS FT_EraseEE(FT_HANDLE ftHandle)
ftd2xx.h:531"""
class struct_ft_program_data(ctypes.Structure):
    # FT_PROGRAM_DATA: EEPROM programming structure shared by FT_EE_Program /
    # FT_EE_Read. Field groups are per device generation (Rev4 = FT232B,
    # *5 = FT232R/2232C era, *7 = FT2232H, *8 = FT4232H, *H = FT232H) --
    # this grouping is the generator's layout; the exact meanings live in the
    # FTDI D2XX documentation. Field order is the C ABI: do not reorder.
    _pack_ = True # source:False
    _fields_ = [
    ('Signature1', ctypes.c_uint32),
    ('Signature2', ctypes.c_uint32),
    ('Version', ctypes.c_uint32),
    ('VendorId', ctypes.c_uint16),
    ('ProductId', ctypes.c_uint16),
    ('Manufacturer', POINTER_T(ctypes.c_char)),
    ('ManufacturerId', POINTER_T(ctypes.c_char)),
    ('Description', POINTER_T(ctypes.c_char)),
    ('SerialNumber', POINTER_T(ctypes.c_char)),
    ('MaxPower', ctypes.c_uint16),
    ('PnP', ctypes.c_uint16),
    ('SelfPowered', ctypes.c_uint16),
    ('RemoteWakeup', ctypes.c_uint16),
    ('Rev4', ctypes.c_ubyte),
    ('IsoIn', ctypes.c_ubyte),
    ('IsoOut', ctypes.c_ubyte),
    ('PullDownEnable', ctypes.c_ubyte),
    ('SerNumEnable', ctypes.c_ubyte),
    ('USBVersionEnable', ctypes.c_ubyte),
    ('USBVersion', ctypes.c_uint16),
    ('Rev5', ctypes.c_ubyte),
    ('IsoInA', ctypes.c_ubyte),
    ('IsoInB', ctypes.c_ubyte),
    ('IsoOutA', ctypes.c_ubyte),
    ('IsoOutB', ctypes.c_ubyte),
    ('PullDownEnable5', ctypes.c_ubyte),
    ('SerNumEnable5', ctypes.c_ubyte),
    ('USBVersionEnable5', ctypes.c_ubyte),
    ('USBVersion5', ctypes.c_uint16),
    ('AIsHighCurrent', ctypes.c_ubyte),
    ('BIsHighCurrent', ctypes.c_ubyte),
    ('IFAIsFifo', ctypes.c_ubyte),
    ('IFAIsFifoTar', ctypes.c_ubyte),
    ('IFAIsFastSer', ctypes.c_ubyte),
    ('AIsVCP', ctypes.c_ubyte),
    ('IFBIsFifo', ctypes.c_ubyte),
    ('IFBIsFifoTar', ctypes.c_ubyte),
    ('IFBIsFastSer', ctypes.c_ubyte),
    ('BIsVCP', ctypes.c_ubyte),
    ('UseExtOsc', ctypes.c_ubyte),
    ('HighDriveIOs', ctypes.c_ubyte),
    ('EndpointSize', ctypes.c_ubyte),
    ('PullDownEnableR', ctypes.c_ubyte),
    ('SerNumEnableR', ctypes.c_ubyte),
    ('InvertTXD', ctypes.c_ubyte),
    ('InvertRXD', ctypes.c_ubyte),
    ('InvertRTS', ctypes.c_ubyte),
    ('InvertCTS', ctypes.c_ubyte),
    ('InvertDTR', ctypes.c_ubyte),
    ('InvertDSR', ctypes.c_ubyte),
    ('InvertDCD', ctypes.c_ubyte),
    ('InvertRI', ctypes.c_ubyte),
    ('Cbus0', ctypes.c_ubyte),
    ('Cbus1', ctypes.c_ubyte),
    ('Cbus2', ctypes.c_ubyte),
    ('Cbus3', ctypes.c_ubyte),
    ('Cbus4', ctypes.c_ubyte),
    ('RIsD2XX', ctypes.c_ubyte),
    ('PullDownEnable7', ctypes.c_ubyte),
    ('SerNumEnable7', ctypes.c_ubyte),
    ('ALSlowSlew', ctypes.c_ubyte),
    ('ALSchmittInput', ctypes.c_ubyte),
    ('ALDriveCurrent', ctypes.c_ubyte),
    ('AHSlowSlew', ctypes.c_ubyte),
    ('AHSchmittInput', ctypes.c_ubyte),
    ('AHDriveCurrent', ctypes.c_ubyte),
    ('BLSlowSlew', ctypes.c_ubyte),
    ('BLSchmittInput', ctypes.c_ubyte),
    ('BLDriveCurrent', ctypes.c_ubyte),
    ('BHSlowSlew', ctypes.c_ubyte),
    ('BHSchmittInput', ctypes.c_ubyte),
    ('BHDriveCurrent', ctypes.c_ubyte),
    ('IFAIsFifo7', ctypes.c_ubyte),
    ('IFAIsFifoTar7', ctypes.c_ubyte),
    ('IFAIsFastSer7', ctypes.c_ubyte),
    ('AIsVCP7', ctypes.c_ubyte),
    ('IFBIsFifo7', ctypes.c_ubyte),
    ('IFBIsFifoTar7', ctypes.c_ubyte),
    ('IFBIsFastSer7', ctypes.c_ubyte),
    ('BIsVCP7', ctypes.c_ubyte),
    ('PowerSaveEnable', ctypes.c_ubyte),
    ('PullDownEnable8', ctypes.c_ubyte),
    ('SerNumEnable8', ctypes.c_ubyte),
    ('ASlowSlew', ctypes.c_ubyte),
    ('ASchmittInput', ctypes.c_ubyte),
    ('ADriveCurrent', ctypes.c_ubyte),
    ('BSlowSlew', ctypes.c_ubyte),
    ('BSchmittInput', ctypes.c_ubyte),
    ('BDriveCurrent', ctypes.c_ubyte),
    ('CSlowSlew', ctypes.c_ubyte),
    ('CSchmittInput', ctypes.c_ubyte),
    ('CDriveCurrent', ctypes.c_ubyte),
    ('DSlowSlew', ctypes.c_ubyte),
    ('DSchmittInput', ctypes.c_ubyte),
    ('DDriveCurrent', ctypes.c_ubyte),
    ('ARIIsTXDEN', ctypes.c_ubyte),
    ('BRIIsTXDEN', ctypes.c_ubyte),
    ('CRIIsTXDEN', ctypes.c_ubyte),
    ('DRIIsTXDEN', ctypes.c_ubyte),
    ('AIsVCP8', ctypes.c_ubyte),
    ('BIsVCP8', ctypes.c_ubyte),
    ('CIsVCP8', ctypes.c_ubyte),
    ('DIsVCP8', ctypes.c_ubyte),
    ('PullDownEnableH', ctypes.c_ubyte),
    ('SerNumEnableH', ctypes.c_ubyte),
    ('ACSlowSlewH', ctypes.c_ubyte),
    ('ACSchmittInputH', ctypes.c_ubyte),
    ('ACDriveCurrentH', ctypes.c_ubyte),
    ('ADSlowSlewH', ctypes.c_ubyte),
    ('ADSchmittInputH', ctypes.c_ubyte),
    ('ADDriveCurrentH', ctypes.c_ubyte),
    ('Cbus0H', ctypes.c_ubyte),
    ('Cbus1H', ctypes.c_ubyte),
    ('Cbus2H', ctypes.c_ubyte),
    ('Cbus3H', ctypes.c_ubyte),
    ('Cbus4H', ctypes.c_ubyte),
    ('Cbus5H', ctypes.c_ubyte),
    ('Cbus6H', ctypes.c_ubyte),
    ('Cbus7H', ctypes.c_ubyte),
    ('Cbus8H', ctypes.c_ubyte),
    ('Cbus9H', ctypes.c_ubyte),
    ('IsFifoH', ctypes.c_ubyte),
    ('IsFifoTarH', ctypes.c_ubyte),
    ('IsFastSerH', ctypes.c_ubyte),
    ('IsFT1248H', ctypes.c_ubyte),
    ('FT1248CpolH', ctypes.c_ubyte),
    ('FT1248LsbH', ctypes.c_ubyte),
    ('FT1248FlowControlH', ctypes.c_ubyte),
    ('IsVCPH', ctypes.c_ubyte),
    ('PowerSaveEnableH', ctypes.c_ubyte),
    ('PADDING_0', ctypes.c_ubyte),
     ]
FT_PROGRAM_DATA = struct_ft_program_data
PFT_PROGRAM_DATA = POINTER_T(struct_ft_program_data)
# --- EEPROM access prototypes (FT_EE_*) --------------------------------------
FT_EE_Program = _libraries['ftd2xx64.dll'].FT_EE_Program
FT_EE_Program.restype = FT_STATUS
# FT_EE_Program(ftHandle, pData)
FT_EE_Program.argtypes = [FT_HANDLE, PFT_PROGRAM_DATA]
FT_EE_Program.__doc__ = \
    """FT_STATUS FT_EE_Program(FT_HANDLE ftHandle, PFT_PROGRAM_DATA pData)
ftd2xx.h:700"""
FT_EE_ProgramEx = _libraries['ftd2xx64.dll'].FT_EE_ProgramEx
FT_EE_ProgramEx.restype = FT_STATUS
# FT_EE_ProgramEx(ftHandle, pData, Manufacturer, ManufacturerId, Description, SerialNumber)
FT_EE_ProgramEx.argtypes = [FT_HANDLE, PFT_PROGRAM_DATA, POINTER_T(ctypes.c_char), POINTER_T(ctypes.c_char), POINTER_T(ctypes.c_char), POINTER_T(ctypes.c_char)]
FT_EE_ProgramEx.__doc__ = \
    """FT_STATUS FT_EE_ProgramEx(FT_HANDLE ftHandle, PFT_PROGRAM_DATA pData, LP_c_char Manufacturer, LP_c_char ManufacturerId, LP_c_char Description, LP_c_char SerialNumber)
ftd2xx.h:706"""
FT_EE_Read = _libraries['ftd2xx64.dll'].FT_EE_Read
FT_EE_Read.restype = FT_STATUS
# FT_EE_Read(ftHandle, pData)
FT_EE_Read.argtypes = [FT_HANDLE, PFT_PROGRAM_DATA]
FT_EE_Read.__doc__ = \
    """FT_STATUS FT_EE_Read(FT_HANDLE ftHandle, PFT_PROGRAM_DATA pData)
ftd2xx.h:716"""
FT_EE_ReadEx = _libraries['ftd2xx64.dll'].FT_EE_ReadEx
FT_EE_ReadEx.restype = FT_STATUS
# FT_EE_ReadEx(ftHandle, pData, Manufacturer, ManufacturerId, Description, SerialNumber)
FT_EE_ReadEx.argtypes = [FT_HANDLE, PFT_PROGRAM_DATA, POINTER_T(ctypes.c_char), POINTER_T(ctypes.c_char), POINTER_T(ctypes.c_char), POINTER_T(ctypes.c_char)]
FT_EE_ReadEx.__doc__ = \
    """FT_STATUS FT_EE_ReadEx(FT_HANDLE ftHandle, PFT_PROGRAM_DATA pData, LP_c_char Manufacturer, LP_c_char ManufacturerId, LP_c_char Description, LP_c_char SerialNumber)
ftd2xx.h:722"""
FT_EE_UASize = _libraries['ftd2xx64.dll'].FT_EE_UASize
FT_EE_UASize.restype = FT_STATUS
# FT_EE_UASize(ftHandle, lpdwSize)
FT_EE_UASize.argtypes = [FT_HANDLE, LPDWORD]
FT_EE_UASize.__doc__ = \
    """FT_STATUS FT_EE_UASize(FT_HANDLE ftHandle, LPDWORD lpdwSize)
ftd2xx.h:732"""
FT_EE_UAWrite = _libraries['ftd2xx64.dll'].FT_EE_UAWrite
FT_EE_UAWrite.restype = FT_STATUS
# FT_EE_UAWrite(ftHandle, pucData, dwDataLen)
FT_EE_UAWrite.argtypes = [FT_HANDLE, PUCHAR, DWORD]
FT_EE_UAWrite.__doc__ = \
    """FT_STATUS FT_EE_UAWrite(FT_HANDLE ftHandle, PUCHAR pucData, DWORD dwDataLen)
ftd2xx.h:738"""
FT_EE_UARead = _libraries['ftd2xx64.dll'].FT_EE_UARead
FT_EE_UARead.restype = FT_STATUS
# FT_EE_UARead(ftHandle, pucData, dwDataLen, lpdwBytesRead)
FT_EE_UARead.argtypes = [FT_HANDLE, PUCHAR, DWORD, LPDWORD]
FT_EE_UARead.__doc__ = \
    """FT_STATUS FT_EE_UARead(FT_HANDLE ftHandle, PUCHAR pucData, DWORD dwDataLen, LPDWORD lpdwBytesRead)
ftd2xx.h:745"""
class struct_ft_eeprom_header(ctypes.Structure):
    # Common prefix of every FT_EEPROM_* structure (deviceType selects which
    # derived layout follows). Field order is the C ABI: do not reorder.
    _pack_ = True # source:False
    _fields_ = [
    ('deviceType', ctypes.c_uint32),
    ('VendorId', ctypes.c_uint16),
    ('ProductId', ctypes.c_uint16),
    ('SerNumEnable', ctypes.c_ubyte),
    ('PADDING_0', ctypes.c_ubyte),
    ('MaxPower', ctypes.c_uint16),
    ('SelfPowered', ctypes.c_ubyte),
    ('RemoteWakeup', ctypes.c_ubyte),
    ('PullDownEnable', ctypes.c_ubyte),
    ('PADDING_1', ctypes.c_ubyte),
     ]
FT_EEPROM_HEADER = struct_ft_eeprom_header
PFT_EEPROM_HEADER = POINTER_T(struct_ft_eeprom_header)
class struct_ft_eeprom_232b(ctypes.Structure):
    # FT232B EEPROM layout: only the common header, no extra fields.
    _pack_ = True # source:False
    _fields_ = [
    ('common', FT_EEPROM_HEADER),
     ]
FT_EEPROM_232B = struct_ft_eeprom_232b
PFT_EEPROM_232B = POINTER_T(struct_ft_eeprom_232b)
class struct_ft_eeprom_2232(ctypes.Structure):
    # FT2232C/D EEPROM layout: common header plus per-channel (A/B) options.
    _pack_ = True # source:False
    _fields_ = [
    ('common', FT_EEPROM_HEADER),
    ('AIsHighCurrent', ctypes.c_ubyte),
    ('BIsHighCurrent', ctypes.c_ubyte),
    ('AIsFifo', ctypes.c_ubyte),
    ('AIsFifoTar', ctypes.c_ubyte),
    ('AIsFastSer', ctypes.c_ubyte),
    ('BIsFifo', ctypes.c_ubyte),
    ('BIsFifoTar', ctypes.c_ubyte),
    ('BIsFastSer', ctypes.c_ubyte),
    ('ADriverType', ctypes.c_ubyte),
    ('BDriverType', ctypes.c_ubyte),
    ('PADDING_0', ctypes.c_ubyte * 2),
     ]
FT_EEPROM_2232 = struct_ft_eeprom_2232
PFT_EEPROM_2232 = POINTER_T(struct_ft_eeprom_2232)
class struct_ft_eeprom_232r(ctypes.Structure):
    # FT232R EEPROM layout: signal-inversion flags and CBUS pin functions.
    _pack_ = True # source:False
    _fields_ = [
    ('common', FT_EEPROM_HEADER),
    ('IsHighCurrent', ctypes.c_ubyte),
    ('UseExtOsc', ctypes.c_ubyte),
    ('InvertTXD', ctypes.c_ubyte),
    ('InvertRXD', ctypes.c_ubyte),
    ('InvertRTS', ctypes.c_ubyte),
    ('InvertCTS', ctypes.c_ubyte),
    ('InvertDTR', ctypes.c_ubyte),
    ('InvertDSR', ctypes.c_ubyte),
    ('InvertDCD', ctypes.c_ubyte),
    ('InvertRI', ctypes.c_ubyte),
    ('Cbus0', ctypes.c_ubyte),
    ('Cbus1', ctypes.c_ubyte),
    ('Cbus2', ctypes.c_ubyte),
    ('Cbus3', ctypes.c_ubyte),
    ('Cbus4', ctypes.c_ubyte),
    ('DriverType', ctypes.c_ubyte),
     ]
FT_EEPROM_232R = struct_ft_eeprom_232r
PFT_EEPROM_232R = POINTER_T(struct_ft_eeprom_232r)
# EEPROM layout for FT2232H devices (C type FT_EEPROM_2232H): per-pin-bank
# (AL/AH/BL/BH) slew/schmitt/drive settings plus per-channel mode flags.
class struct_ft_eeprom_2232h(ctypes.Structure):
    _pack_ = True # source:False
    _fields_ = [
    ('common', FT_EEPROM_HEADER),
    ('ALSlowSlew', ctypes.c_ubyte),
    ('ALSchmittInput', ctypes.c_ubyte),
    ('ALDriveCurrent', ctypes.c_ubyte),
    ('AHSlowSlew', ctypes.c_ubyte),
    ('AHSchmittInput', ctypes.c_ubyte),
    ('AHDriveCurrent', ctypes.c_ubyte),
    ('BLSlowSlew', ctypes.c_ubyte),
    ('BLSchmittInput', ctypes.c_ubyte),
    ('BLDriveCurrent', ctypes.c_ubyte),
    ('BHSlowSlew', ctypes.c_ubyte),
    ('BHSchmittInput', ctypes.c_ubyte),
    ('BHDriveCurrent', ctypes.c_ubyte),
    ('AIsFifo', ctypes.c_ubyte),
    ('AIsFifoTar', ctypes.c_ubyte),
    ('AIsFastSer', ctypes.c_ubyte),
    ('BIsFifo', ctypes.c_ubyte),
    ('BIsFifoTar', ctypes.c_ubyte),
    ('BIsFastSer', ctypes.c_ubyte),
    ('PowerSaveEnable', ctypes.c_ubyte),
    ('ADriverType', ctypes.c_ubyte),
    ('BDriverType', ctypes.c_ubyte),
    ('PADDING_0', ctypes.c_ubyte * 3),
    ]
FT_EEPROM_2232H = struct_ft_eeprom_2232h
PFT_EEPROM_2232H = POINTER_T(struct_ft_eeprom_2232h)
# EEPROM layout for FT4232H devices (C type FT_EEPROM_4232H): four UART
# channels (A-D), each with slew/schmitt/drive, TXDEN routing and driver type.
class struct_ft_eeprom_4232h(ctypes.Structure):
    _pack_ = True # source:False
    _fields_ = [
    ('common', FT_EEPROM_HEADER),
    ('ASlowSlew', ctypes.c_ubyte),
    ('ASchmittInput', ctypes.c_ubyte),
    ('ADriveCurrent', ctypes.c_ubyte),
    ('BSlowSlew', ctypes.c_ubyte),
    ('BSchmittInput', ctypes.c_ubyte),
    ('BDriveCurrent', ctypes.c_ubyte),
    ('CSlowSlew', ctypes.c_ubyte),
    ('CSchmittInput', ctypes.c_ubyte),
    ('CDriveCurrent', ctypes.c_ubyte),
    ('DSlowSlew', ctypes.c_ubyte),
    ('DSchmittInput', ctypes.c_ubyte),
    ('DDriveCurrent', ctypes.c_ubyte),
    ('ARIIsTXDEN', ctypes.c_ubyte),
    ('BRIIsTXDEN', ctypes.c_ubyte),
    ('CRIIsTXDEN', ctypes.c_ubyte),
    ('DRIIsTXDEN', ctypes.c_ubyte),
    ('ADriverType', ctypes.c_ubyte),
    ('BDriverType', ctypes.c_ubyte),
    ('CDriverType', ctypes.c_ubyte),
    ('DDriverType', ctypes.c_ubyte),
    ]
FT_EEPROM_4232H = struct_ft_eeprom_4232h
PFT_EEPROM_4232H = POINTER_T(struct_ft_eeprom_4232h)
# EEPROM layout for FT232H devices (C type FT_EEPROM_232H): AC/AD pin-bank
# settings, ten CBUS mux slots and FT1248/FIFO mode flags.
class struct_ft_eeprom_232h(ctypes.Structure):
    _pack_ = True # source:False
    _fields_ = [
    ('common', FT_EEPROM_HEADER),
    ('ACSlowSlew', ctypes.c_ubyte),
    ('ACSchmittInput', ctypes.c_ubyte),
    ('ACDriveCurrent', ctypes.c_ubyte),
    ('ADSlowSlew', ctypes.c_ubyte),
    ('ADSchmittInput', ctypes.c_ubyte),
    ('ADDriveCurrent', ctypes.c_ubyte),
    ('Cbus0', ctypes.c_ubyte),
    ('Cbus1', ctypes.c_ubyte),
    ('Cbus2', ctypes.c_ubyte),
    ('Cbus3', ctypes.c_ubyte),
    ('Cbus4', ctypes.c_ubyte),
    ('Cbus5', ctypes.c_ubyte),
    ('Cbus6', ctypes.c_ubyte),
    ('Cbus7', ctypes.c_ubyte),
    ('Cbus8', ctypes.c_ubyte),
    ('Cbus9', ctypes.c_ubyte),
    ('FT1248Cpol', ctypes.c_ubyte),
    ('FT1248Lsb', ctypes.c_ubyte),
    ('FT1248FlowControl', ctypes.c_ubyte),
    ('IsFifo', ctypes.c_ubyte),
    ('IsFifoTar', ctypes.c_ubyte),
    ('IsFastSer', ctypes.c_ubyte),
    ('IsFT1248', ctypes.c_ubyte),
    ('PowerSaveEnable', ctypes.c_ubyte),
    ('DriverType', ctypes.c_ubyte),
    ('PADDING_0', ctypes.c_ubyte * 3),
    ]
FT_EEPROM_232H = struct_ft_eeprom_232h
PFT_EEPROM_232H = POINTER_T(struct_ft_eeprom_232h)
# EEPROM layout for FT-X series devices (C type FT_EEPROM_X_SERIES):
# CBUS mux, signal inversion, battery-charge-detect (BCD), I2C and FT1248
# options. Note the mixed-width fields (uint16/uint32) with explicit padding.
class struct_ft_eeprom_x_series(ctypes.Structure):
    _pack_ = True # source:False
    _fields_ = [
    ('common', FT_EEPROM_HEADER),
    ('ACSlowSlew', ctypes.c_ubyte),
    ('ACSchmittInput', ctypes.c_ubyte),
    ('ACDriveCurrent', ctypes.c_ubyte),
    ('ADSlowSlew', ctypes.c_ubyte),
    ('ADSchmittInput', ctypes.c_ubyte),
    ('ADDriveCurrent', ctypes.c_ubyte),
    ('Cbus0', ctypes.c_ubyte),
    ('Cbus1', ctypes.c_ubyte),
    ('Cbus2', ctypes.c_ubyte),
    ('Cbus3', ctypes.c_ubyte),
    ('Cbus4', ctypes.c_ubyte),
    ('Cbus5', ctypes.c_ubyte),
    ('Cbus6', ctypes.c_ubyte),
    ('InvertTXD', ctypes.c_ubyte),
    ('InvertRXD', ctypes.c_ubyte),
    ('InvertRTS', ctypes.c_ubyte),
    ('InvertCTS', ctypes.c_ubyte),
    ('InvertDTR', ctypes.c_ubyte),
    ('InvertDSR', ctypes.c_ubyte),
    ('InvertDCD', ctypes.c_ubyte),
    ('InvertRI', ctypes.c_ubyte),
    ('BCDEnable', ctypes.c_ubyte),
    ('BCDForceCbusPWREN', ctypes.c_ubyte),
    ('BCDDisableSleep', ctypes.c_ubyte),
    ('I2CSlaveAddress', ctypes.c_uint16),
    ('PADDING_0', ctypes.c_ubyte * 2),
    ('I2CDeviceId', ctypes.c_uint32),
    ('I2CDisableSchmitt', ctypes.c_ubyte),
    ('FT1248Cpol', ctypes.c_ubyte),
    ('FT1248Lsb', ctypes.c_ubyte),
    ('FT1248FlowControl', ctypes.c_ubyte),
    ('RS485EchoSuppress', ctypes.c_ubyte),
    ('PowerSaveEnable', ctypes.c_ubyte),
    ('DriverType', ctypes.c_ubyte),
    ('PADDING_1', ctypes.c_ubyte),
    ]
FT_EEPROM_X_SERIES = struct_ft_eeprom_x_series
PFT_EEPROM_X_SERIES = POINTER_T(struct_ft_eeprom_x_series)
# ---------------------------------------------------------------------------
# D2XX function bindings: each entry fetches the symbol from ftd2xx64.dll,
# then sets restype/argtypes to mirror the C prototype (see the ftd2xx.h line
# quoted in each __doc__). Generated code — keep argtypes in prototype order.
# ---------------------------------------------------------------------------
FT_EEPROM_Read = _libraries['ftd2xx64.dll'].FT_EEPROM_Read
FT_EEPROM_Read.restype = FT_STATUS
# FT_EEPROM_Read(ftHandle, eepromData, eepromDataSize, Manufacturer, ManufacturerId, Description, SerialNumber)
FT_EEPROM_Read.argtypes = [FT_HANDLE, POINTER_T(None), DWORD, POINTER_T(ctypes.c_char), POINTER_T(ctypes.c_char), POINTER_T(ctypes.c_char), POINTER_T(ctypes.c_char)]
FT_EEPROM_Read.__doc__ = \
    """FT_STATUS FT_EEPROM_Read(FT_HANDLE ftHandle, LP_None eepromData, DWORD eepromDataSize, LP_c_char Manufacturer, LP_c_char ManufacturerId, LP_c_char Description, LP_c_char SerialNumber)
ftd2xx.h:968"""
FT_EEPROM_Program = _libraries['ftd2xx64.dll'].FT_EEPROM_Program
FT_EEPROM_Program.restype = FT_STATUS
# FT_EEPROM_Program(ftHandle, eepromData, eepromDataSize, Manufacturer, ManufacturerId, Description, SerialNumber)
FT_EEPROM_Program.argtypes = [FT_HANDLE, POINTER_T(None), DWORD, POINTER_T(ctypes.c_char), POINTER_T(ctypes.c_char), POINTER_T(ctypes.c_char), POINTER_T(ctypes.c_char)]
FT_EEPROM_Program.__doc__ = \
    """FT_STATUS FT_EEPROM_Program(FT_HANDLE ftHandle, LP_None eepromData, DWORD eepromDataSize, LP_c_char Manufacturer, LP_c_char ManufacturerId, LP_c_char Description, LP_c_char SerialNumber)
ftd2xx.h:980"""
FT_SetLatencyTimer = _libraries['ftd2xx64.dll'].FT_SetLatencyTimer
FT_SetLatencyTimer.restype = FT_STATUS
# FT_SetLatencyTimer(ftHandle, ucLatency)
FT_SetLatencyTimer.argtypes = [FT_HANDLE, UCHAR]
FT_SetLatencyTimer.__doc__ = \
    """FT_STATUS FT_SetLatencyTimer(FT_HANDLE ftHandle, UCHAR ucLatency)
ftd2xx.h:992"""
FT_GetLatencyTimer = _libraries['ftd2xx64.dll'].FT_GetLatencyTimer
FT_GetLatencyTimer.restype = FT_STATUS
# FT_GetLatencyTimer(ftHandle, pucLatency)
FT_GetLatencyTimer.argtypes = [FT_HANDLE, PUCHAR]
FT_GetLatencyTimer.__doc__ = \
    """FT_STATUS FT_GetLatencyTimer(FT_HANDLE ftHandle, PUCHAR pucLatency)
ftd2xx.h:998"""
FT_SetBitMode = _libraries['ftd2xx64.dll'].FT_SetBitMode
FT_SetBitMode.restype = FT_STATUS
# FT_SetBitMode(ftHandle, ucMask, ucEnable)
FT_SetBitMode.argtypes = [FT_HANDLE, UCHAR, UCHAR]
FT_SetBitMode.__doc__ = \
    """FT_STATUS FT_SetBitMode(FT_HANDLE ftHandle, UCHAR ucMask, UCHAR ucEnable)
ftd2xx.h:1004"""
FT_GetBitMode = _libraries['ftd2xx64.dll'].FT_GetBitMode
FT_GetBitMode.restype = FT_STATUS
# FT_GetBitMode(ftHandle, pucMode)
FT_GetBitMode.argtypes = [FT_HANDLE, PUCHAR]
FT_GetBitMode.__doc__ = \
    """FT_STATUS FT_GetBitMode(FT_HANDLE ftHandle, PUCHAR pucMode)
ftd2xx.h:1011"""
FT_SetUSBParameters = _libraries['ftd2xx64.dll'].FT_SetUSBParameters
FT_SetUSBParameters.restype = FT_STATUS
# FT_SetUSBParameters(ftHandle, ulInTransferSize, ulOutTransferSize)
FT_SetUSBParameters.argtypes = [FT_HANDLE, ULONG, ULONG]
FT_SetUSBParameters.__doc__ = \
    """FT_STATUS FT_SetUSBParameters(FT_HANDLE ftHandle, ULONG ulInTransferSize, ULONG ulOutTransferSize)
ftd2xx.h:1017"""
FT_SetDeadmanTimeout = _libraries['ftd2xx64.dll'].FT_SetDeadmanTimeout
FT_SetDeadmanTimeout.restype = FT_STATUS
# FT_SetDeadmanTimeout(ftHandle, ulDeadmanTimeout)
FT_SetDeadmanTimeout.argtypes = [FT_HANDLE, ULONG]
FT_SetDeadmanTimeout.__doc__ = \
    """FT_STATUS FT_SetDeadmanTimeout(FT_HANDLE ftHandle, ULONG ulDeadmanTimeout)
ftd2xx.h:1024"""
FT_GetDeviceInfo = _libraries['ftd2xx64.dll'].FT_GetDeviceInfo
FT_GetDeviceInfo.restype = FT_STATUS
# FT_GetDeviceInfo(ftHandle, lpftDevice, lpdwID, SerialNumber, Description, Dummy)
FT_GetDeviceInfo.argtypes = [FT_HANDLE, POINTER_T(ctypes.c_uint32), LPDWORD, PCHAR, PCHAR, LPVOID]
FT_GetDeviceInfo.__doc__ = \
    """FT_STATUS FT_GetDeviceInfo(FT_HANDLE ftHandle, LP_c_uint32 lpftDevice, LPDWORD lpdwID, PCHAR SerialNumber, PCHAR Description, LPVOID Dummy)
ftd2xx.h:1053"""
FT_StopInTask = _libraries['ftd2xx64.dll'].FT_StopInTask
FT_StopInTask.restype = FT_STATUS
# FT_StopInTask(ftHandle)
FT_StopInTask.argtypes = [FT_HANDLE]
FT_StopInTask.__doc__ = \
    """FT_STATUS FT_StopInTask(FT_HANDLE ftHandle)
ftd2xx.h:1063"""
FT_RestartInTask = _libraries['ftd2xx64.dll'].FT_RestartInTask
FT_RestartInTask.restype = FT_STATUS
# FT_RestartInTask(ftHandle)
FT_RestartInTask.argtypes = [FT_HANDLE]
FT_RestartInTask.__doc__ = \
    """FT_STATUS FT_RestartInTask(FT_HANDLE ftHandle)
ftd2xx.h:1068"""
FT_SetResetPipeRetryCount = _libraries['ftd2xx64.dll'].FT_SetResetPipeRetryCount
FT_SetResetPipeRetryCount.restype = FT_STATUS
# FT_SetResetPipeRetryCount(ftHandle, dwCount)
FT_SetResetPipeRetryCount.argtypes = [FT_HANDLE, DWORD]
FT_SetResetPipeRetryCount.__doc__ = \
    """FT_STATUS FT_SetResetPipeRetryCount(FT_HANDLE ftHandle, DWORD dwCount)
ftd2xx.h:1073"""
FT_ResetPort = _libraries['ftd2xx64.dll'].FT_ResetPort
FT_ResetPort.restype = FT_STATUS
# FT_ResetPort(ftHandle)
FT_ResetPort.argtypes = [FT_HANDLE]
FT_ResetPort.__doc__ = \
    """FT_STATUS FT_ResetPort(FT_HANDLE ftHandle)
ftd2xx.h:1079"""
FT_CyclePort = _libraries['ftd2xx64.dll'].FT_CyclePort
FT_CyclePort.restype = FT_STATUS
# FT_CyclePort(ftHandle)
FT_CyclePort.argtypes = [FT_HANDLE]
FT_CyclePort.__doc__ = \
    """FT_STATUS FT_CyclePort(FT_HANDLE ftHandle)
ftd2xx.h:1084"""
# Win32-style wrappers (FT_W32_*) emulate the classic Windows file/comm API.
FT_W32_CreateFile = _libraries['ftd2xx64.dll'].FT_W32_CreateFile
FT_W32_CreateFile.restype = FT_HANDLE
# FT_W32_CreateFile(lpszName, dwAccess, dwShareMode, lpSecurityAttributes, dwCreate, dwAttrsAndFlags, hTemplate)
FT_W32_CreateFile.argtypes = [LPCTSTR, DWORD, DWORD, LPSECURITY_ATTRIBUTES, DWORD, DWORD, HANDLE]
FT_W32_CreateFile.__doc__ = \
    """FT_HANDLE FT_W32_CreateFile(LPCTSTR lpszName, DWORD dwAccess, DWORD dwShareMode, LPSECURITY_ATTRIBUTES lpSecurityAttributes, DWORD dwCreate, DWORD dwAttrsAndFlags, HANDLE hTemplate)
ftd2xx.h:1094"""
FT_W32_CloseHandle = _libraries['ftd2xx64.dll'].FT_W32_CloseHandle
FT_W32_CloseHandle.restype = BOOL
# FT_W32_CloseHandle(ftHandle)
FT_W32_CloseHandle.argtypes = [FT_HANDLE]
FT_W32_CloseHandle.__doc__ = \
    """BOOL FT_W32_CloseHandle(FT_HANDLE ftHandle)
ftd2xx.h:1105"""
FT_W32_ReadFile = _libraries['ftd2xx64.dll'].FT_W32_ReadFile
FT_W32_ReadFile.restype = BOOL
# FT_W32_ReadFile(ftHandle, lpBuffer, nBufferSize, lpBytesReturned, lpOverlapped)
FT_W32_ReadFile.argtypes = [FT_HANDLE, LPVOID, DWORD, LPDWORD, LPOVERLAPPED]
FT_W32_ReadFile.__doc__ = \
    """BOOL FT_W32_ReadFile(FT_HANDLE ftHandle, LPVOID lpBuffer, DWORD nBufferSize, LPDWORD lpBytesReturned, LPOVERLAPPED lpOverlapped)
ftd2xx.h:1110"""
FT_W32_WriteFile = _libraries['ftd2xx64.dll'].FT_W32_WriteFile
FT_W32_WriteFile.restype = BOOL
# FT_W32_WriteFile(ftHandle, lpBuffer, nBufferSize, lpBytesWritten, lpOverlapped)
FT_W32_WriteFile.argtypes = [FT_HANDLE, LPVOID, DWORD, LPDWORD, LPOVERLAPPED]
FT_W32_WriteFile.__doc__ = \
    """BOOL FT_W32_WriteFile(FT_HANDLE ftHandle, LPVOID lpBuffer, DWORD nBufferSize, LPDWORD lpBytesWritten, LPOVERLAPPED lpOverlapped)
ftd2xx.h:1119"""
FT_W32_GetLastError = _libraries['ftd2xx64.dll'].FT_W32_GetLastError
FT_W32_GetLastError.restype = DWORD
# FT_W32_GetLastError(ftHandle)
FT_W32_GetLastError.argtypes = [FT_HANDLE]
FT_W32_GetLastError.__doc__ = \
    """DWORD FT_W32_GetLastError(FT_HANDLE ftHandle)
ftd2xx.h:1128"""
FT_W32_GetOverlappedResult = _libraries['ftd2xx64.dll'].FT_W32_GetOverlappedResult
FT_W32_GetOverlappedResult.restype = BOOL
# FT_W32_GetOverlappedResult(ftHandle, lpOverlapped, lpdwBytesTransferred, bWait)
FT_W32_GetOverlappedResult.argtypes = [FT_HANDLE, LPOVERLAPPED, LPDWORD, BOOL]
FT_W32_GetOverlappedResult.__doc__ = \
    """BOOL FT_W32_GetOverlappedResult(FT_HANDLE ftHandle, LPOVERLAPPED lpOverlapped, LPDWORD lpdwBytesTransferred, BOOL bWait)
ftd2xx.h:1133"""
FT_W32_CancelIo = _libraries['ftd2xx64.dll'].FT_W32_CancelIo
FT_W32_CancelIo.restype = BOOL
# FT_W32_CancelIo(ftHandle)
FT_W32_CancelIo.argtypes = [FT_HANDLE]
FT_W32_CancelIo.__doc__ = \
    """BOOL FT_W32_CancelIo(FT_HANDLE ftHandle)
ftd2xx.h:1141"""
# Mirror of the Win32 COMSTAT structure (C type FTCOMSTAT): flow-control
# state bitfields plus in/out queue counts. The third tuple element is the
# bit width of each ctypes bitfield.
class struct__FTCOMSTAT(ctypes.Structure):
    _pack_ = True # source:False
    _fields_ = [
    ('fCtsHold', ctypes.c_uint32, 1),
    ('fDsrHold', ctypes.c_uint32, 1),
    ('fRlsdHold', ctypes.c_uint32, 1),
    ('fXoffHold', ctypes.c_uint32, 1),
    ('fXoffSent', ctypes.c_uint32, 1),
    ('fEof', ctypes.c_uint32, 1),
    ('fTxim', ctypes.c_uint32, 1),
    ('fReserved', ctypes.c_uint32, 25),
    ('cbInQue', ctypes.c_uint32),
    ('cbOutQue', ctypes.c_uint32),
    ]
FTCOMSTAT = struct__FTCOMSTAT
LPFTCOMSTAT = POINTER_T(struct__FTCOMSTAT)
# Mirror of the Win32 DCB structure (C type FTDCB): serial-port control
# block with baud rate, flow-control bitfields and framing characters.
class struct__FTDCB(ctypes.Structure):
    _pack_ = True # source:False
    _fields_ = [
    ('DCBlength', ctypes.c_uint32),
    ('BaudRate', ctypes.c_uint32),
    ('fBinary', ctypes.c_uint32, 1),
    ('fParity', ctypes.c_uint32, 1),
    ('fOutxCtsFlow', ctypes.c_uint32, 1),
    ('fOutxDsrFlow', ctypes.c_uint32, 1),
    ('fDtrControl', ctypes.c_uint32, 2),
    ('fDsrSensitivity', ctypes.c_uint32, 1),
    ('fTXContinueOnXoff', ctypes.c_uint32, 1),
    ('fOutX', ctypes.c_uint32, 1),
    ('fInX', ctypes.c_uint32, 1),
    ('fErrorChar', ctypes.c_uint32, 1),
    ('fNull', ctypes.c_uint32, 1),
    ('fRtsControl', ctypes.c_uint32, 2),
    ('fAbortOnError', ctypes.c_uint32, 1),
    ('fDummy2', ctypes.c_uint32, 17),
    ('wReserved', ctypes.c_uint16),
    ('XonLim', ctypes.c_uint16),
    ('XoffLim', ctypes.c_uint16),
    ('ByteSize', ctypes.c_ubyte),
    ('Parity', ctypes.c_ubyte),
    ('StopBits', ctypes.c_ubyte),
    ('XonChar', ctypes.c_char),
    ('XoffChar', ctypes.c_char),
    ('ErrorChar', ctypes.c_char),
    ('EofChar', ctypes.c_char),
    ('EvtChar', ctypes.c_char),
    ('wReserved1', ctypes.c_uint16),
    ]
FTDCB = struct__FTDCB
LPFTDCB = POINTER_T(struct__FTDCB)
# Mirror of the Win32 COMMTIMEOUTS structure (C type FTTIMEOUTS): read and
# write timeout parameters in milliseconds.
class struct__FTTIMEOUTS(ctypes.Structure):
    _pack_ = True # source:False
    _fields_ = [
    ('ReadIntervalTimeout', ctypes.c_uint32),
    ('ReadTotalTimeoutMultiplier', ctypes.c_uint32),
    ('ReadTotalTimeoutConstant', ctypes.c_uint32),
    ('WriteTotalTimeoutMultiplier', ctypes.c_uint32),
    ('WriteTotalTimeoutConstant', ctypes.c_uint32),
    ]
FTTIMEOUTS = struct__FTTIMEOUTS
LPFTTIMEOUTS = POINTER_T(struct__FTTIMEOUTS)
# ---------------------------------------------------------------------------
# Win32-style comm-port emulation bindings (FT_W32_*Comm*): same generated
# pattern — symbol lookup, restype, argtypes, prototype quoted in __doc__.
# ---------------------------------------------------------------------------
FT_W32_ClearCommBreak = _libraries['ftd2xx64.dll'].FT_W32_ClearCommBreak
FT_W32_ClearCommBreak.restype = BOOL
# FT_W32_ClearCommBreak(ftHandle)
FT_W32_ClearCommBreak.argtypes = [FT_HANDLE]
FT_W32_ClearCommBreak.__doc__ = \
    """BOOL FT_W32_ClearCommBreak(FT_HANDLE ftHandle)
ftd2xx.h:1203"""
FT_W32_ClearCommError = _libraries['ftd2xx64.dll'].FT_W32_ClearCommError
FT_W32_ClearCommError.restype = BOOL
# FT_W32_ClearCommError(ftHandle, lpdwErrors, lpftComstat)
FT_W32_ClearCommError.argtypes = [FT_HANDLE, LPDWORD, LPFTCOMSTAT]
FT_W32_ClearCommError.__doc__ = \
    """BOOL FT_W32_ClearCommError(FT_HANDLE ftHandle, LPDWORD lpdwErrors, LPFTCOMSTAT lpftComstat)
ftd2xx.h:1208"""
FT_W32_EscapeCommFunction = _libraries['ftd2xx64.dll'].FT_W32_EscapeCommFunction
FT_W32_EscapeCommFunction.restype = BOOL
# FT_W32_EscapeCommFunction(ftHandle, dwFunc)
FT_W32_EscapeCommFunction.argtypes = [FT_HANDLE, DWORD]
FT_W32_EscapeCommFunction.__doc__ = \
    """BOOL FT_W32_EscapeCommFunction(FT_HANDLE ftHandle, DWORD dwFunc)
ftd2xx.h:1215"""
FT_W32_GetCommModemStatus = _libraries['ftd2xx64.dll'].FT_W32_GetCommModemStatus
FT_W32_GetCommModemStatus.restype = BOOL
# FT_W32_GetCommModemStatus(ftHandle, lpdwModemStatus)
FT_W32_GetCommModemStatus.argtypes = [FT_HANDLE, LPDWORD]
FT_W32_GetCommModemStatus.__doc__ = \
    """BOOL FT_W32_GetCommModemStatus(FT_HANDLE ftHandle, LPDWORD lpdwModemStatus)
ftd2xx.h:1221"""
FT_W32_GetCommState = _libraries['ftd2xx64.dll'].FT_W32_GetCommState
FT_W32_GetCommState.restype = BOOL
# FT_W32_GetCommState(ftHandle, lpftDcb)
FT_W32_GetCommState.argtypes = [FT_HANDLE, LPFTDCB]
FT_W32_GetCommState.__doc__ = \
    """BOOL FT_W32_GetCommState(FT_HANDLE ftHandle, LPFTDCB lpftDcb)
ftd2xx.h:1227"""
FT_W32_GetCommTimeouts = _libraries['ftd2xx64.dll'].FT_W32_GetCommTimeouts
FT_W32_GetCommTimeouts.restype = BOOL
# FT_W32_GetCommTimeouts(ftHandle, pTimeouts)
FT_W32_GetCommTimeouts.argtypes = [FT_HANDLE, POINTER_T(struct__FTTIMEOUTS)]
FT_W32_GetCommTimeouts.__doc__ = \
    """BOOL FT_W32_GetCommTimeouts(FT_HANDLE ftHandle, LP_struct__FTTIMEOUTS pTimeouts)
ftd2xx.h:1233"""
FT_W32_PurgeComm = _libraries['ftd2xx64.dll'].FT_W32_PurgeComm
FT_W32_PurgeComm.restype = BOOL
# FT_W32_PurgeComm(ftHandle, dwMask)
FT_W32_PurgeComm.argtypes = [FT_HANDLE, DWORD]
FT_W32_PurgeComm.__doc__ = \
    """BOOL FT_W32_PurgeComm(FT_HANDLE ftHandle, DWORD dwMask)
ftd2xx.h:1239"""
FT_W32_SetCommBreak = _libraries['ftd2xx64.dll'].FT_W32_SetCommBreak
FT_W32_SetCommBreak.restype = BOOL
# FT_W32_SetCommBreak(ftHandle)
FT_W32_SetCommBreak.argtypes = [FT_HANDLE]
FT_W32_SetCommBreak.__doc__ = \
    """BOOL FT_W32_SetCommBreak(FT_HANDLE ftHandle)
ftd2xx.h:1245"""
FT_W32_SetCommMask = _libraries['ftd2xx64.dll'].FT_W32_SetCommMask
FT_W32_SetCommMask.restype = BOOL
# FT_W32_SetCommMask(ftHandle, ulEventMask)
FT_W32_SetCommMask.argtypes = [FT_HANDLE, ULONG]
FT_W32_SetCommMask.__doc__ = \
    """BOOL FT_W32_SetCommMask(FT_HANDLE ftHandle, ULONG ulEventMask)
ftd2xx.h:1250"""
FT_W32_GetCommMask = _libraries['ftd2xx64.dll'].FT_W32_GetCommMask
FT_W32_GetCommMask.restype = BOOL
# FT_W32_GetCommMask(ftHandle, lpdwEventMask)
FT_W32_GetCommMask.argtypes = [FT_HANDLE, LPDWORD]
FT_W32_GetCommMask.__doc__ = \
    """BOOL FT_W32_GetCommMask(FT_HANDLE ftHandle, LPDWORD lpdwEventMask)
ftd2xx.h:1256"""
FT_W32_SetCommState = _libraries['ftd2xx64.dll'].FT_W32_SetCommState
FT_W32_SetCommState.restype = BOOL
# FT_W32_SetCommState(ftHandle, lpftDcb)
FT_W32_SetCommState.argtypes = [FT_HANDLE, LPFTDCB]
FT_W32_SetCommState.__doc__ = \
    """BOOL FT_W32_SetCommState(FT_HANDLE ftHandle, LPFTDCB lpftDcb)
ftd2xx.h:1262"""
FT_W32_SetCommTimeouts = _libraries['ftd2xx64.dll'].FT_W32_SetCommTimeouts
FT_W32_SetCommTimeouts.restype = BOOL
# FT_W32_SetCommTimeouts(ftHandle, pTimeouts)
FT_W32_SetCommTimeouts.argtypes = [FT_HANDLE, POINTER_T(struct__FTTIMEOUTS)]
FT_W32_SetCommTimeouts.__doc__ = \
    """BOOL FT_W32_SetCommTimeouts(FT_HANDLE ftHandle, LP_struct__FTTIMEOUTS pTimeouts)
ftd2xx.h:1268"""
FT_W32_SetupComm = _libraries['ftd2xx64.dll'].FT_W32_SetupComm
FT_W32_SetupComm.restype = BOOL
# FT_W32_SetupComm(ftHandle, dwReadBufferSize, dwWriteBufferSize)
FT_W32_SetupComm.argtypes = [FT_HANDLE, DWORD, DWORD]
FT_W32_SetupComm.__doc__ = \
    """BOOL FT_W32_SetupComm(FT_HANDLE ftHandle, DWORD dwReadBufferSize, DWORD dwWriteBufferSize)
ftd2xx.h:1274"""
FT_W32_WaitCommEvent = _libraries['ftd2xx64.dll'].FT_W32_WaitCommEvent
FT_W32_WaitCommEvent.restype = BOOL
# FT_W32_WaitCommEvent(ftHandle, pulEvent, lpOverlapped)
FT_W32_WaitCommEvent.argtypes = [FT_HANDLE, PULONG, LPOVERLAPPED]
FT_W32_WaitCommEvent.__doc__ = \
    """BOOL FT_W32_WaitCommEvent(FT_HANDLE ftHandle, PULONG pulEvent, LPOVERLAPPED lpOverlapped)
ftd2xx.h:1281"""
# One entry of the device-info list returned by FT_GetDeviceInfoList
# (C type FT_DEVICE_LIST_INFO_NODE). Fixed-size char arrays hold the
# serial number and description strings.
class struct__ft_device_list_info_node(ctypes.Structure):
    _pack_ = True # source:False
    _fields_ = [
    ('Flags', ctypes.c_uint32),
    ('Type', ctypes.c_uint32),
    ('ID', ctypes.c_uint32),
    ('LocId', ctypes.c_uint32),
    ('SerialNumber', ctypes.c_char * 16),
    ('Description', ctypes.c_char * 64),
    ('ftHandle', POINTER_T(None)),
    ]
FT_DEVICE_LIST_INFO_NODE = struct__ft_device_list_info_node
# values for enumeration 'c__Ea_FT_FLAGS_OPENED'
# Bit flags for the 'Flags' field above.
FT_FLAGS_OPENED = 1
FT_FLAGS_HISPEED = 2
c__Ea_FT_FLAGS_OPENED = ctypes.c_int # enum
# ---------------------------------------------------------------------------
# Device enumeration, driver/library versions, EEPROM config and vendor
# command bindings — same generated restype/argtypes pattern as above.
# ---------------------------------------------------------------------------
FT_CreateDeviceInfoList = _libraries['ftd2xx64.dll'].FT_CreateDeviceInfoList
FT_CreateDeviceInfoList.restype = FT_STATUS
# FT_CreateDeviceInfoList(lpdwNumDevs)
FT_CreateDeviceInfoList.argtypes = [LPDWORD]
FT_CreateDeviceInfoList.__doc__ = \
    """FT_STATUS FT_CreateDeviceInfoList(LPDWORD lpdwNumDevs)
ftd2xx.h:1310"""
FT_GetDeviceInfoList = _libraries['ftd2xx64.dll'].FT_GetDeviceInfoList
FT_GetDeviceInfoList.restype = FT_STATUS
# FT_GetDeviceInfoList(pDest, lpdwNumDevs)
FT_GetDeviceInfoList.argtypes = [POINTER_T(struct__ft_device_list_info_node), LPDWORD]
FT_GetDeviceInfoList.__doc__ = \
    """FT_STATUS FT_GetDeviceInfoList(LP_struct__ft_device_list_info_node pDest, LPDWORD lpdwNumDevs)
ftd2xx.h:1315"""
FT_GetDeviceInfoDetail = _libraries['ftd2xx64.dll'].FT_GetDeviceInfoDetail
FT_GetDeviceInfoDetail.restype = FT_STATUS
# FT_GetDeviceInfoDetail(dwIndex, lpdwFlags, lpdwType, lpdwID, lpdwLocId, lpSerialNumber, lpDescription, pftHandle)
FT_GetDeviceInfoDetail.argtypes = [DWORD, LPDWORD, LPDWORD, LPDWORD, LPDWORD, LPVOID, LPVOID, POINTER_T(POINTER_T(None))]
FT_GetDeviceInfoDetail.__doc__ = \
    """FT_STATUS FT_GetDeviceInfoDetail(DWORD dwIndex, LPDWORD lpdwFlags, LPDWORD lpdwType, LPDWORD lpdwID, LPDWORD lpdwLocId, LPVOID lpSerialNumber, LPVOID lpDescription, LP_LP_None pftHandle)
ftd2xx.h:1321"""
FT_GetDriverVersion = _libraries['ftd2xx64.dll'].FT_GetDriverVersion
FT_GetDriverVersion.restype = FT_STATUS
# FT_GetDriverVersion(ftHandle, lpdwVersion)
FT_GetDriverVersion.argtypes = [FT_HANDLE, LPDWORD]
FT_GetDriverVersion.__doc__ = \
    """FT_STATUS FT_GetDriverVersion(FT_HANDLE ftHandle, LPDWORD lpdwVersion)
ftd2xx.h:1338"""
FT_GetLibraryVersion = _libraries['ftd2xx64.dll'].FT_GetLibraryVersion
FT_GetLibraryVersion.restype = FT_STATUS
# FT_GetLibraryVersion(lpdwVersion)
FT_GetLibraryVersion.argtypes = [LPDWORD]
FT_GetLibraryVersion.__doc__ = \
    """FT_STATUS FT_GetLibraryVersion(LPDWORD lpdwVersion)
ftd2xx.h:1344"""
FT_Rescan = _libraries['ftd2xx64.dll'].FT_Rescan
FT_Rescan.restype = FT_STATUS
# FT_Rescan()
FT_Rescan.argtypes = []
FT_Rescan.__doc__ = \
    """FT_STATUS FT_Rescan()
ftd2xx.h:1350"""
FT_Reload = _libraries['ftd2xx64.dll'].FT_Reload
FT_Reload.restype = FT_STATUS
# FT_Reload(wVid, wPid)
FT_Reload.argtypes = [WORD, WORD]
FT_Reload.__doc__ = \
    """FT_STATUS FT_Reload(WORD wVid, WORD wPid)
ftd2xx.h:1355"""
FT_GetComPortNumber = _libraries['ftd2xx64.dll'].FT_GetComPortNumber
FT_GetComPortNumber.restype = FT_STATUS
# FT_GetComPortNumber(ftHandle, lpdwComPortNumber)
FT_GetComPortNumber.argtypes = [FT_HANDLE, LPLONG]
FT_GetComPortNumber.__doc__ = \
    """FT_STATUS FT_GetComPortNumber(FT_HANDLE ftHandle, LPLONG lpdwComPortNumber)
ftd2xx.h:1361"""
FT_EE_ReadConfig = _libraries['ftd2xx64.dll'].FT_EE_ReadConfig
FT_EE_ReadConfig.restype = FT_STATUS
# FT_EE_ReadConfig(ftHandle, ucAddress, pucValue)
FT_EE_ReadConfig.argtypes = [FT_HANDLE, UCHAR, PUCHAR]
FT_EE_ReadConfig.__doc__ = \
    """FT_STATUS FT_EE_ReadConfig(FT_HANDLE ftHandle, UCHAR ucAddress, PUCHAR pucValue)
ftd2xx.h:1372"""
FT_EE_WriteConfig = _libraries['ftd2xx64.dll'].FT_EE_WriteConfig
FT_EE_WriteConfig.restype = FT_STATUS
# FT_EE_WriteConfig(ftHandle, ucAddress, ucValue)
FT_EE_WriteConfig.argtypes = [FT_HANDLE, UCHAR, UCHAR]
FT_EE_WriteConfig.__doc__ = \
    """FT_STATUS FT_EE_WriteConfig(FT_HANDLE ftHandle, UCHAR ucAddress, UCHAR ucValue)
ftd2xx.h:1379"""
FT_EE_ReadECC = _libraries['ftd2xx64.dll'].FT_EE_ReadECC
FT_EE_ReadECC.restype = FT_STATUS
# FT_EE_ReadECC(ftHandle, ucOption, lpwValue)
FT_EE_ReadECC.argtypes = [FT_HANDLE, UCHAR, LPWORD]
FT_EE_ReadECC.__doc__ = \
    """FT_STATUS FT_EE_ReadECC(FT_HANDLE ftHandle, UCHAR ucOption, LPWORD lpwValue)
ftd2xx.h:1386"""
FT_GetQueueStatusEx = _libraries['ftd2xx64.dll'].FT_GetQueueStatusEx
FT_GetQueueStatusEx.restype = FT_STATUS
# FT_GetQueueStatusEx(ftHandle, dwRxBytes)
FT_GetQueueStatusEx.argtypes = [FT_HANDLE, POINTER_T(ctypes.c_uint32)]
FT_GetQueueStatusEx.__doc__ = \
    """FT_STATUS FT_GetQueueStatusEx(FT_HANDLE ftHandle, LP_c_uint32 dwRxBytes)
ftd2xx.h:1393"""
FT_ComPortIdle = _libraries['ftd2xx64.dll'].FT_ComPortIdle
FT_ComPortIdle.restype = FT_STATUS
# FT_ComPortIdle(ftHandle)
FT_ComPortIdle.argtypes = [FT_HANDLE]
FT_ComPortIdle.__doc__ = \
    """FT_STATUS FT_ComPortIdle(FT_HANDLE ftHandle)
ftd2xx.h:1399"""
FT_ComPortCancelIdle = _libraries['ftd2xx64.dll'].FT_ComPortCancelIdle
FT_ComPortCancelIdle.restype = FT_STATUS
# FT_ComPortCancelIdle(ftHandle)
FT_ComPortCancelIdle.argtypes = [FT_HANDLE]
FT_ComPortCancelIdle.__doc__ = \
    """FT_STATUS FT_ComPortCancelIdle(FT_HANDLE ftHandle)
ftd2xx.h:1404"""
FT_VendorCmdGet = _libraries['ftd2xx64.dll'].FT_VendorCmdGet
FT_VendorCmdGet.restype = FT_STATUS
# FT_VendorCmdGet(ftHandle, Request, Buf, Len)
FT_VendorCmdGet.argtypes = [FT_HANDLE, UCHAR, POINTER_T(ctypes.c_ubyte), USHORT]
FT_VendorCmdGet.__doc__ = \
    """FT_STATUS FT_VendorCmdGet(FT_HANDLE ftHandle, UCHAR Request, LP_c_ubyte Buf, USHORT Len)
ftd2xx.h:1409"""
FT_VendorCmdSet = _libraries['ftd2xx64.dll'].FT_VendorCmdSet
FT_VendorCmdSet.restype = FT_STATUS
# FT_VendorCmdSet(ftHandle, Request, Buf, Len)
FT_VendorCmdSet.argtypes = [FT_HANDLE, UCHAR, POINTER_T(ctypes.c_ubyte), USHORT]
FT_VendorCmdSet.__doc__ = \
    """FT_STATUS FT_VendorCmdSet(FT_HANDLE ftHandle, UCHAR Request, LP_c_ubyte Buf, USHORT Len)
ftd2xx.h:1417"""
FT_VendorCmdGetEx = _libraries['ftd2xx64.dll'].FT_VendorCmdGetEx
FT_VendorCmdGetEx.restype = FT_STATUS
# FT_VendorCmdGetEx(ftHandle, wValue, Buf, Len)
FT_VendorCmdGetEx.argtypes = [FT_HANDLE, USHORT, POINTER_T(ctypes.c_ubyte), USHORT]
FT_VendorCmdGetEx.__doc__ = \
    """FT_STATUS FT_VendorCmdGetEx(FT_HANDLE ftHandle, USHORT wValue, LP_c_ubyte Buf, USHORT Len)
ftd2xx.h:1425"""
FT_VendorCmdSetEx = _libraries['ftd2xx64.dll'].FT_VendorCmdSetEx
FT_VendorCmdSetEx.restype = FT_STATUS
# FT_VendorCmdSetEx(ftHandle, wValue, Buf, Len)
FT_VendorCmdSetEx.argtypes = [FT_HANDLE, USHORT, POINTER_T(ctypes.c_ubyte), USHORT]
FT_VendorCmdSetEx.__doc__ = \
    """FT_STATUS FT_VendorCmdSetEx(FT_HANDLE ftHandle, USHORT wValue, LP_c_ubyte Buf, USHORT Len)
ftd2xx.h:1433"""
# Public API of this generated module: every exported struct, alias,
# constant and bound function. Generated list — order is not meaningful.
__all__ = \
    ['struct_ft_eeprom_232r', 'FT_SetDtr', 'FT_INVALID_BAUD_RATE',
    'FT_EEPROM_NOT_PRESENT', 'FT_DEVICE_232R', 'PULONG',
    'FT_GetBitMode', 'FT_EE_ReadECC', 'PFT_EEPROM_2232H',
    'FT_EEPROM_2232', 'FT_EE_UARead', 'FT_CyclePort',
    'FT_EEPROM_X_SERIES', 'FT_W32_ReadFile', 'FT_DEVICE_4222_PROG',
    'FT_WriteEE', 'struct_ft_eeprom_4232h', 'FT_VendorCmdGet',
    'FT_EE_ReadEx', 'FT_DEVICE_930', 'FT_EraseEE', 'PFT_EEPROM_4232H',
    'FT_DEVICE_NOT_FOUND', 'PFT_EEPROM_232B', 'FT_W32_SetCommMask',
    'PUCHAR', 'FT_SetBreakOff', 'FT_EE_ProgramEx',
    'FT_ComPortCancelIdle', 'c__Ea_FT_OK', 'PFT_EEPROM_X_SERIES',
    'struct__FTDCB', 'FT_W32_GetOverlappedResult',
    'FT_EEPROM_READ_FAILED', 'FT_SetWaitMask', 'FT_DEVICE',
    'FT_EE_Read', 'FT_W32_CancelIo', 'FT_DEVICE_NOT_OPENED',
    'FT_DEVICE_NOT_OPENED_FOR_ERASE', 'c__Ea_FT_FLAGS_OPENED',
    'FT_GetDeviceInfoDetail', 'union__OVERLAPPED_0', 'FT_ListDevices',
    'LPLONG', 'FT_W32_GetCommMask', 'FT_DEVICE_X_SERIES',
    'FT_W32_ClearCommBreak', 'FT_ClrRts', 'FT_INVALID_PARAMETER',
    'struct_ft_eeprom_232h', 'FT_GetDriverVersion',
    'FT_INSUFFICIENT_RESOURCES', 'FT_RestartInTask',
    'FT_W32_ClearCommError', 'FT_OTHER_ERROR', 'FT_SetRts',
    'FT_DEVICE_4222H_0', 'FT_GetQueueStatusEx',
    'FT_SetDataCharacteristics', 'struct_ft_eeprom_2232', 'PVOID',
    'FT_W32_GetCommModemStatus', 'FT_DEVICE_100AX',
    'FT_W32_WriteFile', 'FT_GetDeviceInfo', 'LPFTDCB',
    'FT_EEPROM_WRITE_FAILED', 'FT_W32_GetCommTimeouts',
    'PFT_PROGRAM_DATA', 'LPFTTIMEOUTS', 'FT_EEPROM_Read', 'BOOL',
    'FT_DEVICE_4222H_1_2', 'FT_DEVICE_LIST_INFO_NODE',
    'FT_GetComPortNumber', 'FT_INVALID_ARGS', 'FT_EE_WriteConfig',
    'struct_ft_program_data', 'FT_DEVICE_LIST_NOT_READY',
    'FT_WaitOnMask', 'FT_FAILED_TO_WRITE_DEVICE',
    'FT_SetDeadmanTimeout', 'FT_StopInTask', 'struct__FTCOMSTAT',
    'FT_EEPROM_NOT_PROGRAMMED', 'FT_GetModemStatus', 'LPDWORD',
    'struct_ft_eeprom_2232h', 'FT_SetFlowControl', 'FT_EEPROM_2232H',
    'PFT_EEPROM_2232', 'FT_EE_Program', 'FT_VendorCmdSet', 'FT_Purge',
    'LPCTSTR', 'FT_GetQueueStatus', 'FT_SetEventNotification',
    'FT_EEPROM_Program', 'FT_W32_PurgeComm', 'FT_GetLatencyTimer',
    'FT_DEVICE_232H', 'FT_SetDivisor', 'PCHAR', 'HANDLE',
    'struct_ft_eeprom_header', 'FTTIMEOUTS', 'FT_IO_ERROR',
    'FT_ReadEE', 'USHORT', 'struct_ft_eeprom_x_series', 'FT_STATUS',
    'FT_Close', 'struct__OVERLAPPED', 'FT_DEVICE_UMFTPD3A',
    'FT_W32_CreateFile', 'struct__ft_device_list_info_node',
    'FT_ComPortIdle', 'c__Ea_FT_DEVICE_BM', 'FT_Reload', 'WORD',
    'FT_EE_ReadConfig', 'FT_SetBaudRate', 'FT_EEPROM_232B', 'FT_OK',
    'ULONG', 'FT_OpenEx', 'FT_SetUSBParameters',
    'FT_W32_GetLastError', 'FT_W32_EscapeCommFunction', 'FT_Open',
    'FT_DEVICE_NOT_OPENED_FOR_WRITE', 'FT_SetChars',
    'FT_DEVICE_4232H', 'struct__FTTIMEOUTS', 'FT_DEVICE_BM',
    'FT_EEPROM_HEADER', 'struct__OVERLAPPED_0_0', 'FT_HANDLE',
    'PFT_EVENT_HANDLER', 'FT_ClrDtr', 'FT_W32_SetCommState',
    'FT_W32_WaitCommEvent', 'FT_GetLibraryVersion', 'FT_SetBitMode',
    'FT_DEVICE_AM', 'struct_ft_eeprom_232b', 'FT_EEPROM_232R',
    'FT_EEPROM_4232H', 'FT_Write', 'FT_W32_GetCommState',
    'FT_DEVICE_2232H', 'PFT_EEPROM_HEADER', 'FT_W32_CloseHandle',
    'PFT_EEPROM_232H', 'FT_W32_SetCommTimeouts', 'FT_EE_UASize',
    'LPVOID', 'FT_DEVICE_900', 'LPOVERLAPPED',
    'FT_CreateDeviceInfoList', 'LPSECURITY_ATTRIBUTES',
    'struct__SECURITY_ATTRIBUTES', 'FT_W32_SetupComm',
    'FT_VendorCmdGetEx', 'LPFTCOMSTAT', 'FT_VendorCmdSetEx',
    'FT_EEPROM_ERASE_FAILED', 'FT_PROGRAM_DATA',
    'FT_SetResetPipeRetryCount', 'UCHAR', 'FT_DEVICE_2232C',
    'FT_FLAGS_HISPEED', 'FT_DEVICE_UNKNOWN', 'FT_SetLatencyTimer',
    'FT_ResetDevice', 'FT_GetEventStatus', 'DWORD',
    'FT_INVALID_HANDLE', 'FT_GetStatus', 'FT_EE_UAWrite',
    'FT_SetBreakOn', 'FT_FLAGS_OPENED', 'FT_W32_SetCommBreak',
    'FT_Rescan', 'LPWORD', 'FT_DEVICE_4222H_3', 'FT_SetTimeouts',
    'PFT_EEPROM_232R', 'FT_IoCtl', 'FT_GetDeviceInfoList',
    'FT_NOT_SUPPORTED', 'FT_ResetPort', 'FTDCB', 'FT_EEPROM_232H',
    'FTCOMSTAT', 'FT_Read']
| [
"ctypes.CFUNCTYPE",
"ctypes.sizeof",
"ctypes.CDLL"
] | [((2139, 2166), 'ctypes.CDLL', 'ctypes.CDLL', (['"""ftd2xx64.dll"""'], {}), "('ftd2xx64.dll')\n", (2150, 2166), False, 'import ctypes\n'), ((288, 318), 'ctypes.sizeof', 'ctypes.sizeof', (['ctypes.c_void_p'], {}), '(ctypes.c_void_p)\n', (301, 318), False, 'import ctypes\n'), ((1965, 1999), 'ctypes.sizeof', 'ctypes.sizeof', (['ctypes.c_longdouble'], {}), '(ctypes.c_longdouble)\n', (1978, 1999), False, 'import ctypes\n'), ((4379, 4435), 'ctypes.CFUNCTYPE', 'ctypes.CFUNCTYPE', (['None', 'ctypes.c_uint32', 'ctypes.c_uint32'], {}), '(None, ctypes.c_uint32, ctypes.c_uint32)\n', (4395, 4435), False, 'import ctypes\n')] |
import torch
import torch.nn as nn
import torch.nn.functional as F
class TripletLoss(nn.Module):
    """Exponential triplet loss with a hinge at ``margin``.

    Anchor-positive and anchor-negative L2 distances are scaled by
    ``sigma``, exponentiated, and the positive part of their difference
    (plus the margin) is summed over the batch.
    """

    def __init__(self, margin=0.2, sigma=0.3):
        super(TripletLoss, self).__init__()
        self.margin = margin
        self.sigma = sigma

    def forward(self, f_anchor, f_positive, f_negative):
        # Inputs are (batch, channels) embedding matrices.
        scaled_ap = torch.norm(f_anchor - f_positive, dim=1) / self.sigma
        scaled_an = torch.norm(f_anchor - f_negative, dim=1) / self.sigma
        hinge = torch.exp(scaled_ap) - torch.exp(scaled_an) + self.margin
        # Only triplets violating the margin contribute (clamp at zero).
        return torch.clamp(hinge, 0).sum()
class MetricSoftmaxLoss(nn.Module):
    """Softmax-style metric loss over (anchor, positive, negative) triplets.

    For each triplet with anchor-positive distance ``d_ap`` and
    anchor-negative distance ``d_an``, the per-sample loss is

        -log(exp(d_an) / (exp(d_an) + exp(d_ap))) = softplus(d_ap - d_an)

    summed over the batch. The softplus form is used because the direct
    exp/log formulation overflows ``exp`` (producing inf/nan) once the
    distances exceed ~88 in float32; mathematically the two are identical.
    """

    def __init__(self):
        super(MetricSoftmaxLoss, self).__init__()

    def forward(self, f_anchor, f_positive, f_negative):
        # Inputs are (batch, channels) embedding matrices.
        d_ap = torch.norm(f_anchor - f_positive, dim=1)
        d_an = torch.norm(f_anchor - f_negative, dim=1)
        # Numerically stable equivalent of -log(softmax over {d_an, d_ap}).
        return F.softplus(d_ap - d_an).sum()
def hard_samples_mining(f_anchor, f_positive, f_negative, margin):
    """Return a boolean mask selecting the "hard" triplets in a batch.

    A triplet is hard when its anchor-positive distance minus its
    anchor-negative distance is still below ``margin``, i.e. the margin
    constraint is violated and the triplet carries useful gradient.
    """
    pos_dist = torch.norm(f_anchor - f_positive, dim=1)
    neg_dist = torch.norm(f_anchor - f_negative, dim=1)
    return (pos_dist - neg_dist) < margin
def renorm(x):
    """Rescale each row of ``x`` to (approximately) unit L2 norm.

    ``Tensor.renorm`` caps every row's norm at 1e-5 (rows already below the
    cap are left untouched), and the subsequent 1e5 factor brings capped
    rows back up to unit norm.
    """
    capped = x.renorm(p=2, dim=0, maxnorm=1e-5)
    return capped * 1e5
class MetricLoss(nn.Module):
    """Combined embedding loss: exponential triplet loss plus a weighted
    softmax metric loss, computed on L2 row-normalised embeddings.

    Parameters
    ----------
    margin : float
        Hinge margin forwarded to ``TripletLoss`` (also used by the removed
        hard-mining step, see note in ``forward``).
    sigma : float
        Distance scale forwarded to ``TripletLoss``.
    l : float
        Weight of the softmax term in the combined loss.
    """

    def __init__(self, margin=0.2, sigma=0.3, l=1.):
        super(MetricLoss, self).__init__()
        self.l = l
        self.margin = margin
        self.trip = TripletLoss(margin, sigma)
        self.soft = MetricSoftmaxLoss()

    def forward(self, f_anchor, f_positive, f_negative):
        # Normalise every embedding row to unit L2 norm before measuring
        # distances.
        f_anchor, f_positive, f_negative = (
            renorm(f_anchor),
            renorm(f_positive),
            renorm(f_negative),
        )
        # NOTE: a hard-sample mining mask (hard_samples_mining under
        # torch.no_grad) used to be computed here on every forward pass but
        # its result was never consumed — the filtered-loss variant had been
        # commented out. The dead computation has been removed; reintroduce
        # the mask if mining is revived.
        loss_trip = self.trip(f_anchor, f_positive, f_negative)
        loss_soft = self.soft(f_anchor, f_positive, f_negative)
        return loss_trip + self.l * loss_soft
if __name__ == "__main__":
    # Smoke test: run the combined loss on random 17-dim embeddings.
    x = torch.randn(3,17)
    y = torch.randn(3,17)
    z = torch.randn(3,17)
    loss_fn = MetricLoss()
    res = loss_fn(x,y,z)
| [
"torch.no_grad",
"torch.norm",
"torch.randn",
"torch.exp"
] | [((1002, 1042), 'torch.norm', 'torch.norm', (['(f_anchor - f_positive)'], {'dim': '(1)'}), '(f_anchor - f_positive, dim=1)\n', (1012, 1042), False, 'import torch\n'), ((1056, 1096), 'torch.norm', 'torch.norm', (['(f_anchor - f_negative)'], {'dim': '(1)'}), '(f_anchor - f_negative, dim=1)\n', (1066, 1096), False, 'import torch\n'), ((2187, 2205), 'torch.randn', 'torch.randn', (['(3)', '(17)'], {}), '(3, 17)\n', (2198, 2205), False, 'import torch\n'), ((2213, 2231), 'torch.randn', 'torch.randn', (['(3)', '(17)'], {}), '(3, 17)\n', (2224, 2231), False, 'import torch\n'), ((2239, 2257), 'torch.randn', 'torch.randn', (['(3)', '(17)'], {}), '(3, 17)\n', (2250, 2257), False, 'import torch\n'), ((736, 776), 'torch.norm', 'torch.norm', (['(f_anchor - f_positive)'], {'dim': '(1)'}), '(f_anchor - f_positive, dim=1)\n', (746, 776), False, 'import torch\n'), ((794, 834), 'torch.norm', 'torch.norm', (['(f_anchor - f_negative)'], {'dim': '(1)'}), '(f_anchor - f_negative, dim=1)\n', (804, 834), False, 'import torch\n'), ((327, 367), 'torch.norm', 'torch.norm', (['(f_anchor - f_positive)'], {'dim': '(1)'}), '(f_anchor - f_positive, dim=1)\n', (337, 367), False, 'import torch\n'), ((407, 447), 'torch.norm', 'torch.norm', (['(f_anchor - f_negative)'], {'dim': '(1)'}), '(f_anchor - f_negative, dim=1)\n', (417, 447), False, 'import torch\n'), ((1645, 1660), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (1658, 1660), False, 'import torch\n'), ((490, 505), 'torch.exp', 'torch.exp', (['d_ap'], {}), '(d_ap)\n', (499, 505), False, 'import torch\n'), ((508, 523), 'torch.exp', 'torch.exp', (['d_an'], {}), '(d_an)\n', (517, 523), False, 'import torch\n'), ((863, 878), 'torch.exp', 'torch.exp', (['d_an'], {}), '(d_an)\n', (872, 878), False, 'import torch\n'), ((882, 897), 'torch.exp', 'torch.exp', (['d_an'], {}), '(d_an)\n', (891, 897), False, 'import torch\n'), ((900, 915), 'torch.exp', 'torch.exp', (['d_ap'], {}), '(d_ap)\n', (909, 915), False, 'import torch\n')] |
# -*- coding: utf-8 -*-
# *****************************************************************************
# NICOS, the Networked Instrument Control System of the MLZ
# Copyright (c) 2009-2021 by the NICOS contributors (see AUTHORS)
#
# This program is free software; you can redistribute it and/or modify it under
# the terms of the GNU General Public License as published by the Free Software
# Foundation; either version 2 of the License, or (at your option) any later
# version.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
# details.
#
# You should have received a copy of the GNU General Public License along with
# this program; if not, write to the Free Software Foundation, Inc.,
# 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
#
# Module authors:
# <NAME> <<EMAIL>>
# <NAME> <<EMAIL>>
# <NAME> <<EMAIL>>
# <NAME> <<EMAIL>>
#
# *****************************************************************************
from nicos.core import ACCESS_LEVELS
def _access_level_list():
    """Return a comma-separated, repr-quoted list of all access level names."""
    return ', '.join(map(repr, ACCESS_LEVELS.values()))
def UserPassLevelAuthEntry(val=None):
    """Validate and normalise a (user, password, accesslevel) 3-tuple.

    * user: string (stripped of surrounding whitespace)
    * password: string (stripped of surrounding whitespace)
    * level: oneof(ACCESS_LEVELS), given either as a level name
          (currently: GUEST, USER, ADMIN) or as its numeric key

    Returns the normalised tuple with the level as a numeric key;
    raises ValueError for anything that does not validate.
    """
    val = list(val)
    if len(val) != 3:
        raise ValueError('UserPassLevelAuthEntry entry needs to be '
                         'a 3-tuple (name, password, accesslevel)')
    if not isinstance(val[0], str):
        raise ValueError('user name must be a string')
    val[0] = val[0].strip()
    if not isinstance(val[1], str):
        raise ValueError('user password must be a string')
    val[1] = val[1].strip()
    if isinstance(val[2], str):
        # Translate a symbolic level name into its numeric key.
        for i, name in ACCESS_LEVELS.items():
            if name == val[2].strip():
                val[2] = i
                break
        else:
            # for-else: no matching level name was found
            raise ValueError('access level must be one of %s' % _access_level_list())
    elif not isinstance(val[2], int):
        # neither a level name nor an integer key -> reject
        raise ValueError('access level must be one of %s' % _access_level_list())
    else:
        # for backwards compatibility: allow integer values as well,
        # but they must be valid ACCESS_LEVELS keys
        if val[2] not in ACCESS_LEVELS:
            raise ValueError('access level must be one of %s' % _access_level_list())
    return tuple(val)
def UserLevelAuthEntry(val=None):
    """Validate and normalise a (user, accesslevel) 2-tuple.

    * user: string
    * level: oneof(ACCESS_LEVELS)
          currently: GUEST, USER, ADMIN

    Delegates to UserPassLevelAuthEntry with an empty dummy password and
    strips the password from the result.
    """
    if len(val) != 2:
        raise ValueError('UserLevelAuthEntry entry needs to be a 2-tuple '
                         '(name, accesslevel)')
    name, accesslevel = val
    # Reuse the 3-tuple validator; the empty password is discarded.
    user, _password, level = UserPassLevelAuthEntry((name, '', accesslevel))
    return (user, level)
| [
"nicos.core.ACCESS_LEVELS.items",
"nicos.core.ACCESS_LEVELS.values"
] | [((1955, 1976), 'nicos.core.ACCESS_LEVELS.items', 'ACCESS_LEVELS.items', ([], {}), '()\n', (1974, 1976), False, 'from nicos.core import ACCESS_LEVELS\n'), ((1223, 1245), 'nicos.core.ACCESS_LEVELS.values', 'ACCESS_LEVELS.values', ([], {}), '()\n', (1243, 1245), False, 'from nicos.core import ACCESS_LEVELS\n')] |
#!/usr/bin/python3
import time
import os
import subprocess
import argparse
# constants
DATE_FORMAT = "%Y-%m-%d %H:%M"
ALERT_STRING = "alerted"
LUNCH_BREAK_DURATION = 1    # hours, assumed lunch break
INFO_WORKING_DURATION = 7   # hours worked before the info reminder
INFO_MESSAGE = "Time to finish open todos"
ALERT_WORKING_DURATION = 8  # hours worked before the go-home alert
ALERT_MESSAGE = "Time to go home :-)"

# parse command line arguments
parser = argparse.ArgumentParser(description='Tracks working time based on your first login time after 5am.')
parser.add_argument('-v', '--verbose', action='store_true', help='print stats')
parser.add_argument('-f', '--force', action='store_true', help='force dialog pop-up')
args = parser.parse_args()

current_timestamp = time.time()
logfolder_name = os.path.expanduser("~") + "/.log/activity"
# day begins at 5 am (to count night sessions into "previous" day)
logfile_name = logfolder_name \
    + "/" \
    + time.strftime("%Y-%m-%d", time.localtime(current_timestamp - 5*60*60)) \
    + ".log"
symlink_name = logfolder_name + "/latest.log"

# create logfolder if it doesn't exist; catch OSError instead of a bare
# except so that KeyboardInterrupt/SystemExit are not silently swallowed
try:
    os.mkdir(logfolder_name)
except OSError:
    pass

# create "latest" symlink to logfile (best effort: remove a stale one first)
try:
    os.remove(symlink_name)
except OSError:
    pass
os.symlink(logfile_name, symlink_name)

last_alert = None
with open(logfile_name, "a+") as logfile:
    # log current time together with a screen-lock indicator
    locked_screen = str(int(subprocess.check_output("ps -e | grep screenlocker | wc -l", shell=True)))
    new_line = time.strftime(DATE_FORMAT, time.localtime(current_timestamp)) \
        + "; " + locked_screen \
        + os.linesep
    logfile.write(new_line)
    # read start time: the first line of today's log is the first activity
    logfile.seek(0)
    log = logfile.readlines()
    start_timestamp = time.mktime(time.strptime(log[0].split("; ")[0], DATE_FORMAT))
    # remember the timestamp of the most recent alert (third field)
    for logline in log:
        logline_arr = logline[:-1].split("; ")
        if len(logline_arr) > 2 and str(logline_arr[2]) == ALERT_STRING:
            last_alert = time.mktime(time.strptime(logline_arr[0], DATE_FORMAT))

# produce a warning dialog every 30 min, if working longer than 8 hours (assumes cronjob every minute, 1 hour lunch break)
working_time = (current_timestamp - start_timestamp) / (60 * 60)
time_since_last_alert = 9999999999  # "effectively never alerted"
if last_alert is not None:
    time_since_last_alert = (current_timestamp - last_alert) / (60 * 60)
if args.verbose:
    print(time.strftime("start: " + DATE_FORMAT, time.localtime(start_timestamp)))
    print(time.strftime("current: " + DATE_FORMAT, time.localtime(current_timestamp)))
    print("working time:", working_time - 1., " (plus 1 hour est. lunch break)")
    print("time_since_last_alert:", time_since_last_alert)
if (working_time > min(INFO_WORKING_DURATION, ALERT_WORKING_DURATION) + LUNCH_BREAK_DURATION
        and time_since_last_alert >= 0.5) or args.force:
    # >3 matching processes means another instance already shows the dialog
    dialog_already_open = int(subprocess.check_output("ps -fe | grep cron_callback.py | wc -l", shell=True)) > 3
    if args.verbose:
        print("dialog_already_open: ", dialog_already_open)#, " str:", dialog_already_open_str)
    if not dialog_already_open:
        message = ("You are already working more than %0.1f hours! (plus %0.1f hour est. lunch break)"
                   % (working_time - LUNCH_BREAK_DURATION, LUNCH_BREAK_DURATION))
        if working_time > ALERT_WORKING_DURATION + LUNCH_BREAK_DURATION:
            message += "\n\n" + ALERT_MESSAGE
        elif working_time > INFO_WORKING_DURATION + LUNCH_BREAK_DURATION:
            message += "\n\n" + INFO_MESSAGE
        # attach the dialog to the currently focused window
        active_window = str(int(subprocess.check_output("xdotool getwindowfocus", shell=True)))
        subprocess.check_call("kdialog --sorry '%s' --attach %s" % (message, active_window), shell=True)
        with open(logfile_name, "a") as logfile:
            # log the alert so the 30-minute cool-down can be enforced
            locked_screen = str(int(subprocess.check_output("ps -e | grep screenlocker | wc -l", shell=True)))
            new_line = time.strftime(DATE_FORMAT, time.localtime(current_timestamp)) \
                + "; " + locked_screen \
                + "; " + ALERT_STRING \
                + os.linesep
            logfile.write(new_line)
| [
"subprocess.check_output",
"time.strptime",
"argparse.ArgumentParser",
"subprocess.check_call",
"os.symlink",
"os.mkdir",
"time.localtime",
"time.time",
"os.path.expanduser",
"os.remove"
] | [((347, 452), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Tracks working time based on your first login time after 5am."""'}), "(description=\n 'Tracks working time based on your first login time after 5am.')\n", (370, 452), False, 'import argparse\n'), ((664, 675), 'time.time', 'time.time', ([], {}), '()\n', (673, 675), False, 'import time\n'), ((1198, 1236), 'os.symlink', 'os.symlink', (['logfile_name', 'symlink_name'], {}), '(logfile_name, symlink_name)\n', (1208, 1236), False, 'import os\n'), ((693, 716), 'os.path.expanduser', 'os.path.expanduser', (['"""~"""'], {}), "('~')\n", (711, 716), False, 'import os\n'), ((1068, 1092), 'os.mkdir', 'os.mkdir', (['logfolder_name'], {}), '(logfolder_name)\n', (1076, 1092), False, 'import os\n'), ((1157, 1180), 'os.remove', 'os.remove', (['symlink_name'], {}), '(symlink_name)\n', (1166, 1180), False, 'import os\n'), ((3597, 3697), 'subprocess.check_call', 'subprocess.check_call', (['("kdialog --sorry \'%s\' --attach %s" % (message, active_window))'], {'shell': '(True)'}), '("kdialog --sorry \'%s\' --attach %s" % (message,\n active_window), shell=True)\n', (3618, 3697), False, 'import subprocess\n'), ((902, 949), 'time.localtime', 'time.localtime', (['(current_timestamp - 5 * 60 * 60)'], {}), '(current_timestamp - 5 * 60 * 60)\n', (916, 949), False, 'import time\n'), ((1350, 1422), 'subprocess.check_output', 'subprocess.check_output', (['"""ps -e | grep screenlocker | wc -l"""'], {'shell': '(True)'}), "('ps -e | grep screenlocker | wc -l', shell=True)\n", (1373, 1422), False, 'import subprocess\n'), ((2396, 2427), 'time.localtime', 'time.localtime', (['start_timestamp'], {}), '(start_timestamp)\n', (2410, 2427), False, 'import time\n'), ((2481, 2514), 'time.localtime', 'time.localtime', (['current_timestamp'], {}), '(current_timestamp)\n', (2495, 2514), False, 'import time\n'), ((2835, 2912), 'subprocess.check_output', 'subprocess.check_output', (['"""ps -fe | grep cron_callback.py | wc 
-l"""'], {'shell': '(True)'}), "('ps -fe | grep cron_callback.py | wc -l', shell=True)\n", (2858, 2912), False, 'import subprocess\n'), ((1959, 2001), 'time.strptime', 'time.strptime', (['logline_arr[0]', 'DATE_FORMAT'], {}), '(logline_arr[0], DATE_FORMAT)\n', (1972, 2001), False, 'import time\n'), ((3525, 3586), 'subprocess.check_output', 'subprocess.check_output', (['"""xdotool getwindowfocus"""'], {'shell': '(True)'}), "('xdotool getwindowfocus', shell=True)\n", (3548, 3586), False, 'import subprocess\n'), ((1467, 1500), 'time.localtime', 'time.localtime', (['current_timestamp'], {}), '(current_timestamp)\n', (1481, 1500), False, 'import time\n'), ((3808, 3880), 'subprocess.check_output', 'subprocess.check_output', (['"""ps -e | grep screenlocker | wc -l"""'], {'shell': '(True)'}), "('ps -e | grep screenlocker | wc -l', shell=True)\n", (3831, 3880), False, 'import subprocess\n'), ((3933, 3966), 'time.localtime', 'time.localtime', (['current_timestamp'], {}), '(current_timestamp)\n', (3947, 3966), False, 'import time\n')] |
import numpy as np
import tensorflow as tf
from sklearn.metrics import precision_recall_curve, roc_curve
from sklearn.metrics import average_precision_score, auc
from load_data import load_data
from model import build_model, compileModel, build_model_CNN
from numpy import interp
from itertools import cycle
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
def main():
    """Plot ROC and PR curves for pre-trained models at several sequence lengths.

    For every (gene, condition, length) combination: load the test split,
    restore the matching model weights from disk, predict scores, and
    accumulate ROC/PR statistics; finally both curve families are written
    to PNG files under /home/yuxuan/dp_m6a_org/plot/.
    """
    # gene = ['eif3a', 'YTHDF3','YTHDF1','YTHDF2']
    # gene = ['YTHDF1']
    gene = ['eif3a']
    # gene = ['YTHDC1','YTHDC2']
    # gene = ['YTHDC1']
    # condition = ['Exon', 'Full']
    condition = ['Exon']
    # length = ['1000', '500', '250', '125']
    length = ['125', '250', '500', '1000']
    mode = 'CNN+RNN'
    # Per-curve statistics keyed by a running counter ("epoch" is really a
    # curve index, not a training epoch).
    fpr = dict()
    tpr = dict()
    roc_auc = dict()
    precision = dict()
    recall = dict()
    average_precision = dict()
    name = dict()
    epoch = 0
    lw = 2  # line width for all plots
    for x in gene:
        for y in condition:
            for z in length:
                data_path = '/home/yuxuan/dp/longer_seq_data/{}_{}_{}.csv'.format(x, y, z)
                _, x_test, _, y_test, _, _ = load_data(data_path)
                # Build the architecture that matches the checkpoint on disk.
                if mode == 'CNN+RNN':
                    model = build_model(x_test)
                    checkpoint_path = '/home/yuxuan/dp/model/{}_{}_{}_CRNNmodel_test.h5'.format(x, y, z)
                else:
                    model = build_model_CNN(x_test)
                    checkpoint_path = '/home/yuxuan/dp/model/{}_{}_{}_best_model.h5'.format(x, y, z)
                print(checkpoint_path)
                model.load_weights(checkpoint_path)
                y_score = model.predict(x_test)
                ## PR curve
                precision[epoch], recall[epoch], _ = precision_recall_curve(y_true=y_test, probas_pred=y_score)
                average_precision[epoch] = average_precision_score(y_true=y_test, y_score=y_score)
                ## ROC curve
                fpr[epoch], tpr[epoch], _ = roc_curve(y_true=y_test, y_score=y_score)
                roc_auc[epoch] = auc(fpr[epoch], tpr[epoch])
                # Curve label: gene, condition and full window size (2*z+1).
                name[epoch]='{}_{}_{}'.format(x.upper(),y,(int(z)*2+1))
                epoch = epoch + 1
    ## ROC plotting
    colors = cycle(['aqua', 'darkorange', 'cornflowerblue'])
    for i, color in zip(range(epoch), colors):
        plt.plot(fpr[i], tpr[i], color=color, lw=lw,
                 label='ROC curve of {}(area = {:.2f})'
                 ''.format(name[i], roc_auc[i]))
    # Diagonal chance line.
    plt.plot([0, 1], [0, 1], 'k--', lw=lw)
    plt.xlim([0.0, 1.0])
    plt.ylim([0.0, 1.05])
    plt.xlabel('False Positive Rate')
    plt.ylabel('True Positive Rate')
    plt.title(' ROC of EIF3A mature transcripts in different length')
    plt.legend(loc="lower right")
    # plt.show()
    plt.savefig('/home/yuxuan/dp_m6a_org/plot/ROC(RNN_all)_exon.png',
                format='png')
    plt.cla()
    # plt.figure(figsize=(7, 8))
    ## PR curve plotting
    colors = cycle(['navy', 'turquoise', 'darkorange', 'cornflowerblue', 'teal'])
    lines = []
    labels = []
    for i, color in zip(range(epoch), colors):
        l, = plt.plot(recall[i], precision[i], color=color, lw=2)
        lines.append(l)
        labels.append('PRAUC for {0} (area = {1:0.2f})'
                      ''.format(name[i], average_precision[i]))
    fig = plt.gcf()
    fig.subplots_adjust(bottom=0.25)
    plt.xlim([0.0, 1.0])
    plt.ylim([0.0, 1.05])
    plt.xlabel('Recall')
    plt.ylabel('Precision')
    plt.title('Precision-Recall curve to EIF3A mature transcript in different lengths')
    plt.legend(lines, labels, loc="lower right")
    plt.savefig('/home/yuxuan/dp_m6a_org/plot/PR_Curve(RNN_all)_exon.png',
                format='png')
if __name__ == '__main__':
    main()
| [
"matplotlib.pyplot.ylabel",
"sklearn.metrics.auc",
"sklearn.metrics.roc_curve",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.plot",
"matplotlib.pyplot.ylim",
"matplotlib.pyplot.cla",
"itertools.cycle",
"matplotlib.pyplot.savefig",
"matplotlib.use",
"matplotlib.pyplot.gcf",
"sklearn.metrics.a... | [((327, 348), 'matplotlib.use', 'matplotlib.use', (['"""Agg"""'], {}), "('Agg')\n", (341, 348), False, 'import matplotlib\n'), ((2196, 2243), 'itertools.cycle', 'cycle', (["['aqua', 'darkorange', 'cornflowerblue']"], {}), "(['aqua', 'darkorange', 'cornflowerblue'])\n", (2201, 2243), False, 'from itertools import cycle\n'), ((2460, 2498), 'matplotlib.pyplot.plot', 'plt.plot', (['[0, 1]', '[0, 1]', '"""k--"""'], {'lw': 'lw'}), "([0, 1], [0, 1], 'k--', lw=lw)\n", (2468, 2498), True, 'import matplotlib.pyplot as plt\n'), ((2503, 2523), 'matplotlib.pyplot.xlim', 'plt.xlim', (['[0.0, 1.0]'], {}), '([0.0, 1.0])\n', (2511, 2523), True, 'import matplotlib.pyplot as plt\n'), ((2528, 2549), 'matplotlib.pyplot.ylim', 'plt.ylim', (['[0.0, 1.05]'], {}), '([0.0, 1.05])\n', (2536, 2549), True, 'import matplotlib.pyplot as plt\n'), ((2554, 2587), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""False Positive Rate"""'], {}), "('False Positive Rate')\n", (2564, 2587), True, 'import matplotlib.pyplot as plt\n'), ((2592, 2624), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""True Positive Rate"""'], {}), "('True Positive Rate')\n", (2602, 2624), True, 'import matplotlib.pyplot as plt\n'), ((2629, 2694), 'matplotlib.pyplot.title', 'plt.title', (['""" ROC of EIF3A mature transcripts in different length"""'], {}), "(' ROC of EIF3A mature transcripts in different length')\n", (2638, 2694), True, 'import matplotlib.pyplot as plt\n'), ((2699, 2728), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'loc': '"""lower right"""'}), "(loc='lower right')\n", (2709, 2728), True, 'import matplotlib.pyplot as plt\n'), ((2750, 2829), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""/home/yuxuan/dp_m6a_org/plot/ROC(RNN_all)_exon.png"""'], {'format': '"""png"""'}), "('/home/yuxuan/dp_m6a_org/plot/ROC(RNN_all)_exon.png', format='png')\n", (2761, 2829), True, 'import matplotlib.pyplot as plt\n'), ((2850, 2859), 'matplotlib.pyplot.cla', 'plt.cla', ([], {}), '()\n', (2857, 2859), 
True, 'import matplotlib.pyplot as plt\n'), ((2932, 3000), 'itertools.cycle', 'cycle', (["['navy', 'turquoise', 'darkorange', 'cornflowerblue', 'teal']"], {}), "(['navy', 'turquoise', 'darkorange', 'cornflowerblue', 'teal'])\n", (2937, 3000), False, 'from itertools import cycle\n'), ((3302, 3311), 'matplotlib.pyplot.gcf', 'plt.gcf', ([], {}), '()\n', (3309, 3311), True, 'import matplotlib.pyplot as plt\n'), ((3353, 3373), 'matplotlib.pyplot.xlim', 'plt.xlim', (['[0.0, 1.0]'], {}), '([0.0, 1.0])\n', (3361, 3373), True, 'import matplotlib.pyplot as plt\n'), ((3378, 3399), 'matplotlib.pyplot.ylim', 'plt.ylim', (['[0.0, 1.05]'], {}), '([0.0, 1.05])\n', (3386, 3399), True, 'import matplotlib.pyplot as plt\n'), ((3404, 3424), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Recall"""'], {}), "('Recall')\n", (3414, 3424), True, 'import matplotlib.pyplot as plt\n'), ((3429, 3452), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Precision"""'], {}), "('Precision')\n", (3439, 3452), True, 'import matplotlib.pyplot as plt\n'), ((3457, 3545), 'matplotlib.pyplot.title', 'plt.title', (['"""Precision-Recall curve to EIF3A mature transcript in different lengths"""'], {}), "(\n 'Precision-Recall curve to EIF3A mature transcript in different lengths')\n", (3466, 3545), True, 'import matplotlib.pyplot as plt\n'), ((3545, 3589), 'matplotlib.pyplot.legend', 'plt.legend', (['lines', 'labels'], {'loc': '"""lower right"""'}), "(lines, labels, loc='lower right')\n", (3555, 3589), True, 'import matplotlib.pyplot as plt\n'), ((3594, 3682), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""/home/yuxuan/dp_m6a_org/plot/PR_Curve(RNN_all)_exon.png"""'], {'format': '"""png"""'}), "('/home/yuxuan/dp_m6a_org/plot/PR_Curve(RNN_all)_exon.png',\n format='png')\n", (3605, 3682), True, 'import matplotlib.pyplot as plt\n'), ((3093, 3145), 'matplotlib.pyplot.plot', 'plt.plot', (['recall[i]', 'precision[i]'], {'color': 'color', 'lw': '(2)'}), '(recall[i], precision[i], color=color, lw=2)\n', (3101, 3145), 
True, 'import matplotlib.pyplot as plt\n'), ((1106, 1126), 'load_data.load_data', 'load_data', (['data_path'], {}), '(data_path)\n', (1115, 1126), False, 'from load_data import load_data\n'), ((1720, 1778), 'sklearn.metrics.precision_recall_curve', 'precision_recall_curve', ([], {'y_true': 'y_test', 'probas_pred': 'y_score'}), '(y_true=y_test, probas_pred=y_score)\n', (1742, 1778), False, 'from sklearn.metrics import precision_recall_curve, roc_curve\n'), ((1822, 1877), 'sklearn.metrics.average_precision_score', 'average_precision_score', ([], {'y_true': 'y_test', 'y_score': 'y_score'}), '(y_true=y_test, y_score=y_score)\n', (1845, 1877), False, 'from sklearn.metrics import average_precision_score, auc\n'), ((1953, 1994), 'sklearn.metrics.roc_curve', 'roc_curve', ([], {'y_true': 'y_test', 'y_score': 'y_score'}), '(y_true=y_test, y_score=y_score)\n', (1962, 1994), False, 'from sklearn.metrics import precision_recall_curve, roc_curve\n'), ((2028, 2055), 'sklearn.metrics.auc', 'auc', (['fpr[epoch]', 'tpr[epoch]'], {}), '(fpr[epoch], tpr[epoch])\n', (2031, 2055), False, 'from sklearn.metrics import average_precision_score, auc\n'), ((1194, 1213), 'model.build_model', 'build_model', (['x_test'], {}), '(x_test)\n', (1205, 1213), False, 'from model import build_model, compileModel, build_model_CNN\n'), ((1369, 1392), 'model.build_model_CNN', 'build_model_CNN', (['x_test'], {}), '(x_test)\n', (1384, 1392), False, 'from model import build_model, compileModel, build_model_CNN\n')] |
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import scripts.conll18_ud_eval as ud_eval
from scripts.reinsert_compounds import reinsert_compounds
def evaluate(gold_filename, sys_filename, metric):
    """Return the F1 score of ``metric`` for a system CoNLL-U file.

    First reinserts compound (multi-word) tokens into the system output so
    it aligns with the gold file, then runs the CoNLL-2018 UD evaluation
    script. ``metric`` must be a key of the evaluation dict produced by
    conll18_ud_eval (presumably e.g. 'LAS'/'UAS' -- confirm against that
    script's metric names).
    """
    reinsert_compounds(gold_filename, sys_filename)
    gold_conllu_file = ud_eval.load_conllu_file(gold_filename)
    sys_conllu_file = ud_eval.load_conllu_file(sys_filename)
    evaluation = ud_eval.evaluate(gold_conllu_file, sys_conllu_file)
    return evaluation[metric].f1
| [
"scripts.conll18_ud_eval.load_conllu_file",
"scripts.reinsert_compounds.reinsert_compounds",
"scripts.conll18_ud_eval.evaluate"
] | [((276, 323), 'scripts.reinsert_compounds.reinsert_compounds', 'reinsert_compounds', (['gold_filename', 'sys_filename'], {}), '(gold_filename, sys_filename)\n', (294, 323), False, 'from scripts.reinsert_compounds import reinsert_compounds\n'), ((345, 384), 'scripts.conll18_ud_eval.load_conllu_file', 'ud_eval.load_conllu_file', (['gold_filename'], {}), '(gold_filename)\n', (369, 384), True, 'import scripts.conll18_ud_eval as ud_eval\n'), ((405, 443), 'scripts.conll18_ud_eval.load_conllu_file', 'ud_eval.load_conllu_file', (['sys_filename'], {}), '(sys_filename)\n', (429, 443), True, 'import scripts.conll18_ud_eval as ud_eval\n'), ((459, 510), 'scripts.conll18_ud_eval.evaluate', 'ud_eval.evaluate', (['gold_conllu_file', 'sys_conllu_file'], {}), '(gold_conllu_file, sys_conllu_file)\n', (475, 510), True, 'import scripts.conll18_ud_eval as ud_eval\n')] |
# --------------
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
# Code starts here
df = pd.read_csv(path)
df["state"] = df["state"].apply(lambda x: x.lower())
df['total'] = df['Jan'] + df['Feb'] + df['Mar']
# Column totals for every column; note that summing object columns such as
# 'state' concatenates strings -- harmless, the totals row is only read
# through the numeric columns.
sum_var = {col: df[col].sum() for col in df}
sum_row = pd.DataFrame(sum_var, index=[0])
# DataFrame.append was removed in pandas 2.0; pd.concat builds the same
# frame (the totals row keeps its index 0).
df_final = pd.concat([df, sum_row])
# Code ends here
# --------------
import requests
# Code starts here
url = "https://en.wikipedia.org/wiki/List_of_U.S._state_abbreviations"
response = requests.get(url)
df1 = pd.read_html(response.content)[0]
df1 = df1.iloc[11:, :]
# Promote the first remaining row to become the column header.
df1 = df1.rename(columns=df1.iloc[0, :]).iloc[1:, :]
df1['United States of America'] = df1['United States of America'].apply(lambda x: x.replace(" ", "")).astype(object)
# Code ends here
# --------------
df1['United States of America'] = df1['United States of America'].astype(str).apply(lambda x: x.lower())
df1['US'] = df1['US'].astype(str)
# Code starts here
# Map full state names to their two-letter abbreviations.
mapping = dict(zip(df1['United States of America'], df1['US']))
df_final.insert(loc=6, column='abbr', value=df_final['state'].map(mapping))
# Code ends here
# --------------
# Code starts here
# Patch two rows whose state names did not match the Wikipedia spelling.
df_final.at[6, 'abbr'] = 'MS'
df_final.at[10, 'abbr'] = 'TN'
# Code ends here
# --------------
# Code starts here
# Calculate the total amount per abbreviation
df_sub = df_final[["abbr", "Jan", "Feb", "Mar", "total"]].groupby("abbr").sum()
print(df_sub.shape)
# Add the $ symbol
formatted_df = df_sub.applymap(lambda x: "${:,.0f}".format(x))
print(formatted_df)
# Code ends here
# --------------
# Code starts here
# Calculate the column-wise sums for the grand-total row
sum_row = df_sub[["Jan", "Feb", "Mar", "total"]].sum()
df_sub_sum = pd.DataFrame(data=sum_row).T
# apply $ to the sum
df_sub_sum = df_sub_sum.applymap(lambda x: "${:,.0f}".format(x))
# append the sum (pd.concat replaces the removed DataFrame.append)
print(formatted_df)
final_table = pd.concat([formatted_df, df_sub_sum])
print(final_table)
# rename the index
final_table = final_table.rename(index={0: "Total"})
print(final_table)
# Code ends here
# --------------
# Code starts here
# BUG FIX: the per-state total must be summed across the month columns
# (axis=1); the previous default (axis=0) returned a column-total Series
# whose index does not align with df_sub, silently filling 'total' with NaN.
df_sub['total'] = df_sub[["Jan", "Feb", "Mar"]].sum(axis=1)
df_sub['total'].plot.pie(y='total', figsize=(5, 5))
# Code ends here
| [
"pandas.DataFrame",
"requests.get",
"pandas.read_html",
"pandas.read_csv"
] | [((134, 151), 'pandas.read_csv', 'pd.read_csv', (['path'], {}), '(path)\n', (145, 151), True, 'import pandas as pd\n'), ((296, 328), 'pandas.DataFrame', 'pd.DataFrame', (['sum_var'], {'index': '[0]'}), '(sum_var, index=[0])\n', (308, 328), True, 'import pandas as pd\n'), ((508, 525), 'requests.get', 'requests.get', (['url'], {}), '(url)\n', (520, 525), False, 'import requests\n'), ((530, 560), 'pandas.read_html', 'pd.read_html', (['response.content'], {}), '(response.content)\n', (542, 560), True, 'import pandas as pd\n'), ((1652, 1678), 'pandas.DataFrame', 'pd.DataFrame', ([], {'data': 'sum_row'}), '(data=sum_row)\n', (1664, 1678), True, 'import pandas as pd\n')] |
from injector import inject
from domain.connection.CheckDatabaseConnection.CheckDatabaseConnectionCommand import CheckDatabaseConnectionCommand
from domain.connection.CheckDatabaseConnection.CheckDatabaseConnectionRequest import CheckDatabaseConnectionRequest
from infrastructure.api.ResourceBase import ResourceBase
from infrastructure.api.decorators.Controller import controller
from infrastructure.cqrs.Dispatcher import Dispatcher
@controller()
class CheckConnectionDatabaseResource(ResourceBase):
    """API resource that triggers a database-connection check via CQRS."""
    @inject
    def __init__(self,
                 dispatcher: Dispatcher,
                 *args, **kwargs):
        super().__init__(*args, **kwargs)
        # CQRS dispatcher used to route commands to their handlers
        self.dispatcher = dispatcher

    def post(self, req: CheckDatabaseConnectionRequest):
        """
        Check Database Connection
        """
        # Wrap the request in a command and hand it to the dispatcher;
        # the handler performs the actual connection attempt.
        command = CheckDatabaseConnectionCommand(request=req)
        self.dispatcher.dispatch(command)
"domain.connection.CheckDatabaseConnection.CheckDatabaseConnectionCommand.CheckDatabaseConnectionCommand",
"infrastructure.api.decorators.Controller.controller"
] | [((439, 451), 'infrastructure.api.decorators.Controller.controller', 'controller', ([], {}), '()\n', (449, 451), False, 'from infrastructure.api.decorators.Controller import controller\n'), ((829, 872), 'domain.connection.CheckDatabaseConnection.CheckDatabaseConnectionCommand.CheckDatabaseConnectionCommand', 'CheckDatabaseConnectionCommand', ([], {'request': 'req'}), '(request=req)\n', (859, 872), False, 'from domain.connection.CheckDatabaseConnection.CheckDatabaseConnectionCommand import CheckDatabaseConnectionCommand\n')] |
#! /usr/bin/env python
#
# Copyright (c) 2013, <NAME>
# Imperial College London
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of Imperial College London nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
#
class Constants:
    """Configuration constants for the deadlock companion node."""
    # name under which this ROS node registers
    node = "simple_action_server_deadlock_companion"
    # action namespace the SimpleActionClient connects to
    topic = "deadlock"
    # upper bound (seconds) of the random wait before a goal is cancelled
    max_action_duration = 3
import random
import actionlib
from actionlib.msg import TestAction, TestGoal
from actionlib_msgs.msg import GoalStatus
import rospy
class DeadlockCompanion:
    """Stress client that floods an action server with short-lived goals.

    Goals still pending or active after a random wait are cancelled,
    which is intended to provoke deadlocks in SimpleActionServer
    implementations (see the companion node's name).
    """
    def __init__(self):
        # Seed random with fully resolved name of node and current time
        random.seed(rospy.get_name() + str(rospy.Time.now().to_sec()))
        # Create actionlib client
        self.action_client = actionlib.SimpleActionClient(
            Constants.topic,
            TestAction)
    def run(self):
        """Send dummy goals until ROS shuts down, cancelling unfinished ones."""
        while not rospy.is_shutdown():
            # Send dummy goal
            self.action_client.send_goal(TestGoal())
            # Wait for a random amount of time
            action_duration = random.uniform(0, Constants.max_action_duration)
            self.action_client.wait_for_result(rospy.Duration(action_duration))
            # Cancel the goal if the server has not finished it in time.
            state = self.action_client.get_state()
            if state == GoalStatus.ACTIVE or state == GoalStatus.PENDING:
                self.action_client.cancel_goal()
if __name__ == '__main__':
    rospy.init_node(Constants.node)
    try:
        companion = DeadlockCompanion()
        companion.run()
    except (KeyboardInterrupt, SystemExit):
        # Re-raise so manual interruption still terminates the process.
        raise
    except:
        # NOTE(review): deliberate best-effort shutdown -- the bare except
        # swallows everything else (presumably ROS shutdown exceptions such
        # as ROSInterruptException); consider narrowing the clause.
        pass
| [
"random.uniform",
"actionlib.msg.TestGoal",
"rospy.is_shutdown",
"rospy.init_node",
"rospy.Time.now",
"rospy.get_name",
"rospy.Duration",
"actionlib.SimpleActionClient"
] | [((2776, 2807), 'rospy.init_node', 'rospy.init_node', (['Constants.node'], {}), '(Constants.node)\n', (2791, 2807), False, 'import rospy\n'), ((2136, 2193), 'actionlib.SimpleActionClient', 'actionlib.SimpleActionClient', (['Constants.topic', 'TestAction'], {}), '(Constants.topic, TestAction)\n', (2164, 2193), False, 'import actionlib\n'), ((2257, 2276), 'rospy.is_shutdown', 'rospy.is_shutdown', ([], {}), '()\n', (2274, 2276), False, 'import rospy\n'), ((2439, 2487), 'random.uniform', 'random.uniform', (['(0)', 'Constants.max_action_duration'], {}), '(0, Constants.max_action_duration)\n', (2453, 2487), False, 'import random\n'), ((2021, 2037), 'rospy.get_name', 'rospy.get_name', ([], {}), '()\n', (2035, 2037), False, 'import rospy\n'), ((2349, 2359), 'actionlib.msg.TestGoal', 'TestGoal', ([], {}), '()\n', (2357, 2359), False, 'from actionlib.msg import TestAction, TestGoal\n'), ((2535, 2566), 'rospy.Duration', 'rospy.Duration', (['action_duration'], {}), '(action_duration)\n', (2549, 2566), False, 'import rospy\n'), ((2044, 2060), 'rospy.Time.now', 'rospy.Time.now', ([], {}), '()\n', (2058, 2060), False, 'import rospy\n')] |
from base_expansion import base_expansion as be
def binary_addition(num1, num2):
    """Return the binary representation of ``num1 + num2`` as a string.

    Implements classic digit-by-digit base-2 addition with a carry
    (rather than relying on integer addition), for non-negative ints.

    Fixes two defects of the previous version:
    * the carry loop stopped one digit short (``range(length - 1)``),
      dropping the top digit of the sum (e.g. 1 + 1 produced "0"), and
    * the final carry was appended unconditionally, leaving a spurious
      leading zero whenever there was no carry out.
    """
    # Digit lists, least-significant digit first, via the stdlib formatter.
    a = [int(d) for d in reversed(format(num1, 'b'))]
    b = [int(d) for d in reversed(format(num2, 'b'))]
    result = []
    carry = 0
    for i in range(max(len(a), len(b))):
        da = a[i] if i < len(a) else 0
        db = b[i] if i < len(b) else 0
        total = da + db + carry
        result.append(total % 2)
        carry = total // 2
    if carry:  # only emit the carry-out digit when it is set
        result.append(carry)
    return ''.join(str(d) for d in reversed(result))
def _helper(num):
    """Return num's base-2 digits as a list of ints, least-significant first."""
    return [int(digit) for digit in reversed(str(be(num, 2)))]
def _get(l, idx, default=0):
    """Return ``l[idx]``, or ``default`` when the index is out of range."""
    if -len(l) <= idx < len(l):
        return l[idx]
    return default
if __name__ == "__main__":
num1 = int(input("Enter the 1st number: "))
num2 = int(input("Enter the 2nd number: "))
res = binary_addition(num1, num2)
print(res)
| [
"base_expansion.base_expansion"
] | [((544, 554), 'base_expansion.base_expansion', 'be', (['num', '(2)'], {}), '(num, 2)\n', (546, 554), True, 'from base_expansion import base_expansion as be\n')] |
"""Test the logger extension module."""
# pylint: disable=protected-access,redefined-outer-name,unused-variable,invalid-name
import importlib
import json
import sys
import google
import pytest
from marshmallow import fields
from conftest import app
from luckycharms import base
from protobuffers import proto
def setup_module():
    """Set up tests.

    Simulate an environment in which protobuf is not installed: setting a
    ``sys.modules`` entry to ``None`` makes any later ``import`` of that
    name raise ImportError, and reloading ``luckycharms.base`` forces its
    import-time protobuf detection to run again under that condition.
    """
    # Force luckycharms.base to load without protobuf in the environment
    sys.modules['google'] = None
    sys.modules['google.protobuf'] = None
    sys.modules['google.protobuf.message'] = None
    importlib.reload(base)
def teardown_module():
    """Tear down tests.

    Restore the real protobuf entries in ``sys.modules`` (stubbed out by
    :func:`setup_module`) and reload ``luckycharms.base`` so that any later
    test modules see the normal, protobuf-enabled environment.
    """
    # pylint: disable=invalid-name
    sys.modules['google'] = google
    sys.modules['google.protobuf'] = google.protobuf
    sys.modules['google.protobuf.message'] = google.protobuf.message
    # Reload luckycharms.base to restore sys.modules to correct state
    importlib.reload(base)
def test_without_proto():
    """Without protobuf installed: JSON schemas work, proto-config schemas fail loudly."""
    # A plain schema (no protobuffer config) must still serialize via JSON.
    class TestSchema(base.BaseModelSchema):
        a = fields.Integer()
        b = fields.String()
    @TestSchema()
    def business_logic(*args, **kwargs):
        return {
            'a': 1,
            'b': 'One'
        }
    with app.test_request_context('/'):
        result = business_logic()
        assert json.loads(result) == {
            'a': 1,
            'b': 'One'
        }
    # A schema that requests protobuffer serialization must raise at
    # decoration time, because setup_module() removed protobuf.
    class TestSchema(base.BaseModelSchema):
        a = fields.Integer()
        b = fields.String()
        config = {
            'protobuffers': {
                'load': proto.Test(),
                'dump': proto.Test(),
                'load_many': proto.Test(),
                'dump_many': proto.TestCollection()
            }
        }
    with pytest.raises(Exception) as excinfo:
        @TestSchema()
        def business_logic(*args, **kwargs):
            return {
                'a': 1,
                'b': 'One'
            }
    assert excinfo.exconly() == "Exception: protobuffer libraries not installed; please install " \
                                "luckycharms with extra 'proto' (for example, pip install luckycharms[proto])"
| [
"json.loads",
"protobuffers.proto.TestCollection",
"conftest.app.test_request_context",
"protobuffers.proto.Test",
"pytest.raises",
"importlib.reload",
"marshmallow.fields.String",
"marshmallow.fields.Integer"
] | [((559, 581), 'importlib.reload', 'importlib.reload', (['base'], {}), '(base)\n', (575, 581), False, 'import importlib\n'), ((900, 922), 'importlib.reload', 'importlib.reload', (['base'], {}), '(base)\n', (916, 922), False, 'import importlib\n'), ((1008, 1024), 'marshmallow.fields.Integer', 'fields.Integer', ([], {}), '()\n', (1022, 1024), False, 'from marshmallow import fields\n'), ((1037, 1052), 'marshmallow.fields.String', 'fields.String', ([], {}), '()\n', (1050, 1052), False, 'from marshmallow import fields\n'), ((1193, 1222), 'conftest.app.test_request_context', 'app.test_request_context', (['"""/"""'], {}), "('/')\n", (1217, 1222), False, 'from conftest import app\n'), ((1407, 1423), 'marshmallow.fields.Integer', 'fields.Integer', ([], {}), '()\n', (1421, 1423), False, 'from marshmallow import fields\n'), ((1436, 1451), 'marshmallow.fields.String', 'fields.String', ([], {}), '()\n', (1449, 1451), False, 'from marshmallow import fields\n'), ((1707, 1731), 'pytest.raises', 'pytest.raises', (['Exception'], {}), '(Exception)\n', (1720, 1731), False, 'import pytest\n'), ((1273, 1291), 'json.loads', 'json.loads', (['result'], {}), '(result)\n', (1283, 1291), False, 'import json\n'), ((1526, 1538), 'protobuffers.proto.Test', 'proto.Test', ([], {}), '()\n', (1536, 1538), False, 'from protobuffers import proto\n'), ((1564, 1576), 'protobuffers.proto.Test', 'proto.Test', ([], {}), '()\n', (1574, 1576), False, 'from protobuffers import proto\n'), ((1607, 1619), 'protobuffers.proto.Test', 'proto.Test', ([], {}), '()\n', (1617, 1619), False, 'from protobuffers import proto\n'), ((1650, 1672), 'protobuffers.proto.TestCollection', 'proto.TestCollection', ([], {}), '()\n', (1670, 1672), False, 'from protobuffers import proto\n')] |
#!/usr/bin/python
import sicario
import os
import importlib
class ModuleManager:
    """Discovers, imports and starts plugin modules from a directory.

    Each plugin lives in ``<directory>/<name>/<name>.py`` and must expose a
    class named after the capitalized module name, providing ``start()`` and
    ``trigger_hook()`` methods.
    """

    # Kept as class attributes for backward compatibility with code that
    # reads ModuleManager.modules directly; __init__ shadows them with
    # per-instance lists so separate managers no longer share state.
    modules = []
    modules_failed = []

    def __init__(self):
        self.modules = []
        self.modules_failed = []

    def load_modules(self, directory="modules/"):
        """Load every plugin found in *directory*.

        Returns ``[loaded_module_instances, loaded_count, failed_count]``.
        """
        # Bug fix: the original listed *directory* but then hard-coded
        # 'modules/' in every path check, breaking any non-default directory.
        # Derive the dotted package prefix from the path instead.
        package = os.path.normpath(directory).replace(os.sep, ".")
        for module in os.listdir(directory):
            path = os.path.join(directory, module)
            if not os.path.isdir(path):
                continue
            if os.path.isfile(os.path.join(path, module + ".py")):
                module_package = importlib.import_module(package + "." + module)
                # The plugin's entry class is the capitalized module name.
                module_class = getattr(getattr(module_package, module), module.capitalize())()
                module_class.start()
                self.modules.append(module_class)
            else:
                self.modules_failed.append(module)
        return [self.modules, len(self.modules), len(self.modules_failed)]

    def trigger_hook(self, hook_name, *args):
        """Forward *hook_name* with *args* (as a tuple) to every loaded module."""
        for module in self.modules:
            module.trigger_hook(hook_name, args)
"os.path.isfile",
"os.listdir",
"os.path.isdir",
"importlib.import_module"
] | [((184, 205), 'os.listdir', 'os.listdir', (['directory'], {}), '(directory)\n', (194, 205), False, 'import os\n'), ((341, 399), 'os.path.isfile', 'os.path.isfile', (["('modules/' + module + '/' + module + '.py')"], {}), "('modules/' + module + '/' + module + '.py')\n", (355, 399), False, 'import os\n'), ((284, 318), 'os.path.isdir', 'os.path.isdir', (["('modules/' + module)"], {}), "('modules/' + module)\n", (297, 318), False, 'import os\n'), ((422, 466), 'importlib.import_module', 'importlib.import_module', (["('modules.' + module)"], {}), "('modules.' + module)\n", (445, 466), False, 'import importlib\n')] |
# https://leetcode.com/problems/valid-parentheses
from collections import deque
class Solution:
    def isValid(self, s: str):
        """Return True when every bracket in *s* closes in the correct order."""
        if not s:
            return True
        # closer -> matching opener
        closers = {")": "(", "}": "{", "]": "["}
        stack = deque([s[0]])
        for ch in s[1:]:
            # Pop only when ch closes the bracket currently on top;
            # otherwise push it (mismatches accumulate and fail the final check).
            if stack and closers.get(ch) == stack[-1]:
                stack.pop()
            else:
                stack.append(ch)
        return len(stack) == 0
| [
"collections.deque"
] | [((204, 217), 'collections.deque', 'deque', (['[s[0]]'], {}), '([s[0]])\n', (209, 217), False, 'from collections import deque\n')] |
import numpy as np
one_d_array = [0, 1, 2, 3, 4, 5]
two_d_array = [
    [11, 12, 13, 14, 15],
    [16, 17, 18, 19, 20],
    [21, 22, 23, 24, 25],
    [26, 27, 28 ,29, 30],
    [31, 32, 33, 34, 35]
]
t = one_d_array[3]
# x: coord(index)
e = two_d_array[2][1]
# indexing order is [row][column]
arr = np.array(two_d_array)  # convert the nested list into an ndarray
# print(arr[2, 2])
# Slice a column range of one row of the 2D array.
# NOTE(review): arr has only 5 columns (indices 0-4), so arr[1, 5:8] is an
# empty array -- an in-range slice such as 1:4 was presumably intended; confirm.
print(arr[1,5:8])
# Slice a row range and a column range of the 2D array.
print(arr[1:, 1:4])
# Control the step of the slice (every 4th row and column).
print(arr[::4,::4])
# Extract a single full row (the rest of the array is dropped).
print(arr[1, :])
"numpy.array"
] | [((320, 341), 'numpy.array', 'np.array', (['two_d_array'], {}), '(two_d_array)\n', (328, 341), True, 'import numpy as np\n')] |
import frappe
from datetime import datetime
from adaequare_gsp.helpers.schema.states import number_state_mapping
from adaequare_gsp.helpers.schema.gstr_2b import (
GST_CATEGORY,
NOTE_TYPE,
YES_NO,
)
def update_period(date):
    """Convert a 'Mon-YY' period string (e.g. 'Apr-21') to 'MMYYYY' (e.g. '042021')."""
    parsed = datetime.strptime(date, "%b-%y")
    return parsed.strftime("%m%Y")
DATE_FORMAT = "%d-%m-%Y"
DATE_FORMAT2 = "%d-%b-%y"
AMEND_TYPE = {
"R": "Receiver GSTIN Amended",
"N": "Invoice Number Amended",
"D": "Other Details Amended",
}
ISD_TYPE = {"ISDCN": "ISD Credit Note", "ISD": "ISD Invoice"}
SUP_DETAIL = {
"supplier_gstin": "ctin",
"gstr_1_filing_date": "fldtr1",
"sup_return_period": "flprdr1",
"gstr_1_filled": "cfs",
"gstr_3b_filled": "cfs3b",
"registration_cancel_date": "dtcancel",
}
MODIFY_SUP_DETAIL = {
"fldtr1": DATE_FORMAT2,
"dtcancel": DATE_FORMAT2,
"cfs": YES_NO,
"cfs3b": YES_NO,
"flprdr1": update_period,
}
B2B = frappe._dict(
{
"doc_number": "inum",
"supply_type": "inv_typ",
"doc_date": "idt",
"document_value": "val",
"place_of_supply": "pos",
"other_return_period": "aspd",
"amendment_type": "atyp",
"reverse_charge": "rchrg",
"diffprcnt": "diff_percent",
"irn_source": "srctyp",
"irn_number": "irn",
"irn_gen_date": "irngendate",
"doc_type": "doc_type", # Custom Field
}
)
MODIFY_B2B = frappe._dict(
{
"inv_typ": GST_CATEGORY,
"pos": number_state_mapping,
"rchrg": YES_NO,
"aspd": update_period,
"atyp": AMEND_TYPE,
"diff_percent": {1: 1, 0.65: 0.65, None: 1},
"idt": DATE_FORMAT,
"irngendate": DATE_FORMAT,
"doc_type": {None: "Invoice"},
}
)
ITEM = {
"item_number": "num",
"rate": "rt",
"taxable_value": "txval",
"igst": "iamt",
"cgst": "camt",
"sgst": "samt",
"cess": "csamt",
}
B2BA = frappe._dict(B2B).update(
{"original_doc_number": "oinum", "original_doc_date": "oidt"}
)
MODIFY_B2BA = frappe._dict(MODIFY_B2B).update({"oidt": DATE_FORMAT})
CDN = frappe._dict(B2B).update(
{"doc_number": "nt_num", "doc_date": "nt_dt", "doc_type": "ntty"}
)
MODIFY_CDN = frappe._dict(MODIFY_B2B).update(
{"doc_type": None, "ntty": NOTE_TYPE, "nt_dt": DATE_FORMAT}
)
CDNA = frappe._dict(CDN).update(
{
"original_doc_number": "ont_num",
"original_doc_date": "ont_dt",
"original_doc_type": "ntty",
}
)
MODIFY_CDNA = frappe._dict(MODIFY_CDN).update({"ont_dt": DATE_FORMAT})
ISD = frappe._dict(
{
"doc_type": "isd_docty",
"doc_number": "docnum",
"doc_date": "docdt",
"itc_availability": "itc_elg",
"other_return_period": "aspd",
"amendment_type": "atyp",
}
)
MODIFY_ISD = frappe._dict(
{
"itc_elg": {"Y": "Yes", "N": "No"},
"isd_docty": ISD_TYPE,
"docdt": DATE_FORMAT,
"aspd": update_period,
"atyp": AMEND_TYPE,
}
)
ITEM_ISD = frappe._dict(ITEM).update({"cess": "cess"})
IMPG = frappe._dict(
{
"doc_type": "doc_type", # custom field
"doc_number": "benum",
"doc_date": "bedt",
"is_amended": "amd",
"port_code": "portcd",
}
)
MODIFY_IMPG = frappe._dict(
{
"doc_type": {None: "Bill of Entry"},
"bedt": DATE_FORMAT,
"amd": YES_NO,
}
)
IMPGSEZ = frappe._dict(IMPG).update(
{
"supplier_gstin": "sgstin",
"supplier_name": "tdname",
}
)
ACTIONS = ["B2B", "B2BA", "CDN", "CDNA", "ISD", "IMPG", "IMPGSEZ"]
CLASSIFICATION = ["B2B", "B2BA", "CDNR", "CDNRA", "ISD", "IMPG", "IMPGSEZ"]
# TODO: ISD and IMPG and IMPGSEZ should be a part of configuration. Only if user deals in such transactions should it be imported.
# eg: If company or user deals in SEZ imports, only them should IMPGSEZ whould be called.
CLASS_MAP = {
"B2B": ["inv", "itms", "B2B", B2B, MODIFY_B2B, SUP_DETAIL, ITEM],
"B2BA": ["inv", "itms", "B2BA", B2BA, MODIFY_B2BA, SUP_DETAIL, ITEM],
"CDN": ["nt", "itms", "CDNR", CDN, MODIFY_CDN, SUP_DETAIL, ITEM],
"CDNA": ["nt", "itms", "CDNRA", CDNA, MODIFY_CDNA, SUP_DETAIL, ITEM],
"ISD": ["doclist", "", "ISD", ISD, MODIFY_ISD, SUP_DETAIL, ITEM_ISD],
"IMPG": ["boe", "", "IMPG", IMPG, MODIFY_IMPG, {}, ITEM],
"IMPGSEZ": ["boe", "", "IMPGSEZ", IMPGSEZ, MODIFY_IMPG, {}, ITEM],
}
| [
"datetime.datetime.strptime",
"frappe._dict"
] | [((934, 1298), 'frappe._dict', 'frappe._dict', (["{'doc_number': 'inum', 'supply_type': 'inv_typ', 'doc_date': 'idt',\n 'document_value': 'val', 'place_of_supply': 'pos',\n 'other_return_period': 'aspd', 'amendment_type': 'atyp',\n 'reverse_charge': 'rchrg', 'diffprcnt': 'diff_percent', 'irn_source':\n 'srctyp', 'irn_number': 'irn', 'irn_gen_date': 'irngendate', 'doc_type':\n 'doc_type'}"], {}), "({'doc_number': 'inum', 'supply_type': 'inv_typ', 'doc_date':\n 'idt', 'document_value': 'val', 'place_of_supply': 'pos',\n 'other_return_period': 'aspd', 'amendment_type': 'atyp',\n 'reverse_charge': 'rchrg', 'diffprcnt': 'diff_percent', 'irn_source':\n 'srctyp', 'irn_number': 'irn', 'irn_gen_date': 'irngendate', 'doc_type':\n 'doc_type'})\n", (946, 1298), False, 'import frappe\n'), ((1426, 1693), 'frappe._dict', 'frappe._dict', (["{'inv_typ': GST_CATEGORY, 'pos': number_state_mapping, 'rchrg': YES_NO,\n 'aspd': update_period, 'atyp': AMEND_TYPE, 'diff_percent': {(1): 1, (\n 0.65): 0.65, None: 1}, 'idt': DATE_FORMAT, 'irngendate': DATE_FORMAT,\n 'doc_type': {None: 'Invoice'}}"], {}), "({'inv_typ': GST_CATEGORY, 'pos': number_state_mapping, 'rchrg':\n YES_NO, 'aspd': update_period, 'atyp': AMEND_TYPE, 'diff_percent': {(1):\n 1, (0.65): 0.65, None: 1}, 'idt': DATE_FORMAT, 'irngendate':\n DATE_FORMAT, 'doc_type': {None: 'Invoice'}})\n", (1438, 1693), False, 'import frappe\n'), ((2563, 2743), 'frappe._dict', 'frappe._dict', (["{'doc_type': 'isd_docty', 'doc_number': 'docnum', 'doc_date': 'docdt',\n 'itc_availability': 'itc_elg', 'other_return_period': 'aspd',\n 'amendment_type': 'atyp'}"], {}), "({'doc_type': 'isd_docty', 'doc_number': 'docnum', 'doc_date':\n 'docdt', 'itc_availability': 'itc_elg', 'other_return_period': 'aspd',\n 'amendment_type': 'atyp'})\n", (2575, 2743), False, 'import frappe\n'), ((2811, 2953), 'frappe._dict', 'frappe._dict', (["{'itc_elg': {'Y': 'Yes', 'N': 'No'}, 'isd_docty': ISD_TYPE, 'docdt':\n DATE_FORMAT, 'aspd': update_period, 'atyp': 
AMEND_TYPE}"], {}), "({'itc_elg': {'Y': 'Yes', 'N': 'No'}, 'isd_docty': ISD_TYPE,\n 'docdt': DATE_FORMAT, 'aspd': update_period, 'atyp': AMEND_TYPE})\n", (2823, 2953), False, 'import frappe\n'), ((3067, 3196), 'frappe._dict', 'frappe._dict', (["{'doc_type': 'doc_type', 'doc_number': 'benum', 'doc_date': 'bedt',\n 'is_amended': 'amd', 'port_code': 'portcd'}"], {}), "({'doc_type': 'doc_type', 'doc_number': 'benum', 'doc_date':\n 'bedt', 'is_amended': 'amd', 'port_code': 'portcd'})\n", (3079, 3196), False, 'import frappe\n'), ((3276, 3367), 'frappe._dict', 'frappe._dict', (["{'doc_type': {None: 'Bill of Entry'}, 'bedt': DATE_FORMAT, 'amd': YES_NO}"], {}), "({'doc_type': {None: 'Bill of Entry'}, 'bedt': DATE_FORMAT,\n 'amd': YES_NO})\n", (3288, 3367), False, 'import frappe\n'), ((1938, 1955), 'frappe._dict', 'frappe._dict', (['B2B'], {}), '(B2B)\n', (1950, 1955), False, 'import frappe\n'), ((2046, 2070), 'frappe._dict', 'frappe._dict', (['MODIFY_B2B'], {}), '(MODIFY_B2B)\n', (2058, 2070), False, 'import frappe\n'), ((2108, 2125), 'frappe._dict', 'frappe._dict', (['B2B'], {}), '(B2B)\n', (2120, 2125), False, 'import frappe\n'), ((2220, 2244), 'frappe._dict', 'frappe._dict', (['MODIFY_B2B'], {}), '(MODIFY_B2B)\n', (2232, 2244), False, 'import frappe\n'), ((2327, 2344), 'frappe._dict', 'frappe._dict', (['CDN'], {}), '(CDN)\n', (2339, 2344), False, 'import frappe\n'), ((2499, 2523), 'frappe._dict', 'frappe._dict', (['MODIFY_CDN'], {}), '(MODIFY_CDN)\n', (2511, 2523), False, 'import frappe\n'), ((3015, 3033), 'frappe._dict', 'frappe._dict', (['ITEM'], {}), '(ITEM)\n', (3027, 3033), False, 'import frappe\n'), ((3412, 3430), 'frappe._dict', 'frappe._dict', (['IMPG'], {}), '(IMPG)\n', (3424, 3430), False, 'import frappe\n'), ((249, 281), 'datetime.datetime.strptime', 'datetime.strptime', (['date', '"""%b-%y"""'], {}), "(date, '%b-%y')\n", (266, 281), False, 'from datetime import datetime\n')] |
import os
import tempfile
import time

import numpy as np
import tensorflow as tf
import tensorflow_model_optimization as tfmot
from tensorflow import keras
from tensorflow.keras.models import load_model
def evaluate_model(interpreter):
    """Compute classification accuracy of a TFLite model on the MNIST test set.

    Accepts either a ready ``tf.lite.Interpreter`` or the serialized model
    bytes produced by ``TFLiteConverter.convert()`` (the call sites in this
    script pass the raw bytes). Reads the module-level ``test_images`` and
    ``test_labels`` globals.
    """
    # Bug fix: the call sites pass raw flatbuffer bytes, not an Interpreter,
    # so wrap them here; tensors must also be allocated before inference.
    if not isinstance(interpreter, tf.lite.Interpreter):
        interpreter = tf.lite.Interpreter(model_content=interpreter)
    interpreter.allocate_tensors()
    input_index = interpreter.get_input_details()[0]["index"]
    output_index = interpreter.get_output_details()[0]["index"]
    # Run predictions on every image in the "test" dataset.
    prediction_digits = []
    for i, test_image in enumerate(test_images):
        # Pre-processing: add batch dimension and convert to float32 to match
        # the model's input format (bug fix: float16 did not match the
        # float32 input tensor and made set_tensor fail).
        test_image = np.expand_dims(test_image, axis=0).astype(np.float32)
        interpreter.set_tensor(input_index, test_image)
        # Run inference.
        interpreter.invoke()
        # Post-processing: remove batch dimension and find the digit with highest
        # probability.
        output = interpreter.tensor(output_index)
        digit = np.argmax(output()[0])
        prediction_digits.append(digit)
    # Compare prediction results with ground truth labels to calculate accuracy.
    prediction_digits = np.array(prediction_digits)
    accuracy = (prediction_digits == test_labels).mean()
    return accuracy
# Load MNIST dataset
mnist = keras.datasets.mnist
(train_images, train_labels), (test_images, test_labels) = mnist.load_data()
# Normalize the input image so that each pixel value is between 0 to 1.
train_images = train_images / 255.0
test_images = test_images / 255.0
# Define the model architecture. Only the Conv2D layer is annotated for
# quantization; quantize_apply() below acts on that annotation.
model = keras.Sequential([
    keras.layers.InputLayer(input_shape=(28, 28)),
    keras.layers.Reshape(target_shape=(28, 28, 1)),
    tfmot.quantization.keras.quantize_annotate_layer(keras.layers.Conv2D(filters=12, kernel_size=(3, 3), activation=tf.nn.relu)),
    keras.layers.BatchNormalization(),
    keras.layers.MaxPooling2D(pool_size=(2, 2)),
    keras.layers.BatchNormalization(),
    keras.layers.Activation('relu'),
    keras.layers.Flatten(),
    keras.layers.Dense(10, activation=tf.nn.softmax)
])
model.summary()
# Compile the digit classification model
model.compile(optimizer='adam',
              loss='sparse_categorical_crossentropy',
              metrics=['accuracy'])
# Build the quantization-aware counterpart of the annotated model.
quantize_model = tfmot.quantization.keras.quantize_apply(model)
# `quantize_model` requires a recompile.
quantize_model.compile(optimizer='adam',
              loss='sparse_categorical_crossentropy',
              metrics=['accuracy'])
# train normal model
model.fit(
    train_images,
    train_labels,
    batch_size=64,
    epochs=10,
    validation_split=0.2,
)
# `quantize_model` requires a recompile.
# NOTE(review): this second compile of quantize_model repeats the one above
# with identical arguments -- harmless, but one of the two is redundant.
quantize_model.compile(optimizer='adam',
              loss='sparse_categorical_crossentropy',
              metrics=['accuracy'])
# train quantize normal model
quantize_model.fit(
    train_images,
    train_labels,
    batch_size=64,
    epochs=10,
    validation_split=0.2,
)
#convert quantize model to tflite
converter = tf.lite.TFLiteConverter.from_keras_model(quantize_model)
converter.optimizations = [tf.lite.Optimize.DEFAULT]
quantized_tflite_model = converter.convert()
# convert normal model to tflite
converter = tf.lite.TFLiteConverter.from_keras_model(model)
tflite_model = converter.convert()
# Measure sizes of models.
tf_file = 'model.tflite'
quant_file = 'quant.tflite'
# Write Files
with open(quant_file, 'wb') as f:
    f.write(quantized_tflite_model)
with open(tf_file, 'wb') as f:
    f.write(tflite_model)
#compare file size
print("Float model in Mb:", os.path.getsize(tf_file) / float(2**20))
print("Quantized model in Mb:", os.path.getsize(quant_file) / float(2**20))
# evaluate models (evaluate_model receives the raw tflite bytes)
time_quant = time.time()
quant_test_accuracy = evaluate_model(quantized_tflite_model)
print("Time take by quantized tf lite model is ",time.time()-time_quant)
time_tfmodel = time.time()
test_accuracy = evaluate_model(tflite_model)
print("Time take by tf lite model is ",time.time()-time_tfmodel)
print("\n")
print("Accuracy of quantized tf lite model is",quant_test_accuracy)
print("Accuracy of tf lite model is",test_accuracy)
| [
"os.path.getsize",
"tensorflow.keras.layers.Reshape",
"tensorflow.keras.layers.Conv2D",
"tensorflow.keras.layers.MaxPooling2D",
"tensorflow_model_optimization.quantization.keras.quantize_apply",
"tensorflow.keras.layers.BatchNormalization",
"tensorflow.keras.layers.Dense",
"tensorflow.lite.TFLiteConve... | [((2171, 2217), 'tensorflow_model_optimization.quantization.keras.quantize_apply', 'tfmot.quantization.keras.quantize_apply', (['model'], {}), '(model)\n', (2210, 2217), True, 'import tensorflow_model_optimization as tfmot\n'), ((2875, 2931), 'tensorflow.lite.TFLiteConverter.from_keras_model', 'tf.lite.TFLiteConverter.from_keras_model', (['quantize_model'], {}), '(quantize_model)\n', (2915, 2931), True, 'import tensorflow as tf\n'), ((3076, 3123), 'tensorflow.lite.TFLiteConverter.from_keras_model', 'tf.lite.TFLiteConverter.from_keras_model', (['model'], {}), '(model)\n', (3116, 3123), True, 'import tensorflow as tf\n'), ((3576, 3587), 'time.time', 'time.time', ([], {}), '()\n', (3585, 3587), False, 'import time\n'), ((3740, 3751), 'time.time', 'time.time', ([], {}), '()\n', (3749, 3751), False, 'import time\n'), ((1509, 1554), 'tensorflow.keras.layers.InputLayer', 'keras.layers.InputLayer', ([], {'input_shape': '(28, 28)'}), '(input_shape=(28, 28))\n', (1532, 1554), False, 'from tensorflow import keras\n'), ((1558, 1604), 'tensorflow.keras.layers.Reshape', 'keras.layers.Reshape', ([], {'target_shape': '(28, 28, 1)'}), '(target_shape=(28, 28, 1))\n', (1578, 1604), False, 'from tensorflow import keras\n'), ((1736, 1769), 'tensorflow.keras.layers.BatchNormalization', 'keras.layers.BatchNormalization', ([], {}), '()\n', (1767, 1769), False, 'from tensorflow import keras\n'), ((1773, 1816), 'tensorflow.keras.layers.MaxPooling2D', 'keras.layers.MaxPooling2D', ([], {'pool_size': '(2, 2)'}), '(pool_size=(2, 2))\n', (1798, 1816), False, 'from tensorflow import keras\n'), ((1820, 1853), 'tensorflow.keras.layers.BatchNormalization', 'keras.layers.BatchNormalization', ([], {}), '()\n', (1851, 1853), False, 'from tensorflow import keras\n'), ((1857, 1888), 'tensorflow.keras.layers.Activation', 'keras.layers.Activation', (['"""relu"""'], {}), "('relu')\n", (1880, 1888), False, 'from tensorflow import keras\n'), ((1892, 1914), 
'tensorflow.keras.layers.Flatten', 'keras.layers.Flatten', ([], {}), '()\n', (1912, 1914), False, 'from tensorflow import keras\n'), ((1918, 1966), 'tensorflow.keras.layers.Dense', 'keras.layers.Dense', (['(10)'], {'activation': 'tf.nn.softmax'}), '(10, activation=tf.nn.softmax)\n', (1936, 1966), False, 'from tensorflow import keras\n'), ((3427, 3451), 'os.path.getsize', 'os.path.getsize', (['tf_file'], {}), '(tf_file)\n', (3442, 3451), False, 'import os\n'), ((3500, 3527), 'os.path.getsize', 'os.path.getsize', (['quant_file'], {}), '(quant_file)\n', (3515, 3527), False, 'import os\n'), ((3699, 3710), 'time.time', 'time.time', ([], {}), '()\n', (3708, 3710), False, 'import time\n'), ((3837, 3848), 'time.time', 'time.time', ([], {}), '()\n', (3846, 3848), False, 'import time\n'), ((1657, 1731), 'tensorflow.keras.layers.Conv2D', 'keras.layers.Conv2D', ([], {'filters': '(12)', 'kernel_size': '(3, 3)', 'activation': 'tf.nn.relu'}), '(filters=12, kernel_size=(3, 3), activation=tf.nn.relu)\n', (1676, 1731), False, 'from tensorflow import keras\n')] |
from __future__ import annotations
import copy
from typing import Optional
from mapfmclient import MarkedLocation
from python.coord import Coord, UncalculatedCoord
class Agent:
    """An agent in the MAPFM search: a location, a colour index and the cost
    it has accumulated so far."""
    def __init__(self, location: Coord, color: int, accumulated_cost: Optional[int]):
        self.location = location
        # A falsy accumulated_cost (None or 0) collapses to 0; 0 is the only
        # falsy int, so behaviour matches an `is None` check here.
        self.accumulated_cost = accumulated_cost if accumulated_cost else 0
        self.color = color
    @property
    def x(self):
        # Convenience pass-through to the wrapped location.
        return self.location.x
    @property
    def y(self):
        return self.location.y
    def __eq__(self, other: Agent):
        if other is self:
            return True
        # NOTE(review): `self.location == other` compares a Coord against an
        # Agent -- presumably `other.location` was intended; confirm against
        # Coord.__eq__ before changing. Colour is not part of equality.
        return self.location == other and self.accumulated_cost == other.accumulated_cost
    def __hash__(self):
        # Hash on location and accumulated cost only, mirroring __eq__.
        return hash(self.location) ^ hash(self.accumulated_cost)
    def __repr__(self):
        # Compares against the module-level sentinel via __eq__.
        if self == UncalculatedAgent:
            return "UncalculatedAgent "
        else:
            return f"Agent({self.location}, {self.accumulated_cost})"
    @classmethod
    def from_marked_location(cls, location: MarkedLocation, accumulated_cost: int) -> Agent:
        """Build an Agent from a mapfmclient MarkedLocation (x, y, color)."""
        return cls(Coord(location.x, location.y), location.color, accumulated_cost)
    def with_new_position(self, new_pos: Coord) -> Agent:
        """Return a copy of this agent moved to *new_pos* (colour and cost kept)."""
        return Agent(new_pos, self.color, self.accumulated_cost)
# An agent that has not yet been calculated. Stand in
# for agents in OD when states are only partially calculated.
# Sentinel instance (location UncalculatedCoord, colour 0, cost 0).
UncalculatedAgent = Agent(UncalculatedCoord, 0, 0)
| [
"python.coord.Coord"
] | [((1117, 1146), 'python.coord.Coord', 'Coord', (['location.x', 'location.y'], {}), '(location.x, location.y)\n', (1122, 1146), False, 'from python.coord import Coord, UncalculatedCoord\n')] |
import sqlalchemy as sq
from sqlalchemy import create_engine
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.orm import sessionmaker
Base = declarative_base()
# Single SQLite engine for the module. check_same_thread=False allows the
# connection to be used from threads other than the creating one;
# pool_pre_ping revalidates pooled connections before each use.
# NOTE(review): three slashes make this the *relative* path 'tmp/expense.db';
# the absolute /tmp directory would need four slashes -- confirm intent.
engine = create_engine('sqlite:///tmp/expense.db',
                       connect_args={"check_same_thread": False},
                       pool_pre_ping=True)
DBSession = sessionmaker(bind=engine)
# Module-wide session shared by all Expense helper methods below.
db = DBSession()
class Expense(Base):
    """ORM model for one expense record, backed by the 'expenses' table.

    All helpers operate on the module-level shared session ``db``.
    """
    __tablename__ = 'expenses'
    id = sq.Column(sq.Integer, primary_key=True, autoincrement=True)
    pdf = sq.Column(sq.String, nullable=False, unique=True)  # receipt file name
    txt = sq.Column(sq.Text)  # extracted receipt text
    date = sq.Column(sq.String(10))
    amount = sq.Column(sq.Float)
    merchant = sq.Column(sq.String(20))
    category = sq.Column(sq.String(20), nullable=False)
    def save(self):
        """Persist this instance and return it (fluent style)."""
        db.add(self)
        db.commit()
        return self
    def delete(self):
        """Remove this instance; returns the commit's result."""
        db.delete(self)
        return db.commit()
    @classmethod
    def get_all(cls, *order_by_args):
        """Return every expense, optionally ordered by the given criteria."""
        query = db.query(cls)
        if order_by_args:
            query = query.order_by(*order_by_args)
        return query.all()
    @classmethod
    def get_all_receipts(cls):
        """Return receipt (pdf) names ordered by category, then date."""
        rows = db.query(cls.pdf).order_by(cls.category, cls.date).all()
        return [row[0] for row in rows]
    @staticmethod
    def rollback():
        """Discard pending, uncommitted changes on the shared session."""
        db.rollback()
    @staticmethod
    def save_all(objs):
        """Bulk-add the given instances and commit once."""
        db.add_all(objs)
        db.commit()
# Create all tables declared on Base (no-op for tables that already exist).
Base.metadata.create_all(engine)
"sqlalchemy.orm.sessionmaker",
"sqlalchemy.create_engine",
"sqlalchemy.String",
"sqlalchemy.ext.declarative.declarative_base",
"sqlalchemy.Column"
] | [((166, 184), 'sqlalchemy.ext.declarative.declarative_base', 'declarative_base', ([], {}), '()\n', (182, 184), False, 'from sqlalchemy.ext.declarative import declarative_base\n'), ((194, 302), 'sqlalchemy.create_engine', 'create_engine', (['"""sqlite:///tmp/expense.db"""'], {'connect_args': "{'check_same_thread': False}", 'pool_pre_ping': '(True)'}), "('sqlite:///tmp/expense.db', connect_args={'check_same_thread':\n False}, pool_pre_ping=True)\n", (207, 302), False, 'from sqlalchemy import create_engine\n'), ((357, 382), 'sqlalchemy.orm.sessionmaker', 'sessionmaker', ([], {'bind': 'engine'}), '(bind=engine)\n', (369, 382), False, 'from sqlalchemy.orm import sessionmaker\n'), ((463, 522), 'sqlalchemy.Column', 'sq.Column', (['sq.Integer'], {'primary_key': '(True)', 'autoincrement': '(True)'}), '(sq.Integer, primary_key=True, autoincrement=True)\n', (472, 522), True, 'import sqlalchemy as sq\n'), ((533, 582), 'sqlalchemy.Column', 'sq.Column', (['sq.String'], {'nullable': '(False)', 'unique': '(True)'}), '(sq.String, nullable=False, unique=True)\n', (542, 582), True, 'import sqlalchemy as sq\n'), ((593, 611), 'sqlalchemy.Column', 'sq.Column', (['sq.Text'], {}), '(sq.Text)\n', (602, 611), True, 'import sqlalchemy as sq\n'), ((661, 680), 'sqlalchemy.Column', 'sq.Column', (['sq.Float'], {}), '(sq.Float)\n', (670, 680), True, 'import sqlalchemy as sq\n'), ((633, 646), 'sqlalchemy.String', 'sq.String', (['(10)'], {}), '(10)\n', (642, 646), True, 'import sqlalchemy as sq\n'), ((704, 717), 'sqlalchemy.String', 'sq.String', (['(20)'], {}), '(20)\n', (713, 717), True, 'import sqlalchemy as sq\n'), ((744, 757), 'sqlalchemy.String', 'sq.String', (['(20)'], {}), '(20)\n', (753, 757), True, 'import sqlalchemy as sq\n')] |
"""web URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/3.0/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import include, path
from django.views.generic import RedirectView
from django.contrib.staticfiles.urls import static
from django.contrib.staticfiles.urls import staticfiles_urlpatterns
from . import settings
# Route each app under its own prefix; the bare domain redirects to /home/.
urlpatterns = [
    path('dist/', include('dist.urls')),
    path('synth/', include('synth.urls')),
    path('data/', include('data.urls')),
    path('home/', include('home.urls')),
    path('admin/', admin.site.urls),
    # Permanent (HTTP 301) redirect from the site root to the home app.
    path('', RedirectView.as_view(url='/home/', permanent=True)),
]
# Serve uploaded media at MEDIA_URL (development-style static serving).
urlpatterns += static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
"django.views.generic.RedirectView.as_view",
"django.urls.path",
"django.contrib.staticfiles.urls.static",
"django.urls.include"
] | [((1191, 1252), 'django.contrib.staticfiles.urls.static', 'static', (['settings.MEDIA_URL'], {'document_root': 'settings.MEDIA_ROOT'}), '(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)\n', (1197, 1252), False, 'from django.contrib.staticfiles.urls import static\n'), ((1073, 1104), 'django.urls.path', 'path', (['"""admin/"""', 'admin.site.urls'], {}), "('admin/', admin.site.urls)\n", (1077, 1104), False, 'from django.urls import include, path\n'), ((921, 941), 'django.urls.include', 'include', (['"""dist.urls"""'], {}), "('dist.urls')\n", (928, 941), False, 'from django.urls import include, path\n'), ((963, 984), 'django.urls.include', 'include', (['"""synth.urls"""'], {}), "('synth.urls')\n", (970, 984), False, 'from django.urls import include, path\n'), ((1005, 1025), 'django.urls.include', 'include', (['"""data.urls"""'], {}), "('data.urls')\n", (1012, 1025), False, 'from django.urls import include, path\n'), ((1046, 1066), 'django.urls.include', 'include', (['"""home.urls"""'], {}), "('home.urls')\n", (1053, 1066), False, 'from django.urls import include, path\n'), ((1119, 1169), 'django.views.generic.RedirectView.as_view', 'RedirectView.as_view', ([], {'url': '"""/home/"""', 'permanent': '(True)'}), "(url='/home/', permanent=True)\n", (1139, 1169), False, 'from django.views.generic import RedirectView\n')] |
from django import forms
from dal import forward
from dal.autocomplete import ModelSelect2, ModelSelect2Multiple
from django.forms import formset_factory, inlineformset_factory, modelformset_factory
from crispy_forms.helper import FormHelper
from crispy_forms.layout import Layout, Fieldset, ButtonHolder, Submit
from .models import (
Author,
Authorship,
Conference,
Institution,
Topic,
Keyword,
Work,
WorkType,
Country,
Language,
License,
Affiliation,
SeriesMembership,
ConferenceSeries,
Organizer,
)
class WorkFilter(forms.ModelForm):
    """Search/filter form for Work listings.

    Every field is optional: ``ordering`` controls the sort order of the
    results and the remaining fields narrow the queryset. The Select2
    widgets fetch their choices from autocomplete endpoints so large
    FK/M2M choice lists are not rendered inline.
    """
    ordering = forms.ChoiceField(
        choices=(
            ("year", "Conference year (ascending)"),
            ("-year", "Conference year (descending)"),
            ("rank", "Text search relevance"),
            ("title", "Title (A-Z)"),
            ("-title", "Title (Z-A)"),
            ("last_name", "Last name of first author (A-Z)"),
            ("-last_name", "Last name of first author (Z-A)"),
        ),
        required=False,
        initial="year",
    )
    text = forms.CharField(
        max_length=500,
        strip=True,
        required=False,
        help_text="Search abstracts by title and by full text content when available. Search will look for all terms by default. Use OR to look for any terms in the query, and quotation marks to search for exact phrases.",
    )
    full_text_available = forms.BooleanField(
        required=False, label="Full text has been indexed"
    )
    full_text_viewable = forms.BooleanField(
        required=False, label="Full text is publicly viewable"
    )
    work_type = forms.ModelChoiceField(
        queryset=WorkType.objects.distinct(),
        required=False,
        help_text='Abstracts may belong to one type that has been defined by editors based on a survey of all the abstracts in this collection, e.g. "poster", "workshop", "long paper".',
    )
    author = forms.ModelMultipleChoiceField(
        queryset=Author.objects.all(),
        required=False,
        widget=ModelSelect2Multiple(
            url="author-autocomplete", attrs={"data-html": True}
        ),
        help_text="Abstract authorship must include this person",
    )
    conference = forms.ModelChoiceField(
        queryset=Conference.objects.all(),
        required=False,
        widget=ModelSelect2(url="conference-autocomplete"),
        help_text="The conference where this abstract was submitted/published.",
    )
    institution = forms.ModelMultipleChoiceField(
        queryset=Institution.objects.all(),
        widget=ModelSelect2Multiple(
            url="institution-autocomplete", attrs={"data-html": True}
        ),
        required=False,
        help_text="Works having at least one author belonging to ANY of the selected institutions.",
    )
    affiliation = forms.ModelMultipleChoiceField(
        queryset=Affiliation.objects.all(),
        widget=ModelSelect2Multiple(
            url="affiliation-autocomplete", attrs={"data-html": True}
        ),
        required=False,
        help_text="Works having at least one author belonging to a specific department or other center within a larger institution.",
    )
    class Meta:
        model = Work
        # Order here determines the rendered field order.
        fields = [
            "ordering",
            "text",
            "conference",
            "full_text_available",
            "full_text_viewable",
            "work_type",
            "author",
            "institution",
            "affiliation",
            "keywords",
            "languages",
            "topics",
        ]
        field_classes = {
            "keywords": forms.ModelMultipleChoiceField,
            "topics": forms.ModelMultipleChoiceField,
            "languages": forms.ModelMultipleChoiceField,
        }
        widgets = {
            "keywords": ModelSelect2Multiple(
                url="keyword-autocomplete", attrs={"data-html": True}
            ),
            "topics": ModelSelect2Multiple(
                url="topic-autocomplete", attrs={"data-html": True}
            ),
            "languages": ModelSelect2Multiple(
                url="language-autocomplete", attrs={"data-html": True}
            ),
        }
class WorkAuthorshipForm(forms.Form):
    """Form for a single authorship attribution on a work.

    Captures the author (existing or new), their ordering on the work, the
    name as printed in this abstract, and their affiliation(s).
    """

    author = forms.ModelChoiceField(
        queryset=Author.objects.all(),
        required=False,
        widget=ModelSelect2(url="author-autocomplete", attrs={"data-html": True}),
        help_text="If the author currently exists, select them to auto-populate the fields below. Any edits to the details below will be stored as new assertions about this author. If the author does not yet exist, leave this field blank and they will be created from the information you enter below.",
    )
    authorship_order = forms.IntegerField(
        min_value=1,
        help_text="Authorship order must be unique across all the authorships of this work.",
    )
    first_name = forms.CharField(
        max_length=100,
        required=True,
        label="First and middle names",
        help_text="First and middle names/initials as it appears in the context of this abstract.",
    )
    last_name = forms.CharField(
        max_length=100,
        required=True,
        help_text="Last name as it appears in the context of this abstract.",
    )
    affiliations = forms.ModelMultipleChoiceField(
        queryset=Affiliation.objects.all(),
        required=False,
        widget=ModelSelect2Multiple(
            url="affiliation-autocomplete",
            attrs={"data-html": True},
            forward=["institution"],
        ),
        help_text="If the combination of department and institution is not available in this list, then use the fields below to define it.",
    )
    institution = forms.ModelChoiceField(
        queryset=Institution.objects.all(),
        required=False,
        widget=ModelSelect2(url="institution-autocomplete", attrs={"data-html": True}),
        help_text="Use this optional menu to filter the affiliation list below. This value is only used for filtering and does not affect the final affiliation data that gets saved.",
    )

    def clean(self):
        """Cross-field check: selecting an institution alone is not enough."""
        cleaned_data = super().clean()
        affiliations = cleaned_data.get("affiliations")
        institution = cleaned_data.get("institution")
        # `affiliations` is None (not an empty queryset) when that field failed
        # validation, so use a falsiness test instead of len() to avoid a
        # TypeError while still catching the empty-selection case.
        if institution is not None and not affiliations:
            self.add_error(
                "affiliations",
                "You must enter a specific affiliation for each author. It is not sufficient to only select an institution - that field is used only to filter the available affiliations.",
            )
        return cleaned_data
class WorkForm(forms.ModelForm):
    """Create/edit form for a Work.

    Validates that full-text content, its type, and its license are supplied
    consistently, and that parent-type works do not themselves have a parent
    session.
    """

    class Meta:
        model = Work
        fields = [
            "conference",
            "title",
            "url",
            "work_type",
            "full_text",
            "full_text_type",
            "full_text_license",
            "keywords",
            "languages",
            "topics",
            "parent_session",
        ]
        widgets = {
            "keywords": ModelSelect2Multiple(url="keyword-autocomplete"),
            "topics": ModelSelect2Multiple(url="topic-autocomplete"),
            "languages": ModelSelect2Multiple(url="language-autocomplete"),
            "conference": ModelSelect2(url="conference-autocomplete"),
            "parent_session": ModelSelect2(
                url="work-autocomplete",
                forward=["conference", forward.Const(True, "parents_only")],
            ),
        }

    def clean(self):
        """Enforce full-text/type/license consistency and session nesting rules."""
        cleaned_data = super().clean()
        full_text = cleaned_data.get("full_text")
        full_text_type = cleaned_data.get("full_text_type")
        licence_type = cleaned_data.get("full_text_license")
        if full_text != "" and full_text_type == "":
            self.add_error(
                "full_text_type",
                "When full text is present, you must select a text type.",
            )
        if full_text == "" and full_text_type != "":
            self.add_error(
                "full_text",
                "When there is no full text, you may not select a text type.",
            )
        if full_text == "" and licence_type is not None:
            self.add_error(
                "full_text",
                "When there is no full text, you may not select a license type.",
            )
        work_type = cleaned_data.get("work_type")
        parent_session = cleaned_data.get("parent_session")
        # `work_type` is None when the field is blank or failed validation;
        # guard before dereferencing to avoid an AttributeError here.
        if work_type is not None and work_type.is_parent and parent_session is not None:
            self.add_error(
                "parent_session",
                f"Works of type '{work_type}' cannot have parent sessions.",
            )
        return cleaned_data
class AuthorFilter(forms.Form):
    """Search/filter form for the author list view."""

    # Sort order applied to the filtered author list.
    ordering = forms.ChoiceField(
        choices=(
            ("last_name", "Last name (A-Z)"),
            ("-last_name", "Last name (Z-A)"),
            ("-n_works", "By number of abstracts (descending)"),
            ("n_works", "By number of abstracts (ascending)"),
        ),
        required=False,
        initial="last_name",
    )
    # Filter to one specific author record.
    author = forms.ModelChoiceField(
        queryset=Author.objects.all(),
        required=False,
        widget=ModelSelect2(url="author-autocomplete", attrs={"data-html": True}),
    )
    # Free-text search across the full name.
    name = forms.CharField(max_length=100, strip=True, required=False)
    first_name = forms.CharField(
        max_length=100,
        strip=True,
        required=False,
        label="First/middle name",
        help_text="Search only first and middle names",
    )
    last_name = forms.CharField(
        max_length=100, strip=True, required=False, help_text="Search only last names"
    )
    country = forms.ModelChoiceField(
        queryset=Country.objects.all(),
        required=False,
        help_text="Authors who were once affiliated with an institution in this country",
        widget=ModelSelect2(url="country-autocomplete"),
    )
    institution = forms.ModelChoiceField(
        queryset=Institution.objects.all(),
        required=False,
        widget=ModelSelect2(url="institution-autocomplete", attrs={"data-html": True}),
        help_text="Authors who were once affiliated with this institution",
    )
    affiliation = forms.ModelChoiceField(
        queryset=Affiliation.objects.all(),
        required=False,
        widget=ModelSelect2(url="affiliation-autocomplete", attrs={"data-html": True}),
        help_text='Search by department+institution combination. This is a more granular search than "Institution" above.',
    )
    conference = forms.ModelChoiceField(
        queryset=Conference.objects.all(),
        required=False,
        widget=ModelSelect2(url="conference-autocomplete", attrs={"data-html": True}),
        help_text="Show authors with works submitted to this conference.",
    )
    singleton = forms.BooleanField(
        required=False, help_text="Show authors who only appear in one conference."
    )
class AuthorMergeForm(forms.Form):
    """Pick the author record that survives a merge."""

    into = forms.ModelChoiceField(
        required=True,
        queryset=Author.objects.all(),
        help_text="Select the author that will be used to replace the one you are merging.",
        widget=ModelSelect2(url="author-autocomplete", attrs={"data-html": True}),
    )
class InstitutionMergeForm(forms.Form):
    """Pick the institution record that survives a merge."""

    into = forms.ModelChoiceField(
        required=True,
        queryset=Institution.objects.all(),
        help_text="Select the institution that will be used to replace the one you are deleting.",
        widget=ModelSelect2(url="institution-autocomplete", attrs={"data-html": True}),
    )
class AffiliationMergeForm(forms.Form):
    """Pick the affiliation record that survives a merge."""

    into = forms.ModelChoiceField(
        required=True,
        queryset=Affiliation.objects.all(),
        help_text="Select the affiliation that will be used to replace the one you are deleting.",
        widget=ModelSelect2(url="affiliation-autocomplete", attrs={"data-html": True}),
    )
class AffiliationMultiMergeForm(forms.Form):
    """Merge several affiliations into one surviving target affiliation."""

    sources = forms.ModelMultipleChoiceField(
        required=True,
        queryset=Affiliation.objects.all(),
        help_text="Select the affiliations that you want to merge together",
        widget=ModelSelect2Multiple(
            url="affiliation-autocomplete", attrs={"data-html": True}
        ),
    )
    into = forms.ModelChoiceField(
        required=True,
        queryset=Affiliation.objects.all(),
        help_text="Select the target affiliation to merge into",
        widget=ModelSelect2(url="affiliation-autocomplete", attrs={"data-html": True}),
    )
class InstitutionMultiMergeForm(forms.Form):
    """Merge several institutions into one surviving target institution."""

    sources = forms.ModelMultipleChoiceField(
        required=True,
        queryset=Institution.objects.all(),
        help_text="Select the institutions that you want to merge together",
        widget=ModelSelect2Multiple(
            url="institution-autocomplete", attrs={"data-html": True}
        ),
    )
    into = forms.ModelChoiceField(
        required=True,
        queryset=Institution.objects.all(),
        help_text="Select the target institution to merge into",
        widget=ModelSelect2(url="institution-autocomplete", attrs={"data-html": True}),
    )
class AffiliationEditForm(forms.ModelForm):
    """Edit an affiliation's department text and its parent institution."""

    institution = forms.ModelChoiceField(
        required=True,
        queryset=Institution.objects.all(),
        widget=ModelSelect2(url="institution-autocomplete", attrs={"data-html": True}),
    )

    class Meta:
        model = Affiliation
        fields = ["department", "institution"]
class FullInstitutionForm(forms.Form):
    """Search/filter form for the institution list view."""

    department = forms.CharField(
        max_length=500, required=False, help_text="This free-text field is searchable"
    )
    no_department = forms.BooleanField(
        required=False,
        # NOTE(review): "specifiy" typo is user-visible copy; fix in a copy pass.
        help_text="Show institutions with at least one affiliation that does not specifiy a department?",
    )
    singleton = forms.BooleanField(
        required=False,
        help_text="Show institutions that appear only in one conference (Useful for identifying institutions that ought to be merged.)",
    )
    affiliation = forms.ModelChoiceField(
        queryset=Affiliation.objects.all(),
        required=False,
        widget=ModelSelect2(url="affiliation-autocomplete", attrs={"data-html": True}),
    )
    conference = forms.ModelChoiceField(
        queryset=Conference.objects.all(),
        required=False,
        widget=ModelSelect2(url="conference-autocomplete", attrs={"data-html": True}),
    )
    institution = forms.ModelChoiceField(
        queryset=Institution.objects.all(),
        required=False,
        widget=ModelSelect2(url="institution-autocomplete", attrs={"data-html": True}),
    )
    country = forms.ModelChoiceField(
        # .filter() with no arguments was a no-op; .all() expresses the same queryset.
        queryset=Country.objects.all(),
        required=False,
        widget=ModelSelect2(url="country-autocomplete"),
    )
    # Sort order applied to the filtered institution list.
    ordering = forms.ChoiceField(
        choices=(
            ("a", "A-Z"),
            ("n_dsc", "By number of abstracts (descending)"),
            ("n_asc", "By number of abstracts (ascending)"),
        ),
        required=False,
        initial="n_dsc",
    )
class KeywordMergeForm(forms.Form):
    """Pick the keyword record that survives a merge."""

    into = forms.ModelChoiceField(
        required=True,
        queryset=Keyword.objects.all(),
        help_text="Select the keyword that will be used to replace the one you are deleting.",
        widget=ModelSelect2(url="keyword-autocomplete"),
    )
class KeywordMultiMergeForm(forms.Form):
    """Merge several keywords into one surviving target keyword."""

    sources = forms.ModelMultipleChoiceField(
        required=True,
        queryset=Keyword.objects.all(),
        help_text="Select the keywords that you want to merge together",
        widget=ModelSelect2Multiple(url="keyword-autocomplete"),
    )
    into = forms.ModelChoiceField(
        required=True,
        queryset=Keyword.objects.all(),
        help_text="Select the target keyword to merge into",
        widget=ModelSelect2(url="keyword-autocomplete"),
    )
class TopicMultiMergeForm(forms.Form):
    """Merge several topics into one surviving target topic."""

    sources = forms.ModelMultipleChoiceField(
        required=True,
        queryset=Topic.objects.all(),
        help_text="Select the topics that you want to merge together",
        widget=ModelSelect2Multiple(url="topic-autocomplete"),
    )
    into = forms.ModelChoiceField(
        required=True,
        queryset=Topic.objects.all(),
        help_text="Select the target topic to merge into",
        widget=ModelSelect2(url="topic-autocomplete"),
    )
class TopicMergeForm(forms.Form):
    """Pick the topic record that survives a merge."""

    into = forms.ModelChoiceField(
        required=True,
        queryset=Topic.objects.all(),
        help_text="Select the topic that will be used to replace the one you are deleting.",
        widget=ModelSelect2(url="topic-autocomplete"),
    )
class LanguageMergeForm(forms.Form):
    """Pick the language record that survives a merge."""

    into = forms.ModelChoiceField(
        required=True,
        queryset=Language.objects.all(),
        help_text="Select the language that will be used to replace the one you are deleting.",
        widget=ModelSelect2(url="language-autocomplete"),
    )
class WorkTypeMergeForm(forms.Form):
    """Pick the work type that survives a merge."""

    into = forms.ModelChoiceField(
        required=True,
        queryset=WorkType.objects.all(),
        help_text="Select the type that will be used to replace the one you are deleting.",
    )
class TagForm(forms.Form):
    """Search form for tags: name filter plus result ordering."""

    name = forms.CharField(
        required=False, max_length=100, help_text="Search by tag name"
    )
    ordering = forms.ChoiceField(
        required=True,
        initial="a",
        choices=(
            ("a", "A-Z"),
            ("n_asc", "Number of Works (ascending)"),
            ("n_dsc", "Number of works (descending)"),
        ),
    )
class ConferenceCheckoutForm(forms.Form):
    """Assign or clear the user responsible for entering a conference."""

    USER_CHOICES = (
        ("self", "Assign self (replacing any currently-associated user)"),
        ("clear", "Clear self/others from conference"),
    )
    # Data-entry progress status; choices come from the Conference model.
    entry_status = forms.ChoiceField(
        choices=Conference.ENTRY_STATUS,
        widget=forms.RadioSelect(choices=Conference.ENTRY_STATUS),
    )
    # Whether to claim the conference for the current user or release it.
    assign_user = forms.ChoiceField(
        choices=USER_CHOICES,
        initial="self",
        widget=forms.RadioSelect(choices=USER_CHOICES),
    )
def get_license_choices():
    """Build the license-action choices: two static actions plus one entry per License."""
    static_choices = [("", "Do nothing"), ("clear", "Clear all licenses")]
    license_choices = [(lic.id, lic.title) for lic in License.objects.all()]
    return static_choices + license_choices
class ConferenceForm(forms.ModelForm):
    """Create/edit form for a Conference, with optional bulk license reassignment."""

    organizers = forms.ModelMultipleChoiceField(
        queryset=Organizer.objects.all(),
        required=False,
        help_text="Organizers of the conference",
    )
    license_action = forms.ChoiceField(
        choices=get_license_choices,
        initial="",
        required=False,
        label="Bulk reassign licenses?",
    )

    class Meta:
        model = Conference
        fields = [
            "year",
            "short_title",
            "theme_title",
            "hosting_institutions",
            "url",
            "city",
            "state_province_region",
            "country",
            "organizers",
            "start_date",
            "end_date",
            "notes",
            "references",
            "contributors",
            "attendance",
            "entry_status",
            "program_available",
            "abstracts_available",
            "license_action",
        ]
        widgets = {
            "entry_status": forms.RadioSelect(choices=Conference.ENTRY_STATUS),
            "start_date": forms.DateInput(attrs={"type": "date"}),
            "end_date": forms.DateInput(attrs={"type": "date"}),
            "country": ModelSelect2(url="country-autocomplete"),
            "hosting_institutions": ModelSelect2Multiple(
                url="institution-autocomplete", attrs={"data-html": True}
            ),
            "references": forms.Textarea(attrs={"rows": 2}),
            "contributors": forms.Textarea(attrs={"rows": 2}),
            "attendance": forms.Textarea(attrs={"rows": 2}),
        }

    def clean(self, *args, **kwargs):
        """Require at least one identifying detail for the conference."""
        cleaned_data = super().clean(*args, **kwargs)
        short_title = cleaned_data.get("short_title")
        theme_title = cleaned_data.get("theme_title")
        # `city` was previously an undefined name here, raising NameError
        # whenever both titles were blank; read it from cleaned_data instead.
        city = cleaned_data.get("city")
        hosting_institutions = cleaned_data.get("hosting_institutions")
        # Falsy checks also cover None values left by failed field validation
        # (len(None) would raise a TypeError).
        if (
            not short_title
            and not theme_title
            and not city
            and not hosting_institutions
        ):
            # add_error() only accepts an existing field name or None; the old
            # "Conference creation error" label made Django raise ValueError.
            self.add_error(
                None,
                "You must supply at least one of either: short title, theme title, city, or at least one hosting institution",
            )
        return cleaned_data
class ConferenceXMLUploadForm(forms.Form):
    """Single-field form for uploading a conference XML document."""
    file = forms.FileField()
class ConferenceSeriesInline(forms.Form):
    """Inline form linking a conference to a series with its sequence number."""
    series = forms.ModelChoiceField(
        queryset=ConferenceSeries.objects.all(), required=True
    )
    number = forms.IntegerField(
        min_value=1,
        required=False,
        help_text="Number in the sequence of this conference series.",
    )
| [
"django.forms.DateInput",
"django.forms.BooleanField",
"django.forms.CharField",
"django.forms.ChoiceField",
"django.forms.IntegerField",
"django.forms.Textarea",
"dal.autocomplete.ModelSelect2",
"dal.forward.Const",
"django.forms.RadioSelect",
"django.forms.FileField",
"dal.autocomplete.ModelSe... | [((615, 969), 'django.forms.ChoiceField', 'forms.ChoiceField', ([], {'choices': "(('year', 'Conference year (ascending)'), ('-year',\n 'Conference year (descending)'), ('rank', 'Text search relevance'), (\n 'title', 'Title (A-Z)'), ('-title', 'Title (Z-A)'), ('last_name',\n 'Last name of first author (A-Z)'), ('-last_name',\n 'Last name of first author (Z-A)'))", 'required': '(False)', 'initial': '"""year"""'}), "(choices=(('year', 'Conference year (ascending)'), (\n '-year', 'Conference year (descending)'), ('rank',\n 'Text search relevance'), ('title', 'Title (A-Z)'), ('-title',\n 'Title (Z-A)'), ('last_name', 'Last name of first author (A-Z)'), (\n '-last_name', 'Last name of first author (Z-A)')), required=False,\n initial='year')\n", (632, 969), False, 'from django import forms\n'), ((1085, 1369), 'django.forms.CharField', 'forms.CharField', ([], {'max_length': '(500)', 'strip': '(True)', 'required': '(False)', 'help_text': '"""Search abstracts by title and by full text content when available. Search will look for all terms by default. Use OR to look for any terms in the query, and quotation marks to search for exact phrases."""'}), "(max_length=500, strip=True, required=False, help_text=\n 'Search abstracts by title and by full text content when available. Search will look for all terms by default. 
Use OR to look for any terms in the query, and quotation marks to search for exact phrases.'\n )\n", (1100, 1369), False, 'from django import forms\n'), ((1425, 1495), 'django.forms.BooleanField', 'forms.BooleanField', ([], {'required': '(False)', 'label': '"""Full text has been indexed"""'}), "(required=False, label='Full text has been indexed')\n", (1443, 1495), False, 'from django import forms\n'), ((1535, 1609), 'django.forms.BooleanField', 'forms.BooleanField', ([], {'required': '(False)', 'label': '"""Full text is publicly viewable"""'}), "(required=False, label='Full text is publicly viewable')\n", (1553, 1609), False, 'from django import forms\n'), ((4751, 4873), 'django.forms.IntegerField', 'forms.IntegerField', ([], {'min_value': '(1)', 'help_text': '"""Authorship order must be unique across all the authorships of this work."""'}), "(min_value=1, help_text=\n 'Authorship order must be unique across all the authorships of this work.')\n", (4769, 4873), False, 'from django import forms\n'), ((4909, 5094), 'django.forms.CharField', 'forms.CharField', ([], {'max_length': '(100)', 'required': '(True)', 'label': '"""First and middle names"""', 'help_text': '"""First and middle names/initials as it appears in the context of this abstract."""'}), "(max_length=100, required=True, label=\n 'First and middle names', help_text=\n 'First and middle names/initials as it appears in the context of this abstract.'\n )\n", (4924, 5094), False, 'from django import forms\n'), ((5135, 5256), 'django.forms.CharField', 'forms.CharField', ([], {'max_length': '(100)', 'required': '(True)', 'help_text': '"""Last name as it appears in the context of this abstract."""'}), "(max_length=100, required=True, help_text=\n 'Last name as it appears in the context of this abstract.')\n", (5150, 5256), False, 'from django import forms\n'), ((8705, 8954), 'django.forms.ChoiceField', 'forms.ChoiceField', ([], {'choices': "(('last_name', 'Last name (A-Z)'), ('-last_name', 'Last name (Z-A)'), 
(\n '-n_works', 'By number of abstracts (descending)'), ('n_works',\n 'By number of abstracts (ascending)'))", 'required': '(False)', 'initial': '"""last_name"""'}), "(choices=(('last_name', 'Last name (A-Z)'), ('-last_name',\n 'Last name (Z-A)'), ('-n_works', 'By number of abstracts (descending)'),\n ('n_works', 'By number of abstracts (ascending)')), required=False,\n initial='last_name')\n", (8722, 8954), False, 'from django import forms\n'), ((9233, 9292), 'django.forms.CharField', 'forms.CharField', ([], {'max_length': '(100)', 'strip': '(True)', 'required': '(False)'}), '(max_length=100, strip=True, required=False)\n', (9248, 9292), False, 'from django import forms\n'), ((9310, 9449), 'django.forms.CharField', 'forms.CharField', ([], {'max_length': '(100)', 'strip': '(True)', 'required': '(False)', 'label': '"""First/middle name"""', 'help_text': '"""Search only first and middle names"""'}), "(max_length=100, strip=True, required=False, label=\n 'First/middle name', help_text='Search only first and middle names')\n", (9325, 9449), False, 'from django import forms\n'), ((9508, 9608), 'django.forms.CharField', 'forms.CharField', ([], {'max_length': '(100)', 'strip': '(True)', 'required': '(False)', 'help_text': '"""Search only last names"""'}), "(max_length=100, strip=True, required=False, help_text=\n 'Search only last names')\n", (9523, 9608), False, 'from django import forms\n'), ((10773, 10873), 'django.forms.BooleanField', 'forms.BooleanField', ([], {'required': '(False)', 'help_text': '"""Show authors who only appear in one conference."""'}), "(required=False, help_text=\n 'Show authors who only appear in one conference.')\n", (10791, 10873), False, 'from django import forms\n'), ((13516, 13616), 'django.forms.CharField', 'forms.CharField', ([], {'max_length': '(500)', 'required': '(False)', 'help_text': '"""This free-text field is searchable"""'}), "(max_length=500, required=False, help_text=\n 'This free-text field is searchable')\n", (13531, 13616), 
False, 'from django import forms\n'), ((13646, 13788), 'django.forms.BooleanField', 'forms.BooleanField', ([], {'required': '(False)', 'help_text': '"""Show institutions with at least one affiliation that does not specifiy a department?"""'}), "(required=False, help_text=\n 'Show institutions with at least one affiliation that does not specifiy a department?'\n )\n", (13664, 13788), False, 'from django import forms\n'), ((13818, 13991), 'django.forms.BooleanField', 'forms.BooleanField', ([], {'required': '(False)', 'help_text': '"""Show institutions that appear only in one conference (Useful for identifying institutions that ought to be merged.)"""'}), "(required=False, help_text=\n 'Show institutions that appear only in one conference (Useful for identifying institutions that ought to be merged.)'\n )\n", (13836, 13991), False, 'from django import forms\n'), ((14803, 14984), 'django.forms.ChoiceField', 'forms.ChoiceField', ([], {'choices': "(('a', 'A-Z'), ('n_dsc', 'By number of abstracts (descending)'), ('n_asc',\n 'By number of abstracts (ascending)'))", 'required': '(False)', 'initial': '"""n_dsc"""'}), "(choices=(('a', 'A-Z'), ('n_dsc',\n 'By number of abstracts (descending)'), ('n_asc',\n 'By number of abstracts (ascending)')), required=False, initial='n_dsc')\n", (14820, 14984), False, 'from django import forms\n'), ((17231, 17310), 'django.forms.CharField', 'forms.CharField', ([], {'max_length': '(100)', 'required': '(False)', 'help_text': '"""Search by tag name"""'}), "(max_length=100, required=False, help_text='Search by tag name')\n", (17246, 17310), False, 'from django import forms\n'), ((17340, 17502), 'django.forms.ChoiceField', 'forms.ChoiceField', ([], {'choices': "(('a', 'A-Z'), ('n_asc', 'Number of Works (ascending)'), ('n_dsc',\n 'Number of works (descending)'))", 'required': '(True)', 'initial': '"""a"""'}), "(choices=(('a', 'A-Z'), ('n_asc',\n 'Number of Works (ascending)'), ('n_dsc',\n 'Number of works (descending)')), required=True, 
initial='a')\n", (17357, 17502), False, 'from django import forms\n'), ((18474, 18585), 'django.forms.ChoiceField', 'forms.ChoiceField', ([], {'choices': 'get_license_choices', 'initial': '""""""', 'required': '(False)', 'label': '"""Bulk reassign licenses?"""'}), "(choices=get_license_choices, initial='', required=False,\n label='Bulk reassign licenses?')\n", (18491, 18585), False, 'from django import forms\n'), ((20550, 20567), 'django.forms.FileField', 'forms.FileField', ([], {}), '()\n', (20565, 20567), False, 'from django import forms\n'), ((20731, 20846), 'django.forms.IntegerField', 'forms.IntegerField', ([], {'min_value': '(1)', 'required': '(False)', 'help_text': '"""Number in the sequence of this conference series."""'}), "(min_value=1, required=False, help_text=\n 'Number in the sequence of this conference series.')\n", (20749, 20846), False, 'from django import forms\n'), ((2050, 2124), 'dal.autocomplete.ModelSelect2Multiple', 'ModelSelect2Multiple', ([], {'url': '"""author-autocomplete"""', 'attrs': "{'data-html': True}"}), "(url='author-autocomplete', attrs={'data-html': True})\n", (2070, 2124), False, 'from dal.autocomplete import ModelSelect2, ModelSelect2Multiple\n'), ((2343, 2386), 'dal.autocomplete.ModelSelect2', 'ModelSelect2', ([], {'url': '"""conference-autocomplete"""'}), "(url='conference-autocomplete')\n", (2355, 2386), False, 'from dal.autocomplete import ModelSelect2, ModelSelect2Multiple\n'), ((2584, 2663), 'dal.autocomplete.ModelSelect2Multiple', 'ModelSelect2Multiple', ([], {'url': '"""institution-autocomplete"""', 'attrs': "{'data-html': True}"}), "(url='institution-autocomplete', attrs={'data-html': True})\n", (2604, 2663), False, 'from dal.autocomplete import ModelSelect2, ModelSelect2Multiple\n'), ((2927, 3006), 'dal.autocomplete.ModelSelect2Multiple', 'ModelSelect2Multiple', ([], {'url': '"""affiliation-autocomplete"""', 'attrs': "{'data-html': True}"}), "(url='affiliation-autocomplete', attrs={'data-html': True})\n", (2947, 
3006), False, 'from dal.autocomplete import ModelSelect2, ModelSelect2Multiple\n'), ((3819, 3894), 'dal.autocomplete.ModelSelect2Multiple', 'ModelSelect2Multiple', ([], {'url': '"""keyword-autocomplete"""', 'attrs': "{'data-html': True}"}), "(url='keyword-autocomplete', attrs={'data-html': True})\n", (3839, 3894), False, 'from dal.autocomplete import ModelSelect2, ModelSelect2Multiple\n'), ((3948, 4021), 'dal.autocomplete.ModelSelect2Multiple', 'ModelSelect2Multiple', ([], {'url': '"""topic-autocomplete"""', 'attrs': "{'data-html': True}"}), "(url='topic-autocomplete', attrs={'data-html': True})\n", (3968, 4021), False, 'from dal.autocomplete import ModelSelect2, ModelSelect2Multiple\n'), ((4078, 4154), 'dal.autocomplete.ModelSelect2Multiple', 'ModelSelect2Multiple', ([], {'url': '"""language-autocomplete"""', 'attrs': "{'data-html': True}"}), "(url='language-autocomplete', attrs={'data-html': True})\n", (4098, 4154), False, 'from dal.autocomplete import ModelSelect2, ModelSelect2Multiple\n'), ((4351, 4417), 'dal.autocomplete.ModelSelect2', 'ModelSelect2', ([], {'url': '"""author-autocomplete"""', 'attrs': "{'data-html': True}"}), "(url='author-autocomplete', attrs={'data-html': True})\n", (4363, 4417), False, 'from dal.autocomplete import ModelSelect2, ModelSelect2Multiple\n'), ((5417, 5526), 'dal.autocomplete.ModelSelect2Multiple', 'ModelSelect2Multiple', ([], {'url': '"""affiliation-autocomplete"""', 'attrs': "{'data-html': True}", 'forward': "['institution']"}), "(url='affiliation-autocomplete', attrs={'data-html': \n True}, forward=['institution'])\n", (5437, 5526), False, 'from dal.autocomplete import ModelSelect2, ModelSelect2Multiple\n'), ((5842, 5913), 'dal.autocomplete.ModelSelect2', 'ModelSelect2', ([], {'url': '"""institution-autocomplete"""', 'attrs': "{'data-html': True}"}), "(url='institution-autocomplete', attrs={'data-html': True})\n", (5854, 5913), False, 'from dal.autocomplete import ModelSelect2, ModelSelect2Multiple\n'), ((7026, 7074), 
'dal.autocomplete.ModelSelect2Multiple', 'ModelSelect2Multiple', ([], {'url': '"""keyword-autocomplete"""'}), "(url='keyword-autocomplete')\n", (7046, 7074), False, 'from dal.autocomplete import ModelSelect2, ModelSelect2Multiple\n'), ((7098, 7144), 'dal.autocomplete.ModelSelect2Multiple', 'ModelSelect2Multiple', ([], {'url': '"""topic-autocomplete"""'}), "(url='topic-autocomplete')\n", (7118, 7144), False, 'from dal.autocomplete import ModelSelect2, ModelSelect2Multiple\n'), ((7171, 7220), 'dal.autocomplete.ModelSelect2Multiple', 'ModelSelect2Multiple', ([], {'url': '"""language-autocomplete"""'}), "(url='language-autocomplete')\n", (7191, 7220), False, 'from dal.autocomplete import ModelSelect2, ModelSelect2Multiple\n'), ((7248, 7291), 'dal.autocomplete.ModelSelect2', 'ModelSelect2', ([], {'url': '"""conference-autocomplete"""'}), "(url='conference-autocomplete')\n", (7260, 7291), False, 'from dal.autocomplete import ModelSelect2, ModelSelect2Multiple\n'), ((9148, 9214), 'dal.autocomplete.ModelSelect2', 'ModelSelect2', ([], {'url': '"""author-autocomplete"""', 'attrs': "{'data-html': True}"}), "(url='author-autocomplete', attrs={'data-html': True})\n", (9160, 9214), False, 'from dal.autocomplete import ModelSelect2, ModelSelect2Multiple\n'), ((9825, 9865), 'dal.autocomplete.ModelSelect2', 'ModelSelect2', ([], {'url': '"""country-autocomplete"""'}), "(url='country-autocomplete')\n", (9837, 9865), False, 'from dal.autocomplete import ModelSelect2, ModelSelect2Multiple\n'), ((9998, 10069), 'dal.autocomplete.ModelSelect2', 'ModelSelect2', ([], {'url': '"""institution-autocomplete"""', 'attrs': "{'data-html': True}"}), "(url='institution-autocomplete', attrs={'data-html': True})\n", (10010, 10069), False, 'from dal.autocomplete import ModelSelect2, ModelSelect2Multiple\n'), ((10278, 10349), 'dal.autocomplete.ModelSelect2', 'ModelSelect2', ([], {'url': '"""affiliation-autocomplete"""', 'attrs': "{'data-html': True}"}), "(url='affiliation-autocomplete', 
attrs={'data-html': True})\n", (10290, 10349), False, 'from dal.autocomplete import ModelSelect2, ModelSelect2Multiple\n'), ((10604, 10674), 'dal.autocomplete.ModelSelect2', 'ModelSelect2', ([], {'url': '"""conference-autocomplete"""', 'attrs': "{'data-html': True}"}), "(url='conference-autocomplete', attrs={'data-html': True})\n", (10616, 10674), False, 'from dal.autocomplete import ModelSelect2, ModelSelect2Multiple\n'), ((11009, 11075), 'dal.autocomplete.ModelSelect2', 'ModelSelect2', ([], {'url': '"""author-autocomplete"""', 'attrs': "{'data-html': True}"}), "(url='author-autocomplete', attrs={'data-html': True})\n", (11021, 11075), False, 'from dal.autocomplete import ModelSelect2, ModelSelect2Multiple\n'), ((11335, 11406), 'dal.autocomplete.ModelSelect2', 'ModelSelect2', ([], {'url': '"""institution-autocomplete"""', 'attrs': "{'data-html': True}"}), "(url='institution-autocomplete', attrs={'data-html': True})\n", (11347, 11406), False, 'from dal.autocomplete import ModelSelect2, ModelSelect2Multiple\n'), ((11672, 11743), 'dal.autocomplete.ModelSelect2', 'ModelSelect2', ([], {'url': '"""affiliation-autocomplete"""', 'attrs': "{'data-html': True}"}), "(url='affiliation-autocomplete', attrs={'data-html': True})\n", (11684, 11743), False, 'from dal.autocomplete import ModelSelect2, ModelSelect2Multiple\n'), ((12025, 12104), 'dal.autocomplete.ModelSelect2Multiple', 'ModelSelect2Multiple', ([], {'url': '"""affiliation-autocomplete"""', 'attrs': "{'data-html': True}"}), "(url='affiliation-autocomplete', attrs={'data-html': True})\n", (12045, 12104), False, 'from dal.autocomplete import ModelSelect2, ModelSelect2Multiple\n'), ((12328, 12399), 'dal.autocomplete.ModelSelect2', 'ModelSelect2', ([], {'url': '"""affiliation-autocomplete"""', 'attrs': "{'data-html': True}"}), "(url='affiliation-autocomplete', attrs={'data-html': True})\n", (12340, 12399), False, 'from dal.autocomplete import ModelSelect2, ModelSelect2Multiple\n'), ((12647, 12726), 
'dal.autocomplete.ModelSelect2Multiple', 'ModelSelect2Multiple', ([], {'url': '"""institution-autocomplete"""', 'attrs': "{'data-html': True}"}), "(url='institution-autocomplete', attrs={'data-html': True})\n", (12667, 12726), False, 'from dal.autocomplete import ModelSelect2, ModelSelect2Multiple\n'), ((12950, 13021), 'dal.autocomplete.ModelSelect2', 'ModelSelect2', ([], {'url': '"""institution-autocomplete"""', 'attrs': "{'data-html': True}"}), "(url='institution-autocomplete', attrs={'data-html': True})\n", (12962, 13021), False, 'from dal.autocomplete import ModelSelect2, ModelSelect2Multiple\n'), ((13264, 13335), 'dal.autocomplete.ModelSelect2', 'ModelSelect2', ([], {'url': '"""institution-autocomplete"""', 'attrs': "{'data-html': True}"}), "(url='institution-autocomplete', attrs={'data-html': True})\n", (13276, 13335), False, 'from dal.autocomplete import ModelSelect2, ModelSelect2Multiple\n'), ((14130, 14201), 'dal.autocomplete.ModelSelect2', 'ModelSelect2', ([], {'url': '"""affiliation-autocomplete"""', 'attrs': "{'data-html': True}"}), "(url='affiliation-autocomplete', attrs={'data-html': True})\n", (14142, 14201), False, 'from dal.autocomplete import ModelSelect2, ModelSelect2Multiple\n'), ((14332, 14402), 'dal.autocomplete.ModelSelect2', 'ModelSelect2', ([], {'url': '"""conference-autocomplete"""', 'attrs': "{'data-html': True}"}), "(url='conference-autocomplete', attrs={'data-html': True})\n", (14344, 14402), False, 'from dal.autocomplete import ModelSelect2, ModelSelect2Multiple\n'), ((14535, 14606), 'dal.autocomplete.ModelSelect2', 'ModelSelect2', ([], {'url': '"""institution-autocomplete"""', 'attrs': "{'data-html': True}"}), "(url='institution-autocomplete', attrs={'data-html': True})\n", (14547, 14606), False, 'from dal.autocomplete import ModelSelect2, ModelSelect2Multiple\n'), ((14740, 14780), 'dal.autocomplete.ModelSelect2', 'ModelSelect2', ([], {'url': '"""country-autocomplete"""'}), "(url='country-autocomplete')\n", (14752, 14780), False, 
'from dal.autocomplete import ModelSelect2, ModelSelect2Multiple\n'), ((15183, 15223), 'dal.autocomplete.ModelSelect2', 'ModelSelect2', ([], {'url': '"""keyword-autocomplete"""'}), "(url='keyword-autocomplete')\n", (15195, 15223), False, 'from dal.autocomplete import ModelSelect2, ModelSelect2Multiple\n'), ((15493, 15541), 'dal.autocomplete.ModelSelect2Multiple', 'ModelSelect2Multiple', ([], {'url': '"""keyword-autocomplete"""'}), "(url='keyword-autocomplete')\n", (15513, 15541), False, 'from dal.autocomplete import ModelSelect2, ModelSelect2Multiple\n'), ((15735, 15775), 'dal.autocomplete.ModelSelect2', 'ModelSelect2', ([], {'url': '"""keyword-autocomplete"""'}), "(url='keyword-autocomplete')\n", (15747, 15775), False, 'from dal.autocomplete import ModelSelect2, ModelSelect2Multiple\n'), ((16007, 16053), 'dal.autocomplete.ModelSelect2Multiple', 'ModelSelect2Multiple', ([], {'url': '"""topic-autocomplete"""'}), "(url='topic-autocomplete')\n", (16027, 16053), False, 'from dal.autocomplete import ModelSelect2, ModelSelect2Multiple\n'), ((16243, 16281), 'dal.autocomplete.ModelSelect2', 'ModelSelect2', ([], {'url': '"""topic-autocomplete"""'}), "(url='topic-autocomplete')\n", (16255, 16281), False, 'from dal.autocomplete import ModelSelect2, ModelSelect2Multiple\n'), ((16495, 16533), 'dal.autocomplete.ModelSelect2', 'ModelSelect2', ([], {'url': '"""topic-autocomplete"""'}), "(url='topic-autocomplete')\n", (16507, 16533), False, 'from dal.autocomplete import ModelSelect2, ModelSelect2Multiple\n'), ((16787, 16828), 'dal.autocomplete.ModelSelect2', 'ModelSelect2', ([], {'url': '"""language-autocomplete"""'}), "(url='language-autocomplete')\n", (16799, 16828), False, 'from dal.autocomplete import ModelSelect2, ModelSelect2Multiple\n'), ((17870, 17920), 'django.forms.RadioSelect', 'forms.RadioSelect', ([], {'choices': 'Conference.ENTRY_STATUS'}), '(choices=Conference.ENTRY_STATUS)\n', (17887, 17920), False, 'from django import forms\n'), ((18035, 18074), 
'django.forms.RadioSelect', 'forms.RadioSelect', ([], {'choices': 'USER_CHOICES'}), '(choices=USER_CHOICES)\n', (18052, 18074), False, 'from django import forms\n'), ((19254, 19304), 'django.forms.RadioSelect', 'forms.RadioSelect', ([], {'choices': 'Conference.ENTRY_STATUS'}), '(choices=Conference.ENTRY_STATUS)\n', (19271, 19304), False, 'from django import forms\n'), ((19332, 19371), 'django.forms.DateInput', 'forms.DateInput', ([], {'attrs': "{'type': 'date'}"}), "(attrs={'type': 'date'})\n", (19347, 19371), False, 'from django import forms\n'), ((19397, 19436), 'django.forms.DateInput', 'forms.DateInput', ([], {'attrs': "{'type': 'date'}"}), "(attrs={'type': 'date'})\n", (19412, 19436), False, 'from django import forms\n'), ((19461, 19501), 'dal.autocomplete.ModelSelect2', 'ModelSelect2', ([], {'url': '"""country-autocomplete"""'}), "(url='country-autocomplete')\n", (19473, 19501), False, 'from dal.autocomplete import ModelSelect2, ModelSelect2Multiple\n'), ((19539, 19618), 'dal.autocomplete.ModelSelect2Multiple', 'ModelSelect2Multiple', ([], {'url': '"""institution-autocomplete"""', 'attrs': "{'data-html': True}"}), "(url='institution-autocomplete', attrs={'data-html': True})\n", (19559, 19618), False, 'from dal.autocomplete import ModelSelect2, ModelSelect2Multiple\n'), ((19676, 19709), 'django.forms.Textarea', 'forms.Textarea', ([], {'attrs': "{'rows': 2}"}), "(attrs={'rows': 2})\n", (19690, 19709), False, 'from django import forms\n'), ((19739, 19772), 'django.forms.Textarea', 'forms.Textarea', ([], {'attrs': "{'rows': 2}"}), "(attrs={'rows': 2})\n", (19753, 19772), False, 'from django import forms\n'), ((19800, 19833), 'django.forms.Textarea', 'forms.Textarea', ([], {'attrs': "{'rows': 2}"}), "(attrs={'rows': 2})\n", (19814, 19833), False, 'from django import forms\n'), ((7417, 7452), 'dal.forward.Const', 'forward.Const', (['(True)', '"""parents_only"""'], {}), "(True, 'parents_only')\n", (7430, 7452), False, 'from dal import forward\n')] |
import sys
import requests  # NOTE(review): imported but never used in this script -- confirm before removing
from bs4 import BeautifulSoup
from urllib import request


def main():
    """Fetch the URL given as the first CLI argument and print the page's
    <title> and <pre> text contents."""
    if len(sys.argv) < 2:
        # Fail with a usable message instead of an IndexError traceback.
        sys.exit("usage: %s <url>" % sys.argv[0])
    url = sys.argv[1]
    # Context manager guarantees the response is closed even if parsing raises.
    with request.urlopen(url) as response:
        soup = BeautifulSoup(response, features="html.parser")
    print(soup.title.text)
    print(soup.pre.text)


if __name__ == "__main__":
    main()
| [
"bs4.BeautifulSoup",
"urllib.request.urlopen"
] | [((126, 146), 'urllib.request.urlopen', 'request.urlopen', (['url'], {}), '(url)\n', (141, 146), False, 'from urllib import request\n'), ((154, 201), 'bs4.BeautifulSoup', 'BeautifulSoup', (['response'], {'features': '"""html.parser"""'}), "(response, features='html.parser')\n", (167, 201), False, 'from bs4 import BeautifulSoup\n')] |
#dada
from dearpygui import core, simple
from generic_window import GenericWindow
from tensor_flow_interface import TensorFlowInterface
from tensor_flow_interface import ModelDataContainer
from import_window import ImportWindow
from output_visualisation_window import OutputVisualisationWindow
from better_visualizer import BetterVisualizer
from history_graph_window import HistoryGraphWindow
class SettingsWindow(GenericWindow):
    """Main network-settings window: lets the user configure layer counts/types,
    build the TensorFlow model, train it and open the visualisation windows."""
    # GUI labels (Polish): "Network settings", "Simulation settings",
    # "Create network", "Create network visualization", "Create output graph".
    windowName = "Ustawienia sieci"
    simulationSetting = "Ustawienia symulacji"
    createNetwork = "Stworz siec"
    createVisualization = "Stworz wizualizacje sieci"
    createOutputPrediction = 'Stworz wykres wyjsciowy'
    # "Number of layers", "Layer ", "Type" -- NOTE: 'type' shadows the builtin,
    # but only inside the class namespace.
    numberOfLayers = "Ilosc warstw"
    layer = "Warstwa "
    type = "Typ"
    # "Use default inputs/outputs", "Activation", "Train network",
    # "Draw history graph".
    use2DInOut = 'Uzyj domyslnych wejsc/wyjsc'
    activation = "Aktywacja"
    trainData = "Trenuj siec"
    historyGraph = "Rysuj graf historii"
    # Choices offered in the per-layer combo boxes.
    neuronTypeList = ['Dense', 'Flatten', 'Activation']
    neuronActivationList = ['relu', 'sigmoid', 'softmax', 'softplus', 'exponential']
    timeToTrain = "Czas treningu"
    # Window geometry in pixels.
    xSize = 708
    ySize = 368
    xPos = 800
    yPos = 30
    # Collaborator windows/objects, populated in __init__.
    visualization_window = None
    tensorFlowInterface = None
    neuronDataContainer = None
    betterVisualizer = None
    historyGraphWindow = None
    lastEpochs = None
    # Defaults for ModelDataContainer: [layer count, neurons/layer, types, activations].
    neuronDataContainerDefaultData = [2, [1,1,1,1,1,1,1,1], ['Dense', 'Dense','Dense', 'Dense','Dense', 'Dense','Dense', 'Dense'], ['relu', 'relu','relu', 'relu','relu', 'relu','relu', 'relu']]
    maxNumberOfLayers = 8
    def __init__(self):
        """Build all widgets, hide the per-layer rows beyond the current layer
        count, and create the initial TensorFlow model."""
        self.tensorFlowInterface = TensorFlowInterface()
        self.outputVisualisationWindow = OutputVisualisationWindow()
        self.historyGraphWindow = HistoryGraphWindow()
        with simple.window(self.windowName, width=self.xSize, height=self.ySize, x_pos=self.xPos, y_pos=self.yPos):
            core.add_text(self.simulationSetting)
            core.add_button(self.createNetwork, callback=self.create_network_callback)
            core.add_same_line()
            core.add_button(self.createVisualization, callback=self.create_visualisation_callback)
            core.add_same_line()
            core.add_button(self.createOutputPrediction, callback=self.create_output_prediction)
            core.add_same_line()
            core.add_button(self.historyGraph, callback=self.create_history_graph)
            core.add_button(self.trainData, callback=self.execute_training_data)
            core.add_same_line()
            #core.add_slider_int(self.timeToTrain, default_value = 100, min_value=1, max_value=1000, width = 200)
            core.add_input_int(self.timeToTrain, default_value=100, min_value=1, max_value=1000, width = 200)
            core.add_same_line()
            core.add_checkbox(self.use2DInOut)
            core.add_slider_int(self.numberOfLayers, default_value=2, min_value=2, max_value=self.maxNumberOfLayers, callback=self.layer_slider_callback, width = 200)
            # One row per possible layer; '##<i>' suffixes keep widget names
            # unique while sharing the visible label.
            for i in range(0, self.maxNumberOfLayers):
                core.add_slider_int(self.layer + str(i), default_value=1, width = 200)
                core.add_same_line()
                core.add_combo(self.type +'##'+ str(i), items=self.neuronTypeList, width=70, callback = self.change_list_callback, default_value='Dense')
                core.add_same_line()
                core.add_combo(self.activation +'##'+ str(i), items=self.neuronActivationList, width = 70, callback=self.change_list_callback, default_value='relu')
            core.add_separator()
        # Hide the layer rows that exceed the default layer count.
        self.layer_slider_callback()
        #self.visualization_window = VisualizationWindow()
        self.betterVisualizer = BetterVisualizer()
        self.betterVisualizer.hide_window()
        self.importWindow = ImportWindow()
        self.neuronDataContainer = ModelDataContainer(self.neuronDataContainerDefaultData[0],self.neuronDataContainerDefaultData[1], self.neuronDataContainerDefaultData[2], self.neuronDataContainerDefaultData[3])
        self.modify_neuron_list()
        self.tensorFlowInterface.create_model(self.neuronDataContainer)
        super().__init__()
    def modify_neuron_list(self):
        """Sync the container's layer count and per-layer neuron counts from
        the current GUI widget values."""
        self.neuronDataContainer.numberOfLayers = core.get_value(self.numberOfLayers)
        for i in range(0, core.get_value(self.numberOfLayers)):
            self.neuronDataContainer.listOfLayerNeurons[i] = core.get_value(self.layer + str(i))
    def setDefaultInOut(self):
        """Force 2 input neurons and 1 output neuron (the default 2D in/out)."""
        self.neuronDataContainer.listOfLayerNeurons[0] = 2
        self.neuronDataContainer.listOfLayerNeurons[self.neuronDataContainer.numberOfLayers-1] = 1
    def create_network_callback(self):
        """Rebuild the TensorFlow model from the current GUI settings."""
        self.tensorFlowInterface.remove_model()
        self.modify_neuron_list()
        if core.get_value(self.use2DInOut):
            self.setDefaultInOut()
        self.tensorFlowInterface.create_model(self.neuronDataContainer)
    def create_visualisation_callback(self, sender, data):
        """Rebuild the model and (re)draw the network visualisation window."""
        self.tensorFlowInterface.remove_model()
        self.modify_neuron_list()
        if core.get_value(self.use2DInOut):
            self.setDefaultInOut()
        self.tensorFlowInterface.create_model(self.neuronDataContainer)
        self.betterVisualizer.getContainerData(self.neuronDataContainer)
        if (self.betterVisualizer.hidden):
            self.betterVisualizer.show_window()
            self.betterVisualizer.window_resize()
            # Render one frame so the newly shown window has a size before drawing.
            core.render_dearpygui_frame()
        self.betterVisualizer.draw_visualisation()
        self.betterVisualizer.window_resize()
    def layer_slider_callback(self):
        """Show only the layer rows up to the selected layer count; hide the rest."""
        for i in range(0, self.maxNumberOfLayers):
            simple.hide_item(self.layer + str(i))
            simple.hide_item(self.type + '##' + str(i))
            simple.hide_item(self.activation + '##' + str(i))
        for i in range(0, core.get_value(self.numberOfLayers)):
            simple.show_item(self.layer + str(i))
            simple.show_item(self.type + '##' + str(i))
            simple.show_item(self.activation + '##' + str(i))
    def change_list_callback(self, sender, data):
        """Store a changed combo value; the widget-name prefix tells whether it
        was a type or an activation combo, the '##<i>' suffix gives the layer.
        NOTE(review): int(sender[-1]) only supports single-digit layer indices
        -- fine while maxNumberOfLayers <= 10."""
        if sender[0:len(self.type)] == self.type:
            self.neuronDataContainer.listOfLayerTypes[int(sender[-1])] = core.get_value(sender)
        if sender[0:len(self.activation)] == self.activation:
            self.neuronDataContainer.listOfActivations[int(sender[-1])] = core.get_value(sender)
    def execute_training_data(self):
        """Train the model on the imported data for the configured epoch count."""
        self.lastEpochs = self.tensorFlowInterface.train_model_on_2D_data(self.importWindow.dataParsedIn, self.importWindow.dataParsedOut, core.get_value(self.timeToTrain))
    def create_output_prediction(self):
        """Open/refresh the output-prediction graph window."""
        self.outputVisualisationWindow.create_output_graph(self.tensorFlowInterface)
    def create_history_graph(self):
        """Open/refresh the training-history graph window."""
        self.historyGraphWindow.display_history_graph(self.tensorFlowInterface.dumpedTrainedDataHistory, self.lastEpochs)
    def reset_item(self, window):
        """Restore one window's size and position to its class defaults."""
        simple.set_item_width(window.windowName, window.xSize)
        simple.set_item_height(window.windowName, window.ySize)
        simple.set_window_pos(window.windowName, window.xPos, window.yPos)
    def reset_all(self):
        """Restore size/position of this window and every child window."""
        self.reset_item(self)
        self.reset_item(self.importWindow)
        self.reset_item(self.betterVisualizer)
        self.reset_item(self.historyGraphWindow)
        self.reset_item(self.outputVisualisationWindow)
| [
"dearpygui.core.render_dearpygui_frame",
"dearpygui.core.get_value",
"dearpygui.core.add_text",
"dearpygui.core.add_checkbox",
"history_graph_window.HistoryGraphWindow",
"dearpygui.simple.set_window_pos",
"tensor_flow_interface.ModelDataContainer",
"dearpygui.core.add_input_int",
"output_visualisati... | [((1571, 1592), 'tensor_flow_interface.TensorFlowInterface', 'TensorFlowInterface', ([], {}), '()\n', (1590, 1592), False, 'from tensor_flow_interface import TensorFlowInterface\n'), ((1634, 1661), 'output_visualisation_window.OutputVisualisationWindow', 'OutputVisualisationWindow', ([], {}), '()\n', (1659, 1661), False, 'from output_visualisation_window import OutputVisualisationWindow\n'), ((1696, 1716), 'history_graph_window.HistoryGraphWindow', 'HistoryGraphWindow', ([], {}), '()\n', (1714, 1716), False, 'from history_graph_window import HistoryGraphWindow\n'), ((3638, 3656), 'better_visualizer.BetterVisualizer', 'BetterVisualizer', ([], {}), '()\n', (3654, 3656), False, 'from better_visualizer import BetterVisualizer\n'), ((3729, 3743), 'import_window.ImportWindow', 'ImportWindow', ([], {}), '()\n', (3741, 3743), False, 'from import_window import ImportWindow\n'), ((3779, 3967), 'tensor_flow_interface.ModelDataContainer', 'ModelDataContainer', (['self.neuronDataContainerDefaultData[0]', 'self.neuronDataContainerDefaultData[1]', 'self.neuronDataContainerDefaultData[2]', 'self.neuronDataContainerDefaultData[3]'], {}), '(self.neuronDataContainerDefaultData[0], self.\n neuronDataContainerDefaultData[1], self.neuronDataContainerDefaultData[\n 2], self.neuronDataContainerDefaultData[3])\n', (3797, 3967), False, 'from tensor_flow_interface import ModelDataContainer\n'), ((4175, 4210), 'dearpygui.core.get_value', 'core.get_value', (['self.numberOfLayers'], {}), '(self.numberOfLayers)\n', (4189, 4210), False, 'from dearpygui import core, simple\n'), ((4695, 4726), 'dearpygui.core.get_value', 'core.get_value', (['self.use2DInOut'], {}), '(self.use2DInOut)\n', (4709, 4726), False, 'from dearpygui import core, simple\n'), ((4988, 5019), 'dearpygui.core.get_value', 'core.get_value', (['self.use2DInOut'], {}), '(self.use2DInOut)\n', (5002, 5019), False, 'from dearpygui import core, simple\n'), ((6871, 6925), 'dearpygui.simple.set_item_width', 
'simple.set_item_width', (['window.windowName', 'window.xSize'], {}), '(window.windowName, window.xSize)\n', (6892, 6925), False, 'from dearpygui import core, simple\n'), ((6934, 6989), 'dearpygui.simple.set_item_height', 'simple.set_item_height', (['window.windowName', 'window.ySize'], {}), '(window.windowName, window.ySize)\n', (6956, 6989), False, 'from dearpygui import core, simple\n'), ((6998, 7064), 'dearpygui.simple.set_window_pos', 'simple.set_window_pos', (['window.windowName', 'window.xPos', 'window.yPos'], {}), '(window.windowName, window.xPos, window.yPos)\n', (7019, 7064), False, 'from dearpygui import core, simple\n'), ((1731, 1837), 'dearpygui.simple.window', 'simple.window', (['self.windowName'], {'width': 'self.xSize', 'height': 'self.ySize', 'x_pos': 'self.xPos', 'y_pos': 'self.yPos'}), '(self.windowName, width=self.xSize, height=self.ySize, x_pos=\n self.xPos, y_pos=self.yPos)\n', (1744, 1837), False, 'from dearpygui import core, simple\n'), ((1846, 1883), 'dearpygui.core.add_text', 'core.add_text', (['self.simulationSetting'], {}), '(self.simulationSetting)\n', (1859, 1883), False, 'from dearpygui import core, simple\n'), ((1896, 1970), 'dearpygui.core.add_button', 'core.add_button', (['self.createNetwork'], {'callback': 'self.create_network_callback'}), '(self.createNetwork, callback=self.create_network_callback)\n', (1911, 1970), False, 'from dearpygui import core, simple\n'), ((1983, 2003), 'dearpygui.core.add_same_line', 'core.add_same_line', ([], {}), '()\n', (2001, 2003), False, 'from dearpygui import core, simple\n'), ((2016, 2107), 'dearpygui.core.add_button', 'core.add_button', (['self.createVisualization'], {'callback': 'self.create_visualisation_callback'}), '(self.createVisualization, callback=self.\n create_visualisation_callback)\n', (2031, 2107), False, 'from dearpygui import core, simple\n'), ((2115, 2135), 'dearpygui.core.add_same_line', 'core.add_same_line', ([], {}), '()\n', (2133, 2135), False, 'from dearpygui import core, 
simple\n'), ((2148, 2237), 'dearpygui.core.add_button', 'core.add_button', (['self.createOutputPrediction'], {'callback': 'self.create_output_prediction'}), '(self.createOutputPrediction, callback=self.\n create_output_prediction)\n', (2163, 2237), False, 'from dearpygui import core, simple\n'), ((2245, 2265), 'dearpygui.core.add_same_line', 'core.add_same_line', ([], {}), '()\n', (2263, 2265), False, 'from dearpygui import core, simple\n'), ((2278, 2348), 'dearpygui.core.add_button', 'core.add_button', (['self.historyGraph'], {'callback': 'self.create_history_graph'}), '(self.historyGraph, callback=self.create_history_graph)\n', (2293, 2348), False, 'from dearpygui import core, simple\n'), ((2361, 2429), 'dearpygui.core.add_button', 'core.add_button', (['self.trainData'], {'callback': 'self.execute_training_data'}), '(self.trainData, callback=self.execute_training_data)\n', (2376, 2429), False, 'from dearpygui import core, simple\n'), ((2442, 2462), 'dearpygui.core.add_same_line', 'core.add_same_line', ([], {}), '()\n', (2460, 2462), False, 'from dearpygui import core, simple\n'), ((2589, 2688), 'dearpygui.core.add_input_int', 'core.add_input_int', (['self.timeToTrain'], {'default_value': '(100)', 'min_value': '(1)', 'max_value': '(1000)', 'width': '(200)'}), '(self.timeToTrain, default_value=100, min_value=1,\n max_value=1000, width=200)\n', (2607, 2688), False, 'from dearpygui import core, simple\n'), ((2699, 2719), 'dearpygui.core.add_same_line', 'core.add_same_line', ([], {}), '()\n', (2717, 2719), False, 'from dearpygui import core, simple\n'), ((2732, 2766), 'dearpygui.core.add_checkbox', 'core.add_checkbox', (['self.use2DInOut'], {}), '(self.use2DInOut)\n', (2749, 2766), False, 'from dearpygui import core, simple\n'), ((2780, 2940), 'dearpygui.core.add_slider_int', 'core.add_slider_int', (['self.numberOfLayers'], {'default_value': '(2)', 'min_value': '(2)', 'max_value': 'self.maxNumberOfLayers', 'callback': 'self.layer_slider_callback', 'width': '(200)'}), 
'(self.numberOfLayers, default_value=2, min_value=2,\n max_value=self.maxNumberOfLayers, callback=self.layer_slider_callback,\n width=200)\n', (2799, 2940), False, 'from dearpygui import core, simple\n'), ((3484, 3504), 'dearpygui.core.add_separator', 'core.add_separator', ([], {}), '()\n', (3502, 3504), False, 'from dearpygui import core, simple\n'), ((4237, 4272), 'dearpygui.core.get_value', 'core.get_value', (['self.numberOfLayers'], {}), '(self.numberOfLayers)\n', (4251, 4272), False, 'from dearpygui import core, simple\n'), ((5354, 5383), 'dearpygui.core.render_dearpygui_frame', 'core.render_dearpygui_frame', ([], {}), '()\n', (5381, 5383), False, 'from dearpygui import core, simple\n'), ((5770, 5805), 'dearpygui.core.get_value', 'core.get_value', (['self.numberOfLayers'], {}), '(self.numberOfLayers)\n', (5784, 5805), False, 'from dearpygui import core, simple\n'), ((6150, 6172), 'dearpygui.core.get_value', 'core.get_value', (['sender'], {}), '(sender)\n', (6164, 6172), False, 'from dearpygui import core, simple\n'), ((6309, 6331), 'dearpygui.core.get_value', 'core.get_value', (['sender'], {}), '(sender)\n', (6323, 6331), False, 'from dearpygui import core, simple\n'), ((6509, 6541), 'dearpygui.core.get_value', 'core.get_value', (['self.timeToTrain'], {}), '(self.timeToTrain)\n', (6523, 6541), False, 'from dearpygui import core, simple\n'), ((3094, 3114), 'dearpygui.core.add_same_line', 'core.add_same_line', ([], {}), '()\n', (3112, 3114), False, 'from dearpygui import core, simple\n'), ((3285, 3305), 'dearpygui.core.add_same_line', 'core.add_same_line', ([], {}), '()\n', (3303, 3305), False, 'from dearpygui import core, simple\n')] |
from typing import List, NewType, Optional, Tuple, TypedDict, Union

# Scalar aliases.
Baz = NewType("Baz", bool)
Foo = NewType("Foo", str)
Bar = NewType("Bar", int)

# array of strings is all...
UnorderedSetOfFooz1UBFn8B = NewType("UnorderedSetOfFooz1UBFn8B", List[Foo])
SetOfNumbers = NewType("SetOfNumbers", Tuple[Bar])


class ObjectOfBazLEtnUJ56(TypedDict):
    NotFoo: Optional[Baz]


OneOfStuff = NewType("OneOfStuff", Union[UnorderedSetOfFooz1UBFn8B, SetOfNumbers])

# Generated! Represents an alias to any of the provided schemas
AnyOfFooFooObjectOfBazLEtnUJ56OneOfStuffBar = NewType(
    "AnyOfFooFooObjectOfBazLEtnUJ56OneOfStuffBar",
    Union[Foo, ObjectOfBazLEtnUJ56, OneOfStuff, Bar],
)
| [
"typing.NewType"
] | [((165, 185), 'typing.NewType', 'NewType', (['"""Baz"""', 'bool'], {}), "('Baz', bool)\n", (172, 185), False, 'from typing import NewType\n'), ((193, 212), 'typing.NewType', 'NewType', (['"""Foo"""', 'str'], {}), "('Foo', str)\n", (200, 212), False, 'from typing import NewType\n'), ((275, 322), 'typing.NewType', 'NewType', (['"""UnorderedSetOfFooz1UBFn8B"""', 'List[Foo]'], {}), "('UnorderedSetOfFooz1UBFn8B', List[Foo])\n", (282, 322), False, 'from typing import NewType\n'), ((330, 349), 'typing.NewType', 'NewType', (['"""Bar"""', 'int'], {}), "('Bar', int)\n", (337, 349), False, 'from typing import NewType\n'), ((366, 401), 'typing.NewType', 'NewType', (['"""SetOfNumbers"""', 'Tuple[Bar]'], {}), "('SetOfNumbers', Tuple[Bar])\n", (373, 401), False, 'from typing import NewType\n'), ((481, 550), 'typing.NewType', 'NewType', (['"""OneOfStuff"""', 'Union[UnorderedSetOfFooz1UBFn8B, SetOfNumbers]'], {}), "('OneOfStuff', Union[UnorderedSetOfFooz1UBFn8B, SetOfNumbers])\n", (488, 550), False, 'from typing import NewType\n'), ((666, 774), 'typing.NewType', 'NewType', (['"""AnyOfFooFooObjectOfBazLEtnUJ56OneOfStuffBar"""', 'Union[Foo, ObjectOfBazLEtnUJ56, OneOfStuff, Bar]'], {}), "('AnyOfFooFooObjectOfBazLEtnUJ56OneOfStuffBar', Union[Foo,\n ObjectOfBazLEtnUJ56, OneOfStuff, Bar])\n", (673, 774), False, 'from typing import NewType\n')] |
# Copyright (c) 2021, <NAME>, <NAME>, <NAME>, <NAME>, <NAME>
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
import unittest
from src.lyap.verifier.z3verifier import Z3Verifier
from functools import partial
from src.lyap.learner.net import NN
from src.shared.activations import ActivationType
from experiments.benchmarks.benchmarks_lyap import *
import torch
from src.shared.components.Translator import Translator
from unittest import mock
from z3 import *
from src.shared.cegis_values import CegisStateKeys
from src.shared.consts import TranslatorType
class TestZ3Verifier(unittest.TestCase):
    """Checks Z3Verifier on the poly_2 benchmark against hand-crafted
    candidate Lyapunov functions: one valid candidate and two invalid ones.

    The three original tests shared ~90% of their body; the common setup,
    translation and verification is factored into _poly2_verification.
    """

    def _poly2_verification(self, bias, w0, b0=None, w1=None):
        """Build the poly_2 system and a 2-2-1 square-activation net, load the
        given weights, translate to a symbolic (V, Vdot) pair and verify.

        :param bias: whether the NN layers carry bias terms
        :param w0: 2x2 weight matrix for layer 0, as a list of rows
        :param b0: optional bias vector for layer 0 (set only when bias=True)
        :param w1: optional 1x2 weight row for layer 1; left at its random
                   initialisation when None (matching the original tests)
        :return: verifier result dict keyed by CegisStateKeys
        """
        system = partial(poly_2, batch_size=100)
        n_vars = 2
        verifier = Z3Verifier
        x = verifier.new_vars(n_vars)
        f, domain, _ = system(functions=verifier.solver_fncts(), inner=0, outer=100)
        domain_z3 = domain(verifier.solver_fncts(), x)
        verifier = Z3Verifier(n_vars, f, domain_z3, x)
        # candidate model
        model = NN(2, 2,
                   bias=bias,
                   activate=[ActivationType.SQUARE],
                   equilibria=None)
        for r in range(2):
            for c in range(2):
                model.layers[0].weight[r][c] = w0[r][c]
        if b0 is not None:
            for i, v in enumerate(b0):
                model.layers[0].bias[i] = v
        if w1 is not None:
            for c, v in enumerate(w1):
                model.layers[1].weight[0][c] = v
        xdot = f(Z3Verifier.solver_fncts(), x)
        translator = Translator(model, np.matrix(x).T, xdot, None, 1)
        res = translator.get(**{'factors': None})
        V, Vdot = res[CegisStateKeys.V], res[CegisStateKeys.V_dot]
        res = verifier.verify(V, Vdot)
        # Counterexample list must be empty exactly when verification succeeded.
        self.assertEqual(res[CegisStateKeys.found], res[CegisStateKeys.cex] == [])
        return res

    def test_poly2_with_good_Lyapunov_function(self):
        # Identity first layer, summing second layer: V = x1^2 + x2^2.
        res = self._poly2_verification(bias=False,
                                      w0=[[1, 0], [0, 1]],
                                      w1=[1, 1])
        self.assertTrue(res[CegisStateKeys.found])

    def test_poly2_with_bad_Lyapunov_function(self):
        # Same layer-0 weights but with non-zero biases: not a valid candidate.
        res = self._poly2_verification(bias=True,
                                      w0=[[1, 0], [0, 1]],
                                      b0=[1, 1])
        self.assertFalse(res[CegisStateKeys.found])

    def test_poly2_with_another_bad_Lyapunov_function(self):
        # Non-orthogonal first layer: candidate fails verification.
        res = self._poly2_verification(bias=False,
                                      w0=[[1, 1], [0, 1]])
        self.assertFalse(res[CegisStateKeys.found])
# Allow running this test module directly (e.g. `python test_z3verifier.py`).
if __name__ == '__main__':
    unittest.main()
| [
"src.lyap.verifier.z3verifier.Z3Verifier.solver_fncts",
"src.lyap.learner.net.NN",
"src.lyap.verifier.z3verifier.Z3Verifier",
"functools.partial",
"unittest.main"
] | [((4318, 4333), 'unittest.main', 'unittest.main', ([], {}), '()\n', (4331, 4333), False, 'import unittest\n'), ((780, 811), 'functools.partial', 'partial', (['poly_2'], {'batch_size': '(100)'}), '(poly_2, batch_size=100)\n', (787, 811), False, 'from functools import partial\n'), ((1060, 1095), 'src.lyap.verifier.z3verifier.Z3Verifier', 'Z3Verifier', (['n_vars', 'f', 'domain_z3', 'x'], {}), '(n_vars, f, domain_z3, x)\n', (1070, 1095), False, 'from src.lyap.verifier.z3verifier import Z3Verifier\n'), ((1129, 1200), 'src.lyap.learner.net.NN', 'NN', (['(2)', '(2)'], {'bias': '(False)', 'activate': '[ActivationType.SQUARE]', 'equilibria': 'None'}), '(2, 2, bias=False, activate=[ActivationType.SQUARE], equilibria=None)\n', (1131, 1200), False, 'from src.lyap.learner.net import NN\n'), ((2008, 2039), 'functools.partial', 'partial', (['poly_2'], {'batch_size': '(100)'}), '(poly_2, batch_size=100)\n', (2015, 2039), False, 'from functools import partial\n'), ((2288, 2323), 'src.lyap.verifier.z3verifier.Z3Verifier', 'Z3Verifier', (['n_vars', 'f', 'domain_z3', 'x'], {}), '(n_vars, f, domain_z3, x)\n', (2298, 2323), False, 'from src.lyap.verifier.z3verifier import Z3Verifier\n'), ((2357, 2427), 'src.lyap.learner.net.NN', 'NN', (['(2)', '(2)'], {'bias': '(True)', 'activate': '[ActivationType.SQUARE]', 'equilibria': 'None'}), '(2, 2, bias=True, activate=[ActivationType.SQUARE], equilibria=None)\n', (2359, 2427), False, 'from src.lyap.learner.net import NN\n'), ((3226, 3257), 'functools.partial', 'partial', (['poly_2'], {'batch_size': '(100)'}), '(poly_2, batch_size=100)\n', (3233, 3257), False, 'from functools import partial\n'), ((3506, 3541), 'src.lyap.verifier.z3verifier.Z3Verifier', 'Z3Verifier', (['n_vars', 'f', 'domain_z3', 'x'], {}), '(n_vars, f, domain_z3, x)\n', (3516, 3541), False, 'from src.lyap.verifier.z3verifier import Z3Verifier\n'), ((3575, 3646), 'src.lyap.learner.net.NN', 'NN', (['(2)', '(2)'], {'bias': '(False)', 'activate': '[ActivationType.SQUARE]', 
'equilibria': 'None'}), '(2, 2, bias=False, activate=[ActivationType.SQUARE], equilibria=None)\n', (3577, 3646), False, 'from src.lyap.learner.net import NN\n'), ((1530, 1555), 'src.lyap.verifier.z3verifier.Z3Verifier.solver_fncts', 'Z3Verifier.solver_fncts', ([], {}), '()\n', (1553, 1555), False, 'from src.lyap.verifier.z3verifier import Z3Verifier\n'), ((2756, 2781), 'src.lyap.verifier.z3verifier.Z3Verifier.solver_fncts', 'Z3Verifier.solver_fncts', ([], {}), '()\n', (2779, 2781), False, 'from src.lyap.verifier.z3verifier import Z3Verifier\n'), ((3894, 3919), 'src.lyap.verifier.z3verifier.Z3Verifier.solver_fncts', 'Z3Verifier.solver_fncts', ([], {}), '()\n', (3917, 3919), False, 'from src.lyap.verifier.z3verifier import Z3Verifier\n')] |
# Copyright 2017, <NAME>
# Copyright 2017, Red Hat
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from pecan import expose
from repoxplorer.controllers import utils
from repoxplorer import index
from repoxplorer.index.projects import Projects
from repoxplorer.index.tags import Tags
from repoxplorer.index.contributors import Contributors
class TagsController(object):
    """REST controller returning repository tags (and user defined releases)."""

    @expose('json')
    def tags(self, pid=None, tid=None,
             dfrom=None, dto=None, inc_repos=None):
        """Return tags matching the project/tag/date filters as JSON.

        When a project id (pid) is given, the releases declared in the
        project definition (and in each of its refs) are appended.
        """
        tags_index = Tags(index.Connector(index_suffix='tags'))
        projects_index = Projects()
        idents = Contributors()
        query_kwargs = utils.resolv_filters(
            projects_index, idents, pid, tid, None, None,
            dfrom, dto, inc_repos, None, None, None, None)
        # Drop the trailing component of each "<...>:<branch>" repo id.
        p_filter = [":".join(repo.split(':')[:-1])
                    for repo in query_kwargs['repos']]
        dfrom = query_kwargs['fromdate']
        dto = query_kwargs['todate']
        ret = [hit['_source']
               for hit in tags_index.get_tags(p_filter, dfrom, dto)]
        # TODO: if tid is given we can include user defined releases
        # for repo tagged with tid.
        if not pid:
            return ret
        # Append user defined releases; keying by release name de-duplicates
        # entries shared between the project and its refs.
        user_releases = {}
        project = projects_index.get(pid, source=['refs', 'releases'])
        for release in project.get('releases', []):
            user_releases[release['name']] = release
        for ref in project['refs']:
            for release in ref.get('releases', []):
                user_releases[release['name']] = release
        ret.extend(user_releases.values())
        return ret
| [
"repoxplorer.controllers.utils.resolv_filters",
"repoxplorer.index.contributors.Contributors",
"repoxplorer.index.projects.Projects",
"repoxplorer.index.Connector",
"pecan.expose"
] | [((887, 901), 'pecan.expose', 'expose', (['"""json"""'], {}), "('json')\n", (893, 901), False, 'from pecan import expose\n'), ((1073, 1083), 'repoxplorer.index.projects.Projects', 'Projects', ([], {}), '()\n', (1081, 1083), False, 'from repoxplorer.index.projects import Projects\n'), ((1101, 1115), 'repoxplorer.index.contributors.Contributors', 'Contributors', ([], {}), '()\n', (1113, 1115), False, 'from repoxplorer.index.contributors import Contributors\n'), ((1140, 1257), 'repoxplorer.controllers.utils.resolv_filters', 'utils.resolv_filters', (['projects_index', 'idents', 'pid', 'tid', 'None', 'None', 'dfrom', 'dto', 'inc_repos', 'None', 'None', 'None', 'None'], {}), '(projects_index, idents, pid, tid, None, None, dfrom,\n dto, inc_repos, None, None, None, None)\n', (1160, 1257), False, 'from repoxplorer.controllers import utils\n'), ((1010, 1046), 'repoxplorer.index.Connector', 'index.Connector', ([], {'index_suffix': '"""tags"""'}), "(index_suffix='tags')\n", (1025, 1046), False, 'from repoxplorer import index\n')] |
# -*- coding: utf-8 -*-
#
# Copyright (C) 2009-2012 <NAME> <<EMAIL>>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
#
# WeeChat scripts manager.
# (this script requires WeeChat >= 0.3.0 and python >= 2.6)
#
# History:
#
# 2012-01-03, <NAME> <<EMAIL>>:
# version 1.5: make script compatible with Python 3.x
# 2011-03-25, <NAME> <<EMAIL>>:
# version 1.4: add completion with installed scripts for action "remove"
# 2011-03-10, <NAME> <<EMAIL>>:
# version 1.3: add script extension in script name completion and a new
# completion with tags for actions "list" and "listinstalled"
# 2011-02-13, <NAME> <<EMAIL>>:
# version 1.2: use new help format for command arguments
# 2010-11-08, <NAME> <<EMAIL>>:
# version 1.1: get python 2.x binary for hook_process (fix problem
# when python 3.x is default python version, requires
# WeeChat >= 0.3.4)
# 2010-02-22, <NAME> <<EMAIL>>:
# version 1.0: add action "listinstalled" for command /weeget
# 2010-01-25, <NAME> <<EMAIL>>:
# version 0.9: fix "running" status of scripts with /weeget check
# 2009-09-30, <NAME> <<EMAIL>>:
# version 0.8: fix bugs and add missing info in "/weeget show",
# display warning if url for plugins.xml.gz is old site
# 2009-09-07, <NAME> <<EMAIL>>:
# version 0.7: update weechat site with new URL
# 2009-05-02, <NAME> <<EMAIL>>:
# version 0.6: sync with last API changes
# 2009-04-15, <NAME> <<EMAIL>>:
# version 0.5: display missing module(s) when import failed
# 2009-04-11, <NAME> <<EMAIL>>:
# version 0.4: use new completion for command arguments
# 2009-04-07, <NAME> <<EMAIL>>:
# version 0.3: fix bug with install/upgrade when weeget is updated with
# other scripts: ensure that weeget is always the last
# installed script
# 2009-04-07, <NAME> <<EMAIL>>:
# version 0.2: add author's mail in script description
# 2009-04-05, <NAME> <<EMAIL>>:
# version 0.1: initial release
#
# Script registration metadata for WeeChat.
SCRIPT_NAME = "weeget"
SCRIPT_AUTHOR = "<NAME> <<EMAIL>>"
SCRIPT_VERSION = "1.5"
SCRIPT_LICENSE = "GPL3"
SCRIPT_DESC = "WeeChat scripts manager"
SCRIPT_COMMAND = "weeget"
import_ok = True
# Import guard: this script only works inside WeeChat.
try:
    import weechat
except ImportError:
    print("This script must be run under WeeChat.")
    print("Get WeeChat now at: http://www.weechat.org/")
    import_ok = False
try:
    import sys, os, stat, time, gzip, hashlib, xml.dom.minidom
except ImportError as message:
    print("Missing package(s) for %s: %s" % (SCRIPT_NAME, message))
    import_ok = False
CONFIG_FILE_NAME = "wg"
# Map: script language -> file extension used on the plugins site.
SCRIPT_EXTENSION = {
    "perl"  : "pl",
    "python": "py",
    "ruby"  : "rb",
    "lua"   : "lua",
    "tcl"   : "tcl",
    }
# timeout for download of plugins.xml.gz (milliseconds: 1 minute)
TIMEOUT_UPDATE = 60 * 1000
# timeout for download of a script (milliseconds: 1 minute)
TIMEOUT_SCRIPT = 60 * 1000
# config file and options
wg_config_file = ""
wg_config_option = {}
# action (install, remove, ..) and arguments
wg_action = ""
wg_action_args = ""
# loaded scripts
wg_loaded_scripts = {}
# hook process and stdout (separate slots for list update vs script download)
wg_hook_process = { "update": "", "script": "" }
wg_stdout = { "update": "", "script": "" }
# scripts read from plugins.xml.gz
wg_scripts = {}
# list of script to install, and script currently installing
wg_scripts_to_install = []
wg_current_script_install = {}
# =================================[ config ]=================================
def wg_config_init():
    """
    Initialization of configuration file.
    Sections: color, scripts.
    Populates the module globals wg_config_file and wg_config_option.
    """
    global wg_config_file, wg_config_option
    wg_config_file = weechat.config_new(CONFIG_FILE_NAME,
                                       "wg_config_reload_cb", "")
    if wg_config_file == "":
        return
    # section "color"
    section_color = weechat.config_new_section(
        wg_config_file, "color", 0, 0, "", "", "", "", "", "", "", "", "", "")
    if section_color == "":
        weechat.config_free(wg_config_file)
        return
    # All color options share the same shape; declare them in one table
    # instead of six near-identical config_new_option calls.
    color_options = (
        ("script", "Color for script names", "cyan"),
        ("installed", "Color for \"installed\" indicator", "yellow"),
        ("running", "Color for \"running\" indicator", "lightgreen"),
        ("obsolete", "Color for \"obsolete\" indicator", "lightmagenta"),
        ("unknown", "Color for \"unknown status\" indicator", "lightred"),
        ("language", "Color for language names", "lightblue"),
    )
    for name, description, default in color_options:
        wg_config_option["color_" + name] = weechat.config_new_option(
            wg_config_file, section_color,
            name, "color", description, "", 0, 0,
            default, default, 0, "", "", "", "", "", "")
    # section "scripts"
    section_scripts = weechat.config_new_section(
        wg_config_file, "scripts", 0, 0, "", "", "", "", "", "", "", "", "", "")
    if section_scripts == "":
        weechat.config_free(wg_config_file)
        return
    wg_config_option["scripts_url"] = weechat.config_new_option(
        wg_config_file, section_scripts,
        "url", "string", "URL for file with list of plugins", "", 0, 0,
        "http://www.weechat.org/files/plugins.xml.gz",
        "http://www.weechat.org/files/plugins.xml.gz", 0, "", "", "", "", "", "")
    wg_config_option["scripts_dir"] = weechat.config_new_option(
        wg_config_file, section_scripts,
        # fix: the original description read "...directory for" + SCRIPT_NAME
        # with no separating space ("Local cache directory forweeget")
        "dir", "string", "Local cache directory for " + SCRIPT_NAME, "", 0, 0,
        "%h/" + SCRIPT_NAME, "%h/" + SCRIPT_NAME, 0, "", "", "", "", "", "")
    wg_config_option["scripts_cache_expire"] = weechat.config_new_option(
        wg_config_file, section_scripts,
        "cache_expire", "integer", "Local cache expiration time, in minutes "
        "(-1 = never expires, 0 = always expires)", "",
        -1, 60*24*365, "60", "60", 0, "", "", "", "", "", "")
def wg_config_reload_cb(data, config_file):
    """Callback invoked by WeeChat when the config file must be reloaded."""
    return weechat.config_read(config_file)
def wg_config_read():
    """Read the weeget configuration file from disk."""
    global wg_config_file
    return weechat.config_read(wg_config_file)
def wg_config_write():
    """Write the weeget configuration file to disk."""
    global wg_config_file
    return weechat.config_write(wg_config_file)
def wg_config_color(color):
    """Return the WeeChat color string configured for *color* ('' if unset)."""
    global wg_config_option
    opt = wg_config_option.get("color_" + color, "")
    if opt != "":
        return weechat.color(weechat.config_string(opt))
    return ""
def wg_config_get_dir():
    """Return the weeget directory, with '%h' expanded to the WeeChat home."""
    global wg_config_option
    weechat_home = weechat.info_get("weechat_dir", "")
    raw_dir = weechat.config_string(wg_config_option["scripts_dir"])
    return raw_dir.replace("%h", weechat_home)
def wg_config_create_dir():
    """Create the weeget cache directory (mode 0700) if it does not exist."""
    # renamed local (was 'dir', which shadows the builtin of the same name)
    cache_dir = wg_config_get_dir()
    if not os.path.isdir(cache_dir):
        # 0o700: the cache may hold downloaded scripts; keep it user-private
        os.makedirs(cache_dir, mode=0o700)
def wg_config_get_cache_filename():
    """Return path of the local cache file (basename taken from the URL)."""
    global wg_config_option
    url = weechat.config_string(wg_config_option["scripts_url"])
    return wg_config_get_dir() + os.sep + os.path.basename(url)
# =============================[ download file ]==============================
def wg_download_file(url, filename, timeout, callback, callback_data):
    """Download a file with an URL. Return hook_process created."""
    # Build a small throw-away python program that fetches 'url' and writes
    # the response body to 'filename'; on any failure it prints
    # "error:<message>" on stdout so the hook_process callback can detect it.
    # NOTE(review): 'url' and 'filename' are interpolated unescaped into the
    # generated source -- a quote character in either would break (or inject
    # into) the child script; confirm callers only pass values coming from
    # this script's own configuration.
    script = [ "import sys",
               "try:",
               "    if sys.version_info >= (3,):",
               "        import urllib.request",
               "        response = urllib.request.urlopen('%s')" % url,
               "    else:",
               "        import urllib2",
               "        response = urllib2.urlopen(urllib2.Request('%s'))" % url,
               "    f = open('%s', 'wb')" % filename,
               "    f.write(response.read())",
               "    response.close()",
               "    f.close()",
               "except Exception as e:",
               "    print('error:' + str(e))" ]
    # run the download in a child process so WeeChat is not blocked
    return weechat.hook_process("python -c \"%s\"" % "\n".join(script),
                                timeout,
                                callback, callback_data)
# ================================[ scripts ]=================================
def wg_search_script_by_name(name):
    """
    Find a script in the repository list by name.
    *name* may be a short name ('weeget') or a full name ('weeget.py').
    Return the script dictionary, or None when nothing matches.
    """
    global wg_scripts
    for script in wg_scripts.values():
        if name in (script["name"], script["full_name"]):
            return script
    return None
def wg_get_loaded_scripts():
    """
    Rebuild wg_loaded_scripts from the scripts currently loaded in WeeChat.
    Keys are filenames and values are full paths, for example:
      'weeget.py': '/home/xxx/.weechat/python/weeget.py'
    """
    global wg_loaded_scripts
    wg_loaded_scripts = {}
    for language in SCRIPT_EXTENSION.keys():
        infolist = weechat.infolist_get(language + "_script", "", "")
        while weechat.infolist_next(infolist):
            path = weechat.infolist_string(infolist, "filename")
            if path:
                wg_loaded_scripts[os.path.basename(path)] = path
        weechat.infolist_free(infolist)
def wg_is_local_script_loaded(filename):
    """Check if a script filename (like 'python/weeget.py') is loaded."""
    global wg_loaded_scripts
    # a script may be loaded from its autoload/ path or from the plain path
    stripped = filename
    if stripped.startswith("autoload/"):
        stripped = stripped[9:]
    # str.endswith accepts a tuple of suffixes
    return any(path.endswith((filename, stripped))
               for path in wg_loaded_scripts.values())
def wg_get_local_script_status(script):
    """
    Check if a script is installed.
    'script' is a dictionary retrieved from scripts xml list.
    Return a dict with keys 'installed', 'obsolete' and 'running'
    ('1' when true, '' otherwise).
    """
    global wg_loaded_scripts
    status = { "installed": "", "obsolete": "", "running": "" }
    local_dir = weechat.info_get("weechat_dir", "") + os.sep + script["language"]
    local_name = local_dir + os.sep + "autoload" + os.sep + script["full_name"]
    if not os.path.isfile(local_name):
        # not autoloaded: fall back to the language directory itself
        local_name = local_dir + os.sep + script["full_name"]
    if os.path.isfile(local_name):
        status["installed"] = "1"
        # compare local checksum with the repository one to detect an
        # out-of-date copy; 'with' guarantees the file is closed even if
        # read() raises (original leaked the handle in that case)
        md5 = hashlib.md5()
        with open(local_name, "rb") as f:
            md5.update(f.read())
        if md5.hexdigest() != script["md5sum"]:
            status["obsolete"] = "1"
    if script["full_name"] in wg_loaded_scripts:
        status["running"] = "1"
    return status
def wg_get_local_scripts():
    """
    List all local scripts (in language and autoload directories).
    Return a dict mapping language to a list of relative paths, with
    autoloaded scripts at the beginning of each list, for example:
      { 'perl':   [ 'autoload/buffers.pl', 'beep.pl' ],
        'python': [ 'autoload/weeget.py', 'go.py', 'vdm.py' ] }
    """
    files = {}
    weechat_home = weechat.info_get("weechat_dir", "")
    for language in SCRIPT_EXTENSION.keys():
        files[language] = []
        autoloaded = []
        rootdir = weechat_home + os.sep + language
        for root, dirs, names in os.walk(rootdir):
            if root == rootdir:
                files[language] = names
            elif root == rootdir + os.sep + "autoload":
                autoloaded = names
        # move autoloaded scripts to the front, prefixed with 'autoload/'
        for name in autoloaded:
            if name in files[language]:
                files[language].remove(name)
            files[language].insert(0, "autoload" + os.sep + name)
    return files
def wg_get_local_scripts_status():
    """
    Return list of (path, status) tuples for all local scripts, where status
    is a dict with keys 'unknown', 'obsolete' and 'running' ('1' when true).
    For example:
      [ ('perl/weetris.pl',  {'unknown': '', 'obsolete': '1', 'running': ''}),
        ('python/weeget.py', {'unknown': '', 'obsolete': '', 'running': '1'}) ]
    """
    local_scripts_status = []
    # (iterating an empty dict is a no-op, so no explicit len() guard needed;
    #  also dropped the unused 'name_with_ext' local of the original)
    for language, files in wg_get_local_scripts().items():
        for file in files:
            script_status = { "unknown": "", "obsolete": "", "running": "" }
            script = wg_search_script_by_name(os.path.basename(file))
            if script is None:
                # file is not present in the repository list
                script_status["unknown"] = "1"
            else:
                status = wg_get_local_script_status(script)
                if status["obsolete"]:
                    script_status["obsolete"] = "1"
            if wg_is_local_script_loaded(file):
                script_status["running"] = "1"
            local_scripts_status.append((language + os.sep + file,
                                         script_status))
    return local_scripts_status
def wg_search_scripts(search):
    """Return dict of scripts whose metadata contains *search* (all if '')."""
    global wg_scripts
    if search == "":
        return wg_scripts
    # a script matches when the search string appears in any of these fields
    fields = ("name", "language", "desc_en", "desc_fr", "tags")
    return dict(
        (script_id, script)
        for script_id, script in wg_scripts.items()
        if any(script[field].lower().find(search) >= 0 for field in fields))
def wg_list_scripts(search, installed=False):
    """
    List all scripts (with optional search string).
    If installed == True, then list only installed scripts.
    For each script, display status (installed/running/new version available),
    name of script, language and description.
    For example:
      ir  buffers pl Sidebar with list of buffers.
      i N go      py Quick jump to buffers.
      i   weetris pl Tetris-like game.
    """
    global wg_scripts
    search = search.strip().lower()
    scripts_matching = wg_search_scripts(search)
    if len(scripts_matching) == 0:
        weechat.prnt("", "%s: no script found" % SCRIPT_NAME)
    else:
        weechat.prnt("", "")
        # header line depends on whether a search string / installed filter
        # is active
        if search != "":
            if installed:
                weechat.prnt("", "Scripts installed matching \"%s\":" % search)
            else:
                weechat.prnt("", "Scripts for WeeChat %s matching \"%s\":"
                             % (weechat.info_get("version", ""),
                                search))
        else:
            if installed:
                weechat.prnt("", "Scripts installed:")
            else:
                weechat.prnt("", "Scripts for WeeChat %s:"
                             % weechat.info_get("version", ""))
        sorted_scripts = sorted(scripts_matching.items(),
                                key=lambda s: s[1]["name"])
        # compute the widest script name so the name column lines up
        length_max_name = 0
        for item in sorted_scripts:
            length = len(item[1]["name"])
            if length > length_max_name:
                length_max_name = length
        # '%%' survives the first %-formatting pass; only %ds is substituted
        # here, producing e.g. "%s%s%s%s%s%s%s %s%-12s %s%-3s %s%s"
        str_format = "%%s%%s%%s%%s%%s%%s%%s %%s%%-%ds %%s%%-3s %%s%%s" \
                     % length_max_name
        for item in sorted_scripts:
            script = item[1]
            str_installed = " "
            str_running = " "
            str_obsolete = " "
            status = wg_get_local_script_status(script)
            if installed and not status["installed"]:
                continue
            if status["installed"]:
                str_installed = "i"
            if status["running"]:
                str_running = "r"
            if status["obsolete"]:
                str_obsolete = "N"
            weechat.prnt("", str_format
                         % (wg_config_color("installed"),
                            str_installed,
                            wg_config_color("running"),
                            str_running,
                            wg_config_color("obsolete"),
                            str_obsolete,
                            weechat.color("chat"),
                            wg_config_color("script"),
                            script["name"],
                            wg_config_color("language"),
                            SCRIPT_EXTENSION[script["language"]],
                            weechat.color("chat"),
                            script["desc_en"]))
def wg_show_script(name):
    """
    Show detailed info about a script (in repository).
    For example:
      Script: weeget.py, version 0.7, license: GPL3
      Author: <NAME> <flashcode [at] flashtux [dot] org>
      Status: installed, running
        Date: added: 2009-04-05, updated: 2009-09-07
         URL: http://www.weechat.org/files/scripts/weeget.py
         MD5: 4b0458dd5cc5c9a09ba8078f89830869
        Desc: Scripts manager.
        Tags: scripts
    Requires: python 2.5
         Min: 0.3.0
    """
    if len(wg_scripts) == 0:
        return
    script = wg_search_script_by_name(name)
    # bug fix: identity comparison with None (was 'script == None')
    if script is None:
        weechat.prnt("", "%s: script \"%s%s%s\" not found"
                     % (SCRIPT_NAME,
                        wg_config_color("script"),
                        name,
                        weechat.color("chat")))
        return
    weechat.prnt("", "")
    weechat.prnt("", "  Script: %s%s%s, version %s, license: %s"
                 % (wg_config_color("script"),
                    script["full_name"],
                    weechat.color("chat"),
                    script["version"],
                    script["license"]))
    weechat.prnt("", "  Author: %s <%s>" % (script["author"], script["mail"]))
    status = wg_get_local_script_status(script)
    str_status = "not installed"
    if status["installed"]:
        str_status = "installed"
        if status["running"]:
            str_status += ", running"
        else:
            str_status += ", not running"
    if status["obsolete"]:
        str_status += " (new version available)"
    weechat.prnt("", "  Status: %s" % str_status)
    # hide the "updated" date when it is unset or equal to the "added" date
    date_added = script.get("added", "")[:10]
    str_updated = script.get("updated", "")
    if str_updated != "":
        date_updated = script["updated"][:10]
        if date_updated == "0000-00-00" or date_updated == date_added:
            str_updated = ""
    if str_updated != "":
        weechat.prnt("", "    Date: added: %s, updated: %s"
                     % (date_added, date_updated))
    else:
        weechat.prnt("", "    Date: added: %s" % date_added)
    weechat.prnt("", "     URL: %s" % script.get("url", ""))
    weechat.prnt("", "     MD5: %s" % script.get("md5sum", ""))
    weechat.prnt("", "    Desc: %s" % script.get("desc_en", ""))
    weechat.prnt("", "    Tags: %s" % script.get("tags", ""))
    str_requires = script.get("requirements", "")
    if str_requires == "":
        str_requires = "(nothing)"
    weechat.prnt("", "Requires: %s" % str_requires)
    vmin = script.get("min_weechat", "")
    vmax = script.get("max_weechat", "")
    if vmin != "":
        weechat.prnt("", "     Min: %s" % vmin)
    if vmax != "":
        weechat.prnt("", "     Max: %s" % vmax)
def wg_install_next_script():
    """
    Install first script in list wg_scripts_to_install and remove it from
    list.
    """
    global wg_scripts, wg_scripts_to_install, wg_current_script_install
    global wg_hook_process
    if len(wg_scripts) == 0:
        return
    # be sure weeget is ALWAYS last script to install/update
    # otherwise we'll lose end of list when weeget is unloaded by WeeChat
    if SCRIPT_NAME in wg_scripts_to_install:
        wg_scripts_to_install.remove(SCRIPT_NAME)
        wg_scripts_to_install.append(SCRIPT_NAME)
    # loop until a script is installed, or end if list is empty
    while len(wg_scripts_to_install) > 0:
        name = wg_scripts_to_install.pop(0)
        script = wg_search_script_by_name(name)
        if script == None:
            weechat.prnt("", "%s: script \"%s%s%s\" not found"
                         % (SCRIPT_NAME,
                            wg_config_color("script"),
                            name,
                            weechat.color("chat")))
        else:
            status = wg_get_local_script_status(script)
            if status["installed"] and not status["obsolete"]:
                # nothing to do for this one, keep looping on the queue
                weechat.prnt("",
                             "%s: script \"%s%s%s\" is already "
                             "installed and up to date"
                             % (SCRIPT_NAME,
                                wg_config_color("script"),
                                script["full_name"],
                                weechat.color("chat")))
            else:
                weechat.prnt("", "%s: downloading \"%s%s%s\"..."
                             % (SCRIPT_NAME,
                                wg_config_color("script"),
                                script["full_name"],
                                weechat.color("chat")))
                # cancel any previous script download still in progress
                if wg_hook_process["script"] != "":
                    weechat.unhook(wg_hook_process["script"])
                    wg_hook_process["script"] = ""
                wg_current_script_install = script
                filename = wg_config_get_dir() + os.sep + script["full_name"]
                wg_hook_process["script"] = wg_download_file(script["url"], filename, TIMEOUT_SCRIPT,
                                                             "wg_process_script_cb", "")
                # this function will be called again when script will be
                # downloaded
                return
def wg_install_scripts(names):
    """Queue the space-separated script *names* and start installing them."""
    global wg_scripts_to_install
    wg_scripts_to_install.extend(names.split(" "))
    wg_install_next_script()
def wg_process_script_cb(data, command, rc, stdout, stderr):
    """ Callback when reading a script from website. """
    global wg_hook_process, wg_stdout, wg_current_script_install, wg_loaded_scripts
    if stdout != "":
        wg_stdout["script"] += stdout
    if stderr != "":
        wg_stdout["script"] += stderr
    if int(rc) >= 0:
        if wg_stdout["script"].startswith("error:"):
            # bug fix: print the "script" buffer here -- the original printed
            # wg_stdout["update"][6:] (copy/paste from wg_process_update_cb),
            # i.e. the wrong download's error text
            weechat.prnt("", "%s%s: error downloading script (%s)"
                         % (weechat.prefix("error"), SCRIPT_NAME,
                            wg_stdout["script"][6:].strip()))
        else:
            # ask C plugin to install/load script
            weechat.hook_signal_send(wg_current_script_install["language"] + "_script_install",
                                     weechat.WEECHAT_HOOK_SIGNAL_STRING,
                                     wg_config_get_dir() + os.sep + wg_current_script_install["full_name"])
        wg_hook_process["script"] = ""
        wg_install_next_script()
    return weechat.WEECHAT_RC_OK
def wg_check_scripts():
    """
    Check status of local script(s).
    For each script found, display status (unknown/running/new version
    available), for example:
       r  python/autoload/vdm.py
      ?r  python/autoload/dummy.py
      rN  python/shell.py
          perl/buffers.pl
    """
    local_scripts_status = wg_get_local_scripts_status()
    if not local_scripts_status:
        return
    weechat.prnt("", "")
    weechat.prnt("", "Local scripts:")
    for file, status in local_scripts_status:
        # one-letter indicators, blank when the flag is not set
        str_unknown = "?" if status["unknown"] else " "
        str_running = "r" if status["running"] else " "
        str_obsolete = "N" if status["obsolete"] else " "
        weechat.prnt("", "%s%s%s%s%s%s%s %s%s%s%s"
                     % (wg_config_color("unknown"), str_unknown,
                        wg_config_color("running"), str_running,
                        wg_config_color("obsolete"), str_obsolete,
                        weechat.color("chat"),
                        os.path.dirname(file),
                        os.sep,
                        wg_config_color("script"),
                        os.path.basename(file)))
def wg_upgrade_scripts():
    """Queue every installed-but-obsolete script for reinstallation."""
    global wg_scripts, wg_scripts_to_install
    if len(wg_scripts) == 0:
        return
    scripts_to_upgrade = []
    for script in wg_scripts.values():
        local = wg_get_local_script_status(script)
        if not (local["installed"] and local["obsolete"]):
            continue
        scripts_to_upgrade.append(script["name"])
    if scripts_to_upgrade:
        wg_scripts_to_install.extend(scripts_to_upgrade)
        wg_install_next_script()
    else:
        weechat.prnt("", "%s: all scripts are up to date" % SCRIPT_NAME)
def wg_remove_scripts(names):
    """ Remove the scripts given as space-separated *names*. """
    if len(wg_scripts) == 0:
        return
    # one (possibly empty) list of full names per supported language
    scripts_to_remove = dict((language, [])
                             for language in SCRIPT_EXTENSION.keys())
    for name in names.split(" "):
        script = wg_search_script_by_name(name)
        # bug fix: identity comparison with None (was 'script == None')
        if script is None:
            weechat.prnt("", "%s: script \"%s%s%s\" not found"
                         % (SCRIPT_NAME,
                            wg_config_color("script"),
                            name,
                            weechat.color("chat")))
        elif script["full_name"] not in scripts_to_remove[script["language"]]:
            scripts_to_remove[script["language"]].append(script["full_name"])
    for language, full_names in scripts_to_remove.items():
        if full_names:
            # ask C plugin to remove script file(s)
            weechat.hook_signal_send(language + "_script_remove",
                                     weechat.WEECHAT_HOOK_SIGNAL_STRING,
                                     ",".join(full_names))
# ==================================[ xml ]===================================
def wg_execute_action():
    """ Execute the pending action stored in wg_action / wg_action_args. """
    global wg_action, wg_action_args, wg_loaded_scripts
    if wg_action != "":
        wg_get_loaded_scripts()
        # dispatch table: action name -> zero-argument callable
        handlers = {
            "list": lambda: wg_list_scripts(wg_action_args),
            "listinstalled": lambda: wg_list_scripts(wg_action_args,
                                                     installed=True),
            "show": lambda: wg_show_script(wg_action_args),
            "install": lambda: wg_install_scripts(wg_action_args),
            "check": wg_check_scripts,
            "upgrade": wg_upgrade_scripts,
            "remove": lambda: wg_remove_scripts(wg_action_args),
        }
        handler = handlers.get(wg_action)
        if handler is not None:
            handler()
        else:
            weechat.prnt("", "%s%s: unknown action \"%s\""
                         % (weechat.prefix("error"), SCRIPT_NAME, wg_action))
    # reset action
    wg_action = ""
    wg_action_args = ""
    wg_loaded_scripts = {}
def wg_version_as_tuple(version):
    """
    Convert a dotted version string into a tuple of ints for comparison.
    Non-numeric components compare as 0 (best effort).
    """
    parts = []
    for item in version.split("."):
        try:
            parts.append(int(item))
        except ValueError:
            parts.append(0)
    return tuple(parts)

def wg_check_version(script):
    """ Check if a script is designed for current running WeeChat version."""
    version = weechat.info_get("version", "")
    version = version.split("-", 1)[0]
    vmin = script.get("min_weechat", "")
    vmax = script.get("max_weechat", "")
    # bug fix: compare versions component-wise as integers -- the original
    # plain string comparison claimed "0.10.0" < "0.3.0"
    if vmin != "" and wg_version_as_tuple(version) < wg_version_as_tuple(vmin):
        return False
    if vmax != "" and wg_version_as_tuple(version) > wg_version_as_tuple(vmax):
        return False
    return True
def wg_parse_xml():
    """
    Parse XML scripts list and return dictionary with list, with key 'id'.
    Example of item return in dictionary :
      '119': { 'name'        : 'weeget',
               'version'     : '0.1',
               'url'         : 'http://www.weechat.org/files/scripts/weeget.py',
               'language'    : 'python',
               'license'     : 'GPL3',
               'md5sum'      : 'd500714fc19b0e10cc4e339e70739e4ad500714fc19b0e10cc4e339e70739e4a',
               'tags'        : 'scripts',
               'desc_en'     : 'Scripts manager.',
               'desc_fr'     : 'Gestionnaire de scripts.',
               'requirements': 'python 2.5',
               'min_weechat' : '0.3.0',
               'max_weechat' : '',
               'author'      : 'FlashCode',
               'mail'        : 'flashcode [at] flashtux [dot] org',
               'added'       : '2009-04-05 22:39:18',
               'updated'     : '0000-00-00 00:00:00' }
    """
    global wg_scripts, wg_action, wg_action_args
    wg_scripts = {}
    try:
        # local cache file is the gzipped XML downloaded from scripts_url
        f = gzip.open(wg_config_get_cache_filename(), "rb")
        string = f.read()
        f.close()
    except:
        weechat.prnt("", "%s%s: unable to read xml file"
                     % (weechat.prefix("error"), SCRIPT_NAME))
    else:
        try:
            dom = xml.dom.minidom.parseString(string)
        except:
            weechat.prnt("",
                         "%s%s: unable to parse xml list of scripts"
                         % (weechat.prefix("error"), SCRIPT_NAME))
            # discard action
            wg_action = ""
            wg_action_args = ""
        else:
            for scriptNode in dom.getElementsByTagName("plugin"):
                id = scriptNode.getAttribute("id")
                script = {}
                # flatten the <plugin> child elements into a dict of strings
                for node in scriptNode.childNodes:
                    if node.nodeType == node.ELEMENT_NODE:
                        if node.firstChild != None:
                            nodename = node.nodeName
                            value = node.firstChild.data
                            if sys.version_info < (3,):
                                # python 2.x: convert unicode to str (in python 3.x, id and text are already strings)
                                nodename = nodename.encode("utf-8")
                                value = value.encode("utf-8")
                            script[nodename] = value
                # keep only scripts for a supported language and a
                # compatible WeeChat version
                if script["language"] in SCRIPT_EXTENSION:
                    script["full_name"] = script["name"] + "." + SCRIPT_EXTENSION[script["language"]]
                    if wg_check_version(script):
                        wg_scripts[id] = script
    wg_execute_action()
def wg_process_update_cb(data, command, rc, stdout, stderr):
    """ Callback when reading XML cache file from website. """
    global wg_hook_process, wg_stdout, wg_scripts
    # accumulate both output streams of the download process
    for chunk in (stdout, stderr):
        if chunk != "":
            wg_stdout["update"] += chunk
    if int(rc) >= 0:
        collected = wg_stdout["update"]
        if collected.startswith("error:"):
            weechat.prnt("", "%s%s: error downloading scripts (%s)"
                         % (weechat.prefix("error"), SCRIPT_NAME,
                            collected[6:].strip()))
        else:
            weechat.prnt("", "%s: scripts downloaded" % SCRIPT_NAME)
            wg_parse_xml()
        wg_hook_process["update"] = ""
    return weechat.WEECHAT_RC_OK
def wg_update_cache():
    """ Download list of scripts and update local cache. """
    global wg_config_option, wg_hook_process, wg_stdout
    # cancel any update download already in progress
    running = wg_hook_process["update"]
    if running != "":
        weechat.unhook(running)
        wg_hook_process["update"] = ""
    weechat.prnt("", "%s: downloading list of scripts..." % SCRIPT_NAME)
    wg_stdout["update"] = ""
    wg_config_create_dir()
    wg_hook_process["update"] = wg_download_file(
        weechat.config_string(wg_config_option["scripts_url"]),
        wg_config_get_cache_filename(),
        TIMEOUT_UPDATE,
        "wg_process_update_cb", "")
def wg_read_scripts(download_list=True):
    """ Read scripts list (download list if needed and asked). """
    global wg_scripts
    cache_file = wg_config_get_cache_filename()
    if os.path.isfile(cache_file):
        # discard the local cache when it is older than the configured
        # expiration time (in minutes; negative means "never expires")
        expire_minutes = weechat.config_integer(
            wg_config_option["scripts_cache_expire"])
        cache_expire = expire_minutes * 60
        if cache_expire >= 0:
            age = time.time() - os.stat(cache_file)[stat.ST_MTIME]
            if download_list and age >= cache_expire:
                os.unlink(cache_file)
                wg_scripts.clear()
    if len(wg_scripts) > 0:
        wg_execute_action()
    elif os.path.isfile(cache_file):
        wg_parse_xml()
    elif download_list:
        wg_update_cache()
# ================================[ command ]=================================
def wg_cmd(data, buffer, args):
    """ Callback for /weeget command. """
    global wg_action, wg_action_args
    if args == "":
        weechat.command("", "/help %s" % SCRIPT_COMMAND)
        return weechat.WEECHAT_RC_OK
    argv = args.strip().split(" ", 1)
    if not argv:
        return weechat.WEECHAT_RC_OK
    wg_action = ""
    wg_action_args = ""
    # these actions require a script name argument
    if len(argv) < 2 and argv[0] in ("show", "install", "remove"):
        weechat.prnt("", "%s: too few arguments for action \"%s\""
                     % (SCRIPT_NAME, argv[0]))
        return weechat.WEECHAT_RC_OK
    # execute asked action
    if argv[0] == "update":
        wg_update_cache()
    else:
        wg_action = argv[0]
        wg_action_args = argv[1] if len(argv) > 1 else ""
        wg_read_scripts()
    return weechat.WEECHAT_RC_OK
def wg_completion_scripts_cb(data, completion_item, buffer, completion):
    """ Complete with known script names, for command '/weeget'. """
    global wg_scripts
    wg_read_scripts(download_list=False)
    # iterating an empty dict is a no-op, so no explicit guard needed
    for script in wg_scripts.values():
        weechat.hook_completion_list_add(completion, script["full_name"],
                                         0, weechat.WEECHAT_LIST_POS_SORT)
    return weechat.WEECHAT_RC_OK
def wg_completion_scripts_installed_cb(data, completion_item, buffer, completion):
    """ Complete with names of scripts installed, for command '/weeget'. """
    global wg_scripts
    wg_read_scripts(download_list=False)
    for script in wg_scripts.values():
        # only offer scripts that are actually installed locally
        if wg_get_local_script_status(script)["installed"]:
            weechat.hook_completion_list_add(completion, script["full_name"],
                                             0, weechat.WEECHAT_LIST_POS_SORT)
    return weechat.WEECHAT_RC_OK
def wg_completion_scripts_tags_cb(data, completion_item, buffer, completion):
    """ Complete with known tags, for command '/weeget'. """
    global wg_scripts
    wg_read_scripts(download_list=False)
    for script in wg_scripts.values():
        tags = script["tags"]
        if not tags:
            continue
        # 'tags' is a comma-separated string in the repository XML
        for tag in tags.split(","):
            weechat.hook_completion_list_add(completion, tag,
                                             0, weechat.WEECHAT_LIST_POS_SORT)
    return weechat.WEECHAT_RC_OK
# ==================================[ main ]==================================
if __name__ == "__main__" and import_ok:
    # register the script with WeeChat; 'wg_unload_script' is called on unload
    if weechat.register(SCRIPT_NAME, SCRIPT_AUTHOR, SCRIPT_VERSION, SCRIPT_LICENSE,
                        SCRIPT_DESC, "wg_unload_script", ""):
        wg_config_init()
        wg_config_read()
        # warn users whose config still points at the old download site
        if weechat.config_string(wg_config_option["scripts_url"]).find("weechat.flashtux.org") >= 0:
            weechat.prnt("", "%sWarning: old site still used in URL for plugins.xml.gz, you should do: /unset wg.scripts.url"
                         % weechat.prefix("error"))
        # colored one-letter indicators reused in the /weeget help text below
        str_installed = wg_config_color("installed") + "i" + weechat.color("chat")
        str_unknown = wg_config_color("unknown") + "?" + weechat.color("chat")
        str_running = wg_config_color("running") + "r" + weechat.color("chat")
        str_obsolete = wg_config_color("obsolete") + "N" + weechat.color("chat")
        weechat.hook_command(SCRIPT_COMMAND,
                             "WeeChat scripts manager",
                             "list|listinstalled [<text>|<tag>] || show <script>"
                             " || install|remove <script> [<script>...] || check|update|upgrade",
                             "         list: list scripts (search text if given)\n"
                             "listinstalled: list installed scripts (search text if given)\n"
                             "         show: show detailed information about a script (in repository)\n"
                             "      install: install/upgrade script(s)\n"
                             "        check: check if local scripts needs upgrade\n"
                             "       update: update local scripts cache\n"
                             "      upgrade: upgrade all local scripts if they are obsolete\n"
                             "       remove: remove script(s)\n\n"
                             "Indicators in lists (first column):\n"
                             "  " + str_installed + "  script is installed\n"
                             "  " + str_unknown + "  unknown script\n"
                             "  " + str_running + "  script is running (loaded)\n"
                             "  " + str_obsolete + "  script is obsolete (new version available)\n\n"
                             "Examples:\n"
                             "  /" + SCRIPT_COMMAND + " list         => list all scripts\n"
                             "  /" + SCRIPT_COMMAND + " list game    => list all scripts with text/tag \"game\"\n"
                             "  /" + SCRIPT_COMMAND + " install beep.pl => install script beep.pl\n"
                             "  /" + SCRIPT_COMMAND + " remove beep.pl  => remove script beep.pl",
                             "list %(weeget_scripts_tags)"
                             " || listinstalled %(weeget_scripts_tags)"
                             " || show %(weeget_scripts)"
                             " || install %(weeget_scripts)|%*"
                             " || remove %(weeget_scripts_installed)|%*"
                             " || check"
                             " || update"
                             " || upgrade",
                             "wg_cmd", "")
        # completion items used in the command template above
        weechat.hook_completion("weeget_scripts", "list of scripts in repository",
                                "wg_completion_scripts_cb", "")
        weechat.hook_completion("weeget_scripts_installed", "list of scripts installed",
                                "wg_completion_scripts_installed_cb", "")
        weechat.hook_completion("weeget_scripts_tags", "tags of scripts in repository",
                                "wg_completion_scripts_tags_cb", "")
# ==================================[ end ]===================================
def wg_unload_script():
    """ Called by WeeChat when the script is unloaded: persist the config. """
    wg_config_write()
    return weechat.WEECHAT_RC_OK
| [
"weechat.prnt",
"weechat.infolist_string",
"weechat.config_read",
"weechat.color",
"weechat.config_string",
"weechat.command",
"weechat.infolist_free",
"weechat.config_integer",
"os.walk",
"weechat.hook_command",
"weechat.hook_completion",
"weechat.infolist_get",
"os.path.isdir",
"os.unlin... | [((4286, 4349), 'weechat.config_new', 'weechat.config_new', (['CONFIG_FILE_NAME', '"""wg_config_reload_cb"""', '""""""'], {}), "(CONFIG_FILE_NAME, 'wg_config_reload_cb', '')\n", (4304, 4349), False, 'import weechat\n'), ((4477, 4578), 'weechat.config_new_section', 'weechat.config_new_section', (['wg_config_file', '"""color"""', '(0)', '(0)', '""""""', '""""""', '""""""', '""""""', '""""""', '""""""', '""""""', '""""""', '""""""', '""""""'], {}), "(wg_config_file, 'color', 0, 0, '', '', '', '',\n '', '', '', '', '', '')\n", (4503, 4578), False, 'import weechat\n'), ((4710, 4872), 'weechat.config_new_option', 'weechat.config_new_option', (['wg_config_file', 'section_color', '"""script"""', '"""color"""', '"""Color for script names"""', '""""""', '(0)', '(0)', '"""cyan"""', '"""cyan"""', '(0)', '""""""', '""""""', '""""""', '""""""', '""""""', '""""""'], {}), "(wg_config_file, section_color, 'script', 'color',\n 'Color for script names', '', 0, 0, 'cyan', 'cyan', 0, '', '', '', '',\n '', '')\n", (4735, 4872), False, 'import weechat\n'), ((4932, 5110), 'weechat.config_new_option', 'weechat.config_new_option', (['wg_config_file', 'section_color', '"""installed"""', '"""color"""', '"""Color for "installed" indicator"""', '""""""', '(0)', '(0)', '"""yellow"""', '"""yellow"""', '(0)', '""""""', '""""""', '""""""', '""""""', '""""""', '""""""'], {}), '(wg_config_file, section_color, \'installed\',\n \'color\', \'Color for "installed" indicator\', \'\', 0, 0, \'yellow\',\n \'yellow\', 0, \'\', \'\', \'\', \'\', \'\', \'\')\n', (4957, 5110), False, 'import weechat\n'), ((5170, 5353), 'weechat.config_new_option', 'weechat.config_new_option', (['wg_config_file', 'section_color', '"""running"""', '"""color"""', '"""Color for "running" indicator"""', '""""""', '(0)', '(0)', '"""lightgreen"""', '"""lightgreen"""', '(0)', '""""""', '""""""', '""""""', '""""""', '""""""', '""""""'], {}), '(wg_config_file, section_color, \'running\', \'color\',\n \'Color for "running" 
indicator\', \'\', 0, 0, \'lightgreen\', \'lightgreen\', \n 0, \'\', \'\', \'\', \'\', \'\', \'\')\n', (5195, 5353), False, 'import weechat\n'), ((5413, 5601), 'weechat.config_new_option', 'weechat.config_new_option', (['wg_config_file', 'section_color', '"""obsolete"""', '"""color"""', '"""Color for "obsolete" indicator"""', '""""""', '(0)', '(0)', '"""lightmagenta"""', '"""lightmagenta"""', '(0)', '""""""', '""""""', '""""""', '""""""', '""""""', '""""""'], {}), '(wg_config_file, section_color, \'obsolete\',\n \'color\', \'Color for "obsolete" indicator\', \'\', 0, 0, \'lightmagenta\',\n \'lightmagenta\', 0, \'\', \'\', \'\', \'\', \'\', \'\')\n', (5438, 5601), False, 'import weechat\n'), ((5661, 5846), 'weechat.config_new_option', 'weechat.config_new_option', (['wg_config_file', 'section_color', '"""unknown"""', '"""color"""', '"""Color for "unknown status" indicator"""', '""""""', '(0)', '(0)', '"""lightred"""', '"""lightred"""', '(0)', '""""""', '""""""', '""""""', '""""""', '""""""', '""""""'], {}), '(wg_config_file, section_color, \'unknown\', \'color\',\n \'Color for "unknown status" indicator\', \'\', 0, 0, \'lightred\',\n \'lightred\', 0, \'\', \'\', \'\', \'\', \'\', \'\')\n', (5686, 5846), False, 'import weechat\n'), ((5907, 6083), 'weechat.config_new_option', 'weechat.config_new_option', (['wg_config_file', 'section_color', '"""language"""', '"""color"""', '"""Color for language names"""', '""""""', '(0)', '(0)', '"""lightblue"""', '"""lightblue"""', '(0)', '""""""', '""""""', '""""""', '""""""', '""""""', '""""""'], {}), "(wg_config_file, section_color, 'language',\n 'color', 'Color for language names', '', 0, 0, 'lightblue', 'lightblue',\n 0, '', '', '', '', '', '')\n", (5932, 6083), False, 'import weechat\n'), ((6148, 6251), 'weechat.config_new_section', 'weechat.config_new_section', (['wg_config_file', '"""scripts"""', '(0)', '(0)', '""""""', '""""""', '""""""', '""""""', '""""""', '""""""', '""""""', '""""""', '""""""', '""""""'], {}), 
"(wg_config_file, 'scripts', 0, 0, '', '', '', '',\n '', '', '', '', '', '')\n", (6174, 6251), False, 'import weechat\n'), ((6384, 6639), 'weechat.config_new_option', 'weechat.config_new_option', (['wg_config_file', 'section_scripts', '"""url"""', '"""string"""', '"""URL for file with list of plugins"""', '""""""', '(0)', '(0)', '"""http://www.weechat.org/files/plugins.xml.gz"""', '"""http://www.weechat.org/files/plugins.xml.gz"""', '(0)', '""""""', '""""""', '""""""', '""""""', '""""""', '""""""'], {}), "(wg_config_file, section_scripts, 'url', 'string',\n 'URL for file with list of plugins', '', 0, 0,\n 'http://www.weechat.org/files/plugins.xml.gz',\n 'http://www.weechat.org/files/plugins.xml.gz', 0, '', '', '', '', '', '')\n", (6409, 6639), False, 'import weechat\n'), ((6699, 6904), 'weechat.config_new_option', 'weechat.config_new_option', (['wg_config_file', 'section_scripts', '"""dir"""', '"""string"""', "('Local cache directory for' + SCRIPT_NAME)", '""""""', '(0)', '(0)', "('%h/' + SCRIPT_NAME)", "('%h/' + SCRIPT_NAME)", '(0)', '""""""', '""""""', '""""""', '""""""', '""""""', '""""""'], {}), "(wg_config_file, section_scripts, 'dir', 'string',\n 'Local cache directory for' + SCRIPT_NAME, '', 0, 0, '%h/' +\n SCRIPT_NAME, '%h/' + SCRIPT_NAME, 0, '', '', '', '', '', '')\n", (6724, 6904), False, 'import weechat\n'), ((6969, 7213), 'weechat.config_new_option', 'weechat.config_new_option', (['wg_config_file', 'section_scripts', '"""cache_expire"""', '"""integer"""', '"""Local cache expiration time, in minutes (-1 = never expires, 0 = always expires)"""', '""""""', '(-1)', '(60 * 24 * 365)', '"""60"""', '"""60"""', '(0)', '""""""', '""""""', '""""""', '""""""', '""""""', '""""""'], {}), "(wg_config_file, section_scripts, 'cache_expire',\n 'integer',\n 'Local cache expiration time, in minutes (-1 = never expires, 0 = always expires)'\n , '', -1, 60 * 24 * 365, '60', '60', 0, '', '', '', '', '', '')\n", (6994, 7213), False, 'import weechat\n'), ((7328, 7360), 
'weechat.config_read', 'weechat.config_read', (['config_file'], {}), '(config_file)\n', (7347, 7360), False, 'import weechat\n'), ((7458, 7493), 'weechat.config_read', 'weechat.config_read', (['wg_config_file'], {}), '(wg_config_file)\n', (7477, 7493), False, 'import weechat\n'), ((7593, 7629), 'weechat.config_write', 'weechat.config_write', (['wg_config_file'], {}), '(wg_config_file)\n', (7613, 7629), False, 'import weechat\n'), ((11590, 11616), 'os.path.isfile', 'os.path.isfile', (['local_name'], {}), '(local_name)\n', (11604, 11616), False, 'import sys, os, stat, time, gzip, hashlib, xml.dom.minidom\n'), ((24905, 24925), 'weechat.prnt', 'weechat.prnt', (['""""""', '""""""'], {}), "('', '')\n", (24917, 24925), False, 'import weechat\n'), ((24930, 24964), 'weechat.prnt', 'weechat.prnt', (['""""""', '"""Local scripts:"""'], {}), "('', 'Local scripts:')\n", (24942, 24964), False, 'import weechat\n'), ((28636, 28667), 'weechat.info_get', 'weechat.info_get', (['"""version"""', '""""""'], {}), "('version', '')\n", (28652, 28667), False, 'import weechat\n'), ((32713, 32781), 'weechat.prnt', 'weechat.prnt', (['""""""', "('%s: downloading list of scripts...' % SCRIPT_NAME)"], {}), "('', '%s: downloading list of scripts...' 
% SCRIPT_NAME)\n", (32725, 32781), False, 'import weechat\n'), ((32848, 32902), 'weechat.config_string', 'weechat.config_string', (["wg_config_option['scripts_url']"], {}), "(wg_config_option['scripts_url'])\n", (32869, 32902), False, 'import weechat\n'), ((33292, 33318), 'os.path.isfile', 'os.path.isfile', (['cache_file'], {}), '(cache_file)\n', (33306, 33318), False, 'import sys, os, stat, time, gzip, hashlib, xml.dom.minidom\n'), ((36664, 36781), 'weechat.register', 'weechat.register', (['SCRIPT_NAME', 'SCRIPT_AUTHOR', 'SCRIPT_VERSION', 'SCRIPT_LICENSE', 'SCRIPT_DESC', '"""wg_unload_script"""', '""""""'], {}), "(SCRIPT_NAME, SCRIPT_AUTHOR, SCRIPT_VERSION, SCRIPT_LICENSE,\n SCRIPT_DESC, 'wg_unload_script', '')\n", (36680, 36781), False, 'import weechat\n'), ((4620, 4655), 'weechat.config_free', 'weechat.config_free', (['wg_config_file'], {}), '(wg_config_file)\n', (4639, 4655), False, 'import weechat\n'), ((6295, 6330), 'weechat.config_free', 'weechat.config_free', (['wg_config_file'], {}), '(wg_config_file)\n', (6314, 6330), False, 'import weechat\n'), ((7851, 7880), 'weechat.config_string', 'weechat.config_string', (['option'], {}), '(option)\n', (7872, 7880), False, 'import weechat\n'), ((8143, 8178), 'weechat.info_get', 'weechat.info_get', (['"""weechat_dir"""', '""""""'], {}), "('weechat_dir', '')\n", (8159, 8178), False, 'import weechat\n'), ((8287, 8305), 'os.path.isdir', 'os.path.isdir', (['dir'], {}), '(dir)\n', (8300, 8305), False, 'import sys, os, stat, time, gzip, hashlib, xml.dom.minidom\n'), ((8315, 8341), 'os.makedirs', 'os.makedirs', (['dir'], {'mode': '(448)'}), '(dir, mode=448)\n', (8326, 8341), False, 'import sys, os, stat, time, gzip, hashlib, xml.dom.minidom\n'), ((10359, 10409), 'weechat.infolist_get', 'weechat.infolist_get', (["(language + '_script')", '""""""', '""""""'], {}), "(language + '_script', '', '')\n", (10379, 10409), False, 'import weechat\n'), ((10424, 10455), 'weechat.infolist_next', 'weechat.infolist_next', (['infolist'], 
{}), '(infolist)\n', (10445, 10455), False, 'import weechat\n'), ((10638, 10669), 'weechat.infolist_free', 'weechat.infolist_free', (['infolist'], {}), '(infolist)\n', (10659, 10669), False, 'import weechat\n'), ((11493, 11519), 'os.path.isfile', 'os.path.isfile', (['local_name'], {}), '(local_name)\n', (11507, 11519), False, 'import sys, os, stat, time, gzip, hashlib, xml.dom.minidom\n'), ((11701, 11714), 'hashlib.md5', 'hashlib.md5', ([], {}), '()\n', (11712, 11714), False, 'import sys, os, stat, time, gzip, hashlib, xml.dom.minidom\n'), ((12726, 12742), 'os.walk', 'os.walk', (['rootdir'], {}), '(rootdir)\n', (12733, 12742), False, 'import sys, os, stat, time, gzip, hashlib, xml.dom.minidom\n'), ((15600, 15653), 'weechat.prnt', 'weechat.prnt', (['""""""', "('%s: no script found' % SCRIPT_NAME)"], {}), "('', '%s: no script found' % SCRIPT_NAME)\n", (15612, 15653), False, 'import weechat\n'), ((15672, 15692), 'weechat.prnt', 'weechat.prnt', (['""""""', '""""""'], {}), "('', '')\n", (15684, 15692), False, 'import weechat\n'), ((18779, 18799), 'weechat.prnt', 'weechat.prnt', (['""""""', '""""""'], {}), "('', '')\n", (18791, 18799), False, 'import weechat\n'), ((19107, 19181), 'weechat.prnt', 'weechat.prnt', (['""""""', "(' Author: %s <%s>' % (script['author'], script['mail']))"], {}), "('', ' Author: %s <%s>' % (script['author'], script['mail']))\n", (19119, 19181), False, 'import weechat\n'), ((19572, 19617), 'weechat.prnt', 'weechat.prnt', (['""""""', "(' Status: %s' % str_status)"], {}), "('', ' Status: %s' % str_status)\n", (19584, 19617), False, 'import weechat\n'), ((20534, 20581), 'weechat.prnt', 'weechat.prnt', (['""""""', "('Requires: %s' % str_requires)"], {}), "('', 'Requires: %s' % str_requires)\n", (20546, 20581), False, 'import weechat\n'), ((26168, 26232), 'weechat.prnt', 'weechat.prnt', (['""""""', "('%s: all scripts are up to date' % SCRIPT_NAME)"], {}), "('', '%s: all scripts are up to date' % SCRIPT_NAME)\n", (26180, 26232), False, 'import 
weechat\n'), ((32628, 32669), 'weechat.unhook', 'weechat.unhook', (["wg_hook_process['update']"], {}), "(wg_hook_process['update'])\n", (32642, 32669), False, 'import weechat\n'), ((33773, 33799), 'os.path.isfile', 'os.path.isfile', (['cache_file'], {}), '(cache_file)\n', (33787, 33799), False, 'import sys, os, stat, time, gzip, hashlib, xml.dom.minidom\n'), ((34105, 34153), 'weechat.command', 'weechat.command', (['""""""', "('/help %s' % SCRIPT_COMMAND)"], {}), "('', '/help %s' % SCRIPT_COMMAND)\n", (34120, 34153), False, 'import weechat\n'), ((37463, 38892), 'weechat.hook_command', 'weechat.hook_command', (['SCRIPT_COMMAND', '"""WeeChat scripts manager"""', '"""list|listinstalled [<text>|<tag>] || show <script> || install|remove <script> [<script>...] || check|update|upgrade"""', '(\n """ list: list scripts (search text if given)\nlistinstalled: list installed scripts (search text if given)\n show: show detailed information about a script (in repository)\n install: install/upgrade script(s)\n check: check if local scripts needs upgrade\n update: update local scripts cache\n upgrade: upgrade all local scripts if they are obsolete\n remove: remove script(s)\n\nIndicators in lists (first column):\n """\n + str_installed + \' script is installed\\n \' + str_unknown +\n \' unknown script\\n \' + str_running +\n """ script is running (loaded)\n """ + str_obsolete +\n \' script is obsolete (new version available)\\n\\nExamples:\\n /\' +\n SCRIPT_COMMAND + """ list => list all scripts\n /""" +\n SCRIPT_COMMAND +\n """ list game => list all scripts with text/tag "game"\n /""" +\n SCRIPT_COMMAND + """ install beep.pl => install script beep.pl\n /""" +\n SCRIPT_COMMAND + \' remove beep.pl => remove script beep.pl\')', '"""list %(weeget_scripts_tags) || listinstalled %(weeget_scripts_tags) || show %(weeget_scripts) || install %(weeget_scripts)|%* || remove %(weeget_scripts_installed)|%* || check || update || upgrade"""', '"""wg_cmd"""', '""""""'], {}), '(SCRIPT_COMMAND, 
\'WeeChat scripts manager\',\n \'list|listinstalled [<text>|<tag>] || show <script> || install|remove <script> [<script>...] || check|update|upgrade\'\n , \n """ list: list scripts (search text if given)\nlistinstalled: list installed scripts (search text if given)\n show: show detailed information about a script (in repository)\n install: install/upgrade script(s)\n check: check if local scripts needs upgrade\n update: update local scripts cache\n upgrade: upgrade all local scripts if they are obsolete\n remove: remove script(s)\n\nIndicators in lists (first column):\n """\n + str_installed + \' script is installed\\n \' + str_unknown +\n \' unknown script\\n \' + str_running +\n """ script is running (loaded)\n """ + str_obsolete +\n \' script is obsolete (new version available)\\n\\nExamples:\\n /\' +\n SCRIPT_COMMAND + """ list => list all scripts\n /""" +\n SCRIPT_COMMAND +\n """ list game => list all scripts with text/tag "game"\n /""" +\n SCRIPT_COMMAND + """ install beep.pl => install script beep.pl\n /""" +\n SCRIPT_COMMAND + \' remove beep.pl => remove script beep.pl\',\n \'list %(weeget_scripts_tags) || listinstalled %(weeget_scripts_tags) || show %(weeget_scripts) || install %(weeget_scripts)|%* || remove %(weeget_scripts_installed)|%* || check || update || upgrade\'\n , \'wg_cmd\', \'\')\n', (37483, 38892), False, 'import weechat\n'), ((39787, 39897), 'weechat.hook_completion', 'weechat.hook_completion', (['"""weeget_scripts"""', '"""list of scripts in repository"""', '"""wg_completion_scripts_cb"""', '""""""'], {}), "('weeget_scripts', 'list of scripts in repository',\n 'wg_completion_scripts_cb', '')\n", (39810, 39897), False, 'import weechat\n'), ((39934, 40060), 'weechat.hook_completion', 'weechat.hook_completion', (['"""weeget_scripts_installed"""', '"""list of scripts installed"""', '"""wg_completion_scripts_installed_cb"""', '""""""'], {}), "('weeget_scripts_installed',\n 'list of scripts installed', 'wg_completion_scripts_installed_cb', '')\n", 
(39957, 40060), False, 'import weechat\n'), ((40097, 40217), 'weechat.hook_completion', 'weechat.hook_completion', (['"""weeget_scripts_tags"""', '"""tags of scripts in repository"""', '"""wg_completion_scripts_tags_cb"""', '""""""'], {}), "('weeget_scripts_tags',\n 'tags of scripts in repository', 'wg_completion_scripts_tags_cb', '')\n", (40120, 40217), False, 'import weechat\n'), ((8016, 8070), 'weechat.config_string', 'weechat.config_string', (["wg_config_option['scripts_dir']"], {}), "(wg_config_option['scripts_dir'])\n", (8037, 8070), False, 'import weechat\n'), ((8533, 8587), 'weechat.config_string', 'weechat.config_string', (["wg_config_option['scripts_url']"], {}), "(wg_config_option['scripts_url'])\n", (8554, 8587), False, 'import weechat\n'), ((10480, 10525), 'weechat.infolist_string', 'weechat.infolist_string', (['infolist', '"""filename"""'], {}), "(infolist, 'filename')\n", (10503, 10525), False, 'import weechat\n'), ((11336, 11371), 'weechat.info_get', 'weechat.info_get', (['"""weechat_dir"""', '""""""'], {}), "('weechat_dir', '')\n", (11352, 11371), False, 'import weechat\n'), ((19948, 20033), 'weechat.prnt', 'weechat.prnt', (['""""""', "(' Date: added: %s, updated: %s' % (date_added, date_updated))"], {}), "('', ' Date: added: %s, updated: %s' % (date_added,\n date_updated))\n", (19960, 20033), False, 'import weechat\n'), ((20081, 20133), 'weechat.prnt', 'weechat.prnt', (['""""""', "(' Date: added: %s' % date_added)"], {}), "('', ' Date: added: %s' % date_added)\n", (20093, 20133), False, 'import weechat\n'), ((20707, 20746), 'weechat.prnt', 'weechat.prnt', (['""""""', "(' Min: %s' % vmin)"], {}), "('', ' Min: %s' % vmin)\n", (20719, 20746), False, 'import weechat\n'), ((20782, 20821), 'weechat.prnt', 'weechat.prnt', (['""""""', "(' Max: %s' % vmax)"], {}), "('', ' Max: %s' % vmax)\n", (20794, 20821), False, 'import weechat\n'), ((32237, 32293), 'weechat.prnt', 'weechat.prnt', (['""""""', "('%s: scripts downloaded' % SCRIPT_NAME)"], {}), "('', '%s: 
scripts downloaded' % SCRIPT_NAME)\n", (32249, 32293), False, 'import weechat\n'), ((33390, 33454), 'weechat.config_integer', 'weechat.config_integer', (["wg_config_option['scripts_cache_expire']"], {}), "(wg_config_option['scripts_cache_expire'])\n", (33412, 33454), False, 'import weechat\n'), ((34503, 34589), 'weechat.prnt', 'weechat.prnt', (['""""""', '(\'%s: too few arguments for action "%s"\' % (SCRIPT_NAME, argv[0]))'], {}), '(\'\', \'%s: too few arguments for action "%s"\' % (SCRIPT_NAME,\n argv[0]))\n', (34515, 34589), False, 'import weechat\n'), ((35217, 35320), 'weechat.hook_completion_list_add', 'weechat.hook_completion_list_add', (['completion', "script['full_name']", '(0)', 'weechat.WEECHAT_LIST_POS_SORT'], {}), "(completion, script['full_name'], 0,\n weechat.WEECHAT_LIST_POS_SORT)\n", (35249, 35320), False, 'import weechat\n'), ((37194, 37215), 'weechat.color', 'weechat.color', (['"""chat"""'], {}), "('chat')\n", (37207, 37215), False, 'import weechat\n'), ((37273, 37294), 'weechat.color', 'weechat.color', (['"""chat"""'], {}), "('chat')\n", (37286, 37294), False, 'import weechat\n'), ((37352, 37373), 'weechat.color', 'weechat.color', (['"""chat"""'], {}), "('chat')\n", (37365, 37373), False, 'import weechat\n'), ((37433, 37454), 'weechat.color', 'weechat.color', (['"""chat"""'], {}), "('chat')\n", (37446, 37454), False, 'import weechat\n'), ((12633, 12668), 'weechat.info_get', 'weechat.info_get', (['"""weechat_dir"""', '""""""'], {}), "('weechat_dir', '')\n", (12649, 12668), False, 'import weechat\n'), ((13742, 13764), 'os.path.basename', 'os.path.basename', (['file'], {}), '(file)\n', (13758, 13764), False, 'import sys, os, stat, time, gzip, hashlib, xml.dom.minidom\n'), ((15760, 15821), 'weechat.prnt', 'weechat.prnt', (['""""""', '(\'Scripts installed matching "%s":\' % search)'], {}), '(\'\', \'Scripts installed matching "%s":\' % search)\n', (15772, 15821), False, 'import weechat\n'), ((16079, 16117), 'weechat.prnt', 'weechat.prnt', (['""""""', 
'"""Scripts installed:"""'], {}), "('', 'Scripts installed:')\n", (16091, 16117), False, 'import weechat\n'), ((33514, 33525), 'time.time', 'time.time', ([], {}), '()\n', (33523, 33525), False, 'import sys, os, stat, time, gzip, hashlib, xml.dom.minidom\n'), ((33639, 33660), 'os.unlink', 'os.unlink', (['cache_file'], {}), '(cache_file)\n', (33648, 33660), False, 'import sys, os, stat, time, gzip, hashlib, xml.dom.minidom\n'), ((35801, 35904), 'weechat.hook_completion_list_add', 'weechat.hook_completion_list_add', (['completion', "script['full_name']", '(0)', 'weechat.WEECHAT_LIST_POS_SORT'], {}), "(completion, script['full_name'], 0,\n weechat.WEECHAT_LIST_POS_SORT)\n", (35833, 35904), False, 'import weechat\n'), ((10591, 10617), 'os.path.basename', 'os.path.basename', (['filename'], {}), '(filename)\n', (10607, 10617), False, 'import sys, os, stat, time, gzip, hashlib, xml.dom.minidom\n'), ((13815, 13837), 'os.path.basename', 'os.path.basename', (['file'], {}), '(file)\n', (13831, 13837), False, 'import sys, os, stat, time, gzip, hashlib, xml.dom.minidom\n'), ((18737, 18758), 'weechat.color', 'weechat.color', (['"""chat"""'], {}), "('chat')\n", (18750, 18758), False, 'import weechat\n'), ((18989, 19010), 'weechat.color', 'weechat.color', (['"""chat"""'], {}), "('chat')\n", (19002, 19010), False, 'import weechat\n'), ((22714, 22755), 'weechat.unhook', 'weechat.unhook', (["wg_hook_process['script']"], {}), "(wg_hook_process['script'])\n", (22728, 22755), False, 'import weechat\n'), ((25545, 25566), 'weechat.color', 'weechat.color', (['"""chat"""'], {}), "('chat')\n", (25558, 25566), False, 'import weechat\n'), ((25592, 25613), 'os.path.dirname', 'os.path.dirname', (['file'], {}), '(file)\n', (25607, 25613), False, 'import sys, os, stat, time, gzip, hashlib, xml.dom.minidom\n'), ((25722, 25744), 'os.path.basename', 'os.path.basename', (['file'], {}), '(file)\n', (25738, 25744), False, 'import sys, os, stat, time, gzip, hashlib, xml.dom.minidom\n'), ((33528, 33547), 
'os.stat', 'os.stat', (['cache_file'], {}), '(cache_file)\n', (33535, 33547), False, 'import sys, os, stat, time, gzip, hashlib, xml.dom.minidom\n'), ((36365, 36453), 'weechat.hook_completion_list_add', 'weechat.hook_completion_list_add', (['completion', 'tag', '(0)', 'weechat.WEECHAT_LIST_POS_SORT'], {}), '(completion, tag, 0, weechat.\n WEECHAT_LIST_POS_SORT)\n', (36397, 36453), False, 'import weechat\n'), ((36864, 36918), 'weechat.config_string', 'weechat.config_string', (["wg_config_option['scripts_url']"], {}), "(wg_config_option['scripts_url'])\n", (36885, 36918), False, 'import weechat\n'), ((37108, 37131), 'weechat.prefix', 'weechat.prefix', (['"""error"""'], {}), "('error')\n", (37122, 37131), False, 'import weechat\n'), ((16226, 16257), 'weechat.info_get', 'weechat.info_get', (['"""version"""', '""""""'], {}), "('version', '')\n", (16242, 16257), False, 'import weechat\n'), ((17545, 17566), 'weechat.color', 'weechat.color', (['"""chat"""'], {}), "('chat')\n", (17558, 17566), False, 'import weechat\n'), ((17818, 17839), 'weechat.color', 'weechat.color', (['"""chat"""'], {}), "('chat')\n", (17831, 17839), False, 'import weechat\n'), ((21822, 21843), 'weechat.color', 'weechat.color', (['"""chat"""'], {}), "('chat')\n", (21835, 21843), False, 'import weechat\n'), ((23942, 23965), 'weechat.prefix', 'weechat.prefix', (['"""error"""'], {}), "('error')\n", (23956, 23965), False, 'import weechat\n'), ((26907, 26928), 'weechat.color', 'weechat.color', (['"""chat"""'], {}), "('chat')\n", (26920, 26928), False, 'import weechat\n'), ((30185, 30208), 'weechat.prefix', 'weechat.prefix', (['"""error"""'], {}), "('error')\n", (30199, 30208), False, 'import weechat\n'), ((32111, 32134), 'weechat.prefix', 'weechat.prefix', (['"""error"""'], {}), "('error')\n", (32125, 32134), False, 'import weechat\n'), ((15949, 15980), 'weechat.info_get', 'weechat.info_get', (['"""version"""', '""""""'], {}), "('version', '')\n", (15965, 15980), False, 'import weechat\n'), ((22322, 22343), 
'weechat.color', 'weechat.color', (['"""chat"""'], {}), "('chat')\n", (22335, 22343), False, 'import weechat\n'), ((22618, 22639), 'weechat.color', 'weechat.color', (['"""chat"""'], {}), "('chat')\n", (22631, 22639), False, 'import weechat\n'), ((30443, 30466), 'weechat.prefix', 'weechat.prefix', (['"""error"""'], {}), "('error')\n", (30457, 30466), False, 'import weechat\n'), ((28373, 28396), 'weechat.prefix', 'weechat.prefix', (['"""error"""'], {}), "('error')\n", (28387, 28396), False, 'import weechat\n')] |
#
#
# vizMetrics - an interactive toolset for calculating visualization metrics
#
#
#
import os
from kivy.app import App
from kivy.uix.tabbedpanel import TabbedPanel
from kivy.lang import Builder
from kivy.uix.button import Button
from kivy.uix.boxlayout import BoxLayout
from kivy.properties import StringProperty
from saliencyMetric import low_res, hi_res, aalto, edgeCongestion
# Absolute directory holding the source images to analyse.
pathForImages = "C:/Users/nickh/OneDrive/Newcastle/Research/MetricsOfVisualization/vizMetrics/Images/"
# Absolute directory where the generated metric images are written.
generatedImages = "C:/Users/nickh/OneDrive/Newcastle/Research/MetricsOfVisualization/vizMetrics/_Images/"
# Name of the currently selected image file; updated by vizMetrics.select().
currentFile = "default.png"
class aaltoSaliencyButton(Button):
    """Button that computes the Aalto saliency metric for the current image."""
    # File name of the generated saliency image (bound in the .kv layout --
    # TODO confirm).
    aaltoSaliencyImage = StringProperty('blank.png')

    def __init__(self, **kwargs):
        # BUG FIX: the original called super(Button, self).__init__, which
        # starts the MRO lookup *after* Button and therefore skips
        # Button.__init__ entirely; anchor on the subclass instead.
        super(aaltoSaliencyButton, self).__init__(**kwargs)

    def saliency_aalto(self):
        """Run the Aalto saliency metric on `currentFile` and print the result.

        Writes the saliency image next to the other generated images and
        updates `aaltoSaliencyImage` so the UI can display it.
        """
        fName, fExt = os.path.splitext(currentFile)
        self.aaltoSaliencyImage = fName + '_aalto_saliency' + fExt
        inputFile = pathForImages + currentFile
        saliencyFile = generatedImages + self.aaltoSaliencyImage
        saliencyDensityVal = aalto(inputFile, saliencyFile)
        print( "Aalto saliency : " + str(saliencyDensityVal) )
class highSaliencyButton(Button):
    """Button that computes the high-resolution saliency metric."""
    # File name of the generated saliency image (bound in the .kv layout --
    # TODO confirm).
    highSaliencyImage = StringProperty('blank.png')

    def __init__(self, **kwargs):
        # BUG FIX: the original called super(Button, self).__init__, which
        # skips Button.__init__ in the MRO; anchor on the subclass instead.
        super(highSaliencyButton, self).__init__(**kwargs)

    def saliency_high(self):
        """Run the hi-res saliency metric on `currentFile` and print the result."""
        fName, fExt = os.path.splitext(currentFile)
        self.highSaliencyImage = fName + '_high_saliency' + fExt
        inputFile = pathForImages + currentFile
        saliencyFile = generatedImages + self.highSaliencyImage
        saliencyDensityVal = hi_res(inputFile, saliencyFile)
        print( "Hi-res saliency : " + str(saliencyDensityVal) )
class lowSaliencyButton(Button):
    """Button that computes the low-resolution saliency metric."""
    # File name of the generated saliency image (bound in the .kv layout --
    # TODO confirm).
    lowSaliencyImage = StringProperty('blank.png')

    def __init__(self, **kwargs):
        # BUG FIX: the original called super(Button, self).__init__, which
        # skips Button.__init__ in the MRO; anchor on the subclass instead.
        super(lowSaliencyButton, self).__init__(**kwargs)

    def saliency_low(self):
        """Run the lo-res saliency metric on `currentFile` and print the result."""
        fName, fExt = os.path.splitext(currentFile)
        self.lowSaliencyImage = fName + '_low_saliency' + fExt
        inputFile = pathForImages + currentFile
        saliencyFile = generatedImages + self.lowSaliencyImage
        saliencyDensityVal = low_res(inputFile, saliencyFile)
        print( "Lo-res saliency : " + str(saliencyDensityVal) )
class edgeCongestionButton(Button):
    """Button that computes the edge-congestion metric for the current image."""
    # File name of the generated congestion image (bound in the .kv layout --
    # TODO confirm).
    edgeCongestionImage = StringProperty('blank.png')

    def __init__(self, **kwargs):
        # BUG FIX: the original called super(Button, self).__init__, which
        # skips Button.__init__ in the MRO; anchor on the subclass instead.
        super(edgeCongestionButton, self).__init__(**kwargs)

    def edgeCongestion(self):
        """Run the edge-congestion metric on `currentFile` and print the result.

        NOTE: the method shares its name with the imported module-level
        `edgeCongestion` function; inside the body the bare name still
        resolves to the module-level function (methods are not in scope).
        """
        fName, fExt = os.path.splitext(currentFile)
        self.edgeCongestionImage = fName + '_edgeCongestion' + fExt
        inputFile = pathForImages + currentFile
        saliencyFile = generatedImages + self.edgeCongestionImage
        edgeCongestionVal = edgeCongestion(inputFile, saliencyFile)
        print( "Edge congestion : " + str(edgeCongestionVal) )
class vizMetrics(TabbedPanel):
    """Root widget: tabbed panel hosting the metric buttons and file chooser."""

    def select(self, *args):
        """File-chooser callback: remember the selected image name.

        `args[1]` is the chooser's selection list; it is empty when the user
        clicks an empty area.  Updates the module-level `currentFile` and
        mirrors the name into `self.label` (presumably defined in the .kv
        layout -- TODO confirm).
        """
        global currentFile
        try:
            chosen = args[1][0]
            name = os.path.basename(chosen)
            currentFile = name
            self.label.text = name
        except (IndexError, AttributeError):
            # BUG FIX: the original bare `except: pass` swallowed *every*
            # exception; only ignore the expected "nothing selected" /
            # "label not wired up yet" cases.
            pass
class vizMetricsApp(App):
    # Kivy application wrapper; the root widget is created in build().
    # NOTE(review): these class-scope `global` declarations are no-ops --
    # the names are only read, never assigned, in this class body.
    global pathForImages
    global generatedImages
    # Class attributes exposing the configured directories (presumably read
    # from the .kv layout -- TODO confirm).
    pathToImages = pathForImages
    pathToGenerated = generatedImages
    # Executed once, at class-definition (import) time.
    print( "vizMetrics I : starting interface " )
    def build(self):
        # Return the root widget of the application.
        return vizMetrics()
#
# Defaults for screen size and app startup
#
from kivy.config import Config
# Fixed initial window size; must be set before the app is built.
Config.set('graphics', 'width', '1422')
Config.set('graphics', 'height', '774')
if __name__ == '__main__':
    # Launch the Kivy application event loop.
    vizMetricsApp().run()
"kivy.config.Config.set",
"saliencyMetric.hi_res",
"saliencyMetric.edgeCongestion",
"os.path.splitext",
"saliencyMetric.low_res",
"os.path.basename",
"saliencyMetric.aalto",
"kivy.properties.StringProperty"
] | [((4136, 4175), 'kivy.config.Config.set', 'Config.set', (['"""graphics"""', '"""width"""', '"""1422"""'], {}), "('graphics', 'width', '1422')\n", (4146, 4175), False, 'from kivy.config import Config\n'), ((4176, 4215), 'kivy.config.Config.set', 'Config.set', (['"""graphics"""', '"""height"""', '"""774"""'], {}), "('graphics', 'height', '774')\n", (4186, 4215), False, 'from kivy.config import Config\n'), ((754, 781), 'kivy.properties.StringProperty', 'StringProperty', (['"""blank.png"""'], {}), "('blank.png')\n", (768, 781), False, 'from kivy.properties import StringProperty\n'), ((1481, 1508), 'kivy.properties.StringProperty', 'StringProperty', (['"""blank.png"""'], {}), "('blank.png')\n", (1495, 1508), False, 'from kivy.properties import StringProperty\n'), ((2203, 2230), 'kivy.properties.StringProperty', 'StringProperty', (['"""blank.png"""'], {}), "('blank.png')\n", (2217, 2230), False, 'from kivy.properties import StringProperty\n'), ((2928, 2955), 'kivy.properties.StringProperty', 'StringProperty', (['"""blank.png"""'], {}), "('blank.png')\n", (2942, 2955), False, 'from kivy.properties import StringProperty\n'), ((1013, 1042), 'os.path.splitext', 'os.path.splitext', (['currentFile'], {}), '(currentFile)\n', (1029, 1042), False, 'import os\n'), ((1259, 1289), 'saliencyMetric.aalto', 'aalto', (['inputFile', 'saliencyFile'], {}), '(inputFile, saliencyFile)\n', (1264, 1289), False, 'from saliencyMetric import low_res, hi_res, aalto, edgeCongestion\n'), ((1739, 1768), 'os.path.splitext', 'os.path.splitext', (['currentFile'], {}), '(currentFile)\n', (1755, 1768), False, 'import os\n'), ((1982, 2013), 'saliencyMetric.hi_res', 'hi_res', (['inputFile', 'saliencyFile'], {}), '(inputFile, saliencyFile)\n', (1988, 2013), False, 'from saliencyMetric import low_res, hi_res, aalto, edgeCongestion\n'), ((2460, 2489), 'os.path.splitext', 'os.path.splitext', (['currentFile'], {}), '(currentFile)\n', (2476, 2489), False, 'import os\n'), ((2700, 2732), 
'saliencyMetric.low_res', 'low_res', (['inputFile', 'saliencyFile'], {}), '(inputFile, saliencyFile)\n', (2707, 2732), False, 'from saliencyMetric import low_res, hi_res, aalto, edgeCongestion\n'), ((3187, 3216), 'os.path.splitext', 'os.path.splitext', (['currentFile'], {}), '(currentFile)\n', (3203, 3216), False, 'import os\n'), ((3434, 3473), 'saliencyMetric.edgeCongestion', 'edgeCongestion', (['inputFile', 'saliencyFile'], {}), '(inputFile, saliencyFile)\n', (3448, 3473), False, 'from saliencyMetric import low_res, hi_res, aalto, edgeCongestion\n'), ((3697, 3719), 'os.path.basename', 'os.path.basename', (['file'], {}), '(file)\n', (3713, 3719), False, 'import os\n'), ((3750, 3772), 'os.path.basename', 'os.path.basename', (['file'], {}), '(file)\n', (3766, 3772), False, 'import os\n')] |
#coding=utf8
import sys
import json
codes_fmt = '''
auto ns = LuaAdapterEnvironment::getInstance().getNamespace("%s");
ns->begin();
{
ns->registerClass("%s", typeid(%s));
auto cls = ns->getClass("%s");
cls->begin();
%s//extends
%s//constructors
%s//destructor
%s//nonmember variables
%s//member variables
%s//nonmember functions
%s//member functions
cls->end();
}
ns->end();
'''
def __gen_extends( cls_full_name, super_clses_full ):
    """Return the C++ `cls->extends<...>()` line, or "" when there is no base."""
    if not super_clses_full:
        return ""
    base_list = ",".join( super_clses_full )
    return 'cls->extends<%s, %s>();\n' % ( cls_full_name, base_list )
def __gen_nonmember_function( cls_full_name, func_name ):
    """Emit a registerFunction line for a non-member (static/free) function."""
    qualified = "::".join( (cls_full_name, func_name) )
    return 'cls->registerFunction(%s, "%s");\n' % ( qualified, func_name )
def __gen_member_function( cls_full_name, func_name ):
    """Emit a registerFunction line for a member function (takes its address)."""
    qualified = "::".join( (cls_full_name, func_name) )
    return 'cls->registerFunction(&%s, "%s");\n' % ( qualified, func_name )
def __gen_nonmember_variable( cls_full_name, vari_name ):
    """Emit a registerVariable line for a non-member (static) variable."""
    qualified = "::".join( (cls_full_name, vari_name) )
    return 'cls->registerVariable(&%s, "%s");\n' % ( qualified, vari_name )
def __gen_member_variable( cls_full_name, vari_name ):
    """Emit a registerVariable line for a member variable.

    BUG FIX: the original delegated to `__gen_nonmember_variables` (note the
    trailing 's'), a name that does not exist anywhere in this module, so any
    config listing `member_variables` raised NameError.  Generate the line
    directly, with the same format as the non-member variant.
    """
    return 'cls->registerVariable(&%s, "%s");\n' % ( "::".join( (cls_full_name, vari_name) ), vari_name )
def gen_cls_codes( codes_list, ns_name, cls_full_name, super_clses_full, nonmember_funcs, member_funcs, nonmember_varis, member_varis ):
    """Append the C++ registration snippet for one class to `codes_list`.

    NOTE: `ns_name` is accepted for symmetry with the call site but is not
    used; the Lua-side name is derived from `cls_full_name`.
    """
    lua_name = cls_full_name.replace( "::", "." )
    codes_list.append( 'ns->begin();{\n' )
    codes_list.append( '    ns->registerClass("%s", typeid(%s));\n' % (lua_name, cls_full_name) )
    codes_list.append( '    auto cls = ns->getClass("%s");\n' % lua_name )
    codes_list.append( '    cls->begin();\n' )
    codes_list.append( '    %s' % __gen_extends( cls_full_name, super_clses_full ) )
    for name in nonmember_varis:
        codes_list.append( '    %s' % __gen_nonmember_variable( cls_full_name, name ) )
    for name in member_varis:
        codes_list.append( '    %s' % __gen_member_variable( cls_full_name, name ) )
    for name in nonmember_funcs:
        codes_list.append( '    %s' % __gen_nonmember_function( cls_full_name, name ) )
    for name in member_funcs:
        codes_list.append( '    %s' % __gen_member_function( cls_full_name, name ) )
    codes_list.append( '    cls->end();\n' )
    codes_list.append( '}ns->end();\n' )
def __read_file_content( fpath ):
    """Return the full text content of the file at `fpath`.

    BUG FIX: the original raised a *string* (`raise "cant open file %s" %
    atlas`) -- illegal in Python 3 -- and referenced an undefined name
    `atlas`; the `if not f` guard was also dead code because open() raises on
    failure instead of returning a falsy value.  Let the IOError/OSError from
    open() propagate and use a context manager so the handle is always closed.
    """
    with open( fpath ) as f:
        return f.read()
def gen_by_config( fPath ):
    """Generate C++ Lua-binding registration code from a JSON config file.

    Reads the config at `fPath`, emits one registration section per class in
    the `classes` array and writes the result to "result.cpp" in the current
    directory.
    """
    fContent = __read_file_content( fPath )
    fJson = json.loads( fContent )
    nsInfo = fJson["namespace"]
    ns_name = nsInfo["namespace_name"]
    la_ns_name = ns_name.replace( "::", "." )
    codes_list = []
    codes_list.append( 'void lua_support_register_auto_meteor() {\n' )
    codes_list.append( 'auto ns = LuaAdapterEnvironment::getInstance().getNamespace("%s");\n' % la_ns_name )
    for clsInfo in fJson["classes"]:
        cls_name = clsInfo["class_name"]
        cls_full_name = "::".join( [ns_name, cls_name] )
        # BUG FIX: dict.get() replaces the Python-2-only dict.has_key()
        # idiom and works on both Python 2 and 3.
        super_clses = clsInfo.get( "extends", () )
        def __path_ns( super_cls ):
            # Qualify a base-class name with the namespace unless it is
            # already fully qualified.
            if super_cls.find( "::" ) == -1:
                return "::".join( [ns_name, super_cls] )
            else:
                return super_cls
        # list() so the result is a real sequence on Python 3 too, where
        # map() returns a lazy iterator.
        super_clses_full = list( map( __path_ns, super_clses ) )
        nonmember_funcs = clsInfo.get( "nonmember_functions", () )
        member_funcs = clsInfo.get( "member_functions", () )
        nonmember_varis = clsInfo.get( "nonmember_variables", () )
        member_varis = clsInfo.get( "member_variables", () )
        gen_cls_codes( codes_list, ns_name, cls_full_name, super_clses_full,
                      nonmember_funcs, member_funcs, nonmember_varis, member_varis )
    #end for
    codes_list.append( '}\n' )
    # Context manager guarantees the output file is flushed and closed.
    with open( "result.cpp", "w+" ) as f:
        for code in codes_list:
            f.write( code )
if "__main__" == __name__:
fPath = sys.argv[1]
gen_by_config(fPath) | [
"json.loads"
] | [((2548, 2568), 'json.loads', 'json.loads', (['fContent'], {}), '(fContent)\n', (2558, 2568), False, 'import json\n')] |
'''
Created on 2009-08-11
@author: malem303
'''
import unittest
from imugrabber.algorithms import fong_accelero, utils, statistics
from imugrabber.algorithms import io
import os
import scipy as sp
from numpy import testing
class FongTests(unittest.TestCase):
def setUp(self):
self.misalignmentsAndScales = sp.array([[ 4.08269136e-03, -1.96002082e-05, 1.16692771e-04],
[ -6.73123099e-06, 3.86658837e-03, -2.77361987e-04],
[ -6.43895175e-05, 2.91260930e-04, 3.93614477e-03]])
self.biases = sp.array([[ 604.00283039],
[ 480.33539568],
[ 522.23054001]])
def test_fit_parameters(self):
dataSet = io.float_columns_from_CSV(csvFileName = "FongTests.csv", path = io._test_path)
x, y, z = dataSet['avgX'], dataSet['avgY'], dataSet['avgZ']
measures = utils.build_measures_matrix(x, y, z)
misalignmentsAndScales, biases = fong_accelero.fit(measures)
testing.assert_almost_equal(misalignmentsAndScales, self.misalignmentsAndScales)
testing.assert_almost_equal(biases, self.biases)
if __name__ == "__main__":
#import sys;sys.argv = ['', 'Test.testName']
unittest.main() | [
"imugrabber.algorithms.fong_accelero.fit",
"scipy.array",
"imugrabber.algorithms.utils.build_measures_matrix",
"numpy.testing.assert_almost_equal",
"imugrabber.algorithms.io.float_columns_from_CSV",
"unittest.main"
] | [((1366, 1381), 'unittest.main', 'unittest.main', ([], {}), '()\n', (1379, 1381), False, 'import unittest\n'), ((332, 502), 'scipy.array', 'sp.array', (['[[0.00408269136, -1.96002082e-05, 0.000116692771], [-6.73123099e-06, \n 0.00386658837, -0.000277361987], [-6.43895175e-05, 0.00029126093, \n 0.00393614477]]'], {}), '([[0.00408269136, -1.96002082e-05, 0.000116692771], [-\n 6.73123099e-06, 0.00386658837, -0.000277361987], [-6.43895175e-05, \n 0.00029126093, 0.00393614477]])\n', (340, 502), True, 'import scipy as sp\n'), ((630, 688), 'scipy.array', 'sp.array', (['[[604.00283039], [480.33539568], [522.23054001]]'], {}), '([[604.00283039], [480.33539568], [522.23054001]])\n', (638, 688), True, 'import scipy as sp\n'), ((821, 895), 'imugrabber.algorithms.io.float_columns_from_CSV', 'io.float_columns_from_CSV', ([], {'csvFileName': '"""FongTests.csv"""', 'path': 'io._test_path'}), "(csvFileName='FongTests.csv', path=io._test_path)\n", (846, 895), False, 'from imugrabber.algorithms import io\n'), ((1005, 1041), 'imugrabber.algorithms.utils.build_measures_matrix', 'utils.build_measures_matrix', (['x', 'y', 'z'], {}), '(x, y, z)\n', (1032, 1041), False, 'from imugrabber.algorithms import fong_accelero, utils, statistics\n'), ((1093, 1120), 'imugrabber.algorithms.fong_accelero.fit', 'fong_accelero.fit', (['measures'], {}), '(measures)\n', (1110, 1120), False, 'from imugrabber.algorithms import fong_accelero, utils, statistics\n'), ((1138, 1223), 'numpy.testing.assert_almost_equal', 'testing.assert_almost_equal', (['misalignmentsAndScales', 'self.misalignmentsAndScales'], {}), '(misalignmentsAndScales, self.misalignmentsAndScales\n )\n', (1165, 1223), False, 'from numpy import testing\n'), ((1236, 1284), 'numpy.testing.assert_almost_equal', 'testing.assert_almost_equal', (['biases', 'self.biases'], {}), '(biases, self.biases)\n', (1263, 1284), False, 'from numpy import testing\n')] |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Sun Nov 1 18:25:21 2020
@author: vinnie
"""
import tweepy
from collections import defaultdict
import pandas as pd
import argparse
import os
from stats import tweet_analyzer
from wordanalysis import WordsAnalysis
from keys import (
api_key,
api_secret_key,
access_token,
access_token_secret
)
# Authenticate against the Twitter API at import time with the OAuth
# credentials imported from keys.py; `api` is the shared client instance.
auth = tweepy.OAuthHandler(api_key, api_secret_key)
auth.set_access_token(access_token, access_token_secret)
api = tweepy.API(auth)
class GetTweets:
    """Fetch a user's timeline page by page and expose it as a DataFrame.

    Parameters
    ----------
    api : tweepy.API
        Authenticated API client.
    userid : str
        Screen name of the account to fetch.
    include_rts : bool
        Whether retweets are included in the timeline (default False).
    """

    def __init__(self, api, userid, include_rts = False):
        # BUG FIX: keep the client on the instance -- fetch_tweets()
        # previously relied on the module-level `api` global.
        self.api = api
        self.userid = userid
        self.include_rts = include_rts
        print('Fetching tweets of: ', self.userid)
        self.tweets = self.api.user_timeline(screen_name = self.userid, count = 200, include_rts = self.include_rts, tweet_mode = 'extended')
        self.tweets_dict = defaultdict(list)
        # NOTE(review): raises IndexError when the account has no tweets.
        self.acc_info = self.tweets[0].user

    def __check_if_retweet(self, obj):
        """Return True when `obj` (a tweepy Status) is a retweet."""
        return hasattr(obj, 'retweeted_status')

    def __get_hashtags(self, hashtags):
        """Extract the hashtag texts from a 'hashtags' entity list."""
        return [tags['text'] for tags in hashtags]

    def __get_account_info(self):
        """Print account metadata and save it to data/info<userid>."""
        twt = self.acc_info
        print(f'\n \nName:\t {twt.name}')
        print(f'Description: {twt.description}' )
        # BUG FIX: corrected the "Follwing" typo in the user-facing output.
        print(f'Followers: {twt.followers_count}\t Following: {twt.friends_count}' )
        print(f'Account created on: {twt.created_at}\t Location: {twt.location}\n')
        with open("data/info" + self.userid, "w") as text_file:
            text_file.write(f'Name: {twt.name}\n Description: {twt.description}\n \
                 Followers: {twt.followers_count}\t Following: {twt.friends_count}\n \
                 Account created on: {twt.created_at}\t Location: {twt.location}')

    def __build_dictionary(self):
        """Append the current page of tweets to the accumulator dict."""
        for status in self.tweets:
            self.tweets_dict['id'].append(status.id_str)
            self.tweets_dict['favourite_count'].append(status.favorite_count)
            self.tweets_dict['created_at'].append(status.created_at)
            self.tweets_dict['retweet_count'].append(status.retweet_count)
            self.tweets_dict['tweet'].append(status.full_text)
            self.tweets_dict['tags'].append(self.__get_hashtags(status.entities.get('hashtags')))
            tweet_url = 'https://twitter.com/twitter/status/' + status.id_str
            self.tweets_dict['tweet_url'].append(tweet_url)
            if not self.include_rts:
                self.tweets_dict['is_retweet'].append(self.__check_if_retweet(status))

    def fetch_tweets(self):
        """Page backwards through the timeline until exhausted.

        Returns
        -------
        pandas.DataFrame with one row per tweet.
        """
        oldest_id = self.tweets[-1].id
        self.__build_dictionary()
        n_tweets = len(self.tweets)
        while True:
            print('Tweets fetched till now {}'.format(n_tweets))
            # BUG FIX: the paging loop previously hard-coded
            # include_rts=False, ignoring the constructor flag, and used the
            # module-level `api` global instead of the injected client.
            self.tweets = self.api.user_timeline(screen_name = self.userid,
                                   count = 200, include_rts = self.include_rts,
                                   max_id = oldest_id - 1,
                                   tweet_mode = 'extended')
            if not self.tweets:
                break
            n_tweets += len(self.tweets)
            oldest_id = self.tweets[-1].id
            self.__build_dictionary()
        self.__get_account_info()
        return pd.DataFrame.from_dict(self.tweets_dict)

    def save_obj(self, df, name):
        """Write the DataFrame to data/<name>.csv (no index column)."""
        df.to_csv('data/'+ name + '.csv', index=False)
def main(USERID):
    """Download, persist, and analyse the timeline of ``USERID``."""
    fetcher = GetTweets(api, USERID)
    timeline = fetcher.fetch_tweets()
    fetcher.save_obj(timeline, USERID)
    stats = tweet_analyzer(timeline, plot=True)
    stats.get_stats()
    word_analysis = WordsAnalysis(timeline)
    word_analysis.analyse()
if __name__ == "__main__":
    cli = argparse.ArgumentParser(description='Get statistical analysis on tweeter users')
    cli.add_argument("-u", "--user", required=False, dest="user",
                     help="u/user_name to fetch the tweets", metavar="TWT_USER")
    user = cli.parse_args().user
    # Make sure the output folder exists before anything writes into it.
    if not os.path.isdir('data'):
        os.mkdir('data')
    main(user)
| [
"argparse.ArgumentParser",
"stats.tweet_analyzer",
"pandas.DataFrame.from_dict",
"wordanalysis.WordsAnalysis",
"tweepy.API",
"os.path.isdir",
"collections.defaultdict",
"os.mkdir",
"tweepy.OAuthHandler"
] | [((383, 427), 'tweepy.OAuthHandler', 'tweepy.OAuthHandler', (['api_key', 'api_secret_key'], {}), '(api_key, api_secret_key)\n', (402, 427), False, 'import tweepy\n'), ((492, 508), 'tweepy.API', 'tweepy.API', (['auth'], {}), '(auth)\n', (502, 508), False, 'import tweepy\n'), ((3969, 4005), 'stats.tweet_analyzer', 'tweet_analyzer', (['tweets_df'], {'plot': '(True)'}), '(tweets_df, plot=True)\n', (3983, 4005), False, 'from stats import tweet_analyzer\n'), ((4045, 4069), 'wordanalysis.WordsAnalysis', 'WordsAnalysis', (['tweets_df'], {}), '(tweets_df)\n', (4058, 4069), False, 'from wordanalysis import WordsAnalysis\n'), ((4134, 4219), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Get statistical analysis on tweeter users"""'}), "(description='Get statistical analysis on tweeter users'\n )\n", (4157, 4219), False, 'import argparse\n'), ((892, 909), 'collections.defaultdict', 'defaultdict', (['list'], {}), '(list)\n', (903, 909), False, 'from collections import defaultdict\n'), ((3647, 3687), 'pandas.DataFrame.from_dict', 'pd.DataFrame.from_dict', (['self.tweets_dict'], {}), '(self.tweets_dict)\n', (3669, 3687), True, 'import pandas as pd\n'), ((4447, 4468), 'os.path.isdir', 'os.path.isdir', (['"""data"""'], {}), "('data')\n", (4460, 4468), False, 'import os\n'), ((4479, 4495), 'os.mkdir', 'os.mkdir', (['"""data"""'], {}), "('data')\n", (4487, 4495), False, 'import os\n')] |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Thu Apr 15 09:44:30 2021
@author: erri
"""
import os
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.colors import ListedColormap, BoundaryNorm
########################################################################################################################
# SETUP FOLDERS
########################################################################################################################
# setup working directory and DEM's name
# Working directory and input DEM file names.
w_dir = '/home/erri/Documents/morphological_approach/3_output_data/q2.0_2/2_prc_laser/surveys/'
DEM1_name = 'matrix_bed_norm_q20S5.txt'
DEM2_name = 'matrix_bed_norm_q20S6.txt'
# Mask file used to drop data outside the channel domain.
array_mask, array_mask_path = 'array_mask.txt', '/home/erri/Documents/morphological_approach/2_raw_data'
# TODO Update the mask based on the new laser outputs [threshold at 12 mm]
path_DEM1 = os.path.join(w_dir, DEM1_name)
path_DEM2 = os.path.join(w_dir, DEM2_name)
# Label for all output files, e.g. 'DoD_S6-S5_' (characters 19:21 of the DEM
# file names hold the survey code).
DoD_name = 'DoD_' + DEM2_name[19:21] + '-' + DEM1_name[19:21] + '_'
# Output folder
name_out = 'script_outputs_' + DEM2_name[19:21] + '-' + DEM1_name[19:21]
dir_out = '/home/erri/Documents/morphological_approach/3_output_data/q1.0_2/2_prc_laser/DoDs/'
path_out = os.path.join(dir_out, name_out)
# Idiomatic replacement of the original ``if exists: pass / else: mkdir``;
# exist_ok makes it a no-op when the folder is already there, and missing
# parent directories are created as well.
os.makedirs(path_out, exist_ok=True)
########################################################################################################################
# SETUP SCRIPT PARAMETERS
########################################################################################################################
# Thresholds values
# Thresholds values
thrs_1 = 2.0 # [mm] detection threshold: |DoD| below this is zeroed out
thrs_2 = 15.0 # [mm] upper bound of the neighbourhood plausibility check
neigh_thrs = 4 # [-] minimum count of same-sign 3x3 neighbours to keep a pixel
# Pixel dimension (cell footprint used for the volume computation)
px_x = 50 # [mm]
px_y = 5 # [mm]
# Not a number raster value (NaN): no-data sentinel in the input DEMs and in
# the GIS-readable outputs.
NaN = -999
##############################################################################
# DATA READING...
##############################################################################
# Header initialization and extraction
# Header extraction: copy the first 7 lines of DEM1 (its ASCII header) into
# <DoD_name>header.txt next to the other outputs.  The original read the
# whole DEM file into an unused ``lines`` list just to keep these 7 lines.
# NOTE(review): 7 header lines are copied here while ``skiprows=8`` is used
# when the DEMs are parsed below -- verify which count is correct.
with open(path_DEM1, 'r') as file:
    header = [next(file) for i in range(0, 7)]
# Header printing in a file txt called header.txt
with open(path_out + '/' + DoD_name + 'header.txt', 'w') as head:
    head.writelines(header)
##############################################################################
# DATA LOADING...
##############################################################################
# Load both surveys, skipping the 8-line ASCII header.
DEM1 = np.loadtxt(path_DEM1, skiprows=8)
DEM2 = np.loadtxt(path_DEM2, skiprows=8)
# Shape control: crop both DEMs to their common extent when they differ.
arr_shape = min(DEM1.shape, DEM2.shape)
if not (DEM1.shape == DEM2.shape):
    print('Attention: DEMs have not the same shape.')
    rows = min(DEM1.shape[0], DEM2.shape[0])
    cols = min(DEM1.shape[1], DEM2.shape[1])
    # Bug fix: keep arr_shape a tuple (the original built a list here), so
    # the later comparison ``array_mask.shape == arr_shape`` can actually
    # succeed -- a tuple-vs-list ``==`` is always False.
    arr_shape = (rows, cols)
    DEM1 = DEM1[0:arr_shape[0], 0:arr_shape[1]]
    DEM2 = DEM2[0:arr_shape[0], 0:arr_shape[1]]
##############################################################################
# PERFORM DEM OF DIFFERENCE - DEM2-DEM1
##############################################################################
# mask for filtering data outside the channel domain
# Mask for filtering data outside the channel domain.
# NOTE: ``array_mask`` is re-bound here from the file name (string) to the
# loaded array.
array_mask = np.loadtxt(os.path.join(array_mask_path, array_mask))
if not(array_mask.shape == arr_shape):
    # Crop the mask to the common DEM extent.
    array_mask=array_mask[0:arr_shape[0], 0:arr_shape[1]]
# Binary mask: 0 where the mask is NaN (outside the channel), 1 elsewhere.
array_msk = np.where(np.isnan(array_mask), 0, 1)
# Same mask but NaN outside the channel, for NaN-propagating products.
array_msk_nan = np.where(np.logical_not(np.isnan(array_mask)), 1, np.nan)
# Raster dimension
dim_x, dim_y = DEM1.shape
# Raw DoD: DEM2 - DEM1, NaN wherever either survey holds the no-data value.
DoD_raw = np.zeros(DEM1.shape)
DoD_raw = np.where(np.logical_or(DEM1 == NaN, DEM2 == NaN), np.nan, DEM2 - DEM1)
# GIS-readable copy of the raw DoD (np.nan encoded as -999).
DoD_raw_rst = np.zeros(DEM1.shape)
DoD_raw_rst = np.where(np.logical_or(DEM1 == NaN, DEM2 == NaN), NaN, DEM2 - DEM1)
# Count the number of valid (non-NaN) pixels in the channel area.
DoD_count = np.count_nonzero(np.where(np.isnan(DoD_raw), 0, 1))
print('Active pixels:', DoD_count)
# DoD statistics
print('The minimum DoD value is:\n', np.nanmin(DoD_raw))
print('The maximum DoD value is:\n', np.nanmax(DoD_raw))
print('The DoD shape is:\n', DoD_raw.shape)
##############################################################################
# DATA FILTERING...
##############################################################################
# Perform domain-wide average
# Domain-wide weighted moving average: pad the raw DoD by one pixel (edge
# values) so the 3x3 window below can also be evaluated on border cells.
domain_avg = np.pad(DoD_raw, 1, mode='edge') # 1-pixel pad with edge values
DoD_mean = np.zeros(DEM1.shape)
for i in range (0, dim_x):
    for j in range (0, dim_y):
        if np.isnan(DoD_raw[i, j]):
            # Cells outside the valid domain stay NaN.
            DoD_mean[i, j] = np.nan
        else:
            # 3x3 window centred on (i, j) in the padded domain.
            k = np.array([[domain_avg[i, j], domain_avg[i, j + 1], domain_avg[i, j + 2]],
                      [domain_avg[i + 1, j], domain_avg[i + 1, j + 1], domain_avg[i + 1, j + 2]],
                      [domain_avg[i + 2, j], domain_avg[i + 2, j + 1], domain_avg[i + 2, j + 2]]])
            # Weights: centre counts twice, the two neighbours along the
            # first array axis count once, the rest are ignored.
            w = np.array([[0, 1, 0],
                          [0, 2, 0],
                          [0, 1, 0]])
            w_norm = w / (sum(sum(w))) # Normalize the kernel (weights sum to 1)
            # nansum ignores NaN neighbours inside the window.
            DoD_mean[i, j] = np.nansum(k * w_norm)
# Re-apply the channel mask (NaN outside the channel).
DoD_mean_msk = DoD_mean * array_msk_nan
# Create a GIS readable DoD mean (np.nan as -999)
DoD_mean_rst = np.where(np.isnan(DoD_mean_msk), NaN, DoD_mean_msk)
# Filtering data less than thrs_1 (below-threshold cells become 0)
mask_thrs_1 = abs(DoD_mean_msk) > thrs_1
DoD_mean_th1 = DoD_mean_msk * mask_thrs_1 # * array_msk_nan
DoD_mean_th1_rst = np.where(np.isnan(DoD_mean_th1), NaN, DoD_mean_th1)
# Neighbourhood coalition analysis
# Neighbourhood coalition analysis: a cell whose magnitude lies between
# thrs_1 and thrs_2 is kept only if enough of its 3x3 neighbours share its
# sign; otherwise it is treated as noise and zeroed.
domain_neigh = np.pad(DoD_mean_th1, 1, mode='edge') # Analysis domain
coal_neigh = np.zeros(DEM1.shape) # Initialized output array
# TODO Check that no value is excluded from this analysis
for i in range(0, dim_x):
    for j in range(0, dim_y):
        if np.isnan(DoD_mean_th1[i, j]):
            coal_neigh[i, j] = np.nan
        elif thrs_1 <= abs(DoD_mean_th1[i, j]) <= thrs_2:
            # 3x3 window of the already threshold-filtered values.
            ker = np.array([[domain_neigh[i, j], domain_neigh[i, j + 1], domain_neigh[i, j + 2]],
                            [domain_neigh[i + 1, j], domain_neigh[i + 1, j + 1], domain_neigh[i + 1, j + 2]],
                            [domain_neigh[i + 2, j], domain_neigh[i + 2, j + 1], domain_neigh[i + 2, j + 2]]])
            if DoD_mean_th1[i, j] < 0 and np.count_nonzero(ker < 0) > neigh_thrs:
                # Scour cell backed by enough scour neighbours: keep it.
                coal_neigh[i, j] = DoD_mean_th1[i, j]
            elif DoD_mean_th1[i, j] > 0 and np.count_nonzero(ker > 0) > neigh_thrs:
                # Deposition cell backed by enough deposition neighbours.
                coal_neigh[i, j] = DoD_mean_th1[i, j]
            else:
                # Isolated small-magnitude cell: treated as noise.
                coal_neigh[i, j] = 0
        else:
            # |value| above thrs_2 (or already zeroed below thrs_1): pass through.
            coal_neigh[i,j] = DoD_mean_th1[i,j]
            ...
# Avoiding zero-surrounded pixel
domain_neigh2 = np.pad(coal_neigh, 1, mode='edge') # Analysis domain
for i in range(0, dim_x):
    for j in range(0,dim_y):
        # Window with the centre forced to 0, so the counts below look at the
        # 8 neighbours plus the zeroed centre.
        ker = np.array([[domain_neigh2[i, j], domain_neigh2[i, j + 1], domain_neigh2[i, j + 2]],
                        [domain_neigh2[i + 1, j], 0, domain_neigh2[i + 1, j + 2]],
                        [domain_neigh2[i + 2, j], domain_neigh2[i + 2, j + 1], domain_neigh2[i + 2, j + 2]]])
        num = np.count_nonzero(ker == 0) + np.count_nonzero(~np.isnan(ker))
        # NOTE(review): ``num`` adds the zero count to the non-NaN count, so
        # ``num == 8`` is hard to interpret -- was the intent to require all
        # 8 neighbours to be zero? Verify before relying on this filter.
        if num == 8:
            coal_neigh[i,j]=0
        ...
DoD_out = coal_neigh # * array_msk_nan
# Create a GIS readable filtered DoD (np.nan as -999)
DoD_out_rst = np.where(np.isnan(DoD_out), NaN, DoD_out)
##############################################################################
# PLOT RAW DOD, MEAN DOD AND FILTERED DOD
##############################################################################
# Plot data using nicer colors
# Visual side-by-side check of the three DoD products with a discrete
# four-class colour map.
colors = ['linen', 'lightgreen', 'darkgreen', 'maroon']
class_bins = [-10.5, -1.5, 0, 1.5, 10.5]
cmap = ListedColormap(colors)
norm = BoundaryNorm(class_bins, len(colors))
fig, (ax1, ax2, ax3) = plt.subplots(3, 1)
panels = ((ax1, DoD_raw, 'raw DoD'),
          (ax2, DoD_mean_th1, 'mean DoD'),
          (ax3, DoD_out, 'Filtered DoD'))
for axis, raster, title in panels:
    axis.imshow(raster, cmap=cmap, norm=norm)
    axis.set_title(title)
fig.tight_layout()
plt.show()
plt.savefig(path_out + '/raster.pdf') # raster (png, jpg, rgb, tif), vector (pdf, eps), latex (pgf)
#plt.imshow(DoD_out, cmap='RdYlGn')
##############################################################################
# VOLUMES
##############################################################################
# DoD filtered name: coal_neigh
# Create new raster where apply volume calculation
# DoD>0 --> Deposition, DoD<0 --> Scour
# DoD>0 --> Deposition, DoD<0 --> Scour
# Replace NaN (outside the channel) with 0 so the volume sums ignore them.
DoD_vol = np.where(np.isnan(coal_neigh), 0, coal_neigh)
DEP = (DoD_vol>0)*DoD_vol
SCO = (DoD_vol<0)*DoD_vol
# Volumes in mm^3: sum of elevation changes times the cell footprint px_x*px_y.
print('Total volume [mm^3]:', np.sum(DoD_vol)*px_x*px_y)
print('Deposition volume [mm^3]:', np.sum(DEP)*px_x*px_y)
print('Scour volume [mm^3]:', np.sum(SCO)*px_x*px_y)
#volume_filt1 = np.sum(np.abs(filtered1_raster_volume))*px_x*px_y
#print('DoD filt_1 volume:', volume_filt1, 'mm^3')
##############################################################################
# SAVE DATA
##############################################################################
#percorso = '/home/erri/Documents/morphological_approach/3_output_data/q1.5/2_prc_laser/script_outputs_s7-s6/verifica/'
#np.savetxt(percorso + 'DoD_raw.txt', DoD_raw, fmt='%0.1f', delimiter='\t')
#np.savetxt(percorso + 'DoD_mean.txt', DoD_mean, fmt='%0.1f', delimiter='\t')
# RAW DoD
# Print raw DoD in txt file (NaN as np.nan)
# RAW DoD
# Print raw DoD in txt file (NaN as np.nan)
np.savetxt(path_out + '/' + DoD_name + 'raw.txt', DoD_raw, fmt='%0.1f', delimiter='\t')
# Printing raw DoD in txt file (NaN as -999)
np.savetxt(path_out + '/' + DoD_name + 'raw_rst.txt', DoD_raw_rst, fmt='%0.1f', delimiter='\t')
# MEAN DoD
# Print DoD mean in txt file (NaN as np.nan)
np.savetxt(path_out + '/' + DoD_name + 'DoD_mean.txt', DoD_mean_rst , fmt='%0.1f', delimiter='\t')
# MEAN + THRS1 DoD
# Print DoD mean, threshold 1 filtered in txt file (NaN as np.nan)
np.savetxt(path_out + '/' + DoD_name + 'DoD_mean_th1.txt', DoD_mean_th1, fmt='%0.1f', delimiter='\t')
# # Print filtered DoD (with NaN as -999)
np.savetxt(path_out + '/' + DoD_name + 'DoD_mean_th1_rst.txt', DoD_mean_th1_rst, fmt='%0.1f', delimiter='\t')
#MEAN + THRS_1 + NEIGH ANALYSIS DoD
# Print filtered DoD (with np.nan)...
np.savetxt(path_out + '/' + DoD_name + 'filt_.txt', DoD_out, fmt='%0.1f', delimiter='\t')
# Print filtered DoD (with NaN as -999)
# Bug fix: the original wrote to ``path_out + 'DoD_name' + ...`` -- the
# literal string 'DoD_name' with no '/' separator -- producing a file like
# ".../<out>DoD_namefilt_raw_rst.txt". Use the same pattern as the calls above.
np.savetxt(path_out + '/' + DoD_name + 'filt_raw_rst.txt', DoD_out_rst, fmt='%0.1f', delimiter='\t')
# # Print DoD and filtered DoD (with NaN as -999) in a GIS readable format (ASCII grid):
# with open(path_out + '/' + DoD_name + 'header.txt') as f_head:
# w_header = f_head.read() # Header
# with open(path_out + '/' + DoD_name + 'raw_rst.txt') as DoD:
# w_DoD_raw= DoD.read() # Raw DoD
# with open(path_out + 'DoD_name' + 'filt_raw_rst.txt') as DoD_filt:
# w_DoD_filt = DoD_filt.read() # Filtered DoD
# with open(path_out + '/' + DoD_name + 'DoD_mean_th1_rst.txt') as DoD_mn_th1:
# w_DoD_mean_th1 = DoD_mn_th1.read()
# with open(path_out + '/' + DoD_name + 'DoD_mean.txt') as DoD_mn:
# w_DoD_mean = DoD_mn.read() # DoD mean
# # Print GIS readable raster [raw DoD, mean DOD, filtered DoD]
# DoD = w_header + w_DoD_raw
# DoD_mean = w_header + w_DoD_mean
# DoD_mean_th1 = w_header + w_DoD_mean_th1
# DoD_filt = w_header + w_DoD_filt
# with open(path_out + '/' +'gis-'+ DoD_name + 'raw.txt', 'w') as fp:
# fp.write(DoD)
# with open(path_out + '/' + 'gis-' + DoD_name + 'mean.txt', 'w') as fp:
# fp.write(DoD_mean)
# with open(path_out + '/' + 'gis-' + DoD_name + 'mean_th1.txt', 'w') as fp:
# fp.write(DoD_mean_th1)
# with open(path_out + '/' + 'gis-' + DoD_name + 'filt.txt', 'w') as fp:
# fp.write(DoD_filt)
# Cross section analysis
#n_cross=1
#y_values = np.arange(0,144*5,5)
#cross_sct = DoD_out[:,n_cross]
#fig, ax = plt.subplots(figsize=(20,5))
#ax.plot(y_values, cross_sct)
#title = 'Section_'+str(n_cross)
#ax.set(xlabel='Cross section coordinates [mm]',
# ylabel='Elevation [mm]',
# title=title)
#ax.grid()
#fig.savefig(path_out+'/section'+n_cross+'.png')
#plt.show() | [
"numpy.count_nonzero",
"numpy.array",
"numpy.loadtxt",
"numpy.nanmin",
"os.path.exists",
"matplotlib.colors.ListedColormap",
"os.mkdir",
"numpy.nanmax",
"matplotlib.pyplot.savefig",
"numpy.isnan",
"numpy.savetxt",
"numpy.nansum",
"matplotlib.pyplot.show",
"os.path.join",
"numpy.logical_o... | [((959, 989), 'os.path.join', 'os.path.join', (['w_dir', 'DEM1_name'], {}), '(w_dir, DEM1_name)\n', (971, 989), False, 'import os\n'), ((1002, 1032), 'os.path.join', 'os.path.join', (['w_dir', 'DEM2_name'], {}), '(w_dir, DEM2_name)\n', (1014, 1032), False, 'import os\n'), ((1297, 1328), 'os.path.join', 'os.path.join', (['dir_out', 'name_out'], {}), '(dir_out, name_out)\n', (1309, 1328), False, 'import os\n'), ((1332, 1356), 'os.path.exists', 'os.path.exists', (['path_out'], {}), '(path_out)\n', (1346, 1356), False, 'import os\n'), ((2642, 2675), 'numpy.loadtxt', 'np.loadtxt', (['path_DEM1'], {'skiprows': '(8)'}), '(path_DEM1, skiprows=8)\n', (2652, 2675), True, 'import numpy as np\n'), ((2755, 2788), 'numpy.loadtxt', 'np.loadtxt', (['path_DEM2'], {'skiprows': '(8)'}), '(path_DEM2, skiprows=8)\n', (2765, 2788), True, 'import numpy as np\n'), ((3846, 3866), 'numpy.zeros', 'np.zeros', (['DEM1.shape'], {}), '(DEM1.shape)\n', (3854, 3866), True, 'import numpy as np\n'), ((4013, 4033), 'numpy.zeros', 'np.zeros', (['DEM1.shape'], {}), '(DEM1.shape)\n', (4021, 4033), True, 'import numpy as np\n'), ((4664, 4695), 'numpy.pad', 'np.pad', (['DoD_raw', '(1)'], {'mode': '"""edge"""'}), "(DoD_raw, 1, mode='edge')\n", (4670, 4695), True, 'import numpy as np\n'), ((4744, 4764), 'numpy.zeros', 'np.zeros', (['DEM1.shape'], {}), '(DEM1.shape)\n', (4752, 4764), True, 'import numpy as np\n'), ((5897, 5933), 'numpy.pad', 'np.pad', (['DoD_mean_th1', '(1)'], {'mode': '"""edge"""'}), "(DoD_mean_th1, 1, mode='edge')\n", (5903, 5933), True, 'import numpy as np\n'), ((5966, 5986), 'numpy.zeros', 'np.zeros', (['DEM1.shape'], {}), '(DEM1.shape)\n', (5974, 5986), True, 'import numpy as np\n'), ((7053, 7087), 'numpy.pad', 'np.pad', (['coal_neigh', '(1)'], {'mode': '"""edge"""'}), "(coal_neigh, 1, mode='edge')\n", (7059, 7087), True, 'import numpy as np\n'), ((8085, 8107), 'matplotlib.colors.ListedColormap', 'ListedColormap', (['colors'], {}), '(colors)\n', (8099, 8107), 
False, 'from matplotlib.colors import ListedColormap, BoundaryNorm\n'), ((8197, 8215), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(3)', '(1)'], {}), '(3, 1)\n', (8209, 8215), True, 'import matplotlib.pyplot as plt\n'), ((8485, 8495), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (8493, 8495), True, 'import matplotlib.pyplot as plt\n'), ((8496, 8533), 'matplotlib.pyplot.savefig', 'plt.savefig', (["(path_out + '/raster.pdf')"], {}), "(path_out + '/raster.pdf')\n", (8507, 8533), True, 'import matplotlib.pyplot as plt\n'), ((9820, 9911), 'numpy.savetxt', 'np.savetxt', (["(path_out + '/' + DoD_name + 'raw.txt')", 'DoD_raw'], {'fmt': '"""%0.1f"""', 'delimiter': '"""\t"""'}), "(path_out + '/' + DoD_name + 'raw.txt', DoD_raw, fmt='%0.1f',\n delimiter='\\t')\n", (9830, 9911), True, 'import numpy as np\n'), ((9953, 10053), 'numpy.savetxt', 'np.savetxt', (["(path_out + '/' + DoD_name + 'raw_rst.txt')", 'DoD_raw_rst'], {'fmt': '"""%0.1f"""', 'delimiter': '"""\t"""'}), "(path_out + '/' + DoD_name + 'raw_rst.txt', DoD_raw_rst, fmt=\n '%0.1f', delimiter='\\t')\n", (9963, 10053), True, 'import numpy as np\n'), ((10106, 10208), 'numpy.savetxt', 'np.savetxt', (["(path_out + '/' + DoD_name + 'DoD_mean.txt')", 'DoD_mean_rst'], {'fmt': '"""%0.1f"""', 'delimiter': '"""\t"""'}), "(path_out + '/' + DoD_name + 'DoD_mean.txt', DoD_mean_rst, fmt=\n '%0.1f', delimiter='\\t')\n", (10116, 10208), True, 'import numpy as np\n'), ((10292, 10397), 'numpy.savetxt', 'np.savetxt', (["(path_out + '/' + DoD_name + 'DoD_mean_th1.txt')", 'DoD_mean_th1'], {'fmt': '"""%0.1f"""', 'delimiter': '"""\t"""'}), "(path_out + '/' + DoD_name + 'DoD_mean_th1.txt', DoD_mean_th1,\n fmt='%0.1f', delimiter='\\t')\n", (10302, 10397), True, 'import numpy as np\n'), ((10436, 10549), 'numpy.savetxt', 'np.savetxt', (["(path_out + '/' + DoD_name + 'DoD_mean_th1_rst.txt')", 'DoD_mean_th1_rst'], {'fmt': '"""%0.1f"""', 'delimiter': '"""\t"""'}), "(path_out + '/' + DoD_name + 'DoD_mean_th1_rst.txt',\n 
DoD_mean_th1_rst, fmt='%0.1f', delimiter='\\t')\n", (10446, 10549), True, 'import numpy as np\n'), ((10621, 10714), 'numpy.savetxt', 'np.savetxt', (["(path_out + '/' + DoD_name + 'filt_.txt')", 'DoD_out'], {'fmt': '"""%0.1f"""', 'delimiter': '"""\t"""'}), "(path_out + '/' + DoD_name + 'filt_.txt', DoD_out, fmt='%0.1f',\n delimiter='\\t')\n", (10631, 10714), True, 'import numpy as np\n'), ((10751, 10852), 'numpy.savetxt', 'np.savetxt', (["(path_out + 'DoD_name' + 'filt_raw_rst.txt')", 'DoD_out_rst'], {'fmt': '"""%0.1f"""', 'delimiter': '"""\t"""'}), "(path_out + 'DoD_name' + 'filt_raw_rst.txt', DoD_out_rst, fmt=\n '%0.1f', delimiter='\\t')\n", (10761, 10852), True, 'import numpy as np\n'), ((1377, 1395), 'os.mkdir', 'os.mkdir', (['path_out'], {}), '(path_out)\n', (1385, 1395), False, 'import os\n'), ((3493, 3534), 'os.path.join', 'os.path.join', (['array_mask_path', 'array_mask'], {}), '(array_mask_path, array_mask)\n', (3505, 3534), False, 'import os\n'), ((3654, 3674), 'numpy.isnan', 'np.isnan', (['array_mask'], {}), '(array_mask)\n', (3662, 3674), True, 'import numpy as np\n'), ((3886, 3925), 'numpy.logical_or', 'np.logical_or', (['(DEM1 == NaN)', '(DEM2 == NaN)'], {}), '(DEM1 == NaN, DEM2 == NaN)\n', (3899, 3925), True, 'import numpy as np\n'), ((4057, 4096), 'numpy.logical_or', 'np.logical_or', (['(DEM1 == NaN)', '(DEM2 == NaN)'], {}), '(DEM1 == NaN, DEM2 == NaN)\n', (4070, 4096), True, 'import numpy as np\n'), ((4320, 4338), 'numpy.nanmin', 'np.nanmin', (['DoD_raw'], {}), '(DoD_raw)\n', (4329, 4338), True, 'import numpy as np\n'), ((4377, 4395), 'numpy.nanmax', 'np.nanmax', (['DoD_raw'], {}), '(DoD_raw)\n', (4386, 4395), True, 'import numpy as np\n'), ((5593, 5615), 'numpy.isnan', 'np.isnan', (['DoD_mean_msk'], {}), '(DoD_mean_msk)\n', (5601, 5615), True, 'import numpy as np\n'), ((5801, 5823), 'numpy.isnan', 'np.isnan', (['DoD_mean_th1'], {}), '(DoD_mean_th1)\n', (5809, 5823), True, 'import numpy as np\n'), ((7716, 7733), 'numpy.isnan', 'np.isnan', 
(['DoD_out'], {}), '(DoD_out)\n', (7724, 7733), True, 'import numpy as np\n'), ((8943, 8963), 'numpy.isnan', 'np.isnan', (['coal_neigh'], {}), '(coal_neigh)\n', (8951, 8963), True, 'import numpy as np\n'), ((3722, 3742), 'numpy.isnan', 'np.isnan', (['array_mask'], {}), '(array_mask)\n', (3730, 3742), True, 'import numpy as np\n'), ((4204, 4221), 'numpy.isnan', 'np.isnan', (['DoD_raw'], {}), '(DoD_raw)\n', (4212, 4221), True, 'import numpy as np\n'), ((4834, 4857), 'numpy.isnan', 'np.isnan', (['DoD_raw[i, j]'], {}), '(DoD_raw[i, j])\n', (4842, 4857), True, 'import numpy as np\n'), ((6151, 6179), 'numpy.isnan', 'np.isnan', (['DoD_mean_th1[i, j]'], {}), '(DoD_mean_th1[i, j])\n', (6159, 6179), True, 'import numpy as np\n'), ((7176, 7416), 'numpy.array', 'np.array', (['[[domain_neigh2[i, j], domain_neigh2[i, j + 1], domain_neigh2[i, j + 2]], [\n domain_neigh2[i + 1, j], 0, domain_neigh2[i + 1, j + 2]], [\n domain_neigh2[i + 2, j], domain_neigh2[i + 2, j + 1], domain_neigh2[i +\n 2, j + 2]]]'], {}), '([[domain_neigh2[i, j], domain_neigh2[i, j + 1], domain_neigh2[i, j +\n 2]], [domain_neigh2[i + 1, j], 0, domain_neigh2[i + 1, j + 2]], [\n domain_neigh2[i + 2, j], domain_neigh2[i + 2, j + 1], domain_neigh2[i +\n 2, j + 2]]])\n', (7184, 7416), True, 'import numpy as np\n'), ((4925, 5166), 'numpy.array', 'np.array', (['[[domain_avg[i, j], domain_avg[i, j + 1], domain_avg[i, j + 2]], [\n domain_avg[i + 1, j], domain_avg[i + 1, j + 1], domain_avg[i + 1, j + 2\n ]], [domain_avg[i + 2, j], domain_avg[i + 2, j + 1], domain_avg[i + 2, \n j + 2]]]'], {}), '([[domain_avg[i, j], domain_avg[i, j + 1], domain_avg[i, j + 2]], [\n domain_avg[i + 1, j], domain_avg[i + 1, j + 1], domain_avg[i + 1, j + 2\n ]], [domain_avg[i + 2, j], domain_avg[i + 2, j + 1], domain_avg[i + 2, \n j + 2]]])\n', (4933, 5166), True, 'import numpy as np\n'), ((5220, 5263), 'numpy.array', 'np.array', (['[[0, 1, 0], [0, 2, 0], [0, 1, 0]]'], {}), '([[0, 1, 0], [0, 2, 0], [0, 1, 0]])\n', (5228, 5263), True, 'import 
numpy as np\n'), ((5403, 5424), 'numpy.nansum', 'np.nansum', (['(k * w_norm)'], {}), '(k * w_norm)\n', (5412, 5424), True, 'import numpy as np\n'), ((7466, 7492), 'numpy.count_nonzero', 'np.count_nonzero', (['(ker == 0)'], {}), '(ker == 0)\n', (7482, 7492), True, 'import numpy as np\n'), ((9062, 9077), 'numpy.sum', 'np.sum', (['DoD_vol'], {}), '(DoD_vol)\n', (9068, 9077), True, 'import numpy as np\n'), ((9124, 9135), 'numpy.sum', 'np.sum', (['DEP'], {}), '(DEP)\n', (9130, 9135), True, 'import numpy as np\n'), ((9177, 9188), 'numpy.sum', 'np.sum', (['SCO'], {}), '(SCO)\n', (9183, 9188), True, 'import numpy as np\n'), ((6295, 6553), 'numpy.array', 'np.array', (['[[domain_neigh[i, j], domain_neigh[i, j + 1], domain_neigh[i, j + 2]], [\n domain_neigh[i + 1, j], domain_neigh[i + 1, j + 1], domain_neigh[i + 1,\n j + 2]], [domain_neigh[i + 2, j], domain_neigh[i + 2, j + 1],\n domain_neigh[i + 2, j + 2]]]'], {}), '([[domain_neigh[i, j], domain_neigh[i, j + 1], domain_neigh[i, j + \n 2]], [domain_neigh[i + 1, j], domain_neigh[i + 1, j + 1], domain_neigh[\n i + 1, j + 2]], [domain_neigh[i + 2, j], domain_neigh[i + 2, j + 1],\n domain_neigh[i + 2, j + 2]]])\n', (6303, 6553), True, 'import numpy as np\n'), ((7513, 7526), 'numpy.isnan', 'np.isnan', (['ker'], {}), '(ker)\n', (7521, 7526), True, 'import numpy as np\n'), ((6638, 6663), 'numpy.count_nonzero', 'np.count_nonzero', (['(ker < 0)'], {}), '(ker < 0)\n', (6654, 6663), True, 'import numpy as np\n'), ((6776, 6801), 'numpy.count_nonzero', 'np.count_nonzero', (['(ker > 0)'], {}), '(ker > 0)\n', (6792, 6801), True, 'import numpy as np\n')] |
"""unit test for bandage.Supply"""
import bandage
# Point the supplier at the published GitHub release tag and the local
# VERSION file of the test target.
supplier = bandage.Supply(
    "https://github.com/perpetualCreations/bandage/releases/tag/BANDAGE",
    "F://bandage//tests//test_target//VERSION")
# Exercise the Supply API and print the results for manual inspection.
print(supplier.realize())
print(supplier.pre_collect_dump())
print(supplier.version_gap)
| [
"bandage.Supply"
] | [((63, 200), 'bandage.Supply', 'bandage.Supply', (['"""https://github.com/perpetualCreations/bandage/releases/tag/BANDAGE"""', '"""F://bandage//tests//test_target//VERSION"""'], {}), "(\n 'https://github.com/perpetualCreations/bandage/releases/tag/BANDAGE',\n 'F://bandage//tests//test_target//VERSION')\n", (77, 200), False, 'import bandage\n')] |
#!/usr/bin/env python3
# Pascals triangle
# 2022 <NAME> -- MIT License
import colorama
from colorama import Fore
from colorama import Style
C = "A "
SEED = 3
ITERATIONS = 49
SCREEN_WIDTH = 160
class TNode:
    """One cell of the triangle; its value is the sum of its two parents."""

    def __init__(self, left = None, right = None):
        # Parentless nodes (the triangle edges) keep the SEED value.
        self.value = SEED
        self.parent_left = left
        self.parent_right = right

    def calculate_value(self):
        """Sum the values of whichever parents exist; keep SEED if the sum is 0."""
        total = 0
        if self.parent_left is not None:
            total += self.parent_left.value
        if self.parent_right is not None:
            total += self.parent_right.value
        if total != 0:
            self.value = total
def fancy_print(r):
    """Print one triangle row centred on screen, colouring each value by e % 7.

    Two parallel strings are built: ``o`` carries the ANSI colour codes for
    display, ``oo`` is the plain text of the same visible width -- the escape
    sequences would otherwise skew ``str.center``.
    """
    # One colour per residue class of e % 7 (replaces the original 7-branch
    # if/elif chain with a table lookup; output is byte-identical).
    palette = (Fore.WHITE, Fore.RED, Fore.BLUE, Fore.MAGENTA,
               Fore.YELLOW, Fore.GREEN, Fore.CYAN)
    o = ""   # with ansi for color
    oo = ""  # without ansi for math
    for e in r:
        oo += C
        # Odd values render bright, even values normal.
        o += Style.BRIGHT if e % 2 else Style.NORMAL
        o += palette[e % 7] + C
    # Centre the plain string and reuse its leading whitespace for the
    # coloured one, since the ansi colors throw the centring off.
    b = oo.rstrip().center(SCREEN_WIDTH)
    leading_white = len(b) - len(b.lstrip())
    print(f"{' '*leading_white}{o.rstrip()}")
colorama.init()
# Flat list of every node in creation order, to make traversal easy.
children = []
# Row 1 has no parents: pad with None on both sides.
prev_row = [None, None]
for row in range(1,ITERATIONS+1):
    # The first child created never has a parent to the left, just like the
    # last one never has a parent to the right (hence the None padding).
    current_row = [None]
    for node in range(0,row):
        child = TNode(prev_row[node], prev_row[node+1])
        current_row.append(child)
        child.calculate_value()
        children.append(child)
    prev_row = current_row
    prev_row.append(None)
# Re-chunk the flat node list into rows: a node with no right parent marks
# the end of its triangle row.
row = []
for c in children:
    row.append(c.value)
    if c.parent_right is None:
        fancy_print(row)
        row=[]
| [
"colorama.init"
] | [((1709, 1724), 'colorama.init', 'colorama.init', ([], {}), '()\n', (1722, 1724), False, 'import colorama\n')] |
"""Test that parsing Percolator input files works correctly"""
import pytest
import mokapot
import pandas as pd
@pytest.fixture
def std_pin(tmp_path):
    """Write a minimal two-PSM pin file and return its path."""
    out_file = tmp_path / "std_pin"
    rows = [
        "sPeCid\tLaBel\tpepTide\tsCore\tscanNR\tpRoteins",
        "DefaultDirection\t-\t-\t-\t1\t-\t-",
        "a\t1\tABC\t5\t2\tprotein1\tprotein2",
        "b\t-1\tCBA\t10\t3\tdecoy_protein1\tdecoy_protein2",
    ]
    with open(str(out_file), "w+") as pin:
        pin.write("\n".join(rows))
    return out_file
def test_pin_parsing(std_pin):
    """Test pin parsing"""
    df = mokapot.read_pin(std_pin, to_df=True)
    # Column capitalisation is preserved and the label column coerced to bool.
    assert df["LaBel"].dtype == "bool"
    assert len(df) == 2
    # One target (label True) and one decoy (label False) PSM.
    # Bug fix: the second assertion duplicated the first instead of checking
    # the decoy (negated) side.
    assert len(df[df["LaBel"]]) == 1
    assert len(df[~df["LaBel"]]) == 1
    dat = mokapot.read_pin(std_pin)
    # The score column is the only feature the dataset should expose.
    pd.testing.assert_frame_equal(df.loc[:, ("sCore",)], dat.features)
def test_pin_wo_dir():
    """Test a PIN file without a DefaultDirection line"""
    # Parsing should succeed even though the optional DefaultDirection row
    # is absent from this file.
    dat = mokapot.read_pin("data/scope2_FP97AA.pin")
| [
"mokapot.read_pin",
"pandas.testing.assert_frame_equal"
] | [((639, 676), 'mokapot.read_pin', 'mokapot.read_pin', (['std_pin'], {'to_df': '(True)'}), '(std_pin, to_df=True)\n', (655, 676), False, 'import mokapot\n'), ((825, 850), 'mokapot.read_pin', 'mokapot.read_pin', (['std_pin'], {}), '(std_pin)\n', (841, 850), False, 'import mokapot\n'), ((855, 921), 'pandas.testing.assert_frame_equal', 'pd.testing.assert_frame_equal', (["df.loc[:, ('sCore',)]", 'dat.features'], {}), "(df.loc[:, ('sCore',)], dat.features)\n", (884, 921), True, 'import pandas as pd\n'), ((1015, 1057), 'mokapot.read_pin', 'mokapot.read_pin', (['"""data/scope2_FP97AA.pin"""'], {}), "('data/scope2_FP97AA.pin')\n", (1031, 1057), False, 'import mokapot\n')] |
import napari
from nilearn import image
from skimage import segmentation
# Load the FLAIR MRI volume as a plain array.
# NOTE(review): ``get_data()`` is deprecated in recent nilearn releases in
# favour of ``get_fdata()`` -- confirm against the pinned nilearn version.
img = image.image.load_img('assets/BraTS19_2013_10_1_flair.nii').get_data()
viewer = napari.view_image(img)
# Over-segment the volume into ~10k SLIC superpixels (single-channel input).
# NOTE(review): ``multichannel`` is deprecated in newer scikit-image in
# favour of ``channel_axis`` -- verify the installed version.
pix = segmentation.slic(img, n_segments=10000, compactness=0.002, multichannel=False,
                        )
# Boundary mask between neighbouring segments, shown as a labels layer.
pix_boundaries = segmentation.find_boundaries(pix)
viewer.add_labels(pix_boundaries)
| [
"skimage.segmentation.find_boundaries",
"napari.view_image",
"nilearn.image.image.load_img",
"skimage.segmentation.slic"
] | [((159, 181), 'napari.view_image', 'napari.view_image', (['img'], {}), '(img)\n', (176, 181), False, 'import napari\n'), ((189, 268), 'skimage.segmentation.slic', 'segmentation.slic', (['img'], {'n_segments': '(10000)', 'compactness': '(0.002)', 'multichannel': '(False)'}), '(img, n_segments=10000, compactness=0.002, multichannel=False)\n', (206, 268), False, 'from skimage import segmentation\n'), ((312, 345), 'skimage.segmentation.find_boundaries', 'segmentation.find_boundaries', (['pix'], {}), '(pix)\n', (340, 345), False, 'from skimage import segmentation\n'), ((80, 138), 'nilearn.image.image.load_img', 'image.image.load_img', (['"""assets/BraTS19_2013_10_1_flair.nii"""'], {}), "('assets/BraTS19_2013_10_1_flair.nii')\n", (100, 138), False, 'from nilearn import image\n')] |
#!/usr/bin/env python
import argparse
import csv
import datetime
import dateutil.relativedelta as relativedelta
import dateutil.rrule as rrule
def parse_args():
parser = argparse.ArgumentParser(description='Generates an empty class schedule CSV file')
parser.add_argument('-s', '--startdate',
type=datetime.datetime.fromisoformat,
help="First day of classes (YYYY-MM-DD)",
required=True)
parser.add_argument('-e', '--enddate',
type=datetime.datetime.fromisoformat,
help="Last day of classes (YYYY-MM-DD)",
required=True)
parser.add_argument('-d', '--days',
type=lambda days: [int(day) for day in days.split(',')],
help='Comma-separated days of the week in numbers (e.g., 0,2 is Monday, Wednesday)',
required=True)
parser.add_argument('-c','--cols',
type=lambda cols: [col.strip() for col in cols.split(',')],
help='Comma-separated names of additional columns for the CSV file (e.g., Topic,"Read before class",Exercise)',
required=True)
parser.add_argument('-o', '--output',
type=argparse.FileType('w'),
help="Output CSV file name (will be overwritten)",
required=True)
args = parser.parse_args()
return (args.startdate, args.enddate, args.days, args.cols, args.output)
if __name__ == '__main__':
start, end, days, cols, csvfile = parse_args()
rr = rrule.rrule(rrule.WEEKLY, byweekday=days, dtstart=start)
days = rr.between(start, end, inc=True)
csvwriter = csv.writer(csvfile)
csvwriter.writerow(['Date'] + cols)
for day in days:
csvwriter.writerow([day.strftime("%A, %B %d")] + ['']*len(cols))
csvfile.close()
| [
"argparse.FileType",
"csv.writer",
"argparse.ArgumentParser",
"dateutil.rrule.rrule"
] | [((176, 262), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Generates an empty class schedule CSV file"""'}), "(description=\n 'Generates an empty class schedule CSV file')\n", (199, 262), False, 'import argparse\n'), ((1416, 1472), 'dateutil.rrule.rrule', 'rrule.rrule', (['rrule.WEEKLY'], {'byweekday': 'days', 'dtstart': 'start'}), '(rrule.WEEKLY, byweekday=days, dtstart=start)\n', (1427, 1472), True, 'import dateutil.rrule as rrule\n'), ((1534, 1553), 'csv.writer', 'csv.writer', (['csvfile'], {}), '(csvfile)\n', (1544, 1553), False, 'import csv\n'), ((1110, 1132), 'argparse.FileType', 'argparse.FileType', (['"""w"""'], {}), "('w')\n", (1127, 1132), False, 'import argparse\n')] |
# CTK: Cherokee Toolkit
#
# Authors:
# <NAME>
# <NAME>
#
# Copyright (C) 2010-2011 <NAME>
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of version 2 of the GNU General Public
# License as published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
# 02110-1301, USA.
#
from Widget import Widget
from Box import Box
from RawHTML import RawHTML
from Container import Container
from Server import cfg
from util import *
HTML = """
<input type="radio" id="%(id)s" %(props)s />
"""
class Radio (Widget):
def __init__ (self, props={}):
# Sanity check
assert type(props) == dict
Widget.__init__ (self)
self._props = props.copy()
def Render (self):
# Deal with a couple of exceptions
new_props = self._props.copy()
if new_props.has_key('checked') and int(new_props.pop('checked')):
new_props['checked'] = "checked"
if new_props.has_key('disabled') and int(new_props.pop('disabled')):
new_props['disabled'] = "disabled"
# Render the widget
render = Widget.Render (self)
render.html += HTML %({'id': self.id,
'props': props_to_str (new_props)})
return render
class RadioGroupCfg (Box):
def __init__ (self, key, options, _props={}):
Box.__init__ (self)
self.props = _props.copy()
self._options = options
if not 'id' in self.props:
self.id = 'RadioGroup_%s' %(self.uniq_id)
cfg_value = cfg.get_val (key)
for o in options:
val, desc = o
new_props = {}
new_props['name'] = key
new_props['value'] = val
# Initial value
if cfg_value != None and \
cfg_value == val:
new_props['checked'] = 1
elif 'checked' in self.props:
if self.props['checked'] == val:
new_props['checked'] = 1
self += RadioText (desc, new_props)
class RadioText (Box):
def __init__ (self, txt, props={}):
Box.__init__ (self)
self.radio = Radio (props.copy())
self += self.radio
self.text = Box ({'class': 'radio-text'}, RawHTML(txt))
self += self.text
self.text.bind ('click', "$('#%s').attr('checked', true).trigger('change');" %(self.radio.id))
| [
"Widget.Widget.__init__",
"Server.cfg.get_val",
"Widget.Widget.Render",
"RawHTML.RawHTML",
"Box.Box.__init__"
] | [((1086, 1107), 'Widget.Widget.__init__', 'Widget.__init__', (['self'], {}), '(self)\n', (1101, 1107), False, 'from Widget import Widget\n'), ((1542, 1561), 'Widget.Widget.Render', 'Widget.Render', (['self'], {}), '(self)\n', (1555, 1561), False, 'from Widget import Widget\n'), ((1788, 1806), 'Box.Box.__init__', 'Box.__init__', (['self'], {}), '(self)\n', (1800, 1806), False, 'from Box import Box\n'), ((1990, 2006), 'Server.cfg.get_val', 'cfg.get_val', (['key'], {}), '(key)\n', (2001, 2006), False, 'from Server import cfg\n'), ((2564, 2582), 'Box.Box.__init__', 'Box.__init__', (['self'], {}), '(self)\n', (2576, 2582), False, 'from Box import Box\n'), ((2705, 2717), 'RawHTML.RawHTML', 'RawHTML', (['txt'], {}), '(txt)\n', (2712, 2717), False, 'from RawHTML import RawHTML\n')] |
def colorprint(text, color, bgcolor, *options):
import os
bef = "\x1B["
bg = bef + bgcolorcode(bgcolor)
cc = bef + colorcode(color)
styles = optioncode(*options)
end = bef + "0m"
os.system("echo \"" + styles + bg + cc + text + end + "\"")
def optioncode(*options):
option = ""
alloptions = ["bold", "dim", "underline", "blink", "reverse", "hidden"]
allcodes = ["\x1B[1m", "\x1B[2m", "\x1B[4m", "\x1B[5m", "\x1B[7m", "\x1B[8m"]
for i in range(len(options)):
for a in range(len(alloptions)):
if (alloptions[a] in options[i]):
option = option + allcodes[a]
return option
def colorcode(color):
colorsA = ["black", "red", "green", "yellow", "blue", "magenta", "cyan", "light gray"]
colorsB = ["dark gray", "light red", "light green", "light yellow", "light blue", "light magenta", "light cyan", "white"]
if color == "default":
option = 39
else:
if (color in colorsA):
option = colorsA.index(color) + 30
elif (color in colorsB):
option = colorsB.index(color) + 90
else:
option = 404
return str(option) + "m"
def bgcolorcode(color):
colorsA = ["black", "red", "green", "yellow", "blue", "magenta", "cyan", "light gray"]
colorsB = ["dark gray", "light red", "light green", "light yellow", "light blue", "light magenta", "light cyan", "white"]
if color == "default":
option = 49
else:
if (color in colorsA):
option = colorsA.index(color) + 40
elif (color in colorsB):
option = colorsB.index(color) + 100
else:
option = 404
return str(option) + "m"
| [
"os.system"
] | [((208, 265), 'os.system', 'os.system', (['(\'echo "\' + styles + bg + cc + text + end + \'"\')'], {}), '(\'echo "\' + styles + bg + cc + text + end + \'"\')\n', (217, 265), False, 'import os\n')] |
import flask
import threading
app = flask.Flask('main.py')
@app.route('/')
def index():
return flask.render_template("index.html")
t = threading.Thread(target=app.run())
t.start()
| [
"flask.render_template",
"flask.Flask"
] | [((37, 59), 'flask.Flask', 'flask.Flask', (['"""main.py"""'], {}), "('main.py')\n", (48, 59), False, 'import flask\n'), ((102, 137), 'flask.render_template', 'flask.render_template', (['"""index.html"""'], {}), "('index.html')\n", (123, 137), False, 'import flask\n')] |
from django.urls import path, include
from .views import ReportCreate, ReportList, ImpactCreate, ReportEdit, \
ReportDetail, ReportAddImpact, DeleteImpact, ReportSubmit, FinalReport
from .plots import meta_plot
urlpatterns = [
path('impact/', ImpactCreate.as_view(), name='report-impact'),
path('create/', ReportCreate.as_view(), name='report-create'),
path('<int:pk>/edit/', ReportEdit.as_view(), name='report-edit'),
path('<int:pk>/submit/', ReportSubmit.as_view(), name='report-submit'),
path('<int:pk>/impact/', ReportAddImpact.as_view(), name='report-add-impact'),
path('<int:pk>/impact/delete/', DeleteImpact.as_view(), name='impact-delete'),
path('<int:pk>/', ReportDetail.as_view(), name='report-view'),
path('list/', ReportList.as_view(), name='report-list'),
path('final/<int:year>/', FinalReport.as_view(), name="final-report"),
# path('countries/<int:year>/', countries_summary, name="cohort_countries"),
path('final/<int:year>/demo/<str:plotname>.png', meta_plot, name="meta_plot")
]
| [
"django.urls.path"
] | [((968, 1045), 'django.urls.path', 'path', (['"""final/<int:year>/demo/<str:plotname>.png"""', 'meta_plot'], {'name': '"""meta_plot"""'}), "('final/<int:year>/demo/<str:plotname>.png', meta_plot, name='meta_plot')\n", (972, 1045), False, 'from django.urls import path, include\n')] |
import numpy as np
def load_lda(path):
rows = []
with open(path, 'r') as f:
for line in f:
line = line.strip(" []\n")
if line:
rows.append(np.fromstring(line, dtype=np.float32, sep=' '))
matrix = np.array(rows).T
return matrix[:-1], matrix[-1]
| [
"numpy.array",
"numpy.fromstring"
] | [((259, 273), 'numpy.array', 'np.array', (['rows'], {}), '(rows)\n', (267, 273), True, 'import numpy as np\n'), ((197, 243), 'numpy.fromstring', 'np.fromstring', (['line'], {'dtype': 'np.float32', 'sep': '""" """'}), "(line, dtype=np.float32, sep=' ')\n", (210, 243), True, 'import numpy as np\n')] |
# This script looks for errors in the examples creates by `create_run_all_examples.py`
import argparse
import glob
parser = argparse.ArgumentParser()
parser.add_argument(
'working_dir',
type=str,
metavar='PATH',
help='path to examples working directory',
)
args = parser.parse_args()
files_with_errors = []
for logfile in glob.glob(f"{args.working_dir}/*/output/log/activitysim.log"):
with open(logfile, 'rt') as f:
printing_traceback = False
found_traceback = False
for n, line in enumerate(f.readlines(), start=1):
if printing_traceback:
print(line.rstrip())
if not line.startswith(" "):
printing_traceback = False
else:
if "Traceback" in line:
print(f"======= TRACEBACK in {logfile} at line {n} =======")
print(line.rstrip())
printing_traceback = True
found_traceback = True
if not found_traceback:
print(f"OK: {logfile}")
else:
files_with_errors.append(logfile)
if files_with_errors:
print("=====================================================")
print(f"Found {len(files_with_errors)} examples with errors:")
for f in files_with_errors:
print(f"- {f}")
print("=====================================================")
| [
"glob.glob",
"argparse.ArgumentParser"
] | [((126, 151), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (149, 151), False, 'import argparse\n'), ((343, 404), 'glob.glob', 'glob.glob', (['f"""{args.working_dir}/*/output/log/activitysim.log"""'], {}), "(f'{args.working_dir}/*/output/log/activitysim.log')\n", (352, 404), False, 'import glob\n')] |
#!/usr/bin/python
from __future__ import absolute_import
import torch
import torch.nn as nn
from torchvision import transforms
from torchvision.models import vgg16_bn
from torch.utils.data import Dataset, DataLoader
from PIL import Image
import numpy as np
import matplotlib.pyplot as plt
import os
from statistics import stdev, mean
import csv
import re
def plot_imgs():
img_path = "pT1_dataset/dataset/img0/img0_11_normal/img0_11_normal-image.jpg"
# mask_path = "pT1_dataset/dataset/img0/img0_11_normal/img0_11_normal-gt.png"
img = Image.open(img_path)
# mask = Image.open(mask_path)
img = np.asarray(img)
# mask = np.asarray(mask)
# mask = np.repeat(mask[:,:, np.newaxis], 3, axis=2)
# img = np.where(mask, img, mask)
plt.imshow(img)
plt.show()
img_path = "pT1_dataset/dataset/img1/img1_11_abnormal/img1_11_abnormal-image.jpg"
# mask_path = "pT1_dataset/dataset/img1/img1_11_abnormal/img1_11_abnormal-gt.png"
img = Image.open(img_path)
# mask = Image.open(mask_path)
img = np.asarray(img)
# mask = np.asarray(mask)
# mask = np.repeat(mask[:,:, np.newaxis], 3, axis=2)
# img = np.where(mask, img, mask)
plt.imshow(img)
plt.show()
def image_sizes():
path = "pT1_dataset/dataset/"
smallest_width = 10000
smallest_hight = 10000
for patient in os.listdir(path):
if not patient.endswith(".csv"):
for img_folder in os.listdir(path + patient):
img = Image.open(path+patient+ "/" + img_folder + "/" + img_folder + "-image.jpg")
img = np.asarray(img)
if img.shape[0] < smallest_hight:
smallest_hight = img.shape[0]
pic_h = img_folder
if img.shape[1] < smallest_width:
smallest_width = img.shape[1]
pic_w = img_folder
print(smallest_hight, pic_h)
print(smallest_width, pic_w)
# for img in os.listdir(path + paient)
# if not f.startswith("."):
#
# if os.path.isfile(path + f) and k in f:
# with open(path + f, "r") as file:
def plot_all_images():
"""
plots all images of the dataset
:return:
"""
path = "pT1_dataset/dataset/"
counter = 1
for patient in os.listdir(path): # iterate over every patient
if not patient.endswith(".csv"): # only consider the img folders
for img_folder in os.listdir(path + patient): # iterate ofrer ever img folder
img = Image.open(path+patient+ "/" + img_folder + "/" + img_folder + "-image.jpg") # open the image (PIL)
img = np.asarray(img) # convert from PILformat to numpy array
if counter <= 100:
plt.rc("font", size=5) # determine font size
plt.subplot(10,10, counter)
plt.imshow(img)
if "abnormal" in img_folder:
plt.title("dysplastic")
else:
plt.title("normal")
plt.axis("off")
counter+=1
else:
plt.show()
counter=1
plt.rc("font", size=5)
plt.subplot(10, 10, counter)
plt.imshow(img)
if "abnormal" in img_folder:
plt.title("dysplastic")
else:
plt.title("normal")
plt.axis("off")
counter+=1
############################################################################################
def get_opt_param(m, fold):
"""
reads a csv file to return the optimal parameters for a given data-split
"""
k = "fold" + str(fold)
path = "Hyperparameters/CNN/"
for f in os.listdir(path):
if not f.startswith("."):
if os.path.isfile(path + f) and k in f and f.endswith("it50.csv") and f.startswith(m): #TODO: add VGG16_bn
with open(path + f, "r") as file:
hp = list(csv.reader(file))
hp=hp[1]
m = hp[0]
fold = int(hp[1])
opt_lr = float(hp[4])
opt_lr_decay = float(hp[5])
opt_num_epochs = int(float(hp[6]))
opt_step_size = int(float(hp[7]))
opt_weight_decay = float(hp[8])
return m, fold, opt_lr, opt_lr_decay, opt_num_epochs, opt_step_size, opt_weight_decay
def train_val_test_split(fold):
"""
:param fold: determines which data split is used
:return: three (train, val, test) lists containing the IDs of the images,
the ID is like he path to the image, ID looks like: img0/img0_0_normal/img0_0_normal-image.jpg
"""
# open the csv file
with open("pT1_dataset/dataset_split.csv") as csv_file:
csv_reader = csv.reader(csv_file, delimiter=",")
line_count = 0
dic = {}
# iterate over every row of the csv file
for row in csv_reader:
if line_count == 0: # ignore the header
line_count += 1
else: # use a dictionary to save the information about how to split the data into train test and val
dic[row[0]] = [row[fold + 1]] # get a dictionary containing all the needed information to split the data
path = "pT1_dataset/dataset/"
train_IDs, val_IDs, test_IDs = [],[],[]
for patient in os.listdir(path): # iterate over the diretory (iterate over every patient)
if not patient.endswith(".csv"): # ignore the csv file in this folder
if dic[patient][0]=="train": # check if the patient belongs to train
for img_folder in os.listdir(path + patient): # iterate over all images from this patient
train_IDs.append(patient + "/" + img_folder + "/" + img_folder + "-image.jpg") # append the ID
if dic[patient][0]=="val":
for img_folder in os.listdir(path + patient):
val_IDs.append(patient + "/" + img_folder + "/" + img_folder + "-image.jpg")
if dic[patient][0] == "test":
for img_folder in os.listdir(path + patient):
test_IDs.append(patient + "/" + img_folder + "/" + img_folder + "-image.jpg")
return train_IDs, val_IDs, test_IDs
############################################################################################
class ImageDataset(Dataset):
def __init__(self, list_IDs, transform):
self.list_IDs = list_IDs
self.transform = transform
def __len__(self):
return len(self.list_IDs)
def __getitem__(self, index):
ID = self.list_IDs[index]
img = Image.open("pT1_dataset/dataset/" + ID)
# img = transforms.Resize([64,60])(img) #TODO resize images
if "abnormal" in ID:
label = [1,0]
cls = 1
else:
label = [0,1]
cls = 0
label = torch.tensor(label, dtype=torch.float)
img = self.transform(img)
_, _, filename = ID.split("/")
img_patch = [int(i) for i in re.findall(r"[0-9]+", filename)]
img_patch.append(cls)
name = torch.tensor([img_patch], dtype=torch.float)
# print(img.shape)
return img, label, name
class CNN(nn.Module):
"""
feed forward conv net
"""
def __init__(self, img_size):
super(CNN, self).__init__()
self.final_img_size = int(img_size/8)
self.out_conv1= 16
self.out_conv2 = 32
self.out_conv3 = 64
self.cnn_layers = nn.Sequential(
nn.Conv2d(in_channels=3, out_channels=self.out_conv1, kernel_size=3, stride=1, padding=1),
nn.ReLU(),
nn.MaxPool2d(kernel_size=2, stride=2),
nn.Dropout2d(0.1),
nn.Conv2d(in_channels=self.out_conv1, out_channels=self.out_conv2, kernel_size=3, stride=1, padding=1),
nn.ReLU(),
nn.MaxPool2d(kernel_size=2, stride=2),
nn.Dropout2d(0.1),
nn.Conv2d(in_channels=self.out_conv2, out_channels=self.out_conv3, kernel_size=3, stride=1, padding=1),
nn.ReLU(),
nn.MaxPool2d(kernel_size=2, stride=2),
nn.Dropout2d(0.1),
)
self.linear_layers = nn.Sequential(
nn.Dropout(p=0.5),
nn.Linear(in_features=self.final_img_size*self.final_img_size*self.out_conv3, out_features=self.final_img_size*self.final_img_size*16),
nn.ReLU(),
nn.Dropout(p=0.5),
nn.Linear(in_features=self.final_img_size*self.final_img_size*16, out_features=2),
nn.Softmax()
)
def forward(self, input):
output = self.cnn_layers(input)
output_flat = output.reshape(-1, self.final_img_size*self.final_img_size*self.out_conv3)
output = self.linear_layers(output_flat)
return output
############################################################################################
def train_and_test_1Fold(fold, m, device):
# get the hyperparameters
_, _, lr, lr_decay, num_epochs, step_size, weight_decay = get_opt_param(m,fold)
bool=True
while bool:
# train and validate the CNN, save the model weights that have highest accuracy on validation set
val_res, bool, train_accs, val_accs, train_losses, val_losses = train_and_val_1Fold(fold=fold, m=m,
num_epochs=num_epochs,
lr=lr,
lr_decay=lr_decay,
weight_decay=weight_decay,
device=device,
step_size=step_size,
testing=True)
if bool:
print("FOLD" + str(fold) + "rerun due to dead neurons")
print("val acc:", val_res[2])
img_size = 128
train_IDs, val_IDs, test_IDs = train_val_test_split(fold) # split data
val_transform = transforms.Compose([transforms.Resize((img_size, img_size)),
transforms.ToTensor()])
test_data = ImageDataset(test_IDs, val_transform) # get data objects
# data loaders
batchsize = 64
test_loader = DataLoader(test_data, batchsize, shuffle=True) # create Dataloader
model = torch.load("Parameters/CNN/" + m + "_fold"+str(fold) + ".pt") # load model
crit = torch.nn.CrossEntropyLoss(reduction="sum") # define loss function
test_acc, test_loss, img_name, TP_TN_FP_FN = evaluate(model, test_loader, crit, device, testing=True) # evaluation on test_set
print("test_acc:", test_acc)
# print(TP_TN_FP_FN)
return test_acc, train_accs, val_accs, train_losses, val_losses, img_name, TP_TN_FP_FN
def test(runs, m, device):
"""
write train accs and val accs of all runs to one csv file per fold
:param runs: number of times train, val and test is repeated
:param device: "cuda" or "cpu"
:return:
"""
# m = "CNN"
folder_short="CNN/"
all_test_accs = []
test_accs_per_fold = [[],[],[],[]]
path_test = "out/" + folder_short + m + "_test_data.csv"
for fold in range(4):
print("Fold:", fold)
# paths of csv files to which will be written to
path_train_acc = "out/" + folder_short + m + "_train_acc_fold" + str(fold) + ".csv"
path_train_loss = "out/" + folder_short + m + "_train_loss_fold" + str(fold) + ".csv"
path_val_acc = "out/" + folder_short + m + "_val_acc_fold" + str(fold) + ".csv"
path_val_loss = "out/" + folder_short + m + "_val_loss_fold" + str(fold) + ".csv"
path_TP_TN_FP_FN = "out/" + folder_short + m + "_TP_TN_FP_FN_fold" + str(fold) + ".csv"
# write train acc, train loss, val acc and val loss to separate csv files
with open(path_train_acc, "w") as train_acc_file, \
open(path_train_loss, "w") as train_loss_file, \
open(path_val_acc, "w") as val_acc_file, \
open(path_val_loss, "w") as val_loss_file, \
open(path_TP_TN_FP_FN, "w") as cls_file:
train_acc_writer = csv.writer(train_acc_file)
train_loss_writer = csv.writer(train_loss_file)
val_acc_writer = csv.writer(val_acc_file)
val_loss_writer = csv.writer(val_loss_file)
cls_writer = csv.writer(cls_file)
for it in range(runs):
test_acc, train_accs, val_accs, train_losses, val_losses, img_name, TP_TN_FP_FN = train_and_test_1Fold(fold=fold, m=m, device=device)
all_test_accs.append(test_acc)
test_accs_per_fold[fold].append(test_acc)
train_acc_writer.writerow([i for i in train_accs])
train_loss_writer.writerow([i for i in train_losses])
val_acc_writer.writerow([i for i in val_accs])
val_loss_writer.writerow([i for i in val_losses])
final_TPTNFPFN = []
groups = ["TP", "TN", "FP", "FN"]
for group_idx in range(4):
for img in img_name[group_idx]:
img_str = str(img[0][0]) + "_" + str(img[0][1]) + "_" + str(img[0][2]) + "_" + groups[group_idx]
final_TPTNFPFN.append(img_str)
cls_writer.writerow(final_TPTNFPFN)
if it == runs-1:
avg = mean(test_accs_per_fold[fold])
sd = stdev(test_accs_per_fold[fold])
test_accs_per_fold[fold].append(avg)
test_accs_per_fold[fold].append(sd)
test_accs_per_fold[fold].reverse()
# write test results (mean and sd) of every fold and total to a csv file
with open(path_test, "w") as test_file:
test_writer = csv.writer(test_file)
test_writer.writerow([stdev(all_test_accs), mean(all_test_accs)])
for fold in range(4):
test_writer.writerow(test_accs_per_fold[fold])
print("Results on Testset:")
print("mean:", "\t", mean(all_test_accs)*100)
print("standard deviation:", "\t", stdev(all_test_accs)*100)
############################################################################################
def train_and_val_1Fold(fold, m, num_epochs, lr, lr_decay, step_size, weight_decay, device, plotting=False, testing=False):
img_size = 128
# img_size = 256
train_IDs, val_IDs, test_IDs = train_val_test_split(fold)
if "aug" in m:
print("Augment training data")
train_transform = transforms.Compose([transforms.Resize((img_size,img_size)),
# transforms.RandomHorizontalFlip(),
transforms.RandomRotation((0,360)),
# transforms.RandomVerticalFlip(),
transforms.ToTensor()])
else:
train_transform = transforms.Compose([transforms.Resize((img_size,img_size)),
# transforms.RandomHorizontalFlip(),
# transforms.RandomRotation((0,360)),
# transforms.RandomVerticalFlip(),
transforms.ToTensor()])
val_transform = transforms.Compose([transforms.Resize((img_size,img_size)),
transforms.ToTensor()])
train_data = ImageDataset(train_IDs, train_transform)
val_data = ImageDataset(val_IDs, val_transform)
test_data = ImageDataset(test_IDs, val_transform)
# print("train size: " + str(len(train_data)) + " val size: " + str(len(val_data)))
# data loaders
batchsize = 64
# batchsize = 16
train_loader = DataLoader(train_data, batchsize, shuffle=True, drop_last=True)
val_loader = DataLoader(val_data, batchsize, shuffle=True)
test_loader = DataLoader(test_data, batchsize, shuffle=True)
train_accs = [] # will contain the training accuracy of every epoch
val_accs = [] # will contain the validation accuracy of every epoch
train_losses = [] # will contain the training loss of every epoch
val_losses = [] # will contain the validation loss of every epoch
# initialize model
print("initialize", m)
if m == "CNN":
model = CNN(img_size).to(device)
elif m == "VGG16_bn":
model = vgg16_bn().to(device)
elif m == "VGG16_bn_aug":
model = vgg16_bn().to(device)
elif m == "VGG16_bn_aug_pretrained":
model = vgg16_bn(pretrained=True).to(device)
# model = torch.load("Parameters/CNN/VGG16_bn_pretrained_imagenet.pt").to(device)
elif m == "VGG16_bn_pretrained":
model = vgg16_bn(pretrained=True).to(device)
# model = torch.load("Parameters/CNN/VGG16_bn_pretrained_imagenet.pt").to(device)
optimizer = torch.optim.Adam(model.parameters(), lr=lr, weight_decay=weight_decay) # define the optimizer, weight_decay corresponds to L2 regularization
# optimizer = torch.optim.SGD(model.parameters(), lr=lr, weight_decay=weight_decay, momentum=0.9)
scheduler = torch.optim.lr_scheduler.StepLR(optimizer, step_size=step_size, gamma=lr_decay) # learning rate decay
crit = torch.nn.CrossEntropyLoss(reduction="sum")
for epoch in range(num_epochs):
if epoch == 0: # get train and val accs before training
train_acc, train_loss, _, _ = evaluate(model, train_loader, crit, device)
val_acc, val_loss, img_name, TP_TN_FP_FN = evaluate(model, val_loader, crit, device)
train_accs.append(train_acc)
train_losses.append(train_loss)
val_accs.append(val_acc)
val_losses.append(val_loss)
running_val_acc = np.array([0,0,val_acc])
val_res = np.copy(running_val_acc)
if testing:
torch.save(model, "Parameters/CNN/" + m + "_fold"+str(fold) + ".pt")
# train the model
train(model, train_loader, optimizer, crit, device)
scheduler.step()
# evalutate the model
train_acc, train_loss, _, _= evaluate(model, train_loader, crit, device)
val_acc, val_loss, img_name, TP_TN_FP_FN = evaluate(model, val_loader, crit, device)
train_accs.append(train_acc)
train_losses.append(train_loss)
val_accs.append(val_acc)
val_losses.append(val_loss)
running_val_acc[0] = running_val_acc[1]
running_val_acc[1] = running_val_acc[2]
running_val_acc[2] = val_acc
if np.mean(running_val_acc) > np.mean(val_res) and not testing:
val_res = np.copy(running_val_acc)
if running_val_acc[2] > val_res[2] and testing:
val_res = np.copy(running_val_acc)
torch.save(model,"Parameters/CNN/" + m + "_fold"+str(fold) + ".pt")
if plotting:
for param_group in optimizer.param_groups:
print("Epoch: {:03d}, lr: {:.5f}, train_loss: {:.5f}, val_loss: {:.5f}, train_acc: {:.5f}, val_acc: {:.5f}".format(epoch, param_group["lr"], train_loss, val_loss, train_acc, val_acc))
if stdev(train_losses[-20:]) < 0.05 and mean(train_accs[-20:]) < 0.55:
boolean = True
# print("Oops")
else:
boolean = False
# # plot learning curves
if plotting:
x = np.arange(0,len(train_accs))
plt.subplot(2,1,1)
plt.plot(x, train_accs, color="r")
plt.ylim(0.5, 1)
plt.plot(x, val_accs, color="g")
plt.subplot(2,1,2)
plt.plot(x, train_losses, color="r")
plt.plot(x, val_losses, color="g")
plt.show()
return(val_res, boolean, np.asarray(train_accs), np.asarray(val_accs), np.asarray(train_losses), np.asarray(val_losses))
def train(model, train_loader, optimizer, crit, device):
model.train()
for data, label, name in train_loader:
data = data.to(device) # transfer the data to the device
label = label.to(device) # transfer the labels to the device
optimizer.zero_grad() # set the gradient to 0
output = model(data) # pass the data through the model
loss = crit(output, torch.max(label,1)[1].long()) # compute the loss between output and label
loss.backward() # compute the gradient
optimizer.step()
def evaluate(model, val_loader, crit, device, testing=False):
model.eval()
loss_all =0
img_count=0
batch_count =0
correct_pred = 0
img_name = [[],[], [], []]
TP_TN_FP_FN = np.zeros((4))
with torch.no_grad(): # gradients don't need to be calculated in evaluation
# pass data through the model and get label and prediction
for data, labelT, name in val_loader: # iterate over every batch in validation training set
data = data.to(device) # trainsfer data to device
predT = model(data)#.detach().cpu().numpy() # pass the data through the model and store the predictions in a numpy array
pred = predT.detach().cpu().numpy()
label=labelT.detach().cpu().numpy()
predicted_classes = (pred == pred.max(axis=1)[:,None]).astype(int)
# if testing:
# c=0
# for i in data:
# plt.imshow(np.transpose(i.detach().cpu().numpy(), (1,2,0)))
# plt.title("predicted:" +str(predicted_classes[c,0]) + " class:" +str(label[c,0]))
# plt.show()
# c+=1
correct_pred += np.sum(predicted_classes[:, 0] == labelT[:, 0].numpy())
loss = crit(predT, torch.max(labelT, 1)[1].long().to(device)) # compute the loss between output and label
loss_all += loss.item()
img_count += len(data)
batch_count +=1
# count the false negatives and false positives
false_idx = np.argwhere(predicted_classes[:,0]!=label[:,0]).reshape(-1)
truth = label[false_idx,:]
c=0
for t in truth:
if t[0] == 1:
TP_TN_FP_FN[3] +=1
img_name[3].append(name[false_idx][c].tolist())
if t[0] == 0:
TP_TN_FP_FN[2] +=1
img_name[2].append(name[false_idx][c].tolist())
c+=1
true_idx = np.argwhere(predicted_classes[:,0]==label[:,0]).reshape(-1)
truth = label[true_idx,:]
c=0
for t in truth:
if t[0] == 1:
TP_TN_FP_FN[0] +=1
img_name[0].append(name[true_idx][c].tolist())
if t[0] == 0:
TP_TN_FP_FN[1] += 1
img_name[1].append(name[true_idx][c].tolist())
c+=1
avg_acc = correct_pred / img_count
avg_loss = loss_all/img_count
return avg_acc, avg_loss, img_name, TP_TN_FP_FN
if __name__ == "__main__":
# plot_imgs()
# image_sizes()
# split_images(0)
# train_list_ID0, _, _ = train_val_test_split(0)
# print("num_train_samples:", len(train_list_ID0))
# transform = transforms.Compose([transforms.Resize((128, 128)),
# transforms.ToTensor()])
# train_split0 = ImageDataset(train_list_ID0, transform)
#
#
# train_loader = DataLoader(train_split0, batch_size=32)
# for batch, labels in train_loader:
# print("batchsize:", len(batch))
# for idx, img in enumerate(batch):
# plt.subplot(8,4, idx+1)
# print(img.size())
# tr = transforms.ToPILImage()
# image = tr(img)
# print(image.size)
# image = np.asarray(image)
# plt.imshow(np.asarray(image))
# if labels[idx].item() == 0:
# ttl = "normal"
# else:
# ttl = "dysplastic"
# plt.title(ttl)
# plt.show()
# plot_all_images()
# train_and_val_1Fold(fold=0, m="VGG16_bn", num_epochs=30, lr=0.01, lr_decay=0.8, step_size=3, weight_decay=0.01, device="cuda", plotting=True)
test(runs=2,device="cuda", m="VGG16_bn_aug_pretrained") | [
"torch.nn.ReLU",
"torch.nn.Dropout",
"statistics.stdev",
"torch.nn.CrossEntropyLoss",
"torch.max",
"numpy.array",
"matplotlib.pyplot.imshow",
"numpy.mean",
"os.listdir",
"matplotlib.pyplot.plot",
"numpy.asarray",
"torchvision.models.vgg16_bn",
"matplotlib.pyplot.ylim",
"torchvision.transfo... | [((549, 569), 'PIL.Image.open', 'Image.open', (['img_path'], {}), '(img_path)\n', (559, 569), False, 'from PIL import Image\n'), ((615, 630), 'numpy.asarray', 'np.asarray', (['img'], {}), '(img)\n', (625, 630), True, 'import numpy as np\n'), ((760, 775), 'matplotlib.pyplot.imshow', 'plt.imshow', (['img'], {}), '(img)\n', (770, 775), True, 'import matplotlib.pyplot as plt\n'), ((780, 790), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (788, 790), True, 'import matplotlib.pyplot as plt\n'), ((974, 994), 'PIL.Image.open', 'Image.open', (['img_path'], {}), '(img_path)\n', (984, 994), False, 'from PIL import Image\n'), ((1040, 1055), 'numpy.asarray', 'np.asarray', (['img'], {}), '(img)\n', (1050, 1055), True, 'import numpy as np\n'), ((1185, 1200), 'matplotlib.pyplot.imshow', 'plt.imshow', (['img'], {}), '(img)\n', (1195, 1200), True, 'import matplotlib.pyplot as plt\n'), ((1205, 1215), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1213, 1215), True, 'import matplotlib.pyplot as plt\n'), ((1343, 1359), 'os.listdir', 'os.listdir', (['path'], {}), '(path)\n', (1353, 1359), False, 'import os\n'), ((2299, 2315), 'os.listdir', 'os.listdir', (['path'], {}), '(path)\n', (2309, 2315), False, 'import os\n'), ((3922, 3938), 'os.listdir', 'os.listdir', (['path'], {}), '(path)\n', (3932, 3938), False, 'import os\n'), ((5602, 5618), 'os.listdir', 'os.listdir', (['path'], {}), '(path)\n', (5612, 5618), False, 'import os\n'), ((11015, 11061), 'torch.utils.data.DataLoader', 'DataLoader', (['test_data', 'batchsize'], {'shuffle': '(True)'}), '(test_data, batchsize, shuffle=True)\n', (11025, 11061), False, 'from torch.utils.data import Dataset, DataLoader\n'), ((11226, 11268), 'torch.nn.CrossEntropyLoss', 'torch.nn.CrossEntropyLoss', ([], {'reduction': '"""sum"""'}), "(reduction='sum')\n", (11251, 11268), False, 'import torch\n'), ((16596, 16659), 'torch.utils.data.DataLoader', 'DataLoader', (['train_data', 'batchsize'], {'shuffle': '(True)', 
'drop_last': '(True)'}), '(train_data, batchsize, shuffle=True, drop_last=True)\n', (16606, 16659), False, 'from torch.utils.data import Dataset, DataLoader\n'), ((16677, 16722), 'torch.utils.data.DataLoader', 'DataLoader', (['val_data', 'batchsize'], {'shuffle': '(True)'}), '(val_data, batchsize, shuffle=True)\n', (16687, 16722), False, 'from torch.utils.data import Dataset, DataLoader\n'), ((16741, 16787), 'torch.utils.data.DataLoader', 'DataLoader', (['test_data', 'batchsize'], {'shuffle': '(True)'}), '(test_data, batchsize, shuffle=True)\n', (16751, 16787), False, 'from torch.utils.data import Dataset, DataLoader\n'), ((17962, 18041), 'torch.optim.lr_scheduler.StepLR', 'torch.optim.lr_scheduler.StepLR', (['optimizer'], {'step_size': 'step_size', 'gamma': 'lr_decay'}), '(optimizer, step_size=step_size, gamma=lr_decay)\n', (17993, 18041), False, 'import torch\n'), ((18076, 18118), 'torch.nn.CrossEntropyLoss', 'torch.nn.CrossEntropyLoss', ([], {'reduction': '"""sum"""'}), "(reduction='sum')\n", (18101, 18118), False, 'import torch\n'), ((21341, 21352), 'numpy.zeros', 'np.zeros', (['(4)'], {}), '(4)\n', (21349, 21352), True, 'import numpy as np\n'), ((5025, 5060), 'csv.reader', 'csv.reader', (['csv_file'], {'delimiter': '""","""'}), "(csv_file, delimiter=',')\n", (5035, 5060), False, 'import csv\n'), ((6893, 6932), 'PIL.Image.open', 'Image.open', (["('pT1_dataset/dataset/' + ID)"], {}), "('pT1_dataset/dataset/' + ID)\n", (6903, 6932), False, 'from PIL import Image\n'), ((7154, 7192), 'torch.tensor', 'torch.tensor', (['label'], {'dtype': 'torch.float'}), '(label, dtype=torch.float)\n', (7166, 7192), False, 'import torch\n'), ((7383, 7427), 'torch.tensor', 'torch.tensor', (['[img_patch]'], {'dtype': 'torch.float'}), '([img_patch], dtype=torch.float)\n', (7395, 7427), False, 'import torch\n'), ((14671, 14692), 'csv.writer', 'csv.writer', (['test_file'], {}), '(test_file)\n', (14681, 14692), False, 'import csv\n'), ((20204, 20224), 'matplotlib.pyplot.subplot', 
'plt.subplot', (['(2)', '(1)', '(1)'], {}), '(2, 1, 1)\n', (20215, 20224), True, 'import matplotlib.pyplot as plt\n'), ((20231, 20265), 'matplotlib.pyplot.plot', 'plt.plot', (['x', 'train_accs'], {'color': '"""r"""'}), "(x, train_accs, color='r')\n", (20239, 20265), True, 'import matplotlib.pyplot as plt\n'), ((20274, 20290), 'matplotlib.pyplot.ylim', 'plt.ylim', (['(0.5)', '(1)'], {}), '(0.5, 1)\n', (20282, 20290), True, 'import matplotlib.pyplot as plt\n'), ((20299, 20331), 'matplotlib.pyplot.plot', 'plt.plot', (['x', 'val_accs'], {'color': '"""g"""'}), "(x, val_accs, color='g')\n", (20307, 20331), True, 'import matplotlib.pyplot as plt\n'), ((20340, 20360), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(2)', '(1)', '(2)'], {}), '(2, 1, 2)\n', (20351, 20360), True, 'import matplotlib.pyplot as plt\n'), ((20367, 20403), 'matplotlib.pyplot.plot', 'plt.plot', (['x', 'train_losses'], {'color': '"""r"""'}), "(x, train_losses, color='r')\n", (20375, 20403), True, 'import matplotlib.pyplot as plt\n'), ((20412, 20446), 'matplotlib.pyplot.plot', 'plt.plot', (['x', 'val_losses'], {'color': '"""g"""'}), "(x, val_losses, color='g')\n", (20420, 20446), True, 'import matplotlib.pyplot as plt\n'), ((20455, 20465), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (20463, 20465), True, 'import matplotlib.pyplot as plt\n'), ((20495, 20517), 'numpy.asarray', 'np.asarray', (['train_accs'], {}), '(train_accs)\n', (20505, 20517), True, 'import numpy as np\n'), ((20519, 20539), 'numpy.asarray', 'np.asarray', (['val_accs'], {}), '(val_accs)\n', (20529, 20539), True, 'import numpy as np\n'), ((20541, 20565), 'numpy.asarray', 'np.asarray', (['train_losses'], {}), '(train_losses)\n', (20551, 20565), True, 'import numpy as np\n'), ((20567, 20589), 'numpy.asarray', 'np.asarray', (['val_losses'], {}), '(val_losses)\n', (20577, 20589), True, 'import numpy as np\n'), ((21364, 21379), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (21377, 21379), False, 'import torch\n'), ((1432, 
1458), 'os.listdir', 'os.listdir', (['(path + patient)'], {}), '(path + patient)\n', (1442, 1458), False, 'import os\n'), ((2491, 2517), 'os.listdir', 'os.listdir', (['(path + patient)'], {}), '(path + patient)\n', (2501, 2517), False, 'import os\n'), ((7805, 7899), 'torch.nn.Conv2d', 'nn.Conv2d', ([], {'in_channels': '(3)', 'out_channels': 'self.out_conv1', 'kernel_size': '(3)', 'stride': '(1)', 'padding': '(1)'}), '(in_channels=3, out_channels=self.out_conv1, kernel_size=3, stride\n =1, padding=1)\n', (7814, 7899), True, 'import torch.nn as nn\n'), ((7908, 7917), 'torch.nn.ReLU', 'nn.ReLU', ([], {}), '()\n', (7915, 7917), True, 'import torch.nn as nn\n'), ((7931, 7968), 'torch.nn.MaxPool2d', 'nn.MaxPool2d', ([], {'kernel_size': '(2)', 'stride': '(2)'}), '(kernel_size=2, stride=2)\n', (7943, 7968), True, 'import torch.nn as nn\n'), ((7982, 7999), 'torch.nn.Dropout2d', 'nn.Dropout2d', (['(0.1)'], {}), '(0.1)\n', (7994, 7999), True, 'import torch.nn as nn\n'), ((8013, 8119), 'torch.nn.Conv2d', 'nn.Conv2d', ([], {'in_channels': 'self.out_conv1', 'out_channels': 'self.out_conv2', 'kernel_size': '(3)', 'stride': '(1)', 'padding': '(1)'}), '(in_channels=self.out_conv1, out_channels=self.out_conv2,\n kernel_size=3, stride=1, padding=1)\n', (8022, 8119), True, 'import torch.nn as nn\n'), ((8129, 8138), 'torch.nn.ReLU', 'nn.ReLU', ([], {}), '()\n', (8136, 8138), True, 'import torch.nn as nn\n'), ((8152, 8189), 'torch.nn.MaxPool2d', 'nn.MaxPool2d', ([], {'kernel_size': '(2)', 'stride': '(2)'}), '(kernel_size=2, stride=2)\n', (8164, 8189), True, 'import torch.nn as nn\n'), ((8203, 8220), 'torch.nn.Dropout2d', 'nn.Dropout2d', (['(0.1)'], {}), '(0.1)\n', (8215, 8220), True, 'import torch.nn as nn\n'), ((8234, 8340), 'torch.nn.Conv2d', 'nn.Conv2d', ([], {'in_channels': 'self.out_conv2', 'out_channels': 'self.out_conv3', 'kernel_size': '(3)', 'stride': '(1)', 'padding': '(1)'}), '(in_channels=self.out_conv2, out_channels=self.out_conv3,\n kernel_size=3, stride=1, padding=1)\n', 
(8243, 8340), True, 'import torch.nn as nn\n'), ((8350, 8359), 'torch.nn.ReLU', 'nn.ReLU', ([], {}), '()\n', (8357, 8359), True, 'import torch.nn as nn\n'), ((8373, 8410), 'torch.nn.MaxPool2d', 'nn.MaxPool2d', ([], {'kernel_size': '(2)', 'stride': '(2)'}), '(kernel_size=2, stride=2)\n', (8385, 8410), True, 'import torch.nn as nn\n'), ((8424, 8441), 'torch.nn.Dropout2d', 'nn.Dropout2d', (['(0.1)'], {}), '(0.1)\n', (8436, 8441), True, 'import torch.nn as nn\n'), ((8509, 8526), 'torch.nn.Dropout', 'nn.Dropout', ([], {'p': '(0.5)'}), '(p=0.5)\n', (8519, 8526), True, 'import torch.nn as nn\n'), ((8540, 8687), 'torch.nn.Linear', 'nn.Linear', ([], {'in_features': '(self.final_img_size * self.final_img_size * self.out_conv3)', 'out_features': '(self.final_img_size * self.final_img_size * 16)'}), '(in_features=self.final_img_size * self.final_img_size * self.\n out_conv3, out_features=self.final_img_size * self.final_img_size * 16)\n', (8549, 8687), True, 'import torch.nn as nn\n'), ((8688, 8697), 'torch.nn.ReLU', 'nn.ReLU', ([], {}), '()\n', (8695, 8697), True, 'import torch.nn as nn\n'), ((8711, 8728), 'torch.nn.Dropout', 'nn.Dropout', ([], {'p': '(0.5)'}), '(p=0.5)\n', (8721, 8728), True, 'import torch.nn as nn\n'), ((8742, 8831), 'torch.nn.Linear', 'nn.Linear', ([], {'in_features': '(self.final_img_size * self.final_img_size * 16)', 'out_features': '(2)'}), '(in_features=self.final_img_size * self.final_img_size * 16,\n out_features=2)\n', (8751, 8831), True, 'import torch.nn as nn\n'), ((8837, 8849), 'torch.nn.Softmax', 'nn.Softmax', ([], {}), '()\n', (8847, 8849), True, 'import torch.nn as nn\n'), ((10747, 10786), 'torchvision.transforms.Resize', 'transforms.Resize', (['(img_size, img_size)'], {}), '((img_size, img_size))\n', (10764, 10786), False, 'from torchvision import transforms\n'), ((10828, 10849), 'torchvision.transforms.ToTensor', 'transforms.ToTensor', ([], {}), '()\n', (10847, 10849), False, 'from torchvision import transforms\n'), ((12996, 13022), 
'csv.writer', 'csv.writer', (['train_acc_file'], {}), '(train_acc_file)\n', (13006, 13022), False, 'import csv\n'), ((13055, 13082), 'csv.writer', 'csv.writer', (['train_loss_file'], {}), '(train_loss_file)\n', (13065, 13082), False, 'import csv\n'), ((13113, 13137), 'csv.writer', 'csv.writer', (['val_acc_file'], {}), '(val_acc_file)\n', (13123, 13137), False, 'import csv\n'), ((13168, 13193), 'csv.writer', 'csv.writer', (['val_loss_file'], {}), '(val_loss_file)\n', (13178, 13193), False, 'import csv\n'), ((13220, 13240), 'csv.writer', 'csv.writer', (['cls_file'], {}), '(cls_file)\n', (13230, 13240), False, 'import csv\n'), ((14916, 14935), 'statistics.mean', 'mean', (['all_test_accs'], {}), '(all_test_accs)\n', (14920, 14935), False, 'from statistics import stdev, mean\n'), ((14980, 15000), 'statistics.stdev', 'stdev', (['all_test_accs'], {}), '(all_test_accs)\n', (14985, 15000), False, 'from statistics import stdev, mean\n'), ((16164, 16203), 'torchvision.transforms.Resize', 'transforms.Resize', (['(img_size, img_size)'], {}), '((img_size, img_size))\n', (16181, 16203), False, 'from torchvision import transforms\n'), ((16240, 16261), 'torchvision.transforms.ToTensor', 'transforms.ToTensor', ([], {}), '()\n', (16259, 16261), False, 'from torchvision import transforms\n'), ((18596, 18621), 'numpy.array', 'np.array', (['[0, 0, val_acc]'], {}), '([0, 0, val_acc])\n', (18604, 18621), True, 'import numpy as np\n'), ((18642, 18666), 'numpy.copy', 'np.copy', (['running_val_acc'], {}), '(running_val_acc)\n', (18649, 18666), True, 'import numpy as np\n'), ((19467, 19491), 'numpy.copy', 'np.copy', (['running_val_acc'], {}), '(running_val_acc)\n', (19474, 19491), True, 'import numpy as np\n'), ((19570, 19594), 'numpy.copy', 'np.copy', (['running_val_acc'], {}), '(running_val_acc)\n', (19577, 19594), True, 'import numpy as np\n'), ((19959, 19984), 'statistics.stdev', 'stdev', (['train_losses[-20:]'], {}), '(train_losses[-20:])\n', (19964, 19984), False, 'from statistics 
import stdev, mean\n'), ((19996, 20018), 'statistics.mean', 'mean', (['train_accs[-20:]'], {}), '(train_accs[-20:])\n', (20000, 20018), False, 'from statistics import stdev, mean\n'), ((1482, 1561), 'PIL.Image.open', 'Image.open', (["(path + patient + '/' + img_folder + '/' + img_folder + '-image.jpg')"], {}), "(path + patient + '/' + img_folder + '/' + img_folder + '-image.jpg')\n", (1492, 1561), False, 'from PIL import Image\n'), ((1581, 1596), 'numpy.asarray', 'np.asarray', (['img'], {}), '(img)\n', (1591, 1596), True, 'import numpy as np\n'), ((2576, 2655), 'PIL.Image.open', 'Image.open', (["(path + patient + '/' + img_folder + '/' + img_folder + '-image.jpg')"], {}), "(path + patient + '/' + img_folder + '/' + img_folder + '-image.jpg')\n", (2586, 2655), False, 'from PIL import Image\n'), ((2699, 2714), 'numpy.asarray', 'np.asarray', (['img'], {}), '(img)\n', (2709, 2714), True, 'import numpy as np\n'), ((3989, 4013), 'os.path.isfile', 'os.path.isfile', (['(path + f)'], {}), '(path + f)\n', (4003, 4013), False, 'import os\n'), ((5883, 5909), 'os.listdir', 'os.listdir', (['(path + patient)'], {}), '(path + patient)\n', (5893, 5909), False, 'import os\n'), ((6146, 6172), 'os.listdir', 'os.listdir', (['(path + patient)'], {}), '(path + patient)\n', (6156, 6172), False, 'import os\n'), ((6347, 6373), 'os.listdir', 'os.listdir', (['(path + patient)'], {}), '(path + patient)\n', (6357, 6373), False, 'import os\n'), ((7305, 7335), 're.findall', 're.findall', (['"""[0-9]+"""', 'filename'], {}), "('[0-9]+', filename)\n", (7315, 7335), False, 'import re\n'), ((14723, 14743), 'statistics.stdev', 'stdev', (['all_test_accs'], {}), '(all_test_accs)\n', (14728, 14743), False, 'from statistics import stdev, mean\n'), ((14745, 14764), 'statistics.mean', 'mean', (['all_test_accs'], {}), '(all_test_accs)\n', (14749, 14764), False, 'from statistics import stdev, mean\n'), ((15434, 15473), 'torchvision.transforms.Resize', 'transforms.Resize', (['(img_size, img_size)'], {}), 
'((img_size, img_size))\n', (15451, 15473), False, 'from torchvision import transforms\n'), ((15583, 15618), 'torchvision.transforms.RandomRotation', 'transforms.RandomRotation', (['(0, 360)'], {}), '((0, 360))\n', (15608, 15618), False, 'from torchvision import transforms\n'), ((15726, 15747), 'torchvision.transforms.ToTensor', 'transforms.ToTensor', ([], {}), '()\n', (15745, 15747), False, 'from torchvision import transforms\n'), ((15806, 15845), 'torchvision.transforms.Resize', 'transforms.Resize', (['(img_size, img_size)'], {}), '((img_size, img_size))\n', (15823, 15845), False, 'from torchvision import transforms\n'), ((16100, 16121), 'torchvision.transforms.ToTensor', 'transforms.ToTensor', ([], {}), '()\n', (16119, 16121), False, 'from torchvision import transforms\n'), ((19384, 19408), 'numpy.mean', 'np.mean', (['running_val_acc'], {}), '(running_val_acc)\n', (19391, 19408), True, 'import numpy as np\n'), ((19411, 19427), 'numpy.mean', 'np.mean', (['val_res'], {}), '(val_res)\n', (19418, 19427), True, 'import numpy as np\n'), ((2816, 2838), 'matplotlib.pyplot.rc', 'plt.rc', (['"""font"""'], {'size': '(5)'}), "('font', size=5)\n", (2822, 2838), True, 'import matplotlib.pyplot as plt\n'), ((2886, 2914), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(10)', '(10)', 'counter'], {}), '(10, 10, counter)\n', (2897, 2914), True, 'import matplotlib.pyplot as plt\n'), ((2934, 2949), 'matplotlib.pyplot.imshow', 'plt.imshow', (['img'], {}), '(img)\n', (2944, 2949), True, 'import matplotlib.pyplot as plt\n'), ((3137, 3152), 'matplotlib.pyplot.axis', 'plt.axis', (['"""off"""'], {}), "('off')\n", (3145, 3152), True, 'import matplotlib.pyplot as plt\n'), ((3226, 3236), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (3234, 3236), True, 'import matplotlib.pyplot as plt\n'), ((3287, 3309), 'matplotlib.pyplot.rc', 'plt.rc', (['"""font"""'], {'size': '(5)'}), "('font', size=5)\n", (3293, 3309), True, 'import matplotlib.pyplot as plt\n'), ((3330, 3358), 
'matplotlib.pyplot.subplot', 'plt.subplot', (['(10)', '(10)', 'counter'], {}), '(10, 10, counter)\n', (3341, 3358), True, 'import matplotlib.pyplot as plt\n'), ((3379, 3394), 'matplotlib.pyplot.imshow', 'plt.imshow', (['img'], {}), '(img)\n', (3389, 3394), True, 'import matplotlib.pyplot as plt\n'), ((3582, 3597), 'matplotlib.pyplot.axis', 'plt.axis', (['"""off"""'], {}), "('off')\n", (3590, 3597), True, 'import matplotlib.pyplot as plt\n'), ((14272, 14302), 'statistics.mean', 'mean', (['test_accs_per_fold[fold]'], {}), '(test_accs_per_fold[fold])\n', (14276, 14302), False, 'from statistics import stdev, mean\n'), ((14328, 14359), 'statistics.stdev', 'stdev', (['test_accs_per_fold[fold]'], {}), '(test_accs_per_fold[fold])\n', (14333, 14359), False, 'from statistics import stdev, mean\n'), ((17231, 17241), 'torchvision.models.vgg16_bn', 'vgg16_bn', ([], {}), '()\n', (17239, 17241), False, 'from torchvision.models import vgg16_bn\n'), ((22695, 22746), 'numpy.argwhere', 'np.argwhere', (['(predicted_classes[:, 0] != label[:, 0])'], {}), '(predicted_classes[:, 0] != label[:, 0])\n', (22706, 22746), True, 'import numpy as np\n'), ((23158, 23209), 'numpy.argwhere', 'np.argwhere', (['(predicted_classes[:, 0] == label[:, 0])'], {}), '(predicted_classes[:, 0] == label[:, 0])\n', (23169, 23209), True, 'import numpy as np\n'), ((3023, 3046), 'matplotlib.pyplot.title', 'plt.title', (['"""dysplastic"""'], {}), "('dysplastic')\n", (3032, 3046), True, 'import matplotlib.pyplot as plt\n'), ((3097, 3116), 'matplotlib.pyplot.title', 'plt.title', (['"""normal"""'], {}), "('normal')\n", (3106, 3116), True, 'import matplotlib.pyplot as plt\n'), ((3468, 3491), 'matplotlib.pyplot.title', 'plt.title', (['"""dysplastic"""'], {}), "('dysplastic')\n", (3477, 3491), True, 'import matplotlib.pyplot as plt\n'), ((3542, 3561), 'matplotlib.pyplot.title', 'plt.title', (['"""normal"""'], {}), "('normal')\n", (3551, 3561), True, 'import matplotlib.pyplot as plt\n'), ((4173, 4189), 'csv.reader', 
'csv.reader', (['file'], {}), '(file)\n', (4183, 4189), False, 'import csv\n'), ((17299, 17309), 'torchvision.models.vgg16_bn', 'vgg16_bn', ([], {}), '()\n', (17307, 17309), False, 'from torchvision.models import vgg16_bn\n'), ((20994, 21013), 'torch.max', 'torch.max', (['label', '(1)'], {}), '(label, 1)\n', (21003, 21013), False, 'import torch\n'), ((17378, 17403), 'torchvision.models.vgg16_bn', 'vgg16_bn', ([], {'pretrained': '(True)'}), '(pretrained=True)\n', (17386, 17403), False, 'from torchvision.models import vgg16_bn\n'), ((17558, 17583), 'torchvision.models.vgg16_bn', 'vgg16_bn', ([], {'pretrained': '(True)'}), '(pretrained=True)\n', (17566, 17583), False, 'from torchvision.models import vgg16_bn\n'), ((22421, 22441), 'torch.max', 'torch.max', (['labelT', '(1)'], {}), '(labelT, 1)\n', (22430, 22441), False, 'import torch\n')] |
import unittest
from nose.plugins.attrib import attr
from testing.utils import DumpResponse
import cloudsigma.resource as resource
@attr('acceptance_test')
class FirewallPolicyTest(unittest.TestCase):
    """Acceptance tests for the CloudSigma firewall-policy API.

    These tests run against a live account: every call goes through
    ``resource.FirewallPolicy`` / ``resource.Server`` to the remote
    service, and selected responses are captured with ``DumpResponse``
    to produce documentation snippets.
    """
    def setUp(self):
        """Create the API client, a reusable policy fixture, and start from a clean account."""
        unittest.TestCase.setUp(self)
        self.client = resource.FirewallPolicy()
        # Snapshots request/response pairs into named dump files for the docs.
        self.dump_response = DumpResponse(clients=[self.client])
        # Representative policy covering the main rule variations:
        # both directions, tcp/udp, negated matches ("!...") and port ranges.
        self.base_policy = {
            "name": "My awesome policy",
            "rules": [
                {
                    "dst_ip": "23",
                    "direction": "out",
                    "action": "drop",
                    "comment": "Drop traffic from the VM to IP address 172.16.17.32/32"
                },
                {
                    "src_ip": "172.16.31.10/24",
                    "ip_proto": "tcp",
                    "dst_port": "22",
                    "direction": "in",
                    "action": "accept",
                    "comment": "Allow SSH traffic to the VM from our office in Dubai"
                },
                {
                    "ip_proto": "tcp",
                    "dst_port": "22",
                    "direction": "in",
                    "action": "drop",
                    "comment": "Drop all other SSH traffic to the VM"
                },
                {
                    "src_ip": "!172.16.17.32",
                    "ip_proto": "udp",
                    "direction": "in",
                    "action": "drop",
                    "comment": "Drop all UDP traffic to the VM, not originating from 172.16.17.32"
                },
                {
                    "ip_proto": "tcp",
                    "dst_port": "!1:1024",
                    "direction": "in",
                    "action": "drop",
                    "comment": "Drop any traffic, to the VM with destination port not between 1-1024"
                }
            ]
        }
        # Remove leftovers from earlier (possibly aborted) runs.
        self._clean_policies()
    def tearDown(self):
        """Leave no firewall policies (or attached servers) behind."""
        self._clean_policies()
    def _clean_policies(self):
        """Delete every firewall policy on the account, and any server still referencing one."""
        policies = self.client.list_detail()
        server_client = resource.Server()
        deleted_servers = []
        for policy in policies:
            for server in policy['servers']:
                # The same server may appear under several policies; delete it only once.
                if server['uuid'] not in deleted_servers:
                    deleted_servers.append(server['uuid'])
                    server_client.delete(server['uuid'])
            self.client.delete(policy['uuid'])
    @attr('docs_snippets')
    def test_get_schema(self):
        """Dump the firewall-policy JSON schema endpoint."""
        with self.dump_response('fwpolicy_schema'):
            self.client.get_schema()
    @attr('docs_snippets')
    def test_crud_policy(self):
        """Full create / list / get / update / delete cycle for policies."""
        base_policy = self.base_policy.copy()
        with self.dump_response('fwpolicy_create_minimal'):
            min_policy = self.client.create({})
        self.assertDictContainsSubset({}, min_policy)
        with self.dump_response('fwpolicy_create_full'):
            full_policy = self.client.create(base_policy)
        # Test if applied rules look like the ones returned from the API.
        # The dict is subset will not work, because API
        # alters/normalizes some of the data.
        for idx, rules in enumerate(base_policy['rules']):
            for key in rules:
                match_a = str(full_policy['rules'][idx][key])
                match_b = rules[key]
                print(match_a, match_b)
                self.assertTrue(match_a.startswith(match_b))
        with self.dump_response('fwpolicy_list'):
            self.client.list()
        with self.dump_response('fwpolicy_list_detail'):
            res = self.client.list_detail()
        # Exactly the two policies created above should exist.
        self.assertEqual(len(res), 2)
        updated_policy = full_policy.copy()
        updated_policy['rules'] = [updated_policy['rules'][0]]
        with self.dump_response('fwpolicy_get'):
            self.client.get(full_policy['uuid'])
        with self.dump_response('fwpolicy_update'):
            up_pol = self.client.update(full_policy['uuid'], updated_policy)
        # The update truncated the rule list to a single rule.
        self.assertEqual(len(up_pol['rules']), 1)
        with self.dump_response('fwpolicy_delete'):
            self.client.delete(full_policy['uuid'])
        self.client.delete(min_policy['uuid'])
        res = self.client.list()
        self.assertEqual(len(res), 0)
    @attr('docs_snippets')
    def test_server_fw_rules(self):
        """Attach a policy to a server NIC and verify it detaches when the policy is deleted."""
        policy = self.client.create(self.base_policy)
        # NOTE(review): '<PASSWORD>' looks like a redacted literal; a real
        # VNC password is needed for a live run -- confirm.
        server_def = {
            'name': 'FirewalledServer',
            'cpu': 1000,
            'mem': 512 * 1024 ** 2,
            'vnc_password': '<PASSWORD>',
            "nics": [
                {
                    "firewall_policy": policy['uuid'],
                    "ip_v4_conf": {
                        "ip": None,
                        "conf": "dhcp"
                    },
                    "model": "virtio",
                }
            ],
        }
        server_client = resource.Server()
        with DumpResponse(clients=[server_client])("fwpolicy_server_attach"):
            server = server_client.create(server_def)
        self.assertEqual(
            server['nics'][0]['firewall_policy']['uuid'], policy['uuid']
        )
        # Deleting the policy must silently detach it from the server NIC.
        self.client.delete(policy['uuid'])
        server = server_client.get(server['uuid'])
        self.assertIsNone(server['nics'][0]['firewall_policy'])
        server_client.delete(server['uuid'])
| [
"nose.plugins.attrib.attr",
"testing.utils.DumpResponse",
"cloudsigma.resource.Server",
"cloudsigma.resource.FirewallPolicy",
"unittest.TestCase.setUp"
] | [((136, 159), 'nose.plugins.attrib.attr', 'attr', (['"""acceptance_test"""'], {}), "('acceptance_test')\n", (140, 159), False, 'from nose.plugins.attrib import attr\n'), ((2457, 2478), 'nose.plugins.attrib.attr', 'attr', (['"""docs_snippets"""'], {}), "('docs_snippets')\n", (2461, 2478), False, 'from nose.plugins.attrib import attr\n'), ((2605, 2626), 'nose.plugins.attrib.attr', 'attr', (['"""docs_snippets"""'], {}), "('docs_snippets')\n", (2609, 2626), False, 'from nose.plugins.attrib import attr\n'), ((4293, 4314), 'nose.plugins.attrib.attr', 'attr', (['"""docs_snippets"""'], {}), "('docs_snippets')\n", (4297, 4314), False, 'from nose.plugins.attrib import attr\n'), ((234, 263), 'unittest.TestCase.setUp', 'unittest.TestCase.setUp', (['self'], {}), '(self)\n', (257, 263), False, 'import unittest\n'), ((286, 311), 'cloudsigma.resource.FirewallPolicy', 'resource.FirewallPolicy', ([], {}), '()\n', (309, 311), True, 'import cloudsigma.resource as resource\n'), ((341, 376), 'testing.utils.DumpResponse', 'DumpResponse', ([], {'clients': '[self.client]'}), '(clients=[self.client])\n', (353, 376), False, 'from testing.utils import DumpResponse\n'), ((2106, 2123), 'cloudsigma.resource.Server', 'resource.Server', ([], {}), '()\n', (2121, 2123), True, 'import cloudsigma.resource as resource\n'), ((4907, 4924), 'cloudsigma.resource.Server', 'resource.Server', ([], {}), '()\n', (4922, 4924), True, 'import cloudsigma.resource as resource\n'), ((4938, 4975), 'testing.utils.DumpResponse', 'DumpResponse', ([], {'clients': '[server_client]'}), '(clients=[server_client])\n', (4950, 4975), False, 'from testing.utils import DumpResponse\n')] |
"""ЛР 3.3, <NAME>, М8О-303Б-18"""
import numpy as np
import fire # CLI
import matplotlib.pyplot as plt
from sem1.lab1_1.gauss import lu_decomposition, lu_solve
def f(coeffs, x):
    """Evaluate the polynomial with coefficients ``coeffs`` (ascending powers) at ``x``."""
    total = 0
    for power, coeff in enumerate(coeffs):
        total += coeff * x ** power
    return total
def sum_squared_errors(f, y):
    """Return the sum of squared residuals between fitted values ``f`` and data ``y``."""
    total = 0
    for fitted, observed in zip(f, y):
        total += (fitted - observed) ** 2
    return total
def lsm(x, y, n):
    """Fit a degree-``n`` polynomial to the points (x, y) by least squares.

    Builds the normal equations for the monomial basis 1, x, ..., x**n and
    solves them directly.

    :param x: abscissas of the data points
    :param y: ordinates of the data points
    :param n: degree of the fitted polynomial
    :return: array of n + 1 coefficients in ascending-power order
    """
    # Normal equations: A[j][i] = sum_k x_k**(i + j), b[i] = sum_k x_k**i * y_k.
    # A[0][0] is simply len(x), which the sum below already yields; the old
    # hand-written override ``mat[0][0] = N + 1`` was an off-by-one bug that
    # skewed every fit.
    mat = np.array([[sum(x_j ** (i + j) for x_j in x) for i in range(n + 1)]
                    for j in range(n + 1)], dtype=float)
    b = np.array([sum(x_j ** i * y_j for x_j, y_j in zip(x, y))
                  for i in range(n + 1)], dtype=float)
    # The system is small and symmetric positive definite; solve directly.
    return np.linalg.solve(mat, b)
def main():
    """Least-squares approximation of a tabulated function by 1st- and 2nd-degree polynomials."""
    table = {
        "x": [-3.0, -2.0, -1.0, 0.0, 1.0, 2.0],
        "y": [0.04979, 0.13534, 0.36788, 1.0, 2.7183, 7.3891]
    }
    xs, ys = table["x"], table["y"]
    # Dense abscissa grid for drawing the smooth fitted curves.
    grid = np.arange(min(xs) - 1, max(xs) + 1, .01)
    coeffs_lin = lsm(xs, ys, 1)
    curve_lin = f(coeffs_lin, grid)
    coeffs_quad = lsm(xs, ys, 2)
    curve_quad = f(coeffs_quad, grid)
    plt.plot(xs, ys, "o", label="Входные данные")
    plt.plot(grid, curve_lin, label="Полином первой степени")
    plt.plot(grid, curve_quad, label="Полином второй степени")
    plt.title("Аппроксимация МНК")
    plt.grid(True)
    plt.legend()
    plt.savefig("plot.jpg", dpi=300)
    plt.show()
    error_lin = sum_squared_errors(lsm(xs, ys, 1), ys)
    error_quad = sum_squared_errors(lsm(xs, ys, 2), ys)
    print("Сумма квадратов ошибок:")
    print("\tn = 1:", error_lin)
    print("\tn = 2:", error_quad)
    print("\nАппроксимирующие функции:")
    basis = ["", "x", "x^2"]
    for coeffs, degree in zip([coeffs_lin, coeffs_quad], [1, 2]):
        terms = [f"{value:.6f}{power}" for value, power in zip(coeffs, basis)]
        print(f"\tn = {degree}:", " + ".join(terms))
if __name__ == "__main__":
    # Expose `main` as a command-line interface via Fire.
    fire.Fire(main)
| [
"matplotlib.pyplot.grid",
"matplotlib.pyplot.savefig",
"fire.Fire",
"sem1.lab1_1.gauss.lu_decomposition",
"matplotlib.pyplot.plot",
"sem1.lab1_1.gauss.lu_solve",
"numpy.array",
"matplotlib.pyplot.title",
"matplotlib.pyplot.legend",
"matplotlib.pyplot.show"
] | [((735, 748), 'numpy.array', 'np.array', (['mat'], {}), '(mat)\n', (743, 748), True, 'import numpy as np\n'), ((757, 768), 'numpy.array', 'np.array', (['b'], {}), '(b)\n', (765, 768), True, 'import numpy as np\n'), ((783, 804), 'sem1.lab1_1.gauss.lu_decomposition', 'lu_decomposition', (['mat'], {}), '(mat)\n', (799, 804), False, 'from sem1.lab1_1.gauss import lu_decomposition, lu_solve\n'), ((832, 849), 'sem1.lab1_1.gauss.lu_solve', 'lu_solve', (['l', 'u', 'b'], {}), '(l, u, b)\n', (840, 849), False, 'from sem1.lab1_1.gauss import lu_decomposition, lu_solve\n'), ((1295, 1338), 'matplotlib.pyplot.plot', 'plt.plot', (['x', 'y', '"""o"""'], {'label': '"""Входные данные"""'}), "(x, y, 'o', label='Входные данные')\n", (1303, 1338), True, 'import matplotlib.pyplot as plt\n'), ((1343, 1391), 'matplotlib.pyplot.plot', 'plt.plot', (['xc', 'y1'], {'label': '"""Полином первой степени"""'}), "(xc, y1, label='Полином первой степени')\n", (1351, 1391), True, 'import matplotlib.pyplot as plt\n'), ((1396, 1444), 'matplotlib.pyplot.plot', 'plt.plot', (['xc', 'y2'], {'label': '"""Полином второй степени"""'}), "(xc, y2, label='Полином второй степени')\n", (1404, 1444), True, 'import matplotlib.pyplot as plt\n'), ((1450, 1480), 'matplotlib.pyplot.title', 'plt.title', (['"""Аппроксимация МНК"""'], {}), "('Аппроксимация МНК')\n", (1459, 1480), True, 'import matplotlib.pyplot as plt\n'), ((1485, 1499), 'matplotlib.pyplot.grid', 'plt.grid', (['(True)'], {}), '(True)\n', (1493, 1499), True, 'import matplotlib.pyplot as plt\n'), ((1504, 1516), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (1514, 1516), True, 'import matplotlib.pyplot as plt\n'), ((1522, 1554), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""plot.jpg"""'], {'dpi': '(300)'}), "('plot.jpg', dpi=300)\n", (1533, 1554), True, 'import matplotlib.pyplot as plt\n'), ((1559, 1569), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1567, 1569), True, 'import matplotlib.pyplot as plt\n'), ((1973, 1988), 
'fire.Fire', 'fire.Fire', (['main'], {}), '(main)\n', (1982, 1988), False, 'import fire\n')] |
from argparse import ArgumentParser
from itertools import starmap
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
from fyne import blackscholes, heston
from matplotlib.patches import Patch
from scipy.stats import gaussian_kde
import settings
from align_settings import STARTTIME, ENDTIME
from utils import resample
def safe_xs(*args, **kwargs):
    """``pd.Series.xs`` that yields NaN instead of raising on a missing key."""
    try:
        result = pd.Series.xs(*args, **kwargs)
    except KeyError:
        result = np.nan
    return result
def get_tick_size(quote):
    """Infer the price tick from the smallest positive move of Bid + Ask, rounded to cents."""
    moves = (quote['Ask'] + quote['Bid']).diff()
    positive_moves = moves[moves > 1e-6]
    return np.round(positive_moves.min(), 2)
def filter_tick_size(data, quote, size):
    """Keep only the strikes of ``data`` whose quotes move on a tick of ``size``."""
    ticks = quote.groupby('Strike').apply(get_tick_size)
    matching_strikes = ticks[ticks == size].index
    return data.reindex(matching_strikes, level='Strike')
def filter_trade_on_book(quote, trade):
    """Keep only trades that printed at the prevailing best bid or ask.

    Aligns each trade with the quote in force at its timestamp (via the
    project's ``resample`` helper), drops trades whose price matches neither
    side of the book, and annotates the survivors with:

    * ``Buy`` -- True when the trade executed at the ask;
    * ``Half-spread`` -- half the quoted spread at trade time, rounded to cents.
    """
    # Trades beyond the latest quoted expiry have no book to compare against.
    max_expiry = np.max(quote.index.get_level_values('Expiry'))
    trade = trade[trade.index.get_level_values('Expiry') <= max_expiry]
    # For every option series, sample the quote series at the trade times.
    quote_aligned = trade.groupby(['Class', 'Expiry', 'Strike']
                  ).apply(lambda o: resample(quote.xs(o.name),
                                             o.xs(o.name).index))
    # A trade is "on book" when it executes exactly at the bid or the ask.
    valid_trades = ((trade['Price'] == quote_aligned['Bid']) |
                    (trade['Price'] == quote_aligned['Ask']))
    filtered = trade[valid_trades]
    quote_aligned = quote_aligned.loc[valid_trades]
    # Trades at the ask are classified as buyer-initiated.
    filtered['Buy'] = filtered['Price'] == quote_aligned['Ask']
    filtered['Half-spread'] = (quote_aligned['Ask'] - quote_aligned['Bid']).round(2)/2
    return filtered
def compute_duration(quote):
    """Lifetime of each quote state in seconds, keyed additionally by half-spread
    and re-stamped at the midpoint of the interval the quote was in force."""
    book = quote.copy()
    book['Half-spread'] = (book['Ask'] - book['Bid']).round(2) / 2
    # Move Time out of the index so it can be shifted, and key rows by depth.
    frame = book.reset_index('Time').set_index('Half-spread', append=True)[['Time']]
    per_option = frame['Time'].groupby(['Class', 'Expiry', 'Strike'])
    # Duration of a quote state = gap until the next update of the same option.
    frame['Duration'] = per_option.transform(lambda ts: ts.diff().shift(-1))
    # Centre each observation inside its validity interval.
    frame['Time'] = frame['Time'] + frame['Duration'] / 2
    duration = frame.set_index('Time', append=True)['Duration'] / pd.to_timedelta('1s')
    return duration
def compute_volume_duration(quote, trade):
    """Traded on-book volume (per depth and side) and quote durations for the same book."""
    on_book = filter_trade_on_book(quote, trade)
    volume = on_book.set_index(['Half-spread', 'Buy'], append=True)['Volume']
    return volume, compute_duration(quote)
def plot_arrival_rates_bubbles(volume, duration):
    """Bubble plot of order arrival rates per strike and half-spread.

    Aggregates traded volume over the sample, divides by the total time each
    half-spread level was quoted, and draws a 3x2 grid of panels (one row per
    expiry -- only the first three expiries fit; columns are Buy/Sell).
    Bubble size is proportional to the arrival rate; calls are blue, puts red.
    """
    # Total volume per (option, depth, trade direction) over the sample.
    volume = volume.groupby(['Class', 'Expiry', 'Strike', 'Half-spread', 'Buy']
                            ).sum()
    # Total time each depth was quoted; drop levels quoted for less than 300
    # (presumably seconds, per compute_duration's unit -- confirm) as too noisy.
    duration = duration.groupby(['Class', 'Expiry', 'Strike',
                                 'Half-spread']).sum()
    duration = duration[duration > 300]
    # Arrival rate = volume / quoted time; safe_xs yields NaN when a depth
    # never survived the duration filter above.
    arrival_rate = volume.groupby(['Class', 'Expiry', 'Strike', 'Half-spread']
                                  ).transform(lambda d: d.xs(d.name
                                                             )/safe_xs(duration, d.name))
    arrival_rate.name = 'Arrival rate'
    fig, axes = plt.subplots(3, 2, sharey=True, sharex=True, figsize=(8, 10))
    patches = [Patch(color='b', alpha=.5, label='Call'),
               Patch(color='r', alpha=.5, label='Put')]
    axes[0, 1].legend(handles=patches)
    for row, (e, r_ex) in zip(axes, arrival_rate.groupby('Expiry')):
        for bs in ['Buy', 'Sell']:
            ax = row[0] if bs == 'Buy' else row[1]
            ax.set_title("Expiry: {}, {}".format(
                pd.to_datetime(e).strftime('%Y-%m-%d'), bs))
            for cp, cl in [('C', 'b'), ('P', 'r')]:
                r = r_ex.xs((cp, bs == 'Buy'), level=('Class', 'Buy'))
                # Bubble area scaled relative to the expiry's mean rate.
                r.reset_index(['Strike', 'Half-spread']).plot.scatter(
                    x='Strike', y='Half-spread', s=20*r/r_ex.mean(), ax=ax,
                    xlim=(325, 550), ylim=(0, None), alpha=.5, color=cl)
    return fig
def plot_arrival_rates(arrival_rate):
    """Heat maps of the smoothed arrival-rate surface, one panel per expiry/side.

    For each (class, expiry, buy/sell) group a 2-D Gaussian KDE is fitted over
    the two remaining index levels of ``arrival_rate`` (weighted by the
    observed rates) and evaluated on a fixed grid.  Calls use the blue
    colormap and puts the red one, overlaid semi-transparently.  Only the
    first three expiries are drawn (3x2 subplot grid).
    """
    # Keep strictly positive depths with defined rates.
    depths = arrival_rate.index.get_level_values('Half-spread')
    arrival_rate = arrival_rate[depths > 0].dropna()
    bandwidth = 0.25
    levels = ['Class', 'Expiry', 'Buy']
    # One weighted KDE per group, over the remaining two index levels
    # (NOTE(review): presumably a moneyness-like coordinate x half-spread
    # -- confirm against the caller).
    kernel = arrival_rate.groupby(levels).apply(
        lambda r: gaussian_kde(np.stack(r.xs(r.name, level=levels).index, axis=-1),
                                  bandwidth, r.values))
    # Evaluation grid; extents must match the imshow calls below.
    xlen, ylen = 200, 150
    xmin, xmax, ymin, ymax = -0.2, 0.15, 0.0, 0.3
    x = np.linspace(xmin, xmax, xlen)
    y = np.linspace(ymin, ymax, ylen)
    x_b, y_b = np.broadcast_arrays(x[:, None], y[None, :])
    fig, axes = plt.subplots(3, 2, sharex=True, sharey=True, figsize=(8, 10))
    patches = [Patch(color='tab:blue', label='Call'),
               Patch(color='tab:red', label='Put')]
    axes[0, 1].legend(handles=patches)
    for row, (e, k) in zip(axes, kernel.groupby('Expiry')):
        row[0].set_title("Expiry: {}, Buy".format(
            pd.to_datetime(e).strftime('%Y-%m-%d')))
        row[1].set_title("Expiry: {}, Sell".format(
            pd.to_datetime(e).strftime('%Y-%m-%d')))
        for cp, cm in [('C', plt.cm.Blues), ('P', plt.cm.Reds)]:
            # Evaluate the buy-side KDE on the grid; rot90 orients the image
            # so the y axis (half-spread) increases upwards.
            z = k.xs((cp, e, True))(np.array([x_b.ravel(), y_b.ravel()]))
            z = np.rot90(np.reshape(z, x_b.shape))
            row[0].imshow(z, extent=[xmin, xmax, ymin, ymax], cmap=cm,
                          aspect='auto', alpha=.5)
            # Same for the sell side in the right-hand column.
            z = k.xs((cp, e, False))(np.array([x_b.ravel(), y_b.ravel()]))
            z = np.rot90(np.reshape(z, x_b.shape))
            row[1].imshow(z, extent=[xmin, xmax, ymin, ymax], cmap=cm,
                          aspect='auto', alpha=.5)
    return fig
def quote_slice(quote, start, end):
    """Restrict ``quote`` to [start, end], forward-filling so a row exists exactly at ``start``."""
    window = quote.copy()
    # Insert a placeholder row at the window start, then forward-fill it
    # from the last quote observed before the window opened.
    window.loc[start] = np.nan
    window = window.sort_index()
    window.ffill(inplace=True)
    return window.loc[start:end]
def plot_intraday(volume, duration):
    """Plot intraday heatmaps of volume, duration and their ratio per expiry.

    Filters both inputs to instruments passing a 0.05 tick-size test,
    aggregates by half-spread depth, smooths the intraday time profile with
    weighted Gaussian KDEs, and renders three 3-panel figures (one panel
    per expiry): traded volume, quote duration, and log arrival rate
    (volume / duration, clipped).

    NOTE(review): reads the module-level global ``quote`` and the helper
    ``filter_tick_size`` defined elsewhere in this module.

    Returns
    -------
    (fig_volume, fig_duration, fig_arrival) : tuple of matplotlib figures.
    """
    # Keep only series whose quotes pass the 0.05 tick-size filter; the
    # duration series is then restricted (twice) to the strikes and depths
    # that survived in the filtered volume.
    filtered_volume = volume.groupby(['Class', 'Expiry']).apply(lambda e: filter_tick_size(e.xs(e.name), quote.xs(e.name), 0.05))
    filtered_duration = duration.groupby(['Class', 'Expiry']).apply(lambda e: e.xs(e.name).reindex(np.unique(filtered_volume.xs(e.name).index.get_level_values('Strike')), level='Strike').dropna())
    filtered_duration = filtered_duration.groupby(['Class', 'Expiry', 'Strike']).apply(lambda o: o.xs(o.name).reindex(np.unique(filtered_volume.xs(o.name).index.get_level_values('Half-spread')), level='Half-spread').dropna())
    # Totals per depth, restricted to half-spreads in [0.025, 0.225].
    volume_by_depth = filtered_volume.groupby('Expiry').apply(lambda e: e.groupby('Half-spread').sum().loc[.025:.225])
    duration_by_depth = filtered_duration.groupby('Expiry').apply(lambda e: e.groupby('Half-spread').sum().loc[.025:.225])
    # Weighted KDEs over time-of-day (seconds since the session date) per
    # depth; duration uses a tighter bandwidth than volume.
    volume_kde = filtered_volume.groupby('Expiry').apply(lambda e: e.groupby(['Half-spread', 'Time']).sum().loc[.025:.225].groupby('Half-spread').apply(lambda d: gaussian_kde((d.xs(d.name).index - pd.to_datetime('2016-01-04'))/pd.to_timedelta('1s'), weights=d, bw_method=.25)))
    duration_kde = filtered_duration.groupby('Expiry').apply(lambda e: e.groupby(['Half-spread', 'Time']).sum().loc[.025:.225].groupby('Half-spread').apply(lambda d: gaussian_kde((d.xs(d.name).index - pd.to_datetime('2016-01-04'))/pd.to_timedelta('1s'), weights=d, bw_method=.05)))
    # --- Figure 1: intraday volume -------------------------------------
    fig_volume, axes = plt.subplots(3, 1, sharex=True, **settings.PLOT)
    # Evaluate between 08:00 and 16:30 (in seconds of the day).
    tmin, tmax = 8*3600, 16.5*3600
    t = np.linspace(tmin, tmax, 100)
    for ax, (e, v) in zip(axes, volume_kde.groupby('Expiry')):
        # Scale each depth's density by its total volume.
        z = volume_by_depth.xs(e).values*np.array([k.xs(d)(t) for d, k in v.xs(e).groupby('Half-spread')]).T
        ax.imshow(np.rot90(z), extent=[tmin/3600, tmax/3600, 0.025, 0.225], aspect='auto')
        ax.set_ylabel('Half-spread (€)')
        ax.set_title('Expiry: {}'.format(pd.to_datetime(e).strftime('%Y-%m-%d')))
    # Only the bottom panel gets the x label (sharex=True).
    ax.set_xlabel('Time (hours)')
    fig_volume.tight_layout()
    # --- Figure 2: intraday duration -----------------------------------
    fig_duration, axes = plt.subplots(3, 1, sharex=True, **settings.PLOT)
    tmin, tmax = 8*3600, 16.5*3600
    t = np.linspace(tmin, tmax, 100)
    for ax, (e, du) in zip(axes, duration_kde.groupby('Expiry')):
        z = duration_by_depth.xs(e).values*np.array([k.xs(d)(t) for d, k in du.xs(e).groupby('Half-spread')]).T
        ax.imshow(np.rot90(z), extent=[tmin/3600, tmax/3600, 0.025, 0.225], aspect='auto')
        ax.set_ylabel('Half-spread (€)')
        ax.set_title('Expiry: {}'.format(pd.to_datetime(e).strftime('%Y-%m-%d')))
    ax.set_xlabel('Time (hours)')
    fig_duration.tight_layout()
    # --- Figure 3: log arrival rate = volume / duration ----------------
    fig_arrival, axes = plt.subplots(3, 1, sharex=True, **settings.PLOT)
    tmin, tmax = 8*3600, 16.5*3600
    t = np.linspace(tmin, tmax, 100)
    for ax, (e, du), (_, v) in zip(axes, duration_kde.groupby('Expiry'), volume_kde.groupby('Expiry')):
        z_v = volume_by_depth.xs(e).values*np.array([k.xs(d)(t) for d, k in v.xs(e).groupby('Half-spread')]).T
        z_d = duration_by_depth.xs(e).values*np.array([k.xs(d)(t) for d, k in du.xs(e).groupby('Half-spread')]).T
        # Clip before taking the log to bound the color scale.
        z = np.clip(z_v/z_d, 1e-3, 1e1)
        ax.imshow(np.rot90(np.log(z)), extent=[tmin/3600, tmax/3600, 0.025, 0.225], aspect='auto')
        ax.set_ylabel('Half-spread (€)')
        ax.set_title('Expiry: {}'.format(pd.to_datetime(e).strftime('%Y-%m-%d')))
    ax.set_xlabel('Time (hours)')
    fig_arrival.tight_layout()
    return fig_volume, fig_duration, fig_arrival
if __name__ == '__main__':
    # CLI: six positional file paths (two inputs, four figure outputs).
    cli = ArgumentParser()
    for arg_name in ('quote_filename',
                     'trade_filename',
                     'dest_bubbles_filename',
                     'dest_intraday_volume_filename',
                     'dest_intraday_duration_filename',
                     'dest_intraday_arrival_filename'):
        cli.add_argument(arg_name)
    args = cli.parse_args()
    # Load inputs; ``quote`` is module-level and read by plot_intraday.
    quote = pd.read_parquet(args.quote_filename)
    trade = pd.read_parquet(args.trade_filename).xs('AEX')
    quote.sort_index(inplace=True)
    volume, duration = compute_volume_duration(quote, trade)
    # Bubble chart, then the three intraday figures.
    plot_arrival_rates_bubbles(volume, duration).savefig(args.dest_bubbles_filename)
    figs = plot_intraday(volume, duration)
    for fig, destination in zip(figs, (args.dest_intraday_volume_filename,
                                       args.dest_intraday_duration_filename,
                                       args.dest_intraday_arrival_filename)):
        fig.savefig(destination)
| [
"numpy.clip",
"pandas.to_timedelta",
"pandas.read_parquet",
"argparse.ArgumentParser",
"numpy.reshape",
"numpy.log",
"pandas.Series.xs",
"numpy.linspace",
"matplotlib.pyplot.subplots",
"matplotlib.patches.Patch",
"numpy.rot90",
"numpy.broadcast_arrays",
"pandas.to_datetime"
] | [((2058, 2079), 'pandas.to_timedelta', 'pd.to_timedelta', (['"""1s"""'], {}), "('1s')\n", (2073, 2079), True, 'import pandas as pd\n'), ((2921, 2982), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(3)', '(2)'], {'sharey': '(True)', 'sharex': '(True)', 'figsize': '(8, 10)'}), '(3, 2, sharey=True, sharex=True, figsize=(8, 10))\n', (2933, 2982), True, 'import matplotlib.pyplot as plt\n'), ((4251, 4280), 'numpy.linspace', 'np.linspace', (['xmin', 'xmax', 'xlen'], {}), '(xmin, xmax, xlen)\n', (4262, 4280), True, 'import numpy as np\n'), ((4289, 4318), 'numpy.linspace', 'np.linspace', (['ymin', 'ymax', 'ylen'], {}), '(ymin, ymax, ylen)\n', (4300, 4318), True, 'import numpy as np\n'), ((4334, 4377), 'numpy.broadcast_arrays', 'np.broadcast_arrays', (['x[:, None]', 'y[None, :]'], {}), '(x[:, None], y[None, :])\n', (4353, 4377), True, 'import numpy as np\n'), ((4395, 4456), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(3)', '(2)'], {'sharex': '(True)', 'sharey': '(True)', 'figsize': '(8, 10)'}), '(3, 2, sharex=True, sharey=True, figsize=(8, 10))\n', (4407, 4456), True, 'import matplotlib.pyplot as plt\n'), ((7056, 7104), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(3)', '(1)'], {'sharex': '(True)'}), '(3, 1, sharex=True, **settings.PLOT)\n', (7068, 7104), True, 'import matplotlib.pyplot as plt\n'), ((7148, 7176), 'numpy.linspace', 'np.linspace', (['tmin', 'tmax', '(100)'], {}), '(tmin, tmax, 100)\n', (7159, 7176), True, 'import numpy as np\n'), ((7653, 7701), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(3)', '(1)'], {'sharex': '(True)'}), '(3, 1, sharex=True, **settings.PLOT)\n', (7665, 7701), True, 'import matplotlib.pyplot as plt\n'), ((7745, 7773), 'numpy.linspace', 'np.linspace', (['tmin', 'tmax', '(100)'], {}), '(tmin, tmax, 100)\n', (7756, 7773), True, 'import numpy as np\n'), ((8257, 8305), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(3)', '(1)'], {'sharex': '(True)'}), '(3, 1, sharex=True, **settings.PLOT)\n', (8269, 8305), True, 'import 
matplotlib.pyplot as plt\n'), ((8349, 8377), 'numpy.linspace', 'np.linspace', (['tmin', 'tmax', '(100)'], {}), '(tmin, tmax, 100)\n', (8360, 8377), True, 'import numpy as np\n'), ((9123, 9139), 'argparse.ArgumentParser', 'ArgumentParser', ([], {}), '()\n', (9137, 9139), False, 'from argparse import ArgumentParser\n'), ((9470, 9506), 'pandas.read_parquet', 'pd.read_parquet', (['args.quote_filename'], {}), '(args.quote_filename)\n', (9485, 9506), True, 'import pandas as pd\n'), ((396, 425), 'pandas.Series.xs', 'pd.Series.xs', (['*args'], {}), '(*args, **kwargs)\n', (408, 425), True, 'import pandas as pd\n'), ((2998, 3039), 'matplotlib.patches.Patch', 'Patch', ([], {'color': '"""b"""', 'alpha': '(0.5)', 'label': '"""Call"""'}), "(color='b', alpha=0.5, label='Call')\n", (3003, 3039), False, 'from matplotlib.patches import Patch\n'), ((3055, 3095), 'matplotlib.patches.Patch', 'Patch', ([], {'color': '"""r"""', 'alpha': '(0.5)', 'label': '"""Put"""'}), "(color='r', alpha=0.5, label='Put')\n", (3060, 3095), False, 'from matplotlib.patches import Patch\n'), ((4472, 4509), 'matplotlib.patches.Patch', 'Patch', ([], {'color': '"""tab:blue"""', 'label': '"""Call"""'}), "(color='tab:blue', label='Call')\n", (4477, 4509), False, 'from matplotlib.patches import Patch\n'), ((4526, 4561), 'matplotlib.patches.Patch', 'Patch', ([], {'color': '"""tab:red"""', 'label': '"""Put"""'}), "(color='tab:red', label='Put')\n", (4531, 4561), False, 'from matplotlib.patches import Patch\n'), ((8719, 8750), 'numpy.clip', 'np.clip', (['(z_v / z_d)', '(0.001)', '(10.0)'], {}), '(z_v / z_d, 0.001, 10.0)\n', (8726, 8750), True, 'import numpy as np\n'), ((7367, 7378), 'numpy.rot90', 'np.rot90', (['z'], {}), '(z)\n', (7375, 7378), True, 'import numpy as np\n'), ((7970, 7981), 'numpy.rot90', 'np.rot90', (['z'], {}), '(z)\n', (7978, 7981), True, 'import numpy as np\n'), ((9519, 9555), 'pandas.read_parquet', 'pd.read_parquet', (['args.trade_filename'], {}), '(args.trade_filename)\n', (9534, 9555), True, 
'import pandas as pd\n'), ((5035, 5059), 'numpy.reshape', 'np.reshape', (['z', 'x_b.shape'], {}), '(z, x_b.shape)\n', (5045, 5059), True, 'import numpy as np\n'), ((5283, 5307), 'numpy.reshape', 'np.reshape', (['z', 'x_b.shape'], {}), '(z, x_b.shape)\n', (5293, 5307), True, 'import numpy as np\n'), ((8774, 8783), 'numpy.log', 'np.log', (['z'], {}), '(z)\n', (8780, 8783), True, 'import numpy as np\n'), ((4725, 4742), 'pandas.to_datetime', 'pd.to_datetime', (['e'], {}), '(e)\n', (4739, 4742), True, 'import pandas as pd\n'), ((4830, 4847), 'pandas.to_datetime', 'pd.to_datetime', (['e'], {}), '(e)\n', (4844, 4847), True, 'import pandas as pd\n'), ((7522, 7539), 'pandas.to_datetime', 'pd.to_datetime', (['e'], {}), '(e)\n', (7536, 7539), True, 'import pandas as pd\n'), ((8125, 8142), 'pandas.to_datetime', 'pd.to_datetime', (['e'], {}), '(e)\n', (8139, 8142), True, 'import pandas as pd\n'), ((8928, 8945), 'pandas.to_datetime', 'pd.to_datetime', (['e'], {}), '(e)\n', (8942, 8945), True, 'import pandas as pd\n'), ((3357, 3374), 'pandas.to_datetime', 'pd.to_datetime', (['e'], {}), '(e)\n', (3371, 3374), True, 'import pandas as pd\n'), ((6699, 6720), 'pandas.to_timedelta', 'pd.to_timedelta', (['"""1s"""'], {}), "('1s')\n", (6714, 6720), True, 'import pandas as pd\n'), ((6981, 7002), 'pandas.to_timedelta', 'pd.to_timedelta', (['"""1s"""'], {}), "('1s')\n", (6996, 7002), True, 'import pandas as pd\n'), ((6669, 6697), 'pandas.to_datetime', 'pd.to_datetime', (['"""2016-01-04"""'], {}), "('2016-01-04')\n", (6683, 6697), True, 'import pandas as pd\n'), ((6951, 6979), 'pandas.to_datetime', 'pd.to_datetime', (['"""2016-01-04"""'], {}), "('2016-01-04')\n", (6965, 6979), True, 'import pandas as pd\n')] |
import unittest
import tests.helpers.util as util
from tetris.logic.board.Board import Board
from tetris.logic.tetromino.Tetromino import Tetromino
from tetris.util.containers import Dimension
class BoardTest(unittest.TestCase):
    """Unit tests for Board: emptiness, tetromino placement, move
    validation against walls and occupied cells, and full-row removal.

    Board layouts are given as strings, '#' = occupied and '.' = empty.
    Move strings are passed to util.control_tetromino; presumably
    W/A/S/D map to rotate/left/down/right -- confirm in tests.helpers.util.
    """
    def setUp(self):
        # A fresh 5-wide, 4-tall board for every test case.
        self.board = Board(Dimension(5, 4))
    def test_new_board_is_empty(self):
        """A freshly constructed board has no occupied cells."""
        util.check_board_state(self, self.board, [
            ".....",
            ".....",
            ".....",
            "....."
        ])
    def test_add_tetrominoes(self):
        """Adding the fixture pieces produces the expected layout."""
        self.add_test_data()
        util.check_board_state(self, self.board, [
            ".....",
            "....#",
            ".#.##",
            ".####"
        ])
    def test_moving_tetromino_inside_empty_board(self):
        """Moves that stay inside an empty board all succeed."""
        tetromino = Tetromino(0, self.board)
        self.assertTrue(util.control_tetromino(tetromino, "DADDDSAADSDAD"))
    def test_moving_tetromino_outside_board_box(self):
        """Moves past the left, right or bottom edge are rejected."""
        tetromino = Tetromino(0, self.board)
        self.assertFalse(util.control_tetromino(tetromino, "A"))
        self.assertFalse(util.control_tetromino(tetromino, "DDDD"))
        self.assertFalse(util.control_tetromino(tetromino, "SSS"))
    def test_invalid_tetromino_move_inside_used_board(self):
        """Moves into already-occupied cells are rejected."""
        self.add_test_data()
        tetromino = Tetromino(0, self.board)
        self.assertFalse(util.control_tetromino(tetromino, "S"))
        self.assertFalse(util.control_tetromino(tetromino, "DDD"))
    def test_remove_one_full_row(self):
        """A single completed row is removed and rows above shift down."""
        self.add_test_data()
        # No row is full yet, so removal must leave the board unchanged.
        self.board.remove_full_rows(0, 4)
        util.check_board_state(self, self.board, [
            ".....",
            "....#",
            ".#.##",
            ".####"
        ])
        # Piece 1 completes the bottom row.
        tetromino = Tetromino(1, self.board)
        self.board.add_tetromino(tetromino)
        util.check_board_state(self, self.board, [
            "#....",
            "#...#",
            "##.##",
            "#####"
        ])
        # The full bottom row disappears; everything above shifts down.
        self.board.remove_full_rows(0, 4)
        util.check_board_state(self, self.board, [
            ".....",
            "#....",
            "#...#",
            "##.##"
        ])
    def test_remove_multiple_full_rows(self):
        """Several completed rows are removed in a single pass."""
        self.add_test_data()
        self.board.remove_full_rows(0, 4)
        tetromino = Tetromino(6, self.board)
        util.control_tetromino(tetromino, "DS")
        self.board.add_tetromino(tetromino)
        tetromino = Tetromino(1, self.board)
        self.board.add_tetromino(tetromino)
        self.board.remove_full_rows(0, 4)
        util.check_board_state(self, self.board, [
            ".....",
            ".....",
            ".....",
            "#...."
        ])
    def add_test_data(self):
        """Place two fixture pieces so the board matches the diagram below."""
        # .....
        # ....#
        # .#.##
        # .####
        tetromino = Tetromino(3, self.board)
        util.control_tetromino(tetromino, "WWWDSS")
        self.board.add_tetromino(tetromino)
        tetromino = Tetromino(6, self.board)
        util.control_tetromino(tetromino, "WWDDWDS")
        self.board.add_tetromino(tetromino)
| [
"tests.helpers.util.check_board_state",
"tetris.logic.tetromino.Tetromino.Tetromino",
"tests.helpers.util.control_tetromino",
"tetris.util.containers.Dimension"
] | [((344, 422), 'tests.helpers.util.check_board_state', 'util.check_board_state', (['self', 'self.board', "['.....', '.....', '.....', '.....']"], {}), "(self, self.board, ['.....', '.....', '.....', '.....'])\n", (366, 422), True, 'import tests.helpers.util as util\n'), ((555, 633), 'tests.helpers.util.check_board_state', 'util.check_board_state', (['self', 'self.board', "['.....', '....#', '.#.##', '.####']"], {}), "(self, self.board, ['.....', '....#', '.#.##', '.####'])\n", (577, 633), True, 'import tests.helpers.util as util\n'), ((769, 793), 'tetris.logic.tetromino.Tetromino.Tetromino', 'Tetromino', (['(0)', 'self.board'], {}), '(0, self.board)\n', (778, 793), False, 'from tetris.logic.tetromino.Tetromino import Tetromino\n'), ((946, 970), 'tetris.logic.tetromino.Tetromino.Tetromino', 'Tetromino', (['(0)', 'self.board'], {}), '(0, self.board)\n', (955, 970), False, 'from tetris.logic.tetromino.Tetromino import Tetromino\n'), ((1282, 1306), 'tetris.logic.tetromino.Tetromino.Tetromino', 'Tetromino', (['(0)', 'self.board'], {}), '(0, self.board)\n', (1291, 1306), False, 'from tetris.logic.tetromino.Tetromino import Tetromino\n'), ((1559, 1637), 'tests.helpers.util.check_board_state', 'util.check_board_state', (['self', 'self.board', "['.....', '....#', '.#.##', '.####']"], {}), "(self, self.board, ['.....', '....#', '.#.##', '.####'])\n", (1581, 1637), True, 'import tests.helpers.util as util\n'), ((1716, 1740), 'tetris.logic.tetromino.Tetromino.Tetromino', 'Tetromino', (['(1)', 'self.board'], {}), '(1, self.board)\n', (1725, 1740), False, 'from tetris.logic.tetromino.Tetromino import Tetromino\n'), ((1793, 1871), 'tests.helpers.util.check_board_state', 'util.check_board_state', (['self', 'self.board', "['#....', '#...#', '##.##', '#####']"], {}), "(self, self.board, ['#....', '#...#', '##.##', '#####'])\n", (1815, 1871), True, 'import tests.helpers.util as util\n'), ((1980, 2058), 'tests.helpers.util.check_board_state', 'util.check_board_state', (['self', 
'self.board', "['.....', '#....', '#...#', '##.##']"], {}), "(self, self.board, ['.....', '#....', '#...#', '##.##'])\n", (2002, 2058), True, 'import tests.helpers.util as util\n'), ((2255, 2279), 'tetris.logic.tetromino.Tetromino.Tetromino', 'Tetromino', (['(6)', 'self.board'], {}), '(6, self.board)\n', (2264, 2279), False, 'from tetris.logic.tetromino.Tetromino import Tetromino\n'), ((2288, 2327), 'tests.helpers.util.control_tetromino', 'util.control_tetromino', (['tetromino', '"""DS"""'], {}), "(tetromino, 'DS')\n", (2310, 2327), True, 'import tests.helpers.util as util\n'), ((2392, 2416), 'tetris.logic.tetromino.Tetromino.Tetromino', 'Tetromino', (['(1)', 'self.board'], {}), '(1, self.board)\n', (2401, 2416), False, 'from tetris.logic.tetromino.Tetromino import Tetromino\n'), ((2511, 2589), 'tests.helpers.util.check_board_state', 'util.check_board_state', (['self', 'self.board', "['.....', '.....', '.....', '#....']"], {}), "(self, self.board, ['.....', '.....', '.....', '#....'])\n", (2533, 2589), True, 'import tests.helpers.util as util\n'), ((2770, 2794), 'tetris.logic.tetromino.Tetromino.Tetromino', 'Tetromino', (['(3)', 'self.board'], {}), '(3, self.board)\n', (2779, 2794), False, 'from tetris.logic.tetromino.Tetromino import Tetromino\n'), ((2803, 2846), 'tests.helpers.util.control_tetromino', 'util.control_tetromino', (['tetromino', '"""WWWDSS"""'], {}), "(tetromino, 'WWWDSS')\n", (2825, 2846), True, 'import tests.helpers.util as util\n'), ((2911, 2935), 'tetris.logic.tetromino.Tetromino.Tetromino', 'Tetromino', (['(6)', 'self.board'], {}), '(6, self.board)\n', (2920, 2935), False, 'from tetris.logic.tetromino.Tetromino import Tetromino\n'), ((2944, 2988), 'tests.helpers.util.control_tetromino', 'util.control_tetromino', (['tetromino', '"""WWDDWDS"""'], {}), "(tetromino, 'WWDDWDS')\n", (2966, 2988), True, 'import tests.helpers.util as util\n'), ((279, 294), 'tetris.util.containers.Dimension', 'Dimension', (['(5)', '(4)'], {}), '(5, 4)\n', (288, 294), 
False, 'from tetris.util.containers import Dimension\n'), ((818, 868), 'tests.helpers.util.control_tetromino', 'util.control_tetromino', (['tetromino', '"""DADDDSAADSDAD"""'], {}), "(tetromino, 'DADDDSAADSDAD')\n", (840, 868), True, 'import tests.helpers.util as util\n'), ((996, 1034), 'tests.helpers.util.control_tetromino', 'util.control_tetromino', (['tetromino', '"""A"""'], {}), "(tetromino, 'A')\n", (1018, 1034), True, 'import tests.helpers.util as util\n'), ((1061, 1102), 'tests.helpers.util.control_tetromino', 'util.control_tetromino', (['tetromino', '"""DDDD"""'], {}), "(tetromino, 'DDDD')\n", (1083, 1102), True, 'import tests.helpers.util as util\n'), ((1129, 1169), 'tests.helpers.util.control_tetromino', 'util.control_tetromino', (['tetromino', '"""SSS"""'], {}), "(tetromino, 'SSS')\n", (1151, 1169), True, 'import tests.helpers.util as util\n'), ((1332, 1370), 'tests.helpers.util.control_tetromino', 'util.control_tetromino', (['tetromino', '"""S"""'], {}), "(tetromino, 'S')\n", (1354, 1370), True, 'import tests.helpers.util as util\n'), ((1397, 1437), 'tests.helpers.util.control_tetromino', 'util.control_tetromino', (['tetromino', '"""DDD"""'], {}), "(tetromino, 'DDD')\n", (1419, 1437), True, 'import tests.helpers.util as util\n')] |
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License.
from botbuilder.dialogs import (
ComponentDialog,
WaterfallDialog,
WaterfallStepContext,
OAuthPrompt,
OAuthPromptSettings,
)
class SsoSignInDialog(ComponentDialog):
    """Dialog that signs the user in via an OAuth prompt and echoes the token."""

    def __init__(self, connection_name: str):
        super().__init__(SsoSignInDialog.__name__)

        # OAuth prompt bound to the configured AAD connection.
        prompt_settings = OAuthPromptSettings(
            connection_name=connection_name,
            text=f"Sign in to the host bot using AAD for SSO and connection {connection_name}",
            title="Sign In",
            timeout=60000,
        )
        self.add_dialog(OAuthPrompt(OAuthPrompt.__name__, prompt_settings))

        # Two-step waterfall: run the prompt, then report the result.
        steps = [self.signin_step, self.display_token]
        self.add_dialog(WaterfallDialog(WaterfallDialog.__name__, steps))

        self.initial_dialog_id = WaterfallDialog.__name__

    async def signin_step(self, step_context: WaterfallStepContext):
        """Kick off the OAuth prompt."""
        return await step_context.begin_dialog(OAuthPrompt.__name__)

    async def display_token(self, step_context: WaterfallStepContext):
        """Report the token produced by the prompt, or the lack of one."""
        sso_token = step_context.result
        if not sso_token:
            await step_context.context.send_activity("No token was provided.")
            return await step_context.end_dialog()
        # The prompt may hand back either a dict or a token object.
        token = sso_token.get("token") if isinstance(sso_token, dict) else sso_token.token
        await step_context.context.send_activity(f"Here is your token: {token}")
        return await step_context.end_dialog()
| [
"botbuilder.dialogs.OAuthPromptSettings",
"botbuilder.dialogs.WaterfallDialog"
] | [((815, 901), 'botbuilder.dialogs.WaterfallDialog', 'WaterfallDialog', (['WaterfallDialog.__name__', '[self.signin_step, self.display_token]'], {}), '(WaterfallDialog.__name__, [self.signin_step, self.\n display_token])\n', (830, 901), False, 'from botbuilder.dialogs import ComponentDialog, WaterfallDialog, WaterfallStepContext, OAuthPrompt, OAuthPromptSettings\n'), ((484, 662), 'botbuilder.dialogs.OAuthPromptSettings', 'OAuthPromptSettings', ([], {'connection_name': 'connection_name', 'text': 'f"""Sign in to the host bot using AAD for SSO and connection {connection_name}"""', 'title': '"""Sign In"""', 'timeout': '(60000)'}), "(connection_name=connection_name, text=\n f'Sign in to the host bot using AAD for SSO and connection {connection_name}'\n , title='Sign In', timeout=60000)\n", (503, 662), False, 'from botbuilder.dialogs import ComponentDialog, WaterfallDialog, WaterfallStepContext, OAuthPrompt, OAuthPromptSettings\n')] |
# Production Django settings for the "instamarket" deployment.
from pathlib import Path
import os
import dj_database_url  # NOTE(review): imported but unused in this file -- confirm before removing
# Project root: two levels above this settings file.
BASE_DIR = Path(__file__).resolve(strict=True).parent.parent
# Production: debugging off; only the public domains may serve requests.
DEBUG = False
ALLOWED_HOSTS = ['khafonline.com','www.khafonline.com']
MYSQL=True
DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.mysql',
        'OPTIONS': {
            # Credentials are kept in an option file, out of version control.
            'read_default_file': os.path.join(os.path.join(BASE_DIR, 'instamarket'),'secret_my_sql.cnf'),
        },
    }
}
TIME_ZONE = 'Asia/Tehran'
# The whole site is served under the /instamarket/ URL prefix.
SITE_URL='/instamarket/'
ADMIN_URL=SITE_URL+'admin/'
STATIC_URL = SITE_URL+'static/'
STATIC_ROOT = '/home/khafonli/public_html/instamarket/staticfiles/'
MEDIA_URL = SITE_URL+'media/'
MEDIA_ROOT = '/home/khafonli/public_html/instamarket/media/'
STATICFILES_DIRS=['/home/khafonli/instamarket/static/']
# Feature flags.
PUSHER_IS_ENABLE=True
REMOTE_MEDIA=False
COMING_SOON=False
DOWNLOAD_ROOT=os.path.join(BASE_DIR,'download')
"os.path.join",
"pathlib.Path"
] | [((866, 900), 'os.path.join', 'os.path.join', (['BASE_DIR', '"""download"""'], {}), "(BASE_DIR, 'download')\n", (878, 900), False, 'import os\n'), ((70, 84), 'pathlib.Path', 'Path', (['__file__'], {}), '(__file__)\n', (74, 84), False, 'from pathlib import Path\n'), ((367, 404), 'os.path.join', 'os.path.join', (['BASE_DIR', '"""instamarket"""'], {}), "(BASE_DIR, 'instamarket')\n", (379, 404), False, 'import os\n')] |
import os
import random
import datetime
import argparse
import numpy as np
from tqdm import tqdm
from model.unetdsbn import Unet2D
from utils.loss import dice_loss1
from datasets.dataset import Dataset, ToTensor, CreateOnehotLabel
import torch
import torchvision.transforms as tfs
from torch import optim
from torch.optim import Adam
from torch.backends import cudnn
from torch.nn import DataParallel
from torch.utils.data import DataLoader
# Command-line interface for Dual Normalization U-Net training.
parser = argparse.ArgumentParser('Dual Normalization U-Net Training')
parser.add_argument('--data_dir', type=str, default='./data/brats/npz_data')  # root of the .npz dataset
parser.add_argument('--train_domain_list_1', nargs='+')  # domains routed to DSBN branch 0
parser.add_argument('--train_domain_list_2', nargs='+')  # domains routed to DSBN branch 1
parser.add_argument('--result_dir', type=str, default='./results/unet_dn')  # where logs/checkpoints go
parser.add_argument('--n_classes', type=int, default=2)
parser.add_argument('--batch_size', type=int, default=16)
parser.add_argument('--n_epochs', type=int, default=50)
parser.add_argument('--save_step', type=int, default=10)  # checkpoint every N epochs
parser.add_argument('--lr', type=float, default=0.001)
parser.add_argument('--seed', type=int, default=1337, help='random seed')
parser.add_argument('--gpu_ids', type=str, default='0')  # CUDA_VISIBLE_DEVICES value
parser.add_argument('--deterministic', dest='deterministic', action='store_true')
# NOTE: parsed at import time -- importing this module requires valid CLI args.
args = parser.parse_args()
def repeat_dataloader(iterable):
    """Yield items from *iterable* endlessly, starting a fresh pass each
    time it is exhausted (so a DataLoader re-shuffles every epoch)."""
    while True:
        yield from iterable
def worker_init_fn(worker_id):
    """Give each DataLoader worker its own deterministic Python RNG seed."""
    worker_seed = args.seed + worker_id
    random.seed(worker_seed)
if __name__== '__main__':
    # --- Dual Normalization U-Net training entry point ---
    start_time = datetime.datetime.now()
    os.environ['CUDA_VISIBLE_DEVICES'] = args.gpu_ids
    # Unpack CLI options into locals.
    base_dir = args.data_dir
    batch_size = args.batch_size
    save_step = args.save_step
    lr = args.lr
    train_domain_list_1 = args.train_domain_list_1
    train_domain_list_2 = args.train_domain_list_2
    max_epoch = args.n_epochs
    result_dir = args.result_dir
    n_classes = args.n_classes
    log_dir = os.path.join(result_dir, 'log')  # NOTE(review): currently unused
    model_dir = os.path.join(result_dir, 'model')
    if args.deterministic:
        # Make cuDNN and every RNG reproducible (at some speed cost).
        cudnn.benchmark = False
        cudnn.deterministic = True
        random.seed(args.seed)
        np.random.seed(args.seed)
        torch.manual_seed(args.seed)
        torch.cuda.manual_seed(args.seed)
    if not os.path.exists(model_dir):
        os.makedirs(model_dir)
    dataloader_train = []
    # Two-domain DSBN U-Net: one batch-norm branch per domain list.
    model = Unet2D(num_classes=n_classes, norm='dsbn', num_domains=2)
    params_num = sum(p.numel() for p in model.parameters())
    # Fix: "Modle's" typo in the log message.
    print("\nModel's Params: %.3fM" % (params_num / 1e6))
    model = DataParallel(model).cuda()
    optimizer = Adam(params=model.parameters(), lr=lr, betas=(0.9, 0.999))
    exp_lr = optim.lr_scheduler.ExponentialLR(optimizer, gamma=0.99)
    # One dataset/loader per domain group; both share batching settings.
    dataset_1 = Dataset(base_dir=base_dir, split='train', domain_list=train_domain_list_1,
                        transforms=tfs.Compose([
                            CreateOnehotLabel(num_classes=n_classes),
                            ToTensor()
                        ]))
    dataloader_1 = DataLoader(dataset_1, batch_size=batch_size, shuffle=True, num_workers=8, pin_memory=True, drop_last=True, worker_init_fn=worker_init_fn)
    dataloader_train.append(dataloader_1)
    dataset_2 = Dataset(base_dir=base_dir, split='train', domain_list=train_domain_list_2,
                        transforms=tfs.Compose([
                            CreateOnehotLabel(num_classes=n_classes),
                            ToTensor()
                        ]))
    dataloader_2 = DataLoader(dataset_2, batch_size=batch_size, shuffle=True, num_workers=8, pin_memory=True, drop_last=True, worker_init_fn=worker_init_fn)
    dataloader_train.append(dataloader_2)
    for epoch_num in range(max_epoch):
        # Endless iterators so domain-2 batches can be drawn alongside the
        # domain-1 loader that drives the epoch length.
        # (Renamed the comprehension variable so it no longer shadows the
        # batch-loop index ``i`` below.)
        data_iter = [repeat_dataloader(dataloader_train[di]) for di in range(2)]
        print('Epoch: {}, LR: {}'.format(epoch_num, round(exp_lr.get_last_lr()[0], 6)))
        tbar = tqdm(dataloader_train[0], ncols=150)
        model.train()
        for i, batch in enumerate(tbar):
            ### get all domains' sample_batch ###
            sample_batches = [batch]
            other_sample_batches = [next(data_iter[di]) for di in range(1, 2)]
            sample_batches += other_sample_batches
            total_loss = 0
            count = 0
            for train_idx in range(2):
                count += 1
                sample_data, sample_label = sample_batches[train_idx]['image'].cuda(), sample_batches[train_idx]['onehot_label'].cuda()
                # Route the batch through the BN branch of its own domain.
                outputs_soft = model(sample_data, domain_label=train_idx*torch.ones(sample_data.shape[0], dtype=torch.long))
                loss = dice_loss1(outputs_soft, sample_label)
                total_loss += loss.item()
                optimizer.zero_grad()
                loss.backward()
                optimizer.step()
            tbar.set_description('Total Loss: {}'.format(round((total_loss / count), 6)))
        exp_lr.step()
        # Periodic checkpoint every ``save_step`` epochs.
        if (epoch_num + 1) % save_step == 0:
            model_save_model_path = os.path.join(model_dir, 'epoch_{}.pth'.format(epoch_num))
            torch.save(model.module.state_dict(), model_save_model_path)
            print('save model to {}'.format(model_save_model_path))
    # Final checkpoint.  Fix: the filename had a no-op .format() call.
    model_save_model_path = os.path.join(model_dir, 'final_model.pth')
    torch.save(model.module.state_dict(), model_save_model_path)
    print('save model to {}'.format(model_save_model_path))
    end_time = datetime.datetime.now()
    # Fix: use total_seconds() -- timedelta.seconds drops whole days.
    print('Finish running. Cost total time: {} hours'.format((end_time - start_time).total_seconds() / 3600))
"torch.manual_seed",
"os.path.exists",
"utils.loss.dice_loss1",
"argparse.ArgumentParser",
"torch.optim.lr_scheduler.ExponentialLR",
"os.makedirs",
"datasets.dataset.CreateOnehotLabel",
"tqdm.tqdm",
"os.path.join",
"torch.nn.DataParallel",
"random.seed",
"datasets.dataset.ToTensor",
"datetim... | [((454, 514), 'argparse.ArgumentParser', 'argparse.ArgumentParser', (['"""Dual Normalization U-Net Training"""'], {}), "('Dual Normalization U-Net Training')\n", (477, 514), False, 'import argparse\n'), ((1464, 1498), 'random.seed', 'random.seed', (['(args.seed + worker_id)'], {}), '(args.seed + worker_id)\n', (1475, 1498), False, 'import random\n'), ((1541, 1564), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (1562, 1564), False, 'import datetime\n'), ((1939, 1970), 'os.path.join', 'os.path.join', (['result_dir', '"""log"""'], {}), "(result_dir, 'log')\n", (1951, 1970), False, 'import os\n'), ((1987, 2020), 'os.path.join', 'os.path.join', (['result_dir', '"""model"""'], {}), "(result_dir, 'model')\n", (1999, 2020), False, 'import os\n'), ((2373, 2430), 'model.unetdsbn.Unet2D', 'Unet2D', ([], {'num_classes': 'n_classes', 'norm': '"""dsbn"""', 'num_domains': '(2)'}), "(num_classes=n_classes, norm='dsbn', num_domains=2)\n", (2379, 2430), False, 'from model.unetdsbn import Unet2D\n'), ((2678, 2733), 'torch.optim.lr_scheduler.ExponentialLR', 'optim.lr_scheduler.ExponentialLR', (['optimizer'], {'gamma': '(0.99)'}), '(optimizer, gamma=0.99)\n', (2710, 2733), False, 'from torch import optim\n'), ((3032, 3173), 'torch.utils.data.DataLoader', 'DataLoader', (['dataset_1'], {'batch_size': 'batch_size', 'shuffle': '(True)', 'num_workers': '(8)', 'pin_memory': '(True)', 'drop_last': '(True)', 'worker_init_fn': 'worker_init_fn'}), '(dataset_1, batch_size=batch_size, shuffle=True, num_workers=8,\n pin_memory=True, drop_last=True, worker_init_fn=worker_init_fn)\n', (3042, 3173), False, 'from torch.utils.data import DataLoader\n'), ((3509, 3650), 'torch.utils.data.DataLoader', 'DataLoader', (['dataset_2'], {'batch_size': 'batch_size', 'shuffle': '(True)', 'num_workers': '(8)', 'pin_memory': '(True)', 'drop_last': '(True)', 'worker_init_fn': 'worker_init_fn'}), '(dataset_2, batch_size=batch_size, shuffle=True, num_workers=8,\n pin_memory=True, 
drop_last=True, worker_init_fn=worker_init_fn)\n', (3519, 3650), False, 'from torch.utils.data import DataLoader\n'), ((5474, 5497), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (5495, 5497), False, 'import datetime\n'), ((2124, 2146), 'random.seed', 'random.seed', (['args.seed'], {}), '(args.seed)\n', (2135, 2146), False, 'import random\n'), ((2155, 2180), 'numpy.random.seed', 'np.random.seed', (['args.seed'], {}), '(args.seed)\n', (2169, 2180), True, 'import numpy as np\n'), ((2189, 2217), 'torch.manual_seed', 'torch.manual_seed', (['args.seed'], {}), '(args.seed)\n', (2206, 2217), False, 'import torch\n'), ((2226, 2259), 'torch.cuda.manual_seed', 'torch.cuda.manual_seed', (['args.seed'], {}), '(args.seed)\n', (2248, 2259), False, 'import torch\n'), ((2276, 2301), 'os.path.exists', 'os.path.exists', (['model_dir'], {}), '(model_dir)\n', (2290, 2301), False, 'import os\n'), ((2311, 2333), 'os.makedirs', 'os.makedirs', (['model_dir'], {}), '(model_dir)\n', (2322, 2333), False, 'import os\n'), ((3915, 3951), 'tqdm.tqdm', 'tqdm', (['dataloader_train[0]'], {'ncols': '(150)'}), '(dataloader_train[0], ncols=150)\n', (3919, 3951), False, 'from tqdm import tqdm\n'), ((2561, 2580), 'torch.nn.DataParallel', 'DataParallel', (['model'], {}), '(model)\n', (2573, 2580), False, 'from torch.nn import DataParallel\n'), ((4632, 4670), 'utils.loss.dice_loss1', 'dice_loss1', (['outputs_soft', 'sample_label'], {}), '(outputs_soft, sample_label)\n', (4642, 4670), False, 'from utils.loss import dice_loss1\n'), ((2904, 2944), 'datasets.dataset.CreateOnehotLabel', 'CreateOnehotLabel', ([], {'num_classes': 'n_classes'}), '(num_classes=n_classes)\n', (2921, 2944), False, 'from datasets.dataset import Dataset, ToTensor, CreateOnehotLabel\n'), ((2974, 2984), 'datasets.dataset.ToTensor', 'ToTensor', ([], {}), '()\n', (2982, 2984), False, 'from datasets.dataset import Dataset, ToTensor, CreateOnehotLabel\n'), ((3381, 3421), 'datasets.dataset.CreateOnehotLabel', 
'CreateOnehotLabel', ([], {'num_classes': 'n_classes'}), '(num_classes=n_classes)\n', (3398, 3421), False, 'from datasets.dataset import Dataset, ToTensor, CreateOnehotLabel\n'), ((3451, 3461), 'datasets.dataset.ToTensor', 'ToTensor', ([], {}), '()\n', (3459, 3461), False, 'from datasets.dataset import Dataset, ToTensor, CreateOnehotLabel\n'), ((4557, 4607), 'torch.ones', 'torch.ones', (['sample_data.shape[0]'], {'dtype': 'torch.long'}), '(sample_data.shape[0], dtype=torch.long)\n', (4567, 4607), False, 'import torch\n')] |
# Copyright 2021 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
import numpy as np
import pytest
import mindspore.dataset as ds
import mindspore.dataset.audio.transforms as audio
from mindspore import log as logger
from mindspore.dataset.audio.utils import Modulation, Interpolation
def count_unequal_element(data_expected, data_me, rtol, atol):
    """Assert that *data_me* matches *data_expected* element-wise.

    An element is a mismatch when |expected - actual| exceeds
    atol + rtol * |expected|.  The check passes while the fraction of
    mismatching elements stays below *rtol*; otherwise an AssertionError
    reports the offending values.
    """
    assert data_expected.shape == data_me.shape
    abs_diff = np.abs(data_expected - data_me)
    tolerance = atol + np.abs(data_expected) * rtol
    mismatch = abs_diff > tolerance
    n_bad = np.count_nonzero(mismatch)
    n_total = data_expected.size
    assert (n_bad / n_total) < rtol, "\ndata_expected_std:{0}\ndata_me_error:{1}\nloss:{2}".format(
        data_expected[mismatch], data_me[mismatch], abs_diff[mismatch])
def test_flanger_eager_sinusoidal_linear_float64():
    """Eager mode: Flanger with sinusoidal modulation / linear interpolation on float64 input."""
    source = np.array([[0.1, 0.2, 0.3], [0.4, 0.5, 0.6]], dtype=np.float64)
    expected = np.array([[0.10000000000, 0.19999999536, 0.29999998145],
                         [0.23391812865, 0.29239766081, 0.35087719298]], dtype=np.float64)
    flanger = audio.Flanger(44100, 0.0, 2.0, 0.0, 71.0, 0.5, 25.0, Modulation.SINUSOIDAL, Interpolation.LINEAR)
    # Apply the op and compare against the precomputed reference waveform.
    count_unequal_element(expected, flanger(source), 0.0001, 0.0001)
def test_flanger_eager_triangular_linear_float32():
    """Eager mode: Flanger with triangular modulation / linear interpolation on float32 input."""
    source = np.array([[-1.2, 2, -3.6], [1, 2.4, 3.7]], dtype=np.float32)
    # Out-of-range samples are clamped to [-1, 1] by the op.
    expected = np.array([[-1.0000000000, 1.0000000000, -1.0000000000],
                         [0.58479529619, 1.0000000000, 1.0000000000]], dtype=np.float32)
    flanger = audio.Flanger(44100, 0.0, 2.0, 0.0, 71.0, 0.5, 25.0, Modulation.TRIANGULAR, Interpolation.LINEAR)
    count_unequal_element(expected, flanger(source), 0.0001, 0.0001)
def test_flanger_eager_triangular_linear_int():
    """Eager mode: Flanger with triangular modulation / linear interpolation on integer input."""
    source = np.array([[-2, -3, 0], [2, 2, 3]], dtype=np.int)
    # Integer samples saturate to the [-1, 1] range after processing.
    expected = np.array([[-1, -1, 0],
                         [1, 1, 1]], dtype=np.int)
    flanger = audio.Flanger(44100, 0.0, 2.0, 0.0, 71.0, 0.5, 25.0, Modulation.TRIANGULAR, Interpolation.LINEAR)
    count_unequal_element(expected, flanger(source), 0.0001, 0.0001)
def test_flanger_shape_221():
    """Eager mode: Flanger (default parameters) on a rank-3 input of shape (2, 2, 1)."""
    source = np.array([[[1], [1.1]], [[0.9], [0.6]]], dtype=np.float64)
    expected = np.array([[[1.00000000],
                          [0.64327485]],
                         [[0.90000000],
                          [0.35087719]]], dtype=np.float64)
    flanger = audio.Flanger(44100)
    count_unequal_element(expected, flanger(source), 0.0001, 0.0001)
def test_flanger_shape_11211():
    """Eager mode: Flanger (default parameters) on a rank-5 input of shape (1, 1, 2, 1, 1)."""
    source = np.array([[[[[0.44]], [[0.55]]]]], dtype=np.float64)
    # With default parameters the tiny waveform passes through unchanged.
    expected = np.array([[[[[0.44000000]], [[0.55000000]]]]], dtype=np.float64)
    flanger = audio.Flanger(44100)
    count_unequal_element(expected, flanger(source), 0.0001, 0.0001)
def test_flanger_pipeline():
    """Pipeline mode: Flanger applied through dataset.map on the 'channel' column."""
    source = np.array([[[1.1, 1.2, 1.3], [1.4, 1.5, 1.6]]], dtype=np.float64)
    expected = np.array([[[1.00000000000, 1.00000000000, 1.00000000000],
                          [0.81871345029, 0.87719298245, 0.93567251461]]], dtype=np.float64)
    # Second column is filler data; only "channel" goes through the op.
    dataset = ds.NumpySlicesDataset((source, np.random.sample((1, 2, 1))),
                                  ["channel", "sample"], shuffle=False)
    dataset = dataset.map(
        input_columns=["channel"], operations=audio.Flanger(44100), num_parallel_workers=1)
    for idx, row in enumerate(dataset.create_dict_iterator(num_epochs=1, output_numpy=True)):
        count_unequal_element(expected[idx, :],
                              row['channel'], 0.0001, 0.0001)
def test_invalid_flanger_input():
    """Verify Flanger argument validation: each malformed argument must raise
    the expected exception type with the expected message."""
    def test_invalid_input(test_name, sample_rate, delay, depth, regen, width, speed, phase, modulation, interpolation,
                           error, error_msg):
        # Build a Flanger with one deliberately bad argument and assert both
        # the raised exception class and a substring of its message.
        logger.info("Test Flanger with bad input: {0}".format(test_name))
        with pytest.raises(error) as error_info:
            audio.Flanger(sample_rate, delay, depth, regen, width, speed, phase, modulation, interpolation)
        assert error_msg in str(error_info.value)
    # sample_rate: zero value, wrong numeric type, wrong type entirely
    test_invalid_input("invalid sample_rate parameter value", 0, 0.0, 2.0, 0.0, 71.0, 0.5, 25.0,
                       Modulation.SINUSOIDAL, Interpolation.LINEAR, ValueError,
                       "Input sample_rate is not within the required interval of [-2147483648, 0) and (0, 2147483647].")
    test_invalid_input("invalid sample_rate parameter type as a float", 44100.5, 0.0, 2.0, 0.0, 71.0, 0.5, 25.0,
                       Modulation.SINUSOIDAL, Interpolation.LINEAR, TypeError,
                       "Argument sample_rate with value 44100.5 is not of "
                       "type [<class 'int'>], but got <class 'float'>.")
    test_invalid_input("invalid sample_rate parameter type as a String", "44100", 0.0, 2.0, 0.0, 71.0, 0.5, 25.0,
                       Modulation.SINUSOIDAL, Interpolation.LINEAR, TypeError,
                       "Argument sample_rate with value 44100 is not of "
                       "type [<class 'int'>], but got <class 'str'>.")
    # delay: wrong type, out of [0, 30]
    test_invalid_input("invalid delay parameter type as a String", 44100, "0.0", 2.0, 0.0, 71.0, 0.5, 25.0,
                       Modulation.SINUSOIDAL, Interpolation.LINEAR, TypeError,
                       "Argument delay with value 0.0 is not of type [<class 'float'>, <class 'int'>],"
                       " but got <class 'str'>.")
    test_invalid_input("invalid delay parameter value", 44100, 50, 2.0, 0.0, 71.0, 0.5, 25.0,
                       Modulation.SINUSOIDAL, Interpolation.LINEAR, ValueError,
                       "Input delay is not within the required interval of [0, 30].")
    # depth: wrong type, out of [0, 10]
    test_invalid_input("invalid depth parameter type as a String", 44100, 0.0, "2.0", 0.0, 71.0, 0.5, 25.0,
                       Modulation.SINUSOIDAL, Interpolation.LINEAR, TypeError,
                       "Argument depth with value 2.0 is not of type [<class 'float'>, <class 'int'>],"
                       " but got <class 'str'>.")
    test_invalid_input("invalid depth parameter value", 44100, 0.0, 50.0, 0.0, 71.0, 0.5, 25.0,
                       Modulation.SINUSOIDAL, Interpolation.LINEAR, ValueError,
                       "Input depth is not within the required interval of [0, 10].")
    # regen: wrong type, out of [-95, 95]
    test_invalid_input("invalid regen parameter type as a String", 44100, 0.0, 2.0, "0.0", 71.0, 0.5, 25.0,
                       Modulation.SINUSOIDAL, Interpolation.LINEAR, TypeError,
                       "Argument regen with value 0.0 is not of type [<class 'float'>, <class 'int'>],"
                       " but got <class 'str'>.")
    test_invalid_input("invalid regen parameter value", 44100, 0.0, 2.0, 100.0, 71.0, 0.5, 25.0,
                       Modulation.SINUSOIDAL, Interpolation.LINEAR, ValueError,
                       "Input regen is not within the required interval of [-95, 95].")
    # width: wrong type, out of [0, 100]
    test_invalid_input("invalid width parameter type as a String", 44100, 0.0, 2.0, 0.0, "71.0", 0.5, 25.0,
                       Modulation.SINUSOIDAL, Interpolation.LINEAR, TypeError,
                       "Argument width with value 71.0 is not of type [<class 'float'>, <class 'int'>],"
                       " but got <class 'str'>.")
    test_invalid_input("invalid width parameter value", 44100, 0.0, 2.0, 0.0, 150.0, 0.5, 25.0,
                       Modulation.SINUSOIDAL, Interpolation.LINEAR, ValueError,
                       "Input width is not within the required interval of [0, 100].")
    # speed: wrong type, out of [0.1, 10]
    test_invalid_input("invalid speed parameter type as a String", 44100, 0.0, 2.0, 0.0, 71.0, "0.5", 25.0,
                       Modulation.SINUSOIDAL, Interpolation.LINEAR, TypeError,
                       "Argument speed with value 0.5 is not of type [<class 'float'>, <class 'int'>],"
                       " but got <class 'str'>.")
    test_invalid_input("invalid speed parameter value", 44100, 0.0, 2.0, 0.0, 71.0, 50, 25.0,
                       Modulation.SINUSOIDAL, Interpolation.LINEAR, ValueError,
                       "Input speed is not within the required interval of [0.1, 10].")
    # phase: wrong type, out of [0, 100]
    test_invalid_input("invalid phase parameter type as a String", 44100, 0.0, 2.0, 0.0, 71.0, 0.5, "25.0",
                       Modulation.SINUSOIDAL, Interpolation.LINEAR, TypeError,
                       "Argument phase with value 25.0 is not of type [<class 'float'>, <class 'int'>],"
                       " but got <class 'str'>.")
    test_invalid_input("invalid phase parameter value", 44100, 0.0, 2.0, 0.0, 71.0, 0.5, 150.0,
                       Modulation.SINUSOIDAL, Interpolation.LINEAR, ValueError,
                       "Input phase is not within the required interval of [0, 100].")
    # modulation / interpolation: must be the proper enum types
    test_invalid_input("invalid modulation parameter value", 44100, 0.0, 2.0, 0.0, 71.0, 0.5, 25.0, "test",
                       Interpolation.LINEAR, TypeError,
                       "Argument modulation with value test is not of type [<enum 'Modulation'>], "
                       "but got <class 'str'>.")
    test_invalid_input("invalid modulation parameter value", 44100, 0.0, 2.0, 0.0, 71.0, 0.5, 25.0,
                       Modulation.SINUSOIDAL, "test", TypeError,
                       "Argument interpolation with value test is not of type [<enum 'Interpolation'>], "
                       "but got <class 'str'>.")
if __name__ == '__main__':
    # Run every test case in declaration order when executed as a script.
    _cases = (
        test_flanger_eager_sinusoidal_linear_float64,
        test_flanger_eager_triangular_linear_float32,
        test_flanger_eager_triangular_linear_int,
        test_flanger_shape_221,
        test_flanger_shape_11211,
        test_flanger_pipeline,
        test_invalid_flanger_input,
    )
    for _case in _cases:
        _case()
| [
"mindspore.dataset.audio.transforms.Flanger",
"numpy.abs",
"mindspore.dataset.NumpySlicesDataset",
"numpy.count_nonzero",
"numpy.array",
"numpy.random.sample",
"pytest.raises"
] | [((1061, 1092), 'numpy.abs', 'np.abs', (['(data_expected - data_me)'], {}), '(data_expected - data_me)\n', (1067, 1092), True, 'import numpy as np\n'), ((1179, 1204), 'numpy.count_nonzero', 'np.count_nonzero', (['greater'], {}), '(greater)\n', (1195, 1204), True, 'import numpy as np\n'), ((1532, 1594), 'numpy.array', 'np.array', (['[[0.1, 0.2, 0.3], [0.4, 0.5, 0.6]]'], {'dtype': 'np.float64'}), '([[0.1, 0.2, 0.3], [0.4, 0.5, 0.6]], dtype=np.float64)\n', (1540, 1594), True, 'import numpy as np\n'), ((1639, 1756), 'numpy.array', 'np.array', (['[[0.1, 0.19999999536, 0.29999998145], [0.23391812865, 0.29239766081, \n 0.35087719298]]'], {'dtype': 'np.float64'}), '([[0.1, 0.19999999536, 0.29999998145], [0.23391812865, \n 0.29239766081, 0.35087719298]], dtype=np.float64)\n', (1647, 1756), True, 'import numpy as np\n'), ((1811, 1912), 'mindspore.dataset.audio.transforms.Flanger', 'audio.Flanger', (['(44100)', '(0.0)', '(2.0)', '(0.0)', '(71.0)', '(0.5)', '(25.0)', 'Modulation.SINUSOIDAL', 'Interpolation.LINEAR'], {}), '(44100, 0.0, 2.0, 0.0, 71.0, 0.5, 25.0, Modulation.SINUSOIDAL,\n Interpolation.LINEAR)\n', (1824, 1912), True, 'import mindspore.dataset.audio.transforms as audio\n'), ((2197, 2257), 'numpy.array', 'np.array', (['[[-1.2, 2, -3.6], [1, 2.4, 3.7]]'], {'dtype': 'np.float32'}), '([[-1.2, 2, -3.6], [1, 2.4, 3.7]], dtype=np.float32)\n', (2205, 2257), True, 'import numpy as np\n'), ((2302, 2376), 'numpy.array', 'np.array', (['[[-1.0, 1.0, -1.0], [0.58479529619, 1.0, 1.0]]'], {'dtype': 'np.float32'}), '([[-1.0, 1.0, -1.0], [0.58479529619, 1.0, 1.0]], dtype=np.float32)\n', (2310, 2376), True, 'import numpy as np\n'), ((2471, 2572), 'mindspore.dataset.audio.transforms.Flanger', 'audio.Flanger', (['(44100)', '(0.0)', '(2.0)', '(0.0)', '(71.0)', '(0.5)', '(25.0)', 'Modulation.TRIANGULAR', 'Interpolation.LINEAR'], {}), '(44100, 0.0, 2.0, 0.0, 71.0, 0.5, 25.0, Modulation.TRIANGULAR,\n Interpolation.LINEAR)\n', (2484, 2572), True, 'import 
mindspore.dataset.audio.transforms as audio\n'), ((2853, 2901), 'numpy.array', 'np.array', (['[[-2, -3, 0], [2, 2, 3]]'], {'dtype': 'np.int'}), '([[-2, -3, 0], [2, 2, 3]], dtype=np.int)\n', (2861, 2901), True, 'import numpy as np\n'), ((2946, 2994), 'numpy.array', 'np.array', (['[[-1, -1, 0], [1, 1, 1]]'], {'dtype': 'np.int'}), '([[-1, -1, 0], [1, 1, 1]], dtype=np.int)\n', (2954, 2994), True, 'import numpy as np\n'), ((3044, 3145), 'mindspore.dataset.audio.transforms.Flanger', 'audio.Flanger', (['(44100)', '(0.0)', '(2.0)', '(0.0)', '(71.0)', '(0.5)', '(25.0)', 'Modulation.TRIANGULAR', 'Interpolation.LINEAR'], {}), '(44100, 0.0, 2.0, 0.0, 71.0, 0.5, 25.0, Modulation.TRIANGULAR,\n Interpolation.LINEAR)\n', (3057, 3145), True, 'import mindspore.dataset.audio.transforms as audio\n'), ((3408, 3466), 'numpy.array', 'np.array', (['[[[1], [1.1]], [[0.9], [0.6]]]'], {'dtype': 'np.float64'}), '([[[1], [1.1]], [[0.9], [0.6]]], dtype=np.float64)\n', (3416, 3466), True, 'import numpy as np\n'), ((3511, 3585), 'numpy.array', 'np.array', (['[[[1.0], [0.64327485]], [[0.9], [0.35087719]]]'], {'dtype': 'np.float64'}), '([[[1.0], [0.64327485]], [[0.9], [0.35087719]]], dtype=np.float64)\n', (3519, 3585), True, 'import numpy as np\n'), ((3717, 3737), 'mindspore.dataset.audio.transforms.Flanger', 'audio.Flanger', (['(44100)'], {}), '(44100)\n', (3730, 3737), True, 'import mindspore.dataset.audio.transforms as audio\n'), ((4006, 4058), 'numpy.array', 'np.array', (['[[[[[0.44]], [[0.55]]]]]'], {'dtype': 'np.float64'}), '([[[[[0.44]], [[0.55]]]]], dtype=np.float64)\n', (4014, 4058), True, 'import numpy as np\n'), ((4103, 4155), 'numpy.array', 'np.array', (['[[[[[0.44]], [[0.55]]]]]'], {'dtype': 'np.float64'}), '([[[[[0.44]], [[0.55]]]]], dtype=np.float64)\n', (4111, 4155), True, 'import numpy as np\n'), ((4186, 4206), 'mindspore.dataset.audio.transforms.Flanger', 'audio.Flanger', (['(44100)'], {}), '(44100)\n', (4199, 4206), True, 'import mindspore.dataset.audio.transforms as audio\n'), 
((4475, 4539), 'numpy.array', 'np.array', (['[[[1.1, 1.2, 1.3], [1.4, 1.5, 1.6]]]'], {'dtype': 'np.float64'}), '([[[1.1, 1.2, 1.3], [1.4, 1.5, 1.6]]], dtype=np.float64)\n', (4483, 4539), True, 'import numpy as np\n'), ((4584, 4682), 'numpy.array', 'np.array', (['[[[1.0, 1.0, 1.0], [0.81871345029, 0.87719298245, 0.93567251461]]]'], {'dtype': 'np.float64'}), '([[[1.0, 1.0, 1.0], [0.81871345029, 0.87719298245, 0.93567251461]]],\n dtype=np.float64)\n', (4592, 4682), True, 'import numpy as np\n'), ((4807, 4872), 'mindspore.dataset.NumpySlicesDataset', 'ds.NumpySlicesDataset', (['data', "['channel', 'sample']"], {'shuffle': '(False)'}), "(data, ['channel', 'sample'], shuffle=False)\n", (4828, 4872), True, 'import mindspore.dataset as ds\n'), ((4890, 4910), 'mindspore.dataset.audio.transforms.Flanger', 'audio.Flanger', (['(44100)'], {}), '(44100)\n', (4903, 4910), True, 'import mindspore.dataset.audio.transforms as audio\n'), ((4764, 4791), 'numpy.random.sample', 'np.random.sample', (['(1, 2, 1)'], {}), '((1, 2, 1))\n', (4780, 4791), True, 'import numpy as np\n'), ((5564, 5584), 'pytest.raises', 'pytest.raises', (['error'], {}), '(error)\n', (5577, 5584), False, 'import pytest\n'), ((5612, 5711), 'mindspore.dataset.audio.transforms.Flanger', 'audio.Flanger', (['sample_rate', 'delay', 'depth', 'regen', 'width', 'speed', 'phase', 'modulation', 'interpolation'], {}), '(sample_rate, delay, depth, regen, width, speed, phase,\n modulation, interpolation)\n', (5625, 5711), True, 'import mindspore.dataset.audio.transforms as audio\n'), ((1132, 1153), 'numpy.abs', 'np.abs', (['data_expected'], {}), '(data_expected)\n', (1138, 1153), True, 'import numpy as np\n')] |
# Generated by Django 2.0.4 on 2018-04-17 07:36
from django.db import migrations, models
class Migration(migrations.Migration):
    """Alter ``Exercise.exercise_slug`` to a unique ``SlugField``."""

    # Must run after the previous workouts migration.
    dependencies = [
        ('workouts', '0008_auto_20180417_0934'),
    ]

    operations = [
        migrations.AlterField(
            model_name='exercise',
            name='exercise_slug',
            # unique=True adds a DB-level uniqueness constraint on the slug.
            field=models.SlugField(unique=True),
        ),
    ]
| [
"django.db.models.SlugField"
] | [((346, 375), 'django.db.models.SlugField', 'models.SlugField', ([], {'unique': '(True)'}), '(unique=True)\n', (362, 375), False, 'from django.db import migrations, models\n')] |
from aorist import (
Attribute,
NaturalNumber,
StringIdentifier,
DateString,
POSIXTimestamp,
PositiveFloat,
default_tabular_schema,
RowStruct,
StaticDataTable,
DataSchema,
StorageSetup,
RemoteStorageSetup,
Storage,
RemoteStorage,
RemoteLocation,
CSVEncoding,
Encoding,
DataSet,
DatumTemplate,
Asset,
WebLocation,
FileBasedStorageLayout,
CSVHeader,
FileHeader,
APIOrFileLayout,
SingleFileLayout,
FreeText,
Empty,
FIPSStateCode,
IntegerNumber,
)
# Per-metric demographic breakdown columns published by the tracker.
# Every metric (Cases, Deaths, Hosp, Tests) carries the same set of
# race/ethnicity sub-columns, in this exact order.
_METRICS = ("Cases", "Deaths", "Hosp", "Tests")
_BREAKDOWNS = (
    "Total", "White", "Black", "Latinx", "Asian", "AIAN", "NHPI",
    "Multiracial", "Other", "Unknown", "Ethnicity_Hispanic",
    "Ethnicity_NonHispanic", "Ethnicity_Unknown",
)
attributes = [
    Attribute(DateString("Date")),
    Attribute(StringIdentifier("State")),
] + [
    Attribute(NaturalNumber("{}_{}".format(metric, breakdown)))
    for metric in _METRICS
    for breakdown in _BREAKDOWNS
]
# Row template describing a single record of the tracker CSV.
trcdt_datum = RowStruct(
    name="the_racial_covid_data_tracker_datum",
    attributes=attributes,
)

# Default tabular schema derived from the row template.
trcdt_schema = default_tabular_schema(DatumTemplate(trcdt_datum), attributes)
# Remote CSV published from the COVID Racial Data Tracker Google Sheet.
_csv_web_location = WebLocation(
    address=("https://docs.google.com/spreadsheets/d/e/2PACX-1vS8SzaERcKJOD"
             "_EzrtCDK1dX1zkoMochlA9iHoHg_RSw3V8bkpfk1mpw4pfL5RdtSOyx_oScsUt"
             "yXyk/pub?gid=43720681&single=true&output=csv"),
)

# Single-file, header-prefixed CSV storage backing the table.
_csv_storage = Storage(RemoteStorage(
    location=RemoteLocation(_csv_web_location),
    layout=APIOrFileLayout(FileBasedStorageLayout(SingleFileLayout())),
    encoding=Encoding(CSVEncoding(header=FileHeader(CSVHeader(num_lines=1)))),
))

table = Asset(StaticDataTable(
    name="the_racial_covid_data_tracker_table",
    schema=DataSchema(trcdt_schema),
    setup=StorageSetup(RemoteStorageSetup(remote=_csv_storage)),
    tag="the_racial_covid_data_tracker",
))
# Dataset bundling the single tracker table with its datum template.
trcdt_dataset = DataSet(
    name="The-covid-racial-data-tracker",
    description="""
    The COVID Racial Data Tracker is a collaboration between the COVID
    Tracking Project and the Boston University Center for Antiracist
    Research. Together, they’re gathering the most complete and up-to
    -date race and ethnicity data on COVID-19 in the United States.
    """,
    source_path=__file__,
    datum_templates=[DatumTemplate(trcdt_datum)],
    assets={"The COVID Racial Data Tracker data": table},
    access_policies=[],
)
| [
"aorist.WebLocation",
"aorist.SingleFileLayout",
"aorist.StringIdentifier",
"aorist.NaturalNumber",
"aorist.DataSchema",
"aorist.CSVHeader",
"aorist.RowStruct",
"aorist.DateString",
"aorist.DatumTemplate"
] | [((3206, 3282), 'aorist.RowStruct', 'RowStruct', ([], {'name': '"""the_racial_covid_data_tracker_datum"""', 'attributes': 'attributes'}), "(name='the_racial_covid_data_tracker_datum', attributes=attributes)\n", (3215, 3282), False, 'from aorist import Attribute, NaturalNumber, StringIdentifier, DateString, POSIXTimestamp, PositiveFloat, default_tabular_schema, RowStruct, StaticDataTable, DataSchema, StorageSetup, RemoteStorageSetup, Storage, RemoteStorage, RemoteLocation, CSVEncoding, Encoding, DataSet, DatumTemplate, Asset, WebLocation, FileBasedStorageLayout, CSVHeader, FileHeader, APIOrFileLayout, SingleFileLayout, FreeText, Empty, FIPSStateCode, IntegerNumber\n'), ((3338, 3364), 'aorist.DatumTemplate', 'DatumTemplate', (['trcdt_datum'], {}), '(trcdt_datum)\n', (3351, 3364), False, 'from aorist import Attribute, NaturalNumber, StringIdentifier, DateString, POSIXTimestamp, PositiveFloat, default_tabular_schema, RowStruct, StaticDataTable, DataSchema, StorageSetup, RemoteStorageSetup, Storage, RemoteStorage, RemoteLocation, CSVEncoding, Encoding, DataSet, DatumTemplate, Asset, WebLocation, FileBasedStorageLayout, CSVHeader, FileHeader, APIOrFileLayout, SingleFileLayout, FreeText, Empty, FIPSStateCode, IntegerNumber\n'), ((594, 612), 'aorist.DateString', 'DateString', (['"""Date"""'], {}), "('Date')\n", (604, 612), False, 'from aorist import Attribute, NaturalNumber, StringIdentifier, DateString, POSIXTimestamp, PositiveFloat, default_tabular_schema, RowStruct, StaticDataTable, DataSchema, StorageSetup, RemoteStorageSetup, Storage, RemoteStorage, RemoteLocation, CSVEncoding, Encoding, DataSet, DatumTemplate, Asset, WebLocation, FileBasedStorageLayout, CSVHeader, FileHeader, APIOrFileLayout, SingleFileLayout, FreeText, Empty, FIPSStateCode, IntegerNumber\n'), ((629, 654), 'aorist.StringIdentifier', 'StringIdentifier', (['"""State"""'], {}), "('State')\n", (645, 654), False, 'from aorist import Attribute, NaturalNumber, StringIdentifier, DateString, 
POSIXTimestamp, PositiveFloat, default_tabular_schema, RowStruct, StaticDataTable, DataSchema, StorageSetup, RemoteStorageSetup, Storage, RemoteStorage, RemoteLocation, CSVEncoding, Encoding, DataSet, DatumTemplate, Asset, WebLocation, FileBasedStorageLayout, CSVHeader, FileHeader, APIOrFileLayout, SingleFileLayout, FreeText, Empty, FIPSStateCode, IntegerNumber\n'), ((671, 699), 'aorist.NaturalNumber', 'NaturalNumber', (['"""Cases_Total"""'], {}), "('Cases_Total')\n", (684, 699), False, 'from aorist import Attribute, NaturalNumber, StringIdentifier, DateString, POSIXTimestamp, PositiveFloat, default_tabular_schema, RowStruct, StaticDataTable, DataSchema, StorageSetup, RemoteStorageSetup, Storage, RemoteStorage, RemoteLocation, CSVEncoding, Encoding, DataSet, DatumTemplate, Asset, WebLocation, FileBasedStorageLayout, CSVHeader, FileHeader, APIOrFileLayout, SingleFileLayout, FreeText, Empty, FIPSStateCode, IntegerNumber\n'), ((716, 744), 'aorist.NaturalNumber', 'NaturalNumber', (['"""Cases_White"""'], {}), "('Cases_White')\n", (729, 744), False, 'from aorist import Attribute, NaturalNumber, StringIdentifier, DateString, POSIXTimestamp, PositiveFloat, default_tabular_schema, RowStruct, StaticDataTable, DataSchema, StorageSetup, RemoteStorageSetup, Storage, RemoteStorage, RemoteLocation, CSVEncoding, Encoding, DataSet, DatumTemplate, Asset, WebLocation, FileBasedStorageLayout, CSVHeader, FileHeader, APIOrFileLayout, SingleFileLayout, FreeText, Empty, FIPSStateCode, IntegerNumber\n'), ((761, 789), 'aorist.NaturalNumber', 'NaturalNumber', (['"""Cases_Black"""'], {}), "('Cases_Black')\n", (774, 789), False, 'from aorist import Attribute, NaturalNumber, StringIdentifier, DateString, POSIXTimestamp, PositiveFloat, default_tabular_schema, RowStruct, StaticDataTable, DataSchema, StorageSetup, RemoteStorageSetup, Storage, RemoteStorage, RemoteLocation, CSVEncoding, Encoding, DataSet, DatumTemplate, Asset, WebLocation, FileBasedStorageLayout, CSVHeader, FileHeader, 
APIOrFileLayout, SingleFileLayout, FreeText, Empty, FIPSStateCode, IntegerNumber\n'), ((806, 835), 'aorist.NaturalNumber', 'NaturalNumber', (['"""Cases_Latinx"""'], {}), "('Cases_Latinx')\n", (819, 835), False, 'from aorist import Attribute, NaturalNumber, StringIdentifier, DateString, POSIXTimestamp, PositiveFloat, default_tabular_schema, RowStruct, StaticDataTable, DataSchema, StorageSetup, RemoteStorageSetup, Storage, RemoteStorage, RemoteLocation, CSVEncoding, Encoding, DataSet, DatumTemplate, Asset, WebLocation, FileBasedStorageLayout, CSVHeader, FileHeader, APIOrFileLayout, SingleFileLayout, FreeText, Empty, FIPSStateCode, IntegerNumber\n'), ((852, 880), 'aorist.NaturalNumber', 'NaturalNumber', (['"""Cases_Asian"""'], {}), "('Cases_Asian')\n", (865, 880), False, 'from aorist import Attribute, NaturalNumber, StringIdentifier, DateString, POSIXTimestamp, PositiveFloat, default_tabular_schema, RowStruct, StaticDataTable, DataSchema, StorageSetup, RemoteStorageSetup, Storage, RemoteStorage, RemoteLocation, CSVEncoding, Encoding, DataSet, DatumTemplate, Asset, WebLocation, FileBasedStorageLayout, CSVHeader, FileHeader, APIOrFileLayout, SingleFileLayout, FreeText, Empty, FIPSStateCode, IntegerNumber\n'), ((897, 924), 'aorist.NaturalNumber', 'NaturalNumber', (['"""Cases_AIAN"""'], {}), "('Cases_AIAN')\n", (910, 924), False, 'from aorist import Attribute, NaturalNumber, StringIdentifier, DateString, POSIXTimestamp, PositiveFloat, default_tabular_schema, RowStruct, StaticDataTable, DataSchema, StorageSetup, RemoteStorageSetup, Storage, RemoteStorage, RemoteLocation, CSVEncoding, Encoding, DataSet, DatumTemplate, Asset, WebLocation, FileBasedStorageLayout, CSVHeader, FileHeader, APIOrFileLayout, SingleFileLayout, FreeText, Empty, FIPSStateCode, IntegerNumber\n'), ((941, 968), 'aorist.NaturalNumber', 'NaturalNumber', (['"""Cases_NHPI"""'], {}), "('Cases_NHPI')\n", (954, 968), False, 'from aorist import Attribute, NaturalNumber, StringIdentifier, DateString, 
POSIXTimestamp, PositiveFloat, default_tabular_schema, RowStruct, StaticDataTable, DataSchema, StorageSetup, RemoteStorageSetup, Storage, RemoteStorage, RemoteLocation, CSVEncoding, Encoding, DataSet, DatumTemplate, Asset, WebLocation, FileBasedStorageLayout, CSVHeader, FileHeader, APIOrFileLayout, SingleFileLayout, FreeText, Empty, FIPSStateCode, IntegerNumber\n'), ((985, 1019), 'aorist.NaturalNumber', 'NaturalNumber', (['"""Cases_Multiracial"""'], {}), "('Cases_Multiracial')\n", (998, 1019), False, 'from aorist import Attribute, NaturalNumber, StringIdentifier, DateString, POSIXTimestamp, PositiveFloat, default_tabular_schema, RowStruct, StaticDataTable, DataSchema, StorageSetup, RemoteStorageSetup, Storage, RemoteStorage, RemoteLocation, CSVEncoding, Encoding, DataSet, DatumTemplate, Asset, WebLocation, FileBasedStorageLayout, CSVHeader, FileHeader, APIOrFileLayout, SingleFileLayout, FreeText, Empty, FIPSStateCode, IntegerNumber\n'), ((1036, 1064), 'aorist.NaturalNumber', 'NaturalNumber', (['"""Cases_Other"""'], {}), "('Cases_Other')\n", (1049, 1064), False, 'from aorist import Attribute, NaturalNumber, StringIdentifier, DateString, POSIXTimestamp, PositiveFloat, default_tabular_schema, RowStruct, StaticDataTable, DataSchema, StorageSetup, RemoteStorageSetup, Storage, RemoteStorage, RemoteLocation, CSVEncoding, Encoding, DataSet, DatumTemplate, Asset, WebLocation, FileBasedStorageLayout, CSVHeader, FileHeader, APIOrFileLayout, SingleFileLayout, FreeText, Empty, FIPSStateCode, IntegerNumber\n'), ((1081, 1111), 'aorist.NaturalNumber', 'NaturalNumber', (['"""Cases_Unknown"""'], {}), "('Cases_Unknown')\n", (1094, 1111), False, 'from aorist import Attribute, NaturalNumber, StringIdentifier, DateString, POSIXTimestamp, PositiveFloat, default_tabular_schema, RowStruct, StaticDataTable, DataSchema, StorageSetup, RemoteStorageSetup, Storage, RemoteStorage, RemoteLocation, CSVEncoding, Encoding, DataSet, DatumTemplate, Asset, WebLocation, FileBasedStorageLayout, 
CSVHeader, FileHeader, APIOrFileLayout, SingleFileLayout, FreeText, Empty, FIPSStateCode, IntegerNumber\n'), ((1128, 1169), 'aorist.NaturalNumber', 'NaturalNumber', (['"""Cases_Ethnicity_Hispanic"""'], {}), "('Cases_Ethnicity_Hispanic')\n", (1141, 1169), False, 'from aorist import Attribute, NaturalNumber, StringIdentifier, DateString, POSIXTimestamp, PositiveFloat, default_tabular_schema, RowStruct, StaticDataTable, DataSchema, StorageSetup, RemoteStorageSetup, Storage, RemoteStorage, RemoteLocation, CSVEncoding, Encoding, DataSet, DatumTemplate, Asset, WebLocation, FileBasedStorageLayout, CSVHeader, FileHeader, APIOrFileLayout, SingleFileLayout, FreeText, Empty, FIPSStateCode, IntegerNumber\n'), ((1186, 1230), 'aorist.NaturalNumber', 'NaturalNumber', (['"""Cases_Ethnicity_NonHispanic"""'], {}), "('Cases_Ethnicity_NonHispanic')\n", (1199, 1230), False, 'from aorist import Attribute, NaturalNumber, StringIdentifier, DateString, POSIXTimestamp, PositiveFloat, default_tabular_schema, RowStruct, StaticDataTable, DataSchema, StorageSetup, RemoteStorageSetup, Storage, RemoteStorage, RemoteLocation, CSVEncoding, Encoding, DataSet, DatumTemplate, Asset, WebLocation, FileBasedStorageLayout, CSVHeader, FileHeader, APIOrFileLayout, SingleFileLayout, FreeText, Empty, FIPSStateCode, IntegerNumber\n'), ((1247, 1287), 'aorist.NaturalNumber', 'NaturalNumber', (['"""Cases_Ethnicity_Unknown"""'], {}), "('Cases_Ethnicity_Unknown')\n", (1260, 1287), False, 'from aorist import Attribute, NaturalNumber, StringIdentifier, DateString, POSIXTimestamp, PositiveFloat, default_tabular_schema, RowStruct, StaticDataTable, DataSchema, StorageSetup, RemoteStorageSetup, Storage, RemoteStorage, RemoteLocation, CSVEncoding, Encoding, DataSet, DatumTemplate, Asset, WebLocation, FileBasedStorageLayout, CSVHeader, FileHeader, APIOrFileLayout, SingleFileLayout, FreeText, Empty, FIPSStateCode, IntegerNumber\n'), ((1304, 1333), 'aorist.NaturalNumber', 'NaturalNumber', (['"""Deaths_Total"""'], {}), 
"('Deaths_Total')\n", (1317, 1333), False, 'from aorist import Attribute, NaturalNumber, StringIdentifier, DateString, POSIXTimestamp, PositiveFloat, default_tabular_schema, RowStruct, StaticDataTable, DataSchema, StorageSetup, RemoteStorageSetup, Storage, RemoteStorage, RemoteLocation, CSVEncoding, Encoding, DataSet, DatumTemplate, Asset, WebLocation, FileBasedStorageLayout, CSVHeader, FileHeader, APIOrFileLayout, SingleFileLayout, FreeText, Empty, FIPSStateCode, IntegerNumber\n'), ((1350, 1379), 'aorist.NaturalNumber', 'NaturalNumber', (['"""Deaths_White"""'], {}), "('Deaths_White')\n", (1363, 1379), False, 'from aorist import Attribute, NaturalNumber, StringIdentifier, DateString, POSIXTimestamp, PositiveFloat, default_tabular_schema, RowStruct, StaticDataTable, DataSchema, StorageSetup, RemoteStorageSetup, Storage, RemoteStorage, RemoteLocation, CSVEncoding, Encoding, DataSet, DatumTemplate, Asset, WebLocation, FileBasedStorageLayout, CSVHeader, FileHeader, APIOrFileLayout, SingleFileLayout, FreeText, Empty, FIPSStateCode, IntegerNumber\n'), ((1396, 1425), 'aorist.NaturalNumber', 'NaturalNumber', (['"""Deaths_Black"""'], {}), "('Deaths_Black')\n", (1409, 1425), False, 'from aorist import Attribute, NaturalNumber, StringIdentifier, DateString, POSIXTimestamp, PositiveFloat, default_tabular_schema, RowStruct, StaticDataTable, DataSchema, StorageSetup, RemoteStorageSetup, Storage, RemoteStorage, RemoteLocation, CSVEncoding, Encoding, DataSet, DatumTemplate, Asset, WebLocation, FileBasedStorageLayout, CSVHeader, FileHeader, APIOrFileLayout, SingleFileLayout, FreeText, Empty, FIPSStateCode, IntegerNumber\n'), ((1442, 1472), 'aorist.NaturalNumber', 'NaturalNumber', (['"""Deaths_Latinx"""'], {}), "('Deaths_Latinx')\n", (1455, 1472), False, 'from aorist import Attribute, NaturalNumber, StringIdentifier, DateString, POSIXTimestamp, PositiveFloat, default_tabular_schema, RowStruct, StaticDataTable, DataSchema, StorageSetup, RemoteStorageSetup, Storage, RemoteStorage, 
RemoteLocation, CSVEncoding, Encoding, DataSet, DatumTemplate, Asset, WebLocation, FileBasedStorageLayout, CSVHeader, FileHeader, APIOrFileLayout, SingleFileLayout, FreeText, Empty, FIPSStateCode, IntegerNumber\n'), ((1489, 1518), 'aorist.NaturalNumber', 'NaturalNumber', (['"""Deaths_Asian"""'], {}), "('Deaths_Asian')\n", (1502, 1518), False, 'from aorist import Attribute, NaturalNumber, StringIdentifier, DateString, POSIXTimestamp, PositiveFloat, default_tabular_schema, RowStruct, StaticDataTable, DataSchema, StorageSetup, RemoteStorageSetup, Storage, RemoteStorage, RemoteLocation, CSVEncoding, Encoding, DataSet, DatumTemplate, Asset, WebLocation, FileBasedStorageLayout, CSVHeader, FileHeader, APIOrFileLayout, SingleFileLayout, FreeText, Empty, FIPSStateCode, IntegerNumber\n'), ((1535, 1563), 'aorist.NaturalNumber', 'NaturalNumber', (['"""Deaths_AIAN"""'], {}), "('Deaths_AIAN')\n", (1548, 1563), False, 'from aorist import Attribute, NaturalNumber, StringIdentifier, DateString, POSIXTimestamp, PositiveFloat, default_tabular_schema, RowStruct, StaticDataTable, DataSchema, StorageSetup, RemoteStorageSetup, Storage, RemoteStorage, RemoteLocation, CSVEncoding, Encoding, DataSet, DatumTemplate, Asset, WebLocation, FileBasedStorageLayout, CSVHeader, FileHeader, APIOrFileLayout, SingleFileLayout, FreeText, Empty, FIPSStateCode, IntegerNumber\n'), ((1580, 1608), 'aorist.NaturalNumber', 'NaturalNumber', (['"""Deaths_NHPI"""'], {}), "('Deaths_NHPI')\n", (1593, 1608), False, 'from aorist import Attribute, NaturalNumber, StringIdentifier, DateString, POSIXTimestamp, PositiveFloat, default_tabular_schema, RowStruct, StaticDataTable, DataSchema, StorageSetup, RemoteStorageSetup, Storage, RemoteStorage, RemoteLocation, CSVEncoding, Encoding, DataSet, DatumTemplate, Asset, WebLocation, FileBasedStorageLayout, CSVHeader, FileHeader, APIOrFileLayout, SingleFileLayout, FreeText, Empty, FIPSStateCode, IntegerNumber\n'), ((1625, 1660), 'aorist.NaturalNumber', 'NaturalNumber', 
(['"""Deaths_Multiracial"""'], {}), "('Deaths_Multiracial')\n", (1638, 1660), False, 'from aorist import Attribute, NaturalNumber, StringIdentifier, DateString, POSIXTimestamp, PositiveFloat, default_tabular_schema, RowStruct, StaticDataTable, DataSchema, StorageSetup, RemoteStorageSetup, Storage, RemoteStorage, RemoteLocation, CSVEncoding, Encoding, DataSet, DatumTemplate, Asset, WebLocation, FileBasedStorageLayout, CSVHeader, FileHeader, APIOrFileLayout, SingleFileLayout, FreeText, Empty, FIPSStateCode, IntegerNumber\n'), ((1677, 1706), 'aorist.NaturalNumber', 'NaturalNumber', (['"""Deaths_Other"""'], {}), "('Deaths_Other')\n", (1690, 1706), False, 'from aorist import Attribute, NaturalNumber, StringIdentifier, DateString, POSIXTimestamp, PositiveFloat, default_tabular_schema, RowStruct, StaticDataTable, DataSchema, StorageSetup, RemoteStorageSetup, Storage, RemoteStorage, RemoteLocation, CSVEncoding, Encoding, DataSet, DatumTemplate, Asset, WebLocation, FileBasedStorageLayout, CSVHeader, FileHeader, APIOrFileLayout, SingleFileLayout, FreeText, Empty, FIPSStateCode, IntegerNumber\n'), ((1723, 1754), 'aorist.NaturalNumber', 'NaturalNumber', (['"""Deaths_Unknown"""'], {}), "('Deaths_Unknown')\n", (1736, 1754), False, 'from aorist import Attribute, NaturalNumber, StringIdentifier, DateString, POSIXTimestamp, PositiveFloat, default_tabular_schema, RowStruct, StaticDataTable, DataSchema, StorageSetup, RemoteStorageSetup, Storage, RemoteStorage, RemoteLocation, CSVEncoding, Encoding, DataSet, DatumTemplate, Asset, WebLocation, FileBasedStorageLayout, CSVHeader, FileHeader, APIOrFileLayout, SingleFileLayout, FreeText, Empty, FIPSStateCode, IntegerNumber\n'), ((1771, 1813), 'aorist.NaturalNumber', 'NaturalNumber', (['"""Deaths_Ethnicity_Hispanic"""'], {}), "('Deaths_Ethnicity_Hispanic')\n", (1784, 1813), False, 'from aorist import Attribute, NaturalNumber, StringIdentifier, DateString, POSIXTimestamp, PositiveFloat, default_tabular_schema, RowStruct, StaticDataTable, 
DataSchema, StorageSetup, RemoteStorageSetup, Storage, RemoteStorage, RemoteLocation, CSVEncoding, Encoding, DataSet, DatumTemplate, Asset, WebLocation, FileBasedStorageLayout, CSVHeader, FileHeader, APIOrFileLayout, SingleFileLayout, FreeText, Empty, FIPSStateCode, IntegerNumber\n'), ((1830, 1875), 'aorist.NaturalNumber', 'NaturalNumber', (['"""Deaths_Ethnicity_NonHispanic"""'], {}), "('Deaths_Ethnicity_NonHispanic')\n", (1843, 1875), False, 'from aorist import Attribute, NaturalNumber, StringIdentifier, DateString, POSIXTimestamp, PositiveFloat, default_tabular_schema, RowStruct, StaticDataTable, DataSchema, StorageSetup, RemoteStorageSetup, Storage, RemoteStorage, RemoteLocation, CSVEncoding, Encoding, DataSet, DatumTemplate, Asset, WebLocation, FileBasedStorageLayout, CSVHeader, FileHeader, APIOrFileLayout, SingleFileLayout, FreeText, Empty, FIPSStateCode, IntegerNumber\n'), ((1892, 1933), 'aorist.NaturalNumber', 'NaturalNumber', (['"""Deaths_Ethnicity_Unknown"""'], {}), "('Deaths_Ethnicity_Unknown')\n", (1905, 1933), False, 'from aorist import Attribute, NaturalNumber, StringIdentifier, DateString, POSIXTimestamp, PositiveFloat, default_tabular_schema, RowStruct, StaticDataTable, DataSchema, StorageSetup, RemoteStorageSetup, Storage, RemoteStorage, RemoteLocation, CSVEncoding, Encoding, DataSet, DatumTemplate, Asset, WebLocation, FileBasedStorageLayout, CSVHeader, FileHeader, APIOrFileLayout, SingleFileLayout, FreeText, Empty, FIPSStateCode, IntegerNumber\n'), ((1950, 1977), 'aorist.NaturalNumber', 'NaturalNumber', (['"""Hosp_Total"""'], {}), "('Hosp_Total')\n", (1963, 1977), False, 'from aorist import Attribute, NaturalNumber, StringIdentifier, DateString, POSIXTimestamp, PositiveFloat, default_tabular_schema, RowStruct, StaticDataTable, DataSchema, StorageSetup, RemoteStorageSetup, Storage, RemoteStorage, RemoteLocation, CSVEncoding, Encoding, DataSet, DatumTemplate, Asset, WebLocation, FileBasedStorageLayout, CSVHeader, FileHeader, APIOrFileLayout, 
SingleFileLayout, FreeText, Empty, FIPSStateCode, IntegerNumber\n'), ((1994, 2021), 'aorist.NaturalNumber', 'NaturalNumber', (['"""Hosp_White"""'], {}), "('Hosp_White')\n", (2007, 2021), False, 'from aorist import Attribute, NaturalNumber, StringIdentifier, DateString, POSIXTimestamp, PositiveFloat, default_tabular_schema, RowStruct, StaticDataTable, DataSchema, StorageSetup, RemoteStorageSetup, Storage, RemoteStorage, RemoteLocation, CSVEncoding, Encoding, DataSet, DatumTemplate, Asset, WebLocation, FileBasedStorageLayout, CSVHeader, FileHeader, APIOrFileLayout, SingleFileLayout, FreeText, Empty, FIPSStateCode, IntegerNumber\n'), ((2038, 2065), 'aorist.NaturalNumber', 'NaturalNumber', (['"""Hosp_Black"""'], {}), "('Hosp_Black')\n", (2051, 2065), False, 'from aorist import Attribute, NaturalNumber, StringIdentifier, DateString, POSIXTimestamp, PositiveFloat, default_tabular_schema, RowStruct, StaticDataTable, DataSchema, StorageSetup, RemoteStorageSetup, Storage, RemoteStorage, RemoteLocation, CSVEncoding, Encoding, DataSet, DatumTemplate, Asset, WebLocation, FileBasedStorageLayout, CSVHeader, FileHeader, APIOrFileLayout, SingleFileLayout, FreeText, Empty, FIPSStateCode, IntegerNumber\n'), ((2082, 2110), 'aorist.NaturalNumber', 'NaturalNumber', (['"""Hosp_Latinx"""'], {}), "('Hosp_Latinx')\n", (2095, 2110), False, 'from aorist import Attribute, NaturalNumber, StringIdentifier, DateString, POSIXTimestamp, PositiveFloat, default_tabular_schema, RowStruct, StaticDataTable, DataSchema, StorageSetup, RemoteStorageSetup, Storage, RemoteStorage, RemoteLocation, CSVEncoding, Encoding, DataSet, DatumTemplate, Asset, WebLocation, FileBasedStorageLayout, CSVHeader, FileHeader, APIOrFileLayout, SingleFileLayout, FreeText, Empty, FIPSStateCode, IntegerNumber\n'), ((2127, 2154), 'aorist.NaturalNumber', 'NaturalNumber', (['"""Hosp_Asian"""'], {}), "('Hosp_Asian')\n", (2140, 2154), False, 'from aorist import Attribute, NaturalNumber, StringIdentifier, DateString, POSIXTimestamp, 
PositiveFloat, default_tabular_schema, RowStruct, StaticDataTable, DataSchema, StorageSetup, RemoteStorageSetup, Storage, RemoteStorage, RemoteLocation, CSVEncoding, Encoding, DataSet, DatumTemplate, Asset, WebLocation, FileBasedStorageLayout, CSVHeader, FileHeader, APIOrFileLayout, SingleFileLayout, FreeText, Empty, FIPSStateCode, IntegerNumber\n'), ((2171, 2197), 'aorist.NaturalNumber', 'NaturalNumber', (['"""Hosp_AIAN"""'], {}), "('Hosp_AIAN')\n", (2184, 2197), False, 'from aorist import Attribute, NaturalNumber, StringIdentifier, DateString, POSIXTimestamp, PositiveFloat, default_tabular_schema, RowStruct, StaticDataTable, DataSchema, StorageSetup, RemoteStorageSetup, Storage, RemoteStorage, RemoteLocation, CSVEncoding, Encoding, DataSet, DatumTemplate, Asset, WebLocation, FileBasedStorageLayout, CSVHeader, FileHeader, APIOrFileLayout, SingleFileLayout, FreeText, Empty, FIPSStateCode, IntegerNumber\n'), ((2214, 2240), 'aorist.NaturalNumber', 'NaturalNumber', (['"""Hosp_NHPI"""'], {}), "('Hosp_NHPI')\n", (2227, 2240), False, 'from aorist import Attribute, NaturalNumber, StringIdentifier, DateString, POSIXTimestamp, PositiveFloat, default_tabular_schema, RowStruct, StaticDataTable, DataSchema, StorageSetup, RemoteStorageSetup, Storage, RemoteStorage, RemoteLocation, CSVEncoding, Encoding, DataSet, DatumTemplate, Asset, WebLocation, FileBasedStorageLayout, CSVHeader, FileHeader, APIOrFileLayout, SingleFileLayout, FreeText, Empty, FIPSStateCode, IntegerNumber\n'), ((2257, 2290), 'aorist.NaturalNumber', 'NaturalNumber', (['"""Hosp_Multiracial"""'], {}), "('Hosp_Multiracial')\n", (2270, 2290), False, 'from aorist import Attribute, NaturalNumber, StringIdentifier, DateString, POSIXTimestamp, PositiveFloat, default_tabular_schema, RowStruct, StaticDataTable, DataSchema, StorageSetup, RemoteStorageSetup, Storage, RemoteStorage, RemoteLocation, CSVEncoding, Encoding, DataSet, DatumTemplate, Asset, WebLocation, FileBasedStorageLayout, CSVHeader, FileHeader, 
APIOrFileLayout, SingleFileLayout, FreeText, Empty, FIPSStateCode, IntegerNumber\n'), ((2307, 2334), 'aorist.NaturalNumber', 'NaturalNumber', (['"""Hosp_Other"""'], {}), "('Hosp_Other')\n", (2320, 2334), False, 'from aorist import Attribute, NaturalNumber, StringIdentifier, DateString, POSIXTimestamp, PositiveFloat, default_tabular_schema, RowStruct, StaticDataTable, DataSchema, StorageSetup, RemoteStorageSetup, Storage, RemoteStorage, RemoteLocation, CSVEncoding, Encoding, DataSet, DatumTemplate, Asset, WebLocation, FileBasedStorageLayout, CSVHeader, FileHeader, APIOrFileLayout, SingleFileLayout, FreeText, Empty, FIPSStateCode, IntegerNumber\n'), ((2351, 2380), 'aorist.NaturalNumber', 'NaturalNumber', (['"""Hosp_Unknown"""'], {}), "('Hosp_Unknown')\n", (2364, 2380), False, 'from aorist import Attribute, NaturalNumber, StringIdentifier, DateString, POSIXTimestamp, PositiveFloat, default_tabular_schema, RowStruct, StaticDataTable, DataSchema, StorageSetup, RemoteStorageSetup, Storage, RemoteStorage, RemoteLocation, CSVEncoding, Encoding, DataSet, DatumTemplate, Asset, WebLocation, FileBasedStorageLayout, CSVHeader, FileHeader, APIOrFileLayout, SingleFileLayout, FreeText, Empty, FIPSStateCode, IntegerNumber\n'), ((2397, 2437), 'aorist.NaturalNumber', 'NaturalNumber', (['"""Hosp_Ethnicity_Hispanic"""'], {}), "('Hosp_Ethnicity_Hispanic')\n", (2410, 2437), False, 'from aorist import Attribute, NaturalNumber, StringIdentifier, DateString, POSIXTimestamp, PositiveFloat, default_tabular_schema, RowStruct, StaticDataTable, DataSchema, StorageSetup, RemoteStorageSetup, Storage, RemoteStorage, RemoteLocation, CSVEncoding, Encoding, DataSet, DatumTemplate, Asset, WebLocation, FileBasedStorageLayout, CSVHeader, FileHeader, APIOrFileLayout, SingleFileLayout, FreeText, Empty, FIPSStateCode, IntegerNumber\n'), ((2454, 2497), 'aorist.NaturalNumber', 'NaturalNumber', (['"""Hosp_Ethnicity_NonHispanic"""'], {}), "('Hosp_Ethnicity_NonHispanic')\n", (2467, 2497), False, 'from aorist 
import Attribute, NaturalNumber, StringIdentifier, DateString, POSIXTimestamp, PositiveFloat, default_tabular_schema, RowStruct, StaticDataTable, DataSchema, StorageSetup, RemoteStorageSetup, Storage, RemoteStorage, RemoteLocation, CSVEncoding, Encoding, DataSet, DatumTemplate, Asset, WebLocation, FileBasedStorageLayout, CSVHeader, FileHeader, APIOrFileLayout, SingleFileLayout, FreeText, Empty, FIPSStateCode, IntegerNumber\n'), ((2514, 2553), 'aorist.NaturalNumber', 'NaturalNumber', (['"""Hosp_Ethnicity_Unknown"""'], {}), "('Hosp_Ethnicity_Unknown')\n", (2527, 2553), False, 'from aorist import Attribute, NaturalNumber, StringIdentifier, DateString, POSIXTimestamp, PositiveFloat, default_tabular_schema, RowStruct, StaticDataTable, DataSchema, StorageSetup, RemoteStorageSetup, Storage, RemoteStorage, RemoteLocation, CSVEncoding, Encoding, DataSet, DatumTemplate, Asset, WebLocation, FileBasedStorageLayout, CSVHeader, FileHeader, APIOrFileLayout, SingleFileLayout, FreeText, Empty, FIPSStateCode, IntegerNumber\n'), ((2570, 2598), 'aorist.NaturalNumber', 'NaturalNumber', (['"""Tests_Total"""'], {}), "('Tests_Total')\n", (2583, 2598), False, 'from aorist import Attribute, NaturalNumber, StringIdentifier, DateString, POSIXTimestamp, PositiveFloat, default_tabular_schema, RowStruct, StaticDataTable, DataSchema, StorageSetup, RemoteStorageSetup, Storage, RemoteStorage, RemoteLocation, CSVEncoding, Encoding, DataSet, DatumTemplate, Asset, WebLocation, FileBasedStorageLayout, CSVHeader, FileHeader, APIOrFileLayout, SingleFileLayout, FreeText, Empty, FIPSStateCode, IntegerNumber\n'), ((2615, 2643), 'aorist.NaturalNumber', 'NaturalNumber', (['"""Tests_White"""'], {}), "('Tests_White')\n", (2628, 2643), False, 'from aorist import Attribute, NaturalNumber, StringIdentifier, DateString, POSIXTimestamp, PositiveFloat, default_tabular_schema, RowStruct, StaticDataTable, DataSchema, StorageSetup, RemoteStorageSetup, Storage, RemoteStorage, RemoteLocation, CSVEncoding, Encoding, 
DataSet, DatumTemplate, Asset, WebLocation, FileBasedStorageLayout, CSVHeader, FileHeader, APIOrFileLayout, SingleFileLayout, FreeText, Empty, FIPSStateCode, IntegerNumber\n'), ((2660, 2688), 'aorist.NaturalNumber', 'NaturalNumber', (['"""Tests_Black"""'], {}), "('Tests_Black')\n", (2673, 2688), False, 'from aorist import Attribute, NaturalNumber, StringIdentifier, DateString, POSIXTimestamp, PositiveFloat, default_tabular_schema, RowStruct, StaticDataTable, DataSchema, StorageSetup, RemoteStorageSetup, Storage, RemoteStorage, RemoteLocation, CSVEncoding, Encoding, DataSet, DatumTemplate, Asset, WebLocation, FileBasedStorageLayout, CSVHeader, FileHeader, APIOrFileLayout, SingleFileLayout, FreeText, Empty, FIPSStateCode, IntegerNumber\n'), ((2705, 2734), 'aorist.NaturalNumber', 'NaturalNumber', (['"""Tests_Latinx"""'], {}), "('Tests_Latinx')\n", (2718, 2734), False, 'from aorist import Attribute, NaturalNumber, StringIdentifier, DateString, POSIXTimestamp, PositiveFloat, default_tabular_schema, RowStruct, StaticDataTable, DataSchema, StorageSetup, RemoteStorageSetup, Storage, RemoteStorage, RemoteLocation, CSVEncoding, Encoding, DataSet, DatumTemplate, Asset, WebLocation, FileBasedStorageLayout, CSVHeader, FileHeader, APIOrFileLayout, SingleFileLayout, FreeText, Empty, FIPSStateCode, IntegerNumber\n'), ((2751, 2779), 'aorist.NaturalNumber', 'NaturalNumber', (['"""Tests_Asian"""'], {}), "('Tests_Asian')\n", (2764, 2779), False, 'from aorist import Attribute, NaturalNumber, StringIdentifier, DateString, POSIXTimestamp, PositiveFloat, default_tabular_schema, RowStruct, StaticDataTable, DataSchema, StorageSetup, RemoteStorageSetup, Storage, RemoteStorage, RemoteLocation, CSVEncoding, Encoding, DataSet, DatumTemplate, Asset, WebLocation, FileBasedStorageLayout, CSVHeader, FileHeader, APIOrFileLayout, SingleFileLayout, FreeText, Empty, FIPSStateCode, IntegerNumber\n'), ((2796, 2823), 'aorist.NaturalNumber', 'NaturalNumber', (['"""Tests_AIAN"""'], {}), "('Tests_AIAN')\n", 
(2809, 2823), False, 'from aorist import Attribute, NaturalNumber, StringIdentifier, DateString, POSIXTimestamp, PositiveFloat, default_tabular_schema, RowStruct, StaticDataTable, DataSchema, StorageSetup, RemoteStorageSetup, Storage, RemoteStorage, RemoteLocation, CSVEncoding, Encoding, DataSet, DatumTemplate, Asset, WebLocation, FileBasedStorageLayout, CSVHeader, FileHeader, APIOrFileLayout, SingleFileLayout, FreeText, Empty, FIPSStateCode, IntegerNumber\n'), ((2840, 2867), 'aorist.NaturalNumber', 'NaturalNumber', (['"""Tests_NHPI"""'], {}), "('Tests_NHPI')\n", (2853, 2867), False, 'from aorist import Attribute, NaturalNumber, StringIdentifier, DateString, POSIXTimestamp, PositiveFloat, default_tabular_schema, RowStruct, StaticDataTable, DataSchema, StorageSetup, RemoteStorageSetup, Storage, RemoteStorage, RemoteLocation, CSVEncoding, Encoding, DataSet, DatumTemplate, Asset, WebLocation, FileBasedStorageLayout, CSVHeader, FileHeader, APIOrFileLayout, SingleFileLayout, FreeText, Empty, FIPSStateCode, IntegerNumber\n'), ((2884, 2918), 'aorist.NaturalNumber', 'NaturalNumber', (['"""Tests_Multiracial"""'], {}), "('Tests_Multiracial')\n", (2897, 2918), False, 'from aorist import Attribute, NaturalNumber, StringIdentifier, DateString, POSIXTimestamp, PositiveFloat, default_tabular_schema, RowStruct, StaticDataTable, DataSchema, StorageSetup, RemoteStorageSetup, Storage, RemoteStorage, RemoteLocation, CSVEncoding, Encoding, DataSet, DatumTemplate, Asset, WebLocation, FileBasedStorageLayout, CSVHeader, FileHeader, APIOrFileLayout, SingleFileLayout, FreeText, Empty, FIPSStateCode, IntegerNumber\n'), ((2935, 2963), 'aorist.NaturalNumber', 'NaturalNumber', (['"""Tests_Other"""'], {}), "('Tests_Other')\n", (2948, 2963), False, 'from aorist import Attribute, NaturalNumber, StringIdentifier, DateString, POSIXTimestamp, PositiveFloat, default_tabular_schema, RowStruct, StaticDataTable, DataSchema, StorageSetup, RemoteStorageSetup, Storage, RemoteStorage, RemoteLocation, 
CSVEncoding, Encoding, DataSet, DatumTemplate, Asset, WebLocation, FileBasedStorageLayout, CSVHeader, FileHeader, APIOrFileLayout, SingleFileLayout, FreeText, Empty, FIPSStateCode, IntegerNumber\n'), ((2980, 3010), 'aorist.NaturalNumber', 'NaturalNumber', (['"""Tests_Unknown"""'], {}), "('Tests_Unknown')\n", (2993, 3010), False, 'from aorist import Attribute, NaturalNumber, StringIdentifier, DateString, POSIXTimestamp, PositiveFloat, default_tabular_schema, RowStruct, StaticDataTable, DataSchema, StorageSetup, RemoteStorageSetup, Storage, RemoteStorage, RemoteLocation, CSVEncoding, Encoding, DataSet, DatumTemplate, Asset, WebLocation, FileBasedStorageLayout, CSVHeader, FileHeader, APIOrFileLayout, SingleFileLayout, FreeText, Empty, FIPSStateCode, IntegerNumber\n'), ((3027, 3068), 'aorist.NaturalNumber', 'NaturalNumber', (['"""Tests_Ethnicity_Hispanic"""'], {}), "('Tests_Ethnicity_Hispanic')\n", (3040, 3068), False, 'from aorist import Attribute, NaturalNumber, StringIdentifier, DateString, POSIXTimestamp, PositiveFloat, default_tabular_schema, RowStruct, StaticDataTable, DataSchema, StorageSetup, RemoteStorageSetup, Storage, RemoteStorage, RemoteLocation, CSVEncoding, Encoding, DataSet, DatumTemplate, Asset, WebLocation, FileBasedStorageLayout, CSVHeader, FileHeader, APIOrFileLayout, SingleFileLayout, FreeText, Empty, FIPSStateCode, IntegerNumber\n'), ((3085, 3129), 'aorist.NaturalNumber', 'NaturalNumber', (['"""Tests_Ethnicity_NonHispanic"""'], {}), "('Tests_Ethnicity_NonHispanic')\n", (3098, 3129), False, 'from aorist import Attribute, NaturalNumber, StringIdentifier, DateString, POSIXTimestamp, PositiveFloat, default_tabular_schema, RowStruct, StaticDataTable, DataSchema, StorageSetup, RemoteStorageSetup, Storage, RemoteStorage, RemoteLocation, CSVEncoding, Encoding, DataSet, DatumTemplate, Asset, WebLocation, FileBasedStorageLayout, CSVHeader, FileHeader, APIOrFileLayout, SingleFileLayout, FreeText, Empty, FIPSStateCode, IntegerNumber\n'), ((3146, 3186), 
'aorist.NaturalNumber', 'NaturalNumber', (['"""Tests_Ethnicity_Unknown"""'], {}), "('Tests_Ethnicity_Unknown')\n", (3159, 3186), False, 'from aorist import Attribute, NaturalNumber, StringIdentifier, DateString, POSIXTimestamp, PositiveFloat, default_tabular_schema, RowStruct, StaticDataTable, DataSchema, StorageSetup, RemoteStorageSetup, Storage, RemoteStorage, RemoteLocation, CSVEncoding, Encoding, DataSet, DatumTemplate, Asset, WebLocation, FileBasedStorageLayout, CSVHeader, FileHeader, APIOrFileLayout, SingleFileLayout, FreeText, Empty, FIPSStateCode, IntegerNumber\n'), ((3486, 3510), 'aorist.DataSchema', 'DataSchema', (['trcdt_schema'], {}), '(trcdt_schema)\n', (3496, 3510), False, 'from aorist import Attribute, NaturalNumber, StringIdentifier, DateString, POSIXTimestamp, PositiveFloat, default_tabular_schema, RowStruct, StaticDataTable, DataSchema, StorageSetup, RemoteStorageSetup, Storage, RemoteStorage, RemoteLocation, CSVEncoding, Encoding, DataSet, DatumTemplate, Asset, WebLocation, FileBasedStorageLayout, CSVHeader, FileHeader, APIOrFileLayout, SingleFileLayout, FreeText, Empty, FIPSStateCode, IntegerNumber\n'), ((4900, 4926), 'aorist.DatumTemplate', 'DatumTemplate', (['trcdt_datum'], {}), '(trcdt_datum)\n', (4913, 4926), False, 'from aorist import Attribute, NaturalNumber, StringIdentifier, DateString, POSIXTimestamp, PositiveFloat, default_tabular_schema, RowStruct, StaticDataTable, DataSchema, StorageSetup, RemoteStorageSetup, Storage, RemoteStorage, RemoteLocation, CSVEncoding, Encoding, DataSet, DatumTemplate, Asset, WebLocation, FileBasedStorageLayout, CSVHeader, FileHeader, APIOrFileLayout, SingleFileLayout, FreeText, Empty, FIPSStateCode, IntegerNumber\n'), ((3678, 3878), 'aorist.WebLocation', 'WebLocation', ([], {'address': '"""https://docs.google.com/spreadsheets/d/e/2PACX-1vS8SzaERcKJOD_EzrtCDK1dX1zkoMochlA9iHoHg_RSw3V8bkpfk1mpw4pfL5RdtSOyx_oScsUtyXyk/pub?gid=43720681&single=true&output=csv"""'}), "(address=\n 
'https://docs.google.com/spreadsheets/d/e/2PACX-1vS8SzaERcKJOD_EzrtCDK1dX1zkoMochlA9iHoHg_RSw3V8bkpfk1mpw4pfL5RdtSOyx_oScsUtyXyk/pub?gid=43720681&single=true&output=csv'\n )\n", (3689, 3878), False, 'from aorist import Attribute, NaturalNumber, StringIdentifier, DateString, POSIXTimestamp, PositiveFloat, default_tabular_schema, RowStruct, StaticDataTable, DataSchema, StorageSetup, RemoteStorageSetup, Storage, RemoteStorage, RemoteLocation, CSVEncoding, Encoding, DataSet, DatumTemplate, Asset, WebLocation, FileBasedStorageLayout, CSVHeader, FileHeader, APIOrFileLayout, SingleFileLayout, FreeText, Empty, FIPSStateCode, IntegerNumber\n'), ((4149, 4167), 'aorist.SingleFileLayout', 'SingleFileLayout', ([], {}), '()\n', (4165, 4167), False, 'from aorist import Attribute, NaturalNumber, StringIdentifier, DateString, POSIXTimestamp, PositiveFloat, default_tabular_schema, RowStruct, StaticDataTable, DataSchema, StorageSetup, RemoteStorageSetup, Storage, RemoteStorage, RemoteLocation, CSVEncoding, Encoding, DataSet, DatumTemplate, Asset, WebLocation, FileBasedStorageLayout, CSVHeader, FileHeader, APIOrFileLayout, SingleFileLayout, FreeText, Empty, FIPSStateCode, IntegerNumber\n'), ((4311, 4333), 'aorist.CSVHeader', 'CSVHeader', ([], {'num_lines': '(1)'}), '(num_lines=1)\n', (4320, 4333), False, 'from aorist import Attribute, NaturalNumber, StringIdentifier, DateString, POSIXTimestamp, PositiveFloat, default_tabular_schema, RowStruct, StaticDataTable, DataSchema, StorageSetup, RemoteStorageSetup, Storage, RemoteStorage, RemoteLocation, CSVEncoding, Encoding, DataSet, DatumTemplate, Asset, WebLocation, FileBasedStorageLayout, CSVHeader, FileHeader, APIOrFileLayout, SingleFileLayout, FreeText, Empty, FIPSStateCode, IntegerNumber\n')] |
# ----------------------------------------------------------------------------
# Copyright (c) 2016--, gneiss development team.
#
# Distributed under the terms of the Modified BSD License.
#
# The full license is in the file COPYING.txt, distributed with this software.
# ----------------------------------------------------------------------------
import abc
import pandas as pd
from skbio.stats.composition import ilr_inv
from gneiss._model import Model
from gneiss.balances import balance_basis
class RegressionModel(Model):
    def __init__(self, *args, **kwargs):
        """
        Summary object for storing regression results.

        A `RegressionModel` stores information about the individual
        balances used in the regression, the coefficients and the
        residuals.  This object can be used to perform predictions.
        In addition, summary statistics such as the coefficient of
        determination for the overall fit can be calculated.

        Parameters
        ----------
        submodels : list of statsmodels objects
            List of statsmodels result objects.
        balances : pd.DataFrame
            A table of balances where samples are rows and
            balances are columns. These balances were calculated
            using `tree`.
        """
        self._beta = None      # fitted coefficients, populated by `fit()`
        self._resid = None     # fitted residuals, populated by `fit()`
        self._fitted = False   # flipped to True once `fit()` has run
        super().__init__(*args, **kwargs)
        # there is only one design matrix for regression
        self.design_matrix = self.design_matrices

    def _check_fitted(self):
        """ Raise a ValueError if the model has not been fitted yet. """
        # Previously the ValueError was constructed but never raised,
        # so an unfitted model silently returned None / garbage.
        if not self._fitted:
            raise ValueError('Model not fitted - coefficients not '
                             'calculated. See `fit()`')

    def coefficients(self, tree=None):
        """ Returns coefficients from fit.

        Parameters
        ----------
        tree : skbio.TreeNode, optional
            The tree used to perform the ilr transformation.  If this
            is specified, then the prediction will be represented as
            proportions. Otherwise, if this is not specified, the
            prediction will be represented as balances. (default: None).

        Returns
        -------
        pd.DataFrame
            A table of coefficients where rows are covariates,
            and the columns are balances. If `tree` is specified, then
            the columns are proportions.

        Raises
        ------
        ValueError
            If the model has not been fitted.
        """
        self._check_fitted()
        coef = self._beta
        if tree is not None:
            basis, _ = balance_basis(tree)
            c = ilr_inv(coef.values, basis=basis)
            ids = [n.name for n in tree.tips()]
            return pd.DataFrame(c, columns=ids, index=coef.index)
        else:
            return coef

    def residuals(self, tree=None):
        """ Returns calculated residuals from fit.

        Parameters
        ----------
        tree : skbio.TreeNode, optional
            The tree used to perform the ilr transformation.  If this
            is specified, then the residuals will be represented as
            proportions. Otherwise, if this is not specified, the
            residuals will be represented as balances. (default: None).

        Returns
        -------
        pd.DataFrame
            A table of residuals where rows are covariates,
            and the columns are balances. If `tree` is specified, then
            the columns are proportions.

        Raises
        ------
        ValueError
            If the model has not been fitted.

        References
        ----------
        .. [1] "A concise guide to compositional data analysis,
           CDA work." Girona 24 (2003): 73-81.
        """
        self._check_fitted()
        resid = self._resid
        if tree is not None:
            basis, _ = balance_basis(tree)
            proj_resid = ilr_inv(resid.values, basis=basis)
            ids = [n.name for n in tree.tips()]
            return pd.DataFrame(proj_resid,
                                columns=ids,
                                index=resid.index)
        else:
            return resid

    @abc.abstractmethod
    def predict(self, X=None, tree=None, **kwargs):
        """ Performs a prediction based on model.

        Parameters
        ----------
        X : pd.DataFrame, optional
            Input table of covariates, where columns are covariates, and
            rows are samples.  If not specified, then the fitted values
            calculated from training the model will be returned.
        tree : skbio.TreeNode, optional
            The tree used to perform the ilr transformation.  If this
            is specified, then the prediction will be represented
            as proportions. Otherwise, if this is not specified,
            the prediction will be represented as balances. (default: None).
        **kwargs : dict
            Other arguments to be passed into the model prediction.

        Returns
        -------
        pd.DataFrame
            A table of predicted values where rows are covariates,
            and the columns are balances. If `tree` is specified, then
            the columns are proportions.

        Raises
        ------
        ValueError
            If the model has not been fitted.
        """
        self._check_fitted()
        if X is None:
            X = self.design_matrices
        prediction = X.dot(self._beta)
        if tree is not None:
            basis, _ = balance_basis(tree)
            proj_prediction = ilr_inv(prediction.values, basis=basis)
            ids = [n.name for n in tree.tips()]
            return pd.DataFrame(proj_prediction,
                                columns=ids,
                                index=prediction.index)
        else:
            return prediction
| [
"skbio.stats.composition.ilr_inv",
"pandas.DataFrame",
"gneiss.balances.balance_basis"
] | [((2433, 2452), 'gneiss.balances.balance_basis', 'balance_basis', (['tree'], {}), '(tree)\n', (2446, 2452), False, 'from gneiss.balances import balance_basis\n'), ((2469, 2502), 'skbio.stats.composition.ilr_inv', 'ilr_inv', (['coef.values'], {'basis': 'basis'}), '(coef.values, basis=basis)\n', (2476, 2502), False, 'from skbio.stats.composition import ilr_inv\n'), ((2570, 2616), 'pandas.DataFrame', 'pd.DataFrame', (['c'], {'columns': 'ids', 'index': 'coef.index'}), '(c, columns=ids, index=coef.index)\n', (2582, 2616), True, 'import pandas as pd\n'), ((3912, 3931), 'gneiss.balances.balance_basis', 'balance_basis', (['tree'], {}), '(tree)\n', (3925, 3931), False, 'from gneiss.balances import balance_basis\n'), ((3957, 3991), 'skbio.stats.composition.ilr_inv', 'ilr_inv', (['resid.values'], {'basis': 'basis'}), '(resid.values, basis=basis)\n', (3964, 3991), False, 'from skbio.stats.composition import ilr_inv\n'), ((4059, 4115), 'pandas.DataFrame', 'pd.DataFrame', (['proj_resid'], {'columns': 'ids', 'index': 'resid.index'}), '(proj_resid, columns=ids, index=resid.index)\n', (4071, 4115), True, 'import pandas as pd\n'), ((5580, 5599), 'gneiss.balances.balance_basis', 'balance_basis', (['tree'], {}), '(tree)\n', (5593, 5599), False, 'from gneiss.balances import balance_basis\n'), ((5630, 5669), 'skbio.stats.composition.ilr_inv', 'ilr_inv', (['prediction.values'], {'basis': 'basis'}), '(prediction.values, basis=basis)\n', (5637, 5669), False, 'from skbio.stats.composition import ilr_inv\n'), ((5737, 5803), 'pandas.DataFrame', 'pd.DataFrame', (['proj_prediction'], {'columns': 'ids', 'index': 'prediction.index'}), '(proj_prediction, columns=ids, index=prediction.index)\n', (5749, 5803), True, 'import pandas as pd\n')] |
'''
Solution for day 13 of the 2021 Advent of Code calendar.
Run it with the command `python -m adventofcode run_solution -y 2021 13` from the project root.
'''
import numpy as np
from adventofcode.types import Solution
def part1(data, exit_on_first_fold=False):
    """Plot the dots and apply the fold instructions from the puzzle input.

    The input contains ``x,y`` coordinate lines followed by
    ``fold along x=N`` / ``fold along y=N`` instructions.  Dots are
    plotted on a 2-D grid, then each fold mirrors one half of the board
    onto the other (overlapping dots simply accumulate, so callers
    should count non-zero cells rather than sum values).

    Parameters
    ----------
    data : str
        Raw puzzle input.
    exit_on_first_fold : bool, optional
        If True, return the board right after the first fold
        (part 1 of the puzzle).  Default False.

    Returns
    -------
    numpy.ndarray
        The (partially) folded board.
    """
    coord_rows = [row for row in data.splitlines() if row and 'fold' not in row]
    # Board is sized to fit the largest coordinate in each dimension.
    # NOTE(review): assumes every fold line splits the board exactly in
    # half, as AoC inputs guarantee — otherwise the halves would not add.
    n_rows = max(int(row.split(",")[1]) for row in coord_rows) + 1
    n_cols = max(int(row.split(",")[0]) for row in coord_rows) + 1
    board = np.zeros((n_rows, n_cols))

    for row in data.splitlines():
        if not row:
            continue
        if 'fold along x=' in row:
            # Vertical fold: mirror the right half onto the left half.
            x = int(row.split("=")[1])
            left = board[:, 0:x]
            right = board[:, x + 1:]
            board = left + right[:, ::-1]
            if exit_on_first_fold:
                return board
            continue
        if 'fold along y=' in row:
            # Horizontal fold: mirror the bottom half onto the top half.
            y = int(row.split("=")[1])
            top = board[0:y, :]
            bottom = board[y + 1:, :]
            board = top + bottom[::-1]
            if exit_on_first_fold:
                return board
            continue
        # Coordinate line: "col,row" -> board[row][col]
        col, row_idx = (int(c) for c in row.split(","))
        board[row_idx][col] = 1
    return board
def run(data: str) -> Solution:
    """Return (part-1 answer, fully-folded board) for the puzzle input."""
    after_first_fold = part1(data, exit_on_first_fold=True)
    fully_folded = part1(data, exit_on_first_fold=False)
    return np.count_nonzero(after_first_fold), fully_folded
| [
"numpy.flip"
] | [((754, 773), 'numpy.flip', 'np.flip', (['fold[::-1]'], {}), '(fold[::-1])\n', (761, 773), True, 'import numpy as np\n'), ((1173, 1186), 'numpy.flip', 'np.flip', (['fold'], {}), '(fold)\n', (1180, 1186), True, 'import numpy as np\n')] |
"""
Test data scaffolding.
Read: https://docs.djangoproject.com/en/dev/howto/custom-management-commands/
"""
import json
from random import randint
from django.core.management.base import BaseCommand, CommandError
from ensemble.models import (
Classification,
Model,
ModelVersion,
MediaFile,
VideoPrediction,
AudioPrediction,
)
class Command(BaseCommand):
help = "Scaffold some test data for ensemble"
def handle(self, *args, **options):
gunshot = Classification(name="GUNSHOT")
gunshot.save()
audioset_model = Model(name="audioset")
audioset_model.save()
audioset_model.classifications.add(gunshot)
audioset_model_trained = ModelVersion(
model=audioset_model, version="0.01alpha2"
)
audioset_model_trained.save()
for movie in json.loads(self.__movie_json, strict=False):
media_file = MediaFile(
name=movie["title"],
url=movie["sources"][0],
description=movie["description"],
)
media_file.save()
video_predictions = [
self.__generate_random_video_prediction(
media_file, gunshot, audioset_model_trained
)
for _ in range(1000)
]
audio_predictions = [
self.__generate_random_audio_prediction(
media_file, gunshot, audioset_model_trained
)
for _ in range(1000)
]
# VideoPrediction.objects.bulk_create(video_predictions)
# AudioPrediction.objects.bulk_create(audio_predictions)
for prediction in video_predictions + audio_predictions:
prediction.save()
def __generate_random_video_prediction(self, media_file, classification, model):
"""
Generate a random gunshot video prediction for the video
"""
x = randint(0, 1280)
y = randint(0, 720)
width = randint(1, 1280 - x + 1)
height = randint(1, 720 - y + 1)
return VideoPrediction(
media_file=media_file,
classification=classification,
confidence=randint(0, 100),
model_version=model,
time=randint(0, 600000),
x=x,
y=y,
width=width,
height=height,
)
def __generate_random_audio_prediction(self, media_file, classification, model):
"""
Generate a random 10 second audio prediction for a gunshot
"""
return AudioPrediction(
media_file=media_file,
classification=classification,
confidence=randint(0, 100),
model_version=model,
time=randint(0, 600000),
duration="10000",
)
__movie_json = """
[
{
"description": "Big Buck Bunny tells the story of a giant rabbit with a heart bigger than himself. When one sunny day three rodents rudely harass him, something snaps... and the rabbit ain't no bunny anymore! In the typical cartoon tradition he prepares the nasty rodents a comical revenge.\n\nLicensed under the Creative Commons Attribution license\nhttp://www.bigbuckbunny.org",
"sources": [
"http://commondatastorage.googleapis.com/gtv-videos-bucket/sample/BigBuckBunny.mp4"
],
"subtitle": "By Blender Foundation",
"thumb": "images/BigBuckBunny.jpg",
"title": "Big Buck Bunny"
},
{
"description": "The first Blender Open Movie from 2006",
"sources": [
"http://commondatastorage.googleapis.com/gtv-videos-bucket/sample/ElephantsDream.mp4"
],
"subtitle": "By Blender Foundation",
"thumb": "images/ElephantsDream.jpg",
"title": "Elephant Dream"
},
{
"description": "HBO GO now works with Chromecast -- the easiest way to enjoy online video on your TV. For when you want to settle into your Iron Throne to watch the latest episodes. For $35.\nLearn how to use Chromecast with HBO GO and more at google.com/chromecast.",
"sources": [
"http://commondatastorage.googleapis.com/gtv-videos-bucket/sample/ForBiggerBlazes.mp4"
],
"subtitle": "By Google",
"thumb": "images/ForBiggerBlazes.jpg",
"title": "For Bigger Blazes"
},
{
"description": "Introducing Chromecast. The easiest way to enjoy online video and music on your TV—for when Batman's escapes aren't quite big enough. For $35. Learn how to use Chromecast with Google Play Movies and more at google.com/chromecast.",
"sources": [
"http://commondatastorage.googleapis.com/gtv-videos-bucket/sample/ForBiggerEscapes.mp4"
],
"subtitle": "By Google",
"thumb": "images/ForBiggerEscapes.jpg",
"title": "For Bigger Escape"
},
{
"description": "Introducing Chromecast. The easiest way to enjoy online video and music on your TV. For $35. Find out more at google.com/chromecast.",
"sources": [
"http://commondatastorage.googleapis.com/gtv-videos-bucket/sample/ForBiggerFun.mp4"
],
"subtitle": "By Google",
"thumb": "images/ForBiggerFun.jpg",
"title": "For Bigger Fun"
},
{
"description": "Introducing Chromecast. The easiest way to enjoy online video and music on your TV—for the times that call for bigger joyrides. For $35. Learn how to use Chromecast with YouTube and more at google.com/chromecast.",
"sources": [
"http://commondatastorage.googleapis.com/gtv-videos-bucket/sample/ForBiggerJoyrides.mp4"
],
"subtitle": "By Google",
"thumb": "images/ForBiggerJoyrides.jpg",
"title": "For Bigger Joyrides"
},
{
"description": "Introducing Chromecast. The easiest way to enjoy online video and music on your TV—for when you want to make Buster's big meltdowns even bigger. For $35. Learn how to use Chromecast with Netflix and more at google.com/chromecast.",
"sources": [
"http://commondatastorage.googleapis.com/gtv-videos-bucket/sample/ForBiggerMeltdowns.mp4"
],
"subtitle": "By Google",
"thumb": "images/ForBiggerMeltdowns.jpg",
"title": "For Bigger Meltdowns"
},
{
"description": "Sintel is an independently produced short film, initiated by the Blender Foundation as a means to further improve and validate the free/open source 3D creation suite Blender. With initial funding provided by 1000s of donations via the internet community, it has again proven to be a viable development model for both open 3D technology as for independent animation film.\nThis 15 minute film has been realized in the studio of the Amsterdam Blender Institute, by an international team of artists and developers. In addition to that, several crucial technical and creative targets have been realized online, by developers and artists and teams all over the world.\nwww.sintel.org",
"sources": [
"http://commondatastorage.googleapis.com/gtv-videos-bucket/sample/Sintel.mp4"
],
"subtitle": "By Blender Foundation",
"thumb": "images/Sintel.jpg",
"title": "Sintel"
},
{
"description": "Smoking Tire takes the all-new Subaru Outback to the highest point we can find in hopes our customer-appreciation Balloon Launch will get some free T-shirts into the hands of our viewers.",
"sources": [
"http://commondatastorage.googleapis.com/gtv-videos-bucket/sample/SubaruOutbackOnStreetAndDirt.mp4"
],
"subtitle": "By Garage419",
"thumb": "images/SubaruOutbackOnStreetAndDirt.jpg",
"title": "Subaru Outback On Street And Dirt"
},
{
"description": "Tears of Steel was realized with crowd-funding by users of the open source 3D creation tool Blender. Target was to improve and test a complete open and free pipeline for visual effects in film - and to make a compelling sci-fi film in Amsterdam, the Netherlands. The film itself, and all raw material used for making it, have been released under the Creatieve Commons 3.0 Attribution license. Visit the tearsofsteel.org website to find out more about this, or to purchase the 4-DVD box with a lot of extras. (CC) Blender Foundation - http://www.tearsofsteel.org",
"sources": [
"http://commondatastorage.googleapis.com/gtv-videos-bucket/sample/TearsOfSteel.mp4"
],
"subtitle": "By Blender Foundation",
"thumb": "images/TearsOfSteel.jpg",
"title": "Tears of Steel"
},
{
"description": "The Smoking Tire heads out to Adams Motorsports Park in Riverside, CA to test the most requested car of 2010, the Volkswagen GTI. Will it beat the Mazdaspeed3's standard-setting lap time? Watch and see...",
"sources": [
"http://commondatastorage.googleapis.com/gtv-videos-bucket/sample/VolkswagenGTIReview.mp4"
],
"subtitle": "By Garage419",
"thumb": "images/VolkswagenGTIReview.jpg",
"title": "Volkswagen GTI Review"
},
{
"description": "The Smoking Tire is going on the 2010 Bullrun Live Rally in a 2011 Shelby GT500, and posting a video from the road every single day! The only place to watch them is by subscribing to The Smoking Tire or watching at BlackMagicShine.com",
"sources": [
"http://commondatastorage.googleapis.com/gtv-videos-bucket/sample/WeAreGoingOnBullrun.mp4"
],
"subtitle": "By Garage419",
"thumb": "images/WeAreGoingOnBullrun.jpg",
"title": "We Are Going On Bullrun"
},
{
"description": "The Smoking Tire meets up with Chris and Jorge from CarsForAGrand.com to see just how far $1,000 can go when looking for a car.The Smoking Tire meets up with Chris and Jorge from CarsForAGrand.com to see just how far $1,000 can go when looking for a car.",
"sources": [
"http://commondatastorage.googleapis.com/gtv-videos-bucket/sample/WhatCarCanYouGetForAGrand.mp4"
],
"subtitle": "By Garage419",
"thumb": "images/WhatCarCanYouGetForAGrand.jpg",
"title": "What care can you get for a grand?"
}
]
"""
| [
"json.loads",
"ensemble.models.MediaFile",
"ensemble.models.Classification",
"ensemble.models.Model",
"ensemble.models.ModelVersion",
"random.randint"
] | [((491, 521), 'ensemble.models.Classification', 'Classification', ([], {'name': '"""GUNSHOT"""'}), "(name='GUNSHOT')\n", (505, 521), False, 'from ensemble.models import Classification, Model, ModelVersion, MediaFile, VideoPrediction, AudioPrediction\n'), ((570, 592), 'ensemble.models.Model', 'Model', ([], {'name': '"""audioset"""'}), "(name='audioset')\n", (575, 592), False, 'from ensemble.models import Classification, Model, ModelVersion, MediaFile, VideoPrediction, AudioPrediction\n'), ((708, 764), 'ensemble.models.ModelVersion', 'ModelVersion', ([], {'model': 'audioset_model', 'version': '"""0.01alpha2"""'}), "(model=audioset_model, version='0.01alpha2')\n", (720, 764), False, 'from ensemble.models import Classification, Model, ModelVersion, MediaFile, VideoPrediction, AudioPrediction\n'), ((847, 890), 'json.loads', 'json.loads', (['self.__movie_json'], {'strict': '(False)'}), '(self.__movie_json, strict=False)\n', (857, 890), False, 'import json\n'), ((1978, 1994), 'random.randint', 'randint', (['(0)', '(1280)'], {}), '(0, 1280)\n', (1985, 1994), False, 'from random import randint\n'), ((2007, 2022), 'random.randint', 'randint', (['(0)', '(720)'], {}), '(0, 720)\n', (2014, 2022), False, 'from random import randint\n'), ((2039, 2063), 'random.randint', 'randint', (['(1)', '(1280 - x + 1)'], {}), '(1, 1280 - x + 1)\n', (2046, 2063), False, 'from random import randint\n'), ((2081, 2104), 'random.randint', 'randint', (['(1)', '(720 - y + 1)'], {}), '(1, 720 - y + 1)\n', (2088, 2104), False, 'from random import randint\n'), ((917, 1011), 'ensemble.models.MediaFile', 'MediaFile', ([], {'name': "movie['title']", 'url': "movie['sources'][0]", 'description': "movie['description']"}), "(name=movie['title'], url=movie['sources'][0], description=movie[\n 'description'])\n", (926, 1011), False, 'from ensemble.models import Classification, Model, ModelVersion, MediaFile, VideoPrediction, AudioPrediction\n'), ((2238, 2253), 'random.randint', 'randint', (['(0)', '(100)'], 
{}), '(0, 100)\n', (2245, 2253), False, 'from random import randint\n'), ((2305, 2323), 'random.randint', 'randint', (['(0)', '(600000)'], {}), '(0, 600000)\n', (2312, 2323), False, 'from random import randint\n'), ((2731, 2746), 'random.randint', 'randint', (['(0)', '(100)'], {}), '(0, 100)\n', (2738, 2746), False, 'from random import randint\n'), ((2798, 2816), 'random.randint', 'randint', (['(0)', '(600000)'], {}), '(0, 600000)\n', (2805, 2816), False, 'from random import randint\n')] |
import sqlite3 as sql
import pandas as pd
from tabulate import tabulate
connect = sql.connect("rpg_db.sqlite3")
cursor = connect.cursor()
def total_char_count():
""" Total all characters """
print(pd.read_sql_query('''SELECT COUNT(distinct character_id)
FROM charactercreator_character;''', connect))
def sub_class():
'''Grab population count of each subclass of characters'''
print(pd.read_sql_query(
'''SELECT
"mages", COUNT(*)
From charactercreator_mage
UNION
SELECT "clerics", COUNT(*)
from charactercreator_cleric
UNION
SELECT "fighter", COUNT(*)
FROM charactercreator_fighter
UNION
SELECT "thieves", COUNT(*)
FROM charactercreator_thief;''', connect))
def total_items():
'''Total count of all items '''
print(pd.read_sql_query('''SELECT COUNT(distinct item_id)
FROM armory_item;''', connect))
def weapon_items():
''' Total count of all weapon items '''
print(pd.read_sql_query('''SELECT COUNT(distinct item_ptr_id)
FROM armory_weapon;''', connect))
def weapon_category():
'''Total count of all weapon items and non weapon items'''
print(pd.read_sql_query('''SELECT COUNT(distinct item_ptr_id)
FROM armory_weapon;''', connect))
print(pd.read_sql_query(
'''SELECT COUNT(distinct item_id)
FROM armory_item
WHERE item_id < 138;''', connect))
def character_items():
''' Count of all items each character has'''
total_items_character_query = ('''SELECT cc.character_id, cc.name, COUNT(cci.item_id) AS item_count
FROM charactercreator_character AS cc
INNER JOIN charactercreator_character_inventory AS cci
ON cc.character_id = cci.character_id
GROUP BY cc.character_id
LIMIT 20''')
total_items_character = cursor.execute(total_items_character_query).fetchall()
print(tabulate(total_items_character,
headers=['ID', 'Character Name', 'Item Count']))
def character_weapons():
''' Count of all the weapons each character has '''
total_weapons_character_query = ('''SELECT cc.character_id, cc.name, COUNT()
FROM charactercreator_character AS cc
INNER JOIN charactercreator_character_inventory AS cci
ON cc.character_id = cci.character_id
INNER JOIN armory_item as ai
ON cc.item_id = ai.item_id
INNER JOIN armory_weapon as aw
ON ai.item_id = aw.item_ptr_id
GROUP BY cc.character_id
LIMIT 20''')
total_weapons_character = cursor.execute(total_weapons_character_query).fetchall()
print(tabulate(total_weapons_character,
headers=['ID', 'Character Name', 'Weapon Count']))
def character_item_avg():
''' Average amount of items the characters have '''
average_character_items_query = (
'''SELECT AVG(item_count) FROM
(SELECT cc.character_id, COUNT(cci.item_id) AS item_count
FROM charactercreator_character AS cc
LEFT JOIN charactercreator_character_inventory AS cci
ON cc.character_id = cci.character_id
GROUP BY cc.character_id
);''')
average_character_items = cursor.execute(average_character_items_query).fetchone()[0]
print(f'The average of items per player is {average_character_items:.2f}')
def character_weapon_avg():
''' Average amount of weapons the characters have '''
average_character_weapons_query = ('''
SELECT AVG(weapon_count) FROM
(
SELECT cc.character_id, COUNT(aw.item_ptr_id) AS weapon_count
FROM charactercreator_character AS cc
INNER JOIN charactercreator_character_inventory as cci
ON cc.character_id = cci.character_id
INNER JOIN armory_item as ai
ON cci.item_id = ai.item_id
LEFT JOIN armory_weapon as aw
ON ai.item_id = aw.item_ptr_id
GROUP BY cc.character_id
);''')
average_character_weapons = cursor.execute(average_character_weapons_query).fetchone()[0]
print(f'The average of weapons per player is {average_character_weapons:.2f}')
| [
"pandas.read_sql_query",
"tabulate.tabulate",
"sqlite3.connect"
] | [((84, 113), 'sqlite3.connect', 'sql.connect', (['"""rpg_db.sqlite3"""'], {}), "('rpg_db.sqlite3')\n", (95, 113), True, 'import sqlite3 as sql\n'), ((208, 328), 'pandas.read_sql_query', 'pd.read_sql_query', (['"""SELECT COUNT(distinct character_id)\n FROM charactercreator_character;"""', 'connect'], {}), '(\n """SELECT COUNT(distinct character_id)\n FROM charactercreator_character;"""\n , connect)\n', (225, 328), True, 'import pandas as pd\n'), ((411, 787), 'pandas.read_sql_query', 'pd.read_sql_query', (['"""SELECT\n "mages", COUNT(*)\n From charactercreator_mage\n\n UNION\n\n SELECT "clerics", COUNT(*)\n from charactercreator_cleric\n\n UNION\n\n SELECT "fighter", COUNT(*)\n FROM charactercreator_fighter\n\n UNION\n\n SELECT "thieves", COUNT(*)\n FROM charactercreator_thief;"""', 'connect'], {}), '(\n """SELECT\n "mages", COUNT(*)\n From charactercreator_mage\n\n UNION\n\n SELECT "clerics", COUNT(*)\n from charactercreator_cleric\n\n UNION\n\n SELECT "fighter", COUNT(*)\n FROM charactercreator_fighter\n\n UNION\n\n SELECT "thieves", COUNT(*)\n FROM charactercreator_thief;"""\n , connect)\n', (428, 787), True, 'import pandas as pd\n'), ((854, 949), 'pandas.read_sql_query', 'pd.read_sql_query', (['"""SELECT COUNT(distinct item_id)\n FROM armory_item;"""', 'connect'], {}), '(\n """SELECT COUNT(distinct item_id)\n FROM armory_item;""", connect)\n', (871, 949), True, 'import pandas as pd\n'), ((1021, 1126), 'pandas.read_sql_query', 'pd.read_sql_query', (['"""SELECT COUNT(distinct item_ptr_id)\n FROM armory_weapon;"""', 'connect'], {}), '(\n """SELECT COUNT(distinct item_ptr_id)\n FROM armory_weapon;""",\n connect)\n', (1038, 1126), True, 'import pandas as pd\n'), ((1216, 1321), 'pandas.read_sql_query', 'pd.read_sql_query', (['"""SELECT COUNT(distinct item_ptr_id)\n FROM armory_weapon;"""', 'connect'], {}), '(\n """SELECT COUNT(distinct item_ptr_id)\n FROM armory_weapon;""",\n connect)\n', (1233, 1321), True, 'import pandas as pd\n'), ((1324, 1452), 
'pandas.read_sql_query', 'pd.read_sql_query', (['"""SELECT COUNT(distinct item_id)\n FROM armory_item\n WHERE item_id < 138;"""', 'connect'], {}), '(\n """SELECT COUNT(distinct item_id)\n FROM armory_item\n WHERE item_id < 138;"""\n , connect)\n', (1341, 1452), True, 'import pandas as pd\n'), ((2072, 2151), 'tabulate.tabulate', 'tabulate', (['total_items_character'], {'headers': "['ID', 'Character Name', 'Item Count']"}), "(total_items_character, headers=['ID', 'Character Name', 'Item Count'])\n", (2080, 2151), False, 'from tabulate import tabulate\n'), ((3036, 3123), 'tabulate.tabulate', 'tabulate', (['total_weapons_character'], {'headers': "['ID', 'Character Name', 'Weapon Count']"}), "(total_weapons_character, headers=['ID', 'Character Name',\n 'Weapon Count'])\n", (3044, 3123), False, 'from tabulate import tabulate\n')] |
# encoding=utf-8
from __future__ import print_function
import sys
PYTHON_VERSION = sys.version_info[:2]
if (2, 7) != PYTHON_VERSION < (3, 5):
print("This mycobot version requires Python2.7, 3.5 or later.")
sys.exit(1)
import setuptools
import textwrap
import pymycobot
try:
long_description = (
open("README.md", encoding="utf-8").read()
+ open("docs/README.md", encoding="utf-8").read()
)
except:
long_description = textwrap.dedent(
"""\
# This is Python API for myCobot
This is a python API for serial communication with mycobot and controlling it.
[](https://www.elephantrobotics.com/en/myCobot-en/)
## Installation
**Notes**:
> Make sure that `Atom` is flashed into the top Atom, `Transponder` is flashed into the base Basic. <br>
> The firmware `Atom` and `Transponder` download address: [https://github.com/elephantrobotics/myCobot/tree/main/Software](https://github.com/elephantrobotics/myCobot/tree/main/Software)<br>
> You also can use myStudio to flash them, myStudio address: [https://github.com/elephantrobotics/myStudio/releases](https://github.com/elephantrobotics/myStudio/releases)
### Pip
```bash
pip install pymycobot --upgrade
```
### Source code
```bash
git clone https://github.com/elephantrobotics/pymycobot.git <your-path>
cd <your-path>/pymycobot
# Install
[sudo] python2 setup.py install
# or
[sudo] python3 setup.py install
```
## Usage:
```python
from pymycobot import MyCobot, Angle, Coord
from pymycobot import PI_PORT, PI_BAUD # For raspberry pi version of mycobot.
```
The [`demo`](./demo) directory stores some test case files.
You can find out which interfaces pymycobot provides in `pymycobot/README.md`.
Please go to [here](./docs/README.md).
"""
)
setuptools.setup(
name="pymycobot",
version=pymycobot.__version__,
author=pymycobot.__author__,
author_email=pymycobot.__email__,
description="Python API for serial communication of MyCobot.",
long_description=long_description,
long_description_content_type="text/markdown",
url=pymycobot.__git_url__,
packages=setuptools.find_packages(),
classifiers=[
"Programming Language :: Python :: 2.7",
"Programming Language :: Python :: 3.5",
"Programming Language :: Python :: 3.6",
"Programming Language :: Python :: 3.7",
"Programming Language :: Python :: 3.8",
"Programming Language :: Python :: 3.9",
"License :: OSI Approved :: MIT License",
"Operating System :: OS Independent",
],
install_requires=["pyserial"],
python_requires=">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, != 3.4.*",
)
| [
"textwrap.dedent",
"setuptools.find_packages",
"sys.exit"
] | [((215, 226), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (223, 226), False, 'import sys\n'), ((456, 2004), 'textwrap.dedent', 'textwrap.dedent', (['""" # This is Python API for myCobot\n\n This is a python API for serial communication with mycobot and controlling it.\n\n [](https://www.elephantrobotics.com/en/myCobot-en/)\n\n ## Installation\n\n **Notes**:\n\n > Make sure that `Atom` is flashed into the top Atom, `Transponder` is flashed into the base Basic. <br>\n > The firmware `Atom` and `Transponder` download address: [https://github.com/elephantrobotics/myCobot/tree/main/Software](https://github.com/elephantrobotics/myCobot/tree/main/Software)<br>\n > You also can use myStudio to flash them, myStudio address: [https://github.com/elephantrobotics/myStudio/releases](https://github.com/elephantrobotics/myStudio/releases)\n\n ### Pip\n\n ```bash\n pip install pymycobot --upgrade\n ```\n\n ### Source code\n\n ```bash\n git clone https://github.com/elephantrobotics/pymycobot.git <your-path>\n cd <your-path>/pymycobot\n # Install\n [sudo] python2 setup.py install\n # or\n [sudo] python3 setup.py install\n ```\n\n ## Usage:\n\n ```python\n from pymycobot import MyCobot, Angle, Coord\n from pymycobot import PI_PORT, PI_BAUD # For raspberry pi version of mycobot.\n ```\n\n The [`demo`](./demo) directory stores some test case files.\n\n You can find out which interfaces pymycobot provides in `pymycobot/README.md`.\n\n Please go to [here](./docs/README.md).\n """'], {}), '(\n """ # This is Python API for myCobot\n\n This is a python API for serial communication with mycobot and controlling it.\n\n [](https://www.elephantrobotics.com/en/myCobot-en/)\n\n ## Installation\n\n **Notes**:\n\n > Make sure that `Atom` is flashed into the top Atom, `Transponder` is flashed into the base Basic. 
<br>\n > The firmware `Atom` and `Transponder` download address: [https://github.com/elephantrobotics/myCobot/tree/main/Software](https://github.com/elephantrobotics/myCobot/tree/main/Software)<br>\n > You also can use myStudio to flash them, myStudio address: [https://github.com/elephantrobotics/myStudio/releases](https://github.com/elephantrobotics/myStudio/releases)\n\n ### Pip\n\n ```bash\n pip install pymycobot --upgrade\n ```\n\n ### Source code\n\n ```bash\n git clone https://github.com/elephantrobotics/pymycobot.git <your-path>\n cd <your-path>/pymycobot\n # Install\n [sudo] python2 setup.py install\n # or\n [sudo] python3 setup.py install\n ```\n\n ## Usage:\n\n ```python\n from pymycobot import MyCobot, Angle, Coord\n from pymycobot import PI_PORT, PI_BAUD # For raspberry pi version of mycobot.\n ```\n\n The [`demo`](./demo) directory stores some test case files.\n\n You can find out which interfaces pymycobot provides in `pymycobot/README.md`.\n\n Please go to [here](./docs/README.md).\n """\n )\n', (471, 2004), False, 'import textwrap\n'), ((2359, 2385), 'setuptools.find_packages', 'setuptools.find_packages', ([], {}), '()\n', (2383, 2385), False, 'import setuptools\n')] |
import argparse
import pickle
from utils.hit_rate_utils import NewHitRateEvaluator
from utils.constants import EVAL_SPLITS_DICT
from lib.refer import REFER
def threshold_with_confidence(exp_to_proposals, conf):
results = {}
for exp_id, proposals in exp_to_proposals.items():
assert len(proposals) >= 1
sorted_proposals = sorted(proposals, key=lambda p: p['score'], reverse=True)
thresh_proposals = [sorted_proposals[0]]
for prop in sorted_proposals[1:]:
if prop['score'] > conf:
thresh_proposals.append(prop)
else:
break
results[exp_id] = thresh_proposals
return results
def main(args):
dataset_splitby = '{}_{}'.format(args.dataset, args.split_by)
eval_splits = EVAL_SPLITS_DICT[dataset_splitby]
# Load proposals
proposal_path = 'cache/proposals_{}_{}_{}.pkl'.format(args.m, args.dataset, args.tid)
print('loading {} proposals from {}...'.format(args.m, proposal_path))
with open(proposal_path, 'rb') as f:
proposal_dict = pickle.load(f)
# Load refer
refer = REFER('data/refer', dataset=args.dataset, splitBy=args.split_by)
# Evaluate hit rate
print('Hit rate on {}\n'.format(dataset_splitby))
evaluator = NewHitRateEvaluator(refer, top_N=None, threshold=args.thresh)
print('conf: {:.3f}'.format(args.conf))
for split in eval_splits:
exp_to_proposals = proposal_dict[split]
exp_to_proposals = threshold_with_confidence(exp_to_proposals, args.conf)
proposal_per_ref, hit_rate = evaluator.eval_hit_rate(split, exp_to_proposals)
print('[{:5s}] hit rate: {:.2f} @ {:.2f}'.format(split, hit_rate*100, proposal_per_ref))
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--m', type=str, required=True)
parser.add_argument('--dataset', default='refcoco')
parser.add_argument('--split-by', default='unc')
parser.add_argument('--tid', type=str, required=True)
parser.add_argument('--thresh', type=float, default=0.5)
parser.add_argument('--conf', type=float, required=True)
main(parser.parse_args())
| [
"pickle.load",
"argparse.ArgumentParser",
"lib.refer.REFER",
"utils.hit_rate_utils.NewHitRateEvaluator"
] | [((1113, 1177), 'lib.refer.REFER', 'REFER', (['"""data/refer"""'], {'dataset': 'args.dataset', 'splitBy': 'args.split_by'}), "('data/refer', dataset=args.dataset, splitBy=args.split_by)\n", (1118, 1177), False, 'from lib.refer import REFER\n'), ((1272, 1333), 'utils.hit_rate_utils.NewHitRateEvaluator', 'NewHitRateEvaluator', (['refer'], {'top_N': 'None', 'threshold': 'args.thresh'}), '(refer, top_N=None, threshold=args.thresh)\n', (1291, 1333), False, 'from utils.hit_rate_utils import NewHitRateEvaluator\n'), ((1763, 1788), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (1786, 1788), False, 'import argparse\n'), ((1069, 1083), 'pickle.load', 'pickle.load', (['f'], {}), '(f)\n', (1080, 1083), False, 'import pickle\n')] |