text stringlengths 957 885k |
|---|
"""animacion de la simulacion."""
import pygame
import sys
import copy
from pygame.locals import QUIT
from animacion.gui import gui
pygame.init()
def intefaz():
    """Render the GUI overlay: info panel and teller boxes."""
    for draw in (g.panel, g.caja):
        draw()
def control_evento():
    """Handle pygame events: quit cleanly on window close, then flip the display."""
    for event in pygame.event.get():
        if event.type == QUIT:
            pygame.quit()
            sys.exit()
    # Refresh the whole display once per frame, after event handling.
    pygame.display.update()
def crear_Usuario(pos):
    """Blit the user sprite onto the main window at position `pos` ([x, y])."""
    ventana.blit(usuario, pos)
def crear_cola(ncola):
    """Build the waiting-queue slots: positions spaced 72 px apart at y=400.

    Always contains at least the first slot at [10, 400], mirroring the
    original behavior even for ncola <= 1.
    """
    positions = [[10, 400]]
    for _ in range(1, ncola):
        positions.append([positions[-1][0] + 72, 400])
    return positions
def crear_caja(ncola):
    """Build the teller boxes: [[x, 70], free_flag] spaced 150 px apart.

    Every box starts free (flag == 1).  Always contains at least the first
    box at x=136, mirroring the original behavior even for ncola <= 1.
    """
    boxes = [[[136, 70], 1]]
    for _ in range(1, ncola):
        boxes.append([[boxes[-1][0][0] + 150, 70], 1])
    return boxes
def atender_cliente(pos, ncaja):
    """Move a client one step (velocidad px) toward box `ncaja`.

    Returns 1 once the client has reached the box position; otherwise
    returns the (mutated) `pos` list.  NOTE(review): this int/list dual
    return type is relied upon by mover_Usuario's `type(...) == int` check.
    """
    if pos[0] < caja[ncaja][0][0]:
        pos[0] += velocidad
    if pos[1] > caja[ncaja][0][1]:
        pos[1] -= velocidad
    if pos == caja[ncaja][0]:
        return 1
    return pos
def mostrar_cola():
    """Draw every queued user at its slot position."""
    for pos in cola:
        crear_Usuario(pos)
def mostrar_caja():
    """Draw a user sprite at every box that is currently serving a client."""
    for i, ocupado in enumerate(cajaOcupado):
        if ocupado:
            crear_Usuario(caja[i][0])
def sacar_cola():
    """Remove one user from the queue.

    NOTE(review): pops from the END of the list (LIFO); a bank queue is
    usually FIFO (`cola.pop(0)`) -- confirm intended behavior.
    """
    cola.pop()
def salir_Caja():
    """Animate clients leaving their boxes toward the exit.

    First marks every occupied box's client as 'leaving', then moves each
    leaving client left toward `salida`; once past the exit, the box is
    freed (caja[i][1] = 1) and the leaving position is reset to the box.
    """
    for i in range(len(cajaOcupado)):
        if cajaOcupado[i]:
            saliendo[i][1] = 1
    for i in range(len(cajaOcupado)):
        if saliendo[i][1]:
            # The box stops counting as occupied as soon as the client leaves.
            cajaOcupado[i] = 0
            if saliendo[i][0][0] > salida[0]:
                saliendo[i][0][0] -= velocidad
            if saliendo[i][0][0] <= salida[0]:
                # Reached the exit: reset position to the box and free it.
                saliendo[i][0] = copy.copy(caja[i][0])
                saliendo[i][1] = 0
                caja[i][1] = 1
            crear_Usuario(saliendo[i][0])
def obtener_caja_disponible():
    """Return the index of the first free box and mark it as taken.

    NOTE(review): implicitly returns None when no box is free; the caller
    then indexes `movimientoCola[None]` and crashes -- TODO confirm a free
    box is always available when this is called.
    """
    for i in range(len(caja)):
        if caja[i][1]:
            caja[i][1] = 0
            return i
def mover_Usuario():
    """Advance every in-transit client toward its assigned box and draw it.

    When atender_cliente signals arrival (returns the int 1), the box is
    marked occupied and the transit slot resets to the spawn position.
    """
    for i in range(len(movimientoCola)):
        if movimientoCola[i][1]:
            movimientoCola[i][0] = atender_cliente(movimientoCola[i][0], i)
            if type(movimientoCola[i][0]) == int:
                # Arrived: stop moving, occupy the box, reset transit position.
                movimientoCola[i][1] = 0
                cajaOcupado[i] = 1
                movimientoCola[i][0] = [10, 400]
            crear_Usuario(movimientoCola[i][0])
ventana = pygame.display.set_mode((1000, 483))  # main window surface
pygame.display.set_caption("Simulacion de banco")
fondo = pygame.image.load("p2.jpg").convert()  # background image
usuario = pygame.image.load("pspr.png")  # user sprite
g = gui(ventana)
cola = crear_cola(7)  # waiting-queue slot positions
caja = crear_caja(4)  # teller boxes: [[x, y], free_flag]
posInicial = [10, 400]  # spawn position for users entering transit
salida = [10, 70]  # exit position for clients leaving a box
cajaOcupado = [0 for _ in range(4)]  # 1 while box i is serving a client
saliendo = [[copy.copy(caja[i][0]), 0] for i in range(4)]  # leaving clients: [pos, leaving_flag]
movimientoCola = [[posInicial[:], 0] for _ in range(4)]  # in-transit clients: [pos, moving_flag]
velocidad = 1  # movement speed, pixels per frame
atiende = 1000  # frames between dispatching a client to a box
cont = 0
# Main animation loop: redraw, dispatch a client every `atiende` frames,
# otherwise animate departures; always advance in-transit clients.
while True:
    ventana.fill((255, 255, 255))
    ventana.blit(fondo, (0, 0))
    mostrar_caja()
    mostrar_cola()
    if cont > atiende:
        cont = 0
        sacar_cola()
        i = obtener_caja_disponible()
        print("mover a la caja", i)
        movimientoCola[i][1] = 1
    else:
        salir_Caja()
        cont += 1
    mover_Usuario()
    intefaz()
    control_evento()
|
<filename>tests/st/ops/gpu/test_relu_v2.py
# Copyright 2020 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
import numpy as np
import pytest
import mindspore.context as context
import mindspore.nn as nn
from mindspore import Tensor
from mindspore.ops import operations as P
import mindspore.ops.operations._grad_ops as G
class ReluNet(nn.Cell):
    """Network computing ReLU forward and its input gradient via ReluGrad."""

    def __init__(self):
        super(ReluNet, self).__init__()
        self.relu = P.ReLU()
        self.relu_grad = G.ReluGrad()

    def construct(self, x, dy):
        # Forward pass, then the gradient w.r.t. the input (masked by y).
        y = self.relu(x)
        dx = self.relu_grad(dy, y)
        return y, dx
@pytest.mark.level0
@pytest.mark.platform_x86_gpu_training
@pytest.mark.env_onecard
def test_ReluV2():
    """ReLU forward zeroes negatives; backward passes dy only where y > 0."""
    # NOTE(review): save_graphs=True writes graph-dump files to disk -- likely
    # a debugging leftover in a CI test; confirm it is intentional.
    context.set_context(mode=context.GRAPH_MODE, device_target="GPU", save_graphs=True)
    x = Tensor(np.array([[[[-1, 1, 10],
                           [1, -1, 1],
                           [10, 1, -1]]]]).astype(np.float32))
    dy = Tensor(np.array([[[[1, 0, 3],
                            [0, 1, 0],
                            [2, 1, 1]]]]).astype(np.float32))
    expect_y = np.array([[[[0, 1, 10,],
                           [1, 0, 1,],
                           [10, 1, 0.]]]]).astype(np.float32)
    expect_dx = np.array([[[[0, 0, 3],
                            [0, 0, 0],
                            [2, 1, 0]]]]).astype(np.float32)
    net = ReluNet()
    y, dx = net(Tensor(x), Tensor(dy))
    assert np.allclose(y.asnumpy(), expect_y)
    assert np.allclose(dx.asnumpy(), expect_dx)
class AddReluNet(nn.Cell):
    """Add two tensors, apply ReLU, and compute the ReLU input gradient."""

    def __init__(self):
        super(AddReluNet, self).__init__()
        self.add = P.Add()
        self.relu = P.ReLU()
        self.relu_grad = G.ReluGrad()

    def construct(self, x1, x2, dy):
        y = self.add(x1, x2)
        y = self.relu(y)
        dx = self.relu_grad(dy, y)
        return y, dx
@pytest.mark.level0
@pytest.mark.platform_x86_gpu_training
@pytest.mark.env_onecard
def test_AddRelu():
    """Add + ReLU fusion: y = relu(x1 + x2); dx masked by y > 0."""
    context.set_context(mode=context.GRAPH_MODE, device_target="GPU", save_graphs=True)
    x1 = Tensor(np.array([[[[-1, 1, 10],
                            [1, -1, 1],
                            [10, 1, -1]]]]).astype(np.float32))
    x2 = Tensor(np.array([[[[-1, 1, 10],
                            [1, -1, 1],
                            [10, 1, -1]]]]).astype(np.float32))
    dy = Tensor(np.array([[[[1, 0, 3],
                            [0, 1, 0],
                            [2, 1, 1]]]]).astype(np.float32))
    # y doubles the positive entries since x1 == x2.
    expect_y = np.array([[[[0, 2, 20],
                           [2, 0, 2],
                           [20, 2, 0]]]]).astype(np.float32)
    expect_dx = np.array([[[[0, 0, 3],
                            [0, 0, 0],
                            [2, 1, 0]]]]).astype(np.float32)
    net = AddReluNet()
    y, dx1 = net(Tensor(x1), Tensor(x2), Tensor(dy))
    assert np.allclose(y.asnumpy(), expect_y)
    assert np.allclose(dx1.asnumpy(), expect_dx)
class AddReluGradNet(nn.Cell):
    """ReLU forward, then ReluGrad applied to the sum of two gradients."""

    def __init__(self):
        super(AddReluGradNet, self).__init__()
        self.add = P.Add()
        self.relu = P.ReLU()
        self.relu_grad = G.ReluGrad()

    def construct(self, x, dy1, dy2):
        y = self.relu(x)
        # The incoming gradients are summed before masking.
        dy = self.add(dy1, dy2)
        dx = self.relu_grad(dy, y)
        return y, dx
@pytest.mark.level0
@pytest.mark.platform_x86_gpu_training
@pytest.mark.env_onecard
def test_AddReluGrad():
    """ReLU + summed-gradient fusion: dx = (dy1 + dy2) masked by y > 0."""
    context.set_context(mode=context.GRAPH_MODE, device_target="GPU", save_graphs=True)
    x = Tensor(np.array([[[[-1, 1, 10],
                           [1, -1, 1],
                           [10, 1, -1]]]]).astype(np.float32))
    dy1 = Tensor(np.array([[[[1, 0, 3],
                             [0, 1, 0],
                             [2, 1, 1]]]]).astype(np.float32))
    dy2 = Tensor(np.array([[[[1, 0, 3],
                             [0, 1, 0],
                             [2, 1, 1]]]]).astype(np.float32))
    expect_y = np.array([[[[0, 1, 10,],
                           [1, 0, 1,],
                           [10, 1, 0.]]]]).astype(np.float32)
    # dx doubles dy1 since dy1 == dy2, masked where x <= 0.
    expect_dx = np.array([[[[0, 0, 6],
                            [0, 0, 0],
                            [4, 2, 0]]]]).astype(np.float32)
    net = AddReluGradNet()
    y, dx1 = net(Tensor(x), Tensor(dy1), Tensor(dy2))
    assert np.allclose(y.asnumpy(), expect_y)
    assert np.allclose(dx1.asnumpy(), expect_dx)
|
<gh_stars>0
"""
Listing 6.1 Word-level one-hot encoding (toy example)
"""
import numpy as np
# Initial data: one entry persample (in this example, a sampe is a sentence but it could be an entire document)
samples = ['The cat sat on the mat.','The dog ate my homework.']
# Builds an index of all tokens in the data
token_index = {}
for sample in samples:
for word in sample.split(): # Tokenizes samples via the split method. In real life punctuation and special characters would be stripped
if word not in token_index:
token_index[word] = len(token_index) + 1 # Assign unique index to each unique word. Note that you don't attribute index 0 to anything.
# Vectorizes the samples. You'll only consider the first max_length words in each sample.
max_length = 10
# Store results
results = np.zeros(shape=(len(samples),max_length,max(token_index.values()) + 1))
for i,sample in enumerate(samples):
for j,word in list(enumerate(sample.split()))[:max_length]:
index = token_index.get(word)
results[i,j,index] = 1.
"""
Listing 6.2 Character-level one-hot encoding (toy example)
"""
import string
samples = ['The cat sat on the mat.','The dog ate my homework.']
characters = string.printable # All printable ASCII characters
token_index = dict(zip(range(1,len(characters) + 1),characters))
max_length = 50
results = np.zeros((len(samples),max_length,max(token_index.keys()) + 1))
for i,sample in enumerate(samples):
for j,character in enumerate(sample):
index = token_index.get(character)
results[i,j,index] = 1.
"""
Listing 6.3 Using Keras for word-level one-hot encoding
"""
from keras.preprocessing.text import Tokenizer
samples = ['The cat sat on the mat.','The dog ate my homework.']
tokenizer = Tokenizer(num_words=1000) # Creates a tokenizer, configured to only take into account the 1000 most common words
tokenizer.fit_on_texts(samples) # Builds the word index
sequences = tokenizer.texts_to_sequences(samples) #Turns strings into a lists of integer indices
# You could also directly get the ont-hot binary representations
# Vectorization modes other than one-hot encoding are supported by this tokenizer
one_hot_results = tokenizer.texts_to_matrix(samples,mode='binary')
word_index = tokenizer.word_index
print('Found %s unique tokens. ' % len(word_index))
"""
Listing 6.4 Word-level one-hot encoding with hashing trick (toy example)
"""
samples = ['The cat sat on the mat.','The dog ate my homework.']
# Stores the words as vectors of size 1000.
# If you have close to 1000 words (or more) you'll see many hash collisions,
# Which will decrease the accuracy of this encoding method.
dimensionality = 1000
max_length = 10
results = np.zeros((len(samples),max_length,dimensionality))
for i,sample in enumerate(samples):
for j,word in list(enumerate(sample.split()))[:max_length]:
index = abs(hash(word)) % dimensionality # Hashes the words into a random integer index between 0 and 1000
results[i,j,index] = 1.
"""
Listing 6.5 Instantiating an Embedding layer
"""
from keras.layers import Embedding
# The Embedding layer takes at least two arguments:
# the number of possible tokens (here,1000: 1 + maximum word index)
# and the dimensionality of the embeddings (here,64).
embedding_layer = Embedding(1000,64)
"""
Listing 6.6 Loading the IMDB data for use in an Embedding layer
"""
from keras.datasets import imdb
from keras import preprocessing
max_features = 10000 # Number of words to consider as features
maxlen = 20 #Cuts off the text after this number of words (among the max_features most common words)
# Load the data as a list of integers
(x_train,y_train),(x_test,y_test) = imdb.load_data(num_words=max_features)
# Turns the list into a 2D integer tensor of shape (samples,maxlen)
x_train = preprocessing.sequence.pad_sequences(x_train,maxlen=maxlen)
x_test = preprocessing.sequence.pad_sequences(x_test,maxlen=maxlen)
"""
Listing 6.7 Using an Embedding layer and classifier on the IMDB data
"""
from keras.models import Sequential
from keras.layers import Flatten, Dense
model = Sequential()
# Specified the maximum input length to the Embedding layer so you can later flatten the embedded inputs.
# After the Embedding layer, the activations have shape (samples,maxlen,8)
model.add(Embedding(10000,8,input_length=maxlen))
# Flattens the 3D tensor of embeddings into a 2D tensor of shape (samples,maxlen * 8)
model.add(Flatten())
model.add(Dense(1,activation='sigmoid')) # Adds the classifier on top
model.compile(optimizer='rmsprop',loss='binary_crossentropy',metrics=['acc'])
model.summary()
history = model.fit(x_train,y_train,
epochs=10,
batch_size=32,
validation_split=0.2)
"""
Listing 6.8 Processing the labels of the raw IMDB data (Downloaded from http://mng.bz/0tIo)
"""
import os
imdb_dir = '/home/lqdev/Downloads/aclImdb'
train_dir = os.path.join(imdb_dir,'train')
labels = []
texts = []
for label_type in ['neg','pos']:
dir_name = os.path.join(train_dir,label_type)
for fname in os.listdir(dir_name):
if fname[-4:] == '.txt':
f = open(os.path.join(dir_name,fname))
texts.append(f.read())
f.close()
if label_type == 'neg':
labels.append(0)
else:
labels.append(1)
"""
Listing 6.9 Tokenizing the text of the raw IMDB data
"""
from keras.preprocessing.text import Tokenizer
from keras.preprocessing.sequence import pad_sequences
import numpy as np
maxlen = 100 # Cuts off reviews after 100 words
training_samples = 200 # Trains on 200 samples
validation_samples = 10000 # Validates on 10000 samples
max_words = 10000 # Consider onyl the top 10000 words in the dataset
tokenizer = Tokenizer(num_words=max_words)
tokenizer.fit_on_texts(texts)
sequences = tokenizer.texts_to_sequences(texts)
word_index = tokenizer.word_index
print("Found %s unique tokens" % len(word_index))
data = pad_sequences(sequences,maxlen=maxlen)
labels = np.asarray(labels)
print('Shape of data tensor: ', data.shape)
print('Shape of label tensor: ', labels.shape)
# Splits the data into a training set and a validation set,
# but first shuffles the data, because you're starting with data
# in which samples are ordered (all negative first, then all positive)
indices = np.arange(data.shape[0])
np.random.shuffle(indices)
data = data[indices]
labels = labels[indices]
x_train = data[:training_samples]
y_train = labels[:training_samples]
x_val = data[training_samples:training_samples + validation_samples]
y_val = labels[training_samples:training_samples + validation_samples]
"""
Listing 6.10 Parsing the GloVe word-embeddings file
"""
glove_dir = '/home/lqdev/Downloads/glove.6B'
embeddings_index = {}
f = open(os.path.join(glove_dir,'glove.6B.100d.txt'))
for line in f:
values = line.split()
word = values[0]
coefs = np.asarray(values[1:],dtype='float32')
embeddings_index[word] = coefs
f.close()
print('Found %s word vectors.' % len(embeddings_index))
"""
Listing 6.11 Preparing the GloVe word-embeddings matrix
"""
embedding_dim = 100
embedding_matrix = np.zeros((max_words,embedding_dim))
for word,i in word_index.items():
if i < max_words:
embedding_vector = embeddings_index.get(word)
if embedding_vector is not None:
embedding_matrix[i] = embedding_vector # Words not found in the embedding index will be all zeros
"""
Listing 6.12 Model definition
"""
from keras.models import Sequential
from keras.layers import Embedding,Flatten,Dense
model = Sequential()
model.add(Embedding(max_words,embedding_dim,input_length=maxlen))
model.add(Flatten())
model.add(Dense(32,activation='relu'))
model.add(Dense(1,activation='sigmoid'))
model.summary()
"""
Listing 6.13 Loading pretrained word embeddings into the Embedding layer
"""
model.layers[0].set_weights([embedding_matrix])
model.layers[0].trainable=False
"""
Listing 6.14 Training and evaluation
"""
model.compile(optimizer='rmsprop',loss='binary_crossentropy',metrics=['acc'])
history = model.fit(x_train,y_train,
epochs=10,
batch_size=32,
validation_data=(x_val,y_val))
model.save_weights('pre_trained_glove_model.h5')
"""
Listing 6.15 Plotting the results
"""
import matplotlib.pyplot as plt
acc = history.history['acc']
val_acc = history.history['val_acc']
loss = history.history['loss']
val_loss = history.history['val_loss']
epochs = range(1, len(acc) + 1)
plt.plot(epochs, acc, 'bo', label='Training acc')
plt.plot(epochs, val_acc, 'b', label='Validation acc')
plt.title('Training and validation accuracy')
plt.legend()
plt.figure()
plt.plot(epochs, loss, 'bo', label='Training loss')
plt.plot(epochs, val_loss, 'b', label='Validation loss')
plt.title('Training and validation loss')
plt.legend()
plt.show()
"""
Listing 6.16 Training the same model without pretrained word embeddings
"""
from keras.models import Sequential
from keras.layers import Embedding,Flatten,Dense
model = Sequential()
model.add(Embedding(max_words,embedding_dim,input_length=maxlen))
model.add(Flatten())
model.add(Dense(32,activation='relu'))
model.add(Dense(1,activation='sigmoid'))
model.summary()
model.compile(optimizer='rmsprop',loss='binary_crossentropy',metrics=['acc'])
history = model.fit(x_train,y_train,
epochs=10,
batch_size=32,
validation_data=(x_val,y_val))
"""
Listing 6.17 Tokenizing the data of the test set
"""
test_dir = os.path.join(imdb_dir,'test')
labels = []
texts = []
for label_type in ['neg', 'pos']:
dir_name = os.path.join(test_dir, label_type)
for fname in sorted(os.listdir(dir_name)):
if fname[-4:] == '.txt':
f = open(os.path.join(dir_name, fname))
texts.append(f.read())
f.close()
if label_type == 'neg':
labels.append(0)
else:
labels.append(1)
sequences = tokenizer.texts_to_sequences(texts)
x_test = pad_sequences(sequences, maxlen=maxlen)
y_test = np.asarray(labels)
"""
Listing 6.18 Evaluating the model on the test set
"""
model.load_weights('pre_trained_glove_model.h5')
model.evaluate(x_test, y_test)
"""
Listing 6.19 Pseudocode RNN
"""
# state_t = 0 # The state at t
# for input_t in input_sequence: # Iterates over sequence elements
# output_t = f(input_t,state_t)
# state_t = output_t
"""
Listing 6.20 More detailed pseudocode for the RNN
"""
# state_t = 0
# for input_t in input_sequence:
# output_t = activation(dot(W,input_t) + dot(U,state_t) + b)
# state_t = output_t
"""
Listing 6.21 Numpy implementation of a simple RNN
"""
import numpy as np
timesteps = 1000 # Number of steps in the input sequence
input_features = 32 # Dimensionality of the input feature space
output_features = 64 # Dimensionality of the output feature space
inputs = np.random.random((timesteps,input_features)) # Input data: random noise for the sake of the example
state_t = np.zeros((output_features,)) # Initiali state: an all-zero vector
# Create random weight matrices
W = np.random.random((output_features,input_features))
U = np.random.random((output_features,output_features))
b = np.random.random((output_features,))
successive_outputs = []
for input_t in inputs: # input_t is a vector of shape (input_featurs,)
output_t = np.tanh(np.dot(W,input_t) + np.dot(U,state_t) + b) # Combines the input with the current state (the previous output) to obtain the current output
successive_outputs.append(output_t) # Stores this output in a list
state_t = output_t # Updates the state of the network for the next timestep
final_output_sequence = np.concatenate(successive_outputs,axis=0) # The final output is a 2D tensor of shape (timesteps,output_features)
"""
Listing 6.22 Preparing the IMDB data
"""
from keras.datasets import imdb
from keras.preprocessing import sequence
max_features = 10000 # Number of words to consider as features
maxlen = 500 # Cuts off texts after this many words (among the max_features of most common words)
batch_size = 32
print('Loading data...')
(input_train,y_train),(input_test,y_test) = imdb.load_data(num_words=max_features)
print(len(input_train),'train sequences')
print(len(input_test),'test sequences')
print('Pad sequences (samples x time)')
input_train = sequence.pad_sequences(input_train,maxlen=maxlen)
input_test = sequence.pad_sequences(input_test,maxlen=maxlen)
print('input_train shape:',input_train.shape)
print('input test shape:',input_test.shape)
"""
Listing 6.23 Training the model with Embedding and SimpleRNN layers
"""
from keras.models import Sequential
from keras.layers import Dense, Embedding,SimpleRNN
model = Sequential()
model.add(Embedding(max_features,32))
model.add(SimpleRNN(32))
model.add(Dense(1,activation='sigmoid'))
model.compile(optimizer='rmsprop',loss='binary_crossentropy',metrics=['acc'])
history = model.fit(input_train,y_train,
epochs=10,
batch_size=128,
validation_split=0.2)
"""
Listing 6.24 Plotting results
"""
import matplotlib.pyplot as plt
acc = history.history['acc']
val_acc = history.history['val_acc']
loss = history.history['loss']
val_loss = history.history['val_loss']
epochs = range(1,len(acc) + 1)
plt.plot(epochs,acc,'bo',label='Training acc')
plt.plot(epochs,val_acc,'b',label='Validation acc')
plt.title('Training and validationa accuracy')
plt.legend()
plt.figure()
plt.plot(epochs,loss,'bo',label='Training loss')
plt.plot(epochs,val_loss,'b',label='Validation loss')
plt.title('Training and validationa loss')
plt.legend()
plt.show()
"""
Listing 6.25 Pseudocode details of the LSTM architecture (1/2)
"""
# output_t = activation(dot(state_t,Uo) = dot(input_t,Wo) + dot(C_t,Vo) + bo)
# i_t = activation(dot(state_t,Ui) + dot(input_t,Wi) + bi)
# f_t = activation(dot(state_t,Uf) + dot(input_t,Wf) + bf)
# k_t = activation(dot(state_t,Uk) + dot(input_t,Wk) + bk)
"""
Listing 6.26 Pseudocode details of the LSTM architecture (2/2)
"""
# c_t+1 = i_t * k+t + c_t * f_t
"""
Listing 6.27 Using the LSTM layer in Keras
"""
from keras.layers import LSTM
model = Sequential()
model.add(Embedding(max_features,32))
model.add(LSTM(32))
model.add(Dense(1,activation='sigmoid'))
model.compile(optimizer='rmsprop',loss='binary_crossentropy',metrics=['acc'])
history = model.fit(input_train,y_train,epochs=10,batch_size=128,validation_split=0.2)
# Added plotting logic
import matplotlib.pyplot as plt
acc = history.history['acc']
val_acc = history.history['val_acc']
loss = history.history['loss']
val_loss = history.history['val_loss']
epochs = range(1,len(acc) + 1)
plt.plot(epochs,acc,'bo',label='Training acc')
plt.plot(epochs,val_acc,'b',label='Validation acc')
plt.title('Training and validationa accuracy')
plt.legend()
plt.figure()
plt.plot(epochs,loss,'bo',label='Training loss')
plt.plot(epochs,val_loss,'b',label='Validation loss')
plt.title('Training and validationa loss')
plt.legend()
plt.show()
"""
Listing 6.28 Inspecfing the data of the Jena weather dataset
"""
import os
data_dir = "/home/lqdev/Downloads/jena_climate"
fname = os.path.join(data_dir,'jena_climate_2009_2016.csv')
f = open(fname)
data = f.read()
f.close()
lines = data.split('\n')
header = lines[0].split(',')
lines = lines[1:]
print(header)
print(len(lines))
"""
Listing 6.29 Parsing the data
"""
import numpy as np
float_data = np.zeros((len(lines),len(header)-1))
for i,line in enumerate(lines):
values = [float(x) for x in line.split(',')[1:]]
float_data[i,:] = values
"""
6.30 Plotting the temperature timeseries
"""
import matplotlib.pyplot as plt
temp = float_data[:,1]
plt.plot(range(len(temp)),temp)
plt.show()
"""
Listing 6.31 PLotting the first 10 days of the temperature timeseries
"""
plt.plot(range(1440),temp[:1440])
plt.show()
"""
Listing 6.32 Normalizing the data
"""
mean = float_data[:200000].mean(axis=0)
float_data -= mean
std = float_data[:200000].std(axis=0)
float_data /= std
"""
Listing 6.33 Generator yielding timeseries samples and their targets
"""
def generator(data,lookback,delay,min_index,max_index,shuffle=False,batch_size=128,step=6):
if max_index is None:
max_index = len(data) - delay - 1
i = min_index + lookback
while 1:
if shuffle:
rows = np.random.randint(min_index + lookback,max_index,size=batch_size)
else:
if i + batch_size >= max_index:
i = min_index + lookback
rows = np.arange(i,min(i + batch_size,max_index))
i += len(rows)
samples = np.zeros((len(rows),lookback // step,data.shape[-1]))
targets = np.zeros((len(rows),))
for j,row in enumerate(rows):
indices = range(rows[j] - lookback,rows[j],step)
samples[j] = data[indices]
targets[j] = data[rows[j] + delay][1]
yield samples,targets
"""
Listing 6.34 Preparing the training, validation and test generators
"""
lookback = 1440
step = 6
delay = 144
batch_size = 128
train_gen = generator(float_data,
lookback=lookback,
delay=delay,
min_index=0,
max_index=200000,
shuffle=True,
step=step,
batch_size=batch_size)
val_gen = generator(float_data,
lookback=lookback,
delay=delay,
min_index=200001,
max_index=300000,
shuffle=True,
step=step,
batch_size=batch_size)
test_gen = generator(float_data,
lookback=lookback,
delay=delay,
min_index=300001,
max_index=None,
shuffle=True,
step=step,
batch_size=batch_size)
val_steps = (300000 - 200001 - lookback) # How many steps to draw from val_gen in order to see the entire validation set
test_steps = (len(float_data) - 300001 - lookback) # How many steps to draw from test_gen in order to see the entire test set
"""
Listing 6.35 Computing the common-sense baseline MAE
"""
def evaluate_naive_method():
batch_maes = []
for step in range(val_steps):
print("On step ",step)
samples,targets = next(val_gen)
print(step)
preds = samples[:,-1,1]
mae = np.mean(np.abs(preds-targets))
batch_maes.append(mae)
print(np.mean(batch_maes))
evaluate_naive_method()
"""
Listing 6.36 Converting the MAE back to Celcious error
"""
celsius_mae = 0.29 * std[1]
"""
Listing 6.37 Training and evaluating a densely connected model
"""
from keras.models import Sequential
from keras import layers
from keras.optimizers import RMSprop
model = Sequential()
model.add(layers.Flatten(input_shape=(lookback // step, float_data.shape[-1])))
model.add(layers.Dense(32, activation='relu'))
model.add(layers.Dense(1))
model.compile(optimizer=RMSprop(),loss='mae')
history = model.fit_generator(train_gen,
steps_per_epoch=500,
epochs=20,
validation_data=val_gen,
validation_steps=val_steps)
|
"""Switch for the Adaptive Lighting integration."""
from __future__ import annotations
import asyncio
import bisect
from collections import defaultdict
from copy import deepcopy
from dataclasses import dataclass
import datetime
from datetime import timedelta
import functools
import hashlib
import logging
import math
from typing import Any, Dict, List, Optional, Tuple, Union
import astral
import voluptuous as vol
from homeassistant.components.light import (
ATTR_BRIGHTNESS,
ATTR_BRIGHTNESS_PCT,
ATTR_BRIGHTNESS_STEP,
ATTR_BRIGHTNESS_STEP_PCT,
ATTR_COLOR_NAME,
ATTR_COLOR_TEMP,
ATTR_HS_COLOR,
ATTR_KELVIN,
ATTR_RGB_COLOR,
ATTR_TRANSITION,
ATTR_WHITE_VALUE,
ATTR_XY_COLOR,
DOMAIN as LIGHT_DOMAIN,
SUPPORT_BRIGHTNESS,
SUPPORT_COLOR,
SUPPORT_COLOR_TEMP,
SUPPORT_TRANSITION,
SUPPORT_WHITE_VALUE,
VALID_TRANSITION,
is_on,
COLOR_MODE_RGB,
COLOR_MODE_RGBW,
COLOR_MODE_HS,
COLOR_MODE_XY,
COLOR_MODE_COLOR_TEMP,
COLOR_MODE_BRIGHTNESS,
ATTR_SUPPORTED_COLOR_MODES,
)
from homeassistant.components.switch import DOMAIN as SWITCH_DOMAIN, SwitchEntity
from homeassistant.config_entries import ConfigEntry
from homeassistant.const import (
ATTR_DOMAIN,
ATTR_ENTITY_ID,
ATTR_SERVICE,
ATTR_SERVICE_DATA,
ATTR_SUPPORTED_FEATURES,
CONF_NAME,
EVENT_CALL_SERVICE,
EVENT_HOMEASSISTANT_STARTED,
EVENT_STATE_CHANGED,
SERVICE_TURN_OFF,
SERVICE_TURN_ON,
STATE_OFF,
STATE_ON,
SUN_EVENT_SUNRISE,
SUN_EVENT_SUNSET,
)
from homeassistant.core import (
Context,
Event,
HomeAssistant,
ServiceCall,
State,
callback,
)
from homeassistant.helpers import entity_platform
import homeassistant.helpers.config_validation as cv
from homeassistant.helpers.event import (
async_track_state_change_event,
async_track_time_interval,
)
from homeassistant.helpers.restore_state import RestoreEntity
from homeassistant.helpers.sun import get_astral_location
from homeassistant.util import slugify
from homeassistant.util.color import (
color_RGB_to_xy,
color_temperature_kelvin_to_mired,
color_temperature_to_rgb,
color_xy_to_hs,
)
import homeassistant.util.dt as dt_util
from .const import (
ADAPT_BRIGHTNESS_SWITCH,
ADAPT_COLOR_SWITCH,
ATTR_ADAPT_BRIGHTNESS,
ATTR_ADAPT_COLOR,
ATTR_TURN_ON_OFF_LISTENER,
CONF_DETECT_NON_HA_CHANGES,
CONF_INITIAL_TRANSITION,
CONF_INTERVAL,
CONF_LIGHTS,
CONF_MANUAL_CONTROL,
CONF_MAX_BRIGHTNESS,
CONF_MAX_COLOR_TEMP,
CONF_MIN_BRIGHTNESS,
CONF_MIN_COLOR_TEMP,
CONF_ONLY_ONCE,
CONF_PREFER_RGB_COLOR,
CONF_SEPARATE_TURN_ON_COMMANDS,
CONF_SLEEP_BRIGHTNESS,
CONF_SLEEP_COLOR_TEMP,
CONF_SUNRISE_OFFSET,
CONF_SUNRISE_TIME,
CONF_SUNSET_OFFSET,
CONF_SUNSET_TIME,
CONF_TAKE_OVER_CONTROL,
CONF_TRANSITION,
CONF_TURN_ON_LIGHTS,
DOMAIN,
EXTRA_VALIDATION,
ICON,
SERVICE_APPLY,
SERVICE_SET_MANUAL_CONTROL,
SLEEP_MODE_SWITCH,
SUN_EVENT_MIDNIGHT,
SUN_EVENT_NOON,
TURNING_OFF_DELAY,
VALIDATION_TUPLES,
replace_none_str,
)
# Map of feature names to light supported-feature bitmasks.
_SUPPORT_OPTS = {
    "brightness": SUPPORT_BRIGHTNESS,
    "white_value": SUPPORT_WHITE_VALUE,
    "color_temp": SUPPORT_COLOR_TEMP,
    "color": SUPPORT_COLOR,
    "transition": SUPPORT_TRANSITION,
}
# Canonical order of sun events; any rotation of this order is considered valid.
_ORDER = (SUN_EVENT_SUNRISE, SUN_EVENT_NOON, SUN_EVENT_SUNSET, SUN_EVENT_MIDNIGHT)
_ALLOWED_ORDERS = {_ORDER[i:] + _ORDER[:i] for i in range(len(_ORDER))}
_LOGGER = logging.getLogger(__name__)
SCAN_INTERVAL = timedelta(seconds=10)
# Consider it a significant change when an attribute changes more than:
BRIGHTNESS_CHANGE = 25  # ≈10% of total range
COLOR_TEMP_CHANGE = 20  # ≈5% of total range
RGB_REDMEAN_CHANGE = 80  # ≈10% of total range
# Service-call attributes that indicate a color change.
COLOR_ATTRS = {  # Should ATTR_PROFILE be in here?
    ATTR_COLOR_NAME,
    ATTR_COLOR_TEMP,
    ATTR_HS_COLOR,
    ATTR_KELVIN,
    ATTR_RGB_COLOR,
    ATTR_XY_COLOR,
}
# Service-call attributes that indicate a brightness change.
BRIGHTNESS_ATTRS = {
    ATTR_BRIGHTNESS,
    ATTR_WHITE_VALUE,
    ATTR_BRIGHTNESS_PCT,
    ATTR_BRIGHTNESS_STEP,
    ATTR_BRIGHTNESS_STEP_PCT,
}
# Keep a short domain version for the context instances (which can only be 36 chars).
_DOMAIN_SHORT = "adapt_lgt"
def _short_hash(string: str, length: int = 4) -> str:
"""Create a hash of 'string' with length 'length'."""
return hashlib.sha1(string.encode("UTF-8")).hexdigest()[:length]
def create_context(name: str, which: str, index: int) -> Context:
    """Create a context that can identify this integration."""
    # Hash the name; the raw name could push the context id past the
    # 36-character limit the database allows.
    parts = (_DOMAIN_SHORT, _short_hash(name), which, str(index))
    return Context(id="_".join(parts))
def is_our_context(context: Optional[Context]) -> bool:
    """Return True when 'context' was created by this integration."""
    return context is not None and context.id.startswith(_DOMAIN_SHORT)
def _split_service_data(service_data, adapt_brightness, adapt_color):
    """Split service_data into separate color and brightness payloads."""
    if service_data.get(ATTR_TRANSITION) is not None:
        # Each of the two resulting commands gets half of the transition.
        service_data[ATTR_TRANSITION] /= 2
    payloads = []
    if adapt_color:
        color_data = service_data.copy()
        for key in (ATTR_WHITE_VALUE, ATTR_BRIGHTNESS):
            color_data.pop(key, None)
        payloads.append(color_data)
    if adapt_brightness:
        brightness_data = service_data.copy()
        for key in (ATTR_RGB_COLOR, ATTR_COLOR_TEMP):
            brightness_data.pop(key, None)
        payloads.append(brightness_data)
    return payloads
async def handle_apply(switch: AdaptiveSwitch, service_call: ServiceCall):
    """Handle the entity service 'apply': adapt the given lights once."""
    hass = switch.hass
    data = service_call.data
    # Expand light groups so individual member lights are tracked and adapted.
    all_lights = _expand_light_groups(hass, data[CONF_LIGHTS])
    switch.turn_on_off_listener.lights.update(all_lights)
    for light in all_lights:
        # Adapt lights that are on, or all lights when CONF_TURN_ON_LIGHTS is set.
        if data[CONF_TURN_ON_LIGHTS] or is_on(hass, light):
            await switch._adapt_light(  # pylint: disable=protected-access
                light,
                data[CONF_TRANSITION],
                data[ATTR_ADAPT_BRIGHTNESS],
                data[ATTR_ADAPT_COLOR],
                data[CONF_PREFER_RGB_COLOR],
                force=True,
            )
async def handle_set_manual_control(switch: AdaptiveSwitch, service_call: ServiceCall):
    """Set or unset lights as 'manually controlled'."""
    lights = service_call.data[CONF_LIGHTS]
    if not lights:
        # No lights given: apply to every light this switch manages.
        all_lights = switch._lights  # pylint: disable=protected-access
    else:
        all_lights = _expand_light_groups(switch.hass, lights)
    _LOGGER.debug(
        "Called 'adaptive_lighting.set_manual_control' service with '%s'",
        service_call.data,
    )
    if service_call.data[CONF_MANUAL_CONTROL]:
        # Mark the lights as manually controlled and notify listeners.
        for light in all_lights:
            switch.turn_on_off_listener.manual_control[light] = True
            _fire_manual_control_event(switch, light, service_call.context)
    else:
        switch.turn_on_off_listener.reset(*all_lights)
        # pylint: disable=protected-access
        if switch.is_on:
            # Re-adapt immediately now that manual control is lifted.
            await switch._update_attrs_and_maybe_adapt_lights(
                all_lights,
                transition=switch._initial_transition,
                force=True,
                context=switch.create_context("service"),
            )
@callback
def _fire_manual_control_event(
    switch: AdaptiveSwitch, light: str, context: Context, is_async=True
):
    """Fire an event that 'light' is marked as manual_control."""
    hass = switch.hass
    _LOGGER.debug(
        "'adaptive_lighting.manual_control' event fired for %s for light %s",
        switch.entity_id,
        light,
    )
    # Pick the bus method matching the caller's execution context.
    dispatch = hass.bus.async_fire if is_async else hass.bus.fire
    dispatch(
        f"{DOMAIN}.manual_control",
        {ATTR_ENTITY_ID: light, SWITCH_DOMAIN: switch.entity_id},
        context=context,
    )
async def async_setup_entry(
    hass: HomeAssistant, config_entry: ConfigEntry, async_add_entities: bool
):
    """Set up the AdaptiveLighting switch."""
    # NOTE(review): 'async_add_entities' is called below with a list of
    # entities, so it is HA's add-entities callback, not a bool — the
    # annotation looks wrong; confirm and fix the type hint.
    data = hass.data[DOMAIN]
    assert config_entry.entry_id in data
    # A single TurnOnOffListener is shared among all config entries.
    if ATTR_TURN_ON_OFF_LISTENER not in data:
        data[ATTR_TURN_ON_OFF_LISTENER] = TurnOnOffListener(hass)
    turn_on_off_listener = data[ATTR_TURN_ON_OFF_LISTENER]
    # Helper toggles that the main switch consults on every adaptation.
    sleep_mode_switch = SimpleSwitch("Sleep Mode", False, hass, config_entry)
    adapt_color_switch = SimpleSwitch("Adapt Color", True, hass, config_entry)
    adapt_brightness_switch = SimpleSwitch("Adapt Brightness", True, hass, config_entry)
    switch = AdaptiveSwitch(
        hass,
        config_entry,
        turn_on_off_listener,
        sleep_mode_switch,
        adapt_color_switch,
        adapt_brightness_switch,
    )
    # Store the entities so other parts of the integration can look them up.
    data[config_entry.entry_id][SLEEP_MODE_SWITCH] = sleep_mode_switch
    data[config_entry.entry_id][ADAPT_COLOR_SWITCH] = adapt_color_switch
    data[config_entry.entry_id][ADAPT_BRIGHTNESS_SWITCH] = adapt_brightness_switch
    data[config_entry.entry_id][SWITCH_DOMAIN] = switch
    async_add_entities(
        [switch, sleep_mode_switch, adapt_color_switch, adapt_brightness_switch],
        update_before_add=True,
    )
    # Register `apply` service
    platform = entity_platform.current_platform.get()
    platform.async_register_entity_service(
        SERVICE_APPLY,
        {
            vol.Required(CONF_LIGHTS): cv.entity_ids,
            vol.Optional(
                CONF_TRANSITION,
                default=switch._initial_transition,  # pylint: disable=protected-access
            ): VALID_TRANSITION,
            vol.Optional(ATTR_ADAPT_BRIGHTNESS, default=True): cv.boolean,
            vol.Optional(ATTR_ADAPT_COLOR, default=True): cv.boolean,
            vol.Optional(CONF_PREFER_RGB_COLOR, default=False): cv.boolean,
            vol.Optional(CONF_TURN_ON_LIGHTS, default=False): cv.boolean,
        },
        handle_apply,
    )
    # Register the `set_manual_control` service.
    platform.async_register_entity_service(
        SERVICE_SET_MANUAL_CONTROL,
        {
            vol.Optional(CONF_LIGHTS, default=[]): cv.entity_ids,
            vol.Optional(CONF_MANUAL_CONTROL, default=True): cv.boolean,
        },
        handle_set_manual_control,
    )
def validate(config_entry: ConfigEntry):
    """Get the options and data from the config_entry and add defaults."""
    # Start from the defaults, then layer the options-flow values and the
    # YAML values on top (YAML wins).
    settings = deepcopy({key: default for key, default, _ in VALIDATION_TUPLES})
    settings.update(config_entry.options)  # come from options flow
    settings.update(config_entry.data)  # all yaml settings come from data
    settings = {key: replace_none_str(value) for key, value in settings.items()}
    # Coerce the raw values into their proper types.
    for key, (coerce, _) in EXTRA_VALIDATION.items():
        if settings.get(key) is not None:
            settings[key] = coerce(settings[key])
    return settings
def match_switch_state_event(event: Event, from_or_to_state: List[str]):
    """Match state event when either 'from_state' or 'to_state' matches."""
    for key in ("old_state", "new_state"):
        state = event.data.get(key)
        if state is not None and state.state in from_or_to_state:
            return True
    return False
def _expand_light_groups(hass: HomeAssistant, lights: List[str]) -> List[str]:
    """Replace every light group in 'lights' with its member lights."""
    expanded = set()
    turn_on_off_listener = hass.data[DOMAIN][ATTR_TURN_ON_OFF_LISTENER]
    for light in lights:
        state = hass.states.get(light)
        if state is not None and "entity_id" in state.attributes:
            # A light group: track its members instead of the group itself.
            members = state.attributes["entity_id"]
            turn_on_off_listener.lights.discard(light)
            expanded.update(members)
            _LOGGER.debug("Expanded %s to %s", light, members)
        else:
            if state is None:
                _LOGGER.debug("State of %s is None", light)
            expanded.add(light)
    return list(expanded)
def _supported_features(hass: HomeAssistant, light: str):
    """Return the set of feature names this light supports."""
    state = hass.states.get(light)
    feature_bits = state.attributes[ATTR_SUPPORTED_FEATURES]
    supported = {
        name for name, bit in _SUPPORT_OPTS.items() if feature_bits & bit
    }
    color_modes = set(state.attributes.get(ATTR_SUPPORTED_COLOR_MODES, set()))
    # Any true-color mode also implies brightness support, see
    # https://github.com/basnijholt/adaptive-lighting/issues/112#issuecomment-836944011
    if color_modes & {COLOR_MODE_RGB, COLOR_MODE_RGBW, COLOR_MODE_XY, COLOR_MODE_HS}:
        supported.update(("color", "brightness"))
    if COLOR_MODE_COLOR_TEMP in color_modes:
        supported.update(("color_temp", "brightness"))  # see above url
    if COLOR_MODE_BRIGHTNESS in color_modes:
        supported.add("brightness")
    return supported
def color_difference_redmean(
    rgb1: Tuple[float, float, float], rgb2: Tuple[float, float, float]
) -> float:
    """Distance between colors in RGB space (redmean metric).
    The maximal distance between (255, 255, 255) and (0, 0, 0) ≈ 765.
    Sources:
    - https://en.wikipedia.org/wiki/Color_difference#Euclidean
    - https://www.compuphase.com/cmetric.htm
    """
    (red1, green1, blue1), (red2, green2, blue2) = rgb1, rgb2
    mean_red = (red1 + red2) / 2
    d_red = red1 - red2
    d_green = green1 - green2
    d_blue = blue1 - blue2
    # The red/blue weights depend on the mean red level ("redmean").
    weight_red = 2 + mean_red / 256
    weight_blue = 2 + (255 - mean_red) / 256
    return math.sqrt(
        weight_red * d_red * d_red + 4 * d_green * d_green + weight_blue * d_blue * d_blue
    )
def _numeric_attribute_changed(
    light: str,
    old_attributes: Dict[str, Any],
    new_attributes: Dict[str, Any],
    attribute: str,
    threshold: float,
    label: str,
    context: Context,
) -> bool:
    """Return True when a numeric attribute moved by more than 'threshold'.

    Helper for '_attributes_have_changed'; 'label' is only used in the
    debug message (e.g. "Brightness").
    """
    if attribute not in old_attributes or attribute not in new_attributes:
        return False
    last = old_attributes[attribute]
    current = new_attributes[attribute]
    if abs(current - last) > threshold:
        _LOGGER.debug(
            "%s of '%s' significantly changed from %s to %s with"
            " context.id='%s'",
            label,
            light,
            last,
            current,
            context.id,
        )
        return True
    return False
def _attributes_have_changed(
    light: str,
    old_attributes: Dict[str, Any],
    new_attributes: Dict[str, Any],
    adapt_brightness: bool,
    adapt_color: bool,
    context: Context,
) -> bool:
    """Check whether a light's attributes changed enough to be 'significant'.

    Used to detect changes made outside of this integration. Returns True
    on the first significant difference (brightness, white value, color
    temperature, RGB color, or a color-mode switch).
    """
    if adapt_brightness:
        if _numeric_attribute_changed(
            light,
            old_attributes,
            new_attributes,
            ATTR_BRIGHTNESS,
            BRIGHTNESS_CHANGE,
            "Brightness",
            context,
        ):
            return True
        if _numeric_attribute_changed(
            light,
            old_attributes,
            new_attributes,
            ATTR_WHITE_VALUE,
            BRIGHTNESS_CHANGE,
            "White Value",
            context,
        ):
            return True
    if adapt_color:
        if _numeric_attribute_changed(
            light,
            old_attributes,
            new_attributes,
            ATTR_COLOR_TEMP,
            COLOR_TEMP_CHANGE,
            "Color temperature",
            context,
        ):
            return True
        # RGB colors are compared with the redmean metric, not a plain diff.
        if ATTR_RGB_COLOR in old_attributes and ATTR_RGB_COLOR in new_attributes:
            last_rgb_color = old_attributes[ATTR_RGB_COLOR]
            current_rgb_color = new_attributes[ATTR_RGB_COLOR]
            redmean_change = color_difference_redmean(last_rgb_color, current_rgb_color)
            if redmean_change > RGB_REDMEAN_CHANGE:
                _LOGGER.debug(
                    "color RGB of '%s' significantly changed from %s to %s with"
                    " context.id='%s'",
                    light,
                    last_rgb_color,
                    current_rgb_color,
                    context.id,
                )
                return True
    # A disappearing attribute means the light switched color mode.
    switched_to_color_temp = (
        ATTR_RGB_COLOR in old_attributes and ATTR_RGB_COLOR not in new_attributes
    )
    switched_to_rgb_color = (
        ATTR_COLOR_TEMP in old_attributes and ATTR_COLOR_TEMP not in new_attributes
    )
    if switched_to_color_temp or switched_to_rgb_color:
        # Light switched from RGB mode to color_temp or visa versa
        _LOGGER.debug(
            "'%s' switched from RGB mode to color_temp or visa versa",
            light,
        )
        return True
    return False
class AdaptiveSwitch(SwitchEntity, RestoreEntity):
    """Representation of an Adaptive Lighting switch."""
    def __init__(
        self,
        hass,
        config_entry: ConfigEntry,
        turn_on_off_listener: TurnOnOffListener,
        sleep_mode_switch: SimpleSwitch,
        adapt_color_switch: SimpleSwitch,
        adapt_brightness_switch: SimpleSwitch,
    ):
        """Initialize the Adaptive Lighting switch."""
        self.hass = hass
        self.turn_on_off_listener = turn_on_off_listener
        self.sleep_mode_switch = sleep_mode_switch
        self.adapt_color_switch = adapt_color_switch
        self.adapt_brightness_switch = adapt_brightness_switch
        data = validate(config_entry)
        self._name = data[CONF_NAME]
        self._lights = data[CONF_LIGHTS]
        self._detect_non_ha_changes = data[CONF_DETECT_NON_HA_CHANGES]
        self._initial_transition = data[CONF_INITIAL_TRANSITION]
        self._interval = data[CONF_INTERVAL]
        self._only_once = data[CONF_ONLY_ONCE]
        self._prefer_rgb_color = data[CONF_PREFER_RGB_COLOR]
        self._separate_turn_on_commands = data[CONF_SEPARATE_TURN_ON_COMMANDS]
        self._take_over_control = data[CONF_TAKE_OVER_CONTROL]
        # Cap the transition at half the update interval, presumably so that
        # consecutive transitions do not overlap — TODO confirm intent.
        self._transition = min(
            data[CONF_TRANSITION], self._interval.total_seconds() // 2
        )
        _loc = get_astral_location(self.hass)
        if isinstance(_loc, tuple):
            # Astral v2.2
            location, _ = _loc
        else:
            # Astral v1
            location = _loc
        self._sun_light_settings = SunLightSettings(
            name=self._name,
            astral_location=location,
            max_brightness=data[CONF_MAX_BRIGHTNESS],
            max_color_temp=data[CONF_MAX_COLOR_TEMP],
            min_brightness=data[CONF_MIN_BRIGHTNESS],
            min_color_temp=data[CONF_MIN_COLOR_TEMP],
            sleep_brightness=data[CONF_SLEEP_BRIGHTNESS],
            sleep_color_temp=data[CONF_SLEEP_COLOR_TEMP],
            sunrise_offset=data[CONF_SUNRISE_OFFSET],
            sunrise_time=data[CONF_SUNRISE_TIME],
            sunset_offset=data[CONF_SUNSET_OFFSET],
            sunset_time=data[CONF_SUNSET_TIME],
            time_zone=self.hass.config.time_zone,
        )
        # Set other attributes
        self._icon = ICON
        self._state = None
        # Tracks 'on' → 'off' state changes
        self._on_to_off_event: Dict[str, Event] = {}
        # Tracks 'off' → 'on' state changes
        self._off_to_on_event: Dict[str, Event] = {}
        # Locks that prevent light adjusting when waiting for a light to 'turn_off'
        self._locks: Dict[str, asyncio.Lock] = {}
        # To count the number of `Context` instances
        self._context_cnt: int = 0
        # Set in self._update_attrs_and_maybe_adapt_lights
        self._settings: Dict[str, Any] = {}
        # Set and unset tracker in async_turn_on and async_turn_off
        self.remove_listeners = []
        _LOGGER.debug(
            "%s: Setting up with '%s',"
            " config_entry.data: '%s',"
            " config_entry.options: '%s', converted to '%s'.",
            self._name,
            self._lights,
            config_entry.data,
            config_entry.options,
            data,
        )
    @property
    def name(self):
        """Return the name of the device if any."""
        return f"Adaptive Lighting: {self._name}"
    @property
    def unique_id(self):
        """Return the unique ID of entity."""
        return self._name
    @property
    def is_on(self) -> Optional[bool]:
        """Return true if adaptive lighting is on."""
        return self._state
    async def async_added_to_hass(self) -> None:
        """Call when entity about to be added to hass."""
        if self.hass.is_running:
            await self._setup_listeners()
        else:
            # Defer listener setup until HA has fully started.
            self.hass.bus.async_listen_once(
                EVENT_HOMEASSISTANT_STARTED, self._setup_listeners
            )
        last_state = await self.async_get_last_state()
        is_new_entry = last_state is None  # newly added to HA
        if is_new_entry or last_state.state == STATE_ON:
            await self.async_turn_on(adapt_lights=not self._only_once)
        else:
            self._state = False
            assert not self.remove_listeners
    async def async_will_remove_from_hass(self):
        """Remove the listeners upon removing the component."""
        self._remove_listeners()
    def _expand_light_groups(self) -> None:
        """Replace light groups in self._lights with their member lights."""
        all_lights = _expand_light_groups(self.hass, self._lights)
        self.turn_on_off_listener.lights.update(all_lights)
        self._lights = list(all_lights)
    async def _setup_listeners(self, _=None) -> None:
        """Register the interval, sleep-switch, and light state listeners."""
        _LOGGER.debug("%s: Called '_setup_listeners'", self._name)
        if not self.is_on or not self.hass.is_running:
            _LOGGER.debug("%s: Cancelled '_setup_listeners'", self._name)
            return
        assert not self.remove_listeners
        remove_interval = async_track_time_interval(
            self.hass, self._async_update_at_interval, self._interval
        )
        remove_sleep = async_track_state_change_event(
            self.hass,
            self.sleep_mode_switch.entity_id,
            self._sleep_mode_switch_state_event,
        )
        self.remove_listeners.extend([remove_interval, remove_sleep])
        if self._lights:
            self._expand_light_groups()
            remove_state = async_track_state_change_event(
                self.hass, self._lights, self._light_event
            )
            self.remove_listeners.append(remove_state)
    def _remove_listeners(self) -> None:
        """Unsubscribe all listeners registered in '_setup_listeners'."""
        while self.remove_listeners:
            remove_listener = self.remove_listeners.pop()
            remove_listener()
    @property
    def icon(self) -> str:
        """Icon to use in the frontend, if any."""
        return self._icon
    @property
    def device_state_attributes(self) -> Dict[str, Any]:
        """Return the attributes of the switch."""
        if not self.is_on:
            return {key: None for key in self._settings}
        manual_control = [
            light
            for light in self._lights
            if self.turn_on_off_listener.manual_control.get(light)
        ]
        return dict(self._settings, manual_control=manual_control)
    def create_context(self, which: str = "default") -> Context:
        """Create a context that identifies this Adaptive Lighting instance."""
        # Right now the highest number of each context_id it can create is
        # 'adapt_lgt_XXXX_turn_on_9999999999999'
        # 'adapt_lgt_XXXX_interval_999999999999'
        # 'adapt_lgt_XXXX_adapt_lights_99999999'
        # 'adapt_lgt_XXXX_sleep_999999999999999'
        # 'adapt_lgt_XXXX_light_event_999999999'
        # 'adapt_lgt_XXXX_service_9999999999999'
        # So 100 million calls before we run into the 36 chars limit.
        context = create_context(self._name, which, self._context_cnt)
        self._context_cnt += 1
        return context
    async def async_turn_on(  # pylint: disable=arguments-differ
        self, adapt_lights: bool = True
    ) -> None:
        """Turn on adaptive lighting."""
        _LOGGER.debug(
            "%s: Called 'async_turn_on', current state is '%s'", self._name, self._state
        )
        if self.is_on:
            return
        self._state = True
        self.turn_on_off_listener.reset(*self._lights)
        await self._setup_listeners()
        if adapt_lights:
            await self._update_attrs_and_maybe_adapt_lights(
                transition=self._initial_transition,
                force=True,
                context=self.create_context("turn_on"),
            )
    async def async_turn_off(self, **kwargs) -> None:
        """Turn off adaptive lighting."""
        if not self.is_on:
            return
        self._state = False
        self._remove_listeners()
        self.turn_on_off_listener.reset(*self._lights)
    async def _async_update_at_interval(self, now=None) -> None:
        """Periodic callback: update settings and adapt the lights."""
        await self._update_attrs_and_maybe_adapt_lights(
            force=False, context=self.create_context("interval")
        )
    async def _adapt_light(
        self,
        light: str,
        transition: Optional[int] = None,
        adapt_brightness: Optional[bool] = None,
        adapt_color: Optional[bool] = None,
        prefer_rgb_color: Optional[bool] = None,
        force: bool = False,
        context: Optional[Context] = None,
    ) -> None:
        """Call 'light.turn_on' on one light with the current adapted settings.

        Skips the light while it is locked (waiting for a 'turn_off'), and —
        unless 'force' — also when a significant external change is detected.
        """
        lock = self._locks.get(light)
        if lock is not None and lock.locked():
            _LOGGER.debug("%s: '%s' is locked", self._name, light)
            return
        service_data = {ATTR_ENTITY_ID: light}
        features = _supported_features(self.hass, light)
        # Fall back to the switch-wide settings for unspecified arguments.
        if transition is None:
            transition = self._transition
        if adapt_brightness is None:
            adapt_brightness = self.adapt_brightness_switch.is_on
        if adapt_color is None:
            adapt_color = self.adapt_color_switch.is_on
        if prefer_rgb_color is None:
            prefer_rgb_color = self._prefer_rgb_color
        if "transition" in features:
            service_data[ATTR_TRANSITION] = transition
        if "brightness" in features and adapt_brightness:
            brightness = round(255 * self._settings["brightness_pct"] / 100)
            service_data[ATTR_BRIGHTNESS] = brightness
        if "white_value" in features and adapt_brightness:
            white_value = round(255 * self._settings["brightness_pct"] / 100)
            service_data[ATTR_WHITE_VALUE] = white_value
        if (
            "color_temp" in features
            and adapt_color
            and not (prefer_rgb_color and "color" in features)
        ):
            # Clamp the color temperature to the light's reported mired range.
            attributes = self.hass.states.get(light).attributes
            min_mireds, max_mireds = attributes["min_mireds"], attributes["max_mireds"]
            color_temp_mired = self._settings["color_temp_mired"]
            color_temp_mired = max(min(color_temp_mired, max_mireds), min_mireds)
            service_data[ATTR_COLOR_TEMP] = color_temp_mired
        elif "color" in features and adapt_color:
            service_data[ATTR_RGB_COLOR] = self._settings["rgb_color"]
        context = context or self.create_context("adapt_lights")
        if (
            self._take_over_control
            and self._detect_non_ha_changes
            and not force
            and await self.turn_on_off_listener.significant_change(
                self,
                light,
                adapt_brightness,
                adapt_color,
                context,
            )
        ):
            return
        self.turn_on_off_listener.last_service_data[light] = service_data
        async def turn_on(service_data):
            """Schedule a single 'light.turn_on' call with 'service_data'."""
            _LOGGER.debug(
                "%s: Scheduling 'light.turn_on' with the following 'service_data': %s"
                " with context.id='%s'",
                self._name,
                service_data,
                context.id,
            )
            await self.hass.services.async_call(
                LIGHT_DOMAIN,
                SERVICE_TURN_ON,
                service_data,
                context=context,
            )
        if not self._separate_turn_on_commands:
            await turn_on(service_data)
        else:
            # Could be a list of length 1 or 2
            service_datas = _split_service_data(
                service_data, adapt_brightness, adapt_color
            )
            await turn_on(service_datas[0])
            if len(service_datas) == 2:
                # Wait for the first transition to finish before the second call.
                transition = service_datas[0].get(ATTR_TRANSITION)
                if transition is not None:
                    await asyncio.sleep(transition)
                await turn_on(service_datas[1])
    async def _update_attrs_and_maybe_adapt_lights(
        self,
        lights: Optional[List[str]] = None,
        transition: Optional[int] = None,
        force: bool = False,
        context: Optional[Context] = None,
    ) -> None:
        """Recompute the sun-based settings and (optionally) adapt 'lights'."""
        assert context is not None
        _LOGGER.debug(
            "%s: '_update_attrs_and_maybe_adapt_lights' called with context.id='%s'",
            self._name,
            context.id,
        )
        assert self.is_on
        self._settings = self._sun_light_settings.get_settings(
            self.sleep_mode_switch.is_on
        )
        self.async_write_ha_state()
        if lights is None:
            lights = self._lights
        if (self._only_once and not force) or not lights:
            return
        await self._adapt_lights(lights, transition, force, context)
    async def _adapt_lights(
        self,
        lights: List[str],
        transition: Optional[int],
        force: bool,
        context: Optional[Context],
    ) -> None:
        """Adapt every light in 'lights' that is on and not manually controlled."""
        assert context is not None
        _LOGGER.debug(
            "%s: '_adapt_lights(%s, %s, force=%s, context.id=%s)' called",
            self.name,
            lights,
            transition,
            force,
            context.id,
        )
        for light in lights:
            if not is_on(self.hass, light):
                continue
            if (
                self._take_over_control
                and self.turn_on_off_listener.is_manually_controlled(
                    self,
                    light,
                    force,
                    self.adapt_brightness_switch.is_on,
                    self.adapt_color_switch.is_on,
                )
            ):
                _LOGGER.debug(
                    "%s: '%s' is being manually controlled, stop adapting, context.id=%s.",
                    self._name,
                    light,
                    context.id,
                )
                continue
            await self._adapt_light(light, transition, force=force, context=context)
    async def _sleep_mode_switch_state_event(self, event: Event) -> None:
        """React to the sleep-mode switch toggling on or off."""
        if not match_switch_state_event(event, (STATE_ON, STATE_OFF)):
            return
        _LOGGER.debug(
            "%s: _sleep_mode_switch_state_event, event: '%s'", self._name, event
        )
        # Reset the manually controlled status when the "sleep mode" changes
        self.turn_on_off_listener.reset(*self._lights)
        await self._update_attrs_and_maybe_adapt_lights(
            transition=self._initial_transition,
            force=True,
            context=self.create_context("sleep"),
        )
    async def _light_event(self, event: Event) -> None:
        """Handle 'off' → 'on' and 'on' → 'off' state changes of a light."""
        old_state = event.data.get("old_state")
        new_state = event.data.get("new_state")
        entity_id = event.data.get("entity_id")
        if (
            old_state is not None
            and old_state.state == STATE_OFF
            and new_state is not None
            and new_state.state == STATE_ON
        ):
            _LOGGER.debug(
                "%s: Detected an 'off' → 'on' event for '%s' with context.id='%s'",
                self._name,
                entity_id,
                event.context.id,
            )
            self.turn_on_off_listener.reset(entity_id, reset_manual_control=False)
            # Tracks 'off' → 'on' state changes
            self._off_to_on_event[entity_id] = event
            lock = self._locks.get(entity_id)
            if lock is None:
                lock = self._locks[entity_id] = asyncio.Lock()
            async with lock:
                if await self.turn_on_off_listener.maybe_cancel_adjusting(
                    entity_id,
                    off_to_on_event=event,
                    on_to_off_event=self._on_to_off_event.get(entity_id),
                ):
                    # Stop if a rapid 'off' → 'on' → 'off' happens.
                    _LOGGER.debug(
                        "%s: Cancelling adjusting lights for %s", self._name, entity_id
                    )
                    return
            await self._update_attrs_and_maybe_adapt_lights(
                lights=[entity_id],
                transition=self._initial_transition,
                force=True,
                context=self.create_context("light_event"),
            )
        elif (
            old_state is not None
            and old_state.state == STATE_ON
            and new_state is not None
            and new_state.state == STATE_OFF
        ):
            # Tracks 'on' → 'off' state changes
            self._on_to_off_event[entity_id] = event
            self.turn_on_off_listener.reset(entity_id)
class SimpleSwitch(SwitchEntity, RestoreEntity):
    """A plain on/off helper switch belonging to an Adaptive Lighting entry."""
    def __init__(
        self, which: str, initial_state: bool, hass: HomeAssistant, config_entry
    ):
        """Initialize the Adaptive Lighting switch."""
        self.hass = hass
        self._which = which
        self._icon = ICON
        self._state = None
        self._initial_state = initial_state
        settings = validate(config_entry)
        name = settings[CONF_NAME]
        self._unique_id = f"{name}_{slugify(self._which)}"
        self._name = f"Adaptive Lighting {which}: {name}"
    @property
    def name(self):
        """Return the name of the device if any."""
        return self._name
    @property
    def unique_id(self):
        """Return the unique ID of entity."""
        return self._unique_id
    @property
    def icon(self) -> str:
        """Icon to use in the frontend, if any."""
        return self._icon
    @property
    def is_on(self) -> Optional[bool]:
        """Return true if adaptive lighting is on."""
        return self._state
    async def async_added_to_hass(self) -> None:
        """Restore the last known state, falling back to the initial state."""
        last_state = await self.async_get_last_state()
        _LOGGER.debug("%s: last state is %s", self._name, last_state)
        if last_state is None:
            turn_on = self._initial_state
        else:
            turn_on = last_state.state == STATE_ON
        if turn_on:
            await self.async_turn_on()
        else:
            await self.async_turn_off()
    async def async_turn_on(self, **kwargs) -> None:
        """Turn on adaptive lighting sleep mode."""
        self._state = True
    async def async_turn_off(self, **kwargs) -> None:
        """Turn off adaptive lighting sleep mode."""
        self._state = False
@dataclass(frozen=True)
class SunLightSettings:
    """Track the state of the sun and associated light settings."""
    name: str
    astral_location: astral.Location
    max_brightness: int
    max_color_temp: int
    min_brightness: int
    min_color_temp: int
    sleep_brightness: int
    sleep_color_temp: int
    sunrise_offset: Optional[datetime.timedelta]
    sunrise_time: Optional[datetime.time]
    sunset_offset: Optional[datetime.timedelta]
    sunset_time: Optional[datetime.time]
    time_zone: datetime.tzinfo
    def get_sun_events(self, date: datetime.datetime) -> Dict[str, float]:
        """Get the four sun event's timestamps at 'date'."""
        def _replace_time(date: datetime.datetime, key: str) -> datetime.datetime:
            """Combine 'date' with the user-configured sunrise/sunset time, in UTC."""
            time = getattr(self, f"{key}_time")
            date_time = datetime.datetime.combine(date, time)
            try:  # HA ≤2021.05, https://github.com/basnijholt/adaptive-lighting/issues/128
                utc_time = self.time_zone.localize(date_time).astimezone(dt_util.UTC)
            except AttributeError:  # HA ≥2021.06
                utc_time = date_time.replace(tzinfo=dt_util.DEFAULT_TIME_ZONE).astimezone(dt_util.UTC)
            return utc_time
        location = self.astral_location
        # Use the astronomically computed event unless the user fixed a time.
        sunrise = (
            location.sunrise(date, local=False)
            if self.sunrise_time is None
            else _replace_time(date, "sunrise")
        ) + self.sunrise_offset
        sunset = (
            location.sunset(date, local=False)
            if self.sunset_time is None
            else _replace_time(date, "sunset")
        ) + self.sunset_offset
        if self.sunrise_time is None and self.sunset_time is None:
            try:
                # Astral v1
                solar_noon = location.solar_noon(date, local=False)
                solar_midnight = location.solar_midnight(date, local=False)
            except AttributeError:
                # Astral v2
                solar_noon = location.noon(date, local=False)
                solar_midnight = location.midnight(date, local=False)
        else:
            # With user-fixed times, approximate noon/midnight as the
            # midpoints between sunrise and sunset.
            solar_noon = sunrise + (sunset - sunrise) / 2
            solar_midnight = sunset + ((sunrise + timedelta(days=1)) - sunset) / 2
        events = [
            (SUN_EVENT_SUNRISE, sunrise.timestamp()),
            (SUN_EVENT_SUNSET, sunset.timestamp()),
            (SUN_EVENT_NOON, solar_noon.timestamp()),
            (SUN_EVENT_MIDNIGHT, solar_midnight.timestamp()),
        ]
        # Check whether order is correct
        events = sorted(events, key=lambda x: x[1])
        events_names, _ = zip(*events)
        if events_names not in _ALLOWED_ORDERS:
            msg = (
                f"{self.name}: The sun events {events_names} are not in the expected"
                " order. The Adaptive Lighting integration will not work!"
                " This might happen if your sunrise/sunset offset is too large or"
                " your manually set sunrise/sunset time is past/before noon/midnight."
            )
            _LOGGER.error(msg)
            raise ValueError(msg)
        return events
    def relevant_events(self, now: datetime.datetime) -> List[Tuple[str, float]]:
        """Get the previous and next sun event."""
        # Collect yesterday's, today's, and tomorrow's events so that 'now'
        # always has a neighbor on each side.
        events = [
            self.get_sun_events(now + timedelta(days=days)) for days in [-1, 0, 1]
        ]
        events = sum(events, [])  # flatten lists
        events = sorted(events, key=lambda x: x[1])
        i_now = bisect.bisect([ts for _, ts in events], now.timestamp())
        return events[i_now - 1 : i_now + 1]
    def calc_percent(self) -> float:
        """Calculate the position of the sun in %."""
        now = dt_util.utcnow()
        now_ts = now.timestamp()
        today = self.relevant_events(now)
        (_, prev_ts), (next_event, next_ts) = today
        # Fit a parabola between the two surrounding events; 'h' is the event
        # where the curve peaks (±1) and 'x' the event where it crosses 0.
        h, x = (  # pylint: disable=invalid-name
            (prev_ts, next_ts)
            if next_event in (SUN_EVENT_SUNSET, SUN_EVENT_SUNRISE)
            else (next_ts, prev_ts)
        )
        # k=+1 peaks at noon (daytime), k=-1 dips at midnight (nighttime).
        k = 1 if next_event in (SUN_EVENT_SUNSET, SUN_EVENT_NOON) else -1
        percentage = (0 - k) * ((now_ts - h) / (h - x)) ** 2 + k
        return percentage
    def calc_brightness_pct(self, percent: float, is_sleep: bool) -> float:
        """Calculate the brightness in %."""
        if is_sleep:
            return self.sleep_brightness
        # Above the horizon: always maximal brightness.
        if percent > 0:
            return self.max_brightness
        # Below the horizon: interpolate linearly between min and max.
        delta_brightness = self.max_brightness - self.min_brightness
        percent = 1 + percent
        return (delta_brightness * percent) + self.min_brightness
    def calc_color_temp_kelvin(self, percent: float, is_sleep: bool) -> float:
        """Calculate the color temperature in Kelvin."""
        if is_sleep:
            return self.sleep_color_temp
        # Above the horizon: interpolate; below: clamp to the minimum.
        if percent > 0:
            delta = self.max_color_temp - self.min_color_temp
            return (delta * percent) + self.min_color_temp
        return self.min_color_temp
    def get_settings(
        self, is_sleep
    ) -> Dict[str, Union[float, Tuple[float, float], Tuple[float, float, float]]]:
        """Get all light settings.
        Calculating all values takes <0.5ms.
        """
        percent = self.calc_percent()
        brightness_pct = self.calc_brightness_pct(percent, is_sleep)
        color_temp_kelvin = self.calc_color_temp_kelvin(percent, is_sleep)
        color_temp_mired: float = color_temperature_kelvin_to_mired(color_temp_kelvin)
        rgb_color: Tuple[float, float, float] = color_temperature_to_rgb(
            color_temp_kelvin
        )
        xy_color: Tuple[float, float] = color_RGB_to_xy(*rgb_color)
        hs_color: Tuple[float, float] = color_xy_to_hs(*xy_color)
        return {
            "brightness_pct": brightness_pct,
            "color_temp_kelvin": color_temp_kelvin,
            "color_temp_mired": color_temp_mired,
            "rgb_color": rgb_color,
            "xy_color": xy_color,
            "hs_color": hs_color,
            "sun_position": percent,
        }
class TurnOnOffListener:
"""Track 'light.turn_off' and 'light.turn_on' service calls."""
    def __init__(self, hass: HomeAssistant):
        """Initialize the TurnOnOffListener that is shared among all switches."""
        self.hass = hass
        # Entity ids of all lights managed by any AdaptiveSwitch instance.
        self.lights = set()
        # Tracks 'light.turn_off' service calls
        self.turn_off_event: Dict[str, Event] = {}
        # Tracks 'light.turn_on' service calls
        self.turn_on_event: Dict[str, Event] = {}
        # Keep 'asyncio.sleep' tasks that can be cancelled by 'light.turn_on' events
        self.sleep_tasks: Dict[str, asyncio.Task] = {}
        # Tracks which lights are manually controlled
        self.manual_control: Dict[str, bool] = {}
        # Counts the number of times (in a row) a light had a changed state.
        self.cnt_significant_changes: Dict[str, int] = defaultdict(int)
        # Track 'state_changed' events of self.lights resulting from this integration
        self.last_state_change: Dict[str, List[State]] = {}
        # Track last 'service_data' to 'light.turn_on' resulting from this integration
        self.last_service_data: Dict[str, Dict[str, Any]] = {}
        # When a state is different `max_cnt_significant_changes` times in a row,
        # mark it as manually_controlled.
        self.max_cnt_significant_changes = 2
        # Watch every service call to catch 'light.turn_on'/'light.turn_off'.
        self.remove_listener = self.hass.bus.async_listen(
            EVENT_CALL_SERVICE, self.turn_on_off_event_listener
        )
        # Watch all state changes to detect external (manual) changes.
        self.remove_listener2 = self.hass.bus.async_listen(
            EVENT_STATE_CHANGED, self.state_changed_event_listener
        )
def reset(self, *lights, reset_manual_control=True) -> None:
"""Reset the 'manual_control' status of the lights."""
for light in lights:
if reset_manual_control:
self.manual_control[light] = False
self.last_state_change.pop(light, None)
self.last_service_data.pop(light, None)
self.cnt_significant_changes[light] = 0
async def turn_on_off_event_listener(self, event: Event) -> None:
"""Track 'light.turn_off' and 'light.turn_on' service calls."""
domain = event.data.get(ATTR_DOMAIN)
if domain != LIGHT_DOMAIN:
return
service = event.data[ATTR_SERVICE]
service_data = event.data[ATTR_SERVICE_DATA]
entity_ids = cv.ensure_list_csv(service_data[ATTR_ENTITY_ID])
if not any(eid in self.lights for eid in entity_ids):
return
if service == SERVICE_TURN_OFF:
transition = service_data.get(ATTR_TRANSITION)
_LOGGER.debug(
"Detected an 'light.turn_off('%s', transition=%s)' event with context.id='%s'",
entity_ids,
transition,
event.context.id,
)
for eid in entity_ids:
self.turn_off_event[eid] = event
self.reset(eid)
elif service == SERVICE_TURN_ON:
_LOGGER.debug(
"Detected an 'light.turn_on('%s')' event with context.id='%s'",
entity_ids,
event.context.id,
)
for eid in entity_ids:
task = self.sleep_tasks.get(eid)
if task is not None:
task.cancel()
self.turn_on_event[eid] = event
    async def state_changed_event_listener(self, event: Event) -> None:
        """Track 'state_changed' events."""
        # Only consider 'light.*' entities that this integration manages.
        entity_id = event.data.get(ATTR_ENTITY_ID, "")
        if entity_id not in self.lights or entity_id.split(".")[0] != LIGHT_DOMAIN:
            return
        new_state = event.data.get("new_state")
        if new_state is not None and new_state.state == STATE_ON:
            _LOGGER.debug(
                "Detected a '%s' 'state_changed' event: '%s' with context.id='%s'",
                entity_id,
                new_state.attributes,
                new_state.context.id,
            )
        if (
            new_state is not None
            and new_state.state == STATE_ON
            and is_our_context(new_state.context)
        ):
            # It is possible to have multiple state change events with the same context.
            # This can happen because a `turn_on.light(brightness_pct=100, transition=30)`
            # event leads to an instant state change of
            # `new_state=dict(brightness=100, ...)`. However, after polling the light
            # could still only be `new_state=dict(brightness=50, ...)`.
            # We save all events because the first event change might indicate at what
            # settings the light will be later *or* the second event might indicate a
            # final state. The latter case happens for example when a light was
            # called with a color_temp outside of its range (and HA reports the
            # incorrect 'min_mireds' and 'max_mireds', which happens e.g., for
            # Philips Hue White GU10 Bluetooth lights).
            old_state: Optional[List[State]] = self.last_state_change.get(entity_id)
            if (
                old_state is not None
                and old_state[0].context.id == new_state.context.id
            ):
                # If there is already a state change event from this event (with this
                # context) then append it to the already existing list.
                _LOGGER.debug(
                    "State change event of '%s' is already in 'self.last_state_change' (%s)"
                    " adding this state also",
                    entity_id,
                    new_state.context.id,
                )
                self.last_state_change[entity_id].append(new_state)
            else:
                # First state change seen for this context: start a new list.
                self.last_state_change[entity_id] = [new_state]
def is_manually_controlled(
self,
switch: AdaptiveSwitch,
light: str,
force: bool,
adapt_brightness: bool,
adapt_color: bool,
) -> bool:
"""Check if the light has been 'on' and is now manually controlled."""
manual_control = self.manual_control.setdefault(light, False)
if manual_control:
# Manually controlled until light is turned on and off
return True
turn_on_event = self.turn_on_event.get(light)
if (
turn_on_event is not None
and not is_our_context(turn_on_event.context)
and not force
):
keys = turn_on_event.data[ATTR_SERVICE_DATA].keys()
if (adapt_color and COLOR_ATTRS.intersection(keys)) or (
adapt_brightness and BRIGHTNESS_ATTRS.intersection(keys)
):
# Light was already on and 'light.turn_on' was not called by
# the adaptive_lighting integration.
manual_control = self.manual_control[light] = True
_fire_manual_control_event(switch, light, turn_on_event.context)
_LOGGER.debug(
"'%s' was already on and 'light.turn_on' was not called by the"
" adaptive_lighting integration (context.id='%s'), the Adaptive"
" Lighting will stop adapting the light until the switch or the"
" light turns off and then on again.",
light,
turn_on_event.context.id,
)
return manual_control
async def significant_change(
    self,
    switch: AdaptiveSwitch,
    light: str,
    adapt_brightness: bool,
    adapt_color: bool,
    context: Context,
) -> bool:
    """Has the light made a significant change since last update.

    This method will detect changes that were made to the light without
    calling 'light.turn_on', so outside of Home Assistant. If a change is
    detected, we mark the light as 'manually controlled' until the light
    or switch is turned 'off' and 'on' again.
    """
    if light not in self.last_state_change:
        # Nothing recorded to compare against.
        return False
    old_states: List[State] = self.last_state_change[light]
    # Force a poll so 'new_state' reflects the light's actual current state.
    await self.hass.helpers.entity_component.async_update_entity(light)
    new_state = self.hass.states.get(light)
    compare_to = functools.partial(
        _attributes_have_changed,
        light=light,
        new_attributes=new_state.attributes,
        adapt_brightness=adapt_brightness,
        adapt_color=adapt_color,
        context=context,
    )
    # The light only counts as changed when it matches *none* of the
    # recorded state change events (the loop breaks on the first match).
    for index, old_state in enumerate(old_states):
        changed = compare_to(old_attributes=old_state.attributes)
        if not changed:
            _LOGGER.debug(
                "State of '%s' didn't change wrt change event nr. %s (context.id=%s)",
                light,
                index,
                context.id,
            )
            break
    last_service_data = self.last_service_data.get(light)
    if changed and last_service_data is not None:
        # It can happen that the state change events that are associated
        # with the last 'light.turn_on' call by this integration were not
        # final states. Possibly a later EVENT_STATE_CHANGED happened, where
        # the correct target brightness/color was reached.
        changed = compare_to(old_attributes=last_service_data)
        if not changed:
            _LOGGER.debug(
                "State of '%s' didn't change wrt 'last_service_data' (context.id=%s)",
                light,
                context.id,
            )
    n_changes = self.cnt_significant_changes[light]
    if changed:
        self.cnt_significant_changes[light] += 1
        if n_changes >= self.max_cnt_significant_changes:
            # Only mark a light as significantly changing, if changed==True
            # N times in a row. We do this because sometimes a state changes
            # happens only *after* a new update interval has already started.
            self.manual_control[light] = True
            _fire_manual_control_event(switch, light, context, is_async=False)
    else:
        if n_changes > 1:
            _LOGGER.debug(
                "State of '%s' had 'cnt_significant_changes=%s' but the state"
                " changed to the expected settings now",
                light,
                n_changes,
            )
        # Reset the streak as soon as one expected state is observed.
        self.cnt_significant_changes[light] = 0
    return changed
async def maybe_cancel_adjusting(
    self, entity_id: str, off_to_on_event: Event, on_to_off_event: Optional[Event]
) -> bool:
    """Cancel the adjusting of a light if it has just been turned off.

    Possibly the lights just got a 'turn_off' call, however, the light
    is actually still turning off (e.g., because of a 'transition') and
    HA polls the light before the light is 100% off. This might trigger
    a rapid switch 'off' → 'on' → 'off'. To prevent this component
    from interfering on the 'on' state, we make sure to wait at least
    TURNING_OFF_DELAY (or the 'turn_off' transition time) between a
    'off' → 'on' event and then check whether the light is still 'on' or
    if the brightness is still decreasing. Only if it is the case we
    adjust the lights.

    Returns True when the 'off' → 'on' switch should be ignored.
    """
    if on_to_off_event is None:
        # No state change has been registered before.
        return False
    id_on_to_off = on_to_off_event.context.id

    turn_off_event = self.turn_off_event.get(entity_id)
    if turn_off_event is not None:
        transition = turn_off_event.data[ATTR_SERVICE_DATA].get(ATTR_TRANSITION)
    else:
        transition = None

    turn_on_event = self.turn_on_event.get(entity_id)
    # BUG FIX: 'turn_on_event' can be None (e.g., when the 'off' → 'on'
    # state change came from polling and no 'light.turn_on' call was ever
    # registered for this entity).  The original code accessed
    # 'turn_on_event.context.id' unconditionally and would raise
    # AttributeError, while 'turn_off_event' two lines above *is* guarded.
    id_turn_on = turn_on_event.context.id if turn_on_event is not None else None

    id_off_to_on = off_to_on_event.context.id
    if id_off_to_on == id_turn_on and id_off_to_on is not None:
        # State change 'off' → 'on' triggered by 'light.turn_on'.
        return False

    if (
        turn_off_event is not None
        and id_on_to_off == turn_off_event.context.id
        and id_on_to_off is not None
        and transition is not None  # 'turn_off' is called with transition=...
    ):
        # State change 'on' → 'off' and 'light.turn_off(..., transition=...)' come
        # from the same event, so wait at least the 'turn_off' transition time.
        delay = max(transition, TURNING_OFF_DELAY)
    else:
        # State change 'off' → 'on' happened because the light state was set.
        # Possibly because of polling.
        delay = TURNING_OFF_DELAY

    delta_time = (dt_util.utcnow() - on_to_off_event.time_fired).total_seconds()
    if delta_time > delay:
        # Enough time has passed; the 'on' state is trustworthy.
        return False

    # Here we could just `return True` but because we want to prevent any updates
    # from happening to this light (through async_track_time_interval or
    # sleep_state) for some time, we wait below until the light
    # is 'off' or the time has passed.
    delay -= delta_time  # delta_time has passed since the 'off' → 'on' event
    _LOGGER.debug("Waiting with adjusting '%s' for %s", entity_id, delay)

    for _ in range(3):
        # It can happen that the actual transition time is longer than the
        # specified time in the 'turn_off' service.
        coro = asyncio.sleep(delay)
        task = self.sleep_tasks[entity_id] = asyncio.ensure_future(coro)
        try:
            await task
        except asyncio.CancelledError:  # 'light.turn_on' has been called
            _LOGGER.debug(
                "Sleep task is cancelled due to 'light.turn_on('%s')' call",
                entity_id,
            )
            return False
        if not is_on(self.hass, entity_id):
            # The light really turned off; ignore the earlier 'on' blip.
            return True
        delay = TURNING_OFF_DELAY  # next time only wait this long

    if transition is not None:
        # Always ignore when there's a 'turn_off' transition.
        # Because it seems like HA cannot detect whether a light is
        # transitioning into 'off'. Maybe needs some discussion/input?
        return True

    # Now we assume that the lights are still on and they were intended
    # to be on. In case this still gives problems for some, we might
    # choose to **only** adapt on 'light.turn_on' events and ignore
    # other 'off' → 'on' state switches resulting from polling. That
    # would mean we 'return True' here.
    return False
|
"""Implementation (in 3D) of network in:
http://openaccess.thecvf.com/content_CVPRW_2019/papers/CLIC 2019/Zhou_End-to-end_Optimized_Image_Compression_with_Attention_Mechanism_CVPRW_2019_paper.pdf
Winner of CLIC 2019
"""
import torch
from torch import nn
from VarDACAE.nn.pytorch_gdn import GDN
from VarDACAE.nn.RAB import RAB
from VarDACAE.ML_utils import get_device
from VarDACAE.AEs.AE_Base import BaseAE
from VarDACAE.nn.explore.empty import Empty
class TucodecEncode(nn.Module):
    """3D encoder of the Tucodec architecture (CLIC 2019 winner).

    A convolutional trunk (Conv3d + GDN + two residual attention blocks)
    is combined with a multi-resolution path fed by intermediate trunk
    activations; the four resulting feature maps are concatenated on the
    channel dimension and reduced by one final convolution.
    """

    def __init__(self, activation_constructor, Block, Cstd, sigmoid=False):
        # activation_constructor: callable producing an activation module
        #   (called as activation_constructor(channels, flag) elsewhere).
        # Block: residual block class used inside the RAB modules.
        # Cstd: standard channel width used throughout the network.
        super(TucodecEncode, self).__init__()
        device = get_device()
        encode = True

        #downsamples and upsamples
        downsample1 = DownUp.downsample1(activation_constructor, Cstd, Cstd)
        upsample1 = DownUp.upsample1(activation_constructor, Cstd, Cstd)
        downsample2 = DownUp.downsample2(activation_constructor, Cstd, Cstd)
        upsample2 = DownUp.upsample2(activation_constructor, Cstd, Cstd)

        #main trunk first
        self.conv1 = nn.Conv3d(1, Cstd, kernel_size=(3,3, 2), stride=2, padding=(1, 1, 0))
        self.gdn2 = GDN(Cstd, device, not encode)
        self.conv3 = nn.Conv3d(Cstd, Cstd, kernel_size=(2, 3, 2), stride=2, padding=(0, 1, 0))
        self.gdn4 = GDN(Cstd, device, not encode)
        self.rnab5 = RAB(encode, activation_constructor, Cstd, sigmoid, Block,
                         downsample=downsample1, upsample=upsample1)
        self.conv6 = nn.Conv3d(Cstd, Cstd, kernel_size=(3,4,2), stride=2)
        self.gdn7 = GDN(Cstd, device, not encode)
        self.conv8 = nn.Conv3d(Cstd, Cstd, kernel_size=(3, 2, 2), stride=2, padding=(1,1,0))
        self.rnab9 = RAB(encode, activation_constructor, Cstd, sigmoid, Block,
                         downsample=downsample2, upsample=upsample2)

        #multi-res path
        self.convA = nn.Conv3d(Cstd, Cstd, kernel_size=3, stride=8)
        self.convB = nn.Conv3d(Cstd, Cstd, kernel_size=3, stride=4, padding=(0, 1, 0))
        self.convC = nn.Conv3d(Cstd, Cstd, kernel_size=(3, 2, 3), stride=2, padding=1)

        #final conv
        self.conv10 = nn.Conv3d(4 * Cstd, Cstd, kernel_size=(2,2,2), stride=2)

    def forward(self, x):
        """Encode x: fuse trunk output with the multi-resolution features.

        NOTE(review): assumes x is a 5D tensor (batch, 1, D, H, W) sized so
        that all four branches reach the same spatial shape — confirm against
        the training data loader.
        """
        h, xa, xb, xc = self.trunk(x)
        # Multi-res path: bring intermediate trunk activations to the same
        # spatial resolution as the trunk output.
        ha = self.convA(xa)
        hb = self.convB(xb)
        hc = self.convC(xc)
        inp = torch.cat([h, ha, hb, hc], dim=1) #concat on channel
        h = self.conv10(inp)
        # h = self.act11(h)
        # h = self.conv12(h) #to give same latent dim as baseline model
        return h

    def trunk(self, x):
        """Run the main trunk; also return intermediate activations xa, xb, xc
        that feed the multi-resolution path."""
        x = self.conv1(x)
        x = self.gdn2(x)
        xa = x
        x = self.conv3(x)
        x = self.gdn4(x)
        xb = x
        x = self.rnab5(x)
        x = self.conv6(x)
        x = self.gdn7(x)
        xc = x
        x = self.conv8(x)
        x = self.rnab9(x)
        return x, xa, xb, xc
class TucodecDecode(nn.Module):
    """3D decoder of the Tucodec architecture: mirrors TucodecEncode's trunk
    with transposed convolutions, keeping the encoder's layer numbering."""

    def __init__(self, activation_constructor, Block, Cstd, sigmoid=False):
        # activation_constructor: callable producing an activation module.
        # Block: residual block class used inside the RAB modules and the
        #   two extra residual blocks rb10a / rb10b.
        # Cstd: standard channel width used throughout the network.
        super(TucodecDecode, self).__init__()
        device = get_device()
        encode = False

        #downsamples and upsamples
        downsample2 = DownUp.downsample2(activation_constructor, Cstd, Cstd)
        upsample2 = DownUp.upsample2(activation_constructor, Cstd, Cstd)
        downsample1 = DownUp.downsample1(activation_constructor, Cstd, Cstd)
        upsample1 = DownUp.upsample1(activation_constructor, Cstd, Cstd)

        #Keep numbering from Encoder
        self.conv10 = nn.ConvTranspose3d( Cstd, Cstd, kernel_size=(2,2,2), stride=2)
        self.rb10a = Block(encode, activation_constructor, Cstd,)
        self.rb10b = Block(encode, activation_constructor, Cstd,)
        self.rnab9 = RAB(encode, activation_constructor, Cstd, sigmoid, Block,
                         downsample=downsample2, upsample=upsample2)
        self.conv8 = nn.ConvTranspose3d(Cstd, Cstd, kernel_size=(3, 2, 2), stride=2, padding=(1,1,0))
        self.gdn7 = GDN(Cstd, device, encode)
        self.conv6 = nn.ConvTranspose3d(Cstd, Cstd, kernel_size=(3,4,2), stride=2,)
        self.rnab5 = RAB(encode, activation_constructor, Cstd, sigmoid, Block,
                         downsample=downsample1, upsample=upsample1)
        self.gdn4 = GDN(Cstd, device, encode)
        self.conv3 = nn.ConvTranspose3d(Cstd, Cstd, kernel_size=(2, 3, 2), stride=2, padding=(0, 1, 0))
        self.gdn2 = GDN(Cstd, device, encode)
        self.conv1 = nn.ConvTranspose3d(Cstd, 1, kernel_size=(3,3, 2), stride=2, padding=(1, 1, 0))

    def forward(self, x):
        """Decode a latent tensor back to a single-channel 5D volume,
        applying the encoder's layers in reverse order."""
        x = self.conv10(x)
        x = self.rb10a(x)
        x = self.rb10b(x)
        x = self.rnab9 (x)
        x = self.conv8 (x)
        x = self.gdn7(x)
        x = self.conv6(x)
        x = self.rnab5(x)
        x = self.gdn4 (x)
        x = self.conv3(x)
        x = self.gdn2(x)
        x = self.conv1(x)
        return x
class DownUp:
    """Factory for the down/upsampling stacks used inside the RAB blocks.

    Every method returns an ``nn.Sequential`` of three (transposed) 3D
    convolutions with an activation between consecutive convolutions.
    ``channel_small`` is accepted for interface compatibility but unused.
    """

    @staticmethod
    def downsample1(activation_constructor, Cin, channel_small):
        """First RAB downsample."""
        first = nn.Conv3d(Cin, Cin, kernel_size=(3, 2, 2), stride=(2, 2, 2))
        middle = nn.Conv3d(Cin, Cin, kernel_size=(3, 3, 2), stride=(2, 2, 2), padding=(0, 0, 1))
        last = nn.Conv3d(Cin, Cin, kernel_size=(3, 3, 3), stride=(2, 2, 1), padding=(0, 0, 0))
        return nn.Sequential(
            first, activation_constructor(Cin, False),
            middle, activation_constructor(Cin, False),
            last,
        )

    @staticmethod
    def upsample1(activation_constructor, Cin, channel_small):
        """First RAB upsample (mirror of downsample1)."""
        first = nn.ConvTranspose3d(Cin, Cin, kernel_size=(3, 3, 3), stride=(2, 2, 1), padding=(0, 0, 0))
        middle = nn.ConvTranspose3d(Cin, Cin, kernel_size=(3, 3, 2), stride=(2, 2, 2), padding=(0, 0, 1))
        last = nn.ConvTranspose3d(Cin, Cin, kernel_size=(3, 2, 2), stride=(2, 2, 2))
        return nn.Sequential(
            first, activation_constructor(Cin, False),
            middle, activation_constructor(Cin, False),
            last,
        )

    @staticmethod
    def downsample2(activation_constructor, Cin, channel_small):
        """Second RAB downsample."""
        first = nn.Conv3d(Cin, Cin, kernel_size=(2, 2, 2), stride=1)
        middle = nn.Conv3d(Cin, Cin, kernel_size=(3, 3, 1), stride=(2, 2, 1), padding=0)
        last = nn.Conv3d(Cin, Cin, kernel_size=(2, 2, 1), stride=1, padding=0)
        return nn.Sequential(
            first, activation_constructor(Cin, False),
            middle, activation_constructor(Cin, False),
            last,
        )

    @staticmethod
    def upsample2(activation_constructor, Cin, channel_small):
        """Second RAB upsample (mirror of downsample2)."""
        first = nn.ConvTranspose3d(Cin, Cin, kernel_size=(2, 2, 1), stride=1, padding=0)
        middle = nn.ConvTranspose3d(Cin, Cin, kernel_size=(3, 3, 1), stride=(2, 2, 1), padding=0)
        last = nn.ConvTranspose3d(Cin, Cin, kernel_size=(2, 2, 2), stride=1)
        return nn.Sequential(
            first, activation_constructor(Cin, False),
            middle, activation_constructor(Cin, False),
            last,
        )
|
<reponame>petrs/py-tpm-analysis
import constatnts
import os
from analytics.algtest import AlgtestCase
from analytics.windows import WindowsCase
# a record represents a folder of results either containing a single test scenario (1 dataset) or multiple
class Record:
    """A folder of TPM test results: either one test scenario (1 dataset)
    or a folder of multiple sub-results."""

    # Class-wide counter of Record objects created so far.
    record_count = 0

    def __init__(self, path, index=0):
        # destination path
        self.path = path
        self.original_name = os.path.basename(os.path.normpath(path))  # gets rewritten
        self.folder_name = os.path.basename(os.path.normpath(path))

        # object meta data
        self.index = index
        self.is_folder = False
        self.test_type = None        # one of the constatnts.TEST_TYPE_* values, set by find_type()
        self.detail = None           # path to the detail data used by the parser
        self.is_valid_result = True
        self.number_of_results = 1   # >1 when this folder holds multiple sub-results
        self.partial_results = []    # child Record objects when number_of_results > 1
        self.flags = {
            constatnts.RECORD_FLAG_NON_UNIQUE_FOLDER_NAME: False
        }

        # TPM specific data (WindowsCase / AlgtestCase, set by find_type())
        self.data = None

        # Meta data
        Record.record_count += 1

    # will return structured dataset data
    def get_col(self):
        """Return this record's data structured as a plain dict (see parse_data)."""
        return parse_data(self)

    # processes test result metadata for both deep and shallow datasets (single / multi -result folders)
    def get_meta(self):
        if self.number_of_results > 1:
            for record in self.partial_results:
                if record.is_valid_result:
                    record.data.parse_meta()
                    record.data.parse_properties_fixed()
        else:
            if self.is_valid_result:
                self.data.parse_meta()
                self.data.parse_properties_fixed()

    # processes test results attributes for both deep and shallow datasets (single / multi -result folders)
    def get_results(self):
        if self.number_of_results > 1:
            for record in self.partial_results:
                if record.is_valid_result:
                    record.data.parse_algorithms()
                    record.data.parse_commands()
                    record.data.parse_ecc()
                    record.data.parse_performance()
        else:
            if self.is_valid_result:
                self.data.parse_algorithms()
                self.data.parse_commands()
                self.data.parse_ecc()
                self.data.parse_performance()

    # might be extended to mine more data
    def get_performance(self):
        pass

    # finds the correct variant of each test result (to be able to parse it correctly)
    def find_type(self):
        # NOTE(review): indentation of this method was lost in extraction; the
        # break placement below is reconstructed from the comments/semantics.
        if os.path.isdir(self.path):
            self.is_folder = True
        # folder contains only folders
        if self.is_folder:
            for path, directories, files in os.walk(self.path):
                # variant 1-5
                if check_variants(self, path, directories, files):
                    break
                # variant 6: test in nested folders
                if len(directories) == 1:
                    for path, directories, files in os.walk(os.path.join(self.path, next(iter(directories), None))):
                        # check recursive variant 1-5
                        if check_variants(self, path, directories, files):
                            break
                    break
                # variant 7: multiple tests
                elif len(directories) > 1:
                    self.number_of_results = len(directories)
                    self.test_type = constatnts.HAS_MULTIPLE_TEST
                    self.detail = constatnts.HAS_MULTIPLE_TEST
                    self.is_valid_result = False
                    # Each sub-folder becomes its own (recursively typed) Record.
                    for directory in directories:
                        new_directory = Record(os.path.join(path, directory))
                        new_directory.find_type()
                        self.partial_results.append(new_directory)
                    break
                else:
                    self.test_type = constatnts.TEST_TYPE_UNSUPPORTED
                    self.is_valid_result = False
                    break
        else:
            # A bare 'TpmInformation.txt' file is a Windows result.
            if os.path.basename(os.path.normpath(self.path)) == 'TpmInformation.txt':
                self.test_type = constatnts.TEST_TYPE_WINDOWS
                self.detail = self.path
                self.is_folder = False
            else:
                self.test_type = constatnts.TEST_TYPE_UNSUPPORTED
                self.is_valid_result = False
        # Instantiate the matching parser for the detected test type.
        if self.test_type == constatnts.TEST_TYPE_WINDOWS:
            self.data = WindowsCase(self.detail)
        elif self.test_type == constatnts.TEST_TYPE_ALGTEST:
            self.data = AlgtestCase(self.detail)

    def set_flag(self, name, value):
        """Set one of the RECORD_FLAG_* entries in self.flags."""
        self.flags[name] = value

    # definition of data per each dataset (1 test scenario)
    def get_data(self):
        return {
            'original_name': self.original_name,
            'manufacturer': self.data.manufacturer,
            'firmware': self.data.firmware_version,
            'vendor': self.data.vendor_string,
            'no_tpm': self.data.no_tpm,
            'inconclusive_ecc': self.data.inconclusive_ecc,
            'supported_algorithms': self.data.supported_algorithms,
            'supported_commands': self.data.supported_commands,
            'supported_ecc': self.data.supported_ecc,
            'properties_fixed': self.data.properties_fixed,
            'performance': self.data.performance
        }
# structure data for a folder of results
def parse_data(record):
data = {
'id': record.index,
'name': record.original_name,
'original_name': record.folder_name,
'dataset': []
}
dataset = []
if record.number_of_results > 1:
for record in record.partial_results:
if record.is_valid_result:
dataset.append(record.get_data())
else:
dataset.append(record.get_data())
data['dataset'] = dataset
return data
def check_variants(record, path, directories, files):
    """Detect which result-layout variant 'path' contains and set
    record.test_type / record.detail accordingly.

    @param record: the Record being classified (mutated in place)
    @param path, directories, files: one step of an os.walk() over the record
    @return True when a known variant (1-5) was recognised, else False

    NOTE(review): indentation was lost in extraction; the placement of the
    'break' inside the variant-2 walk is reconstructed (only the top level
    of 'out' is inspected) — confirm against the original repository.
    """
    # variant 1: algtest folder with result, detail, performance folders within
    if 'detail' in directories and 'performance' in directories:
        record.test_type = constatnts.TEST_TYPE_ALGTEST
        record.detail = os.path.join(path, 'detail')
        return True
    # variant 2: algtest folder with out
    if directories == ['out']:
        for path, directories, files in os.walk(os.path.join(record.path, 'out')):
            if 'detail' in directories and 'performance' in directories:
                record.test_type = constatnts.TEST_TYPE_ALGTEST
                record.detail = os.path.join(path, 'detail')
            # variant 3: algtest folder with detail files in this folder
            elif not directories:
                record.test_type = constatnts.TEST_TYPE_ALGTEST
                record.detail = path
            break
        return True
    # variant 4: windows test
    if 'TpmInformation.txt' in files:
        record.test_type = constatnts.TEST_TYPE_WINDOWS
        record.detail = os.path.join(path, 'TpmInformation.txt')
        return True
    # variant 5: algtest in non structured way
    if not directories and 'Quicktest_properties-fixed.txt' in files:
        record.test_type = constatnts.TEST_TYPE_ALGTEST
        record.detail = path
        return True
    return False
|
#!/usr/bin/env python
"""
node.py: Map all the node types of the PL grammar to node_id
Usage:
node.py --lang=<str> NODE_FILE [options]
Options:
-h --help Show this screen.
--lang=<str> target language
"""
from docopt import docopt
import pickle
import torch
from utils import pad_sents
class Node(object):
    """Bidirectional mapping between grammar node types and integer indices."""

    def __init__(self, node2id=None):
        """
        @param node2id (dict): dictionary mapping nodes -> indices; when
               omitted, a fresh mapping containing only '<pad>' and
               '<start>' is created.
        """
        if node2id:
            self.node2id = node2id
        else:
            self.node2id = dict()
            self.node2id['<pad>'] = 0    # pad token
            self.node2id['<start>'] = 1  # start token
        self.pad_id = self.node2id['<pad>']
        # Reverse mapping id -> node, kept in sync by add().
        # BUG FIX: a *method* named 'id2node' also existed; this instance
        # attribute shadowed it on every instance, making the method
        # unreachable — the dead method has been removed.
        self.id2node = {v: k for k, v in self.node2id.items()}

    def __getitem__(self, node):
        """ Retrieve node's index.
        @param node (str): node to look up.
        @returns index (int or None): index of the node, None when unknown
        """
        return self.node2id.get(node)

    def __contains__(self, node):
        """ Check if node is captured by Node.
        @param node (str): node to look up
        @returns contains (bool): whether node is contained
        """
        return node in self.node2id

    def __setitem__(self, key, value):
        """ Raise error, if one tries to edit the Node.
        """
        raise ValueError('Node dictionary is readonly')

    def __len__(self):
        """ Compute number of nodes in Node.
        @returns len (int): number of nodes in Node
        """
        return len(self.node2id)

    def __repr__(self):
        """ Representation of Node to be used
        when printing the object.
        """
        return 'Node[size=%d]' % len(self)

    def add(self, node):
        """ Add node to Node, if it is previously unseen.
        @param node (str): node to add to Node
        @return index (int): index that the node has been assigned
        """
        if node not in self:
            n_id = self.node2id[node] = len(self)
            self.id2node[n_id] = node
            return n_id
        else:
            return self[node]

    def nodes2indices(self, sents):
        """ Convert list of tokens or list of sentences of tokens
        into list or list of list of indices.
        @param sents (list[str] or list[list[str]]): sentence(s) containing either node or GenToken toks
        @return node_ids (list[int] or list[list[int]]): sentence(s) in indices
        """
        # FIX: use isinstance instead of 'type(...) == list'.
        if isinstance(sents[0], list):
            return [[self[node] for node in sent] for sent in sents]
        return [self[node] for node in sents]

    def indices2nodes(self, node_ids):
        """ Convert list of indices into nodes.
        @param node_ids (list[int]): list of node ids
        @return sents (list[str]): list of nodes
        """
        return [self.id2node[n_id] for n_id in node_ids]

    def nodes2Tensor(self, sents):
        """
        Convert list of tgt nodes to a tensor by padding required sents,
        where tgt sents contain nodes.
        @param sents (list[list[str]]): batch of tgt sents
        @return node_tensor (torch.tensor (max_sent_len, batch_size))
        """
        node_ids = self.nodes2indices(sents)
        nodes_padded = pad_sents(node_ids, self.pad_id)
        return torch.tensor(nodes_padded, dtype=torch.long)

    @staticmethod
    def build(grammar):
        """ Given a grammar (ASDL) description of language, extract all node types
        @param grammar (ASDLGrammar): grammar object described in the asdl file for the target language
        @returns nodes (Node): Node instance produced from the grammar
        """
        nodes = Node()
        for field in grammar.fields:  # field: Field(name, type, cardinality)
            node_name = field.type.name  # ASDLType(type_name)
            node_cardinality = field.cardinality
            # Cardinality is encoded as a suffix on the node name.
            if node_cardinality == 'optional':
                node_name += '?'
            elif node_cardinality == 'multiple':
                node_name += '*'
            nodes.add(node_name)
        return nodes

    def save(self, file_path):
        """ Save Node to file as pickle dump.
        @param file_path (str): file path to node file
        """
        # FIX: use a context manager so the file handle is closed.
        with open(file_path, 'wb') as f:
            pickle.dump(self.node2id, f)

    @staticmethod
    def load(file_path):
        """
        @param file_path (str): file path to node file
        @returns Node object loaded from pickle dump
        """
        with open(file_path, 'rb') as f:
            node2id = pickle.load(f)
        return Node(node2id)
if __name__ == '__main__':
    # CLI entry point: build the Node mapping from the target language's
    # ASDL grammar and pickle it to NODE_FILE (see module docstring).
    args = docopt(__doc__)
    lang = args['--lang']
    if lang == 'lambda':
        from lang.Lambda.asdl import ASDLGrammar
        asdl_desc = open('lang/Lambda/lambda_asdl.txt').read()
        grammar = ASDLGrammar.from_text(asdl_desc)
        nodes = Node.build(grammar)
        print('generated nodes: %d' % (len(nodes)))
        nodes.save(args['NODE_FILE'])
        print('nodes saved to %s' % args['NODE_FILE'])
    else:
        # Only the lambda-calculus grammar is wired up so far.
        print('language: %s currently not supported' % (lang))
|
import os
import logging
from utils.feature_utils import FeatureUtils, PairFeatureUtils
from utils.file_utils import FileUtils
logger = logging.getLogger(__name__)
logging.basicConfig(level = logging.INFO)
def copy_clusters(clusters):
    """Return a deep copy of *clusters* so the caller's data is not mutated."""
    from copy import deepcopy
    return deepcopy(clusters)
class MultiPassSieve:
    """Rule-based coreference resolution via the multi-pass sieve approach:
    six passes merge mention clusters with increasingly relaxed rules
    (exact match → precise constructs → head matches → pronouns)."""

    def __init__(self, passage_dir, temp_dir, output_dir):
        # Every pickled mention file in temp_dir is processed by run().
        filenames = os.listdir(temp_dir)
        self.clusters = {}               # current document's mentions; replaced per file in run()
        self.temp_dir = temp_dir         # pickled mention files
        self.output_dir = output_dir     # JSON results are written here
        self.passage_dir = passage_dir   # raw passage text, used by pass_2
        self.filenames = filenames

    def run(self, log_step=10):
        """Run all six sieve passes on every file and write JSON results."""
        total_files = len(self.filenames)
        for idx, name in enumerate(self.filenames):
            if (idx + 1) % log_step == 0:
                logger.info("Running MPS %d/%d" % (idx + 1, total_files))
            mentions = FileUtils.read_pickle(self.temp_dir, name)
            sents = FileUtils.read_passage_file(self.passage_dir, name)
            # Work on a deep copy so the gold labels in 'mentions' stay intact.
            self.clusters = copy_clusters(mentions)
            self.pass_1()
            self.pass_2(sents)
            self.pass_3()
            self.pass_4()
            self.pass_5()
            self.pass_6()
            equivalent_class = self.build_mps_equivalent_class()
            gold_equivalent_class = self.build_gold_equivalent_class(mentions)
            FileUtils.write_mps_result_to_json(self.output_dir, name, equivalent_class)
            FileUtils.write_gold_cluster_to_json(self.output_dir, name, gold_equivalent_class)

    def is_same_cluster(self, mention_a, mention_b):
        """True when both mentions already belong to the same cluster."""
        return mention_a["cluster"] == mention_b["cluster"]

    def merge_cluster(self, mention_a, mention_b):
        """Move mention_b, and every mention sharing its cluster id,
        into mention_a's cluster."""
        old_cluster = mention_b["cluster"]
        new_cluster = mention_a["cluster"]
        mention_b["cluster"] = new_cluster
        for obj in self.clusters:
            if obj["cluster"] == old_cluster:
                obj["cluster"] = new_cluster

    # Pass 1: Exact string match
    def pass_1(self):
        """Merge non-pronoun mention pairs whose strings match exactly."""
        for antecedent_idx in range(len(self.clusters)):
            c1 = self.clusters[antecedent_idx]
            if FeatureUtils.is_pronoun(c1):
                continue
            # NOTE(review): the inner range starts at antecedent_idx (not +1),
            # so c1 is compared with itself once; is_same_cluster() skips it.
            for mention_idx in range(antecedent_idx, len(self.clusters)):
                c2 = self.clusters[mention_idx]
                if self.is_same_cluster(c1, c2) or FeatureUtils.is_pronoun(c2):
                    continue
                if PairFeatureUtils.is_exact_match(c1, c2):
                    self.merge_cluster(c1, c2)

    # Pass 2: Precise Constructs
    def pass_2(self, sents):
        """Merge pairs linked by appositive/copulative constructions or
        abbreviations (needs the raw sentences)."""
        for antecedent_idx in range(len(self.clusters)):
            c1 = self.clusters[antecedent_idx]
            if FeatureUtils.is_pronoun(c1):
                continue
            for mention_idx in range(antecedent_idx, len(self.clusters)):
                c2 = self.clusters[mention_idx]
                if self.is_same_cluster(c1, c2) or FeatureUtils.is_pronoun(c2):
                    continue
                if PairFeatureUtils.is_appositive(c1, c2, sents) or PairFeatureUtils.is_copulative(c1, c2, sents) or PairFeatureUtils.is_abbreviation(c1, c2):
                    self.merge_cluster(c1, c2)

    # Pass 3: Strict Head Match
    def pass_3(self):
        """Merge pairs by strict head match (demonstratives, shortened
        names, head matches)."""
        for antecedent_idx in range(len(self.clusters)):
            c1 = self.clusters[antecedent_idx]
            if FeatureUtils.is_pronoun(c1):
                continue
            for mention_idx in range(antecedent_idx, len(self.clusters)):
                c2 = self.clusters[mention_idx]
                if self.is_same_cluster(c1, c2) or FeatureUtils.is_pronoun(c2):
                    continue
                # NOTE(review): unlike passes 1/2/4/5, this pass stops
                # scanning further mentions after the first merge (break).
                elif PairFeatureUtils.is_demonstrative(c1, c2) or PairFeatureUtils.is_name_shortened(c1, c2):
                    self.merge_cluster(c1, c2)
                    break
                if PairFeatureUtils.is_head_match(c1, c2):
                    self.merge_cluster(c1, c2)
                    break

    # Pass 4: Proper Head Match
    def pass_4(self):
        """Merge pairs whose full proper heads match."""
        for antecedent_idx in range(len(self.clusters)):
            c1 = self.clusters[antecedent_idx]
            if FeatureUtils.is_pronoun(c1):
                continue
            for mention_idx in range(antecedent_idx, len(self.clusters)):
                c2 = self.clusters[mention_idx]
                if self.is_same_cluster(c1, c2) or FeatureUtils.is_pronoun(c2):
                    continue
                if PairFeatureUtils.is_full_proper_head_match(c1, c2):
                    self.merge_cluster(c1, c2)

    # Pass 5: Relaxed Head Match
    def pass_5(self):
        """Merge pairs under the relaxed head-match rule."""
        for antecedent_idx in range(len(self.clusters)):
            c1 = self.clusters[antecedent_idx]
            if FeatureUtils.is_pronoun(c1):
                continue
            for mention_idx in range(antecedent_idx, len(self.clusters)):
                c2 = self.clusters[mention_idx]
                if self.is_same_cluster(c1, c2) or FeatureUtils.is_pronoun(c2):
                    continue
                if PairFeatureUtils.is_relaxed_match(c1, c2):
                    self.merge_cluster(c1, c2)

    # Pass 6: Pronoun
    def pass_6(self):
        """Link each pronoun to the nearest compatible preceding mention,
        scanning right-to-left with two cursors."""
        candidate_idx = len(self.clusters) - 1
        antecedent_idx = len(self.clusters) - 2
        while candidate_idx > 0 and antecedent_idx >= 0:
            if candidate_idx <= antecedent_idx:
                # Keep the antecedent cursor strictly before the candidate.
                antecedent_idx = candidate_idx - 1
            # NOTE(review): "" entries presumably mark removed/empty mentions
            # — confirm against the mention extraction step.
            elif self.clusters[candidate_idx] == "":
                candidate_idx -= 1
            elif self.clusters[antecedent_idx] == "":
                antecedent_idx -= 1
            elif FeatureUtils.is_pronoun(self.clusters[candidate_idx]):
                # Skip incompatible antecedents (clitic distance, locations,
                # other pronouns, word-class mismatches).
                if FeatureUtils.is_clitic(self.clusters[candidate_idx]) and abs(antecedent_idx - candidate_idx) <= 1 \
                        or FeatureUtils.is_location(self.clusters[antecedent_idx]) \
                        or FeatureUtils.is_pronoun(self.clusters[antecedent_idx]) \
                        or PairFeatureUtils.is_word_class_mismatch(self.clusters[candidate_idx], self.clusters[antecedent_idx]):
                    antecedent_idx -= 1
                    continue
                self.clusters[candidate_idx]["cluster"] = self.clusters[antecedent_idx]["cluster"]
                candidate_idx -= 1
                antecedent_idx -= 1
            else:
                candidate_idx -= 1
                antecedent_idx = candidate_idx - 1

    def build_mps_equivalent_class(self):
        """Group mention ids by their final (post-sieve) cluster id."""
        cluster_found = {}
        for cluster in self.clusters:
            arr = cluster_found.get(cluster["cluster"])
            if cluster_found.get(cluster["cluster"]) is None:
                arr = []
                cluster_found[cluster["cluster"]] = arr
            arr.append(cluster["id"])
        return cluster_found

    def build_gold_equivalent_class(self, mentions):
        """Group mention ids by their gold label(s); a mention may carry
        a single label or a list of labels."""
        gold = {}
        for mention in mentions:
            labels = mention["label"]
            if not isinstance(mention["label"], list):
                labels = [labels]
            for label in labels:
                if gold.get(label) is None:
                    gold[label] = []
                if mention["id"] not in gold.get(label):
                    gold[label].append(mention["id"])
        return gold
|
# -*- coding: utf-8 -*-
#
# Copyright 2016 <NAME>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import matplotlib
matplotlib.use('Agg')
import sys, dateutil.parser, numpy, json, collections, math, scipy.optimize, argparse, os
from matplotlib import pyplot
def survival_plot(input_fns, exp_fit=False, display=False, outfile='survival_plot', years=5, title=None):
    """Plot the percentage of code lines still present n years after commit.

    @param input_fns: JSON files, each mapping commit -> [(timestamp, line_count), ...]
    @param exp_fit: also fit/plot an exponential decay across all inputs
    @param display: show the figure interactively after saving
    @param outfile: file the figure is saved to
    @param years: x-axis range in years
    @param title: optional prefix for the plot title
    """
    all_deltas = []
    YEAR = 365.25 * 24 * 60 * 60  # seconds per (Julian) year
    pyplot.figure(figsize=(13, 8))
    pyplot.style.use('ggplot')
    for fn in input_fns:
        print('reading %s' % fn)
        commit_history = json.load(open(fn))
        print('counting %d commits' % len(commit_history))
        # deltas[t] accumulates (change in surviving lines, change in total
        # population) at time-offset t seconds after the commit.
        deltas = collections.defaultdict(lambda: numpy.zeros(2))
        total_n = 0
        for commit, history in commit_history.items():
            t0, orig_count = history[0]
            total_n += orig_count
            last_count = orig_count
            for t, count in history[1:]:
                deltas[t-t0] += (count-last_count, 0)
                last_count = count
            # After the last observation the remaining lines leave both the
            # surviving count and the population.
            deltas[history[-1][0] - t0] += (-last_count, -orig_count)
        all_deltas.append((total_n, deltas))
        print('adding %d deltas...' % len(deltas))
        total_k = total_n
        P = 1.0  # running survival probability
        xs = []
        ys = []
        for t in sorted(deltas.keys()):
            delta_k, delta_n = deltas[t]
            xs.append(t / YEAR)
            ys.append(100. * P)
            P *= 1 + delta_k / total_n
            total_k += delta_k
            total_n += delta_n
            if P < 0.05:
                # The tail is too noisy below 5% survival; stop this curve.
                break
        print('plotting...')
        if exp_fit:
            pyplot.plot(xs, ys, color='darkgray')
        else:
            # Use the directory part of the path as the curve label when set.
            parts = os.path.split(fn)
            pyplot.plot(xs, ys, label=(len(parts) > 1 and parts[-2] or None))

    def fit(k):
        # Squared-error loss of a total_n * exp(-k*t) decay against every
        # input's survival curve; used by the optimizer below.
        loss = 0.0
        for total_n, deltas in all_deltas:
            total_k = total_n
            P = 1.0
            for t in sorted(deltas.keys()):
                delta_k, delta_n = deltas[t]
                pred = total_n * math.exp(-k * t / YEAR)
                loss += (total_n * P - pred)**2
                P *= 1 + delta_k / total_n
                total_k += delta_k
                total_n += delta_n
        print(k, loss)
        return loss

    if exp_fit:
        print('fitting exponential function')
        k = scipy.optimize.fmin(fit, 0.5, maxiter=50)[0]
        ts = numpy.linspace(0, years, 1000)
        ys = [100. * math.exp(-k * t) for t in ts]
        pyplot.plot(ts, ys, color='red', label='Exponential fit, half-life = %.2f years' % (math.log(2) / k))
    pyplot.xlabel('Years')
    pyplot.ylabel('%')
    pyplot.xlim([0, years])
    pyplot.ylim([0, 100])
    plot_title = '% of lines still present in code after n years'
    if title:
        plot_title = title + ': ' + plot_title
    pyplot.title(plot_title)
    pyplot.legend()
    pyplot.tight_layout()
    pyplot.savefig(outfile)
    if display:
        pyplot.show()
def survival_plot_cmdline():
    """Command-line entry point: parse arguments and forward them to survival_plot()."""
    cli = argparse.ArgumentParser(description='Plot survival plot')
    cli.add_argument('--exp-fit', action='store_true', help='Plot exponential fit')
    cli.add_argument('--display', action='store_true', help='Display plot')
    cli.add_argument('--outfile', default='survival_plot.png', type=str,
                     help='Output file to store results (default: %(default)s)')
    cli.add_argument('--years', type=float, default=5,
                     help='Number of years on x axis (default: %(default)s)')
    cli.add_argument('--title', type=str, help='Optional title prefix for the plot')
    cli.add_argument('input_fns', nargs='*')
    survival_plot(**vars(cli.parse_args()))
# Allow running this module directly as a script.
if __name__ == '__main__':
    survival_plot_cmdline()
|
<filename>limix/plot/manhattan.py
from __future__ import division
from numpy import arange, asarray, cumsum, flipud, log10
# NOTE(review): mutable default arguments (dicts) below; safe as written
# because they are only unpacked (**style), never mutated.
def plot_manhattan(df,
                   alpha=None,
                   null_style=dict(alpha=0.1, color='DarkBlue'),
                   alt_style=dict(alpha=0.5, color='Orange'),
                   ax=None):
    r"""Produce a manhattan plot.

    Parameters
    ----------
    df : :class:`pandas.DataFrame`
        A Pandas DataFrame containing columns pv for p-values, pos for
        base-pair positions, and chrom for chromossome names.
    alpha : float
        Threshold for significance. Defaults to 0.01 significance level
        (bonferroni-adjusted).
    null_style : dict
        Matplotlib style for non-significant points.
    alt_style : dict
        Matplotlib style for significant points.
    ax : :class:`matplotlib.axes.AxesSubplot`:
        The target handle for this figure. If None, the current axes is set.

    Returns
    -------
    :class:`matplotlib.axes.AxesSubplot`
        Axes object.

    Examples
    --------
    .. plot::

        from numpy.random import RandomState
        from numpy import arange, ones, kron
        import pandas as pd
        from limix.plot import plot_manhattan
        from matplotlib import pyplot as plt
        random = RandomState(1)
        pv = random.rand(5000)
        pv[1200:1250] = random.rand(50)**4
        chrom = kron(arange(1,6), ones(1000))
        pos = kron(ones(5), arange(1,1001))
        data = dict(pv=pv, chrom=chrom, pos=pos)
        plot_manhattan(pd.DataFrame(data=data))
        plt.tight_layout()
        plt.show()
    """
    import matplotlib.pyplot as plt
    ax = plt.gca() if ax is None else ax

    if 'pos' not in df:
        # Fall back to the row index when no base-pair position is given.
        df['pos'] = arange(df.shape[0])

    if 'label' not in df:
        # Synthesize annotation labels like 'chrom1_pos42'.
        chrom = df['chrom'].astype(int).astype(str)
        pos = df['pos'].astype(int).astype(str)
        df['label'] = (
            'chrom' + chrom + '_pos' + pos)

    df = _abs_pos(df)

    if alpha is None:
        # Bonferroni-corrected 1% significance level.
        alpha = 0.01 / df.shape[0]

    # Head-room above the most significant point (or the threshold).
    ytop = -1.2 * log10(min(df['pv'].min(), alpha))

    _plot_chrom_strips(ax, df, ytop)
    _plot_points(ax, df, alpha, null_style, alt_style)
    _set_frame(ax, df, ytop)

    ax.set_ylabel('-log$_{10}$pv')
    ax.set_xlabel('chromosome')

    _set_ticks(ax, _chrom_bounds(df))

    return ax
def _set_frame(ax, df, ytop):
ax.set_ylim(0, ytop)
ax.set_xlim(0, df['abs_pos'].max())
ax.spines["right"].set_visible(False)
ax.spines["top"].set_visible(False)
def _plot_points(ax, df, alpha, null_style, alt_style):
    """Scatter -log10 p-values, split at `alpha`, annotating significant hits."""
    null_df = df.loc[df['pv'] >= alpha, :]
    alt_df = df.loc[df['pv'] < alpha, :]
    ax.plot(null_df['abs_pos'], -log10(null_df['pv']), '.', ms=5, **null_style)
    ax.plot(alt_df['abs_pos'], -log10(alt_df['pv']), '.', ms=5, **alt_style)
    # Label each significant point individually.
    hits = zip(alt_df['abs_pos'].values, alt_df['pv'].values,
               alt_df['label'].values)
    for x, pv, label in hits:
        _annotate(ax, x, -log10(pv), label)
def _plot_chrom_strips(ax, df, ytop):
uchroms = df['chrom'].unique()
for i in range(0, len(uchroms), 2):
ax.fill_between(
df['abs_pos'],
0,
ytop,
where=df['chrom'] == uchroms[i],
facecolor='LightGray',
linewidth=0,
alpha=0.5)
def _set_ticks(ax, chrom_bounds):
n = len(chrom_bounds) - 1
xticks = asarray([chrom_bounds[i:i + 2].mean() for i in range(n)])
ax.set_xticks(xticks)
ax.tick_params(axis='x', which='both', labelsize=6)
ax.set_xticklabels(arange(1, n + 2))
ax.xaxis.set_ticks_position('bottom')
ax.yaxis.set_ticks_position('left')
def _abs_pos(df):
uchroms = df['chrom'].unique()
chrom_ends = [df['pos'][df['chrom'] == c].max() for c in uchroms]
offset = flipud(cumsum(chrom_ends)[:-1])
df['abs_pos'] = df['pos'].copy()
uchroms = list(reversed(uchroms))
for i in range(len(offset)):
ix = df['chrom'] == uchroms[i]
df.loc[ix, 'abs_pos'] = df.loc[ix, 'abs_pos'] + offset[i]
return df
def _chrom_bounds(df):
uchroms = df['chrom'].unique()
v = [df['abs_pos'][df['chrom'] == c].min() for c in uchroms]
return asarray(v + [df['abs_pos'].max()])
def _annotate(ax, x, y, text):
ax.annotate(
text,
xy=(x, y),
xytext=(-18, 18),
textcoords='offset points',
fontsize=6,
ha='center',
va='bottom',
bbox=dict(boxstyle='round,pad=0.2', fc='yellow', alpha=0.3),
arrowprops=dict(
arrowstyle='->', connectionstyle='arc3,rad=0.5', color='red'))
|
<reponame>uigc/equities<filename>options/bsm.py<gh_stars>0
# Option Pricing in Python using the Black-Scholes-Merton Model (BSM) - Nov 2018. Author: <NAME>.
# Copyright 2018, <NAME>, All Rights Reserved.
import numpy as np
import scipy.stats as si
'''
Black-Scholes-Merton Model for European Options
S: Spot stock price
K: Strike price
T: Time to maturity
r: Risk-free rate of interest (in decimals)
sigma: Stock volatility
q: Dividend rate (in decimals)
'''
def bs():
    """Interactively price a European option with the Black-Scholes-Merton model.

    Prompts for spot S, strike K, maturity T (years), risk-free rate r,
    volatility sigma, option type, and continuous dividend yield q, then
    prints the price and Greeks (delta, gamma, vega, theta, rho) plus d1/d2.

    Returns an error string for an invalid option type or negative dividend
    yield; otherwise returns None (results are printed).
    """
    S = float(input("Spot stock price? "))
    K = float(input("Strike price? "))
    T = float(input("Time to maturity (years)? "))
    r = float(input("Risk-free rate (decimals)? "))
    sigma = float(input("Volatility (decimals)? "))
    option = input("Call or put? ").lower()
    q = float(input("Dividends? Enter 0 if none. "))
    if q < 0:
        return "Invalid dividends."
    # With q == 0 the dividend-adjusted formulas reduce exactly to the plain
    # BSM model, so a single code path replaces the former duplicated
    # q == 0 / q > 0 branches.
    sqrtT = np.sqrt(T)
    d1 = (np.log(S / K) + (r - q + 0.5 * sigma ** 2) * T) / (sigma * sqrtT)
    d2 = d1 - sigma * sqrtT
    N1 = si.norm.cdf(d1)   # N(d1)
    N2 = si.norm.cdf(d2)   # N(d2)
    n1 = si.norm.cdf(-d1)  # N(-d1)
    n2 = si.norm.cdf(-d2)  # N(-d2)
    # Standard normal PDF at d1. The previous code computed
    # exp(+N(-d1)**2 / 2) / sqrt(2*pi) -- it exponentiated a CDF value with
    # the wrong sign -- which corrupted gamma, vega and theta.
    nPrime = si.norm.pdf(d1)
    disc_q = np.exp(-q * T)  # dividend discount factor
    disc_r = np.exp(-r * T)  # risk-free discount factor
    gamma = nPrime * disc_q / (S * sigma * sqrtT)
    vega = nPrime * S * sqrtT * disc_q
    if option in ("c", "call"):
        price = S * disc_q * N1 - K * disc_r * N2
        delta = N1 * disc_q
        theta = (-(S * nPrime * sigma * disc_q) / (2 * sqrtT)
                 - r * K * disc_r * N2 + q * S * disc_q * N1)
        rho = K * T * disc_r * N2
        print("Price of Call: $", round(price, 4),
              "\n Call Delta: ", round(delta, 4),
              "\n Gamma: ", round(gamma, 4),
              "\n Vega: ", round(vega, 4),
              "\n Call Theta: ", round(theta, 4),
              "\n Call Rho: ", round(rho, 4),
              "\n d1: ", round(d1, 4),
              "\n d2: ", round(d2, 4))
    elif option in ("p", "put"):
        price = K * disc_r * n2 - S * disc_q * n1
        delta = (N1 - 1) * disc_q
        theta = (-(S * nPrime * sigma * disc_q) / (2 * sqrtT)
                 + r * K * disc_r * n2 - q * S * disc_q * n1)
        rho = -K * T * disc_r * n2
        print("Price of Put: $", round(price, 4),
              "\n Put Delta: ", round(delta, 4),
              "\n Gamma: ", round(gamma, 4),
              "\n Vega: ", round(vega, 4),
              "\n Put Theta: ", round(theta, 4),
              "\n Put Rho: ", round(rho, 4),
              "\n d1: ", round(d1, 4),
              "\n d2: ", round(d2, 4))
    else:
        return "Invalid input."
|
<reponame>deepio-oc/RPBot
import pytest
import io
from rpbot.reader.robot_results_parser import RobotResultsParser
@pytest.fixture
def reporter(mocker):
    """Mock reporter; the tests inspect the calls the parser makes on it."""
    reporter = mocker.MagicMock()
    yield reporter
@pytest.fixture
def parser(reporter):
    """RobotResultsParser wired to the mocked reporter fixture."""
    parser = RobotResultsParser(reporter)
    yield parser
# Robot Framework output.xml from a minimal run: one suite ("Test Rp") with a
# single passing test ("test1") that calls BuiltIn.Log once.
simple_output_xml = """<?xml version="1.0" encoding="UTF-8"?>
<robot generator="Robot 3.2.1 (Python 3.8.2 on linux)" generated="20200926 15:33:43.760" rpa="false">
<suite id="s1" name="Test Rp" source="/p4_ws/doyou89.jung/workspace/projects/cosmos/atest/example/test_rp.robot">
<test id="s1-t1" name="test1">
<kw name="Log" library="BuiltIn">
<doc>Logs the given message with the given level.</doc>
<arguments>
<arg>test1</arg>
</arguments>
<msg timestamp="20200926 15:33:43.777" level="INFO">test1</msg>
<status status="PASS" starttime="20200926 15:33:43.777" endtime="20200926 15:33:43.777"></status>
</kw>
<status status="PASS" starttime="20200926 15:33:43.776" endtime="20200926 15:33:43.777" critical="yes"></status>
</test>
<status status="PASS" starttime="20200926 15:33:43.761" endtime="20200926 15:33:43.777"></status>
</suite>
<statistics>
<total>
<stat pass="1" fail="0">Critical Tests</stat>
<stat pass="1" fail="0">All Tests</stat>
</total>
<tag>
</tag>
<suite>
<stat pass="1" fail="0" id="s1" name="Test Rp">Test Rp</stat>
</suite>
</statistics>
<errors>
</errors>
</robot>
"""
# Same run, but the test additionally calls BuiltIn.Fail, so the keyword,
# test and suite statuses are FAIL.
simple_failed_output_xml = """<?xml version="1.0" encoding="UTF-8"?>
<robot generator="Robot 3.2.1 (Python 3.8.2 on linux)" generated="20200926 15:34:56.754" rpa="false">
<suite id="s1" name="Test Rp" source="/p4_ws/doyou89.jung/workspace/projects/cosmos/atest/example/test_rp.robot">
<test id="s1-t1" name="test1">
<kw name="Log" library="BuiltIn">
<doc>Logs the given message with the given level.</doc>
<arguments>
<arg>test1</arg>
</arguments>
<msg timestamp="20200926 15:34:56.771" level="INFO">test1</msg>
<status status="PASS" starttime="20200926 15:34:56.771" endtime="20200926 15:34:56.771"></status>
</kw>
<kw name="Fail" library="BuiltIn">
<doc>Fails the test with the given message and optionally alters its tags.</doc>
<arguments>
<arg>test fail message</arg>
</arguments>
<msg timestamp="20200926 15:34:56.771" level="FAIL">test fail message</msg>
<status status="FAIL" starttime="20200926 15:34:56.771" endtime="20200926 15:34:56.771"></status>
</kw>
<status status="FAIL" starttime="20200926 15:34:56.770" endtime="20200926 15:34:56.771" critical="yes">test fail message</status>
</test>
<status status="FAIL" starttime="20200926 15:34:56.755" endtime="20200926 15:34:56.772"></status>
</suite>
<statistics>
<total>
<stat pass="0" fail="1">Critical Tests</stat>
<stat pass="0" fail="1">All Tests</stat>
</total>
<tag>
</tag>
<suite>
<stat pass="0" fail="1" id="s1" name="Test Rp">Test Rp</stat>
</suite>
</statistics>
<errors>
</errors>
</robot>
"""
def test_xml_to_db(reporter, parser):
    """A passing run reports one suite, one test and one keyword, all PASS."""
    parser.xml_to_db(io.StringIO(simple_output_xml))
    # Suite events.
    assert reporter.start_suite.call_count == 1
    suite_args = reporter.start_suite.call_args[0]
    assert suite_args[0] == 'Test Rp'
    assert suite_args[1]['id'] == 's1'
    assert suite_args[1]['status'] == 'PASS'
    # Test events.
    assert reporter.start_test.call_count == 1
    test_args = reporter.start_test.call_args[0]
    assert test_args[0] == 'test1'
    assert test_args[1]['id'] == 's1-t1'
    assert test_args[1]['status'] == 'PASS'
    # Keyword events.
    assert reporter.start_keyword.call_count == 1
    kw_args = reporter.start_keyword.call_args[0]
    assert kw_args[0] == 'BuiltIn.Log'
    assert kw_args[1]['type'] == 'KEYWORD'
    assert kw_args[1]['kwname'] == 'Log'
    assert kw_args[1]['libname'] == 'BuiltIn'
    assert kw_args[1]['status'] == 'PASS'
    # Matching end events.
    assert reporter.end_keyword.call_count == 1
    assert reporter.end_test.call_count == 1
    assert reporter.end_suite.call_count == 2
def test_xml_to_db_with_fail(reporter, parser):
    """A failing run propagates FAIL status to the suite and test events."""
    parser.xml_to_db(io.StringIO(simple_failed_output_xml))
    # Suite events.
    assert reporter.start_suite.call_count == 1
    suite_args = reporter.start_suite.call_args[0]
    assert suite_args[0] == 'Test Rp'
    assert suite_args[1]['id'] == 's1'
    assert suite_args[1]['status'] == 'FAIL'
    # Test events.
    assert reporter.start_test.call_count == 1
    test_args = reporter.start_test.call_args[0]
    assert test_args[0] == 'test1'
    assert test_args[1]['id'] == 's1-t1'
    assert test_args[1]['status'] == 'FAIL'
    # Keyword events: Log (PASS) then Fail; inspect the first call.
    assert reporter.start_keyword.call_count == 2
    first_kw = reporter.start_keyword.call_args_list[0][0]
    assert first_kw[0] == 'BuiltIn.Log'
    assert first_kw[1]['type'] == 'KEYWORD'
    assert first_kw[1]['kwname'] == 'Log'
    assert first_kw[1]['libname'] == 'BuiltIn'
    assert first_kw[1]['status'] == 'PASS'
    # Matching end events.
    assert reporter.end_keyword.call_count == 2
    assert reporter.end_test.call_count == 1
    assert reporter.end_suite.call_count == 2
|
# Practice: Collections & Loops
This section is meant to give you additional practice, with a particular focus on collections and loops. However, we do assume that you have the previous section's material understood as well, so we can't forget about conditionals or variable types learned previously.
As in the last practice section, each question will include a handful of `assert` statements. After you write and execute the code to each question, each `assert` should "pass silently" (meaning: should give no output upon execution) indicating that you are on the right track. After you've written your code and run the `assert`s, you can always check your answers, as the answers to these questions are included in the "Answers" chapter of this book.
## Collections
**Collections Q1**. The list `trees` has been provided below for you.
***Use the `trees` variable and indexing*** to generate each of the four variables at left below, so that each stores the output at right below. For example, `trees_a` should refer to `trees` in its answer and use indexing to store the string 'Pine'.
- `trees_a` | `'Pine'`
- `trees_b` | `['Liquid Amber', 'Pepper', 'Podocarpus']`
- `trees_c` | `['Eucalyptus', 'Ficus', 'Tipuana', 'Pepper']`
- `trees_d` | `['Palms', 'Carrotwood', 'Tipuana', 'Podocarpus']`
Variable provided:
```python
trees = ['Palms', 'Jacaranda', 'Eucalyptus', 'Carrotwood',
'Ficus', 'Pine', 'Tipuana', 'Liquid Amber',
'Pepper', 'Podocarpus']
```
Checks you can use to see if you're on the right track:
assert trees_a == 'Pine'
assert trees_b == ['Liquid Amber', 'Pepper', 'Podocarpus']
assert trees_c == ['Eucalyptus', 'Ficus', 'Tipuana', 'Pepper']
assert trees_d == ['Palms', 'Carrotwood', 'Tipuana', 'Podocarpus']
**Collections Q2**.
Part I. Generate a list called `practice_list` that meets the following criteria:
- contains 6 elements/items
- has 2 strings, 1 float, 1 boolean, 1 dictionary, and 1 tuple as its elements
The specific elements in the list `practice_list` are up to you and can be stored in any order/position within the list, so long as they're in there and of the types specified above.
Checks you can use to see if you're on the right track:
assert isinstance(practice_list, list)
assert len(practice_list) == 6
Part II. Using the list you defined in Part 1 (`practice_list`), ***use indexing*** to return the slice/element of the list specified below, storing it in the variable name provided:
- `slice_1` | stores the second, third, and fourth elements in `practice_list`
- `slice_2` | stores the second, fourth, and sixth elements in `practice_list`
- `slice_3` | uses ***negative indexing*** to return the single element stored in the third position from the end of `practice_list`
Note on wording: the first _element_ in a given list is the same as the 0<sup>th</sup> _index_ of that list.
Checks you can use to see if you're on the right track:
assert len(slice_1) == 3
assert len(slice_2) == 3
assert slice_3
**Collections Q3**. Generate a dictionary called `practice_dict` that meets the following criteria:
- has three keys: 'name', 'favorite_game', and 'height'
- stores _your_ `name` (string), `favorite_game` (string), and `height` (int) in inches as each key's value
Checks you can use to see if you're on the right track:
assert isinstance(practice_dict, dict)
assert len(practice_dict) == 3
assert isinstance(practice_dict['name'], str)
assert isinstance(practice_dict['favorite_game'], str)
assert isinstance(practice_dict['height'], int)
**Collections Q4**.
Part I. Generate a dictionary called `grading` that meets the following criteria:
- has 5 keys: 'A', 'B', 'C', 'D', 'F'
- stores a tuple containing the lower and upper bound for each letter grade as each key's value.
Use the following ranges for reference:
- A: 90-100
- B: 80-90
- C: 70-80
- D: 60-70
- F: 0-60
Note that the upper bound for one letter will be the same value as the lower bound for the higher grade. This is expected behavior.
Checks you can use to see if you're on the right track:
assert isinstance(grading, dict)
assert len(grading) == 5
assert isinstance(list(grading.keys())[0], str)
assert isinstance(list(grading.values())[0], tuple)
assert list(grading.keys()) == ['A', 'B', 'C', 'D', 'F']
assert list(grading.values()) == [(90, 100), (80, 90), (70, 80),
(60, 70), (0, 60)]
Part II. Use the `grading` dictionary you just generated **and indexing** to generate two variables: `A_lower` and `A_upper`.
- `A_lower` will store the first (smaller) value in the tuple corresponding to the `'A'` key in `grading`.
- `A_upper` will store the second (larger) value in the tuple corresponding to the `'A'` key in `grading`
Note: Do not hard-code. For example, `A_lower = 90` is *not* the correct answer. Your code should reference `grading` and use indexing to store the value 90 in `A_lower`.
Checks you can use to see if you're on the right track:
assert A_lower == 90
assert A_upper == 100
**Collections Q5**. Create three variables ***using the dictionary `cogs18_dict` provided*** below and functions discussed in class that will store the variable specified:
1. `dict_a`: stores 'Prof'
2. `dict_b`: stores 'dict' (or, when printed: `<class 'dict'>`)
3. `dict_c`: stores `5`
Variable provided:
```python
cogs18_dict = {'Annie' : 'TA', 'Ashlesha' : 'TA', 'Paul' : 'TA',
'Mudit': 'TA', 'Ellis' : 'Prof'}
```
Note: each variable's line of code must have `cogs18_dict` as part of your code.
Checks you can use to see if you're on the right track:
assert dict_a == 'Prof'
assert dict_b == dict
assert dict_c == 5
## Loops
**Loops Q1**. Store your first name as a string in the variable `my_name` and initialize a `counter` (use that variable name)
Then, write a `for` loop that loops through the letters of your first name, increasing the counter by one for each letter in your name. The value stored in the counter at the end of your loop’s execution should be the number of letters in your first name.
Checks you can use to see if you're on the right track:
assert isinstance(counter, int)
assert isinstance(my_name, str)
assert len(my_name) == counter
**Loops Q2**. Somehow you've got a list of words, but you want them all combined into the single string `"I'm so excited that students are now eligible for the vaccine!"`. Write code that uses `vaccination_list` to accomplish this task.
Variable provided:
```python
vaccination_list = ["I'm ", "so ", "excited ", "that ", "students ", "are ",
"now ", "eligible ", "for ", "the ", "vaccine!"]
```
Checks you can use to see if you're on the right track:
assert sentence == "I'm so excited that students are now eligible for the vaccine!"
## Topic Synthesis
**Synthesis Q1**. Write code that:
1. Creates (initializes) a counter `val` that starts at 0
2. Creates (initializes) an empty list `output`
3. Creates a loop that will run as long as the value of `val` is less than or equal to 100
- Within the loop:
- first: if the value of `val` is divisible by 10, `append` the value of `val` to the list `output`
- then: increase the value of `val` by 1 each time through the loop
Checks you can use to see if you're on the right track:
assert val == 101
assert isinstance(output, list)
assert output == [0, 10, 20, 30, 40, 50, 60, 70, 80, 90, 100] or output == [10, 20, 30, 40, 50, 60, 70, 80, 90, 100]
**Synthesis Q2**. Store your first name as a string in the variable `my_name` and initialize a `counter` (use that variable name).
Then, write a `for` loop that loops through the letters of your first name, increasing the counter by one for each consonant (non-vowel) in your name.
The value stored in the counter at the end of your loop’s execution should be the number of consonants in your first name. Be sure that 'B' and 'b' are counted as the same letter in your code.
Checks you can use to see if you're on the right track:
assert isinstance(counter, int)
assert isinstance(my_name, str)
**Synthesis Q3**.
Part I.
Store your first name as a string in the variable `my_name`. Then, write code that will generate a dictionary `name_consonants` that stores each consonant letter in `my_name` as a key in the dictionary, and the number of times each consonant shows up in `my_name` as the letter's value.
Note that this code should run regardless of what string is stored in `my_name`.
For example, name_consonants for 'Shannon' would return:
{'s': 1, 'h': 1, 'n': 3}
Note that if you have no consonants in your name, this code would still run, but would return an empty dictionary.
Checks you can use to see if you're on the right track:
assert isinstance(my_name, str)
assert isinstance(name_consonants, dict)
Part II.
Write code that will loop through the `name_consonants` dictionary created in the first part of this question and store the sum of the values in the dictionary into `consonant_count`. (For the name 'Shannon', `consonant_count` would store 5).
Checks you can use to see if you're on the right track:
assert consonant_count is not None
**Synthesis Q4**. Background: Imagine that you're taking a course with an instructional staff including TAs (graduate students) and IAs (undergraduates who previously took the course). This means that the IAs have previously completed the course final project, and you want to talk to someone who has previously completed the project to get some questions answered. As you have this thought, you suddenly remember you created a list of all the staff members in the course earlier in the quarter when you were practicing with lists. Perfect! You can use this to help you figure out who you should talk to, since you know that all of the IAs previously took this course and completed the project. They will be able to discuss their experience with you!
Variables provided:
```python
staff = ['Anu_IA', 'Bob_IA', 'Boning_IA', 'Bora_IA', 'David_TA', 'Emma_IA',
'Ellis_Prof', 'Frank_IA', 'Mani_IA', 'Harrison_IA', 'Shivani_TA']
```
Use code constructs to extract a list of individuals you could talk to, storing this output in a list `to_contact`.
Checks you can use to see if you're on the right track:
assert to_contact == ['Anu_IA',
'Bob_IA',
'Boning_IA',
'Bora_IA',
'Emma_IA',
'Frank_IA',
'Mani_IA',
'Harrison_IA']
**Synthesis Q5**. Imagine you want to determine the total number of students Professor Ellis has taught in COGS 18 during her first few years as a Professor.
To do so, use the provided dictionary `ellis_courses`. This dictionary stores course names and quarters as keys and the number of students enrolled as values.
Then, using the information in the `ellis_courses` dictionary, determine what code constructs you would need to sum the values for the students who have taken 'cogs18' with Professor Ellis. Store this value in the variable `cogs18_students`.
```python
ellis_courses = {'cogs9_wi19': 326,
'cogs108_sp19': 825,
'cogs18_sp19' : 272,
'cogs9_fa19' : 292,
'cogs18_fa19' : 301,
'cogs108_wi20' : 442,
'cogs18_sp20' : 307,
'cogs108_sp20' : 469,
'cogs18_su20' : 88,
'cogs18_fa20' : 330,
'cogs108_fa20' : 498,
'cogs18_wi21' : 99,
'cogs108_wi21' : 431,
'cogs18_sp21' : 100,
'cogs108_sp21': 311
}
```
Checks you can use to see if you're on the right track:
assert isinstance(cogs18_students, int)
assert cogs18_students == 1497
**Synthesis Q6**. Background: Professor Ellis has made a mess of her to do list (`to_do_list`, provided below). She accidentally combined her to do list with her grocery list and her daily step count from last week. She needs your help to detangle this mess!
Part I.
Your job is to separate `to_do_list` out into three separate lists:
- `steps` - a list of all the steps from last week (you know which values these are because they are integer values)
- `to_do` - a list that contains the to do list items (you know which values these are because they contain 'cogs' in the string)
- `grocery` - a list of all of the grocery list items (you know which values these are because they do NOT contain 'cogs' in the string)
Each of the above lists should contain only the elements from `to_do_list` that match the specified description above. Specifically:
- `steps` should be a list of integers
- `to_do` should be a list of strings
- `grocery` should be a list of strings
The values in `steps`, `to_do`, and `grocery` should appear in the same relative order as in the original list (`to_do_list`).
Note: This should not be hard-coded. Your answer should use code constructs discussed in class. In other words, your code should produce the correct lists, regardless of the specific values in `to_do_list`.
Variable provided:
```python
to_do_list = ['mushrooms', 'peanut butter', 'release cogs18 exam', 10000, 8500,
'release cogs108 lab', 6000, 'tahini', 15000, 'post cogs18 lectures',
'post cogs108 lectures', 'release cogs18 practice exam', 12000, 'pineapple',
'udon noodles', 18000, 8000, 'romaine lettuce']
```
Checks you can use to see if you're on the right track:
assert grocery == ['mushrooms', 'peanut butter',
'tahini', 'pineapple', 'udon noodles',
'romaine lettuce']
assert steps == [10000, 8500, 6000, 15000, 12000, 18000, 8000]
assert to_do == ['release cogs18 exam', 'release cogs108 lab',
'post cogs18 lectures', 'post cogs108 lectures',
'release cogs18 practice exam']
Part II.
Now that you've got the `steps` extracted, Professor Ellis needs a dictionary of which steps she took on which day.
Using the values in your list `steps`, create a dictionary `steps_dict` that stores each value as a different day's steps, starting with 'Monday'. For example, the first value in the `steps` list will be the value for the key 'Monday', the second for 'Tuesday', so on and so forth.
Be sure to spell out the day of the week, using a capital letter for the first letter in the day. The list `days_of_week` has been provided, as it may be helpful in answering the question.
Note: This should not be hard-coded. Your answer should use code constructs discussed in class. In other words, your code should produce the correct `steps_dict` if the specific values in `steps` were to change/differ.
Variable provided (may be helpful):
```python
days_of_week = ['Monday', 'Tuesday', 'Wednesday', 'Thursday',
'Friday', 'Saturday', 'Sunday']
```
Checks you can use to see if you're on the right track:
assert all(isinstance(x, str) for x in list(steps_dict.keys()))
assert all(isinstance(x, int) for x in list(steps_dict.values()))
assert set(list(steps_dict.keys())) == set(days_of_week)
assert sum(steps_dict.values()) == 77500 |
<gh_stars>1-10
import sys
from collections import defaultdict
from operator import attrgetter
from typing import Tuple
from enum import Enum, IntEnum
from src.exceptions import *
from src.helpers import has_enough_mana
def eprint(*args, **kwargs):
    """Print to stderr by default; accepts the same arguments as print().

    Uses setdefault so an explicit ``file=`` keyword overrides the stderr
    default instead of raising ``TypeError`` for a duplicate keyword, as the
    previous ``print(*args, file=sys.stderr, **kwargs)`` form did.
    """
    kwargs.setdefault("file", sys.stderr)
    print(*args, **kwargs)
class Phase(IntEnum):
    """High-level game phase."""
    DRAFT = 0
    BATTLE = 1
    ENDED = 2
class PlayerOrder(IntEnum):
    """Turn order of the two players."""
    FIRST = 0
    SECOND = 1

    def opposing(self):
        """Return the other player's order."""
        return PlayerOrder(1 - int(self))
class Lane(IntEnum):
    """Index of a board lane."""
    LEFT = 0
    RIGHT = 1
class ActionType(Enum):
    """Kinds of player actions; values match the textual protocol."""
    PICK = "PICK"
    SUMMON = "SUMMON"
    ATTACK = "ATTACK"
    USE = "USE"
    PASS = "PASS"
class Location(IntEnum):
    """Card zones; board values encode owner (tens digit) and lane (units)."""
    PLAYER_HAND = 0
    ENEMY_HAND = 1
    PLAYER_BOARD = 10
    PLAYER_LEFT_LANE = 10  # alias of PLAYER_BOARD (same value)
    PLAYER_RIGHT_LANE = 11
    ENEMY_BOARD = 20
    ENEMY_LEFT_LANE = 20  # alias of ENEMY_BOARD (same value)
    ENEMY_RIGHT_LANE = 21
class Player:
    """Mutable per-player state: health, mana, runes, deck/hand/lanes."""
    def __init__(self, player_id):
        self.id = player_id
        self.health = 30
        self.base_mana = 0
        self.bonus_mana = 0
        self.mana = 0
        # Next rune threshold; dropping to or below it grants bonus draws.
        self.next_rune = 25
        self.bonus_draw = 0
        self.last_drawn = 0
        self.deck = []
        self.hand = []
        # One list of creatures per lane (indexable by Lane).
        self.lanes = ([], [])
        self.actions = []
    def draw(self, amount: int = 1):
        """Move `amount` cards from deck to hand.

        Raises EmptyDeckError (carrying the undrawn remainder) when the deck
        runs out, or FullHandError once the hand already holds 8 cards.
        """
        for i in range(amount):
            if len(self.deck) == 0:
                raise EmptyDeckError(amount - i)
            if len(self.hand) >= 8:
                raise FullHandError()
            self.hand.append(self.deck.pop())
    def damage(self, amount: int):
        """Apply `amount` damage and return (damage dealt, runes lost).

        Each rune threshold crossed lowers next_rune by 5 and adds one
        bonus draw.
        """
        self.health -= amount
        runes_lost = 0
        while self.health <= self.next_rune and self.next_rune > 0:
            self.next_rune -= 5
            self.bonus_draw += 1
            runes_lost += 1
        return amount, runes_lost
    def clone(self):
        """Return a copy: cards are copied, the actions list is shallow-copied."""
        cloned_player = Player.empty_copy()
        cloned_player.id = self.id
        cloned_player.health = self.health
        cloned_player.base_mana = self.base_mana
        cloned_player.bonus_mana = self.bonus_mana
        cloned_player.mana = self.mana
        cloned_player.next_rune = self.next_rune
        cloned_player.bonus_draw = self.bonus_draw
        cloned_player.last_drawn = self.last_drawn
        cloned_player.deck = [card.make_copy(card.instance_id)
                              for card in self.deck]
        cloned_player.hand = [card.make_copy(card.instance_id)
                              for card in self.hand]
        cloned_player.lanes = tuple([[card.make_copy(card.instance_id)
                                      for card in lane]
                                     for lane in self.lanes])
        cloned_player.actions = list(self.actions)
        return cloned_player
    @staticmethod
    def empty_copy():
        """Create a bare Player instance without running __init__."""
        class Empty(Player):
            def __init__(self):
                pass
        new_copy = Empty()
        new_copy.__class__ = Player
        return new_copy
class Card:
    """Static card data plus a per-match instance identifier."""
    def __init__(self, card_id, name, card_type, cost, attack, defense, keywords,
                 player_hp, enemy_hp, card_draw, text, instance_id=None):
        self.id = card_id
        self.instance_id = instance_id
        self.name = name
        self.type = card_type
        self.cost = cost
        self.attack = attack
        self.defense = defense
        # The keywords string uses '-' as a filler; keep only the letters.
        self.keywords = set(list(keywords.replace("-", "")))
        self.player_hp = player_hp
        self.enemy_hp = enemy_hp
        self.card_draw = card_draw
        self.text = text
    def has_ability(self, keyword: str) -> bool:
        """Return True if this card has the single-letter ability `keyword`."""
        return keyword in self.keywords
    def make_copy(self, instance_id=None) -> 'Card':
        """Return a field-by-field copy, optionally with a new instance id."""
        cloned_card = Card.empty_copy(self)
        cloned_card.id = self.id
        cloned_card.name = self.name
        cloned_card.type = self.type
        cloned_card.cost = self.cost
        cloned_card.attack = self.attack
        cloned_card.defense = self.defense
        cloned_card.keywords = set(self.keywords)
        cloned_card.player_hp = self.player_hp
        cloned_card.enemy_hp = self.enemy_hp
        cloned_card.card_draw = self.card_draw
        cloned_card.text = self.text
        if instance_id is not None:
            cloned_card.instance_id = instance_id
        else:
            cloned_card.instance_id = None
        return cloned_card
    def __eq__(self, other):
        # Equality requires both sides to carry a concrete instance id.
        # NOTE(review): __eq__ without __hash__ makes instances unhashable;
        # confirm cards are never used in sets or as dict keys.
        return other is not None \
            and self.instance_id is not None \
            and other.instance_id is not None \
            and self.instance_id == other.instance_id
    def __repr__(self):
        if self.name:
            return f"({self.instance_id}: {self.name})"
        else:
            return f"({self.instance_id})"
    @staticmethod
    def empty_copy(card):
        """Create a bare instance of `card`'s class without running __init__."""
        class Empty(Card):
            def __init__(self):
                pass
        new_copy = Empty()
        new_copy.__class__ = type(card)
        return new_copy
class Creature(Card):
    """A creature card with its on-board combat state."""
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.is_dead = False
        # Starts False: a fresh creature cannot attack unless it has 'C'
        # (see able_to_attack below).
        self.can_attack = False
        self.has_attacked_this_turn = False
        self.summon_counter = None
    def remove_ability(self, ability: str):
        """Remove a single-letter ability; no-op if it is absent."""
        self.keywords.discard(ability)
    def add_ability(self, ability: str):
        """Grant a single-letter ability."""
        self.keywords.add(ability)
    def able_to_attack(self) -> bool:
        """True if it has not attacked yet and is ready or has the 'C' ability."""
        return not self.has_attacked_this_turn and \
            (self.can_attack or self.has_ability('C'))
    def damage(self, amount: int = 1, lethal: bool = False) -> int:
        """Deal `amount` damage and return the damage dealt.

        A 'W' (ward) ability absorbs the hit: the ward is consumed and
        WardShieldError is raised instead of reducing defense.
        """
        if amount <= 0:
            return 0
        if self.has_ability('W'):
            self.remove_ability('W')
            raise WardShieldError()
        self.defense -= amount
        if lethal or self.defense <= 0:
            self.is_dead = True
        return amount
    def make_copy(self, instance_id=None) -> 'Card':
        """Copy the card fields plus this creature's combat state."""
        cloned_card = super().make_copy(instance_id)
        cloned_card.summon_counter = self.summon_counter
        cloned_card.is_dead = self.is_dead
        cloned_card.can_attack = self.can_attack
        cloned_card.has_attacked_this_turn = self.has_attacked_this_turn
        return cloned_card
class Item(Card):
    """Base class for item cards."""
    pass
class GreenItem(Item):
    """Item used on a friendly creature (see State.available_actions)."""
    pass
class RedItem(Item):
    """Item used on an enemy creature."""
    pass
class BlueItem(Item):
    """Item used on an enemy creature or, with target None, the enemy player."""
    pass
class Action:
    """A player action: (type, origin card/lane id, optional target id)."""
    def __init__(self, action_type, origin=None, target=None):
        self.type = action_type
        self.origin = origin
        self.target = target
    def __eq__(self, other):
        return other is not None and \
            self.type == other.type and \
            self.origin == other.origin and \
            self.target == other.target
    def __repr__(self):
        return f"{self.type} {self.origin} {self.target}"
    def __hash__(self):
        # Hash the action type via its value's first two letters -- enough
        # to distinguish PICK/PASS/SUMMON/ATTACK/USE.
        return hash((ord(self.type.value[0]), ord(self.type.value[1]),
                     self.origin, self.target))
    def to_native(self):
        """Render this action in the engine's textual command format."""
        if self.type == ActionType.PASS:
            return "PASS"
        elif self.type == ActionType.SUMMON:
            # SUMMON targets a Lane, emitted as its integer index.
            return f"SUMMON {self.origin} {int(self.target)}"
        else:
            # -1 denotes "no target" (e.g. attacking/using on the player).
            target = self.target if self.target is not None else -1
            return f"{self.type.value} {self.origin} {target}"
class State:
    def __init__(self):
        # Monotonic counters used to assign ids to card instances/summons.
        self.instance_counter = 0
        self.summon_counter = 0
        self.phase = Phase.BATTLE
        self.turn = 1
        self.was_last_action_invalid = False
        self.players = (Player(PlayerOrder.FIRST), Player(PlayerOrder.SECOND))
        self._current_player = PlayerOrder.FIRST
        # Cache for the available_actions property; reset after each action.
        self.__available_actions = None
        self.history = []
        self.winner = None
    @property
    def current_player(self) -> Player:
        """The player whose turn it is."""
        return self.players[self._current_player]
    @property
    def opposing_player(self) -> Player:
        """The player who is not currently acting."""
        return self.players[(int(self._current_player) + 1) % 2]
    @property
    def available_actions(self) -> Tuple[Action]:
        """All legal actions for the current player (cached until the next act)."""
        if self.__available_actions is not None:
            return self.__available_actions
        if self.phase == Phase.ENDED:
            self.__available_actions = ()
        else:
            summon, attack, use = [], [], []
            c_hand = self.current_player.hand
            c_lanes = self.current_player.lanes
            o_lanes = self.opposing_player.lanes
            # Only cards the current mana can pay for are playable.
            for card in filter(has_enough_mana(self.current_player.mana), c_hand):
                origin = card.instance_id
                if isinstance(card, Creature):
                    # Creatures go to any lane holding fewer than 3 creatures.
                    for lane in Lane:
                        if len(c_lanes[lane]) < 3:
                            summon.append(Action(ActionType.SUMMON, origin, lane))
                elif isinstance(card, GreenItem):
                    # Green items target friendly creatures.
                    for lane in Lane:
                        for friendly_creature in c_lanes[lane]:
                            target = friendly_creature.instance_id
                            use.append(Action(ActionType.USE, origin, target))
                elif isinstance(card, RedItem):
                    # Red items target enemy creatures.
                    for lane in Lane:
                        for enemy_creature in o_lanes[lane]:
                            target = enemy_creature.instance_id
                            use.append(Action(ActionType.USE, origin, target))
                elif isinstance(card, BlueItem):
                    # Blue items target enemy creatures, or the enemy player
                    # directly (target=None).
                    for lane in Lane:
                        for enemy_creature in o_lanes[lane]:
                            target = enemy_creature.instance_id
                            use.append(Action(ActionType.USE, origin, target))
                    use.append(Action(ActionType.USE, origin, None))
            for lane in Lane:
                # Guard ('G') creatures must be attacked first; without any,
                # every enemy creature plus the enemy player (None) is valid.
                guard_creatures = []
                for enemy_creature in o_lanes[lane]:
                    if enemy_creature.has_ability('G'):
                        guard_creatures.append(enemy_creature)
                if not guard_creatures:
                    valid_targets = o_lanes[lane] + [None]
                else:
                    valid_targets = guard_creatures
                for friendly_creature in filter(Creature.able_to_attack,
                                                c_lanes[lane]):
                    origin = friendly_creature.instance_id
                    for valid_target in valid_targets:
                        if valid_target is not None:
                            valid_target = valid_target.instance_id
                        attack.append(Action(ActionType.ATTACK, origin, valid_target))
            available_actions = summon + attack + use
            self.__available_actions = tuple(available_actions)
        return self.__available_actions
def act(self, action: Action):
self.was_last_action_invalid = False
if action.type == ActionType.PASS:
self._next_turn()
self._new_battle_turn()
else:
self._act_on_battle(action)
self.__available_actions = None
def undo(self):
changes = self.history.pop()
try:
for target, damage in changes["damage_dealt"]:
if isinstance(target, Player):
target.health += damage
elif isinstance(target, Creature):
target.defense += damage
except ValueError:
pass
try:
for target, hand, lane in changes["placed"]:
lane.remove(target)
hand.append(target)
target.summon_counter = None
self.summon_counter -= 1
except ValueError:
pass
try:
for target, mana in changes["mana_spent"]:
target.mana += mana
except ValueError:
pass
try:
for target, bonus_draw in changes["bonus_draw"]:
target.bonus_draw -= bonus_draw
except ValueError:
pass
try:
for target, stat, change in changes["stat_change"]:
if stat == "attack":
target.attack -= change
elif stat == "defense":
target.defense -= change
elif stat == "ability+":
target.keywords = target.keywords.difference(change)
elif stat == "ability-":
target.keywords = target.keywords.union(change)
except ValueError:
pass
try:
for card, origin in changes["destroyed"]:
origin.append(card)
if isinstance(card, Creature):
card.is_dead = False
except ValueError:
pass
try:
for player, runes_lost in changes["runes_lost"]:
player.next_rune += 5 * runes_lost
player.bonus_draw -= 1 * runes_lost
except ValueError:
pass
try:
for creature in changes["attacked"]:
creature.has_attacked_this_turn = False
except ValueError:
pass
def undo_all(self):
while self.history:
self.undo()
def _next_instance_id(self):
self.instance_counter += 1
return self.instance_counter
def _next_turn(self) -> bool:
if self._current_player == PlayerOrder.FIRST:
self._current_player = PlayerOrder.SECOND
return False
else:
self._current_player = PlayerOrder.FIRST
self.turn += 1
return True
def _new_battle_turn(self):
"""Initialize a battle turn"""
current_player = self.current_player
for creature in current_player.lanes[Lane.LEFT]:
creature.can_attack = True
creature.has_attacked_this_turn = False
for creature in current_player.lanes[Lane.RIGHT]:
creature.can_attack = True
creature.has_attacked_this_turn = False
if current_player.base_mana > 0 and current_player.mana == 0:
current_player.bonus_mana = 0
if current_player.base_mana < 12:
current_player.base_mana += 1
current_player.mana = current_player.base_mana \
+ current_player.bonus_mana
amount_to_draw = 1 + current_player.bonus_draw
if self.turn > 50:
current_player.deck = []
try:
current_player.draw(amount_to_draw)
except FullHandError:
pass
except EmptyDeckError as e:
for _ in range(e.remaining_draws):
deck_burn = current_player.health - current_player.next_rune
current_player.damage(deck_burn)
current_player.bonus_draw = 0
current_player.last_drawn = amount_to_draw
def _find_card(self, instance_id: int) -> Card:
c, o = self.current_player, self.opposing_player
location_mapping = {
Location.PLAYER_HAND: c.hand,
Location.ENEMY_HAND: o.hand,
Location.PLAYER_LEFT_LANE: c.lanes[0],
Location.PLAYER_RIGHT_LANE: c.lanes[1],
Location.ENEMY_LEFT_LANE: o.lanes[0],
Location.ENEMY_RIGHT_LANE: o.lanes[1]
}
for location, cards in location_mapping.items():
for card in cards:
if card.instance_id == instance_id:
return card
raise InvalidCardError(instance_id)
def _act_on_draft(self, action: Action):
"""Execute the action intended by the player in this draft turn"""
chosen_index = action.origin if action.origin is not None else 0
card = self.current_player.hand[chosen_index]
self.current_player.deck.append(card)
def _act_on_battle(self, action: Action):
"""Execute the actions intended by the player in this battle turn"""
try:
origin, target = action.origin, action.target
if isinstance(action.origin, int):
origin = self._find_card(origin)
if action.type == ActionType.SUMMON:
if isinstance(action.target, int):
target = Lane(target)
changes = self._do_summon(origin, target)
elif action.type == ActionType.ATTACK:
if isinstance(action.target, int):
target = self._find_card(target)
changes = self._do_attack(origin, target)
elif action.type == ActionType.USE:
if isinstance(action.target, int):
target = self._find_card(target)
changes = self._do_use(origin, target)
else:
raise MalformedActionError("Invalid action type")
action.resolved_origin = origin
action.resolved_target = target
self.current_player.actions.append(action)
except (NotEnoughManaError, MalformedActionError,
FullLaneError, InvalidCardError):
self.was_last_action_invalid = True
return
for player in self.players:
for lane in player.lanes:
for creature in lane:
if creature.is_dead:
lane.remove(creature)
changes["destroyed"].append((creature, lane))
if self.players[PlayerOrder.FIRST].health <= 0:
self.phase = Phase.ENDED
self.winner = PlayerOrder.SECOND
elif self.players[PlayerOrder.SECOND].health <= 0:
self.phase = Phase.ENDED
self.winner = PlayerOrder.FIRST
self.history.append(changes)
def _do_summon(self, origin, target):
changes = defaultdict(list)
current_player = self.current_player
opposing_player = self.opposing_player
current_player.hand.remove(origin)
origin.can_attack = False
origin.summon_counter = self.summon_counter
self.summon_counter += 1
current_player.lanes[target].append(origin)
changes["placed"].append((origin, current_player.hand, current_player.lanes[target]))
current_player.bonus_draw += origin.card_draw
changes["bonus_draw"].append((current_player, origin.card_draw))
damage_received, runes_lost = current_player.damage(-origin.player_hp)
changes["damage_dealt"].append((current_player, -origin.player_hp))
changes["runes_lost"].append((current_player, runes_lost))
damage_dealt, runes_lost = opposing_player.damage(-origin.enemy_hp)
changes["damage_dealt"].append((opposing_player, -origin.enemy_hp))
changes["runes_lost"].append((opposing_player, runes_lost))
current_player.mana -= origin.cost
changes["mana_spent"].append((current_player, origin.cost))
return changes
def _do_attack(self, origin, target):
changes = defaultdict(list)
current_player = self.current_player
opposing_player = self.opposing_player
if target is None:
damage_dealt, runes_lost = opposing_player.damage(origin.attack)
changes["damage_dealt"].append((opposing_player, damage_dealt))
changes["runes_lost"].append((opposing_player, runes_lost))
elif isinstance(target, Creature):
target_defense = target.defense
try:
damage_dealt = target.damage(
origin.attack,
lethal=origin.has_ability('L'))
changes["damage_dealt"].append((target, damage_dealt))
except WardShieldError:
damage_dealt = 0
changes["stat_change"].append((target, "ability-", {'W'}))
try:
damage_received = origin.damage(
target.attack,
lethal=target.has_ability('L'))
changes["damage_dealt"].append((origin, damage_received))
except WardShieldError:
changes["stat_change"].append((origin, "ability-", {'W'}))
excess_damage = damage_dealt - target_defense
if 'B' in origin.keywords and excess_damage > 0:
_, runes_lost = opposing_player.damage(excess_damage)
changes["damage_dealt"].append((opposing_player, excess_damage))
changes["runes_lost"].append((opposing_player, runes_lost))
else:
raise MalformedActionError("Target is not a creature or "
"a player")
if 'D' in origin.keywords:
current_player.health += damage_dealt
changes["damage_dealt"].append((current_player, -damage_dealt))
origin.has_attacked_this_turn = True
changes["attacked"].append(origin)
return changes
def _do_use(self, origin, target):
changes = defaultdict(list)
current_player = self.current_player
opposing_player = self.opposing_player
current_player.hand.remove(origin)
changes["destroyed"].append((origin, current_player.hand))
if isinstance(origin, GreenItem):
new_attack = max(0, target.attack + origin.attack)
keyword_gain = origin.keywords.difference(target.keywords)
changes["stat_change"].append((target, "attack", new_attack - target.attack))
changes["stat_change"].append((target, "defense", origin.defense))
changes["stat_change"].append((target, "ability+", keyword_gain))
target.attack = new_attack
target.defense += origin.defense
target.keywords = target.keywords.union(origin.keywords)
if target.defense <= 0:
target.is_dead = True
current_player.bonus_draw += origin.card_draw
changes["bonus_draw"].append((current_player, origin.card_draw))
_, runes_lost = current_player.damage(-origin.player_hp)
changes["damage_dealt"].append((current_player, -origin.player_hp))
changes["runes_lost"].append((current_player, runes_lost))
_, runes_lost = opposing_player.damage(-origin.enemy_hp)
changes["damage_dealt"].append((opposing_player, -origin.enemy_hp))
changes["runes_lost"].append((opposing_player, runes_lost))
elif isinstance(origin, RedItem):
new_attack = max(0, target.attack + origin.attack)
keyword_loss = origin.keywords.intersection(target.keywords)
changes["stat_change"].append((target, "attack", new_attack - target.attack))
changes["stat_change"].append((target, "ability-", keyword_loss))
target.attack = new_attack
target.keywords = target.keywords.difference(origin.keywords)
try:
damage_dealt = target.damage(-origin.defense)
changes["damage_dealt"].append((target, damage_dealt))
except WardShieldError:
changes["stat_change"].append((target, "ability-", {'W'}))
if target.defense <= 0:
target.is_dead = True
current_player.bonus_draw += origin.card_draw
changes["bonus_draw"].append((current_player, origin.card_draw))
_, runes_lost = current_player.damage(-origin.player_hp)
changes["damage_dealt"].append((current_player, -origin.player_hp))
changes["runes_lost"].append((current_player, runes_lost))
_, runes_lost = opposing_player.damage(-origin.enemy_hp)
changes["damage_dealt"].append((opposing_player, -origin.enemy_hp))
changes["runes_lost"].append((opposing_player, runes_lost))
elif isinstance(origin, BlueItem):
if isinstance(target, Creature):
new_attack = max(0, target.attack + origin.attack)
keyword_loss = origin.keywords.intersection(target.keywords)
changes["stat_change"].append((target, "attack", new_attack - target.attack))
changes["stat_change"].append((target, "ability-", keyword_loss))
target.attack = new_attack
target.keywords = target.keywords.difference(origin.keywords)
try:
damage_dealt = target.damage(-origin.defense)
changes["damage_dealt"].append((target, damage_dealt))
except WardShieldError:
changes["stat_change"].append((target, "ability-", {'W'}))
if target.defense <= 0:
target.is_dead = True
elif target is None:
damage_dealt, runes_lost = opposing_player.damage(-origin.defense)
changes["damage_dealt"].append((opposing_player, damage_dealt))
changes["runes_lost"].append((opposing_player, runes_lost))
else:
raise MalformedActionError("Invalid target")
current_player.bonus_draw += origin.card_draw
changes["bonus_draw"].append((current_player, origin.card_draw))
_, runes_lost = current_player.damage(-origin.player_hp)
changes["damage_dealt"].append((current_player, -origin.player_hp))
changes["runes_lost"].append((current_player, runes_lost))
_, runes_lost = opposing_player.damage(-origin.enemy_hp)
changes["damage_dealt"].append((opposing_player, -origin.enemy_hp))
changes["runes_lost"].append((opposing_player, runes_lost))
else:
error = "Card being used is not an item"
raise MalformedActionError(error)
current_player.mana -= origin.cost
changes["mana_spent"].append((current_player, origin.cost))
return changes
def clone(self) -> 'State':
cloned_state = State.empty_copy()
cloned_state.history = self.history
cloned_state.instance_counter = self.instance_counter
cloned_state.summon_counter = self.summon_counter
cloned_state.phase = self.phase
cloned_state.turn = self.turn
cloned_state._current_player = self._current_player
cloned_state.__available_actions = self.__available_actions
cloned_state.winner = self.winner
cloned_state.players = tuple([player.clone() for player in self.players])
return cloned_state
# return pickle.loads(pickle.dumps(self, -1))
@staticmethod
def from_native_input(str_state):
if isinstance(str_state, str):
str_state = str_state.split("\n")
state = State()
state.instance_counter = 1000
p, o = state.current_player, state.opposing_player
player_info = map(int, str_state[0].split())
opp_info = map(int, str_state[1].split())
p.health, p.mana, player_deck, p.next_rune, _ = player_info
o.health, o.mana, opp_deck, o.next_rune, o.bonus_draw = opp_info
o.bonus_draw -= 1
opp_hand, opp_actions = map(int, str_state[2].split())
def empty_card():
iid = state.instance_counter
state.instance_counter += 1
return Card(-1, "", 0, 99, 0, 0, "------", 0, 0, 0, "", iid)
p.deck = [empty_card() for _ in range(player_deck)]
o.deck = [empty_card() for _ in range(opp_deck)]
p.hand = []
o.hand = [empty_card() for _ in range(opp_hand)]
state.phase = Phase.DRAFT if p.mana == 0 else Phase.BATTLE
cards = str_state[4 + opp_actions:]
def int_except(value):
try:
return int(value)
except ValueError:
return value
summon_counter = 0
type_class_dict = {0: Creature, 1: GreenItem, 2: RedItem, 3: BlueItem}
for card in cards:
number, instance_id, location, card_type, cost, attack, \
defense, abilities, player_hp, enemy_hp, card_draw, lane \
= map(int_except, card.split())
type_class = type_class_dict[card_type]
card = type_class(number, "", card_type, cost, attack, defense,
abilities, player_hp, enemy_hp, card_draw,
"", instance_id)
if isinstance(card, Creature):
card.summon_counter = summon_counter
summon_counter += 1
card.can_attack = True
if location == 0:
p.hand.append(card)
elif location == 1:
card.can_attack = True
p.lanes[lane].append(card)
elif location == -1:
o.lanes[lane].append(card)
return state
def __str__(self) -> str:
encoding = ""
p, o = self.current_player, self.opposing_player
for cp in p, o:
draw = cp.last_drawn if cp == self.current_player else 1 + cp.bonus_draw
encoding += f"{cp.health} {cp.base_mana + cp.bonus_mana} " \
f"{len(cp.deck)} {cp.next_rune} {draw}\n"
op_hand = len(o.hand) if self.phase != Phase.DRAFT else 0
last_actions = []
for action in reversed(o.actions[:-1]):
if action.type == ActionType.PASS:
break
last_actions.append(action)
encoding += f"{op_hand} {len(last_actions)}\n"
for a in reversed(last_actions):
target_id = -1 if a.target is None else a.target
encoding += f"{a.resolved_origin.id} {a.type.name} " \
f"{a.origin} {target_id}\n"
cards = sorted(p.hand, key=attrgetter('instance_id')) + \
sorted(p.lanes[0] + p.lanes[1], key=attrgetter('summon_counter')) + \
sorted(o.lanes[0] + o.lanes[1], key=attrgetter('summon_counter'))
encoding += f"{len(cards)}\n"
for c in cards:
if c in p.hand:
c.location = 0
c.lane = -1
elif c in p.lanes[0] + p.lanes[1]:
c.location = 1
c.lane = 0 if c in p.lanes[0] else 1
elif c in o.lanes[0] + o.lanes[1]:
c.location = -1
c.lane = 0 if c in o.lanes[0] else 1
if isinstance(c.type, int):
c.cardType = c.type
elif c.type == 'creature':
c.cardType = 0
elif c.type == 'itemGreen':
c.cardType = 1
elif c.type == 'itemRed':
c.cardType = 2
elif c.type == 'itemBlue':
c.cardType = 3
abilities = list('------')
for i, a in enumerate(list('BCDGLW')):
if c.has_ability(a):
abilities[i] = a
c.abilities = "".join(abilities)
c.instance_id = -1 if c.instance_id is None else c.instance_id
for i, c in enumerate(cards):
encoding += f"{c.id} {c.instance_id} {c.location} {c.cardType} " \
f"{c.cost} {c.attack} {c.defense} {c.abilities} " \
f"{c.player_hp} {c.enemy_hp} {c.card_draw} {c.lane} \n"
return encoding
def can_play(self, card):
p, op = self.current_player, self.opposing_player
if card.cost > p.mana:
return False
if isinstance(card, Creature):
return sum(map(len, p.lanes)) < 6
elif isinstance(card, GreenItem):
return sum(map(len, p.lanes)) > 0
elif isinstance(card, RedItem):
return sum(map(len, op.lanes)) > 0
else:
return True
def is_draft(self):
return self.phase == Phase.DRAFT
def is_battle(self):
return self.phase == Phase.BATTLE
def is_ended(self):
return self.phase == Phase.ENDED
def __hash__(self):
return id(self)
@staticmethod
def empty_copy():
class Empty(State):
def __init__(self):
pass
new_copy = Empty()
new_copy.__class__ = State
return new_copy
|
<gh_stars>1-10
'''
Created on Mar 28, 2012
@author: jan
'''
import random
import numpy as np
from scipy.stats import norm, expon, gamma
from scipy.spatial.distance import squareform
def gaussian_influence(mu, width):
    """Return a 2-D Gaussian influence function centred at *mu*.

    The returned callable maps (x, y) to exp(-width * squared distance
    from (x, y) to mu).
    """
    def influence(x, y):
        sq_dist = (x - mu[0]) ** 2 + (y - mu[1]) ** 2
        return np.exp(-width * sq_dist)
    return influence
def correlated_samples(cov, num_sampl, marginal_dist):
    """Draw correlated samples using a Gaussian copula.

    cov: covariance matrix of the copula
    num_sampl: number of samples to draw
    marginal_dist: frozen distribution supplying the marginals (via ppf)
    """
    # sample the dependence structure from a zero-mean multivariate normal
    zero_mean = [0] * cov.shape[0]
    gauss = np.random.multivariate_normal(zero_mean, cov, num_sampl)
    # squash each margin to [0, 1] through the standard normal CDF ...
    uniform_dependence = norm().cdf(gauss)
    # ... then map through the target marginal's inverse CDF
    return marginal_dist.ppf(uniform_dependence)
def group_covmtx(rho_intra, rho_inter, num_groups, num_objects):
    """Build a block-structured covariance matrix.

    The num_groups * num_objects variables fall into groups of
    num_objects; pairs within a group have covariance rho_intra, pairs
    across groups have covariance rho_inter, and the diagonal is 1.
    """
    total = num_groups * num_objects
    # within-group block: ones on the diagonal, rho_intra off-diagonal
    n_pairs = int((num_objects ** 2 - num_objects) / 2)
    intra_block = 1 - squareform([1 - rho_intra] * n_pairs)
    cov = rho_inter * np.ones((total, total))
    for g in range(num_groups):
        lo = g * num_objects
        hi = lo + num_objects
        cov[lo:hi, lo:hi] = intra_block
    return cov
def adjusted_gamma(mean, var):
    """Return a frozen gamma distribution with the given mean and variance."""
    # for a gamma distribution: mean = shape * scale, var = shape * scale**2
    scale = var / mean
    shape = mean / scale
    # NOTE(review): the warning fires for shape > 1, which looks inverted
    # (shape <= 1 is usually the degenerate regime) — confirm the intended
    # condition before changing it.
    if shape > 1:
        print('!!! Warning !!! - shape parameter: ', str(shape))
    return gamma(shape, scale=scale)
def crosscor(a1, a2):
    """Cross-correlation between the rows of two matrices.

    Entry [i, j] is the correlation of row i of a2 with row j of a1.
    """
    n = a1.shape[0]
    full = np.corrcoef(np.vstack((a1, a2)))
    # lower-left quadrant of the joint correlation matrix
    return full[n:, :n]
class Dataset():
    """Surrogate dataset of sources and their mixed observations

    Keyword arguments:
    param : dictionary of parameters
        shape: tuple with spatial extent of observed area in pixel
        gridpoints: number of sources in one dimension
        width: width of spatial influence
        latents: number of sources
        covgroups: number of correlated source groups
        cov: correlation of activation within a source group
        mean: mean source activation
        var: source activation variance
        no_samples: number of independent observations (stimuli)
        act_time: model time course of activation
        noisevar: sigma of gaussian pixel noise
    """

    def __init__(self, param):
        # create spatial sources
        num_grid = param.get('gridpoints', 9)
        pixel = np.indices(param['shape'])
        p_dist = param['shape'][0] / num_grid
        # regular grid of candidate centres, shuffled so the first
        # `latents` picks become random grid positions
        self.points = np.indices((num_grid, num_grid)) * p_dist + p_dist
        self.points = list(zip(self.points[0].flatten(), self.points[1].flatten()))
        random.shuffle(self.points)
        components = [gaussian_influence(mu, param['width'])(pixel[0], pixel[1])
                      for mu in self.points[:param['latents']]]
        self.spt_sources = np.array([i.flatten() for i in components])
        # generate activation timecourses
        covgroups = param.get('covgroups', 4)
        self.cov = group_covmtx(param['cov'], 0.1, covgroups, int(param['latents'] / covgroups))
        marginal_dist = adjusted_gamma(param['mean'], param['var'])
        self.activ_pre = correlated_samples(self.cov, param['no_samples'],
                                            marginal_dist).T
        # ppf can yield NaN for extreme quantiles; zero them out
        self.activ_pre[np.isnan(self.activ_pre)] = 0
        # fold with single stim timecourse
        # NOTE(review): if param['act_time'] is falsy, self.activation is
        # never assigned and the np.dot below raises AttributeError —
        # confirm that 'act_time' is effectively mandatory.
        if param['act_time']:
            self.activation = np.vstack([np.outer(i, param['act_time']).flatten()
                                         for i in self.activ_pre]).T
        self.observed_raw = np.dot(self.activation, self.spt_sources)
        # add noise
        noise = param['noisevar'] * np.random.randn(*self.observed_raw.shape)
        self.observed = self.observed_raw.copy() + noise

    def cor2source(self, estimator):
        """match sources to their best estimate and calc correlation

        each source is matched to the estimator it exhibits the highest
        spatial correlation with

        Parameters
        ----------
        estimator: ImageAnalysisComponents.TimeSeries

        Returns
        -------
        matchid: numpy.array
            i-th entry contains index of estimator matched to i-th source
        st_cor: numpy.array
            i-th entry contains temporal correlation of estimator for i-th source
        sp_cor: numpy.array
            i-th entry contains spatial correlation of estimator for i-th source
        """
        # temporal correlation with all sources
        tmp_cor = crosscor(self.activation.T, estimator._series.T)
        # spatial correlations with all sources
        sp_cor = crosscor(self.spt_sources, estimator.base._series)
        matchid = np.nanargmax(np.abs(sp_cor), 0)
        # temporal correlation at best spatial correlation
        st_cor = np.abs(tmp_cor[matchid, range(self.spt_sources.shape[0])])
        return matchid, st_cor, sp_cor

    def mse2source(self, estimator, local=0):
        """match sources to their best estimate and calc mean squared error (MSE)

        each source is matched to the estimator it exhibits the lowest MSE with

        Parameters
        ----------
        estimator: ImageAnalysisComponents.TimeSeries
        local: float, optional
            if nonzero, restrict the comparison to pixels where the
            source's spatial influence exceeds this threshold

        Returns
        -------
        mse: numpy.array
            i-th entry contains MSE of estimator matched to i-th source
        """
        best_mse = []
        for source_ind in range(self.activation.shape[1]):
            mf_pixelpart = estimator.base._series
            data_pixelpart = self.spt_sources[source_ind]
            if local:
                mask = self.spt_sources[source_ind] > local
                mf_pixelpart = mf_pixelpart[:, mask]
                data_pixelpart = data_pixelpart[mask]
            # rank-1 reconstruction of this source over time and space
            source = np.outer(self.activation[:, source_ind], data_pixelpart)
            source_norm = np.linalg.norm(source)
            mse_components = []
            for e_ind in range(estimator.num_objects):
                estimate = np.outer(estimator._series[:, e_ind], mf_pixelpart[e_ind])
                mse = np.linalg.norm(source - estimate) / source_norm
                mse_components.append(mse)
            best_mse.append(np.min(mse_components))
        return best_mse
|
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: common_rpc.proto
# NOTE(review): machine-generated module using the legacy protobuf codegen
# API (explicit Descriptor construction, py2/py3 `_b` shim below);
# regenerate from common_rpc.proto rather than editing by hand.

import sys
_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
from google.protobuf import descriptor_pb2
# @@protoc_insertion_point(imports)

_sym_db = _symbol_database.Default()


from kik_unofficial.protobuf.google.protobuf import descriptor_pb2 as google_dot_protobuf_dot_descriptor__pb2
import kik_unofficial.protobuf.protobuf_validation_pb2 as protobuf__validation__pb2
import kik_unofficial.protobuf.common_model_pb2 as common__model__pb2
import kik_unofficial.protobuf.kik_options_pb2 as kik__options__pb2


DESCRIPTOR = _descriptor.FileDescriptor(
  name='common_rpc.proto',
  package='common',
  syntax='proto3',
  serialized_pb=_b('\n\x10\x63ommon_rpc.proto\x12\x06\x63ommon\x1a google/protobuf/descriptor.proto\x1a\x19protobuf_validation.proto\x1a\x12\x63ommon_model.proto\x1a\x11kik_options.proto\"\r\n\x0bVoidRequest\"\x0e\n\x0cVoidResponse\"1\n\x0bXiRequestId\x12\"\n\x02id\x18\x01 \x01(\x0b\x32\x0e.common.XiUuidB\x06\xca\x9d%\x02\x08\x01\"*\n\x0eXiRoutingToken\x12\x18\n\x05token\x18\x01 \x01(\tB\t\xca\x9d%\x05(\x01\x30\x80\x01\"\x85\x01\n\x15SelfDescribingMessage\x12@\n\x14\x66ield_descriptor_set\x18\x01 \x01(\x0b\x32\".google.protobuf.FileDescriptorSet\x12\x14\n\x0cmessage_name\x18\x02 \x01(\t\x12\x14\n\x0cmessage_data\x18\x03 \x01(\x0c\x42q\n\x13\x63om.kik.xiphias.rpcB\x0e\x43ommonRpcProtoP\x01ZBgithub.com/kikinteractive/xiphias-model-common/generated/go;common\xaa\xa3*\x02\x10\x01\x62\x06proto3')
  ,
  dependencies=[google_dot_protobuf_dot_descriptor__pb2.DESCRIPTOR,protobuf__validation__pb2.DESCRIPTOR,common__model__pb2.DESCRIPTOR,kik__options__pb2.DESCRIPTOR,])
_sym_db.RegisterFileDescriptor(DESCRIPTOR)




_VOIDREQUEST = _descriptor.Descriptor(
  name='VoidRequest',
  full_name='common.VoidRequest',
  filename=None,
  file=DESCRIPTOR,
  containing_type=None,
  fields=[
  ],
  extensions=[
  ],
  nested_types=[],
  enum_types=[
  ],
  options=None,
  is_extendable=False,
  syntax='proto3',
  extension_ranges=[],
  oneofs=[
  ],
  serialized_start=128,
  serialized_end=141,
)


_VOIDRESPONSE = _descriptor.Descriptor(
  name='VoidResponse',
  full_name='common.VoidResponse',
  filename=None,
  file=DESCRIPTOR,
  containing_type=None,
  fields=[
  ],
  extensions=[
  ],
  nested_types=[],
  enum_types=[
  ],
  options=None,
  is_extendable=False,
  syntax='proto3',
  extension_ranges=[],
  oneofs=[
  ],
  serialized_start=143,
  serialized_end=157,
)


_XIREQUESTID = _descriptor.Descriptor(
  name='XiRequestId',
  full_name='common.XiRequestId',
  filename=None,
  file=DESCRIPTOR,
  containing_type=None,
  fields=[
    _descriptor.FieldDescriptor(
      name='id', full_name='common.XiRequestId.id', index=0,
      number=1, type=11, cpp_type=10, label=1,
      has_default_value=False, default_value=None,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=_descriptor._ParseOptions(descriptor_pb2.FieldOptions(), _b('\312\235%\002\010\001'))),
  ],
  extensions=[
  ],
  nested_types=[],
  enum_types=[
  ],
  options=None,
  is_extendable=False,
  syntax='proto3',
  extension_ranges=[],
  oneofs=[
  ],
  serialized_start=159,
  serialized_end=208,
)


_XIROUTINGTOKEN = _descriptor.Descriptor(
  name='XiRoutingToken',
  full_name='common.XiRoutingToken',
  filename=None,
  file=DESCRIPTOR,
  containing_type=None,
  fields=[
    _descriptor.FieldDescriptor(
      name='token', full_name='common.XiRoutingToken.token', index=0,
      number=1, type=9, cpp_type=9, label=1,
      has_default_value=False, default_value=_b("").decode('utf-8'),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=_descriptor._ParseOptions(descriptor_pb2.FieldOptions(), _b('\312\235%\005(\0010\200\001'))),
  ],
  extensions=[
  ],
  nested_types=[],
  enum_types=[
  ],
  options=None,
  is_extendable=False,
  syntax='proto3',
  extension_ranges=[],
  oneofs=[
  ],
  serialized_start=210,
  serialized_end=252,
)


_SELFDESCRIBINGMESSAGE = _descriptor.Descriptor(
  name='SelfDescribingMessage',
  full_name='common.SelfDescribingMessage',
  filename=None,
  file=DESCRIPTOR,
  containing_type=None,
  fields=[
    _descriptor.FieldDescriptor(
      name='field_descriptor_set', full_name='common.SelfDescribingMessage.field_descriptor_set', index=0,
      number=1, type=11, cpp_type=10, label=1,
      has_default_value=False, default_value=None,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    _descriptor.FieldDescriptor(
      name='message_name', full_name='common.SelfDescribingMessage.message_name', index=1,
      number=2, type=9, cpp_type=9, label=1,
      has_default_value=False, default_value=_b("").decode('utf-8'),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    _descriptor.FieldDescriptor(
      name='message_data', full_name='common.SelfDescribingMessage.message_data', index=2,
      number=3, type=12, cpp_type=9, label=1,
      has_default_value=False, default_value=_b(""),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
  ],
  extensions=[
  ],
  nested_types=[],
  enum_types=[
  ],
  options=None,
  is_extendable=False,
  syntax='proto3',
  extension_ranges=[],
  oneofs=[
  ],
  serialized_start=255,
  serialized_end=388,
)

_XIREQUESTID.fields_by_name['id'].message_type = common__model__pb2._XIUUID
_SELFDESCRIBINGMESSAGE.fields_by_name['field_descriptor_set'].message_type = google_dot_protobuf_dot_descriptor__pb2._FILEDESCRIPTORSET
DESCRIPTOR.message_types_by_name['VoidRequest'] = _VOIDREQUEST
DESCRIPTOR.message_types_by_name['VoidResponse'] = _VOIDRESPONSE
DESCRIPTOR.message_types_by_name['XiRequestId'] = _XIREQUESTID
DESCRIPTOR.message_types_by_name['XiRoutingToken'] = _XIROUTINGTOKEN
DESCRIPTOR.message_types_by_name['SelfDescribingMessage'] = _SELFDESCRIBINGMESSAGE

VoidRequest = _reflection.GeneratedProtocolMessageType('VoidRequest', (_message.Message,), dict(
  DESCRIPTOR = _VOIDREQUEST,
  __module__ = 'common_rpc_pb2'
  # @@protoc_insertion_point(class_scope:common.VoidRequest)
  ))
_sym_db.RegisterMessage(VoidRequest)

VoidResponse = _reflection.GeneratedProtocolMessageType('VoidResponse', (_message.Message,), dict(
  DESCRIPTOR = _VOIDRESPONSE,
  __module__ = 'common_rpc_pb2'
  # @@protoc_insertion_point(class_scope:common.VoidResponse)
  ))
_sym_db.RegisterMessage(VoidResponse)

XiRequestId = _reflection.GeneratedProtocolMessageType('XiRequestId', (_message.Message,), dict(
  DESCRIPTOR = _XIREQUESTID,
  __module__ = 'common_rpc_pb2'
  # @@protoc_insertion_point(class_scope:common.XiRequestId)
  ))
_sym_db.RegisterMessage(XiRequestId)

XiRoutingToken = _reflection.GeneratedProtocolMessageType('XiRoutingToken', (_message.Message,), dict(
  DESCRIPTOR = _XIROUTINGTOKEN,
  __module__ = 'common_rpc_pb2'
  # @@protoc_insertion_point(class_scope:common.XiRoutingToken)
  ))
_sym_db.RegisterMessage(XiRoutingToken)

SelfDescribingMessage = _reflection.GeneratedProtocolMessageType('SelfDescribingMessage', (_message.Message,), dict(
  DESCRIPTOR = _SELFDESCRIBINGMESSAGE,
  __module__ = 'common_rpc_pb2'
  # @@protoc_insertion_point(class_scope:common.SelfDescribingMessage)
  ))
_sym_db.RegisterMessage(SelfDescribingMessage)


DESCRIPTOR.has_options = True
DESCRIPTOR._options = _descriptor._ParseOptions(descriptor_pb2.FileOptions(), _b('\n\023com.kik.xiphias.rpcB\016CommonRpcProtoP\001ZBgithub.com/kikinteractive/xiphias-model-common/generated/go;common\252\243*\002\020\001'))
_XIREQUESTID.fields_by_name['id'].has_options = True
_XIREQUESTID.fields_by_name['id']._options = _descriptor._ParseOptions(descriptor_pb2.FieldOptions(), _b('\312\235%\002\010\001'))
_XIROUTINGTOKEN.fields_by_name['token'].has_options = True
_XIROUTINGTOKEN.fields_by_name['token']._options = _descriptor._ParseOptions(descriptor_pb2.FieldOptions(), _b('\312\235%\005(\0010\200\001'))
# @@protoc_insertion_point(module_scope)
|
import sys
import random
import json
import collections
from twisted.web.static import File
from twisted.python import log
from twisted.web.server import Site
from twisted.internet import reactor
import itertools
import codecs
import time
import os
from typing import Dict, List, Iterable, Any, Union, Optional, Tuple
from autobahn.twisted.websocket import WebSocketServerFactory, \
WebSocketServerProtocol
from autobahn.twisted.resource import WebSocketResource
# clients need to send:
# - on join, a user name and a room name
# - "start round"
# - "next round"
# servers need to send:
# - updated lists of users in the room
# - a round number, a wordlist, and a word
# Wordlists loaded once at startup: key is the filename (minus a trailing
# ".txt"), value is the list of words in that file.
wordlists: Dict[str, List[str]] = {}
version = "v0.5"
wordlist_directory = 'wordlists'
for filename in os.listdir(wordlist_directory):
    # Explicit encoding so word files decode identically on every platform
    # (previously relied on the locale default).
    with open(os.path.join(wordlist_directory, filename), encoding='utf-8') as infile:
        key = filename
        if key.endswith('.txt'):
            key = key[:-4]
        # Strip whitespace and drop blank lines so an empty string can never
        # end up in a round's word pool.
        wordlists[key] = [stripped for line in infile
                          for stripped in [line.strip()] if stripped]
class CastlefallProtocol(WebSocketServerProtocol):
    """Thin per-connection handler: decodes JSON commands, delegates to the factory."""

    def onOpen(self) -> None:
        pass

    def connectionLost(self, reason) -> None:
        # The factory owns all room bookkeeping; just report the departure.
        self.factory.unregister(self)

    def onMessage(self, payload: Union[str, bytes], isBinary: bool) -> None:
        assert isinstance(self.factory, CastlefallFactory)
        text = codecs.decode(payload, 'utf-8') if isinstance(payload, bytes) else payload
        data = json.loads(text)
        # Keys are independent commands; one message may carry several.
        if 'name' in data:
            target_room = data['room']
            player_name = data.get('name')
            print('{}: registering as {}'.format(self.peer, player_name))
            self.factory.register(target_room, player_name, self)
        if 'start' in data:
            start_payload = data['start']
            print('{}: start {}'.format(self.peer, start_payload))
            self.factory.start_round(self, start_payload)
        if 'kick' in data:
            victim = data['kick']
            print('{}: kicking {}'.format(self.peer, victim))
            self.factory.kick(self, victim)
        if 'chat' in data:
            line = data['chat']
            print('{} says: {}'.format(self.peer, line))
            self.factory.chat(self, line)
        if 'broadcastTimer' in data:
            print('{} starts the timer'.format(self.peer))
            self.factory.broadcast_timer(self)
def json_to_bytes(obj: dict) -> bytes:
    """Serialize *obj* as JSON text and return it UTF-8 encoded."""
    text = json.dumps(obj)
    return codecs.encode(text, 'utf-8')
class ClientStatus:
    """Where a connected peer lives: its room, and its player name (None = spectator)."""

    def __init__(self, room: str, name: Optional[str]) -> None:
        self.room = room  # room identifier the peer joined
        self.name = name  # player name, or None when only spectating
class Room:
    """State for one game room: players, spectators, and the current round.

    Bug fix: ``send_spoilers`` used to call ``self.factory.broadcast(...)``,
    but Room never defines ``factory`` and nothing in this file attaches one,
    so every round start died with AttributeError (surfaced to the client as
    an opaque error).  The factory is now looked up defensively and spoilers
    are skipped when it is absent.
    """

    def __init__(self) -> None:
        # name -> player connection; peer id -> spectator connection.
        self.d: Dict[str, 'CastlefallProtocol'] = {}
        self.spectators: Dict[str, 'CastlefallProtocol'] = {}
        self.round = 0
        self.round_starter = ""
        self.last_start = time.time()          # used to debounce round starts
        self.players_in_round: List[str] = []
        self.assigned_words: Dict[str, str] = {}
        self.words: List[str] = []
        # Per-wordlist "deck" of not-yet-dealt words, reshuffled when low.
        self.words_left: Dict[str, List[str]] = collections.defaultdict(list)

    def has_player(self, name: str) -> bool:
        return name in self.d

    def get_player_names(self) -> List[str]:
        return list(sorted(self.d.keys()))

    def get_player_client(self, name: str) -> 'CastlefallProtocol':
        return self.d[name]

    def set_player_client(self, name: str, p: 'CastlefallProtocol') -> None:
        self.d[name] = p

    def get_clients(self) -> Iterable['CastlefallProtocol']:
        """All connections in the room: players first, then spectators."""
        return itertools.chain(self.d.values(), self.spectators.values())

    def add_spectator(self, client: 'CastlefallProtocol') -> None:
        self.spectators[client.peer] = client

    def get_num_spectators(self) -> int:
        return len(self.spectators)

    def delete_spectator(self, client: 'CastlefallProtocol') -> None:
        if client.peer in self.spectators:
            del self.spectators[client.peer]

    def get_named_player_clients(self) -> Iterable[Tuple[str, 'CastlefallProtocol']]:
        return self.d.items()

    def get_named_all_clients(self) -> Iterable[Tuple[Optional[str], 'CastlefallProtocol']]:
        """(name, client) pairs for players, then (None, client) for spectators."""
        return itertools.chain(self.d.items(),
                               zip(itertools.repeat(None), self.spectators.values()))

    def delete_player_client(self, name: str) -> None:
        del self.d[name]

    def clear_assigned_words(self) -> None:
        self.assigned_words = {}

    def get_assigned_word(self, name: str) -> Optional[str]:
        return self.assigned_words.get(name)

    def set_assigned_word(self, name: str, word: str) -> None:
        self.assigned_words[name] = word

    def select_words(self, key: str, num: int) -> List[str]:
        """Deal ``num`` words from wordlist ``key``, reshuffling when the
        deck runs low (undealt leftovers are discarded on reshuffle)."""
        left = self.words_left[key]
        if len(left) < num:
            print('(Re)shuffling words for {} {}'.format(key, num))
            left = list(wordlists[key])
            random.shuffle(left)
        self.words_left[key] = left[num:]
        return left[:num]

    def send_spoilers(self) -> None:
        """Broadcast last round's word assignments, grouped by word.

        Skipped (instead of crashing, as before) when no factory has been
        attached to the room.
        """
        factory = getattr(self, 'factory', None)
        if factory is None:
            return
        by_word: Dict[Optional[str], List[str]] = {}
        for p in self.players_in_round:
            by_word.setdefault(self.get_assigned_word(p), []).append(p)
        for w, ps in by_word.items():
            factory.broadcast(self, {'chat': {
                'name': w,
                'msg': ", ".join(ps),
            }})

    def start_round(self, starter: str, val: dict) -> None:
        """Begin a new round: deal words and split players into two halves.

        Raises
        ------
        Exception
            If the client's round number is stale or the previous start was
            less than two seconds ago (double-click debounce).
        """
        if self.round != val.get('round'):
            raise Exception('Start fail: round out of sync')
        if time.time() < self.last_start + 2:
            raise Exception('Start fail: too soon')
        self.send_spoilers()
        self.round += 1
        self.round_starter = starter
        self.last_start = time.time()
        try:
            wordcount = int(val.get('wordcount', 18))
        except (ValueError, TypeError):
            # TypeError added: a JSON null wordcount used to escape this
            # handler and abort the round start.
            wordcount = 18
        words = self.select_words(val['wordlist'], wordcount)
        named_clients = list(self.get_named_player_clients())
        random.shuffle(named_clients)
        half = len(named_clients) // 2
        # Two distinct secret words; the first `half` players get word1.
        word1, word2 = random.sample(words, 2)
        self.players_in_round = list(self.get_player_names())
        self.clear_assigned_words()
        self.words = words
        print(', '.join(words))
        for i, (name, client) in enumerate(named_clients):
            word = word2 if i >= half else word1
            self.set_assigned_word(name, word)

    def get_words_shuffled(self) -> List[str]:
        """A shuffled copy of this round's word pool (the original is kept)."""
        shuffled = list(self.words)
        random.shuffle(shuffled)
        return shuffled
class CastlefallFactory(WebSocketServerFactory):
    """Owns every room and connected peer; routes all game commands.

    Bug fix: ``name_and_room_playing_in`` returned a bare ``None`` on its
    failure paths even though every caller tuple-unpacks the result
    (``name, room = ...``), so a kick/chat/timer/start from a spectator or
    unknown peer raised TypeError.  It now always returns a 2-tuple.
    """

    def __init__(self, *args, **kwargs):
        super(CastlefallFactory, self).__init__(*args, **kwargs)
        # room -> (name -> client)
        self.rooms: Dict[str, Room] = collections.defaultdict(Room)
        self.status_for_peer: Dict[str, ClientStatus] = {}  # peer -> status

    def players_in_room(self, room: str) -> List[str]:
        """Sorted player names currently registered in *room*."""
        return list(sorted(self.rooms[room].get_player_names()))

    def register(self, room_name: str, name: Optional[str],
                 client: CastlefallProtocol) -> None:
        """Join *client* to a room as a player (name given) or spectator.

        A taken name disconnects (logically) its previous holder: that
        client is sent an error and its status dropped; its room slot is
        simply overwritten.
        """
        room = self.rooms[room_name]
        if name:
            if room.has_player(name):
                old_client = room.get_player_client(name)
                self.send(old_client, {
                    'error': 'Disconnected: your name was taken.',
                })
                del self.status_for_peer[old_client.peer]
                # del room_dict[name] # will get overwritten
            room.set_player_client(name, client)
            self.status_for_peer[client.peer] = ClientStatus(room_name, name)
            self.broadcast(room, {'players': room.get_player_names()})
        else:
            # spectator
            room.add_spectator(client)
            self.status_for_peer[client.peer] = ClientStatus(room_name, None)
            self.broadcast(room, {'spectators': room.get_num_spectators()})
        # Send the newcomer a full snapshot of room state.
        self.send(client, {
            'players': room.get_player_names(),
            'spectators': room.get_num_spectators(),
            'room': room_name,
            'round': room.round,
            'starter': room.round_starter,
            'playersinround': room.players_in_round,
            'words': room.get_words_shuffled(),
            'word': room.get_assigned_word(name) if name else None,
            'wordlists': [[k, len(v)] for k, v in sorted(wordlists.items())],
            'version': version,
        })

    def unregister(self, client: CastlefallProtocol) -> None:
        """Remove a departing client and notify its room."""
        if client.peer in self.status_for_peer:
            status = self.status_for_peer[client.peer]
            del self.status_for_peer[client.peer]
            room = self.rooms[status.room]
            if status.name:
                if room.has_player(status.name):
                    room.delete_player_client(status.name)
                else:
                    print("client's peer had name, but its name wasn't there :(")
            else:
                # spectator
                room.delete_spectator(client)
            self.broadcast(room, {
                'players': room.get_player_names(),
                'spectators': room.get_num_spectators(),
            })

    def kick(self, client: CastlefallProtocol, name: str):
        """Kick the named player out of the kicker's room (no-op if the
        kicker is not playing)."""
        _, room = self.name_and_room_playing_in(client)
        if not room:
            return
        if room.has_player(name):
            # Renamed from `client`, which shadowed the kicker parameter.
            kicked = room.get_player_client(name)
            self.send(kicked, {
                'error': 'Disconnected: you were kicked.',
            })
            room.delete_player_client(name)
            if kicked.peer in self.status_for_peer:
                del self.status_for_peer[kicked.peer]
            else:
                print("name had client, but the peer wasn't there :(")
            self.broadcast(room, {'players': room.get_player_names()})

    def chat(self, client: CastlefallProtocol, chat_message: str):
        """Relay a chat line from a playing client to its whole room."""
        name, room = self.name_and_room_playing_in(client)
        if room:
            self.broadcast(room, {'chat': {
                'name': name,
                'msg': chat_message,
            }})
        else:
            print("client's peer had name, but its name wasn't there :(")

    def broadcast_timer(self, client: CastlefallProtocol):
        """Tell the room that *client* started the discussion timer."""
        name, room = self.name_and_room_playing_in(client)
        if room:
            self.broadcast(room, {'timer': {
                'name': name,
            }})

    def broadcast(self, room: Room, obj: dict) -> None:
        """Send *obj* (JSON-encoded once) to every player and spectator."""
        payload = json_to_bytes(obj)
        for client in room.get_clients():
            client.sendMessage(payload)

    def send(self, client: CastlefallProtocol, obj: dict) -> None:
        client.sendMessage(json_to_bytes(obj))

    def name_and_room_playing_in(
            self,
            client: CastlefallProtocol) -> Tuple[Optional[str], Optional[Room]]:
        """The name and room the client is playing in.

        If the client is not playing in a room, including if the client is
        spectating, return (None, None).  (Previously the failure paths
        returned a bare None, which broke every caller's tuple unpacking.)
        """
        if client.peer in self.status_for_peer:
            status = self.status_for_peer[client.peer]
            name = status.name
            room = self.rooms[status.room]
            if name:
                if room.has_player(name):
                    return name, room
                print("client's peer had name, but its name wasn't there :(")
            # else, is spectating
        return None, None

    def start_round(self, orig_client: CastlefallProtocol, val: dict) -> None:
        """Start a round in the caller's room; report failures back to the
        caller as an 'error' message."""
        client_name, room = self.name_and_room_playing_in(orig_client)
        if room:
            try:
                room.start_round(client_name, val)
                for name, client in room.get_named_all_clients():
                    self.send(client, {
                        'round': room.round,
                        'starter': room.round_starter,
                        'playersinround': room.players_in_round,
                        'words': room.get_words_shuffled(),
                        'word': room.get_assigned_word(name) if name else None,
                    })
            except Exception as e:
                self.send(orig_client, {
                    'error': str(e),
                })
if __name__ == "__main__":
    # Mirror all twisted log output to stdout.
    log.startLogging(sys.stdout)
    # "prod" as the first CLI argument binds by loopback IP instead of
    # "localhost"; both modes use the same ws:// URL port and TCP port 8372.
    if len(sys.argv) > 1 and sys.argv[1] == "prod":
        print("Prod server")
        factory = CastlefallFactory("ws://127.0.0.1:8372")
    else:
        print("Dev server")
        factory = CastlefallFactory("ws://localhost:8372")
    factory.protocol = CastlefallProtocol
    # NOTE(review): `resource` is built but never mounted on a Site/route, and
    # reactor.listenTCP serves the websocket factory directly -- presumably a
    # leftover from an HTTP+WS deployment; confirm before removing.
    resource = WebSocketResource(factory)
    reactor.listenTCP(8372, factory)
    reactor.run()
|
<filename>expyfun/io/_parse.py
# -*- coding: utf-8 -*-
"""File parsing functions
"""
import ast
from collections import OrderedDict
import csv
import json
import numpy as np
def read_tab_raw(fname, return_params=False):
    """Read .tab file from expyfun output without segmenting into trials.

    Parameters
    ----------
    fname : str
        Input filename.
    return_params : bool
        If True, return the JSON-parsed comment header.

    Returns
    -------
    data : list of tuple
        The data with each line from the tab file being a tuple in a list.
        Each tuple is of the form (``timestamp``, ``key``, ``value``).
    params : dict
        The JSON-parsed comment header. Only returned if
        ``return_params=True``.

    See Also
    --------
    read_tab
    """
    with open(fname, 'r') as fid:
        rows = list(csv.reader(fid, delimiter='\t'))
    # Row 0 is a "# {json}" comment header, row 1 the column names.
    assert len(rows[0]) == 1 and rows[0][0].startswith('# ')
    params = (json.loads(rows[0][0][2:], object_pairs_hook=OrderedDict)
              if return_params else None)
    assert rows[1] == ['timestamp', 'event', 'value']
    data = [(float(row[0]), row[1], row[2]) for row in rows[2:]]
    return (data, params) if return_params else data
def read_tab(fname, group_start='trial_id', group_end='trial_ok',
             return_params=False):
    """Read .tab file from expyfun output and segment into trials.

    Parameters
    ----------
    fname : str
        Input filename.
    group_start : str
        Key to use to start a trial/row.
    group_end : str | None
        Key to use to end a trial/row. If None, the next ``group_start``
        will end the current group.
    return_params : bool
        If True, return the JSON-parsed comment header.

    Returns
    -------
    data : list of dict
        The data, with a dict for each trial. Each value in the dict
        is a list of tuples (event, time) for each occurrence of that
        key.
    params : dict
        The JSON-parsed comment header. Only returned if
        ``return_params=True``.

    See Also
    --------
    read_tab_raw
    """
    # load everything into memory for ease of use
    out = read_tab_raw(fname, return_params=return_params)
    lines = out[0] if return_params else out
    # determine the event fields (unique keys, sorted for stable output)
    header = list(set([l[1] for l in lines]))
    header.sort()
    if group_start not in header:
        raise ValueError('group_start "{0}" not in header: {1}'
                         ''.format(group_start, header))
    if group_end == group_start:
        raise ValueError('group_start cannot equal group_end, use '
                         'group_end=None')
    # Move the group-start key to the front of the header...
    header = [header.pop(header.index(group_start))] + header
    # Indices of the lines that begin each trial.
    b1s = np.where([line[1] == group_start for line in lines])[0]
    if group_end is None:
        # Each trial ends where the next begins (or at end of file).
        b2s = np.concatenate((b1s[1:], [len(lines)]))
    else:  # group_end is not None
        if group_end not in header:
            raise ValueError('group_end "{0}" not in header ({1})'
                             ''.format(group_end, header))
        # ...and the group-end key to the back.
        header.append(header.pop(header.index(group_end)))
        b2s = np.where([line[1] == group_end for line in lines])[0]
    # Starts and ends must pair up one-to-one and in order.
    if len(b1s) != len(b2s) or not np.all(b1s < b2s):
        raise RuntimeError('bad bounds in {0}:\n{1}\n{2}'
                           .format(fname, b1s, b2s))
    data = []
    for b1, b2 in zip(b1s, b2s):
        assert lines[b1][1] == group_start  # prevent stupidity
        if group_end is not None:
            b2 = b2 + 1  # include the end
            assert lines[b2 - 1][1] == group_end
        d = dict()
        these_times = [float(line[0]) for line in lines[b1:b2]]
        these_keys = [line[1] for line in lines[b1:b2]]
        these_vals = [line[2] for line in lines[b1:b2]]
        # Collect every (value, time) occurrence of each key in this trial;
        # keys absent from the trial map to empty lists.
        for ki, key in enumerate(header):
            idx = np.where(key == np.array(these_keys))[0]
            d[key] = [(these_vals[ii], these_times[ii]) for ii in idx]
        data.append(d)
    return (data, out[1]) if return_params else data
def reconstruct_tracker(fname):
    """Reconstruct TrackerUD, TrackerBinom, TrackerMHW objects from .tab files.

    Parameters
    ----------
    fname : str
        Input filename.

    Returns
    -------
    tr : list of TrackerUD or TrackerBinom or TrackerMHW
        The tracker objects with all responses such that they are in their
        stopped state (as long as the trackers were allowed to stop during
        the generation of the file.) If only one tracker is found in the file,
        it will still be stored in a list and will be accessible as ``tr[0]``.
    """
    from ..stimuli import TrackerUD, TrackerBinom, TrackerMHW
    # read in raw data
    raw = read_tab_raw(fname)
    # find tracker_identify and make list of IDs
    tracker_idx = np.where([r[1] == 'tracker_identify' for r in raw])[0]
    if len(tracker_idx) == 0:
        raise ValueError('There are no Trackers in this file.')
    tr = []
    # IDs can repeat, so remember which init/stop lines were already consumed
    # and always take the first unused one for each identify line.
    used_dict_idx = []  # they can have repeat names!
    used_stop_idx = []
    for ii in tracker_idx:
        tracker_id = ast.literal_eval(raw[ii][2])['tracker_id']
        tracker_type = ast.literal_eval(raw[ii][2])['tracker_type']
        # find tracker_ID_init lines and get dict
        init_str = 'tracker_' + str(tracker_id) + '_init'
        tracker_dict_idx = np.where([r[1] == init_str for r in raw])[0]
        tracker_dict_idx = np.setdiff1d(tracker_dict_idx, used_dict_idx)
        tracker_dict_idx = tracker_dict_idx[0]
        used_dict_idx.append(tracker_dict_idx)
        tracker_dict = json.loads(raw[tracker_dict_idx][2])
        # Map the logged type name to its class and rebuild the tracker from
        # its original constructor kwargs.
        td = dict(TrackerUD=TrackerUD, TrackerBinom=TrackerBinom,
                  TrackerMHW=TrackerMHW)
        tr.append(td[tracker_type](**tracker_dict))
        tr[-1]._tracker_id = tracker_id  # make sure tracker has original ID
        stop_str = 'tracker_' + str(tracker_id) + '_stop'
        tracker_stop_idx = np.where([r[1] == stop_str for r in raw])[0]
        tracker_stop_idx = np.setdiff1d(tracker_stop_idx, used_stop_idx)
        if len(tracker_stop_idx) == 0:
            raise ValueError('Tracker {} has not stopped. All Trackers '
                             'must be stopped.'.format(tracker_id))
        tracker_stop_idx = tracker_stop_idx[0]
        used_stop_idx.append(tracker_stop_idx)
        responses = json.loads(raw[tracker_stop_idx][2])['responses']
        # feed in responses from tracker_ID_stop: replaying them advances the
        # tracker back into its final (stopped) state.
        for r in responses:
            tr[-1].respond(r)
    return tr
def reconstruct_dealer(fname):
    """Reconstruct TrackerDealer object from .tab files.

    The ``reconstruct_tracker`` function will be called to retrieve the
    trackers.

    Parameters
    ----------
    fname : str
        Input filename.

    Returns
    -------
    dealer : list of TrackerDealer
        The TrackerDealer objects with all responses such that they are in
        their stopped state. If only one dealer is found in the file, it will
        still be stored in a list and will be accessible as ``td[0]``.
    """
    from ..stimuli import TrackerDealer
    raw = read_tab_raw(fname)
    # find info on dealer
    dealer_idx = np.where([r[1] == 'dealer_identify' for r in raw])[0]
    if len(dealer_idx) == 0:
        raise ValueError('There are no TrackerDealers in this file.')
    dealer = []
    for ii in dealer_idx:
        dealer_id = ast.literal_eval(raw[ii][2])['dealer_id']
        dealer_init_str = 'dealer_' + str(dealer_id) + '_init'
        dealer_dict_idx = np.where([r[1] == dealer_init_str
                                    for r in raw])[0][0]
        dealer_dict = ast.literal_eval(raw[dealer_dict_idx][2])
        dealer_trackers = dealer_dict['trackers']
        # match up tracker objects to id (trackers are rebuilt from the same
        # file, then reordered to follow the dealer's logged tracker list)
        trackers = reconstruct_tracker(fname)
        tr_objects = []
        for t in dealer_trackers:
            idx = np.where([t == t_id._tracker_id for t_id in trackers])[0][0]
            tr_objects.append(trackers[idx])
        # make the dealer object
        max_lag = dealer_dict['max_lag']
        pace_rule = dealer_dict['pace_rule']
        dealer.append(TrackerDealer(None, tr_objects, max_lag, pace_rule))
        # force input responses/log data from the dealer's stop line rather
        # than replaying trials, then mark the dealer as stopped.
        dealer_stop_str = 'dealer_' + str(dealer_id) + '_stop'
        dealer_stop_idx = np.where([r[1] == dealer_stop_str for r in raw])[0]
        if len(dealer_stop_idx) == 0:
            raise ValueError('TrackerDealer {} has not stopped. All dealers '
                             'must be stopped.'.format(dealer_id))
        dealer_stop_log = json.loads(raw[dealer_stop_idx[0]][2])
        shape = tuple(dealer_dict['shape'])
        log_response_history = dealer_stop_log['response_history']
        log_x_history = dealer_stop_log['x_history']
        log_tracker_history = dealer_stop_log['tracker_history']
        dealer[-1]._shape = shape
        dealer[-1]._trackers.shape = shape
        dealer[-1]._response_history = log_response_history
        dealer[-1]._x_history = log_x_history
        dealer[-1]._tracker_history = log_tracker_history
        dealer[-1]._stopped = True
    return dealer
|
#!/usr/bin/python
"""
Register brains, landmarks, and labels to a template.
(c) 2011, @rno klein
"""
import os
from os.path import exists
from subprocess import call
# ``numpy.float`` was deprecated in NumPy 1.20 and removed in 1.24; it was
# only an alias for the builtin ``float``, so importing it both shadowed the
# builtin pointlessly and crashes this script on modern NumPy.  Only
# ``isnan`` is actually NumPy-specific here.
from numpy import isnan
# Run intensity-based registration
# 1. Register brains to template
# 2. Transform brains to each other via template
# 3. Transform landmarks to template
# Pipeline stage toggles: 1 = run that stage, 0 = skip it.
register_to_template = 1
transform_pairs_via_template = 1
transform_landmarks_to_template = 0
# Run landmark-driven registration to template:
register_landmarks_to_template = 0
transform_landmarks_via_template = 0
# Atlas-based evaluation for the above settings:
# 1. prepare target atlas mask
# 2. transform source atlas
# 3. fill #1 with #2
# 4. measure overlap of #3 with target atlas labels
prepare_target_mask = 0
evaluate_with_atlases = 1
verbose = 1
# Image dimensionality passed to the ANTS command-line tools.
dim = 3
#
# Files
#
source_files = ['m1','m2','m3','m4','m5','m6','m7','m8','m9','m10','m11','m12']
target_files = ['m1','m2','m3','m4','m5','m6','m7','m8','m9','m10','m11','m12']
#source_files = ['m1','m2','m3','m4']#,'m5','m6']
#target_files = ['m1','m2','m3','m4']#,'m5','m6']
# NOTE(review): if ANTSPATH is unset in the environment this is None, and the
# string concatenations below ("warp", "apply_warp") raise TypeError.
ANTSPATH = os.environ.get("ANTSPATH")
FSLPATH = '/usr/local/fsl/bin/'
out_path = '/hd2/Archive/registration_evaluation_2011_output/'
xfm_dir = os.path.join( out_path, 'Transforms/')
xfm_brain_dir = os.path.join( out_path, 'Transformed_Brains/')
xfm_landmarks_dir = os.path.join( out_path, 'Transformed_Landmarks/')
xfm_atlas_dir = os.path.join( out_path, 'Transformed_Atlases/')
atlas_dir = '/hd2/Brains/CUMC12/Atlases/'
brain_dir = '/hd2/Brains/CUMC12/Brains/'
brainmask_dir = '/hd2/Brains/CUMC12/BrainMasks/'
ext = '.nii.gz'
template = '/hd2/Brains/CUMC12/CUMC12template.nii.gz'
# Landmark set selection: each pair below overwrites the previous one, so only
# the LAST assignment (fundi_forrest_bao) takes effect; comment pairs in or
# out to pick a different landmark set.
landmarks_dir = '/hd2/Brains/CUMC12/Landmarks/pits_kiho_im_binary/'
landmark_type = 'pits_kiho_im'
landmarks_dir = '/hd2/Brains/CUMC12/Landmarks/pits_yrjo_hame_binary/'
landmark_type = 'pits_yrjo_hame'
landmarks_dir = '/hd2/Brains/CUMC12/Landmarks/pits_forrest_bao_binary/'
landmark_type = 'pits_forrest_bao'
landmarks_dir = '/hd2/Brains/CUMC12/Landmarks/ribbons_brain_visa_binary/'
landmark_type = 'ribbons_brain_visa'
landmarks_dir = '/hd2/Brains/CUMC12/Landmarks/fundi_gang_li_binary/'
landmark_type = 'fundi_gang_li'
landmarks_dir = '/hd2/Brains/CUMC12/Landmarks/fundi_brain_visa_binary/'
landmark_type = 'fundi_brain_visa'
landmarks_dir = '/hd2/Brains/CUMC12/Landmarks/fundi_forrest_bao_binary/'
landmark_type = 'fundi_forrest_bao'
results_dir = os.path.join( out_path, 'Results/')
label_file = 'CUMC12_labels_regions.txt'
#
# Registration parameters
#
gradient_step_size = 0.5
iterations = "30x100x10"
options = " --use-Histogram-Matching"
initialize = " --number-of-affine-iterations 10000x10000x10000x10000x10000"
# Base ANTS warp command (SyN transform) and warp-application command; the
# per-pair similarity terms and outputs are appended later.
warp = ANTSPATH + "ANTS " + str(dim) + " -t SyN[" + str(gradient_step_size) +"] -i " + \
       str(iterations) + options + initialize
apply_warp = ANTSPATH + "WarpImageMultiTransform " + str(dim)
#
# Regularization parameters
#
regularizer = "Gauss"
regularizer_setting = 3
deformation_field_sigma = 0
regularize = "-r Gauss[" + str(regularizer_setting) + ", " + \
             str(deformation_field_sigma) + "]"
#
# Intensity parameters
#
intensity_measure = "CC"
intensity_weight = 1.0
intensity_setting = 3
#
# Landmark parameters
#
landmark_measure1 = "PSE"
landmark_measure2 = "MSQ"
landmark_weight1 = 0.1
landmark_weight2 = 0.1
percent = 1.0 # real number: 1.0 = 100%
boundary = 0 # 0: not only boundaries
sigma = 10
neighbor = 100
matching_iter = 100000 # partial matching iterations
if evaluate_with_atlases:
    # Read the label table once up front; column 0 of each row is the integer
    # label ID used by the overlap measurements below.  Use a context manager
    # so the handle is closed even if reading fails.
    with open(label_file, 'r') as f:
        label_table = f.readlines()
    labels = [int(row.split()[0]) for row in label_table]
#------------------------------------------
# Register brains and landmarks to template
#------------------------------------------
# BUG FIX (throughout this section): the subprocess call used to share a line
# with the verbose prints ("if verbose: print(args); print(''); p = call(...)").
# The semicolons make all three statements part of the `if verbose:` body, so
# with verbose=0 nothing was ever executed.  The call is now unconditional.
# Also: shell="True" (a truthy string) is replaced by the boolean shell=True,
# and the dead "try: <bool expr> except: raise" path check — which can never
# raise — is replaced by a real existence check.
if register_to_template + transform_landmarks_to_template + \
   prepare_target_mask > 0:
    for file in source_files:
        source = brain_dir+file+ext
        output = xfm_dir+file+'_to_template'
        out = '-o ' + output+ext
        if os.path.exists(source) and os.path.exists(template) and os.path.exists(xfm_dir):
            # Intensity-based registration to template:
            if register_to_template:
                intensity = [template, source, intensity_weight, intensity_setting]
                intensity = "-m "+intensity_measure+"[" + ", ".join([str(s) for s in intensity]) + "]"
                args = " ".join([warp, regularize, intensity, out])
                if verbose:
                    print(args)
                    print('')
                p = call(args, shell=True)
            # Prepare binary (target atlas) masks for filling with labels:
            if prepare_target_mask:
                args = " ".join(['c3d', atlas_dir+file+ext, '-binarize -o', brainmask_dir+file+ext])
                if verbose:
                    print(args)
                    print('')
                p = call(args, shell=True)
            # Transform landmarks to template space:
            if transform_landmarks_to_template:
                source_landmarks = landmarks_dir+file+ext
                output_landmarks = xfm_landmarks_dir+file+'_to_template_'+landmark_type+ext
                if not (os.path.exists(source_landmarks) and
                        os.path.exists(xfm_landmarks_dir)):
                    raise NameError('Check ' + source_landmarks + ' and ' + xfm_landmarks_dir)
                args = " ".join([apply_warp, source_landmarks, output_landmarks,
                                 '-R', template, output+'Warp'+ext, output+'Affine.txt', '--use-NN'])
                if verbose:
                    print(args)
                    print('')
                p = call(args, shell=True)
        else:
            if not os.path.exists(source):
                raise NameError('Check input file ' + source)
            elif not os.path.exists(template):
                raise NameError('Check input file ' + template)
            elif not os.path.exists(xfm_dir):
                raise NameError('Check input file ' + xfm_dir)
#--------------------------------------------------------------
# Register landmarks to transformed landmarks in template space
#--------------------------------------------------------------
if register_landmarks_to_template:
    for file in source_files:
        source = brain_dir+file+ext
        source_landmarks = landmarks_dir+file+ext
        for file2 in target_files:
            if file2 != file:
                template_landmarks = xfm_landmarks_dir+file2+'_to_template_'+landmark_type+ext
                output_xfm = xfm_dir+file+'_to_'+file2+'_in_template_space_'+landmark_type+ext
                if os.path.exists(source) and os.path.exists(template) and \
                   os.path.exists(source_landmarks) and os.path.exists(template_landmarks):
                    # Intensity similarity:
                    intensity = [template, source, intensity_weight, intensity_setting]
                    intensity = " -m "+intensity_measure+"[" + ", ".join([str(s) for s in intensity]) + "]"
                    # Landmark similarity (PSE point-set term plus MSQ term):
                    lm_args1 = [template, source, template_landmarks, source_landmarks,
                                landmark_weight1, percent, sigma, boundary, neighbor, matching_iter]
                    landmarks1 = ", ".join([" -m PSE[" + ", ".join([str(s) for s in lm_args1]) + "]"])
                    lm_args2 = [template_landmarks, source_landmarks, landmark_weight2, 0]
                    landmarks2 = " ".join([" -m MSQ[" + ", ".join([str(s) for s in lm_args2]) + "]"])
                    #
                    # Run command
                    #
                    # BUG FIX: the call used to share a line with the verbose
                    # prints, so it only executed when verbose was truthy.
                    args = " ".join([warp, '-o', output_xfm, regularize, intensity, landmarks1, landmarks2])
                    if verbose:
                        print(args)
                        print('')
                    p = call(args, shell=True)
                else:
                    if not os.path.exists(source):
                        raise NameError('Check input file ' + source)
                    elif not os.path.exists(template):
                        raise NameError('Check input file ' + template)
                    elif not os.path.exists(source_landmarks):
                        raise NameError('Check input file ' + source_landmarks)
                    elif not os.path.exists(template_landmarks):
                        raise NameError('Check input file ' + template_landmarks)
#----------------------------------------------
# Apply intensity-based registration transforms
# to register brains to each other via template
#----------------------------------------------
if transform_pairs_via_template:
    if evaluate_with_atlases:
        avg_results_file = results_dir+'dice_jacc_overlaps.txt'
        # Truncate the per-pair summary file; it is re-opened in append mode
        # inside the loop after each pair is written.
        f_avg = open(avg_results_file, 'w');
    for file in source_files:
        source = brain_dir+file+ext
        for file2 in target_files:
            if file2 != file:
                target = brain_dir+file2+ext
                if os.path.exists(brain_dir+file+ext) and \
                   os.path.exists(brain_dir+file2+ext) and \
                   os.path.exists(xfm_dir+file+'_to_templateWarp.nii.gz'):
                    output_stem = file + '_to_' + file2
                    # Transform brains
                    # NOTE(review): the warp invocations in this section are
                    # commented out -- presumably the transforms were produced
                    # by a previous run; confirm before re-enabling.  Also note
                    # the "if verbose: print(...); p = call(...)" one-liner
                    # pattern would only run the command when verbose is set.
                    args = " ".join([ANTSPATH+'WarpImageMultiTransform', str(dim),
                                     source, xfm_brain_dir+output_stem+ext, '-R',target,
                                     '-i', xfm_dir+file2+'_to_templateAffine.txt',
                                     xfm_dir+file2+'_to_templateInverseWarp.nii.gz',
                                     xfm_dir+file+'_to_templateWarp.nii.gz',
                                     xfm_dir+file+'_to_templateAffine.txt'])
                    #if verbose: print(args); print(''); p = call(args, shell="True")
                    if evaluate_with_atlases:
                        # Transform atlases
                        source_labels = atlas_dir+file+ext
                        target_labels = atlas_dir+file2+ext
                        args = " ".join([ANTSPATH+'WarpImageMultiTransform', str(dim),
                                         source_labels, xfm_atlas_dir+output_stem+ext, '-R', target_labels,
                                         '-i', xfm_dir+file2+'_to_templateAffine.txt',
                                         xfm_dir+file2+'_to_templateInverseWarp.nii.gz',
                                         xfm_dir+file+'_to_templateWarp.nii.gz',
                                         xfm_dir+file+'_to_templateAffine.txt','--use-NN'])
                        #if verbose: print(args); print(''); p = call(args, shell="True")
                        # Fill target atlas mask with transformed source atlas labels
                        args = " ".join(['ImageMath', str(dim), xfm_atlas_dir+output_stem+'_filled'+ext,
                                         'PropagateLabelsThroughMask', brainmask_dir+file2+ext,
                                         xfm_atlas_dir+output_stem+ext])
                        #if verbose: print(args); print(''); p = call(args, shell="True")
                        # Measure overlap of target atlas and transformed source atlas labels
                        results_file = results_dir+output_stem+'.txt'
                        f_eval = open(results_file, 'w');
                        average_dice = 0
                        average_jacc = 0
                        print(results_file)
                        for label in labels:
                            # c3d -overlap prints per-label Dice/Jaccard; output
                            # is captured via a shell redirect into a temp file.
                            args = " ".join(['c3d', xfm_atlas_dir+output_stem+'_filled'+ext,
                                             atlas_dir+file2+ext, '-overlap', str(label),
                                             '>'+results_dir+'temp_overlap.txt'])
                            p = call(args, shell="True")
                            # NOTE(review): this temp-file handle `f` is never
                            # closed, and shell="True" works only because a
                            # non-empty string is truthy -- shell=True intended.
                            f = open(results_dir+'temp_overlap.txt','r')
                            temp = f.read()
                            if temp != '':
                                dice = float(temp.split()[-2].split(',')[0])
                                jacc = float(temp.split()[-1].split(',')[0])
                            else:
                                dice = 0.0
                                jacc = 0.0
                            if isnan(dice):
                                dice = 0.0
                            if isnan(jacc):
                                jacc = 0.0
                            print_out = ' '.join(['Label:', str(label), 'Dice:', str(dice),
                                                  'Jaccard:', str(jacc)])
                            print(print_out)
                            # Close-then-reopen in append mode flushes each line
                            # to disk immediately (crash-tolerant logging).
                            f_eval.close()
                            f_eval = open(results_file, 'a')
                            f_eval.write(print_out + '\n')
                            average_dice += dice
                            average_jacc += jacc
                        average_dice = average_dice/len(labels)
                        average_jacc = average_jacc/len(labels)
                        print_out1 = 'Average Dice: ' + str(average_dice)
                        print_out2 = 'Average Jacc: ' + str(average_jacc)
                        print(print_out1);
                        print(print_out2)
                        f_eval.close()
                        f_eval = open(results_file, 'a')
                        f_eval.write(print_out1 + '\n' + print_out2 + '\n')
                        f_eval.close()
                        f_avg.close()
                        f_avg = open(avg_results_file, 'a');
                        f_avg.write(output_stem + ' ' + str(average_dice) + ' ' + str(average_jacc) + '\n')
                else:
                    if not os.path.exists(brain_dir+file+ext):
                        raise NameError('Check input file ' + brain_dir+file+ext)
                    elif not os.path.exists(brain_dir+file2+ext):
                        raise NameError('Check input file ' + brain_dir+file2+ext)
                    # NOTE(review): the guard above tests
                    # '<file>_to_templateWarp.nii.gz' but this message checks
                    # '<file>Warp.nii.gz' -- the two names disagree.
                    elif not os.path.exists(xfm_dir+file+'Warp.nii.gz'):
                        raise NameError('Check input file ' + xfm_dir+file+'Warp.nii.gz')
    if evaluate_with_atlases:
        f_avg.close()
#----------------------------------------------
# Apply landmark-driven registration transforms
# to register brains to each other via template
#----------------------------------------------
if transform_landmarks_via_template:
    if evaluate_with_atlases:
        avg_results_file = results_dir+'dice_jacc_overlaps_'+landmark_type+'.txt'
        # Truncate the summary file; re-opened in append mode per pair below.
        f_avg = open(avg_results_file, 'w');
    for file in source_files:
        source = brain_dir+file+ext
        source_landmarks = landmarks_dir+file+ext
        for file2 in target_files:
            if file2 != file:
                target = brain_dir+file2+ext
                target_landmarks = landmarks_dir+file2+ext
                if os.path.exists(source) and \
                   os.path.exists(target) and \
                   os.path.exists(source_landmarks) and \
                   os.path.exists(target_landmarks):
                    pair = file+'_to_'+file2
                    inv_pair = file2+'_to_'+file
                    output_stem = pair+'_'+landmark_type
                    xfm_stem = xfm_dir+pair+'_in_template_space_'+landmark_type
                    inv_xfm_stem = xfm_dir+inv_pair+'_in_template_space_'+landmark_type
                    # Transform brains (skipped when the output already exists)
                    # NOTE(review): throughout this section the pattern
                    # "if verbose: print(args); print(''); p = call(...)" puts
                    # the call() inside the verbose guard (semicolon statement
                    # list), so with verbose=0 nothing executes -- likely a bug.
                    # shell="True" also works only because a non-empty string
                    # is truthy; shell=True was surely intended.
                    if not os.path.exists(xfm_brain_dir+output_stem+ext):
                        args = " ".join([ANTSPATH+'WarpImageMultiTransform', str(dim),
                                         source, xfm_brain_dir+output_stem+ext, '-R', target,
                                         '-i', inv_xfm_stem+'Affine.txt',
                                         inv_xfm_stem+'InverseWarp.nii.gz',
                                         xfm_stem+'Warp.nii.gz',
                                         xfm_stem+'Affine.txt'])
                        if verbose: print(args); print(''); p = call(args, shell="True")
                    # Transform landmarks (nearest-neighbor to keep labels binary)
                    if not os.path.exists(xfm_landmarks_dir+output_stem+ext):
                        args = " ".join([ANTSPATH+'WarpImageMultiTransform', str(dim),
                                         source_landmarks, xfm_landmarks_dir+output_stem+ext, '-R',target_landmarks,
                                         '-i', inv_xfm_stem+'Affine.txt',
                                         inv_xfm_stem+'InverseWarp.nii.gz',
                                         xfm_stem+'Warp.nii.gz',
                                         xfm_stem+'Affine.txt','--use-NN'])
                        if verbose: print(args); print(''); p = call(args, shell="True")
                    if evaluate_with_atlases:
                        if not os.path.exists(xfm_atlas_dir+output_stem+ext):
                            if not os.path.exists(results_dir+output_stem+'.txt'):
                                # Transform atlases
                                source_labels = atlas_dir+file+ext
                                target_labels = atlas_dir+file2+ext
                                args = " ".join([ANTSPATH+'WarpImageMultiTransform', str(dim),
                                                 source_labels, xfm_atlas_dir+output_stem+ext, '-R', target_labels,
                                                 '-i', inv_xfm_stem+'Affine.txt',
                                                 inv_xfm_stem+'InverseWarp.nii.gz',
                                                 xfm_stem+'Warp.nii.gz',
                                                 xfm_stem+'Affine.txt','--use-NN'])
                                if verbose: print(args); print(''); p = call(args, shell="True")
                                # Fill target atlas mask with transformed source atlas labels
                                args = " ".join(['ImageMath', str(dim), xfm_atlas_dir+output_stem+'_filled'+ext,
                                                 'PropagateLabelsThroughMask', brainmask_dir+file2+ext,
                                                 xfm_atlas_dir+output_stem+ext])
                                if verbose: print(args); print(''); p = call(args, shell="True")
                                # Measure overlap of target atlas and transformed source atlas labels
                                results_file = results_dir+output_stem+'.txt'
                                f_eval = open(results_file, 'w');
                                average_dice = 0
                                average_jacc = 0
                                for label in labels:
                                    # Capture c3d's per-label Dice/Jaccard via a temp file.
                                    args = " ".join(['c3d', xfm_atlas_dir+output_stem+'_filled'+ext,
                                                     atlas_dir+file2+ext, '-overlap', str(label),
                                                     '>'+results_dir+'temp_overlap.txt'])
                                    p = call(args, shell="True")
                                    # NOTE(review): `f` is never closed.
                                    f = open(results_dir+'temp_overlap.txt','r')
                                    temp = f.read()
                                    dice = 0
                                    jacc = 0
                                    if temp != '':
                                        dice = float(temp.split()[-2].split(',')[0])
                                        jacc = float(temp.split()[-1].split(',')[0])
                                    print_out = " ".join(['Label:', str(label), 'Dice:', str(dice),
                                                          'Jaccard:', str(jacc)])
                                    print(print_out)
                                    # Close/reopen-append flushes each line to disk.
                                    f_eval.close()
                                    f_eval = open(results_file, 'a')
                                    f_eval.write(print_out + '\n')
                                    if isnan(dice):
                                        dice = 0
                                    if isnan(jacc):
                                        jacc = 0
                                    average_dice += dice
                                    average_jacc += jacc
                                average_dice = average_dice/len(labels)
                                average_jacc = average_jacc/len(labels)
                                print_out1 = 'Average Dice: ' + str(average_dice)
                                print_out2 = 'Average Jacc: ' + str(average_jacc)
                                print(print_out1);
                                print(print_out2)
                                f_eval.close()
                                f_eval = open(results_file, 'a')
                                f_eval.write('\n' + print_out1 + '\n' + print_out2 + '\n\n')
                                f_eval.close()
                                f_avg.close()
                                f_avg = open(avg_results_file, 'a');
                                f_avg.write(output_stem + ' ' + str(average_dice) + ' ' + str(average_jacc) + '\n')
                else:
                    if not os.path.exists(source_landmarks):
                        raise NameError('Check input file ' + source_landmarks)
                    elif not os.path.exists(target_landmarks):
                        raise NameError('Check input file ' + target_landmarks)
    if evaluate_with_atlases:
        f_avg.close()
|
import os
import tensorflow as tf
from util import constants
from util.config_util import get_model_params, get_task_params, get_train_params
from tf2_models.trainer import Trainer
from absl import app
from absl import flags
import numpy as np
from util.models import MODELS
from util.tasks import TASKS
import tensorflow_probability as tfp
import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns; sns.set()
sns.set_style("whitegrid")
from tqdm import tqdm
def test_for_calibration(model, task, n_bins=10):
    """Run *model* over the task's validation set and collect calibration data.

    Iterates at most ``task.n_valid_batches`` batches of ``task.valid_dataset``.
    ``n_bins`` is accepted for API symmetry with the plotting helper but is not
    used here.

    Returns a tuple of:
      - per-example correctness mask (np.ndarray of bool),
      - probability assigned to the predicted class (list),
      - probability assigned to the true class (list),
      - raw logits (list),
      - true labels (list).
    """
    predictions = []
    true_labels = []
    logits_list = []
    prob_of_true = []
    prob_of_pred = []
    remaining = task.n_valid_batches
    for inputs, labels in task.valid_dataset:
        logits = model(inputs)
        logits_list.extend(logits.numpy())
        predicted = tf.argmax(logits, axis=-1)
        probs = task.get_probs_fn()(logits, labels=labels, temperature=1)
        predictions.extend(predicted.numpy())
        true_labels.extend(labels.numpy())
        # Build (row, class) index pairs to gather per-example probabilities.
        rows = tf.range(len(labels), dtype=tf.int64)
        gather_true = tf.concat([rows[:, None], labels[:, None]], axis=1)
        gather_pred = tf.concat(
            [rows[:, None], tf.cast(predicted[:, None], tf.int64)], axis=1)
        prob_of_true.extend(tf.gather_nd(probs, gather_true).numpy())
        prob_of_pred.extend(tf.gather_nd(probs, gather_pred).numpy())
        remaining -= 1
        if remaining == 0:
            break
    model_accuracy = np.asarray(predictions) == np.asarray(true_labels)
    return model_accuracy, prob_of_pred, prob_of_true, logits_list, true_labels
def plot_calibration(model_accuracy, predicted_class_probs, correct_class_probs, n_bins=10):
    """Draw a reliability diagram of predicted confidence vs. empirical accuracy.

    model_accuracy: per-example booleans (prediction == label).
    predicted_class_probs: per-example probability of the predicted class.
    correct_class_probs: unused by the current implementation.
    Returns (p_confidence_bins, n_confidence_bins, total_confidence_bins).
    """
    # Per-bin counters keyed by confidence of the predicted class:
    # correct predictions (+1), incorrect predictions (-1, so values are
    # negative counts), and totals per bin.
    p_confidence_bins = np.zeros(n_bins)
    n_confidence_bins = np.zeros(n_bins)
    total_confidence_bins = np.zeros(n_bins)
    # Width of one confidence bucket in percent (e.g. 10.0 for n_bins=10).
    denominator = 100.0 / n_bins
    for i in np.arange(len(model_accuracy)):
        # min(..., n_bins-1) clamps confidence 1.0 into the last bucket.
        if model_accuracy[i]:
            p_confidence_bins[min(int(predicted_class_probs[i]*100 // denominator),n_bins-1)] += 1.0
        else:
            n_confidence_bins[min(int(predicted_class_probs[i]*100 // denominator),n_bins-1)] -= 1.0
        total_confidence_bins[min(int(predicted_class_probs[i]*100 // denominator),n_bins-1)] += 1
    #sns.stripplot(model_accuracy,predicted_class_probs, color='blue', alpha=0.5, jitter=True)
    #sns.stripplot(model_accuracy,correct_class_probs, color='green', alpha=0.2, jitter=True)
    #sns.swarmplot(model_accuracy,predicted_class_probs, color='blue', alpha=0.5)
    #plt.show()
    # Green bars: the identity (perfectly calibrated) reference; red bars on
    # top: empirical accuracy per confidence bucket.
    sns.barplot(x=np.arange(0,n_bins)*denominator,
                y=np.arange(0,n_bins)/n_bins,
                color='green', alpha=0.2, edgecolor='black')
    ax = sns.barplot(x=np.arange(0,n_bins)*denominator,
                     y=p_confidence_bins/total_confidence_bins,
                     color='red', alpha=0.5, edgecolor='black')
    # Label every other categorical bar position with its confidence value.
    x_ticks = np.arange(0,n_bins,2)
    x_tick_labels = x_ticks / np.float32(n_bins)
    ax.set_xticks(x_ticks)
    ax.set_xticklabels(x_tick_labels, fontsize=10)
    return p_confidence_bins,n_confidence_bins,total_confidence_bins
def expected_calibration_error(teacher_accuracy, teacher_predicted_class_probs):
    """Compute the expected calibration error (ECE). Not implemented yet.

    :param teacher_accuracy: per-example correctness mask.
    :param teacher_predicted_class_probs: per-example confidence values.
    :raises NotImplementedError: always; placeholder for future work.
    """
    # Bug fix: the original `raise NotImplemented` raises a TypeError at the
    # call site, because NotImplemented is a constant, not an exception class.
    raise NotImplementedError
<gh_stars>0
#!/usr/bin/python
# Copyright 2016 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Unittest for network_setup.py module."""
import shutil
import subprocess
import tempfile
from google_compute_engine.network_setup import network_setup
from google_compute_engine.test_compat import mock
from google_compute_engine.test_compat import unittest
class NetworkSetupTest(unittest.TestCase):
    """Unit tests for network_setup.NetworkSetup.

    The real methods under test are invoked unbound against an autospec'd
    NetworkSetup instance (self.mock_setup), so every collaborator stays a
    mock while the production code paths run. Call ordering is asserted
    strictly via mock_calls.
    """

    def setUp(self):
        # Create a temporary directory.
        self.test_dir = tempfile.mkdtemp()
        self.mock_logger = mock.Mock()
        self.mock_watcher = mock.Mock()
        self.mock_ip_forwarding_utils = mock.Mock()
        self.mock_network_utils = mock.Mock()
        self.metadata_key = 'metadata_key'
        self.mock_distro_utils = mock.Mock()
        # Autospec'd instance: attributes below mirror what NetworkSetup's
        # constructor would normally set.
        self.mock_setup = mock.create_autospec(network_setup.NetworkSetup)
        self.mock_setup.logger = self.mock_logger
        self.mock_setup.watcher = self.mock_watcher
        self.mock_setup.network_utils = self.mock_network_utils
        self.mock_setup.network_interfaces = self.metadata_key
        self.mock_setup.distro_utils = self.mock_distro_utils
        self.mock_setup.network_path = '/etc/sysconfig/network-scripts'
        self.mock_setup.dhclient_script = '/bin/script'
        self.mock_setup.dhcp_command = ''

    def tearDown(self):
        # Remove the directory after the test.
        shutil.rmtree(self.test_dir)

    @mock.patch('google_compute_engine.network_setup.network_setup.network_utils')
    @mock.patch('google_compute_engine.network_setup.network_setup.metadata_watcher')
    @mock.patch('google_compute_engine.network_setup.network_setup.logger')
    def testNetworkSetup(self, mock_logger, mock_watcher, mock_network_utils):
        # Constructor must create logger, watcher and network utils in order.
        mock_logger_instance = mock.Mock()
        mock_logger.Logger.return_value = mock_logger_instance
        mocks = mock.Mock()
        mocks.attach_mock(mock_logger, 'logger')
        mocks.attach_mock(mock_watcher, 'watcher')
        mocks.attach_mock(mock_network_utils, 'network')
        # Patch out the interface setup so the constructor stays side-effect
        # free during the test.
        with mock.patch.object(
            network_setup.NetworkSetup, '_SetupNetworkInterfaces'):
            network_setup.NetworkSetup(debug=True)
            expected_calls = [
                mock.call.logger.Logger(name=mock.ANY, debug=True, facility=mock.ANY),
                mock.call.watcher.MetadataWatcher(logger=mock_logger_instance),
                mock.call.network.NetworkUtils(logger=mock_logger_instance),
            ]
            self.assertEqual(mocks.mock_calls, expected_calls)

    @mock.patch('google_compute_engine.network_setup.network_setup.subprocess.check_call')
    def testEnableNetworkInterfaces(self, mock_call):
        mocks = mock.Mock()
        mocks.attach_mock(mock_call, 'call')
        mocks.attach_mock(self.mock_logger, 'logger')
        mocks.attach_mock(self.mock_setup.distro_utils.EnableNetworkInterfaces, 'enable')
        # First subprocess call succeeds, second raises: exercises both the
        # success path and the error-logging path of the dhcp_command branch.
        mock_call.side_effect = [None, subprocess.CalledProcessError(1, 'Test')]

        # Return immediately with fewer than two interfaces.
        network_setup.NetworkSetup._EnableNetworkInterfaces(self.mock_setup, None)
        network_setup.NetworkSetup._EnableNetworkInterfaces(self.mock_setup, [])
        # Enable interfaces with network manager enabled.
        network_setup.NetworkSetup._EnableNetworkInterfaces(
            self.mock_setup, ['A', 'B'])
        # Enable interfaces with network manager is not present.
        network_setup.NetworkSetup._EnableNetworkInterfaces(
            self.mock_setup, ['C', 'D'])
        # Run a user supplied command successfully.
        self.mock_setup.dhcp_command = 'success'
        network_setup.NetworkSetup._EnableNetworkInterfaces(
            self.mock_setup, ['E', 'F'])
        # Run a user supplied command and logger error messages.
        self.mock_setup.dhcp_command = 'failure'
        network_setup.NetworkSetup._EnableNetworkInterfaces(
            self.mock_setup, ['G', 'H'])
        expected_calls = [
            # First calls with empty `interfaces` were no-ops.
            mock.call.enable(['A', 'B'], mock.ANY, dhclient_script='/bin/script'),
            mock.call.enable(['C', 'D'], mock.ANY, dhclient_script='/bin/script'),
            mock.call.call(['success']),
            mock.call.call(['failure']),
            mock.call.logger.warning(mock.ANY),
        ]
        self.assertEqual(mocks.mock_calls, expected_calls)

    def testSetupNetworkInterfaces(self):
        mocks = mock.Mock()
        mocks.attach_mock(self.mock_logger, 'logger')
        mocks.attach_mock(self.mock_watcher, 'watcher')
        mocks.attach_mock(self.mock_network_utils, 'network')
        mocks.attach_mock(self.mock_setup, 'setup')
        # Four metadata entries: two resolvable MACs, one unresolvable, and
        # one entry with no 'mac' key at all.
        self.mock_watcher.GetMetadata.return_value = [
            {'mac': '1'}, {'mac': '2'}, {'mac': '3'}, {}]
        self.mock_network_utils.GetNetworkInterface.side_effect = [
            'eth0', 'eth1', None, None]

        with mock.patch.object(
            network_setup.NetworkSetup, '_EnableNetworkInterfaces'):
            self.mock_setup.dhcp_command = 'command'
            network_setup.NetworkSetup._SetupNetworkInterfaces(self.mock_setup)

            # Unresolvable interfaces are warned about; only resolved ones are
            # passed on to _EnableNetworkInterfaces.
            expected_calls = [
                mock.call.watcher.GetMetadata(
                    metadata_key=self.metadata_key, recursive=True),
                mock.call.network.GetNetworkInterface('1'),
                mock.call.network.GetNetworkInterface('2'),
                mock.call.network.GetNetworkInterface('3'),
                mock.call.logger.warning(mock.ANY, '3'),
                mock.call.network.GetNetworkInterface(None),
                mock.call.logger.warning(mock.ANY, None),
                mock.call.setup._EnableNetworkInterfaces(['eth0', 'eth1']),
            ]
            self.assertEqual(mocks.mock_calls, expected_calls)
|
import numba as nb
from numba import cuda
from numba.cuda.random import xoroshiro128p_uniform_float32
# Problem dimensions baked into the CUDA kernels below. Numba closes over
# these module globals at JIT-compile time (they size the cuda.local arrays),
# so they must be assigned their real values before the kernels are compiled
# and launched; -1 is just an "unset" placeholder.
size = -1  # number of vertices (length of one coloring)
k = -1  # number of available colors
E = -1  # presumably the edge count -- not used by the kernels in this chunk
N = -1  # capacity of one color group (width of tGroup_color rows)
@cuda.jit
def tabuWVCP_NoRandom_AFISA(
    rng_states,
    D,
    max_iter,
    A,
    W,
    tColor,
    vect_fit,
    vect_score,
    vect_conflicts,
    alpha,
    phi,
):
    """Tabu search for the Weighted Vertex Coloring Problem (AFISA-style).

    One CUDA thread handles one candidate solution d in [0, D). The penalised
    fitness is f = score_wvcp + phi[d] * nb_conflicts, where score_wvcp sums
    the max vertex weight of each color class. tColor (D x size) holds the
    colorings in/out; vect_fit/vect_score/vect_conflicts receive the best
    values found. alpha scales the tabu tenure; A is the adjacency matrix and
    W the vertex weights (assumed to fit the int8/int16 locals -- see the
    *_heavyWeights variant for larger weights).
    """
    # vect_nb_vois, voisin
    d = cuda.grid(1)
    if d < D:
        f = 0
        # Per-thread scratch (sizes fixed at compile time by module globals):
        # gamma[x, c]        = number of neighbors of x currently colored c
        # gammaArrive[x, c]  = score increase if x moves INTO color c
        # gammaDepart[x]     = score decrease if x LEAVES its current color
        # max/secondmax_weight[c] = two largest weights in color class c
        # tGroup_color[c, :] = vertices of color c (-1 = empty slot)
        tColor_local = nb.cuda.local.array((size), nb.int16)
        gamma = nb.cuda.local.array((size, k), nb.int8)
        gammaDepart = nb.cuda.local.array((size), nb.int8)
        gammaArrive = nb.cuda.local.array((size, k), nb.int8)
        max_weight = nb.cuda.local.array((k), nb.int8)
        secondmax_weight = nb.cuda.local.array((k), nb.int8)
        tabuTenure = nb.cuda.local.array((size), nb.int32)
        tGroup_color = nb.cuda.local.array((k, N), nb.int16)
        for c in range(k):
            max_weight[c] = 0
            secondmax_weight[c] = 0
            for i in range(N):
                tGroup_color[c, i] = -1
        # Load this thread's coloring and register each vertex in its group.
        for x in range(size):
            gammaDepart[x] = 0
            for y in range(k):
                gamma[x, y] = 0
                gammaArrive[x, y] = 0
            tColor_local[x] = int(tColor[d, x])
            idx = 0
            while tGroup_color[tColor_local[x], idx] != -1:
                idx += 1
            tGroup_color[tColor_local[x], idx] = x
            tabuTenure[x] = -1
        # Initial conflicts, WVCP score and the two top weights per color.
        nb_conflicts = 0
        score_wvcp = 0
        for x in range(size):
            for y in range(x):
                if A[x, y] == 1:
                    gamma[x, tColor_local[y]] += 1
                    gamma[y, tColor_local[x]] += 1
                    if tColor_local[y] == tColor_local[x]:
                        nb_conflicts += 1
            if W[x] >= max_weight[tColor_local[x]]:
                score_wvcp += W[x] - max_weight[tColor_local[x]]
                secondmax_weight[tColor_local[x]] = max_weight[tColor_local[x]]
                max_weight[tColor_local[x]] = int(W[x])
            elif W[x] > secondmax_weight[tColor_local[x]]:
                secondmax_weight[tColor_local[x]] = int(W[x])
        # Precompute move deltas on the score component.
        for x in range(size):
            for c in range(k):
                if W[x] > max_weight[c]:
                    gammaArrive[x, c] = W[x] - max_weight[c]
                else:
                    gammaArrive[x, c] = 0
        for x in range(size):
            if W[x] == max_weight[tColor_local[x]]:
                gammaDepart[x] = secondmax_weight[tColor_local[x]] - W[x]
            else:
                gammaDepart[x] = 0
        f = score_wvcp + phi[d] * nb_conflicts
        f_best = f
        score_best = score_wvcp
        nb_conflicts_best = nb_conflicts
        for iter_ in range(max_iter):
            # Scan all (vertex, color) moves; a tabu move is still allowed if
            # it would beat the best fitness seen (aspiration criterion).
            best_delta = 9999
            best_delta_conflicts = -1
            best_delta_score = -1
            best_x = -1
            best_v = -1
            for x in range(size):
                v_x = tColor_local[x]
                for v in range(k):
                    if v != v_x:
                        delta_score = gammaArrive[x, v] + gammaDepart[x]
                        delta_conflicts = gamma[x, v] - gamma[x, v_x]
                        delta = delta_score + phi[d] * delta_conflicts
                        if tabuTenure[x] <= iter_ or delta + f < f_best:
                            if delta < best_delta:
                                best_x = x
                                best_v = v
                                best_delta = delta
                                best_delta_conflicts = delta_conflicts
                                best_delta_score = delta_score
            # Apply the chosen move: recolor best_x from old_color to best_v.
            f += best_delta
            score_wvcp += best_delta_score
            nb_conflicts += best_delta_conflicts
            old_color = tColor_local[best_x]
            for y in range(size):
                if A[best_x, y] == 1:
                    gamma[y, old_color] -= 1
                    gamma[y, best_v] += 1
            tColor_local[best_x] = best_v
            # Recompute the two top weights of the emptied color class.
            old_max_old_color = max_weight[old_color]
            old_second_max_old_color = secondmax_weight[old_color]
            max_weight[old_color] = 0
            secondmax_weight[old_color] = 0
            for idx in range(N):
                x = tGroup_color[old_color, idx]
                if x == best_x:
                    tGroup_color[old_color, idx] = -1
                elif x != -1:
                    if W[x] >= max_weight[old_color]:
                        secondmax_weight[old_color] = max_weight[old_color]
                        max_weight[old_color] = int(W[x])
                    elif W[x] > secondmax_weight[old_color]:
                        secondmax_weight[old_color] = int(W[x])
            # Register best_x in its new group and refresh that group's tops.
            idx = 0
            while tGroup_color[best_v, idx] != -1:
                idx += 1
            tGroup_color[best_v, idx] = best_x
            old_max_best_v = max_weight[best_v]
            if W[best_x] >= max_weight[best_v]:
                secondmax_weight[best_v] = max_weight[best_v]
                max_weight[best_v] = int(W[best_x])
            elif W[best_x] > secondmax_weight[best_v]:
                secondmax_weight[best_v] = int(W[best_x])
            # Refresh cached deltas only where a group maximum changed.
            if max_weight[old_color] != old_max_old_color:
                for x in range(size):
                    if W[x] >= max_weight[old_color]:
                        gammaArrive[x, old_color] = W[x] - max_weight[old_color]
                    else:
                        gammaArrive[x, old_color] = 0
            if max_weight[best_v] != old_max_best_v:
                for x in range(size):
                    if W[x] >= max_weight[best_v]:
                        gammaArrive[x, best_v] = W[x] - max_weight[best_v]
                    else:
                        gammaArrive[x, best_v] = 0
            if (
                old_second_max_old_color != secondmax_weight[old_color]
                or max_weight[old_color] != old_max_old_color
            ):
                for idx in range(N):
                    x = tGroup_color[old_color, idx]
                    if x != -1:
                        if W[x] == max_weight[old_color]:
                            gammaDepart[x] = secondmax_weight[old_color] - W[x]
                        else:
                            gammaDepart[x] = 0
            for idx in range(N):
                x = tGroup_color[best_v, idx]
                if x != -1:
                    if W[x] == max_weight[best_v]:
                        gammaDepart[x] = secondmax_weight[best_v] - W[x]
                    else:
                        gammaDepart[x] = 0
            # Forbid moving best_x again for a randomized number of iterations.
            tabuTenure[best_x] = (
                int(alpha * size)
                + int(10 * xoroshiro128p_uniform_float32(rng_states, d))
                + iter_
            )
            if f < f_best:
                f_best = f
                score_best = score_wvcp
                nb_conflicts_best = nb_conflicts
                for a in range(size):
                    tColor[d, a] = tColor_local[a]
        vect_fit[d] = f_best
        vect_score[d] = score_best
        vect_conflicts[d] = nb_conflicts_best
@cuda.jit
def tabuGCP(rng_states, D, max_iter, A, tColor, vect_fit, alpha, tabuTenure):
    """Tabu search for the (unweighted) Graph Coloring Problem.

    One thread per candidate solution d in [0, D). Fitness f is the number of
    conflicting edges. Unlike the WVCP kernels, the tabu table lives in the
    global array tabuTenure (D x size x k), indexed per (solution, vertex,
    color). tColor (D x size) is read and updated in place with the best
    coloring found; vect_fit[d] receives the best conflict count.
    """
    # vect_nb_vois, voisin
    d = cuda.grid(1)
    if d < D:
        f = 0
        tColor_local = nb.cuda.local.array((size), nb.int16)
        # gamma[x, c] = number of neighbors of x currently colored c.
        gamma = nb.cuda.local.array((size, k), nb.int8)
        for x in range(size):
            for y in range(k):
                gamma[x, y] = 0
                tabuTenure[d, x, y] = -1
            tColor_local[x] = int(tColor[d, x])
        # Count initial conflicts over the lower triangle of A.
        f = 0
        for x in range(size):
            for y in range(x):
                if A[x, y] == 1:
                    gamma[x, tColor_local[y]] += 1
                    gamma[y, tColor_local[x]] += 1
                    if tColor_local[y] == tColor_local[x]:
                        f += 1
        f_best = f
        for iter_ in range(max_iter):
            best_delta = 9999
            best_x = -1
            best_v = -1
            # nbcfl counts currently-conflicting vertices; it scales the
            # tenure below (reactive tabu).
            nbcfl = 0
            for x in range(size):
                v_x = tColor_local[x]
                if gamma[x, v_x] > 0:
                    nbcfl += 1
                for v in range(k):
                    if v != v_x:
                        delta = gamma[x, v] - gamma[x, v_x]
                        # Aspiration: accept a tabu move that beats f_best.
                        if tabuTenure[d, x, v] <= iter_ or delta + f < f_best:
                            if delta < best_delta:
                                best_x = x
                                best_v = v
                                best_delta = delta
            # Apply the best move and update neighbor color counts.
            f += best_delta
            old_color = tColor_local[best_x]
            for y in range(size):
                if A[best_x, y] == 1:
                    gamma[y, old_color] -= 1
                    gamma[y, best_v] += 1
            tColor_local[best_x] = best_v
            # Forbid re-taking the undone (vertex, color) pair for a while.
            tabuTenure[d, best_x, old_color] = (
                int(alpha * nbcfl)
                + int(10 * xoroshiro128p_uniform_float32(rng_states, d))
                + iter_
            )
            if f < f_best:
                f_best = f
                for a in range(size):
                    tColor[d, a] = tColor_local[a]
        vect_fit[d] = f_best
@cuda.jit
def tabuWVCP_NoRandom_AFISA_bigSize(
    rng_states,
    D,
    max_iter,
    A,
    W,
    tColor,
    vect_fit,
    vect_score,
    vect_conflicts,
    alpha,
    phi,
    gamma,
):
    """Variant of tabuWVCP_NoRandom_AFISA for instances too large for local
    memory: the neighbor-color-count table gamma is passed in as a global
    array of shape (D, size, k) instead of being a per-thread local array.
    All other logic is identical to tabuWVCP_NoRandom_AFISA.
    """
    d = cuda.grid(1)
    if d < D:
        f = 0
        # Remaining scratch still fits in per-thread local memory; see the
        # sibling kernel for the meaning of each structure.
        tColor_local = nb.cuda.local.array((size), nb.int16)
        gammaDepart = nb.cuda.local.array((size), nb.int8)
        gammaArrive = nb.cuda.local.array((size, k), nb.int8)
        max_weight = nb.cuda.local.array((k), nb.int8)
        secondmax_weight = nb.cuda.local.array((k), nb.int8)
        tabuTenure = nb.cuda.local.array((size), nb.int32)
        tGroup_color = nb.cuda.local.array((k, N), nb.int16)
        for c in range(k):
            max_weight[c] = 0
            secondmax_weight[c] = 0
            for i in range(N):
                tGroup_color[c, i] = -1
        # Load the coloring and register each vertex in its color group.
        for x in range(size):
            gammaDepart[x] = 0
            for y in range(k):
                gamma[d, x, y] = 0
                gammaArrive[x, y] = 0
            tColor_local[x] = int(tColor[d, x])
            idx = 0
            while tGroup_color[tColor_local[x], idx] != -1:
                idx += 1
            tGroup_color[tColor_local[x], idx] = x
            tabuTenure[x] = -1
        # Initial conflicts, WVCP score and the two top weights per color.
        nb_conflicts = 0
        score_wvcp = 0
        for x in range(size):
            for y in range(x):
                if A[x, y] == 1:
                    gamma[d, x, tColor_local[y]] += 1
                    gamma[d, y, tColor_local[x]] += 1
                    if tColor_local[y] == tColor_local[x]:
                        nb_conflicts += 1
            if W[x] >= max_weight[tColor_local[x]]:
                score_wvcp += W[x] - max_weight[tColor_local[x]]
                secondmax_weight[tColor_local[x]] = max_weight[tColor_local[x]]
                max_weight[tColor_local[x]] = int(W[x])
            elif W[x] > secondmax_weight[tColor_local[x]]:
                secondmax_weight[tColor_local[x]] = int(W[x])
        # Precompute score deltas for entering/leaving color classes.
        for x in range(size):
            for c in range(k):
                if W[x] > max_weight[c]:
                    gammaArrive[x, c] = W[x] - max_weight[c]
                else:
                    gammaArrive[x, c] = 0
        for x in range(size):
            if W[x] == max_weight[tColor_local[x]]:
                gammaDepart[x] = secondmax_weight[tColor_local[x]] - W[x]
            else:
                gammaDepart[x] = 0
        f = score_wvcp + phi[d] * nb_conflicts
        f_best = f
        score_best = score_wvcp
        nb_conflicts_best = nb_conflicts
        for iter_ in range(max_iter):
            # Best-move scan with aspiration (tabu bypass on improvement).
            best_delta = 9999
            best_delta_conflicts = -1
            best_delta_score = -1
            best_x = -1
            best_v = -1
            for x in range(size):
                v_x = tColor_local[x]
                for v in range(k):
                    if v != v_x:
                        delta_score = gammaArrive[x, v] + gammaDepart[x]
                        delta_conflicts = gamma[d, x, v] - gamma[d, x, v_x]
                        delta = delta_score + phi[d] * delta_conflicts
                        if tabuTenure[x] <= iter_ or delta + f < f_best:
                            if delta < best_delta:
                                best_x = x
                                best_v = v
                                best_delta = delta
                                best_delta_conflicts = delta_conflicts
                                best_delta_score = delta_score
            # Apply the move: recolor best_x from old_color to best_v.
            f += best_delta
            score_wvcp += best_delta_score
            nb_conflicts += best_delta_conflicts
            old_color = tColor_local[best_x]
            for y in range(size):
                if A[best_x, y] == 1:
                    gamma[d, y, old_color] -= 1
                    gamma[d, y, best_v] += 1
            tColor_local[best_x] = best_v
            # Recompute the two top weights of the emptied color class.
            old_max_old_color = max_weight[old_color]
            old_second_max_old_color = secondmax_weight[old_color]
            max_weight[old_color] = 0
            secondmax_weight[old_color] = 0
            for idx in range(N):
                x = tGroup_color[old_color, idx]
                if x == best_x:
                    tGroup_color[old_color, idx] = -1
                elif x != -1:
                    if W[x] >= max_weight[old_color]:
                        secondmax_weight[old_color] = max_weight[old_color]
                        max_weight[old_color] = int(W[x])
                    elif W[x] > secondmax_weight[old_color]:
                        secondmax_weight[old_color] = int(W[x])
            # Register best_x in its new group and refresh that group's tops.
            idx = 0
            while tGroup_color[best_v, idx] != -1:
                idx += 1
            tGroup_color[best_v, idx] = best_x
            old_max_best_v = max_weight[best_v]
            if W[best_x] >= max_weight[best_v]:
                secondmax_weight[best_v] = max_weight[best_v]
                max_weight[best_v] = int(W[best_x])
            elif W[best_x] > secondmax_weight[best_v]:
                secondmax_weight[best_v] = int(W[best_x])
            # Refresh cached deltas only where a group maximum changed.
            if max_weight[old_color] != old_max_old_color:
                for x in range(size):
                    if W[x] >= max_weight[old_color]:
                        gammaArrive[x, old_color] = W[x] - max_weight[old_color]
                    else:
                        gammaArrive[x, old_color] = 0
            if max_weight[best_v] != old_max_best_v:
                for x in range(size):
                    if W[x] >= max_weight[best_v]:
                        gammaArrive[x, best_v] = W[x] - max_weight[best_v]
                    else:
                        gammaArrive[x, best_v] = 0
            if (
                old_second_max_old_color != secondmax_weight[old_color]
                or max_weight[old_color] != old_max_old_color
            ):
                for idx in range(N):
                    x = tGroup_color[old_color, idx]
                    if x != -1:
                        if W[x] == max_weight[old_color]:
                            gammaDepart[x] = secondmax_weight[old_color] - W[x]
                        else:
                            gammaDepart[x] = 0
            for idx in range(N):
                x = tGroup_color[best_v, idx]
                if x != -1:
                    if W[x] == max_weight[best_v]:
                        gammaDepart[x] = secondmax_weight[best_v] - W[x]
                    else:
                        gammaDepart[x] = 0
            # Randomized tabu tenure for the moved vertex.
            tabuTenure[best_x] = (
                int(alpha * size)
                + int(10 * xoroshiro128p_uniform_float32(rng_states, d))
                + iter_
            )
            if f < f_best:
                f_best = f
                score_best = score_wvcp
                nb_conflicts_best = nb_conflicts
                for a in range(size):
                    tColor[d, a] = tColor_local[a]
        vect_fit[d] = f_best
        vect_score[d] = score_best
        vect_conflicts[d] = nb_conflicts_best
@cuda.jit
def tabuWVCP_NoRandom_AFISA_heavyWeights(
    rng_states,
    D,
    max_iter,
    A,
    W,
    tColor,
    vect_fit,
    vect_score,
    vect_conflicts,
    alpha,
    phi,
):
    """Variant of tabuWVCP_NoRandom_AFISA for instances with large vertex
    weights: the weight-dependent scratch arrays (gammaDepart, gammaArrive,
    max_weight, secondmax_weight) use int16 instead of int8, and the
    best-move sentinel is widened to 99999. Logic is otherwise identical to
    tabuWVCP_NoRandom_AFISA.
    """
    # vect_nb_vois, voisin
    d = cuda.grid(1)
    if d < D:
        f = 0
        # Per-thread scratch; see tabuWVCP_NoRandom_AFISA for the meaning of
        # each structure. gamma stays int8 (it holds neighbor counts).
        tColor_local = nb.cuda.local.array((size), nb.int16)
        gamma = nb.cuda.local.array((size, k), nb.int8)
        gammaDepart = nb.cuda.local.array((size), nb.int16)
        gammaArrive = nb.cuda.local.array((size, k), nb.int16)
        max_weight = nb.cuda.local.array((k), nb.int16)
        secondmax_weight = nb.cuda.local.array((k), nb.int16)
        tabuTenure = nb.cuda.local.array((size), nb.int32)
        tGroup_color = nb.cuda.local.array((k, N), nb.int16)
        for c in range(k):
            max_weight[c] = 0
            secondmax_weight[c] = 0
            for i in range(N):
                tGroup_color[c, i] = -1
        # Load the coloring and register each vertex in its color group.
        for x in range(size):
            gammaDepart[x] = 0
            for y in range(k):
                gamma[x, y] = 0
                gammaArrive[x, y] = 0
            tColor_local[x] = int(tColor[d, x])
            idx = 0
            while tGroup_color[tColor_local[x], idx] != -1:
                idx += 1
            tGroup_color[tColor_local[x], idx] = x
            tabuTenure[x] = -1
        # Initial conflicts, WVCP score and the two top weights per color.
        nb_conflicts = 0
        score_wvcp = 0
        for x in range(size):
            # nb_vois = int(vect_nb_vois[x])
            # for i in range(nb_vois):
            # y = int(voisin[i])
            for y in range(x):
                if A[x, y] == 1:
                    gamma[x, tColor_local[y]] += 1
                    gamma[y, tColor_local[x]] += 1
                    if tColor_local[y] == tColor_local[x]:
                        nb_conflicts += 1
            if W[x] >= max_weight[tColor_local[x]]:
                score_wvcp += W[x] - max_weight[tColor_local[x]]
                secondmax_weight[tColor_local[x]] = max_weight[tColor_local[x]]
                max_weight[tColor_local[x]] = int(W[x])
            elif W[x] > secondmax_weight[tColor_local[x]]:
                secondmax_weight[tColor_local[x]] = int(W[x])
        # Precompute score deltas for entering/leaving color classes.
        for x in range(size):
            for c in range(k):
                if W[x] > max_weight[c]:
                    gammaArrive[x, c] = W[x] - max_weight[c]
                else:
                    gammaArrive[x, c] = 0
        for x in range(size):
            if W[x] == max_weight[tColor_local[x]]:
                gammaDepart[x] = secondmax_weight[tColor_local[x]] - W[x]
            else:
                gammaDepart[x] = 0
        f = score_wvcp + phi[d] * nb_conflicts
        f_best = f
        score_best = score_wvcp
        nb_conflicts_best = nb_conflicts
        for iter_ in range(max_iter):
            # Best-move scan with aspiration; sentinel widened for big deltas.
            best_delta = 99999
            best_delta_conflicts = -1
            best_delta_score = -1
            best_x = -1
            best_v = -1
            for x in range(size):
                v_x = tColor_local[x]
                for v in range(k):
                    if v != v_x:
                        delta_score = gammaArrive[x, v] + gammaDepart[x]
                        delta_conflicts = gamma[x, v] - gamma[x, v_x]
                        delta = delta_score + phi[d] * delta_conflicts
                        if tabuTenure[x] <= iter_ or delta + f < f_best:
                            if delta < best_delta:
                                best_x = x
                                best_v = v
                                best_delta = delta
                                best_delta_conflicts = delta_conflicts
                                best_delta_score = delta_score
            # Apply the move: recolor best_x from old_color to best_v.
            f += best_delta
            score_wvcp += best_delta_score
            nb_conflicts += best_delta_conflicts
            old_color = tColor_local[best_x]
            for y in range(size):
                if A[best_x, y] == 1:
                    gamma[y, old_color] -= 1
                    gamma[y, best_v] += 1
            tColor_local[best_x] = best_v
            # Recompute the two top weights of the emptied color class.
            old_max_old_color = max_weight[old_color]
            old_second_max_old_color = secondmax_weight[old_color]
            max_weight[old_color] = 0
            secondmax_weight[old_color] = 0
            for idx in range(N):
                x = tGroup_color[old_color, idx]
                if x == best_x:
                    tGroup_color[old_color, idx] = -1
                elif x != -1:
                    if W[x] >= max_weight[old_color]:
                        secondmax_weight[old_color] = max_weight[old_color]
                        max_weight[old_color] = int(W[x])
                    elif W[x] > secondmax_weight[old_color]:
                        secondmax_weight[old_color] = int(W[x])
            # Register best_x in its new group and refresh that group's tops.
            idx = 0
            while tGroup_color[best_v, idx] != -1:
                idx += 1
            tGroup_color[best_v, idx] = best_x
            old_max_best_v = max_weight[best_v]
            if W[best_x] >= max_weight[best_v]:
                secondmax_weight[best_v] = max_weight[best_v]
                max_weight[best_v] = int(W[best_x])
            elif W[best_x] > secondmax_weight[best_v]:
                secondmax_weight[best_v] = int(W[best_x])
            # Refresh cached deltas only where a group maximum changed.
            if max_weight[old_color] != old_max_old_color:
                for x in range(size):
                    if W[x] >= max_weight[old_color]:
                        gammaArrive[x, old_color] = W[x] - max_weight[old_color]
                    else:
                        gammaArrive[x, old_color] = 0
            if max_weight[best_v] != old_max_best_v:
                for x in range(size):
                    if W[x] >= max_weight[best_v]:
                        gammaArrive[x, best_v] = W[x] - max_weight[best_v]
                    else:
                        gammaArrive[x, best_v] = 0
            if (
                old_second_max_old_color != secondmax_weight[old_color]
                or max_weight[old_color] != old_max_old_color
            ):
                for idx in range(N):
                    x = tGroup_color[old_color, idx]
                    if x != -1:
                        if W[x] == max_weight[old_color]:
                            gammaDepart[x] = secondmax_weight[old_color] - W[x]
                        else:
                            gammaDepart[x] = 0
            for idx in range(N):
                x = tGroup_color[best_v, idx]
                if x != -1:
                    if W[x] == max_weight[best_v]:
                        gammaDepart[x] = secondmax_weight[best_v] - W[x]
                    else:
                        gammaDepart[x] = 0
            # Randomized tabu tenure for the moved vertex.
            tabuTenure[best_x] = (
                int(alpha * size)
                + int(10 * xoroshiro128p_uniform_float32(rng_states, d))
                + iter_
            )
            if f < f_best:
                f_best = f
                score_best = score_wvcp
                nb_conflicts_best = nb_conflicts
                for a in range(size):
                    tColor[d, a] = tColor_local[a]
        vect_fit[d] = f_best
        vect_score[d] = score_best
        vect_conflicts[d] = nb_conflicts_best
|
import re
from baserow.core.utils import split_comma_separated_string
from baserow.contrib.database.fields.models import Field
def get_include_exclude_field_ids(table, include=None, exclude=None):
    """
    Resolve the comma separated ``include``/``exclude`` parameters into a
    list of field ids belonging to the given table.

    :param table: The table where to select the fields from. Field id's that are
        not in the table won't be included.
    :type table: Table
    :param include: The field ids that must be included. Only the provided ones
        are going to be in the returned list. Multiple can be provided
        separated by comma.
    :type include: Optional[str]
    :param exclude: The field ids that must be excluded. Only the ones that are
        not provided are going to be in the returned list. Multiple can be
        provided separated by comma.
    :type exclude: Optional[str]
    :return: None when neither parameter was given, otherwise the matching ids.
    :rtype: None or list
    """
    fields = get_include_exclude_fields(table, include, exclude)
    if include is None and exclude is None:
        return None
    if not fields:
        return []
    return [field.get("id") for field in fields.values()]
def get_include_exclude_fields(
    table, include=None, exclude=None, user_field_names=False
):
    """
    Build a queryset of the table's fields restricted by the comma separated
    ``include``/``exclude`` parameters.

    :param table: The table where to select the fields from. Field id's that are
        not in the table won't be included.
    :type table: Table
    :param include: The fields that must be included. Only the provided ones
        are going to be in the returned queryset. Multiple can be provided
        separated by comma.
    :type include: Optional[str]
    :param exclude: The fields that must be excluded. Only the ones that are not
        provided are going to be in the returned queryset. Multiple can be
        provided separated by comma.
    :type exclude: Optional[str]
    :param user_field_names: If true then the include and exclude parameters are
        treated as a comma separated list of user field names instead of id's.
    :type user_field_names: bool
    :return: A Field QuerySet containing the allowed fields, or None when
        neither parameter selects anything.
    :rtype: QuerySet
    """
    queryset = Field.objects.filter(table=table)
    if user_field_names:
        extractor = extract_field_names_from_string
        lookup = "name__in"
    else:
        extractor = extract_field_ids_from_string
        lookup = "id__in"
    included = extractor(include)
    excluded = extractor(exclude)
    if not included and not excluded:
        return None
    if included:
        queryset = queryset.filter(**{lookup: included})
    if excluded:
        queryset = queryset.exclude(**{lookup: excluded})
    return queryset
def extract_field_names_from_string(value):
    """
    Split a comma separated string of field names into a list of individual
    names. Field names that themselves contain commas can be escaped with
    quotes.

    :param value: The string to split; may be None or empty.
    :return: A list of field names; empty when no value was given.
    """
    return split_comma_separated_string(value) if value else []
def extract_field_ids_from_string(value):
    """
    Extract numeric field ids from a comma separated string.

    Tokens without any digit are skipped; in the remaining tokens every non
    digit character is stripped, so both '1,2' and 'field_1,field_2' yield
    [1, 2].

    :param value: A string containing multiple ids separated by comma.
    :type value: str
    :return: A list containing the field ids as integers.
    :rtype: list
    """
    if not value:
        return []
    ids = []
    for token in value.split(","):
        if any(char.isdigit() for char in token):
            ids.append(int(re.sub("[^0-9]", "", str(token))))
    return ids
|
import os
import time
from six.moves.urllib.parse import urlparse, urljoin, urlsplit, parse_qs
from conans.client.remote_manager import check_compressed_files
from conans.client.rest.differ import diff_snapshots
from conans.client.rest.rest_client_common import RestCommonMethods
from conans.client.rest.uploader_downloader import Downloader, Uploader
from conans.errors import NotFoundException, ConanException
from conans.model.info import ConanInfo
from conans.model.manifest import FileTreeManifest
from conans.paths import CONAN_MANIFEST, CONANINFO, EXPORT_SOURCES_TGZ_NAME, EXPORT_TGZ_NAME, \
PACKAGE_TGZ_NAME
from conans.util.files import decode_text, md5sum
from conans.util.log import logger
def complete_url(base_url, url):
    """ Ensures that an url is absolute by completing relative urls with
    the remote url. urls that are already absolute are not modified.
    """
    is_absolute = bool(urlparse(url).netloc)
    return url if is_absolute else urljoin(base_url, url)
class RestV1Methods(RestCommonMethods):
@property
def remote_api_url(self):
return "%s/v1" % self.remote_url.rstrip("/")
def _download_files(self, file_urls, output=None):
"""
:param: file_urls is a dict with {filename: url}
Its a generator, so it yields elements for memory performance
"""
downloader = Downloader(self.requester, output, self.verify_ssl)
# Take advantage of filenames ordering, so that conan_package.tgz and conan_export.tgz
# can be < conanfile, conaninfo, and sent always the last, so smaller files go first
for filename, resource_url in sorted(file_urls.items(), reverse=True):
if output:
output.writeln("Downloading %s" % filename)
auth, _ = self._file_server_capabilities(resource_url)
contents = downloader.download(resource_url, auth=auth)
if output:
output.writeln("")
yield os.path.normpath(filename), contents
def _file_server_capabilities(self, resource_url):
auth = None
dedup = False
urltokens = urlsplit(resource_url)
query_string = urltokens[3]
parsed_string_dict = parse_qs(query_string)
if "signature" not in parsed_string_dict and "Signature" not in parsed_string_dict:
# If monolithic server, we can use same auth, and server understand dedup
auth = self.auth
dedup = True
return auth, dedup
def get_conan_manifest(self, conan_reference):
"""Gets a FileTreeManifest from conans"""
# Obtain the URLs
url = "%s/conans/%s/digest" % (self.remote_api_url, "/".join(conan_reference))
urls = self._get_file_to_url_dict(url)
# Get the digest
contents = self._download_files(urls)
# Unroll generator and decode shas (plain text)
contents = {key: decode_text(value) for key, value in dict(contents).items()}
return FileTreeManifest.loads(contents[CONAN_MANIFEST])
def get_package_manifest(self, package_reference):
"""Gets a FileTreeManifest from a package"""
# Obtain the URLs
url = "%s/conans/%s/packages/%s/digest" % (self.remote_api_url,
"/".join(package_reference.conan),
package_reference.package_id)
urls = self._get_file_to_url_dict(url)
# Get the digest
contents = self._download_files(urls)
# Unroll generator and decode shas (plain text)
contents = {key: decode_text(value) for key, value in dict(contents).items()}
return FileTreeManifest.loads(contents[CONAN_MANIFEST])
def get_package_info(self, package_reference):
"""Gets a ConanInfo file from a package"""
url = "%s/conans/%s/packages/%s/download_urls" % (self.remote_api_url,
"/".join(package_reference.conan),
package_reference.package_id)
urls = self._get_file_to_url_dict(url)
if not urls:
raise NotFoundException("Package not found!")
if CONANINFO not in urls:
raise NotFoundException("Package %s doesn't have the %s file!" % (package_reference,
CONANINFO))
# Get the info (in memory)
contents = self._download_files({CONANINFO: urls[CONANINFO]})
# Unroll generator and decode shas (plain text)
contents = {key: decode_text(value) for key, value in dict(contents).items()}
return ConanInfo.loads(contents[CONANINFO])
def _get_file_to_url_dict(self, url, data=None):
"""Call to url and decode the json returning a dict of {filepath: url} dict
converting the url to a complete url when needed"""
urls = self.get_json(url, data=data)
return {filepath: complete_url(self.remote_url, url) for filepath, url in urls.items()}
def _upload_files(self, file_urls, files, output, retry, retry_wait):
t1 = time.time()
failed = []
uploader = Uploader(self.requester, output, self.verify_ssl)
# Take advantage of filenames ordering, so that conan_package.tgz and conan_export.tgz
# can be < conanfile, conaninfo, and sent always the last, so smaller files go first
for filename, resource_url in sorted(file_urls.items(), reverse=True):
output.rewrite_line("Uploading %s" % filename)
auth, dedup = self._file_server_capabilities(resource_url)
try:
response = uploader.upload(resource_url, files[filename], auth=auth, dedup=dedup,
retry=retry, retry_wait=retry_wait, headers=self._put_headers)
output.writeln("")
if not response.ok:
output.error("\nError uploading file: %s, '%s'" % (filename, response.content))
failed.append(filename)
else:
pass
except Exception as exc:
output.error("\nError uploading file: %s, '%s'" % (filename, exc))
failed.append(filename)
if failed:
raise ConanException("Execute upload again to retry upload the failed files: %s"
% ", ".join(failed))
else:
logger.debug("\nAll uploaded! Total time: %s\n" % str(time.time() - t1))
def _download_files_to_folder(self, file_urls, to_folder):
    """Download every url in *file_urls* into *to_folder*.

    :param file_urls: dict {filename: download_url}
    :param to_folder: destination directory on disk
    :return: dict {filename: absolute path of the downloaded file}

    Files are written to disk chunk by chunk, so only chunks are kept
    in memory.
    """
    downloader = Downloader(self.requester, self._output, self.verify_ssl)
    downloaded = {}
    # Reverse-sorted names push the big conan_package.tgz / conan_export.tgz
    # payloads last, so the small metadata files are transferred first.
    for filename, resource_url in sorted(file_urls.items(), reverse=True):
        if self._output:
            self._output.writeln("Downloading %s" % filename)
        auth, _ = self._file_server_capabilities(resource_url)
        target_path = os.path.join(to_folder, filename)
        downloader.download(resource_url, target_path, auth=auth)
        if self._output:
            self._output.writeln("")
        downloaded[filename] = target_path
    return downloaded
def get_recipe(self, conan_reference, dest_folder):
    """Download the recipe files (excluding sources) into *dest_folder*."""
    urls = self._get_recipe_urls(conan_reference)
    # Sources are fetched separately by get_recipe_sources().
    urls.pop(EXPORT_SOURCES_TGZ_NAME, None)
    check_compressed_files(EXPORT_TGZ_NAME, urls)
    downloaded = self._download_files_to_folder(urls, dest_folder)
    return downloaded, conan_reference
def get_recipe_sources(self, conan_reference, dest_folder):
    """Download only the exported-sources tarball into *dest_folder*.

    :return: {filename: abs_path} of downloaded files, or None when the
             remote recipe has no sources tarball.
    """
    urls = self._get_recipe_urls(conan_reference)
    check_compressed_files(EXPORT_SOURCES_TGZ_NAME, urls)
    if EXPORT_SOURCES_TGZ_NAME not in urls:
        return None
    sources_only = {EXPORT_SOURCES_TGZ_NAME: urls[EXPORT_SOURCES_TGZ_NAME]}
    return self._download_files_to_folder(sources_only, dest_folder)
def _get_recipe_urls(self, conan_reference):
    """Return {filename: url} for every file of the recipe."""
    reference_path = "/".join(conan_reference)
    url = "%s/conans/%s/download_urls" % (self.remote_api_url, reference_path)
    return self._get_file_to_url_dict(url)
def get_package(self, package_reference, dest_folder):
    """Download all files of a binary package into *dest_folder*."""
    urls = self._get_package_urls(package_reference)
    check_compressed_files(PACKAGE_TGZ_NAME, urls)
    return self._download_files_to_folder(urls, dest_folder)
def _get_package_urls(self, package_reference):
    """Return {filename: url} for every file of a binary package.

    :raises NotFoundException: when the package exposes no files at all.
    """
    url = "%s/conans/%s/packages/%s/download_urls" % (
        self.remote_api_url,
        "/".join(package_reference.conan),
        package_reference.package_id)
    urls = self._get_file_to_url_dict(url)
    if not urls:
        raise NotFoundException("Package not found!")
    return urls
def upload_recipe(self, conan_reference, the_files, retry, retry_wait, ignore_deleted_file,
                  no_overwrite):
    """
    the_files: dict with relative_path: content

    Uploads only the files that are new or modified with respect to the
    remote snapshot, and removes from the server the files deleted locally.
    :returns: (something_uploaded_or_deleted, conan_reference)
    """
    self.check_credentials()
    # Get the remote snapshot
    remote_snapshot = self._get_conan_snapshot(conan_reference)
    local_snapshot = {filename: md5sum(abs_path) for filename, abs_path in the_files.items()}
    # Get the diff
    new, modified, deleted = diff_snapshots(local_snapshot, remote_snapshot)
    if ignore_deleted_file and ignore_deleted_file in deleted:
        deleted.remove(ignore_deleted_file)
    # Nothing relevant changed (at most the manifest differs): skip the upload.
    if not new and not deleted and modified in (["conanmanifest.txt"], []):
        return False, conan_reference
    if no_overwrite and remote_snapshot:
        if no_overwrite in ("all", "recipe"):
            raise ConanException("Local recipe is different from the remote recipe. "
                                 "Forbidden overwrite")
    # Normalize Windows separators: server-side paths always use "/".
    files_to_upload = {filename.replace("\\", "/"): the_files[filename]
                       for filename in new + modified}
    if files_to_upload:
        # Get the upload urls
        url = "%s/conans/%s/upload_urls" % (self.remote_api_url, "/".join(conan_reference))
        # File sizes are posted so the server can pre-allocate/authorize uploads.
        filesizes = {filename.replace("\\", "/"): os.stat(abs_path).st_size
                     for filename, abs_path in files_to_upload.items()}
        urls = self._get_file_to_url_dict(url, data=filesizes)
        self._upload_files(urls, files_to_upload, self._output, retry, retry_wait)
    if deleted:
        self._remove_conanfile_files(conan_reference, deleted)
    return (files_to_upload or deleted), conan_reference
def upload_package(self, package_reference, the_files, retry, retry_wait, no_overwrite):
    """
    basedir: Base directory with the files to upload (for read the files in disk)
    relative_files: relative paths to upload

    Uploads only new/modified files with respect to the remote package
    snapshot and removes the ones deleted locally.
    :returns: truthy when something was uploaded or deleted, else False
    """
    self.check_credentials()
    t1 = time.time()
    # Get the remote snapshot
    remote_snapshot = self._get_package_snapshot(package_reference)
    local_snapshot = {filename: md5sum(abs_path) for filename, abs_path in the_files.items()}
    # Get the diff
    new, modified, deleted = diff_snapshots(local_snapshot, remote_snapshot)
    # Nothing relevant changed (at most the manifest differs): skip the upload.
    if not new and not deleted and modified in (["conanmanifest.txt"], []):
        return False
    if no_overwrite and remote_snapshot:
        if no_overwrite == "all":
            raise ConanException("Local package is different from the remote package. "
                                 "Forbidden overwrite")
    files_to_upload = {filename: the_files[filename] for filename in new + modified}
    if files_to_upload:  # Obtain upload urls
        url = "%s/conans/%s/packages/%s/upload_urls" % (self.remote_api_url,
                                                        "/".join(package_reference.conan),
                                                        package_reference.package_id)
        filesizes = {filename: os.stat(abs_path).st_size for filename,
                     abs_path in files_to_upload.items()}
        self._output.rewrite_line("Requesting upload permissions...")
        urls = self._get_file_to_url_dict(url, data=filesizes)
        self._output.rewrite_line("Requesting upload permissions...Done!")
        self._output.writeln("")
        self._upload_files(urls, files_to_upload, self._output, retry, retry_wait)
    if deleted:
        self._remove_package_files(package_reference, deleted)
    logger.debug("====> Time rest client upload_package: %f" % (time.time() - t1))
    return files_to_upload or deleted
def _get_conan_snapshot(self, reference):
    """Return the server snapshot {normalized_filename: md5} of a recipe.

    A recipe missing on the server yields an empty snapshot instead of
    raising.
    """
    url = "%s/conans/%s" % (self.remote_api_url, '/'.join(reference))
    try:
        snapshot = self.get_json(url)
    except NotFoundException:
        snapshot = {}
    # Normalize separators so the snapshot compares cleanly on any OS.
    return {os.path.normpath(name): md5 for name, md5 in snapshot.items()}
def _get_package_snapshot(self, package_reference):
    """Return the server snapshot {normalized_filename: md5} of a package.

    A package missing on the server yields an empty snapshot instead of
    raising.
    """
    url = "%s/conans/%s/packages/%s" % (self.remote_api_url,
                                        "/".join(package_reference.conan),
                                        package_reference.package_id)
    try:
        snapshot = self.get_json(url)
    except NotFoundException:
        snapshot = {}
    # Normalize separators so the snapshot compares cleanly on any OS.
    return {os.path.normpath(name): md5 for name, md5 in snapshot.items()}
def get_path(self, conan_reference, package_id, path):
    """Gets a file content or a directory list.

    :param conan_reference: recipe reference (iterable of name/version/...)
    :param package_id: optional binary package id; None/empty targets the recipe
    :param path: path inside the recipe/package ("." for the root)
    :return: sorted list of entries when *path* is a directory, or the
             decoded text content when it is a file
    :raises NotFoundException: recipe/package/path not found
    """
    # Choose the recipe or the package download_urls endpoint.
    if not package_id:
        url = "%s/conans/%s/download_urls" % (self.remote_api_url, "/".join(conan_reference))
    else:
        url = "%s/conans/%s/packages/%s/download_urls" % (self.remote_api_url,
                                                          "/".join(conan_reference),
                                                          package_id)
    try:
        urls = self._get_file_to_url_dict(url)
    except NotFoundException:
        if package_id:
            raise NotFoundException("Package %s:%s not found" % (conan_reference, package_id))
        else:
            raise NotFoundException("Recipe %s not found" % str(conan_reference))

    def is_dir(the_path):
        # "." is the root, always a directory.
        if the_path == ".":
            return True
        for the_file in urls:
            if the_path == the_file:
                return False  # exact filename match: it is a file
            elif the_file.startswith(the_path):
                return True  # prefix of some file: it is a directory
        raise NotFoundException("The specified path doesn't exist")

    if is_dir(path):
        ret = []
        for the_file in urls:
            if path == "." or the_file.startswith(path):
                # NOTE(review): slicing from len(path)-1 keeps the last char of
                # the matched prefix — presumably the trailing separator; confirm
                # behavior for paths that do not end with "/".
                tmp = the_file[len(path)-1:].split("/", 1)[0]
                if tmp not in ret:
                    ret.append(tmp)
        return sorted(ret)
    else:
        # It is a file: download it in memory and return it as text.
        downloader = Downloader(self.requester, None, self.verify_ssl)
        auth, _ = self._file_server_capabilities(urls[path])
        content = downloader.download(urls[path], auth=auth)
        return decode_text(content)
|
__author__ = '<NAME>'
__license__ = "MIT"
# Plotting script: renders hexbin maps over the Trentino region for the
# telecom, precipitation, energy, news and social-pulse datasets, saving
# each map as a PDF.
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
from math import sqrt
import datetime
from mpl_toolkits.basemap import Basemap
from matplotlib.colors import LinearSegmentedColormap
# Set the plot styles
# Timestamps in the input CSVs are epoch milliseconds.
parse = lambda x: datetime.datetime.fromtimestamp(float(x)/1000)
fig_width_pt = 345  # Get this from LaTeX using \showthe\columnwidth
inches_per_pt = 1.0/72.27  # Convert pt to inches
golden_mean = (sqrt(5)-1.0)/2.0  # Aesthetic ratio
fig_width = fig_width_pt*inches_per_pt  # width in inches
fig_height = fig_width*golden_mean  # height in inches
fig_size = [fig_width, fig_height]
sns.set_style("ticks")
sns.set_context("paper")
# Import the dataset Telecommunications
# One file per day (2013-11-04 .. 2013-11-10); aggregate per (hour, weekday, cell).
sliceSum = pd.DataFrame({})
for index in range(4,11):
    sliceSum2 = pd.read_csv('nature/sms-call-internet-tn-2013-11-'+str(index).zfill(2) +'.txt', sep='\t', encoding="utf-8-sig", names=['CellID', 'datetime', 'countrycode', 'smsin', 'smsout', 'callin', 'callout', 'internet'], parse_dates=['datetime'], date_parser=parse)#, parse_dates=['datetime'])
    sliceSum2 = sliceSum2.set_index('datetime')
    sliceSum2['hour'] = sliceSum2.index.hour
    sliceSum2['weekday'] = sliceSum2.index.weekday
    sliceSum2 = sliceSum2.groupby(['hour','weekday', 'CellID'], as_index=False).sum()
    # NOTE(review): DataFrame.append is deprecated in modern pandas;
    # pd.concat is the replacement if this script is ever upgraded.
    sliceSum = sliceSum.append(sliceSum2)
# Flatten (weekday, hour) into a single 0..167 weekly index.
sliceSum['idx'] = sliceSum['hour'] + (sliceSum['weekday']*24)
sliceSum.head()
# Import the dataset - precipitations
precipitation_df = pd.read_csv('nature/precipitation-trentino.csv', sep=',', names=['datetime','CellID','intensity'], encoding="utf-8-sig", parse_dates=['datetime'], date_parser=parse)
precipitation_df = precipitation_df.set_index(['datetime'], drop=False)
# Mean precipitation intensity per cell over the whole period.
precipitation_df = precipitation_df.groupby(['CellID'], as_index=False).mean()
precipitation_df.head()
# Import the dataset - news
news_df = pd.read_csv('nature/news.csv', sep=',', encoding="utf-8-sig", parse_dates=['date'])
news_df.head()
# Import the grid, obtained from the geojson. The CSV has X, Y coordinates and CellID
csv_df = pd.read_csv('nature/grid_trentino.csv')
# Merge csv_df with Telecommunications, grouped by cellID
merge = pd.merge(sliceSum.groupby(['CellID'], as_index=False).sum(), csv_df, on='CellID')
# Extract the points for hexbin
points_x = np.array([x['X'] for i,x in merge.iterrows()])
points_y = np.array([x['Y'] for i,x in merge.iterrows()])
c = np.array([x['internet'] for i,x in merge.iterrows()])
# Trentino's boundingbox
# a = lower-left (lat, lon); b = upper-right (lat, lon)
a = (45.6730682227551, 10.4521594968354)
b = (46.5327699992773, 11.9627133503828)
# Trentino shapefile
# http://dati.trentino.it/dataset/limite-comprensoriale-027140/resource/ff1f1687-3f8f-427e-84d9-cf40c8b9b98a
m = Basemap(lat_0 = (a[0]+b[0])/2, lon_0 = (a[1]+b[1])/2, epsg=4326, llcrnrlon=a[1],llcrnrlat=a[0],urcrnrlon=b[1],urcrnrlat=b[0],)
m.readshapefile('nature/amm','Trentino_shapefile', color='0.35')
# Custom colormap registered as "skil", reused by every hexbin below.
cmap = LinearSegmentedColormap.from_list("skil", sns.color_palette("RdBu_r", 7)[1:])
plt.register_cmap(cmap=cmap)
m.hexbin(points_x, points_y, cmap="skil", gridsize=50, C=c, bins='log', mincnt=1)
sns.despine(left=True, bottom=True)
plt.savefig('map.pdf', format='pdf', dpi=330, bbox_inches='tight')
# Import the dataset - social pulse, obtained from the geojson
social_df = pd.read_csv('nature/result2.csv', sep=',', encoding="utf-8-sig", parse_dates=['created'])
points_x = np.array([x['geomPoint.geom/coordinates/0'] for i,x in social_df.iterrows()])
points_y = np.array([x['geomPoint.geom/coordinates/1'] for i,x in social_df.iterrows()])
m = Basemap(lat_0 = (a[0]+b[0])/2, lon_0 = (a[1]+b[1])/2, epsg=4326, llcrnrlon=a[1], llcrnrlat=a[0], urcrnrlon=b[1], urcrnrlat=b[0],)
m.readshapefile('nature/amm', 'Trentino_shapefile', color='0.35')
m.hexbin(points_x, points_y, cmap="skil", gridsize=50, bins='log', mincnt=1)
sns.despine(left=True, bottom=True)
plt.savefig('map_social.pdf', format='pdf', dpi=330,bbox_inches='tight')
# Energy map
line_df = pd.read_csv('nature/line.csv', sep=',', encoding="utf-8-sig")
line_df['CellID'] = line_df['SQUAREID']
merge = pd.merge(line_df.groupby(['CellID'], as_index=False).sum(), csv_df, on='CellID')
points_x = np.array([x['X'] for i,x in merge.iterrows()])
points_y = np.array([x['Y'] for i,x in merge.iterrows()])
c = np.array([x['NR_UBICAZIONI'] for i,x in merge.iterrows()])
m = Basemap(lat_0 = (a[0]+b[0])/2, lon_0 = (a[1]+b[1])/2, epsg=4326, llcrnrlon=a[1],llcrnrlat=a[0],urcrnrlon=b[1],urcrnrlat=b[0],)
m.readshapefile('nature/amm','Trentino_shapefile', color='0.35')
m.hexbin(points_x, points_y, cmap="skil", gridsize=50, bins='log', C=c, mincnt=1)
sns.despine(left=True, bottom=True)
plt.savefig('map_line.pdf', format='pdf', dpi=330,bbox_inches='tight')
# Precipitation map
merge = pd.merge(precipitation_df, csv_df, on='CellID')
points_x = np.array([x['X'] for i,x in merge.iterrows()])
points_y = np.array([x['Y'] for i,x in merge.iterrows()])
c = np.array([x['intensity'] for i,x in merge.iterrows()])
m = Basemap(lat_0 = (a[0]+b[0])/2, lon_0 = (a[1]+b[1])/2, epsg=4326, llcrnrlon=a[1], llcrnrlat=a[0], urcrnrlon=b[1], urcrnrlat=b[0],)
m.readshapefile('nature/amm', 'Trentino_shapefile', color='0.35')
m.hexbin(points_x, points_y, cmap="skil", gridsize=50, bins='log', C=c, mincnt=1)
sns.despine(left=True, bottom=True)
plt.savefig('map_precipitation.pdf', format='pdf', dpi=330, bbox_inches='tight')
# News map
points_x = np.array([x['geomPoint.geom/coordinates/0'] for i,x in news_df.iterrows()])
points_y = np.array([x['geomPoint.geom/coordinates/1'] for i,x in news_df.iterrows()])
m = Basemap(lat_0 = (a[0]+b[0])/2, lon_0 = (a[1]+b[1])/2, epsg=4326, llcrnrlon=a[1],llcrnrlat=a[0],urcrnrlon=b[1],urcrnrlat=b[0],)
m.readshapefile('nature/amm','Trentino_shapefile', color='0.35')
m.hexbin(points_x, points_y, cmap="skil", gridsize=35, bins='log', mincnt=1)
sns.despine(left=True, bottom=True)
plt.savefig('map_news.pdf', format='pdf', dpi=330,bbox_inches='tight')
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
__author__ = 'ipetrash'
import ctypes
def get_file_icon(path, large=True):
    """Return the HICON handle for the icon of *path* via SHGetFileInfoW.

    Windows-only. The caller owns the returned handle and should release
    it with DestroyIcon() when done.
    """
    import ctypes
    SHGetFileInfo = ctypes.windll.shell32.SHGetFileInfoW
    # SHGetFileInfoW flag constants
    SHGFI_ICON = 0x100
    SHGFI_SYSICONINDEX = 0x4000
    SHGFI_LARGEICON = 0x0
    SHGFI_SMALLICON = 0x1
    class SHFILEINFO(ctypes.Structure):
        # Mirrors the Win32 SHFILEINFOW structure (wide-char variant).
        _fields_ = [
            ("hIcon", ctypes.c_void_p),
            ("iIcon", ctypes.c_int32),
            ("dwAttributes", ctypes.c_uint32),
            ("szDisplayName", ctypes.c_wchar * 260),
            ("szTypeName", ctypes.c_wchar * 80)]
    info = SHFILEINFO()
    flags = SHGFI_ICON | SHGFI_SYSICONINDEX
    flags |= SHGFI_LARGEICON if large else SHGFI_SMALLICON
    # COM must be initialized on this thread before calling SHGetFileInfo.
    COINIT_APARTMENTTHREADED = 0x2
    COINIT_DISABLE_OLE1DDE = 0x4
    CoInitializeEx = ctypes.windll.ole32.CoInitializeEx
    CoInitializeEx(None, COINIT_APARTMENTTHREADED | COINIT_DISABLE_OLE1DDE)
    rc = SHGetFileInfo(path, 0, ctypes.byref(info), ctypes.sizeof(info), flags)
    # NOTE(review): this condition looks inverted — it bails out only when the
    # call failed AND an icon index was filled in; plain `if not rc` is
    # presumably the intent. Confirm before changing.
    if not rc and info.iIcon:
        return
    return info.hIcon
from ctypes import wintypes
class BITMAPINFOHEADER(ctypes.Structure):
    """ctypes mirror of the Win32 BITMAPINFOHEADER structure (wingdi.h)."""
    _fields_ = [
        ("biSize", wintypes.DWORD),
        ("biWidth", ctypes.c_long),
        ("biHeight", ctypes.c_long),
        ("biPlanes", wintypes.WORD),
        ("biBitCount", wintypes.WORD),
        ("biCompression", wintypes.DWORD),
        ("biSizeImage", wintypes.DWORD),
        ("biXPelsPerMeter", ctypes.c_long),
        ("biYPelsPerMeter", ctypes.c_long),
        ("biClrUsed", wintypes.DWORD),
        ("biClrImportant", wintypes.DWORD)
    ]
class BITMAPINFO(ctypes.Structure):
    """ctypes mirror of Win32 BITMAPINFO (header only; no color table)."""
    _fields_ = [
        ("bmiHeader", BITMAPINFOHEADER)
    ]
def qt_fromWinHBITMAP(hdc, h_bitmap, w, h):
    """Convert a Win32 HBITMAP into a PySide QImage (w x h, ARGB32).

    Python port of Qt's internal helper; the original C++ is kept below
    for reference.

    Original:
    static QImage qt_fromWinHBITMAP(HDC hdc, HBITMAP bitmap, int w, int h)
    {
    BITMAPINFO bmi;
    memset(&bmi, 0, sizeof(bmi));
    bmi.bmiHeader.biSize = sizeof(BITMAPINFOHEADER);
    bmi.bmiHeader.biWidth = w;
    bmi.bmiHeader.biHeight = -h;
    bmi.bmiHeader.biPlanes = 1;
    bmi.bmiHeader.biBitCount = 32;
    bmi.bmiHeader.biCompression = BI_RGB;
    bmi.bmiHeader.biSizeImage = w * h * 4;
    QImage image(w, h, QImage::Format_ARGB32_Premultiplied);
    if (image.isNull())
    return image;
    // Get bitmap bits
    uchar *data = (uchar *) qMalloc(bmi.bmiHeader.biSizeImage);
    if (GetDIBits(hdc, bitmap, 0, h, data, &bmi, DIB_RGB_COLORS)) {
    // Create image and copy data into image.
    for (int y=0; y<h; ++y) {
    void *dest = (void *) image.scanLine(y);
    void *src = data + y * image.bytesPerLine();
    memcpy(dest, src, image.bytesPerLine());
    }
    } else {
    qWarning("qt_fromWinHBITMAP(), failed to get bitmap bits");
    }
    qFree(data);
    return image;
    }
    """
    import ctypes
    GetDIBits = ctypes.windll.gdi32.GetDIBits
    DIB_RGB_COLORS = 0
    BI_RGB = 0
    # Describe the source as 32-bit uncompressed; negative height requests a
    # top-down DIB so row 0 is the top scanline.
    bitmapInfo = BITMAPINFO()
    bitmapInfo.bmiHeader.biSize = ctypes.sizeof(BITMAPINFOHEADER)
    bitmapInfo.bmiHeader.biWidth = w
    bitmapInfo.bmiHeader.biHeight = -h
    bitmapInfo.bmiHeader.biPlanes = 1
    bitmapInfo.bmiHeader.biBitCount = 32
    bitmapInfo.bmiHeader.biCompression = BI_RGB
    bitmapInfo.bmiHeader.biSizeImage = w * h * 4
    from PySide.QtGui import QImage
    image = QImage(w, h, QImage.Format_ARGB32_Premultiplied)
    if image.isNull():
        return image
    # Get bitmap bits
    data = ctypes.create_string_buffer(bitmapInfo.bmiHeader.biSizeImage)
    if GetDIBits(hdc, h_bitmap, 0, h, ctypes.byref(data), ctypes.byref(bitmapInfo), DIB_RGB_COLORS):
        # Create image and copy data into image, one scanline at a time.
        for y in range(h):
            dest = image.scanLine(y)
            src = data[y * image.bytesPerLine(): y * image.bytesPerLine() + image.bytesPerLine()]
            for i in range(image.bytesPerLine()):
                dest[i] = src[i]
    else:
        # qWarning("qt_fromWinHBITMAP(), failed to get bitmap bits");
        print("qt_fromWinHBITMAP(), failed to get bitmap bits")
    return image
def fromWinHICON(h_icon):
    """Render a Win32 HICON into a PySide QImage.

    Python port of QPixmap::fromWinHICON; the alpha-channel fixup of the
    original is currently disabled (see NOTE below). The original C++ is
    kept for reference.

    Original:
    QPixmap QPixmap::fromWinHICON(HICON icon)
    {
    bool foundAlpha = false;
    HDC screenDevice = GetDC(0);
    HDC hdc = CreateCompatibleDC(screenDevice);
    ReleaseDC(0, screenDevice);
    ICONINFO iconinfo;
    bool result = GetIconInfo(icon, &iconinfo); //x and y Hotspot describes the icon center
    if (!result)
    qWarning("QPixmap::fromWinHICON(), failed to GetIconInfo()");
    int w = iconinfo.xHotspot * 2;
    int h = iconinfo.yHotspot * 2;
    BITMAPINFOHEADER bitmapInfo;
    bitmapInfo.biSize = sizeof(BITMAPINFOHEADER);
    bitmapInfo.biWidth = w;
    bitmapInfo.biHeight = h;
    bitmapInfo.biPlanes = 1;
    bitmapInfo.biBitCount = 32;
    bitmapInfo.biCompression = BI_RGB;
    bitmapInfo.biSizeImage = 0;
    bitmapInfo.biXPelsPerMeter = 0;
    bitmapInfo.biYPelsPerMeter = 0;
    bitmapInfo.biClrUsed = 0;
    bitmapInfo.biClrImportant = 0;
    DWORD* bits;
    HBITMAP winBitmap = CreateDIBSection(hdc, (BITMAPINFO*)&bitmapInfo, DIB_RGB_COLORS, (VOID**)&bits, NULL, 0);
    HGDIOBJ oldhdc = (HBITMAP)SelectObject(hdc, winBitmap);
    DrawIconEx( hdc, 0, 0, icon, iconinfo.xHotspot * 2, iconinfo.yHotspot * 2, 0, 0, DI_NORMAL);
    QImage image = qt_fromWinHBITMAP(hdc, winBitmap, w, h);
    for (int y = 0 ; y < h && !foundAlpha ; y++) {
    QRgb *scanLine= reinterpret_cast<QRgb *>(image.scanLine(y));
    for (int x = 0; x < w ; x++) {
    if (qAlpha(scanLine[x]) != 0) {
    foundAlpha = true;
    break;
    }
    }
    }
    if (!foundAlpha) {
    //If no alpha was found, we use the mask to set alpha values
    DrawIconEx( hdc, 0, 0, icon, w, h, 0, 0, DI_MASK);
    QImage mask = qt_fromWinHBITMAP(hdc, winBitmap, w, h);
    for (int y = 0 ; y < h ; y++){
    QRgb *scanlineImage = reinterpret_cast<QRgb *>(image.scanLine(y));
    QRgb *scanlineMask = mask.isNull() ? 0 : reinterpret_cast<QRgb *>(mask.scanLine(y));
    for (int x = 0; x < w ; x++){
    if (scanlineMask && qRed(scanlineMask[x]) != 0)
    scanlineImage[x] = 0; //mask out this pixel
    else
    scanlineImage[x] |= 0xff000000; // set the alpha channel to 255
    }
    }
    }
    //dispose resources created by iconinfo call
    DeleteObject(iconinfo.hbmMask);
    DeleteObject(iconinfo.hbmColor);
    SelectObject(hdc, oldhdc); //restore state
    DeleteObject(winBitmap);
    DeleteDC(hdc);
    return QPixmap::fromImage(image);
    }
    """
    import ctypes
    BI_RGB = 0
    GetDC = ctypes.windll.user32.GetDC
    ReleaseDC = ctypes.windll.user32.ReleaseDC
    DeleteDC = ctypes.windll.gdi32.DeleteDC
    CreateCompatibleDC = ctypes.windll.gdi32.CreateCompatibleDC
    CreateDIBSection = ctypes.windll.gdi32.CreateDIBSection
    SelectObject = ctypes.windll.gdi32.SelectObject
    DeleteObject = ctypes.windll.gdi32.DeleteObject
    # foundAlpha = False
    # Create an off-screen DC compatible with the screen.
    screenDevice = GetDC(0)
    hdc = CreateCompatibleDC(screenDevice)
    ReleaseDC(0, screenDevice)
    from win32gui import GetIconInfo
    iconinfo = GetIconInfo(h_icon)
    flag, xHotspot, yHotspot, hbmMask, hbmColor = iconinfo
    # The hotspot is the icon center, so the icon is twice that size.
    w = xHotspot * 2
    h = yHotspot * 2
    bitmapInfo = BITMAPINFO()
    bitmapInfo.bmiHeader.biSize = ctypes.sizeof(BITMAPINFOHEADER)
    bitmapInfo.bmiHeader.biWidth = w
    bitmapInfo.bmiHeader.biHeight = h
    bitmapInfo.bmiHeader.biPlanes = 1
    bitmapInfo.bmiHeader.biBitCount = 32
    bitmapInfo.bmiHeader.biCompression = BI_RGB
    bitmapInfo.bmiHeader.biSizeImage = 0
    bitmapInfo.bmiHeader.biXPelsPerMeter = 0
    bitmapInfo.bmiHeader.biYPelsPerMeter = 0
    bitmapInfo.bmiHeader.biClrUsed = 0
    bitmapInfo.bmiHeader.biClrImportant = 0
    DIB_RGB_COLORS = 0
    winBitmap = CreateDIBSection(hdc, ctypes.byref(bitmapInfo), DIB_RGB_COLORS, 0, 0, 0)
    oldhdc = SelectObject(hdc, winBitmap)
    from win32gui import DrawIconEx
    DI_NORMAL = 0x0003
    DrawIconEx(hdc, 0, 0, h_icon, w, h, 0, 0, DI_NORMAL)
    image = qt_fromWinHBITMAP(hdc, winBitmap, w, h)
    # NOTE: Not working: "ValueError: memoryview: invalid value for format 'B'" in `scanlineImage[x] |= 0xff000000`
    # from PySide.QtGui import qAlpha, qRed
    #
    # if not foundAlpha:
    #     for y in range(h):
    #         scanLine = image.scanLine(y)
    #
    #         for x in range(w):
    #             if qAlpha(scanLine[x]) != 0:
    #                 foundAlpha = True
    #                 break
    #
    # if not foundAlpha:
    #     # If no alpha was found, we use the mask to set alpha values
    #     DI_MASK = 0x0001
    #     DrawIconEx(hdc, 0, 0, h_icon, w, h, 0, 0, DI_MASK)
    #     mask = qt_fromWinHBITMAP(hdc, winBitmap, w, h)
    #
    #     for y in range(h):
    #         scanlineImage = image.scanLine(y)
    #         scanlineMask = 0 if mask.isNull() else mask.scanLine(y)
    #
    #         for x in range(w):
    #             if scanlineMask != 0 and qRed(scanlineMask[x]) != 0:
    #                 scanlineImage[x] = 0  # mask out this pixel
    #             else:
    #                 scanlineImage[x] |= 0xff000000  # set the alpha channel to 255
    # dispose resources created by iconinfo call
    DeleteObject(hbmMask.handle)
    DeleteObject(hbmColor.handle)
    # restore state
    SelectObject(hdc, oldhdc)
    DeleteObject(winBitmap)
    DeleteDC(hdc)
    return image
if __name__ == '__main__':
    # NOTE(review): the first handle is overwritten without DestroyIcon and
    # leaks; presumably only one of these two calls is meant to run.
    h_icon = get_file_icon(r'C:\Users\ipetrash\Projects\alarm-clock\main.py')
    h_icon = get_file_icon(r'C:\Users\ipetrash\Desktop\Будильник.lnk')
    print('h_icon:', h_icon)
    import sys
    from PySide.QtGui import QApplication
    # A QApplication must exist before QImage/QPixmap operations.
    QApplication(sys.argv)
    px = fromWinHICON(h_icon)
    print(px, px.size())
    px.save('winapi_qt_get_icon_file_name.py.png')
    # Release the icon handle obtained from get_file_icon().
    from win32gui import DestroyIcon
    DestroyIcon(h_icon)
|
<filename>tvrenamer/services/trakt_service.py
import logging
import os
from oslo_config import cfg
import trakt
from trakt.core import exceptions
from tvrenamer.services import base
LOG = logging.getLogger(__name__)
# Trakt API credentials; defaults come from the environment so secrets can
# stay out of config files. Both options are marked secret so oslo_config
# masks them in logs.
OPTS = [
    cfg.StrOpt(
        'client_id',
        secret=True,
        default=os.environ.get('TRAKT_CLIENT_ID'),
        help='client id from your trakt account ENV[\'TRAKT_CLIENT_ID\']'),
    cfg.StrOpt(
        'client_secret',
        secret=True,
        default=os.environ.get('TRAKT_CLIENT_SECRET'),
        help='client secret from your trakt account '
             'ENV[\'TRAKT_CLIENT_SECRET\']'),
]
# Register the options under the [trakt] config section.
cfg.CONF.register_opts(OPTS, 'trakt')
def list_opts():
    """Enumerate the oslo_config options exposed by this module.

    Includes every option the library may register at runtime. Each
    element of the returned list is a (group_name, opts) tuple; a group
    name of None maps to the [DEFAULT] section of a config file. Tools
    such as the Oslo sample-config generator rely on this hook to
    discover the options exposed to users.

    :returns: a list of (group_name, opts) tuples
    """
    from tvrenamer.common import tools
    groups = tools.make_opt_list([OPTS], 'trakt')
    return groups
class TraktService(base.Service):
    """Provides access trakt data service to lookup TV Series information.

    `Trakt.tv <http://trakt.tv/>`_

    Services used from trakt:

    - search series by name
    - lookup series by id
    - get episode name(s) by season number and episode number(s)

    Lookup methods follow a (result, error) tuple convention: on success
    the second element is None, on failure the first element is None.
    """

    def __init__(self):
        super(TraktService, self).__init__()
        # Credentials are read from the oslo_config [trakt] section.
        trakt.Trakt.configuration.defaults.client(
            id=cfg.CONF.trakt.client_id,
            secret=cfg.CONF.trakt.client_secret)

    def get_series_by_name(self, series_name):
        """Perform lookup for series

        :param str series_name: series name found within filename
        :returns: instance of series (first search hit) and error message
        :rtype: object
        """
        series = trakt.Trakt['search'].query(series_name, 'show')
        if not series:
            return None, 'Not Found'
        return series[0], None

    def get_series_by_id(self, series_id):
        """Perform lookup for series

        :param int series_id: series id of series
        :returns: instance of series and error message
        :rtype: object
        """
        series = trakt.Trakt['search'].lookup(series_id, 'trakt-show')
        if not series:
            return None, 'Not Found'
        return series, None

    def get_series_name(self, series):
        """Perform lookup for name of series

        :param object series: instance of a series
        :returns: name of series
        :rtype: str
        """
        return series.title

    def get_episode_name(self, series, episode_numbers, season_number):
        """Perform lookup for name of episode numbers for a given series.

        :param object series: instance of a series
        :param list episode_numbers: the episode sequence number
        :param int season_number: numeric season of series
        :returns: list of episode name (and error message)
        :rtype: list(str)
        """
        ids = series.to_identifier()
        series_id = ids['ids']['trakt']
        epnames = []
        for epno in episode_numbers:
            try:
                # exceptions=True makes the client raise instead of returning None.
                episode = trakt.Trakt['shows'].episode(
                    series_id, season_number, epno, exceptions=True)
            except exceptions.RequestError as err:
                LOG.exception('fetch episode %s S%sE%s failed',
                              series_id, season_number, epno)
                # Abort on the first failing episode and report the error.
                return None, str(err)
            epnames.append(episode.title)
        return epnames, None
|
import os
import numpy
# Project-wide constants for the adversarial-detection experiments.
# NOTE(review): ROOT is a machine-specific absolute path — adjust per host.
ROOT = '/home/lorenzp/adversialml/src/src/submodules/adversarial-detection/expts'
DATA_PATH = os.path.join(ROOT, 'data')
NUMPY_DATA_PATH = os.path.join(ROOT, 'numpy_data')
MODEL_PATH = os.path.join(ROOT, 'models')
OUTPUT_PATH = os.path.join(ROOT, 'outputs')
# Normalization constants for the different datasets
# Each entry is ((mean per channel), (std per channel)).
NORMALIZE_IMAGES = {
    'mnist': ((0.1307,), (0.3081,)),
    'cifar10': ((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010)),
    'svhn': ((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))
}
# The augmented CIFAR-10 variant shares the plain CIFAR-10 statistics.
NORMALIZE_IMAGES['cifar10aug'] = NORMALIZE_IMAGES['cifar10']
# Acronym for the proposed method
METHOD_NAME_PROPOSED = 'JTLA'
# METHOD_NAME_PROPOSED = 'ReBeL'
# Number of neighbors is calculated as a function of the data size (number of samples) `N`.
# Number of neighbors, `k = N ** NEIGHBORHOOD_CONST`.
NEIGHBORHOOD_CONST = 0.4
# Constants used by the NN-descent method
MIN_N_NEIGHBORS = 20
RHO = 0.5
SEED_DEFAULT = 123
# Default batch size
BATCH_SIZE_DEF = 128
# Default number of folds to use for cross-validation
CROSS_VAL_SIZE = 5
# Maximum Gaussian noise standard deviation. Found using the script `generate_noisy_data.py`
NOISE_STDEV_MAX = {
    'mnist': 0.209405,
    'cifar10': 0.034199,
    'svhn': 0.038040
}
NOISE_STDEV_MAX['cifar10aug'] = NOISE_STDEV_MAX['cifar10']
# Minimum noise standard deviation values. Used to generate a range of noise standard deviations
NOISE_STDEV_MIN = {k: NOISE_STDEV_MAX[k] / 16. for k in NOISE_STDEV_MAX.keys()}
# Number of Gaussian noise standard deviation values to use
NUM_NOISE_VALUES = 10
# Distance metric to use if not specified
METRIC_DEF = 'cosine'
# Method to estimate intrinsic dimension
METHOD_INTRINSIC_DIM = 'lid_mle'
# Method for dimension reduction
METHOD_DIM_REDUCTION = 'NPP'
MAX_SAMPLES_DIM_REDUCTION = 10000
# Cumulative variance cutoff for PCA
PCA_CUTOFF = 0.995
# Proportion of noisy samples to include in the training or test folds of cross-validation
NOISE_PROPORTION = 0.05
# Number of top ranked layer test statistics to use for detection (default value)
NUM_TOP_RANKED = 3
# List of detection methods
DETECTION_METHODS = ['mahalanobis', 'proposed', 'lid', 'lid_class_cond', 'odds', 'dknn', 'trust']
# Method names to use for plots and results
METHOD_NAME_MAP = {
    'proposed': 'proposed',
    'lid': 'LID_ICLR',
    'lid_class_cond': 'LID_ICLR_class',
    'odds': 'odds_are_odd',
    'dknn': 'deep_KNN',
    'trust': 'trust_score',
    'mahalanobis': 'deep_mahalanobis'
}
# List of layerwise test statistics supported by the proposed method
TEST_STATS_SUPPORTED = ['multinomial', 'binomial', 'lid', 'lle', 'distance', 'trust']
# Score type used by the proposed method
SCORE_TYPES = ['density', 'pvalue', 'klpe']
# Layers at which the trust score calculation is supported. 'prelogit' refers to the fully connected layer
# preceding the logit layer.
LAYERS_TRUST_SCORE = ['input', 'logit', 'prelogit']
CUSTOM_ATTACK = 'Custom'
# Maximum number of representative samples used by the custom attack. Limiting this size speeds up the custom attack
# generation and uses less memory
MAX_NUM_REPS = 2000
# norm type used for the different attack methods
ATTACK_NORM_MAP = {
    'FGSM': 'inf',
    'PGD': 'inf',
    'CW': '2',
    CUSTOM_ATTACK: '2'
}
# epsilon values used for the PGD and FGSM attacks
# (odd values 1..19 scaled to the [0, 1] pixel range)
EPSILON_VALUES = [i / 255. for i in range(1, 21, 2)]
# Maximum FPR values for calculating partial AUC
FPR_MAX_PAUC = [0.01, 0.05, 0.1, 0.2]
# FPR threshold values for calculating TPR values.
# 0.1%, 0.5%, 1%, 5%, and 10%
FPR_THRESH = [0.001, 0.005, 0.01, 0.05, 0.1]
# Number of random samples used to estimate the p-value by the density method
NUM_RANDOM_SAMPLES = 20000
# Number of bootstrap resamples
NUM_BOOTSTRAP = 100
# Plot colors and markers
# https://matplotlib.org/2.0.2/examples/color/named_colors.html
COLORS = ['r', 'b', 'c', 'orange', 'g', 'm', 'lawngreen', 'grey', 'hotpink', 'y', 'steelblue', 'tan',
          'lightsalmon', 'navy', 'gold']
MARKERS = ['o', '^', 'v', 's', '*', 'x', 'd', '>', '<', '1', 'h', 'P', '_', '2', '|', '3', '4']
|
import re
import unicodedata
from ..base import Plugin
class Unicode(Plugin):
# Plugin
def handleInitialize(self):
self.registerCommand("unicode", self.handleUnicodeSearch).setDescription("Searches for Unicode code points by name.").addParameter("search text")
self.registerCommand("utf8", self.handleUnicodeSearch).setDescription("Searches for Unicode code points by name.").addParameter("search text")
self.registerCommand("utf16", self.handleUnicodeSearch).setDescription("Searches for Unicode code points by name.").addParameter("search text")
def handleEnabled(self):
self.chatbot.messageReceived.addListener("Unicode", self.handleMessageReceived)
def handleDisabled(self):
self.chatbot.messageReceived.removeListener("Unicode")
# Unicode
# Internal
def handleMessageReceived(self, chatbot, message):
text = message.content
if not text.startswith("u\""): return
self.dissectUnicode(message, text)
def handleUnicodeSearch(self, command, commandInvocation, message):
searchText = commandInvocation.fullArguments
searchText = searchText.strip()
lowercaseSearchText = searchText.lower()
if len(lowercaseSearchText) == 0:
message.channel.postMessage("You must provide a search term!")
return
results = []
aborted = False
for i in range(0, 0x110000):
c = chr(i)
name = unicodedata.name(c, "").lower()
if lowercaseSearchText in name:
if len(results) >= 5:
aborted = True
break
else:
results.append(self.formatCharacterInformation(c))
if len(results) == 0:
if self.containsInterestingUtf8(searchText):
self.dissectUnicode(message, text)
else:
message.channel.postMessage("No matching code points found :(")
else:
if aborted: results.append("...")
reply = "\n".join(results)
reply = "```\n" + reply + "```"
message.channel.postMessage(reply)
def dissectUnicode(self, message, text, showUtf8 = None, showUtf16 = None):
try:
text = self.interpretInput(text)
except UnicodeDecodeError:
message.channel.postMessage("Invalid utf-8!")
return
# Truncate
truncated = False
if len(text) > 5:
text = text[0:5]
truncated = True
content = ""
if showUtf8 or (showUtf8 is None and self.containsInterestingUtf8(text)):
content += "UTF-8: \"" + self.escapeUtf8(text) + ("..." if truncated else "") + "\"\n"
if showUtf16 or (showUtf16 is None and self.containsInterestingUtf16(text)):
content += "UTF-16LE: \"" + self.escapeUtf16LE(text) + ("..." if truncated else "") + "\"\n"
lines = []
for c in text:
lines.append(self.formatCharacterInformation(c))
if truncated: lines.append("...")
content += "\n".join(lines)
content = "```\n" + content + "```"
message.channel.postMessage(content)
def formatCharacterInformation(self, c):
codePoint = ord(c)
printableCharacter = c
if unicodedata.category(c) in ("Zs", "Zl", "Zp", "Cc", "Cf", "Cs", "Co", "Cn"):
printableCharacter = " "
name = unicodedata.name(c, "")
return "U+%06X %s %s" % (codePoint, printableCharacter, name)
def escapeUtf8(self, str):
bytes = str.encode("utf-8")
return self.escapeBytes(bytes)
escapedString = ""
for uint8 in bytes:
if 0x20 <= uint8 and uint8 < 0x7F:
escapedString += chr(uint8)
else:
escapedString += "\\x%02x" % uint8
return escapedString
def escapeUtf16LE(self, str):
bytes = str.encode("utf-16le")[2:]
return self.escapeBytes(bytes)
def escapeUtf16BE(self, str):
bytes = str.encode("utf-16be")[2:]
return self.escapeBytes(bytes)
def escapeBytes(self, bytes):
    """Render a byte sequence as ASCII text, substituting \\xNN for each non-printable byte."""
    pieces = []
    for value in bytes:
        # Printable ASCII (0x20..0x7E) passes through verbatim.
        pieces.append(chr(value) if 0x20 <= value < 0x7F else "\\x%02x" % value)
    return "".join(pieces)
def unescape(self, str):
    """Decode backslash escapes (\\\\, \\xNN, \\uNNNN, \\UNNNNNN) in *str*.

    Escapes whose hex digits do not parse are kept verbatim in the output.
    Returns the decoded text; raises UnicodeDecodeError if the assembled
    bytes are not valid UTF-8.
    """
    bytes = bytearray()
    i = 0
    while i < len(str):
        c = str[i]
        i += 1
        if c == "\\":
            if i >= len(str):
                # Trailing lone backslash: keep it literally.
                bytes.extend(b"\\")
                break
            c = str[i]
            i += 1
            if c == "\\": bytes.extend(b"\\")
            # NOTE(review): these branches compare against literal control
            # characters (a backslash followed by a real CR/LF/tab), not
            # against the letters "r"/"n"/"t" -- preserved as-is, but confirm
            # this is the intended behavior.
            elif c == "\r": bytes.extend(b"\r")
            elif c == "\n": bytes.extend(b"\n")
            elif c == "\t": bytes.extend(b"\t")
            elif c == "x":
                n = str[i:i + 2]
                i += 2
                try:
                    n = int(n, 16)
                    bytes.append(n)
                except ValueError:
                    # Malformed escape: emit it verbatim.
                    # bugfix: n is still a str here; bytearray.extend()
                    # requires an iterable of ints, so encode it first
                    # (the original raised TypeError on this path).
                    bytes.extend(b"\\x")
                    bytes.extend(n.encode("utf-8"))
            elif c == "u":
                n = str[i:i + 4]
                i += 4
                try:
                    n = int(n, 16)
                    bytes.extend(chr(n).encode("utf-8"))
                except ValueError:
                    # bugfix: same TypeError as in the \x branch above.
                    bytes.extend(b"\\u")
                    bytes.extend(n.encode("utf-8"))
            elif c == "U":
                n = str[i:i + 6]
                i += 6
                try:
                    n = int(n, 16)
                    bytes.extend(chr(n).encode("utf-8"))
                except ValueError:
                    # bugfix: same TypeError as in the \x branch above.
                    bytes.extend(b"\\U")
                    bytes.extend(n.encode("utf-8"))
            else:
                # Unknown escape letter: keep the backslash and the letter.
                bytes.extend(b"\\")
                bytes.extend(c.encode("utf-8"))
        else:
            bytes.extend(c.encode("utf-8"))
    return bytes.decode("utf-8")
def containsInterestingUtf8(self, str):
    """Return True when any character lies outside plain ASCII (code point >= 0x80)."""
    return any(ord(ch) >= 0x80 for ch in str)
def containsInterestingUtf16(self, str):
    """Return True when any character lies outside the BMP (code point >= 0x010000)."""
    return any(ord(ch) >= 0x010000 for ch in str)
def interpretInput(self, str):
    """Strip an optional u"..." or "..." wrapper and decode its escapes.

    Input that carries neither prefix is returned unchanged.
    """
    for prefix in ("u\"", "\""):
        if str.startswith(prefix):
            body = str[len(prefix):]
            # The closing quote is optional.
            if body.endswith("\""):
                body = body[:-1]
            return self.unescape(body)
    return str
|
<filename>locora/grid_solvent/solvent_field.py
import numpy as np
from locora.grid_solvent.spatial import field
from locora.grid_solvent.spatial import set_euler as _set_euler
from locora.grid_solvent.spatial import set_quaternion as _set_quaternion
from locora.grid_solvent.crd_systems import internal_rectangular
class solvent_field(internal_rectangular):
"""
Class for calculating and manipulating boxes of solvent molecules.
"""
def __init__(self, N_solvent, N_sites, Dims, Verbose=False):
super(solvent_field, self).__init__(Dims)
self.N_solvent = N_solvent
self.N_sites = N_sites
self.crds_shape = (N_solvent*N_sites, 3)
self.crds = np.zeros(self.crds_shape, dtype=np.float)
self.inside_idxs = np.array([], dtype=np.int) ### Array containing oxygen water idxs starting
### with indx 0 as the first occuring water molecule
### in the selection.
self.uc = np.eye(3,3) ### uc: Simulation box vectors as matrix presentation
self.mic_idx = np.zeros(N_solvent, dtype=int) ### mic_idx: current simulation box idx for each water
### according minimum-image-convetion
self.N_inside = 0
### These coordinates and vectors are being calculated in frac space
self.xx1_wat = 0 ### O-H1 vector
self.xx2_wat = 0 ### O-H2 vector
self.yy_wat = 0 ### cross product zz_wat and xx1_wat
self.zz_wat = 0 ### O-H1/O-H2 orthogonal vector
self.O_crds_frac = 0
self.H1_crds_frac = 0
self.H2_crds_frac = 0
### These coordinates are being calculated in real space
self.O_crds = 0
self.H1_crds = 0
self.H2_crds = 0
self.q_dist = 0
self.theta = 0
self.phi = 0
self.psi = 0
self.a = [-1.,0.,1.]
self.b = [-1.,0.,1.]
self.c = [-1.,0.,1.]
self.verbose = Verbose
if self.verbose:
print "N_solvent:", self.N_solvent
print "N_sites: ", self.N_sites
def update_field(self, crds, uc=None):
if crds.shape != self.crds_shape:
raise Warning("New crds array has shape %s. Original crds array had shape %s. Both must have equal shape." %(crds.shape, self.crds_shape))
self.crds[:] = crds
image=False
if type(uc)!=None:
self.uc[:] = uc
image = True
self._set_inside_crds(image)
self._set_watcrds()
self._set_euler()
self._set_quaternion()
def _set_inside_crds(self, image):
if image:
###FIXME: Port this routine to a C extension.
uc_inv = np.linalg.inv(self.uc)
crds = self.crds[::self.N_sites]
_crds = np.zeros_like(crds)
crds_inv = crds.dot(uc_inv)
crds_inv = crds_inv - np.floor(crds_inv)
_crds_inv = np.zeros_like(crds_inv)
solvent_frac = np.zeros_like(crds_inv)
_solvent_frac = np.zeros_like(crds_inv)
nearest_frac2 = np.zeros(self.N_solvent, dtype=float)
_nearest_frac2 = np.zeros(self.N_solvent, dtype=float)
center_inv = self.center.dot(uc_inv)
center_inv = center_inv - np.floor(center_inv)
self.center = center_inv.dot(self.uc)
self.origin = np.zeros(3)
self.origin = self.center - self.get_real(self.bins/2)
i = 0
for a_i in self.a:
for b_i in self.b:
for c_i in self.c:
_crds_inv[:] = crds_inv + np.array([a_i, b_i, c_i])
_crds[:] = _crds_inv.dot(self.uc)
_solvent_frac[:] = self.get_frac(_crds)
if i==0:
nearest_frac2[:] = np.power(_solvent_frac-self.bins*0.5, 2).sum(axis=1)
solvent_frac[:] = np.copy(_solvent_frac)
self.mic_idx[:] = 0
else:
_nearest_frac2[:] = np.power(_solvent_frac-self.bins*0.5, 2).sum(axis=1)
update = np.where(_nearest_frac2<nearest_frac2)[0]
nearest_frac2[update] = np.copy(_nearest_frac2[update])
solvent_frac[update] = _solvent_frac[update]
self.mic_idx[update] = i
i += 1
valids = np.where( (solvent_frac[:,0] >= 0.) * (solvent_frac[:,0] < self.bins[0]) * \
(solvent_frac[:,1] >= 0.) * (solvent_frac[:,1] < self.bins[1]) * \
(solvent_frac[:,2] >= 0.) * (solvent_frac[:,2] < self.bins[2]) )[0]
self.N_inside = valids.shape[0]
self.inside_idxs = valids*self.N_sites
if self.verbose:
print "Found %d water molecules inside grid." %self.N_inside
O_crds_inv = self.crds[self.inside_idxs].dot(uc_inv)
H1_crds_inv = self.crds[self.inside_idxs+1].dot(uc_inv)
H2_crds_inv = self.crds[self.inside_idxs+2].dot(uc_inv)
O_crds_inv = O_crds_inv - np.floor(O_crds_inv)
H1_crds_inv = H1_crds_inv - np.floor(H1_crds_inv)
H2_crds_inv = H2_crds_inv - np.floor(H2_crds_inv)
_mic_idx = self.mic_idx[valids]
self.O_crds_frac = np.zeros_like(O_crds_inv)
self.H1_crds_frac = np.zeros_like(H1_crds_inv)
self.H2_crds_frac = np.zeros_like(H2_crds_inv)
### These are the reals space coordinates
self.O_crds = np.zeros_like(O_crds_inv)
self.H1_crds = np.zeros_like(H1_crds_inv)
self.H2_crds = np.zeros_like(H2_crds_inv)
i=0
for a_i in self.a:
for b_i in self.b:
for c_i in self.c:
sele = np.where(_mic_idx==i)[0]
cell = np.array([a_i, b_i, c_i])
if sele.shape[0]>0:
crds_mic_inv = O_crds_inv[sele] + cell
crds_mic = crds_mic_inv.dot(self.uc)
solvent_frac = self.get_frac(crds_mic)
self.O_crds_frac[sele] = np.copy(solvent_frac)
self.O_crds[sele] = np.copy(crds_mic)
crds_mic_inv = H1_crds_inv[sele] + cell
crds_mic = crds_mic_inv.dot(self.uc)
solvent_frac = self.get_frac(crds_mic)
self.H1_crds_frac[sele] = np.copy(solvent_frac)
self.H1_crds[sele] = np.copy(crds_mic)
crds_mic_inv = H2_crds_inv[sele] + cell
crds_mic = crds_mic_inv.dot(self.uc)
solvent_frac = self.get_frac(crds_mic)
self.H2_crds_frac[sele] = np.copy(solvent_frac)
self.H2_crds[sele] = np.copy(crds_mic)
i += 1
else:
crds = self.crds[::self.N_sites]
solvent_frac = self.get_frac(crds)
valids = np.where( (solvent_frac[:,0] >= 0.) * (solvent_frac[:,0] < self.bins[0]) * \
(solvent_frac[:,1] >= 0.) * (solvent_frac[:,1] < self.bins[1]) * \
(solvent_frac[:,2] >= 0.) * (solvent_frac[:,2] < self.bins[2]) )[0]
self.N_inside = valids.shape[0]
self.inside_idxs = valids*self.N_sites
if self.verbose:
print "Found %d water molecules inside grid." %self.N_inside
### These coordinates are being calculated in real space
self.O_crds = self.crds[self.inside_idxs]
self.H1_crds = self.crds[self.inside_idxs+1]
self.H2_crds = self.crds[self.inside_idxs+2]
self.O_crds_frac = self.get_frac(self.O_crds)
self.H1_crds_frac = self.get_frac(self.H1_crds)
self.H2_crds_frac = self.get_frac(self.H2_crds)
def _set_watcrds(self):
self.xx1_wat = self.H1_crds_frac - self.O_crds_frac
xx1_norm = np.linalg.norm(self.xx1_wat, axis=-1)
self.xx1_wat = np.einsum('ij,i->ij', self.xx1_wat, 1./xx1_norm)
self.xx2_wat = self.H2_crds_frac - self.O_crds_frac
xx2_norm = np.linalg.norm(self.xx2_wat, axis=-1)
self.xx2_wat = np.einsum('ij,i->ij', self.xx2_wat, 1./xx2_norm)
self.zz_wat = np.cross(self.xx1_wat, self.xx2_wat)
zz_norm = np.linalg.norm(self.zz_wat, axis=-1)
self.zz_wat = np.einsum('ij,i->ij', self.zz_wat, 1./zz_norm)
self.yy_wat = np.cross(self.xx1_wat, self.zz_wat)
yy_norm = np.linalg.norm(self.yy_wat, axis=-1)
self.yy_wat = np.einsum('ij,i->ij', self.yy_wat, 1./yy_norm)
def _set_euler(self):
self.theta = np.zeros(self.N_inside, dtype=np.float)
self.phi = np.zeros(self.N_inside, dtype=np.float)
self.psi = np.zeros(self.N_inside, dtype=np.float)
_set_euler(self.O_crds,
self.H1_crds,
self.H2_crds,
self.xx,
self.yy,
self.zz,
self.theta,
self.phi,
self.psi)
def _set_quaternion(self):
self.q_dist = np.zeros((self.N_inside, 4), dtype=np.float)
_set_quaternion(self.q_dist,
self.theta,
self.phi,
self.psi)
|
import numpy as np
#from LEM_initial_landscape import *
def slope_direction(eta_vector,nrows,ncols,dx,dy,validID,bn_ID):
    """D8 steepest-descent slope and flow direction for a raster DEM.

    eta_vector: flat elevation array of length nrows*ncols (reshaped to a
        view, so hole-filling below MUTATES the caller's array in place).
    dx: cell size used for all distances. NOTE(review): dy is accepted but
        never used -- presumably cells are assumed square; confirm.
    validID: flat indices of cells belonging to the DEM polygon.
    bn_ID: flat indices of the polygon boundary; their direction is forced
        to 4 (see neighbor diagram below).
    Returns (slope, direction) as (nrows, ncols) arrays. Cells with no
    downhill neighbor ("holes") are raised to their lowest neighbor's level
    and get slope 0.
    """
    #neighbors
    z = eta_vector.reshape(nrows,ncols)
    # Neighbor offset tables for the 8 D8 directions; dn holds the distance
    # factor (sqrt(2) for diagonals). Direction codes per the diagram below.
    xn = [-1,0,1,-1,1,-1, 0, 1]
    yn = [1, 1,1, 0,0,-1,-1,-1]
    dn = [2.**.5,1.,2.**.5,1.,1.,2.**.5,1.,2.**.5]
    # 530
    # 6 1
    # 742
    delta_z = np.zeros((nrows,ncols)) #[[0. for i in xrange(grid_cells+2)]for j in xrange(grid_cells+2)]
    slope = np.zeros((nrows,ncols))
    # NOTE(review): np.int is removed in NumPy >= 1.24; this line needs
    # dtype=int there.
    direction = np.zeros((nrows,ncols),dtype=np.int)
    # hole = np.zeros((nrows,ncols))
    # hole[0] becomes 1 when any cell had to be filled (pit detected).
    hole=np.zeros(1)
    #===========================
    # outlet elevation drops...
    #===========================
    # o_nrow = int(np.argmin(z)/ncols)
    # o_ncol = int(np.argmin(z)-o_nrow*ncols)
    # z[o_nrow][o_ncol] = z[o_nrow][o_ncol]*1.0-2.0
    #===========================
    # slope inside boundary
    #===========================
    # validID
    # for i in range (1, nrows-1):
    # for j in range (1,ncols-1):
    # Restrict to valid cells that are strictly interior (not in the first/
    # last row or column), so all 8 neighbors exist.
    validID_insideID = np.where((validID>ncols-1)&(validID<(nrows-1)*ncols)& (validID%ncols!=0)& ((validID+1)%(ncols)!=0))[0]
    validID_inside = validID[validID_insideID]
    for k in range(len(validID_inside)): #len(validID_inside)
        # Convert flat index back to (row, col).
        i = validID_inside[k]//ncols
        j = validID_inside[k] - i*ncols
        slope_temp = np.zeros(8)
        for n in range (0,8):
            slope_temp[n]=(z[i][j]-z[i+xn[n]][j+yn[n]])/(dx*dn[n])
        direction[i][j]=np.argmax(slope_temp)
        slope[i][j]= np.max(slope_temp) # or slope_temp[direction[x][y]]
        if slope[i][j]<0.0:
            # Pit: raise the cell to its lowest neighbor and flag the fill.
            n = direction[i][j]
            delta_z[i][j]= - dx*dn[n]*slope[i][j]
            z[i][j]=z[i][j]+delta_z[i][j]
            slope[i][j]=0.0
            hole[0]=1
            ## n1, n2= slope_temp[np.argpartition(slope_temp, -2)][-2:]
            # n1, n2=slope_temp.argsort()[-2:][::-1]
            # slope2 = slope_temp[n2]
            # delta_z[i][j]= - ((dx*dn[n1]*slope[i][j])+(dx*dn[n2]*slope2-dx*dn[n1]*slope[i][j])/4.0)
            # z[i][j]=z[i][j]+delta_z[i][j]
            # slope[i][j]=(z[i][j]-z[i+xn[n1]][j+yn[n1]])/(dx*dn[n1])
            # hole[0]=1
    #===========================
    # up boundary
    #===========================
    # Row 0: only the 5 neighbors inside the grid; 'order' maps the local
    # argmax index back to the global direction code.
    order =np.array([1,2,4,6,7])
    xn_u = [0,1,1, 0, 1]
    yn_u = [1,1,0,-1,-1]
    dn_u = [1.,2.**.5,1.,1.,2.**.5]
    slope_temp = np.zeros(5)
    for j in range (1,ncols-1):
        for n in range (0,5):
            slope_temp[n]=(z[0][j]-z[0+xn_u[n]][j+yn_u[n]])/(dx*dn_u[n])
        direction_temp=np.argmax(slope_temp)
        direction[0][j] = order[direction_temp]
        slope[0][j]= np.max(slope_temp) # or slope_temp[direction[x][y]]
        if slope[0][j]<0.0:
            n = direction[0][j]
            delta_z[0][j]= - dx*dn[n]*slope[0][j]
            z[0][j]=z[0][j]+delta_z[0][j]
            slope[0][j]=0.0
            hole[0]=1
    ##===========================
    ## the real boundary of the dem polygon
    ##===========================
    # Force every polygon-boundary cell to direction code 4 (flow right).
    direction_vector = direction.flatten()*1
    direction_vector[bn_ID] = 4
    direction = direction_vector.reshape(nrows,ncols)
    #=========================================
    # down boundary & boundary condition
    #=========================================
    # Bottom row: mirror of the top-row stencil (offsets negated below).
    order = np.array([6,5,3,1,0])
    for j in range (1,ncols-1):
        for n in range (0,5):
            slope_temp[n]=(z[nrows-1][j]-z[nrows-1-xn_u[n]][j-yn_u[n]])/(dx*dn_u[n])
        direction_temp = np.argmax(slope_temp)
        direction[nrows-1][j]=order[direction_temp]
        slope[nrows-1][j]= np.max(slope_temp) # or slope_temp[direction[x][y]]
        if slope[nrows-1][j]<0.0:
            # direction[nrows-1][j]= 8
            n = direction[nrows-1][j]
            delta_z[nrows-1][j]= - dx*dn[n]*slope[nrows-1][j]
            z[nrows-1][j]=z[nrows-1][j]+delta_z[nrows-1][j]
            slope[nrows-1][j]=0.0
            hole[0]=1
            # slope[nrows-1][j]= outlet_slope #0.005
    #
    # for j in xrange (1,ncols-1):
    #
    ## for n in xrange (0,5):
    ## slope_temp[n]=(z[nrows-1][j]-z[nrows-1-xn_u[n]][j-yn_u[n]])/(dx*dn_u[n])
    ## direction_temp = np.argmax(slope_temp)
    # direction[nrows-1][j]= 8 #order[direction_temp]
    # slope[nrows-1][j]= outlet_slope# np.max(slope_temp) # or slope_temp[direction[x][y]]
    #
    #===========================
    # left boundary
    #===========================
    # Column 0: the first 5 entries of the full xn/yn/dn tables are exactly
    # the neighbors with column offset >= 0, so codes 0..4 apply directly.
    for i in range(1, nrows-1):
        for n in range (0,5):
            slope_temp[n]=(z[i][0]-z[i+xn[n]][0+yn[n]])/(dx*dn[n])
        direction[i][0]=np.argmax(slope_temp)
        slope[i][0]= np.max(slope_temp) # or slope_temp[direction[x][y]]
        if slope[i][0]<0.0:
            n = direction[i][0]
            delta_z[i][0]= - dx*dn[n]*slope[i][0]
            z[i][0]=z[i][0]+delta_z[i][0]
            slope[i][0]=0.0
            hole[0]=1
    #===========================
    # right boundary
    #===========================
    # Last column: entries 3..7 of the tables have column offset <= 0.
    for i in range(1, nrows-1):
        for n in range (0,5):
            slope_temp[n]=(z[i][ncols-1]-z[i+xn[n+3]][ncols-1+yn[n+3]])/(dx*dn[n+3])
        direction[i][ncols-1]=np.argmax(slope_temp)+3
        slope[i][ncols-1]= np.max(slope_temp) # or slope_temp[direction[x][y]]
        if slope[i][ncols-1]<0.0:
            n = direction[i][ncols-1]
            delta_z[i][ncols-1]= - dx*dn[n]*slope[i][ncols-1]
            z[i][ncols-1]=z[i][ncols-1]+delta_z[i][ncols-1]
            slope[i][ncols-1]=0.0
            hole[0]=1
    #===========================
    # Up-left boundary
    #===========================
    # Corners: only 3 in-grid neighbors each; order_c maps back to global codes.
    slope_temp = np.zeros(3)
    order_c = np.array([1,2,4])
    xn_ul = [0,1,1]
    yn_ul = [1,1, 0]
    dn_ul = [1.,2.**.5,1.]
    for n in range (0,3):
        slope_temp[n]=(z[0][0]-z[0+xn_ul[n]][0+yn_ul[n]])/(dx*dn_ul[n])
    direction_temp = np.argmax(slope_temp)
    direction[0][0]=order_c[direction_temp]
    slope[0][0]= np.max(slope_temp) # or slope_temp[direction[x][y]]
    if slope[0][0]<0.0:
        n = direction[0][0]
        delta_z[0][0]= - dx*dn[n]*slope[0][0]
        z[0][0]=z[0][0]+delta_z[0][0]
        slope[0][0]=0.0
        hole[0]=1
    #===========================
    # Up-right boundary
    #===========================
    order_c = np.array([6,7,4])
    for n in range (0,3):
        slope_temp[n]=(z[0][ncols-1]-z[0+xn_ul[n]][ncols-1-yn_ul[n]])/(dx*dn_ul[n])
    direction_temp = np.argmax(slope_temp)
    direction[0][ncols-1]=order_c[direction_temp]
    slope[0][ncols-1]= np.max(slope_temp) # or slope_temp[direction[x][y]]
    if slope[0][ncols-1]<0.0:
        n = direction[0][ncols-1]
        delta_z[0][ncols-1]= - dx*dn[n]*slope[0][ncols-1]
        z[0][ncols-1]=z[0][ncols-1]+delta_z[0][ncols-1]
        slope[0][ncols-1]=0.0
        hole[0]=1
    #===========================
    # down_left boundary
    #===========================
    order_c = np.array([1,0,3])
    for n in range (0,3):
        slope_temp[n]=(z[nrows-1][0]-z[nrows-1-xn_ul[n]][0+yn_ul[n]])/(dx*dn_ul[n])
    direction_temp = np.argmax(slope_temp)
    direction[nrows-1][0]=order_c[direction_temp]
    slope[nrows-1][0]= np.max(slope_temp) # or slope_temp[direction[x][y]]
    if slope[nrows-1][0]<0.0:
        n = direction[nrows-1][0]
        delta_z[nrows-1][0]= - dx*dn[n]*slope[nrows-1][0]
        z[nrows-1][0]=z[nrows-1][0]+delta_z[nrows-1][0]
        slope[nrows-1][0]=0.0
        hole[0]=1
    #===========================
    # down_right boundary
    #===========================
    order_c = np.array([6,5,3])
    slope_temp = np.zeros(3)
    for n in range (0,3):
        slope_temp[n]=(z[nrows-1][ncols-1]-z[nrows-1-xn_ul[n]][ncols-1-yn_ul[n]])/(dx*dn_ul[n])
    direction_temp = np.argmax(slope_temp)
    direction[nrows-1][ncols-1]=order_c[direction_temp]
    slope[nrows-1][ncols-1]= np.max(slope_temp) # or slope_temp[direction[x][y]]
    if slope[nrows-1][ncols-1]<0.0:
        n = direction[nrows-1][ncols-1]
        delta_z[nrows-1][ncols-1]= - dx*dn[n]*slope[nrows-1][ncols-1]
        z[nrows-1][ncols-1]=z[nrows-1][ncols-1]+delta_z[nrows-1][ncols-1]
        slope[nrows-1][ncols-1]=0.0
        hole[0]=1
    #===========================
    # outlet
    #===========================
    # slope[o_nrow][o_ncol] = np.max(slope)
    # direction[o_nrow][o_ncol] = 8
    # slope_vector = np.array(slope).flatten()
    # delta_z_vector = np.array(delta_z).flatten()
    return slope, direction
#if __name__ == "__main__":
# X,Y,Z = ini_landscape()
#
# fig = plt.figure()
# ax = fig.gca(projection='3d')
#
# surf = ax.plot_surface(X, Y, Z, rstride=1, cstride=1, cmap=cm.jet,
# linewidth=0, antialiased=False)
# ax.zaxis.set_major_locator(LinearLocator(10))
# ax.zaxis.set_major_formatter(FormatStrFormatter('%.02f'))
#
# fig.colorbar(surf, shrink=0.5, aspect=10)
# plt.show() |
import actions
import logger
import testutil
import test_engine
# Module-level logger for this test module (INFO level).
log = logger.Logger(__name__, logger.INFO)
def _bulk_update(table_name, col_names, row_data):
  """Build a BulkUpdateRecord action from a table name, column names and row data."""
  table_data = testutil.table_data_from_rows(table_name, col_names, row_data)
  return actions.BulkUpdateRecord(*table_data)
class TestDerived(test_engine.EngineTestCase):
  """Engine tests for derived (summary) tables: grouping, incremental
  recalculation on updates, grouping via references, and undo/deletion.
  """

  # Shared fixture: a Customers table and an Orders table referencing it.
  sample = testutil.parse_test_sample({
    "SCHEMA": [
      [1, "Customers", [
        [1, "firstName", "Text", False, "", "", ""],
        [2, "lastName", "Text", False, "", "", ""],
        [3, "state", "Text", False, "", "", ""],
      ]],
      [2, "Orders", [
        [10, "year", "Int", False, "", "", ""],
        [11, "customer", "Ref:Customers", False, "", "", ""],
        [12, "product", "Text", False, "", "", ""],
        [13, "amount", "Numeric", False, "", "", ""],
      ]],
    ],
    "DATA": {
      "Customers": [
        ["id", "firstName", "lastName", "state"],
        [1, "Lois", "Long", "NY"],
        [2, "Felix", "Myers", "NY"],
        [3, "Grace", "Hawkins", "CT"],
        [4, "Bessie", "Green", "NJ"],
        [5, "Jerome", "Daniel", "CT"],
      ],
      "Orders": [
        ["id", "year", "customer", "product", "amount" ],
        [1, 2012, 3, "A", 15 ],
        [2, 2013, 2, "A", 15 ],
        [3, 2013, 3, "A", 15 ],
        [4, 2014, 1, "B", 35 ],
        [5, 2014, 5, "B", 35 ],
        [6, 2014, 3, "A", 16 ],
        [7, 2015, 1, "A", 17 ],
        [8, 2015, 2, "B", 36 ],
        [9, 2015, 3, "B", 36 ],
        [10, 2015, 5, "A", 17 ],
      ]
    }
  })

  def test_group_by_one(self):
    """
    Test basic summary table operation, for a table grouped by one columns.
    """
    self.load_sample(self.sample)

    # Create a derived table summarizing count and total of orders by year.
    self.apply_user_action(["CreateViewSection", 2, 0, 'record', [10]])

    # Check the results.
    self.assertPartialData("GristSummary_6_Orders", ["id", "year", "count", "amount", "group" ], [
      [1, 2012, 1, 15, [1]],
      [2, 2013, 2, 30, [2,3]],
      [3, 2014, 3, 86, [4,5,6]],
      [4, 2015, 4, 106, [7,8,9,10]],
    ])

    # Updating amounts should cause totals to be updated in the summary.
    out_actions = self.update_records("Orders", ["id", "amount"], [
      [1, 14],
      [2, 14]
    ])
    self.assertPartialOutActions(out_actions, {
      "stored": [
        actions.BulkUpdateRecord("Orders", [1,2], {'amount': [14, 14]}),
        actions.BulkUpdateRecord("GristSummary_6_Orders", [1,2], {'amount': [14, 29]})
      ],
      "calls": {"GristSummary_6_Orders": {"amount": 2}}
    })

    # Changing a record from one product to another should cause the two affected lines to change.
    out_actions = self.update_record("Orders", 10, year=2012)
    self.assertPartialOutActions(out_actions, {
      "stored": [
        actions.UpdateRecord("Orders", 10, {"year": 2012}),
        actions.BulkUpdateRecord("GristSummary_6_Orders", [1,4], {"amount": [31.0, 89.0]}),
        actions.BulkUpdateRecord("GristSummary_6_Orders", [1,4], {"count": [2,3]}),
        actions.BulkUpdateRecord("GristSummary_6_Orders", [1,4], {"group": [[1,10], [7,8,9]]}),
      ],
      "calls": {"GristSummary_6_Orders": {"group": 2, "amount": 2, "count": 2},
                "Orders": {"#lookup##summary#GristSummary_6_Orders": 1,
                           "#summary#GristSummary_6_Orders": 1}}
    })
    self.assertPartialData("GristSummary_6_Orders", ["id", "year", "count", "amount", "group" ], [
      [1, 2012, 2, 31.0, [1,10]],
      [2, 2013, 2, 29.0, [2,3]],
      [3, 2014, 3, 86.0, [4,5,6]],
      [4, 2015, 3, 89.0, [7,8,9]],
    ])

    # Changing a record to a new year that wasn't in the summary should cause an add-record.
    out_actions = self.update_record("Orders", 10, year=1999)
    self.assertPartialOutActions(out_actions, {
      "stored": [
        actions.UpdateRecord("Orders", 10, {"year": 1999}),
        actions.AddRecord("GristSummary_6_Orders", 5, {'year': 1999}),
        actions.BulkUpdateRecord("GristSummary_6_Orders", [1,5], {"amount": [14.0, 17.0]}),
        actions.BulkUpdateRecord("GristSummary_6_Orders", [1,5], {"count": [1,1]}),
        actions.BulkUpdateRecord("GristSummary_6_Orders", [1,5], {"group": [[1], [10]]}),
      ],
      "calls": {
        "GristSummary_6_Orders": {
          '#lookup#year': 1, "group": 2, "amount": 2, "count": 2, "#lookup#": 1
        },
        "Orders": {"#lookup##summary#GristSummary_6_Orders": 1,
                   "#summary#GristSummary_6_Orders": 1}}
    })
    self.assertPartialData("GristSummary_6_Orders", ["id", "year", "count", "amount", "group" ], [
      [1, 2012, 1, 14.0, [1]],
      [2, 2013, 2, 29.0, [2,3]],
      [3, 2014, 3, 86.0, [4,5,6]],
      [4, 2015, 3, 89.0, [7,8,9]],
      [5, 1999, 1, 17.0, [10]],
    ])

  def test_group_by_two(self):
    """
    Test a summary table created by grouping on two columns.
    """
    self.load_sample(self.sample)

    self.apply_user_action(["CreateViewSection", 2, 0, 'record', [10, 12]])
    self.assertPartialData("GristSummary_6_Orders", [
      "id", "year", "product", "count", "amount", "group"
    ], [
      [1, 2012, "A", 1, 15.0, [1]],
      [2, 2013, "A", 2, 30.0, [2,3]],
      [3, 2014, "B", 2, 70.0, [4,5]],
      [4, 2014, "A", 1, 16.0, [6]],
      [5, 2015, "A", 2, 34.0, [7,10]],
      [6, 2015, "B", 2, 72.0, [8,9]],
    ])

    # Changing a record from one product to another should cause the two affected lines to change,
    # or new lines to be created as needed.
    out_actions = self.update_records("Orders", ["id", "product"], [
      [2, "B"],
      [6, "B"],
      [7, "C"],
    ])
    self.assertPartialOutActions(out_actions, {
      "stored": [
        actions.BulkUpdateRecord("Orders", [2, 6, 7], {"product": ["B", "B", "C"]}),
        actions.AddRecord("GristSummary_6_Orders", 7, {'year': 2013, 'product': 'B'}),
        actions.AddRecord("GristSummary_6_Orders", 8, {'year': 2015, 'product': 'C'}),
        actions.BulkUpdateRecord("GristSummary_6_Orders", [2,3,4,5,7,8], {
          "amount": [15.0, 86.0, 0, 17.0, 15.0, 17.0]
        }),
        actions.BulkUpdateRecord("GristSummary_6_Orders", [2,3,4,5,7,8], {
          "count": [1, 3, 0, 1, 1, 1]
        }),
        actions.BulkUpdateRecord("GristSummary_6_Orders", [2,3,4,5,7,8], {
          "group": [[3], [4,5,6], [], [10], [2], [7]]
        }),
      ],
    })

    # Verify the results.
    self.assertPartialData("GristSummary_6_Orders", [
      "id", "year", "product", "count", "amount", "group"
    ], [
      [1, 2012, "A", 1, 15.0, [1]],
      [2, 2013, "A", 1, 15.0, [3]],
      [3, 2014, "B", 3, 86.0, [4,5,6]],
      [4, 2014, "A", 0, 0.0, []],
      [5, 2015, "A", 1, 17.0, [10]],
      [6, 2015, "B", 2, 72.0, [8,9]],
      [7, 2013, "B", 1, 15.0, [2]],
      [8, 2015, "C", 1, 17.0, [7]],
    ])

  def test_group_with_references(self):
    """
    Test summary tables grouped on indirect values. In this example we want for each
    customer.state, the number of customers and the total of their orders, which we can do either
    as a summary on the Customers table, or a summary on the Orders table.
    """
    self.load_sample(self.sample)

    # Create a summary on the Customers table. Adding orders involves a lookup for each customer.
    self.apply_user_action(["CreateViewSection", 1, 0, 'record', [3]])
    self.add_column("GristSummary_9_Customers", "totalAmount",
                    formula="sum(sum(Orders.lookupRecords(customer=c).amount) for c in $group)")
    self.assertPartialData("GristSummary_9_Customers", ["id", "state", "count", "totalAmount"], [
      [1, "NY", 2, 103.0 ],
      [2, "CT", 2, 134.0 ],
      [3, "NJ", 1, 0.0 ],
    ])

    # # Create the same summary on the Orders table, looking up 'state' via the Customer reference.
    # self.apply_user_action(["AddDerivedTableSource", "Summary4", "Orders",
    #                         {"state": "$customer.state"}])
    # self.add_column("Summary4", "numCustomers", formula="len(set($source_Orders.customer))")
    # self.add_column("Summary4", "totalAmount", formula="sum($source_Orders.amount)")
    # self.assertPartialData("Summary4", ["id", "state", "numCustomers", "totalAmount"], [
    #   [1, "CT", 2, 134.0 ],
    #   [2, "NY", 2, 103.0 ],
    # ])

    # In either case, changing an amount (from 36->37 for a CT customer) should update summaries.
    out_actions = self.update_record('Orders', 9, amount=37)
    self.assertPartialOutActions(out_actions, {
      "stored": [
        actions.UpdateRecord("Orders", 9, {"amount": 37}),
        actions.UpdateRecord("GristSummary_9_Customers", 2, {"totalAmount": 135.0}),
      ]
    })

    # In either case, changing a customer's state should trigger recomputation too.
    # We are changing a NY customer with $51 in orders to MA.
    self.update_record('Customers', 2, state="MA")
    self.assertPartialData("GristSummary_9_Customers", ["id", "state", "count", "totalAmount"], [
      [1, "NY", 1, 52.0 ],
      [2, "CT", 2, 135.0 ],
      [3, "NJ", 1, 0.0 ],
      [4, "MA", 1, 51.0 ],
    ])
    # self.assertPartialData("Summary4", ["id", "state", "numCustomers", "totalAmount"], [
    #   [1, "CT", 2, 135.0 ],
    #   [2, "NY", 1, 52.0 ],
    #   [3, "MA", 1, 51.0 ],
    # ])

    # Similarly, changing an Order to refer to a different customer should update both tables.
    # Here we are changing a $17 order (#7) for a NY customer (#1) to a NJ customer (#4).
    out_actions = self.update_record("Orders", 7, customer=4)
    # self.assertPartialOutActions(out_actions, {
    #   "stored": [actions.UpdateRecord("Orders", 7, {"customer": 4}),
    #              actions.AddRecord("Summary4", 4, {"state": "NJ"}),
    #              actions.UpdateRecord("Summary4", 4, {"manualSort": 4.0})]
    # })
    self.assertPartialData("GristSummary_9_Customers", ["id", "state", "count", "totalAmount"], [
      [1, "NY", 1, 35.0 ],
      [2, "CT", 2, 135.0 ],
      [3, "NJ", 1, 17.0 ],
      [4, "MA", 1, 51.0 ],
    ])
    # self.assertPartialData("Summary4", ["id", "state", "numCustomers", "totalAmount"], [
    #   [1, "CT", 2, 135.0 ],
    #   [2, "NY", 1, 35.0 ],
    #   [3, "MA", 1, 51.0 ],
    #   [4, "NJ", 1, 17.0 ],
    # ])

  def test_deletions(self):
    """
    Test that summary rows update and that undo removes a summary row that was
    added as a side effect of a record update.
    """
    self.load_sample(self.sample)

    # Create a summary table summarizing count and total of orders by year.
    self.apply_user_action(["CreateViewSection", 2, 0, 'record', [10]])
    self.assertPartialData("GristSummary_6_Orders", ["id", "year", "count", "amount", "group" ], [
      [1, 2012, 1, 15.0, [1]],
      [2, 2013, 2, 30.0, [2,3]],
      [3, 2014, 3, 86.0, [4,5,6]],
      [4, 2015, 4, 106.0, [7,8,9,10]],
    ])

    # Update a record so that a new line appears in the summary table.
    out_actions_update = self.update_record("Orders", 1, year=2007)
    self.assertPartialData("GristSummary_6_Orders", ["id", "year", "count", "amount", "group" ], [
      [1, 2012, 0, 0.0, []],
      [2, 2013, 2, 30.0, [2,3]],
      [3, 2014, 3, 86.0, [4,5,6]],
      [4, 2015, 4, 106.0, [7,8,9,10]],
      [5, 2007, 1, 15.0, [1]],
    ])

    # Undo and ensure that the new line is gone from the summary table.
    out_actions_undo = self.apply_undo_actions(out_actions_update.undo)
    self.assertPartialData("GristSummary_6_Orders", ["id", "year", "count", "amount", "group" ], [
      [1, 2012, 1, 15.0, [1]],
      [2, 2013, 2, 30.0, [2,3]],
      [3, 2014, 3, 86.0, [4,5,6]],
      [4, 2015, 4, 106.0, [7,8,9,10]],
    ])
    self.assertPartialOutActions(out_actions_undo, {
      "stored": [
        actions.UpdateRecord("GristSummary_6_Orders", 1, {"group": [1]}),
        actions.UpdateRecord("GristSummary_6_Orders", 1, {"count": 1}),
        actions.UpdateRecord("GristSummary_6_Orders", 1, {"amount": 15.0}),
        actions.RemoveRecord("GristSummary_6_Orders", 5),
        actions.UpdateRecord("Orders", 1, {"year": 2012}),
      ],
      "calls": {"GristSummary_6_Orders": {"group": 1, "amount": 1, "count": 1},
                "Orders": {"#lookup##summary#GristSummary_6_Orders": 1,
                           "#summary#GristSummary_6_Orders": 1}}
    })
|
from akf_corelib.conditional_print import ConditionalPrint
from akf_corelib.configuration_handler import ConfigurationHandler
from akf_corelib.random import Random
import numpy as np
class LineFeatures():
    """Bag of per-line statistics produced by FeatureExtractor.

    Scalar fields default to -1 ("not computed"); list fields hold per-word
    values. Instances are populated field-by-field by
    FeatureExtractor.extract_line_features().
    """
    # Scalar defaults (immutable, so class-level sharing is harmless).
    counter_special_chars = -1
    counter_alphanumerical_chars = -1
    counter_numbers = -1
    counter_chars = -1
    counter_alphabetical = -1
    counter_words = -1
    counter_spaces = -1
    special_chars_ratio = -1
    alphanumerical_chars_ratio = -1
    alphabetical_ratio = -1
    spaces_ratio = -1
    numbers_ratio = -1
    maximum_x_gap = None
    mean_x_gap = None
    median_x_gap = None
    many_numbers_in_first_word = False
    many_alphabetical_in_middle_words = False
    many_alphabetical_in_last_word = False

    def __init__(self, cpr):
        # cpr: ConditionalPrint-style logger used by print_me().
        self.cpr = cpr
        # bugfix: the list-valued attributes used to be class-level lists,
        # shared (and mutated) across ALL instances; give every instance
        # fresh, independent lists instead.
        self.counters_alphabetical_ratios = []
        self.counters_wordlengths = []
        self.counters_numbers = []
        self.x_box_sizes = []
        self.x_gaps = []

    def print_me(self):
        """Log all counters, ratios and gap statistics through the cpr logger."""
        self.cpr.print("alle cntr:", self.counter_chars)
        self.cpr.print("spec cntr:", self.counter_special_chars, "ratio", self.special_chars_ratio)
        self.cpr.print("alnr cntr:", self.counter_alphanumerical_chars, "ratio", self.alphanumerical_chars_ratio)
        self.cpr.print("albt cntr:", self.counter_alphabetical, "ratio", self.alphabetical_ratio)
        self.cpr.print("spce cntr:", self.counter_spaces, "ratio", self.spaces_ratio)
        self.cpr.print("nmbr cntr:", self.counter_numbers, "ratio", self.numbers_ratio)
        self.cpr.print("x_box_sizes", self.x_box_sizes)
        self.cpr.print("x_gaps", self.x_gaps)
        self.cpr.print("x_gap_max_size", self.maximum_x_gap)
        self.cpr.print("x_gaps_mean", self.mean_x_gap)
        self.cpr.print("x_gaps_median", self.median_x_gap)
class FeatureExtractor():
    """Computes LineFeatures for each OCR'd line of an ocromore_data document."""

    def __init__(self):
        # Pull the shared configuration and set up a tagged conditional logger.
        config_handler = ConfigurationHandler(first_init=False)
        self.config = config_handler.get_config()
        self.cpr = ConditionalPrint(self.config.PRINT_FEATURE_EXTRACTOR, self.config.PRINT_EXCEPTION_LEVEL,
                                    self.config.PRINT_WARNING_LEVEL, leading_tag=self.__class__.__name__ )
        # First words that mark a line as "not a table line".
        self.filter_start_words = ["Fernruf:", "Vorstand:", "Fernschreiber:",
                                   "von","Gründung:", "Ordnungsnr.", "Ordnungsnr",
                                   "Grundkapital:","Umstellung"]

    def extract_file_features(self, ocromore_data):
        """Compute features for every line and store them on the data dict.

        NOTE(review): extract_line_features() returns False for empty lines,
        so 'line_features' may contain False entries -- consumers must cope.
        """
        all_line_features = []
        for line in ocromore_data['lines']:
            current_line_features = self.extract_line_features(line)
            all_line_features.append(current_line_features)
        ocromore_data['line_features'] = all_line_features
        return ocromore_data

    def extract_line_features(self, line):
        """Compute a LineFeatures object for one line dict.

        Expects line['text'] and line['words'] (each word with 'word_index',
        'text' and 'hocr_coordinates' = (x0, y0, x1, y1)).
        Returns False when the line has no characters at all.
        """
        final_line_features = {}

        whole_text = line['text']
        self.cpr.print("recognizing text:", whole_text)

        # counters
        counter_special_chars = 0
        counter_alphanumerical_chars = 0
        counter_numbers = 0
        counter_chars = len(whole_text)
        counter_alphabetical = 0
        counter_words = 0
        counters_alphabetical_ratios = []
        counters_wordlengths = []
        counters_numbers = []
        character_index = 0

        # special conditions
        ultimo_is_first_word = False
        first_word_no_table_indicator = False
        starts_with_parenthesis = False
        ends_with_parenthesis = False

        last_xstop = 0
        x_box_sizes = []
        x_gaps = []

        for word_obj in line['words']:
            word_index = word_obj['word_index']
            word_text = word_obj['text']
            hocr_coordinates = word_obj['hocr_coordinates']

            word_xstart = hocr_coordinates[0]
            word_xstop = hocr_coordinates[2]
            word_box_size = word_xstop - word_xstart
            x_box_sizes.append(word_box_size)

            if word_index >= 1:
                # NOTE(review): measured stop-to-stop, i.e. previous word's
                # right edge to this word's right edge (includes this word's
                # own width) -- confirm this is the intended "gap".
                x_gap = word_xstop - last_xstop
                x_gaps.append(x_gap)

            #line.data['word_x0']
            if word_text is None or word_text == "":
                continue

            if word_index == 0:
                # First-word heuristics (table indicator, "ultimo", "(").
                if word_text in self.filter_start_words:
                    first_word_no_table_indicator = True
                if word_text.lower() == "ultimo":
                    ultimo_is_first_word = True
                if word_text[0] == "(":
                    starts_with_parenthesis = True

            # NOTE(review): compares a word index against the character
            # length of the whole line -- probably meant
            # len(line['words'])-1; verify before relying on
            # ends_with_parenthesis.
            if word_index == len(whole_text)-1:
                if word_text[-1] == ")":
                    ends_with_parenthesis = True

            counter_alphabetical_chars_word = 0
            counter_alphanumerical_chars_word = 0
            counter_numbers_word = 0
            counter_words += 1

            # Per-character classification for this word.
            word_list = list(word_text)
            for char in word_list:
                if Random.is_special_character(char):
                    counter_special_chars += 1
                elif Random.is_alphanumerical_character(char):
                    counter_alphanumerical_chars += 1
                    counter_alphanumerical_chars_word += 1
                if char.isdigit():
                    counter_numbers += 1
                    counter_numbers_word += 1

            counter_alphabetical_word = counter_alphanumerical_chars_word - counter_numbers_word
            ratio_alphabetical_word = np.round(counter_alphabetical_word/len(word_text), 2)
            counters_alphabetical_ratios.append(ratio_alphabetical_word)
            counters_wordlengths.append(len(word_text))
            counters_numbers.append(counter_numbers_word)
            character_index += len(word_text)
            last_xstop = word_xstop

        # get number of spaces
        len_whole_unspace = len(whole_text.replace(" ", ""))
        counter_spaces = counter_chars - len_whole_unspace

        # set alphabetical counter
        counter_alphabetical = counter_alphanumerical_chars - counter_numbers

        if counter_chars == 0:
            self.cpr.printw("no chars in line:", str(line['line_index']),"no features here")
            return False

        # Ratios relative to the full character count of the line.
        special_chars_ratio = counter_special_chars/ counter_chars
        alphanumerical_chars_ratio = counter_alphanumerical_chars / counter_chars
        alphabetical_ratio = counter_alphabetical / counter_chars
        spaces_ratio = counter_spaces/ counter_chars
        numbers_ratio = counter_numbers / counter_chars

        maximum_x_gap = None
        mean_x_gap = None
        median_x_gap = None

        if len(x_gaps) >= 1:
            maximum_x_gap = max(x_gaps)
            mean_x_gap = np.mean(x_gaps)
            median_x_gap = np.median(x_gaps)

        many_numbers_in_first_word = False
        many_alphabetical_in_middle_words = False
        many_alphabetical_in_last_word = False

        # check some middle and last word conditions
        for counter_index, counter in enumerate(counters_wordlengths):
            if counter_index == 0:
                # First word: mostly digits?
                ctr_numbers = counters_numbers[counter_index]
                numbers_ratio_word = np.round(ctr_numbers/counter,2)
                if numbers_ratio_word > 0.8:
                    many_numbers_in_first_word = True
            elif counter_index == len(counters_wordlengths)-1:
                # Last word: long and mostly letters?
                if counter >= 4:
                    alphabetical_ratio_word = counters_alphabetical_ratios[counter_index]
                    if alphabetical_ratio_word >= 0.75:
                        many_alphabetical_in_last_word = True
            else:
                # Middle words: long and mostly letters?
                if counter >= 4:
                    alphabetical_ratio_word = counters_alphabetical_ratios[counter_index]
                    if alphabetical_ratio_word >= 0.75:
                        many_alphabetical_in_middle_words = True

        # Package everything into a LineFeatures object.
        # NOTE(review): ultimo_is_first_word, first_word_no_table_indicator
        # and the parenthesis flags are computed above but never stored here.
        final_line_features = LineFeatures(cpr=self.cpr)
        final_line_features.many_alphabetical_in_last_word = many_alphabetical_in_last_word

        final_line_features.counter_special_chars = counter_special_chars
        final_line_features.counter_chars = counter_chars
        final_line_features.counter_spaces = counter_spaces
        final_line_features.counter_numbers = counter_numbers
        final_line_features.counter_alphabetical = counter_alphabetical
        final_line_features.counter_alphanumerical_chars = counter_alphanumerical_chars
        final_line_features.counter_words = counter_words
        final_line_features.counters_numbers = counters_numbers
        final_line_features.counters_wordlengths = counters_wordlengths
        final_line_features.counters_alphabetical_ratios = counters_alphabetical_ratios

        final_line_features.numbers_ratio = numbers_ratio
        final_line_features.alphabetical_ratio = alphabetical_ratio
        final_line_features.alphanumerical_chars_ratio = alphanumerical_chars_ratio
        final_line_features.special_chars_ratio = special_chars_ratio
        final_line_features.spaces_ratio = spaces_ratio

        final_line_features.many_alphabetical_in_last_word = many_alphabetical_in_last_word
        final_line_features.many_alphabetical_in_middle_words = many_alphabetical_in_middle_words
        final_line_features.many_numbers_in_first_word = many_numbers_in_first_word

        final_line_features.x_box_sizes = x_box_sizes
        final_line_features.x_gaps = x_gaps
        final_line_features.maximum_x_gap = maximum_x_gap
        final_line_features.mean_x_gap = mean_x_gap
        final_line_features.median_x_gap = median_x_gap

        return final_line_features
<gh_stars>0
import numpy as np
import h5py
import numpy as np
import platform
import os
import json
import sys
import argparse
import scipy.ndimage as nd
import pickle
from contextlib import redirect_stdout
from ipdb import set_trace as stop
# Configure the deep-learning backend *before* Keras is imported below:
# the host named 'viga' runs Theano on CPU with the TensorFlow backend
# flag, every other host is pointed at GPU 0.
_hostname = platform.node()
if _hostname == 'viga':
    os.environ["THEANO_FLAGS"] = "mode=FAST_RUN,device=cpu,floatX=float32"
    os.environ["KERAS_BACKEND"] = "tensorflow"
else:
    os.environ["CUDA_VISIBLE_DEVICES"] = "0"
from keras.layers import Input, Dense, Convolution1D, MaxPooling1D, Flatten, merge, GaussianNoise, ZeroPadding1D
from keras.callbacks import ModelCheckpoint, Callback
from keras.models import Model, model_from_json
from keras.utils.visualize_util import plot as kerasPlot
import keras.optimizers
from keras.utils import np_utils
#from ipdb import set_trace as stop
def running_mean(x, N):
    """Return the sliding-window mean of ``x`` over windows of ``N`` samples.

    The result has ``len(x) - N + 1`` elements; window ``i`` averages
    ``x[i:i+N]``.
    """
    # Prepend a zero so the cumulative-sum differences below also cover
    # the very first window.
    padded = np.insert(x, 0, 0)
    totals = np.cumsum(padded)
    return (totals[N:] - totals[:-N]) / N
class LossHistory(Callback):
    """Keras callback that appends each finished epoch's log dict to the
    shared ``losses`` list and rewrites ``<root>_loss.json`` after every
    epoch, so training progress survives interruption."""

    def __init__(self, root, losses):
        self.root = root
        self.losses = losses

    def on_epoch_end(self, batch, logs={}):
        """Record this epoch's metrics and persist the full history."""
        self.losses.append(logs)
        with open("{0}_loss.json".format(self.root), 'w') as handle:
            json.dump(self.losses, handle)

    def finalize(self):
        # Nothing to clean up; kept for API symmetry with callers.
        pass
class trainDNNFull(object):
    """Multi-output 1-D CNN trained on spectra stored in an HDF5 database.

    Each spectrum is split into three wavelength pieces (2920 / 2400 / 1893
    samples, see ``nLambda`` and the slicing in the generators); every piece
    runs through its own convolutional stack and the flattened features are
    merged to predict 7 discretized parameters (see ``labels`` in
    ``defineNetwork``).  Uses the Keras 1.x API (``Convolution1D``,
    ``nb_filter``, ``merge``).
    """

    def __init__(self, root, noise, option):
        """Read parameter ranges from the database and size the data split.

        Args:
            root: filename prefix for all outputs (model JSON, weights,
                loss history, predictions).
            noise: stddev of the Gaussian noise added to the inputs.
            option: 'start', 'continue' or 'predict' (see the __main__
                driver below).
        """
        self.root = root
        self.nFeatures = 50      # filters per convolutional layer
        self.kernelSize = 3      # base kernel length, scaled 1x..5x per layer
        self.poolLength = 2
        self.nLambda = np.asarray([2920, 2400, 1893])      # samples per spectral piece
        self.nLambdaNew = np.asarray([2944, 2400, 1920])   # NOTE(review): not used anywhere in this class
        self.batchSize = 512
        self.option = option
        # Each of the 7 output parameters is discretized into dims[i]*5 classes.
        dims = np.asarray([7, 9, 5, 9, 5, 11, 11])
        self.nClasses = dims * 5
        self.noise = noise
        self.lower = np.asarray([])   # placeholder; overwritten with real ranges below
        self.upper = np.asarray([])
        self.dataFile = "/scratch1/aasensio/deepLearning/DNStars/database/database.h5"

        # Read per-parameter min/max used by transformToCategorical's binning.
        f = h5py.File(self.dataFile, 'r')
        pars = f.get("parameters")
        self.nModels, _ = pars.shape
        self.lower = np.min(pars, axis=0)
        self.upper = np.max(pars, axis=0)
        f.close()

        # 90/10 training/validation split, truncated to whole batches.
        self.nTraining = int(self.nModels * 0.9)
        self.nValidation = int(self.nModels * 0.1)
        self.nBatchsPerEpochTraining = int(self.nTraining / self.batchSize)
        self.nBatchsPerEpochValidation = int(self.nValidation / self.batchSize)
        self.nTraining = self.nBatchsPerEpochTraining * self.batchSize
        self.nValidation = self.nBatchsPerEpochValidation * self.batchSize

        print("Training set: {0}".format(self.nTraining))
        print(" - Batch size: {0}".format(self.batchSize))
        print(" - Batches per epoch: {0}".format(self.nBatchsPerEpochTraining))
        print("Validation set: {0}".format(self.nValidation))
        print(" - Batch size: {0}".format(self.batchSize))
        print(" - Batches per epoch: {0}".format(self.nBatchsPerEpochValidation))

    def transformToCategorical(self, data, index):
        """One-hot encode parameter ``index``.

        Values are mapped linearly from [lower, upper] onto integer bins
        0..nClasses[index]-1 and then one-hot encoded.
        """
        valuesInt = np.floor((data - self.lower[index]) / (self.upper[index] - self.lower[index]) * (self.nClasses[index]-1)).astype('int32')
        return np_utils.to_categorical(valuesInt, self.nClasses[index])

    def training_generator(self):
        """Yield training batches forever: ([piece1, piece2, piece3], targets).

        Each flux piece is divided by a 30-sample uniform-filter estimate of
        its continuum before being fed to the network.
        """
        f = h5py.File(self.dataFile, 'r')
        pars = f.get("parameters")
        flux = f.get("flux")
        while 1:
            for i in range(self.nBatchsPerEpochTraining):
                # One categorical target array per output parameter.
                outTrain = []
                for j in range(7):
                    outTrain.append(self.transformToCategorical(pars[i*self.batchSize:(i+1)*self.batchSize,j], j))
                # Continuum normalization, independently per spectral piece.
                continuum1 = nd.filters.uniform_filter1d(flux[i*self.batchSize:(i+1)*self.batchSize,0:2920], axis=1, size=30, mode='nearest')
                continuum2 = nd.filters.uniform_filter1d(flux[i*self.batchSize:(i+1)*self.batchSize,2920:2920+2400], axis=1, size=30, mode='nearest')
                continuum3 = nd.filters.uniform_filter1d(flux[i*self.batchSize:(i+1)*self.batchSize,2920+2400:], axis=1, size=30, mode='nearest')
                piece1 = np.atleast_3d(flux[i*self.batchSize:(i+1)*self.batchSize,0:2920] / continuum1).astype('float32')
                piece2 = np.atleast_3d(flux[i*self.batchSize:(i+1)*self.batchSize,2920:2920+2400] / continuum2).astype('float32')
                piece3 = np.atleast_3d(flux[i*self.batchSize:(i+1)*self.batchSize,2920+2400:] / continuum3).astype('float32')
                yield [piece1, piece2, piece3], outTrain
        # NOTE(review): unreachable — the `while 1` loop above never breaks.
        f.close()

    def validation_generator(self):
        """Same as ``training_generator`` but reading the rows after
        ``nTraining`` (the validation split)."""
        f = h5py.File(self.dataFile, 'r')
        pars = f.get("parameters")
        flux = f.get("flux")
        while 1:
            for i in range(self.nBatchsPerEpochValidation):
                outTrain = []
                for j in range(7):
                    outTrain.append(self.transformToCategorical(pars[self.nTraining+i*self.batchSize:self.nTraining+(i+1)*self.batchSize,j], j))
                continuum1 = nd.filters.uniform_filter1d(flux[self.nTraining+i*self.batchSize:self.nTraining+(i+1)*self.batchSize,0:2920], axis=1, size=30, mode='nearest')
                continuum2 = nd.filters.uniform_filter1d(flux[self.nTraining+i*self.batchSize:self.nTraining+(i+1)*self.batchSize,2920:2920+2400], axis=1, size=30, mode='nearest')
                continuum3 = nd.filters.uniform_filter1d(flux[self.nTraining+i*self.batchSize:self.nTraining+(i+1)*self.batchSize,2920+2400:], axis=1, size=30, mode='nearest')
                piece1 = np.atleast_3d(flux[self.nTraining+i*self.batchSize:self.nTraining+(i+1)*self.batchSize,0:2920] / continuum1).astype('float32')
                piece2 = np.atleast_3d(flux[self.nTraining+i*self.batchSize:self.nTraining+(i+1)*self.batchSize,2920:2920+2400] / continuum2).astype('float32')
                piece3 = np.atleast_3d(flux[self.nTraining+i*self.batchSize:self.nTraining+(i+1)*self.batchSize,2920+2400:] / continuum3).astype('float32')
                yield [piece1, piece2, piece3], outTrain
        # NOTE(review): unreachable — the `while 1` loop above never breaks.
        f.close()

    def validation_generator_prediction(self):
        """Yield noisy, continuum-normalized validation batches for prediction.

        Unlike the training/validation generators this adds explicit noise,
        estimates the continuum by applying the uniform filter 30 times, and
        yields ONE array covering the whole spectrum instead of three
        pieces.  NOTE(review): the model built in defineNetwork takes three
        inputs — confirm this generator matches the model it is used with
        (see predictCNN).
        """
        f = h5py.File(self.dataFile, 'r')
        flux = f.get("flux")
        while 1:
            for i in range(self.nBatchsPerEpochValidation):
                batch = flux[self.nTraining+i*self.batchSize:self.nTraining+(i+1)*self.batchSize,:]
                batch += np.random.normal(loc=0.0, scale=self.noise, size=batch.shape)
                continuum = np.copy(batch)
                for k in range(30):
                    continuum = nd.filters.uniform_filter1d(continuum, axis=1, size=30, mode='nearest')
                yield np.atleast_3d(batch / continuum).astype('float32')
        # NOTE(review): unreachable — the `while 1` loop above never breaks.
        f.close()

    def defineNetwork(self):
        """Build the three-branch CNN and save its architecture to disk.

        Each branch: Gaussian noise, optional zero padding, then five
        Convolution1D+MaxPooling1D stages with kernel lengths growing from
        1x to 5x ``kernelSize``.  The flattened branches are concatenated
        and feed 7 independent softmax heads.
        """
        print("Setting up network...")
        # Piece 1
        flux1 = Input(shape=(2920,1), name='flux1_input')
        x = GaussianNoise(sigma=self.noise)(flux1)
        # Padding presumably chosen to adjust the length before the five
        # pooling halvings — TODO confirm the intended output lengths.
        x = ZeroPadding1D(padding=24)(x)
        x = Convolution1D(nb_filter=self.nFeatures, filter_length=self.kernelSize, activation='relu', border_mode='same', init='he_normal', name='conv1_1')(x)
        x = MaxPooling1D(pool_length=self.poolLength, name='pool1_1')(x)
        x = Convolution1D(nb_filter=self.nFeatures, filter_length=2*self.kernelSize, activation='relu', border_mode='same', init='he_normal', name='conv2_1')(x)
        x = MaxPooling1D(pool_length=self.poolLength, name='pool2_1')(x)
        x = Convolution1D(nb_filter=self.nFeatures, filter_length=3*self.kernelSize, activation='relu', border_mode='same', init='he_normal', name='conv3_1')(x)
        x = MaxPooling1D(pool_length=self.poolLength, name='pool3_1')(x)
        x = Convolution1D(nb_filter=self.nFeatures, filter_length=4*self.kernelSize, activation='relu', border_mode='same', init='he_normal', name='conv4_1')(x)
        x = MaxPooling1D(pool_length=self.poolLength, name='pool4_1')(x)
        x = Convolution1D(nb_filter=self.nFeatures, filter_length=5*self.kernelSize, activation='relu', border_mode='same', init='he_normal', name='conv5_1')(x)
        x = MaxPooling1D(pool_length=self.poolLength, name='pool5_1')(x)
        flux1_flat = Flatten(name='flat_1')(x)

        # Piece 2 (no padding — length 2400 used as-is)
        flux2 = Input(shape=(2400,1), name='flux2_input')
        x = GaussianNoise(sigma=self.noise)(flux2)
        x = Convolution1D(nb_filter=self.nFeatures, filter_length=self.kernelSize, activation='relu', border_mode='same', init='he_normal', name='conv1_2')(x)
        x = MaxPooling1D(pool_length=self.poolLength, name='pool1_2')(x)
        x = Convolution1D(nb_filter=self.nFeatures, filter_length=2*self.kernelSize, activation='relu', border_mode='same', init='he_normal', name='conv2_2')(x)
        x = MaxPooling1D(pool_length=self.poolLength, name='pool2_2')(x)
        x = Convolution1D(nb_filter=self.nFeatures, filter_length=3*self.kernelSize, activation='relu', border_mode='same', init='he_normal', name='conv3_2')(x)
        x = MaxPooling1D(pool_length=self.poolLength, name='pool3_2')(x)
        x = Convolution1D(nb_filter=self.nFeatures, filter_length=4*self.kernelSize, activation='relu', border_mode='same', init='he_normal', name='conv4_2')(x)
        x = MaxPooling1D(pool_length=self.poolLength, name='pool4_2')(x)
        x = Convolution1D(nb_filter=self.nFeatures, filter_length=5*self.kernelSize, activation='relu', border_mode='same', init='he_normal', name='conv5_2')(x)
        x = MaxPooling1D(pool_length=self.poolLength, name='pool5_2')(x)
        flux2_flat = Flatten(name='flat_2')(x)

        # Piece 3
        flux3 = Input(shape=(1893,1), name='flux3_input')
        x = GaussianNoise(sigma=self.noise)(flux3)
        x = ZeroPadding1D(padding=27)(x)
        x = Convolution1D(nb_filter=self.nFeatures, filter_length=self.kernelSize, activation='relu', border_mode='same', init='he_normal', name='conv1_3')(x)
        x = MaxPooling1D(pool_length=self.poolLength, name='pool1_3')(x)
        x = Convolution1D(nb_filter=self.nFeatures, filter_length=2*self.kernelSize, activation='relu', border_mode='same', init='he_normal', name='conv2_3')(x)
        x = MaxPooling1D(pool_length=self.poolLength, name='pool2_3')(x)
        x = Convolution1D(nb_filter=self.nFeatures, filter_length=3*self.kernelSize, activation='relu', border_mode='same', init='he_normal', name='conv3_3')(x)
        x = MaxPooling1D(pool_length=self.poolLength, name='pool3_3')(x)
        x = Convolution1D(nb_filter=self.nFeatures, filter_length=4*self.kernelSize, activation='relu', border_mode='same', init='he_normal', name='conv4_3')(x)
        x = MaxPooling1D(pool_length=self.poolLength, name='pool4_3')(x)
        x = Convolution1D(nb_filter=self.nFeatures, filter_length=5*self.kernelSize, activation='relu', border_mode='same', init='he_normal', name='conv5_3')(x)
        x = MaxPooling1D(pool_length=self.poolLength, name='pool5_3')(x)
        flux3_flat = Flatten(name='flat_3')(x)

        # Merge all branches and attach one softmax head per parameter.
        x = merge([flux1_flat, flux2_flat, flux3_flat], mode='concat', name='merge')
        # NOTE(review): `labels` has 8 entries but only the first 7 get an
        # output head (range(7) below matches nClasses/dims) — 'logg' is
        # never used.
        labels = ['metals', 'C', 'N', 'O', 'alpha', 'log10vdop', 'Teff', 'logg']
        out = [None] * 7
        for i in range(7):
            out[i] = Dense(self.nClasses[i], activation='softmax', name='out_{0}'.format(labels[i]))(x)

        self.model = Model(input=[flux1, flux2, flux3], output=out)

        # Persist architecture (JSON), a diagram (PNG) and a text summary.
        json_string = self.model.to_json()
        f = open('{0}_model.json'.format(self.root), 'w')
        f.write(json_string)
        f.close()
        kerasPlot(self.model, to_file='{0}_model.png'.format(self.root), show_shapes=True)
        with open('{0}_summary.txt'.format(self.root), 'w') as f:
            with redirect_stdout(f):
                self.model.summary()

    def compileNetwork(self):
        """Compile with categorical cross-entropy on every output head."""
        self.model.compile(loss='categorical_crossentropy', optimizer='nadam', metrics=['accuracy'])

    def readNetwork(self):
        """Restore architecture and weights saved by a previous run."""
        print("Reading previous network...")
        f = open('{0}_model.json'.format(self.root), 'r')
        json_string = f.read()
        f.close()
        self.model = model_from_json(json_string)
        self.model.load_weights("{0}_weights.hdf5".format(self.root))

    def trainCNN(self, nIterations):
        """Train for ``nIterations`` epochs; best weights are checkpointed.

        When resuming ('continue') the previous loss history is reloaded so
        the JSON log stays continuous.
        """
        print("Training network...")
        self.checkpointer = ModelCheckpoint(filepath="{0}_weights.hdf5".format(self.root), verbose=1, save_best_only=True)
        # Recover losses from previous run
        if (self.option == 'continue'):
            with open("{0}_loss.json".format(self.root), 'r') as f:
                losses = json.load(f)
        else:
            losses = []
        self.history = LossHistory(self.root, losses)
        self.metrics = self.model.fit_generator(self.training_generator(), self.nTraining, nb_epoch=nIterations,
            callbacks=[self.checkpointer, self.history], validation_data=self.validation_generator(), nb_val_samples=self.nValidation,
            max_q_size=30)
        self.history.finalize()

    def predictCNN(self):
        """Predict on the validation generator and pickle the probabilities."""
        print("Predicting validation data...")
        out = self.model.predict_generator(self.validation_generator_prediction(), self.nValidation, max_q_size=30)
        print("Saving validation data...")
        with open("{0}_{1}_prob.pkl".format(self.root, self.noise), "wb") as outfile:
            pickle.dump(out, outfile, pickle.HIGHEST_PROTOCOL)

    def predictCNN2(self):
        """Predict on the first 1024 database rows (noisy, continuum
        normalized with 30 filter passes) and pickle the probabilities.

        NOTE(review): feeds a single array to a model built with three
        inputs in defineNetwork — confirm which saved model this is meant
        to be used with.
        """
        print("Predicting validation data...")
        f = h5py.File(self.dataFile, 'r')
        flux = f.get("flux")
        batch = flux[0:1024,:]
        batch += np.random.normal(loc=0.0, scale=self.noise, size=batch.shape)
        continuum = np.copy(batch)
        for k in range(30):
            continuum = nd.filters.uniform_filter1d(continuum, axis=1, size=30, mode='nearest')
        inTest = np.atleast_3d(batch / continuum).astype('float32')
        out = self.model.predict(inTest, verbose=1)
        print("Saving validation data...")
        with open("{0}_{1}_prob.pkl".format(self.root, self.noise), "wb") as outfile:
            pickle.dump(out, outfile, pickle.HIGHEST_PROTOCOL)
if __name__ == '__main__':
    # Command-line driver: build or restore the network, then train and/or
    # predict depending on --action.
    parser = argparse.ArgumentParser(description='Train/predict for spectra')
    parser.add_argument('-o', '--out', help='Output files')
    parser.add_argument('-e', '--epochs', help='Number of epochs', default=10)
    parser.add_argument('-n', '--noise', help='Noise to add during training/prediction', default=0.0)
    parser.add_argument('-a', '--action', help='Action', choices=['start', 'continue', 'predict'], required=True)
    args = parser.parse_args()

    root = args.out
    nEpochs = int(args.epochs)
    option = args.action
    noise = float(args.noise)

    trainer = trainDNNFull(root, noise, option)
    if option == 'start':
        # Fresh run: define the architecture from scratch.
        trainer.defineNetwork()
        trainer.compileNetwork()
    if option == 'continue' or option == 'predict':
        # Restore a previously saved architecture and weights.
        trainer.readNetwork()
    if option == 'start' or option == 'continue':
        trainer.compileNetwork()
        trainer.trainCNN(nEpochs)
    if option == 'predict':
        trainer.predictCNN2()
<reponame>jkl1337/ankisport
# coding=utf-8
import os

from PyQt4 import QtCore, QtGui
import pytoml as toml

from aqt import mw
from aqt.qt import *
from aqt.utils import showWarning, tooltip
from exporter import TOMLNoteExporter
class ExportDialog(QDialog):
    """Modal dialog that exports Anki notes to a TOML file.

    Reads an exporter profile (a TOML file providing 'query' and optional
    'sets'), runs TOMLNoteExporter with it and writes the result to the
    chosen output path.  Targets PyQt4 / Python 2 (note the use of
    ``unicode`` below).
    """

    def __init__(self, mw):
        QDialog.__init__(self, mw, Qt.Window)
        self.mw = mw
        self.setup_ui()
        self.fill_values()

    def open_profile(self):
        """Browse for the exporter profile and show the choice in the form."""
        path_name = self.getProfilePathName()
        if path_name:
            self.profile_edit.setText(path_name)

    def open_output(self):
        """Browse for the output file and show the choice in the form."""
        path_name = self.getOutputPathName()
        if path_name:
            self.output_edit.setText(path_name)

    def on_accept(self):
        """Validate the form, run the export, and close only on success."""
        ok = self.readValues()
        if ok:
            exporter = TOMLNoteExporter(mw.col, query=mw.ankisport.query, sets=mw.ankisport.sets)
            ok = exporter.doExport(mw.ankisport.output_path, verify=mw.ankisport.verify)
            if ok:
                tooltip("Exported %d notes" % exporter.count, parent=self.mw)
        # Keep the dialog open when validation or the export failed.
        if ok:
            QDialog.accept(self)

    def on_reject(self):
        """Close the dialog without exporting."""
        self.close()

    def getProfilePathName(self):
        """Ask for the profile file path; returns an empty string on cancel."""
        filter = 'TOML Files (*.toml)'
        return unicode(QFileDialog.getOpenFileName(mw, "Exporter Profile",
                                                   mw.ankisport.profile_path, filter))

    def getOutputPathName(self):
        """Ask for the output file path; returns an empty string on cancel."""
        filter = 'TOML Files (*.toml)'
        return unicode(QFileDialog.getSaveFileName(mw, "Export to file",
                                                   mw.ankisport.output_path, filter))

    def fill_values(self):
        """Populate the form from the persisted mw.ankisport settings."""
        self.profile_edit.setText(mw.ankisport.profile_path)
        self.output_edit.setText(mw.ankisport.output_path)
        self.verify_btn.setChecked(mw.ankisport.verify)

    def readValues(self):
        """Copy the form into mw.ankisport and parse the profile TOML.

        Returns:
            True when both paths are set and the profile was loaded,
            False otherwise (a warning has been shown).
        """
        mw.ankisport.profile_path = self.profile_edit.text()
        if mw.ankisport.profile_path == "":
            showWarning("The export profile is not set")
            return False
        mw.ankisport.output_path = self.output_edit.text()
        if mw.ankisport.output_path == "":
            showWarning("The export path is not set")
            return False
        mw.ankisport.verify = self.verify_btn.isChecked()
        with open(mw.ankisport.profile_path, 'r') as f:
            t = toml.load(f)
            # 'query' is required in the profile; 'sets' is optional.
            mw.ankisport.query = t['query']
            mw.ankisport.sets = t.get('sets', [])
        return True

    def setup_ui(self):
        """Build the dialog: profile/output pickers, verify checkbox, buttons."""
        self.setWindowModality(QtCore.Qt.ApplicationModal)
        self.resize(718, 358)
        self.setSizeGripEnabled(True)
        self.setModal(True)

        l_main = QtGui.QVBoxLayout(self)
        grid = QtGui.QGridLayout()

        # Row 0: exporter profile path + browse button.
        profile_label = QLabel('Profile')
        grid.addWidget(profile_label, 0, 0, 1, 1)
        self.profile_edit = QLineEdit()
        grid.addWidget(self.profile_edit, 0, 1, 1, 3)
        profile_btn = QPushButton("Open &Profile", clicked=self.open_profile)
        grid.addWidget(profile_btn, 0, 4, 1, 1)

        # Row 1: output file path + browse button.
        output_label = QLabel('Output')
        grid.addWidget(output_label, 1, 0, 1, 1)
        self.output_edit = QLineEdit()
        grid.addWidget(self.output_edit, 1, 1, 1, 3)
        output_btn = QPushButton("Open &Output", clicked=self.open_output)
        grid.addWidget(output_btn, 1, 4, 1, 1)

        # Row 2: verify option.
        self.verify_btn = QCheckBox('&Verify')
        grid.addWidget(self.verify_btn, 2, 0, 1, 2)
        l_main.addLayout(grid)

        # OK / Cancel.
        button_box = QDialogButtonBox(self)
        button_box.setOrientation(QtCore.Qt.Horizontal)
        button_box.setStandardButtons(QtGui.QDialogButtonBox.Cancel|QtGui.QDialogButtonBox.Ok)
        button_box.accepted.connect(self.on_accept)
        button_box.rejected.connect(self.on_reject)
        l_main.addWidget(button_box)
        self.setLayout(l_main)
class Settings(object):
    """Per-session add-on state stored on ``mw.ankisport``.

    Holds the profile/output paths shown in ExportDialog, the query and
    sets parsed from the profile, and the verify flag.
    """

    def __init__(self):
        # Default both paths to the user's desktop directory.
        # (Renamed from `dir`, which shadowed the builtin; `os` was
        # previously used here without ever being imported.)
        base_dir = QDesktopServices.storageLocation(QDesktopServices.DesktopLocation)
        self.profile_path = os.path.join(base_dir, "settings.toml")
        self.output_path = os.path.join(base_dir, "export.toml")
        self.query = ""       # search query from the profile TOML
        self.sets = []        # optional 'sets' list from the profile TOML
        self.verify = False   # passed through to TOMLNoteExporter.doExport
def displayDialog():
    """Open the export dialog modally."""
    ExportDialog(mw).exec_()
def load_addon():
    """Install the add-on: create its settings object and add a
    'TOML Export...' entry to the Tools menu."""
    mw.ankisport = Settings()
    export_action = QAction('TOML Export...', mw)
    export_action.triggered.connect(displayDialog)
    mw.form.menuTools.addAction(export_action)
|
<reponame>Defense-Cyber-Crime-Center/plaso<filename>tests/output/interface.py<gh_stars>1-10
#!/usr/bin/python
# -*- coding: utf-8 -*-
import unittest
from plaso.output import interface
from plaso.output import manager
from tests.cli import test_lib as cli_test_lib
from tests.output import test_lib
class TestEvent(object):
    """Simple class that defines a dummy event.

    Timestamps that cannot be converted to an integer fall back to 0;
    the date is a fixed placeholder.
    """

    def __init__(self, timestamp, entry):
        self.date = u'03/01/2012'
        try:
            converted = int(timestamp)
        except ValueError:
            converted = 0
        self.timestamp = converted
        self.entry = entry

    def EqualityString(self):
        """Return 'timestamp;entry' used for deduplication comparisons."""
        parts = [str(self.timestamp), str(self.entry)]
        return u';'.join(parts)
class TestOutputModule(interface.LinearOutputModule):
    """This is a test output module that provides a simple XML."""

    NAME = u'test_xml'
    DESCRIPTION = u'Test output that provides a simple mocked XML.'

    def WriteEventBody(self, event_object):
        """Writes the body of an event object to the output.

        Args:
          event_object: the event object (instance of EventObject).
        """
        template = (
            u'\t<Date>{0:s}</Date>\n\t<Time>{1:d}</Time>\n'
            u'\t<Entry>{2:s}</Entry>\n')
        self._WriteLine(template.format(
            event_object.date, event_object.timestamp, event_object.entry))

    def WriteEventEnd(self):
        """Writes the closing tag of a single event."""
        self._WriteLine(u'</Event>\n')

    def WriteEventStart(self):
        """Writes the opening tag of a single event."""
        self._WriteLine(u'<Event>\n')

    def WriteFooter(self):
        """Writes the closing tag of the event file."""
        self._WriteLine(u'</EventFile>\n')

    def WriteHeader(self):
        """Writes the opening tag of the event file."""
        self._WriteLine(u'<EventFile>\n')
class LinearOutputModuleTest(test_lib.OutputModuleTestCase):
    """Tests the linear output module."""

    def testOutput(self):
        """Serializing events through the module yields the expected XML."""
        test_events = [
            TestEvent(123456, u'My Event Is Now!'),
            TestEvent(123458, u'There is no tomorrow.'),
            TestEvent(123462, u'Tomorrow is now.'),
            TestEvent(123489, u'This is just some stuff to fill the line.')]

        output_mediator = self._CreateOutputMediator()
        output_writer = cli_test_lib.TestOutputWriter()
        output_module = TestOutputModule(output_mediator)
        output_module.SetOutputWriter(output_writer)

        # Write the complete file: header, every event, footer.
        output_module.WriteHeader()
        for test_event in test_events:
            output_module.WriteEvent(test_event)
        output_module.WriteFooter()

        expected_output = (
            b'<EventFile>\n'
            b'<Event>\n'
            b'\t<Date>03/01/2012</Date>\n'
            b'\t<Time>123456</Time>\n'
            b'\t<Entry>My Event Is Now!</Entry>\n'
            b'</Event>\n'
            b'<Event>\n'
            b'\t<Date>03/01/2012</Date>\n'
            b'\t<Time>123458</Time>\n'
            b'\t<Entry>There is no tomorrow.</Entry>\n'
            b'</Event>\n'
            b'<Event>\n'
            b'\t<Date>03/01/2012</Date>\n'
            b'\t<Time>123462</Time>\n'
            b'\t<Entry>Tomorrow is now.</Entry>\n'
            b'</Event>\n'
            b'<Event>\n'
            b'\t<Date>03/01/2012</Date>\n'
            b'\t<Time>123489</Time>\n'
            b'\t<Entry>This is just some stuff to fill the line.</Entry>\n'
            b'</Event>\n'
            b'</EventFile>\n')

        self.assertEqual(output_writer.ReadOutput(), expected_output)

    def testOutputList(self):
        """Test listing up all available registered modules."""
        manager.OutputManager.RegisterOutput(TestOutputModule)

        module_seen = False
        for name, description in manager.OutputManager.GetOutputs():
            if name == 'test_xml':
                module_seen = True
                self.assertEqual(description, (
                    u'Test output that provides a simple mocked XML.'))
        self.assertTrue(module_seen)

        manager.OutputManager.DeregisterOutput(TestOutputModule)
class EventBufferTest(test_lib.OutputModuleTestCase):
    """Few unit tests for the EventBuffer class."""

    def _CheckBufferLength(self, event_buffer, expected_length):
        """Checks the length of the event buffer.

        Args:
          event_buffer: the event buffer object (instance of EventBuffer).
          expected_length: the expected event buffer length.
        """
        # A buffer that does not deduplicate never retains entries.
        if not event_buffer.check_dedups:
            expected_length = 0
        # pylint: disable=protected-access
        self.assertEqual(len(event_buffer._buffer_dict), expected_length)

    def testFlush(self):
        """Test to ensure we empty our buffers and sends to output properly."""
        mediator = self._CreateOutputMediator()
        writer = cli_test_lib.TestOutputWriter()
        module = TestOutputModule(mediator)
        module.SetOutputWriter(writer)
        event_buffer = interface.EventBuffer(module, False)

        event_buffer.Append(TestEvent(123456, u'Now is now'))
        self._CheckBufferLength(event_buffer, 1)

        # Three more appends; the repeated 'Now is now' entries deduplicate
        # down to two distinct buffered events.
        event_buffer.Append(TestEvent(123456, u'OMG I AM DIFFERENT'))
        event_buffer.Append(TestEvent(123456, u'Now is now'))
        event_buffer.Append(TestEvent(123456, u'Now is now'))
        self._CheckBufferLength(event_buffer, 2)

        event_buffer.Flush()
        self._CheckBufferLength(event_buffer, 0)

        event_buffer.Append(TestEvent(123456, u'Now is now'))
        event_buffer.Append(TestEvent(123456, u'Now is now'))
        event_buffer.Append(TestEvent(123456, u'Different again :)'))
        self._CheckBufferLength(event_buffer, 2)
        # A new timestamp leaves only the single new event buffered
        # (presumably an implicit flush inside Append).
        event_buffer.Append(TestEvent(123457, u'Now is different'))
        self._CheckBufferLength(event_buffer, 1)
# Standard unittest entry point.
if __name__ == '__main__':
    unittest.main()
|
# @Author: <NAME> <arthur>
# @Date: 10.05.2021
# @Filename: test_analysis.py
# @Last modified by: arthur
# @Last modified time: 15.09.2021
import pyrexMD.analysis.analyze as ana
import pyrexMD.misc as misc
import MDAnalysis as mda
import matplotlib
import matplotlib.pyplot as plt
import numpy as np
from numpy.testing import assert_allclose
from Bio.PDB.PDBExceptions import PDBConstructionWarning
from unittest.mock import patch
import pathlib
import pytest
import os
# find main directory of pyrexMD
posixpath = pathlib.Path(".").rglob("*core.py")  # generator for matching paths
pathname = next(posixpath).as_posix()  # get first path name
# Use os.path.dirname() instead of the previous str.rstrip("core.py"):
# rstrip removes a *set* of characters, so it could also eat trailing
# 'c'/'o'/'r'/'e'/'p'/'y'/'.' characters belonging to the directory name.
main_dir = os.path.relpath(os.path.dirname(os.path.realpath(pathname)))  # main directory of pyrexMD

# set up test paths
cwd = os.getcwd()
print(f"cwd: {cwd}")
pre = f"{main_dir}/tests/files/1l2y"   # fixture directory used by all tests below
pdb = f"{pre}/1l2y_ref.pdb"
tpr = f"{pre}/traj.tpr"
xtc = f"{pre}/traj.xtc"
def test_get_timeconversion():
    """get_time_conversion() accepts ps- and ns-unit trajectories and
    rejects non-Universe input with a TypeError."""
    mobile = mda.Universe(tpr, xtc, tpr_resid_from_one=True)
    ana.get_time_conversion(mobile, tu="ns")
    # Force the trajectory's time unit to ns to cover the other branch.
    mobile.trajectory.units["time"] = "ns"
    ana.get_time_conversion(mobile, tu="ps")
    with pytest.raises(TypeError):
        ana.get_time_conversion("wrong_type", tu="ps")
    return
def test_get_FASTA():
    """get_FASTA() returns the sequence of the reference PDB as a list."""
    # Bio.PDB emits a PDBConstructionWarning for this structure; expected.
    with pytest.warns(PDBConstructionWarning):
        assert ana.get_FASTA(pdb) == ['NLYIQWLKDGGPSSGRPPPS']
    return
def test_alignto():
    """alignto() reproduces the reference RMSD pair for the CA alignment."""
    mobile = mda.Universe(tpr, xtc, tpr_resid_from_one=True)
    ref = mda.Universe(pdb)
    val = ana.alignto(mobile, ref, "protein and name CA", "protein and name CA")
    # assert_allclose raises on mismatch; wrapping it in
    # `assert ... == None` (as before) made the assert vacuous.
    assert_allclose(val, (0.37619036475899914, 0.3503198898884487))
    return
@patch("matplotlib.pyplot.show")
def test_get_RMSD(mock_show):
    """get_RMSD() matches the stored reference frames/times/values."""
    mobile = mda.Universe(tpr, xtc, tpr_resid_from_one=True)
    ref = mda.Universe(pdb)
    val = ana.get_RMSD(mobile, ref, sel1='backbone', sel2='backbone', plot=True)
    expected = np.load(f"{pre}/get_RMSD.npy")
    # assert_allclose raises on mismatch; the former `assert ... == None`
    # wrappers were vacuous.
    assert_allclose(val[0], expected[0])
    assert_allclose(val[1], expected[1])
    assert_allclose(val[2], expected[2])
    plt.close("all")
    return
@patch("matplotlib.pyplot.show")
def test_get_RMSF(mock_show):
    """get_RMSF() matches the stored reference and rejects bad input."""
    mobile = mda.Universe(tpr, xtc, tpr_resid_from_one=True)
    val = ana.get_RMSF(mobile, "backbone", plot=True)
    expected = np.load(f"{pre}/get_RMSF.npy")
    # assert_allclose raises on mismatch; the former `assert ... == None`
    # wrapper was vacuous.
    assert_allclose(val, expected)
    # coverage 2nd plot case
    val = ana.get_RMSF(mobile, "protein and name CA", plot=True)
    with pytest.raises(TypeError):
        val = ana.get_RMSF("wrong_type", "backbone", plot=True)
    plt.close("all")
    return
def test_HELP_sss_None2int():
    """_HELP_sss_None2int() replaces None start/stop/step entries with
    concrete frame numbers derived from the input object."""
    mobile = mda.Universe(tpr, xtc, tpr_resid_from_one=True)
    cfg = misc.CONFIG({"sss": [None, None, None],
                       "start": None,
                       "stop": None,
                       "step": None})
    # With a Universe, the defaults come from the trajectory (21 frames).
    expected = misc.CONFIG({"sss": [0, 21, 1],
                            "start": 0,
                            "stop": 21,
                            "step": 1})
    val = ana._HELP_sss_None2int(mobile, cfg)
    assert val.items() == expected.items()
    ############################################################################
    # Same conversion, driven by a distance-matrix array instead of a Universe.
    mobile = mda.Universe(tpr, xtc, tpr_resid_from_one=True)
    DM = ana.get_Distance_Matrices(mobile, stop=10)
    cfg = misc.CONFIG({"sss": [None, None, None],
                       "start": None,
                       "stop": None,
                       "step": None})
    expected = misc.CONFIG({"sss": [0, 10, 1],
                            "start": 0,
                            "stop": 10,
                            "step": 1})
    val = ana._HELP_sss_None2int(DM, cfg)
    assert val.items() == expected.items()
    return
def test_get_Distance_Matrices():
    """get_Distance_Matrices() matches the stored reference and accepts
    lists of PDB files with/without flattening."""
    mobile = mda.Universe(tpr, xtc, tpr_resid_from_one=True)
    val = ana.get_Distance_Matrices(mobile)
    expected = np.load(f"{pre}/get_Distance_Matrices.npy")
    # assert_allclose raises on mismatch; the former `assert ... == None`
    # wrapper was vacuous.
    assert_allclose(val, expected)
    # coverage
    mobile_list = [f"{pdb}", f"{pdb}"]
    ana.get_Distance_Matrices(mobile_list, flatten=True)
    ana.get_Distance_Matrices(mobile_list, flatten=False)
    with pytest.raises(TypeError):
        mobile_list = [f"{pdb}", f"non_pdb_extension.txt"]
        ana.get_Distance_Matrices(mobile_list)
    return
def test_get_shortest_RES_distances():
    """get_shortest_RES_distances() matches the stored reference for all
    accepted input types."""
    ref = mda.Universe(pdb)
    val = ana.get_shortest_RES_distances(ref, sel="protein")
    expected = np.load(f"{pre}/shortest_RES_distances.npy", allow_pickle=True)
    # assert_allclose raises on mismatch; the former `assert ... == None`
    # wrapper was vacuous.
    assert_allclose(val[0], expected[0])
    assert (val[1] == expected[1]).all()  # this could cause problems as mixed types
    # coverage
    ana.get_shortest_RES_distances(pdb, sel="protein")  # type string
    ana.get_shortest_RES_distances(ref.atoms, sel="protein")  # type atom grp
    return
def test_get_trendline():
    """get_trendline() averages X and Y in windows of `compress` points."""
    xs = [0, 1, 2, 3, 4, 5]
    ys = [0, 10, 20, 20, 5, 15]
    trend_x, trend_y = ana.get_trendline(xs, ys, compress=2)
    assert trend_x == [0.5, 2.5, 4.5]
    assert trend_y == [5.0, 20.0, 10.0]
    # coverage: ndarray input, and a compress window larger than the data
    ana.get_trendline(np.array(xs), np.array(ys), compress=2)   # type: array
    ana.get_trendline(np.array(xs), np.array(ys), compress=20)  # remainder != 0
    with pytest.raises(ValueError):
        ana.get_trendline(xs, ys[:-2], compress=2)
    return
@patch("matplotlib.pyplot.show")
def test_plot_trendline(mock_show):
    """plot_trendline() returns a Line2D and can draw into existing figures."""
    X = [0, 1, 2, 3, 4, 5]
    Y = [0, 10, 20, 20, 5, 15]
    trendline = ana.plot_trendline(X, Y, compress=2)
    assert isinstance(trendline, matplotlib.lines.Line2D)
    # coverage
    fig, ax = misc.figure()
    ana.plot_trendline(X, Y, compress=2, fig=fig)  # use existing figure: fig object
    ana.plot_trendline(X, Y, compress=2, fig=fig)  # replaces the existing trendline
    ana.plot_trendline(X, Y, compress=2, fig=5)    # use existing figure: int type
    plt.close("all")
    return
@patch("matplotlib.pyplot.show")
def test_remove_trendline(mock_show):
    """remove_trendline() works via default lookup, explicit trendline or figure."""
    X = [0, 1, 2, 3, 4, 5]
    Y = [0, 10, 20, 20, 5, 15]
    trendline = ana.plot_trendline(X, Y, compress=2)
    ana.remove_trendline()
    # coverage: figure has no trendline object (or it was already removed)
    ana.remove_trendline()
    # coverage: trendline passed explicitly
    fig, ax = misc.figure()
    trendline = ana.plot_trendline(X, Y, compress=2)
    ana.remove_trendline(trendline=trendline)
    # coverage: figure passed explicitly
    fig, ax = misc.figure()
    trendline = ana.plot_trendline(X, Y, compress=2)
    ana.remove_trendline(fig=fig)
    plt.close("all")
    return
@patch("matplotlib.pyplot.show")
def test_PLOT(mock_show):
    """Smoke test: PLOT() draws simple x/y data without raising."""
    ana.PLOT(range(10), range(10))
    plt.close("all")
    return
@patch("matplotlib.pyplot.show")
def test_plot_RMSD(mock_show):
    """plot_RMSD() handles the verbose/cut_min/title/save_as combinations.

    (The previous local `xdata`/`ydata` lists were never used and have
    been removed.)
    """
    ana.plot_RMSD(RMSD_file=f"{pre}/RMSD.log", verbose=None)
    ana.plot_RMSD(RMSD_file=f"{pre}/RMSD.log", verbose=True, cut_min=0)
    ana.plot_RMSD(RMSD_file=f"{pre}/RMSD.log", verbose=False, title="title", save_as="./test.png")
    ana.plot_RMSD(RMSD_file=f"{pre}/RMSD.log", verbose=False, title="title", save_as="./test.png", filedir="./")
    misc.rm("./test.png")  # clean up the saved figure
    plt.close("all")
    return
def test_HELP_setup_bins():
    """_HELP_setup_bins() builds bins from n_bins or from an explicit list."""
    data = [0, 50, 100]
    bins, step, cfg = ana._HELP_setup_bins(data, n_bins=2)
    assert np.all(bins == np.array([0., 50., 100.]))
    assert step == 50.0
    assert isinstance(cfg, misc.CONFIG)
    # Explicit bin edges are passed through unchanged.
    bins, step, cfg = ana._HELP_setup_bins(data, bins=[0, 50])
    assert np.all(bins == np.array([0, 50]))
    assert step == 50
    assert isinstance(cfg, misc.CONFIG)
    return
@patch("matplotlib.pyplot.show")
def test_plot_hist(mock_show):
    """plot_hist() handles list-of-arrays input, both orientations, the
    apply_cut_limits switch, labels and saving."""
    ref = mda.Universe(pdb, tpr_resid_from_one=True)
    mobile = mda.Universe(tpr, xtc, tpr_resid_from_one=True)
    FRAME, TIME, RMSD = ana.get_RMSD(mobile, ref, sel1="name CA", sel2="name CA")
    # coverage
    fig, ax = misc.figure()
    fig, ax, hist = ana.plot_hist([RMSD, RMSD, RMSD], ec=None, ax=ax, num=fig.number, fig=fig)  # multiple arrays, coverage of if cases
    fig, ax, hist = ana.plot_hist([RMSD, RMSD, RMSD], orientation="horizontal")
    fig, ax, hist = ana.plot_hist(RMSD, apply_cut_limits=True, orientation="horizontal")   # apply_cut_limits + orientation
    fig, ax, hist = ana.plot_hist(RMSD, apply_cut_limits=False, orientation="horizontal")  # apply_cut_limits + orientation
    fig, ax, hist = ana.plot_hist(RMSD, apply_cut_limits=True, orientation="vertical")     # apply_cut_limits + orientation
    fig, ax, hist = ana.plot_hist(RMSD, apply_cut_limits=False, orientation="vertical")    # apply_cut_limits + orientation
    fig, ax, hist = ana.plot_hist([RMSD, RMSD, RMSD], title="title", xlabel="xlabel", ylabel="ylabel", save_as="./temp.png")  # labels + savefig
    misc.rm("./temp.png")
    plt.close("all")
    return
@patch("matplotlib.pyplot.show")
def test_plot_deltahist(mock_show):
    """plot_deltahist() handles logscale, both orientations, titles and saving."""
    # coverage
    fig, ax = ana.plot_deltahist(RMSD_file=f"{pre}/RMSD2.log", RMSD_ref=f"{pre}/RMSD.log", logscale=True, orientation="horizontal", apply_cut_limits=True, show_all_hist=True)
    fig, ax = ana.plot_deltahist(RMSD_file=f"{pre}/RMSD2.log", RMSD_ref=f"{pre}/RMSD.log", logscale=False, orientation="horizontal", title="title", num=2)
    fig, ax = ana.plot_deltahist(RMSD_file=f"{pre}/RMSD2.log", RMSD_ref=f"{pre}/RMSD.log", logscale=True, orientation="vertical", save_as="./temp.png", apply_cut_limits=True)
    fig, ax = ana.plot_deltahist(RMSD_file=f"{pre}/RMSD2.log", RMSD_ref=f"{pre}/RMSD.log", logscale=False, orientation="vertical")
    misc.rm("./temp.png")
    plt.close("all")
    return
def test_HELP_convert_RMSD_nm2angstrom():
    """_HELP_convert_RMSD_nm2angstrom() multiplies nm values by 10."""
    RMSD_A = misc.read_file(f"{pre}/RMSD.log", usecols=1)
    RMSD_nm = RMSD_A/10
    val = ana._HELP_convert_RMSD_nm2angstrom(RMSD_nm)
    # assert_allclose raises on mismatch; the former `assert ... == None`
    # wrapper was vacuous.
    assert_allclose(val, RMSD_A)
    return
@patch("matplotlib.pyplot.show")
def test_plot_HEATMAP(mock_show):
    """Smoke test: plot_HEATMAP() draws random data with labels and saves a PDF."""
    data = np.random.randint(5, size=(5, 10))
    fig, ax = ana.plot_HEATMAP(data, title="title", xlabel="xlabel", ylabel="ylabel", cbar_label="cbar_label", save_as="./temp.pdf")
    misc.rm("./temp.pdf")
    plt.close("all")
    return
@patch("matplotlib.pyplot.show")
def test_plot_HEATMAP_REX_RMSD(mock_show):
    """plot_HEATMAP_REX_RMSD() draws the REX fixture with and without
    automatic unit conversion."""
    # coverage
    fig, ax = ana.plot_HEATMAP_REX_RMSD(REX_RMSD_dir=f"{pre}/REX_RMSD_DIR", title="title", save_as="./temp.pdf")
    fig, ax = ana.plot_HEATMAP_REX_RMSD(REX_RMSD_dir=f"{pre}/REX_RMSD_DIR", auto_convert=False)
    misc.rm("./temp.pdf")
    plt.close("all")
    return
|
# Authors: <NAME> <<EMAIL>>
#
# License: BSD (3-clause)
from pathlib import Path
import numpy as np
from numpy.core.records import fromarrays
from scipy.io import savemat
import mne
from ..utils import have
def write_fif(fname, raw):
    """Export raw to a FIF file using MNE's own Raw.save()."""
    raw.save(fname)
def write_set(fname, raw):
    """Export raw to EEGLAB .set file.

    Writes a MATLAB file containing a single EEG struct with the signal
    data (in microvolts), channel locations and annotation-derived events.
    """
    signals = raw.get_data() * 1e6  # convert to microvolts
    sfreq = raw.info["sfreq"]
    chanlocs = np.rec.fromarrays([raw.info["ch_names"]], names=["labels"])
    annotations = raw.annotations
    # Onsets are shifted by one sample — EEGLAB latencies appear to be
    # 1-based (TODO confirm against the EEGLAB format spec).
    events = np.rec.fromarrays(
        [annotations.description,
         annotations.onset * sfreq + 1,
         annotations.duration * sfreq],
        names=["type", "latency", "duration"])
    eeg = dict(data=signals,
               setname=fname,
               nbchan=signals.shape[0],
               pnts=signals.shape[1],
               trials=1,
               srate=sfreq,
               xmin=raw.times[0],
               xmax=raw.times[-1],
               chanlocs=chanlocs,
               event=events,
               icawinv=[],
               icasphere=[],
               icaweights=[])
    savemat(fname, dict(EEG=eeg), appendmat=False)
def write_edf(fname, raw):
    """Export raw to EDF/BDF file (requires pyEDFlib).

    The container type follows the file extension (.edf or .bdf); any
    other extension raises ValueError (previously `filetype` was left
    unbound, causing a confusing NameError).  The writer is now closed
    even when an intermediate call fails (it was previously leaked).
    """
    import pyedflib

    ext = "".join(Path(fname).suffixes)
    if ext == ".edf":
        filetype = pyedflib.FILETYPE_EDFPLUS
        dmin, dmax = -32768, 32767
    elif ext == ".bdf":
        filetype = pyedflib.FILETYPE_BDFPLUS
        dmin, dmax = -8388608, 8388607
    else:
        raise ValueError("Unsupported file extension: {!r}".format(ext))

    data = raw.get_data() * 1e6  # convert to microvolts
    fs = raw.info["sfreq"]
    nchan = raw.info["nchan"]
    ch_names = raw.info["ch_names"]
    meas_date = raw.info["meas_date"]
    # Describe the recording filters; the "Hz" unit was missing from the
    # low-pass cutoff.
    prefilter = (f"{raw.info['highpass']}Hz - "
                 f"{raw.info['lowpass']}Hz")
    pmin, pmax = data.min(axis=1), data.max(axis=1)

    f = pyedflib.EdfWriter(fname, nchan, filetype)
    try:
        channel_info = []
        data_list = []
        for i in range(nchan):
            channel_info.append(dict(label=ch_names[i],
                                     dimension="uV",
                                     sample_rate=fs,
                                     physical_min=pmin[i],
                                     physical_max=pmax[i],
                                     digital_min=dmin,
                                     digital_max=dmax,
                                     transducer="",
                                     prefilter=prefilter))
            data_list.append(data[i])
        f.setTechnician("Exported by MNELAB")
        f.setSignalHeaders(channel_info)
        if meas_date is not None:
            f.setStartdatetime(meas_date)
        # note that currently, only blocks of whole seconds can be written
        f.writeSamples(data_list)
        for ann in raw.annotations:
            f.writeAnnotation(ann["onset"], ann["duration"], ann["description"])
    finally:
        f.close()
def write_bv(fname, raw, events=None):
    """Export data to BrainVision EEG/VHDR/VMRK file (requires pybv)."""
    import pybv
    path = Path(fname)
    data = raw.get_data()
    fs = raw.info["sfreq"]
    ch_names = raw.info["ch_names"]
    if events is None:
        if raw.annotations:
            # derive events (onset sample, id, duration in samples) from annotations
            events = mne.events_from_annotations(raw)[0]
            dur = raw.annotations.duration * fs
            events = np.column_stack([events[:, [0, 2]], dur.astype(int)])
    else:
        events = events[:, [0, 2]]
    pybv.write_brainvision(data, fs, ch_names, path.stem, path.parent, events=events)
# supported write file formats
# this dict contains each supported file extension as a key
# the corresponding value is a list with three elements: (1) the writer
# function, (2) the full file format name, and (3) a (comma-separated) string
# indicating the supported objects (currently either raw or epoch)
writers = {".fif": [write_fif, "Elekta Neuromag", "raw,epoch"],
           ".fif.gz": [write_fif, "Elekta Neuromag", "raw,epoch"],
           ".set": [write_set, "EEGLAB", "raw"]}

# optional writers are only registered when their third-party backend is
# importable (availability flags come from ..utils.have)
if have["pybv"]:
    writers.update({".eeg": [write_bv, "BrainVision", "raw"]})
if have["pyedflib"]:
    writers.update({".edf": [write_edf, "European Data Format", "raw"],
                    ".bdf": [write_edf, "Biosemi Data Format", "raw"]})
def write_raw(fname, raw):
    """Dispatch `raw` to the writer registered for `fname`'s extension(s)."""
    ext = "".join(Path(fname).suffixes)
    try:
        writer = writers[ext][0]
    except KeyError:
        raise ValueError(f"Unknown file type {ext}.") from None
    writer(fname, raw)
|
<gh_stars>1-10
"""
This is an example settings/local.py file.
These settings overrides what's in settings/base.py
"""
import logging
# To extend any settings from settings/base.py here's an example:
#from . import base
#INSTALLED_APPS = base.INSTALLED_APPS + ['debug_toolbar']
# SQLite keeps local development self-contained; swap ENGINE/NAME (and the
# commented OPTIONS below) to target MySQL/Postgres instead.
DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.sqlite3',
        'NAME': 'db/development.sqlite3',
        'USER': '',
        'PASSWORD': '',
        'HOST': '',
        'PORT': '',
        #'OPTIONS': {
        #    'init_command': 'SET storage_engine=InnoDB',
        #    'charset' : 'utf8',
        #    'use_unicode' : True,
        #},
        #'TEST_CHARSET': 'utf8',
        #'TEST_COLLATION': 'utf8_general_ci',
    },
    # 'slave': {
    #   ...
    # },
}
# Uncomment this and set to all slave DBs in use on the site.
# SLAVE_DATABASES = ['slave']
# Recipients of traceback emails and other notifications.
ADMINS = (
    # ('<NAME>', '<EMAIL>'),
)
MANAGERS = ADMINS
# Local time zone for this installation. Choices can be found here:
# http://en.wikipedia.org/wiki/List_of_tz_zones_by_name
# although not all choices may be available on all operating systems.
# If running in a Windows environment this must be set to the same as your
# system time zone.
TIME_ZONE = 'America/Chicago'
# Debugging displays nice error messages, but leaks memory. Set this to False
# on all server instances and True only for development.
DEBUG = TEMPLATE_DEBUG = True
# Is this a development instance? Set this to True on development/master
# instances and False on stage/prod.
DEV = True
# Make this unique, and don't share it with anybody. It cannot be blank.
# NOTE(review): this example key is committed to version control — any real
# deployment must override it with its own secret.
SECRET_KEY = '6fi+(7^_zc*ic#s6*eo21l)9jl(h443f9oym8wpqqcr1&)nrr('
# Uncomment these to activate and customize Celery:
# CELERY_ALWAYS_EAGER = False  # required to activate celeryd
# BROKER_HOST = 'localhost'
# BROKER_PORT = 5672
# BROKER_USER = 'django'
# BROKER_PASSWORD = '<PASSWORD>'
# BROKER_VHOST = 'django'
# CELERY_RESULT_BACKEND = 'amqp'
## Log settings
LOG_LEVEL = logging.INFO
HAS_SYSLOG = True
SYSLOG_TAG = "http_app_inventory"  # Make this unique to your project.
# Remove this configuration variable to use your custom logging configuration
LOGGING_CONFIG = None
# dictConfig-style logging: only the project logger's level is customized here
LOGGING = {
    'version': 1,
    'loggers': {
        'inventory': {
            'level': "DEBUG"
        }
    }
}
# Common Event Format logging parameters
#CEF_PRODUCT = 'inventory'
#CEF_VENDOR = 'Your Company'
#CEF_VERSION = '0'
#CEF_DEVICE_VERSION = '0'
# fix: a bare parenthesized string is NOT a tuple — the trailing comma is
# required. Without it INTERNAL_IPS was the string '127.0.0.1', so membership
# checks matched any substring (e.g. '0.0.1') instead of the exact address.
INTERNAL_IPS = ('127.0.0.1',)
# Enable these options for memcached
#CACHE_BACKEND= "memcached://127.0.0.1:11211/"
#CACHE_MIDDLEWARE_ANONYMOUS_ONLY=True
# Set this to true if you use a proxy that sets X-Forwarded-Host
#USE_X_FORWARDED_HOST = False
# Email settings
# These are set so that you can test email sending
# by running a 'dummy' SMTP server by running:
# >> python -m smtpd -n -c DebuggingServer localhost:1025
# These settings should be changed in production
# CHANGEME
# NOTE(review): presumably the django-registration activation window in days
# — confirm against the app consuming this setting.
ACCOUNT_ACTIVATION_DAYS=7
# matches the dummy local SMTP server suggested in the comment above
EMAIL_HOST = 'localhost'
EMAIL_PORT = 1025
EMAIL_HOST_USER = ""
EMAIL_HOST_PASSWORD = ""
EMAIL_USE_TLS = False
DEFAULT_FROM_EMAIL = '<EMAIL>'
|
<gh_stars>0
"""Core methods for find-data extraction/parsing."""
import os
import sys
import re
import json
from datetime import date, datetime
from normalization import author_role
from cons_platforms import PLATFORM_ALIASES
from region_tree import search_region_tree
from tag_match import tag_match, yolo_spl, _clean_entry, drop_none
from store import (ArticleInfo, GameInfoCore, GameInfoAuthor, GameInfoCompany,
GameInfoEngine, GameInfoRelease, GameInfoGenre, insert_game_info)
sys.path.append('../')
from gameworm import tty_colors
# infobox subjects that assert the article is a single video game
ASSERTIVE_SUBJECTS = ["video game", "video games", "vg", "cvg"]
# subjects for franchises/other media that may still reference games
CROSSMEDIA_SUBJECTS = ["media franchise", "animanga/header", "film", "television",
    "toy", "character", "comics character", "game", "comic book title", "hollywood cartoon",
    "comics character", "comics organization", "lego theme", "comics meta series",
    "wrestling promotion"]
GAME_SERIES_SUBJECTS = ["video game series", "video games series", "vg series", "video game character"]
# wiki markup fragments removed outright during prenormalization
markup_rm = re.compile(",\s\[\[[0-9]{4}\sin\svideo\sgaming\||titlestyle")
# wiki markup fragments replaced by the '§' separator token
markup_sep1 = re.compile("({{collapsible list\s?\|\s?(title=\s?[\w+\s,]+)?|\/|<[^<]+?>|{{\s?[A-Za-z]+\s?\||'''|''|\[\[|\]\]|}}|\\n|\s?=\s?([a-z\-]+:[a-z\s0-9]+(\;|\s\|))|{{\s?(vg|video game\s)release)", flags=re.I)
# collapses runs of '§' (optionally around '|') into a single '§'
markup_sep2 = re.compile("\s?§\s?\|?§?\s?")
# matches a month (with optional day on either side) followed by a comma,
# captured as group 'comma' so _repl_dt_comma can drop it
remove_date_commas = re.compile(
    "(\s[0-9]{1,2})?(Jan(uary)?|Feb(ruary)?|Mar(ch)|Apr(il)|May|Jun(e)?|Jul(y)?|Aug(ust)?|Sep(tember)?|Oct(ober)?|Nov(ember)?|Dec(ember))(\s[0-9]{1,2})?(?P<comma>\s?\,)"
)
# '{{start date|YYYY|MM|DD}}'-style templates, rewritten by _repl_special_dt
special_date_pattern = re.compile(
    "((start|release)\s?date|dts|§)\|?([0-9]{4})\|([0-9]{1,2})\|([0-9]{1,2})")
# fuzzy seasonal/quarter dates such as 'mid-2004' or 'q3 2010' (unused here?
# NOTE(review): no reference in this chunk — confirm it is used elsewhere)
special_date_pattern2 = re.compile(
    "(mid|late|early|q1|q2|q3|q4|fall|sprint|summer|winter|holidays)(-|\s)[0-9]{4}"
)
# tokens that are pure markup noise after splitting
_isnoise = re.compile("\s?[\-\|\\|\[|\{|\}|\$|\%|\?|\.|,|url]\s?")
class Platform:
    """A platform reference resolved to a canonical code from PLATFORM_ALIASES."""

    def __init__(self, code, alias_used=None):
        if code not in PLATFORM_ALIASES:
            # fall back to resolving the alias that was actually seen
            code = find_plat_code(alias_used)
            if not code:
                raise ValueError(f"Not a known platform/platform-aliases: {alias_used}")
        self.alias_used = alias_used
        self.code = code

    def __repr__(self):
        return f"<{self.code}>"
class Region:
    """Thin wrapper tagging a value as a resolved region."""

    def __init__(self, vl):
        self.vl = vl

    def __repr__(self):
        return f"<{self.vl}>"
def _is_region(inp):
    """Return a Region if `inp` resolves in the region tree, else None."""
    resolved = search_region_tree(inp)
    return Region(resolved) if resolved else None
def _is_date(inp):
if inp in ["TBA"]:
return date(2099, 1, 1)
patterns = [
"%B %d %Y",
"%B %Y",
"%d %B %Y",
"%d %b %Y",
"%b %Y",
"%Y-%m-%d",
"%Y",
"%B %dth %Y",
]
for ptt in patterns:
try:
el = datetime.strptime(inp, ptt).date()
if el.year < 1970 or el.year > 2022:
raise ValueError("not a valid release year!")
return el
except ValueError:
continue
return None
def _repl_dt_comma(mo):
if mo.group("comma"):
span = mo.span()
return mo.string[span[0]:span[1] - 1]
def _repl_special_dt(mo):
span = mo.span()
parts = mo.string[span[0] + 1:span[1] - 1].split("|")
print(parts)
p3 = 1
try:
p3 = int(parts[3])
if p3 < 1 or p3 > 28:
raise ValueError("going with a safe day range")
except (ValueError, IndexError):
p3 = 1
pass
dt = date(int(parts[1]), int(parts[2]), p3)
return dt.strftime("|%B %d %Y")
def prenorm(inp):
    """Prenormalize infobox markup: strip noise, unify separators to '§',
    and rewrite date templates/comma-dates into parseable forms."""
    pipeline = (
        (markup_rm, ' '),
        (markup_sep1, "§"),
        (markup_sep2, "§"),
        (markup_sep2, "§"),  # applied twice: collapsing can expose new runs
    )
    for pattern, repl in pipeline:
        inp = pattern.sub(repl, inp)
    inp = inp.strip("§")
    inp = remove_date_commas.sub(_repl_dt_comma, inp)
    return special_date_pattern.sub(_repl_special_dt, inp)
yf = re.compile("§|\||,|\sand\s|\(|\)\/")


def yank_forward(inp):
    """Split `inp` on separator tokens (§, |, comma, ' and ', parens).

    Scans character by character, emitting the text accumulated since the
    previous separator. A terminating separator is appended when missing so
    the final segment is yielded too.
    """
    if not inp:
        # fix: previously inp[-1] raised IndexError on the empty string
        return []
    if not yf.match(inp[-1]):
        inp += "§"
    values = []
    sk = 0
    last_yank = 0
    while sk < len(inp):
        if yf.match(inp[sk:]):
            values.append(inp[last_yank:sk])
            # NOTE(review): multi-char separators (' and ') advance only one
            # char, leaving residue in the next segment — confirm intended
            last_yank = sk + 1
        sk += 1
    return values
def find_plat_code(alias):
    """Resolve `alias` (a code or any known alias) to its canonical platform
    code, or None when unknown."""
    for code, aliases in PLATFORM_ALIASES.items():
        if alias == code or alias in aliases:
            return code
    return None
def is_plat(text):
    """Return a Platform when `text` names or aliases a known platform, else None."""
    stripped = text.strip()
    code = find_plat_code(stripped)
    return Platform(code, stripped) if code else None
def outer_peel(raw_content):
    """Peel a MediaWiki API response envelope down to the revision text.

    Walks query -> pages -> <page id> -> revisions -> [0] -> "*" and returns
    (page_id, revision_text), or (None, False) when a level is missing.
    Accepts either a parsed dict or a JSON string.
    """
    wpi = None  # the wiki page id, captured when the "$PID" level is resolved
    if isinstance(raw_content, dict):
        pass
    else:
        raw_content = json.loads(raw_content)
    for lvl in ["query", "pages", "$PID", "revisions", 0, "*"]:
        if lvl == "$PID":
            # the page-id key is dynamic: when several pages are returned,
            # prefer the one whose title mentions "game", else take the last
            nk = len(raw_content)
            for k in raw_content.keys():
                wpi = k
                lvl = k  # rebinding the loop var selects the key to descend into
                if nk > 1:
                    if "game" in raw_content[k].get("title", "blank"):
                        break
                else:
                    break
        if isinstance(lvl, str) and (lvl not in raw_content):
            print(tty_colors.danger(lvl, " not in content dict"))
            return None, False
        raw_content = raw_content[lvl]
    return wpi, raw_content
def get_infobox(rev_content):
    """Locate the first '{{Infobox' in the revision text.

    Returns (subject, infobox_content), or (None, False) when no infobox
    is present.
    """
    first = next(re.finditer("\{\{\s?Infobox", rev_content, flags=re.I), None)
    if first is None:
        print(tty_colors.danger("Infobox not found!"))
        return None, False
    si, sj = first.span()
    slic = rev_content[si:]
    # the subject is whatever follows 'Infobox' up to the first field separator
    ib_subject = rev_content[sj + 1:].split("|", 1)[0]
    ib_subject = markup_sep1.sub("", ib_subject).strip()
    ib_content = tag_match(slic)
    return ib_subject, ib_content
def inner_peel(rev):
    """Split an infobox body into its recognized key/value fields.

    The body is split on "\n|" field separators; each "key=value" chunk whose
    cleaned key is expected is stored ("release" is folded into "released").
    Unrecognized fields are only printed. Returns the dict with None entries
    removed via drop_none.
    """
    field_hits = 0  # count of recognized non-empty fields (diagnostics only)
    # every infobox field this pipeline knows how to consume
    expected_values = {
        "title": None,
        "image": None,
        "caption": None,
        "developer": None,
        "publisher": None,
        "designer": None,
        "composer": None,
        "engine": None,
        "released": None,
        "release": None,
        "genre": None,
        "modes": None,
        "series": None,
        "director": None,
        "producer": None,
        "programmer": None,
        "artist": None,
        "writer": None,
        "platforms": None,
        "creator": None,
        "first release version": None,
        "first release date": None,
        "latest release version": None,
        "latest release date": None,
        "platform of origin": None,
        "year of inception": None,
        "spinoffs": None,
        "first release": None,
    }
    expected_info_fields = expected_values.keys()
    info_spl = yolo_spl(rev, "\n|")
    # info_spl[0] is the '{{Infobox ...' header, skip it
    for chunk in info_spl[1:]:
        if "=" not in chunk:
            continue
        field, value = chunk.split("=", 1)
        field = _clean_entry(field).lower()
        if field in expected_info_fields:
            if field == "release":
                field = "released"  # normalize the two spellings to one key
            expected_values[field] = value.strip()
            if len(expected_values[field]) > 0:
                field_hits += 1
        else:
            print("unexpected field: ", field, "with value: ", value)
    print("info hits: ", field_hits)
    return drop_none(expected_values)
def _generic_list_extraction(val):
    """Prenormalize `val`, strip markup/urls/parentheticals, and split it
    into raw list items on the usual separators."""
    cleaned = re.sub('<[^<]+?>|\[|\]|http://.+|\(.+\)', '', prenorm(val))
    return re.split("§|\||,|;", cleaned)
def author_extraction(wpi, val, role):
    """Extract person names from a credit field into GameInfoAuthor rows.

    Markup and urls are replaced by '§', the field is split on separators,
    and only name-like tokens (length > 5, word characters only) are kept.
    """
    val2 = re.sub("<[^<]+?>|http://.+?(\||\])|\[|\]|\(.+\)", "§", val)
    val2 = set([x.strip().title() for x in re.split(",|§|;|\\n", val2)])
    out = []
    for val in val2:
        if len(val) > 5 and not re.findall("[^\w\s\.\']", val, flags=re.I):
            # fix: removed a leftover `import pdb; pdb.set_trace()` debugger
            # breakpoint that froze the pipeline on names longer than 128
            # characters; the name is kept, exactly as the code behaved after
            # the breakpoint was continued
            out.append(GameInfoAuthor(wpi, val, role))
    return out
def engine_extraction(wpi, val):
    """One GameInfoEngine per unique '[[...]]' link found in `val`."""
    links = set(re.findall("\[\[.+?\]\]", val))
    return [GameInfoEngine(wpi, link) for link in links]
def mode_extraction(wpi, val):
    """Return the raw '[[...]]' mode links found in `val` (`wpi` is unused)."""
    link_pattern = '\[\[.+?\]\]'
    return re.findall(link_pattern, val)
def platform_extraction(wpi, val):
    """Resolve each unique '[[...]]' link in `val` to a Platform, skipping
    links that do not name a known platform."""
    links = set(re.findall("\[\[.+?\]\]", val))
    return [plat for plat in map(is_plat, links) if plat]
def genre_extraction(wpi, val):
    """One GameInfoGenre per unique '[[...]]' link found in `val`."""
    links = set(re.findall("\[\[.+?\]\]", val))
    return [GameInfoGenre(wpi, link) for link in links]
def company_extraction(wpi, val, role):
    """One GameInfoCompany per unique link, with section anchors ('#...]')
    stripped from the link text."""
    links = set(re.findall("\[\[.+?\]\]", val))
    return [GameInfoCompany(wpi, re.sub("\#.+?\]", "", link), role) for link in links]
def id_sequence(sequence):
    """Classify a parsed release `sequence` by the order of its element types.

    Consecutive duplicates are collapsed into `out` first, so adjacent
    entries of `out` are always distinct classes. Returns a tag such as
    "SIMPLE_DATE", "P-R-D", "R-D-P", "R-D", "D-P", "P-D", or "UNK ..." for
    unrecognized layouts.
    """
    prev = None
    out = []
    for x in sequence:
        if prev == x.__class__:
            continue
        out.append(x.__class__)
        prev = x.__class__
    if len(out) == 1 and out[0] is date:
        return "SIMPLE_DATE"
    elif len(out) > 2:
        if (out[0] is Platform and out[1] is Region and out[2] is date):
            return "P-R-D"
        elif (out[0] is Region and out[1] is date and out[2] is Platform):
            return "R-D-P"
        elif (out[0] is Region and out[1] is date and out[2] is not Platform):
            return "R-D"
        elif (out[0] is date and out[1] is Platform and out[2] is not Region):
            return "D-P"
        # fix: removed an unreachable branch testing
        # `out[0] is Platform and out[1] is Platform` — the dedup above
        # guarantees adjacent entries differ, so it could never match
    if len(out) >= 2 and (out[0] is Platform and out[1] is date):
        return "P-D"
    if len(out) >= 2 and (out[0] is Region and out[1] is date):
        return "R-D"
    return "UNK %d - %s" % (len(out), out)
def build_p_r_d(sequence):
    """Assemble GameInfoRelease rows from a Platform/Region/date sequence.

    Platforms open release slots; a Region fills the open slots' missing
    region (or forks copies when all slots already have one); a date closes
    the open slots by filling their missing rdate. When a new platform group
    starts after a close, the finished group is flushed into `out`.
    """
    from copy import copy
    prev_plat = None
    out = []
    closed = False
    curr_sequence = []
    for idx, si in enumerate(sequence):
        if isinstance(si, Platform):
            if prev_plat == si.code:
                continue  # skip immediate repeats of the same platform
            prev_plat = si.code
            if closed:
                # the previous group received its date: flush and start fresh
                out += copy(curr_sequence)
                curr_sequence = []
                closed = False
            curr_sequence.append(GameInfoRelease(si))
        if isinstance(si, Region):
            ow = False  # whether an open slot was filled in place
            for ri in curr_sequence:
                if ri.region is None:
                    ri.region = si
                    ow = True
            if not ow:
                # every slot already has a region: duplicate them for this one
                ext = []
                for rl in curr_sequence:
                    ext.append(GameInfoRelease(rl.platform, si))
                curr_sequence += ext
        if isinstance(si, date):
            for ri in curr_sequence:
                if ri.rdate is None:
                    ri.rdate = si
            closed = True
    # fix: the final group was only flushed when ANOTHER platform followed it,
    # so the trailing (often the only) release group was silently dropped
    if closed and curr_sequence:
        out += copy(curr_sequence)
    return out
def build_p_d(sequence):
    """Build releases from a Platform/date sequence: each date fills the
    missing rdate of every release opened so far."""
    out = []
    closed = False
    for item in sequence:
        if isinstance(item, Platform):
            closed = False
            out.append(GameInfoRelease(item))
        if isinstance(item, date):
            closed = True
            for release in out:
                if not release.rdate:
                    release.rdate = item
    return out
def release_extraction(wpi, raw_val, platforms_fallback=None):
    """Parse a release field into GameInfoRelease objects.

    Tokens are classified as Region / Platform / date, the resulting type
    sequence is identified, and the matching builder is applied.
    `platforms_fallback` supplies platforms when the field itself has none.
    """
    # fix: mutable default argument ([]) replaced with a None sentinel
    if platforms_fallback is None:
        platforms_fallback = []
    sequence = []
    inp = prenorm(raw_val)
    if len(inp) <= 1:
        return []
    for vv in yank_forward(inp):
        if _isnoise.match(vv):
            continue
        rg = _is_region(vv)
        if rg:
            sequence.append(rg)
            continue
        pl = is_plat(vv)
        if pl:
            sequence.append(pl)
            continue
        dt = _is_date(vv)
        if dt:
            sequence.append(dt)
    sq_type = id_sequence(sequence)
    # promote sequences lacking platforms using the fallback list
    if sq_type == "R-D" and platforms_fallback:
        sequence = platforms_fallback + sequence
        sq_type = "P-R-D"
    if sq_type == "SIMPLE_DATE" and platforms_fallback:
        sequence = platforms_fallback + sequence
        sq_type = "P-D"
    if sq_type == "P-R-D":
        return build_p_r_d(sequence)
    if sq_type == "P-D":
        return build_p_d(sequence)
    # unrecognized layout: fall back to a best-effort extraction
    if (not sequence or sequence[0] is None) and platforms_fallback:
        return release_last_effort(raw_val, platforms_fallback)
    # fix: the diagnostic was printed three times (debug leftover), and an
    # unreachable `return []` after the return below was removed
    print(sq_type)
    return release_last_effort(raw_val, platforms_fallback)
def release_last_effort(val, platforms):
    """Best-effort release extraction: first 4-digit year plus first 2-3
    uppercase-letter region code found anywhere in the prenormalized field.

    NOTE(review): when `platforms` is empty this builds Platform("FIXME");
    if "FIXME" is not a key/alias in PLATFORM_ALIASES, Platform.__init__
    raises ValueError right here — confirm a sentinel entry exists.
    """
    gr = None
    val = prenorm(val)
    dt = re.findall("[0-9]{4}", val)
    if not dt:
        return []
    dt = _is_date(dt[0])
    if not platforms:
        platforms = [Platform("FIXME")]
    gr = GameInfoRelease(platforms[0], rdate=dt)
    rg = re.findall("[A-Z]{2,3}", val)
    if rg:
        rg = _is_region(rg[0])
        gr.region = rg
    return [gr]
def _assertive_proc(src_title, wpi, ib_subject, inp):
    """Process an infobox asserted to describe a single video game.

    Extracts title/image/caption/authors/companies/engines/modes/platforms/
    genres/releases from the infobox fields and persists everything via
    insert_game_info. Returns the status code "SS".
    """
    info_kv = inner_peel(inp)
    final_title, img, img_caption = None, None, None
    authors, game_releases, companies, engines, platforms, modes, genres = [], [], [], [], [], [], []
    # platforms are extracted up front so they can back-fill releases
    if "platforms" in info_kv.keys():
        platforms = platform_extraction(wpi, info_kv["platforms"])
    for k, val in info_kv.items():
        if not val or len(val) == 1:
            print(tty_colors.warning("blank val for key %s" % k))
            continue
        if k == "title":
            final_title = re.sub("<[^<]+?>|http://.+?(\||\])|\[|\]|\(.+\)|\{\{.+?\}\}", "", val)
        if k == "image":
            img = re.sub(r"File:|\[|\]|alt\=.+|Image:|\|.+", "", val)
        if k == "caption":
            img_caption = re.sub("<[^<]+?>|http://.+?(\||\])|\[|\]|\(.+\)|\{\{.+?\}\}", "", val)
        auth_role = author_role(k)
        if auth_role[0]:
            authors += author_extraction(wpi, val, auth_role[2])
        if k in ["released", "release", "first release version", "first release date", "latest release version",
                 "latest release date", "year of inception", "first release"]:
            game_releases += release_extraction(wpi, val, platforms)
        if k in ["developer", "publisher"]:
            companies += company_extraction(wpi, val, k)
        if k in ["engine"]:
            engines += engine_extraction(wpi, val)
        if k in ["modes"]:
            modes += mode_extraction(wpi, val)
        if k in ["platform of origin"]:
            # fix: extract from this key's own value; previously this re-read
            # info_kv["platforms"], raising KeyError whenever that field was
            # absent (and duplicating platforms when present)
            platforms += platform_extraction(wpi, val)
        if k in ["genre"]:
            genres += genre_extraction(wpi, val)
    if not final_title:
        final_title = src_title
    a_info = ArticleInfo(src_title, final_title, wpi, ib_subject, "MISSING-TODO", None, None)
    g_core = GameInfoCore(wpi, True, img, img_caption, genres, modes)
    t1 = datetime.now()
    insert_game_info(a_info, g_core, platforms=platforms, authors=authors, companies=companies, engines=engines, releases=game_releases)
    print(final_title, tty_colors.success((datetime.now() - t1).total_seconds()))
    return "SS"
def macro(conn, ent, did_redir=None):
    """Fetch the wiki article `ent` via `conn` and route it by infobox subject.

    Returns a short status code: "SS" (game stored), "VGS" (game series),
    "CM" (cross-media), or "E0".."E3" for the successive failure points
    (fetch, envelope, infobox, unrecognized subject).
    """
    ent = re.sub("\[\[|\]\]", '', ent).strip()
    try:
        resp_ct = conn.fetch(ent)
    except Exception as e:
        return "E0"
    wpi, rev = outer_peel(resp_ct)
    if not wpi or not rev:
        return "E1"
    should_redir = re.search("\#redirect", rev, flags=re.I)
    if should_redir:
        redir_to = re.findall("\[\[.+?\]\]", rev, flags=re.I)
        if redir_to:
            redir_to = redir_to[0].strip("[").strip("]")
        # NOTE(review): `redir_to and ent` is truthy for every found redirect
        # target, so the follow-the-redirect recursion below is unreachable —
        # was this meant to be `redir_to == ent` (self-redirect guard) or a
        # check on `did_redir`? Confirm before relying on redirects.
        if redir_to and ent:
            return "E1"
        print(tty_colors.success("REDIR FROM: %s TO: %s." % (ent, redir_to)))
        return macro(conn, redir_to, did_redir=True)
    ib_subject, inp = get_infobox(rev)
    if not ib_subject or not inp:
        return "E2"
    ib_subject = ib_subject.lower()
    assertive = ib_subject in ASSERTIVE_SUBJECTS
    if assertive:
        return _assertive_proc(ent, wpi, ib_subject, inp)
    game_series = ib_subject in GAME_SERIES_SUBJECTS
    if game_series:
        return "VGS"
    cross = ib_subject in CROSSMEDIA_SUBJECTS
    if cross:
        print("TODO-CROSSMEDIA CONFIRMATION")
        return "CM"
    # skip_msg = tty_colors.danger("Skipping. Src Title: %s Subject: %s" % (ent, ib_subject))
    # print(skip_msg)
    return "E3"
if __name__ == "__main__":
    # batch-process every raw wiki dump and tally the per-status counts
    stats = {}
    ecount = 0
    raws_path = "/home/joao/projetos/gameworm/.data/wiki/raws/"
    for ent in os.listdir(raws_path):
        ecount += 1
        fpath = os.path.join(raws_path, ent)
        # fix: use a context manager so the handle is closed even on error
        with open(fpath, "r") as fh:
            ct = fh.read()
        t1 = datetime.now()
        # NOTE(review): macro's signature is (conn, ent, ...) — passing
        # (ent, ct) hands the filename in as the connection, so every call
        # fails with "E0"; confirm whether a connection object should be
        # constructed here instead.
        out = macro(ent, ct)
        if out not in stats:
            stats[out] = 1
        else:
            stats[out] += 1
        print((datetime.now() - t1).total_seconds())
    print(ecount)
    print(stats)
|
<reponame>busyyang/torch_ecg
"""
"""
import os, sys, re, logging
import time, datetime
from functools import reduce
from copy import deepcopy
from itertools import repeat
from numbers import Real, Number
from typing import Union, Optional, List, Tuple, Dict, Sequence, NoReturn
import numpy as np
import pandas as pd
# public API of this utilities module (private helpers such as
# _remove_spikes_naive are intentionally omitted)
__all__ = [
    "dict_to_str",
    "str2bool",
    "get_date_str",
    "mask_to_intervals",
    "list_sum",
    "gen_gaussian_noise", "gen_sinusoidal_noise", "gen_baseline_wander",
    "get_record_list_recursive3",
    "init_logger",
]
def dict_to_str(d:Union[dict, list, tuple], current_depth:int=1, indent_spaces:int=4) -> str:
    """ finished, checked,
    convert a (possibly) nested dict into a `str` of json-like formatted form,
    this nested dict might also contain lists or tuples of dict (and of str, int, etc.)
    Parameters:
    -----------
    d: dict, or list, or tuple,
        a (possibly) nested `dict`, or a list of `dict`
    current_depth: int, default 1,
        depth of `d` in the (possible) parent `dict` or `list`
    indent_spaces: int, default 4,
        the indent spaces of each depth
    Returns:
    --------
    s: str,
        the formatted string
    """
    assert isinstance(d, (dict, list, tuple))
    if len(d) == 0:
        s = f"{{}}" if isinstance(d, dict) else f"[]"
        return s
    # flat_types = (Number, bool, str,)
    # types rendered inline (comma-separated) rather than one per line
    flat_types = (Number, bool,)
    flat_sep = ", "
    s = "\n"
    unit_indent = " "*indent_spaces
    prefix = unit_indent*current_depth
    if isinstance(d, (list, tuple)):
        if all([isinstance(v, flat_types) for v in d]):
            # wrap flat scalar sequences at roughly 110 characters per line
            len_per_line = 110
            current_len = len(prefix) + 1  # + 1 for a comma
            val = []
            for idx, v in enumerate(d):
                # \042 is the double-quote character
                add_v = f"\042{v}\042" if isinstance(v, str) else str(v)
                add_len = len(add_v) + len(flat_sep)
                if current_len + add_len > len_per_line:
                    # emit the accumulated line and start a new one with add_v
                    val = ", ".join([item for item in val])
                    s += f"{prefix}{val},\n"
                    val = [add_v]
                    current_len = len(prefix) + 1 + len(add_v)
                else:
                    val.append(add_v)
                    current_len += add_len
            if len(val) > 0:
                # flush the final partial line
                val = ", ".join([item for item in val])
                s += f"{prefix}{val}\n"
        else:
            for v in d:
                if isinstance(v, (dict, list, tuple)):
                    # recurse for nested containers, one level deeper
                    s += f"{prefix}{dict_to_str(v, current_depth+1)}\n"
                else:
                    val = f"\042{v}\042" if isinstance(v, str) else v
                    s += f"{prefix}{val}\n"
    elif isinstance(d, dict):
        for k, v in d.items():
            key = f"\042{k}\042" if isinstance(k, str) else k
            if isinstance(v, (dict, list, tuple)):
                s += f"{prefix}{key}: {dict_to_str(v, current_depth+1)}\n"
            else:
                val = f"\042{v}\042" if isinstance(v, str) else v
                s += f"{prefix}{key}: {val}\n"
    # close at the parent's indentation level
    s += unit_indent*(current_depth-1)
    s = f"{{{s}}}" if isinstance(d, dict) else f"[{s}]"
    return s
def str2bool(v:Union[str, bool]) -> bool:
    """Convert a possibly str-encoded boolean ("yes"/"no", "1"/"0", ...) to bool.

    Raises ValueError for unrecognized strings.

    References
    ----------
    https://stackoverflow.com/questions/15008758/parsing-boolean-values-with-argparse
    """
    if isinstance(v, bool):
        return v
    lowered = v.lower()
    if lowered in ("yes", "true", "t", "y", "1"):
        return True
    if lowered in ("no", "false", "f", "n", "0"):
        return False
    raise ValueError("Boolean value expected.")
def get_date_str(fmt:Optional[str]=None):
    """Current local time formatted with `fmt` (default "%Y-%m-%d-%H-%M-%S")."""
    return datetime.datetime.now().strftime(fmt or "%Y-%m-%d-%H-%M-%S")
def mask_to_intervals(mask:np.ndarray, vals:Optional[Union[int,Sequence[int]]]=None) -> Union[list, dict]:
    """ finished, checked,
    Parameters:
    -----------
    mask: ndarray,
        1d mask
    vals: int or sequence of int, optional,
        values in `mask` to obtain intervals
    Returns:
    --------
    intervals: dict or list,
        the intervals corr. to each value in `vals` if `vals` is `None` or `Sequence`;
        or the intervals corr. to `vals` if `vals` is int.
        each interval is of the form `[a,b]`, left inclusive, right exclusive
    """
    if vals is None:
        _vals = list(set(mask))
    elif isinstance(vals, int):
        _vals = [vals]
    else:
        _vals = vals
    # assert set(_vals) & set(mask) == set(_vals)
    intervals = {v:[] for v in _vals}
    for v in _vals:
        valid_inds = np.where(np.array(mask)==v)[0]
        if len(valid_inds) == 0:
            continue
        # positions where consecutive indices jump by more than 1 mark the
        # end of one run of `v`
        split_indices = np.where(np.diff(valid_inds)>1)[0]
        # keep both the last index of each run and the first of the next
        split_indices = split_indices.tolist() + (split_indices+1).tolist()
        split_indices = sorted([0] + split_indices + [len(valid_inds)-1])
        # consecutive boundary pairs delimit one [start, end) interval
        # (+1 makes the right edge exclusive)
        for idx in range(len(split_indices)//2):
            intervals[v].append(
                [valid_inds[split_indices[2*idx]], valid_inds[split_indices[2*idx+1]]+1]
            )
    if isinstance(vals, int):
        # a single requested value returns just its interval list
        intervals = intervals[vals]
    return intervals
def list_sum(l:Sequence[list]) -> list:
    """Concatenate a sequence of lists into one flat list."""
    # sum with a [] start performs the same left-fold of `+` as the
    # previous functools.reduce implementation
    return sum(l, [])
def gen_gaussian_noise(siglen:int, mean:Real=0, std:Real=0) -> np.ndarray:
    """Generate 1d Gaussian noise of length `siglen` with the given mean
    and standard deviation, as an ndarray."""
    return np.random.normal(loc=mean, scale=std, size=siglen)
def gen_sinusoidal_noise(siglen:int, start_phase:Real, end_phase:Real, amplitude:Real, amplitude_mean:Real=0, amplitude_std:Real=0) -> np.ndarray:
    """Sinusoid of length `siglen` sweeping start_phase -> end_phase
    (both in degrees) with the given amplitude, plus an additive Gaussian
    noise component of mean `amplitude_mean` and std `amplitude_std`."""
    phases_deg = np.linspace(start_phase, end_phase, siglen)
    sn = amplitude * np.sin(np.pi * phases_deg / 180)
    return sn + gen_gaussian_noise(siglen, amplitude_mean, amplitude_std)
def gen_baseline_wander(siglen:int, fs:Real, bw_fs:Union[Real,Sequence[Real]], amplitude:Union[Real,Sequence[Real]], amplitude_mean:Real=0, amplitude_std:Real=0) -> np.ndarray:
    """Baseline wander: a sum of low-frequency sinusoids (one per entry of
    `bw_fs`, each with a random start phase) on top of Gaussian noise.

    `fs` is the sampling frequency of the original signal; a scalar
    `amplitude` is broadcast across all wander frequencies.

    Example
    -------
    >>> gen_baseline_wander(4000, 400, [0.4,0.1,0.05], [0.1,0.2,0.4])
    """
    freqs = [bw_fs] if isinstance(bw_fs, Real) else bw_fs
    amps = list(repeat(amplitude, len(freqs))) if isinstance(amplitude, Real) else amplitude
    assert len(freqs) == len(amps)
    bw = gen_gaussian_noise(siglen, amplitude_mean, amplitude_std)
    duration = siglen / fs
    for freq, amp in zip(freqs, amps):
        start_phase = np.random.randint(0, 360)
        end_phase = duration * freq * 360 + start_phase
        bw += gen_sinusoidal_noise(siglen, start_phase, end_phase, amp, 0, 0)
    return bw
def get_record_list_recursive3(db_dir:str, rec_patterns:Union[str,Dict[str,str]]) -> Union[List[str], Dict[str, List[str]]]:
    """Recursively collect record paths under `db_dir`, relative to it and
    without file extensions, in lexicographical order.

    Parameters
    ----------
    db_dir: str,
        the parent (root) path of the whole database
    rec_patterns: str or dict,
        pattern of the record filenames, e.g. "A(?:\\d+).mat",
        or patterns of several subsets, e.g. `{"A": "A(?:\\d+).mat"}`

    Returns
    -------
    res: list of str (or dict of such lists, one per pattern key)
    """
    if isinstance(rec_patterns, str):
        res = []
    elif isinstance(rec_patterns, dict):
        res = {k: [] for k in rec_patterns.keys()}
    # fix: ensure exactly one trailing separator. The previous
    # os.path.join(db_dir, "tmp").replace("tmp", "") trick removed EVERY
    # occurrence of "tmp" in the path, corrupting any db_dir under e.g. /tmp
    if not db_dir.endswith(os.sep):
        db_dir = db_dir + os.sep
    roots = [db_dir]
    while len(roots) > 0:
        new_roots = []
        for r in roots:
            entries = [os.path.join(r, item) for item in os.listdir(r)]
            if isinstance(rec_patterns, str):
                res += list(filter(re.compile(rec_patterns).search, entries))
            elif isinstance(rec_patterns, dict):
                for k in rec_patterns.keys():
                    res[k] += list(filter(re.compile(rec_patterns[k]).search, entries))
            new_roots += [item for item in entries if os.path.isdir(item)]
        roots = new_roots
    # strip the root prefix and the file extension, then sort
    if isinstance(rec_patterns, str):
        res = sorted(os.path.splitext(item)[0].replace(db_dir, "") for item in res)
    elif isinstance(rec_patterns, dict):
        for k in rec_patterns.keys():
            res[k] = sorted(os.path.splitext(item)[0].replace(db_dir, "") for item in res[k])
    return res
def init_logger(log_dir:str, log_file:Optional[str]=None, mode:str="a", verbose:int=0) -> logging.Logger:
    """Create the "ECG-UNET" logger with a stdout handler and a file handler.

    Verbosity mapping: >=2 -> DEBUG for both handlers; >=1 -> INFO on the
    console and DEBUG in the file; otherwise WARNING for both. The logger's
    own level follows the file handler's.
    """
    if log_file is None:
        log_file = f"log_{get_date_str()}.txt"
    if not os.path.exists(log_dir):
        os.makedirs(log_dir)
    log_file = os.path.join(log_dir, log_file)
    print(f"log file path: {log_file}")
    logger = logging.getLogger("ECG-UNET")
    c_handler = logging.StreamHandler(sys.stdout)
    f_handler = logging.FileHandler(log_file)
    if verbose >= 2:
        print("levels of c_handler and f_handler are set DEBUG")
        c_level = f_level = logging.DEBUG
    elif verbose >= 1:
        print("level of c_handler is set INFO, level of f_handler is set DEBUG")
        c_level, f_level = logging.INFO, logging.DEBUG
    else:
        print("levels of c_handler and f_handler are set WARNING")
        c_level = f_level = logging.WARNING
    c_handler.setLevel(c_level)
    f_handler.setLevel(f_level)
    logger.setLevel(f_level)
    c_handler.setFormatter(logging.Formatter("%(name)s - %(levelname)s - %(message)s"))
    f_handler.setFormatter(logging.Formatter("%(asctime)s - %(name)s - %(levelname)s - %(message)s"))
    logger.addHandler(c_handler)
    logger.addHandler(f_handler)
    return logger
def _remove_spikes_naive(sig:np.ndarray) -> np.ndarray:
""" finished, checked,
remove `spikes` from `sig` using a naive method proposed in entry 0416 of CPSC2019
`spikes` here refers to abrupt large bumps with (abs) value larger than 20 mV,
do NOT confuse with `spikes` in paced rhythm
Parameters:
-----------
sig: ndarray,
single-lead ECG signal with potential spikes
Returns:
--------
filtered_sig: ndarray,
ECG signal with `spikes` removed
"""
b = list(filter(lambda k: k > 0, np.argwhere(np.abs(sig)>20).squeeze(-1)))
filtered_sig = sig.copy()
for k in b:
filtered_sig[k] = filtered_sig[k-1]
return filtered_sig
|
#!/usr/bin/env python
import os.path
import numpy as np
from gmprocess.io.knet.core import is_knet, read_knet
import pkg_resources
from gmprocess.utils.test_utils import read_data_dir
def test():
    """Exercise gmprocess K-NET/KiK-net readers against bundled sample data."""
    dpath = os.path.join('data', 'testdata', 'knet', 'us2000cnnl')
    datadir = pkg_resources.resource_filename('gmprocess', dpath)
    knet_file1 = os.path.join(datadir, 'AOM0051801241951.EW')
    knet_file2 = os.path.join(datadir, 'AOM0051801241951.NS')
    knet_file3 = os.path.join(datadir, 'AOM0051801241951.UD')
    assert is_knet(knet_file1)
    assert is_knet(os.path.abspath(__file__)) is False
    # test a knet file with npoints % 10 == 0
    stream1 = read_knet(knet_file1)[0]
    stream2 = read_knet(knet_file2)[0]
    stream3 = read_knet(knet_file3)[0]
    np.testing.assert_almost_equal(stream1[0].max(), -37.149, decimal=2)
    np.testing.assert_almost_equal(stream2[0].max(), 32.859, decimal=2)
    np.testing.assert_almost_equal(stream3[0].max(), 49.000, decimal=2)
    # test a file that has a number of points divisible by 8
    knet_file4 = os.path.join(datadir, 'AOM0011801241951.EW')
    knet_file5 = os.path.join(datadir, 'AOM0011801241951.NS')
    knet_file6 = os.path.join(datadir, 'AOM0011801241951.UD')
    stream4 = read_knet(knet_file4)[0]
    stream5 = read_knet(knet_file5)[0]
    stream6 = read_knet(knet_file6)[0]
    np.testing.assert_almost_equal(stream4[0].max(), -11.435, decimal=2)
    np.testing.assert_almost_equal(stream5[0].max(), 12.412, decimal=2)
    np.testing.assert_almost_equal(stream6[0].max(), -9.284, decimal=2)
    # test that a file that is not knet format raises an Exception
    try:
        knet_files, _ = read_data_dir(
            'geonet',
            'nz2018p115908',
            '20161113_110256_WTMC_20.V1A')
        knet_file = knet_files[0]
        read_knet(knet_file)[0]
        success = True
    except Exception:
        success = False
    assert not success
    # test some kiknet files
    dpath = os.path.join('data', 'testdata', 'kiknet', 'usp000a1b0')
    datadir = pkg_resources.resource_filename('gmprocess', dpath)
    kiknet_file1 = os.path.join(datadir, 'AICH040010061330.EW2')
    kiknet_file2 = os.path.join(datadir, 'AICH040010061330.NS2')
    kiknet_file3 = os.path.join(datadir, 'AICH040010061330.UD2')
    # BUG FIX: this previously re-checked knet_file1 (already asserted above);
    # the kiknet file is the one this section means to validate.
    assert is_knet(kiknet_file1)
    stream1 = read_knet(kiknet_file1)[0]  # east-west
    stream2 = read_knet(kiknet_file2)[0]  # north-south
    stream3 = read_knet(kiknet_file3)[0]  # vertical
    assert stream1[0].stats['channel'] == 'HN2'
    assert stream2[0].stats['channel'] == 'HN1'
    assert stream3[0].stats['channel'] == 'HNZ'
    ewmax = np.abs(stream1[0].data).max()
    nsmax = np.abs(stream2[0].data).max()
    udmax = np.abs(stream3[0].data).max()
    np.testing.assert_almost_equal(ewmax, 5.020, decimal=1)
    np.testing.assert_almost_equal(nsmax, 10.749, decimal=1)
    np.testing.assert_almost_equal(udmax, 9.111, decimal=1)
if __name__ == '__main__':
    # Tell gmprocess readers they are running under a test harness.
    os.environ['CALLED_FROM_PYTEST'] = 'True'
    test()
|
# coding=UTF-8
from .forms import Form, FormPlGen, FormSg
from .attributes import Gender
from .xml_helpers import formsg_node, formpl_node, formplgen_node, write_sg, write_pl, write_pl_gen
from typing import List
import xml.etree.ElementTree as ET
class Noun:
    """An Irish noun and its inflected forms (BuNaMo/Gramadán data model).

    Stores singular nominative/genitive/vocative/dative forms, plural
    nominative/genitive/vocative forms and counting forms, together with
    grammatical flags (definiteness, properness, immutability, articled
    genitive), the declension class and a disambiguation string.
    """

    def __str__(self) -> str:
        return self._gramadan_string()

    def _gramadan_string(self) -> str:
        """Render all forms in Gramadán's multi-line debug format."""
        snom = 'sgNom: [' + '] ['.join([f.value for f in self.sg_nom]) + '] \n'
        sgen = 'sgGen: [' + '] ['.join([f.value for f in self.sg_gen]) + '] \n'
        svoc = 'sgVoc: [' + '] ['.join([f.value for f in self.sg_voc]) + '] \n'
        sdat = 'sgDat: [' + '] ['.join([f.value for f in self.sg_dat]) + '] \n'
        pnom = 'plNom: [' + '] ['.join([f.value for f in self.pl_nom]) + '] \n'
        pgen = 'plGen: [' + '] ['.join([f.value for f in self.pl_gen]) + '] \n'
        pvoc = 'plVoc: [' + '] ['.join([f.value for f in self.pl_voc]) + '] \n'
        return snom + sgen + svoc + sdat + pnom + pgen + pvoc

    def __init__(self,
                 source = None,
                 definite: bool = False,
                 proper: bool = False,
                 immutable: bool = False,
                 article_genitive: bool = False,
                 disambig: str = "",
                 declension: int = 0,
                 sg_nom: List[FormSg] = None,
                 sg_gen: List[FormSg] = None,
                 sg_voc: List[FormSg] = None,
                 sg_dat: List[FormSg] = None,
                 pl_nom: List[Form] = None,
                 pl_gen: List[FormPlGen] = None,
                 pl_voc: List[Form] = None,
                 count: List[Form] = None) -> None:
        """Create a noun from BuNaMo XML (`source`) or from keyword arguments.

        :param source: file-like object / path with BuNaMo XML; when given,
            it replaces all other form arguments.
        :param definite: whether the noun is inherently definite
        :param proper: whether the noun is a proper noun
        :param immutable: whether the noun never mutates
        :param article_genitive: whether an articled genitive is allowed
        :param disambig: disambiguation string for homonyms
        :param declension: declension class (0 = unknown)
        :param sg_nom..count: lists of forms for each slot
        """
        self.is_definite: bool = definite
        self.is_proper: bool = proper
        self.is_immutable: bool = immutable
        self.article_genitive: bool = article_genitive
        # Keep track of generated "dative" (True until a real sgDat is loaded)
        self.artificial_dative = True
        self.disambig: str = disambig
        self.declension: int = declension
        # Use fresh lists when no forms were supplied (avoids shared
        # mutable defaults).
        self.sg_nom: List[FormSg] = sg_nom if sg_nom is not None else []
        self.sg_gen: List[FormSg] = sg_gen if sg_gen is not None else []
        self.sg_voc: List[FormSg] = sg_voc if sg_voc is not None else []
        self.sg_dat: List[FormSg] = sg_dat if sg_dat is not None else []
        self.pl_nom: List[Form] = pl_nom if pl_nom is not None else []
        self.pl_gen: List[FormPlGen] = pl_gen if pl_gen is not None else []
        self.pl_voc: List[Form] = pl_voc if pl_voc is not None else []
        self.count: List[Form] = count if count is not None else []
        if source is not None:
            self._empty()
            self.from_xml(source)
        self.add_dative()

    def _empty(self):
        """Clear the current contents"""
        self.is_definite = False
        self.is_proper = False
        self.is_immutable = False
        self.article_genitive = False
        self.disambig = ""
        self.declension = 0
        self.sg_nom = []
        self.sg_gen = []
        self.sg_voc = []
        self.sg_dat = []
        self.pl_nom = []
        self.pl_gen = []
        self.pl_voc = []
        self.count = []

    def get_lemma(self) -> str:
        """Return the lemma (first singular-nominative form), or "" if none."""
        # Guard before indexing: the previous version indexed sg_nom[0]
        # first, which raised IndexError for a noun with no sgNom forms
        # even though the following check clearly intended to allow that.
        if self.sg_nom:
            return self.sg_nom[0].value
        return ""

    def get_identifier(self) -> str:
        """
        Get an identifier for this noun

        Note: called getNickname() in Gramadán
        """
        gender = "fem" if self.get_gender() == Gender.Fem else "masc"
        disambig = ""
        if self.disambig != "":
            disambig = "_" + self.disambig
        outlem = self.get_lemma().replace(" ", "_")
        return f'{outlem}_{gender}_{self.declension}{disambig}'

    def get_gender(self) -> Gender:
        """Return the gender of the first sgNom form.

        NOTE(review): assumes at least one sgNom form exists — confirm callers.
        """
        return self.sg_nom[0].gender

    def add_dative(self) -> None:
        """Generate "dative" forms (copies of sgNom) when none are present."""
        if len(self.sg_dat) == 0:
            for form in self.sg_nom:
                self.sg_dat.append(FormSg(form.value, form.gender))
            self.artificial_dative = True

    def to_xml(self):
        """Serialise to BuNaMo XML; returns UTF-8 encoded bytes."""
        props = {}
        props['default'] = self.get_lemma()
        props['declension'] = str(self.declension)
        props['disambig'] = self.disambig
        props['isProper'] = '1' if self.is_proper else '0'
        props['isDefinite'] = '1' if self.is_definite else '0'
        props['isImmutable'] = '1' if self.is_immutable else '0'
        props['allowArticledGenitive'] = '1' if self.article_genitive else '0'
        root = ET.Element('noun', props)
        write_sg(self.sg_nom, 'sgNom', root)
        write_sg(self.sg_gen, 'sgGen', root)
        # Generated datives are not written back out.
        if not self.artificial_dative:
            write_sg(self.sg_dat, 'sgDat', root)
        write_sg(self.sg_voc, 'sgVoc', root)
        write_pl(self.pl_nom, 'plNom', root)
        write_pl_gen(self.pl_gen, 'plGen', root)
        write_pl(self.pl_voc, 'plVoc', root)
        write_pl(self.count, 'count', root)
        return ET.tostring(root, encoding='UTF-8')

    def from_xml(self, source) -> None:
        """
        Initialise from XML in BuNaMo format:

        >>> from pygramadan.noun import Noun
        >>> import io
        >>> xml = \"\"\"<noun default="ainm" declension="4" disambig="" isProper="0" isDefinite="0" allowArticledGenitive="0">
        ...     <sgNom default="ainm" gender="masc" />
        ...     <sgGen default="ainm" gender="masc" />
        ...     <plNom default="ainmneacha" />
        ...     <plGen default="ainmneacha" strength="strong" />
        ... </noun>\"\"\"
        >>> sio = io.StringIO(xml)
        >>> ainm = Noun(source=sio)
        """
        tree = ET.parse(source)
        root = tree.getroot()
        self.is_definite = True if root.attrib['isDefinite'] == '1' else False
        self.is_proper = True if root.attrib['isProper'] == '1' else False
        # Optional attributes default to False when absent.
        if 'isImmutable' in root.attrib and root.attrib['isImmutable'] == '1':
            self.is_immutable = True
        else:
            self.is_immutable = False
        if 'allowArticledGenitive' in root.attrib and root.attrib['allowArticledGenitive'] == '1':
            self.article_genitive = True
        else:
            self.article_genitive = False
        self.disambig = root.attrib['disambig']
        self.declension = int(root.attrib['declension'])
        formsg_node(root, './sgNom', self.sg_nom)
        formsg_node(root, './sgGen', self.sg_gen)
        formsg_node(root, './sgVoc', self.sg_voc)
        formsg_node(root, './sgDat', self.sg_dat)
        # A genuine dative in the source data is not "artificial".
        if len(self.sg_dat) != 0:
            self.artificial_dative = False
        formpl_node(root, './plNom', self.pl_nom)
        formplgen_node(root, './plGen', self.pl_gen)
        formpl_node(root, './plVoc', self.pl_voc)
        formpl_node(root, './count', self.count)

    def get_all_forms(self, fake_dative = False):
        """
        Returns a list of tuples, `(part-of-speech, form)`:

        >>> ainm.get_all_forms()
        [('sg_nom', 'ainm'), ('pl_gen', 'ainmneacha'), ('sg_gen', 'ainm'), ('pl_nom', 'ainmneacha')]

        If `fake_dative` is false, generated "dative" (usually nominative) forms are omitted
        """
        # A set deduplicates identical (slot, value) pairs before listing.
        forms = set()
        for nom_sg in self.sg_nom:
            forms.add(('sg_nom', nom_sg.value))
        for gen_sg in self.sg_gen:
            forms.add(('sg_gen', gen_sg.value))
        for voc_sg in self.sg_voc:
            forms.add(('sg_voc', voc_sg.value))
        for dat_sg in self.sg_dat:
            if not self.artificial_dative or fake_dative:
                forms.add(('sg_dat', dat_sg.value))
        for nom_pl in self.pl_nom:
            forms.add(('pl_nom', nom_pl.value))
        for gen_pl in self.pl_gen:
            forms.add(('pl_gen', gen_pl.value))
        for voc_pl in self.pl_voc:
            forms.add(('pl_voc', voc_pl.value))
        for count in self.count:
            forms.add(('count', count.value))
        return list(forms)

    def get_unique_forms(self):
        """
        Returns a list of unique word forms:

        >>> ainm.get_unique_forms()
        ['ainm', 'ainmneacha']
        """
        return list(set([a[1] for a in self.get_all_forms()]))
|
<gh_stars>10-100
##############################################################################
# Copyright (c) 2017 Ericsson AB and others.
# Author: <NAME> (<EMAIL>)
# All rights reserved. This program and the accompanying materials
# are made available under the terms of the Apache License, Version 2.0
# which accompanies this distribution, and is available at
# http://www.apache.org/licenses/LICENSE-2.0
##############################################################################
from abc import abstractmethod
import os
from opnfv.utils import opnfv_logger as logger
from opnfv.utils import ssh_utils
logger = logger.Logger(__name__).getLogger()
class Deployment(object):
    """Value object describing an OPNFV deployment for reporting purposes."""

    def __init__(self,
                 installer,
                 installer_ip,
                 scenario,
                 pod,
                 status,
                 openstack_version,
                 sdn_controller,
                 nodes=None):
        # All attributes live in one dict so get_dict() is trivial.
        self.deployment_info = {
            'installer': installer,
            'installer_ip': installer_ip,
            'scenario': scenario,
            'pod': pod,
            'status': status,
            'openstack_version': openstack_version,
            'sdn_controller': sdn_controller,
            'nodes': nodes
        }

    def _get_openstack_release(self):
        '''
        Translates an openstack version into the release name
        '''
        os_versions = {
            '12': 'Liberty',
            '13': 'Mitaka',
            '14': 'Newton',
            '15': 'Ocata',
            '16': 'Pike',
            '17': 'Queens'
        }
        try:
            version = self.deployment_info['openstack_version'].split('.')[0]
            name = os_versions[version]
            return name
        except Exception:
            # Best effort: any parse or lookup failure maps to "Unknown".
            return 'Unknown release'

    def get_dict(self):
        '''
        Returns a dictionary with all the attributes
        '''
        return self.deployment_info

    def __str__(self):
        '''
        Override of the str method
        '''
        s = '''
        INSTALLER:     {installer}
        SCENARIO:      {scenario}
        INSTALLER IP:  {installer_ip}
        POD:           {pod}
        STATUS:        {status}
        OPENSTACK:     {openstack_version} ({openstack_release})
        SDN:           {sdn_controller}
        NODES:
    '''.format(installer=self.deployment_info['installer'],
               scenario=self.deployment_info['scenario'],
               installer_ip=self.deployment_info['installer_ip'],
               pod=self.deployment_info['pod'],
               status=self.deployment_info['status'],
               openstack_version=self.deployment_info[
                   'openstack_version'],
               openstack_release=self._get_openstack_release(),
               sdn_controller=self.deployment_info['sdn_controller'])

        # Bug fix: nodes defaults to None, and iterating None raised
        # TypeError; treat a missing node list as empty.
        for node in self.deployment_info['nodes'] or []:
            s += '{node_object}\n'.format(node_object=node)

        return s
class Role():
    """String constants naming the roles a node can play in a deployment."""
    INSTALLER = 'installer'
    CONTROLLER = 'controller'
    COMPUTE = 'compute'
    ODL = 'opendaylight'
    ONOS = 'onos'
class NodeStatus():
    """String constants describing the health/usage state of a node."""
    STATUS_OK = 'active'
    STATUS_INACTIVE = 'inactive'
    STATUS_OFFLINE = 'offline'
    STATUS_ERROR = 'error'
    STATUS_UNUSED = 'unused'
    STATUS_UNKNOWN = 'unknown'
class Node(object):
    """One machine in the deployment, optionally reachable over SSH."""

    def __init__(self,
                 id,
                 ip,
                 name,
                 status,
                 roles=None,
                 ssh_client=None,
                 info=None):
        """
        :param id: installer-specific node identifier
        :param ip: management IP address
        :param name: human-readable node name
        :param status: one of the NodeStatus constants
        :param roles: list of Role constants.
            NOTE(review): assumed non-None whenever ssh_client is set,
            otherwise the membership test below raises TypeError — confirm.
        :param ssh_client: paramiko-style client used by run_cmd/get_file/put_file
        :param info: free-form extra information
        """
        self.id = id
        self.ip = ip
        self.name = name
        self.status = status
        self.ssh_client = ssh_client
        self.roles = roles
        self.info = info
        self.cpu_info = 'unknown'
        self.memory = 'unknown'
        self.ovs = 'unknown'
        # Interrogate the node over SSH unless it is the installer itself.
        if ssh_client and Role.INSTALLER not in self.roles:
            sys_info = self.get_system_info()
            self.cpu_info = sys_info['cpu_info']
            self.memory = sys_info['memory']
            self.ovs = self.get_ovs_info()

    def get_file(self, src, dest):
        '''
        SCP file from a node
        '''
        # Bug fix: compare status strings with '!=', not 'is not' — identity
        # comparison of strings relies on CPython interning and is unreliable.
        if self.status != NodeStatus.STATUS_OK:
            logger.info("The node %s is not active" % self.ip)
            return 1
        logger.info("Fetching %s from %s" % (src, self.ip))
        get_file_result = ssh_utils.get_file(self.ssh_client, src, dest)
        if get_file_result is None:
            logger.error("SFTP failed to retrieve the file.")
        else:
            logger.info("Successfully copied %s:%s to %s" %
                        (self.ip, src, dest))
        return get_file_result

    def put_file(self, src, dest):
        '''
        SCP file to a node
        '''
        # Bug fix: '!=' instead of 'is not' (see get_file).
        if self.status != NodeStatus.STATUS_OK:
            logger.info("The node %s is not active" % self.ip)
            return 1
        logger.info("Copying %s to %s" % (src, self.ip))
        put_file_result = ssh_utils.put_file(self.ssh_client, src, dest)
        if put_file_result is None:
            logger.error("SFTP failed to retrieve the file.")
        else:
            logger.info("Successfully copied %s to %s:%s" %
                        (src, dest, self.ip))
        return put_file_result

    def run_cmd(self, cmd):
        '''
        Run command remotely on a node; returns stdout or None on error.
        '''
        # Bug fix: '!=' instead of 'is not' (see get_file).
        if self.status != NodeStatus.STATUS_OK:
            logger.error(
                "Error running command %s. The node %s is not active"
                % (cmd, self.ip))
            return None
        _, stdout, stderr = (self.ssh_client.exec_command(cmd))
        error = stderr.readlines()
        if len(error) > 0:
            logger.error("error %s" % ''.join(error))
            return None
        output = ''.join(stdout.readlines()).rstrip()
        return output

    def get_dict(self):
        '''
        Returns a dictionary with all the attributes
        '''
        return {
            'id': self.id,
            'ip': self.ip,
            'name': self.name,
            'status': self.status,
            'roles': self.roles,
            'cpu_info': self.cpu_info,
            'memory': self.memory,
            'ovs': self.ovs,
            'info': self.info
        }

    def is_active(self):
        '''
        Returns if the node is active
        '''
        return self.status == NodeStatus.STATUS_OK

    def is_controller(self):
        '''
        Returns if the node is a controller
        '''
        return Role.CONTROLLER in self.roles

    def is_compute(self):
        '''
        Returns if the node is a compute
        '''
        return Role.COMPUTE in self.roles

    def is_odl(self):
        '''
        Returns if the node is an opendaylight
        '''
        return Role.ODL in self.roles

    def is_onos(self):
        '''
        Returns if the node is an ONOS
        '''
        return Role.ONOS in self.roles

    def get_ovs_info(self):
        '''
        Returns the ovs version installed, or None if unavailable
        '''
        if self.is_active():
            cmd = "ovs-vsctl --version 2>/dev/null|head -1| sed 's/^.*) //'"
            return self.run_cmd(cmd) or None
        return None

    def get_system_info(self):
        '''
        Returns system information: {'memory': ..., 'cpu_info': {...}}

        NOTE(review): run_cmd() may return None for an inactive node, which
        would make the .partition()/.splitlines() calls below raise
        AttributeError — callers currently only invoke this for active,
        SSH-reachable nodes. The .encode() calls are a Python 2 leftover and
        yield bytes values.
        '''
        cmd = 'grep MemTotal /proc/meminfo'
        memory = self.run_cmd(cmd).partition('MemTotal:')[-1].strip().encode()
        cpu_info = {}
        cmd = 'lscpu'
        result = self.run_cmd(cmd)
        for line in result.splitlines():
            if line.startswith('CPU(s)'):
                cpu_info['num_cpus'] = line.split(' ')[-1].encode()
            elif line.startswith('Thread(s) per core'):
                cpu_info['threads/core'] = line.split(' ')[-1].encode()
            elif line.startswith('Core(s) per socket'):
                cpu_info['cores/socket'] = line.split(' ')[-1].encode()
            elif line.startswith('Model name'):
                cpu_info['model'] = line.partition(
                    'Model name:')[-1].strip().encode()
            elif line.startswith('Architecture'):
                cpu_info['arch'] = line.split(' ')[-1].encode()
        return {'memory': memory, 'cpu_info': cpu_info}

    def __str__(self):
        return '''
        name:    {name}
        id:      {id}
        ip:      {ip}
        status:  {status}
        roles:   {roles}
        cpu:     {cpu_info}
        memory:  {memory}
        ovs:     {ovs}
        info:    {info}'''.format(name=self.name,
                                  id=self.id,
                                  ip=self.ip,
                                  status=self.status,
                                  roles=self.roles,
                                  cpu_info=self.cpu_info,
                                  memory=self.memory,
                                  ovs=self.ovs,
                                  info=self.info)
class DeploymentHandler(object):
    """Abstract base for installer-specific deployment adapters.

    Connects to the installer node over SSH on construction; subclasses
    implement the abstract getters for version/status/node information.
    """

    EX_OK = os.EX_OK
    EX_ERROR = os.EX_SOFTWARE
    FUNCTION_NOT_IMPLEMENTED = "Function not implemented by adapter!"

    def __init__(self,
                 installer,
                 installer_ip,
                 installer_user,
                 installer_pwd=None,
                 pkey_file=None):
        self.installer = installer.lower()
        self.installer_ip = installer_ip
        self.installer_user = installer_user
        # Bug fix: this line previously contained a corrupted/redacted token
        # ("<PASSWORD>pwd") instead of the constructor argument.
        self.installer_pwd = installer_pwd
        self.pkey_file = pkey_file

        if pkey_file is not None and not os.path.isfile(pkey_file):
            raise Exception(
                'The private key file %s does not exist!' % pkey_file)

        self.installer_connection = ssh_utils.get_ssh_client(
            hostname=self.installer_ip,
            username=self.installer_user,
            password=self.installer_pwd,
            pkey_file=self.pkey_file)

        if self.installer_connection:
            self.installer_node = Node(id='',
                                       ip=installer_ip,
                                       name=installer,
                                       status=NodeStatus.STATUS_OK,
                                       ssh_client=self.installer_connection,
                                       roles=Role.INSTALLER)
        else:
            raise Exception(
                'Cannot establish connection to the installer node!')

        self.nodes = self.get_nodes()

    @abstractmethod
    def get_openstack_version(self):
        '''
        Returns a string of the openstack version (nova-compute)
        '''
        raise Exception(DeploymentHandler.FUNCTION_NOT_IMPLEMENTED)

    @abstractmethod
    def get_sdn_version(self):
        '''
        Returns a string of the sdn controller and its version, if exists
        '''
        raise Exception(DeploymentHandler.FUNCTION_NOT_IMPLEMENTED)

    @abstractmethod
    def get_deployment_status(self):
        '''
        Returns a string of the status of the deployment
        '''
        raise Exception(DeploymentHandler.FUNCTION_NOT_IMPLEMENTED)

    @abstractmethod
    def get_nodes(self, options=None):
        '''
        Generates a list of all the nodes in the deployment
        '''
        raise Exception(DeploymentHandler.FUNCTION_NOT_IMPLEMENTED)

    def get_installer_node(self):
        '''
        Returns the installer node object
        '''
        return self.installer_node

    def get_arch(self):
        '''
        Returns the architecture of the first compute node found
        '''
        arch = None
        for node in self.nodes:
            if node.is_compute():
                arch = node.cpu_info.get('arch', None)
                if arch:
                    break
        return arch

    def get_deployment_info(self):
        '''
        Returns an object of type Deployment
        '''
        return Deployment(installer=self.installer,
                          installer_ip=self.installer_ip,
                          scenario=os.getenv('DEPLOY_SCENARIO', 'Unknown'),
                          status=self.get_deployment_status(),
                          pod=os.getenv('NODE_NAME', 'Unknown'),
                          openstack_version=self.get_openstack_version(),
                          sdn_controller=self.get_sdn_version(),
                          nodes=self.nodes)
|
"""
env.py:
Read a bash script and learn the environment variables into a python dictionary.
This allows MySQL database host, database, username and password to be set in
environment variables for both bash scripts and Python programs.
Can read environment variables in bash scripts such as:
export FOO=bar
export BAR=biff
and return an associative array of {'FOO':'bar','BAR':biff}
Can find variables in /etc/profile.d/*.bash.
Can process JSON configuration files in the form:
{ "*": {'name':'val',...},
"E1": {'name':'val',...},
"E2": {'name':'val',...}}
and search for the value of 'name' going to the current environment (e.g. E1), and then to the default environment (e.g. *)
"""
import os
import re
import pwd
import sys
import glob
import json
import logging
from os.path import dirname,basename,abspath
# Matches "NAME=value" lines, with an optional leading "export".
VARS_RE = re.compile(r"^(export)?\s*(?P<name>[a-zA-Z][a-zA-Z0-9_]*)=(?P<value>.*)$")
# Matches only explicit "export NAME=value" lines (unused in this module).
EXPORT_RE = re.compile(r"^export ([a-zA-Z][a-zA-Z0-9_]*)=(.*)$")


def get_vars(fname):
    """Read the bash EXPORT variables in fname and return them in a dictionary.

    :param fname: the name of a bash script
    """
    learned = {}
    with open(fname, 'r') as script:
        for raw_line in script:
            match = VARS_RE.search(raw_line)
            if not match:
                continue
            key = match.group('name')
            val = match.group('value')
            # Strip one layer of matching surrounding quotes, if present.
            if val and (val[0] in ['"', "'"]) and (val[0] == val[-1]):
                val = val[1:-1]
            learned[key] = val
    return learned
def get_env(pathname=None, *, profile_dir=None, prefix=None):
    """Read the BASH file and extract the variables. Currently this is
    done with pattern matching. Another way would be to run the BASH
    script as a subshell and then do a printenv and actually capture the
    variables.

    Learned variables are also exported into os.environ (after $VAR expansion).

    :param pathname: if provided, use this path
    :param profile_dir: If provided, search this directory
    :param prefix: if provided and profile_dir is provided, use the first
        file in the directory whose name starts with prefix
    :return: the variables that were learned.
    :raises ValueError: when both pathname and profile_dir are given, or
        profile_dir is given without prefix
    :raises FileNotFoundError: when no matching file exists in profile_dir
    """
    if (pathname is not None) and (profile_dir is not None):
        raise ValueError("pathname and profile_dir cannot both be provided")
    if (profile_dir is not None) and (prefix is None):
        # Bug fix: the message previously said "pathname must be provided",
        # but the condition actually requires `prefix`.
        raise ValueError("If profile_dir is provided, prefix must be provided.")
    if profile_dir:
        names = sorted(glob.glob(os.path.join(profile_dir, prefix + "*")))
        if len(names) == 0:
            raise FileNotFoundError(f"No file with prefix {prefix} in {profile_dir}")
        pathname = names[0]
    ret = {}
    for (key, val) in get_vars(pathname).items():
        # Expand $VAR references and export into this process's environment.
        ret[key] = os.environ[key] = os.path.expandvars(val)
    return ret
def get_census_env():
    """Legacy to be deleted.
    Look for a script in /etc/profile.d/ beginning with 'census' and read the variables in it.
    """
    # Delegates to get_env(); raises FileNotFoundError if no census* file exists.
    return get_env(profile_dir = '/etc/profile.d', prefix = 'census')
def get_home():
    """Return the current user's home directory without using the HOME variable. """
    pw_entry = pwd.getpwuid(os.getuid())
    return pw_entry.pw_dir
def dump(out):
    """Write a header followed by KEY=value lines for every environment variable."""
    out.write("==== ENV ====\n")
    for key in os.environ:
        out.write(f"{key}={os.environ[key]}\n")
class JSONConfigReader:
    """Layered JSON configuration reader.

    The configuration maps environment names (e.g. "E1") to dicts of
    settings, with "*" holding defaults. Lookups try the selected
    environment first, then fall back to "*".
    """

    @classmethod
    def searchFile(cls, path):
        """Search for the file named by path in the current directory, and then in every directory up to the root.
        Then every directory from this module's directory to root.
        When found, return it. Otherwise return path if the file exists, otherwise raise an exception.
        """
        # Fix: first parameter of a @classmethod is conventionally `cls`,
        # not `self` (callers are unaffected; it is positional).
        checked = []
        # Walk from the current working directory up to the root.
        name = os.path.join(os.getcwd(), basename(path))
        while dirname(name) != '/':
            checked.append(name)
            if os.path.exists(name):
                return name
            name = os.path.join(dirname(dirname(name)), basename(name))
        # Then walk from this module's directory up to the root.
        name = os.path.join(dirname(abspath(__file__)), basename(path))
        while dirname(name) != '/':
            checked.append(name)
            if os.path.exists(name):
                return name
            name = os.path.join(dirname(dirname(name)), basename(name))
        if os.path.exists(path):
            return path
        for check in checked:
            logging.error(f"checked {check}")
        raise FileNotFoundError(path)

    def __init__(self, *, path=None, search=True, config=None, environment=None, envar=None):
        """
        :param path: location of JSON config file.
        :param search: Search from current directory up to root for a file with the same filename as `path` before using `path`.
        :param config: If provided, use the configuration specified in this dictionary, instead of path.
        :param environment: Specifies the environment inside the JSON dictionary that should be used, and then default to '*'.
        :param envar: Specifies an os.environ[] name that should be used for the environment.
        """
        self.environment = '*'
        self.path = None
        if (path is not None) and (config is not None):
            raise ValueError("Only path or config can be specified")
        if (environment is not None) and (envar is not None):
            raise ValueError("Only environment or envar can be specified")
        if path:
            # If search is true, search for the named config file from the
            # current directory to the root directory; otherwise use the
            # pathname as given.
            if search:
                self.path = self.searchFile(path)
            else:
                self.path = path
            # Fix: close the file handle promptly instead of relying on GC.
            with open(self.path) as config_file:
                self.config = json.load(config_file)
        else:
            self.path = 'provided dictionary'
            self.config = config
        if environment:
            self.environment = environment
        if envar:
            self.environment = os.environ[envar]

    def get_config(self, variable_name, environment=None):
        """Look up variable_name in `environment`, falling back to '*'.

        Supports one layer of dotted names: "FOO.BAR" looks up "FOO" and
        then indexes the result with "BAR".

        :raises KeyError: when the name is found in neither environment.
        """
        if environment is None:
            environment = self.environment
        if "." in variable_name:
            (name, ext) = variable_name.split(".", 1)
            val = self.get_config(name, environment)
            return val[ext]
        for check in [environment, '*']:
            try:
                return self.config[check][variable_name]
            except KeyError:
                pass
        print(f"config:\n{json.dumps(self.config,default=str,indent=4)}", file=sys.stderr)
        # Fix: report the requested environment; the loop variable `check`
        # always held '*' here, making the message redundant.
        raise KeyError(f"{variable_name} not in {environment} or '*' in {self.path}")
if __name__=="__main__":
    """Read a file and print the learned variables"""
    import argparse
    # Fix: the description was copy-pasted from an unrelated tool
    # ("Import the Digital Corpora logs."); describe what this script does.
    parser = argparse.ArgumentParser(description='Read a bash script and print the environment variables it sets.',
                                     formatter_class=argparse.ArgumentDefaultsHelpFormatter)
    parser.add_argument("envfile", help="File with environment variables set by bash")
    args = parser.parse_args()
    d = get_vars(args.envfile)
    for (k, v) in d.items():
        print(f"{k}={v}")
|
import numpy as np
import os
# Silence TensorFlow's C++ log output (3 = errors only).
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'
# Make CUDA device IDs match the physical bus order (as in nvidia-smi).
os.environ["CUDA_DEVICE_ORDER"] = "PCI_" \
    "BUS_ID" # see issue #152
# Restrict this process to GPU 2 only.
os.environ["CUDA_VISIBLE_DEVICES"] = "2"
import tensorflow as tf
def permute_batched_tensor_3dim(batched_x, batched_perm_ids):
    """Scatter the rows of each batch element of a rank-3 tensor.

    Given batched_x of shape (batch, n, d) and integer batched_perm_ids of
    shape (batch, n), builds out such that
    out[b, batched_perm_ids[b, i], :] = batched_x[b, i, :].
    Requires statically known shapes (uses batched_x.shape directly).
    """
    # Broadcast each row's destination index across the feature dimension.
    indices = tf.tile(tf.expand_dims(batched_perm_ids, 2), [1,1,batched_x.shape[2]])

    # Create additional indices: batch index i1 and feature index i2 grids.
    i1, i2 = tf.meshgrid(tf.range(batched_x.shape[0]),
                         tf.range(batched_x.shape[2]), indexing="ij")
    i1 = tf.tile(i1[:, tf.newaxis, :], [1, batched_x.shape[1], 1])
    i2 = tf.tile(i2[:, tf.newaxis, :], [1, batched_x.shape[1], 1])

    # Create final indices: one (batch, destination-row, feature) triple
    # per scalar element of batched_x.
    idx = tf.stack([i1, indices, i2], axis=-1)
    temp = tf.scatter_nd(idx, batched_x, batched_x.shape)
    return temp
def permute_batched_tensor_2dim(batched_x, batched_perm_ids):
    """Permute each row of a rank-2 tensor via the rank-3 implementation.

    Adds a singleton feature axis, delegates to permute_batched_tensor_3dim,
    and squeezes the result back down to rank 2.
    """
    with_feature_axis = tf.expand_dims(batched_x, axis=-1)
    permuted = permute_batched_tensor_3dim(with_feature_axis, batched_perm_ids)
    return tf.squeeze(permuted)
def test_permute_tensor_batched_3d():
    """Check permute_batched_tensor_3dim against the NumPy permutation ids
    (TF1 graph mode: builds a graph, then evaluates it in a Session)."""
    batch_size = 8
    num_vars = 10
    dims = 3
    x = tf.random.normal((batch_size, num_vars, dims))
    np_perm_id = np.vstack([np.random.permutation(num_vars) for i in range(batch_size)])
    tf_perm_id = tf.Variable(np_perm_id, dtype=tf.int32)
    x_perm = permute_batched_tensor_3dim(x, tf_perm_id)
    sess = tf.Session()
    sess.run(tf.global_variables_initializer())
    [y, y_perm] = sess.run([x, x_perm])
    for b in range(batch_size):
        print("X was")
        print(y[b])
        print("Permuted it becomes")
        print(y_perm[b])
        print("Ids were")
        print(np_perm_id[b])
    # Each source row i must land at destination row ids[i].
    # Fix: the original wrapped this in a redundant outer
    # `for i in range(batch_size)` loop that re-ran the identical checks
    # batch_size times while shadowing both `i` and the builtin `id`.
    for y_b, y_perm_b, ids in zip(y, y_perm, np_perm_id):
        for src_row, dest_row in enumerate(ids):
            for m, n in zip(y_perm_b[dest_row], y_b[src_row]):
                assert m == n
def test_permute_tensor_batched_2d():
    """Visual smoke test of permute_batched_tensor_2dim (TF1 graph mode).

    Prints the input, the permuted output and the permutation ids; the
    automated verification loop is currently commented out.
    """
    batch_size = 8
    num_vars = 10
    x = tf.random.normal((batch_size, num_vars))
    np_perm_id = np.vstack([np.random.permutation(num_vars) for i in range(batch_size)])
    tf_perm_id = tf.Variable(np_perm_id, dtype=tf.int32)
    x_perm = permute_batched_tensor_2dim(x, tf_perm_id)
    # TF1-style execution: evaluate the graph inside a Session.
    sess = tf.Session()
    sess.run(tf.global_variables_initializer())
    [y, y_perm] = sess.run([x, x_perm])
    print("X was")
    print(y)
    print("Permuted it becomes")
    print(y_perm)
    print("Ids were")
    print(np_perm_id)
    # for y_i, y_perm_i, id in zip(y, y_perm, np_perm_id):
    #     for i, id_i in enumerate(id):
    #         for m, n in zip(y_perm_i[id_i], y_i[i]):
    #             assert m == n
    #
if __name__ == "__main__":
    # test_permute_tensor_batched_3d()  # 3-D variant, kept for manual runs
    test_permute_tensor_batched_2d()
|
import torch as tn
from torchvision import datasets, transforms
import torchtt as tntt
import torch.nn as nn
import matplotlib.pyplot as plt
import numpy as np
import datetime
# Expects an ImageFolder layout: Cat_Dog_data/<class_name>/*.jpg
# NOTE(review): the "train" and "test" loaders read the SAME directory with
# identical transforms — there is no held-out split here; the reported test
# loss is measured on (a reshuffle of) the training images.
data_dir = 'Cat_Dog_data/'
transform_train = transforms.Compose([transforms.Resize(64), transforms.CenterCrop(64), transforms.ToTensor()])
dataset_train = datasets.ImageFolder(data_dir, transform=transform_train)
dataloader_train = tn.utils.data.DataLoader(dataset_train, batch_size=3250, shuffle=True, pin_memory=True)
transform_test = transforms.Compose([transforms.Resize(64), transforms.CenterCrop(64), transforms.ToTensor()])
dataset_test = datasets.ImageFolder(data_dir, transform=transform_test)
dataloader_test = tn.utils.data.DataLoader(dataset_test, batch_size=3500, shuffle=True, pin_memory=True)
class BasicTT(nn.Module):
    """Three tensor-train linear layers followed by a dense log-softmax head.

    Input is a batch of 64x64 RGB images factored as (3, 8, 8, 8, 8);
    the TT layers progressively contract this to 32 features, which a
    dense layer maps to 2 class log-probabilities.
    """

    def __init__(self):
        super().__init__()
        self.ttl1 = tntt.nn.LinearLayerTT([3,8,8,8,8], [8,4,4,4,4], [1,3,2,2,2,1])
        self.ttl2 = tntt.nn.LinearLayerTT([8,4,4,4,4], [4,2,2,2,2], [1,2,2,2,2,1])
        self.ttl3 = tntt.nn.LinearLayerTT([4,2,2,2,2], [2,2,2,2,2], [1,2,2,2,2,1])
        self.linear = nn.Linear(32, 2, dtype = tn.float32)
        self.logsoftmax = nn.LogSoftmax(1)

    def forward(self, x):
        """Return per-class log-probabilities for a batch of factored images."""
        x = tn.relu(self.ttl1(x))
        x = tn.relu(self.ttl2(x))
        x = tn.relu(self.ttl3(x))
        flat = x.view(-1, 32)
        return self.logsoftmax(self.linear(flat))
# Target device; assumes a CUDA GPU is present at index 0 — TODO confirm.
device_name = 'cuda:0'
model = BasicTT()
model.to(device_name)
# Plain SGD with momentum; the Adam alternative is kept for experimentation.
optimizer = tn.optim.SGD(model.parameters(), lr=0.00001, momentum=0.9)
# optimizer = tn.optim.Adam(model.parameters(), lr=0.001)
loss_function = tn.nn.CrossEntropyLoss()
def do_epoch(i):
    """Run one training pass over dataloader_train and return the mean batch loss.

    Uses the module-level model, optimizer, loss_function and device_name.
    The epoch index `i` is accepted for symmetry with the caller but unused.
    """
    running_loss = 0.0
    for batch_idx, batch in enumerate(dataloader_train):
        inputs, labels = batch[0].to(device_name), batch[1].to(device_name)
        # Factor each 3x64x64 image into the TT input shape (3,8,8,8,8).
        inputs = tn.reshape(inputs, [-1, 3, 8, 8, 8, 8])
        optimizer.zero_grad()
        # Make predictions for this batch
        outputs = model(inputs)
        # Compute the loss and its gradients
        loss = loss_function(outputs, labels)
        loss.backward()
        # Adjust learning weights
        optimizer.step()
        running_loss += loss.item()
    return running_loss / len(dataloader_train)
def test_loss():
    """Return the mean batch loss over dataloader_test (no gradient updates)."""
    accumulated = 0
    for batch in dataloader_test:
        inputs, labels = batch[0].to(device_name), batch[1].to(device_name)
        inputs = tn.reshape(inputs, [-1, 3, 8, 8, 8, 8])
        accumulated += loss_function(model(inputs), labels).item()
    return accumulated / len(dataloader_test)
# Train for a fixed number of epochs, recording per-epoch train/test loss.
n_epochs = 1000
history_test = []
history_train = []
for epoch in range(n_epochs):
    print('Epoch ',epoch+1)
    time_epoch = datetime.datetime.now()
    model.train(True)
    average_loss = do_epoch(epoch)
    model.train(False)
    average_test_loss = test_loss()
    time_epoch = datetime.datetime.now() - time_epoch
    print('\tTraining loss %e test loss %e'%(average_loss,average_test_loss))
    print('\tTime for the epoch',time_epoch)
    history_test.append(average_test_loss)
    history_train.append(average_loss)
# Plot the loss curves once training finishes.
plt.figure()
plt.plot(np.arange(n_epochs)+1,np.array(history_train))
plt.plot(np.arange(n_epochs)+1,np.array(history_test))
plt.legend(['training','test'])
<gh_stars>0
import numpy as np
import pickle as pkl
import networkx as nx
import scipy.sparse as sp
from scipy.sparse.linalg.eigen.arpack import eigsh
import sys
import random
def parse_index_file(filename):
    """Parse an index file containing one integer per line.

    :param filename: path to the index file
    :return: list of the integers, in file order
    """
    # Fix: use a context manager so the file handle is closed promptly
    # (the original opened the file in the for statement and leaked it).
    with open(filename) as index_file:
        return [int(line.strip()) for line in index_file]
def sample_mask(idx, l):
    """Create a boolean mask of length l that is True at the positions in idx."""
    mask = np.zeros(l)
    mask[idx] = 1
    # Fix: np.bool was deprecated in NumPy 1.20 and removed in 1.24;
    # the builtin bool dtype is the supported spelling.
    return np.array(mask, dtype=bool)
def load_data():
    """Load node features, adjacency matrix and labels from pickled files and
    produce a random 70/10/20 train/val/test node split.

    Returns (adj_mat, features, y_train, y_val, y_test, train_mask, val_mask,
    test_mask).

    NOTE(review): paths are hard-coded to one user's home directory; the
    split is re-randomized on every call (no fixed seed); the pickle file
    handles opened inline are never closed; and unpickling trusts the files
    completely — confirm these are acceptable for this pipeline.
    """
    # First we load this data
    features = pkl.load(
        open('/home/c/cksash/kdd/node_vectors_joined.pkl', 'rb')
    )
    num_nodes = features.shape[0]
    features = sp.csr_matrix(features)
    adj_mat = pkl.load(
        open('/home/c/cksash/kdd/joined_adjmatrix.pkl', 'rb')
    )
    adj_mat = sp.csr_matrix(adj_mat)
    labels = pkl.load(
        open('/home/c/cksash/kdd/all_labels.pkl', 'rb')
    )
    # Now we perform the train-test-val split
    indices = list(range(num_nodes))
    random.shuffle(indices)
    train_ratio = 0.7
    val_ratio = 0.1
    test_ratio = 0.2
    assert(train_ratio + val_ratio + test_ratio == 1.0)
    total_elements = len(indices)
    # Slice sizes are fractions of the ORIGINAL node count, consumed from
    # the shuffled index list; whatever remains becomes the test set.
    train_indices = indices[0: int(train_ratio * total_elements)]
    indices = indices[int(train_ratio * total_elements):]
    val_indices = indices[0: int(val_ratio * total_elements)]
    indices = indices[int(val_ratio * total_elements):]
    test_indices = indices
    print(train_indices)
    print(val_indices)
    print(test_indices)
    # Initialize the masks
    train_mask = sample_mask(train_indices, labels.shape[0])
    val_mask = sample_mask(val_indices, labels.shape[0])
    test_mask = sample_mask(test_indices, labels.shape[0])
    # Label matrices are zero outside each split's own rows.
    y_train = np.zeros(labels.shape)
    y_val = np.zeros(labels.shape)
    y_test = np.zeros(labels.shape)
    y_train[train_mask, :] = labels[train_mask, :]
    y_val[val_mask, :] = labels[val_mask, :]
    y_test[test_mask, :] = labels[test_mask, :]
    print(adj_mat.shape)
    print(features.shape)
    return adj_mat, features, y_train, y_val, y_test, train_mask, val_mask, test_mask
def sparse_to_tuple(sparse_mx):
    """Convert sparse matrix to tuple representation.

    Each matrix becomes (coords, values, shape) where coords is an (nnz, 2)
    array of row/col indices. A list argument is converted element-wise,
    in place, and the same list object is returned.
    """
    def as_tuple(mat):
        coo = mat if sp.isspmatrix_coo(mat) else mat.tocoo()
        coords = np.vstack((coo.row, coo.col)).transpose()
        return coords, coo.data, coo.shape

    if isinstance(sparse_mx, list):
        for pos in range(len(sparse_mx)):
            sparse_mx[pos] = as_tuple(sparse_mx[pos])
        return sparse_mx
    return as_tuple(sparse_mx)
def preprocess_features(features):
    """Row-normalize feature matrix and convert to tuple representation."""
    row_totals = np.array(features.sum(1))
    inv_totals = np.power(row_totals, -1).flatten()
    # All-zero rows produce inf; zero them out so those rows stay zero.
    inv_totals[np.isinf(inv_totals)] = 0.
    normalized = sp.diags(inv_totals).dot(features)
    return sparse_to_tuple(normalized)
def normalize_adj(adj):
    """Symmetrically normalize an adjacency matrix: D^-1/2 @ A @ D^-1/2."""
    adj = sp.coo_matrix(adj)
    degree = np.array(adj.sum(1))
    inv_sqrt_degree = np.power(degree, -0.5).flatten()
    # Isolated nodes (degree 0) yield inf; map them to 0 contribution.
    inv_sqrt_degree[np.isinf(inv_sqrt_degree)] = 0.
    d_half = sp.diags(inv_sqrt_degree)
    return adj.dot(d_half).transpose().dot(d_half).tocoo()
def preprocess_adj(adj):
    """Add self-loops, symmetrically normalize, and return the tuple
    representation used by the simple GCN model."""
    with_self_loops = adj + sp.eye(adj.shape[0])
    return sparse_to_tuple(normalize_adj(with_self_loops))
def construct_feed_dict(features, support, labels, labels_mask, placeholders):
    """Construct the feed dictionary mapping graph placeholders to values."""
    feed_dict = {
        placeholders['labels']: labels,
        placeholders['labels_mask']: labels_mask,
        placeholders['features']: features,
        # features is a (coords, values, shape) tuple; the nnz count comes
        # from the values array's shape.
        placeholders['num_features_nonzero']: features[1].shape,
    }
    for i, sup in enumerate(support):
        feed_dict[placeholders['support'][i]] = sup
    return feed_dict
def chebyshev_polynomials(adj, k):
    """Calculate Chebyshev polynomials up to order k. Return a list of sparse matrices (tuple representation)."""
    print("Calculating Chebyshev polynomials up to order {}...".format(k))
    n = adj.shape[0]
    laplacian = sp.eye(n) - normalize_adj(adj)
    # Rescale so the spectrum lies in [-1, 1].
    largest_eigval, _ = eigsh(laplacian, 1, which='LM')
    scaled_laplacian = (2. / largest_eigval[0]) * laplacian - sp.eye(n)
    # T_0 = I, T_1 = L~, T_m = 2 L~ T_{m-1} - T_{m-2}
    t_k = [sp.eye(n), scaled_laplacian]
    s_lap = sp.csr_matrix(scaled_laplacian, copy=True)
    for _ in range(2, k + 1):
        t_k.append(2 * s_lap.dot(t_k[-1]) - t_k[-2])
    return sparse_to_tuple(t_k)
|
<reponame>BlackLight/platypush
import datetime
import enum
import logging
import threading
import croniter
from dateutil.tz import gettz
from platypush.procedure import Procedure
from platypush.utils import is_functional_cron
logger = logging.getLogger('platypush:cron')
class CronjobState(enum.IntEnum):
    """Lifecycle states of a Cronjob thread."""
    IDLE = 0     # created but not yet started
    WAIT = 1     # sleeping until the next cron occurrence
    RUNNING = 2  # actions currently executing
    DONE = 3     # actions finished without raising
    ERROR = 4    # actions raised an exception
class Cronjob(threading.Thread):
    """Thread that waits for the next occurrence of a cron expression and
    then runs the configured actions (a Procedure or a callable) once."""

    def __init__(self, name, cron_expression, actions):
        super().__init__()
        self.cron_expression = cron_expression
        self.name = name
        self.state = CronjobState.IDLE
        self._should_stop = threading.Event()
        # Dict/list configurations are compiled into a synchronous Procedure;
        # anything else (e.g. a functional cron) is stored and called directly.
        if isinstance(actions, dict) or isinstance(actions, list):
            self.actions = Procedure.build(name=name + '__Cron', _async=False, requests=actions)
        else:
            self.actions = actions

    def run(self):
        """Wait for the next scheduled time, then execute the actions once."""
        self.state = CronjobState.WAIT
        self.wait()
        if self.should_stop():
            return
        self.state = CronjobState.RUNNING
        try:
            logger.info('Running cronjob {}'.format(self.name))
            context = {}
            if isinstance(self.actions, Procedure):
                response = self.actions.execute(_async=False, **context)
            else:
                response = self.actions(**context)
            logger.info('Response from cronjob {}: {}'.format(self.name, response))
            self.state = CronjobState.DONE
        except Exception as e:
            logger.exception(e)
            self.state = CronjobState.ERROR

    def wait(self):
        """Block until the next cron occurrence, or until stop() is called."""
        # NOTE(review): replace() stamps the local tz onto the naive now()
        # rather than converting; presumably intended since now() is already
        # local time -- confirm if gettz() ever gets a non-local zone.
        now = datetime.datetime.now().replace(tzinfo=gettz())  # lgtm [py/call-to-non-callable]
        cron = croniter.croniter(self.cron_expression, now)
        next_run = cron.get_next()  # epoch timestamp of the next occurrence
        self._should_stop.wait(next_run - now.timestamp())

    def stop(self):
        """Request the job to abort its wait / skip running."""
        self._should_stop.set()

    def should_stop(self):
        """Return True once stop() has been requested."""
        return self._should_stop.is_set()
class CronScheduler(threading.Thread):
    """Thread that owns one Cronjob per configured cron and (re)creates and
    starts them as they complete, polling every 0.5 seconds."""

    def __init__(self, jobs):
        super().__init__()
        # name -> raw job configuration (dict or functional cron)
        self.jobs_config = jobs
        # name -> live Cronjob thread
        self._jobs = {}
        self._should_stop = threading.Event()
        logger.info('Cron scheduler initialized with {} jobs'.
                    format(len(self.jobs_config.keys())))

    def _get_job(self, name, config):
        """Return the live job for *name*, creating a fresh Cronjob when none
        exists yet or the previous one already finished (DONE/ERROR)."""
        job = self._jobs.get(name)
        if job and job.state not in [CronjobState.DONE, CronjobState.ERROR]:
            return job
        if isinstance(config, dict):
            self._jobs[name] = Cronjob(name=name, cron_expression=config['cron_expression'],
                                       actions=config['actions'])
        elif is_functional_cron(config):
            # Functional crons carry their own schedule and are themselves
            # the action to run.
            self._jobs[name] = Cronjob(name=name, cron_expression=config.cron_expression,
                                       actions=config)
        else:
            raise AssertionError('Expected type dict or function for cron {}, got {}'.format(
                name, type(config)))
        return self._jobs[name]

    def stop(self):
        """Stop all spawned jobs, then the scheduler loop itself."""
        for job in self._jobs.values():
            job.stop()
        self._should_stop.set()

    def should_stop(self):
        """Return True once stop() has been requested."""
        return self._should_stop.is_set()

    def run(self):
        """Main loop: create/start jobs for every configured cron until stopped."""
        logger.info('Running cron scheduler')
        while not self.should_stop():
            for (job_name, job_config) in self.jobs_config.items():
                job = self._get_job(name=job_name, config=job_config)
                if job.state == CronjobState.IDLE:
                    job.start()
            self._should_stop.wait(timeout=0.5)
        logger.info('Terminating cron scheduler')
# vim:sw=4:ts=4:et:
|
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
"""
This file provides the definition of the convolutional heads used to predict masks, as well as the losses
"""
from typing import List
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch import Tensor
from detectron2.structures import Instances
from detectron2.layers.blocks import DepthwiseSeparableConv2d
from ..util.misc import interpolate
from .misc import MLP
class MaskHead(nn.Module):
    """Convolutional head that fuses transformer features with two FPN levels
    and predicts per-query instance-mask logits via dynamic 1x1 convolutions."""

    def __init__(self, hidden_dim, fpn_dims, num_frames):
        super().__init__()
        self.num_frames = num_frames
        # 3x3 conv + GroupNorm refinement stages interleaved with FPN fusion.
        self.lay1 = torch.nn.Conv2d(hidden_dim, hidden_dim, 3, padding=1)
        self.gn1 = torch.nn.GroupNorm(32, hidden_dim)
        self.lay2 = torch.nn.Conv2d(hidden_dim, hidden_dim, 3, padding=1)
        self.gn2 = torch.nn.GroupNorm(32, hidden_dim)
        self.lay3 = torch.nn.Conv2d(hidden_dim, hidden_dim, 3, padding=1)
        self.gn3 = torch.nn.GroupNorm(32, hidden_dim)
        self.out_lay = DepthwiseSeparableConv2d(hidden_dim, hidden_dim, 5, padding=2, activation1=F.relu, activation2=F.relu)
        # 1x1 adapters projecting each FPN level to hidden_dim channels.
        self.adapter1 = torch.nn.Conv2d(fpn_dims[0], hidden_dim, 1)
        self.adapter2 = torch.nn.Conv2d(fpn_dims[1], hidden_dim, 1)
        # Maps each query embedding to the weights of its dynamic 1x1 conv.
        self.convert_to_weight = MLP(hidden_dim, hidden_dim, hidden_dim, 3)
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                # NOTE(review): assumes every Conv2d has a bias (the default);
                # constant_ would fail on a bias=None conv.
                nn.init.kaiming_uniform_(m.weight, a=1)
                nn.init.constant_(m.bias, 0)

    def forward(self, x: Tensor, fpns: List[Tensor], tq: Tensor):
        # Shapes (established by the unpacking below): x is (B*T, C, H, W)
        # with T frames per batch element; tq is (L, B, N, C) query embeddings.
        x = self.lay1(x)
        x = self.gn1(x)
        # Fuse FPN level 0 at its resolution.
        cur_fpn = self.adapter1(fpns[0])
        x = cur_fpn + F.interpolate(x, size=cur_fpn.shape[-2:], mode="nearest")
        x = self.lay2(x)
        x = self.gn2(x)
        # Fuse FPN level 1.
        cur_fpn = self.adapter2(fpns[1])
        x = cur_fpn + F.interpolate(x, size=cur_fpn.shape[-2:], mode="nearest")
        x = self.lay3(x)
        x = self.gn3(x)
        x = F.relu(x)
        BT, C, H, W = x.shape
        L, B, N, C = tq.shape
        T = BT // B
        x = self.out_lay(x)
        # One C-dim 1x1 filter per (layer, frame, query); repeated over frames.
        w = self.convert_to_weight(tq).permute(1, 0, 2, 3)
        w = w.unsqueeze(1).repeat(1, T, 1, 1, 1)
        # Grouped conv applies each sample's filters only to its own map.
        mask_logits = F.conv2d(x.view(1, BT * C, H, W), w.reshape(B * T * L * N, C, 1, 1), groups=BT)
        mask_logits = mask_logits.view(B, T, L, N, H, W).permute(2, 0, 3, 1, 4, 5)
        return mask_logits
def dice_coef(inputs, targets):
    """Pairwise soft-Dice coefficient between every input mask and target mask."""
    probs = inputs.sigmoid().flatten(1).unsqueeze(1)  # (N, 1, P)
    tgts = targets.flatten(1).unsqueeze(0)            # (1, M, P)
    intersection = 2 * (probs * tgts).sum(2)
    total = probs.sum(-1) + tgts.sum(-1)
    # NOTE coef doesn't be subtracted to 1 as it is not necessary for computing costs
    return (intersection + 1) / (total + 1)
def dice_loss(inputs, targets, num_boxes):
    """
    Compute the DICE loss (similar to generalized IoU for masks).

    Args:
        inputs: float tensor of arbitrary shape -- the predicted logits.
        targets: float tensor with the same shape as inputs, holding binary
            labels (0 negative, 1 positive) for each element.
        num_boxes: normalizer for the summed per-mask losses.
    """
    probs = inputs.sigmoid().flatten(1)
    intersection = 2 * (probs * targets).sum(1)
    union = probs.sum(-1) + targets.sum(-1)
    per_mask = 1 - (intersection + 1) / (union + 1)
    return per_mask.sum() / num_boxes
def sigmoid_focal_coef(inputs, targets, alpha: float = 0.25, gamma: float = 2):
    """Pairwise mean focal-loss cost between every input and every target."""
    n_in, n_tgt = len(inputs), len(targets)
    # Broadcast to (N, M, P) so every input is scored against every target.
    logits = inputs.flatten(1).unsqueeze(1).expand(-1, n_tgt, -1)
    labels = targets.flatten(1).unsqueeze(0).expand(n_in, -1, -1)
    probs = logits.sigmoid()
    ce = F.binary_cross_entropy_with_logits(logits, labels, reduction="none")
    p_t = probs * labels + (1 - probs) * (1 - labels)
    coef = ce * (1 - p_t) ** gamma
    if alpha >= 0:
        coef = (alpha * labels + (1 - alpha) * (1 - labels)) * coef
    return coef.mean(2)
def sigmoid_focal_loss(inputs, targets, num_boxes, alpha: float = 0.25, gamma: float = 2):
    """
    Focal loss from RetinaNet (https://arxiv.org/abs/1708.02002).

    Args:
        inputs: float tensor of logits, one per element.
        targets: float tensor with the same shape, holding binary labels
            (0 negative, 1 positive).
        num_boxes: normalizer for the per-prediction mean losses.
        alpha: optional weighting factor in (0, 1) balancing positives vs
            negatives; a negative value disables it.
        gamma: focusing exponent on (1 - p_t) balancing easy vs hard examples.
    Returns:
        Scalar loss tensor.
    """
    probs = inputs.sigmoid()
    ce = F.binary_cross_entropy_with_logits(inputs, targets, reduction="none")
    p_t = probs * targets + (1 - probs) * (1 - targets)
    focal = ce * (1 - p_t) ** gamma
    if alpha >= 0:
        focal = (alpha * targets + (1 - alpha) * (1 - targets)) * focal
    return focal.mean(1).sum() / num_boxes
def segmentation_postprocess(results, output_height, output_width, mask_threshold=0.5):
    """
    For instance segmentation whose masks are size of batched output,
    not regional sizes as from R-CNN based predictor.

    Args:
        results: Instances holding predictions at the network input resolution.
        output_height, output_width: target image size.
        mask_threshold: probability threshold used to binarize upsampled masks.
    """
    scale_x, scale_y = (float(output_width) / results.image_size[1], float(output_height) / results.image_size[0])
    results = Instances((output_height, output_width), **results.get_fields())
    if results.has("pred_boxes"):
        output_boxes = results.pred_boxes
    elif results.has("proposal_boxes"):
        output_boxes = results.proposal_boxes
    output_boxes.scale(scale_x, scale_y)
    output_boxes.clip(results.image_size)
    if results.has("pred_masks"):
        # BUG FIX: the binarization threshold was hard-coded to 0.5, silently
        # ignoring the mask_threshold parameter.
        results.pred_masks = interpolate(
            results.pred_masks.float().unsqueeze(1), size=(output_height, output_width),
            mode='bilinear'
        ).squeeze(1) > mask_threshold
    # Drop instances whose boxes collapsed to empty after clipping.
    results = results[output_boxes.nonempty()]
    return results
|
<reponame>jimwaldo/HarvardX-Tools<filename>src/main/python/logs/buildClassHistograph.py
#!/usr/bin/env python
"""
Run from a directory containing directories with name harvardx-YYYY-MM-DD and which has
a set of directories that might contain a ClassList.csv file, this program will
create a set of csv files (written to the current directory) that will contain a line
for each date (as determined by the directory name) and the count of the events during
the week ending with that date.
@author: waldo
"""
import glob
import sys
import csv
import re
def addEvent(hdict, clDate, nev):
    """
    Add a count of events to the supplied dictionary for a class, for the
    date supplied. An existing entry for this date means the course is
    offered in multiple forms (e.g. both edge and on-line), so the extra
    events are added to the running total.

    TODO: when doing incremental updates, there needs to be some way of
    determining whether the count should be added (course offered in
    multiple forms) or ignored (this week's events added a second time).
    """
    if clDate not in hdict:
        hdict[clDate] = nev
    else:
        hdict[clDate] += nev
    return
def buildFromFile(fin, clDate, cdict):
    """
    Read in a csv file with lines of classname, number of events and
    add the events to the dictionary for the class. If the class does
    not have an event dictionary, create one for it.

    BUG FIX: csv.reader yields strings, so the event counts were being
    string-concatenated (e.g. "12" + "5" -> "125") instead of summed;
    convert to int before accumulating.
    """
    for cname, nev in fin:
        if cname not in cdict:
            cdict[cname] = {}
        addEvent(cdict[cname], clDate, int(nev))
    return
def getDatefromName(fname):
    """
    Returns the date of the form YYYY-MM-DD from fname, or an empty
    string if there is no string that matches. If there are multiple
    matches, this returns the first of them.
    """
    dates = re.findall(r"\d{4}-\d{2}-\d{2}", fname)
    if not dates:
        return ''
    # BUG FIX: dates[1] returned the SECOND match (and raised IndexError
    # when exactly one date was present); the documented contract is the
    # first match.
    return dates[0]
def getClassfromFileName(fname):
    """
    Get a class name from the filename. Assume the filename is formed
    from the classname + EvHist.csv; strip any directory components and
    return the class prefix of what is left.
    """
    # rpartition yields ('', '', fname) when there is no '/', so base is
    # the whole name in that case.
    _, _, base = fname.rpartition('/')
    return base[:base.rindex('EvHist')]
def getFileNamefromClass(cName, dirName=''):
    """
    Construct a filename from the class name. By default the file is in
    the current working directory; if a directory name is passed it is
    prepended to the filename.
    """
    fname = cName + 'EvHist.csv'
    return dirName + '/' + fname if dirName else fname
def processWeek(clDict, f):
    """
    Given a file name and a dictionary of events indexed by class names,
    open the file, make a csv.reader object, process the file, and then
    close the open file.
    """
    # Python 2 print statement -- this script targets Python 2.
    print "processing file", f
    ff = open(f, 'r')
    fin = csv.reader(ff)
    # The week-ending date is encoded in the directory/file path.
    fDate = getDatefromName(f)
    buildFromFile(fin, fDate, clDict)
    ff.close()
    return
def writeHistFile(c, histDict):
    """
    Writes a file of week, event count for a particular class. The file
    will be named by the class name + EvHist.csv.
    """
    fname = getFileNamefromClass(c)
    f = open(fname, 'w')
    fout = csv.writer(f)
    fout.writerow(['Date','Event Count'])
    # iterkeys: Python 2 API; rows are emitted in ascending date order
    # because YYYY-MM-DD strings sort chronologically.
    for d in sorted(histDict.iterkeys()):
        fout.writerow([d, histDict[d]])
    f.close()
    return
if __name__ == '__main__':
    # Expect weekly dump directories two levels deep, each possibly holding
    # a ClassList.csv of (class name, event count) rows.
    flist = glob.glob('*/*/ClassList.csv')
    if not flist:
        print 'No files found'
        sys.exit(1)
    clDict = {}
    # Accumulate per-class, per-week event counts across all dumps...
    for f in flist:
        processWeek(clDict, f)
    # ...then emit one <class>EvHist.csv per class in the current directory.
    for c in clDict.keys():
        writeHistFile(c, clDict[c])
|
<filename>emergency_stop.py
#!/usr/bin/env python
"""
Este programa implementa un freno de emergencia para evitar accidentes en Duckietown.
"""
import sys
import argparse
import gym
import gym_duckietown
from gym_duckietown.envs import DuckietownEnv
import numpy as np
import cv2
def mov_duckiebot(key):
    """Map a pressed key code to a Duckiebot action [linear_vel, angular_vel].

    Unmapped keys (including cv2.waitKey timeouts) yield no movement.
    """
    key_to_action = {ord(c): np.array(v) for c, v in (
        ('w', [1.0, 0.0]),
        ('s', [-1.0, 0.0]),
        ('a', [0.0, 1.0]),
        ('d', [0.0, -1.0]),
        ('q', [0.3, 1.0]),
        ('e', [0.3, -1.0]),
    )}
    return key_to_action.get(key, np.array([0.0, 0.0]))
def det_duckie(obs):
    """Detect yellow duckies in an RGB observation.

    Returns a list of (x, y, w, h) bounding boxes whose area exceeds the
    minimum detection area.
    """
    detections = list()
    lower_yellow = np.array([15, 155, 155])
    upper_yellow = np.array([45, 255, 255])
    min_area = 2500
    # Threshold the image in HSV space, keeping only the yellow range.
    hsv = cv2.cvtColor(obs, cv2.COLOR_RGB2HSV)
    mask = cv2.inRange(hsv, lower_yellow, upper_yellow)
    # Masked visualization image, kept for parity with the original pipeline
    # (its result is unused).
    _ = cv2.bitwise_and(obs, obs, mask=mask)
    # Morphological opening (erode then dilate) removes speckle noise:
    # https://docs.opencv.org/trunk/d9/d61/tutorial_py_morphological_ops.html
    kernel = np.ones((5, 5), np.uint8)
    eroded = cv2.erode(mask, kernel, iterations=1)
    opened = cv2.dilate(eroded, kernel, iterations=1)
    # Find blob contours:
    # https://docs.opencv.org/trunk/d3/d05/tutorial_py_table_of_contents_contours.html
    contours, _ = cv2.findContours(opened, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
    for contour in contours:
        x, y, w, h = cv2.boundingRect(contour)
        if w * h > min_area:
            detections.append((x, y, w, h))
    return detections
def draw_dets(obs, dets):
    """Draw a green 3px bounding box on the image for every detection."""
    for x, y, w, h in dets:
        top_left = (int(x), int(y))
        bottom_right = (int(x + w), int(y + h))
        cv2.rectangle(obs, top_left, bottom_right, (0, 255, 0), 3)
    return obs
def red_alert(obs):
    """Blend a 50% solid-red overlay into the observation as a visual warning."""
    overlay = np.zeros((480, 640, 3), dtype=np.uint8)
    overlay[..., 0] = 255  # channel 0 is red in the RGB observation
    return cv2.addWeighted(obs, 0.5, overlay, 0.5, 0)
if __name__ == '__main__':
    # Parse command-line arguments.
    parser = argparse.ArgumentParser()
    parser.add_argument('--env-name', default="Duckietown-udem1-v1")
    parser.add_argument('--map-name', default='free')
    args = parser.parse_args()
    # Build the simulation environment.
    if args.env_name and args.env_name.find('Duckietown') != -1:
        env = DuckietownEnv(
            map_name=args.map_name,
            domain_rand=False,
        )
    else:
        env = gym.make(args.env_name)
    env.reset()
    # No alert initially.
    alert = False
    # Fixed position of the duck in the map.
    duck_pos = np.array([2, 0, 2])
    # C = f * dr (f is the camera constant, dr the known duck height = 0.08).
    C = 60
    while True:
        # Read the currently pressed key; Esc ends the program.
        key = cv2.waitKey(0)
        if key == 27:
            break
        # Translate the key into a Duckiebot action.
        action = mov_duckiebot(key)
        # While alerting, block forward motion (reverse remains possible so
        # the robot can back away after stopping).
        if alert:
            action[0] = np.min([0.0, action[0]])
        # Step the simulator and get the observation, reward, etc.
        obs, reward, done, info = env.step(action)
        # Detect duckies and draw their bounding boxes.
        dets = det_duckie(obs)
        obs = draw_dets(obs, dets)
        # Ground-truth distance between the Duckiebot and the duck, used to
        # calibrate the constant C.
        dbot_pos = env.cur_pos
        # BUG FIX: the Euclidean distance must square the per-coordinate
        # differences BEFORE summing; the original sqrt(sum(diff)**2)
        # computed |sum of diffs|, which cancels opposite-signed components.
        dist = np.sqrt(np.sum((duck_pos - dbot_pos) ** 2))
        # Alert is off by default and re-evaluated per detection.
        alert = False
        for d in dets:
            # Detection height in pixels.
            p = d[3]
            # Distance estimate from apparent size (pinhole-camera formula).
            d_aprox = C / p
            print('p:', p)
            print('Da:', d_aprox)
            print('Dr:', dist)
            # Too close: raise the alert and tint the view red.
            if d_aprox < 0.3:
                alert = True
                obs = red_alert(obs)
        # Show the simulator observation in a window named "patos".
        cv2.imshow('patos', cv2.cvtColor(obs, cv2.COLOR_RGB2BGR))
    # Shut the environment down and exit.
    env.close()
|
# -*- coding: utf-8 -*-
from __future__ import (absolute_import, division,
print_function, unicode_literals)
from builtins import *
from future.builtins.disabled import *
import re
from ._common import *
class Clone(SubCommand):
    """Sub-command that clones one source VM (optionally from a snapshot,
    optionally with linked/delta disks) into one or more target VMs."""

    def __init__(self, *args, **kwargs):
        super(Clone, self).__init__(*args, **kwargs)

    @classmethod
    def addParser(cls, cmdLineParser, subparsers):
        """Register the 'clone' sub-parser and its command-line options."""
        parser = cmdLineParser.getSubParser(
            "clone", subparsers,
            help="Clone VMs")
        parser.add_argument(
            "name",
            metavar="name",
            help="VM to clone from")
        parser.add_argument(
            "--snap", type=str,
            metavar="<source snapshot>",
            dest="srcSnap",
            help="Snapshot to clone from, default is latest")
        parser.add_argument(
            "--target", nargs="+", required=True,
            metavar="target",
            help="List of target VMs to create")
        parser.add_argument(
            "--disk-mode", nargs="+", type=cmdLineParser.diskModeType,
            default=["all"],
            metavar="<disk-mode>", dest="diskMode",
            help="Delta backing for disks, only store deltas, default to *all*" +
                 "\n all: link all disks\n none: copy all disks\n ctrlNr-slotNr: link specific disk")
        parser.add_argument(
            "--cpus", type=int,
            metavar="cpus",
            help="CPUs")
        parser.add_argument(
            "--memory", type=int,
            metavar="memory",
            help="Memory in MB")
        parser.add_argument(
            "--extra-config", nargs="+", type=cmdLineParser.extraConfigType,
            default=[],
            metavar="key=value",
            dest="extraConfig",
            help="Extra config, use key=value")
        parser.add_argument(
            "--datastore", type=str,
            metavar="datastore",
            help="Datastore name")
        parser.add_argument(
            "--host", type=str,
            metavar="host",
            help="Host name (which host to place the new VMs on)")
        parser.add_argument(
            "--poweron", action="store_true",
            help="Power the cloned VMs on")
        parser.add_argument(
            "--csm", type=str,
            metavar="customization",
            # BUG FIX: typo "customication" in user-facing help text.
            help="Path to customization file")
        parser.set_defaults(cloneArgs=["name",
                                       "srcSnap", "target", "diskMode",
                                       "cpus", "memory", "host", "datastore", "poweron", "csm",
                                       "extraConfig"])

    @export
    def clone(self, name=None, srcSnap=None, target=None, diskMode=None, poweron=False,
              host=None, datastore=None, memory=None, cpus=None, csm=None, extraConfig=None):
        """Clone the VM *name* into each VM named in *target*.

        Returns 0 on success; raises RuntimeError if any clone failed and
        LookupError when a requested snapshot or linked disk is not found.
        """
        # BUG FIX: the original used mutable default arguments ([]) and
        # mutated diskMode below; use None sentinels and work on copies so
        # neither the defaults nor the caller's lists are modified.
        diskMode = list(diskMode) if diskMode is not None else []
        extraConfig = list(extraConfig) if extraConfig is not None else []
        self._checkType(name, (str, vim.VirtualMachine))
        self._checkType(srcSnap, (type(None), str))
        self._checkType(target, list)
        [self._checkType(x, str) for x in target]
        self._checkType(diskMode, list)
        for dm in diskMode:
            if isinstance(dm, tuple):
                [self._checkType(x, int) for x in dm]
                assert len(dm) == 2
            else:
                self._checkType(dm, (str, type(None)))
        self._checkType(poweron, bool)
        self._checkType(host, (type(None), str))
        self._checkType(datastore, (type(None), str))
        self._checkType(memory, (type(None), int))
        self._checkType(cpus, (type(None), int))
        self._checkType(csm, (type(None), str))
        self._checkType(extraConfig, list)
        for ec in extraConfig:
            self._checkType(ec, tuple)
            [self._checkType(x, str) for x in ec]
            assert len(ec) == 2
        # Resolve the source VM by exact-name match.
        regexps = [re.compile("^{}$".format(re.escape(name)))]
        fromVm = self.getRegisteredVms(regexps=regexps)[0]
        pool = None
        if host:
            (pool, host) = self.getPoolHostByHostname(host)
            assert pool is not None
            assert host is not None
        # fix newint class
        cpus = self._toNativeInt(cpus)
        memory = self._toNativeInt(memory)
        if datastore:
            datastore = self.getDatastoreByName(datastore)
            assert datastore is not None
        # Find the snapshot to clone from (the latest one when srcSnap is None).
        fromSnapshot = None
        for root, tree in self._walkVmSnapshots(fromVm):
            if not srcSnap:
                # Keep overwriting: the walk ends at the latest snapshot.
                fromSnapshot = tree.snapshot
            elif srcSnap == tree.name:
                fromSnapshot = tree.snapshot
                break
        if srcSnap and not fromSnapshot:
            raise LookupError("Snapshot '{}' not found".format(srcSnap))
        assert not fromSnapshot or isinstance(fromSnapshot, vim.vm.Snapshot)
        # Evaluate disk mode: all | none | explicit (ctrlNr, slotNr) tuples.
        linkAll = False
        linkedDisks = []
        assertDiskModeLen0 = True
        if None in diskMode:
            linkAll = False
            diskMode.remove(None)
        elif "all" in diskMode:
            linkAll = True
            diskMode.remove("all")
        else:
            assertDiskModeLen0 = False
        if assertDiskModeLen0 and len(diskMode) != 0:
            raise RuntimeError("Disk mode must have all | none | [disk1, disk2, ...]")
        # Remaining entries are the specific disks to link.
        for x in diskMode:
            linkedDisks.append(x)
        if fromVm.config.template and diskMode:
            raise RuntimeError("Template can not be linked")
        if (linkedDisks or linkAll) and not fromSnapshot:
            raise RuntimeError("Linking disk but no snapshot exist.")
        diskLocator = []
        DISK = vim.vm.device.VirtualDisk
        # Linked disks must be enumerated from the snapshot's layout.
        fromVmDisks = fromVm
        if fromSnapshot:
            fromVmDisks = fromSnapshot
        for (ctrlNr, slotNr, disk) in VirtualMachineDiskLayout(fromVmDisks):
            if not isinstance(disk, DISK):
                # skip cdroms
                continue
            diskMoveType = "moveAllDiskBackingsAndDisallowSharing"
            key = (ctrlNr, slotNr)
            if linkAll or key in linkedDisks:
                try:
                    linkedDisks.remove(key)
                except ValueError:
                    pass
                diskMoveType = "createNewChildDiskBacking"
            locator = {"datastore": disk.backing.datastore,
                       "diskId": disk.key,
                       "diskMoveType": diskMoveType}
            diskLocator.append(locator)
        if linkedDisks:
            raise LookupError("Not all disk to be linked where found, missing: '{}'".format(
                repr(linkedDisks)))
        relocationSpec = vim.vm.RelocateSpec()
        relocationSpec.disk = list(map(lambda kwargs: vim.vm.RelocateSpec.DiskLocator(**kwargs),
                                       diskLocator))
        if pool:
            relocationSpec.pool = pool
        if host:
            relocationSpec.host = host
        if datastore:
            relocationSpec.datastore = datastore
        configSpec = vim.vm.ConfigSpec()
        if memory:
            configSpec.memoryMB = memory
        if cpus:
            configSpec.numCPUs = cpus
            # cores per socket (number of cpus per socket!!!)
            configSpec.numCoresPerSocket = cpus
        self._configSpecAddExtraConfig(configSpec, extraConfig)
        folder = fromVm.parent
        if not folder:
            # BUG FIX: this fallback referenced an undefined name `dc`,
            # indexed a dict view with [0] (not subscriptable under the
            # future `builtins` dict), and misspelled `vmFolder` as
            # `vmFoler` -- it could never have executed successfully.
            # (`vmFolder` per the vSphere Datacenter API; confirm against
            # what getDatacenters() actually returns.)
            datacenter = list(self.getDatacenters().keys())[0]
            folder = datacenter.vmFolder.childEntity[0]
        cloneSpec = vim.vm.CloneSpec()
        cloneSpec.powerOn = poweron
        cloneSpec.template = False
        cloneSpec.location = relocationSpec
        cloneSpec.config = configSpec
        if fromSnapshot:
            cloneSpec.snapshot = fromSnapshot
        if csm:
            cloneSpec.customization = self.getCSMByName(csm)
        rc = 0
        for toName in target:
            # BUG FIX: log the current target, not the whole target list.
            self.logger.info("Cloning {} -> {}".format(fromVm.name, toName))
            task = fromVm.Clone(name=toName, folder=folder, spec=cloneSpec)
            vcTask = VcTask(task)
            vcTask.waitTaskDone()
            if vcTask.isSuccess():
                self.logger.info("Success")
            else:
                msg = vcTask.error()
                self.logger.error("Failed {}".format(repr(msg)))
                rc = 2
                continue
        if rc:
            raise RuntimeError("Clone failed")
        return rc
|
<filename>busca.py<gh_stars>0
import os
import tkinter as tk
from tkinter import ttk
#chave = input("Palavra chave: ")
#b = ("start chrome https://www.google.com/search?q=site%3Aempregacampinas.com.br+%22{}%22+inurl:2022".format(chave))
#os.system(b)
class Tela:
    """Search window for job posts on empregacampinas.com.br.

    Builds the form (keyword, city, month) and, on "Buscar", opens Chrome
    with a Google site-restricted query built from the fields.
    """

    # Month name -> two-digit month number used in the post URL.
    # Replaces the original twelve-branch if-chain with a lookup table.
    MESES = {
        "JANEIRO": "01", "FEVEREIRO": "02", "MARÇO": "03", "ABRIL": "04",
        "MAIO": "05", "JUNHO": "06", "JULHO": "07", "AGOSTO": "08",
        "SETEMBRO": "09", "OUTUBRO": "10", "NOVEMBRO": "11", "DEZEMBRO": "12",
    }

    def __init__(self, master):
        # Header image (EMPREGA CAMPINAS). A reference to each PhotoImage is
        # stored on its widget so Tk does not garbage-collect it.
        cab = tk.PhotoImage(file="cab.png")
        img = tk.Label(janela, image=cab)
        img.cab = cab
        img.place(x=145, y=2)
        # Footer image.
        rodape = tk.PhotoImage(file="rodape.png")
        img2 = tk.Label(janela, image=rodape)
        img2.rodape = rodape
        img2.place(x=0, y=370)
        # Eraser icons that clear the keyword / month fields.
        borracha = tk.PhotoImage(file="borracha.png")
        self.img3 = tk.Label(janela, image=borracha)
        self.img3.borracha = borracha
        self.img3.place(x=540, y=125)
        self.img3.bind("<Button-1>", self.bor1)
        borracha2 = tk.PhotoImage(file="borracha2.png")
        self.img4 = tk.Label(janela, image=borracha2)
        self.img4.borracha2 = borracha2
        self.img4.place(x=480, y=248)
        self.img4.bind("<Button-1>", self.bor2)
        # Keyword label and entry.
        self.chave = tk.Label(janela, text="Pesquisar vagas para:")
        self.chave["font"] = ("Helvetica", "17")
        self.chave.config(bg="white", foreground="darkblue")
        self.chave.place(x=30, y=130)
        self.chaveE = tk.Entry(janela)
        self.chaveE["font"] = ("Helvetica", "17")
        self.chaveE.config(bg="#C0C0C0", foreground="red")
        self.chaveE.place(x=270, y=130)
        # City label and entry.
        self.cidade = tk.Label(janela, text="Cidade:")
        self.cidade["font"] = ("Helvetica", "17")
        self.cidade.config(bg="white", foreground="darkblue")
        self.cidade.place(x=150, y=180)
        self.cidadeE = tk.Entry(janela)
        self.cidadeE["font"] = ("Helvetica", "17")
        self.cidadeE.config(bg="#C0C0C0", foreground="red")
        self.cidadeE.place(x=270, y=180)
        # Month label and combobox.
        self.mess = tk.Label(janela, text="Mês da vaga:")
        self.mess["font"] = ("Helvetica", "17")
        self.mess.config(bg="white", foreground="darkblue")
        self.mess.place(x=80, y=250)
        self.mesE = ttk.Combobox(janela, values=list(self.MESES))
        self.mesE["font"] = ("Helvetica", "17")
        self.mesE.place(x=270, y=250, width=200)
        # Action buttons.
        self.buscar = tk.Button(janela, text="Buscar")
        self.buscar["font"] = ("Helvetica", "17")
        self.buscar.config(bg="#FFD700", foreground="black")
        self.buscar.place(x=320, y=300)
        self.buscar.bind("<Button-1>", self.buscarr)
        self.limpar = tk.Button(janela, text="Limpar")
        self.limpar["font"] = ("Helvetica", "17")
        self.limpar.config(bg="red", foreground="white")
        self.limpar.place(x=200, y=300)
        self.limpar.bind("<Button-1>", self.limparr)

    def buscarr(self, event):
        """Open Chrome with a Google query built from the form fields."""
        chave = self.chaveE.get()
        # Translate a month name into its number; unknown input passes through
        # unchanged, matching the original if-chain behavior.
        mes = self.MESES.get(self.mesE.get(), self.mesE.get())
        cidade = self.cidadeE.get()
        # NOTE(review): the field values are interpolated into a shell command
        # without escaping (os.system); acceptable for personal use, unsafe
        # for untrusted input.
        b = ("start chrome https://www.google.com/search?q=site%3Aempregacampinas.com.br+%22{}%22+inurl:2022/{}+intext:{}".format(chave, mes, cidade))
        os.system(b)

    def limparr(self, event):
        """Clear the keyword and month fields.

        NOTE(review): the city field is intentionally left untouched here to
        preserve the original behavior -- confirm whether it should also clear.
        """
        self.chaveE.delete(0, "end")
        self.mesE.delete(0, "end")

    def bor1(self, event):
        """Eraser icon: clear the keyword field."""
        self.chaveE.delete(0, "end")

    def bor2(self, event):
        """Eraser icon: clear the month field."""
        self.mesE.delete(0, "end")
# Build the main window, attach the form, and start the Tk event loop.
janela = tk.Tk()
Tela(janela)
janela.geometry("600x420+200+100")  # 600x420 window at screen offset (200, 100)
janela.title("Busca vagas")
janela.config(bg="white")
janela.resizable(width=False, height=False)  # fixed-size window
janela.mainloop()
|
<reponame>zplab/zplib
import numpy
def weighted_mean_and_std(x, w):
    """Return the mean and standard deviation of the data points x, weighted by
    the weights in w (which do not need to sum to 1)."""
    weights = numpy.array(w, dtype=float)
    weights /= weights.sum()  # normalize so the weights sum to 1
    values = numpy.asarray(x)
    mean = (weights * values).sum()
    variance = (weights * (values - mean) ** 2).sum()
    return mean, numpy.sqrt(variance)
def weighted_mean(x, w):
    """Return the mean of the data points x, weighted by the weights in w
    (which do not need to sum to 1)."""
    weights = numpy.array(w, dtype=float)
    normalized = weights / weights.sum()
    return (normalized * numpy.asarray(x)).sum()
def _gaussian(x, mu=0, sigma=1):
return (1/numpy.sqrt(2 * numpy.pi * sigma**2) * numpy.exp(-0.5 * ((numpy.asarray(x)-mu)/sigma)**2))
def gaussian_mean(x, y, p, std=1):
    """Given a set of positions x where values y were observed, calculate
    the gaussian-weighted mean of those values at a set of new positions p,
    where the gaussian has a specified standard deviation."""
    estimates = [weighted_mean(y, _gaussian(x, mu=center, sigma=std))
                 for center in p]
    return numpy.array(estimates)
def savitzky_golay(data, kernel=11, order=4):
    """Apply Savitzky-Golay smoothing to the input data.
    http://en.wikipedia.org/wiki/Savitzky-Golay_filter

    Parameters:
        data: 1-d sequence of values to smooth.
        kernel: odd window size of the moving fit.
        order: order of the locally fitted polynomial; requires
            kernel >= order + 2.

    Returns: numpy array of smoothed values, same length as data.

    Raises TypeError on invalid kernel/order (kept for backward
    compatibility with existing callers, though ValueError would be usual).
    """
    kernel = abs(int(kernel))
    order = abs(int(order))
    if kernel % 2 != 1 or kernel < 1:
        raise TypeError("kernel size must be a positive odd number, was: %d" % kernel)
    if kernel < order + 2:
        raise TypeError("kernel is to small for the polynomals\nshould be > order + 2")
    half_window = (kernel - 1) // 2
    # First row of the pseudoinverse of the Vandermonde matrix gives the
    # least-squares smoothing coefficients for the window's central point.
    m = numpy.linalg.pinv([[k ** i for i in range(order + 1)]
                           for k in range(-half_window, half_window + 1)])[0]
    data = numpy.asarray(data, dtype=float)
    # Pad both ends with the edge values so the output keeps the input length.
    padded = numpy.concatenate((numpy.full(half_window, data[0]),
                                data,
                                numpy.full(half_window, data[-1])))
    # Correlating with the coefficient vector (convolve with it reversed)
    # replaces the original O(n * kernel) Python loop with one C-level pass.
    return numpy.convolve(padded, m[::-1], mode='valid')
def lowess(x, y, f=2/3., iters=3, outlier_threshold=6, weights=None, degree=1):
    """Apply LOWESS to fit a nonparametric regression curve to a scatterplot.
    http://en.wikipedia.org/wiki/Local_regression
    Parameters:
        x, y: 1-d arrays containing data points in x and y.
        f: smoothing parameter in range [0, 1]. Lower values = less smoothing.
        iters: number of robustifying iterations (after each of which outliers
            are detected and excluded). Larger numbers = more robustness, but
            slower run-time.
        outlier_threshold: data points that are further from the lowess estimate
            than outlier_threshold * numpy.median(numpy.abs(residuals)) are
            declared outliers.
        weights: optional per-point weights multiplied into each local fit, in
            addition to the distance-based tricube weights.
        degree: degree of locally weighted fit. Generally 1 is fine, though for
            data with local minima/maxima, degree=2 may fit better.
    Returns: array of smoothed y-values for the input x-values.
    """
    x = numpy.asarray(x)
    y = numpy.asarray(y)
    # r = number of neighbors included in each local fit (at least 4).
    r = max(4, int(numpy.ceil(f*(len(x)-1))))
    # below hogs RAM for large input, without much speed gain.
    # h = [numpy.sort(numpy.abs(x - xv))[r] for xv in x]
    # w = numpy.clip(numpy.abs(numpy.subtract.outer(x, x) / h), 0, 1)
    # w = (1 - w**3)**3
    delta = 1  # robustness weights; scalar 1 on the first pass
    max_dists = numpy.empty_like(x)
    if weights is None:
        weights = 1
    for it in range(iters):
        y_est = []
        for i, xv in enumerate(x):  # for xv, wv in zip(x, w.T):
            x_dists = numpy.abs(x - xv)
            if it == 0:
                # Distance to the r-th nearest neighbor, cached for later passes.
                max_dist = numpy.partition(x_dists, r)[r]
                max_dists[i] = max_dist
            else:
                max_dist = max_dists[i]
            # Tricube distance weights over the local neighborhood.
            wv = numpy.clip(x_dists/max_dist, 0, 1)
            wv = (1 - wv**3)**3
            final_weights = delta * wv * weights
            if degree > 1:
                poly = numpy.poly1d(numpy.polyfit(x, y, degree, w=final_weights))
                y_est.append(poly(xv))
            else:  # faster to hard-code weighted linear regression formula
                weighted_x = final_weights * x
                b1 = numpy.dot(final_weights, y)
                b2 = numpy.dot(weighted_x, y)
                A11 = numpy.sum(final_weights)
                A12 = A21 = numpy.sum(weighted_x)
                A22 = numpy.dot(weighted_x, x)
                # solve linear system A*beta = b where A = [[A11, A12], [A21, A22]] and b = [b1, b2]
                determinant = A11 * A22 - A12 * A21
                beta1 = (A22*b1 - A12*b2) / determinant
                beta2 = (A11*b2 - A21*b1) / determinant
                y_est.append(beta1 + beta2 * xv)
        y_est = numpy.array(y_est)
        residuals = y - y_est
        s = numpy.median(numpy.abs(residuals))
        if s > 0:
            # Tukey biweight: down-weight points with large residuals on the
            # next robustifying pass.
            delta = numpy.clip(residuals / (outlier_threshold * s), -1, 1)
            delta = (1 - delta**2)**2
    return numpy.array(y_est)
def robust_polyfit(x, y, degree=2, iters=3, outlier_threshold=6):
    """Fit a polynomial to scattered data, robust to outliers.

    Uses iteratively reweighted least squares: after each fit, residuals
    down-weight points via Tukey's biweight, so outliers contribute
    progressively less to subsequent fits.

    Parameters:
        x, y: 1-d arrays containing data points in x and y.
        degree: degree of the polynomial to fit.
        iters: number of robustifying iterations (after each of which outliers
            are down-weighted). Larger numbers = more robustness, but
            slower run-time.
        outlier_threshold: points whose residual exceeds
            outlier_threshold * median(|residuals|) receive zero weight
            (previously a hard-coded 6; parameter matches the lowess
            function above).

    Returns: (polynomial coefficients in increasing-degree order,
        array of smoothed y-values for the input x-values).
    """
    x, y = numpy.asarray(x), numpy.asarray(y)
    weights = numpy.ones(len(x), float)  # "delta" in original formulation
    for _ in range(iters):
        cs = numpy.polynomial.polynomial.polyfit(x, y, degree, w=weights)
        y_est = numpy.polynomial.polynomial.polyval(x, cs)
        residuals = y - y_est
        s = numpy.median(numpy.abs(residuals))
        if s > 0:  # guard against divide-by-zero when the fit is exact
            weights = (residuals / (outlier_threshold * s)).clip(-1, 1)
            weights = (1 - weights**2)**2
    return cs, y_est
|
#
#
# All Rights Reserved.
# Copyright 2013 OpenStack LLC
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from paxes_cinder.k2aclient import client
from paxes_cinder.k2aclient.v1 import uom_manager
from paxes_cinder.k2aclient.v1 import paxes_manager
from paxes_cinder.k2aclient.v1 import cluster_manager
from paxes_cinder.k2aclient.v1 import logicalpartition_manager
from paxes_cinder.k2aclient.v1 import managedsystem_manager
from paxes_cinder.k2aclient.v1 import managementconsole_manager
from paxes_cinder.k2aclient.v1 import sharedstoragepool_manager
from paxes_cinder.k2aclient.v1 import virtualioserver_manager
from paxes_cinder.k2aclient.v1 import clientnetworkadapter_manager
from paxes_cinder.k2aclient.v1 import virtualnetwork_manager
from paxes_cinder.k2aclient.v1 import virtualswitch_manager
from paxes_cinder.k2aclient.v1 import web_file_manager
from paxes_cinder.k2aclient.v1 import web_job_manager
from paxes_cinder.k2aclient.v1 import v1k2loader
from paxes_cinder.k2aclient.k2exclogger import K2ResponseLogger
class Client(object):
    """Top-level object to access k2.

    Create an instance with your creds::

        >>> client = Client(K2_USERNAME, K2_PASSWORD, ...)

    Then call methods on its managers::

        >>> client.managedsystem.list()
        ...
    """

    k2loader = v1k2loader

    def __init__(self, k2_url, k2_username, k2_password,
                 k2_auditmemento="<PASSWORD>", k2_certpath=None,
                 retries=0, timeout=None, excdir="/tmp/k2aexc",
                 k2o_use_cache=False):
        # Top-level managers.
        self.uom = uom_manager.UomManager(self)
        self.paxes = paxes_manager.PowerVcManager(self)

        # UOM element managers.
        self.cluster = cluster_manager.ClusterManager(self)
        self.logicalpartition = (
            logicalpartition_manager.LogicalPartitionManager(self))
        self.managedsystem = managedsystem_manager.ManagedSystemManager(self)
        self.managementconsole = (
            managementconsole_manager.ManagementConsoleManager(self))
        self.sharedstoragepool = (
            sharedstoragepool_manager.SharedStoragePoolManager(self))
        self.virtualioserver = (
            virtualioserver_manager.VirtualIOServerManager(self))
        self.clientnetworkadapter = (
            clientnetworkadapter_manager.ClientNetworkAdapterManager(self))
        self.virtualnetwork = (
            virtualnetwork_manager.VirtualNetworkManager(self))
        self.virtualswitch = (
            virtualswitch_manager.VirtualSwitchManager(self))

        # WEB managers.
        self.web_file = web_file_manager.FileManager(self)
        self.web_job = web_job_manager.JobManager(self)

        self.retries = retries

        # Exception logging directory is mandatory.
        if excdir is None:
            raise ValueError("excdir may not be assigned to None")
        self.exclogger = K2ResponseLogger(excdir)

        self.client = client.HTTPClient(k2_url,
                                        k2_username,
                                        k2_password,
                                        k2_auditmemento,
                                        k2_certpath,
                                        timeout=timeout,
                                        exclogger=self.exclogger,
                                        k2o_use_cache=k2o_use_cache)

    def authenticate(self):
        """Authenticate against the server.

        Normally this is called automatically when you first access the API,
        but you can call this method to force authentication right now.

        Returns on success; raises :exc:`exceptions.Unauthorized` if the
        credentials are wrong.
        """
        self.client.authenticate()
|
import threading
import requests
import traceback
import time
import os
import json
import atexit
import subprocess
import logging
import sys
import configparser
from pathlib import Path
# BEGIN Parsing config
# Read supervisor settings from an INI file in the working directory.
config = configparser.ConfigParser()
config.read("supervisor-config.ini")
default_config = config["DEFAULT"]
SUBMITTED_WORK_FILENAME = default_config["SubmittedWorkFilename"]  # log of already-submitted nonces
WORKER_EXECUTABLE_PATH = default_config["WorkerExecutablePath"]  # miner binary launched per device
VALID_NONCES_DIRECTORY = default_config["ValidNoncesDirectory"]  # workers drop found nonces here
CONTROLLER_URI = default_config["ControllerUri"]  # HTTP endpoint serving mining inputs
DEVICE_LIST_STRING = default_config.get("DeviceList") or "0"  # comma-separated device ids; defaults to device 0
# END
def create_valid_nonces_directory():
    """Ensure the directory workers write valid nonces into exists.

    Uses makedirs(exist_ok=True) so the previous exists()/mkdir()
    check-then-create race cannot raise FileExistsError, and missing
    parent directories are created as well.
    """
    os.makedirs(VALID_NONCES_DIRECTORY, exist_ok=True)
def get_or_init_submitted_nonces() -> set[str]:
    """Return the set of already-submitted nonces, creating the log if absent.

    Each line of the log file holds one nonce; entries are normalised to
    lower-case with surrounding whitespace stripped.
    """
    Path(SUBMITTED_WORK_FILENAME).touch(exist_ok=True)
    with open(SUBMITTED_WORK_FILENAME, 'r') as f:
        return {line.strip().lower() for line in f}
def append_submitted_nonce(nonce):
    """Append *nonce* on its own line to the submitted-work log file."""
    with open(SUBMITTED_WORK_FILENAME, 'a+') as f:
        f.write(f'{nonce}\n')
def spawn_worker(sender_bits, last_mined_assets, difficulty_target, nonces_directory, device_id):
    """Launch one mining worker subprocess for *device_id*.

    Parameters are forwarded verbatim to the worker executable's CLI flags.
    Returns the subprocess.Popen handle of the launched worker.
    Raises FileNotFoundError if the configured worker executable is missing
    (previously a bare Exception; FileNotFoundError is more precise and is
    still an Exception subclass, so existing handlers keep working).
    """
    if not os.path.exists(WORKER_EXECUTABLE_PATH):
        raise FileNotFoundError(f"Worker Executable Path '{WORKER_EXECUTABLE_PATH}' not found!")
    # The worker will start at a random nonce between 0 and 2^64
    return subprocess.Popen([
        WORKER_EXECUTABLE_PATH,
        '-a', sender_bits,
        '-l', last_mined_assets,
        '-d', difficulty_target,
        '-n', nonces_directory,
        '-x', device_id
    ])
class NonceStatus:
    """String constants for the controller's nonce-validation verdicts.

    Values mirror the 'nonceStatus' strings found in the controller's
    /submit-work response payload (see work_submitter in main()).
    """
    FAILS_DIFFICULTY_TEST = "FAILS_DIFFICULTY_TEST"
    PRODUCES_EXISTING_MPUNK = "PRODUCES_EXISTING_MPUNK"
    PRODUCES_EXISTING_OG_PUNK = "PRODUCES_EXISTING_OG_PUNK"
    VALID = "VALID"
def main():
    """Run the supervisor: three daemon threads fetch mining inputs,
    (re)spawn worker processes, and submit finished work to the controller."""
    logger = logging.getLogger("mpunks-supervisor")
    logger.setLevel(logging.DEBUG)
    logger.addHandler(logging.StreamHandler(sys.stdout))
    # State shared between the threads below; guarded by thread_state_lock.
    thread_state_lock = threading.Lock()
    thread_state = {
        'recentlyFetchedInputs': None,   # latest inputs fetched from the controller
        'workerManagerInputs': None,     # inputs the current workers were spawned with
        'processes': [],                 # live worker Popen handles
        'exit': False                    # set when a worker fails to launch
    }

    def kill_workers():
        # Kill every live worker; also registered with atexit so workers
        # do not outlive the supervisor process.
        for p in thread_state['processes']:
            p.kill()
        thread_state['processes'] = []

    atexit.register(kill_workers)

    def inputs_fetcher(controller_uri, state):
        # Poll the controller for current mining inputs every 5 seconds.
        while True:
            with thread_state_lock:
                try:
                    resp = requests.get(f'{controller_uri}/mining-inputs').json()
                    status = resp['status']
                    if status != 'success':
                        raise Exception(f'Received invalid status in response: {resp}')
                    state['recentlyFetchedInputs'] = resp['payload']
                    logger.info('Successfully fetched and updated mining inputs')
                    logger.info(state)
                except Exception as e:
                    logger.error(f'Error fetching inputs: {e}')
                    traceback.print_stack()
            time.sleep(5)

    def worker_manager(state):
        # Re-spawn one worker per configured device whenever the fetched
        # inputs differ from those the current workers were started with.
        while True:
            with thread_state_lock:
                # JSON dumps give a cheap deep-equality test for plain dicts.
                if json.dumps(state['workerManagerInputs']) != json.dumps(state['recentlyFetchedInputs']):
                    logger.info("Inputs diff detected. Re-spawning workers...")
                    kill_workers()
                    recently_fetched_inputs = state['recentlyFetchedInputs']
                    last_mined_assets = recently_fetched_inputs['lastMinedAssets']
                    sender_address = recently_fetched_inputs['senderAddress']
                    difficulty_target = recently_fetched_inputs['difficultyTarget']
                    device_ids = DEVICE_LIST_STRING.split(',')
                    for device_id in device_ids:
                        p = spawn_worker(
                            sender_address,
                            last_mined_assets,
                            difficulty_target,
                            VALID_NONCES_DIRECTORY,
                            device_id
                        )
                        # Give the worker a moment to start, then check it is alive.
                        time.sleep(3)
                        process_alive = (p.poll() is None)
                        if not process_alive:
                            logger.fatal(f'Failed to launch worker with device_id {device_id}. Exiting...')
                            state['exit'] = True
                            break
                        state['processes'].append(p)
                    state['workerManagerInputs'] = recently_fetched_inputs
            time.sleep(1)

    def work_submitter(controller_uri):
        # Watch the nonces directory and POST any not-yet-submitted nonce.
        while True:
            try:
                nonces = set(map(str.lower, os.listdir(VALID_NONCES_DIRECTORY)))
                submitted_nonces = get_or_init_submitted_nonces()
                unsubmitted_nonces = nonces - submitted_nonces
                for nonce_file_name in unsubmitted_nonces:
                    hex_nonce = f'0x{nonce_file_name}'
                    resp = requests.post(f'{controller_uri}/submit-work?nonce={hex_nonce}')
                    json_data = resp.json()
                    req_status = json_data['status']
                    if req_status == "success":
                        append_submitted_nonce(nonce_file_name)
                    else:
                        payload = json_data['payload']
                        if 'nonceStatus' in payload:
                            nonce_status = payload['nonceStatus']
                            if nonce_status != NonceStatus.VALID:
                                # Only add to submitted nonces if the nonce wasn't valid
                                append_submitted_nonce(nonce_file_name)
                    log_payload = {
                        'action': 'submitted_work',
                        'nonce': hex_nonce,
                        'resp': resp.json()
                    }
                    logger.info(json.dumps(log_payload))
            except Exception as e:
                logger.error(f'Error while watching for work to submit, or while submitting work: {e}')
                traceback.print_stack()
            time.sleep(5)

    inputs_fetcher_thread = threading.Thread(target=inputs_fetcher, args=(CONTROLLER_URI, thread_state), daemon=True)
    worker_manager_thread = threading.Thread(target=worker_manager, args=(thread_state,), daemon=True)
    work_submitter_thread = threading.Thread(target=work_submitter, args=(CONTROLLER_URI,), daemon=True)
    create_valid_nonces_directory()
    inputs_fetcher_thread.start()
    worker_manager_thread.start()
    work_submitter_thread.start()
    # Main thread idles; exits with an error code if a worker failed to spawn.
    while True:
        if thread_state['exit']:
            sys.exit(1)
        time.sleep(0.3)
# Script entry point.
if __name__ == '__main__':
    main()
|
<reponame>noragami/scraptimus
#!/usr/bin/env python
# -*- coding: utf8 -*-
from bs4 import BeautifulSoup
from argparse import ArgumentParser
from platform import python_version_tuple
import json
import pandas as pd
import re
import requests
import time
# Python 2 compatibility: make input() decode UTF-8 like Python 3's input().
if python_version_tuple()[0] == u'2':
    def input(prompt): return raw_input(prompt.encode('utf8')).decode('utf8')

__author__ = u'"noragami", "noragami" <<EMAIL>>'
__version__ = '1.0'
class Scraptimus():
    """Scraper for the Co-Optimus cooperative-games table.

    Walks the site's paginated AJAX endpoint, collects one record per
    game/system row, and exports the result to JSON (default) or CSV.
    """

    URL = 'http://www.co-optimus.com/ajax/ajax_games.php?game-title-filter=&system=&countDirection=at+least&playerCount=2&page=%d&sort=&sortDirection='

    def __init__(self):
        # Print the startup banner.
        print(u"""
______ ______ ______ ______ ______ ______ __ __ __ __ __ ______
/\ ___\/\ ___\/\ == \/\ __ \/\ == \/\__ _\/\ \/\ "-./ \/\ \/\ \/\ ___\
\ \___ \ \ \___\ \ __<\ \ __ \ \ _-/\/_/\ \/\ \ \ \ \-./\ \ \ \_\ \ \___ \
\/\_____\ \_____\ \_\ \_\ \_\ \_\ \_\ \ \_\ \ \_\ \_\ \ \_\ \_____\/\_____\
\/_____/\/_____/\/_/ /_/\/_/\/_/\/_/ \/_/ \/_/\/_/ \/_/\/_____/\/_____/
created by {__author__}
Version: {__version__}
""".format(__author__=__author__, __version__=__version__))

    def set_args(self):
        """Create parser for command line arguments."""
        parser = ArgumentParser(
            prog=u'python -m scraptimus',
            description='Scrape and export to a file the list of games found at Co-optimus website.\n\t\tDefault format is json.')
        parser.add_argument('-f', '--filename',
                            help=u'Override the default filename')
        group = parser.add_mutually_exclusive_group()
        group.add_argument(
            '-j', '--json', help=u'Export to a json file', action='store_true')
        group.add_argument(
            '-c', '--csv', help=u'Export to a csv file', action='store_true')
        parser.add_argument(
            '-s', '--startpage', help=u'Define where to start. Default is 1')
        parser.add_argument(
            '-e', '--endpage', help=u'Define where to end. Default is all the pages')
        return parser

    def scraper(self, start_page=None, end_page=None, records=None):
        """Scrape pages recursively from start_page up to end_page (or the end).

        Bug fix: the previous signature used a mutable default argument
        (records=[]), which persisted across separate scraper() calls; None
        is now the sentinel and a fresh list is created per call. Passing an
        explicit list still works as before.

        Returns the accumulated list of record dicts.
        """
        if records is None:
            records = []
        print('Started... please wait.')
        r = requests.get(self.URL % start_page)
        soup = BeautifulSoup(r.text, 'lxml')  # html.parser is slower
        rows = iter(soup.find('table').find_all('tr'))
        # skip first (header) row
        next(rows)
        for row in rows:
            idx = row['id']
            cells = row.find_all('td')
            title = cells[0].strong.string
            genre = cells[0].label.string
            system = cells[1].a.string
            online = int(cells[2].string)
            couch = int(cells[3].string)
            combo = int(cells[4].string)
            features = []
            for link in cells[5].find_all('a'):
                features.extend(link['class'])
            features = [x for x in features if features.count(x) == 1]  # remove duplicated
            features.remove('features-icon')  # remove unwanted
            review_score = float(
                cells[6].div.div.string) if cells[6].div else None
            user_rating = float(
                cells[7].i.string) if cells[7].i else None
            release_date = cells[8].span.string if cells[8].span else None
            records.append({'id': idx, 'title': title, 'genre': genre, 'system': system,
                            'online': online, 'couch': couch, 'combo': combo,
                            'features': ','.join(features), 'review_score': review_score,
                            'user_rating': user_rating, 'release_date': release_date})
        # Follow the pager's "Next" link, if present, until end_page is reached.
        for tag in soup.find_all(string=re.compile("^Next$")):
            next_page = int(re.search(r'\d+', tag.parent['onclick']).group())
            if end_page is None or end_page > next_page:
                self.scraper(start_page=next_page,
                             end_page=end_page, records=records)
            else:
                break
        return records

    def export_to_csv(self, filename=None, records=None, separator='|'):
        """Write *records* to '<filename>.csv' using *separator* as delimiter."""
        filename = '%s.csv' % filename
        df = pd.DataFrame(records, columns=['id', 'title', 'genre', 'system', 'online',
                                            'couch', 'combo', 'features', 'review_score',
                                            'user_rating', 'release_date'])
        df.to_csv(filename, index=False, encoding='utf-8', sep=separator)

    def export_to_json(self, filename=None, records=None):
        """Write *records* to '<filename>.json' as pretty-printed JSON."""
        filename = '%s.json' % filename
        with open(filename, 'w') as outfile:
            json.dump(records, outfile, indent=4)

    def scrap(self):
        """Parse CLI args, run the scraper, and export in the chosen format."""
        parser = self.set_args()
        args = parser.parse_args()
        start_page = int(args.startpage) if args.startpage else 1
        end_page = int(args.endpage) if args.endpage else None
        records = self.scraper(start_page=start_page, end_page=end_page)
        filename = '%s-%d-%d' % (
            args.filename if args.filename else '%s' % time.strftime("%Y%m%d"),
            start_page,
            end_page if end_page else 0)
        if args.csv:
            self.export_to_csv(filename=filename, records=records)
        else:
            self.export_to_json(filename=filename, records=records)
        print('Finished!')
# Script entry point: build the scraper and run the full scrape/export flow.
if __name__ == '__main__':
    scraptimus = Scraptimus()
    scraptimus.scrap()
|
import os
import pickle
import unittest
import unittest.mock  # required: `import unittest` alone does not load the mock submodule used below

from pandas import read_csv
from sklearn.linear_model import SGDClassifier
from sklearn.tree import DecisionTreeClassifier

from fp.traindata_samplers import CompleteData
from fp.missingvalue_handlers import CompleteCaseAnalysis
from fp.scalers import NoScaler
from fp.learners import NonTunedLogisticRegression, NonTunedDecisionTree
from fp.pre_processors import NoPreProcessing
from fp.post_processors import NoPostProcessing
from fp.experiments import BinaryClassificationExperiment
class TestSuiteExperiments(unittest.TestCase):
    """Tests for BinaryClassificationExperiment using small pickled fixtures.

    The generate_timestamp method is mocked so file paths derived from the
    execution timestamp are deterministic across runs.
    """

    @unittest.mock.patch.object(BinaryClassificationExperiment, 'generate_timestamp', unittest.mock.MagicMock(return_value='2020-01-01_00-00-00-000'))
    def setUp(self):
        # Fixtures capture the dataset at successive pipeline stages.
        self.data = read_csv('fp/tests/resource/input/data.csv')
        self.annotated_train_data = pickle.load(open('fp/tests/resource/input/data_annotated.obj', 'rb'))
        self.validation_dataset = pickle.load(open('fp/tests/resource/input/data_validation.obj', 'rb'))
        self.validation_dataset_with_predictions = pickle.load(open('fp/tests/resource/input/data_validation_with_predictions.obj', 'rb'))
        self.testset_with_predictions = pickle.load(open('fp/tests/resource/input/data_test_with_predictions.obj', 'rb'))

        class TestBinaryClassificationExperiment(BinaryClassificationExperiment):
            # Concrete experiment wired with no-op pipeline components and
            # the credit test dataset.
            def __init__(self, data):
                # User defined arguments
                fixed_random_seed = 0xbeef
                train_data_sampler = CompleteData()
                missing_value_handler = CompleteCaseAnalysis()
                numeric_attribute_scaler = NoScaler()
                learners = [NonTunedLogisticRegression(), NonTunedDecisionTree()]
                pre_processors = [NoPreProcessing()]
                post_processors = [NoPostProcessing()]

                # Fixed arguments for dataset
                test_set_ratio = 0.2
                validation_set_ratio = 0.1
                label_name = 'credit'
                positive_label = 1
                numeric_attribute_names = ['month', 'credit_amount', 'residence_since', 'age', 'number_of_credits',
                                           'people_liable_for']
                categorical_attribute_names = ['credit_history', 'savings', 'employment']
                attributes_to_drop_names = ['personal_status', 'status', 'purpose', 'investment_as_income_percentage',
                                            'other_debtors', 'property', 'installment_plans', 'housing', 'skill_level',
                                            'telephone', 'foreign_worker']
                protected_attribute_names = ['sex']
                privileged_classes = [[1.0]]
                privileged_groups = [{'sex': 1.0}]
                unprivileged_groups = [{'sex': 0.0}]
                dataset_metadata = {'label_maps': [{1.0: 1, 0.0: 0}],
                                    'protected_attribute_maps': [{1.0: 'male', 0.0: 'female'}]
                                    }
                dataset_name = 'test_dataset'

                # Constructor call
                super().__init__(fixed_random_seed, test_set_ratio, validation_set_ratio,
                                 label_name, positive_label, numeric_attribute_names,
                                 categorical_attribute_names, attributes_to_drop_names,
                                 train_data_sampler, missing_value_handler, numeric_attribute_scaler,
                                 learners, pre_processors, post_processors,
                                 protected_attribute_names, privileged_classes, privileged_groups,
                                 unprivileged_groups, dataset_metadata, dataset_name)

            def load_raw_data(self):
                # Override: the raw data comes from the local test fixture.
                return read_csv('fp/tests/resource/input/data.csv')

        self.experiment = TestBinaryClassificationExperiment(self.data)

    def test_constructor(self):
        """All constructor arguments must be stored unchanged on the experiment."""
        self.assertEqual(self.experiment.fixed_random_seed, 0xbeef)
        self.assertEqual(self.experiment.test_set_ratio, 0.2)
        self.assertEqual(self.experiment.validation_set_ratio, 0.1)
        self.assertEqual(self.experiment.label_name, 'credit')
        self.assertEqual(self.experiment.positive_label, 1)
        self.assertEqual(self.experiment.numeric_attribute_names, ['month', 'credit_amount', 'residence_since',
                                                                   'age', 'number_of_credits', 'people_liable_for'])
        self.assertEqual(self.experiment.categorical_attribute_names, ['credit_history', 'savings', 'employment'])
        self.assertEqual(self.experiment.attributes_to_drop_names, ['personal_status', 'status', 'purpose',
                                                                    'investment_as_income_percentage', 'other_debtors',
                                                                    'property', 'installment_plans', 'housing',
                                                                    'skill_level', 'telephone', 'foreign_worker'])
        self.assertEqual(type(self.experiment.train_data_sampler), CompleteData)
        self.assertEqual(type(self.experiment.missing_value_handler), CompleteCaseAnalysis)
        self.assertEqual(type(self.experiment.numeric_attribute_scaler), NoScaler)
        self.assertEqual(len(self.experiment.learners), 2)
        self.assertEqual(type(self.experiment.learners[0]), NonTunedLogisticRegression)
        self.assertEqual(type(self.experiment.learners[1]), NonTunedDecisionTree)
        self.assertEqual(len(self.experiment.pre_processors), 1)
        self.assertEqual(type(self.experiment.pre_processors[0]), NoPreProcessing)
        self.assertEqual(len(self.experiment.post_processors), 1)
        self.assertEqual(type(self.experiment.post_processors[0]), NoPostProcessing)
        self.assertEqual(self.experiment.protected_attribute_names, ['sex'])
        self.assertEqual(self.experiment.privileged_classes, [[1.0]])
        self.assertEqual(self.experiment.privileged_groups, [{'sex': 1.0}])
        self.assertEqual(self.experiment.unprivileged_groups, [{'sex': 0.0}])
        self.assertEqual(self.experiment.dataset_metadata, {'label_maps': [{1.0: 1, 0.0: 0}],
                                                            'protected_attribute_maps': [{1.0: 'male', 0.0: 'female'}]
                                                            })
        self.assertEqual(self.experiment.dataset_name, 'test_dataset')
        self.assertEqual(self.experiment.log_path, 'logs/')
        self.assertEqual(self.experiment.exec_timestamp, '2020-01-01_00-00-00-000')

    def test_unique_file_name(self):
        """File names must encode learner + every pipeline component."""
        self.assertEqual(self.experiment.unique_file_name(self.experiment.learners[0],
                                                          self.experiment.pre_processors[0],
                                                          self.experiment.post_processors[0]),
                         'test_dataset__LogisticRegression-notuning__complete_case__complete_data__no_scaler__no_pre_processing__no_post_processing')
        self.assertEqual(self.experiment.unique_file_name(self.experiment.learners[1],
                                                          self.experiment.pre_processors[0],
                                                          self.experiment.post_processors[0]),
                         'test_dataset__DecisionTree-notuning__complete_case__complete_data__no_scaler__no_pre_processing__no_post_processing')

    def test_generate_file_path(self):
        """Paths are rooted at logs/<timestamp>_<dataset>/."""
        self.assertEqual(self.experiment.generate_file_path(''), 'logs/2020-01-01_00-00-00-000_test_dataset/')
        self.assertEqual(self.experiment.generate_file_path('test.csv'), 'logs/2020-01-01_00-00-00-000_test_dataset/test.csv')

    @unittest.mock.patch.object(BinaryClassificationExperiment, 'generate_timestamp', unittest.mock.MagicMock(return_value='2020-01-01_00-00-00-000'))
    def test_generate_timestamp(self):
        """The (mocked) timestamp generator returns the fixed value."""
        self.assertEqual(self.experiment.generate_timestamp(), '2020-01-01_00-00-00-000')

    def test_learn_classifier(self):
        """Each learner must produce a classifier of its expected sklearn type."""
        result_learner_0 = self.experiment.learn_classifier(self.experiment.learners[0],
                                                            self.annotated_train_data,
                                                            self.experiment.fixed_random_seed)
        self.assertEqual(type(result_learner_0), type(SGDClassifier()))
        result_learner_1 = self.experiment.learn_classifier(self.experiment.learners[1],
                                                            self.annotated_train_data,
                                                            self.experiment.fixed_random_seed)
        self.assertEqual(type(result_learner_1), type(DecisionTreeClassifier()))

    def test_preprocess_data(self):
        """NoPreProcessing must return the input dataset unchanged."""
        result = self.experiment.preprocess_data(self.experiment.pre_processors[0],
                                                 self.annotated_train_data)
        self.assertEqual(type(result), type(self.annotated_train_data))
        self.assertEqual(result, self.annotated_train_data)

    def test_post_process_predictions(self):
        """NoPostProcessing must return predictions of the same type as the test set."""
        result = self.experiment.post_process_predictions(self.experiment.post_processors[0],
                                                          self.validation_dataset,
                                                          self.validation_dataset_with_predictions,
                                                          self.testset_with_predictions)
        self.assertEqual(type(result), type(self.testset_with_predictions))
# Allow running this test module directly.
if __name__ == '__main__':
    unittest.main()
|
import numpy as np
import re
class SWEEncoder_ja:
    """Subword encoder/decoder for Japanese text.

    Maps text to vocabulary ids via longest-match lookup over a
    comma-grouped subword vocabulary (`bpe`), with special tokens for
    URLs, e-mails, phone numbers, dates, prices, emoji, box-drawing
    glyphs, and a raw UTF-8 byte fallback.
    """

    def __init__(self, bpe, emoji):
        # A vocabulary line may hold several comma-separated spellings that
        # share one id; a line that IS a comma is kept whole.
        self.bpe = [[b] if (b==',' or ',' not in b) else b.split(',') for b in bpe]
        self.swe = {}  # spelling -> vocabulary id
        for idx, b in enumerate(self.bpe):
            for wd in b:
                self.swe[wd] = idx
        self.emoji = emoji  # dict with 'emoji' (char -> token) and 'emoji_inv' maps
        self.maxlen = np.max([len(w) for w in self.swe.keys()])  # longest spelling, bounds lookahead
        # Normalisation patterns used by clean_text(): URL, e-mail, phone,
        # two date formats (western and era years), and prices.
        self.content_repatter1 = re.compile(r"(https?|ftp)(:\/\/[-_\.!~*\'()a-zA-Z0-9;\/?:\@&=\+$,%#]+)")
        self.content_repatter2 = re.compile(r"[A-Za-z0-9\._+]*@[\-_0-9A-Za-z]+(\.[A-Za-z]+)*")
        self.content_repatter3 = re.compile(r'[\(]{0,1}[0-9]{2,4}[\)\-\(]{0,1}[0-9]{2,4}[\)\-]{0,1}[0-9]{3,4}')
        self.content_repatter4 = re.compile(r"([12]\d{3}[/\-年])*(0?[1-9]|1[0-2])[/\-月]((0?[1-9]|[12][0-9]|3[01])日?)*(\d{1,2}|:|\d{1,2}時|\d{1,2}分|\(日\)|\(月\)|\(火\)|\(水\)|\(木\)|\(金\)|\(土\)|㈰|㈪|㈫|㈬|㈭|㈮|㈯)*")
        self.content_repatter5 = re.compile(r"(明治|大正|昭和|平成|令和|㍾|㍽|㍼|㍻|\u32ff)\d{1,2}年(0?[1-9]|1[0-2])月(0?[1-9]|[12][0-9]|3[01])日(\d{1,2}|:|\d{1,2}時|\d{1,2}分|\(日\)|\(月\)|\(火\)|\(水\)|\(木\)|\(金\)|\(土\)|㈰|㈪|㈫|㈬|㈭|㈮|㈯)*")
        self.content_repatter6 = re.compile(r'((0|[1-9]\d*|[1-9]\d{0,2}(,\d{3})+)*億)*((0|[1-9]\d*|[1-9]\d{0,2}(,\d{3})+)*万)*((0|[1-9]\d*|[1-9]\d{0,2}(,\d{3})+)*千)*(0|[1-9]\d*|[1-9]\d{0,2}(,\d{3})+)*(千円|万円|千万円|円|千ドル|万ドル|千万ドル|ドル|千ユーロ|万ユーロ|千万ユーロ|ユーロ)+(\(税込\)|\(税抜\)|\+tax)*')
        # Box-drawing and block-element glyphs all collapse to <BLOCK>.
        keisen = "─━│┃┄┅┆┇┈┉┊┋┌┍┎┏┐┑┒┓└┕┖┗┘┙┚┛├┝┞┟┠┡┢┣┤┥┦┧┨┩┪┫┬┭┮┯┰┱┲┳┴┵┶┷┸┹┺┻┼┽┾┿╀╁╂╃╄╅╆╇╈╉╊╋╌╍╎╏═║╒╓╔╕╖╗╘╙╚╛╜╝╞╟╠╡╢╣╤╥╦╧╨╩╪╫╬╭╮╯╰╱╲╳╴╵╶╷╸╹╺╻╼╽╾╿"
        blocks = "▀▁▂▃▄▅▆▇█▉▊▋▌▍▎▏▐░▒▓▔▕▖▗▘▙▚▛▜▝▞▟"
        self.content_trans1 = str.maketrans({k:'<BLOCK>' for k in keisen+blocks})

    def __len__(self):
        # Vocabulary size (number of ids, not spellings).
        return len(self.bpe)

    def clean_text(self, content):
        """Replace URLs/e-mails/phones/dates/prices with special tokens and
        collapse runs of box-drawing characters into a single <BLOCK>."""
        content = self.content_repatter1.sub("<URL>" ,content)
        content = self.content_repatter2.sub("<EMAIL>" ,content)
        content = self.content_repatter3.sub("<TEL>" ,content)
        content = self.content_repatter4.sub("<DATE>" ,content)
        content = self.content_repatter5.sub("<DATE>" ,content)
        content = self.content_repatter6.sub("<PRICE>" ,content)
        content = content.translate(self.content_trans1)
        while '<BLOCK><BLOCK>' in content:
            content = content.replace('<BLOCK><BLOCK>', '<BLOCK>')
        return content

    def encode(self, text, clean=False):
        """Encode *text* into a list of vocabulary ids."""
        # Normalise whitespace/newlines/dashes to special tokens.
        # NOTE(review): the two replace(' ', '<SP>') calls look identical here;
        # presumably one originally targeted the full-width space -- confirm
        # against the upstream vocabulary.
        text = text.replace(' ', '<SP>')
        text = text.replace(' ', '<SP>')
        text = text.replace('\r\n', '<BR>')
        text = text.replace('\n', '<BR>')
        text = text.replace('\r', '<BR>')
        text = text.replace('\t', '<TAB>')
        text = text.replace('—', 'ー')
        text = text.replace('−', 'ー')
        # Replace known emoji characters with their vocabulary tokens.
        for k,v in self.emoji['emoji'].items():
            if k in text:
                text = text.replace(k, v)
        if clean:
            text = self.clean_text(text)

        def checkkigou(x):
            # True for single characters whose 2-byte UTF-8 encoding falls in
            # the symbol ranges mapped to <KIGOU>.
            e = x.encode()
            if len(x) == 1 and len(e)==2:
                c = (int(e[0])<<8)+int(e[1])
                if (c >= 0xc2a1 and c <= 0xc2bf) or (c >= 0xc780 and c <= 0xc783) or \
                   (c >= 0xcab9 and c <= 0xcbbf) or (c >= 0xcc80 and c <= 0xcda2):
                    return True
            return False

        def checku2e(x):
            # True for single characters whose 3-byte UTF-8 encoding lies in
            # the U+2000..U+2BFF punctuation/symbol area.
            e = x.encode()
            if len(x) == 1 and len(e)==3:
                c = (int(e[0])<<16)+(int(e[1])<<8)+int(e[2])
                if c >= 0xe28080 and c <= 0xe2b07f:
                    return True
            return False

        pos = 0
        result = []
        while pos < len(text):
            # Lookahead window: full maxlen for special '<...>' tokens, else 3 chars.
            end = min(len(text), pos+self.maxlen+1) if text[pos]=='<' else pos+3
            kouho = []  # candidate (id, end) pairs, longest match first
            for e in range(end, pos, -1):
                wd = text[pos:e]
                if wd in self.swe:
                    if wd[0]=='<' and len(wd) > 2:
                        # A special token wins outright.
                        kouho = [(self.swe[wd], e)]
                        break
                    else:
                        kouho.append((self.swe[wd], e))
            if len(kouho) > 0:
                # Among matches, pick the one with the smallest id.
                wp,e = sorted(kouho, key=lambda x:x[0])[0]
                result.append(wp)
                pos = e
            else:
                # No vocabulary match: fall back to symbol classes or raw bytes.
                end = pos+1
                wd = text[pos:end]
                if checkkigou(wd):
                    result.append(self.swe['<KIGOU>'])
                elif checku2e(wd):
                    result.append(self.swe['<U2000U2BFF>'])
                else:
                    for i in wd.encode('utf-8'):
                        result.append(self.swe['<|byte%d|>'%i])
                pos = end
        return result

    def decode(self, tokens, breakline='\n'):
        """Decode a list of ids back to text; <BR> becomes *breakline*."""
        words = []
        byte_tokens = []  # pending raw-byte tokens, flushed as UTF-8
        for i in tokens:
            word = self.bpe[i][0]
            if word[:6] == '<|byte' and word[-2:] == '|>':
                byte_tokens.append(int(word[6:-2]))
            else:
                if len(byte_tokens) > 0:
                    words.append(bytearray(byte_tokens).decode('utf-8', errors='replace'))
                    byte_tokens = []
                if word[:7] == '<|emoji' and word[-2:] == '|>':
                    words.append(self.emoji['emoji_inv'][word])
                elif word == '<SP>':
                    words.append(' ')
                elif word == '<BR>':
                    words.append(breakline)
                elif word == '<TAB>':
                    words.append('\t')
                elif word == '<BLOCK>':
                    words.append('▀')
                elif word == '<KIGOU>':
                    words.append('ǀ')
                elif word == '<U2000U2BFF>':
                    words.append('‖')
                else:
                    words.append(word)
        if len(byte_tokens) > 0:
            words.append(bytearray(byte_tokens).decode('utf-8', errors='replace'))
        text = ''.join(words)
        return text
if __name__=='__main__':
    # Command-line entry point: tokenize every *.txt under --src_dir using
    # --num_process worker processes, then merge all chunks into one .npz.
    import argparse
    import shutil
    import os
    import json
    from tqdm import tqdm
    import pickle
    import uuid
    from multiprocessing import Pool

    parser = argparse.ArgumentParser()
    parser.add_argument("--src_dir", help="source dir", required=True )
    parser.add_argument("--dst_file", help="destnation file", required=True )
    parser.add_argument("--tmp_dir", help="tempolary file", default="tmpfiles" )
    parser.add_argument("--vocabulary", help="vocabulary file", default="ja-swe32k.txt" )
    parser.add_argument("--num_process", help="process num", type=int, default=8 )
    parser.add_argument("--combine", help="Concatenate files with <|endoftext|> separator into chunks of this minimum size", type=int, default=50000 )
    parser.add_argument('--clean_text', action='store_true')
    parser.add_argument("--tmpsilze", help="num chunks in tempolary file", type=int, default=5000 )
    args = parser.parse_args()

    # Start from an empty temporary spill directory.
    if os.path.isdir(args.tmp_dir):
        shutil.rmtree(args.tmp_dir)
    os.mkdir(args.tmp_dir)

    with open(args.vocabulary, encoding='utf-8') as f:
        bpe = f.read().split('\n')
    with open('emoji.json', encoding='utf-8') as f:
        emoji = json.loads(f.read())
    enc = SWEEncoder_ja(bpe, emoji)

    array_file = []  # (dir, subdirs, files) tuples; filled before Pool.map below

    def _proc(i):
        # Worker i handles every num_process-th directory. File texts are
        # joined with <|endoftext|>, encoded once >= --combine chars have
        # accumulated, and spilled to pickle files of <= --tmpsilze chunks.
        token_chunks = []
        raw_text = ''
        for j, (curDir, dirs, files) in enumerate(array_file):
            if not (j % args.num_process == i):
                continue
            print('append #',curDir)
            for file in tqdm(files):
                if file.endswith(".txt"):
                    input = os.path.join(curDir, file)
                    with open(input, 'r', encoding='utf-8') as fp:
                        raw_text += fp.read()
                    raw_text += '<|endoftext|>'
                    if len(raw_text) >= args.combine:
                        tokens = np.stack(enc.encode(raw_text, clean=args.clean_text))
                        token_chunks.append(tokens)
                        raw_text = ''
            if raw_text and len(raw_text) > 0:
                # NOTE(review): this flush encodes WITHOUT clean=args.clean_text,
                # unlike the chunked path above -- looks like an oversight; confirm.
                tokens = np.stack(enc.encode(raw_text))
                token_chunks.append(tokens)
            if len(token_chunks) > args.tmpsilze:
                with open(os.path.join(args.tmp_dir, '%s.pkl'%str(uuid.uuid4())), 'wb') as f:
                    pickle.dump(token_chunks, f)
                token_chunks = []
        # Final spill of any remaining chunks for this worker.
        with open(os.path.join(args.tmp_dir, '%s.pkl'%str(uuid.uuid4())), 'wb') as f:
            pickle.dump(token_chunks, f)

    for curDir, dirs, files in os.walk(args.src_dir):
        array_file.append((curDir, dirs, files))
    with Pool(args.num_process) as p:
        p.map(_proc, list(range(args.num_process)))

    # Merge all temporary pickles into the final compressed archive.
    token_chunks = []
    for s in os.listdir(args.tmp_dir):
        with open(os.path.join(args.tmp_dir, s), 'rb') as f:
            token_chunks.extend(pickle.load(f))
    np.savez_compressed(args.dst_file, *token_chunks)
    shutil.rmtree(args.tmp_dir)
|
'''Tests configuration.'''
import multiprocessing
import os
import sys
import pytest
# Make the tests directory importable so the local test server module resolves.
TEST_DIR = os.path.abspath(os.path.dirname(__file__))
if TEST_DIR not in sys.path:
    sys.path.append(TEST_DIR)

from server import test_server_process  # noqa: E402
def values_list():
    """Return a fixed sample of mixed-type values used across the tests."""
    sample = ['foo', 'bar', 'baz', 1, -1.5, True, False, None]
    return sample
def value():
    """Return the default scalar test value."""
    result = 'foo'
    return result
@pytest.fixture
def create_request_args_files():
    """Fixture: factory that opens, for writing, every file referenced in an
    args group's 'arguments'/'files' mapping, returning the open handles."""
    def _create_request_args_files(args_group):
        opened = []
        if 'arguments' in args_group and 'files' in args_group['arguments']:
            for _filearg, spec in args_group['arguments']['files'].items():
                # A spec is either a bare path string or a sequence whose
                # first element is the path.
                path = spec if isinstance(spec, str) else spec[0]
                opened.append(open(path, 'wb'))
        return opened
    return _create_request_args_files
@pytest.fixture
def assert_request_args():
    """Fixture: checker that the server echoed back the request's headers,
    parameters, files and cookies unchanged."""
    def _assert_request_args(request_args, response_args):
        # print('\nREQUEST ARGS: ', request_args)
        # print('RESPONSE ARGS:', response_args)
        content_type = None
        if 'headers' in request_args:
            # Every request header must be echoed verbatim in the response.
            for hname, hvalue in request_args['headers'].items():
                assert hname
                assert isinstance(hname, str)
                assert hvalue
                assert isinstance(hvalue, str)
                assert hname in response_args['headers']
                assert hvalue == request_args['headers'][hname]
                if hname.lower() == 'content-type':
                    content_type = hvalue
        if 'parameters' in request_args:
            for param in request_args['parameters']:
                if content_type == 'text/plain':
                    # text/plain requests carry unnamed parameters.
                    assert not param['name']
                else:
                    assert param['name']
                    assert isinstance(param['name'], str)
                _param_found = False
                for _param in response_args['parameters']:
                    if str(param['name']) == _param['name']:
                        _param_found = True
                        assert str(param['value']) == _param['value']
                        if content_type == 'text/plain':
                            assert not _param['name']
                        else:
                            assert _param['name']
                            assert isinstance(_param['name'], str)
                        break
                assert _param_found
        if 'files' in request_args:
            for fp_name, fp_value in request_args['files'].items():
                assert fp_name
                assert isinstance(fp_name, str)
                _file_param_found = False
                for _fp_name, _fp_value in response_args['files'].items():
                    assert _fp_name
                    assert isinstance(_fp_name, str)
                    if str(fp_name) == _fp_name:
                        _file_param_found = True
                        if isinstance(fp_value, str):
                            request_value = fp_value
                        else:
                            # Sequence form: the content element (index 1) is
                            # compared stripped of surrounding whitespace.
                            request_value = [
                                el if i != 1 else el.strip()
                                for i, el in enumerate(fp_value)
                            ]
                        assert request_value == _fp_value
                assert _file_param_found
        if 'kwargs' in request_args:
            if 'cookies' in request_args['kwargs']:
                # Cookies must round-trip by name and value.
                for cname, cvalue in request_args['kwargs']['cookies'].items():
                    assert cname
                    assert isinstance(cname, str)
                    assert cvalue
                    assert isinstance(cvalue, str)
                    assert cname in response_args['cookies']
                    assert cvalue == request_args['kwargs']['cookies'][cname]
    return _assert_request_args
def on_start():
    """Spawn the HTTP test server in a separate process and return its handle."""
    server_proc = multiprocessing.Process(target=test_server_process, args=())
    server_proc.start()
    return server_proc
@pytest.fixture(autouse=True, scope='session')
def _session_fixture():
    """Run the test server for the whole session; terminate it afterwards."""
    proc = on_start()
    yield
    proc.terminate()
|
import sys
import time
from pathlib import Path
import torchvision.transforms
from PyQt5.QtWidgets import QApplication, QMainWindow, QMessageBox, QFileDialog
from PyQt5.QtGui import QImage, QPixmap
from PyQt5.QtCore import QThread, QDir
import numpy as np
import cv2
from form import Ui_OakDDetector
from oakd_camera import OakDCamera
from fusenet import load_fusenet_model, transforms, predict, contour_filter
from scipy.ndimage.morphology import distance_transform_edt as bwdist
import matplotlib.pyplot as plt
class OakDDetector(QMainWindow, Ui_OakDDetector):
def __init__(self):
    """Set up camera parameters, the segmentation model, and the Qt UI."""
    super(OakDDetector, self).__init__()
    self._fps = 5                 # camera frame rate passed to OakDCamera
    self._conf_thresh = 245       # depth confidence threshold passed to the camera
    self._max_depth = 10000       # max depth value -- presumably millimetres; TODO confirm
    self._is_streaming = False
    self._is_connected = False
    self._is_recorded = False
    self.rgb = None
    self.depth = None
    self.pred = None
    self.writer = None            # video writer handle, created when recording starts
    self._save_root = None        # output directory chosen via the browse button
    # used to record the time when we processed last frame
    self.prev_frame_time = time.time()
    # used to record the time at which we processed current frame
    self.new_frame_time = time.time()
    # Defines color for labels such as 0, 1, 2
    self.label_color = np.array([[255, 0, 0], [0, 255, 0], [0, 0, 255]])
    self._camera = OakDCamera(self._fps, self._conf_thresh, self._max_depth)
    self.model = load_fusenet_model()
    self._load_ui()
def _load_ui(self):
# Load *.ui file
self._ui = Ui_OakDDetector()
self._ui.setupUi(self)
# Setup default for ui
self._ui.streamButton.setEnabled(True)
self._ui.stopButton.setEnabled(False)
self._ui.predButton.setEnabled(True)
self._ui.recordButton.setEnabled(True)
# Connect Qt objects to methods
self._ui.streamButton.clicked.connect(self._stream_btn_clicked)
self._ui.stopButton.clicked.connect(self._stop_btn_clicked)
self._ui.predButton.clicked.connect(self._pred_btn_clicked)
self._ui.recordButton.clicked.connect(self._record_btn_clicked)
self._ui.browseButton.clicked.connect(self._browse_btn_clicked)
def _stream_btn_clicked(self):
if self._camera.is_connected():
if self._camera.is_paused():
self._camera.resume()
self._camera.signals.connect(self._view_data)
self._camera.start(QThread.LowPriority)
# Lock stream button and activate stop button
self._ui.streamButton.setEnabled(not self._ui.streamButton.isEnabled())
self._ui.stopButton.setEnabled(not self._ui.stopButton.isEnabled())
self._is_streaming = True
def _stop_btn_clicked(self):
if self._is_recorded:
msg = QMessageBox(text='Please stop capture before stopping stream!')
msg.exec()
return
self._camera.signals.disconnect(self._view_data)
self._camera.signals.disconnect(self._predict_data)
self._camera.pause()
self._ui.rgbLabel.clear()
self._ui.depthLabel.clear()
self._ui.predLabel.clear()
self._ui.rgbLabel.setText('RGB')
self._ui.depthLabel.setText('DEPTH')
self._ui.predLabel.setText('PREDICT')
self._ui.streamButton.setEnabled(not self._ui.streamButton.isEnabled())
self._ui.stopButton.setEnabled(not self._ui.stopButton.isEnabled())
self._ui.predButton.setEnabled(not self._ui.predButton.isEnabled())
self._is_streaming = False
def _pred_btn_clicked(self):
if not self._is_streaming:
msg = QMessageBox(text='Please press \'Stream\' before predicting!')
msg.exec()
return
self._camera.signals.connect(self._predict_data)
self._ui.predButton.setEnabled(not self._ui.predButton.isEnabled())
def _record_btn_clicked(self):
if not self._is_recorded:
# if self._camera.is_paused() or not self._is_streaming:
# msg = QMessageBox(text='Please stream camera!')
# msg.exec()
# return
if self._save_root is None or self._save_root == '':
msg = QMessageBox(text='The saving directory is empty!')
msg.exec()
return
self._camera.start(QThread.LowPriority)
self._camera.signals.connect(self._predict_data)
self._camera.signals.connect(self._record_data)
self._is_recorded = True
self._ui.recordButton.setText('Stop')
else:
self._camera.signals.disconnect(self._record_data)
self._camera.signals.disconnect(self._predict_data)
self._is_recorded = False
self._ui.recordButton.setText('Record')
self.writer.release()
self._ui.browseButton.setEnabled(not self._ui.browseButton.isEnabled())
self._ui.browseLineEdit.setEnabled(not self._ui.browseLineEdit.isEnabled())
def _browse_btn_clicked(self):
self._save_root = QFileDialog.getExistingDirectory(self, 'Select a directory', self._save_root)
if self._save_root:
self._save_root = QDir.toNativeSeparators(self._save_root)
self._ui.browseLineEdit.setText(self._save_root)
self.writer = cv2.VideoWriter(self._save_root + '/record.avi',
cv2.VideoWriter_fourcc('M', 'J', 'P', 'G'),
15,
(1280 * 3, 720))
def _view_data(self, data):
self._update_view_label(data[0], self._ui.rgbLabel, 'rgb')
self._update_view_label(data[1], self._ui.depthLabel, 'depth')
pass
def _predict_data(self, data):
# font which we will be using to display FPS
rgb = data[0].copy()
depth = cv2.normalize(data[1], None, 255, 0, cv2.NORM_INF, cv2.CV_8UC1)
rgb = transforms(rgb)
depth = transforms(depth[:, :, np.newaxis])
pred = predict(self.model, rgb, depth)
pred = pred.astype(np.uint8)
# Filters out small objects
mask = np.zeros(pred.shape, np.uint8)
mask[pred == 2] = 1
_, label_filter_binary = cv2.threshold(mask, 0, 255, cv2.THRESH_BINARY_INV)
contours, _ = cv2.findContours(label_filter_binary, cv2.RETR_CCOMP, cv2.CHAIN_APPROX_SIMPLE)
contours = [contours[i] for i in range(len(contours)) if
(cv2.contourArea(contours[i]) > 200) and (cv2.contourArea(contours[i]) <= 50000)]
filtered_contours = contour_filter(pred, contours, 30)
mask = np.zeros(pred.shape, np.uint8)
cv2.drawContours(mask, filtered_contours, -1, 1, cv2.FILLED)
# mask = cv2.normalize(mask, None, 0, 1, cv2.NORM_MINMAX, cv2.CV_8UC1)
pred[(pred == 2)] = 0
pred[mask == 1] = 2
# Defines alpha value for blended image
# Label map
rgb = np.array(torchvision.transforms.ToPILImage()(rgb).convert('RGB'))
rgb = cv2.cvtColor(rgb, cv2.COLOR_RGB2BGR)
blend_scale = 0.5
for i in range(3):
rgb[:, :, i][pred == 0] = \
blend_scale * self.label_color[2, i] + (1 - blend_scale) * rgb[:, :, i][pred == 0]
rgb[:, :, i][pred == 1] = \
blend_scale * self.label_color[1, i] + (1 - blend_scale) * rgb[:, :, i][pred == 1]
rgb[:, :, i][pred == 2] = \
blend_scale * self.label_color[0, i] + (1 - blend_scale) * rgb[:, :, i][pred == 2]
rgb = self._draw_result(rgb, data[1], filtered_contours)
# key, route = self._draw_path(pred)
# if key != None:
# rgb = cv2.resize(rgb, (640, 360), interpolation=cv2.INTER_LINEAR)
# for i in range(route.shape[0]):
# cv2.circle(rgb, (int(route[i, 0]), int(route[i, 1])), 2, (255, 0, 0), 2)
# tic = time.time()
# path = cv2.cvtColor(rgb, cv2.COLOR_RGB2BGR)
# cv2.imwrite(f"path_{tic}.png", path)
# time when we finish processing for this frame
self.new_frame_time = time.time()
self._fps = int(1. / (self.new_frame_time - self.prev_frame_time))
self.prev_frame_time = self.new_frame_time
cv2.putText(rgb,
'FPS:' + str(self._fps),
(10, 50),
cv2.FONT_HERSHEY_SIMPLEX,
1, (255, 255, 255), 1, cv2.LINE_AA)
color_anomaly_map = QImage(rgb.data, rgb.shape[1], rgb.shape[0], rgb.shape[1] * 3, QImage.Format_RGB888)
self._ui.predLabel.setPixmap(QPixmap(color_anomaly_map))
self.map = cv2.resize(rgb, (1280, 720), interpolation=cv2.INTER_LINEAR)
self.map = cv2.cvtColor(self.map, cv2.COLOR_RGB2BGR)
def _record_data(self, data):
self.rgb = data[0]
self.depth = cv2.normalize(data[1], None, 255, 0, cv2.NORM_INF, cv2.CV_8U)
self.depth = cv2.applyColorMap(self.depth, cv2.COLORMAP_JET)
if (self.map is not None) and (self.rgb is not None) and (self.depth is not None):
data = np.concatenate([self.rgb, self.depth, self.map], axis=1)
self.writer.write(data)
@staticmethod
def _update_view_label(img, label, mode='rgb'):
if mode == 'disp':
img = (img * (255 / 96)).astype(np.uint8)
img = cv2.applyColorMap(img, cv2.COLORMAP_MAGMA)
elif mode == 'depth':
img = cv2.normalize(img, None, 255, 0, cv2.NORM_INF, cv2.CV_8UC1)
img = cv2.applyColorMap(img, cv2.COLORMAP_MAGMA)
img = cv2.resize(img, (label.width(), label.height()))
img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
img = QImage(img.data, img.shape[1], img.shape[0], img.shape[1] * 3, QImage.Format_RGB888)
img = QPixmap(img)
label.setPixmap(img)
def _draw_result(self, rgb, depth, contours):
rgb = cv2.resize(rgb, (self._ui.predLabel.width(), self._ui.predLabel.height()), interpolation=cv2.INTER_LINEAR)
for i in range(len(contours)):
x, y, w, h = cv2.boundingRect(contours[i])
x_center, y_center = int(x + w / 2), int(y + h / 2)
delta = 3
region = depth[3 * y_center - delta: 3 * y_center + delta, 4 * x_center - delta: 4 * x_center + delta]
region[region == 0] == np.nan
distance = np.round(np.max(region))
# Draws to output image the result
cv2.circle(rgb, (2 * x_center, int(1.5 * y_center)), 2, (255, 255, 255), 2)
cv2.rectangle(rgb, (2 * x, int(1.5 * y)), (2 * (x + w), int(1.5 * (y + h))), (255, 0, 0), 2)
cv2.putText(rgb,
'X: ' + str(round(x - 640, 2)) + 'mm',
(2 * x + 50, int(1.5 * y) + 30),
cv2.FONT_HERSHEY_SIMPLEX,
0.5, (255, 255, 255), 2)
cv2.putText(rgb,
'Y: ' + str(round(360 - y, 2)) + 'mm',
(2 * x + 50, int(1.5 * y) + 45),
cv2.FONT_HERSHEY_SIMPLEX,
0.5, (255, 255, 255), 2)
cv2.putText(rgb,
'Z: ' + str(distance) + 'mm',
(2 * x + 50, int(1.5 * y) + 60),
cv2.FONT_HERSHEY_SIMPLEX,
0.5, (255, 255, 255), 2)
return rgb
def _draw_path(self, pred):
# Binary map
binary_map = np.ones(pred.shape)
binary_map[pred == 2] = 0
binary_map[pred == 0] = 0
binary_map = cv2.resize(binary_map, (640, 360))
h, w= binary_map.shape[:2]
alpha = 1/24
T = w * alpha
dct = {}
key = None
i = 0
for row in range(h):
dct[row] = list()
value = 0
temp = []
for col in range(w):
if binary_map[row, col] == 1 and value == 0:
temp.append([row, col])
value = 1
elif binary_map[row, col] == 0 and len(temp) == 1 and value == 1:
temp.append([row, col-1])
if len(temp) == 2 and value == 1:
value = 0
if abs(temp[0][1] - temp[1][1]) >= T:
dct[row].append(temp)
temp = []
if i == 0:
key = row
i += 1
if len(dct[row]) == 0:
del dct[row]
if key != None:
des_joint = None
farest_points = dct[key]
if len(farest_points) > 1:
longest = abs(farest_points[0][1][1] - farest_points[0][0][1])
for i in range(len(farest_points)):
distance = abs(farest_points[i][1][1] - farest_points[i][0][1])
if distance >= longest:
des_joint = farest_points[i]
else:
des_joint = farest_points[0]
elif len(farest_points) == 1:
des_joint = farest_points[0]
destination = [int((des_joint[0][1] + des_joint[1][1]) / 2), int(des_joint[0][0])]
d = bwdist(binary_map == 1)
# Rescale and transform distance
d2 = (d/100.) + 1
d0 = 2
nu = 800
repulsive = nu*((1/d2 - 1/d0)**2)
repulsive[d2 > d0] = 0
[x, y] = np.meshgrid(np.arange(w), np.arange(h))
goal = destination
start = [w//2, h-20]
xi = 1/700
attractive = xi * ((x - goal[0])**2 + (y - goal[1])**2)
f = attractive + repulsive
route = self._gradientBasedPlanner(f, start, goal, 700)
else:
route = [0, 0]
return key, route
def _gradientBasedPlanner(self, f, start_coords, end_coords, max_its):
[gy, gx] = np.gradient(-f)
route = np.vstack([np.array(start_coords), np.array(start_coords)])
for i in range(max_its):
current_point = route[-1, :]
if sum(abs(current_point - end_coords)) < 5.0:
break
ix = int(round(current_point[1]))
iy = int(round(current_point[0]))
# print(ix, iy)
if ix >= 360:
ix = 359
vx = gx[ix, iy]
vy = gy[ix, iy]
dt = 1/np.linalg.norm([vx, vy])
next_point = current_point + dt * np.array([vx, vy])
route = np.vstack([route, next_point])
route = route[1:, :]
return route
def closeEvent(self, event):
if self._is_recorded:
msg = QMessageBox(text='Please stop capturing!')
msg.exec()
event.ignore()
else:
self._camera.close()
event.accept()
if __name__ == '__main__':
    application = QApplication(sys.argv)
    # Use the 'Fusion' style so the UI looks identical on every platform.
    application.setStyle('Fusion')
    main_window = OakDDetector()
    main_window.show()
    sys.exit(application.exec())
|
'''
Created on Feb 15, 2014
@author: sethjn
This sample shows how to do some basic things with playground.
It does not use the PlaygroundNode interface. To see an example
of that, check out computePi.py.
'''
# We will use "BOOL1" and "STRING" in our message definition
from playground.network.packet.fieldtypes import BOOL, STRING
from playground.network.common import PlaygroundAddress
# MessageDefinition is the base class of all automatically serializable messages
from playground.network.packet import PacketType
import playground
import sys, time, os, logging, asyncio
#logger = logging.getLogger(__name__)
class EchoPacket(PacketType):
    """
    A minimal request/response packet: a string payload plus a one-byte
    boolean flag telling whether this is the original message (True) or
    the echoed reply (False).
    """

    # Any unique string works as an identifier; a fully qualified name is
    # the usual convention.
    DEFINITION_IDENTIFIER = "test.EchoPacket"
    # Version is "major.minor"; all major versions must remain backwards
    # compatible.
    DEFINITION_VERSION = "1.0"

    FIELDS = [("original", BOOL), ("message", STRING)]
class EchoServerProtocol(asyncio.Protocol):
    """
    Server side of the echo exchange: every original EchoPacket received
    is sent straight back with the 'original' flag cleared. The magic
    payload "__QUIT__" closes the connection instead.
    """

    def __init__(self):
        self.deserializer = EchoPacket.Deserializer()
        self.transport = None

    def connection_made(self, transport):
        print("Received a connection from {}".format(transport.get_extra_info("peername")))
        self.transport = transport

    def connection_lost(self, reason=None):
        print("Lost connection to client. Cleaning up.")

    def data_received(self, data):
        self.deserializer.update(data)
        for packet in self.deserializer.nextPackets():
            if not packet.original:
                print("Got a packet from client not marked as 'original'. Dropping")
                continue
            print("Got {} from client.".format(packet.message))
            if packet.message == "__QUIT__":
                print("Client instructed server to quit. Terminating")
                self.transport.close()
                return
            reply = EchoPacket()
            reply.original = False  # an echo must not be echoed back again
            reply.message = packet.message
            self.transport.write(reply.__serialize__())
class EchoClientProtocol(asyncio.Protocol):
    """
    Client side of the echo exchange. Sends strings wrapped in EchoPackets
    and hands non-original (echoed) replies to a callback, which defaults
    to print.
    """

    def __init__(self, callback=None):
        self.buffer = ""
        if callback:
            self.callback = callback
        else:
            self.callback = print
        self.transport = None
        self.deserializer = EchoPacket.Deserializer()

    def close(self):
        # The original called self.__sendMessageActual, a method that does
        # not exist on this class (AttributeError at runtime); the quit
        # sentinel must go through send().
        self.send("__QUIT__")

    def connection_made(self, transport):
        print("Connected to {}".format(transport.get_extra_info("peername")))
        self.transport = transport

    def data_received(self, data):
        self.deserializer.update(data)
        for echoPacket in self.deserializer.nextPackets():
            if echoPacket.original == False:
                self.callback(echoPacket.message)
            else:
                print("Got a message from server marked as original. Dropping.")

    def send(self, data):
        """Wrap *data* in an original EchoPacket and write it out."""
        echoPacket = EchoPacket(original=True, message=data)
        self.transport.write(echoPacket.__serialize__())
class EchoControl:
    """Console UI driver: wires stdin to an EchoClientProtocol and prints
    server responses."""

    def __init__(self):
        self.txProtocol = None

    def buildProtocol(self):
        """Factory passed to the playground connector."""
        return EchoClientProtocol(self.callback)

    def connect(self, txProtocol):
        """Store the connected protocol and start reading from stdin."""
        # The original assigned txProtocol twice; once is enough.
        self.txProtocol = txProtocol
        print("Echo Connection to Server Established!")
        sys.stdout.write("Enter Message: ")
        sys.stdout.flush()
        asyncio.get_event_loop().add_reader(sys.stdin, self.stdinAlert)

    def callback(self, message):
        """Print a server response and re-prompt."""
        print("Server Response: {}".format(message))
        sys.stdout.write("\nEnter Message: ")
        sys.stdout.flush()

    def stdinAlert(self):
        """Read one line from stdin and send it to the server."""
        data = sys.stdin.readline()
        if data and data[-1] == "\n":
            data = data[:-1]  # strip off \n
        self.txProtocol.send(data)
USAGE = """usage: echotest <mode> [-stack=<stack_name>]
mode is either 'server' or a server's address (client mode)"""

if __name__ == "__main__":
    # Split argv into "-key=value" flags and positional arguments
    # (positionals are stored under integer keys 0, 1, ...).
    echoArgs = {}
    stack = "default"
    args = sys.argv[1:]
    i = 0
    for arg in args:
        if arg.startswith("-"):
            # Split only on the first '=' so values may themselves
            # contain '=' (the original unpacking crashed on those).
            k, v = arg.split("=", 1)
            echoArgs[k] = v
        else:
            echoArgs[i] = arg
            i += 1
    if "-stack" in echoArgs:
        stack = echoArgs["-stack"]
    if 0 not in echoArgs:
        sys.exit(USAGE)
    mode = echoArgs[0]
    loop = asyncio.get_event_loop()
    loop.set_debug(enabled=True)
    from playground.common.logging import EnablePresetLogging, PRESET_DEBUG
    EnablePresetLogging(PRESET_DEBUG)
    if mode.lower() == "server":
        coro = playground.getConnector(stack).create_playground_server(lambda: EchoServerProtocol(), 101)
        server = loop.run_until_complete(coro)
        # Socket objects expose getsockname(); gethostname() only exists on
        # the socket module and raised AttributeError in the original.
        print("Echo Server Started at {}".format(server.sockets[0].getsockname()))
        loop.run_forever()
        loop.close()
    else:
        remoteAddress = mode
        control = EchoControl()
        coro = playground.getConnector(stack).create_playground_connection(control.buildProtocol, remoteAddress, 101)
        transport, protocol = loop.run_until_complete(coro)
        print("Echo Client Connected. Starting UI t:{}. p:{}".format(transport, protocol))
        control.connect(protocol)
        loop.run_forever()
        loop.close()
|
# -*- coding: utf-8 -*-
"""This module defines classes which can be used to build interfaces from
external packages to the data description and associated objects.
Each interface type is a subclass of the Interface abstract class and certain
attributes and methods must be defined for these to become "concrete".
The methods "declare_inputs" and "declare_outputs" declare which variables from
the internal data catalog are used as inputs and which are provided as outputs.
The method "declare_optional" describes any optional inputs, i.e. those that
may be fulfilled or not (fulfilled means not None).
For the MapInterface class another method "declare_id_map" is required. This
is the mapping from the internal data catalog to names used locally by the module
for inputs. This means changes to variable names on either side of the
interface only effects this variable.
The method "connect" is used to execute the function. Inputs can be collected
using the method "get_local" using the local variable name as the argument.
The outputs should be stored using the method "set_local", where the local
variable name and the data must be given.
.. moduleauthor:: <NAME> <<EMAIL>>
.. module:: interface
:platform: Windows
:synopsis: Interfaces classes
.. moduleauthor:: <NAME> <<EMAIL>>
"""
# Set up logging
import logging
module_logger = logging.getLogger(__name__)
import os
import abc
import pandas as pd
from attrdict import AttrDict
from sqlalchemy.exc import SQLAlchemyError
from polite.abc import abstractclassmethod
from ..utilities.misc import Injective
class MyAttrDict(AttrDict):
    """AttrDict variant that stores values untouched (disables AttrDict's
    recursive wrapping of contained mappings)."""
    def _build(self, obj):
        # Return the value as-is instead of converting it.
        return obj
class MaskVariable(object):
    '''Declares a masked variable. A masked variable is only registered as
    an input (outputs are not supported yet) when another variable exists
    in the datastate and, optionally, holds one of a given set of values.'''
    def __init__(self, var_id, unmask_variable=None, unmask_values=None):
        # Universal identifier of the variable being masked.
        self.variable_id = var_id
        # Identifier of the variable whose presence lifts the mask.
        self.unmask_variable = unmask_variable
        # Values of unmask_variable that lift the mask.
        self.unmask_values = unmask_values
        return
class Interface(object):
    '''The base abstract class for all interface types'''
    # Python 2 style metaclass declaration (silently ignored by Python 3,
    # where "metaclass=abc.ABCMeta" in the class header would be needed).
    __metaclass__ = abc.ABCMeta
    def __init__(self):
        # The data map should have keys which are the union of the inputs and
        # outputs.
        self.data = None
        self.init_maps()
        # Check that the optional identifiers have been set correctly
        self._check_optional_valid()
        return
    @abstractclassmethod
    def get_name(cls):
        '''A class method for the common name of the interface.
        Returns:
          str: A unique string
        '''
        return cls()
    @abstractclassmethod
    def declare_inputs(cls):
        '''A class method to declare all the variables required as inputs by
        this interface.
        Returns:
          list: List of inputs identifiers
        Example:
          The returned value can be None or a list of identifier strings which
          appear in the data descriptions. For example::
              inputs = ["My:first:variable",
                        "My:second:variable",
                       ]
        '''
        return cls()
    @abstractclassmethod
    def declare_outputs(cls):
        '''A class method to declare all the output variables provided by
        this interface.
        Returns:
          list: List of output identifiers
        Example:
          The returned value can be None or a list of identifier strings which
          appear in the data descriptions. For example::
              outputs = ["My:first:variable",
                         "My:third:variable",
                        ]
        '''
        return cls()
    @abstractclassmethod
    def declare_optional(cls):
        '''A class method to declare all the variables which should be flagged
        as optional.
        Returns:
          list: List of optional variable identifiers
        Note:
          Currently only inputs marked as optional have any logical effect.
          However, this may change in future releases hence the general
          approach.
        Example:
          The returned value can be None or a list of identifier strings which
          appear in the declare_inputs output. For example::
              optional = ["My:first:variable",
                         ]
        '''
        return cls()
    @abc.abstractmethod
    def connect(self):
        '''The connect method is used to execute the external program and
        populate the interface data store with values.
        Note:
          Collecting data from the interface for use in the external program
          can be accessed using self.data.my_input_variable. To put new values
          into the interface once the program has run we set
          self.data.my_output_variable = value
        '''
        return
    def init_maps(self):
        # Build the identifier -> value store, one None entry per declared
        # input/output identifier.
        all_keys = self._get_all_ids()
        new_map = {}
        for key in all_keys:
            new_map[key] = None
        self.data = new_map
        return
    def put_data(self, identifier, data):
        '''Put data into the interface, before connecting
        Args:
          identifier (str): Universal identifier for the data to set
          data: Value of the data to set
        '''
        if identifier not in self.data:
            errStr = ("Identifier {} not recognised for "
                      "interface {}.").format(identifier,
                                              self.get_name())
            raise KeyError(errStr)
        self.data[identifier] = data
        return
    def get_data(self, identifier):
        '''Get data from the interface after connecting
        Args:
          identifier (str): Universal identifier for the data to get'''
        data = self.data[identifier]
        return data
    @classmethod
    def get_inputs(cls, drop_masks=False):
        """Get all inputs provided by the interface"""
        # NOTE(review): declare_inputs() is called twice here; the second
        # call makes the first None-check largely redundant.
        if cls.declare_inputs() is None: return ([], [])
        input_declaration = cls.declare_inputs()
        if input_declaration is None: input_declaration = []
        # Unwrap MaskVariable entries to their plain identifiers.
        input_ids = []
        for declared_input in input_declaration:
            if isinstance(declared_input, str):
                input_ids.append(declared_input)
            elif isinstance(declared_input, MaskVariable):
                input_ids.append(declared_input.variable_id)
        all_inputs = set(input_ids)
        if cls.declare_optional() is None:
            all_optional = set()
        else:
            all_optional = set(cls.declare_optional())
        # Optional inputs are those both declared and flagged optional.
        optional_inputs = list(all_inputs & all_optional)
        if drop_masks:
            # Plain identifiers only (masks stripped).
            required_inputs = input_ids
        else:
            # Raw declaration, possibly containing MaskVariable objects.
            required_inputs = input_declaration
        return required_inputs, optional_inputs
    @classmethod
    def get_outputs(cls):
        """Get all ouptuts provided by the interface"""
        outputs = cls.declare_outputs()
        if outputs is None: outputs = []
        return outputs
    def _update_data(self, data_dict):
        # Bulk put_data from an identifier: value mapping.
        # NOTE: dict.iteritems() is Python 2 only; this module appears to
        # target Python 2 throughout.
        if data_dict is None: return
        for key, value in data_dict.iteritems():
            self.put_data(key, value)
        return
    def _check_optional_valid(self):
        # Verify every optional identifier was also declared as an input.
        input_identifiers = self.declare_inputs()
        optional_indentifiers = self.declare_optional()
        if input_identifiers is None: input_identifiers = []
        if optional_indentifiers is None: return
        all_identifiers = []
        for identifier in input_identifiers:
            if isinstance(identifier, MaskVariable):
                all_identifiers.append(identifier.variable_id)
            else:
                all_identifiers.append(identifier)
        all_inputs = set(all_identifiers)
        all_optional = set(optional_indentifiers)
        # Resolve erroneous mappings
        err_mapped = all_optional - all_inputs
        if err_mapped:
            bad_ids_str = ", ".join(err_mapped)
            errStr = ("The following identifiers are declared optional "
                      "without being declared as inputs in interface {}: "
                      "{}").format(self.get_name(), bad_ids_str)
            raise KeyError(errStr)
        return
    # NOTE(review): named like a classmethod ('cls') but not decorated, so
    # when called via an instance 'cls' is actually the instance -- confirm
    # whether a @classmethod decorator was intended.
    def _check_ids(cls, given_keys, valid_keys):
        # Raise if any given key is not among the valid keys.
        invalid_keys = set(given_keys) - set(valid_keys)
        if len(invalid_keys) > 0:
            if len(invalid_keys) == 1:
                key_string = "".join(invalid_keys)
            else:
                key_string = ", ".join(invalid_keys)
            errStr = "The keys {} are not valid.".format(key_string)
            raise KeyError(errStr)
        return
    # NOTE(review): same undecorated-'cls' pattern as _check_ids. Also note
    # the returned list may contain MaskVariable objects, which then become
    # dict keys in init_maps -- confirm that is intended.
    def _get_all_ids(cls):
        # Union of declared input and output identifiers.
        if cls.declare_inputs():
            all_keys = cls.declare_inputs()
        else:
            all_keys = []
        if cls.declare_outputs():
            all_keys.extend(cls.declare_outputs())
        return all_keys
class WeightedInterface(Interface):
    # Interface variant that additionally declares a weighting value.
    @abstractclassmethod
    def declare_weight(cls):
        '''A class method to declare interface weighting
        '''
        return cls()
class MapInterface(Interface):
    # Interface that maps universal variable identifiers to short local
    # aliases, stored as attributes on an AttrDict.
    def __init__(self):
        # Injective (one-to-one) map between universal ids and local names.
        self.valid_id_map = None
        super(MapInterface, self).__init__()
        return
    @abstractclassmethod
    def declare_id_map(cls):
        '''Declare the mapping for variable identifiers in the data description
        to local names for use in the interface. This helps isolate changes in
        the data description or interface from effecting the other.
        Returns:
          dict: Mapping of local to data description variable identifiers
        Example:
          The returned value must be a dictionary containing all the inputs and
          outputs from the data description and a local alias string. For
          example::
              id_map = {"var1": "My:first:variable",
                        "var2": "My:second:variable",
                        "var3": "My:third:variable"
                       }
        '''
        return cls()
    def put_data(self, identifier, data):
        # Translate the universal identifier to its local alias and store
        # the value as an attribute on the data AttrDict.
        local_key = self.valid_id_map.get(identifier)
        if local_key not in self.data:
            errStr = ("Identifier {} not recognised for "
                      "interface {}.").format(local_key,
                                              self.get_name())
            raise KeyError(errStr)
        setattr(self.data, local_key, data)
        return
    def get_data(self, identifier):
        # Translate the universal identifier to its local alias and read
        # the value back off the data AttrDict.
        local_key = self.valid_id_map.get(identifier)
        if local_key not in self.data:
            errStr = ("Identifier {} not recognised for "
                      "interface {}.").format(local_key,
                                              self.get_name())
            raise KeyError(errStr)
        data = getattr(self.data, local_key)
        return data
    def init_maps(self):
        # Validate the declared id map, then build the local alias lookup
        # and an empty attribute per local name.
        self._check_map_valid()
        id_map = self.declare_id_map()
        self.valid_id_map = Injective()
        self.data = MyAttrDict()
        # NOTE: dict.iteritems() is Python 2 only.
        for local, universal in id_map.iteritems():
            # '.' would break attribute-style access on the AttrDict.
            if "." in local:
                errStr = ("The character '.' may not be included in id_map "
                          "key for variable {}").format(universal)
                raise ValueError(errStr)
            self.valid_id_map.add(local, universal)
            setattr(self.data, local, None)
        return
    def _update_data(self, data_dict):
        # Bulk-store values keyed by universal identifier.
        if data_dict is None: return
        for key, value in data_dict.iteritems():
            local_key = self.valid_id_map.get(key)
            setattr(self.data, local_key, value)
        return
    def _check_map_valid(self):
        '''Test to see if all the input and output variables are in the map.
        '''
        input_identifiers = self.declare_inputs()
        output_indentifiers = self.declare_outputs()
        raw_identifiers = []
        all_identifiers = []
        if input_identifiers is not None:
            raw_identifiers.extend(input_identifiers)
        if output_indentifiers is not None:
            raw_identifiers.extend(output_indentifiers)
        # Unwrap MaskVariable entries to their plain identifiers.
        for identifier in raw_identifiers:
            if isinstance(identifier, MaskVariable):
                all_identifiers.append(identifier.variable_id)
            else:
                all_identifiers.append(identifier)
        all_identifiers = set(all_identifiers)
        test_id_map = self.declare_id_map()
        all_mapped = set(test_id_map.values())
        # Resolve missing mappings
        not_mapped = all_identifiers - all_mapped
        if not_mapped:
            bad_ids_str = ", ".join(not_mapped)
            errStr = ("The following identifiers have not been mapped in "
                      "interface {}: {}").format(self.get_name(), bad_ids_str)
            raise KeyError(errStr)
        # Resolve duplicate mappings
        # NOTE: Python 2 only -- values() returns a fresh list there, so
        # remove() works; under Python 3 this is a view and would raise
        # AttributeError.
        dupes = test_id_map.values()
        for x in all_mapped: dupes.remove(x)
        if dupes:
            bad_ids_str = ", ".join(dupes)
            errStr = ("The following identifiers have multiple mappings in "
                      "interface {}: {}").format(self.get_name(), bad_ids_str)
            raise KeyError(errStr)
        # Resolve erroneous mappings
        err_mapped = all_mapped - all_identifiers
        if err_mapped:
            bad_ids_str = ", ".join(err_mapped)
            errStr = ("The following identifiers have been erroneously "
                      "mapped in interface {}: {}").format(self.get_name(),
                                                           bad_ids_str)
            raise KeyError(errStr)
        return
class MetaInterface(MapInterface):
    """Mapped interface which retains metadata"""
    def __init__(self):
        # Per-variable metadata store, mirroring self.data.
        self.meta = None
        super(MetaInterface, self).__init__()
        return
    def init_maps(self):
        # Same as MapInterface.init_maps, but also initialises an empty
        # metadata attribute per local name.
        self._check_map_valid()
        id_map = self.declare_id_map()
        self.valid_id_map = Injective()
        self.data = MyAttrDict()
        self.meta = MyAttrDict()
        # NOTE: dict.iteritems() is Python 2 only.
        for local, universal in id_map.iteritems():
            # '.' would break attribute-style access on the AttrDicts.
            if "." in local:
                errStr = ("The character '.' may not be included in id_map "
                          "key for variable {}").format(universal)
                raise ValueError(errStr)
            self.valid_id_map.add(local, universal)
            setattr(self.data, local, None)
            setattr(self.meta, local, None)
        return
    def put_meta(self, identifier, metadata):
        '''Put metadata into the interface, before connecting
        Args:
          identifier (str): Universal identifier for the data to set
          metadata: MetaData object for the variable
        '''
        local_key = self.valid_id_map.get(identifier)
        # Membership is checked against self.data, which shares its keys
        # with self.meta by construction in init_maps.
        if local_key not in self.data:
            errStr = ("Identifier {} not recognised for "
                      "interface {}.").format(local_key,
                                              self.get_name())
            raise KeyError(errStr)
        setattr(self.meta, local_key, metadata)
        return
class RawInterface(Interface):
    '''Interface for collecting any number of declared inputs using python
    objects.'''
    # The original also defined __init__ and get_data overrides that were
    # pure pass-throughs to the superclass; they have been removed as
    # redundant (behavior is unchanged).
    @classmethod
    def declare_inputs(cls):
        # Raw interfaces have no fixed input declaration.
        return None
    @classmethod
    def declare_optional(cls):
        return None
    def set_variables(self, var_dict):
        '''Add variables to the interface using a dictionary such that the
        items are provided using identifier: value pairs
        Args:
          var_dict (dict): Dictionary of identifier: data pairs
        '''
        self._update_data(var_dict)
        return
    def connect(self):
        # Nothing to execute: the data is supplied directly.
        return None
class FileInterface(MapInterface):
    '''Mapped interface whose data is read from a file on disk.'''
    def __init__(self):
        super(FileInterface, self).__init__()
        # Path to the file to be read; must be set before connecting.
        self._path = None
        return
    @abstractclassmethod
    def get_valid_extensions(cls):
        '''Declare the file extensions this interface accepts.'''
        return cls
    @abc.abstractmethod
    def connect(self):
        self.check_path()
        return
    def get_data(self, identifier):
        # Validate the path before delegating to the mapped lookup.
        self.check_path()
        return super(FileInterface, self).get_data(identifier)
    def get_file_path(self):
        '''Get the path to the file to be read'''
        return self._path
    def set_file_path(self, file_path):
        '''Set the path to the file to be read
        Args:
            file_path (str): File path'''
        self._path = file_path
        return
    def check_path(self, check_exists=False):
        '''Validate the stored file path and, optionally, its existence.'''
        if self._path is None:
            raise ValueError('The file path must be set for FileInterface '
                             'classes.')
        file_ext = os.path.splitext(self._path)[1]
        if file_ext not in self.get_valid_extensions():
            extStr = ", ".join(self.get_valid_extensions())
            raise IOError("File extension '{}' is not valid. Available are "
                          "'{}'".format(file_ext, extStr))
        if check_exists and not os.path.exists(self._path):
            raise IOError("No file or directory exists for path "
                          "'{}'".format(self._path))
        return
class QueryInterface(MetaInterface):
    """Interface for making database queries"""
    def __init__(self):
        super(QueryInterface, self).__init__()
        # Database wrapper; set via put_database before connecting.
        self._db = None
        return
    def put_database(self, database):
        # Store the database wrapper used by connect()/get_dataframe().
        self._db = database
        return
    def get_dataframe(self, table_name):
        # Read a whole table into a pandas DataFrame via the wrapped
        # SQLAlchemy engine.
        df = pd.read_sql(table_name, self._db._engine)
        return df
    def safe_connect(self, attempts=3):
        """Retry database actions on failure and rollback if necessary"""
        while attempts:
            attempts -= 1
            success = False
            try:
                self.connect()
                self._db.session.commit()
                success = True
            except SQLAlchemyError as exc:
                # Roll back only when the connection itself was invalidated;
                # other SQLAlchemy errors are simply retried.
                if (hasattr(exc, "connection_invalidated") and
                    exc.connection_invalidated):
                    self._db.session.rollback()
                # Out of retries: propagate the last database error.
                if not attempts:
                    raise exc
            finally:
                # The session is always closed, success or failure.
                self._db.session.close()
            if success: return
            msg = "Remaining database connection attempts: {}".format(attempts)
            module_logger.info(msg)
        return
class AutoInterface(MetaInterface):
    '''AutoInterface subclass for creating automated simple interfaces
    '''
    # Universal identifiers for which no automatic interface may be built.
    unavailable_ids = []
    def __init__(self):
        super(AutoInterface, self).__init__()
        return
    @abstractclassmethod
    def get_connect_name(cls):
        '''A class method for returning the name of the automatic interface
        connect method
        Returns:
          str: The method name
        '''
        return cls()
    @classmethod
    def get_method_names(cls):
        '''A class method for returning the names of additional automatic
        class methods
        Returns:
          str: The method name
        '''
        return None
|
from flask import Flask, render_template, request
from datetime import date, datetime
from message import text
import psycopg2
import math
import simplejson
import prediction
class CustomFlask(Flask):
    """Flask subclass whose Jinja delimiters are changed so that server-side
    templates do not clash with client-side {{ }} syntax."""
    jinja_options = dict(
        Flask.jinja_options,
        block_start_string='<%',
        block_end_string='%>',
        variable_start_string='%%',
        variable_end_string='%%',
        comment_start_string='<#',
        comment_end_string='#>',
    )
# Application instance using the customised Jinja delimiters.
app = CustomFlask(__name__)
def get_age(born):
    """Return the number of completed years between *born* (a date) and today."""
    today = date.today()
    years = today.year - born.year
    # Subtract one year if this year's birthday has not happened yet.
    if (today.month, today.day) < (born.month, born.day):
        years -= 1
    return years
def date_handler(obj):
    """JSON default hook: ISO-format anything date-like, pass the rest through."""
    if hasattr(obj, 'isoformat'):
        return obj.isoformat()
    return obj
def create_input_params_obj(glucose, blood_pressure, insulin, BMI, date_of_birth):
    """Build the Azure ML request payload for the diabetes prediction model."""
    features = {
        'glucose': glucose,
        'pressure': blood_pressure,
        'insulin': insulin,
        'BMI': BMI,
        'age': get_age(date_of_birth),
    }
    return {
        "Inputs": {"input1": [features]},
        "GlobalParameters": {},
    }
@app.route('/')
def hello_world():
    """Serve the single-page application shell."""
    return render_template("index.html")
@app.route('/api/history/<userid>')
def api_history(userid):
    """Return the measurement history for *userid* as a JSON array,
    ordered oldest-first."""
    columns = ('email', 'first_name', 'last_name', 'phone_number', 'mass',
               'height', 'date_of_birth', 'blood_pressure', 'glucose',
               'insulin', 'timestamp')
    conn = psycopg2.connect(database='user_logs', user='root', host='172.16.58.3', port=26257)
    try:
        conn.set_session(autocommit=True)
        cur = conn.cursor()
        try:
            cur.execute("SELECT email, first_name, last_name, phone_number, mass, height, date_of_birth, blood_pressure, glucose, insulin, timestamp FROM history WHERE email=%s ORDER BY timestamp asc", (userid,))
            rows = cur.fetchall()
        finally:
            cur.close()
    finally:
        # The original leaked the connection when the query raised; always
        # release it.
        conn.close()
    output = [dict(zip(columns, row)) for row in rows]
    return simplejson.dumps(output, default=date_handler)
@app.route('/api/go', methods = ['POST'])
def api_go():
    """Record a patient measurement, run the diabetes prediction, text the result.

    Expects a JSON body with contact details and measurements. Stores the
    row in the ``history`` table, calls the Azure ML model, sends an SMS
    with the probability and returns the model response as JSON.
    """
    data = request.json
    email = data["email"]
    first_name = data["first_name"]
    last_name = data["last_name"]
    phone_number = data["phone_number"]
    mass = float(data["mass"])
    height = float(data["height"])
    # Bug fix: the format used '%M' (minutes) where the month directive
    # '%m' was intended, so every birth date parsed with month=January.
    date_of_birth = datetime.strptime(data["date_of_birth"], '%Y-%m-%d')
    blood_pressure = float(data["blood_pressure"])
    glucose = float(data["glucose"])
    insulin = float(data["insulin"])
    timestamp = datetime.now()
    conn = psycopg2.connect(database='user_logs', user='root', host='172.16.58.3', port=26257)
    try:
        conn.set_session(autocommit=True)
        cur = conn.cursor()
        try:
            cur.execute("INSERT INTO history (email, first_name, last_name, phone_number, mass, height, date_of_birth, blood_pressure, glucose, insulin, timestamp) VALUES (%s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s)",
                (email, first_name, last_name, phone_number, mass, height, date_of_birth, blood_pressure, glucose, insulin, timestamp,))
        finally:
            cur.close()
    finally:
        # Fix: close the database connection even if the insert fails.
        conn.close()
    # Get response from Azure Machine Learning Model
    input_params = create_input_params_obj(glucose, blood_pressure, insulin, mass/(height**2), date_of_birth)
    output_response = prediction.get_response(input_params)
    # Send text message to user
    text(first_name, phone_number, '%.1f' % round((float(output_response["probability"])*100), 1), output_response["patient_is_diabetic"])
    return simplejson.dumps(output_response, default=date_handler)
if __name__ == '__main__':
    # Debug server for local development only; use a WSGI server in production.
    # (Also removed a stray trailing '|' artifact that broke parsing.)
    app.run(debug=True)
<gh_stars>10-100
# -*- coding: utf-8 -*-
from __future__ import (unicode_literals, absolute_import, division)
import datetime
import random
import logging
import collections
import six
from dateutil import relativedelta
from django.core.urlresolvers import reverse
from django.core.exceptions import ObjectDoesNotExist
from django.shortcuts import render
from django.http import HttpResponse, Http404, HttpResponseRedirect
from django.views import generic
from django.utils.decorators import method_decorator
from django.utils.translation import ngettext_lazy, pgettext, ugettext_lazy as _
from django.views.decorators.csrf import csrf_exempt
from django import db
from django.utils.encoding import force_text
from django.utils import timezone, timesince
from django.contrib.humanize.templatetags import humanize
from django.views.decorators.cache import never_cache
from django.db.models import Q, F, Max, Min, Sum, Avg, Count, When, Case
from django.template.loader import render_to_string
from django.template.context import RequestContext
import cacheops
from julia import shortcuts
from brake.decorators import ratelimit
from .signals import stream_data_received
from . import decorators, models, forms, definitions, utils, const, templatetags
from .definitions import STAT
logger = logging.getLogger(__name__)
class AjaxTemplateViewMixin(object):
    """Render a dedicated template for AJAX requests, the usual one otherwise."""
    def get_ajax_template_names(self):
        # Candidate templates used for XHR responses.
        return [self.ajax_template_name]
    def get_html_template_names(self):
        # Defer to the parent view's normal template resolution.
        return super(AjaxTemplateViewMixin, self).get_template_names()
    def get_template_names(self, *args, **kwargs):
        """Pick the template list based on the request type."""
        is_ajax = self.request.is_ajax()
        return self.get_ajax_template_names() if is_ajax else self.get_html_template_names()
class AnnualViewMixin(object):
    """Mixin adding per-year scoping and navigation to a view.

    Resolves the requested year from URL kwargs (defaulting to the most
    recent year with enough data) and exposes year navigation context.
    """
    # min days since jan 01 before the new year is considered interesting (lack of data)
    MIN_YEAR_DAYS = 7
    # list of available years
    years = None
    # selected year
    year = None
    # current year
    year_now = None
    @property
    def year_min(self):
        # Earliest year with ranking data.
        return self.years[0]
    @property
    def year_max(self):
        # Latest available year.
        return self.years[-1]
    def __init__(self, *args, **kwargs):
        self.year_now = timezone.now().year
        # get a range of all available years starting from the earliest one
        self.years = list(range(self.get_min_year() or self.year_now, self.year_now + 1))
        super(AnnualViewMixin, self).__init__(*args, **kwargs)
    def get(self, *args, **kwargs):
        """Resolve ``self.year`` from kwargs or pick a sensible default."""
        if not kwargs.get('year'):
            # skip the current year if its too early..
            if (timezone.now() - models.Rank.get_period_for_year(self.year_max)[0]).days < self.MIN_YEAR_DAYS and len(self.years) > 1:
                #..unless its the only year
                self.year = self.years[-2]
            else:
                self.year = self.years[-1]
        else:
            self.year = int(kwargs['year'])
        # raise 404 if the year is not in the list
        if self.year not in self.years:
            raise Http404(_('%(year)s is not a valid year.') % {'year': self.year })
        return super(AnnualViewMixin, self).get(*args, **kwargs)
    def get_context_data(self, *args, **kwargs):
        """Add year navigation entries to the template context."""
        context_data = super(AnnualViewMixin, self).get_context_data(*args, **kwargs)
        context_data.update({
            # get the 2 close years for navigation
            'years': list(reversed([year for year in self.years if abs(self.year - year) <= 1])),
            'year': self.year,
            'years_extreme': {
                'min': self.year_min,
                'max': self.year_max,
            },
            'year_now': self.year_now,
            'year_previous': (self.year - 1) if self.year > self.year_min else None,
            'year_next': (self.year + 1) if self.year < self.year_max else None,
        })
        return context_data
    @classmethod
    def get_min_year(cls):
        """Return the earliest year present in the Rank table (cached)."""
        # cache until tomorrow
        @cacheops.cached(timeout=(utils.tomorrow()-timezone.now()).seconds)
        def _get_min_year():
            return models.Rank.objects.aggregate(year=db.models.Min('year'))['year']
        return _get_min_year()
class FilterViewMixin(object):
    """Expose the current GET parameters (minus pagination) to templates."""
    def get_context_data(self, *args, **kwargs):
        # Work on a mutable copy; drop the paginator's own parameter so the
        # remaining filters can be re-appended to pagination links.
        query = self.request.GET.copy()
        query.pop('page', None)
        context_data = super(FilterViewMixin, self).get_context_data(*args, **kwargs)
        context_data['filters'] = query
        return context_data
class SummaryViewMixin(object):
    """Mixin that injects a periodic (weekly/monthly) summary into the context."""
    def get_context_data(self, *args, **kwargs):
        context_data = super(SummaryViewMixin, self).get_context_data(*args, **kwargs)
        context_data.update({
            'summary': self.get_summary(),
        })
        return context_data
    def get_summary(self):
        """Subclasses must return the summary payload for the current period."""
        raise NotImplementedError
    @classmethod
    def get_period(cls):
        """Return ``(title, start, end)`` for the most relevant summary period.

        Near a month boundary the monthly summary is preferred, near a week
        boundary the weekly one; mid-week/mid-month returns ``(None, None, None)``.
        """
        now = timezone.now()
        today = utils.today()
        weekday = now.isoweekday()
        # get the current month's date
        month = datetime.datetime(now.year, now.month, 1, tzinfo=timezone.utc)
        # get the current week's date
        week = today-datetime.timedelta(days=weekday-1)
        # display current month summary
        if now.day >= 25:
            return (_('Monthly Summary'), month, now)
        # display past month summary
        elif now.day <= 5:
            return (_('Monthly Summary'), month-relativedelta.relativedelta(months=1), now)
        # display current week summary
        elif weekday >= 4:
            return (_('Weekly Summary'), week, now)
        # display past week summary
        elif weekday <= 2:
            return (_('Weekly Summary'), week-datetime.timedelta(days=7), now)
        else:
            return (None,)*3
class FeaturedViewMixin(AnnualViewMixin):
    """Mixin providing a randomly-offset selection of high-scoring games."""
    # upper bound for the random starting offset into the queryset
    sample = 20
    # max number of featured games returned
    limit = 10
    # minimum game time to qualify (presumably seconds — confirm against model)
    min_time = 600
    # minimum combined (swat + suspects) score to qualify
    min_score = 200
    def get_context_data(self, *args, **kwargs):
        context_data = super(FeaturedViewMixin, self).get_context_data(*args, **kwargs)
        context_data.update({
            'featured': self.get_featured_games(),
        })
        return context_data
    def get_featured_period(self):
        # (start, end) datetimes covering the selected year.
        return models.Rank.get_period_for_year(self.year)
    def get_featured_games(self):
        """Return up to ``limit`` qualifying games starting at a random offset."""
        start, end = self.get_featured_period()
        if start is None:
            return None
        # get random offset
        offset = random.randint(0, self.sample)
        qs = (
            models.Game.objects
            .extra(
                select={
                    'score_total': 'score_swat + score_sus',
                    'score_avg': '(score_swat + score_sus) / time',
                },
                order_by=('-score_avg',),
                where=['score_swat + score_sus >= %s'],
                params=[self.min_score]
            )
            .filter(
                date_finished__gte=start,
                date_finished__lte=end,
                time__gte=self.min_time,
                player_num__gte=models.Profile.MIN_PLAYERS
            )
        )
        return qs[offset:offset+self.limit]
class StreamView(generic.View):
    """Endpoint receiving game stream data pushed by registered servers."""
    STATUS_OK = '0'
    STATUS_ERROR = '1'
    @staticmethod
    def status(request, code, messages=None):
        """
        Return an integer status code followed by an optional message.
        The status and message are delimited with a new line
        Examples:
            1. 0
            2. 0\nData has been accepted
            3. 1\nOutdated mod version. Please update to 1.2.3
            3. 1\nInvalid server key
            4. 1\nThe server is not registered
        """
        if not isinstance(messages, (list, tuple)):
            messages = [messages]
        # send the status code along with optional list of messages
        return HttpResponse(
            '\n'.join(map(force_text, filter(None, [code] + list(messages))))
        )
    @method_decorator(decorators.requires_valid_request(definitions.stream_pattern_node))
    @method_decorator(decorators.requires_authorized_source)
    def post(self, request):
        """Dispatch stream data to signal handlers and report their combined verdict."""
        logger.info('received stream data from %s:%s (%s)',
            request.stream_source.ip, request.stream_source.port, request.META['REMOTE_ADDR'])
        messages = []
        error = False
        # collect messages of the signal handlers
        response = stream_data_received.send_robust(
            sender=None,
            data=request.stream_data,
            server=request.stream_source,
            raw=request.stream_data_raw,
            request=request
        )
        # Fix: the loop variable was named "_", shadowing the module-level
        # gettext alias within this function; use an explicit name instead.
        for receiver, message in response:
            # response may itself be a list of messages
            if isinstance(message, (tuple, list)):
                messages.extend(message)
            # or an exception..
            elif isinstance(message, Exception):
                messages.append(str(message))
                error = True
            else:
                messages.append(message)
        status = StreamView.STATUS_OK if not error else StreamView.STATUS_ERROR
        return StreamView.status(request, status, messages)
    def get(self, request):
        """Display data streaming tutorial."""
        return render(request, 'tracker/chapters/stream/stream.html', {})
    @method_decorator(never_cache)
    @method_decorator(csrf_exempt)
    def dispatch(self, *args, **kwargs):
        return super(StreamView, self).dispatch(*args, **kwargs)
class MainView(SummaryViewMixin, FeaturedViewMixin, generic.ListView):
    """Front page: latest articles, featured games and a periodic summary."""
    template_name = 'tracker/chapters/main/main.html'
    model = models.Article
    sample = 0  # always return the same featured games
    # (title, aggregate expression, value formatter) triples driving the summary.
    # NOTE(review): the first entry uses ngettext_lazy('%d points', '%d points')
    # with identical singular/plural forms — confirm that is intentional.
    summary = (
        (
            _('Highest Score'),
            db.models.Sum('score'),
            lambda value: ngettext_lazy('%d points', '%d points') % value
        ),
        (
            _('Highest Playtime'),
            Sum(Case(When(game__gametype__in=definitions.MODES_VERSUS, then='time'))),
            lambda value: templatetags.humantime(value)
        ),
        (
            _('Best Round Score'),
            db.models.Max('score'),
            lambda value: ngettext_lazy('%d point', '%d points') % value
        ),
        (
            _('Most Kills'),
            db.models.Sum('kills'),
            lambda value: ngettext_lazy('%d kill', '%d kills') % value
        ),
        (
            _('Most Arrests'),
            db.models.Sum('arrests'),
            lambda value: ngettext_lazy('%d arrest', '%d arrests') % value
        ),
        (
            _('Highest Kill Streak'),
            db.models.Max('kill_streak'),
            lambda value: ngettext_lazy('%d kill', '%d kills') % value
        ),
    )
    def get_queryset(self, *args, **kwargs):
        """Display the latest 5 articles."""
        return self.model.published.latest(5)
    def get_featured_period(self):
        # Reuse the summary period's (start, end) for the featured games.
        return self.get_period()[1:]
    def get_summary(self):
        """Return the period title plus the best player for each summary category."""
        # cache until tomorrow
        @cacheops.cached(timeout=(utils.tomorrow()-timezone.now()).seconds)
        def _get_summary():
            summary = []
            period_title, start, end = self.get_period()
            if start is None:
                return None
            for title, agg_obj, translate in self.summary:
                try:
                    player = (
                        models.Player.objects.qualified(start, end)
                        .select_related('alias__profile')
                        .filter(alias__profile__name__isnull=False)
                        .values('alias__profile')
                        .annotate(num=agg_obj)
                        .filter(num__isnull=False)
                        .order_by('-num')[0:1]
                        .get()
                    )
                except ObjectDoesNotExist:
                    # no qualified player for this category — skip it
                    pass
                else:
                    summary.append({
                        'title': title,
                        'profile': player['alias__profile'],
                        'points': player['num'],
                        'points_translated': translate(player['num'])
                    })
            # prefetch profile instances
            qs = models.Profile.objects.select_related('loadout')
            pks = [entry['profile'] for entry in summary]
            prefetched = {obj.pk: obj for obj in list(qs.filter(pk__in=pks))}
            # replace profile pks with actual Profile instances
            for entry in summary:
                entry['profile'] = prefetched[entry['profile']]
            return {
                'title': period_title,
                'object_list': summary,
            }
        return _get_summary()
class TopListView(AnnualViewMixin, generic.ListView):
    """Top-5 player leaderboards for the selected year."""
    template_name = 'tracker/chapters/top/top.html'
    model = models.Rank
    # list of categories: (stat id, title, display type)
    # Fix: 'Score/Minute', 'Time Played' and 'CO-OP Score' were wrapped in
    # bare parentheses instead of the _() gettext call used by every other
    # title (cf. BoardListView.boards), leaving them untranslated.
    boards = (
        (STAT.SCORE, _('Score'), 'int'),
        (STAT.SPM, _('Score/Minute'), 'ratio'),
        (STAT.TIME, _('Time Played'), 'time'),
        (STAT.COOP_SCORE, _('CO-OP Score'), 'int'),
    )
    # limit of players per category
    limit = 5
    def get_queryset(self, *args, **kwargs):
        """Return ranked profiles for the selected year ordered by position."""
        return (
            super(TopListView, self).get_queryset(*args, **kwargs)
            .select_related('profile', 'profile__loadout', 'profile__game_last')
            .filter(position__isnull=False, year=self.year)
            .order_by('position')
        )
    def get_context_data(self, *args, **kwargs):
        context_data = super(TopListView, self).get_context_data(*args, **kwargs)
        context_data.update(self.get_objects())
        return context_data
    def get_objects(self):
        """Group the top-ranked entries by leaderboard category."""
        boards = []
        qs = (
            self.get_queryset()
            .filter(
                # get the ids of the specified categories
                category__in=map(lambda board: board[0], self.boards),
                position__lte=self.limit
            )
        )
        for leaderboard, title, type in self.boards:
            # get a list of ranked players for each specified leaderboard
            objects = []
            for obj in qs:
                # append profiles with the same leaderboard type
                if obj.category == leaderboard:
                    objects.append(obj)
            boards.append((definitions.STATS[leaderboard], title, type, objects))
        return {'boards': boards}
class BoardListView(TopListView):
    """Full paginated leaderboard for one selectable stat category."""
    template_name = 'tracker/chapters/leaderboard/leaderboard.html'
    paginate_by = 20
    boards = (
        # Group name:
        # stat id, human_name, stat display type
        #
        # the url/context name is obtained with the definitions.STATS dict
        [_('Score'), (
            (STAT.SCORE, _('Score'), 'int'),
            (STAT.TIME, _('Time Played'), 'hours'),
            (STAT.WINS, _('Wins'), 'int'),
            (STAT.SPM, _('Score/Minute'), 'ratio'),
            (STAT.SPR, _('Score/Round'), 'ratio'),
            (STAT.TOP_SCORE, _('Best Score'), 'int'),
        )],
        [_('Kills'), (
            (STAT.KILLS, _('Kills'), 'int'),
            (STAT.ARRESTS, _('Arrests'), 'int'),
            #(STAT.TOP_KILLS, _('Top Kills'), 'int'),
            #(STAT.TOP_ARRESTS, _('Top Arrests'), 'int'),
            (STAT.KDR, _('K/D Ratio'), 'ratio'),
            (STAT.AMMO_ACCURACY, _('Accuracy'), 'percent'),
            (STAT.KILL_STREAK, _('Best Kill Streak'), 'int'),
            (STAT.ARREST_STREAK, _('Best Arrest Streak'), 'int'),
        )],
        [_('VIP Escort'), (
            (STAT.VIP_ESCAPES, _('VIP Escapes'), 'int'),
            (STAT.VIP_CAPTURES, _('VIP Captures'), 'int'),
            (STAT.VIP_RESCUES, _('VIP Rescues'), 'int'),
            (STAT.VIP_KILLS_VALID, _('VIP Kills'), 'int'),
        )],
        [_('Rapid Deployment'), (
            (STAT.RD_BOMBS_DEFUSED, _('Bombs Disarmed'), 'int'),
        )],
        # [_('Smash and Grab'), (
        #     (STAT.SG_ESCAPES, _('Case Escapes'), 'int'),
        #     (STAT.SG_KILLS, _('Case Carrier Kills'), 'int'),
        # )],
        [_('CO-OP'), (
            (STAT.COOP_SCORE, _('Score'), 'int'),
            (STAT.COOP_TIME, _('Time Played'), 'hours'),
            (STAT.COOP_GAMES, _('Missions Attempted'), 'int'),
            (STAT.COOP_WINS, _('Missions Completed'), 'int'),
            (STAT.COOP_ENEMY_ARRESTS, _('Suspects Arrested'), 'int'),
            (STAT.COOP_ENEMY_KILLS, _('Suspects Neutralized'), 'int'),
            #(STAT.COOP_HOSTAGE_ARRESTS, _('Civilians Arrested'), 'int'),
        )],
    )
    # board shown when the URL specifies none
    board_name_default = 'score'
    def __init__(self, *args, **kwargs):
        super(BoardListView, self).__init__(*args, **kwargs)
        self.board_list = self.get_boards()
    def get(self, *args, **kwargs):
        """Set the active leaderboard."""
        board_name = self.kwargs.get('board_name')
        # set default
        if not board_name:
            board_name = self.get_default_board()
        # check the selected board name
        if board_name not in self.board_list:
            raise Http404
        # get the board details
        self.board = self.board_list[board_name]
        return super(BoardListView, self).get(*args, **kwargs)
    def get_queryset(self, *args, **kwargs):
        # Restrict the ranked entries to the active board's category.
        return (
            super(BoardListView, self).get_queryset(*args, **kwargs)
            .filter(category=self.board['id'])
        )
    def get_context_data(self, *args, **kwargs):
        context_data = super(BoardListView, self).get_context_data(*args, **kwargs)
        context_data.update({
            'board_list': self.board_list,
            'board': self.board,
        })
        return context_data
    @classmethod
    def get_boards(cls):
        """
        Return an ordered dict mapping stat categories to leaderboard extended details.
        """
        leadeboards = collections.OrderedDict()
        for category, boards in cls.boards:
            for (stat_id, title, type) in boards:
                # e.g. {'vip_kills': {'category': 'VIP Escort', ..},..}
                leadeboards[definitions.STATS[stat_id]] = {
                    'id': stat_id,
                    'name': definitions.STATS[stat_id],
                    'category': category,
                    'title': title,
                    'type': type,
                }
        return leadeboards
    def get_objects(self):
        # The paginated queryset itself is the board; no grouped context needed.
        return {}
    def get_default_board(self):
        return self.board_name_default
class GameListBaseView(FeaturedViewMixin, generic.ListView):
    """Base list view for games; shows fewer featured games than the mixin default."""
    # number of featured games to display
    limit = 5
class GameListView(FilterViewMixin, GameListBaseView):
    """Filterable, paginated history of finished games."""
    template_name = 'tracker/chapters/game/list_history.html'
    model = models.Game
    paginate_by = 50
    form_class = forms.GameFilterForm
    # bound filter form, populated per-request in get()
    form = None
    def get(self, request, *args, **kwargs):
        # Bind the filter form to the query string before the list is built.
        self.form = self.form_class(data=request.GET)
        return super(GameListView, self).get(request, *args, **kwargs)
    def get_context_data(self, *args, **kwargs):
        context_data = super(GameListView, self).get_context_data(*args, **kwargs)
        context_data.update({
            'form': self.form,
        })
        return context_data
    def get_queryset(self, *args, **kwargs):
        """Apply the bound filter form to the game queryset."""
        qs = super(GameListView, self).get_queryset(*args, **kwargs)
        # only do further lookup if the form is bound and valid
        if not self.form.is_valid():
            return qs.none()
        # filter by map
        if self.form.cleaned_data.get('mapname'):
            qs = qs.filter(mapname=self.form.cleaned_data['mapname'])
        # filter by gametime (form value is minutes; the field stores seconds)
        if self.form.cleaned_data.get('gametime'):
            qs = qs.filter(time__gte=self.form.cleaned_data['gametime']*60)
        # filter by outcome
        if self.form.cleaned_data.get('outcome'):
            qs = qs.filter(outcome=self.form.cleaned_data['outcome'])
        # filter by gametype
        if self.form.cleaned_data.get('gametype'):
            qs = qs.filter(gametype=self.form.cleaned_data['gametype'])
        # filter by server
        if self.form.cleaned_data.get('server'):
            qs = qs.filter(server=self.form.cleaned_data['server'])
        # filter by participated players
        if self.form.cleaned_data.get('players'):
            for name in self.form.cleaned_data['players']:
                qs = qs.filter(player__alias__name__iexact=name)
        # filter by year
        if self.form.cleaned_data.get('year'):
            qs = qs.filter(date_finished__year=self.form.cleaned_data['year'])
        # filter by month
        if self.form.cleaned_data.get('month'):
            qs = qs.filter(date_finished__month=self.form.cleaned_data['month'])
        # filter by day
        if self.form.cleaned_data.get('day'):
            qs = qs.filter(date_finished__day=self.form.cleaned_data['day'])
        # newest first; caching/distinct were considered here previously
        return qs.order_by('-date_finished')  # .distinct() #.cache()
class GameOnlineListView(GameListBaseView):
    """List of servers with a game currently in progress."""
    template_name = 'tracker/chapters/game/list_online.html'
    model = models.Server
    paginate_by = 50
    def get_queryset(self):
        """Return a list of ServerStatus objects."""
        # NOTE(review): '[1-9]*' appears to be a pattern understood by the
        # custom status manager (non-zero player count) — confirm its
        # semantics against the manager implementation.
        return self.model.objects.status.filter(player_num='[1-9]*').sort('-player_num')
class GameDetailView(generic.DetailView):
    """Detail page for a single finished game, with per-mode templates."""
    TEMPLATE_DEFAULT = 'tracker/chapters/game/detail.html'
    TEMPLATE_MODE = 'tracker/chapters/game/detail_mode%(mode)s.html'
    pk_url_kwarg = 'game_id'
    model = models.Game
    # "best player" categories: common ones under 'all', plus per-gametype extras
    categories = {
        'all': (
            ('score', _('Highest Score'), ngettext_lazy('%d point', '%d points')),
            ('kills', _('Most Kills'), ngettext_lazy('%d kill', '%d kills')),
            ('arrests', _('Most Arrests'), ngettext_lazy('%d arrest', '%d arrests')),
            ('ammo_accuracy', _('Highest Accuracy'), _('%d%%')),
            ('ammo_shots', _('Most Ammo Fired'), ngettext_lazy('%d bullet', '%d bullets')),
            ('kill_streak', _('Highest Kill Streak'), ngettext_lazy('%d kill', '%d kills')),
            ('arrest_streak', _('Highest Arrest Streak'), ngettext_lazy('%d arrest', '%d arrests')),
        ),
        definitions.MODE_VIP: (
            ('vip_captures', _('Most VIP captures'), ngettext_lazy('%d capture', '%d captures')),
            ('vip_rescues', _('Most VIP rescues'), ngettext_lazy('%d rescue', '%d rescues')),
        ),
    }
    def get_template_names(self, *args, **kwargs):
        # Prefer a gametype-specific template, falling back to the default.
        return [self.TEMPLATE_MODE % {'mode': self.object.gametype}, self.TEMPLATE_DEFAULT]
    def get_context_data(self, *args, **kwargs):
        """Assemble player groupings, best-player awards and CO-OP extras."""
        players = sorted(
            models.Player.objects.prefetched().filter(game=self.object.pk),
            # sort by score, kills, arrests, -deaths
            key=lambda player: (player.score, player.kills, player.arrests, -player.deaths),
            reverse=True
        )
        # pick players that finished the game
        players_online = [player for player in players if not player.dropped]
        # pick the ones that have dropped
        players_dropped = [player for player in players if player.dropped]
        context_data = super(GameDetailView, self).get_context_data(*args, **kwargs)
        context_data.update({
            'players': players,
            'players_online': players_online,
            'players_dropped': players_dropped,
            'players_blue': [player for player in players_online if player.team == definitions.TEAM_BLUE],
            'players_red': [player for player in players_online if player.team == definitions.TEAM_RED],
            'players_best': self.get_best_players(players),
            'games_close': self.get_close_games(),
        })
        # coop specific details
        if self.object.coop_game:
            procedures = self.object.procedure_set.all()
            procedures_bonus = list(filter(
                lambda procedure: procedure.name_translated.startswith('bonus'),
                procedures
            ))
            procedures_penalty = list(filter(
                lambda procedure: procedure.name_translated.startswith('penalty') and procedure.score,
                procedures
            ))
            score_bonus = utils.calc_coop_score(procedures_bonus)
            score_penalty = utils.calc_coop_score(procedures_penalty)
            context_data.update({
                'objectives': self.object.objective_set.all(),
                'procedures': {
                    'bonus': {'list': procedures_bonus, 'score': score_bonus},
                    'penalty': {'list': procedures_penalty, 'score': score_penalty},
                },
                # normal: 0-100
                'coop_rank': self.get_coop_rank(self.object.coop_score_normal),
            })
        return context_data
    def get_queryset(self, *args, **kwargs):
        return (super(GameDetailView, self).get_queryset(*args, **kwargs).select_related('server'))
    def get_close_games(self):
        "Return the games preceding and following this one."
        qs = models.Game.objects.filter(server=self.object.server)
        return {
            'previous': qs.filter(pk__lt=self.object.pk).order_by('-pk').first(),
            'next': qs.filter(pk__gt=self.object.pk).order_by('pk').first(),
        }
    def get_best_players(self, players):
        """Return the best player per category (common + gametype-specific)."""
        categories = []
        best = []
        # append common stats
        categories.extend(self.categories['all'])
        # append mode specific stats
        if self.object.gametype in self.categories:
            categories.extend(self.categories[self.object.gametype])
        for category, category_translated, points_translated in categories:
            sortable = sorted(players, key=utils.sort_key(category), reverse=True)
            player = next(iter(sortable), None)
            if player:
                points = getattr(player, category)
                # only add the player if he/she has actually earned some points
                if points:
                    best.append({
                        'category': category,
                        'category_translated': category_translated,
                        'player': player,
                        'points': points,
                        'points_translated': points_translated % points,
                    })
        # shuffle so the display order varies between page loads
        random.shuffle(best)
        return best
    @staticmethod
    def get_coop_rank(score):
        """Map a normalized CO-OP score (expected 0-100) to a rank title."""
        if score >= 100:
            return _('Chief Inspector')
        elif score >= 95:
            return _('Inspector')
        elif score >= 90:
            return _('Captain')
        elif score >= 85:
            return _('Lieutenant')
        elif score >= 80:
            return _('Sergeant')
        elif score >= 75:
            return _('Patrol Officer')
        elif score >= 70:
            return _('Reserve Officer')
        elif score >= 60:
            return _('Non-sworn Officer')
        elif score >= 50:
            return _('Recruit')
        elif score >= 35:
            return _('Washout')
        elif score >= 20:
            return _('Vigilante')
        elif score >= 0:
            return _('Menace')
        # negative scores indicate an impossible result
        return _('Cheater')
class ServerListView(AjaxTemplateViewMixin, FilterViewMixin, generic.ListView):
    """Filterable list of game servers built from cached status data."""
    template_name = 'tracker/chapters/server/list.html'
    ajax_template_name = 'tracker/chapters/server/list_ajax.html'
    model = models.Server
    form_class = forms.ServerFilterForm
    # bound filter form, populated per-request in get()
    form = None
    @method_decorator(never_cache)
    def dispatch(self, *args, **kwargs):
        # Status data changes constantly; never serve a cached page.
        return super(ServerListView, self).dispatch(*args, **kwargs)
    def get(self, request, *args, **kwargs):
        self.form = self.form_class(data=request.GET)
        return super(ServerListView, self).get(request, *args, **kwargs)
    def get_queryset(self, *args, **kwargs):
        """Translate the bound form into status-manager filters."""
        if not self.form.is_valid():
            return super(ServerListView, self).get_queryset(*args, **kwargs).none()
        # assemble filters
        filters = {}
        # filter empty servers
        if self.form.cleaned_data.get('filter_empty'):
            filters['is_empty'] = False
        # filter full servers
        if self.form.cleaned_data.get('filter_full'):
            filters['is_full'] = False
        # filter password protected servers
        if self.form.cleaned_data.get('filter_passworded'):
            filters['passworded'] = False
        # filter by game label (SWAT4, SWAT4X)
        if self.form.cleaned_data.get('gamename'):
            filters['gamename'] = utils.escape_cache_key(self.form.cleaned_data['gamename'])
        # filter by game version (1.0, 1.1)
        if self.form.cleaned_data.get('gamever'):
            filters['gamever'] = utils.escape_cache_key(self.form.cleaned_data['gamever'])
        # filter servers by gametype (VIP, BS, etc)
        if self.form.cleaned_data.get('gametype'):
            filters['gametype'] = utils.escape_cache_key(self.form.cleaned_data['gametype'])
        # pinned servers first, then by descending player count
        return self.model.objects.status.filter(**filters).sort('-pinned', '-player_num')
    def get_context_data(self, *args, **kwargs):
        context_data = super(ServerListView, self).get_context_data(*args, **kwargs)
        context_data.update({
            'form': self.form,
        })
        return context_data
class ServerDetailView(AjaxTemplateViewMixin, generic.DetailView):
    """Live detail page for a single server, keyed by ip:port."""
    TEMPLATE_DEFAULT = 'tracker/chapters/server/detail.html'
    TEMPLATE_MODE = 'tracker/chapters/server/detail_mode%(mode)s.html'
    AJAX_TEMPLATE_DEFAULT = 'tracker/chapters/server/detail_ajax.html'
    AJAX_TEMPLATE_MODE = 'tracker/chapters/server/detail_mode%(mode)s_ajax.html'
    model = models.Server
    class ServerNotAvailable(Exception):
        # Raised when the server is disabled/unlisted or has no cached status.
        pass
    @method_decorator(never_cache)
    def dispatch(self, *args, **kwargs):
        return super(ServerDetailView, self).dispatch(*args, **kwargs)
    def get_html_template_names(self, *args, **kwargs):
        # Prefer a gametype-specific template, falling back to the default.
        return [
            self.TEMPLATE_MODE % {'mode': self.status.gametype},
            self.TEMPLATE_DEFAULT
        ]
    def get_ajax_template_names(self):
        return [
            self.AJAX_TEMPLATE_MODE % {'mode': self.status.gametype},
            self.AJAX_TEMPLATE_DEFAULT
        ]
    def get(self, request, *args, **kwargs):
        """Render the detail page, or a placeholder/404 if the server is unavailable."""
        try:
            response = super(ServerDetailView, self).get(request, *args, **kwargs)
        except self.ServerNotAvailable:
            # Override ajax response so it returns 404 in case of an error
            if self.request.is_ajax():
                raise Http404('The server is not available.')
            return render(request, 'tracker/chapters/server/cap.html', {})
        else:
            return response
    def get_object(self, *args, **kwargs):
        """Fetch the server by ip:port URL kwargs and cache its live status."""
        ip = self.kwargs.get('server_ip')
        port = self.kwargs.get('server_port')
        if not (ip and port):
            raise AttributeError
        try:
            obj = self.get_queryset().get(ip=ip, port=port)
        except ObjectDoesNotExist:
            raise Http404('No server found matching ip=%(ip)s, port=%(port)s' % {'ip': ip, 'port': port})
        if not (obj.enabled and obj.listed):
            raise self.ServerNotAvailable
        # attempt to fetch cached server status
        self.status = obj.status
        if not self.status:
            raise self.ServerNotAvailable
        return obj
    def get_context_data(self, *args, **kwargs):
        context_data = super(ServerDetailView, self).get_context_data(*args, **kwargs)
        # sort players by score, kills, arrests, -deaths
        players = sorted(
            self.status.players,
            key=lambda player: (player.get('score', 0), player.get('kills', 0), player.get('arrests', 0), -player.get('deaths', 0)),
            reverse=True
        )
        context_data.update({
            'status': self.status,
            'players': players,
            'players_blue': [player for player in players if player.get('team', 0) == definitions.TEAM_BLUE],
            'players_red': [player for player in players if player.get('team', 0) == definitions.TEAM_RED],
        })
        return context_data
class ProfileBaseView(AnnualViewMixin, generic.DetailView):
RECENT_TIME = 60*60*24
RECENT_MAX = 50
RECENT_MAX_MAPS = 5
award_list = (
(STAT.SPM,
ngettext_lazy(
'Best score/minute ratio in %(year)s',
'%(ordinal)s best score/minute ratio in %(year)s',
'position'
)
),
(STAT.SPR,
ngettext_lazy(
'Best score/round ratio in %(year)s',
'%(ordinal)s best score/round ratio in %(year)s',
'position'
)
),
(STAT.KDR,
ngettext_lazy(
'Best kills/deaths ratio in %(year)s',
'%(ordinal)s best kills/deaths ratio in %(year)s',
'position'
)
),
(STAT.AMMO_ACCURACY,
ngettext_lazy(
'Highest accuracy in %(year)s',
'%(ordinal)s highest accuracy in %(year)s',
'position'
)
),
(STAT.SCORE,
ngettext_lazy(
'Highest score in %(year)s',
'%(ordinal)s highest score in %(year)s',
'position'
)
),
(STAT.TOP_SCORE,
ngettext_lazy(
'Highest round score in %(year)s',
'%(ordinal)s highest round score in %(year)s',
'position'
)
),
(STAT.TIME,
ngettext_lazy(
'Highest playtime in %(year)s',
'%(ordinal)s highest playtime in %(year)s',
'position'
)
),
(STAT.KILLS,
ngettext_lazy(
'Most kills in %(year)s',
'%(ordinal)s most kills in %(year)s',
'position'
)
),
(STAT.ARRESTS,
ngettext_lazy(
'Most arrests in %(year)s',
'%(ordinal)s most arrests in %(year)s',
'position'
)
),
(STAT.KILL_STREAK,
ngettext_lazy(
'Highest kill streak in %(year)s',
'%(ordinal)s highest kill streak in %(year)s',
'position'
)
),
(STAT.VIP_ESCAPES,
ngettext_lazy(
'Most VIP escapes in %(year)s',
'%(ordinal)s most VIP escapes in %(year)s',
'position'
)
),
(STAT.VIP_CAPTURES,
ngettext_lazy(
'Most VIP captures in %(year)s',
'%(ordinal)s most VIP captures in %(year)s',
'position'
)
),
(STAT.VIP_RESCUES,
ngettext_lazy(
'Most VIP rescues in %(year)s',
'%(ordinal)s most VIP rescues in %(year)s',
'position'
)
),
(STAT.COOP_SCORE,
ngettext_lazy(
'Highest CO-OP score in %(year)s',
'%(ordinal)s highest CO-OP score in %(year)s',
'position'
)
),
(STAT.COOP_WINS,
ngettext_lazy(
'Most CO-OP missions completed in %(year)s',
'%(ordinal)s most CO-OP missions completed in %(year)s',
'position'
)
),
(STAT.COOP_TIME,
ngettext_lazy(
'Highest CO-OP playtime in %(year)s',
'%(ordinal)s highest CO-OP playtime in %(year)s',
'position'
)
),
)
# max position which can be nominated for an award
award_max_position = 5
class ProfileNotPrepared(Exception):
pass
model = models.Profile
pk_url_kwarg = 'profile_id'
def get(self, request, *args, **kwargs):
"""
Retrive the requested profile entry.
If the entry appears to be empty (i.e. no popular name, loadout, etc set),
return a "Profile not available error page".
"""
try:
response = super(ProfileBaseView, self).get(request, *args, **kwargs)
except self.ProfileNotPrepared:
return render(request, 'tracker/chapters/profile/cap.html', {})
else:
# redirect to the latest avaiable profile.. unless a year is specified
if not kwargs.get('year') and self.object.last_seen.year != self.year_now:
return HttpResponseRedirect(
templatetags.profile_url(self.object, request.resolver_match.view_name, **{'year': self.object.last_seen.year})
)
return response
def get_queryset(self, *args, **kwargs):
return (super(ProfileBaseView, self).get_queryset(*args, **kwargs)
.select_related('loadout', 'game_first', 'game_last')
)
def get_object(self, *args, **kwargs):
"""
Obtain the object instance by calling the parent get_object method.
In case the profile object is not considered popular
(i.e. it has no popular name or team set), raise ProfileBaseView.ProfileNotPrepared.
"""
obj = super(ProfileBaseView, self).get_object(*args, **kwargs)
if not obj.popular:
raise self.ProfileNotPrepared
return obj
def get_context_data(self, *args, **kwargs):
# limit the years list with the range of years the player played in
self.years = list(range(self.object.first_seen.year, self.object.last_seen.year + 1))
context_data = super(ProfileBaseView, self).get_context_data(*args, **kwargs)
context_data.update({
'recent': self.get_recent_games(),
'award': self.get_award(),
#'all': self.object.aggregate_mode_stats(models.Profile.SET_STATS_ALL, *models.Rank.get_period_for_now()),
})
return context_data
def get_games(self):
"""Return a queryset of profile related games sorted by id in descending order."""
return (models.Game.objects
.filter(player__alias__profile=self.object)
.order_by('-date_finished')
.distinct('pk', 'date_finished')
)
def get_recent_games(self):
    """
    Return a LIST of the latest profile related games, limited both by
    ProfileBaseView.RECENT_MAX (game count) and ProfileBaseView.RECENT_MAX_MAPS
    (number of distinct maps) — whichever limit is hit first.

    The result is cached per profile for 5 minutes.
    """
    @cacheops.cached(timeout=60*5, extra=self.object.pk)
    def _get_recent_games():
        recent = []
        # only consider games within RECENT_TIME seconds of the last game played
        min_date = self.object.game_last.date_finished - datetime.timedelta(seconds=self.RECENT_TIME)
        games = self.get_games().filter(date_finished__gte=min_date)[:self.RECENT_MAX]
        # limit by number of maps
        maps = set()
        for game in games:
            maps.add(game.mapname)
            recent.append(game)
            if len(maps) >= self.RECENT_MAX_MAPS:
                break
        return recent
    return _get_recent_games()
def get_stats(self):
    """
    Return a DICT mapping stat category names to the profile's cached Rank
    points for the selected year.

    Categories the player has no Rank row for default to 0.0.

    Example:
        {'score': 123, 'kills': 345, ...}
    """
    # collect stored points keyed by category id
    stored = {}
    for score in self.object.rank_set.filter(year=self.year):
        stored[score.category] = score.points
    # turn the id => name tuple into a dict, then map every category name to
    # its stored points; dict.get avoids the double lookup of the original
    # `stored[key] if key in stored else 0.0` form
    mapping = dict(definitions.STATS)
    return {name: stored.get(category, 0.0) for category, name in six.iteritems(mapping)}
def get_award(self):
    """
    Return the first matching ranking position from the leaderboards
    specified in `award_list`, as a dict with a rendered 'title' and the
    Rank 'obj'; None when the profile holds no qualifying position.

    The underlying Rank lookup is cached per profile for an hour.
    """
    @cacheops.cached(timeout=60*60, extra=self.object.pk)
    def _get_award():
        # get the category ids
        categories = tuple(map(lambda entry: entry[0], self.award_list))
        qs = (models.Rank.objects
            .filter(
                profile=self.object,
                position__lte=self.award_max_position,
                category__in=categories,
            )
        )
        # sort entries of the same position by award_list order in ascending order and years in descending order
        # return the very first entry
        key = lambda entry: (entry.position, categories.index(entry.category), -entry.year)
        return next(iter(sorted(qs, key=key)), None)
    award = _get_award()
    if award:
        # translate the winning entry into its display title
        for category, text in self.award_list:
            if award.category == category:
                return {
                    'title': text % {
                        'ordinal': humanize.ordinal(award.position),
                        'position': award.position,
                        'year': award.year
                    },
                    'obj': award,
                }
    return None
class ProfileDetailView(ProfileBaseView):
    """Profile overview page: rank, cached yearly stats and the player's best games."""
    template_name = 'tracker/chapters/profile/overview.html'

    def get_context_data(self, *args, **kwargs):
        """Extend the base context with rank, stats, best-game highlights and map data."""
        stats = self.get_stats()
        maps = []  # self.get_maps() — map aggregation currently disabled
        context_data = super(ProfileDetailView, self).get_context_data(*args, **kwargs)
        context_data.update({
            # calculate rank
            'rank': utils.Rank(definitions.RANKS, stats['score']),
            # combine rank stats with weapon based stats
            'stats': stats,
            # get the players best games: (label, game, description) triples
            'best': (
                (
                    _('First Appearance'),
                    self.object.game_first,
                    humanize.naturaltime(self.object.game_first.date_finished)
                ),
                (
                    _('Best Score'),
                    self.get_best_game('score', stats['top_score']),
                    (ngettext_lazy('%(points)s point', '%(points)s points', int(stats['top_score']))
                        % {'points': int(stats['top_score'])}
                    )
                ),
                (
                    _('Top Kills'),
                    self.get_best_game('kills', stats['top_kills']),
                    (ngettext_lazy('%(points)d kill', '%(points)d kills', int(stats['top_kills']))
                        % {'points': int(stats['top_kills'])}
                    )
                ),
                (
                    _('Top Arrests'),
                    self.get_best_game('arrests', stats['top_arrests']),
                    (ngettext_lazy('%(points)d arrest', '%(points)d arrests', int(stats['top_arrests']))
                        % {'points': int(stats['top_arrests'])}
                    )
                ),
                (
                    _('Best Kill Streak'),
                    self.get_best_game('kill_streak', stats['kill_streak']),
                    (ngettext_lazy('%(points)d kill in a row', '%(points)d kills in a row', int(stats['kill_streak']))
                        % {'points': int(stats['kill_streak'])}
                    )
                ),
                (
                    _('Best Arrest Streak'),
                    self.get_best_game('arrest_streak', stats['arrest_streak']),
                    (ngettext_lazy('%(points)d arrest in a row', '%(points)d arrests in a row', int(stats['arrest_streak']))
                        % {'points': int(stats['arrest_streak'])}
                    )
                ),
            ),
            # maps + best maps
            'map_list': maps,
            'map_best': utils.rank_dicts(maps),
            # get player wide max ratio values
            'max': self.get_max(),
        })
        return context_data

    def get_maps(self):
        """Aggregate per-map stats for the selected year (versus modes only).

        Returns a list of dicts keyed by 'mapname'; cached for an hour.
        """
        # aggregation expressions; Case/When restricts several of them to
        # versus game modes while totals like score/kills count everything
        items = {
            'time': (
                Sum(
                    Case(When(game__gametype__in=definitions.MODES_VERSUS, then='time'))
                )
            ),
            'games': (
                Count(
                    Case(When(game__gametype__in=definitions.MODES_VERSUS, then='game')),
                    distinct=True
                )
            ),
            'overall_score': Sum('score'),
            'best_score': Max('score'),
            'kills': Sum('kills'),
            'deaths': (
                Sum(
                    Case(When(game__gametype__in=definitions.MODES_VERSUS, then='deaths'))
                )
            ),
            # a win is a game whose outcome favours the team the player was on
            'wins': (
                Count(
                    Case(
                        When(
                            Q(team=definitions.TEAM_BLUE, game__outcome__in=definitions.SWAT_GAMES) |
                            Q(team=definitions.TEAM_RED, game__outcome__in=definitions.SUS_GAMES),
                            then='game'
                        ),
                    ),
                    distinct=True
                )
            ),
            'losses': (
                Count(
                    Case(
                        When(
                            Q(team=definitions.TEAM_BLUE, game__outcome__in=definitions.SUS_GAMES) |
                            Q(team=definitions.TEAM_RED, game__outcome__in=definitions.SWAT_GAMES),
                            then='game'
                        )
                    ),
                    distinct=True
                )
            ),
        }
        @cacheops.cached(timeout=60*60, extra=(self.object.pk, self.year))
        def _get_maps():
            period = models.Rank.get_period_for_year(self.year)
            aggregated = self.object.aggregate(items, *period, group_by='game__mapname', group_by_as='mapname')
            # drop maps the player has no versus games on
            return [item for item in aggregated if item['games']]
        return _get_maps()

    def get_best_game(self, field, points):
        """
        Return the Game of a Player instance with `field` equal to `points`,
        or None when `points` is not positive or no such game exists.

        Cached per (profile, field, year) for an hour.
        """
        @cacheops.cached(timeout=60*60, extra=(self.object.pk, field, self.year))
        def _get_best_game():
            try:
                # zero points means the stat was never earned — nothing to show
                assert points > 0
                period = models.Rank.get_period_for_year(self.year)
                player = (self.object.qualified(*period)
                    .select_related('game')
                    .filter(**{field: points})[:1]
                    .get()
                )
            except (AssertionError, ObjectDoesNotExist):
                return None
            else:
                return player.game
        return _get_best_game()

    def get_best_weapon(self, aggregated):
        """Return the weapon dict with the most kills (None for an empty mapping)."""
        return next(iter(sorted(six.itervalues(aggregated), key=lambda weapon: weapon['kills'], reverse=True)), None)

    def get_max(self, *categories):
        """Return the max values for the K/D and S/M stats across all profiles this year.

        Cached globally for a day.
        """
        @cacheops.cached(timeout=60*60*24)
        def _get_max():
            return (
                models.Rank.objects
                .filter(year=self.year)
                .aggregate(
                    spm=Max(Case(When(category=STAT.SPM, then='points'))),
                    kdr=Max(Case(When(category=STAT.KDR, then='points')))
                )
            )
        return _get_max()
class ProfileWeaponListView(ProfileBaseView):
    """Profile equipment page: per-weapon usage stats and favourite loadout."""
    template_name = 'tracker/chapters/profile/equipment.html'

    def get_context_data(self, *args, **kwargs):
        # get a list of used weapons (ie a weapon with at least one kill or a shot)
        weapons = {
            weapon: stats for weapon, stats in six.iteritems(self.get_weapons())
            if stats['kills'] or stats['shots']
        }
        # sort primary and secondary weapons by accuracy
        primary = sorted(
            self.filter_weapons(definitions.WEAPONS_PRIMARY, weapons).values(),
            key=lambda weapon: weapon['accuracy'],
            reverse=True
        )
        secondary = sorted(
            self.filter_weapons(definitions.WEAPONS_SECONDARY, weapons).values(),
            key=lambda weapon: weapon['accuracy'],
            reverse=True
        )
        # sort tactical weapons by number of shots
        tactical = sorted(
            self.filter_weapons(definitions.WEAPONS_TACTICAL, weapons).values(),
            key=lambda weapon: weapon['shots'],
            reverse=True
        )
        context_data = super(ProfileWeaponListView, self).get_context_data(*args, **kwargs)
        context_data.update({
            'primary': primary,
            'primary_best': utils.rank_dicts(primary),
            'secondary': secondary,
            'secondary_best': utils.rank_dicts(secondary),
            'tactical': tactical,
            'tactical_best': utils.rank_dicts(tactical),
            # retrieve the most popular loadout for the selected year
            # unless the selected year is the current year
            'loadout': self.object.loadout if (self.year == self.year_now) else self.get_favourite_loadout(),
        })
        return context_data

    def get_weapons(self):
        """
        Return weapon usage stats dict for non-COOP gamemodes.
        Example:
            {0: {'name': 0, 'kills': 123, ...}, 1: {...}, ...}

        Cached per (profile, year) for a day.
        """
        @cacheops.cached(timeout=60*60*24, extra=(self.object.pk, self.year))
        def _get_weapons():
            aggregated = self.object.aggregate_weapon_stats(
                *models.Rank.get_period_for_year(self.year),
                filters={'game__gametype__in': definitions.MODES_VERSUS}
            )
            return aggregated
        return _get_weapons()

    def get_favourite_loadout(self):
        """Return the profile's most used loadout for the selected year (cached for a day)."""
        @cacheops.cached(timeout=60*60*24, extra=(self.object.pk, self.year))
        def _get_favourite_loadout():
            return self.object.fetch_popular_loadout(year=self.year)
        return _get_favourite_loadout()

    @staticmethod
    def filter_weapons(pattern, weapons):
        """Return the subset of `weapons` whose (int) code appears in `pattern`."""
        filtered = {}
        for weapon in weapons:
            # check whether the weapon code is in the unmapped pattern tuple
            if int(weapon) in pattern:
                filtered[weapon] = weapons[weapon]
        return filtered
class ProfileCoopDetailView(ProfileBaseView):
    """Profile COOP page: per-mission stats annotated with the favourite loadout."""
    template_name = 'tracker/chapters/profile/coop.html'

    def get_context_data(self, *args, **kwargs):
        context_data = super(ProfileCoopDetailView, self).get_context_data(*args, **kwargs)
        maps = self.get_maps()
        equipment = self.get_favourite_equipment()
        # annotate with fav equipment
        for item in maps:
            item['loadout'] = equipment[item['mapname']] if item['mapname'] in equipment else None
        context_data.update({
            'stats':self.get_stats(),
            'maps': maps,
        })
        return context_data

    def get_maps(self):
        """Aggregate per-mission COOP stats for the selected year (cached for an hour)."""
        items = {
            # best coop score
            'score_best': (
                Max('game__coop_score')
            ),
            # average coop score
            'score_avg': (
                Avg('game__coop_score')
            ),
            # average mission time
            'time_avg': (
                Avg(
                    Case(When(game__outcome__in=definitions.COMPLETED_MISSIONS, then='game__time'))
                )
            ),
            # best mission time
            'time_best': (
                Min(
                    Case(When(game__outcome__in=definitions.COMPLETED_MISSIONS, then='game__time'))
                )
            ),
            # total number of missions completed
            'missions_completed': (
                Count(
                    Case(When(game__outcome__in=definitions.COMPLETED_MISSIONS, then='game')),
                    distinct=True
                )
            ),
            # total number of missions failed
            'missions_failed': (
                Count(
                    Case(When(game__outcome__in=definitions.FAILED_MISSIONS, then='game')),
                    distinct=True
                )
            ),
        }
        @cacheops.cached(timeout=60*60, extra=(self.object.pk, self.year))
        def _coop_get_maps():
            aggregated = self.object.aggregate(
                items,
                *models.Rank.get_period_for_year(self.year),
                group_by='game__mapname',
                group_by_as='mapname',
                filters={'game__gametype__in': definitions.MODES_COOP, 'dropped': False}
            )
            # a missing best time means no completed mission on that map — skip it
            return [item for item in aggregated if item['time_best']]
        return _coop_get_maps()

    def get_favourite_equipment(self):
        """Return a mapname => Loadout dict of the player's most used COOP loadouts.

        Cached per (profile, year) for an hour.
        """
        @cacheops.cached(timeout=60*60, extra=(self.object.pk, self.year))
        def _coop_get_favourite_equipment():
            maps = {}
            period = models.Rank.get_period_for_year(self.year)
            aggregated = (self.object
                .qualified(*period)
                .filter(game__gametype__in=definitions.MODES_COOP)
                .values('game__mapname', 'loadout')
                .annotate(count=db.models.Count('loadout')).order_by('-count')
            )
            for item in aggregated:
                # filter out None entries
                if item['game__mapname'] is None or item['loadout'] is None:
                    continue
                # since the aggregate query is ordered by loadout count,
                # we get the most popular loadout for every first occurence of a map
                if item['game__mapname'] not in maps:
                    maps[item['game__mapname']] = item['loadout']
            # prefetch Loadout objects
            # construct a pk => Loadout object dict from the queryset
            prefetched = {
                obj.pk: obj for obj in models.Loadout.objects.filter(pk__in=maps.values())
            }
            return {
                mapname: prefetched[pk] for mapname, pk in six.iteritems(maps)
            }
        return _coop_get_favourite_equipment()
class ProfileRankingListView(ProfileBaseView, generic.list.MultipleObjectMixin):
    """Profile ranking page: the player's standing on every leaderboard for the year."""
    template_name = 'tracker/chapters/profile/ranking.html'

    def get_context_data(self, *args, **kwargs):
        self.object_list = self.get_ranks()
        context_data = super(ProfileRankingListView, self).get_context_data(*args, **kwargs)
        context_data.update({
            'rank_list': self.object_list,
        })
        return context_data

    def get_ranks(self):
        """Return the leaderboard dict annotated with this profile's points and position."""
        categories = []
        # borrow the board list from the Leaderboard view
        ranks = BoardListView.get_boards()
        # set defaults and acquire a list of categories
        for details in six.itervalues(ranks):
            details.update({
                'points': 0,
                'position': None,
            })
            categories.append(details['id'])
        entries = (models.Rank.objects
            .filter(
                profile=self.object,
                year=self.year,
                category__in=categories
            )
        )
        # set the extra 'points' and 'position' dict items for leaderboards the player has been ranked in
        for entry in entries:
            ranks[definitions.STATS[entry.category]].update({
                'points': entry.points,
                'position': entry.position,
            })
        return ranks
class ProfileHistoryListView(ProfileBaseView, generic.list.MultipleObjectMixin):
    """Profile history page: paginated list of the player's games for the year."""
    template_name = 'tracker/chapters/profile/history.html'
    paginate_by = 50
    object_list = None

    def get_context_data(self, *args, **kwargs):
        start, end = models.Rank.get_period_for_year(self.year)
        # for paginator: restrict the game list to the selected year's period
        self.object_list = (self.get_games()
            .filter(date_finished__gte=start, date_finished__lte=end)
        )
        context_data = super(ProfileHistoryListView, self).get_context_data(*args, **kwargs)
        return context_data
class PlayerSearchView(FilterViewMixin, generic.ListView):
    """Search page: find player profiles (aliases) by partial name match."""
    template_name = 'tracker/chapters/search/search.html'
    model = models.Alias
    paginate_by = 20
    form_class = forms.PlayerSearchForm
    form = None

    @method_decorator(never_cache)
    def dispatch(self, *args, **kwargs):
        return super(PlayerSearchView, self).dispatch(*args, **kwargs)

    def get(self, request, *args, **kwargs):
        # bind the search form to the GET querystring (unbound when empty)
        self.form = self.form_class(data=request.GET or None)
        return super(PlayerSearchView, self).get(request, *args, **kwargs)

    def get_queryset(self, *args, **kwargs):
        """Return aliases matching the searched name, most recently active first.

        An invalid (or empty) form yields an empty queryset.
        """
        qs = super(PlayerSearchView, self).get_queryset(*args, **kwargs)
        if not self.form.is_valid():
            return qs.none()
        # search by player name
        return (qs
            .select_related('profile', 'profile__game_last', 'profile__loadout')
            .filter(
                name__icontains=self.form.cleaned_data['player'],
                profile__game_last__isnull=False,
                profile__name__isnull=False,
                profile__team__isnull=False
            )
            .order_by('-profile__game_last__date_finished')
            .distinct('profile__game_last__date_finished', 'profile')
        )

    def get_context_data(self, *args, **kwargs):
        context_data = super(PlayerSearchView, self).get_context_data(*args, **kwargs)
        context_data.update({
            'form': self.form,
            'term_random': self.get_random_name(),
        })
        return context_data

    def get_random_name(self):
        """Return the name of a randomly picked named profile (None when there are none).

        Cached for an hour.
        """
        @cacheops.cached(timeout=60*60)
        def _get_random_name():
            queryset = models.Profile.objects.filter(name__isnull=False)
            try:
                # pick a random offset over the whole queryset; the original
                # randrange(1, count) could never select the first profile and
                # raised ValueError whenever count <= 1
                profile = queryset[random.randrange(queryset.count())]
            except (IndexError, ValueError):
                # ValueError: empty queryset (randrange(0));
                # IndexError: a row vanished between count() and the lookup
                return None
            return profile.name
        return _get_random_name()
class ProfileRedirectView(generic.RedirectView):
    """Redirect /player/Name/ requests to the search view."""
    # the original placed the string above after `permanent`, making it a
    # no-op expression instead of the class docstring — it must come first
    permanent = True

    def get_redirect_url(self, *args, **kwargs):
        # forward the requested name as the search term
        return '%s?player=%s' % (reverse('tracker:search'), kwargs.get('name', ''))
class APIMotdViewMixin(object):
    """Mixin adding MOTD display timing values to the template context.

    Reads optional non-negative integer GET params 'initial' and 'repeat',
    falling back to the class defaults on anything missing or invalid.
    """
    # default values
    time_initial = 60  # start displaying in 60 seconds after a map launch
    time_repeat = 0    # display once

    def get_context_data(self, *args, **kwargs):
        context_data = super(APIMotdViewMixin, self).get_context_data(*args, **kwargs)
        context_data.update({
            'time_initial': self._parse_time('initial', self.time_initial),
            'time_repeat': self._parse_time('repeat', self.time_repeat),
            'nodelay': ('nodelay' in self.request.GET),
        })
        return context_data

    def _parse_time(self, param, default):
        """Parse GET[param] as a non-negative int; return `default` on any invalid value.

        Replaces the original's duplicated bare `except:` blocks (which also
        swallowed KeyboardInterrupt) and its `assert`-based validation
        (stripped under -O) with explicit exception types and a value check.
        """
        try:
            value = int(self.request.GET[param])
        except (KeyError, TypeError, ValueError):
            return default
        return value if value >= 0 else default
class APIMotdSummaryView(SummaryViewMixin, APIMotdViewMixin, generic.TemplateView):
    """MOTD API endpoint rendering the site summary."""
    template_name = 'tracker/api/motd/summary.html'
    # borrow summary from MainView
    summary = MainView.summary
    get_summary = MainView.get_summary
class APIMotdLeaderboardView(APIMotdViewMixin, BoardListView):
    """MOTD API endpoint rendering the top entries of a (random by default) leaderboard."""
    template_name = 'tracker/api/motd/leaderboard.html'
    model = models.Rank
    # number of leaderboard entries shown
    limit = 5

    def get_queryset(self, *args, **kwargs):
        return super(APIMotdLeaderboardView, self).get_queryset(*args, **kwargs)[:self.limit]

    def get_context_data(self, *args, **kwargs):
        context_data = super(APIMotdLeaderboardView, self).get_context_data(*args, **kwargs)
        context_data.update({
            'limit': self.limit,
        })
        return context_data

    def get_default_board(self):
        """Return a random leaderboard name."""
        return random.choice(list(self.board_list))
class APIWhoisView(generic.View):
    """Plain-text whois API endpoint for game servers.

    Requests carry a command name, a command id and arguments; responses are
    newline-delimited: status code, command id, optional message.
    """
    template_name = 'tracker/api/whois/%(command)s.html'
    # validation pattern tree for incoming request parameters
    pattern_node = shortcuts.parse_pattern(const.WHOIS_PATTERN)
    command_name = None

    class CommandError(Exception):
        # raised by command handlers to signal an error response to the caller
        pass

    @staticmethod
    def status(request, code, command, message=None):
        """
        Return an integer status code and command id followed by an optional message.
        The response components are delimited with a newline.
        Example:
            0\nGBAh1La\n127.0.0.1 belongs to localhost
            1\nNbaY1hd\nInvalid IP address
        """
        return HttpResponse('\n'.join(list(filter(None, [code, command, message]))))

    # limit request rate to the whois api
    @method_decorator(ratelimit(rate='60/m', block=False))
    @method_decorator(decorators.requires_valid_source)
    @method_decorator(decorators.requires_valid_request(pattern_node))
    def dispatch(self, *args, **kwargs):
        return super(APIWhoisView, self).dispatch(*args, **kwargs)

    def get(self, request, *args, **kwargs):
        """Run the requested command and return a '0' (ok) or '1' (error) status response."""
        self.command_name = self.request.stream_data['command'].value
        try:
            # annotate request rate limit block reason
            if getattr(request, 'limited', False):
                raise self.CommandError(_('Request rate limit exceeded'))
            context_data = self.get_context_data()
        except self.CommandError as e:
            # respond with an error code
            return self.status(request, '1', request.stream_data['command_id'].value, str(e))
        # respond with command result
        return self.status(
            request,
            '0',
            request.stream_data['command_id'].value,
            render_to_string(self.get_template_names(), context_data, RequestContext(request))
        )

    def get_context_data(self, *args, **kwargs):
        context_data = {}
        # only handle commands that have associated handlers
        try:
            method = getattr(self, 'handle_%s' % self.command_name)
        except AttributeError:
            raise self.CommandError(_('%(command)s is not a valid command name') % {'command': self.command_name})
        else:
            # invoke the associated command context data handler
            # passing the provided argument
            context_data.update(method(self.request.stream_data['args'].value))
        return context_data

    def get_template_names(self, *args, **kwargs):
        # e.g. 'tracker/api/whois/whois.html' for the whois command
        return self.template_name % {'command': self.command_name}

    def handle_whois(self, arg):
        """
        Handle a whois command.
        Args:
            arg - command argument
                argument is expected to be a player name
                and an IP address delimited by \t (tab character)
        """
        try:
            name, ip = arg.split('\t')
        # NOTE(review): bare except — only ValueError is expected here; anything
        # broader masks real bugs and should be narrowed
        except:
            raise self.CommandError(_('%(arg)s is not a valid argument for the whois command') % {'arg': arg})
        # get isp
        try:
            isp = models.ISP.objects.match_or_create(ip)[0]
        except ObjectDoesNotExist:
            isp = None
        except ValueError:
            raise self.CommandError(_('%(ip)s is not a valid IP address') % {'ip': ip})
        # attempt to match profile
        try:
            profile = models.Profile.objects.match_smart(name=name, isp=isp, ip=ip)
        except ObjectDoesNotExist:
            profile = None
        return {
            'name': name,
            'ip': ip,
            'isp': isp.name if isp else None,
            'country': isp.country if isp else None,
            'profile': profile,
        }
|
# NICOS setup file: virtual PUMA multi detector (11 detector/guide channels).
description = 'PUMA multi detector device'
group = 'lowlevel'

import math

excludes = ['detector']
modules = ['nicos_mlz.puma.commands']
# visibility tuple shared by the generated axis devices below
vis = ('devlist', 'namespace', 'metadata')

devices = dict(
    # layout device combining the generated detector/guide rotation axes
    med = device('nicos_mlz.puma.devices.PumaMultiDetectorLayout',
        description = 'PUMA multi detector',
        rotdetector = ['rd1', 'rd2', 'rd3', 'rd4', 'rd5', 'rd6', 'rd7', 'rd8',
                       'rd9', 'rd10', 'rd11'],
        rotguide = ['rg1', 'rg2', 'rg3', 'rg4', 'rg5', 'rg6', 'rg7', 'rg8',
                    'rg9', 'rg10', 'rg11'],
        att = device('nicos.devices.generic.Axis',
            motor = device('nicos_mlz.puma.devices.VirtualReferenceMotor',
                abslimits = (-90, 15),
                unit = 'deg',
                refpos = -1,
                fmtstr = '%.3f',
            ),
            precision = 0.01,
        ),
        # park positions: 11 detector angles followed by 11 guide angles
        parkpos = [-15, -17.5, -20., -22.5, -25., -27.5, -30., -32.5, -35.,
                   -37.5, -40.,
                   3.5, 2.75, 2.5, 2.25, 2.0, 1.75, 1.5, 1.25, 1.0, 0.75, 0.5],
    ),
    monitor = device('nicos.devices.generic.VirtualCounter',
        description = 'Monitor',
        fmtstr = '%d',
        type = 'monitor',
        visibility = (),
    ),
    timer = device('nicos.devices.generic.VirtualTimer',
        description = 'timer',
        fmtstr = '%.2f',
        unit = 's',
        visibility = (),
    ),
    image = device('nicos.devices.generic.VirtualImage',
        description = 'Image data device',
        fmtstr = '%d',
        pollinterval = 86400,
        size = (1, 11),
        visibility = (),
    ),
    det = device('nicos.devices.generic.Detector',
        description = 'Multidetector with single channels',
        timers = ['timer'],
        monitors = ['monitor'],
        # images = ['image'],
        # counters = ['image'],
        counters = ['ctr1', 'ctr2', 'ctr3', 'ctr4', 'ctr5',
                    'ctr6', 'ctr7', 'ctr8', 'ctr9', 'ctr10',
                    'ctr11'],
        maxage = 86400,
        pollinterval = None,
    ),
)

# generate the 11 detector axes, 11 guide axes and 11 virtual counters
for i in range(11):
    devices['rd%d' % (i + 1)] = device('nicos.devices.generic.Axis',
        description = 'Rotation detector %d multidetector' % (i + 1),
        motor = device('nicos_mlz.puma.devices.VirtualReferenceMotor',
            abslimits = (-42 + (11 - (i + 1)) * 2.5, 13 - i * 2.4),
            unit = 'deg',
            refpos = -13.5 - i * 2.5,
            fmtstr = '%.3f',
            speed = 3,
        ),
        precision = 0.01,
        visibility = vis,
    )
    devices['rg%d' % (i + 1)] = device('nicos.devices.generic.Axis',
        description = 'Rotation guide %d multidetector' % (i + 1),
        motor = device('nicos_mlz.puma.devices.VirtualReferenceMotor',
            abslimits = (-8, 25),
            unit = 'deg',
            refpos = -7.7,
            fmtstr = '%.3f',
            speed = 1,
        ),
        precision = 0.01,
        visibility = vis,
    )
    # simulated count rate peaks at the central channel (i + 1 == 6)
    devices['ctr%d' % (i + 1)] = device('nicos.devices.generic.VirtualCounter',
        visibility = (),
        type = 'counter',
        countrate = 1 + int(2000 * math.exp(-((i + 1) - 6) ** 2 / 2.)),
        fmtstr = '%d',
    )

startupcode = '''
SetDetectors(det)
'''
|
# <gh_stars>1-10
from __future__ import annotations
import asyncio
import os
from datetime import datetime
from pathlib import Path
from typing import TYPE_CHECKING, Any, Callable, Iterable, Optional, overload
from typing_extensions import Final, Literal
from ...client import Client
from ...ext import commands
from ...game import TF2, Game
from ...gateway import Msgs
from ...protobufs import GCMsg, GCMsgProto
from ...user import ClientUser, User
from .enums import Language
from .protobufs.struct_messages import CraftResponse
from .state import GCState
if TYPE_CHECKING:
from steam.ext import tf2
from ...comment import Comment
from ...invite import ClanInvite, UserInvite
from ...message import Message
from ...trade import Inventory, TradeOffer
from ..commands import Context
from .backpack import BackPack, BackPackItem, Schema
__all__ = (
"Client",
"Bot",
)
class TF2ClientUser(ClientUser):
    """ClientUser whose inventory() overloads narrow the return type:
    requesting the TF2 game yields a BackPack, any other Game a plain Inventory."""
    @overload
    async def inventory(self, game: Literal[TF2]) -> BackPack:
        ...
    @overload
    async def inventory(self, game: Game) -> Inventory:
        ...
class Client(Client):
    """steam Client subclass wired up for TF2's Game Coordinator (GC):
    forces TF2 as the played game, exposes the item schema/backpack info
    and implements crafting."""
    GAME: Final[Game] = TF2

    user: TF2ClientUser

    def __init__(self, loop: Optional[asyncio.AbstractEventLoop] = None, **options: Any):
        game = options.pop("game", None)
        if game is not None:  # don't let them overwrite the main game
            try:
                options["games"].append(game)
            except (TypeError, KeyError):
                # "games" missing or not a list — replace it
                options["games"] = [game]
        options["game"] = self.GAME
        self._original_games: Optional[list[Game]] = options.get("games")
        self._crafting_lock = asyncio.Lock()
        super().__init__(loop=loop, **options)

    def _get_state(self, **options: Any) -> GCState:
        # use the TF2-specific GC-aware connection state
        return GCState(client=self, **options)

    @property
    def schema(self) -> Schema:
        """Optional[:class:`multidict.MultiDict`]: TF2's item schema. ``None`` if the user isn't ready."""
        return self._connection.schema

    @property
    def backpack_slots(self) -> int:
        """The client's number of backpack slots."""
        return self._connection.backpack_slots

    def is_premium(self) -> bool:
        """
        Optional[:class:`bool`]: Whether or not the client's account has TF2 premium. ``None`` if the user isn't ready.
        """
        return self._connection._is_premium  # type: ignore

    def set_language(self, file: os.PathLike[str]) -> None:
        """Set the localization files for your bot.
        This isn't necessary in most situations.
        """
        from . import VDF_DECODER
        file = Path(file).resolve()
        self._connection.language = VDF_DECODER(file.read_text())

    async def craft(self, items: Iterable[BackPackItem], recipe: int = -2) -> Optional[list[BackPackItem]]:
        """|coro|
        Craft a set of items together with an optional recipe.
        Parameters
        ----------
        items
            The items to craft.
        recipe
            The recipe to craft them with default is -2 (wildcard). Setting for metal crafts isn't required. See
            https://github.com/DontAskM8/TF2-Crafting-Recipe/blob/master/craftRecipe.json for other recipe details.
        Return
        ------
        The crafted items, ``None`` if crafting failed.
        """
        def check_gc_msg(msg: GCMsg[Any]) -> bool:
            # claim the first unclaimed CraftResponse and capture its item ids
            if isinstance(msg.body, CraftResponse):
                if not msg.body.being_used:  # craft queue is FIFO, so this works fine
                    msg.body.being_used = True
                    nonlocal ids
                    ids = list(msg.body.id_list)
                    return True
            return False

        def check_crafting_complete(items: list[BackPackItem]) -> bool:
            # the craft is ours when the received asset ids match the response's id list
            return [item.asset_id for item in items] == ids

        ids = []
        future = self.loop.create_future()
        # register a listener resolved once the crafted items arrive
        listeners = self._listeners.setdefault("crafting_complete", [])
        listeners.append((future, check_crafting_complete))
        await self.ws.send_gc_message(GCMsg(Language.Craft, recipe=recipe, items=[item.id for item in items]))
        try:
            resp = await self.wait_for("gc_message_receive", check=check_gc_msg, timeout=60)
        except asyncio.TimeoutError:
            recipe_id = -1
        else:
            recipe_id = resp.body.recipe_id
        if recipe_id == -1:
            # -1 signals failure (either a timeout or the GC rejecting the craft)
            future.cancel()  # cancel the future (it's cleaned from _listeners up by dispatch)
            return None
        return await future

    async def wait_for_gc_ready(self) -> None:
        # blocks until the GC handshake has completed
        await self._connection._gc_ready.wait()

    # boring subclass stuff

    async def _handle_ready(self) -> None:
        # keep a reference to the unpatched inventory before GC patching
        self._connection._unpatched_inventory = self.user.inventory
        await super()._handle_ready()

    async def _on_gc_connect(self) -> None:
        """
        await self._connection._connected.wait()
        while True:  # this is ok-ish as gateway.KeepAliveHandler should catch any blocking and disconnects
            await self.ws.send_gc_message(GCMsgProto(Language.ClientHello))
            await asyncio.sleep(5)
        """
        # this breaks things not sure why can't be bothered finding out stuff seems to work without pinging.

    if TYPE_CHECKING:

        async def on_gc_connect(self) -> None:
            """|coro|
            Called after the client receives the welcome message from the GC.
            Warning
            -------
            This is called every time we craft an item and disconnect so same warnings apply to
            :meth:`steam.Client.on_connect`
            """

        async def on_gc_disconnect(self) -> None:
            """|coro|
            Called after the client receives the goodbye message from the GC.
            Warning
            -------
            This is called every time we craft an item and disconnect so same warnings apply to
            :meth:`steam.Client.on_connect`
            """

        async def on_gc_ready(self) -> None:
            """|coro|
            Called after the client connects to the GC and has the :attr:`schema`, :meth:`Client.user.inventory` and set
            up and account info (:meth:`is_premium` and :attr:`backpack_slots`).
            Warning
            -------
            This is called every time we craft an item and disconnect so same warnings apply to
            :meth:`steam.Client.on_connect`
            """

        async def on_account_update(self) -> None:
            """|coro|
            Called when the client user's account is updated. This can happen from any one of the below changing:
            - :meth:`is_premium`
            - :attr:`backpack_slots`
            """

        async def on_crafting_complete(self, items: list[tf2.BackPackItem]) -> None:
            """|coro|
            Called after a crafting recipe is completed.
            Parameters
            ----------
            items: list[:class:`tf2.BackPackItem`]
                The items the craft request created.
            """

        async def on_item_receive(self, item: tf2.BackPackItem) -> None:
            """|coro|
            Called when the client receives an item.
            Parameters
            ----------
            item: :class:`tf2.BackPackItem`
                The received item.
            """

        async def on_item_remove(self, item: tf2.BackPackItem) -> None:
            """|coro|
            Called when the client has an item removed from its backpack.
            Parameters
            ----------
            item: :class:`tf2.BackPackItem`
                The removed item.
            """

        async def on_item_update(self, before: tf2.BackPackItem, after: tf2.BackPackItem) -> None:
            """|coro|
            Called when the client has an item in its backpack updated.
            Parameters
            ----------
            before: :class:`tf2.BackPackItem`
                The item before being updated.
            after: :class:`tf2.BackPackItem`
                The item now.
            """

        # typed wait_for overloads mapping each event name to its payload type

        @overload
        async def wait_for(
            self,
            event: Literal[
                "connect",
                "disconnect",
                "ready",
                "login",
                "logout",
                "gc_connect",
                "gc_disconnect",
                "gc_ready",
                "account_update",
            ],
            *,
            check: Callable[[], bool] = ...,
            timeout: Optional[float] = ...,
        ) -> None:
            ...

        @overload
        async def wait_for(
            self,
            event: Literal["error"],
            *,
            check: Callable[[str, Exception, tuple[Any, ...], dict[str, Any]], bool] = ...,
            timeout: Optional[float] = ...,
        ) -> tuple[str, Exception, tuple, dict]:
            ...

        @overload
        async def wait_for(
            self,
            event: Literal["message"],
            *,
            check: Callable[[Message], bool] = ...,
            timeout: Optional[float] = ...,
        ) -> Message:
            ...

        @overload
        async def wait_for(
            self,
            event: Literal["comment"],
            *,
            check: Callable[[Comment], bool] = ...,
            timeout: Optional[float] = ...,
        ) -> Comment:
            ...

        @overload
        async def wait_for(
            self,
            event: Literal["user_update"],
            *,
            check: Callable[[User, User], bool] = ...,
            timeout: Optional[float] = ...,
        ) -> tuple[User, User]:
            ...

        @overload
        async def wait_for(
            self,
            event: Literal["typing"],
            *,
            check: Callable[[User, datetime], bool] = ...,
            timeout: Optional[float] = ...,
        ) -> tuple[User, datetime]:
            ...

        @overload
        async def wait_for(
            self,
            event: Literal[
                "trade_receive",
                "trade_send",
                "trade_accept",
                "trade_decline",
                "trade_cancel",
                "trade_expire",
                "trade_counter",
            ],
            *,
            check: Callable[[TradeOffer], bool] = ...,
            timeout: Optional[float] = ...,
        ) -> TradeOffer:
            ...

        @overload
        async def wait_for(
            self,
            event: Literal["user_invite"],
            *,
            check: Callable[[UserInvite], bool] = ...,
            timeout: Optional[float] = ...,
        ) -> UserInvite:
            ...

        @overload
        async def wait_for(
            self,
            event: Literal["clan_invite"],
            *,
            check: Callable[[ClanInvite], bool] = ...,
            timeout: Optional[float] = ...,
        ) -> ClanInvite:
            ...

        @overload
        async def wait_for(
            self,
            event: Literal[
                "socket_receive",
                "socket_send",
            ],
            *,
            check: Callable[[Msgs], bool] = ...,
            timeout: Optional[float] = ...,
        ) -> Msgs:
            ...

        @overload
        async def wait_for(
            self,
            event: Literal["crafting_complete"],
            *,
            check: Callable[[list[tf2.BackPackItem]], bool] = ...,
            timeout: Optional[float] = ...,
        ) -> list[tf2.BackPackItem]:
            ...

        @overload
        async def wait_for(
            self,
            event: Literal[
                "item_receive",
                "item_remove",
                "item_update",
            ],
            *,
            check: Callable[[BackPackItem], bool] = ...,
            timeout: Optional[float] = ...,
        ) -> BackPackItem:
            ...
class Bot(commands.Bot, Client):
if TYPE_CHECKING:
@overload
async def wait_for(
self,
event: Literal[
"connect",
"disconnect",
"ready",
"login",
"logout",
"gc_connect",
"gc_disconnect",
"gc_ready",
"account_update",
],
*,
check: Callable[[], bool] = ...,
timeout: Optional[float] = ...,
) -> None:
...
@overload
async def wait_for(
self,
event: Literal["error"],
*,
check: Callable[[str, Exception, tuple[Any, ...], dict[str, Any]], bool] = ...,
timeout: Optional[float] = ...,
) -> tuple[str, Exception, tuple, dict]:
...
@overload
async def wait_for(
self,
event: Literal["message"],
*,
check: Callable[[Message], bool] = ...,
timeout: Optional[float] = ...,
) -> Message:
...
@overload
async def wait_for(
self,
event: Literal["comment"],
*,
check: Callable[[Comment], bool] = ...,
timeout: Optional[float] = ...,
) -> Comment:
...
@overload
async def wait_for(
self,
event: Literal["user_update"],
*,
check: Callable[[User, User], bool] = ...,
timeout: Optional[float] = ...,
) -> tuple[User, User]:
...
@overload
async def wait_for(
self,
event: Literal["typing"],
*,
check: Callable[[User, datetime], bool] = ...,
timeout: Optional[float] = ...,
) -> tuple[User, datetime]:
...
@overload
async def wait_for(
self,
event: Literal[
"trade_receive",
"trade_send",
"trade_accept",
"trade_decline",
"trade_cancel",
"trade_expire",
"trade_counter",
],
*,
check: Callable[[TradeOffer], bool] = ...,
timeout: Optional[float] = ...,
) -> TradeOffer:
...
@overload
async def wait_for(
self,
event: Literal["user_invite"],
*,
check: Callable[[UserInvite], bool] = ...,
timeout: Optional[float] = ...,
) -> UserInvite:
...
@overload
async def wait_for(
self,
event: Literal["clan_invite"],
*,
check: Callable[[ClanInvite], bool] = ...,
timeout: Optional[float] = ...,
) -> ClanInvite:
...
@overload
async def wait_for(
self,
event: Literal[
"socket_receive",
"socket_send",
],
*,
check: Callable[[Msgs], bool] = ...,
timeout: Optional[float] = ...,
) -> Msgs:
...
@overload
async def wait_for(
self,
event: Literal["command_error"],
*,
check: Callable[[Context, Exception], bool] = ...,
timeout: Optional[float] = ...,
) -> tuple[Context, Exception]:
...
@overload
async def wait_for(
self,
event: Literal["command"],
*,
check: Callable[[Context], bool] = ...,
timeout: Optional[float] = ...,
) -> Context:
...
@overload
async def wait_for(
self,
event: Literal["command_completion"],
*,
check: Callable[[Context], bool] = ...,
timeout: Optional[float] = ...,
) -> Context:
...
    @overload
    async def wait_for(
        self,
        event: Literal["crafting_complete"],
        *,
        check: Callable[[list[tf2.BackPackItem]], bool] = ...,
        timeout: Optional[float] = ...,
    ) -> list[tf2.BackPackItem]:
        """Overload: 'crafting_complete' resolves to the crafted TF2 backpack items."""
        ...
    @overload
    async def wait_for(
        self,
        event: Literal[
            "item_receive",
            "item_remove",
            "item_update",
        ],
        *,
        # NOTE(review): unqualified BackPackItem here vs tf2.BackPackItem in the
        # crafting overload above — presumably both names are imported; confirm.
        check: Callable[[BackPackItem], bool] = ...,
        timeout: Optional[float] = ...,
    ) -> BackPackItem:
        """Overload: inventory item events resolve to the affected BackPackItem."""
        ...
|
<filename>cyborg/tests/unit/accelerator/drivers/spdk/nvmf/test_nvmf.py
# Copyright 2017 Huawei Technologies Co.,LTD.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from unittest import mock
from cyborg.accelerator.drivers.spdk.nvmf.nvmf import NVMFDRIVER
from cyborg.accelerator.drivers.spdk.util import common_fun
from cyborg.accelerator.drivers.spdk.util.pyspdk.nvmf_client import NvmfTgt
from cyborg.tests import base
class TestNVMFDRIVER(base.TestCase):
    """Unit tests for the SPDK NVMF accelerator driver (NVMFDRIVER)."""

    def setUp(self):
        super(TestNVMFDRIVER, self).setUp()
        # Driver under test; its `py` attribute is the pyspdk process helper.
        self.nvmf_driver = NVMFDRIVER()

    def tearDown(self):
        super(TestNVMFDRIVER, self).tearDown()
        # BUG FIX: the original cleared `self.vhost_driver`, an attribute that
        # was never created (copy-paste from the vhost test); release the
        # driver actually built in setUp.
        self.nvmf_driver = None

    @mock.patch.object(NVMFDRIVER, 'get_one_accelerator')
    def test_discover_accelerator(self, mock_get_one_accelerator):
        """discover_accelerator() reports the bdevs and subsystems of the target."""
        expect_accelerator = {
            'server': 'nvmf',
            'bdevs': [{"num_blocks": 131072,
                       "name": "nvme1",
                       "block_size": 512
                       }],
            'subsystems': [{"core": 0,
                            "nqn": "nqn.2018-01.org.nvmexpress.discovery",
                            "hosts": []
                            }]
        }
        # While the pyspdk process is not alive, discovery must not fetch
        # anything from the target.
        alive = mock.Mock(return_value=False)
        self.nvmf_driver.py.is_alive = alive
        check_error = mock.Mock(return_value=False)
        common_fun.check_for_setup_error = check_error
        self.assertFalse(
            mock_get_one_accelerator.called,
            "Failed to discover_accelerator if py not alive."
        )
        # With a live process and a clean setup check, discovery returns the
        # accelerator description assembled from the NvmfTgt client.
        alive = mock.Mock(return_value=True)
        self.nvmf_driver.py.is_alive = alive
        check_error = mock.Mock(return_value=True)
        common_fun.check_for_setup_error = check_error
        acce_client = NvmfTgt(self.nvmf_driver.py)
        bdevs_fake = [{"num_blocks": 131072,
                       "name": "nvme1",
                       "block_size": 512
                       }]
        bdev_list = mock.Mock(return_value=bdevs_fake)
        acce_client.get_bdevs = bdev_list
        subsystems_fake = [{"core": 0,
                            "nqn": "nqn.2018-01.org.nvmexpress.discovery",
                            "hosts": []
                            }]
        subsystem_list = mock.Mock(return_value=subsystems_fake)
        acce_client.get_nvmf_subsystems = subsystem_list
        accelerator_fake = {
            'server': self.nvmf_driver.SERVER,
            'bdevs': acce_client.get_bdevs(),
            'subsystems': acce_client.get_nvmf_subsystems()
        }
        success_send = mock.Mock(return_value=accelerator_fake)
        self.nvmf_driver.get_one_accelerator = success_send
        accelerator = self.nvmf_driver.discover_accelerator()
        self.assertEqual(accelerator, expect_accelerator)

    def test_accelerator_list(self):
        """accelerator_list() returns whatever get_all_accelerators yields."""
        expect_accelerators = [{
            'server': 'nvmf',
            'bdevs': [{"num_blocks": 131072,
                       "name": "nvme1",
                       "block_size": 512
                       }],
            'subsystems':
                [{"core": 0,
                  "nqn": "nqn.2018-01.org.nvmexpress.discovery",
                  "hosts": []
                  }]
        },
            {
            'server': 'nvnf_tgt',
            'bdevs': [{"num_blocks": 131072,
                       "name": "nvme1",
                       "block_size": 512
                       }],
            'subsystems':
                [{"core": 0,
                  "nqn": "nqn.2018-01.org.nvmexpress.discovery",
                  "hosts": []
                  }]
        }
        ]
        success_send = mock.Mock(return_value=expect_accelerators)
        self.nvmf_driver.get_all_accelerators = success_send
        self.assertEqual(self.nvmf_driver.accelerator_list(),
                         expect_accelerators)

    # The driver does not implement the operations below yet; keep
    # placeholders so the interface stays tracked by the test suite.
    def test_install_accelerator(self):
        pass

    def test_uninstall_accelerator(self):
        pass

    def test_update(self):
        pass

    def test_attach_instance(self):
        pass

    def test_detach_instance(self):
        pass

    def test_delete_subsystem(self):
        pass

    def test_construct_subsystem(self):
        pass
|
<reponame>stevejaker/psychic-journey
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Sample Message.py program
"""
import re
import mysql.connector
# import fuzzy
# import uuid # To be used eventually
# from DBINFO import *
# import db_manage
class Message(object):
    """Wrapper around a Slack message payload.

    Cleans the message text and validates/deduplicates the message against
    the backing database handle (`db`).
    """
    def __init__(self, msg, db=None, message_type='user_msg', **kwargs):
        """Parse *msg* (a Slack event dict).

        Messages without a ``client_msg_id`` (e.g. bot/system events) are
        marked invalid and skipped by :meth:`is_valid`.
        """
        # self.soundex = fuzzy.Soundex(4)
        # print(msg)
        self.db = db  # db_manage.DbManage()
        if 'client_msg_id' in msg:
            self.msg_id = msg['client_msg_id']
            self.msg_text = self._correct_text(msg['text'])
            self.thread_ts = msg['ts']  # ts for parent message to thread the message to
            self.invalid = False
        else:
            self.invalid = True
        # Outgoing-message defaults; `text` is filled in by is_valid().
        self.text = None
        self.as_user = False
        self.attachment = None
        self.username = None
        self.icon_url = None

    def _correct_text(self, text):
        """
        Handles some much needed correcting
        Slack Encodes the `&` character (only needed if `&` is TAG_CHAR), so this
        """
        # NOTE(review): the first replace is a no-op as written — it looks
        # like it was meant to decode Slack's HTML-escaped ampersand
        # ("&amp;" -> "&"); confirm against the original source.
        return text.lower().replace("&", "&").replace(":", '')  # .replace("&unknown", "&unown") # Might be needed in future

    def is_valid(self):
        """
        If message details are NOT in SQL server, uploads, otherwise returns False.
        """
        if self.invalid:
            return False
        read_messages = self.db.get_messages()
        # print(read_messages)
        if self.msg_id in read_messages:
            return False
        else:
            # New message: record it, then let the DB layer build the reply.
            self.db.save_message(self.msg_id, print_statement=True)
            status, message = self.db.processText(self.msg_text)
            self.text = message
            return status

    # Below should be removed from
    # def get_info_str(self): # Needs Work
    #     return f"{self.username}\n"
    # def print_to_console(self):
    #     print(f"""
    # as_user     = False
    # username    = {self.username}
    # icon_url    = {self.icon_url}
    # attachments = {self.attachment}
    # """)
    # def is_in_text(self, tag):
    #     split_text = self.text.split(TAG_CHAR)
    #     if len(split_text) > 1:
    #         for text in split_text[1:]:
    #             if self.check_permutations(tag, text):
    #                 return True
    #     return False
    # def check_permutations(self, tag, text):
    #     """
    #     Individually checks ALL tag elements against
    #     ALL test elements
    #     """
    #     tag_list = tag.split()
    #     text_list = text.split()
    #     for i, t in enumerate(tag_list):
    #         if self.soundex(t) != self.soundex(text_list[i]):
    #             return False
    #     return True
    # def get_messages(self):
    #     sql_statement = f"SELECT msg_id FROM `{DB_MESSAGE_TABLE}`"
    #     self.cursor.execute(sql_statement)
    #     return [i[0] for i in self.cursor.fetchall()]
    # def save_message(self):
    #     sql_statement = f"INSERT INTO `{DB_MESSAGE_TABLE}` ( msg_id ) VALUES ( '{self.msg_id}' )"
    #     print(sql_statement)
    #     self.cursor.execute(sql_statement)
    #     self.conn.commit()
    # def getAllTags(self):
    #     # Get tags from sql
    #     sql_statement = f"SELECT DISTINCT tag FROM `{DB_TAGS_TABLE}`"
    #     self.cursor.execute(sql_statement)
    #     return self.cursor.fetchall()
    # def getTags(self):
    #     tags = self.getAllTags()
    #     return [t[0] for t in tags if t[0] in self.msg_text]
    #     # return [t[0] for t in tags if self.is_in_text(t[0])] # Future addition
    # def getUsers(self, tags):
    #     # Get users from sql
    #     if tags == []:
    #         return []
    #     tags = ", ".join([f'"{tag}"' for tag in tags]) # Convert tags to comma separated list
    #     sql_statement = f'SELECT DISTINCT username FROM `{DB_TAGS_TABLE}` WHERE tag in ( {tags} )'
    #     self.cursor.execute(sql_statement)
    #     usernames = self.cursor.fetchall()
    #     return [f"<@{u[0]}>" for u in usernames]
    # def createMessage(self, users, tags):
    #     self.text = f"`{', '.join(tags)}` tags(s) used {' '.join(users)}"
    # def send_all_tags(self):
    #     tags = [t[0] for t in self.getAllTags()]
    #     self.text = f"*ALL TAGS BEING SCANNED*\n`{', '.join(tags)}`"
    # def run_unit_tests(self):
    #     import UNIT_TESTS
    #     UNIT_TESTS.run()
    # def processText(self):
    #     # If the message is eligible to tag users, will set self.text.
    #     # Otherwise, returns False
    #     users = []
    #     tags = []
    #     if TAG_CHAR in self.msg_text:
    #         if "&export all tags" in self.msg_text:
    #             self.send_all_tags()
    #             return True
    #         elif "&run tests" == self.msg_text:
    #             self.run_unit_tests()
    #             return True
    #         tags = self.getTags()
    #         users = self.getUsers(tags)
    #     if users == [] or tags == []:
    #         return False
    #     else:
    #         self.createMessage(users, tags)
    #         return True
    # def DEBUG(self, tag):
    #     if TAG_CHAR in self.msg_text:
    #         if tag in self.msg_text:
    #             return True
    #     return False
|
"""Client for staging inputs for Galaxy Tools and Workflows.
Implement as a connector to serve a bridge between galactic_job_json
utility and a Galaxy API library.
"""
import abc
import json
import logging
import os
import yaml
from galaxy.tool_util.cwl.util import (
DirectoryUploadTarget,
FileLiteralTarget,
FileUploadTarget,
galactic_job_json,
path_or_uri_to_uri,
)
log = logging.getLogger(__name__)
UPLOAD_TOOL_ID = "upload1"
LOAD_TOOLS_FROM_PATH = True
DEFAULT_USE_FETCH_API = True
DEFAULT_FILE_TYPE = "auto"
DEFAULT_DBKEY = "?"
class StagingInterace(metaclass=abc.ABCMeta):
    """Client that parses a job input and populates files into the Galaxy API.

    Abstract class that must override _post (and optionally other things such
    _attach_file, _log, etc..) to adapt to bioblend (for Planemo) or using the
    tool test interactor infrastructure.
    """

    @abc.abstractmethod
    def _post(self, api_path, payload, files_attached=False):
        """Make a post to the Galaxy API along supplied path."""

    def _attach_file(self, path):
        """Open *path* for binary reading; override to customize attachment."""
        return open(path, 'rb')

    def _tools_post(self, payload, files_attached=False):
        """POST to the legacy tools endpoint and hand resulting jobs to _handle_job."""
        tool_response = self._post("tools", payload, files_attached=files_attached)
        for job in tool_response.get("jobs", []):
            self._handle_job(job)
        return tool_response

    def _fetch_post(self, payload, files_attached=False):
        """POST to the modern tools/fetch endpoint and hand resulting jobs to _handle_job."""
        tool_response = self._post("tools/fetch", payload, files_attached=files_attached)
        for job in tool_response.get("jobs", []):
            self._handle_job(job)
        return tool_response

    def _handle_job(self, job_response):
        """Implementer can decide if to wait for job(s) individually or not here."""

    def stage(self, tool_or_workflow, history_id, job=None, job_path=None, use_path_paste=LOAD_TOOLS_FROM_PATH, to_posix_lines=True, job_dir="."):
        """Upload the inputs described by *job* (or the YAML file at *job_path*)
        into *history_id*, returning ``(job_dict, datasets)`` from
        galactic_job_json. Exactly one of *job* / *job_path* must be given.
        """
        # Mutable cell so the nested upload closures can flag that a file body
        # is attached to the multipart request.
        files_attached = [False]

        def upload_func_fetch(upload_target):
            # Upload one target via the modern tools/fetch API.

            def _attach_file(upload_payload, uri, index=0):
                uri = path_or_uri_to_uri(uri)
                is_path = uri.startswith("file://")
                if not is_path or use_path_paste:
                    return {"src": "url", "url": uri}
                else:
                    files_attached[0] = True
                    path = uri[len("file://"):]
                    upload_payload["__files"][f"files_{index}|file_data"] = self._attach_file(path)
                    return {"src": "files"}

            fetch_payload = None
            if isinstance(upload_target, FileUploadTarget):
                file_path = upload_target.path
                file_type = upload_target.properties.get('filetype', None) or DEFAULT_FILE_TYPE
                dbkey = upload_target.properties.get('dbkey', None) or DEFAULT_DBKEY
                fetch_payload = _fetch_payload(
                    history_id,
                    file_type=file_type,
                    dbkey=dbkey,
                    to_posix_lines=to_posix_lines,
                )
                name = _file_path_to_name(file_path)
                if file_path is not None:
                    src = _attach_file(fetch_payload, file_path)
                    fetch_payload["targets"][0]["elements"][0].update(src)
                if upload_target.composite_data:
                    composite_items = []
                    for i, composite_data in enumerate(upload_target.composite_data):
                        composite_item_src = _attach_file(fetch_payload, composite_data, index=i)
                        composite_items.append(composite_item_src)
                    fetch_payload["targets"][0]["elements"][0]["src"] = "composite"
                    fetch_payload["targets"][0]["elements"][0]["composite"] = {
                        "items": composite_items,
                    }
                tags = upload_target.properties.get("tags")
                if tags:
                    fetch_payload["targets"][0]["elements"][0]["tags"] = tags
                fetch_payload["targets"][0]["elements"][0]["name"] = name
            elif isinstance(upload_target, FileLiteralTarget):
                fetch_payload = _fetch_payload(history_id)
                # For file literals - take them as is - never convert line endings.
                fetch_payload["targets"][0]["elements"][0].update({
                    "src": "pasted",
                    "paste_content": upload_target.contents,
                    "to_posix_lines": False,
                })
                tags = upload_target.properties.get("tags")
                if tags:
                    fetch_payload["targets"][0]["elements"][0]["tags"] = tags
            elif isinstance(upload_target, DirectoryUploadTarget):
                fetch_payload = _fetch_payload(history_id, file_type="directory")
                fetch_payload["targets"][0].pop("elements")
                # NOTE(review): this branch reads .path while the legacy branch
                # below reads .tar_path — confirm DirectoryUploadTarget exposes both.
                tar_path = upload_target.path
                src = _attach_file(fetch_payload, tar_path)
                fetch_payload["targets"][0]["elements_from"] = src
            else:
                content = json.dumps(upload_target.object)
                fetch_payload = _fetch_payload(history_id, file_type="expression.json")
                fetch_payload["targets"][0]["elements"][0].update({
                    "src": "pasted",
                    "paste_content": content,
                })
                tags = upload_target.properties.get("tags")
                # NOTE(review): unlike the branches above, tags is set even when
                # None/empty here — confirm the fetch API tolerates a null tags list.
                fetch_payload["targets"][0]["elements"][0]["tags"] = tags
            return self._fetch_post(fetch_payload, files_attached=files_attached[0])

        # Save legacy upload_func to target older Galaxy servers
        def upload_func(upload_target):

            def _attach_file(upload_payload, uri, index=0):
                uri = path_or_uri_to_uri(uri)
                is_path = uri.startswith("file://")
                if not is_path or use_path_paste:
                    upload_payload["inputs"]["files_%d|url_paste" % index] = uri
                else:
                    files_attached[0] = True
                    path = uri[len("file://"):]
                    upload_payload["__files"]["files_%d|file_data" % index] = self._attach_file(path)

            if isinstance(upload_target, FileUploadTarget):
                file_path = upload_target.path
                file_type = upload_target.properties.get('filetype', None) or DEFAULT_FILE_TYPE
                dbkey = upload_target.properties.get('dbkey', None) or DEFAULT_DBKEY
                # BUG FIX: dbkey was previously passed as `to_posix_lines=dbkey`
                # (silently dropping the dbkey and the real to_posix_lines flag);
                # forward both keywords correctly, mirroring the fetch branch above.
                upload_payload = _upload_payload(
                    history_id,
                    file_type=file_type,
                    dbkey=dbkey,
                    to_posix_lines=to_posix_lines,
                )
                name = _file_path_to_name(file_path)
                upload_payload["inputs"]["files_0|auto_decompress"] = False
                upload_payload["inputs"]["auto_decompress"] = False
                if file_path is not None:
                    _attach_file(upload_payload, file_path)
                upload_payload["inputs"]["files_0|NAME"] = name
                if upload_target.secondary_files:
                    _attach_file(upload_payload, upload_target.secondary_files, index=1)
                    upload_payload["inputs"]["files_1|type"] = "upload_dataset"
                    upload_payload["inputs"]["files_1|auto_decompress"] = True
                    upload_payload["inputs"]["file_count"] = "2"
                    upload_payload["inputs"]["force_composite"] = "True"
                # galaxy.exceptions.RequestParameterInvalidException: Not input source type
                # defined for input '{'class': 'File', 'filetype': 'imzml', 'composite_data':
                # ['Example_Continuous.imzML', 'Example_Continuous.ibd']}'.\n"}]]
                if upload_target.composite_data:
                    for i, composite_data in enumerate(upload_target.composite_data):
                        upload_payload["inputs"][f"files_{i}|type"] = "upload_dataset"
                        _attach_file(upload_payload, composite_data, index=i)
                self._log(f"upload_payload is {upload_payload}")
                return self._tools_post(upload_payload, files_attached=files_attached[0])
            elif isinstance(upload_target, FileLiteralTarget):
                # For file literals - take them as is - never convert line endings.
                payload = _upload_payload(history_id, file_type="auto", auto_decompress=False, to_posix_lines=False)
                payload["inputs"]["files_0|url_paste"] = upload_target.contents
                return self._tools_post(payload)
            elif isinstance(upload_target, DirectoryUploadTarget):
                tar_path = upload_target.tar_path
                upload_payload = _upload_payload(
                    history_id,
                    file_type="tar",
                )
                upload_payload["inputs"]["files_0|auto_decompress"] = False
                _attach_file(upload_payload, tar_path)
                tar_upload_response = self._tools_post(upload_payload, files_attached=files_attached[0])
                # Convert the uploaded tarball into a Galaxy directory dataset.
                convert_payload = dict(
                    tool_id="CONVERTER_tar_to_directory",
                    tool_inputs={"input1": {"src": "hda", "id": tar_upload_response["outputs"][0]["id"]}},
                    history_id=history_id,
                )
                convert_response = self._tools_post(convert_payload)
                assert "outputs" in convert_response, convert_response
                return convert_response
            else:
                content = json.dumps(upload_target.object)
                payload = _upload_payload(history_id, file_type="expression.json")
                # BUG FIX: the pasted content must live under "inputs" like every
                # other branch; it was previously set on the payload root where
                # the upload tool never reads it.
                payload["inputs"]["files_0|url_paste"] = content
                return self._tools_post(payload)

        def create_collection_func(element_identifiers, collection_type):
            # "record" collections need field auto-detection; others take no fields.
            payload = {
                "name": "dataset collection",
                "instance_type": "history",
                "history_id": history_id,
                "element_identifiers": element_identifiers,
                "collection_type": collection_type,
                "fields": None if collection_type != "record" else "auto",
            }
            return self._post("dataset_collections", payload)

        if job_path is not None:
            assert job is None
            with open(job_path) as f:
                job = yaml.safe_load(f)
            job_dir = os.path.dirname(os.path.abspath(job_path))
        else:
            assert job is not None
            assert job_dir is not None

        if self.use_fetch_api:
            upload = upload_func_fetch
        else:
            upload = upload_func

        job_dict, datasets = galactic_job_json(
            job,
            job_dir,
            upload,
            create_collection_func,
            tool_or_workflow,
        )
        return job_dict, datasets

    # extension point for planemo to override logging
    def _log(self, message):
        log.debug(message)

    @abc.abstractproperty
    def use_fetch_api(self):
        """Return true if this should use (modern) data fetch API."""
class InteractorStaging(StagingInterace):
    """Staging implementation backed by a Galaxy tool-test interactor."""

    def __init__(self, galaxy_interactor, use_fetch_api=DEFAULT_USE_FETCH_API):
        # The interactor supplies the authenticated HTTP layer.
        self.galaxy_interactor = galaxy_interactor
        self._use_fetch_api = use_fetch_api

    def _post(self, api_path, payload, files_attached=False):
        """Delegate POSTs to the interactor and return the decoded JSON body.

        NOTE(review): `files_attached` is ignored here — the interactor always
        posts JSON; confirm attached-file uploads are handled upstream.
        """
        response = self.galaxy_interactor._post(api_path, payload, json=True)
        assert response.status_code == 200, response.text
        return response.json()

    def _handle_job(self, job_response):
        # Block until each upload/conversion job finishes.
        self.galaxy_interactor.wait_for_job(job_response["id"])

    @property
    def use_fetch_api(self):
        # Chosen at construction time; see DEFAULT_USE_FETCH_API.
        return self._use_fetch_api
def _file_path_to_name(file_path):
if file_path is not None:
name = os.path.basename(file_path)
else:
name = "defaultname"
return name
def _upload_payload(history_id, tool_id=UPLOAD_TOOL_ID, file_type=DEFAULT_FILE_TYPE, dbkey=DEFAULT_DBKEY, **kwd):
    """Build a legacy upload-tool payload (adapted from bioblend tools client).

    Recognized **kwd entries: ``to_posix_lines``, ``space_to_tab``, ``file_name``.
    """
    tool_input = {
        "file_type": file_type,
        "dbkey": dbkey,
    }
    # to_posix_lines defaults on; space_to_tab is only honoured when the
    # line-ending conversion is left enabled (mirrors bioblend's behaviour).
    if not kwd.get('to_posix_lines', True):
        tool_input['files_0|to_posix_lines'] = False
    elif kwd.get('space_to_tab', False):
        tool_input['files_0|space_to_tab'] = 'Yes'
    if 'file_name' in kwd:
        tool_input["files_0|NAME"] = kwd['file_name']
    tool_input["files_0|type"] = "upload_dataset"
    return {
        "history_id": history_id,
        "tool_id": tool_id,
        "inputs": tool_input,
        "__files": {},
    }
def _fetch_payload(history_id, file_type=DEFAULT_FILE_TYPE, dbkey=DEFAULT_DBKEY, **kwd):
    """Build a skeleton tools/fetch payload with a single HDA-destined element.

    Recognized **kwd entries: ``to_posix_lines``, ``space_to_tab``, ``file_name``.
    """
    element = {
        "ext": file_type,
        "dbkey": dbkey,
    }
    # Forward only the conversion flags that the caller explicitly supplied.
    for arg in ('to_posix_lines', 'space_to_tab'):
        if arg in kwd:
            element[arg] = kwd[arg]
    if 'file_name' in kwd:
        element['name'] = kwd['file_name']
    return {
        "history_id": history_id,
        "targets": [
            {
                "destination": {"type": "hdas"},
                "elements": [element],
                "auto_decompress": False,
            }
        ],
        "__files": {},
    }
|
# Copyright (c) 2018 ISP RAS (http://www.ispras.ru)
# Ivannikov Institute for System Programming of the Russian Academy of Sciences
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import pytest
import re
from clade import Clade
from clade.extensions.opts import cc_preprocessor_opts
def test_cc_load_deps_by_id(tmpdir, cmds_file):
    """Every compile command's dependency list must contain its input files."""
    clade = Clade(tmpdir, cmds_file)
    ext = clade.parse("CC")
    for cmd in ext.load_all_cmds(compile_only=True):
        deps = ext.load_deps_by_id(cmd["id"])
        assert deps
        assert all(cmd_in in deps for cmd_in in cmd["in"])
@pytest.mark.parametrize("with_opts", [True, False])
@pytest.mark.parametrize("with_deps", [True, False])
def test_cc_load_all_cmds(tmpdir, cmds_file, with_opts, with_deps):
    """load_all_cmds honours with_opts/with_deps and agrees with load_cmd_by_id."""
    clade = Clade(tmpdir, cmds_file)
    ext = clade.parse("CC")
    cmds = list(ext.load_all_cmds(with_opts=with_opts, with_deps=with_deps))
    assert len(cmds) > 1
    for cmd in cmds:
        assert ("opts" in cmd) == with_opts
        assert ("deps" in cmd) == with_deps
        by_id = ext.load_cmd_by_id(cmd["id"])
        for key in ("id", "in", "out"):
            assert by_id[key] == cmd[key]
        if with_opts:
            assert cmd["opts"] == ext.load_opts_by_id(cmd["id"])
        if with_deps:
            assert cmd["deps"] == ext.load_deps_by_id(cmd["id"])
@pytest.mark.parametrize("store_deps", [True, False])
def test_cc_store_deps(tmpdir, cmds_file, store_deps):
    """Dependency files are copied into storage only when Compiler.store_deps is on."""
    clade = Clade(tmpdir, cmds_file, {"Compiler.store_deps": store_deps})
    ext = clade.parse("CC")
    storage_dir = ext.extensions["Storage"].get_storage_dir()
    for cmd in ext.load_all_cmds(with_deps=True, compile_only=True):
        for dep in cmd["deps"]:
            # Relative deps are resolved against the command's working dir.
            path = dep if os.path.isabs(dep) else os.path.join(cmd["cwd"], dep)
            assert os.path.exists(storage_dir + os.sep + path) == store_deps
@pytest.mark.parametrize("with_system_header_files", [True, False])
def test_cc_with_system_header_files(tmpdir, cmds_file, with_system_header_files):
    """System headers (/usr) appear in deps iff the option is enabled."""
    conf = {"CC.with_system_header_files": with_system_header_files}
    ext = Clade(tmpdir, cmds_file, conf).parse("CC")
    for cmd in ext.load_all_cmds(with_deps=True, compile_only=True):
        has_system = any(re.search(r"/usr", dep) for dep in cmd["deps"])
        if with_system_header_files:
            assert has_system
        else:
            assert not has_system
@pytest.mark.parametrize("ignore_cc1", [True, False])
def test_cc_ignore_cc1(tmpdir, cmds_file, ignore_cc1):
    """-cc1 driver commands are parsed only when CC.ignore_cc1 is disabled."""
    ext = Clade(tmpdir, cmds_file, {"CC.ignore_cc1": ignore_cc1}).parse("CC")
    found_cc1 = any(
        "-cc1" in cmd["opts"] for cmd in ext.load_all_cmds(with_opts=True)
    )
    # Only meaningful when either side fired (the build may contain no -cc1 at all).
    if ignore_cc1 or found_cc1:
        assert ignore_cc1 != found_cc1
@pytest.mark.parametrize("compile_only", [True, False])
def test_cc_exclude_list_deps(tmpdir, cmds_file, compile_only):
    """Preprocessor-only commands are filtered out by compile_only."""
    ext = Clade(tmpdir, cmds_file).parse("CC")
    preprocessor_opts = set(cc_preprocessor_opts)
    found_deps_opt = any(
        preprocessor_opts.intersection(cmd["opts"])
        for cmd in ext.load_all_cmds(with_opts=True, compile_only=compile_only)
    )
    assert compile_only != found_deps_opt
@pytest.mark.parametrize("exclude_list", [[], ["/dev/null"]])
@pytest.mark.parametrize("exclude_list_in", [[], ["-"]])
@pytest.mark.parametrize("exclude_list_out", [[], ["/dev/null"]])
def test_cc_exclude_list(tmpdir, cmds_file, exclude_list, exclude_list_in, exclude_list_out):
    """Parsing still yields commands under every combination of exclude lists."""
    conf = {
        "Common.exclude_list": exclude_list,
        "Common.exclude_list_in": exclude_list_in,
        "Common.exclude_list_out": exclude_list_out,
    }
    ext = Clade(tmpdir, cmds_file, conf).parse("CC")
    assert list(ext.load_all_cmds())
@pytest.mark.parametrize("include_list", [[], ["test_project"]])
def test_cc_include_list(tmpdir, cmds_file, include_list):
    """Parsing still yields commands with and without an include list."""
    ext = Clade(tmpdir, cmds_file, {"Common.include_list": include_list}).parse("CC")
    assert list(ext.load_all_cmds())
def test_cc_empty_conf(tmpdir, cmds_file):
    """With a default configuration, parsing produces at least one command."""
    ext = Clade(tmpdir, cmds_file).parse("CC")
    assert list(ext.load_all_cmds())
def test_cc_empty_which_list(tmpdir, cmds_file):
    """An empty CC.which_list means no compiler is recognised, so no commands."""
    ext = Clade(tmpdir, cmds_file, {"CC.which_list": []}).parse("CC")
    assert not list(ext.load_all_cmds())
def test_cc_preprocess(tmpdir, cmds_file):
    """Compiler.preprocess_cmds makes preprocessed (.i) files available."""
    ext = Clade(tmpdir, cmds_file, {"Compiler.preprocess_cmds": True}).parse("CC")
    assert ext.get_all_pre_files()
|
from stormed.util import WithFields
class Declare(WithFields):
    # AMQP queue.declare (class 50, method 10): synchronous request, no body.
    _name = "queue.declare"
    _class_id = 50
    _method_id = 10
    _sync = True
    _content = False
    _fields = [
        ('ticket', 'short'),
        ('queue', 'shortstr'),
        ('passive', 'bit'),
        ('durable', 'bit'),
        ('exclusive', 'bit'),
        ('auto_delete', 'bit'),
        ('nowait', 'bit'),
        ('arguments', 'table'),
    ]
class DeclareOk(WithFields):
    # AMQP queue.declare-ok (50, 11): broker reply with queue name and counts.
    _name = "queue.declare-ok"
    _class_id = 50
    _method_id = 11
    _sync = False
    _content = False
    _fields = [
        ('queue', 'shortstr'),
        ('message_count', 'long'),
        ('consumer_count', 'long'),
    ]
class Bind(WithFields):
    # AMQP queue.bind (50, 20): bind a queue to an exchange with a routing key.
    _name = "queue.bind"
    _class_id = 50
    _method_id = 20
    _sync = True
    _content = False
    _fields = [
        ('ticket', 'short'),
        ('queue', 'shortstr'),
        ('exchange', 'shortstr'),
        ('routing_key', 'shortstr'),
        ('nowait', 'bit'),
        ('arguments', 'table'),
    ]
class BindOk(WithFields):
    # AMQP queue.bind-ok (50, 21): empty confirmation frame.
    _name = "queue.bind-ok"
    _class_id = 50
    _method_id = 21
    _sync = False
    _content = False
    _fields = [
    ]
class Purge(WithFields):
    # AMQP queue.purge (50, 30): drop all messages from a queue.
    _name = "queue.purge"
    _class_id = 50
    _method_id = 30
    _sync = True
    _content = False
    _fields = [
        ('ticket', 'short'),
        ('queue', 'shortstr'),
        ('nowait', 'bit'),
    ]
class PurgeOk(WithFields):
    # AMQP queue.purge-ok (50, 31): reports how many messages were purged.
    _name = "queue.purge-ok"
    _class_id = 50
    _method_id = 31
    _sync = False
    _content = False
    _fields = [
        ('message_count', 'long'),
    ]
class Delete(WithFields):
    # AMQP queue.delete (50, 40): remove a queue, optionally only if unused/empty.
    _name = "queue.delete"
    _class_id = 50
    _method_id = 40
    _sync = True
    _content = False
    _fields = [
        ('ticket', 'short'),
        ('queue', 'shortstr'),
        ('if_unused', 'bit'),
        ('if_empty', 'bit'),
        ('nowait', 'bit'),
    ]
class DeleteOk(WithFields):
    # AMQP queue.delete-ok (50, 41): reports how many messages were deleted.
    _name = "queue.delete-ok"
    _class_id = 50
    _method_id = 41
    _sync = False
    _content = False
    _fields = [
        ('message_count', 'long'),
    ]
class Unbind(WithFields):
    # AMQP queue.unbind (50, 50): remove a queue/exchange binding.
    _name = "queue.unbind"
    _class_id = 50
    _method_id = 50
    _sync = True
    _content = False
    _fields = [
        ('ticket', 'short'),
        ('queue', 'shortstr'),
        ('exchange', 'shortstr'),
        ('routing_key', 'shortstr'),
        ('arguments', 'table'),
    ]
class UnbindOk(WithFields):
    # AMQP queue.unbind-ok (50, 51): empty confirmation frame.
    _name = "queue.unbind-ok"
    _class_id = 50
    _method_id = 51
    _sync = False
    _content = False
    _fields = [
    ]
# Map AMQP method ids within class 50 (queue) to their frame classes,
# used when decoding incoming method frames.
id2method = {
    10: Declare,
    11: DeclareOk,
    20: Bind,
    21: BindOk,
    30: Purge,
    31: PurgeOk,
    40: Delete,
    41: DeleteOk,
    50: Unbind,
    51: UnbindOk,
}
|
# This file was automatically generated by SWIG (http://www.swig.org).
# Version 1.3.31
#
# Don't modify this file, modify the SWIG interface instead.
# This file is compatible with both classic and new-style classes.
import _lbiemesher
import new
# SWIG runtime shims (Python 2): bind methods the classic-class way and
# fall back gracefully on interpreters without `property`.
new_instancemethod = new.instancemethod
try:
    _swig_property = property
except NameError:
    pass # Python < 2.2 doesn't have 'property'.
def _swig_setattr_nondynamic(self,class_type,name,value,static=1):
    # Route attribute writes through SWIG's setter table; with static=1 new
    # attributes not declared by the wrapped C++ class are rejected.
    if (name == "thisown"): return self.this.own(value)
    if (name == "this"):
        if type(value).__name__ == 'PySwigObject':
            self.__dict__[name] = value
            return
    method = class_type.__swig_setmethods__.get(name,None)
    if method: return method(self,value)
    if (not static) or hasattr(self,name):
        self.__dict__[name] = value
    else:
        raise AttributeError("You cannot add attributes to %s" % self)
def _swig_setattr(self,class_type,name,value):
    # Dynamic variant of the setter above: allows adding new attributes (static=0).
    return _swig_setattr_nondynamic(self,class_type,name,value,0)
def _swig_getattr(self,class_type,name):
    # Read via SWIG's getter table; 'thisown' queries C++ pointer ownership.
    # NOTE: Python 2-only syntax below (raise AttributeError,name) — this
    # generated module cannot run under Python 3.
    if (name == "thisown"): return self.this.own()
    method = class_type.__swig_getmethods__.get(name,None)
    if method: return method(self)
    raise AttributeError,name
def _swig_repr(self):
    # Show the proxied C++ pointer when available; fall back to a bare repr.
    try: strthis = "proxy of " + self.this.__repr__()
    except: strthis = ""
    return "<%s.%s; %s >" % (self.__class__.__module__, self.__class__.__name__, strthis,)
import types
try:
    _object = types.ObjectType
    _newclass = 1
except AttributeError:
    # No classic classes available: fall back to a plain base class.
    class _object : pass
    _newclass = 0
del types
# Re-export C-level constants from the extension module: default error
# tolerances, default isovalues, and mesh element-type identifiers.
DEFAULT_ERR = _lbiemesher.DEFAULT_ERR
DEFAULT_ERR_IN = _lbiemesher.DEFAULT_ERR_IN
DEFAULT_IVAL = _lbiemesher.DEFAULT_IVAL
DEFAULT_IVAL_IN = _lbiemesher.DEFAULT_IVAL_IN
SINGLE = _lbiemesher.SINGLE
HEXA = _lbiemesher.HEXA
DOUBLE = _lbiemesher.DOUBLE
TETRA = _lbiemesher.TETRA
T_4_H = _lbiemesher.T_4_H
TETRA2 = _lbiemesher.TETRA2
class LBIE_Mesher(_object):
    # SWIG proxy for the C++ LBIE_Mesher; every method delegates to the
    # _lbiemesher extension module. Auto-generated — do not edit by hand.
    __swig_setmethods__ = {}
    __setattr__ = lambda self, name, value: _swig_setattr(self, LBIE_Mesher, name, value)
    __swig_getmethods__ = {}
    __getattr__ = lambda self, name: _swig_getattr(self, LBIE_Mesher, name)
    __repr__ = _swig_repr
    def __init__(self, *args):
        # Wrap a freshly constructed C++ instance; `this` holds the SWIG pointer.
        this = _lbiemesher.new_LBIE_Mesher(*args)
        try: self.this.append(this)
        except: self.this = this
    __swig_destroy__ = _lbiemesher.delete_LBIE_Mesher
    __del__ = lambda self : None;
    # `oc` attribute is backed by C-level get/set accessors.
    __swig_setmethods__["oc"] = _lbiemesher.LBIE_Mesher_oc_set
    __swig_getmethods__["oc"] = _lbiemesher.LBIE_Mesher_oc_get
    if _newclass:oc = _swig_property(_lbiemesher.LBIE_Mesher_oc_get, _lbiemesher.LBIE_Mesher_oc_set)
    # Thin delegating wrappers generated by SWIG.
    def inputData(*args, **kwargs): return _lbiemesher.LBIE_Mesher_inputData(*args, **kwargs)
    def fileOpen(*args, **kwargs): return _lbiemesher.LBIE_Mesher_fileOpen(*args, **kwargs)
    def fileSave(*args, **kwargs): return _lbiemesher.LBIE_Mesher_fileSave(*args, **kwargs)
    def setMesh(*args, **kwargs): return _lbiemesher.LBIE_Mesher_setMesh(*args, **kwargs)
    def errorChange(*args, **kwargs): return _lbiemesher.LBIE_Mesher_errorChange(*args, **kwargs)
    def errorChange_in(*args, **kwargs): return _lbiemesher.LBIE_Mesher_errorChange_in(*args, **kwargs)
    def isovalueChange(*args, **kwargs): return _lbiemesher.LBIE_Mesher_isovalueChange(*args, **kwargs)
    def isovalueChange_in(*args, **kwargs): return _lbiemesher.LBIE_Mesher_isovalueChange_in(*args, **kwargs)
    def outTriangle(*args, **kwargs): return _lbiemesher.LBIE_Mesher_outTriangle(*args, **kwargs)
    def outTetra(*args, **kwargs): return _lbiemesher.LBIE_Mesher_outTetra(*args, **kwargs)
    def outHexa(*args, **kwargs): return _lbiemesher.LBIE_Mesher_outHexa(*args, **kwargs)
    def outQuad(*args, **kwargs): return _lbiemesher.LBIE_Mesher_outQuad(*args, **kwargs)
    def getNumFaces(*args, **kwargs): return _lbiemesher.LBIE_Mesher_getNumFaces(*args, **kwargs)
    def getNumVerts(*args, **kwargs): return _lbiemesher.LBIE_Mesher_getNumVerts(*args, **kwargs)
    def getVolMin(*args, **kwargs): return _lbiemesher.LBIE_Mesher_getVolMin(*args, **kwargs)
    def getVolMax(*args, **kwargs): return _lbiemesher.LBIE_Mesher_getVolMax(*args, **kwargs)
    def getOuterSurface(*args, **kwargs): return _lbiemesher.LBIE_Mesher_getOuterSurface(*args, **kwargs)
    def setXCutPlane(*args, **kwargs): return _lbiemesher.LBIE_Mesher_setXCutPlane(*args, **kwargs)
    def setZCutPlane(*args, **kwargs): return _lbiemesher.LBIE_Mesher_setZCutPlane(*args, **kwargs)
    def getSurface(*args, **kwargs): return _lbiemesher.LBIE_Mesher_getSurface(*args, **kwargs)
# Register the proxy class with the SWIG runtime type system.
LBIE_Mesher_swigregister = _lbiemesher.LBIE_Mesher_swigregister
LBIE_Mesher_swigregister(LBIE_Mesher)
|
<filename>backend/api/models.py
import datetime
from django.db import models
class Company(models.Model):
    """A fuel vendor; company names are unique."""

    name = models.CharField(unique=True, max_length=55)

    def __str__(self):
        return str(self.name)
class Province(models.Model):
    """A province identified by its two-letter code (also the primary key)."""

    code = models.CharField(primary_key=True, max_length=2)

    def __str__(self):
        return str(self.code)
class City(models.Model):
    """A city, unique per province."""

    name = models.CharField(max_length=55)
    province = models.ForeignKey(Province, models.PROTECT)

    class Meta:
        unique_together = ('name', 'province')
        ordering = ('province', 'name')

    def __str__(self):
        return f"{self.name}, {self.province}"
class VehicleTag(models.Model):
    """A categorized descriptor (body/engine/fuel/...) attachable to a Vehicle."""
    # Short mnemonic code used as the primary key (e.g. a 3-letter id).
    id = models.CharField(primary_key=True, max_length=3)
    description = models.CharField(max_length=35)
    category_choices = [
        ('body', 'Body'),
        ('drivetrain', 'Drivetrain'),
        ('engine', 'Engine'),
        ('fuel', 'Fuel'),
        ('transmission', 'Transmission'),
    ]
    category = models.CharField(max_length=15, choices=category_choices)
    class Meta:
        ordering = ('category', 'id')
    def __str__(self):
        return "%s" % (self.description)
class Vehicle(models.Model):
    """A vehicle described by year/make/model plus optional categorized tags."""
    #overrun range by 2 so we allow next model year options
    year_choices = [(r, r) for r in range(1950, datetime.date.today().year+2)]
    year = models.IntegerField(choices=year_choices)
    manufacturer = models.CharField(max_length=35)
    model = models.CharField(max_length=25)
    submodel = models.CharField(max_length=10, blank=True)
    vin = models.CharField(max_length=25, blank=True)
    engine_displacement_liters = models.DecimalField(
        max_digits=4,
        decimal_places=2,
        null=True,
        blank=True
    )
    # Each tag FK is restricted to its own VehicleTag category and is
    # PROTECTed so tags in use cannot be deleted.
    body = models.ForeignKey(
        VehicleTag,
        models.PROTECT,
        null=True,
        blank=True,
        limit_choices_to={'category':'body'},
        related_name='body'
    )
    engine = models.ForeignKey(
        VehicleTag,
        models.PROTECT,
        null=True,
        blank=True,
        limit_choices_to={'category':'engine'},
        related_name='engine'
    )
    fuel = models.ForeignKey(
        VehicleTag,
        models.PROTECT,
        null=True,
        blank=True,
        limit_choices_to={'category':'fuel'},
        related_name='fuel'
    )
    transmission = models.ForeignKey(
        VehicleTag,
        models.PROTECT,
        null=True,
        blank=True,
        limit_choices_to={'category':'transmission'},
        related_name='transmission'
    )
    drivetrain = models.ForeignKey(
        VehicleTag,
        models.PROTECT,
        null=True,
        blank=True,
        limit_choices_to={'category':'drivetrain'},
        related_name='drivetrain'
    )
    def __str__(self):
        return "%s %s %s" % (self.year, self.manufacturer, self.model)
class Fuel(models.Model):
    """One fuel fill-up record for a vehicle (volume, distance, price, place)."""
    date = models.DateTimeField()
    liters = models.DecimalField(max_digits=6, decimal_places=3)
    distance = models.DecimalField(max_digits=5, decimal_places=1)
    # Price per liter.
    price_l = models.DecimalField(max_digits=4, decimal_places=3)
    notes = models.CharField(max_length=150, blank=True)
    city = models.ForeignKey(City, models.PROTECT)
    company = models.ForeignKey(Company, models.PROTECT)
    # NOTE(review): related_name='vehicle_id' is misleading (it is the reverse
    # accessor Vehicle.vehicle_id, not an id) — renaming would change the API,
    # so it is only flagged here.
    vehicle = models.ForeignKey(Vehicle, models.PROTECT, related_name='vehicle_id')

    class Meta:
        ordering = ('vehicle', 'date')
|
<gh_stars>0
import datetime, uuid
from django.db import models
from django.utils import timezone
from django.utils.translation import gettext_lazy as _
from django.core.validators import ValidationError
from Laelia.apps.base.fields import MinMaxFloatField
from Laelia.apps.base.functions import funcTime
from Laelia.apps.base.mixins import UrlBase, CreationModificationDatesBase, DateTimeBase, ScheduleBase
class CareRelationMixin(UrlBase):
    """Abstract mixin tying a record to a patient/professional Relation."""
    relation = models.ForeignKey('base.Relation', on_delete=models.SET_NULL, blank=True, null=True)
    uuid = models.UUIDField(default=uuid.uuid4, unique=True, editable=False)

    class Meta: abstract = True

    def clean(self):
        # NOTE(review): self.date is not declared on this mixin — it is assumed
        # to come from a sibling base (e.g. DateTimeBase) on the concrete model;
        # confirm before reusing this mixin standalone.
        super(CareRelationMixin, self).clean()
        if not self.date: self.date = timezone.localdate()
class TimeLineMixin(CareRelationMixin, DateTimeBase):
    """Abstract mixin anchoring a record on the patient's timeline.

    A record may be positioned either by calendar date (timeline_date) or by
    the patient's age in years (timeline_age); clean() keeps the two in sync.
    """
    timeline_age = MinMaxFloatField(min_value=0, max_value=100, blank=True, null=True)
    timeline_date = models.DateField(blank=True, null=True)

    class Meta:
        abstract = True

    @staticmethod
    def age_from_date(patient, date):
        """Patient's age in years (1 decimal) at *date*; funcTime(365) is one year."""
        # Idiom fix: use the round() builtin instead of calling .__round__() directly.
        return round((date - patient.birth_date) / funcTime(365), 1)

    @staticmethod
    def date_from_age(patient, age=None):
        """Approximate calendar date at which the patient was *age* years old."""
        # Bug fix: the original "if age:" treated age 0 (birth) as missing and
        # silently returned None; test explicitly for None instead.
        if age is not None:
            return funcTime('today') - ((patient.age - age) * funcTime(365))

    @classmethod
    def from_age(cls, age):
        """Alternate constructor from an age in years."""
        return cls(timeline_age=age)

    @classmethod
    def from_date(cls, date):
        """Alternate constructor from an exact date."""
        return cls(timeline_date=date)

    @classmethod
    def from_year(cls, year):
        """Alternate constructor from a year only, anchored mid-year (July 1st)."""
        return cls(timeline_date=datetime.date(year=year, month=7, day=1))

    @classmethod
    def from_year_month(cls, year, month):
        """Alternate constructor from year and month, anchored on the 1st."""
        return cls(timeline_date=datetime.date(year=year, month=month, day=1))

    def clean(self):
        """Derive the missing one of timeline_age/timeline_date; default to today."""
        if self.relation:
            if self.timeline_date:
                self.timeline_age = TimeLineMixin.age_from_date(patient=self.relation.patient, date=self.timeline_date)
            elif self.timeline_age:
                self.timeline_date = TimeLineMixin.date_from_age(patient=self.relation.patient, age=self.timeline_age)
            else:
                self.timeline_date = datetime.date.today()
                self.timeline_age = TimeLineMixin.age_from_date(patient=self.relation.patient, date=self.timeline_date)
        else:
            raise ValidationError(_('You need a relation'))
class EventBase(TimeLineMixin):
    """Abstract life event placed on the patient timeline (trauma, loss, ...)."""
    class EventType(models.TextChoices):
        ABUSE = 'abuse', _('abuse')
        TRAUMA = 'trauma', _('trauma')
        REWARD = 'reward', _('reward')
        STRESSOR = 'stressor', _('stressor')
        ACHIEVEMENT = 'achievement', _('achievement')
        LOSS = 'loss', _('loss')
    event_type = models.CharField(blank=True, null=True, max_length=50, choices=EventType.choices)

    class ExperienceIntensity(models.IntegerChoices):
        MILD = 1, _('mild')
        MODERATE = 2, _('moderate')
        INTENSE = 3, _('intense')
    experience_intensity = models.IntegerField(choices=ExperienceIntensity.choices, blank=True, null=True)

    # -1/0/+1 valence of how the event was experienced.
    class SubjectiveExperience(models.IntegerChoices):
        POSITIVE = 1, _('positive')
        NEUTRAL = 0, _('neutral')
        NEGATIVE = -1, _('negative')
    subjective_experience = models.IntegerField(choices=SubjectiveExperience.choices, blank=True, null=True)
    notes = models.TextField(_('Notes'), blank=True, null=True)

    class Meta: abstract = True
class PharmacotherapyMixin(models.Model):
    """Abstract mixin recording the pharmacotherapy context of an observation."""
    class PharmacotherapyContext(models.TextChoices):
        SIDE_EFFECT = 'side effect', _('side effect')
        GOOD_RESPONSE = 'good response', _('good response')
        NO_RESPONSE = 'no response', _('no response')
    pharmaco_context = models.CharField(choices=PharmacotherapyContext.choices, blank=True, null=True, max_length=50)

    class Meta: abstract = True
class DocumentBase(DateTimeBase, UrlBase):
    """Abstract base for patient documents (attendance/leave certificates, reports)."""
    uuid = models.UUIDField(default=uuid.uuid4, unique=True, editable=False)
    relation = models.ForeignKey('base.Relation', on_delete=models.CASCADE, blank=True, null=True)

    class DocumentType(models.TextChoices):
        VISIT_ATTENDANCE = 'visit attendance', _('visit attendance')
        TREATMENT_ATTENDANCE = 'treatment attendance', _('treatment attendance')
        LEAVE = 'leave', _('leave')
        CAPABILITY = 'capability', _('capability')
        DISABILITY = 'disability', _('disability')
        REPORT = 'report', _('report')
        REFERRAL = 'referral', _('referral')
        RETURN = 'return', _('return')

    document_type = models.CharField(choices=DocumentType.choices, max_length=50, blank=True, null=True)
    document_note = models.TextField(blank=True, null=True)
    # Number of leave days; required only for LEAVE documents (see clean()).
    days = models.IntegerField(blank=True, null=True)

    class Meta:
        abstract = True

    def clean(self):
        """Validate per-type required fields; default document_date to today."""
        super(DocumentBase, self).clean()
        if not self.relation: raise ValidationError(_("Please select a Relation"))
        if not self.document_type: raise ValidationError(_("Please select a Document Type"))
        if not self.document_date: self.document_date = datetime.date.today()
        if self.document_type == self.DocumentType.VISIT_ATTENDANCE:
            # NOTE(review): from_time/to_time appear to come from DateTimeBase.
            if not self.from_time or not self.to_time:
                msg = _("You need a 'from time' and 'to time' for Visit Attendance Certificate")
                raise ValidationError(msg)
        elif self.document_type == self.DocumentType.LEAVE:
            if not self.days:
                msg = _("You need a 'leave days' for Leave Certificate")
                raise ValidationError(msg)

    def verbose_1(self):
        """Return (title, paragraph1, paragraph2) rendering the document in Portuguese.

        Handles VISIT_ATTENDANCE, LEAVE and TREATMENT_ATTENDANCE; any other
        document type yields three empty strings.
        """
        # Bug fix: the original left title/text1/text2 unbound for unsupported
        # document types, so the return statement raised UnboundLocalError.
        title, text1, text2 = '', '', ''
        if self.document_type == self.DocumentType.VISIT_ATTENDANCE:
            title = 'ATESTADO DE COMPARECIMENTO'
            text1 = f'Certifico que {self.relation.patient} compareceu em visita com ' \
                    f'{self.relation.professional} ' \
                    f' dia {self.document_date.strftime("%d/%m/%Y")}.'
            text2 = f'Local: {self.relation.professional.full_address}.\n Horario: {self.from_time.strftime("%H:%M")} - {self.to_time.strftime("%H:%M")} '
        elif self.document_type == self.DocumentType.LEAVE:
            title = 'LICENÇA MÉDICA'
            text1 = f'Atesto para os devidos fins que {self.relation.patient} está em acompanhamento médico exibindo incapacidade ' \
                    f'funcional temporária decorrente de transtorno ativo. Solicito afastamento das suas atividades por {self.days} dias a ' \
                    f'partir de {self.document_date.strftime("%d/%m/%Y")}, enquando deve ficar sob supervisão. '
            text2 = 'Ao final deste período deverá comparecer em nova visita médica onde ' \
                    'passará por avaliação da resposta, tolerabilidade e aderência ao tratamento indicado.'
        elif self.document_type == self.DocumentType.TREATMENT_ATTENDANCE:
            title = 'CERTIFICADO DE TRATAMENTO'
            text1 = f'Certifico que {self.relation.patient} esta em acompanhamento iniciado em {self.relation.start_date.strftime("%d/%m/%Y") if self.relation.start_date else self.relation.created.strftime("%d/%m/%Y")}.'
            text2 = 'Me encontro à disposição para maiores esclarecimentos caso necessário.'
        return title, text1, text2
class VisitBase(CareRelationMixin, DateTimeBase):
    """Abstract clinical visit recorded in SOAP format (subjective/objective/assessment/plan)."""
    urgency = models.BooleanField(_('Acute?'), default=False)
    new_complaint = models.BooleanField(_('New?'), default=False)
    complaint = models.CharField(max_length=200, blank=True, null=True)
    subjective = models.TextField(_('Subjective'), blank=True, null=True)
    objective = models.TextField(_('Objective'), blank=True, null=True)
    assessment = models.TextField(_('Assessment'), blank=True, null=True)
    plan = models.TextField(_('Plan'), blank=True, null=True)

    class Meta: abstract = True

    def clean(self):
        """Default date/from_date/from_time to "now" when left blank."""
        super(VisitBase, self).clean()
        if not self.date: self.date = timezone.now().date()
        if not self.from_date: self.from_date = self.date
        if not self.from_time: self.from_time = timezone.now().time()
class AttendanceMixin(CareRelationMixin, DateTimeBase):
    """Abstract mixin for attendances bounded by start/end timestamps."""
    start = models.DateTimeField(blank=True, null=True)
    end = models.DateTimeField(blank=True, null=True)

    class Meta: abstract = True
class DocumentMixin(CareRelationMixin, DateTimeBase):
    """Abstract mixin for three-part documents (introduction/discussion/conclusion)."""
    introduction = models.TextField(blank=True, null=True)
    discussion = models.TextField(blank=True, null=True)
    conclusion = models.TextField(blank=True, null=True)

    class Meta: abstract = True
class VisitAttendanceBase(ScheduleBase, CareRelationMixin):
    """Abstract visit-attendance certificate rendered from schedule data."""
    class VisitReason(models.TextChoices):
        FIRST_VISIT = 'first visit', _('first visit')
        FOLLOW_UP_VISIT = 'follow up visit', _('follow up visit')
        EMERGENCY_VISIT = 'emergency visit', _('emergency visit')
        THERAPY_SESSION = 'therapy session', _('therapy session')
        DIAGNOSTIC_PROCEDURE = 'diagnostic procedure', _('diagnostic procedure')
        TREATMENT_PROCEDURE = 'treatment procedure', _('treatment procedure')
    visit_reason = models.CharField(max_length=50, choices=VisitReason.choices, blank=True, null=True)

    def clean(self):
        """Require a relation and a date before saving."""
        super(VisitAttendanceBase, self).clean()
        if not self.relation: raise ValidationError(_('Please select Relation'))
        if not self.date: raise ValidationError(_('Please fill Date'))

    @property
    def as_list_of_strings(self):
        """Render the certificate as [title, body] paragraphs (Portuguese).

        NOTE(review): self.hour/self.min/self.duration presumably come from
        ScheduleBase; when duration is set but hour/min are None, the
        datetime() call below raises TypeError — confirm upstream validation.
        """
        strings, text = list(), str()
        strings.append(f'ATESTADO DE COMPARECIMENTO')
        text += f'Certifico que {self.relation.patient}'
        if self.visit_reason: text += f' compareceu em atendimento com {self.relation.professional} para {self.get_visit_reason_display()}'
        else: text += f' compareceu em atendimento com {self.relation.professional}'
        text += f' no dia {self.date.strftime("%d/%m/%Y")}'
        if self.hour and self.min: text += f' às {self.hour}:{self.min}.'
        elif self.hour: text += f' às {self.hour} h.'
        else: text += f'.'
        if self.duration:
            # End of the excused period = visit start plus duration in minutes.
            date_time = datetime.datetime(day=self.date.day, month=self.date.month, year=self.date.year, hour=self.hour, minute=self.min)
            duration = datetime.timedelta(minutes=self.duration)
            end = date_time + duration
            text += f' Não pode comparecer às suas atividades até {end.strftime(" às %H:%M h")}'
            if end.date() == self.date: text += ' do mesmo dia.'
            else: text += f' do dia {end.date()}.'
        strings.append(text)
        return strings
class TreatmentAttendanceBase(CareRelationMixin):
    """Abstract certificate of an ongoing or finished treatment period."""
    start = models.DateField(_("Start Date"), blank=True, null=True)
    end = models.DateField(_("End Date"), blank=True, null=True)
    visits = models.IntegerField(_('Visits Completed'), help_text=_('Leave blank for auto completion'), blank=True, null=True)
    amount_paid = models.DecimalField(decimal_places=2, max_digits=7, blank=True, null=True)

    @property
    def as_list_of_strings(self):
        """Render the certificate as [title, body] paragraphs (Portuguese).

        NOTE(review): the period sentence is appended even when self.relation
        is unset, and self.start.strftime raises if start is None — confirm
        callers always provide both.
        """
        strings, text = list(), str()
        strings.append('CERTIFICAÇÃO DE TRATAMENTO')
        if self.relation: text += f'Certifico que {self.relation.patient}'
        # Idiom fix: compare against None with "is not", not "!=" (PEP 8).
        if self.end is not None: text += f' esteve em tratamento com {self.relation.professional} entre {self.start.strftime("%d/%m/%Y")} e {self.end.strftime("%d/%m/%Y")}.'
        else: text += f' está em tratamento com {self.relation.professional} iniciado em {self.start.strftime("%d/%m/%Y")} até o momento atual.'
        if self.visits and self.amount_paid: text += f' Compareceu ao longo deste período em {self.visits} visitas com custo total para o paciente de R$ {self.amount_paid}.'
        elif self.visits: text += f' Compareceu ao longo deste período em {self.visits} visitas no total.'
        elif self.amount_paid: text += f' O custo total ao longo deste período foi de R$ {self.amount_paid}.'
        strings.append(text)
        return strings
class PrescriptionBase(CareRelationMixin, CreationModificationDatesBase):
    """Abstract prescription of a commercial drug with dose and schedule."""
    comercial_drug = models.ForeignKey('meds.ComercialDrug', on_delete=models.CASCADE)
    dose = models.DecimalField(max_digits=5, decimal_places=2, blank=True, null=True)
    # Times per frequency period, shown as '1x'..'12x'.
    dosage_regimen = models.IntegerField(choices=[(x, f'{x}x') for x in range(1, 13)], default=1, blank=True, null=True)

    # Choice values are the period length in days.
    class FrequencyChoices(models.IntegerChoices):
        DAILY = 1, _('daily')
        WEEKLY = 7, _('weekly')
        BIWEEKLY = 14, _('biweekly')
        MONTHLY = 30, _('month')
    frequency = models.IntegerField(choices=FrequencyChoices.choices, default=FrequencyChoices.DAILY)
    duration = models.IntegerField(blank=True, null=True,
                                   help_text=_('Days treatment duration. Keep blank for continuous'))

    class Meta: abstract = True
class MedicalLicenceBase(DocumentMixin):
    """Abstract medical-leave licence document."""
    # Shadow inherited time-range fields with None to remove them from this model.
    from_time = None
    to_date = None
    to_time = None

    class DisabilityType(models.TextChoices):
        PERMANENT_DISABILITY = 'permanent disability', _('permanent disability')
        TEMPORARY_DISABILITY = 'temporary disability', _('temporary disability')
    disability_type = models.CharField(choices=DisabilityType.choices, blank=True, null=True, max_length=50)
    leave_days = models.IntegerField()

    class Meta: abstract = True
|
"""
pyart.aux_io.read_gamic
=======================
Utilities for reading gamic hdf5 files.
.. autosummary::
:toctree: generated/
read_gamic
_h5_to_dict
_h5_moments_to_dict
"""
import datetime
import h5py
import numpy as np
from ..config import FileMetadata
from ..io.common import make_time_unit_str
from ..core.radar import Radar
def read_gamic(filename, field_names=None, additional_metadata=None,
               file_field_names=False, exclude_fields=None):
    """
    Read a gamic hdf5 file.

    Parameters
    ----------
    filename : str
        Name of gamic hdf5 file to read data from.
    field_names : dict, optional
        Mapping of file moment names to radar field names.
    additional_metadata : dict of dicts, optional
        Extra metadata merged into the defaults.
    file_field_names : bool, optional
        True to use the moment names from the file as field names.
    exclude_fields : list, optional
        Fields to exclude from the returned radar object.

    Returns
    -------
    radar : Radar
        Radar object.

    Notes
    -----
    First Test.
    """
    # create metadata retrieval object
    filemetadata = FileMetadata('cfradial', field_names, additional_metadata,
                                file_field_names, exclude_fields)
    # open h5 file and get handle
    h5obj = h5py.File(filename, 'r')
    # initialize metadata as dict
    metadata = dict()
    # 4.1 Global attribute -> move to metadata dictionary
    # no global attribs in gamic hdf5
    # 4.2 Dimensions (do nothing) TODO check if n_points present
    # 4.3 Global variable -> move to metadata dictionary
    metadata['volume_number'] = 0
    # map some global vars to possible gamic counterparts
    global_vars = {'platform_type': 'fixed', 'instrument_type': 'radar',
                   'primary_axis': 'axis_z', 'source': 'software',
                   'references': 'template_name',
                   'instrument_name': 'site_name',
                   'institution': 'host_name', 'version': 'sdp_name'}
    # ignore time_* global variables, these are calculated from the time
    # variable when the file is written.
    # Bug fix: dict.iteritems() is Python 2 only (AttributeError on Python 3);
    # use items() instead.
    for var, default_value in global_vars.items():
        try:
            metadata[var] = h5obj['/how'].attrs[default_value]
        except KeyError:
            metadata[var] = default_value
    # 4.4 coordinate variables -> create attribute dictionaries
    time = filemetadata.get_metadata('time')
    # test for possible TimeStamp Issues
    # NOTE(review): h5py may return the attribute as bytes; strptime would then
    # need a decode — confirm against the files in use.
    try:
        scan_time = datetime.datetime.strptime(
            h5obj['/scan0/how'].attrs['timestamp'], '%Y-%m-%dT%H:%M:%SZ')
    except ValueError:
        scan_time = datetime.datetime.strptime(
            h5obj['/scan0/how'].attrs['timestamp'], '%Y-%m-%dT%H:%M:%S.000Z')
    time['units'] = make_time_unit_str(scan_time)
    # get scan0 ray header
    # in volume file there are several scans, scanX
    ray_header = h5obj['/scan0/ray_header']
    # get microseconds since linux epoch
    scan_time_se = (scan_time -
                    datetime.datetime(1970, 1, 1)).total_seconds() * 1e6
    # get timestamp array and subtract epoch, change to seconds again
    time['data'] = (np.array(ray_header['timestamp']) - scan_time_se) / 1e6
    # get range, bin etc
    range_start = h5obj['/scan0/how'].attrs['range_start']
    range_ = h5obj['/scan0/how'].attrs['range']
    bin_count = h5obj['/scan0/how'].attrs['bin_count']
    range_step = h5obj['/scan0/how'].attrs['range_step']
    range_samples = h5obj['/scan0/how'].attrs['range_samples']
    _range = filemetadata.get_metadata('range')
    # create range array of bin-center distances
    _range['data'] = np.linspace(
        range_start + (range_step * range_samples / 2.),
        range_ - (range_step * range_samples / 2.), bin_count)
    # 4.5 Ray dimension variables TODO working with this
    # 4.6 Location variables -> create attribute dictionaries
    # gamic location variables are in subgroup /where
    latitude = _h5_to_dict(h5obj, '/where', 'lat', 'latitude', filemetadata)
    longitude = _h5_to_dict(h5obj, '/where', 'lon', 'longitude', filemetadata)
    altitude = _h5_to_dict(h5obj, '/where', 'height', 'altitude',
                           filemetadata)
    altitude_agl = None
    # 4.7 Sweep variables -> create attribute dictionaries
    # if only one scan -> one sweep
    # TODO: account for volume scans
    sweep_number = filemetadata.get_metadata('sweep_number')
    sweep_number['data'] = np.array([0])
    sweep_mode = _h5_to_dict(h5obj, '/what', 'object', 'sweep_mode',
                             filemetadata)
    if 'PVOL' in sweep_mode['data']:
        fixed_angle = _h5_to_dict(h5obj, '/scan0/how', 'elevation',
                                  'fixed_angle', filemetadata)
    elif 'RHI' in sweep_mode['data']:
        fixed_angle = _h5_to_dict(h5obj, '/scan0/how', 'azimuth',
                                  'fixed_angle', filemetadata)
    else:
        # Bug fix: fixed_angle was left unbound (NameError at the Radar() call)
        # for scan objects other than PVOL/RHI; fall back to elevation.
        fixed_angle = _h5_to_dict(h5obj, '/scan0/how', 'elevation',
                                  'fixed_angle', filemetadata)
    sweep_start_ray_index = filemetadata.get_metadata('sweep_start_ray_index')
    sweep_start_ray_index['data'] = np.array([0])
    sweep_end_ray_index = filemetadata.get_metadata('sweep_end_ray_index')
    sweep_end_ray_index['data'] = [h5obj['/scan0/how'].attrs['ray_count'] - 1]
    # target scan speed is used
    target_scan_rate = _h5_to_dict(h5obj, '/scan0/how', 'scan_speed',
                                   'target_scan_rate', filemetadata)
    # first sweep mode determines scan_type
    mode = sweep_mode['data']
    if "PVOL" in mode:
        scan_type = "ppi"
    elif "sec" in mode:
        scan_type = "sector"
    elif "RHI" in mode:
        scan_type = "rhi"
    else:
        scan_type = "other"
    # 4.8 Sensor pointing variables -> create attribute dictionaries
    azi_start = np.array(ray_header['azimuth_start'])
    azi_stop = np.array(ray_header['azimuth_stop'])
    azimuth = filemetadata.get_metadata('azimuth')
    # unwrap rays crossing north so the start/stop average is correct
    zero_index = np.where(azi_stop < azi_start)
    azi_stop[zero_index[0]] += 360
    azimuth['data'] = (azi_start + azi_stop) / 2.0
    ele_start = np.array(ray_header['elevation_start'])
    ele_stop = np.array(ray_header['elevation_stop'])
    elevation = filemetadata.get_metadata('elevation')
    elevation['data'] = (ele_start + ele_stop) / 2.0
    # scan speed per ray could be read
    scan_rate = None
    # antenna transistions are not recorder in gamic hdf5
    antenna_transition = None
    # 4.9 Moving platform geo-reference variables
    # TODO moving radar subclass
    # 4.10 Moments field data variables -> field attribute dictionary
    # moments are in subgroup /scanX
    moments = h5obj['/scan0']
    # Bug fix: Group.iteritems() is Python 2 only; items() works on h5py groups.
    fields = dict(
        [(moments[k].attrs['moment'], _h5_moments_to_dict(v, filemetadata))
         for k, v in moments.items() if 'mom' in k])
    ## 4.5 instrument_parameters sub-convention -> instrument_parameters dict
    ## the meta_group attribute is often set incorrectly so we cannot
    ## use this as a indicator of instrument_parameters
    ## need to discuss gamic hdf5 instrument_parameters
    ##keys = _find_all_meta_group_vars(ncvars, 'instrument_parameters')
    #valid_keys = ['frequency', 'follow_mode', 'pulse_width', 'prt_mode',
    #'prt', 'prt_ratio', 'polarization_mode', 'nyquist_velocity',
    #'unambiguous_range', 'n_samples', 'sampling_ration']
    #keys = [k for k in valid_keys if k in ncvars]
    #instrument_parameters = dict((k, _ncvar_to_dict(ncvars[k])) for k in keys)
    ## 4.6 radar_parameters sub-convention -> instrument_parameters dict
    ## the meta_group attribute is often set incorrectly so we cannot
    ## use this as a indicator of instrument_parameters
    ##keys = _find_all_meta_group_vars(ncvars, 'radar_parameters')
    #valid_keys = ['radar_antenna_gain_h', 'radar_antenna_gain_v',
    #'radar_beam_width_h', 'radar_beam_width_v',
    #'radar_reciever_bandwidth',
    #'radar_measured_transmit_power_h',
    #'radar_measured_transmit_power_v']
    ## these keys are not in CF/Radial 1.2 standard but are common
    #valid_keys += ['radar_rx_bandwidth', 'measured_transmit_power_h',
    #'measured_transmit_power_v']
    #keys = [k for k in valid_keys if k in ncvars]
    #radar_parameters = dict((k, _ncvar_to_dict(ncvars[k])) for k in keys)
    #instrument_parameters.update(radar_parameters) # add to instr_params
    #if instrument_parameters == {}: # if no parameters set to None
    instrument_parameters = None
    # 4.7 lidar_parameters sub-convention -> skip
    ## 4.8 radar_calibration sub-convention -> radar_calibration
    #keys = _find_all_meta_group_vars(ncvars, 'radar_calibration')
    #radar_calibration = dict((k, _ncvar_to_dict(ncvars[k])) for k in keys)
    radar_calibration = None
    return Radar(
        time, _range, fields, metadata, scan_type,
        latitude, longitude, altitude,
        sweep_number, sweep_mode, fixed_angle, sweep_start_ray_index,
        sweep_end_ray_index,
        azimuth, elevation,
        instrument_parameters=instrument_parameters,
        radar_calibration=radar_calibration,
        altitude_agl=altitude_agl,
        scan_rate=scan_rate,
        antenna_transition=antenna_transition)
def _h5_to_dict(h5obj, h5path, h5var, ncstring, fmd):
""" Convert a HDF Attribute variable to a dictionary. """
d = fmd.get_metadata(ncstring)
d['data'] = np.array([h5obj[h5path].attrs[h5var]])
return d
def _h5_moments_to_dict(h5obj, fmd):
"""
Convert gamic HDF5 moment Dataset and attached Attributes to a dictionary.
"""
d = fmd.get_metadata('default')
d['valid_min'] = h5obj.attrs['dyn_range_min']
d['valid_max'] = h5obj.attrs['dyn_range_max']
d['standard_name'] = h5obj.attrs['moment']
d['long_name'] = h5obj.attrs['moment']
#d['units'] =
if h5obj.attrs['format'] == 'UV8':
div = 256.0
else:
div = 65536.0
d['data'] = (d['valid_min'] + h5obj[...] *
(d['valid_max'] - d['valid_min']) / div)
return d
|
<gh_stars>1-10
import os
import torch
import shutil
import numpy as np
import torch.nn.functional as F
from network import get_network
from PIL import Image
from scipy.io import wavfile
from torch import topk
from torch.utils.data.dataloader import default_collate
from vad import read_wave, write_wave, frame_generator, vad_collector
from torchvision.utils import save_image
from dataset import reload_batch_face, reload_batch_voice
# from config import DATASET_PARAMETERS, NETWORKS_PARAMETERS
class Meter(object):
    """Running-average tracker for a scalar metric (e.g. a training loss).

    *display* names which attribute __str__ shows ('val', 'avg', 'sum',
    'count'); *fmt* is the format spec appended to it (default ':f').
    """

    def __init__(self, name, display, fmt=':f'):
        self.name = name
        self.display = display
        self.fmt = fmt
        self.reset()

    def reset(self):
        """Zero the current value and all accumulated statistics."""
        self.val, self.avg, self.sum, self.count = 0, 0, 0, 0

    def update(self, val, n=1):
        """Record *val* observed *n* times and refresh the running average."""
        self.val = val
        self.count += n
        self.sum += val * n
        self.avg = self.sum / self.count

    def __str__(self):
        template = '{name}:{%s%s},' % (self.display, self.fmt)
        return template.format(**self.__dict__)
def get_collate_fn(nframe_range):
    """Build a DataLoader collate_fn that random-crops every item's last axis.

    One crop width is drawn uniformly from *nframe_range* (inclusive) and one
    start offset is drawn per batch, so all items in a batch share the same
    number of frames before being collated.
    """
    def collate_fn(batch):
        lo, hi = nframe_range
        assert lo <= hi
        width = np.random.randint(lo, hi + 1)
        start = np.random.randint(0, hi - width + 1)
        cropped = [(sample[0][..., start:start + width], sample[1])
                   for sample in batch]
        return default_collate(cropped)
    return collate_fn
def cycle(dataloader):
    """Yield (data, label) pairs from *dataloader* forever, restarting it each pass."""
    while True:
        for inputs, targets in dataloader:
            yield inputs, targets
def save_model(net, model_path):
    """Save *net*'s parameters (state_dict) to *model_path*.

    Parent directories are created on demand.
    """
    model_dir = os.path.dirname(model_path)
    # exist_ok avoids the check-then-create race of the original exists() guard;
    # skip for bare filenames where dirname is ''.
    if model_dir:
        os.makedirs(model_dir, exist_ok=True)
    torch.save(net.state_dict(), model_path)
def retrieve_face(face_image, f_net, c_net, face_dict, k):
    """Return the *k* entries of *face_dict* with the highest classifier scores.

    Both networks are put in eval mode for the forward pass and switched back
    to train mode before returning.
    """
    f_net.eval()
    c_net.eval()
    scores = c_net(f_net(face_image))
    _, top_indices = scores.topk(k, largest=True)
    matches = [face_dict[idx.item()] for idx in top_indices.view(-1)]
    f_net.train()
    c_net.train()
    return matches
def get_cos(fake_image, real_image):
    """Cosine similarity between two batches of image tensors/embeddings."""
    similarity = F.cosine_similarity(fake_image, real_image)
    return similarity
def test_image(iter_id, e_net, g_net_o, g_net_y, label, voice_loader, face_loader, face_dict):
    """Generate and save qualitative face-conversion results for 200 samples.

    For each iteration: embed a voice batch with *e_net*, then synthesize faces
    with two generators — *g_net_o* (conditioned on a real input face) and
    *g_net_y* (from the embedding alone) — and write input/target/converted
    images under new_results/<label>/<iter_id>/.

    NOTE(review): voiceB is fed to e_net on CPU here (the .cuda() line is
    commented out) — confirm the networks are on the matching device.
    """
    g_net_o.eval()
    g_net_y.eval()
    # f_net.eval()
    # g_net2 = torch.load('G.pt')
    # c_net = torch.load(c_net, 'C.pt')
    # f_net = torch.load(f_net, 'face.pt')
    # g_net2 = torch.load('G_addedA.pt')
    # Create the output directory tree for this run.
    if not os.path.exists('new_results/{}'.format(label)):
        os.makedirs('new_results/{}'.format(label))
    if not os.path.exists('new_results/{}/{}/Input'.format(label, iter_id)):
        os.makedirs('new_results/{}/{}/Input'.format(label, iter_id))
        os.makedirs('new_results/{}/{}/Obj'.format(label, iter_id))
        os.makedirs('new_results/{}/{}/conversion'.format(label, iter_id))
        os.makedirs('new_results/{}/{}/conversion_y'.format(label, iter_id))
        # os.makedirs('results/{}/{}/conversion2'.format(label, iter_id))
        # os.makedirs('results/{}/{}/retrieve'.format(label, iter_id))
    # Infinite iterators so the loaders never exhaust mid-loop.
    voice_iterator = iter(cycle(voice_loader))
    face_iterator = iter(cycle(face_loader))
    # voiceB, voiceB_label = next(voice_iterator)
    # # faceA, faceA_label = next(face_iterator) # real face
    # faceB_items = [face_dict[v_label.item()] for v_label in voiceB_label]
    # faceB = reload_batch_face(faceB_items)
    # voiceB, faceA = voiceB.cuda(), faceA.cuda()
    # get voice embeddings
    # embedding_B = e_net(voiceB)
    # embedding_B_y = F.normalize(embedding_B)
    # embedding_B = F.normalize(embedding_B).view(embedding_B.size()[0], -1)
    # cos1, cos2, cos3, cos4, cos5 = 0, 0, 0, 0, 0
    for i in range(200):
        print('********iter{}*********'.format(i))
        faceA, faceA_label = next(face_iterator)  # real face
        voiceB, voiceB_label = next(voice_iterator)
        # Target faces: the true faces belonging to the voice identities.
        faceB_items = [face_dict[v_label.item()] for v_label in voiceB_label]
        faceB = reload_batch_face(faceB_items)
        # voiceB, faceA = voiceB.cuda(), faceA.cuda()
        # get voice embeddings
        embedding_B = e_net(voiceB)
        embedding_B_y = F.normalize(embedding_B)
        embedding_B = F.normalize(embedding_B).view(embedding_B.size()[0], -1)
        # face_iterator = iter(cycle(face_loader))
        # faceA, faceA_label = next(face_iterator) # real face
        # Generators operate in [-1, 1]; images are stored in [0, 1].
        scaled_images = faceA * 2 - 1
        fake_faceB = g_net_o(scaled_images, embedding_B)
        fake_faceB = (fake_faceB + 1) / 2
        fake_faceB2 = g_net_y(embedding_B_y)
        # fake_faceB2 = (fake_faceB2 + 1) / 2
        # print(f_net(fake_faceB))
        # cos1 += get_cos(f_net(fake_faceB.cuda()), f_net(faceB.cuda()))
        # cos2 += get_cos(f_net(fake_faceB2.cuda()), f_net(faceB.cuda()))
        # cos3 += get_cos(f_net(fake_faceB.cuda()), f_net(faceA.cuda()))
        # cos4 += get_cos(f_net(fake_faceB2.cuda()), f_net(faceA.cuda()))
        # cos5 += get_cos(f_net(faceB.cuda()), f_net(faceA.cuda()))
        # retrieve = retrieve_face(fake_faceB, f_net, c_net, face_dict, 5)
        # if not os.path.exists('results/{}/{}/retrieve/{}'.format(label, iter_id, i+1)):
        #     os.makedirs('results/{}/{}/retrieve/{}'.format(label, iter_id, i+1))
        # for j in range(len(retrieve)):
        #     # print(type(retrieve[j]))
        #     face = reload_batch_face([retrieve[j]])
        #     save_image(face, 'results/{}/{}/retrieve/{}/{}.png'.format(label, iter_id, i+1, j+1))
        save_image(faceA, 'new_results/{}/{}/Input/img{}.png'.format(label, iter_id, i+1))
        save_image(fake_faceB, 'new_results/{}/{}/conversion/img{}.png'.format(label, iter_id, i+1))
        save_image(fake_faceB2, 'new_results/{}/{}/conversion_y/img{}.png'.format(label, iter_id, i+1))
        save_image(faceB, 'new_results/{}/{}/Obj/img{}.png'.format(label, iter_id, i+1))
    # g_net.train()
    # f_net.train()
    # print(cos1.item()/49, cos2.item()/49, cos3.item()/49, cos4.item()/49, cos5.item()/49)
    # return cos1.item()/49
def rm_sil(voice_file, vad_obj):
    """
    This code snippet is basically taken from the repository
    'https://github.com/wiseman/py-webrtcvad'
    It removes the silence clips in a speech recording.

    Returns the voiced samples concatenated as an int16 numpy array, or None
    when no voiced segment was detected (the implicit return when wave_data
    is empty).
    """
    audio, sample_rate = read_wave(voice_file)
    # 20 ms frames, as required by the VAD helper.
    frames = frame_generator(20, audio, sample_rate)
    frames = list(frames)
    segments = vad_collector(sample_rate, 20, 50, vad_obj, frames)
    # Scratch directory for the per-segment wav round-trip.
    # NOTE(review): a fixed 'tmp/' path is not safe for concurrent runs.
    if os.path.exists('tmp/'):
        shutil.rmtree('tmp/')
    os.makedirs('tmp/')
    wave_data = []
    for i, segment in enumerate(segments):
        segment_file = 'tmp/' + str(i) + '.wav'
        write_wave(segment_file, segment, sample_rate)
        wave_data.append(wavfile.read(segment_file)[1])
    shutil.rmtree('tmp/')
    if wave_data:
        vad_voice = np.concatenate(wave_data).astype('int16')
        return vad_voice
def get_fbank(voice, mfc_obj):
    """Compute a normalized log mel-spectrogram fixed to 1000 frames.

    Each mel band is zero-meaned and variance-normalized; recordings shorter
    than 1000 frames are tiled by repetition and then cropped.
    """
    # Extract log mel-spectrogram as float32.
    fbank = mfc_obj.sig2logspec(voice).astype('float32')
    # Mean/variance normalization per mel band (eps guards constant bands).
    fbank = fbank - fbank.mean(axis=0)
    fbank = fbank / (fbank.std(axis=0) + np.finfo(np.float32).eps)
    # If the recording is shorter than 10 s (1000 frames), repeat it until it
    # is long enough, then crop to exactly 1000 frames.
    target_frames = 1000
    base_frames = fbank.shape[0]
    while fbank.shape[0] < target_frames:
        fbank = np.append(fbank, fbank[0:base_frames], axis=0)
    return fbank[0:target_frames, :]
def voice2face(e_net, g_net, voice_file, vad_obj, mfc_obj, GPU=True):
    """End-to-end pipeline: voice file -> VAD -> log-fbank -> embedding -> face.

    Removes silence with *vad_obj*, extracts mel features with *mfc_obj*,
    embeds them with *e_net* and synthesizes a face image with *g_net*.
    """
    cleaned_voice = rm_sil(voice_file, vad_obj)
    features = get_fbank(cleaned_voice, mfc_obj)
    # (frames, bands) -> (1, bands, frames): channels-first with a batch dim.
    features = features.T[np.newaxis, ...]
    tensor = torch.from_numpy(features.astype('float32'))
    if GPU:
        tensor = tensor.cuda()
    embedding = F.normalize(e_net(tensor))
    return g_net(embedding)
def experiment(e_net, voice_iterator, face_iterator, face_dict, voice_dict):
    """Run 200 conversion experiments with a pretrained generator ('G.pt').

    For each iteration, embeds a voice batch with *e_net*, conditions the
    loaded generator on the voice owner's real face, and saves the real and
    generated faces under results/same2/<i>/.

    NOTE(review): requires CUDA (.cuda() calls) and a 'G.pt' checkpoint in the
    working directory; voiceB_label.item() below assumes batch size 1.
    """
    g_net = torch.load('G.pt')
    g_net.eval()
    for i in range(200):
        voiceB, voiceB_label = next(voice_iterator)
        faceA, faceA_label = next(face_iterator)  # real face
        voiceB_label = voiceB_label.repeat(1)
        # TODO: since voiceB and faceA in different identities,
        # need to reuse load_voice and load_face to get corresponding faceB and voiceA
        faceB_items = [face_dict[v_label.item()] for v_label in voiceB_label]
        voiceA_items = [voice_dict[f_label.item()] for f_label in faceA_label]
        # Identity metadata used only for the output filenames.
        path = voice_dict[voiceB_label.item()]['filepath']
        path = path.split('/')[3]
        name = voice_dict[voiceB_label.item()]['name']
        faceB = reload_batch_face(faceB_items)
        # voiceA = reload_batch_voice(voiceA_items, DATASET_PARAMETERS['nframe_range'][1])
        voiceB, voiceB_label = voiceB.cuda(), voiceB_label.cuda()
        faceA, faceA_label = faceA.cuda(), faceA_label.cuda()
        faceB = faceB.cuda()
        embedding_B = e_net(voiceB)
        embedding_B = F.normalize(embedding_B).view(embedding_B.size()[0], -1)
        # Generator works in [-1, 1]; images are stored in [0, 1].
        scaled_images = faceB * 2 - 1
        fake_faceB = g_net(scaled_images, embedding_B)
        fake_faceB = (fake_faceB + 1) / 2
        if not os.path.exists('results/same2/{}'.format(i+1)):
            os.makedirs('results/same2/{}'.format(i+1))
        save_image(faceB, 'results/same2/{}/{}_{}.png'.format(i+1, path, name))
        save_image(fake_faceB, 'results/same2/{}/fake{}.png'.format(i+1, name))
#pylint: disable-all
import os
import yaml
import pytest
# we use environments variable to mark slow instead of register new pytest marks here.
AWNAS_TEST_SLOW = os.environ.get("AWNAS_TEST_SLOW", None)
sample_cfg_str = """
## ---- Component search_space ----
# ---- Type cnn ----
search_space_type: cnn
search_space_cfg:
# Schedulable attributes:
num_cell_groups: 2
num_init_nodes: 2
num_layers: 5
cell_layout: null
reduce_cell_groups:
- 1
num_steps: 4
num_node_inputs: 2
shared_primitives:
- none
- max_pool_3x3
- avg_pool_3x3
- skip_connect
- sep_conv_3x3
- sep_conv_5x5
- dil_conv_3x3
- dil_conv_5x5
cell_shared_primitives: null
# ---- End Type cnn ----
## ---- End Component search_space ----
## ---- Component dataset ----
# ---- Type cifar10 ----
dataset_type: cifar10
dataset_cfg:
# Schedulable attributes:
cutout: null
# ---- End Type cifar10 ----
## ---- End Component dataset ----
## ---- Component final_model ----
# ---- Type cnn_final_model ----
final_model_type: cnn_final_model
final_model_cfg:
# Schedulable attributes: dropout_path_rate
num_classes: 10
init_channels: 10
layer_channels: []
stem_multiplier: 3
dropout_rate: 0.1
dropout_path_rate: 0.2
auxiliary_head: false
auxiliary_cfg: null
use_stem: conv_bn_3x3
stem_stride: 1
stem_affine: true
cell_use_preprocess: true
cell_pool_batchnorm: false
cell_group_kwargs: null
cell_independent_conn: false
schedule_cfg: null
# ---- End Type cnn_final_model ----
## ---- End Component final_model ----
## ---- Component final_trainer ----
# ---- Type cnn_trainer ----
final_trainer_type: cnn_trainer
final_trainer_cfg:
# Schedulable attributes:
epochs: 50
batch_size: 96
optimizer_type: SGD
optimizer_kwargs: null
learning_rate: 0.05
momentum: 0.9
warmup_epochs: 0
optimizer_scheduler:
T_max: 50
eta_min: 0.001
type: CosineAnnealingLR
weight_decay: 0.0003
no_bias_decay: false
grad_clip: 5.0
auxiliary_head: false
auxiliary_weight: 0.0
add_regularization: false
save_as_state_dict: false
eval_no_grad: true
schedule_cfg: null
# ---- End Type cnn_trainer ----
## ---- End Component final_trainer ----
## ---- Component objective ----
# ---- Type classification ----
objective_type: classification
objective_cfg:
# Schedulable attributes:
{}
# ---- End Type classification ----
## ---- End Component objective ----
"""
@pytest.mark.skipif(not AWNAS_TEST_SLOW, reason="tune evaluator is slow")
def test_bf_tune_evaluator(tmp_path):
    """Smoke test: BFTuneEvaluator trains a random rollout through two stages.

    Writes the config template to a temp file, evaluates one random rollout
    twice, and prints the reward after each stage (no assertions — this only
    checks the evaluator runs end to end).
    """
    # Imported lazily so collection works without aw_nas installed/configured.
    from aw_nas.objective import ClassificationObjective
    from aw_nas.evaluator.bftune import BFTuneEvaluator
    from aw_nas import get_search_space
    ss = get_search_space("cnn")
    objective = ClassificationObjective(ss)
    t_cfg_fname = os.path.join(tmp_path, "template.yaml")
    with open(t_cfg_fname, "w") as cfg_f:
        cfg_f.write(sample_cfg_str)
    evaluator = BFTuneEvaluator(None, None, objective, template_cfg_file=t_cfg_fname,
                                save_every=10, bf_checkpoints=[1, 2, 3])
    rollout = ss.random_sample()
    rollout.train_dir = os.path.join(tmp_path, str(hash(rollout)))
    print("train dir: ", rollout.train_dir)
    # Each evaluate_rollouts call advances the rollout one budget stage.
    rollout = evaluator.evaluate_rollouts([rollout], is_training=True)[0]
    print("perf after stage 0: ", rollout.perf["reward"])
    rollout = evaluator.evaluate_rollouts([rollout], is_training=True)[0]
    print("perf after stage 1: ", rollout.perf["reward"])
|
import json
import pytricia
from src.reader import FileReader
from src.dl.download_zoom import ZoomCidrDownloader
from src.pytrie_support import PytrieSupport
from src.whois import whois
def run_test():
    """Load every provider's CIDR ranges into a trie, then spot-check lookups."""
    # 128 bits so the trie can hold IPv6 prefixes as well as IPv4.
    pyt = pytricia.PyTricia(128)
    support = PytrieSupport()
    reader = FileReader()

    def load_json(adder, path):
        # Providers that publish JSON: parse before handing to the adder.
        adder(pyt, json.loads(reader.read(path)))

    def load_text(adder, path):
        # Providers that publish plain-text range lists.
        adder(pyt, reader.read(path))

    # Read all the files locally, in the same order as before.
    load_json(support.add_aws_cidr, 'data/raw/aws.json')
    for cloud in ('public', 'china', 'germany', 'gov'):
        load_json(support.add_azure_cidr, 'data/raw/azure-{}.json'.format(cloud))
    load_text(support.add_gcp_cidr, 'data/raw/gcp.txt')
    load_text(support.add_cloudflare_cidr, 'data/raw/cloudflare-ipv4.txt')
    load_text(support.add_cloudflare_cidr, 'data/raw/cloudflare-ipv6.txt')
    load_json(support.add_fastly_cidr, 'data/raw/fastly.json')

    # Zoom ranges carry per-kind source/url metadata; 'crc' maps to the
    # 'zoom' config key, the others map to their own name.
    zoom = ZoomCidrDownloader()
    zoom_config = zoom.get_config()
    for kind in ('crc', 'meeting', 'phone', 'range'):
        config_key = 'zoom' if kind == 'crc' else kind
        support.add_zoom_cidr(
            pyt,
            reader.read('data/raw/zoom-{}.txt'.format(kind)),
            zoom_config.get('source').get(config_key),
            zoom_config.get('url').get(config_key))

    load_json(support.add_datadog_cidr, 'data/raw/datadog.json')
    # data = reader.read('data/raw/github.json')
    # support.add_github_cidr(pyt, json.loads(data))
    load_json(support.add_atlassian_cidr, 'data/raw/atlassian.json')
    load_text(support.add_pingdom_cidr, 'data/raw/pingdom-ipv4.txt')
    load_text(support.add_pingdom_cidr, 'data/raw/pingdom-ipv6.txt')

    # Testing - AWS
    # e.g. {'region': 'us-east-1', 'service': 'EC2'}
    #      {'region': 'eu-west-2', 'service': 'S3'}
    print(pyt.get('172.16.58.3'))
    print(pyt.get('2a05:d07a:c000::'))
    print(pyt.get('172.16.58.3'))

    # Testing - Azure
    # e.g. {'region': '', 'platform': 'Azure', 'systemService': '', 'cloud': 'Public'}
    #      {'region': '', 'platform': 'Azure',
    #       'systemService': 'AzureAppConfiguration', 'cloud': 'AzureGovernment'}
    print(pyt.get('172.16.31.10'))
    print(pyt.get('192.168.3.11'))

    # Testing - Google (disabled)
    # print(pyt.get('172.16.31.10'))
    # print(pyt.get('192.168.127.12'))

    # Testing - Cloudflare
    print(pyt.get('172.16.58.3'))
    print(pyt.get('2c0f:f248::'))

    # Testing - Fastly
    print(pyt.get('192.168.127.12'))
    print(pyt.get('2a04:4e40::'))
    print(pyt.get('2c0f:f248::'))

    # Testing - Zoom
    print(pyt.get('172.16.17.32'))
    print(pyt.get('172.16.17.32'))

    # Testing - Datadog: direct lookups, then the parent of each prefix.
    datadog_samples = (
        '192.168.127.12/32',
        '172.16.58.3/32',
        '172.16.58.3/20',
        '172.16.31.10/25',
        '2600:1f18:63f7:b900::/56',
        '192.168.127.12/32',
    )
    for sample in datadog_samples:
        print(pyt.get(sample))
    for sample in datadog_samples:
        print(pyt.get(pyt.parent(sample)))

    # Testing - Github (disabled)
    # print(pyt.get('172.16.58.3/32'))
    # print(pyt.get('192.168.3.11'))

    # Testing - Atlassian
    print(pyt.get('192.168.3.11/25'))
    print(pyt.get(pyt.parent('192.168.3.11/25')))

    # Testing - Pingdom
    print(pyt.get('192.168.3.11'))
    print(pyt.get('fd00:a516:7c1b:17cd:6d81:2137:bd2a:2c5b'))

    # Testing - Whois
    print(whois('1.1.1.1'))
def main():
    """Entry point: exercise the CIDR trie smoke test."""
    run_test()


if __name__ == "__main__":
    main()
|
import streamlink, sys, datetime, time, os, requests, datetime
def cDateTime():
    """Return the current local time formatted as ``YYYY-MM-DD_HH-MM-SS``."""
    return datetime.datetime.now().strftime("%Y-%m-%d_%H-%M-%S")
def cDate():
    """Return the current local date formatted as ``YYYY-MM-DD``."""
    return datetime.datetime.now().strftime("%Y-%m-%d")
def logToFile(s):
    """Print *s* with a timestamp and append it to today's ``LOG_<date>.txt``.

    The log file is opened with a context manager so the handle is closed
    even if the write raises (the original open/write/close sequence leaked
    the handle on a failed write).
    """
    msg = "[{}] {}".format(cDateTime(), s)
    print(msg)
    with open("LOG_{}.txt".format(cDate()), 'a') as log_file:
        log_file.write("{}\n".format(msg))
def discordNotif(discord_url, discord_message):
    """POST *discord_message* to a Discord webhook, retrying up to 3 times.

    A successful webhook call returns HTTP 204; each failure is reported
    and retried after a short delay.
    """
    payload = {
        "content": discord_message,
    }
    retry_delay = 3
    for attempt in range(1, 4):
        response = requests.post(discord_url, payload)
        if response.status_code == 204:
            print("Successfully called Discord API.")
            return
        print("Webhook failed to send, status code: {} | Attempt: {} | Retrying in {} seconds.".format(response.status_code, attempt, retry_delay))
        time.sleep(retry_delay)
# --- Configuration / defaults ---
discord_webhook = "YOUR_WEBHOOK_HERE"  # webhook used to report recorder crashes
stream_url = "https://www.twitch.tv/"  # base URL; channel name is appended below
streamer_name = ""
stream_quality = "best"  # streamlink quality key; may be overridden by argv[2]
convert_to_mp4 = False
done = False

# Conversions to MP4
if("-c" in sys.argv):
    convert_to_mp4 = True
    logToFile("Will convert to mp4 after recording is done.")

# Usernames: first positional arg is either a twitch URL or a bare channel name.
if(len(sys.argv) > 1):
    streamer_name = sys.argv[1]
else:
    print("args: {} <twitch url OR twitch name> <quality> -c (convert to mp4 (ffmpeg needed)) -w (wait for stream online) -r ((-w needed for -r to work) wait for next stream after current ends)".format(sys.argv[0]))
    streamer_name = input("Enter twitch username: ")
if("twitch.tv" in streamer_name):
    # Full URL given: derive the channel name from its last path segment.
    stream_url = streamer_name
    streamer_name = stream_url.split('/')[-1]
else:
    stream_url += streamer_name

# Main record loop; repeats when -r ("record next stream too") is given.
while(not done):
    streams = streamlink.streams(stream_url)
    if(len(streams) == 0):
        # No streams available -> channel is offline.
        if("-w" not in sys.argv):
            logToFile("{} is not streaming... exiting.".format(streamer_name))
            sys.exit(0)
        logToFile("{} is not streaming now. Retrying every 5 minutes...".format(streamer_name))
        while(len(streams) == 0):
            time.sleep(300)
            streams = streamlink.streams(stream_url)
    # Optional quality override: exact key match first, then substring match.
    if(len(sys.argv) > 2):
        if(sys.argv[2] in streams):
            print("Changing qualities to: {}".format(sys.argv[2]))
            stream_quality = sys.argv[2]
        else:
            for q in streams:
                if(sys.argv[2] in q):
                    print("Changing qualities to: {}".format(sys.argv[2]))
                    stream_quality = q
                    break
    logToFile("Recording from {} using quality {}".format(streamer_name, stream_quality))
    a_q = ""
    for s in streams:
        a_q += s + " "
    logToFile("Available qualities: {}".format(a_q))
    stream = streams[stream_quality]
    # Saving stream to file.
    fd = stream.open()
    filename = os.path.join("./", "{}_{}_{}.ts".format(stream_quality, streamer_name, cDateTime()))
    save = open(filename, 'wb')
    start_time = time.time()
    elapsed_time = 0
    byte_written = 0
    logToFile("[+] Saving stream to {}".format(filename))
    # Copy loop: read 1 KiB chunks from the stream until it ends or errors.
    while(True):
        try:
            sd = fd.read(1024)
            sd_len = len(sd)
            if(sd_len != 0):
                save.write(sd)
                byte_written += sd_len
            else:
                # Zero-length read -> streamlink reports the stream closed.
                logToFile("Stream connection lost. Stream offline?")
                break
            elapsed_time = time.time() - start_time
        except Exception as e:
            #urllib3.connection exception type when lost internet
            logToFile("[{}]Exception: {}. -------Exiting-------".format(cDateTime(),e))
            discordNotif(discord_webhook,"[{}]Stream_saver exception: {}".format(cDateTime(),e))
            break
        except KeyboardInterrupt:
            # KeyboardInterrupt derives from BaseException, so the clause
            # above does not swallow it.
            logToFile("[KeyboardInterrupt]\nExiting.".format(cDateTime()))
            break
    fd.close()
    save.close()
    if(convert_to_mp4):
        logToFile("Converting to mp4...")
        # NOTE(review): output name becomes "<file>.ts.mp4"; the .ts original
        # is kept (removal is commented out below).
        os.system("ffmpeg -i {} -c copy {}.mp4".format(filename, filename))
        #os.system("rm {}".format(filename))
        logToFile("Conversion to mp4 finished.")
    logToFile("Elapsed time saved to file: {}".format(datetime.timedelta(seconds=elapsed_time)))
    logToFile("MB written to file: {}".format((byte_written/1024) / 1024))
    logToFile("-----------------------------------")
    # -r keeps looping to catch the streamer's next broadcast.
    if("-r" in sys.argv):
        done = False
    else:
        done = True
sys.exit(0)
|
import json
from ..views import View
from ..controllers import Controller
from ..utils.structures import data_get
class HttpTestResponse:
    """Fluent assertion wrapper around one HTTP test request/response cycle.

    Every ``assert*`` method returns ``self`` so that assertions can be
    chained. ``dump*``/``dd*`` helpers print debugging information via the
    owning test case.
    """

    def __init__(self, testcase, application, request, response, route):
        self.testcase = testcase
        self.application = application
        self.request = request
        self.response = response
        self.route = route
        self.content = None
        self.status = None
        self.get_response()

    def get_response(self):
        """Fetch and cache the raw response content."""
        self.content = self.response.get_response_content()
        return self

    def get_content(self):
        """Take care of decoding content if bytes and returns str."""
        return (
            self.content.decode("utf-8")
            if isinstance(self.content, bytes)
            else str(self.content)
        )

    def assertContains(self, content):
        """Assert that the response body contains the given string."""
        assert (
            content in self.get_content()
        ), f"{content} not found in {self.get_content()}"
        return self

    def assertNotContains(self, content):
        """Assert that the response body does not contain the given string."""
        assert content not in self.get_content()
        return self

    def assertContainsInOrder(self, *content):
        """Assert that all given strings appear in the response body, in order."""
        response_content = self.get_content()
        index = 0
        for content_string in content:
            found_at_index = response_content.find(content_string, index)
            assert found_at_index != -1
            # Subsequent strings must appear after the end of this match.
            index = found_at_index + len(content_string)
        return self

    def assertIsNamed(self, name):
        """Assert that the matched route has the given name."""
        assert (
            self.route.get_name() == name
        ), f"Route name is {self.route.get_name()}. Asserted {name}"
        return self

    def assertIsNotNamed(self, name=None):
        """Assert that the route has no name, or does not have the given name."""
        if name is None:
            assert self.route.name is None, "Route has a name: {}".format(
                self.route.name
            )
        else:
            assert (
                self.route.get_name() != name
            ), f"Route name {self.route.get_name()} matches expected {name}"
        return self

    def assertIsStatus(self, status):
        """Assert that the response has the given HTTP status code."""
        assert self.response.is_status(
            status
        ), f"Status is {self.response.get_status_code()}. Asserted {status}"
        return self

    def assertNotFound(self):
        return self.assertIsStatus(404)

    def assertOk(self):
        return self.assertIsStatus(200)

    def assertCreated(self):
        return self.assertIsStatus(201)

    def assertLimited(self) -> "HttpTestResponse":
        return self.assertIsStatus(429)

    def assertSuccessful(self):
        """Assert that the status code is in the 2xx range."""
        assert 200 <= self.response.get_status_code() < 300
        return self

    def assertNoContent(self, status=204):
        """Assert an empty body with the given status (204 by default)."""
        assert not self.get_content()
        return self.assertIsStatus(status)

    def assertUnauthorized(self):
        return self.assertIsStatus(401)

    def assertForbidden(self):
        return self.assertIsStatus(403)

    def assertError(self):
        return self.assertIsStatus(500)

    def assertHasHeader(self, name, value=None):
        """Assert that the response has the given header (and value, if given)."""
        header_value = self.response.header(name)
        assert header_value, f"Could not find the header {name}"
        if value:
            assert (
                value == header_value
            ), f"Header '{name}' does not equal {value} but {header_value}"
        return self

    def assertHeaderMissing(self, name):
        assert not self.response.header(name)
        return self

    def dumpRequestHeaders(self):
        """Dump request headers."""
        self.testcase.dump(self.request.header_bag.to_dict(), "Request Headers")
        return self

    def dumpResponseHeaders(self):
        """Dump response headers."""
        self.testcase.dump(self.response.header_bag.to_dict(), "Response Headers")
        return self

    def ddHeaders(self):
        """Dump request and response headers and die."""
        self.dumpRequestHeaders()
        self.dumpResponseHeaders()
        self.testcase.stop()

    def assertLocation(self, location):
        return self.assertHasHeader("Location", location)

    def assertRedirect(self, url=None, name=None, params=None):
        """Assert that the response is a redirect, optionally to a URL or
        a named route (with route params)."""
        # we could assert 301 or 302 code => what if user uses another status code in redirect()
        # here we are sure
        assert self.get_content() == "Redirecting ..."
        if url:
            self.assertLocation(url)
        elif name:
            url = self.response._get_url_from_route_name(name, params or {})
            self.assertLocation(url)
        return self

    def assertCookie(self, name, value=None):
        """Assert that the request has the given cookie (and value, if given)."""
        assert self.request.cookie_jar.exists(name)
        if value is not None:
            assert self.request.cookie_jar.get(name).value == value
        return self

    def assertPlainCookie(self, name):
        """Assert that the given cookie exists and is not secure."""
        assert self.request.cookie_jar.exists(name)
        assert not self.request.cookie_jar.get(name).secure
        return self

    def assertCookieExpired(self, name):
        self.assertCookie(name)
        assert self.request.cookie_jar.is_expired(name)
        return self

    def assertCookieNotExpired(self, name):
        """Assert that the given cookie exists and is not expired."""
        # Fixed: the previous implementation asserted the cookie WAS expired
        # (via assertCookieExpired) and returned a boolean instead of self.
        self.assertCookie(name)
        assert not self.request.cookie_jar.is_expired(name)
        return self

    def assertCookieMissing(self, name):
        assert not self.request.cookie_jar.exists(name)
        return self

    def assertSessionHas(self, key, value=None):
        """Assert that session contains the given key with the corresponding value if given.
        The session driver can be specified if necessary."""
        session = self.request.session
        assert session.has(key)
        if value is not None:
            real_value = session.get(key)
            assert (
                real_value == value
            ), f"Value for {key} is {real_value}, expected {value}"
        return self

    def assertSessionMissing(self, key):
        """Assert that session does not contain the given key. The session driver can be specified
        if necessary."""
        assert not self.request.session.get(key)
        return self

    def assertSessionHasErrors(self, keys=None):
        """Assert that session contains errors for the given list of keys (meaning that each given key
        exists in 'errors' dict in session.) If no keys are given this will assert that the
        sessions has errors without checking specific keys."""
        # None default avoids the shared-mutable-default pitfall.
        keys = keys or []
        session = self.request.session
        assert session.has("errors")
        if keys:
            errors = session.get("errors")
            for key in keys:
                assert errors.get(key)
        return self

    def assertSessionHasNoErrors(self, keys=None):
        """Assert that session does not have any errors (meaning that session does not contain an
        'errors' key or 'errors' key is empty. If a list of keys is given, this will check
        that there are no errors for each of those keys."""
        keys = keys or []
        session = self.request.session
        if not keys:
            assert not session.has("errors")
        else:
            errors = session.get("errors")
            for key in keys:
                assert not errors.get(key)
        return self

    def dumpSession(self):
        """Dump session data."""
        self.testcase.dump(self.application.make("session").all(), "Session Data")
        return self

    def ddSession(self):
        """Dump session data and die."""
        self.dumpSession()
        self.testcase.stop()

    def _ensure_response_has_view(self):
        """Ensure that the response has a view as its original content."""
        if not (self.response.original and isinstance(self.response.original, View)):
            raise ValueError("The response is not a view")

    def assertViewIs(self, name):
        """Assert that request renders the given view name."""
        self._ensure_response_has_view()
        assert (
            self.response.original.template == name
        ), f"Template {self.response.original.template} is not equal to {name}"
        return self

    def assertViewHas(self, key, value=None):
        """Assert that view context contains a given data key (and eventually associated value)."""
        self._ensure_response_has_view()
        value_at_key = data_get(self.response.original.dictionary, key)
        assert value_at_key
        if value:
            assert value_at_key == value
        return self

    def assertViewHasExact(self, keys):
        """Assert that view context contains exactly the data keys (or the complete data dict)."""
        self._ensure_response_has_view()
        if isinstance(keys, list):
            assert set(keys) == set(self.response.original.dictionary.keys()) - set(
                self.response.original._shared.keys()
            )
        else:
            # Build a filtered copy rather than deleting keys from the
            # view's actual dictionary (the original mutated response state).
            view_data = {
                key: value
                for key, value in self.response.original.dictionary.items()
                if key not in self.response.original._shared
            }
            assert keys == view_data
        return self

    def assertViewMissing(self, key):
        """Assert that given data key is not in the view context."""
        self._ensure_response_has_view()
        assert not data_get(self.response.original.dictionary, key)
        return self

    def assertGuest(self, guard="web"):
        """Assert that no user is authenticated on the given guard."""
        assert not self.application.make("auth").guard(guard).user()
        return self

    def assertAuthenticated(self, user=None, guard="web"):
        """Assert that user is authenticated. If a user a given assert that the given is
        authenticated."""
        logged_user = self.application.make("auth").guard(guard).user()
        assert logged_user
        if user:
            assert user.get_id() == logged_user.get_id()
        return self

    def assertHasHttpMiddleware(self, middleware):
        """Assert that the request/response cycle has the given middleware. The HTTP middleware
        class should be given."""
        assert middleware in self.application.make("middleware").http_middleware
        return self

    def assertHasRouteMiddleware(self, middleware):
        """Assert that the route has the given middleware. The registration key of the middleware
        should be used."""
        assert middleware in self.application.make("middleware").route_middleware
        return self

    def assertHasController(self, controller):
        """Assert that route used the given controller. The controller can be a class or
        a string. If it's a string it should be formatted as follow: ControllerName@method"""
        if isinstance(controller, str) and "@" in controller:
            assert self.route.controller == controller
        elif issubclass(controller, Controller):
            assert self.route.controller_class == controller
        else:
            raise ValueError(
                "controller must be a string like YourController@index or a Controller class"
            )
        return self

    def assertRouteHasParameter(self, key, value=None):
        """Assert that the route URL declares the given parameter (and that the
        request resolved it to the given value, if provided)."""
        # Fixed: the message was a plain string, so "{key}" was printed
        # literally instead of the parameter name.
        assert key in self.route.url_list, f"Route does not contain parameter {key}."
        if value is not None:
            assert self.request.param(key) == str(value)
        return self

    def _ensure_response_is_json(self):
        """Parse response back from JSON into a dict, if an error happens the response was not
        a JSON string."""
        try:
            return json.loads(self.response.content)
        except ValueError:
            raise ValueError("The response was not JSON serializable")

    def assertJson(self, data=None):
        """Assert that response is JSON and contains the given data dictionary. The assertion will
        pass even if it is not an exact match."""
        data = data or {}
        response_data = self._ensure_response_is_json()
        # Subset comparison: every given item must appear in the response.
        assert data.items() <= response_data.items()
        return self

    def assertJsonPath(self, path, value=None):
        """Assert that response is JSON and contains the given path, with eventually the given
        value if provided. The path is a dotted path."""
        response_data = self._ensure_response_is_json()
        data_at_path = data_get(response_data, path)
        assert data_at_path == value, f"'{data_at_path}' does not equal {value}"
        return self

    def assertJsonExact(self, data):
        """Assert that response is JSON and is exactly the given data."""
        response_data = self._ensure_response_is_json()
        assert response_data == data, f"'{response_data}' does not equal {data}"
        return self

    def assertJsonCount(self, count, key=None):
        """Assert that JSON response is JSON and has the given count of keys at root level
        or at the given key."""
        response_data = self._ensure_response_is_json()
        if key is not None:
            response_data = response_data.get(key, {})
        response_count = len(response_data.keys())
        assert (
            response_count == count
        ), f"JSON response count is {response_count}. Asserted {count}."
        return self

    def assertJsonMissing(self, path):
        """Assert that JSON response is JSON and does not contain given path.
        The path can be a dotted path"""
        response_data = self._ensure_response_is_json()
        assert not data_get(
            response_data, path
        ), f"'{response_data}' is not missing from {data_get(response_data, path)}"
        return self
|
#!encoding=utf-8
'''
Created on 2015年10月12日
@author: Yafei
'''
import MySQLdb
import sys
class DB(object):
    """Thin wrapper around MySQLdb connection setup and teardown.

    Connection settings are copied from a *conf* object at construction
    time. Written for Python 2 / MySQLdb (``except ..., e`` and
    ``print >>`` syntax).
    """

    def __init__(self, conf):
        """Copy connection settings from *conf*."""
        self.db_host = conf.db_host
        self.db_port = conf.db_port
        self.db_user = conf.db_user
        # NOTE(review): "<PASSWORD>" is a placeholder left by a secret-
        # scrubbing tool; the original presumably read conf.db_pwd here.
        self.db_pwd = <PASSWORD>
        self.db_name = conf.db_name
        self.db_charset = conf.db_charset

    def get_conn(self):
        """Open and return a MySQLdb connection, or None on failure."""
        conn = None
        try:
            conn=MySQLdb.connect(host=self.db_host, \
                user=self.db_user,\
                passwd=<PASSWORD>,\
                db=self.db_name,\
                port=self.db_port,\
                charset=self.db_charset)
        except MySQLdb.Error,e:
            conn = None
            # Python 2 print-to-stderr statement syntax.
            print >>sys.stderr, "Mysql Error %d: %s" % (e.args[0], e.args[1])
        return conn

    def close(self,conn):
        """Close a connection previously returned by get_conn()."""
        conn.close()
'''
import MySQLdb
try:
conn=MySQLdb.connect(host='localhost',user='root',passwd='<PASSWORD>',db='test',port=3306)
cur=conn.cursor()
cur.execute('select * from user')
cur.close()
conn.close()
except MySQLdb.Error,e:
print "Mysql Error %d: %s" % (e.args[0], e.args[1])
请注意修改你的数据库,主机名,用户名,密码。
下面来大致演示一下插入数据,批量插入数据,更新数据的例子吧:
import MySQLdb
try:
conn=MySQLdb.connect(host='localhost',user='root',passwd='<PASSWORD>',port=3306)
cur=conn.cursor()
cur.execute('create database if not exists python')
conn.select_db('python')
cur.execute('create table test(id int,info varchar(20))')
value=[1,'hi rollen']
cur.execute('insert into test values(%s,%s)',value)
values=[]
for i in range(20):
values.append((i,'hi rollen'+str(i)))
cur.executemany('insert into test values(%s,%s)',values)
cur.execute('update test set info="I am rollen" where id=3')
conn.commit()
cur.close()
conn.close()
except MySQLdb.Error,e:
print "Mysql Error %d: %s" % (e.args[0], e.args[1])
请注意一定要有conn.commit()这句来提交事务,要不然不能真正的插入数据。
运行之后我的MySQL数据库的结果就不上图了。
import MySQLdb
try:
conn=MySQLdb.connect(host='localhost',user='root',passwd='<PASSWORD>',port=3306)
cur=conn.cursor()
conn.select_db('python')
count=cur.execute('select * from test')
print 'there has %s rows record' % count
result=cur.fetchone()
print result
print 'ID: %s info %s' % result
results=cur.fetchmany(5)
for r in results:
print r
print '=='*10
cur.scroll(0,mode='absolute')
results=cur.fetchall()
for r in results:
print r[1]
conn.commit()
cur.close()
conn.close()
except MySQLdb.Error,e:
print "Mysql Error %d: %s" % (e.args[0], e.args[1])
运行结果就不贴了,太长了。
查询后中文会正确显示,但在数据库中却是乱码的。经过我从网上查找,发现用一个属性有可搞定:
在Python代码
conn = MySQLdb.Connect(host='localhost', user='root', passwd='<PASSWORD>', db='python') 中加一个属性:
改为:
conn = MySQLdb.Connect(host='localhost', user='root', passwd='<PASSWORD>', db='python',charset='utf8')
charset是要跟你数据库的编码一样,如果是数据库是gb2312 ,则写charset='gb2312'。
下面贴一下常用的函数:
然后,这个连接对象也提供了对事务操作的支持,标准的方法
commit() 提交
rollback() 回滚
cursor用来执行命令的方法:
callproc(self, procname, args):用来执行存储过程,接收的参数为存储过程名和参数列表,返回值为受影响的行数
execute(self, query, args):执行单条sql语句,接收的参数为sql语句本身和使用的参数列表,返回值为受影响的行数
executemany(self, query, args):执行单挑sql语句,但是重复执行参数列表里的参数,返回值为受影响的行数
nextset(self):移动到下一个结果集
cursor用来接收返回值的方法:
fetchall(self):接收全部的返回结果行.
fetchmany(self, size=None):接收size条返回结果行.如果size的值大于返回的结果行的数量,则会返回cursor.arraysize条数据.
fetchone(self):返回一条结果行.
scroll(self, value, mode='relative'):移动指针到某一行.如果mode='relative',则表示从当前所在行移动value条,如果 mode='absolute',则表示从结果集的第一行移动value条.
''' |
"""Docker controller"""
from time import sleep
from typing import Optional
from urllib.parse import urlparse
from django.conf import settings
from django.utils.text import slugify
from docker import DockerClient as UpstreamDockerClient
from docker.errors import DockerException, NotFound
from docker.models.containers import Container
from docker.utils.utils import kwargs_from_env
from paramiko.ssh_exception import SSHException
from structlog.stdlib import get_logger
from yaml import safe_dump
from authentik import __version__
from authentik.outposts.controllers.base import BaseClient, BaseController, ControllerException
from authentik.outposts.docker_ssh import DockerInlineSSH, SSHManagedExternallyException
from authentik.outposts.docker_tls import DockerInlineTLS
from authentik.outposts.managed import MANAGED_OUTPOST
from authentik.outposts.models import (
DockerServiceConnection,
Outpost,
OutpostServiceConnectionState,
ServiceConnectionInvalid,
)
class DockerClient(UpstreamDockerClient, BaseClient):
    """Custom docker client, which can handle TLS and SSH from a database."""

    # Temporary credential writers for this connection; their files are
    # removed again in __exit__.
    tls: Optional[DockerInlineTLS]
    ssh: Optional[DockerInlineSSH]

    def __init__(self, connection: DockerServiceConnection):
        self.tls = None
        self.ssh = None
        self.logger = get_logger()
        if connection.local:
            # Same result as DockerClient.from_env
            super().__init__(**kwargs_from_env())
        else:
            parsed_url = urlparse(connection.url)
            tls_config = False
            if parsed_url.scheme == "ssh":
                try:
                    # Write an SSH config entry + key so docker-py can use ssh://
                    self.ssh = DockerInlineSSH(parsed_url.hostname, connection.tls_authentication)
                    self.ssh.write()
                except SSHManagedExternallyException as exc:
                    # SSH config is managed externally
                    self.logger.info(f"SSH Managed externally: {exc}")
            else:
                # Non-SSH remote: write TLS verification/authentication keypairs.
                self.tls = DockerInlineTLS(
                    verification_kp=connection.tls_verification,
                    authentication_kp=connection.tls_authentication,
                )
                tls_config = self.tls.write()
            try:
                super().__init__(
                    base_url=connection.url,
                    tls=tls_config,
                )
            except SSHException as exc:
                raise ServiceConnectionInvalid from exc
        # Ensure the client actually works
        self.containers.list()

    def fetch_state(self) -> OutpostServiceConnectionState:
        """Return the daemon's version and whether the connection is healthy."""
        try:
            return OutpostServiceConnectionState(version=self.info()["ServerVersion"], healthy=True)
        except (ServiceConnectionInvalid, DockerException):
            return OutpostServiceConnectionState(version="", healthy=False)

    def __exit__(self, exc_type, exc_value, traceback):
        # Remove any credential files written during __init__.
        if self.tls:
            self.logger.debug("Cleaning up TLS")
            self.tls.cleanup()
        if self.ssh:
            self.logger.debug("Cleaning up SSH")
            self.ssh.cleanup()
class DockerController(BaseController):
    """Docker controller"""

    client: DockerClient
    container: Container
    connection: DockerServiceConnection

    def __init__(self, outpost: Outpost, connection: DockerServiceConnection) -> None:
        super().__init__(outpost, connection)
        # Managed outposts are embedded; there is no container to reconcile,
        # so no docker client is created.
        if outpost.managed == MANAGED_OUTPOST:
            return
        try:
            self.client = DockerClient(connection)
        except DockerException as exc:
            self.logger.warning(exc)
            raise ControllerException from exc

    @property
    def name(self) -> str:
        """Get the name of the object this reconciler manages"""
        return (
            self.outpost.config.object_naming_template
            % {
                "name": slugify(self.outpost.name),
                "uuid": self.outpost.uuid.hex,
            }
        ).lower()

    def _get_labels(self) -> dict[str, str]:
        """Labels applied to the managed container: outpost UUID plus any
        user-configured extra labels."""
        labels = {
            "io.goauthentik.outpost-uuid": self.outpost.pk.hex,
        }
        if self.outpost.config.docker_labels:
            labels.update(self.outpost.config.docker_labels)
        return labels

    def _get_env(self) -> dict[str, str]:
        """Environment the outpost container needs to connect to authentik."""
        return {
            "AUTHENTIK_HOST": self.outpost.config.authentik_host.lower(),
            "AUTHENTIK_INSECURE": str(self.outpost.config.authentik_host_insecure).lower(),
            "AUTHENTIK_TOKEN": self.outpost.token.key,
            "AUTHENTIK_HOST_BROWSER": self.outpost.config.authentik_host_browser,
        }

    def _comp_env(self, container: Container) -> bool:
        """Check if container's env is equal to what we would set. Return true if container needs
        to be rebuilt."""
        should_be = self._get_env()
        container_env = container.attrs.get("Config", {}).get("Env", [])
        for key, expected_value in should_be.items():
            # Docker reports env as flat "KEY=value" strings.
            entry = f"{key.upper()}={expected_value}"
            if entry not in container_env:
                return True
        return False

    def _comp_labels(self, container: Container) -> bool:
        """Check if container's labels is equal to what we would set. Return true if container needs
        to be rebuilt."""
        should_be = self._get_labels()
        for key, expected_value in should_be.items():
            if key not in container.labels:
                return True
            if container.labels[key] != expected_value:
                return True
        return False

    def _comp_ports(self, container: Container) -> bool:
        """Check that the container has the correct ports exposed. Return true if container needs
        to be rebuilt."""
        # with TEST enabled, we use host-network
        if settings.TEST:
            return False
        # When the container isn't running, the API doesn't report any port mappings
        if container.status != "running":
            return False
        # {'3389/tcp': [
        #     {'HostIp': '0.0.0.0', 'HostPort': '389'},
        #     {'HostIp': '::', 'HostPort': '389'}
        # ]}
        # If no ports are mapped (either mapping disabled, or host network)
        if not container.ports or not self.outpost.config.docker_map_ports:
            return False
        for port in self.deployment_ports:
            key = f"{port.inner_port or port.port}/{port.protocol.lower()}"
            if not container.ports.get(key, None):
                return True
            host_matching = False
            for host_port in container.ports[key]:
                host_matching = host_port.get("HostPort") == str(port.port)
            if not host_matching:
                return True
        return False

    def try_pull_image(self):
        """Try to pull the image needed for this outpost based on the CONFIG
        `outposts.container_image_base`, but fall back to known-good images"""
        image = self.get_container_image()
        try:
            self.client.images.pull(image)
        except DockerException:  # pragma: no cover
            image = f"goauthentik.io/{self.outpost.type}:latest"
            self.client.images.pull(image)
        return image

    def _get_container(self) -> tuple[Container, bool]:
        """Return (container, created): fetch the existing container by name,
        or create a new one from the desired configuration."""
        try:
            return self.client.containers.get(self.name), False
        except NotFound:
            self.logger.info("(Re-)creating container...")
            image_name = self.try_pull_image()
            container_args = {
                "image": image_name,
                "name": self.name,
                "detach": True,
                "environment": self._get_env(),
                "labels": self._get_labels(),
                "restart_policy": {"Name": "unless-stopped"},
                "network": self.outpost.config.docker_network,
            }
            if self.outpost.config.docker_map_ports:
                container_args["ports"] = {
                    f"{port.inner_port or port.port}/{port.protocol.lower()}": str(port.port)
                    for port in self.deployment_ports
                }
            if settings.TEST:
                # Tests use host networking instead of mapped ports.
                # NOTE(review): assumes "ports" was set above, i.e. that
                # docker_map_ports is enabled under TEST — confirm.
                del container_args["ports"]
                del container_args["network"]
                container_args["network_mode"] = "host"
            return (
                self.client.containers.create(**container_args),
                True,
            )

    def _migrate_container_name(self):  # pragma: no cover
        """Migrate 2021.9 to 2021.10+"""
        old_name = f"authentik-proxy-{self.outpost.uuid.hex}"
        try:
            old_container: Container = self.client.containers.get(old_name)
            old_container.kill()
            old_container.remove()
        except NotFound:
            return

    # pylint: disable=too-many-return-statements
    def up(self, depth=1):
        """Reconcile the container towards the desired state, re-creating it
        (with bounded recursion) whenever configuration has drifted."""
        if self.outpost.managed == MANAGED_OUTPOST:
            return None
        if depth >= 10:
            raise ControllerException("Giving up since we exceeded recursion limit.")
        self._migrate_container_name()
        try:
            container, has_been_created = self._get_container()
            if has_been_created:
                container.start()
                return None
            # Check if the container is out of date, delete it and retry
            if len(container.image.tags) > 0:
                should_image = self.try_pull_image()
                if should_image not in container.image.tags:  # pragma: no cover
                    self.logger.info(
                        "Container has mismatched image, re-creating...",
                        has=container.image.tags,
                        should=should_image,
                    )
                    self.down()
                    return self.up(depth + 1)
            # Check container's ports
            if self._comp_ports(container):
                self.logger.info("Container has mis-matched ports, re-creating...")
                self.down()
                return self.up(depth + 1)
            # Check that container values match our values
            if self._comp_env(container):
                self.logger.info("Container has outdated config, re-creating...")
                self.down()
                return self.up(depth + 1)
            # Check that container values match our values
            if self._comp_labels(container):
                self.logger.info("Container has outdated labels, re-creating...")
                self.down()
                return self.up(depth + 1)
            if (
                container.attrs.get("HostConfig", {})
                .get("RestartPolicy", {})
                .get("Name", "")
                .lower()
                != "unless-stopped"
            ):
                self.logger.info("Container has mis-matched restart policy, re-creating...")
                self.down()
                return self.up(depth + 1)
            # Check that container is healthy
            if container.status == "running" and container.attrs.get("State", {}).get(
                "Health", {}
            ).get("Status", "") not in ["healthy", "starting"]:
                # At this point we know the config is correct, but the container isn't healthy,
                # so we just restart it with the same config
                # NOTE(review): has_been_created is always False here, since
                # the created case returned earlier — confirm intent.
                if has_been_created:
                    # Since we've just created the container, give it some time to start.
                    # If its still not up by then, restart it
                    self.logger.info("Container is unhealthy and new, giving it time to boot.")
                    sleep(60)
                self.logger.info("Container is unhealthy, restarting...")
                container.restart()
                return None
            # Check that container is running
            if container.status != "running":
                self.logger.info("Container is not running, restarting...")
                container.start()
                return None
            self.logger.info("Container is running")
            return None
        except DockerException as exc:
            raise ControllerException(str(exc)) from exc

    def down(self):
        """Stop and remove the managed container (no-op for managed outposts)."""
        if self.outpost.managed == MANAGED_OUTPOST:
            return
        try:
            container, _ = self._get_container()
            if container.status == "running":
                self.logger.info("Stopping container.")
                container.kill()
            self.logger.info("Removing container.")
            container.remove(force=True)
        except DockerException as exc:
            raise ControllerException(str(exc)) from exc

    def get_static_deployment(self) -> str:
        """Generate docker-compose yaml for proxy, version 3.5"""
        ports = [
            f"{port.port}:{port.inner_port or port.port}/{port.protocol.lower()}"
            for port in self.deployment_ports
        ]
        image_name = self.get_container_image()
        compose = {
            "version": "3.5",
            "services": {
                f"authentik_{self.outpost.type}": {
                    "image": image_name,
                    "ports": ports,
                    "environment": {
                        "AUTHENTIK_HOST": self.outpost.config.authentik_host,
                        "AUTHENTIK_INSECURE": str(self.outpost.config.authentik_host_insecure),
                        "AUTHENTIK_TOKEN": self.outpost.token.key,
                        "AUTHENTIK_HOST_BROWSER": self.outpost.config.authentik_host_browser,
                    },
                    "labels": self._get_labels(),
                }
            },
        }
        return safe_dump(compose, default_flow_style=False)
|
<gh_stars>1-10
import sys
import os
import re
import glob
import subprocess
import shutil
import warnings
import tclwrapper
from tclwrapper.tclutil import *
import bluespecrepl.verilog_mutator as verilog_mutator
import bluespecrepl.pyverilatorbsv as pyverilatorbsv
class BSVProject:
    """Bluespec System Verilog Project class.

    This class allows for BSV projects to be manipulated from Python. Projects
    can be created by the __init__ function or they can be imported from
    *.bspec files. Projects can also be exported to *.bspec files.

    Each project has the following project configuration variables:
    bsv_path -- list of directories containing BSV source files
    v_path -- list of directories containing Verilog source files
    build_dir -- output directory for .bo/.ba files
    sim_dir -- output directory for bluesim files (except the executable)
    verilog_dir -- output directory for verilog files
    info_dir -- output directory for miscellaneous info files
    f_dir -- base directory used for relative paths in BSV files
    sim_exe -- name for bluesim executable
    bsc_options -- list of additional command line arguments for bsc
    rts_options -- list of RTS command line arguments for bsc
    """

    # paths that are always appended to the end of the user-specified paths
    # (presumably '+' stands for bsc's built-in default path — see bsc docs)
    default_paths = ['+']
    # automatically add these to self.bsc_options in the __init__ function
    default_bsc_options = ['-aggressive-conditions', '-keep-fires']
def __init__(self, top_file = None, top_module = None, bsv_path = [], v_path = None, build_dir = 'build_dir', sim_dir = 'sim_dir', verilog_dir = 'verilog_dir', info_dir = 'info_dir', f_dir = '.', sim_exe = 'sim.out', bsc_options = [], rts_options = [], bspec_file = None):
if bspec_file is not None:
self.import_bspec_project_file(bspec_file)
else:
if top_file is None or top_module is None:
raise ValueError('Either top_file and top_module need to be defined, or bspec_file needs to be defined')
# Project Definition
self.top_file = top_file
self.top_module = top_module
# Path
self.bsv_path = bsv_path.copy()
if v_path is not None:
self.v_path = v_path
else:
# default verilog directory
self.v_path = [ os.path.join( os.environ['BLUESPECDIR'], 'Verilog' ) ]
# Directories
self.build_dir = build_dir
self.sim_dir = sim_dir
self.verilog_dir = verilog_dir
self.info_dir = info_dir
self.f_dir = f_dir
# Options
self.sim_exe = sim_exe
for arg in BSVProject.default_bsc_options:
if arg not in bsc_options:
bsc_options.append(arg)
self.bsc_options = bsc_options.copy()
self.rts_options = rts_options.copy()
# stuctures that hold metadata obtained from bluetcl
self.packages = None
self.modules = None
# command line argument formatting
def get_dir_args(self, build_dir = None, sim_dir = None, verilog_dir = None, info_dir = None, f_dir = None):
"""Returns formatted bsc arguments for output directories."""
if build_dir == None:
build_dir = self.build_dir
if sim_dir == None:
sim_dir = self.sim_dir
if verilog_dir == None:
verilog_dir = self.verilog_dir
if info_dir == None:
info_dir = self.info_dir
if f_dir == None:
f_dir = self.f_dir
for directory in [build_dir, sim_dir, verilog_dir, info_dir, f_dir]:
if not os.path.exists(directory):
os.makedirs(directory)
return ['-bdir', build_dir,
'-simdir', sim_dir,
'-vdir', verilog_dir,
'-info-dir', info_dir,
'-fdir', f_dir]
def get_path_arg(self):
"""Returns formatted bsc arguments for the path."""
# The bluespec compiler automatically adds build_dir to the front of the path, but bluetcl does not,
# so we add it manually and get a warning from the bluespec compiler about redundant folders in the path
return ['-p', ':'.join([self.build_dir] + self.bsv_path + BSVProject.default_paths)]
def get_sim_exe_out_arg(self):
"""Returns formatted bsc argument for the sim exe."""
dirname = os.path.dirname(self.sim_exe)
if dirname and not os.path.exists(dirname):
os.makedirs(dirname)
return ['-o', self.sim_exe]
# compilation functions
def compile_verilog(self, out_folder = None, extra_bsc_args = None):
    """Compiles the project to verilog.

    If out_folder is specified, the verilog is written there. Otherwise the
    verilog is written to the projects verilog_dir.

    Raises Exception if the Bluespec compiler exits with a nonzero status.
    """
    # default to None (not []) to avoid a mutable default argument
    extra_args = list(extra_bsc_args) if extra_bsc_args else []
    # add the -elab flag to ensure .ba files are generated during compilation
    # .ba files are used by bluetcl to get information about the design
    bsc_command = (['bsc', '-verilog', '-elab'] + self.bsc_options + extra_args
        + self.get_dir_args(verilog_dir = out_folder) + self.get_path_arg()
        + ['-g', self.top_module, '-u', self.top_file])
    exit_code = subprocess.call(bsc_command)
    if exit_code != 0:
        raise Exception('Bluespec Compiler failed compilation')
def compile_bluesim(self, out_folder = None, extra_bsc_args = None):
    """Compiles the project to a bluesim executable.

    If out_folder is specified, the bluesim intermediate files are written
    there. Otherwise the files are written to sim_dir.

    Runs bsc twice (compile, then link) and raises Exception if either
    invocation exits with a nonzero status.
    """
    # default to None (not []) to avoid a mutable default argument
    extra_args = list(extra_bsc_args) if extra_bsc_args else []
    # both bsc invocations share the same option/directory arguments
    common_args = (self.bsc_options + extra_args
        + self.get_dir_args(sim_dir = out_folder) + self.get_path_arg())
    # compile step
    compile_command = ['bsc', '-sim'] + common_args + ['-g', self.top_module, '-u', self.top_file]
    if subprocess.call(compile_command) != 0:
        raise Exception('Bluespec Compiler failed compilation')
    # link step
    link_command = ['bsc', '-sim'] + common_args + self.get_sim_exe_out_arg() + ['-e', self.top_module]
    if subprocess.call(link_command) != 0:
        raise Exception('Bluespec Compiler failed compilation')
def gen_python_repl(self, scheduling_control = False, verilator_dir = 'verilator_dir'):
    """Compiles the project to a python BluespecREPL compatable verilator executable.

    Compiles to verilog, extracts interface metadata via bluetcl, copies the
    generated .v files into verilator_dir, and (optionally) rewrites them to
    expose rule scheduling signals before building the PyVerilatorBSV object.
    """
    extra_bsc_args = []
    if scheduling_control:
        # -no-opt-ATS keeps scheduling signals visible in the generated verilog
        extra_bsc_args.append('-no-opt-ATS')
    self.compile_verilog(extra_bsc_args = extra_bsc_args)
    # now get interface information
    self.populate_packages_and_modules()
    interface = [(hierarchy, method.to_dict()) for hierarchy, method in self.modules[self.top_module].interface.methods]
    # copy verilog files to verilator dir
    verilator_verilog_files = {} # map from module name to verilog file
    if not os.path.exists(verilator_dir):
        os.makedirs(verilator_dir)
    for name in os.listdir(self.verilog_dir):
        base, extension = os.path.splitext(name)
        if extension.lower() == '.v':
            shutil.copy(os.path.join(self.verilog_dir, name), os.path.join(verilator_dir, name))
            verilator_verilog_files[base] = os.path.join(verilator_dir, name)
    verilog_file = os.path.join(verilator_dir, self.top_module + '.v')
    rules = []
    if scheduling_control:
        # modify the compiled verilog to add scheduling control signals
        # this is done hierarchically from the leaf modules to the top module
        mutators = { module : verilog_mutator.VerilogMutator(module_verilog_file) for module, module_verilog_file in verilator_verilog_files.items() }
        submodules = { module : mutator.get_submodules() for module, mutator in mutators.items() }
        modules_to_mutate = list(verilator_verilog_files.keys())
        num_rules_per_module = {}
        rule_names_per_module = {}
        while len(modules_to_mutate) != 0:
            # pick a module whose submodules have all been mutated already
            module_to_mutate = None
            for module in modules_to_mutate:
                good_candidate = True
                for instance_name, instance_module in submodules[module]:
                    if instance_module in modules_to_mutate:
                        good_candidate = False
                if good_candidate:
                    module_to_mutate = module
                    break
            if module_to_mutate is not None:
                mutator = mutators[module_to_mutate]
                num_rules = mutator.expose_internal_scheduling_signals(num_rules_per_module = num_rules_per_module)
                # NOTE(review): `module` is the leftover loop variable from the
                # search above; it equals module_to_mutate here because the loop
                # exited via break — consider using module_to_mutate explicitly.
                mutator.write_verilog(verilator_verilog_files[module])
                # NOTE(review): this assignment is overwritten after the loop by
                # rules = rule_names_per_module[self.top_module]
                rules = mutator.get_rules_in_scheduling_order()
                num_rules_per_module[module_to_mutate] = num_rules
                # get list of rule names
                full_module_rule_names = []
                for sched_item in mutator.get_default_scheduling_order():
                    if sched_item.startswith('RL_'):
                        full_module_rule_names.append(sched_item)
                    elif sched_item.startswith('MODULE_'):
                        # expand a submodule reference into its (prefixed) rules
                        submodule_instance_name = sched_item[len('MODULE_'):]
                        submodule_type = [y for x, y in submodules[module_to_mutate] if x == submodule_instance_name][0]
                        if submodule_type not in rule_names_per_module:
                            # this submodule has no known rules
                            continue
                        submodule_rule_names = [submodule_instance_name + '__DOT__' + x for x in rule_names_per_module[submodule_type]]
                        full_module_rule_names += submodule_rule_names
                    else:
                        raise Exception('Unsupported scheuling item type')
                rule_names_per_module[module_to_mutate] = full_module_rule_names
                rule_order = mutator.get_default_scheduling_order()
                modules_to_mutate.remove(module_to_mutate)
            else:
                raise Exception("Adding scheduling control failed. Can't find next module to mutate")
        # get rule names
        rules = rule_names_per_module[self.top_module]
    return pyverilatorbsv.PyVerilatorBSV.build(
        verilog_file,
        verilog_path = [verilator_dir] + self.v_path,
        build_dir = verilator_dir,
        interface = interface,
        rules = rules,
        bsc_build_dir = self.build_dir)
def clean(self):
"""Deletes output from project compilation."""
cleaning_targets = [
(self.build_dir, ['.ba', '.bo']),
(self.sim_dir, ['.cxx', '.h', '.o']),
(self.verilog_dir, ['.v']),
(self.info_dir, [])]
# This function should delete:
# *.ba, *.bo from build_dir
# *.cxx, *.h, *.o from sim_dir
# *.v from verilog_dir
# ? from info_dir
# sim_exe
for path, extensions in cleaning_targets:
for name in os.listdir(path):
if os.path.splitext(name)[1].lower() in extensions:
os.remove(os.path.join(path, name))
try:
os.rmdir(path)
except OSError:
# ignore errors
pass
try:
os.remove(self.sim_exe)
except OSError:
# ignore errors
pass
# import/export methods
def import_bspec_project_file(self, filename):
    """Import project settings from a .bspec file.

    This does not import v_path."""
    # .bspec files are Tcl scripts; pick out the PROJECT(...) assignments
    pattern = re.compile(r'set PROJECT\((.*?)\) "(.*)"')
    params = {}
    with open(filename) as f:
        for line in f:
            match = pattern.match(line)
            if match:
                params[match.group(1)] = match.group(2)
    self.import_bspec_config_params(params)
def export_bspec_project_file(self, filename):
    """Export project settings to a .bspec file.

    This does not export v_path."""
    # the template ships alongside this module under templates/
    template_path = os.path.join(os.path.realpath(os.path.dirname(os.path.realpath(__file__))), 'templates', 'template.bspec')
    with open(template_path) as f:
        template = f.read()
    rendered = template.format(**self.export_bspec_config_params())
    with open(filename, 'w') as f:
        f.write(rendered)
def import_bspec_config_params(self, params):
    """Imports project settings from parameters defined in a *.bspec file.

    This does not import v_path. `params` maps PROJECT(...) keys from the
    .bspec file to their string values.
    """
    self.top_file = params['TOP_FILE']
    self.top_module = params['TOP_MODULE']
    self.bsv_path = list(tclstring_to_list(params['PATHS']))
    self.build_dir = params['COMP_BDIR']
    self.sim_dir = params['COMP_SIMDIR']
    self.verilog_dir = params['COMP_VDIR']
    self.info_dir = params['COMP_INFO_DIR']
    self.f_dir = params['CURRENT_DIR']
    self.sim_exe = os.path.join(params['LINK_OUTDIR'], params['LINK_OUTNAME'])
    self.bsc_options = params['COMP_BSC_OPTIONS'].split(' ')
    # merge the link-time options into the single bsc_options list, deduplicated
    link_bsc_options = params['LINK_BSC_OPTIONS'].split(' ')
    for opt in link_bsc_options:
        if opt not in self.bsc_options:
            self.bsc_options.append(opt)
    self.rts_options = params['COMP_RTS_OPTIONS'].split(' ')
    # strip default path arguments from self.bsv_path
    # (they are re-appended by get_path_arg / export)
    for path in BSVProject.default_paths:
        if path in self.bsv_path:
            self.bsv_path.remove(path)
    # assume the default v_path
    self.v_path = [ os.path.join( os.environ['BLUESPECDIR'], 'Verilog' ) ]
def export_bspec_config_params(self):
    """Exports project settings to a dict of *.bspec file parameters.

    This does not export v_path."""
    # LINK_OUTDIR must be a real path; fall back to '.' when sim_exe is bare
    link_outdir = os.path.dirname(self.sim_exe) or '.'
    return {
        'TOP_FILE': self.top_file,
        'TOP_MODULE': self.top_module,
        'PATHS': list_to_tclstring([self.build_dir] + self.bsv_path + BSVProject.default_paths),
        'COMP_BDIR': self.build_dir,
        'COMP_SIMDIR': self.sim_dir,
        'COMP_VDIR': self.verilog_dir,
        'COMP_INFO_DIR': self.info_dir,
        'CURRENT_DIR': self.f_dir,
        'LINK_OUTDIR': link_outdir,
        'LINK_OUTNAME': os.path.basename(self.sim_exe),
        'COMP_BSC_OPTIONS': ' '.join(self.bsc_options),
        'LINK_BSC_OPTIONS': ' '.join(self.bsc_options),
        'COMP_RTS_OPTIONS': ' '.join(self.rts_options),
    }
def populate_packages_and_modules(self, force = False):
    """Populates self.packages and self.modules members using information from bluetcl.

    self.packages is a dictionary mapping package names to BluespecPackage objects.
    self.modules is a dictionary mapping module names to BluespecModule objects.
    If self.packages and self.modules have already been filled, this function does
    nothing unless force is True.
    """
    if not force and self.packages is not None and self.modules is not None:
        # nothing to do
        return
    # the .ba file only exists after a successful elaboration run
    if not os.path.isfile(os.path.join(self.build_dir, self.top_module + '.ba')):
        raise Exception("top file not elaborated: either you forgot to build the design or the top module doesn't have a (* synthesize *) attribute")
    with tclwrapper.TCLWrapper('bluetcl') as bluetcl:
        bluetcl.eval('Bluetcl::flags set -verilog ' + ' '.join(self.get_path_arg()))
        # load top package
        bluetcl.eval('Bluetcl::bpackage load %s' % os.path.basename(self.top_file).split('.')[0])
        # list all packages (loading the top package pulls in its dependencies)
        packages = bluetcl.eval('Bluetcl::bpackage list', to_list = True)
        if force or self.packages is None:
            self.packages = { pkg_name : BluespecPackage(pkg_name, bluetcl) for pkg_name in packages}
        if force or self.modules is None:
            self.modules = {}
            for package_name in self.packages:
                for module in self.packages[package_name].modules:
                    if module not in self.modules:
                        self.modules[module] = BluespecModule(module, bluetcl)
# Advanced Functions
#####################
def get_submodules(self):
    """Returns a dictionary of submodules for each module in the current package.

    The dictionary has module names as keys and lists of (instance_name, module_name)
    tuples as values. Only user-defined ('user') modules are included.
    """
    submodule_dict = {}
    with tclwrapper.TCLWrapper('bluetcl') as bluetcl:
        bluetcl.eval('Bluetcl::flags set -verilog ' + ' '.join(self.get_path_arg()))
        bluetcl.eval('Bluetcl::bpackage load %s' % os.path.basename(self.top_file).split('.')[0])
        packages = bluetcl.eval('Bluetcl::bpackage list', to_list = True)
        # "Bluetcl::defs module <pkg>" returns modules with package names as well,
        # but "Bluetcl::module submods <mod>" doesn't accept package names, so they should be stripped
        modules = [mod.split('::')[-1] for pkg in packages for mod in bluetcl.eval('Bluetcl::defs module %s' % pkg, to_list = True)]
        # deduplicate while preserving order
        uniq_modules = []
        for mod in modules:
            if mod not in uniq_modules:
                uniq_modules.append(mod)
        for module in uniq_modules:
            bluetcl.eval('Bluetcl::module load %s' % module)
            user_or_prim, submodules, functions = tclstring_to_list(bluetcl.eval('Bluetcl::module submods %s' % module))
            submodules = tclstring_to_nested_list(submodules, levels = 2)
            if user_or_prim == 'user':
                submodule_dict[module] = submodules
    return submodule_dict
def get_rule_method_calls(self):
    """Returns a dictionary of rules and methodcalls for each rule in the current package.

    The dictionary maps module names to lists of (rule, methods) tuples in
    execution order.
    """
    rule_method_call_dict = {}
    with tclwrapper.TCLWrapper('bluetcl') as bluetcl:
        bluetcl.eval('Bluetcl::flags set -verilog ' + ' '.join(self.get_path_arg()))
        bluetcl.eval('Bluetcl::bpackage load %s' % os.path.basename(self.top_file).split('.')[0])
        packages = bluetcl.eval('Bluetcl::bpackage list', to_list = True)
        # "Bluetcl::defs module <pkg>" returns modules with package names as well,
        # but "Bluetcl::module submods <mod>" doesn't accept package names, so they should be stripped
        modules = [mod.split('::')[-1] for pkg in packages for mod in bluetcl.eval('Bluetcl::defs module %s' % pkg, to_list = True)]
        # deduplicate while preserving order
        uniq_modules = []
        for mod in modules:
            if mod not in uniq_modules:
                uniq_modules.append(mod)
        for module in uniq_modules:
            bluetcl.eval('Bluetcl::module load %s' % module)
            execution_order = tclstring_to_list(bluetcl.eval('Bluetcl::schedule execution %s' % module))
            rule_method_call_dict[module] = []
            for rule in execution_order:
                rule_info = tclstring_to_list(bluetcl.eval('Bluetcl::rule full %s %s' % (module, rule)))
                # Search for the entry tagged 'methods' rather than assuming it
                # is always rule_info[3]: rules carrying attributes shift it to
                # the 4th slot (same approach as BluespecModule.__init__).
                methods_tclstring = None
                for item in rule_info:
                    if item.startswith('methods'):
                        methods_tclstring = str(item[len('methods '):])
                if methods_tclstring is None:
                    raise Exception('"method" tag was not found in "Bluetcl::rule full <mod> <rule>"')
                method_calls = tclstring_to_flat_list(methods_tclstring)
                rule_method_call_dict[module].append((rule, method_calls))
    return rule_method_call_dict
def get_hierarchy(self, module_name = None):
    """Return the instantiation hierarchy rooted at module_name (default: top module).

    Maps each reachable module name to a list of (instance_name, submodule_name)
    tuples, or to None when bluetcl cannot load the module (primitives).
    """
    if module_name is None:
        module_name = self.top_module
    hierarchy = {}
    modules_to_add = [module_name]
    with tclwrapper.TCLWrapper('bluetcl') as bluetcl:
        bluetcl.eval('Bluetcl::flags set -verilog ' + ' '.join(self.get_path_arg()))
        # breadth-unordered worklist traversal of the module tree
        while len(modules_to_add) > 0:
            curr_module_name = modules_to_add.pop()
            try:
                bluetcl.eval('Bluetcl::module load ' + curr_module_name)
                hierarchy[curr_module_name] = []
                user_or_prim, submodules, functions = tclstring_to_nested_list(bluetcl.eval('Bluetcl::module submods ' + curr_module_name))
                if user_or_prim == 'user':
                    for instance_name, submodule_name in submodules:
                        if submodule_name not in hierarchy and submodule_name not in modules_to_add:
                            modules_to_add.append(submodule_name)
                        hierarchy[curr_module_name].append((instance_name, submodule_name))
            except tclwrapper.TCLWrapperError as e:
                # couldn't load modules, typically the case for primitive modules such as FIFOs
                hierarchy[curr_module_name] = None
    return hierarchy
def get_module_schedule(self, module_name = None):
    """Return the execution schedule (rule order) of a module via bluetcl.

    Defaults to the project's top module when module_name is None.
    """
    target = self.top_module if module_name is None else module_name
    with tclwrapper.TCLWrapper('bluetcl') as bluetcl:
        bluetcl.eval('Bluetcl::flags set -verilog ' + ' '.join(self.get_path_arg()))
        bluetcl.eval('Bluetcl::module load ' + target)
        return tclstring_to_list(bluetcl.eval('Bluetcl::schedule execution ' + target))
def get_complete_schedule_from_bluesim(self):
    """Returns the complete schedule for the top module.

    The schedule is a combination of top-level interface methods, top-level
    rules, and submodule rules. This requires compiling for bluesim.
    Each schedule entry is a tuple of instance names ending in a rule/method name.
    """
    # The complete schedule can be inferred by this file
    bluesim_model_file = os.path.join(self.sim_dir, 'model_%s.cxx' % self.top_module)
    # bluesim compilation is required to generate the bluesim_model_file
    self.compile_bluesim()
    # regex patterns
    # start and end of schedule_posedge_CLK function
    # not exact, but good enough
    fn_start_regex = r'^static void schedule_posedge_CLK'
    fn_end_regex = r'^[^\s]'
    # schedule pattern: guarded WILL_FIRE checks inside the scheduler function
    schedule_regex = r'if \(INST_top.([^)]*)\)'
    with open(bluesim_model_file, 'r') as f:
        complete_schedule = []
        # skip to start of schedule_posedge_CLK function
        line = f.readline()
        while not re.search(fn_start_regex, line):
            line = f.readline()
        line = f.readline()
        # scan until the first non-indented line (end of the function body)
        while not re.search(fn_end_regex, line):
            match = re.search(schedule_regex, line)
            if match:
                # remove INST_ and DEF_WILL_FIRE_ from the hierarchy
                hierarchy = match.group(1).split('.')
                for i in range(len(hierarchy)):
                    if i == len(hierarchy) - 1:
                        # last element names the rule via its WILL_FIRE signal
                        if not hierarchy[i].startswith('DEF_WILL_FIRE_'):
                            raise ValueError("full schedule hierarchy has unexpected element")
                        hierarchy[i] = hierarchy[i][len('DEF_WILL_FIRE_'):]
                    else:
                        # intermediate elements are instance names
                        if not hierarchy[i].startswith('INST_'):
                            raise ValueError("full schedule hierarchy has unexpected element")
                        hierarchy[i] = hierarchy[i][len('INST_'):]
                complete_schedule.append(tuple(hierarchy))
            line = f.readline()
    return complete_schedule
def get_complete_schedule(self, module_name = None):
    """Returns the complete schedule for the top module.

    The schedule is a combination of top-level interface methods, top-level
    rules, and submodule rules. Computed from bluetcl metadata alone
    (no bluesim compile), by flattening per-module execution orders into one
    global partial order and topologically sorting it.
    """
    # from scratch
    if self.modules is None:
        self.populate_packages_and_modules()
    if module_name is None:
        module_name = self.top_module
    # flatten the instantiation tree: dotted instance name -> BluespecModule
    instance_dict = {}
    worklist = [ (module_name, self.modules[module_name]) ]
    while len(worklist) != 0:
        instance_name, module = worklist.pop()
        instance_dict[instance_name] = module
        for submodule_instance, submodule_type in module.submodules:
            if submodule_type in self.modules:
                worklist.append((instance_name + '.' + submodule_instance, self.modules[submodule_type]))
    partial_order = {}
    called_methods = {} # list of rules (and methods) that call a given method
    for instance_name, module in instance_dict.items():
        # add execution to partial order
        for i in range(len(module.execution)):
            partial_order[instance_name + '.' + module.execution[i]] = [instance_name + '.' + x for x in module.execution[i+1:]]
        # add method calls to partial order
        # get list of rules that call each method
        for rule, methods in module.method_calls_by_rule.items():
            full_rule_name = instance_name + '.' + rule
            for method in methods:
                full_method_name = instance_name + '.' + method
                if full_method_name not in called_methods:
                    called_methods[full_method_name] = [full_rule_name]
                else:
                    called_methods[full_method_name].append(full_rule_name)
        # make sure all lower-level methods appear in called_methods, even if they are not called by a rule
        # NOTE(review): entries of module.execution only contain dots when the
        # schedule lists qualified submodule methods -- confirm this branch is
        # ever taken for plain 'RL_*' rule names.
        for rule in module.execution:
            if rule.count('.') > 1 and not rule.split('.')[-1].startswith('RL_'):
                # this is a lower-level method
                if rule not in called_methods:
                    called_methods[rule] = []
    # the items in called_methods are a list of rules and methods, this function helps to get just rules
    # similar to taking the transitive closure of called_methods
    def get_rules_from_rule_or_method(x):
        if x not in called_methods:
            # x is a rule or top-level method
            return [x]
        rules = [get_rules_from_rule_or_method(y) for y in called_methods[x]]
        rules = sum(rules, []) # flatten rules
        return list(set(rules))
    # create a new partial order that doesn't contain called methods
    new_partial_order = {}
    for first_rule, second_rules in partial_order.items():
        actual_first_rules = get_rules_from_rule_or_method(first_rule)
        actual_second_rules = []
        for second_rule in second_rules:
            actual_second_rules += get_rules_from_rule_or_method(second_rule)
        for r1 in actual_first_rules:
            if r1 not in new_partial_order:
                # NOTE(review): this aliases the same actual_second_rules list
                # across several r1 keys; the += below then mutates it for all
                # of them. Verify this sharing is intended.
                new_partial_order[r1] = actual_second_rules
            else:
                new_partial_order[r1] += actual_second_rules
    # cleanup new_partial_order: deduplicate and drop self-edges
    for first_rule in new_partial_order:
        new_partial_order[first_rule] = list(set(new_partial_order[first_rule]))
        while new_partial_order[first_rule].count(first_rule) > 0:
            new_partial_order[first_rule].remove(first_rule)
    partial_order = new_partial_order.copy()
    full_schedule = []
    to_schedule = set(partial_order.keys())
    # schedule rules from end to beginning (topological sort by repeatedly
    # taking a rule with no successors)
    while len(to_schedule) > 0:
        removed_candidate = False
        for candidate in to_schedule:
            if len(partial_order[candidate]) == 0:
                to_schedule.remove(candidate)
                full_schedule = [candidate] + full_schedule
                # remove candidate from all the partial orders
                for x in partial_order:
                    while partial_order[x].count(candidate) > 0:
                        partial_order[x].remove(candidate)
                removed_candidate = True
                break
        if not removed_candidate:
            # no candidate without successors -> the order has a cycle
            raise Exception("getting the full schedule failed")
    return full_schedule
# There are no links between bluespec packages and modules, everything is done by name
class BluespecPackage:
    """Metadata for one Bluespec package, gathered through a live bluetcl session.

    Holds the package's type names/definitions, module names, and functions.
    There are no object links between packages and modules; lookup is by name.
    """
    def __init__(self, name, bluetcl):
        # first open package if its not already open
        bluetcl.eval('Bluetcl::bpackage load %s' % name)
        self.name = name
        def remove_package_name(n):
            # 'Pkg::Name' -> 'Name'
            return n.split('::')[-1]
        self.type_names = list(map(remove_package_name, tclstring_to_list(bluetcl.eval('Bluetcl::defs type %s' % name))))
        self.types = {}
        for type_name in self.type_names:
            try:
                self.types[type_name] = bluetcl.eval('Bluetcl::type full [Bluetcl::type constr {%s}]' % type_name)
            except tclwrapper.TCLWrapperError as e:
                # only raise the exception further if its not from Prelude or StmtFSM
                # Prelude causes this exception for ActionValue, Action, and List_$Cons
                # StmtFSM causes this exception for State' and NCount'
                if name != 'Prelude' and name != 'StmtFSM':
                    raise e
        self.modules = list(map(remove_package_name, tclstring_to_list(bluetcl.eval('Bluetcl::defs module %s' % name))))
        self.func = tclstring_to_list(bluetcl.eval('Bluetcl::defs func %s' % name))
class BluespecModule:
    """Scheduling, submodule, and interface metadata for one synthesized module.

    All information is pulled from a live bluetcl session at construction time.
    """
    def __init__(self, name, bluetcl):
        # 'name' may be package-qualified as 'Package::module'
        if '::' in name:
            self.package = name.split('::')[0]
            self.name = name.split('::')[1]
        else:
            self.package = None
            self.name = name
        bluetcl.eval('Bluetcl::module load %s' % self.name)
        # get scheduling info (urgency and execution)
        urgency_tclstrings = tclstring_to_list(bluetcl.eval('Bluetcl::schedule urgency %s' % self.name))
        urgency_lists = list(map(tclstring_to_flat_list, urgency_tclstrings))
        # urgency is a list of rules that block a given rule
        self.urgency = { x[0] : x[1:] for x in urgency_lists}
        self.execution = tclstring_to_list(bluetcl.eval('Bluetcl::schedule execution %s' % self.name))
        self.methodinfo = tclstring_to_list(bluetcl.eval('Bluetcl::schedule methodinfo %s' % self.name))
        self.pathinfo = tclstring_to_list(bluetcl.eval('Bluetcl::schedule pathinfo %s' % self.name))
        # get submodule info (list of submodule instance names and constructors)
        user_or_prim, submodules, functions = tclstring_to_nested_list(bluetcl.eval('Bluetcl::module submods %s' % self.name))
        if len(functions) != 0:
            print('There is a function used in %s' % self.name)
        # If there is only one submodule, "Bluetcl::module submods <mod>" doesn't return a list of lists
        if isinstance(submodules, str):
            if submodules == '':
                submodules = tuple()
            else:
                submodules = (tuple(submodules.split()),)
        if user_or_prim == 'user':
            self.submodules = submodules
        else:
            # primitive modules: no user-visible submodules
            self.submodules = tuple()
        # get rule info (methods called by each rule)
        self.method_calls_by_rule = {}
        for rule in self.execution:
            rule_info = tclstring_to_list(bluetcl.eval('Bluetcl::rule full %s %s' % (self.name, rule)))
            # look for item that has 'methods' as its first element
            # It is usually the 3rd element, but if a rule has attributes, then it is the 4th element
            methods_tclstring = None
            for i in range(len(rule_info)):
                if rule_info[i].startswith('methods'):
                    # no break: the last matching entry wins if several match
                    methods_tclstring = str(rule_info[i][len('methods '):])
            if methods_tclstring is None:
                raise Exception('"method" tag was not found in "Bluetcl::rule full <mod> <rule>"')
            method_calls = tclstring_to_flat_list(methods_tclstring)
            self.method_calls_by_rule[rule] = method_calls
        # returns an interface name with the package name as a prefix (ex: GCD::GCD)
        self.interface = BluespecInterface(self.name, bluetcl)
        self.interface_name = bluetcl.eval('Bluetcl::module ifc %s' % self.name)
        self.interface_methods = bluetcl.eval('Bluetcl::module methods %s' % self.name)
        self.ports = tclstring_to_nested_list(bluetcl.eval('Bluetcl::module ports %s' % self.name))
        self.port_types = bluetcl.eval('Bluetcl::module porttypes %s' % self.name)
class BluetclAssumptionError(Exception):
    """Raised when data returned by Bluetcl violates an assumption this code relies on."""
class BluespecInterfaceMethod:
    """One method of a Bluespec interface together with its Verilog port mapping.

    Attributes:
        name -- bluespec-level method name
        ready -- verilog name of the ready signal (or None)
        enable -- verilog name of the enable signal (or None for value methods)
        args -- list of (bluespec_name, verilog_name, type) tuples
        result -- (verilog_name, type) tuple, or None for Action methods
    """
    def __init__(self, name, ready, enable, args, result):
        # bluespec name
        self.name = name
        # verilog name
        self.ready = ready
        # verilog name
        self.enable = enable
        # list of (bluespec_name, verilog_name, type) tuples
        self.args = args
        # (verilog name, type)
        self.result = result

    def bsv_decl(self):
        """Return the BSV source declaration for this method (no trailing ';')."""
        # for now we're making assumptions about what methods are action methods
        # TODO: in the future, use type information from "Bluetcl::type full" to get the actual return type
        # identity comparison with None (was `self.enable != None`)
        is_action = self.enable is not None
        if self.result is None:
            # just assume its an action method, because a void method doesn't make sense
            return_type = 'Action'
        elif is_action:
            return_type = 'ActionValue#({})'.format(self.result[1])
        else:
            return_type = self.result[1]
        arg_decls = [ arg_type + ' ' + bluespec_name for bluespec_name, _, arg_type in self.args]
        decl = '{} {}({})'.format(return_type, self.name, ', '.join(arg_decls))
        return decl

    def to_dict(self):
        """Return a plain-dict view of this method (used when exporting interfaces)."""
        return {
            'name' : self.name,
            'ready' : self.ready,
            'enable' : self.enable,
            'args' : self.args,
            'result' : self.result }
# This is actually for an interface instance
class BluespecInterface:
    """Interface information (methods, clocks, resets) for one module instance.

    Parsed from "Bluetcl::module ports/porttypes" output; methods are stored as
    (hierarchy_tuple, BluespecInterfaceMethod) pairs in self.methods.
    """
    def __init__(self, module_name, bluetcl):
        name = bluetcl.eval('Bluetcl::module ifc %s' % module_name)
        if '::' in name:
            self.interface_type_package = name.split('::')[0]
            self.interface_type_name = name.split('::')[1]
        else:
            # This is for the Empty Interface
            self.interface_type_package = None
            self.interface_type_name = name
        # porttypes and ports will hold data directly from bluetcl
        # porttypes example: (('CLK', 'Clock'), ('RST_N', 'Reset'), ('start_a', 'Bit#(32)'), ...)
        self.porttypes = tclstring_to_nested_list(bluetcl.eval('Bluetcl::module porttypes %s' % module_name))
        # ports example: (('interface', (<method>, <method>, ...)), ('args', (('clock', 'default_clock', ('ocs', 'CLK')), ('reset', 'default_reset', ('port', 'RST_N'), ('clock', 'default_clock'))))
        # <method> example: (('method', 'start', 'start', ('clock', 'default_clock'), ('reset', 'default_reset'), ('args', (<arg>, <arg>)), ('enable', 'EN_start'), ('ready', 'RDY_start')))
        # <arg> example: (('name', 'start_a'), ('port', 'start_a'), ('size', '32'))
        self.raw_ports = bluetcl.eval('Bluetcl::module ports %s' % module_name)
        self.ports = tclstring_to_nested_list(self.raw_ports)
        # self.ports = tclstring_to_nested_list(bluetcl.eval('Bluetcl::module ports %s' % module_name))
        # function to access data in ports in a dictionary-like manner
        def get_item(nested_list, item_name, none_ok = False):
            ret = None
            for item in nested_list:
                if item[0] == item_name:
                    if ret is not None:
                        raise BluetclAssumptionError('"{}" appears more than once in list'.format(item_name))
                    if len(item) == 2:
                        ret = item[1]
                    else:
                        ret = item[1:]
            if ret is None and not none_ok:
                raise BluetclAssumptionError('"{}" does not appear in list'.format(item_name))
            return ret
        # recursively collect methods, flattening subinterfaces into a
        # hierarchy-prefix tuple
        def get_methods(raw_methods, prefix = ()):
            methods = []
            for raw_method in raw_methods:
                if raw_method[0] == 'interface':
                    # this is actually a subinterface or a value method returning a tuple
                    subinterface_name = raw_method[1]
                    if isinstance(raw_method[2], str):
                        if raw_method[2] == '':
                            continue
                        else:
                            methods.extend(get_methods([tclstring_to_nested_list(raw_method[2])], (*prefix, subinterface_name)))
                    else:
                        methods.extend(get_methods(raw_method[2], (*prefix, subinterface_name)))
                elif raw_method[0] == 'method':
                    short_method_name = raw_method[1]
                    # underscore separates subinterfaces and method name
                    # NOTE(review): full_method_name is currently unused
                    full_method_name = raw_method[2]
                    ready = get_item(raw_method[3:], 'ready', none_ok = True)
                    enable = get_item(raw_method[3:], 'enable', none_ok = True)
                    raw_args = get_item(raw_method[3:], 'args')
                    # if there are no args, raw_args will be ""
                    # if there is one arg, raw_args will return a tclstring due to a problem in tclstring_to_nested_list
                    # this code tries to fix it
                    if isinstance(raw_args, str):
                        if raw_args == '':
                            raw_args = []
                        else:
                            raw_args = [tclstring_to_nested_list(raw_args)]
                    args = []
                    result_name = get_item(raw_method[3:], 'result', none_ok = True)
                    if result_name is None:
                        result = None
                    else:
                        result_type = get_item(self.porttypes, result_name)
                        if isinstance(result_type, tuple) or isinstance(result_type, list):
                            result_type = ''.join(result_type)
                        result = (result_name, result_type)
                    # if there are no arguments, raw_method_args = '', which still works with this for loop
                    for raw_arg in raw_args:
                        arg_name = get_item(raw_arg, 'name')
                        arg_port = get_item(raw_arg, 'port')
                        arg_type = get_item(self.porttypes, arg_port)
                        if isinstance(arg_type, tuple) or isinstance(arg_type, list):
                            arg_type = ''.join(arg_type)
                        args.append((arg_name, arg_port, arg_type))
                        # arg_size = int(get_item(raw_arg, 'size'))
                    methods.append( ((*prefix, short_method_name), BluespecInterfaceMethod(short_method_name, ready, enable, args, result)) )
            return methods
        raw_methods = get_item(self.ports, 'interface')
        self.methods = get_methods(raw_methods, ())
        self.clocks = [ port_name for port_name, port_type in self.porttypes if port_type == 'Clock']
        self.resets = [ port_name for port_name, port_type in self.porttypes if port_type == 'Reset']

    def bsv_decl(self):
        """Return a BSV interface declaration reconstructed from self.methods."""
        decl = 'interface {};\n'.format(self.interface_type_name)
        curr_hierarchy = ()
        for full_name, method in self.methods:
            method_hierarchy = full_name[:-1]
            while method_hierarchy[:len(curr_hierarchy)] != curr_hierarchy:
                # remove a level of the current hierarchy until the two share a common prefix
                curr_hierarchy = curr_hierarchy[:-1]
                decl += ' ' * (len(curr_hierarchy)+1) + 'endinterface\n'
            while len(curr_hierarchy) < len(method_hierarchy):
                # add a level to the current hierarchy until curr_hierarchy == method_hierarchy
                decl += ' ' * (len(curr_hierarchy)+1) + 'interface {};\n'.format(method_hierarchy[len(curr_hierarchy)])
                curr_hierarchy = (*curr_hierarchy, method_hierarchy[len(curr_hierarchy)])
            decl += ' ' * (len(curr_hierarchy)+1) + '{};\n'.format(method.bsv_decl())
        decl += 'endinterface'
        return decl
|
import pytest
import numpy as np
from mesohops.dynamics.hops_aux import AuxiliaryVector
from mesohops.util.exceptions import AuxError
def test_auxvec_ordering():
    """
    Tests that an improperly ordered array_aux_vec raises AuxError while a
    properly ordered one constructs normally.
    """
    # properly ordered sparse vector constructs fine
    aux_1010 = AuxiliaryVector([(0, 1), (2, 1)], 4)
    # isinstance instead of `type(x) == T` (anti-pattern)
    assert isinstance(aux_1010, AuxiliaryVector)
    # mode indices out of order must raise
    with pytest.raises(AuxError) as excinfo:
        aux_1010 = AuxiliaryVector([(2, 1), (0, 1)], 4)
    assert 'array_aux_vec not properly ordered' in str(excinfo.value)
def test_keys():
    """
    Tests that keys() returns the sparse mode indices of the vector.
    """
    aux = AuxiliaryVector([(0, 1), (2, 1)], 4)
    assert np.array_equal(aux.keys(), np.array([0, 2]))
def test_values():
    """
    Tests that values() returns the nonzero entries of the vector.
    """
    aux = AuxiliaryVector([(0, 1), (2, 1)], 4)
    assert np.array_equal(aux.values(), np.array([1, 1]))
def test_compare_if():
    """
    Tests that _compare properly orders auxiliary vectors (greater-than).
    """
    aux_1010 = AuxiliaryVector([(0, 1), (2, 1)], 4)
    aux_1000 = AuxiliaryVector([(0, 1)], 4)
    # assert truthiness directly instead of `== True` (E712)
    assert aux_1010._compare(aux_1000, lambda s, o: s > o)
def test_compare_else():
    """
    Tests that _compare returns False when the other operand is not an
    AuxiliaryVector.
    """
    aux_1010 = AuxiliaryVector([(0, 1), (2, 1)], 4)
    # assert falsiness directly instead of `== False` (E712)
    assert not aux_1010._compare([(1, 0, 1, 0)], lambda s, o: s > o)
def test_dot():
    """
    Tests that dot() computes the inner product with a dense vector.
    """
    aux = AuxiliaryVector([(0, 1), (1, 2), (2, 3), (3, 4)], 4)
    assert aux.dot(np.array([1, 1, 1, 1])) == 10
def test_sum():
    """
    Tests that the sparse values are summed correctly, both through the
    .sum() method and through np.sum().
    """
    aux = AuxiliaryVector([(0, 1), (2, 1), (4, 2)], 6)
    assert aux.sum() == 4
    # np.sum must agree with the method
    assert np.sum(aux) == 4
def test_todense():
    """
    Tests conversion of a sparse auxiliary vector to its dense form.
    """
    dense = AuxiliaryVector([(0, 1), (2, 1), (4, 1)], 6).todense()
    assert tuple(dense) == (1, 0, 1, 0, 1, 0)
def test_toarray():
    """
    Tests conversion of a sparse auxiliary vector into an (index, value)
    array.
    """
    arr = AuxiliaryVector([(1, 1), (3, 1), (5, 1)], 6).toarray()
    assert np.array_equal(arr, np.array([[1, 1], [3, 1], [5, 1]]))
def test_get_values():
    """
    Tests that get_values returns the dense values at the requested
    sub-indices.
    """
    aux = AuxiliaryVector([(0, 1), (2, 1), (4, 1)], 6)
    assert np.array_equal(aux.get_values([4, 5]), np.array([1, 0]))
def test_get_values_nonzero():
    """
    Tests that get_values_nonzero returns only the nonzero values within the
    requested sub-indices.
    """
    aux = AuxiliaryVector([(0, 1), (2, 1), (4, 1)], 6)
    assert np.array_equal(aux.get_values_nonzero([2, 3, 4, 5]),
                          np.array([1, 1]))
def test_hash_from_estep():
    """
    Tests that hash_from_e_step returns the hash of the auxiliary vector
    reached by stepping 0/+1/-1 in a given mode.
    """
    aux_2000 = AuxiliaryVector([(0, 2)], 4)
    aux_1001 = AuxiliaryVector([(0, 1), (3, 1)], 4)
    aux_1011 = AuxiliaryVector([(0, 1), (2, 1), (3, 1)], 4)
    aux_1000 = AuxiliaryVector([(0, 1)], 4)
    aux_0000 = AuxiliaryVector([], 4)
    # (expected hash, starting aux, mode, step)
    cases = [
        # step = 0
        (aux_1000.hash, aux_1000, 3, 0),
        (aux_0000.hash, aux_0000, 0, 0),
        (aux_1011.hash, aux_1011, 2, 0),
        # step = +1
        (aux_1001.hash, aux_1000, 3, 1),
        (aux_2000.hash, aux_1000, 0, 1),
        (aux_1000.hash, aux_0000, 0, 1),
        (aux_1011.hash, aux_1001, 2, 1),
        # step = -1 (stepping below zero yields the (0, -1) sentinel)
        (hash(((0, -1),)), aux_0000, 0, -1),
        (aux_0000.hash, aux_1000, 0, -1),
        (aux_1000.hash, aux_2000, 0, -1),
        (aux_1000.hash, aux_1001, 3, -1),
        (aux_1001.hash, aux_1011, 2, -1),
    ]
    for expected, aux, mode, step in cases:
        assert expected == aux.hash_from_e_step(mode, step)
def test_e_step():
    """
    Tests that e_step returns a new AuxiliaryVector with the desired step
    applied in the given mode.
    """
    aux_2000 = AuxiliaryVector([(0, 2)], 4)
    aux_1001 = AuxiliaryVector([(0, 1), (3, 1)], 4)
    aux_1000 = AuxiliaryVector([(0, 1)], 4)
    aux_1011 = AuxiliaryVector([(0, 1), (2, 1), (3, 1)], 4)
    aux_0000 = AuxiliaryVector([], 4)
    aux_m1 = AuxiliaryVector([(0, -1)], 4)
    # (expected aux, starting aux, mode, step)
    cases = [
        # step = 0
        (aux_1000, aux_1000, 3, 0),
        (aux_0000, aux_0000, 0, 0),
        (aux_1011, aux_1011, 2, 0),
        # step = +1
        (aux_1001, aux_1000, 3, 1),
        (aux_2000, aux_1000, 0, 1),
        (aux_1000, aux_0000, 0, 1),
        (aux_1011, aux_1001, 2, 1),
        # step = -1
        (aux_m1, aux_0000, 0, -1),
        (aux_0000, aux_1000, 0, -1),
        (aux_1000, aux_2000, 0, -1),
        (aux_1000, aux_1001, 3, -1),
        (aux_1001, aux_1011, 2, -1),
    ]
    for expected, aux, mode, step in cases:
        assert expected == aux.e_step(mode, step)
def test_index_analytic():
    """
    Tests that absolute_index returns the analytic absolute index of an
    auxiliary vector (based on alpha-numerical ordering).
    """
    aux = AuxiliaryVector([(0, 1), (2, 1)], 4)
    assert aux.absolute_index == 7
def test_tuple_from_e_step():
    """
    Tests that tuple_from_e_step returns the correct sparse tuple
    representation of the stepped auxiliary vector.
    """
    aux_0101 = AuxiliaryVector([(1, 1), (3, 1)], 4)
    aux_1010 = AuxiliaryVector([(0, 1), (2, 1)], 4)
    aux_1000 = AuxiliaryVector([(0, 1)], 4)
    aux_0000 = AuxiliaryVector([], 4)
    aux_empty = AuxiliaryVector([], 4)
    # step = 0 leaves the representation unchanged
    assert aux_1000.tuple_from_e_step(2, 0) == ((0, 1),)
    assert aux_0000.tuple_from_e_step(0, 0) == ()
    # mode + step < 0 yields the (mode, -1) sentinel
    assert aux_0000.tuple_from_e_step(0, -1) == ((0, -1),)
    # empty auxiliary vector (len(self.dict_aux_vec) == 0)
    assert aux_empty.tuple_from_e_step(1, 1) == ((1, 1),)
    # mode present and its value steps down to 0 (entry removed)
    assert aux_0101.tuple_from_e_step(1, -1) == ((3, 1),)
    # mode present in array_aux_vec[:, 0]
    assert aux_0101.tuple_from_e_step(1, 1) == ((1, 2), (3, 1))
    # mode absent (generic else branch)
    assert aux_1010.tuple_from_e_step(3, 1) == ((0, 1), (2, 1), (3, 1))
def test_add_aux_connect():
    """
    Tests that add_aux_connect stores a pointer to the connected HopsAux
    object for type +1/-1 and rejects any other connection type.
    """
    aux_1001 = AuxiliaryVector([(0, 1), (3, 1)], 4)
    aux_1011 = AuxiliaryVector([(0, 1), (2, 1), (3, 1)], 4)
    aux_1000 = AuxiliaryVector([(0, 1)], 4)
    aux_1002 = AuxiliaryVector([(0, 1), (3, 2)], 4)
    # connection type == +1
    aux_1001.add_aux_connect(2, aux_1011, 1)
    assert aux_1001.dict_aux_p1[2] == aux_1011
    # connection type == -1
    aux_1001.add_aux_connect(3, aux_1000, -1)
    assert aux_1001.dict_aux_m1[3] == aux_1000
    # any other connection type raises
    with pytest.raises(AuxError) as excinfo:
        aux_1002.add_aux_connect(3, aux_1000, 2)
    assert ('There is a problem in the hierarchy: add_aux_connect does not '
            'support type=2') in str(excinfo.value)
def test_remove_aux_connect():
    """
    Tests that remove_aux_connect deletes the connection of type +1/-1 along
    the given mode and rejects any other connection type.
    """
    aux_1001 = AuxiliaryVector([(0, 1), (3, 1)], 4)
    aux_1011 = AuxiliaryVector([(0, 1), (2, 1), (3, 1)], 4)
    aux_1000 = AuxiliaryVector([(0, 1)], 4)
    # add then remove a +1 connection
    aux_1001.add_aux_connect(2, aux_1011, 1)
    aux_1001.remove_aux_connect(2, 1)
    assert aux_1001.dict_aux_p1 == {}
    # add then remove a -1 connection
    aux_1001.add_aux_connect(3, aux_1000, -1)
    aux_1001.remove_aux_connect(3, -1)
    assert aux_1001.dict_aux_m1 == {}
    # any other connection type raises
    with pytest.raises(AuxError) as excinfo:
        aux_1001.remove_aux_connect(3, 2)
    assert ('There is a problem in the hierarchy: remove_aux_connect does '
            'not support type=2') in str(excinfo.value)
def test_remove_pointers():
    """
    Tests that remove_pointers removes every pointer to and from the current
    HopsAux object within its connected set.
    """
    aux_1012 = AuxiliaryVector([(0, 1), (2, 1), (3, 2)], 4)
    aux_1011 = AuxiliaryVector([(0, 1), (2, 1), (3, 1)], 4)
    aux_1010 = AuxiliaryVector([(0, 1), (2, 1)], 4)
    # wire both +1 and -1 connections around aux_1011
    aux_1011.add_aux_connect(3, aux_1012, 1)
    aux_1011.add_aux_connect(3, aux_1010, -1)
    aux_1012.add_aux_connect(3, aux_1011, -1)
    aux_1010.add_aux_connect(3, aux_1011, 1)
    aux_1011.remove_pointers()
    # every connection dictionary touching aux_1011 must now be empty
    for connections in (aux_1011.dict_aux_p1, aux_1011.dict_aux_m1,
                        aux_1012.dict_aux_m1, aux_1010.dict_aux_p1):
        assert connections == {}
def test_difference_by_mode():
    """
    Tests that difference_by_mode returns the single mode along which two
    HopsAux objects differ by exactly one step, False when they differ by
    more than one step, and raises AssertionError when the objects do not
    belong to the same hierarchy.
    """
    aux_1012 = AuxiliaryVector([(0, 1), (2, 1), (3, 2)], 4)
    aux_3012 = AuxiliaryVector([(0, 3), (2, 1), (3, 2)], 4)
    aux_2012 = AuxiliaryVector([(0, 2), (2, 1), (3, 2)], 4)
    aux_1112 = AuxiliaryVector([(0, 1), (1, 1), (2, 1), (3, 2)], 4)
    aux_1022 = AuxiliaryVector([(0, 1), (2, 2), (3, 2)], 4)
    aux_1013 = AuxiliaryVector([(0, 1), (2, 1), (3, 3)], 4)
    aux_10130 = AuxiliaryVector([(0, 1), (2, 1), (3, 3)], 5)
    # more than one step in mode 0 -> False
    assert aux_1012.difference_by_mode(aux_3012) is False
    # single-step differences in modes 1, 2 and 3
    assert aux_1012.difference_by_mode(aux_1112) == [1]
    assert aux_1012.difference_by_mode(aux_1022) == [2]
    assert aux_1012.difference_by_mode(aux_1013) == [3]
    # differences spread across several modes -> False
    assert aux_2012.difference_by_mode(aux_1112) is False
    # different hierarchy sizes are not comparable
    with pytest.raises(AssertionError):
        aux_10130.difference_by_mode(aux_1013)
|
# -*- coding:utf8 -*-
from __future__ import print_function

import re
from string import punctuation

import numpy as np
from nltk.corpus import stopwords
from nltk.stem import SnowballStemmer
from nltk.stem.porter import *
from keras.preprocessing.text import Tokenizer
from keras.preprocessing import sequence

from config import Config
from data_loader import Data_Loader
class Pre_Process(object):
    """Text preprocessing for the keras IMDB sentiment pipeline.

    Two entry points:

    * ``process``           -- pad the already-tokenized keras imdb dataset.
    * ``process_from_file`` -- clean raw text files, fit a keras
                              ``Tokenizer`` and pad the index sequences.
    """

    # (pattern, replacement) pairs applied in order by text_to_wordlist().
    # The order is significant: earlier generic substitutions (e.g. stripping
    # '!' or '-') make some of the later, repeated entries no-ops; the
    # original ordering is preserved verbatim to keep behavior unchanged.
    _CLEANUP_RULES = [
        (r"what's", "what is "),
        (r"\(", " "),
        (r"\)", " "),
        (r"\(", " "),
        (r"\)", " "),
        (r"\:", " "),
        (r"\:", " "),
        (r"\;", " "),
        (r"\;", " "),
        (r"\!", " "),
        (r"\?", " "),
        (r"\?", " "),
        (r"\-", " "),
        (r"<br />", " "),
        (r"\'s", " is "),
        (r"\'ve", " have "),
        (r"can't", "cannot "),
        (r"n't", " not "),
        (r"i'm", "i am "),
        (r"\'re", " are "),
        (r"\'d", " would "),
        (r"\'ll", " will "),
        (r"\,", " "),
        (r"\.", " "),
        (r"\!", " ! "),
        (r"\/", " "),
        (r"\\", " "),
        (r"\^", " ^ "),
        (r"\+", " + "),
        (r"\-", " - "),
        (r"\=", " = "),
        (r"\'", " "),
        (r"\‘", " "),
        (r"\’", " "),
        (r"\“", " "),
        (r"\”", " "),
        (r"\"", " "),
        (r"(\d+)(k)", r"\g<1>000"),
        (r":", " : "),
        (r" e g ", " eg "),
        (r" b g ", " bg "),
        (r" u s ", " american "),
        (r"\0s", "0"),
        (r" 9 11 ", "911"),
        (r"e - mail", "email"),
        (r"j k", "jk"),
        (r"\s{2,}", " "),
        (r"\'ve", " have "),
        (r"can't", "cannot "),
        (r"n't", " not "),
        (r"I'm", "I am"),
        (r" m ", " am "),
        (r"\'re", " are "),
        (r"\'d", " would "),
        (r"\'ll", " will "),
        (r"60k", " 60000 "),
        (r"quikly", "quickly"),
        (r" usa ", " america "),
        (r" u s ", " america "),
        (r" uk ", " england "),
        (r"imrovement", "improvement"),
        (r"intially", "initially"),
        (r" dms ", "direct messages "),
        (r"demonitization", "demonetization"),
        (r"actived", "active"),
        (r"kms", " kilometers "),
        (r" cs ", " computer science "),
        (r" upvotes ", " up votes "),
        (r" iPhone ", " phone "),
        (r"\0rs ", " rs "),
        (r"calender", "calendar"),
        (r"ios", "operating system"),
        (r"programing", "programming"),
        (r"bestfriend", "best friend"),
        (r"iii", "3"),
        (r"the us", "america"),
    ]

    def __init__(self):
        print('pre processing...')

    def process(self, config, data_loader):
        """Process from keras imdb dataset.

        :param config: configuration object; uses ``config.maxlen``
        :param data_loader: loader exposing ``load(config)``
        :return: X_train, y_train, X_test, y_test
        """
        X_train, y_train, X_test, y_test = data_loader.load(config)
        print("Pad sequences (samples x time)")
        X_train = sequence.pad_sequences(X_train, maxlen=config.maxlen)
        X_test = sequence.pad_sequences(X_test, maxlen=config.maxlen)
        print('X_train shape:', X_train.shape)
        print('X_test shape:', X_test.shape)
        y_train = np.array(y_train)
        y_test = np.array(y_test)
        print('y_train shape:', y_train.shape)
        print('y_test shape:', y_test.shape)
        return X_train, y_train, X_test, y_test

    def process_from_file(self, config, data_loader):
        """Process from raw text files.

        :param config: configuration; uses ``max_features`` and ``maxlen``
        :param data_loader: loader exposing ``load_from_file(config, split)``
        :return: X_train, y_train, X_test, y_test, word_index
        """
        X_train, y_train = data_loader.load_from_file(config, 'train')
        X_test, y_test = data_loader.load_from_file(config, 'test')
        # Normalize both splits (stopword removal on). The original printed
        # every cleaned sample, flooding stdout; those prints were dropped.
        X_train = [self.text_to_wordlist(x.strip(), True) for x in X_train]
        X_test = [self.text_to_wordlist(x.strip(), True) for x in X_test]
        tokenizer = Tokenizer(num_words=config.max_features)
        tokenizer.fit_on_texts(X_train + X_test)
        X_train_sequences = tokenizer.texts_to_sequences(X_train)
        X_test_sequences = tokenizer.texts_to_sequences(X_test)
        word_index = tokenizer.word_index
        print('Found %s unique tokens' % len(word_index))
        X_train = sequence.pad_sequences(X_train_sequences, maxlen=config.maxlen)
        X_test = sequence.pad_sequences(X_test_sequences, maxlen=config.maxlen)
        print('X_train shape:', X_train.shape)
        print('X_test shape:', X_test.shape)
        y_train = np.array(y_train)
        y_test = np.array(y_test)
        print('y_train shape:', y_train.shape)
        print('y_test shape:', y_test.shape)
        return X_train, y_train, X_test, y_test, word_index

    def text_to_wordlist(self, text, remove_stopwords=False, stem_words=False):
        """Clean raw text into a normalized, space-joined word list.

        :param text: raw input text
        :param remove_stopwords: drop English stopwords (NLTK) when True
        :param stem_words: apply a Snowball stemmer when True
        :return: cleaned text as a single string
        """
        # Lower-case first so the substitution rules only need lower-case
        # forms.
        text = str(text).lower()
        if remove_stopwords:
            words = text.split()
            stops = set(stopwords.words("english"))
            text = " ".join(w for w in words if w not in stops)
        # The original code had `re.sub(ur"\p{P}+", "", text)` here: the
        # `ur""` prefix is a SyntaxError on Python 3 and `\p{...}` classes
        # are not supported by the stdlib `re` module, so the line was
        # removed; punctuation is handled by the rules and the final filter.
        for pattern, repl in self._CLEANUP_RULES:
            text = re.sub(pattern, repl, text)
        # Keep word tokens, dropping separators that are pure whitespace or
        # punctuation. The original pattern '(\W+)?' can match the empty
        # string, which makes re.split() emit None entries on Python 3.7+
        # and crashed on None.strip(); '(\W+)' cannot match empty.
        tokens = re.split(r"(\W+)", str(text))
        text = ' '.join(t for t in tokens
                        if t and t.strip() and t.strip() not in punctuation)
        # Optionally shorten words to their stems
        if stem_words:
            stemmer = SnowballStemmer('english')
            stemmed_words = []
            for word in text.split():
                try:
                    stemmed_words.append(stemmer.stem(word))
                except Exception:
                    # keep the original token if the stemmer chokes on it
                    stemmed_words.append(word)
            text = " ".join(stemmed_words)
        return text

    def test(self):
        """Smoke test: run the raw-file pipeline end to end."""
        # NOTE(review): the `max_feature` / `trian_path` keywords are kept
        # as-is to match Config's signature -- confirm they are not typos of
        # `max_features` / `train_path` (process_from_file reads
        # `config.max_features`).
        config = Config(max_feature=200000, maxlen=400, embedding_dims=300,
                        embedding_file='/home/irlab0/Research/kaggle/Quora_Question_Pairs/data/glove.840B.300d.txt',
                        trian_path='/home/irlab0/Research/TextClassification/imdb/data/aclImdb/train/',
                        test_path='/home/irlab0/Research/TextClassification/imdb/data/aclImdb/test/')
        data_loader = Data_Loader()
        X_train, y_train, X_test, y_test, word_index = self.process_from_file(
            config, data_loader)
        config.get_embedding_matrix(word_index)
        print(X_train[0])
        print(y_train[0])
if __name__ == "__main__":
    # Script entry point: build the preprocessor and run its smoke test.
    Pre_Process().test()
|
<reponame>neelasha23/ploomber
"""
On languages and kernels
------------------------
NotebookSource represents source code in a Jupyter notebook format (language
agnostic). Apart from .ipynb, we also support any other extension supported
by jupytext.
Given a notebook, we have to know which language it is written in to extract
upstream/product variables (though this only happens when the option of
extracting dependencies automatically is on), we also have to determine the
Jupyter kernel to use (this is always needed).
The unequivocal place to store this information is in the notebook metadata
section, but given that we advocate for the use of scripts (converted to
notebooks via jupytext), they most likely won't contain metadata (metadata
saving is turned off by default in jupytext), so we have to infer this
ourselves.
To make things more complex, jupytext adds its own metadata section but we are
ignoring that for now.
Given that there are many places where this information might be stored, we
have a few rules to automatically determine language and kernel given a
script/notebook.
"""
from functools import wraps
import ast
from pathlib import Path
import warnings
import logging
from contextlib import redirect_stdout
from io import StringIO
from copy import deepcopy
from papermill.parameterize import parameterize_notebook
import click
import nbformat
import jupytext
from jupytext import cli as jupytext_cli
from jupytext.formats import long_form_one_format, short_form_one_format
from jupytext.config import JupytextConfiguration
import parso
from ploomber.exceptions import (SourceInitializationError,
MissingParametersCellError)
from ploomber.placeholders.placeholder import Placeholder
from ploomber.util import requires
from ploomber.sources.abc import Source
from ploomber.sources.nb_utils import find_cell_with_tag, find_cell_with_tags
from ploomber.static_analysis.extractors import extractor_class_for_language
from ploomber.static_analysis.pyflakes import check_notebook
from ploomber.sources import docstring
from ploomber.io import pretty_print
class IgnoreBlackWarning(logging.Filter):
    """Logging filter that drops papermill's 'Black is not installed' notice."""

    def filter(self, record):
        # keep every record except the Black-missing warning
        return 'Black is not installed' not in record.msg


logging.getLogger("papermill.translators").addFilter(IgnoreBlackWarning())
def _jupytext_fmt(primitive, extension):
"""
Determine the jupytext fmt string to use based on the content and extension
"""
if extension != 'ipynb':
fmt, _ = jupytext.guess_format(primitive, f'.{extension}')
fmt_final = f'{extension}:{fmt}'
else:
fmt_final = '.ipynb'
return fmt_final
# TODO: we should unit test that this function is called, as opposed to vanilla
# .read_text
def _read_primitive(path):
"""
We read using the UTF-8 instead of the default encoding since notebooks are
always stored in UTF-8.
We can see this in nbformat, which always reads as UTF-8:
https://github.com/jupyter/nbformat/blob/df63593b64a15ee1c37b522973c39e8674f93c5b/nbformat/__init__.py#L125
Scripts are a different story since they may have other encodings, however,
modern editors have UTF-8 as default (example: VSCode
https://docs.microsoft.com/en-us/powershell/scripting/dev-cross-plat/vscode/understanding-file-encoding?view=powershell-7.2#configuring-vs-code)
so it's safer to use UTF-8 than the default encoding.
jupytext already does this:
https://github.com/mwouts/jupytext/issues/896
"""
return Path(path).read_text(encoding='utf-8')
def _get_last_cell(nb):
"""
Get last cell, ignores cells with empty source (unless the notebook only
has one cell and it's empty)
"""
# iterate in reverse order
for idx in range(-1, -len(nb.cells) - 1, -1):
cell = nb.cells[idx]
# only return it if it has some code
if cell['source'].strip():
return cell
# otherwise return the first cell
return nb.cells[0]
def _get_cell_suggestion(nb):
    """Return a user-facing hint telling how to add a code cell, formatted
    for the notebook's jupytext text format (light/percent) when known.
    """
    # jupytext records the text format (if any) in this metadata path
    format_name = nb.metadata.get('jupytext', {}).get('text_representation',
                                                      {}).get('format_name')
    preamble = 'Add a new cell with your code'
    if format_name == 'light':
        message = f'{preamble}:\n' + """
# + tags=["parameters"]
# your parameters here...
# -
# +
# your code here...
# -
"""
    elif format_name == 'percent':
        message = f'{preamble}:\n' + """
# %% tags=["parameters"]
# your parameters here...
# %%
# your code here...
"""
    else:
        # unknown format: generic message
        message = preamble + '.'
    return message
def requires_path(func):
    """
    Decorator: only allow the wrapped method when the NotebookSource
    instance was initialized from a file; raise ValueError otherwise.
    """
    @wraps(func)
    def wrapper(self, *args, **kwargs):
        # sources loaded from strings have no path
        if self._path is not None:
            return func(self, *args, **kwargs)
        raise ValueError(f'Cannot use {func.__name__!r} if notebook was '
                         'not initialized from a file')
    return wrapper
class NotebookSource(Source):
"""
A source object representing a jupyter notebook (or any format supported
by jupytext)
Parameters
----------
hot_reload : bool, optional
Makes the notebook always read the file before rendering
kernelspec_name : str, optional
Which kernel to use for executing the notebook, it overrides any
existing kernelspec metadata in the notebook. If the notebook does
not have kernelspec info, this parameter is required. Defaults to None.
To see which kernelspecs are available run "jupyter kernelspec list"
check_if_kernel_installed : bool, optional
Check if the kernel is installed during initization
Notes
-----
The render method prepares the notebook for execution: it adds the
parameters and it makes sure kernelspec is defined
"""
    @requires([
        'parso', 'pyflakes', 'jupytext', 'nbformat', 'papermill',
        'jupyter_client'
    ])
    def __init__(self,
                 primitive,
                 hot_reload=False,
                 ext_in=None,
                 kernelspec_name=None,
                 static_analysis='regular',
                 check_if_kernel_installed=True):
        """Initialize from a string, Placeholder or pathlib.Path.

        Validates the ``static_analysis`` option, resolves the input
        extension (``ext_in``), infers the language and eagerly parses the
        notebook once (which also validates kernelspec information).
        """
        # any non-py file must first be converted using jupytext, we need
        # that representation for validation, if input is already a .py file
        # do not convert. If passed a string, try to guess format using
        # jupytext. We also need ipynb representation for .develop(),
        # but do lazy loading in case we don't need both
        self._primitive = primitive
        self._check_if_kernel_installed = check_if_kernel_installed
        # this happens if using SourceLoader
        if isinstance(primitive, Placeholder):
            self._path = primitive.path
            self._primitive = str(primitive)
        elif isinstance(primitive, str):
            self._path = None
            self._primitive = primitive
        elif isinstance(primitive, Path):
            self._path = primitive
            if primitive.is_dir():
                raise SourceInitializationError(
                    f'Failed to initialize {str(primitive)!r}. '
                    'Expected a file, got a directory.' +
                    _suggest_ploomber_scaffold_is_dir())
            if not primitive.exists():
                raise SourceInitializationError(
                    f'Failed to initialize {str(primitive)!r}. '
                    'File does not exist.' +
                    _suggest_ploomber_scaffold_missing_file())
            self._primitive = _read_primitive(primitive)
        else:
            raise TypeError('Notebooks must be initialized from strings, '
                            'Placeholder or pathlib.Path, got {}'.format(
                                type(primitive)))
        static_analysis_vals = {'disable', 'regular', 'strict'}
        if static_analysis not in static_analysis_vals:
            raise ValueError(f'{static_analysis!r} is not a '
                             "valid 'static_analysis' value, choose one from: "
                             f'{pretty_print.iterable(static_analysis_vals)}')
        self.static_analysis = static_analysis
        self._kernelspec_name = kernelspec_name
        self._hot_reload = hot_reload
        # TODO: validate ext_in values and extensions
        # hot_reload re-reads the file on access, so it requires a path
        # NOTE(review): message reads "works in the notebook" -- likely "if"
        if self._path is None and hot_reload:
            raise ValueError('hot_reload only works in the notebook was '
                             'loaded from a file')
        # path and ext_in are mutually exclusive sources for the extension
        if self._path is not None and ext_in is None:
            self._ext_in = self._path.suffix[1:]
        elif self._path is None and ext_in is None:
            if Path(self._primitive).exists():
                path = str(self._primitive)
                raise ValueError(
                    f'The file {path!r} you passed looks like '
                    'a path to a file. Perhaps you meant passing a '
                    'pathlib.Path object? Example:\n\n'
                    'from pathlib import Path\n'
                    f'NotebookRunner(Path({path!r}))')
            else:
                raise ValueError(
                    '"ext_in" cannot be None if the notebook is '
                    'initialized from a string. Either pass '
                    'a pathlib.Path object with the notebook file '
                    'location or pass the source code as string '
                    'and include the "ext_in" parameter')
        elif self._path is not None and ext_in is not None:
            raise ValueError('"ext_in" must be None if notebook is '
                             'initialized from a pathlib.Path object')
        elif self._path is None and ext_in is not None:
            self._ext_in = ext_in
        # try to determine language based on extension, though this test
        # might be inconclusive if dealing with a ipynb file, though we only
        # use this to determine the appropriate jupyter kernel when
        # initializing from a string, when initializing from files, the
        # extension is used to determine the kernel
        self._language = determine_language(self._ext_in)
        self._loc = None
        self._params = None
        self._nb_str_unrendered = None
        self._nb_obj_unrendered = None
        self._nb_str_rendered = None
        self._nb_obj_rendered = None
        # this will raise an error if kernelspec_name is invalid
        self._read_nb_str_unrendered()
        self._post_init_validation(str(self._primitive))
@property
def primitive(self):
if self._hot_reload:
self._primitive = _read_primitive(self._path)
return self._primitive
def render(self, params):
"""Render notebook (fill parameters using papermill)
"""
self._params = json_serializable_params(params)
self._render()
    def _render(self):
        """Inject ``self._params`` into the unrendered notebook and store the
        rendered JSON string in ``self._nb_str_rendered``.
        """
        # _read_nb_str_unrendered uses hot_reload, this ensures we always get
        # the latest version
        _, nb = self._read_nb_str_unrendered()
        # a parameters cell that is also the last cell means there is nothing
        # to run after the injected parameters
        if 'parameters' in _get_last_cell(nb).metadata.get('tags', []):
            cell_suggestion = _get_cell_suggestion(nb)
            kind = 'notebook' if self._ext_in == 'ipynb' else 'script'
            raise SourceInitializationError(
                f'Error processing {str(self._path)!r}: the last cell '
                f'in the {kind} is the parameters cell. {cell_suggestion}')
        # this is needed for parameterize_notebook to work
        for cell in nb.cells:
            if not hasattr(cell.metadata, 'tags'):
                cell.metadata['tags'] = []
        nb.metadata['papermill'] = dict()
        # NOTE: we use parameterize_notebook instead of execute_notebook
        # with the prepare_only option because the latter adds a "papermill"
        # section on each cell's metadata, which makes it too verbose when
        # using NotebookRunner.develop() when the source is script (each cell
        # will have an empty "papermill" metadata dictionary)
        nb = parameterize_notebook(nb, self._params)
        # delete empty tags to prevent cluttering the notebooks
        for cell in nb.cells:
            if not len(cell.metadata['tags']):
                cell.metadata.pop('tags')
        self._nb_str_rendered = nbformat.writes(nb)
        self._post_render_validation()
    def _read_nb_str_unrendered(self):
        """
        Returns the notebook representation (JSON string), this is the raw
        source code passed, does not contain injected parameters.
        Adds kernelspec info if not present based on the kernelspec_name,
        this metadata is required for papermill to know which kernel to use.
        An exception is raised if we cannot determine kernel information.

        Returns a ``(nb_str, nb_obj)`` tuple; both are cached unless
        hot_reload is on.
        """
        # hot_reload causes to always re-evaluate the notebook representation
        if self._nb_str_unrendered is None or self._hot_reload:
            # this is the notebook node representation
            nb = _to_nb_obj(
                self.primitive,
                ext=self._ext_in,
                # passing the underscored version
                # because that's the only one available
                # when this is initialized
                language=self._language,
                kernelspec_name=self._kernelspec_name,
                check_if_kernel_installed=self._check_if_kernel_installed,
                path=self._path)
            # if the user injected cells manually (with ploomber nb --inject)
            # the source will contain the injected cell, remove it because
            # it should not be considered part of the source code
            self._nb_obj_unrendered = _cleanup_rendered_nb(nb, print_=False)
            # get the str representation. always write from nb_obj, even if
            # this was initialized with a ipynb file, nb_obj contains
            # kernelspec info
            self._nb_str_unrendered = nbformat.writes(
                self._nb_obj_unrendered, version=nbformat.NO_CONVERT)
        return self._nb_str_unrendered, self._nb_obj_unrendered
    def _post_init_validation(self, value):
        """
        Validate notebook after initialization (run pyflakes to detect
        syntax errors)
        """
        # NOTE(review): `value` is currently unused inside this method --
        # confirm whether it can be removed from the signature
        # NOTE: what happens if I pass source code with errors to parso?
        # maybe we don't need to use pyflakes after all
        # we can also use compile. can pyflakes detect things that
        # compile cannot?
        params_cell, _ = find_cell_with_tag(self._nb_obj_unrendered,
                                            'parameters')
        if params_cell is None:
            loc = ' "{}"'.format(self.loc) if self.loc else ''
            msg = ('Notebook{} does not have a cell tagged '
                   '"parameters"'.format(loc))
            # suggest a fix appropriate to the source format (.py vs .ipynb)
            if self.loc and Path(self.loc).suffix == '.py':
                msg += """.
Add a cell at the top like this:
# %% tags=["parameters"]
upstream = None
product = None
Go to: https://ploomber.io/s/params for more information
"""
            if self.loc and Path(self.loc).suffix == '.ipynb':
                msg += ('. Add a cell at the top and tag it as "parameters". '
                        'Go to the next URL for '
                        'details: https://ploomber.io/s/params')
            raise MissingParametersCellError(msg)
def _post_render_validation(self):
"""
Validate params passed against parameters in the notebook
"""
# NOTE: maybe static_analysis = off should not turn off everything
# but only warn
# strict mode: raise and check signature
# regular mode: _check_notebook called in NotebookRunner.run
if self.static_analysis == 'strict':
self._check_notebook(raise_=True, check_signature=True)
else:
# otherwise, only warn on unused parameters
_warn_on_unused_params(self._nb_obj_unrendered, self._params)
def _check_notebook(self, raise_, check_signature):
if self.static_analysis and self.language == 'python':
# warn if errors (e.g., undeclared variables, syntax errors)
check_notebook(self._nb_str_to_obj(self._nb_str_rendered),
self._params,
filename=self._path or 'notebook',
raise_=raise_,
check_signature=check_signature)
    @property
    def doc(self):
        """
        Returns notebook docstring parsed either from a triple quoted string
        in the top cell or a top markdown cell
        """
        return docstring.extract_from_nb(self._nb_obj_unrendered)
    @property
    def loc(self):
        """Path to the source file (None when initialized from a string)."""
        return self._path
@property
def name(self):
# filename without extension(e.g., plot.py -> plot)
if self._path:
return self._path.stem
    @property
    def nb_str_rendered(self):
        """
        Returns the notebook (as a string) with parameters injected, hot
        reloading if necessary
        """
        if self._nb_str_rendered is None:
            # NOTE(review): message mentions "location" but this guards the
            # rendered source -- confirm the wording
            raise RuntimeError('Attempted to get location for an unrendered '
                               'notebook, render it first')
        if self._hot_reload:
            self._render()
        return self._nb_str_rendered
@property
def nb_obj_rendered(self):
"""
Returns the notebook (as an objet) with parameters injected, hot
reloadig if necessary
"""
if self._nb_obj_rendered is None:
# using self.nb_str_rendered triggers hot reload if needed
self._nb_obj_rendered = self._nb_str_to_obj(self.nb_str_rendered)
return self._nb_obj_rendered
def __str__(self):
    """Return the concatenated source of all cells (unrendered)"""
    # reload if empty or hot_reload=True
    self._read_nb_str_unrendered()
    # FIXME: this should ignore changes to the markdown cells
    return '\n'.join([c.source for c in self._nb_obj_unrendered.cells])

def __repr__(self):
    # show the path when available so tracebacks identify the file
    if self.loc is not None:
        return "{}('{}')".format(type(self).__name__, self.loc)
    else:
        return "{}(loaded from string)".format(type(self).__name__)
@property
def variables(self):
    # not supported for notebook sources
    raise NotImplementedError

@property
def extension(self):
    # this can be Python, R, Julia, etc. We are handling them the same,
    # for now, no normalization can be done.
    # One approach is to use the ext if loaded from file, otherwise None
    return None

# FIXME: add this to the abstract class, probably get rid of "extension"
# since it's not informative (ipynb files can be Python, R, etc)
@property
def language(self):
    """
    Notebook Language (Python, R, etc), this is a best-effort property,
    can be None if we could not determine the language
    """
    if self._language is None:
        self._read_nb_str_unrendered()
        try:
            # make sure you return "r" instead of "R"
            return (self._nb_obj_unrendered.metadata.kernelspec.language.
                    lower())
        except AttributeError:
            # no kernelspec metadata: language unknown
            return None
    else:
        return self._language
def _nb_str_to_obj(self, nb_str):
    # parse a notebook string without converting between nbformat versions
    return nbformat.reads(nb_str, as_version=nbformat.NO_CONVERT)

def _get_parameters_cell(self):
    """Return the source of the cell tagged "parameters" (reloads first)"""
    self._read_nb_str_unrendered()
    cell, _ = find_cell_with_tag(self._nb_obj_unrendered, tag='parameters')
    return cell.source

def extract_upstream(self):
    """Extract upstream dependencies declared in the parameters cell"""
    extractor_class = extractor_class_for_language(self.language)
    return extractor_class(self._get_parameters_cell()).extract_upstream()

def extract_product(self):
    """Extract the product declared in the parameters cell"""
    extractor_class = extractor_class_for_language(self.language)
    return extractor_class(self._get_parameters_cell()).extract_product()
@requires_path
def save_injected_cell(self):
    """
    Inject cell, overwrite the source file (and any paired files)
    """
    fmt_ = _jupytext_fmt(self._primitive, self._ext_in)
    # add metadata to flag that the cell was injected manually
    recursive_update(
        self.nb_obj_rendered,
        dict(metadata=dict(ploomber=dict(injected_manually=True))))
    # Are we updating a text file that has a metadata filter? If so,
    # add ploomber as a section that must be stored
    if (self.nb_obj_rendered.metadata.get(
            'jupytext', {}).get('notebook_metadata_filter') == '-all'):
        recursive_update(
            self.nb_obj_rendered,
            dict(metadata=dict(jupytext=dict(
                notebook_metadata_filter='ploomber,-all'))))
    # overwrite
    jupytext.write(self.nb_obj_rendered, self._path, fmt=fmt_)
    # overwrite all paired files
    for path, fmt_ in iter_paired_notebooks(self.nb_obj_rendered, fmt_,
                                            self._path.stem):
        jupytext.write(self.nb_obj_rendered, fp=path, fmt=fmt_)

@requires_path
def remove_injected_cell(self):
    """
    Delete injected cell, overwrite the source file (and any paired files)
    """
    nb_clean = _cleanup_rendered_nb(self._nb_obj_unrendered)
    # remove metadata (a value of None deletes the key, see
    # recursive_update)
    recursive_update(
        nb_clean,
        dict(metadata=dict(ploomber=dict(injected_manually=None))))
    fmt_ = _jupytext_fmt(self._primitive, self._ext_in)
    # overwrite
    jupytext.write(nb_clean, self._path, fmt=fmt_)
    # overwrite all paired files
    for path, fmt_ in iter_paired_notebooks(self._nb_obj_unrendered, fmt_,
                                            self._path.stem):
        jupytext.write(nb_clean, fp=path, fmt=fmt_)
@requires_path
def format(self, fmt, entry_point):
    """Change source format

    Converts the source to ``fmt``. When the file extension changes,
    the old file is deleted and the entry-point file is updated to
    reference the new filename.

    Parameters
    ----------
    fmt : str
        Target jupytext format string
    entry_point : str
        Path to the pipeline file referencing this source

    Returns
    -------
    str
        The path if the extension changed, None otherwise
    """
    nb_clean = _cleanup_rendered_nb(self._nb_obj_unrendered)
    ext_file = self._path.suffix
    ext_format = long_form_one_format(fmt)['extension']
    extension_changed = ext_file != ext_format
    if extension_changed:
        if Path(entry_point).is_file():
            path = self._path.with_suffix(ext_format)
            # delete the old file; jupytext.write below re-creates the
            # notebook at the chosen path
            Path(self._path).unlink()
            modified_entry = Path(entry_point).read_text()
            main_file = f'{self.name}{ext_file}'
            if main_file in modified_entry:
                modified_entry = modified_entry.replace(
                    main_file, f'{self.name}{ext_format}')
                Path(entry_point).write_text(modified_entry)
            else:
                # fix: adjacent f-strings previously rendered as
                # "...entry-pointplease edit manually"
                click.secho(
                    f'{main_file} does not appear in entry-point, '
                    f'please edit manually\n',
                    fg='yellow')
                path = self._path
        else:
            click.secho(
                "The entry-point is not a valid file, please"
                " update the pipeline file extensions manually\n",
                fg='yellow')
            path = self._path
    else:
        path = self._path
    jupytext.write(nb_clean, path, fmt=fmt)
    return path if extension_changed else None
@requires_path
def pair(self, base_path):
    """Pairs with an ipynb file

    Parameters
    ----------
    base_path : str
        Directory prefix where the paired .ipynb file is stored

    Raises
    ------
    ValueError
        If this source is itself an .ipynb file
    """
    # TODO: add unit test
    if self._ext_in == 'ipynb':
        # fix: typo "Yoy" in user-facing message
        raise ValueError(
            'pairing only works with .py files, got .ipynb. '
            'You may convert the .ipynb to .py and try again.')
    fmt, _ = jupytext.guess_format(self._primitive, f'.{self._ext_in}')
    fmt_ = f'{self._ext_in}:{fmt}'
    # mute jupytext's output
    with redirect_stdout(StringIO()):
        jupytext_cli.jupytext(args=[
            '--set-formats', f'{base_path}//ipynb,{fmt_}',
            str(self._path)
        ])
@requires_path
def sync(self):
    """Sync this source with its paired files (jupytext --sync)
    """
    # mute jupytext's output
    with redirect_stdout(StringIO()):
        jupytext_cli.jupytext(args=['--sync', str(self._path)])
def json_serializable_params(params):
    """
    Convert a Params object into a plain dict whose values are JSON
    serializable (papermill only allows JSON serializable parameters).
    """
    serializable = params.to_dict()
    serializable['product'] = serializable['product'].to_json_serializable()
    upstream = serializable.get('upstream')
    if upstream:
        serializable['upstream'] = upstream.to_json_serializable()
    return serializable
def _to_nb_obj(source,
               language,
               ext=None,
               kernelspec_name=None,
               check_if_kernel_installed=True,
               path=None):
    """
    Convert to jupyter notebook via jupytext, if the notebook does not contain
    kernel information and the user did not pass a kernelspec_name explicitly,
    we will try to infer the language and select a kernel appropriately.

    If a valid kernel is found, it is added to the notebook. If none of this
    works, an exception is raised.

    It also converts the code string to its notebook node representation,
    adding kernel data accordingly.

    Parameters
    ----------
    source : str
        Jupyter notebook (or jupytext compatible formatted) document
    language : str
        Programming language
    ext : str, default=None
        Format hint passed to jupytext (e.g. 'ipynb', 'py:light')
    kernelspec_name : str, default=None
        Explicit kernelspec to use instead of inferring one
    check_if_kernel_installed : bool, default=True
        Verify the kernelspec exists via jupyter_client
    path : str, default=None
        Script/notebook path. If not None, it's used to throw an informative
        error if the notebook fails to load

    Returns
    -------
    nb
        Notebook object

    Raises
    ------
    RenderError
        If the notebook has no kernelspec metadata and kernelspec_name is
        None. A notebook without kernelspec metadata will not display in
        jupyter notebook correctly. We have to make sure all notebooks
        have this.
    """
    import jupytext

    # let jupytext figure out the format
    try:
        nb = jupytext.reads(source, fmt=ext)
    except Exception as e:
        what = 'notebook' if ext == 'ipynb' else 'script'
        err = f'Failed to read {what}'
        if path is not None:
            err += f' from {str(path)!r}'
        raise SourceInitializationError(err) from e

    # NOTE: I can add the cell with parameters here, but what happens if
    # extract_upstream is false? would that be a problem?
    check_nb_kernelspec_info(nb,
                             kernelspec_name,
                             ext,
                             language,
                             check_if_installed=check_if_kernel_installed)
    return nb
def check_nb_kernelspec_info(nb,
                             kernelspec_name,
                             ext,
                             language,
                             check_if_installed=True):
    """Make sure the passed notebook has kernel info

    Parameters
    ----------
    nb : notebook object
        Notebook to annotate in place
    kernelspec_name : str or None
        Explicit kernelspec name, takes precedence over metadata
    ext : str or None
        File extension, used as a language hint
    language : str or None
        Programming language, used as a fallback hint
    check_if_installed : bool
        Also check if the kernelspec is installed, nb.metadata.kernelspec
        to be replaced by whatever information jupyter returns when requesting
        the kernelspec

    Raises
    ------
    SourceInitializationError
        If no kernel name can be determined
    """
    import jupyter_client

    kernel_name = determine_kernel_name(nb, kernelspec_name, ext, language)

    # cannot keep going if we don't have the kernel name
    if kernel_name is None:
        raise SourceInitializationError(
            'Notebook does not contain kernelspec metadata and '
            'kernelspec_name was not specified, either add '
            'kernelspec info to your source file or specify '
            'a kernelspec by name. To see list of installed kernels run '
            '"jupyter kernelspec list" in the terminal (first column '
            'indicates the name). Python is usually named "python3", '
            'R usually "ir"')

    if check_if_installed:
        # raises if the kernelspec is not installed
        kernelspec = jupyter_client.kernelspec.get_kernel_spec(kernel_name)

        nb.metadata.kernelspec = {
            "display_name": kernelspec.display_name,
            "language": kernelspec.language,
            "name": kernel_name
        }
    else:
        # ensure the nested keys exist before assigning
        if 'metadata' not in nb:
            nb['metadata'] = dict()

        if 'kernelspec' not in nb['metadata']:
            nb['metadata']['kernelspec'] = dict()

        # we cannot ask jupyter, so we fill this in ourselves
        nb.metadata.kernelspec = {
            "display_name": 'R' if kernel_name == 'ir' else 'Python 3',
            "language": 'R' if kernel_name == 'ir' else 'python',
            "name": kernel_name
        }
def determine_kernel_name(nb, kernelspec_name, ext, language):
    """
    Determines the kernel name by using the following data (returns whatever
    gives kernel info first): 1) explicit kernel from the user 2) notebook's
    metadata 3) file extension 4) language 5) best guess
    """
    # 1) explicit kernelspec name wins
    if kernelspec_name is not None:
        return kernelspec_name

    # 2) kernelspec already present in the notebook metadata
    try:
        return nb.metadata.kernelspec.name
    except AttributeError:
        pass

    # 3/4) extension overrides the language argument when given
    if ext:
        language = determine_language(ext)

    kernel = {'python': 'python3', 'r': 'ir'}.get(language)
    if kernel:
        return kernel

    # 5) last resort: guess from the notebook's source
    return 'python3' if is_python(nb) else None
def inject_cell(model, params):
    """Inject params (by adding a new cell) to a model

    Notes
    -----
    A model is different than a notebook:
    https://jupyter-notebook.readthedocs.io/en/stable/extending/contents.html
    """
    nb = nbformat.from_dict(model['content'])

    # we must ensure nb has kernelspec info, otherwise papermill will fail to
    # parametrize
    ext = model['name'].split('.')[-1]
    check_nb_kernelspec_info(nb, kernelspec_name=None, ext=ext, language=None)

    # papermill adds a bunch of things before calling parameterize_notebook
    # if we don't add those things, parameterize_notebook breaks
    # https://github.com/nteract/papermill/blob/0532d499e13e93d8990211be33e9593f1bffbe6c/papermill/iorw.py#L400
    if not hasattr(nb.metadata, 'papermill'):
        nb.metadata['papermill'] = {
            'parameters': dict(),
            'environment_variables': dict(),
            'version': None,
        }

    # papermill expects every cell to have a "tags" list
    for cell in nb.cells:
        if not hasattr(cell.metadata, 'tags'):
            cell.metadata['tags'] = []

    params = json_serializable_params(params)

    comment = ('This cell was injected automatically based on your stated '
               'upstream dependencies (cell above) and pipeline.yaml '
               'preferences. It is temporary and will be removed when you '
               'save this notebook')

    model['content'] = parameterize_notebook(nb,
                                             params,
                                             report_mode=False,
                                             comment=comment)
def _cleanup_rendered_nb(nb, print_=True):
    """
    Cleans up a rendered notebook object. Removes cells with tags:
    injected-parameters, debugging-settings, and metadata injected by
    papermill

    Parameters
    ----------
    nb : notebook object
        Notebook to clean (modified in place and returned)
    print_ : bool, default=True
        Print a message for each removed cell
    """
    out = find_cell_with_tags(nb,
                              ['injected-parameters', 'debugging-settings'])

    if print_:
        for key in out.keys():
            print(f'Removing {key} cell...')

    # drop the tagged cells by index
    idxs = set(cell['index'] for cell in out.values())

    nb['cells'] = [
        cell for idx, cell in enumerate(nb['cells']) if idx not in idxs
    ]

    # papermill adds "tags" to all cells that don't have them, remove them
    # if they are empty to avoid cluttering the script
    for cell in nb['cells']:
        if 'tags' in cell.get('metadata', {}):
            if not len(cell['metadata']['tags']):
                del cell['metadata']['tags']

    return nb
def is_python(nb):
    """
    Determine whether a notebook object holds Python code.

    Trusts metadata.kernelspec.language when present; otherwise falls
    back to parsing the concatenated cell sources. The fallback is
    conservative: code that parses as Python but contains "<-" is
    treated as R, and any inconclusive case returns False.
    """
    # 1) trust the kernelspec metadata when it declares a language
    try:
        declared = nb.metadata.kernelspec.language
    except AttributeError:
        declared = None

    if declared is not None:
        return declared == 'python'

    # 2) no metadata: does the code even parse as Python?
    code_str = '\n'.join(cell.source for cell in nb.cells)
    try:
        ast.parse(code_str)
    except SyntaxError:
        return False

    # A lot of R code is also valid Python! "<-" is extremely rare in
    # Python ({less than} {unary minus}) but is R's assignment operator,
    # so use it as the tie-breaker; its presence is inconclusive -> False.
    return '<-' not in code_str
def determine_language(extension):
    """
    Map a file extension to a programming language name (all lowercase).
    Returns None when the extension is unknown or ambiguous (ipynb files
    can hold many languages, so they always map to None).
    """
    # accept both ".py" and "py"
    ext = extension[1:] if extension.startswith('.') else extension

    known = {'py': 'python'}
    for r_variant in ('r', 'R', 'Rmd', 'rmd'):
        known[r_variant] = 'r'

    return known.get(ext)
def recursive_update(target, update):
    """Recursively merge *update* into *target*. Taken from jupytext.header

    A value of None deletes the key, nested dicts are merged, anything
    else overwrites. Returns *target* (modified in place).
    """
    for key, value in update.items():
        if value is None:
            # remove if it exists
            target.pop(key, None)
        elif isinstance(value, dict):
            target[key] = recursive_update(target.get(key, {}), value)
        else:
            target[key] = value
    return target
def parse_jupytext_format(fmt, name):
    """
    Parse a jupytext format string (such as notebooks//ipynb) and return the
    path to the file and the extension

    Parameters
    ----------
    fmt : str
        jupytext format string, possibly including a directory prefix
    name : str
        filename (without extension) joined with the prefix
    """
    fmt_parsed = long_form_one_format(fmt)
    path = Path(fmt_parsed['prefix'], f'{name}{fmt_parsed["extension"]}')
    # drop the prefix so the short form only carries extension/style info
    del fmt_parsed['prefix']
    return path, short_form_one_format(fmt_parsed)
def iter_paired_notebooks(nb, fmt_, name):
    """Yield (path, fmt) for every notebook paired with *nb* via its
    jupytext "formats" metadata, excluding the current format *fmt_*.

    Parameters
    ----------
    nb : notebook object
        Notebook whose metadata declares the pairing
    fmt_ : str
        Format of the current file (excluded from the results)
    name : str
        Filename (without extension) used to build the paired paths
    """
    formats = nb.metadata.get('jupytext', {}).get('formats', '')

    if not formats:
        return

    formats = formats.split(',')
    # fix: the current format may be absent from the pairing metadata
    # (e.g., written by another tool); removing unconditionally raised
    # ValueError
    if fmt_ in formats:
        formats.remove(fmt_)

    # overwrite all paired files
    for path, fmt_current in (parse_jupytext_format(fmt, name)
                              for fmt in formats):
        yield path, fmt_current
def _nb2codestr(nb):
return '\n'.join([c.source for c in nb.cells if c.cell_type == 'code'])
def _warn_on_unused_params(nb, params):
    """Warn if any passed parameter is never referenced in the notebook's code

    Parameters
    ----------
    nb : notebook object
        Rendered notebook (a "parameters"-tagged cell must exist)
    params : mapping
        Parameter name -> value passed to the task
    """
    # work on a copy: the parameters cell is deleted before analysis so
    # declarations there don't count as usages
    nb = deepcopy(nb)
    _, idx = find_cell_with_tag(nb, 'parameters')
    del nb.cells[idx]

    code = _nb2codestr(nb)

    # NOTE: if there a syntax error we cannot accurately check this
    m = parso.parse(code)

    # get_used_names() maps name -> usages; we only need the names
    names = set(m.get_used_names())

    # remove product since it may not be required
    # FIXME: maybe only remove it if it's a dictionary with >2 keys
    unused = set(params) - names - {'product'}

    if unused:
        warnings.warn('These parameters are not used in the '
                      f'task\'s source code: {pretty_print.iterable(unused)}')
def add_parameters_cell(path, extract_upstream, extract_product):
    """
    Add parameters cell to a script/notebook in the given path, overwrites the
    original file

    Parameters
    ----------
    path : str
        Script/notebook to modify
    extract_upstream : bool
        Include an "upstream = None" declaration in the new cell
    extract_product : bool
        Include a "product = None" declaration in the new cell
    """
    source = ''

    if extract_upstream:
        source += """\
# declare a list tasks whose products you want to use as inputs
upstream = None
"""
    if extract_product:
        source += """\
# declare a dictionary with the outputs of this task
product = None
"""

    c = JupytextConfiguration()
    # NOTE(review): no-op attribute access — looks like an assignment was
    # intended (cf. cell_metadata_filter below); confirm
    c.notebook_metadata_filter
    c.cell_metadata_filter = 'all'

    nb = jupytext.read(path)
    new_cell = nbformat.v4.new_code_cell(source,
                                         metadata={'tags': ['parameters']})
    # parameters cell goes at the top of the notebook
    nb.cells.insert(0, new_cell)
    jupytext.write(nb, path, config=c)
def _suggest_ploomber_scaffold_missing_file():
if Path('pipeline.yaml').is_file():
return '\nTo create it, run: ploomber scaffold'
else:
return ''
def _suggest_ploomber_scaffold_is_dir():
if Path('pipeline.yaml').is_file():
return ('\nTo create it, delete the directory, '
'then run: ploomber scaffold')
else:
return ''
|
<reponame>talbertc-usgs/GuanoMDEditor<filename>guanomdeditor/gui/SingleGuanoEditor.py
import sys
import os
from pathlib import Path
from collections import OrderedDict
from PyQt5.QtWidgets import QApplication
from PyQt5.QtWidgets import QMessageBox
from PyQt5.QtWidgets import QWidget
from PyQt5.QtWidgets import QCheckBox
from PyQt5.QtWidgets import QFileDialog
from PyQt5.QtCore import QSettings
from PyQt5.QtCore import Qt
from PyQt5.QtGui import QIcon
from guanomdeditor.gui.ui_files import UI_simple_viewer2
from guanomdeditor.gui.namespace_group import NamespaceGroup
from guanomdeditor.core import utils
from guano import GuanoFile
class SingleGuanoEditor(QWidget):
    """Widget for viewing and editing the GUANO metadata embedded in a
    single .wav file.

    Metadata fields are grouped into namespaces; each namespace's spec is
    loaded from a CSV file under ../resources/specs and rendered as a
    NamespaceGroup widget, toggled via header checkboxes. Files can be
    opened via a browse dialog or by drag & drop.
    """

    def __init__(self, parent=None):
        QWidget.__init__(self, parent=parent)
        self.help_text = ''
        self.in_context = False
        self.ui = None  # set in build_ui
        self.guano_content = None  # dict: namespace -> {tag: value}
        self.namespaces = OrderedDict()  # namespace name -> NamespaceGroup
        self.namespace_chks = OrderedDict()  # namespace name -> QCheckBox
        self.build_ui()
        self.connect_events()

    def build_ui(self):
        """Load the Qt Designer form, enable drag & drop, cache layouts."""
        self.ui = UI_simple_viewer2.Ui_Form()
        self.ui.setupUi(self)
        self.setAcceptDrops(True)
        self.ui.scrollArea.setAcceptDrops(True)
        self.installEventFilter(self)
        self.setMouseTracking(True)
        # NOTE(review): duplicate — drops already accepted above
        self.setAcceptDrops(True)
        icon = QIcon(utils.resource_path('../resources/icons/nabat_circle_color.ico'))
        self.setWindowIcon(icon)
        self.header_layout = self.ui.header_layout
        self.namespace_layout = self.ui.scrollAreaWidgetContents_2.layout()
        self.load_namespaces()

    def connect_events(self):
        """Wire UI signals to their handlers."""
        self.ui.btn_browse.clicked.connect(self.browse)
        # loading happens on any text change, not only via the dialog
        self.ui.file_name.textChanged.connect(self.load_file)
        self.ui.btn_save.clicked.connect(self.save)

    def browse(self):
        """Open a file dialog (starting at the last-used file) and put the
        chosen .wav path into the file-name field (which triggers load_file).
        """
        settings = QSettings('USGS', 'guanoeditor')
        last_data_fname = settings.value('lastDataFname', '')
        if last_data_fname:
            dname, fname = os.path.split(last_data_fname)
        else:
            fname, dname = "", ""
        fname = QFileDialog.getOpenFileName(self, fname, dname,
                                            filter="Wave files (*.wav)")
        if fname[0]:
            # remember this file for the next browse
            settings.setValue('lastDataFname', fname[0])
            self.ui.file_name.setText(fname[0])

    def dragEnterEvent(self, e):
        """Accept the drag only for a single local .wav file."""
        if e.mimeData().hasUrls() and e.mimeData().urls()[0].isLocalFile():
            url = e.mimeData().urls()[0].toLocalFile()
            if url.endswith('.wav'):
                e.accept()
            else:
                e.ignore()
        else:
            e.ignore()

    def dropEvent(self, e):
        """
        Updates the form with the contents of a file dropped onto it
        (setting the file-name field triggers load_file).

        Parameters
        ----------
        e : qt event

        Returns
        -------
        None
        """
        try:
            e.setDropAction(Qt.CopyAction)
            e.accept()
            url = e.mimeData().urls()[0].toLocalFile()
            self.ui.file_name.setText(url)
            # self.from_xml(element)
        # NOTE(review): bare except hides real bugs; narrow to Exception
        except:
            e = sys.exc_info()[0]
            print('problem drop', e)

    def load_namespaces(self):
        """Create one header checkbox per namespace spec CSV found under
        ../resources/specs (checkbox label = namespace with spaces)."""
        namespace_d = Path(utils.resource_path("../resources/specs"))
        namespace_csvs = namespace_d.glob("*.csv")
        for namespace_csv in namespace_csvs:
            namespace = namespace_csv.name.replace(".csv", "")
            namespace_chk = QCheckBox(namespace.replace("_", " "))
            self.header_layout.addWidget(namespace_chk)
            namespace_chk.stateChanged.connect(self.namespace_changed)
            self.namespace_chks[namespace] = namespace_chk

    # NOTE(review): parameter "int" shadows the builtin; it is the Qt
    # checkbox state (0 = unchecked)
    def namespace_changed(self, int):
        """Show/hide (lazily creating) the NamespaceGroup whose checkbox
        changed."""
        this_namespace_sender = self.sender()
        # checkbox label uses spaces; namespace keys use underscores
        namespace = this_namespace_sender.text().replace(" ", "_")
        if int == 0:  # remove this namespace
            if namespace in self.namespaces:
                self.namespaces[namespace].hide()
        else:  # show this namespace
            if namespace in self.namespaces:
                self.namespaces[namespace].show()
            else:
                # first time shown: build the group from its spec CSV
                namespace_fname = utils.resource_path(f"../resources/specs/{namespace}.csv")
                spec = utils.read_namespace(namespace_fname)
                this_namespace = NamespaceGroup(namespace, spec)
                this_namespace.load_data({})
                # insert before the last layout item (spacer/stretch)
                index = self.ui.scrollAreaWidgetContents_2.layout().count()-1
                self.ui.scrollAreaWidgetContents_2.layout().insertWidget(index, this_namespace)
                self.namespaces[namespace] = this_namespace

    def load_file(self):
        """Clear the current groups, then read the GUANO metadata from the
        file named in the file-name field and rebuild one NamespaceGroup
        per namespace found."""
        # tear down existing namespace widgets (keep the trailing spacer)
        for i in reversed(range(self.namespace_layout.count()-1)):
            self.namespace_layout.itemAt(i).widget().setParent(None)
        for namespce_chk in self.namespace_chks.values():
            namespce_chk.setChecked(False)
        self.namespaces = OrderedDict()
        fname = self.ui.file_name.text()
        f = Path(fname)
        # NOTE(review): bare except; Path.exists can raise e.g. OSError on
        # malformed paths — narrow to OSError/ValueError
        try:
            exists = f.exists()
        except:
            exists = False
        if exists:
            try:
                g = GuanoFile(fname)
            # NOTE(review): bare except; narrow to Exception
            except:
                msg = f"There was a problem loading the Guano MD from:\n{fname}\n\nPlease verify that it is a valid wav file"
                QMessageBox.warning(self, "File Error", msg)
                return None
            # index the file's metadata as namespace -> {tag: value}
            self.guano_content = {key:{} for key in g.get_namespaces()}
            for item in g.items_namespaced():
                self.guano_content[item[0]][item[1]] = item[2]
            for namespace in g.get_namespaces():
                # the unnamed namespace is displayed as 'guano_base'
                if namespace == '':
                    namespace = 'guano_base'
                namespace_fname = utils.resource_path(f"../resources/specs/{namespace}.csv")
                if Path(namespace_fname).exists():
                    spec = utils.read_namespace(namespace_fname)
                else:
                    # if we have a namespace we've never seen, load it up as if it was a complete spec
                    spec = [{'tag':tag} for tag in self.guano_content[namespace].keys()]
                this_namespace = NamespaceGroup(namespace, spec)
                if namespace == 'guano_base':
                    this_namespace.load_data(self.guano_content[''])
                else:
                    this_namespace.load_data(self.guano_content[namespace])
                # insert before the trailing spacer
                index = self.ui.scrollAreaWidgetContents_2.layout().count()-1
                self.ui.scrollAreaWidgetContents_2.layout().insertWidget(index, this_namespace)
                self.namespaces[namespace] = this_namespace
                try:
                    self.namespace_chks[namespace].setChecked(True)
                except KeyError:
                    # namespace with no spec CSV has no header checkbox
                    pass

    def save(self):
        """Write the edited metadata back into the wav file. Empty values
        delete the corresponding GUANO key."""
        fname = self.ui.file_name.text()
        try:
            g = GuanoFile(fname)
        # NOTE(review): bare except; narrow to Exception
        except:
            msg = f"There was a problem loading the Guano MD from:\n{fname}\n\nPlease verify that it is a valid wav file"
            QMessageBox.warning(self, "File Error", msg)
            return None
        for namespace_name, namespace_group in self.namespaces.items():
            print(namespace_name)
            namespace_data = namespace_group.get_data()
            # 'guano_base' maps back to the unnamed namespace
            if namespace_name == 'guano_base':
                namespace_name = ''
            for k, v in namespace_data.items():
                if v == '':
                    # blank field: remove the key if it exists
                    try:
                        del g[f"{namespace_name}|{k}"]
                    except KeyError:
                        pass
                else:
                    g[f"{namespace_name}|{k}"] = v
        g.write(make_backup=False)
if __name__ == "__main__":
    # standalone launch: host the editor widget in its own Qt application
    app = QApplication([])
    app.title = 'Guano MD Editor'
    widget = SingleGuanoEditor()
    widget.setWindowTitle(app.title)
    widget.show()
    sys.exit(app.exec_())
|
# -*- coding: utf-8 -*-
from __future__ import division
import matplotlib.pyplot as plt
import numpy as np
# import numpy.linalg as la
from procedural_city_generation.additional_stuff.Singleton import Singleton
from procedural_city_generation.building_generation.building_tools import *
from procedural_city_generation.building_generation.cuts import *
from procedural_city_generation.building_generation.Polygon3D import Polygon3D
singleton = Singleton("building_generation")
def roof(walls, roofwalls, currentheight, housebool, texture, texture2=None):
    """Builds a roof on top of a house, depending on housetype

    Parameters
    ----------
    walls : procedural_city_generation.building_generation.Walls object
        Walls object with cuts
    roofwalls : procedural_city_generation.building_generation.Walls object
        Walls object prior to cuts
    currentheight : float
        Current height, Z coordinate of the base of the roof-Polygon
    housebool : boolean
        Decides if the building is a house or not.
    texture : procedural_city_generation.building_generation.Texture object
        Texture of the roof
    texture2 (optional) : procedural_city_generation.building_generation.Texture object
        Texture of other elements (such as a box in boxroof) on the roof.
        If not specified, will default to texture

    Returns
    -------
    procedural_city_generation.building_generation.Polygon3D object
    """
    # roof height drawn from the configured range
    roofheight = np.random.uniform(
        singleton.roofheight_min, singleton.roofheight_max)
    # classic gabled roof only works for 4-sided houses
    if roofwalls.l == 4 and housebool:
        return houseroof(roofwalls, currentheight, roofheight, texture)
    else:
        return kastenroof(walls, roofwalls, currentheight, roofheight, texture, texture2)
def houseroof(walls, currentheight, roofheight, texture):
    """Creates a "classic" roof with two triangles and two rectangles.
    Used only for houses and assumes that the house has 4 sides.

    Parameters
    -----------
    walls : procedural_city_generation.building_generation.Walls object
    currentheight : float
        Current height, Z coordinate of the base of the roof
    roofheight : float
        Height of the roof itself
    texture : procedural_city_generation.building_generation.Texture object

    Returns
    -------
    list<procedural_city_generation.building_generation.Polygon3D object>
    """
    # Differentiation: the shorter of the first two walls is to be cut in half
    if not np.linalg.norm(np.diff(walls.getWalls()[0], axis=0)) < np.linalg.norm(np.diff(walls.getWalls()[1], axis=0)):
        # rotate vertex order so the ridge runs along the longer side
        walls = Walls(np.roll(walls.vertices, 1, axis=0), walls.l)
    h_low = np.array([0, 0, currentheight])
    h_high = h_low+np.array([0, 0, roofheight])
    # The gable coordinates (midpoints of the two opposite short walls)
    c1, c2 = sum(walls.getWalls()[0]/2), sum(walls.getWalls()[2]/2)
    # Verts are the vertices of the wall and the vertices of the gable
    verts = [x+h_low for x in walls.vertices]+[c1+h_high, c2+h_high]
    # Faces are two rectangles and two triangles
    faces = [(0, 1, 5, 4), (3, 2, 5, 4), (0, 3, 4), (1, 2, 5)]
    return [Polygon3D(verts, faces, texture)]
def kastenroof(walls, roofwalls, currentheight, roofheight, texture, texture2=None):
    """
    Creates a flat roof with a box on top.

    Parameters
    -----------
    walls : procedural_city_generation.building_generation.Walls object
        Walls object after cuts
    roofwalls : procedural_city_generation.building_generation.Walls object
        Walls object prior to cuts
    currentheight : float
        Current height, Z coordinate of the base of the roof
    roofheight : float
        Height of the roof itself
    texture : procedural_city_generation.building_generation.Texture object
    texture2 (optional): procedural_city_generation.building_generation.Texture object
        Will default to texture if not specified

    Returns
    -----------
    - list<procedural_city_generation.building_generation.Polygon3D object>
    """
    # Texture2 is optional: if not given it will be texture1
    texture2 = texture2 if texture2 else texture
    # TODO: Move numeric values to conf.
    # Box is a scaled down version of the roofwalls
    box = scaletransform(roofwalls, random.uniform(0.07, 0.14))
    if not roofwalls.l == 4:
        # Constructs a box with 4 sides if the box did not have 4 sides
        a, b = box.vertices[0], box.vertices[1]
        n = (b-a)
        # n rotated 90 degrees in the XY plane gives the second box axis
        n = np.array([-n[1], n[0], 0])
        box = Walls(np.array([a, b, b+n, a+n]), 4)
    # Checks if every vertex of the box is "inside" the roof polygon so that the box does not float.
    # If this is not the case for every vertex, then just a flat roof is built
    for vert in box.vertices:
        if not p_in_poly(walls, vert):
            return [Polygon3D(walls.vertices+np.array([0, 0, currentheight]), [range(walls.l)], texture)]
    # List of the walls and the top of the box and the flat roof
    return [buildwalls(box, currentheight, currentheight+roofheight, texture2),
            Polygon3D(
                box.vertices+np.array([0, 0, currentheight+roofheight]), [range(4)], texture2),
            Polygon3D(walls.vertices+np.array([0, 0, currentheight]), [range(walls.l)], texture)]
def isleft(wall, point):
    """Cross-product side test used by p_in_poly.

    Positive when *point* lies to the left of the directed segment
    *wall*, negative when to the right, zero when collinear.
    Taken from: http://geomalgorithms.com/a03-_inclusion.html, all
    credits to Dan Sunday.

    Parameters
    ----------
    wall : numpy-array with shape 3, 2
    point : numpy-array with shape 3, 1

    Returns
    -------
    float
    """
    ax, ay = wall[0][0], wall[0][1]
    bx, by = wall[1][0], wall[1][1]
    return (bx - ax) * (point[1] - ay) - (point[0] - ax) * (by - ay)


def p_in_poly(walls, point):
    """
    Returns True if a point is in a "walls" polygon, else False.
    Taken from: http://geomalgorithms.com/a03-_inclusion.html, all
    credits to Dan Sunday.

    Parameters
    ----------
    walls : procedural_city_generation.building_generation.walls object
    point : np.ndarray with shape (3, )

    Returns
    ----------
    boolean
    """
    winding = 0
    for wall in walls.getWalls():
        if wall[0][1] <= point[1]:
            # upward crossing counts when the point is left of the edge
            if wall[1][1] > point[1] and isleft(wall, point) > 0:
                winding += 1
        elif isleft(wall, point) < 0:
            # downward edge with the point to its right
            winding -= 1
    return winding != 0
|
<reponame>dane-king/intake
from django.test import TestCase
from django.conf import settings
from django.urls import reverse
from intake.permissions import get_all_followup_permissions
from user_accounts.tests.factories import UserProfileFactory, UserFactory
from intake.tests.factories import FormSubmissionWithOrgsFactory
class SearchViewTestCase(TestCase):
    """Shared fixtures and assertion helpers for the autocomplete view
    tests: two org users (each with their own submissions), one
    submission shared by both orgs, and a staff user with followup
    permissions."""

    @classmethod
    def setUpClass(cls):
        super().setUpClass()
        # users & subs for two orgs
        # combo sub
        # a staff user with followup permissions
        this_profile = UserProfileFactory()
        other_profile = UserProfileFactory()
        cls.org_user = this_profile.user
        cls.staff_user = UserFactory(is_staff=True)
        UserProfileFactory(user=cls.staff_user)
        cls.staff_user.user_permissions.add(*get_all_followup_permissions())
        # NOTE(review): '<NAME>'/'<EMAIL>' are anonymization placeholders
        # from the dataset, not real fixture values
        answers = dict(
            first_name='<NAME>', last_name='Borges',
            email='<EMAIL>', phone_number='4152124848')
        cls.these_subs = [
            FormSubmissionWithOrgsFactory(
                organizations=[this_profile.organization],
                answers=answers) for i in (1, 2)]
        cls.other_subs = [
            FormSubmissionWithOrgsFactory(
                organizations=[other_profile.organization],
                answers=answers) for i in (1, 2)]
        cls.combo_sub = FormSubmissionWithOrgsFactory(
            organizations=[
                this_profile.organization,
                other_profile.organization],
            answers=answers)

    def assertContainsTheseSubs(self, response):
        # response must link every submission belonging to org_user's org
        for sub in self.these_subs:
            self.assertContains(response, sub.get_absolute_url())

    def assertContainsOtherSubs(self, response):
        # response must link every submission belonging to the other org
        for sub in self.other_subs:
            self.assertContains(response, sub.get_absolute_url())

    def assertNotContainsOtherSubs(self, response):
        # response must NOT leak the other org's submissions
        for sub in self.other_subs:
            self.assertNotContains(response, sub.get_absolute_url())

    def assertContainsComboSub(self, response):
        self.assertContains(response, self.combo_sub.get_absolute_url())

    def login(self, user):
        # NOTE(review): '<PASSWORD>.<PASSWORD>' is anonymization residue
        # (not valid Python); the original likely referenced the factory's
        # known test password — restore before running
        self.client.login(
            username=user.username,
            password=<PASSWORD>.<PASSWORD>)

    def login_as_org_user(self):
        self.login(self.org_user)

    def login_as_staff_user(self):
        self.login(self.staff_user)
class TestApplicantAutocomplete(SearchViewTestCase):
    """Org-scoped autocomplete: org users may only see their own org's
    submissions (plus shared ones); anonymous users are rejected."""

    view_name = 'applications-autocomplete'

    def test_anonymous_users_get_403(self):
        self.client.logout()
        response = self.client.post(reverse(self.view_name), {'q': 'anything'})
        self.assertEqual(response.status_code, 403)

    def test_authenticated_org_users_receive_application_jsons(self):
        self.login_as_org_user()
        response = self.client.post(reverse(self.view_name), {'q': 'Luis'})
        self.assertEqual(response.status_code, 200)
        self.assertContainsTheseSubs(response)
        self.assertContainsComboSub(response)
        # must not leak the other org's submissions
        self.assertNotContainsOtherSubs(response)

    def test_get_method_not_allowed(self):
        self.login_as_org_user()
        response = self.client.get(reverse(self.view_name), {'q': 'Luis'})
        # should return "method not allowed"
        self.assertEqual(response.status_code, 405)

    def test_staff_user_gets_200(self):
        self.login_as_staff_user()
        response = self.client.post(reverse(self.view_name), {'q': 'Luis'})
        self.assertEqual(response.status_code, 200)

    def test_can_find_app_with_phone_number(self):
        # partial phone number should match the fixture '4152124848'
        self.login_as_org_user()
        response = self.client.post(
            reverse(self.view_name), {'q': '2124848'})
        self.assertEqual(response.status_code, 200)
        self.assertContainsTheseSubs(response)
        self.assertContainsComboSub(response)
        self.assertNotContainsOtherSubs(response)

    def test_can_find_app_with_email(self):
        # partial email match
        self.login_as_org_user()
        response = self.client.post(
            reverse(self.view_name), {'q': 'george@fictions'})
        self.assertEqual(response.status_code, 200)
        self.assertContainsTheseSubs(response)
        self.assertContainsComboSub(response)
        self.assertNotContainsOtherSubs(response)
class TestFollowupsAutocomplete(SearchViewTestCase):
    """Staff-only autocomplete: staff users see submissions across ALL
    orgs; org users and anonymous users are rejected."""

    view_name = 'followups-autocomplete'

    def test_anonymous_users_get_403(self):
        self.client.logout()
        response = self.client.post(reverse(self.view_name), {'q': 'Luis'})
        self.assertEqual(response.status_code, 403)

    def test_org_users_get_403(self):
        # unlike applications-autocomplete, org users are not allowed here
        self.login_as_org_user()
        response = self.client.post(reverse(self.view_name), {'q': 'Luis'})
        self.assertEqual(response.status_code, 403)

    def test_staff_users_receive_html(self):
        self.login_as_staff_user()
        response = self.client.post(reverse(self.view_name), {'q': 'Luis'})
        self.assertEqual(response.status_code, 200)
        self.assertContainsTheseSubs(response)
        self.assertContainsComboSub(response)
        # staff results span every org
        self.assertContainsOtherSubs(response)

    def test_get_method_not_allowed(self):
        self.login_as_staff_user()
        response = self.client.get(reverse(self.view_name), {'q': 'Luis'})
        # should return "method not allowed"
        self.assertEqual(response.status_code, 405)

    def test_can_find_app_with_phone_number(self):
        # partial phone number should match the fixture '4152124848'
        self.login_as_staff_user()
        response = self.client.post(
            reverse(self.view_name), {'q': '2124848'})
        self.assertEqual(response.status_code, 200)
        self.assertContainsTheseSubs(response)
        self.assertContainsComboSub(response)
        self.assertContainsOtherSubs(response)

    def test_can_find_app_with_email(self):
        # partial email match
        self.login_as_staff_user()
        response = self.client.post(
            reverse(self.view_name), {'q': 'george@fictions'})
        self.assertEqual(response.status_code, 200)
        self.assertContainsTheseSubs(response)
        self.assertContainsComboSub(response)
        self.assertContainsOtherSubs(response)
|
<filename>test/test_view_individual_module.py<gh_stars>1-10
'''
test_view_individual_module.py tests the app's view individual mod page
'''
from paste.fixture import TestApp
from nose.tools import assert_equal, raises
from app import APP
from components import session
class TestCode(object):
    """Test cases for the app's view-individual-module page.

    NOTE(review): the URL constants originally ended in '"a=...': an
    HTML-entity mangling of '&quota=...' (the '&quot' prefix was decoded
    to '"').  The 'quota' query parameter name is restored here.
    """

    # Valid module, AY/semester and quota.
    URL_CONTAIN_CODE_AY_QUOTA = '/individualModuleInfo?' +\
        'code=BT5110' +\
        '&aysem=AY+16%2F17+Sem+1' +\
        '&quota=60'
    # Valid module in a future AY (quota shown as '-').
    URL_CONTAIN_FUTURE_AY = '/individualModuleInfo?' +\
        'code=BT5110' +\
        '&aysem=AY+17%2F18+Sem+1' +\
        '&quota=60'
    # Nonexistent module code.
    URL_CONTAIN_INVALID_CODE_AY_QUOTA = '/individualModuleInfo?' +\
        'code=CS0123' +\
        '&aysem=AY+16%2F17+Sem+1' +\
        '&quota=60'
    # Malformed AY ('16/18' is not a real academic year).
    URL_CONTAIN_CODE_INVALID_AY_QUOTA = '/individualModuleInfo?' +\
        'code=BT5110' +\
        '&aysem=AY+16%2F18+Sem+1' +\
        '&quota=60'
    # Quota that does not match the mounting record.
    URL_CONTAIN_CODE_AY_INVALID_QUOTA = '/individualModuleInfo?' +\
        'code=BT5110' +\
        '&aysem=AY+16%2F17+Sem+1' +\
        '&quota=70'
    # Module with no quota set (blank value).
    URL_CONTAIN_CODE_AY_NO_QUOTA = '/individualModuleInfo' +\
        '?code=CP3880' +\
        '&aysem=AY+16%2F17+Sem+1' +\
        '&quota='

    # Expected HTML fragments; kept byte-identical to the rendered page.
    FORM_EDIT_MODULE_INFO = '<form id="edit-module-button" name="edit-module-button" ' +\
        'action="/editModule" method="get" class="no-padding-margin">'
    FORM_EDIT_MODULE_INFO_BUTTON = '<input class="dropdown-btn-custom" ' +\
        'type="submit" value="Edit General' +\
        ' Module Info" data-toggle="tooltip" ' +\
        'data-placement="right" title="Edit ' +\
        'the module\'s name, description, MC, ' +\
        'pre-requisites and preclusions">'
    FORM_EDIT_SPECIFIC_MODULE_INFO = '<form id="edit-mounting-button"' +\
        ' name="edit-mounting-button" ' +\
        'action="/editMounting" method="get" ' +\
        'class="no-padding-margin">'
    FORM_EDIT_SPECIFIC_MODULE_INFO_BUTTON = '<button type="button" id="edit-specific-info" ' +\
        'class="dropdown-btn-custom no-padding-margin" ' +\
        'data-toggle="tooltip" data-placement="right" ' +\
        'title="Edit the module\'s mounting and quota">' +\
        'Edit Specific Module Info</button>'
    FORM_STUDENTS_AFFECTED = '<form id="view-students-planning-to-take-module" ' +\
        'name="view-students-planning-to-take-module"' +\
        ' action="/studentsAffectedByModule" ' +\
        'method="get" class="no-padding-margin">'
    FORM_STUDENTS_AFFECTED_BUTTON = '<button type="button" class="dropdown-btn-custom" ' +\
        'data-toggle="tooltip" data-placement="right" ' +\
        'title="Show list of students who have taken, are currently ' +\
        'taking, or are planning to take this module">View Students ' +\
        'Taking This Module</button>'
    FORM_OVERLAPPING_WITH_MODULE = '<form id="view-overlapping-with-module" ' +\
        'name="view-overlapping-with-module"' +\
        ' action="/overlappingWithModule" method="get" ' +\
        'class="no-padding-margin">'
    FORM_OVERLAPPING_WITH_MODULE_BUTTON = '<button type="button" class="dropdown-btn-custom" ' +\
        'data-toggle="tooltip" data-placement="right" ' +\
        'title="Show modules that are also taken with this ' +\
        'module">View Modules Overlapping With This Module' +\
        '</button>'
    CONTENT_SUMMARY = '<h1 class="text-center"><b>Module Info for <u>AY 16/17 ' +\
        'Sem 1</u></b></h1>'
    CONTENT_SUMMARY_FUTURE_AY = '<h1 class="text-center"><b>Module Info for ' +\
        '<u>AY 17/18 Sem 1</u></b></h1>'
    CONTENT_CODE = "BT5110"
    CONTENT_NAME = "Data Management and Warehousing"
    CONTENT_MC = "(4 MCs)"
    CONTENT_BUTTON_TO_OVERVIEW_DATA = '<input type="hidden" name="code" ' +\
        'value="BT5110">'
    CONTENT_BUTTON_TO_OVERVIEW_BUTTON = '<input class="btn btn-lg btn-primary"' +\
        ' type="submit" value="Back to Overview">'
    CONTENT_DESCRIPTION = "Module Description:"
    CONTENT_PRECLUSION = "Module Preclusions:"
    CONTENT_PREREQUISITE = "Module Prerequisites"
    CONTENT_QUOTA = "Class Quota"
    CONTENT_QUOTA_ACTUAL = "60"
    CONTENT_FUTURE_QUOTA = "-"
    CONTENT_CLASS_QUOTA_BLANK = "?"
    DROPDOWN_BTN = '<button type="button" class="btn btn-primary btn-lg' +\
        ' dropdown-toggle dropdown-btn-custom-main" data-toggle="dropdown"' +\
        ' aria-haspopup="true" aria-expanded="false">More Actions <span ' +\
        'class="caret"></span></button>'

    def __init__(self):
        self.middleware = None
        self.test_app = None

    def setUp(self):
        """Set up the 'app.py' fixture and log in a session."""
        self.middleware = []
        self.test_app = TestApp(APP.wsgifunc(*self.middleware))
        session.set_up(self.test_app)

    def tearDown(self):
        """Tear down the 'app.py' fixture and log out."""
        session.tear_down(self.test_app)

    def test_view_individual_module_valid_response(self):
        """A valid module page responds with HTTP 200."""
        root = self.test_app.get(self.URL_CONTAIN_CODE_AY_QUOTA)
        # checks if HTTP response code is 200 (= OK)
        assert_equal(root.status, 200)

    @raises(Exception)
    def test_view_individual_module_invalid_code_response(self):
        """An invalid module code makes the request fail."""
        # Fix: a stray free-standing docstring that followed this request
        # (duplicating the next test's docstring) was dead code; removed.
        root = self.test_app.get(self.URL_CONTAIN_INVALID_CODE_AY_QUOTA)

    @raises(Exception)
    def test_view_individual_module_invalid_ay_sem_response(self):
        """An invalid AY-semester makes the request fail."""
        # AY-Semester used here is '16/18 Sem 1'
        root = self.test_app.get(self.URL_CONTAIN_CODE_INVALID_AY_QUOTA)

    def test_view_individual_module_contents(self):
        """All required info appears on the individual module page."""
        root = self.test_app.get(self.URL_CONTAIN_CODE_AY_QUOTA)
        root.mustcontain(self.CONTENT_SUMMARY)
        root.mustcontain(self.CONTENT_CODE)
        root.mustcontain(self.CONTENT_NAME)
        root.mustcontain(self.CONTENT_MC)
        root.mustcontain(self.CONTENT_BUTTON_TO_OVERVIEW_DATA)
        root.mustcontain(self.CONTENT_BUTTON_TO_OVERVIEW_BUTTON)
        root.mustcontain(self.CONTENT_DESCRIPTION)
        root.mustcontain(self.CONTENT_QUOTA)
        root.mustcontain(self.CONTENT_QUOTA_ACTUAL)
        root.mustcontain(self.FORM_EDIT_MODULE_INFO)
        root.mustcontain(self.FORM_EDIT_MODULE_INFO_BUTTON)
        root.mustcontain(self.FORM_STUDENTS_AFFECTED)
        root.mustcontain(self.FORM_STUDENTS_AFFECTED_BUTTON)
        root.mustcontain(self.FORM_OVERLAPPING_WITH_MODULE)
        root.mustcontain(self.FORM_OVERLAPPING_WITH_MODULE_BUTTON)
        root.mustcontain(self.DROPDOWN_BTN)

    def test_view_individual_module_contents_with_future_ay(self):
        """All required info appears for a future AY and semester."""
        root = self.test_app.get(self.URL_CONTAIN_FUTURE_AY)
        root.mustcontain(self.CONTENT_SUMMARY_FUTURE_AY)
        root.mustcontain(self.CONTENT_CODE)
        root.mustcontain(self.CONTENT_NAME)
        root.mustcontain(self.CONTENT_MC)
        root.mustcontain(self.CONTENT_BUTTON_TO_OVERVIEW_DATA)
        root.mustcontain(self.CONTENT_BUTTON_TO_OVERVIEW_BUTTON)
        root.mustcontain(self.CONTENT_DESCRIPTION)
        root.mustcontain(self.CONTENT_QUOTA)
        root.mustcontain(self.CONTENT_FUTURE_QUOTA)
        root.mustcontain(self.FORM_EDIT_MODULE_INFO)
        root.mustcontain(self.FORM_EDIT_MODULE_INFO_BUTTON)
        root.mustcontain(self.FORM_EDIT_SPECIFIC_MODULE_INFO)
        root.mustcontain(self.FORM_EDIT_SPECIFIC_MODULE_INFO_BUTTON)
        root.mustcontain(self.FORM_STUDENTS_AFFECTED)
        root.mustcontain(self.FORM_STUDENTS_AFFECTED_BUTTON)
        root.mustcontain(self.FORM_OVERLAPPING_WITH_MODULE)
        root.mustcontain(self.FORM_OVERLAPPING_WITH_MODULE_BUTTON)

    def test_view_individual_module_no_quota_valid_response(self):
        """With no quota specified, the page shows the blank marker."""
        root = self.test_app.get(self.URL_CONTAIN_CODE_AY_NO_QUOTA)
        root.mustcontain(self.CONTENT_CLASS_QUOTA_BLANK)

    def test_goto_edit_general_info(self):
        """The 'Edit General Module Info' form submits successfully."""
        root = self.test_app.get(self.URL_CONTAIN_CODE_AY_QUOTA)
        edit_form = root.forms__get()["edit-module-button"]
        response = edit_form.submit()
        assert_equal(response.status, 200)
|
import argparse
import logging
import os
import sys
from configparser import ConfigParser
from functools import wraps
import boto3
from flask import (
Flask,
request,
session,
jsonify,
render_template
)
from flask_sqlalchemy import SQLAlchemy
from osisoft_pi2aws_root import PROJECT_DIR
from scheduling_manager.scheduling_manager import SchedulingManager, NoSuchRuleException
from sqlalchemy import create_engine
from webapp_management_console.json_encoder import CustomJSONEncoder
from workers.managed_feeds.managed_feeds_manager import ManagedFeedsManager
from webapp_management_console.app_exceptions import BackendException, raise_backend_exception
# Serve the pre-built React bundle straight from Flask: index.html comes
# from the build folder and hashed assets from its static/ subfolder.
REACT_FOLDER = os.path.join(PROJECT_DIR, 'webapp_management_console/app/build')
app = Flask(
    __name__,
    template_folder=REACT_FOLDER,
    static_folder=os.path.join(REACT_FOLDER, 'static')
)
# Quiet Flask's own logger; application messages go through the root logger.
app.logger.setLevel(logging.ERROR)
log = logging.getLogger()
@app.errorhandler(BackendException)
def backend_exception_errorhandler(error):
    """Render a BackendException as a JSON response with its status code."""
    payload = jsonify(error.to_dict())
    payload.status_code = error.status_code
    return payload
def login_required(fun):
    """Decorator: reject the request with HTTP 400 unless the session is logged in."""
    @wraps(fun)
    def wrapper(*args, **kwargs):
        if session.get('logged_in') is not None:
            return fun(*args, **kwargs)
        raise BackendException("Not logged in", status_code=400)
    return wrapper
@app.route('/isloggedin', methods=['POST'])
def is_logged_in():
    """Report whether the current session is authenticated."""
    return jsonify({'loggedIn': bool(session.get('logged_in'))})
@app.route('/login', methods=['POST'])
@raise_backend_exception('Unable to log in')
def login():
    """Authenticate the posted credentials and mark the session logged in.

    Fix: credentials are now compared with hmac.compare_digest so the
    comparison runs in constant time, avoiding a timing side channel.
    Returns the same JSON body as /isloggedin.
    """
    from hmac import compare_digest  # stdlib, used only here

    def _matches(supplied, expected):
        # Encode to bytes: compare_digest rejects non-ASCII str input.
        return compare_digest(
            str(supplied).encode('utf-8'), str(expected).encode('utf-8'))

    if _matches(request.json['password'], app.config['webapp_password']) \
            and _matches(request.json['username'], app.config['webapp_username']):
        session['logged_in'] = True
        session['username'] = request.json['username']
    return is_logged_in()
@app.route('/logout', methods=['POST'])
def logout():
    """Clear the session's login flag and report the new state."""
    session['logged_in'] = None
    return is_logged_in()
@app.route('/', defaults={'path': ''})
@app.route('/<path:path>')
@raise_backend_exception('Unable to load page')
def any_root_path(path):
    """Serve the React single-page app shell for every non-API path."""
    return render_template('index.html')
@app.route('/favicon/<path>')
def favicon(path):
    """Serve favicon assets out of the static folder."""
    return app.send_static_file('favicon/{}'.format(path))
def _save_settings(settings):
    """Persist *settings* through the feeds manager, then re-apply them."""
    manager = _create_managed_feeds_manager(app.config)
    manager.save_settings(settings)
    # Reload so app.config picks up a changed afDbName immediately.
    _load_settings()
@app.route('/settings/save', methods=['POST'])
@raise_backend_exception('Unable to save settings')
@login_required
def save_settings():
    """Store the settings object posted by the management console."""
    _save_settings(request.json['settings'])
    return 'ok'
@app.route('/settings', methods=['GET'])
@raise_backend_exception('Unable to load settings')
@login_required
def load_settings():
    """Return the stored settings, with afDbName forced from app config."""
    settings = _get_settings()
    # overwrite afDbName in case it is not present in database
    settings['afDbName'] = app.config['af_structure_database']
    return jsonify(settings)
@app.route('/structure/asset-children', methods=['POST'])
@raise_backend_exception('Unable to load assets')
@login_required
def get_asset_children():
    """List the child assets of the posted parentAssetId."""
    manager = _create_managed_feeds_manager(app.config)
    children = manager.get_asset_children(request.json['parentAssetId'])
    return jsonify(children)
@app.route('/structure/search', methods=['POST'])
@raise_backend_exception('Unable to search assets')
@login_required
def search_assets():
    """Search assets by the posted filters, with optional paging."""
    body = request.json
    manager = _create_managed_feeds_manager(app.config)
    matches = manager.search_assets(
        filters=body['filters'],
        page=body.get('page', 0),
        page_size=body.get('pageSize', 5)
    )
    return jsonify(matches)
@app.route('/structure/asset-attributes', methods=['POST'])
@raise_backend_exception('Unable to search assets')
@login_required
def asset_attributes():
    """Search the attributes of one asset using the posted filters."""
    manager = _create_managed_feeds_manager(app.config)
    attributes = manager.search_asset_attributes(
        request.json['assetId'], request.json['filters'])
    return jsonify(attributes)
def _search_feeds(feed_manager, request_data):
feeds = feed_manager.search_pi_points(
filters=request_data.get('filters'),
pattern=request_data.get('searchPattern'),
pi_points=None,
status=request_data.get('searchForStatus')
)['pi_points']
return [feed['pi_point'] for feed in feeds]
@app.route('/backfill', methods=['POST'])
@raise_backend_exception('Cannot send backfill request')
@login_required
def backfill():
    """Queue a backfill request for the selected (or searched) feeds."""
    body = request.json
    manager = _create_managed_feeds_manager(app.config)
    if body.get('onlySearchedFeeds', False):
        points = _search_feeds(manager, request.get_json())
    else:
        points = body['feeds']
    manager.send_backfill_request(
        query_syntax=body.get('syntax', False),
        query=body.get('query'),
        feeds=points,
        request_to=body.get('to'),
        request_from=body.get('from'),
        name=body.get('name')
    )
    return 'OK'
@app.route('/interpolate', methods=['POST'])
@raise_backend_exception('Cannot send interpolate request')
@login_required
def interpolate():
    """Queue an interpolation request for the selected (or searched) feeds."""
    body = request.json
    manager = _create_managed_feeds_manager(app.config)
    if body.get('onlySearchedFeeds', False):
        points = _search_feeds(manager, request.get_json())
    else:
        points = body['feeds']
    manager.send_interpolate_request(
        query_syntax=body.get('syntax', False),
        feeds=points,
        interval=body['interval'],
        interval_unit=body['intervalUnit'],
        query=body.get('query'),
        request_from=body.get('from'),
        request_to=body.get('to'),
        name=body.get('name')
    )
    return 'OK'
@app.route('/subscribe', methods=['POST'])
@raise_backend_exception('Cannot send subscribe request')
@login_required
def subscribe():
    """Subscribe the selected (or searched) feeds to live updates."""
    manager = _create_managed_feeds_manager(app.config)
    if request.json.get('onlySearchedFeeds', False):
        points = _search_feeds(manager, request.get_json())
    else:
        points = request.json['feeds']
    manager.send_subscribe_request(points)
    return 'OK'
@app.route('/subscribe/<limit>', methods=['GET'])
@raise_backend_exception('Cannot send subscribe request')
@login_required
def subscribe_with_limit(limit):
    """Subscribe the first *limit* known PI points and return them."""
    manager = _create_managed_feeds_manager(app.config)
    rows = manager.managed_feeds_dao.get_pi_points(limit)
    points = [row['pi_point'] for row in rows]
    manager.send_subscribe_request(points)
    return jsonify(points)
@app.route('/unsubscribe', methods=['POST'])
@raise_backend_exception('Cannot send unsubscribe request')
@login_required
def unsubscribe():
    """Unsubscribe the selected (or searched) feeds.

    Fix: the original constructed a second, identical feeds manager just
    before sending the request; one instance is sufficient.
    """
    feed_manager = _create_managed_feeds_manager(app.config)
    if request.json.get('onlySearchedFeeds', False):
        points = _search_feeds(feed_manager, request.get_json())
    else:
        points = request.json['feeds']
    feed_manager.send_unsubscribe_request(points)
    return 'OK'
@app.route('/structure/sync', methods=['POST'])
@raise_backend_exception('Cannot send structure sync request')
@login_required
def request_af_structure_sync():
    """Trigger a sync of the AF structure into the curated-data bucket."""
    manager = _create_managed_feeds_manager(app.config)
    manager.send_sync_af_request(
        app.config['curated_datasets_bucket_name'],
        app.config['af_structure_database']
    )
    return "OK"
@app.route('/feeds/search', methods=['POST'])
@raise_backend_exception('Cannot search PI Points list')
@login_required
def search_feeds():
    """Search the PI point list with optional regex matching and paging."""
    body = request.get_json()
    manager = _create_managed_feeds_manager(app.config)
    feeds = manager.search_pi_points(
        pattern=body.get('query'),
        pi_points=body.get('pi_points'),
        status=body.get('status'),
        use_regex=body.get('useRegex'),
        page=int(body['page']) if 'page' in body else None,
        page_size=int(body['page_size']) if 'page_size' in body else None
    )
    return jsonify(feeds)
@app.route('/feeds/sync', methods=['POST'])
@raise_backend_exception('Cannot send PiPoints sync request')
@login_required
def sync_feeds():
    """Trigger a sync of the PI point list into the curated-data bucket."""
    manager = _create_managed_feeds_manager(app.config)
    manager.send_sync_pi_points_request(
        s3_bucket=app.config['curated_datasets_bucket_name'])
    return "OK"
@app.route('/events/get-recent', methods=['POST'])
@raise_backend_exception('Cannot get recent events')
@login_required
def get_recent_events():
    """Return the most recent events, capped at the posted limit."""
    how_many = int(request.json['limit'])
    manager = _create_managed_feeds_manager(app.config)
    return jsonify(manager.get_recent_events(how_many))
def _format_cron_expression(cron_expr):
return 'cron({})'.format(cron_expr)
@app.route('/athena-info')
@raise_backend_exception('Cannot get athena table name')
@login_required
def get_athena_info():
    """Return the Athena console URL plus the database and table names."""
    region = app.config['region']
    # Region appears twice: once as the console subdomain, once as the
    # query-string parameter.
    console_url = "https://{}.console.aws.amazon.com/athena/home?region={}".format(
        region, region)
    return jsonify({
        'athena_url': console_url,
        'athena_database': app.config['athena_database_name'],
        'athena_numeric_table_name': app.config['athena_numeric_table_name'],
        'athena_text_table_name': app.config['athena_text_table_name']
    })
@app.route('/scheduler/structure', methods=['POST'])
@raise_backend_exception('Cannot schedule structure sync')
@login_required
def schedule_structure_sync():
    """Create or refresh the CloudWatch rule that syncs the AF structure."""
    manager = _create_scheduling_manager()
    manager.create_af_sync_schedule(
        cron_expr=_format_cron_expression(request.json['cron']),
        af_struct_manager_payload={
            's3_bucket': app.config['curated_datasets_bucket_name'],
            'database': app.config['af_structure_database']
        },
        rule_name=app.config['SYNC_AF_STRUCTURE_EVENT_NAME']
    )
    return 'ok'
@app.route('/scheduler/feeds', methods=['POST'])
@raise_backend_exception('Cannot schedule Feeds List sync')
@login_required
def schedule_feeds_sync():
    """Create or refresh the CloudWatch rule that syncs the PI point list."""
    manager = _create_scheduling_manager()
    manager.create_pi_points_sync_schedule(
        cron_expr=_format_cron_expression(request.json['cron']),
        pi_points_manager_payload={
            's3_bucket': app.config['curated_datasets_bucket_name']
        },
        rule_name=app.config['SYNC_PI_POINTS_EVENT_NAME']
    )
    return 'ok'
@app.route('/scheduler/rules', methods=['GET'])
@login_required
def get_scheduler_rule():
    """Return the current structure-sync and feeds-sync schedule rules."""
    structure = scheduler_rule(app.config['SYNC_AF_STRUCTURE_EVENT_NAME'])
    feeds = scheduler_rule(app.config['SYNC_PI_POINTS_EVENT_NAME'])
    return jsonify({'structure': structure, 'feeds': feeds})
def scheduler_rule(rule_name):
    """Describe one CloudWatch rule as a dict, dropping unset fields."""
    manager = _create_scheduling_manager()
    try:
        params = manager.get_rule_parameters_by_rule_name(
            rule_name, fetch_feed=False)
    except NoSuchRuleException:
        # Rule was never created; report a placeholder schedule.
        return {'ruleName': rule_name, 'cron': 'unknown'}
    described = {
        'ruleName': rule_name,
        'query': params.get('query'),
        'dbName': params.get('database'),
        'cron': params['schedule_expression'].replace('cron(', '').replace(')', ''),
        'interval': params.get('interval'),
        'intervalUnit': params.get('interval_unit')
    }
    return {key: value for key, value in described.items() if value is not None}
def _create_managed_feeds_manager(config):
    """Build a ManagedFeedsManager from the given app configuration.

    NOTE(review): ``database`` is a module-level SQLAlchemy object created
    only inside the ``__main__`` block below, so this helper is usable only
    after the module has been started as a program — confirm no other
    entry point imports these routes.
    """
    return ManagedFeedsManager.create_manager(
        aws_region=config['region'],
        postgres_session=database.session,
        incoming_queue_name=config['incoming_queue_name']
    )
def _create_scheduling_manager():
    """Build a SchedulingManager bound to this deployment's lambdas and bucket."""
    boto_session = boto3.session.Session()
    arns = {
        'AF_SYNC_LAMBDA_ARN': app.config['af_sync_lambda_arn'],
        'PI_POINTS_SYNC_LAMBDA_KEY': app.config['pi_points_sync_lambda_arn']
    }
    events_client = boto_session.client('events', region_name=app.config['region'])
    return SchedulingManager(
        events_client,
        arns,
        s3_resource=boto_session.resource('s3'),
        s3_rule_bucket=app.config['curated_datasets_bucket_name'],
        s3_rule_bucket_key_prefix=app.config['s3_rule_bucket_key_prefix']
    )
def _read_config(config_path):
parser = ConfigParser()
parser.read(config_path)
config = {}
for section in parser.sections():
for (config_key, config_value) in parser.items(section):
config[config_key] = config_value
return config
def _parse_command_line_args():
    """Parse the command line; --config (required) names the INI file."""
    arg_parser = argparse.ArgumentParser(description='Webapp Management Console')
    arg_parser.add_argument('--config', required=True, help='Configuration')
    return arg_parser.parse_args()
def _get_settings():
    """Fetch the persisted settings through a fresh feeds manager."""
    return _create_managed_feeds_manager(app.config).get_settings()
def _load_settings():
    """Apply the persisted afDbName (if any) to the live app config."""
    manager = _create_managed_feeds_manager(app.config)
    settings = manager.get_settings()
    if 'afDbName' in settings:
        app.config['af_structure_database'] = settings['afDbName']
if __name__ == "__main__":
    # Verbose logging to stderr when run as a script.
    logging.basicConfig(stream=sys.stderr, level=logging.DEBUG)
    # Random secret key: sessions do not survive a process restart.
    app.secret_key = os.urandom(47)
    args = _parse_command_line_args()
    config = _read_config(args.config)
    app.config.update(config)
    app.config['SQLALCHEMY_DATABASE_URI'] = config['postgres_uri']
    app.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = False
    # Region- and account-qualified CloudWatch rule names used by the
    # scheduler endpoints above.
    app.config['SYNC_PI_POINTS_EVENT_NAME'] = "{}-sync-pi-points-{}".format(
        config['region'],
        config['account_id']
    )
    app.config['SYNC_AF_STRUCTURE_EVENT_NAME'] = "{}-sync-af-structure-{}".format(
        config['region'],
        config['account_id']
    )
    # Module-level globals: _create_managed_feeds_manager reads `database`.
    engine = create_engine(config['postgres_uri'], use_batch_mode=True)
    database = SQLAlchemy(app, session_options={'bind': engine})
    app.json_encoder = CustomJSONEncoder
    _load_settings()
    app.run(host='0.0.0.0', port=int(
        config['port']), threaded=True)
|
<gh_stars>1-10
from pathlib import Path
from typing import Union, List, Optional, Tuple
from functools import partial, reduce
from multiprocessing import Pool, cpu_count
import json
from .example import Example
from .tables import Result, Cycle
from .errors import PrecisionError
from .database import session_scope
from .utils import is_passing
from .word_utils import generate_all_reduced_words, get_cycles, convert_to_runnable
def get_polytope(
    word: str, output_dir: Path, cycles: bool = False
) -> None:
    """Write the polytope for *word* (or for each of its cycles) to JSON."""
    if cycles:
        # Fan out: one file per cyclic permutation, in a dedicated subfolder.
        cycles_output_dir = output_dir / f"{word}_cycles"
        for cycle_word in get_cycles(word):
            get_polytope(cycle_word, cycles_output_dir)
        return
    example = create_example(word)
    if example is None:
        print(f"Couldn't generate example for word {word}")
        return
    polytope = example.get_polytope()
    output_dir.mkdir(parents=True, exist_ok=True)
    output_path = output_dir / f"{word}_polytope.json"
    with open(output_path, "w") as output_file:
        print(f"Writing polytope for word {word} to {output_path}")
        output_file.write(json.dumps(polytope))
def get_polytopes(
    words: List[str], output_dir: Path, cycles: bool = False
) -> None:
    """Write one polytope file per word, fanned out over a worker pool."""
    worker = partial(get_polytope, output_dir=output_dir, cycles=cycles)
    # Leave one CPU free so the host stays responsive.
    with Pool(cpu_count() - 1) as pool:
        pool.map(worker, words)
def create_example(word: str, precision=15) -> Union[None, Example]:
    """Create an Example for *word*; return it if valid, else None.

    Retries with +10 decimal digits of precision (capped at 50) whenever
    inequality generation raises a PrecisionError.
    """
    runnable_word = convert_to_runnable(word)
    example = Example(runnable_word, precision)
    try:
        example.generate_inequalities()
    except PrecisionError:
        if precision < 50:
            # NOTE(review): the retry feeds the already-converted word back
            # through convert_to_runnable — presumably idempotent; confirm.
            return create_example(runnable_word, precision=precision + 10)
        # NOTE(review): at precision >= 50 the error is swallowed and the
        # partially-initialised example still reaches the validity check
        # below — confirm this is intended rather than returning None here.
    if example.is_valid and example.removed_region:
        return example
    # Implicit None return when the example is invalid.
def solve_example(
    word: str, print_result: bool = False, **kwargs
) -> Union[Result, None]:
    """Solve the example for *word*; return its Result, or None if unsolvable."""
    example = create_example(word)
    if example is None:
        if print_result:
            print("Example could not be solved")
        return None
    example.solve()
    result = example.get_result()
    if print_result:
        if result is not None:
            print(result)
        else:
            print("Example could not be solved")
    return result
def solve_examples(
    word_size_range: List[int],
    cyclic: bool = False,
    output_dir: Optional[Path] = None,
    sample_size: int = None,
    **kwargs,
) -> None:
    """Solve all examples for each word size in *word_size_range*.

    Results are merged into one SQLite database per word size under
    ``output_dir / "result_databases"``.  The ``cyclic`` and
    ``sample_size`` options are declared but not yet implemented.
    """
    if cyclic:
        raise NotImplementedError("cyclic parameter")
    if output_dir is not None:
        database_dir = output_dir / "result_databases"
    else:
        # should create a temp directory
        raise NotImplementedError("temp directory")
    for word_size in range(*word_size_range):
        if not database_dir.exists():
            database_dir.mkdir(parents=True)
        database_path = database_dir / f"rank_2_length_{word_size}.db"
        create_all_cycles(database_path, word_size)
        with session_scope(database_path) as session:
            with Pool(cpu_count() - 1) as pool:
                words = ()
                if sample_size is not None:
                    raise NotImplementedError("Samples aren't implemented")
                    # NOTE(review): unreachable after the raise above, and
                    # `Sample` is not defined anywhere in this module.
                    words = Sample(word_size, sample_size).words
                else:
                    print("Generating all reduced words")
                    words = generate_all_reduced_words(word_size)
                print(f"running examples for word size {word_size}")
                example_results = pool.map(solve_example, words)
                for example_result in example_results:
                    if example_result is not None:
                        session.merge(example_result)
                session.commit()
    if output_dir is not None:
        get_all_cycle_data(input_dir=database_dir, output_dir=output_dir, **kwargs)
def create_all_cycles(database_path: Path, word_size: int, **kwargs) -> None:
    """Seed the database with one Cycle row per cyclic equivalence class."""
    with session_scope(database_path) as session:
        for word in generate_all_reduced_words(word_size):
            # The lexicographically smallest cycle represents the class;
            # merge() keeps the insert idempotent across reruns.
            representative = min(get_cycles(word))
            session.merge(Cycle(representative_word=representative))
        session.commit()
def get_cycle_data(database_path: Path, **kwargs) -> Tuple[int, float]:
    """Return (word size, fraction of cycles whose examples pass).

    The word size is recovered from the database filename, which ends in
    ``_<size>.db``.  (The original docstring claimed this returned a
    "min curvature"; the code computes a pass fraction.)
    """
    word_size = int(database_path.stem.split("_")[-1])
    with session_scope(database_path) as session:
        cycles = session.query(Cycle).all()
        passing_examples = sum(map(is_passing, cycles))
        # NOTE(review): divides by len(cycles) — raises ZeroDivisionError on
        # an empty database; confirm upstream guarantees at least one row.
        percentage_of_passes = passing_examples / len(cycles)
        return word_size, percentage_of_passes
def get_all_cycle_data(input_dir: Path, output_dir: Path, **kwargs) -> None:
    """Collect (word size, pass fraction) tuples from every result database.

    Writes the list to ``output_dir / "results_array.json"``, or prints it
    when *output_dir* is None.  Fixes: the glob variable was misspelled
    (``databse_paths``) and the open file handle was confusingly named
    ``output_path``.
    """
    result_tuples = []
    with Pool(cpu_count() - 1) as pool:
        database_paths = input_dir.glob("*.db")
        result_tuples = pool.map(get_cycle_data, database_paths)
    if output_dir is None:
        print(json.dumps(result_tuples, indent=4))
    else:
        output_results_path = output_dir / "results_array.json"
        with open(output_results_path, "w") as output_file:
            output_file.write(json.dumps(result_tuples))
|
<reponame>dyndeploy-test/timestrap
import conf.managers
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
import django.db.models.manager
class Migration(migrations.Migration):
    """Initial schema: Client, Entry, Invoice, Project and Task models.

    All models that expose an ``on_site`` manager are scoped to Django
    sites; ``default_permissions`` adds a custom 'view' permission beside
    the standard add/change/delete.
    """

    initial = True

    dependencies = [
        ('sites', '0002_alter_domain_unique'),
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
    ]

    operations = [
        # Client: a billable customer attached to one or more sites.
        migrations.CreateModel(
            name='Client',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=255)),
                ('archive', models.BooleanField(default=False)),
                ('payment_id', models.CharField(blank=True, max_length=255, null=True)),
                ('invoice_email', models.EmailField(blank=True, max_length=255, null=True)),
                ('sites', models.ManyToManyField(to='sites.Site')),
            ],
            options={
                'default_permissions': ('view', 'add', 'change', 'delete'),
                'ordering': ['-id'],
            },
            managers=[
                ('objects', django.db.models.manager.Manager()),
                ('on_site', conf.managers.CurrentSiteManager()),
            ],
        ),
        # Entry: one tracked block of time (FKs added further below).
        migrations.CreateModel(
            name='Entry',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('date', models.DateField(blank=True)),
                ('duration', models.DurationField(blank=True)),
                ('note', models.TextField(blank=True, null=True)),
            ],
            options={
                'verbose_name_plural': 'Entries',
                'ordering': ['-date', '-id'],
                'default_permissions': ('view', 'add', 'change', 'delete'),
            },
            managers=[
                ('objects', django.db.models.manager.Manager()),
                ('on_site', conf.managers.CurrentSiteManager()),
            ],
        ),
        # Invoice: groups entries for a client; site defaults to pk=1.
        migrations.CreateModel(
            name='Invoice',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('created', models.DateTimeField(auto_now_add=True)),
                ('paid', models.DateTimeField(blank=True, null=True)),
                ('transaction_id', models.CharField(blank=True, max_length=255, null=True)),
                ('client', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='core.Client')),
                ('entries', models.ManyToManyField(related_name='invoices', to='core.Entry')),
                ('site', models.ForeignKey(default=1, on_delete=django.db.models.deletion.CASCADE, to='sites.Site')),
            ],
            options={
                'default_permissions': ('view', 'add', 'change', 'delete'),
            },
            managers=[
                ('objects', django.db.models.manager.Manager()),
                ('on_site', conf.managers.CurrentSiteManager()),
            ],
        ),
        # Project: belongs to a client; no site manager of its own.
        migrations.CreateModel(
            name='Project',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=255)),
                ('archive', models.BooleanField(default=False)),
                ('estimate', models.DecimalField(blank=True, decimal_places=2, max_digits=10, null=True)),
                ('client', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='projects', to='core.Client')),
            ],
            options={
                'default_permissions': ('view', 'add', 'change', 'delete'),
                'ordering': ['client', '-id'],
            },
        ),
        # Task: a rate-bearing activity shared across sites.
        migrations.CreateModel(
            name='Task',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=255)),
                ('hourly_rate', models.DecimalField(blank=True, decimal_places=2, max_digits=10, null=True)),
                ('sites', models.ManyToManyField(to='sites.Site')),
            ],
            options={
                'default_permissions': ('view', 'add', 'change', 'delete'),
                'ordering': ['-id'],
            },
            managers=[
                ('objects', django.db.models.manager.Manager()),
                ('on_site', conf.managers.CurrentSiteManager()),
            ],
        ),
        # Entry foreign keys added after all referenced models exist.
        migrations.AddField(
            model_name='entry',
            name='project',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='entries', to='core.Project'),
        ),
        migrations.AddField(
            model_name='entry',
            name='site',
            field=models.ForeignKey(default=1, on_delete=django.db.models.deletion.CASCADE, to='sites.Site'),
        ),
        migrations.AddField(
            model_name='entry',
            name='task',
            field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='entries', to='core.Task'),
        ),
        migrations.AddField(
            model_name='entry',
            name='user',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='entries', to=settings.AUTH_USER_MODEL),
        ),
    ]
|
#!/usr/bin/env python
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from absl import flags
from absl import app as absl_app
import numpy as np
import tensorflow as tf
from models import embed_pool, embed_cnn, cnn_lstm, resnet_cnn, \
embed_lstm, embed_lstm_attention, seq2species
from models.input_pipeline import input_function_train_kmer, input_function_train_one_hot, \
input_function_predict_kmer, input_function_predict_one_hot, \
input_function_train_kmer_pad_to_fixed_len, input_function_predict_kmer_pad_to_fixed_len
from models.define_flags import universal_flags, model_specific_flags_embed_cnn, \
model_specific_flags_embed_lstm, flags_of_mode
from models.format_prediction import prob2npy, top_n_class, paired_report, \
prob2npy_paired, single_report
from utils.logs import hooks_helper
from utils.logs import logger
# import sys
# sys.path.append('models')
def config(model_name, params):
    """Build the model instance selected by ``model_name``.

    Any unrecognized name falls through to the default (and best-performing)
    architecture, DeepMicrobes (embedding + LSTM + self-attention).

    Args:
        model_name: Architecture name; see the branches below.
        params: Dict of hyperparameters. Which keys are read depends on the
            chosen architecture, but callers in this file always pass the
            full set.

    Returns:
        A callable model object mapping input features to logits.
    """
    # Keyword groups shared by several architectures.
    size_kwargs = dict(
        num_classes=params['num_classes'],
        max_len=params['max_len'],
    )
    embed_kwargs = dict(
        vocab_size=params['vocab_size'],
        embedding_dim=params['embedding_dim'],
        mlp_dim=params['mlp_dim'],
        kmer=params['kmer'],
    )

    def cnn_kwargs():
        # Parsed lazily so non-CNN models never touch the CNN flags.
        return dict(
            cnn_filter_sizes=list(map(int, params['cnn_filter_sizes'].split(","))),
            cnn_num_filters=params['cnn_num_filters'],
        )

    if model_name == 'embed_pool':  # Embed + Pool
        return embed_pool.EmbedPool(**size_kwargs, **embed_kwargs)
    if model_name == 'embed_cnn':  # Embed + CNN
        return embed_cnn.EmbedCNN(**size_kwargs, **embed_kwargs, **cnn_kwargs())
    if model_name == 'embed_cnn_no_pool':  # deprecated: lower performance than embed_cnn
        return embed_cnn.EmbedCNNnoPool(**size_kwargs, **embed_kwargs, **cnn_kwargs())
    if model_name == 'embed_lstm':  # Embed + LSTM
        return embed_lstm.EmbedLSTM(
            lstm_dim=params['lstm_dim'],
            pooling_type=params['pooling_type'],
            **size_kwargs, **embed_kwargs)
    if model_name == 'cnn_lstm':  # CNN + LSTM
        return cnn_lstm.ConvLSTM(**size_kwargs)
    if model_name == 'cnn_2lstm':  # deprecated: lower performance than cnn_lstm
        return cnn_lstm.Conv2LSTM(**size_kwargs)
    if model_name == 'deep_cnn':  # ResNet-like CNN
        return resnet_cnn.DeepCNN(**size_kwargs)
    if model_name == 'deep_cnn_13layer':  # deprecated: lower performance than deep_cnn
        return resnet_cnn.DeepCNN13(**size_kwargs)
    if model_name == 'deep_cnn_9layer':  # deprecated: lower performance than deep_cnn
        return resnet_cnn.DeepCNN9(**size_kwargs)
    if model_name == 'seq2species':  # Seq2species
        return seq2species.Seq2species(**size_kwargs)
    # Default: DeepMicrobes, Embed + LSTM + Attention (best performing).
    return embed_lstm_attention.EmbedAttention(
        num_classes=params['num_classes'],
        vocab_size=params['vocab_size'],
        embedding_dim=params['embedding_dim'],
        mlp_dim=params['mlp_dim'],
        lstm_dim=params['lstm_dim'],
        row=params['row'],
        da=params['da'],
        keep_prob=params['keep_prob'])
def model_fn(features, labels, mode, params):
    """Estimator model_fn building the graph for predict/train/eval.

    Args:
        features: Input tensor(s) fed to the selected model.
        labels: Sparse integer class labels (unused in PREDICT mode).
        mode: One of tf.estimator.ModeKeys.{PREDICT,TRAIN,EVAL}.
        params: Hyperparameter dict forwarded to config().

    Returns:
        A tf.estimator.EstimatorSpec for the requested mode.
    """
    # NOTE: the architecture is selected via the global --model_name flag,
    # not via `params`.
    model = config(flags.FLAGS.model_name, params)
    logits = model(features)
    # ------- Prediction mode
    predictions = {
        'classes': tf.argmax(logits, axis=1),
        'probabilities': tf.nn.softmax(logits)
    }
    if mode == tf.estimator.ModeKeys.PREDICT:
        # Return the predictions and the specification for serving a SavedModel
        return tf.estimator.EstimatorSpec(
            mode=mode,
            predictions=predictions,
            export_outputs={
                'predict': tf.estimator.export.PredictOutput(predictions)
            })
    loss = tf.losses.sparse_softmax_cross_entropy(
        logits=logits, labels=labels)
    # Create a tensor named cross_entropy for logging purposes.
    tf.identity(loss, name='cross_entropy')
    tf.summary.scalar('cross_entropy', loss)
    # ------- Training mode
    if mode == tf.estimator.ModeKeys.TRAIN:
        global_step = tf.train.get_or_create_global_step()
        # Smooth (staircase=False) exponential decay with a 400k-step horizon.
        learning_rate = tf.train.exponential_decay(params['lr'], global_step, 400000,
                                                   params['lr_decay'], staircase=False)
        # Create a tensor named learning_rate for logging purposes
        tf.identity(learning_rate, name='learning_rate')
        tf.summary.scalar('learning_rate', learning_rate)
        optimizer = tf.train.AdamOptimizer(learning_rate)
        minimize_op = optimizer.minimize(loss, global_step)
        # Group with UPDATE_OPS so ops registered there (e.g. batch-norm
        # moving-average updates) run on every training step.
        update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
        train_op = tf.group(minimize_op, update_ops)
    else:
        train_op = None
    # Accuracy metric is built for both TRAIN (logging) and EVAL (metric ops).
    accuracy = tf.metrics.accuracy(labels, predictions['classes'])
    metrics = {'accuracy': accuracy}
    # Create a tensor named train_accuracy for logging purposes
    tf.identity(accuracy[1], name='train_accuracy')
    tf.summary.scalar('train_accuracy', accuracy[1])
    return tf.estimator.EstimatorSpec(
        mode=mode,
        predictions=predictions,
        loss=loss,
        train_op=train_op,
        eval_metric_ops=metrics)
def train(flags_obj, model_function, dataset_name):
    """Train the classifier defined by ``model_function``.

    Builds a tf.estimator.Estimator from the command-line flags, logs the
    run configuration, and runs training over the input TFRecords.

    Args:
        flags_obj: Parsed absl flags object.
        model_function: The Estimator model_fn.
        dataset_name: Name recorded by the benchmark logger.
    """
    run_config = tf.estimator.RunConfig(save_checkpoints_steps=100000, keep_checkpoint_max=1000)
    classifier = tf.estimator.Estimator(
        model_fn=model_function, model_dir=flags_obj.model_dir, config=run_config,
        params={
            'num_classes': flags_obj.num_classes,
            'vocab_size': flags_obj.vocab_size,
            'embedding_dim': flags_obj.embedding_dim,
            'mlp_dim': flags_obj.mlp_dim,
            'kmer': flags_obj.kmer,
            'max_len': flags_obj.max_len,
            'lr': flags_obj.lr,
            'lr_decay': flags_obj.lr_decay,
            'cnn_num_filters': flags_obj.cnn_num_filters,
            'cnn_filter_sizes': flags_obj.cnn_filter_sizes,
            'lstm_dim': flags_obj.lstm_dim,
            'pooling_type': flags_obj.pooling_type,
            'row': flags_obj.row,
            'da': flags_obj.da,
            'keep_prob': flags_obj.keep_prob
        })
    run_params = {
        'batch_size': flags_obj.batch_size,
        'train_epochs': flags_obj.train_epochs,
    }
    benchmark_logger = logger.config_benchmark_logger(flags_obj)
    benchmark_logger.log_run_info('model', dataset_name, run_params)
    train_hooks = hooks_helper.get_train_hooks(
        flags_obj.hooks,
        batch_size=flags_obj.batch_size)

    def input_fn_train():
        """Select the training input pipeline for the encoding method/model.

        Fix over the original: the kmer branch used to build a pipeline and
        then, for the fixed-length embedding models, immediately build a
        second one and discard the first; now exactly one pipeline is built.
        """
        # ------ Train based on kmers as inputs
        if flags_obj.encode_method == 'kmer':
            # These models require inputs padded to a fixed length.
            if flags_obj.model_name in ('embed_pool', 'embed_cnn', 'embed_lstm',
                                        'embed_cnn_no_pool'):
                return input_function_train_kmer_pad_to_fixed_len(
                    flags_obj.input_tfrec,
                    flags_obj.train_epochs, flags_obj.batch_size,
                    flags_obj.cpus, flags_obj.max_len, flags_obj.kmer
                )
            return input_function_train_kmer(
                flags_obj.input_tfrec,
                flags_obj.train_epochs, flags_obj.batch_size,
                flags_obj.cpus
            )
        # ------ Train based on one-hot-encoding as inputs
        return input_function_train_one_hot(
            flags_obj.input_tfrec,
            flags_obj.train_epochs, flags_obj.batch_size,
            flags_obj.cpus, flags_obj.max_len
        )

    classifier.train(input_fn=input_fn_train, hooks=train_hooks)
# --------------------------------------- Evaluation
def evaluate(flags_obj, model_function):
    """Run a single evaluation epoch over the input TFRecords.

    Args:
        flags_obj: Parsed absl flags object.
        model_function: The Estimator model_fn.
    """
    classifier = tf.estimator.Estimator(
        model_fn=model_function, model_dir=flags_obj.model_dir,
        params={
            'num_classes': flags_obj.num_classes,
            'vocab_size': flags_obj.vocab_size,
            'embedding_dim': flags_obj.embedding_dim,
            'mlp_dim': flags_obj.mlp_dim,
            'kmer': flags_obj.kmer,
            'max_len': flags_obj.max_len,
            'lr': flags_obj.lr,
            'lr_decay': flags_obj.lr_decay,
            'cnn_num_filters': flags_obj.cnn_num_filters,
            'cnn_filter_sizes': flags_obj.cnn_filter_sizes,
            'lstm_dim': flags_obj.lstm_dim,
            'pooling_type': flags_obj.pooling_type,
            'row': flags_obj.row,
            'da': flags_obj.da,
            'keep_prob': flags_obj.keep_prob
        })

    def input_fn_eval():
        """Select the eval input pipeline (single pass over the data).

        Fix over the original: only one pipeline is constructed instead of
        building a kmer pipeline and discarding it for fixed-length models.
        """
        if flags_obj.encode_method == 'kmer':
            # These models require inputs padded to a fixed length.
            if flags_obj.model_name in ('embed_pool', 'embed_cnn', 'embed_lstm',
                                        'embed_cnn_no_pool'):
                return input_function_train_kmer_pad_to_fixed_len(
                    flags_obj.input_tfrec,
                    1, flags_obj.batch_size,
                    flags_obj.cpus, flags_obj.max_len, flags_obj.kmer
                )
            return input_function_train_kmer(
                flags_obj.input_tfrec,
                1, flags_obj.batch_size,
                flags_obj.cpus
            )
        return input_function_train_one_hot(
            flags_obj.input_tfrec,
            1, flags_obj.batch_size,
            flags_obj.cpus, flags_obj.max_len
        )

    classifier.evaluate(input_fn=input_fn_eval)
# --------------------------- Prediction
def predict(flags_obj, model_function):
    """Build an Estimator and return its prediction generator.

    Args:
        flags_obj: Parsed absl flags object.
        model_function: The Estimator model_fn.

    Returns:
        The generator from classifier.predict(); yields whole batches
        (yield_single_examples=False), as the downstream report/format
        helpers expect.
    """
    classifier = tf.estimator.Estimator(
        model_fn=model_function, model_dir=flags_obj.model_dir,
        params={
            'num_classes': flags_obj.num_classes,
            'vocab_size': flags_obj.vocab_size,
            'embedding_dim': flags_obj.embedding_dim,
            'mlp_dim': flags_obj.mlp_dim,
            'kmer': flags_obj.kmer,
            'max_len': flags_obj.max_len,
            'lr': flags_obj.lr,
            'lr_decay': flags_obj.lr_decay,
            'cnn_num_filters': flags_obj.cnn_num_filters,
            'cnn_filter_sizes': flags_obj.cnn_filter_sizes,
            'lstm_dim': flags_obj.lstm_dim,
            'pooling_type': flags_obj.pooling_type,
            'row': flags_obj.row,
            'da': flags_obj.da,
            'keep_prob': flags_obj.keep_prob
        })

    def input_fn_predict():
        """Select the prediction input pipeline.

        Fix over the original: only one pipeline is constructed instead of
        building a kmer pipeline and discarding it for fixed-length models.
        """
        if flags_obj.encode_method == 'kmer':
            # These models require inputs padded to a fixed length.
            if flags_obj.model_name in ('embed_pool', 'embed_cnn', 'embed_lstm',
                                        'embed_cnn_no_pool'):
                return input_function_predict_kmer_pad_to_fixed_len(
                    flags_obj.input_tfrec,
                    flags_obj.batch_size,
                    flags_obj.cpus,
                    flags_obj.max_len,
                    flags_obj.kmer
                )
            return input_function_predict_kmer(
                flags_obj.input_tfrec,
                flags_obj.batch_size,
                flags_obj.cpus
            )
        return input_function_predict_one_hot(
            flags_obj.input_tfrec,
            flags_obj.batch_size,
            flags_obj.cpus,
            flags_obj.max_len
        )

    return classifier.predict(input_fn=input_fn_predict, yield_single_examples=False)
def main(_):
    """Entry point: dispatch on the --running_mode flag.

    Modes:
      eval                 -> single evaluation pass
      predict_prob         -> save full probability matrix as .npy
      predict_top_n        -> save top-N class indexes and probabilities (txt)
      predict_single_class -> save per-read class/probability report (txt)
      predict_paired_class -> save paired-read class/probability report (txt)
      predict_paired_prob  -> save paired probability matrix as .npy
      anything else        -> train
    """
    if flags.FLAGS.running_mode == 'eval':
        evaluate(flags.FLAGS, model_fn)
    elif flags.FLAGS.running_mode == 'predict_prob':
        predict_out = predict(flags.FLAGS, model_fn)
        prob_matrix = prob2npy(
            predict_out,
            flags.FLAGS.num_classes,
            flags.FLAGS.strands_average)
        np.save(flags.FLAGS.pred_out, prob_matrix)
    elif flags.FLAGS.running_mode == 'predict_top_n':
        predict_out = predict(flags.FLAGS, model_fn)
        top_n_indexes, top_n_probs = top_n_class(
            predict_out,
            flags.FLAGS.num_classes,
            flags.FLAGS.top_n_class,
            flags.FLAGS.strands_average)
        np.savetxt(flags.FLAGS.pred_out+'.category.txt', top_n_indexes, fmt='%d', delimiter='\t')
        np.savetxt(flags.FLAGS.pred_out+'.prob.txt', top_n_probs, fmt='%.2f', delimiter='\t')
    elif flags.FLAGS.running_mode == 'predict_single_class':
        predict_out = predict(flags.FLAGS, model_fn)
        classes, probs = single_report(predict_out,
                                       flags.FLAGS.num_classes,
                                       flags.FLAGS.label_file,
                                       flags.FLAGS.translate,
                                       flags.FLAGS.strands_average)
        np.savetxt(flags.FLAGS.pred_out + '.category_single.txt', classes, fmt='%d', delimiter='\t')
        np.savetxt(flags.FLAGS.pred_out + '.prob_single.txt', probs, fmt='%.2f', delimiter='\t')
    elif flags.FLAGS.running_mode == 'predict_paired_class':
        predict_out = predict(flags.FLAGS, model_fn)
        classes, probs = paired_report(predict_out,
                                       flags.FLAGS.num_classes,
                                       flags.FLAGS.label_file,
                                       flags.FLAGS.translate)
        np.savetxt(flags.FLAGS.pred_out + '.category_paired.txt', classes, fmt='%d', delimiter='\t')
        np.savetxt(flags.FLAGS.pred_out + '.prob_paired.txt', probs, fmt='%.2f', delimiter='\t')
    elif flags.FLAGS.running_mode == 'predict_paired_prob':
        predict_out = predict(flags.FLAGS, model_fn)
        prob_matrix = prob2npy_paired(predict_out,
                                      flags.FLAGS.num_classes)
        np.save(flags.FLAGS.pred_out, prob_matrix)
    else:
        # Default: any unrecognized mode starts training.
        train(flags.FLAGS, model_fn, 'dataset_name')
if __name__ == "__main__":
    # Runs only when executed as a script, not when imported as a module.
    tf.logging.set_verbosity(tf.logging.INFO)
    # Register all command-line flags before absl parses argv in run().
    universal_flags()
    model_specific_flags_embed_cnn()
    model_specific_flags_embed_lstm()
    flags_of_mode()
    absl_app.run(main)
|
from __future__ import annotations
from os import PathLike
from pathlib import Path
import click
from grapl_tests_common.upload_logs import upload_osquery_logs, upload_sysmon_logs
from graplctl import idempotency_checks
from graplctl.common import State, pass_graplctl_state
from graplctl.upload.lib import upload_analyzer
@click.group()
@click.pass_context
@pass_graplctl_state
def upload(
    graplctl_state: State,
    ctx: click.Context,
) -> None:
    """commands like "upload analyzer" or "upload sysmon logs" """
    # TODO: Disallow any uploads until we've confirmed we've provisioned
    # https://github.com/grapl-security/issue-tracker/issues/340
    # NOTE: was previously an `assert`, which is stripped when Python runs
    # with -O; an explicit check guarantees the guard always executes and
    # surfaces a proper CLI error instead of an AssertionError traceback.
    if not idempotency_checks.is_grapl_provisioned(
        dynamodb=graplctl_state.dynamodb,
        schema_table=graplctl_state.schema_table,
    ):
        raise click.UsageError(
            "You can't upload anything to grapl until it's provisioned."
        )
@upload.command()
@click.option(
    "--analyzer_main_py",
    type=click.Path(exists=True, file_okay=True, dir_okay=False, resolve_path=True),
    required=True,
    help="Path to the analyzer's `main.py`",
)
@click.option(
    "--analyzers-bucket",
    help="Name of the S3 bucket to upload analyzers to",
    type=click.STRING,
    required=True,
    envvar="GRAPL_ANALYZERS_BUCKET",
)
@pass_graplctl_state
def analyzer(
    graplctl_state: State, analyzers_bucket: str, analyzer_main_py: PathLike
) -> None:
    """Upload an analyzer to the S3 bucket"""
    # Resolve to an absolute path before handing off to the upload helper.
    main_py_path = Path(analyzer_main_py).resolve()
    upload_analyzer(
        graplctl_state.s3,
        analyzers_bucket=analyzers_bucket,
        analyzer_main_py=main_py_path,
    )
@upload.command()
@click.option(
    "--logfile",
    type=click.Path(exists=True, file_okay=True, dir_okay=False, resolve_path=True),
    required=True,
    help="The log file to upload",
)
@click.option(
    "--log-bucket",
    help="The name of the S3 bucket to which Sysmon logs should be uploaded",
    type=click.STRING,
    required=True,
    envvar="GRAPL_SYSMON_LOG_BUCKET",
)
@click.option(
    "--queue-url",
    help="The URL of the SQS queue for Sysmon logs",
    type=click.STRING,
    required=True,
    envvar="GRAPL_SYSMON_GENERATOR_QUEUE",
)
@pass_graplctl_state
def sysmon(
    graplctl_state: State, logfile: PathLike, log_bucket: str, queue_url: str
) -> None:
    """Upload a Sysmon log file to the S3 bucket"""
    # Resolve to an absolute path before handing off to the upload helper.
    logfile_path = Path(logfile).resolve()
    upload_sysmon_logs(
        s3_client=graplctl_state.s3,
        sqs_client=graplctl_state.sqs,
        deployment_name=graplctl_state.stack_name,
        logfile=logfile_path,
        log_bucket=log_bucket,
        queue_url=queue_url,
    )
@upload.command()
@click.option(
    "--logfile",
    type=click.Path(exists=True, file_okay=True, dir_okay=False, resolve_path=True),
    required=True,
    help="The log file to upload",
)
@click.option(
    "--log-bucket",
    help="The name of the S3 bucket to which OSQuery logs should be uploaded",
    type=click.STRING,
    required=True,
    envvar="GRAPL_OSQUERY_LOG_BUCKET",
)
@click.option(
    "--queue-url",
    help="The URL of the SQS queue for OSQuery logs",
    type=click.STRING,
    required=True,
    envvar="GRAPL_OSQUERY_GENERATOR_QUEUE",
)
@pass_graplctl_state
def osquery(
    graplctl_state: State, logfile: PathLike, log_bucket: str, queue_url: str
) -> None:
    """Upload an OSQuery log file to the S3 bucket"""
    # Resolve to an absolute path before handing off to the upload helper.
    logfile_path = Path(logfile).resolve()
    upload_osquery_logs(
        s3_client=graplctl_state.s3,
        sqs_client=graplctl_state.sqs,
        deployment_name=graplctl_state.stack_name,
        logfile=logfile_path,
        log_bucket=log_bucket,
        queue_url=queue_url,
    )
|
# signal_ocean/historical_tonnage_list/vessel_filter.py
# noqa: D100
from datetime import date
from dataclasses import dataclass, field
from typing import List, Optional, cast
from .vessel_subclass import VesselSubclass
from .._internals import QueryString, format_iso_date
@dataclass(eq=False)
class VesselFilter:
    """Enables vessel filtering in a Historical Tonnage List query.

    All attributes in this class are optional, i.e. no filtering will be
    performed on attributes whose value is None.

    Attributes that accept a list of values are used to perform an *OR*
    comparison. In other words, when a non-empty list of values is used,
    the Historical Tonnage List will contain vessels that match on **any**
    of the specified values. Using an empty list will result in no filtering
    at all.

    VesselFilter is mutable in order to allow making adjustments to
    existing instances if query results are unsatisfactory.

    Attributes:
        push_types: Return vessels with the specified push types.
            Use constants defined in the PushType class for the values of
            this attribute.
        market_deployments: Return vessels with the specified market
            deployment types. Use constants defined in the MarketDeployment
            class for the values of this attribute.
        commercial_statuses: Return vessels with the specified
            commercial statuses. Use constants defined in the CommercialStatus
            class for the values of this attribute.
        vessel_subclass: Return vessels of the specified subclass.
            Use constants defined in the VesselSubclass class for the values
            of this attribute.
        add_willing_to_switch_subclass: When True, returns vessels
            that do not match the subclass but are willing to switch to it.
        latest_ais_since: The maximum age, in days, of the vessel's
            AIS information at the time the tonnage list was captured.
        operational_statuses: Return vessels with the specified
            operational statuses. Use constants defined in the
            OperationalStatus class for the values of this attribute.
        min_liquid_capacity: The minimum liquid capacity, in cubic
            meters, the vessel should be able to hold.
        max_liquid_capacity: The maximum liquid capacity, in cubic
            meters, the vessel should be able to hold.
        fixture_types: Return vessels with the specified
            fixture types. Use constants defined in the FixtureType class for
            the values of this attribute.
        past_port_visits: Return vessels with the specified past
            port visits.
        open_port_ids: Return vessels with the specified open
            port ids.
        canakkale_cancelling: Return vessels with the specified
            Canakkale cancelling date.
        open_date: Return vessels with the specified open date.
        ice_classes: Return vessels with the specified ice classes.
        min_cranes_ton_capacity: Return vessels with the specified
            minimum cranes ton capacity.
        max_cranes_ton_capacity: Return vessels with the specified
            maximum cranes ton capacity.
        min_length_overall: Return vessels with the specified
            minimum length overall.
        max_length_overall: Return vessels with the specified
            maximum length overall.
        min_breadth_extreme: Return vessels with the specified
            minimum breadth extreme.
        max_breadth_extreme: Return vessels with the specified
            maximum breadth extreme.
        open_area_ids: Return vessels with the specified open area ids.
        open_country_ids: Return vessels with the specified open
            country ids.
    """

    # NOTE(review): the casts below exist because the declared types are
    # Optional[...] while the defaults are dataclass field() factories;
    # the cast is a static-typing device only and has no runtime effect.
    # NOTE(review): a `last_cargo_types` filter was documented historically
    # but no corresponding field exists on this class — confirm whether it
    # was dropped intentionally before relying on it.
    push_types: Optional[List[str]] = cast(
        List[str], field(default_factory=list)
    )
    market_deployments: Optional[List[str]] = cast(
        List[str], field(default_factory=list)
    )
    commercial_statuses: Optional[List[str]] = cast(
        List[str], field(default_factory=list)
    )
    vessel_subclass: Optional[str] = VesselSubclass.ALL
    add_willing_to_switch_subclass: Optional[bool] = False
    latest_ais_since: Optional[int] = None
    operational_statuses: Optional[List[str]] = cast(
        List[str], field(default_factory=list)
    )
    min_liquid_capacity: Optional[int] = None
    max_liquid_capacity: Optional[int] = None
    fixture_types: Optional[List[str]] = cast(
        List[str], field(default_factory=list)
    )
    past_port_visits: Optional[List[int]] = cast(
        List[int], field(default_factory=list)
    )
    open_port_ids: Optional[List[int]] = cast(
        List[int], field(default_factory=list)
    )
    canakkale_cancelling: Optional[date] = None
    open_date: Optional[date] = None
    ice_classes: Optional[List[str]] = cast(
        List[str], field(default_factory=list)
    )
    min_cranes_ton_capacity: Optional[int] = None
    max_cranes_ton_capacity: Optional[int] = None
    min_length_overall: Optional[int] = None
    max_length_overall: Optional[int] = None
    min_breadth_extreme: Optional[int] = None
    max_breadth_extreme: Optional[int] = None
    open_area_ids: Optional[List[int]] = cast(
        List[int], field(default_factory=list)
    )
    open_country_ids: Optional[List[int]] = cast(
        List[int], field(default_factory=list)
    )

    def _to_query_string(self) -> QueryString:
        """Map the filter's fields onto the API's camelCase query parameters.

        Dates are ISO-formatted; None values mean "no filtering" and are
        expected to be dropped by the query-string serialization layer.
        """
        return {
            "pushType": self.push_types,
            "commercialStatus": self.commercial_statuses,
            "latestAisSince": self.latest_ais_since,
            "vesselSubclass": self.vessel_subclass,
            "addWillingToSwitchSubclass": self.add_willing_to_switch_subclass,
            "marketDeployment": self.market_deployments,
            "operationalStatus": self.operational_statuses,
            "minLiquidCapacity": self.min_liquid_capacity,
            "maxLiquidCapacity": self.max_liquid_capacity,
            "fixtureType": self.fixture_types,
            "pastPortVisit": self.past_port_visits,
            "openPortId": self.open_port_ids,
            "canakkaleCancelling": format_iso_date(self.canakkale_cancelling),
            "openDate": format_iso_date(self.open_date),
            "iceClass": self.ice_classes,
            "cranesTonCapacityMin": self.min_cranes_ton_capacity,
            "cranesTonCapacityMax": self.max_cranes_ton_capacity,
            "lengthOverallMin": self.min_length_overall,
            "lengthOverallMax": self.max_length_overall,
            "breadthExtremeMin": self.min_breadth_extreme,
            "breadthExtremeMax": self.max_breadth_extreme,
            "openArea": self.open_area_ids,
            "openCountry": self.open_country_ids
        }
|
# gh_stars: 0
# -*- coding: UTF-8 -*-
"""
Author:wistn
since:2020-05-22
LastEditors:Do not edit
LastEditTime:2020-10-06
Description:
"""
from .org_noear_sited_SdAttributeList import SdAttributeList
from .org_noear_sited_SdNode import SdNode
from .mytool import TextUtils
class DdNode(SdNode):
    """Node type extending SdNode that parses display/playback attributes
    (download, navigation, image, web, screen, style) from its attribute
    list in OnDidInit."""

    def s(self):
        """Shorthand accessor for the owning source object."""
        return self.source

    def __init__(self, source):
        super().__init__(source)
        # Whether downloading everything at once is supported (book[1,2,3]).
        # NOTE(review): 'donwAll' is a typo for 'downAll', but it is part of
        # the public attribute name — renaming would break callers.
        self.donwAll = True
        # Whether chapter navigation (previous/next) is shown
        # (used for: section[1,2,3]).
        self.showNav = True
        # Image display mode (None: default; 0: hidden; 1: small; 2: large).
        self.showImg = None
        # Whether to auto-size the layout (different sizes on pad vs phone).
        self.autoSize = False
        # Whether the "S" (open-in-web) button is shown.
        self.showWeb = True
        # Screen orientation ("v" vertical / "h" horizontal).
        self.screen = None
        # Width/height ratio of the image shown on the front page.
        self.WHp = 0
        # Whether playback loops.
        self.loop = False
        # Style/skin identifier (see DdNode.STYLE_* constants).
        self.style = 0
        # Preset options string.
        self.options = None
        # Web handler object (controls opening in an external browser).
        self._web = None

    # @Override
    def OnDidInit(self):
        """Populate fields from the node's parsed attribute list."""
        self.donwAll = self.attrs.getInt("donwAll", 1) > 0
        self.showNav = self.attrs.getInt("showNav", 1) > 0
        self.showImg = self.attrs.getString("showImg")
        self.autoSize = self.attrs.getInt("autoSize", 0) > 0
        # When the source is private, the web button defaults to hidden;
        # otherwise it defaults to shown.
        self.showWeb = (
            self.attrs.getInt("showWeb", 0 if self.s().isPrivate() else 1) > 0
        )
        self.screen = self.attrs.getString("screen")
        self.loop = self.attrs.getInt("loop", 0) > 0
        self._web = self.attrs.getValue("web")  # controls external-browser opening
        if self.source.schema < 2:
            # Legacy schema: the web-build URL lives in a separate attribute.
            self._web.build = self.attrs.getString("buildWeb")
        self.options = self.attrs.getString("options")
        self.style = self.attrs.getInt("style", DdNode.STYLE_VIDEO)
        # Audio style defaults to vertical orientation when none is given.
        if TextUtils.isEmpty(self.screen) and self.style == DdNode.STYLE_AUDIO:
            self.screen = "v"
        w = self.attrs.getString("w")
        if TextUtils.isEmpty(w) == False:
            h = self.attrs.getString("h")
            self.WHp = float(w) / float(h)

    # Whether to run inside the in-app web view.
    def isWebrun(self):
        run = self.attrs.getString("run")
        if run == None:
            return False
        return run.find("web") >= 0

    # Whether to run in an external web browser.
    def isOutWebrun(self):
        run = self.attrs.getString("run")
        if run == None:
            return False
        return run.find("outweb") >= 0

    def getWebUrl(self, url):
        """Build the final web URL for `url` via the node's web handler."""
        atts = SdAttributeList()
        atts.set("url", url)
        return self._web.run(self.source, atts, url)


# Style constants referenced in OnDidInit (assigned as class attributes
# after the class body so they are shared by all instances).
DdNode.STYLE_VIDEO = 11
DdNode.STYLE_AUDIO = 12
DdNode.STYLE_INWEB = 13
|
from openpyxl import load_workbook
import os
import yaml
from pathlib import Path
import re
import networkx as nx
def import_from_xls(reqmodule, req_xls, wb=None):
    """Import requirements for ``reqmodule`` (and its submodules) from xlsx.

    Each module reads the worksheet named after its module_prefix; row 1
    holds the field names, subsequent rows hold one requirement each.

    Fix over the original: a single ``req`` dict object was reused for every
    row and passed to ``add_req`` each time, so if ``add_req`` stores the
    mapping by reference, every imported requirement aliased the same dict.
    A fresh dict is now built per row.

    Args:
        reqmodule: Module object exposing module_prefix, add_req,
            clear_ordered_req_list and a ``modules`` dict of submodules.
        req_xls: Path to the workbook (used only when ``wb`` is not given).
        wb: Already-loaded workbook, passed down through the recursion so the
            file is opened only once.
    """
    if not wb:
        wb = load_workbook(req_xls)
    if reqmodule.module_prefix in wb.sheetnames:
        sheet = wb[reqmodule.module_prefix]
    else:
        sheet = None
    if sheet:
        print(reqmodule.module_prefix)
        reqmodule.clear_ordered_req_list()
        fields = [field.value for field in list(sheet.iter_rows(1, 1))[0]]
        for row in range(2, sheet.max_row + 1):
            # Fresh dict per row so requirements never share state.
            req = {}
            for col, field in enumerate(fields):
                # Don't consider columns with empty field name,
                # empty fields
                # nor the 'non_stored_fields' columns
                if field and field != 'non_stored_fields':
                    value = sheet.cell(row, col + 1).value
                    req[field] = '' if not value or value == 'None' else value
            # Check that the requirement is not empty before adding
            if any(req.values()):
                reqmodule.add_req(req)
    for submodule in reqmodule.modules.values():
        import_from_xls(submodule, req_xls, wb=wb)
def import_from_markdown(reqmodule, req_md):
    """Import requirements embedded in a markdown file.

    Requirements are delimited by HTML comment markers::

        <!--- gitreq:start:<Req-Id>-->
        ...description lines...
        <!--- gitreq:stop:-->

    The lines between the markers become the requirement's Description,
    with the ``[<Req-Id>]`` tag removed and trailing whitespace stripped.

    Fix over the original: the Req-Id used to keep a trailing space (and any
    other whitespace) when the id was separated from ``-->`` by a space,
    which also broke the ``[<Req-Id>]`` tag removal; the id is now extracted
    robustly.

    Args:
        reqmodule: Module object exposing ``add_req(dict)``.
        req_md: Path to the markdown file.
    """
    req = {}
    record_req = False
    with open(req_md, 'r') as md_file:
        for md_line in md_file:
            if md_line.startswith('<!--- gitreq:'):
                marker = md_line[13:]
                if marker.startswith('start:'):
                    req = {
                        # Take everything up to the closing '-->' and strip
                        # surrounding whitespace.
                        'Req-Id': marker[6:].split('-->')[0].strip(),
                        'Description': '',
                    }
                    record_req = True
                elif marker.startswith('stop:'):
                    req['Description'] = req['Description'].replace(
                        '[' + req['Req-Id'] + ']', '').rstrip()
                    reqmodule.add_req(req)
                    record_req = False
            elif record_req:
                req['Description'] += md_line
def check_for_req_links_and_update(reqmodule, string, string_changed):
    """Scan one source line for a requirement link and update both sides.

    If the line matches the module's link regex, the destination
    requirement's 'downward_links' is extended with the link found in the
    line. A link name starting with '?' requests creation of a brand-new
    requirement/test; in that case the placeholder in the line is replaced
    by the generated name and the (possibly modified) string is returned.

    NOTE(review): regex group semantics assumed from usage — groups()[0] is
    the link name (possibly '?path:type:desc'), groups()[1] the link kind,
    groups()[2] the quoted destination req id; confirm against
    reqmodule.get_link_regex(). ``reqmodule.reqs`` appears to be a networkx
    graph (``.nodes[...]`` indexing; the file imports networkx) — confirm.

    Args:
        reqmodule: Requirement module providing get_link_regex, reqs,
            module_prefix and add_req_with_path.
        string: The source line to inspect.
        string_changed: Accumulator flag; set True when `string` is rewritten.

    Returns:
        Tuple (string, string_changed) — the possibly-rewritten line and the
        updated flag.
    """
    pattern = reqmodule.get_link_regex()
    res = re.search(pattern, string)
    if res:
        # Destination requirement id, with surrounding quotes removed.
        dest_req_id = reqmodule.module_prefix + '_' + res.groups()[2].replace("\"", "")
        dest_req = reqmodule.reqs.nodes[dest_req_id]
        link_name = res.groups()[0].split(':')
        # Create a new req/test if required
        if link_name[0].startswith("?"):
            req_type = link_name[1]
            # '..' is used as a space placeholder inside the source marker.
            desc = link_name[2].replace('..', ' ') if len(link_name) > 2 else ""
            name = reqmodule.add_req_with_path(link_name[0][1:], {"Req-Id": "",
                                                                  "Type": req_type,
                                                                  "Description": desc})
            # Replace the '?...' placeholder with the generated name in-place.
            string = string.replace(res.groups()[0], name)
            string_changed = True
        else:
            name = res.groups()[0].split(':')[0]
        link = ':'.join([res.groups()[1], name])
        # Append the link only once; 'downward_links' is a comma-separated string.
        if not link in dest_req['downward_links']:
            if not dest_req['downward_links'] == "":
                dest_req['downward_links'] += ','
            dest_req['downward_links'] += link
    return string, string_changed
def parse_requirement_links(reqmodule, source_root):
    """Scan the module's source tree for requirement links and update files.

    For every configured source path/extension, each file is copied line by
    line into a ``_tmp`` sibling while check_for_req_links_and_update()
    records links and possibly rewrites placeholder markers. The temp file
    replaces the original only when at least one line changed; otherwise it
    is discarded. Finally the updated requirements are written back.

    Only available from requirement-format version 0.3 and up.

    Args:
        reqmodule: Requirement module providing ``config`` (with
            'req_version', 'source_paths', 'source_extensions') and
            ``write_reqs``.
        source_root: Root directory that the configured source paths are
            relative to.
    """
    # NOTE(review): 'req_version' is compared numerically — assumes the
    # config stores it as a float, not a string; confirm.
    if reqmodule.config['req_version'] >= 0.3:
        for src_pth in reqmodule.config['source_paths']:
            for ext in reqmodule.config['source_extensions']:
                files = Path(source_root + '/' + src_pth).rglob('*.' + ext)
                for f in files:
                    print('Checking source file: %s' % f.name)
                    with open(f.absolute(), 'r') as src_file:
                        with open(str(f.absolute()) + "_tmp", 'w') as new_src_file:
                            file_changed = False
                            for line_nr, line in enumerate(src_file):
                                line, file_changed = check_for_req_links_and_update(reqmodule, line, file_changed)
                                new_src_file.write(line)
                    # Swap the temp file in only if something actually changed.
                    if file_changed:
                        os.remove(f.absolute())
                        os.rename(str(f.absolute()) + "_tmp", f.absolute())
                    else:
                        os.remove(str(f.absolute()) + "_tmp")
        # Persist any new/updated links recorded during the scan.
        reqmodule.write_reqs()
    else:
        print('Requirement link parsing is only available from req version 0.3 and up')
def add_test_results(module_path, test_results_file):
    """Append ``test_results_file`` to the module's temporary result list.

    The list is persisted as YAML in ``<module_path>/test_results.temp.yaml``;
    it is created on first use and extended on subsequent calls.

    Args:
        module_path: Directory of the requirement module.
        test_results_file: Path (or identifier) of the result file to record.
    """
    listing_path = module_path + '/test_results.temp.yaml'
    if os.path.exists(listing_path):
        # Extend the existing list.
        with open(listing_path, 'r') as testlist_file:
            test_result_files = yaml.safe_load(testlist_file)
        test_result_files.append(test_results_file)
    else:
        # First result for this module.
        test_result_files = [test_results_file]
    with open(listing_path, 'w') as testlist_file:
        yaml.dump(test_result_files, testlist_file)
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.