seq_id string | text string | repo_name string | sub_path string | file_name string | file_ext string | file_size_in_byte int64 | program_lang string | lang string | doc_type string | stars int64 | dataset string | pt string | api list |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
74667010345 | from math import sqrt, cos, sin, pi
import numpy as np
import pyvista as pv
# Affine rotation ####
#' Matrix of the affine rotation around an axis
#' @param theta angle of rotation in radians
#' @param P1,P2 the two points defining the axis of rotation
def AffineRotationMatrix(theta, P1, P2):
    """Return the 4x4 affine matrix of the rotation by `theta` radians
    about the axis through the points P1 and P2.

    Strategy: translate P1 to the origin, align the axis with z (a
    rotation about x followed by one about y), rotate by theta about z,
    then undo each step.
    """
    # Unit direction vector of the rotation axis.
    a, b, c = (P2 - P1) / np.linalg.norm(P2 - P1)
    # Length of the axis direction projected onto the yz-plane.
    d = sqrt(b * b + c * c)

    # Affine translations carrying P1 to the origin and back.
    T = np.eye(4)
    T[:3, 3] = -P1
    invT = np.eye(4)
    invT[:3, 3] = P1

    # Rotation about x bringing the axis into the xz-plane.
    # When d == 0 the axis already lies along x, so no rotation is needed.
    Rx = np.eye(4)
    if d > 0:
        Rx[1:3, 1:3] = np.array([[c / d, -b / d], [b / d, c / d]])
    invRx = Rx.T

    # Rotation about y bringing the axis onto the z-axis.
    Ry = np.eye(4)
    Ry[0, 0], Ry[0, 2] = d, -a
    Ry[2, 0], Ry[2, 2] = a, d
    invRy = Ry.T

    # Rotation by theta about z.
    Rz = np.eye(4)
    Rz[0, 0], Rz[0, 1] = cos(theta), -sin(theta)
    Rz[1, 0], Rz[1, 1] = sin(theta), cos(theta)

    return invT @ invRx @ invRy @ Rz @ Ry @ Rx @ T
# Rotation by 3*pi/4 about the line through O and A (the y-axis).
O = np.array([0.0, 0.0, 0.0])
A = np.array([0.0, 10.0, 0.0])
Rot = AffineRotationMatrix(3*pi/4, O, A)
def f(x, y, z, a, b):
    """Implicit function whose zero level set is the surface extracted below.

    Works elementwise on numpy arrays as well as on scalars; `a` and `b`
    are the two shape coefficients.
    """
    r2 = x * x + y * y
    q = a * x * x + b * y * y
    lhs = ((r2 + 1) * q
           + z * z * (b * x * x + a * y * y)
           - 2 * (a - b) * x * y * z
           - a * b * r2)
    rhs = q - x * y * z * (a - b)
    return lhs ** 2 - 4 * r2 * rhs ** 2
def inversion(omega, M):
    """Unit-sphere inversion of point M about the center (omega, 0, 0):
    M -> c + (M - c) / |M - c|^2.
    """
    center = np.array([omega, 0.0, 0.0])
    v = M - center
    return center + v / np.dot(v, v)
def params(alpha, gamma, mu):
    """Derive the transform parameters (omega, omegaT, ratio, R) from
    the three input coefficients.

    Requires alpha > gamma and mu > gamma so both square roots are real.
    """
    beta = sqrt(alpha*alpha - gamma*gamma)
    theta = beta * sqrt(mu * mu - gamma*gamma)
    omega = (alpha * mu + theta) / gamma
    # Named sub-expressions; the division order matches the derivation.
    num = (mu - gamma) * ((alpha - gamma) * (mu + gamma) + theta)
    den = (alpha + gamma) * (mu - gamma) + theta
    ratio = num / den / (alpha - gamma)
    R = (1/ratio * gamma * gamma
         / ((alpha - gamma) * (mu - gamma) + theta)
         * (mu - gamma) / ((alpha + gamma) * (mu - gamma) + theta))
    correction = ((beta * beta * (omega - gamma))
                  / ((alpha - gamma) * (mu + omega) - beta * beta)
                  / ((alpha + gamma) * (omega - gamma) + beta * beta))
    omegaT = omega - correction
    return (omega, omegaT, ratio, R)
# Input coefficients for the inversion transform.
alpha = 0.97
gamma = 0.32
mu = 0.56
omega, omegaT, ratio, R = params(alpha, gamma, mu)
OmegaT = np.array([omegaT, 0.0, 0.0])
# Coefficients of the implicit surface f.
a = ratio*ratio
b = 0.06
# generate data grid for computing the values (350 samples per axis)
X, Y, Z = np.mgrid[(-1.3):1.3:350j, (-1.6):1.6:350j, (-0.6):0.6:350j]
# create a structured grid
grid = pv.StructuredGrid(X, Y, Z)
# compute and assign the values; Fortran order matches PyVista's point ordering
values = f(X, Y, Z, a, b)
grid.point_data["values"] = values.ravel(order="F")
# compute the isosurface f(x, y, z) = 0
isosurf = grid.contour(isosurfaces=[0])
# convert to a PolyData mesh
mesh = isosurf.extract_geometry()
# rotate mesh about the y-axis
mesh.transform(Rot)
# scale by R, shift by OmegaT, then invert every vertex about (omega, 0, 0)
points = R * mesh.points
points = np.apply_along_axis(lambda M: inversion(omega, M + OmegaT), 1, points)
newmesh = pv.PolyData(points, mesh.faces)
# color by each vertex's distance from the origin in the PRE-inversion mesh
newmesh["dist"] = np.linalg.norm(mesh.points, axis=1)
pltr = pv.Plotter(window_size=[512, 512])
pltr.set_focus(newmesh.center)
pltr.set_position(newmesh.center - np.array([0.0, 0.0, 7.0]))
pltr.add_background_image("SpaceBackground.png")
pltr.add_mesh(
    newmesh, smooth_shading=True, cmap="turbo", specular=25,
    show_scalar_bar=False
)
pltr.show()
| stla/PyVistaMiscellanous | InvertedSolidMobiusStrip.py | InvertedSolidMobiusStrip.py | py | 3,809 | python | en | code | 4 | github-code | 36 | [
{
"api_name": "numpy.vstack",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "numpy.hstack",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "numpy.eye",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "numpy.array",
"line_number": ... |
27142738401 | from django.shortcuts import render, get_object_or_404
from .models import Animal
def index(request):
    """List every Animal on the clients index page."""
    return render(request, 'clientes/index.html', {
        'animais': Animal.objects.all(),
    })
def ver_animal(request, animal_id):
    """Show the detail page for one Animal; raise 404 when the id is unknown."""
    return render(request, 'clientes/ver_cliente.html', {
        'animal': get_object_or_404(Animal, id=animal_id),
    })
| LorenzoBorges/Projeto-Veterinario | clientes/views.py | views.py | py | 406 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "models.Animal.objects.all",
"line_number": 6,
"usage_type": "call"
},
{
"api_name": "models.Animal.objects",
"line_number": 6,
"usage_type": "attribute"
},
{
"api_name": "models.Animal",
"line_number": 6,
"usage_type": "name"
},
{
"api_name": "djang... |
38066575543 | # import the argmax function from numpy to get the index of the maximum value in an array
from numpy import argmax
# import the mnist dataset from keras, which contains 60,000 images of handwritten digits for training and 10,000 images for testing
from keras.datasets import mnist
# import the to_categorical function from keras to convert integer labels to one-hot encoded vectors
from keras.utils import to_categorical
# import the load_img function from keras to load an image from a file
from keras.utils import load_img
# import the img_to_array function from keras to convert an image to a numpy array
from keras.utils import img_to_array
# import the load_model function from keras to load a saved model from a file
from keras.models import load_model
# import the Sequential class from keras to create a linear stack of layers for the model
from keras.models import Sequential
# import the Conv2D class from keras to create a convolutional layer that applies filters to the input image and produces feature maps
from keras.layers import Conv2D
# import the MaxPooling2D class from keras to create a pooling layer that reduces the size of the feature maps by taking the maximum value in each region
from keras.layers import MaxPooling2D
# import the Dense class from keras to create a fully connected layer that performs a linear transformation on the input vector and applies an activation function
from keras.layers import Dense
# import the Flatten class from keras to create a layer that flattens the input tensor into a one-dimensional vector
from keras.layers import Flatten
# import the SGD class from keras to create a stochastic gradient descent optimizer with a learning rate and a momentum parameter
from keras.optimizers import SGD
# import matplotlib.pyplot as plt to plot and show images using matplotlib library
import matplotlib.pyplot as plt
# import os.path to check if a file exists in the current directory
import os.path
# import sys to exit the program if an invalid input is given by the user
import sys
from sklearn.model_selection import KFold
# define the model file name as a global variable
model_file_name = 'mnist_cnn_test_1.h5'
# define a function to load and prepare the train and test dataset
def load_dataset():
    """Return MNIST as (trainX, trainY, testX, testY): images reshaped to
    (n, 28, 28, 1) and labels one-hot encoded.
    """
    (trainX, trainY), (testX, testY) = mnist.load_data()
    # add the trailing channel axis expected by Conv2D
    trainX = trainX.reshape((-1, 28, 28, 1))
    testX = testX.reshape((-1, 28, 28, 1))
    return trainX, to_categorical(trainY), testX, to_categorical(testY)
# define a function to scale the pixel values of the train and test images
def prep_pixels(train, test):
    """Convert integer pixel arrays to float32 scaled into [0, 1]."""
    def _normalize(images):
        return images.astype('float32') / 255.0
    return _normalize(train), _normalize(test)
# define a function to create and compile a CNN model
def define_model():
    """Build and compile the CNN: two conv/pool stages, then a dense classifier.

    Output layer has 10 softmax units, one per digit class.
    """
    model = Sequential([
        Conv2D(32, (3, 3), activation='relu', kernel_initializer='he_uniform',
               input_shape=(28, 28, 1)),
        MaxPooling2D((2, 2)),
        Conv2D(64, (3, 3), activation='relu', kernel_initializer='he_uniform'),
        Conv2D(64, (3, 3), activation='relu', kernel_initializer='he_uniform'),
        MaxPooling2D((2, 2)),
        Flatten(),
        Dense(100, activation='relu', kernel_initializer='he_uniform'),
        Dense(10, activation='softmax'),
    ])
    # SGD with momentum; categorical cross-entropy matches the one-hot labels.
    opt = SGD(learning_rate=0.01, momentum=0.9)
    model.compile(optimizer=opt, loss='categorical_crossentropy',
                  metrics=['accuracy'])
    return model
# define a function to run the test harness for evaluating a model
def run_test_harness():
    """Train the CNN on MNIST, report test accuracy, and save the model
    to `model_file_name`.
    """
    trainX, trainY, testX, testY = load_dataset()
    trainX, testX = prep_pixels(trainX, testX)
    model = define_model()
    model.fit(trainX, trainY, epochs=10, batch_size=32, verbose=1)
    _, acc = model.evaluate(testX, testY, verbose=1)
    print('evaluate result > %.3f' % (acc * 100.0))
    model.save(model_file_name)
# define a function to load and prepare an image for prediction
def load_image(filename):
    """Load an image file as a normalized (1, 28, 28, 1) float32 array
    ready to be passed to the model for prediction.
    """
    # color_mode="grayscale" replaces the deprecated `grayscale=True` flag
    # of keras' load_img; the image is also resized to the model's input size.
    img = load_img(filename, color_mode='grayscale', target_size=(28, 28))
    img = img_to_array(img)
    # a single sample with a single channel
    img = img.reshape(1, 28, 28, 1)
    # scale pixel values into [0, 1], matching the training preprocessing
    img = img.astype('float32')
    img = img / 255.0
    return img
# define a function to load an image and predict the class using the model
def run_example(path):
    """Predict the digit in the image at `path` with the saved model,
    print the label, and display the image with the prediction as title.
    """
    img = load_image(path)
    model = load_model(model_file_name)
    prediction = model.predict(img)
    # index of the highest class probability is the predicted digit
    digit = argmax(prediction)
    print(digit)
    plt.imshow(img[0], cmap='gray')
    plt.title('Predicted label: ' + str(digit))
    plt.show()
# -----------------------------------------------
# -----------------------------------------------
# -----------------------------------------------
# ------------------ENTRY POINT------------------
# -----------------------------------------------
# -----------------------------------------------
# -----------------------------------------------
# Ask whether to retrain from scratch or reuse the saved model file.
re_train = input('re train data and evaluate model? (0 -> false | 1 -> true): ')
# Only "0", "1" or empty input is accepted; anything else aborts the program.
if re_train != "0" and re_train != "1" and re_train != "":
    print("input condition not satisfied")
    sys.exit()
# Reuse the saved model when it exists and retraining was not requested.
# NOTE(review): the `model` variable bound here is never used — run_example()
# loads the model from disk itself.
if os.path.isfile(model_file_name) and (re_train == "0" or re_train == ""):
    model = load_model(model_file_name)
else:
    run_test_harness()
# Classify a sample image with the (re)trained model.
p = 'test_0_1.png'
run_example(path=p)
{
"api_name": "keras.datasets.mnist.load_data",
"line_number": 42,
"usage_type": "call"
},
{
"api_name": "keras.datasets.mnist",
"line_number": 42,
"usage_type": "name"
},
{
"api_name": "keras.utils.to_categorical",
"line_number": 47,
"usage_type": "call"
},
{
"ap... |
37129616360 | import numpy as np
import cv2
import NeuralNetwork
import json
import os
import matplotlib.pyplot as plt
#defining the initial parameters and the learning rate
batch_size = 10
nn_hdim = 2048
learning_rate = 0.1
f1 = "relu"
f2 = "sigmoid"
threshold = 0.0001
sd_init = 0.01
sd_init_w2 = sd_init
def make_json(W1, W2, b1, b2, id1, id2, activation1, activation2, nn_h_dim, path_to_save):
    """Serialize the trained network parameters to a JSON file.

    The file is written into `path_to_save` under the name
    'trained_dict_<id1>_<id2>'.  W1/W2/b1/b2 are numpy arrays
    (shapes (1024, nn_h_dim), (nn_h_dim, 1), (1, nn_h_dim), (1, 1));
    they are stored as nested lists.
    """
    payload = {
        'weights': (W1.tolist(), W2.tolist()),
        'biases': (b1.tolist(), b2.tolist()),
        'nn_hdim': nn_h_dim,
        'activation_1': activation1,
        'activation_2': activation2,
        'IDs': (id1, id2),
    }
    file_name = 'trained_dict_{}_{}'.format(id1, id2)
    with open(os.path.join(path_to_save, file_name), 'w') as out:
        json.dump(payload, out, indent=4)
def load_image(prefix, number, data_vec, label_vec, is_training):
    """Load one grayscale sample image, append it (flattened, scaled to
    [0, 1]) to data_vec, and append its label to label_vec.

    prefix is "pos_" or "neg_" (label 1 / 0); is_training selects the
    data/training or data/validation directory.
    """
    # os.path.join keeps the path portable — the original built
    # Windows-only "data\\training\\..." strings by hand.
    subdir = "training" if is_training else "validation"
    path = os.path.join("data", subdir, prefix + number + ".png")
    image = cv2.imread(path, flags=cv2.IMREAD_GRAYSCALE)
    data_vec.append(image.flatten() / 255.0)
    label_vec.append(1 if prefix == "pos_" else 0)
def load_data(train_data, val_data, train_label, val_label):
    """Fill the given lists with the training and validation sets and
    return them converted to numpy arrays.

    Images 0-255 of each class go to training, 256-333 to validation.
    """
    for idx in range(256):
        load_image("neg_", str(idx), train_data, train_label, True)
        load_image("pos_", str(idx), train_data, train_label, True)
    for idx in range(256, 334):
        load_image("neg_", str(idx), val_data, val_label, False)
        load_image("pos_", str(idx), val_data, val_label, False)
    return (np.asarray(train_data), np.asarray(val_data),
            np.asarray(train_label), np.asarray(val_label))
def main():
    """Train the two-layer network with mini-batch SGD until the validation
    loss stops improving, then save the weights to JSON and plot the
    loss/accuracy learning curves.
    """
    convergence_flag = False
    previous_loss = np.inf
    counter = 0  # consecutive epochs satisfying the stopping condition
    accuracy_per_training_epoch = 0
    loss_per_training_epoch = 0
    train_data = []
    val_data = []
    train_label = []
    val_label = []
    epoch_training_loss = []
    epoch_validation_loss = []
    epoch_training_accuracy = []
    epoch_validation_accuracy = []
    train_data, val_data, train_label, val_label = load_data(train_data, val_data, train_label, val_label)
    my_net = NeuralNetwork.NeuralNetwork(learning_rate, f1, f2, sd_init, sd_init_w2)
    epoc = 0
    # baseline validation metrics before any training
    my_net.forward_pass(val_data, val_label)
    my_net.calculate_accuracy(val_label)
    print("Inintial validation loss: ", my_net.loss, "Inintial accuracy: ", my_net.accuracy)
    while not convergence_flag:
        batch_count = 0
        # reshuffle each epoch (same permutation for data and labels)
        shuffler = np.random.permutation(len(train_label))
        train_label = train_label[shuffler]
        train_data = train_data[shuffler]
        # halve the learning rate every 10 epochs
        if (not epoc % 10) and (epoc != 0):
            my_net.learning_rate = my_net.learning_rate / 2
        for i in range(0, len(train_label), batch_size):
            batch = train_data[i:batch_size + i, :]
            batch_labels = train_label[i:batch_size + i]
            my_net.forward_pass(batch, batch_labels)
            my_net.calculate_accuracy(batch_labels)
            accuracy_per_training_epoch += my_net.accuracy
            loss_per_training_epoch += my_net.loss
            my_net.backward_pass(batch_labels)
            my_net.compute_gradient(batch)
            batch_count += 1
        # average the accumulated per-batch metrics over the epoch, then reset
        accuracy_per_training_epoch = accuracy_per_training_epoch/(len(train_label)/batch_size)
        loss_per_training_epoch = loss_per_training_epoch/(len(train_label)/batch_size)
        epoch_training_accuracy.append(accuracy_per_training_epoch)
        epoch_training_loss.append(loss_per_training_epoch)
        accuracy_per_training_epoch = 0
        loss_per_training_epoch = 0
        # validation pass for this epoch
        my_net.forward_pass(val_data, val_label)
        my_net.calculate_accuracy(val_label)
        # NOTE(review): this counts epochs where the loss did NOT increase by
        # more than `threshold`; if the intent was "improvement smaller than
        # threshold" the sign should be previous_loss - my_net.loss <= threshold
        # — confirm.
        if (my_net.loss - previous_loss) <= threshold:
            counter += 1
        else:
            counter = 0
        # require at least 100 epochs AND 3 qualifying epochs in a row to stop
        if epoc > 100:
            convergence_flag = (counter >= 3)
        print("Validation loss: ", my_net.loss, "Accuracy:", my_net.accuracy, "learning rate:", my_net.learning_rate)
        previous_loss = my_net.loss
        epoch_validation_accuracy.append(my_net.accuracy)
        epoch_validation_loss.append(my_net.loss)
        epoc += 1
    ## plotting section-----------------------------------------------------------------------------------------------
    # NOTE(review): trained_dict is built but never used — make_json below
    # rebuilds the same structure when writing the file.
    trained_dict = {
        'weights': (my_net.W1, my_net.W2),
        'biases': (my_net.b1, my_net.b2),
        'nn_hdim': 2048,
        'activation_1': 'relu',
        'activation_2': 'sigmoid',
        'IDs': (204214928, 308407907)
    }
    json_path = ''
    make_json(my_net.W1, my_net.W2, my_net.b1, my_net.b2, '204214928', '308407907', 'relu', 'sigmoid', nn_hdim, json_path)
    # loss curves (top subplot) with the final values annotated
    plt.subplot(2, 1, 1)
    plt.plot(range(epoc), epoch_training_loss)
    plt.plot(range(epoc), epoch_validation_loss)
    plt.scatter(epoc, epoch_training_loss[epoc-1], marker='o')
    plt.scatter(epoc, epoch_validation_loss[epoc-1], marker='o')
    x = [epoc, epoc]
    n = [round(epoch_training_loss[epoc-1], 2), round(epoch_validation_loss[epoc-1], 2)]
    for i, txt in enumerate(n):
        plt.annotate(txt, (x[i], n[i]))
    plt.legend(["training", "validation"])
    plt.title('loss and accuracy as function of epoc number')
    plt.ylabel('loss [au]')
    # accuracy curves (bottom subplot) with the final values annotated
    plt.subplot(2, 1, 2)
    plt.plot(range(epoc), epoch_training_accuracy)
    plt.plot(range(epoc), epoch_validation_accuracy)
    plt.scatter(epoc, epoch_training_accuracy[epoc-1], marker='o')
    plt.scatter(epoc, epoch_validation_accuracy[epoc-1], marker='o')
    y = [epoc, epoc]
    s = [round(epoch_training_accuracy[epoc-1], 2), round(epoch_validation_accuracy[epoc-1], 2)]
    for i, txt in enumerate(s):
        plt.annotate(txt, (y[i], s[i]))
    plt.legend(["training", "validation"])
    plt.xlabel('epoc number')
    plt.ylabel('accuracy [%]')
    plt.show()
if __name__ == "__main__":
main() | leosegre/medic_ip_project | main.py | main.py | py | 6,594 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "os.path.join",
"line_number": 38,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 38,
"usage_type": "attribute"
},
{
"api_name": "json.dump",
"line_number": 42,
"usage_type": "call"
},
{
"api_name": "cv2.imread",
"line_number": 5... |
29126825853 | #Grupo PHP
#Kevin Cevallos
#María Camila Navarro
#Joffre Ramírez
import ply.lex as lex
reserved = {
'if': 'IF',
'else': 'ELSE',
'elseif': 'ELSEIF',
#'boolean': 'BOOLEAN',
#'float': 'FLOAT',
#'string': 'STRING',
'null': 'NULL',
'array': 'ARRAY',
#'object': 'OBJECT',
'break': 'BREAK',
'continue': 'CONTINUE',
'return': 'RETURN',
'for each': 'FOREACH',
'echo': 'ECHO',
'print': 'PRINT',
'print_r': 'PRINT_R',
'var_dump': 'VAR_DUMP',
'fgets': 'FGETS',
'fread': 'FREAD',
'fscanf': 'FSCANF',
'fpassthru': 'FPASSTHRU',
'fgetcsv': 'FGETCSV',
'fgetc': 'FGETC',
'file_get_contents': 'FILE_GET_CONTENTS',
'readfile': 'READFILE',
'file': 'FILE',
'parse_ini_file': 'PARSE_INI_FILE',
'implode': 'IMPLODE',
'explode': 'EXPLODE',
'new':'NEW',
'class':'CLASS',
'count': 'COUNT',
'sizeof': 'SIZEOF',
'array_push': 'ARRAY_PUSH',
'sort': 'SORT',
'asort': 'ASORT',
'ksort': 'KSORT',
'unset': 'UNSET',
'var_export': 'VAR_EXPORT',
'shuffle': 'SHUFFLE',
'array_merge': 'ARRAY_MERGE',
'array_search': 'ARRAY_SEARCH',
'array_rand': 'ARRAY_RAND',
'array_chunk': 'ARRAY_CHUNK',
'str_split': 'STR_SPLIT',
'preg_split': 'PREG_SPLIT',
'array_unique': 'ARRAY_UNIQUE',
'function' : 'FUNCTION',
'while' : 'WHILE',
'as' : 'AS'
}
tokens =(
[
#Operadores Matemáticos
'PLUS',
'MINUS',
'TIMES',
'DIVIDE',
'EQUALS',
'MODULO',
#Operadores Lógicos
'AND',
'OR',
'XOR',
'NOT',
#Símbolos
'LPAREN',
'RPAREN',
#'PEIROT',
'RCORCHET',
'LCORCHET',
'OBJECT_OPERATOR',
'COMA',
'OPEN',
'CLOSE',
'END',
'FLECHA',
#Variable
'ID',
#Número
'NUMBER',
'DECIMAL',
#Valor Boolean
'TRUE',
'FALSE',
#Cadena de texto
'TEXT',
#Operadores Comparación
'MAYORQUE',
'MENORQUE',
'IS_EQUAL',
'IS_IDENTICAL',
'IS_NOT_EQUAL',
'IS_NOT_IDENTICAL',
'IS_GREATER_OR_EQUAL',
'IS_SMALLER_OR_EQUAL',
'SPACESHIP',
#Nombre de Funciones
'FNOMBRE'
] + list(reserved.values()))
#Operadores Matemáticos
t_MODULO=r'%'
t_PLUS=r'\+'
t_MINUS=r'-'
t_TIMES=r'\*'
t_DIVIDE=r'/'
t_EQUALS = r'='
#Operadores Lógicos
t_AND = r'and'
t_OR = r'or'
t_XOR = r'xor'
t_NOT = r'!'
#Símbolos
t_OBJECT_OPERATOR=r'->'
t_LPAREN=r'\('
t_RPAREN=r'\)'
t_END = r';'
t_TEXT = r'".*"'
t_FLECHA = r'=>'
#t_PEIROT = r'\.'
t_OPEN = r'<\?php'
t_CLOSE = r'\?>'
t_RCORCHET=r'\}'
t_LCORCHET=r'\{'
t_COMA=r','
#Variable
t_ID = r'(\$([a-z]|[A-Z]))([a-zA-Z0-9]+)?'
#Valor Boolean
#t_TRUE = r'true'
#t_FALSE = r'false'
#Operadores Comparación
t_MAYORQUE = r'>'
t_MENORQUE = r'<'
t_IS_EQUAL = r'=='
t_IS_IDENTICAL = r'==='
t_IS_NOT_EQUAL= r'!='
t_IS_NOT_IDENTICAL= r'!=='
t_IS_GREATER_OR_EQUAL=r'>='
t_IS_SMALLER_OR_EQUAL=r'<='
t_SPACESHIP = r'<=>'
t_ignore = ' \t'
#Número
def t_DECIMAL(t):
r'\d+\.\d+'
t.value = float(t.value)
return t
def t_NUMBER(t):
r'\d+'
t.value = int(t.value)
return t
#Cadena de texto
#Palabras reservadas
def t_CLASS(t):
r'class'
return t
def t_ECHO(t):
r'echo'
return t
def t_NEW(t):
r'new'
return t
'''
def t_BOOLEAN(t):
r'boolean'
return t
def t_STRING(t):
r'string'
return t
'''
def t_TRUE(t):
r'true'
return t
def t_FALSE(t):
r'false'
return t
def t_NULL(t):
r'null'
return t
'''
def t_OBJECT(t):
r'object'
return t
'''
def t_BREAK(t):
r'break'
return t
def t_CONTINUE(t):
r'continue'
return t
def t_RETURN(t):
r'return'
return t
def t_FUNCTION(t):
r'function'
return t
def t_AS(t):
r'as'
return t
#Sentencia if
def t_IF(t):
r'if'
return t
def t_ELSE(t):
r'else'
return t
def t_ELSEIF(t):
r'elseif'
return t
#Lazo
def t_FOREACH(t):
r'foreach'
return t
def t_WHILE(t):
r'while'
return t
#Funciones print
def t_PRINT(t):
r'print'
return t
def t_PRINT_R(t):
r'print_r'
return t
def t_VAR_DUMP(t):
r'var_dump'
return t
#Funciones
def t_FGETS(t):
r'fgets'
return t
def t_FREAD(t):
r'fread'
return t
def t_FSCANF(t):
r'fscanf'
return t
def t_FPASSTHRU(t):
r'fpassthru'
return t
def t_FGETCSV(t):
r'fgetcsv'
return t
def t_FGETC(t):
r'fgetc'
return t
def t_FILE_GET_CONTENTS(t):
r'file_get_contents'
return t
def t_READFILE(t):
r'readfile'
return t
def t_FILE(t):
r'file'
return t
def t_PARSE_INI_FILE(t):
r'parse_ini_file'
return t
def t_IMPLODE(t):
r'implode'
return t
def t_EXPLODE(t):
r'explode'
return t
def t_ARRAY(t):
r'array'
return t
def t_COUNT(t):
r'count'
return t
def t_SIZEOF(t):
r'sizeof'
return t
def t_ARRAY_PUSH(t):
r'array_push'
return t
def t_SORT(t):
r'sort'
return t
def t_ASORT(t):
r'asort'
return t
def t_KSORT(t):
r'ksort'
return t
def t_UNSET(t):
r'unset'
return t
def t_VAR_EXPORT(t):
r'var_export'
return t
def t_SHUFFLE(t):
r'shuffle'
return t
def t_ARRAY_MERGE(t):
r'array_merge'
return t
def t_ARRAY_SEARCH(t):
r'array_search'
return t
def t_ARRAY_RAND(t):
r'array_rand'
return t
def t_ARRAY_CHUNK(t):
r'array_chunk'
return t
def t_STR_SPLIT(t):
r'str_split'
return t
def t_PREG_SPLIT(t):
r'preg_split'
return t
def t_ARRAY_UNIQUE(t):
r'array_unique'
return t
#Nombre de funciones
def t_FNOMBRE(t):
r'(?!or|and|xor)([a-z]|[A-Z])([a-zA-Z0-9_]+)?'
return t
def t_error(t):
print("No es reconocido '%s'"%t.value[0])
t.lexer.skip(1)
def t_newline(t):
r'\n+'
t.lexer.lineno += len(t.value)
lexer=lex.lex()
def analizar(dato):
    """Tokenize `dato` with the PHP lexer and print every token found."""
    lexer.input(dato)
    while True:
        token = lexer.token()
        if not token:
            # lexer.token() returns None once the input is exhausted
            break
        print(token)
archivo= open("archivo.txt")
for linea in archivo:
#print(">>"+linea)
#analizar(linea)
if len(linea)==0:
break
def ImprimirAnalizar(dato):
    """Tokenize `dato` line by line and return a report string.

    Each input line is echoed with a '-> ' prefix, followed by one token
    per line, and a trailing newline after each input line.
    """
    partes = []
    for linea in dato.split("\n"):
        partes.append("-> " + linea)
        lexer.input(linea)
        while True:
            tok = lexer.token()
            if not tok:
                break
            partes.append("\n")
            partes.append(str(tok))
        partes.append("\n")
    return "".join(partes)
| keanceva/ProyectoLP | lexicoLP.py | lexicoLP.py | py | 6,448 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "ply.lex.lex",
"line_number": 366,
"usage_type": "call"
},
{
"api_name": "ply.lex",
"line_number": 366,
"usage_type": "name"
}
] |
11539702751 | import os
import pygame
import pygame.color
from views.panelview import PanelView
class MenuView(PanelView):
    """Icon menu panel; tapping a row publishes a "viewchange" event on the bus."""

    def __init__(self, config, bus):
        PanelView.__init__(self, config, bus)
        self.fntRegText = pygame.font.Font(os.path.join(self.config.script_directory, "assets/Roboto-Regular.ttf"), 16)
        dashboard_icon = pygame.image.load(os.path.join(self.config.script_directory, 'assets/icon-dashboard.png'))
        graph_icon = pygame.image.load(os.path.join(self.config.script_directory, 'assets/icon-graph.png'))
        control_icon = pygame.image.load(os.path.join(self.config.script_directory, 'assets/icon-control.png'))
        setting_icon = pygame.image.load(os.path.join(self.config.script_directory, 'assets/icon-setting.png'))
        self.menu_items = [{"text": "Dashboard", "icon": dashboard_icon, "name": "dashboard"},
                           {"text": "Temperature Graph", "icon": graph_icon, "name": "graph"},
                           {"text": "Control", "icon": control_icon, "name": "control"},
                           {"text": "Settings", "icon": setting_icon, "name": "settings"}]
        self.items_per_page = 4
        self.page = 0
        self.background_color = pygame.color.Color("#EF3220")
        self.divider_color = pygame.color.Color("#CC302B")

    def handle_event(self, event):
        """Map a tap in the 40-200px band to a menu entry (one 40px row each)."""
        PanelView.handle_event(self, event)
        if event.type == pygame.MOUSEBUTTONUP:
            if 40 <= event.pos[1] < 200:
                # BUGFIX: use floor division — `/` yields a float in Python 3,
                # which is not a valid list index.
                item_pos = ((event.pos[1] - 40) // 40) + (self.page * self.items_per_page)
                if len(self.menu_items) > item_pos:
                    self.bus.publish("viewchange", self.menu_items[item_pos]["name"])

    def draw(self, screen):
        """Paint the background, then the current page of rows (icon, label, divider)."""
        PanelView.draw(self, screen)
        s = pygame.Surface((320, 200))
        s.fill(self.background_color)
        screen.blit(s, (0, 0))
        pygame.draw.line(screen, self.divider_color, (0, 40), (320, 40))
        # BUGFIX: parenthesize (self.page + 1) — the original
        # `self.page + 1 * self.items_per_page` sliced the wrong range
        # on every page after the first.
        start = self.page * self.items_per_page
        for index, item in enumerate(self.menu_items[start:(self.page + 1) * self.items_per_page]):
            file_name_lbl = self.fntRegText.render(item["text"], 1, (255, 255, 255))
            ypos = 40 + (index * 40)
            screen.blit(file_name_lbl, (40, ypos + 12))
            pygame.draw.line(screen, self.divider_color, (0, ypos + 40), (320, ypos + 40))
            screen.blit(item["icon"], (0, ypos))
{
"api_name": "views.panelview.PanelView",
"line_number": 7,
"usage_type": "name"
},
{
"api_name": "views.panelview.PanelView.__init__",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "views.panelview.PanelView",
"line_number": 9,
"usage_type": "name"
},
{
... |
74367228902 | import json
from flask import Flask, render_template, request, jsonify
import requests
app = Flask(__name__)
API_KEY = '0c20320445392a19d9b2a02ae290502c'
BASE_URL = 'http://api.weatherstack.com/current'
def get_weather(city):
    """Query the weatherstack 'current' endpoint for `city`.

    Returns a dict of display-ready fields on success, or a dict with a
    single 'error' key on failure.
    """
    params = {
        'access_key': API_KEY,  # NOTE(review): key is hardcoded at module level — move to env/config
        'query': city,
    }
    try:
        response = requests.get(BASE_URL, params=params)
        response.raise_for_status()
        data = response.json()
        # The API reports temperature in Celsius; derive the other scales.
        temperature_celsius = data['current']['temperature']
        temperature_fahrenheit = (temperature_celsius * 9/5) + 32
        temperature_kelvin = temperature_celsius + 273.15
        description = data['current']['weather_descriptions'][0]
        country = data['location']['country']
        longitude = data['location']['lon']
        latitude = data['location']['lat']
        humidity = data['current']['humidity']
        # Additional data
        visibility = data['current']['visibility']
        wind_speed = data['current']['wind_speed']
        wind_direction = data['current']['wind_dir']
        atmospheric_pressure = data['current']['pressure']
        time_zone = data['location']['utc_offset']
        return {
            'city': city,
            'country': country,
            'temperature': temperature_celsius,
            'fahrenheit': temperature_fahrenheit,
            'kelvin': temperature_kelvin,
            'description': description,
            'longitude': longitude,
            'latitude': latitude,
            'humidity': humidity,
            'visibility': visibility,
            'wind_speed': wind_speed,
            'wind_direction': wind_direction,
            'atmospheric_pressure': atmospheric_pressure,
            'time_zone': time_zone,
        }
    except requests.exceptions.RequestException as e:
        # Network-level failure (DNS, timeout, HTTP error status, ...).
        status_code = e.response.status_code if e.response is not None else None
        error_message = 'Network error. Please check your internet connection and try again.'
        return {
            'error': f'Error {status_code}: {error_message}',
        }
    except (KeyError, ValueError) as e:
        # The response parsed but lacked the expected fields — presumably
        # this also covers API error payloads without 'current'; confirm.
        status_code = 400
        error_message = 'Invalid data received from the server.'
        return {
            'error': f'Error {status_code}: {error_message}',
        }
@app.route('/', methods=['GET', 'POST'])
def index():
    # Render the search page; on POST, look up the submitted city and either
    # render the weather details or redisplay the form with an error message.
    error_message = None
    if request.method == 'POST':
        city = request.form.get('city')
        weather_data = get_weather(city)
        if 'error' in weather_data:
            error_message = weather_data['error']
        else:
            # Success: expand every weather field into the template context.
            return render_template('index.html', **weather_data)
    # GET request, or a POST that failed: show the (possibly empty) form.
    return render_template('index.html', city='', country='', temperature='', description='', error=error_message)
if __name__ == '__main__':
app.run(host='0.0.0.0', port=5000, debug=True)
# app.run(debug=True) | ruisu666/WeatherApp-Flask | app.py | app.py | py | 2,886 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "flask.Flask",
"line_number": 5,
"usage_type": "call"
},
{
"api_name": "requests.get",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "requests.exceptions",
"line_number": 53,
"usage_type": "attribute"
},
{
"api_name": "flask.request.method... |
73923124264 | #!/usr/bin/env python3
import requests
import json
import sys
from collections import OrderedDict
def get_versions():
    """Collect released plugin versions from the GitHub releases API.

    Returns a set of version strings extracted from tags of the form
    ``swamp-<version>``; an empty set if the request does not return 200.
    """
    releases_url = 'https://api.github.com/repos/jenkinsci/swamp-plugin/releases'
    found = set()
    response = requests.get(releases_url)
    if response.status_code == 200:
        for release in response.json():
            if 'tag_name' in release and release['tag_name'].startswith('swamp'):
                # Keep only the part after the "swamp-" prefix.
                found.add(release['tag_name'].partition('swamp-')[-1])
    return found
def get_stats():
    """Return an OrderedDict mapping plugin artifact names to download counts."""
    versions = get_versions()
    if isinstance(versions, str):
        # NOTE(review): get_versions() always returns a set, so this branch
        # looks unreachable -- presumably kept for a legacy string return.
        versions = versions.split()
    stats = OrderedDict()
    for version in versions:
        # Query the Jenkins artifactory UI endpoint for this .hpi artifact.
        data = {"type": "file",
                "repoKey": "releases",
                "path": "org/continuousassurance/swamp/jenkins/swamp/{version}/swamp-{version}.hpi".format(version=version)}
        response = requests.post('https://repo.jenkins-ci.org/ui/artifactgeneral', json=data)
        if response.status_code == 200:
            info = json.loads(response.text)
            stats['swamp-jenkins-plugin-{version}'.format(version=version)] = info['info']['downloaded']
        else:
            # Log the failed response but keep processing other versions.
            print(response, file=sys.stderr)
    return stats
if __name__ == '__main__':
print(get_stats())
| vamshikr/swamp-plugin-stats | src/jenkins.py | jenkins.py | py | 1,306 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "requests.get",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "collections.OrderedDict",
"line_number": 30,
"usage_type": "call"
},
{
"api_name": "requests.post",
"line_number": 37,
"usage_type": "call"
},
{
"api_name": "json.loads",
"... |
25874083390 | #! python3
# scraper for dark souls armor
import requests
import re
import sqlite3
from bs4 import BeautifulSoup
import time
# connecting to actual database
conn = sqlite3.connect("./databases/armor.db")
# testing connection
# conn = sqlite3.connect(":memory:")
c = conn.cursor()
with conn:
    c.execute("""CREATE TABLE IF NOT EXISTS armor(
        slot TEXT,
        name TEXT,
        durability REAL,
        weight REAL,
        physical REAL,
        strike REAL,
        slash REAL,
        thrust REAL,
        magic REAL,
        fire REAL,
        lightning REAL,
        poise REAL,
        bleed REAL,
        poison REAL,
        curse REAL)""")
# create tuple of item types which will be added to table
# tables on website contain no slot info but are always
# in this order so we map these to the items
item_slots = ("helmet", "chest", "gauntlets", "boots")
r = requests.get("https://darksouls.wiki.fextralife.com/Armor").text
soup = BeautifulSoup(r, "lxml")
delay = 0
# Visit every "+Set" armor-set page linked from the armor index.
for a in soup.find_all('a', class_ = "wiki_link wiki_tooltip", href=re.compile(r"\+Set")):
    # adaptive delay based on website response time
    time.sleep(delay)
    start = time.time()
    # website has both local and url reference links, this formats the request correctly
    if ".com" in a["href"]:
        page = requests.get(a["href"])
    else:
        page = requests.get(f"https://darksouls.wiki.fextralife.com{a['href']}")
    end = time.time()
    response_time = end - start
    # Back off by 10x the observed response time to be polite to the site.
    delay = response_time * 10
    # if bad response from the link we skip attempting to process and move to next link
    if not page.ok:
        continue
    # print(page.url)
    info = BeautifulSoup(page.text, "lxml")
    # attempts to find second table on page, which has relevant info
    try:
        table = info.find_all("table")[1]
    except IndexError:
        continue
    # creates the iterator for pulling item type since info is not in table
    slots = iter(item_slots)
    # each row contains the name and stats of one armor item in the set
    for row in table.tbody:
        # list for containing scraped info to be stored in db
        vals = []
        # exception handling when trying to parse table data
        # (tbody children include bare text nodes without find_all)
        try:
            data = row.find_all("td")
        except AttributeError as e:
            pass
            # print(e)
        else:
            # first row only has <th> tags, this skips it
            if not data:
                continue
            # Names of items are contained in <a> tags which link to their pages
            # This check skips the total row at bottom of table preventing StopIteration Exception
            elif data[0].find('a') is not None:
                # each page's table is in order of the slots iterator
                vals.append(next(slots))
                for line in data:
                    vals.append(line.text)
                    # print(line.text)
                # once vals is populated we print the values and insert them into db
                with conn:
                    print(f"Inserting {vals}")
                    # Parameterized INSERT: 15 placeholders match the 15 columns.
                    c.execute(f"INSERT INTO armor VALUES ({', '.join('?' for i in range(15))})", tuple(vals))
# finally insertion is confirmed by printing all values from db
with conn:
    for line in c.execute("SELECT * FROM armor"):
        print(line)
| Bipolarprobe/armorcalc | armorscrape.py | armorscrape.py | py | 3,289 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "sqlite3.connect",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "requests.get",
"line_number": 43,
"usage_type": "call"
},
{
"api_name": "bs4.BeautifulSoup",
"line_number": 45,
"usage_type": "call"
},
{
"api_name": "re.compile",
"line... |
33779283072 | #!/usr/local/bin/python3.7
#############
# Imports #
#############
import globalvars
import modules.conf as conf
import modules.misc as misc
import modules.platform as platform
import modules.special as special
import modules.subst as subst
import configparser
import os
import shutil
import subprocess
###############
# Functions #
###############
def print_info():
    """Print a summary of the detected platform and the package inventory."""
    print("\nInformation summary:\n--------------------------------------")
    fields = (
        ("OSNAME", globalvars.OSNAME),
        ("OSVERSION", globalvars.OSVERSION),
        ("OSRELEASE", globalvars.OSRELEASE),
        ("OSMAJOR", globalvars.OSMAJOR),
        ("OSARCH", globalvars.OSARCH),
        ("STDARCH", globalvars.STDARCH),
        ("TARGET_TRIPLE", globalvars.TGT_TRIPLE),
    )
    for label, value in fields:
        print(label + ": " + value)
    print("--------------------------------------\n")
    print(str(len(packages_present)) + " packages present:")
    for pkg in packages_present:
        print(pkg + ' ', end='')
    print("\n" + str(len(packages_missing)) + " packages missing:")
    for pkg in packages_missing:
        print(pkg + ' ', end='')
def ensure_distfile(mode, package):
    """Ensure that the compressed or uncompressed ("mode") distfile for a package is present."""
    # Pick directory, filename and checksum key depending on the mode.
    if mode == "compressed":
        distdir = globalvars.SUBSTITUTION_MAP['rbuild_dist_comp_dir']
        filename = misc.get_filename('distfiles', package)
        hashtype = "md5"
    elif mode == "uncompressed":
        distdir = globalvars.SUBSTITUTION_MAP['rbuild_dist_uncomp_dir']
        filename = os.path.basename(misc.get_tarball_uri(package))
        hashtype = "umd5"
    else:
        misc.die("Invalid ensure_distfile mode \"" + mode + "\"! Aborting...")
    absolute_path = distdir + '/' + filename
    # Materialize the file: fetch the tarball, or decompress the fetched one.
    if not os.path.isfile(absolute_path):
        if mode == "compressed":
            misc.fetch_file(conf.get_config_value('distfiles', package), distdir, filename)
        else:
            misc.decompress_file(globalvars.SUBSTITUTION_MAP['rbuild_dist_comp_dir'], misc.get_filename('distfiles', package), distdir)
    # Verify the checksum when one is configured for this package.
    checksum = misc.get_distfile_checksum(hashtype, package)
    misc.verbose_output("Checksum for \"" + package + "\": Comparing for " + mode + " distfile... ")
    if checksum == None:
        misc.verbose_output("skipping (not available)\n")
    else:
        if misc.get_file_hash(absolute_path) == checksum:
            misc.verbose_output("ok (matches)\n")
        else:
            if mode == "compressed":
                # One retry: re-fetch and compare again; mismatch twice is fatal.
                misc.verbose_output("Mismatch! Fetching again...\n")
                misc.remove_file_or_dir(absolute_path)
                misc.fetch_file(conf.get_config_value('distfiles', package), globalvars.SUBSTITUTION_MAP['rbuild_dist_comp_dir'], filename)
                misc.verbose_output("Comparing checksums once more... ")
                if misc.get_file_hash(absolute_path) == checksum:
                    misc.verbose_output("ok (matches)\n")
                else:
                    misc.die("Mismatch again! Bailing out...")
            else:
                # Re-extraction retry is not implemented; bail out.
                misc.verbose_output("Mismatch! Extracting again...\n")
                misc.die("Extract again!")
def ensure_extrafiles_present(package):
    """Ensure that the extra files for a package are present."""
    extradir = globalvars.SUBSTITUTION_MAP['rbuild_extra_dir'] + '/' + package
    extrafiles = conf.get_config_value('extrafiles', package).split(", ")
    # Optional list of MD5 checksums, parallel to extrafiles by index.
    md5s = None
    if package + "_md5" in conf.config['extrafiles']:
        md5s = conf.get_config_value('extrafiles', package + "_md5").split(", ")
    misc.verbose_output("Extra files: Ensuring directory \"" + extradir + "\" exists... ")
    if not os.path.isdir(extradir):
        try:
            os.makedirs(extradir)
        except OSError as e:
            misc.die("\nPatches error: Could not create directory \"" + extradir + "\"! Exiting.")
    misc.verbose_output("ok\n")
    i = 0
    for f in extrafiles:
        filename = os.path.basename(f)
        absolute_path = extradir + '/' + filename
        if not os.path.isfile(absolute_path):
            misc.fetch_file(f, extradir, filename)
        misc.verbose_output("Comparing checksums for extra file " + str(i) + "... ")
        if md5s == None:
            misc.verbose_output("skipping (not available)\n")
        else:
            if misc.get_file_hash(absolute_path) == md5s[i]:
                misc.verbose_output("ok (matches)\n")
            else:
                # One retry: re-fetch and compare again; a second mismatch is fatal.
                misc.verbose_output("Mismatch! Fetching again...\n")
                misc.remove_file_or_dir(absolute_path)
                misc.fetch_file(f, extradir, filename)
                misc.verbose_output("Comparing checksums once more... ")
                if misc.get_file_hash(absolute_path) == md5s[i]:
                    misc.verbose_output("ok (matches)\n")
                else:
                    misc.die("Mismatch again! Bailing out...")
        i = i + 1
def ensure_clean_wrkdir(package):
    """Ensure that a fresh work directory is present for the package to build.

    Removes any stale work directory, re-extracts the distfile (when the
    package has one) and copies the package's extra files into place.
    """
    wrkdir = misc.get_wrkdir(package)
    if os.path.exists(wrkdir):
        print("Old workdir found. Deleting... ", end='', flush=True)
        misc.remove_file_or_dir(wrkdir)
        print("ok")
    if package in conf.config['distfiles']:
        ensure_distfile("compressed", package)
        ensure_distfile("uncompressed", package)
        misc.extract_tarball(package)
    # The two identical 'extrafiles' checks of the original are merged here.
    if package in conf.config['extrafiles']:
        # Packages without a distfile need the workdir created explicitly.
        if not os.path.exists(wrkdir):
            try:
                os.makedirs(wrkdir)
            except OSError as e:
                # BUG FIX: the message previously referenced the undefined
                # name "directory", raising a NameError instead of reporting
                # the path that could not be created.
                misc.die("\nFilesystem error: Could not create directory \"" + wrkdir + "\"! Exiting.")
        ensure_extrafiles_present(package)
        extradir = globalvars.SUBSTITUTION_MAP['rbuild_extra_dir'] + '/' + package
        extrafiles = conf.get_config_value('extrafiles', package).split(", ")
        for f in extrafiles:
            absolute_path = extradir + '/' + os.path.basename(f)
            try:
                shutil.copy(absolute_path, wrkdir)
            except IOError as e:
                misc.die("\nFilesystem error: Could not copy \"" + absolute_path + "\" to \"" + wrkdir + "\"! Exiting.")
def ensure_patchfiles_present(package):
    """Check if patches required to build the package are present, try to fetch them otherwise."""
    patches = conf.get_config_value('patches', package).split(", ")
    # Optional list of MD5 checksums, parallel to patches by index.
    md5s = None
    if package + "_md5" in conf.config['patches']:
        md5s = conf.get_config_value('patches', package + "_md5").split(", ")
    patchdir = globalvars.SUBSTITUTION_MAP['rbuild_patches_dir'] + '/' + package
    misc.verbose_output("Patches: Ensuring directory \"" + patchdir + "\" exists... ")
    if not os.path.isdir(patchdir):
        try:
            os.makedirs(patchdir)
        except OSError as e:
            misc.die("\nPatches error: Could not create directory \"" + patchdir + "\"! Exiting.")
    misc.verbose_output("ok\n")
    # NOTE(review): this mirrors ensure_extrafiles_present almost line for
    # line -- a shared fetch-and-verify helper would remove the duplication.
    i = 0
    for uri in patches:
        filename = os.path.basename(uri)
        absolute_path = patchdir + '/' + filename
        if not os.path.isfile(absolute_path):
            misc.fetch_file(uri, patchdir, filename)
        misc.verbose_output("Comparing checksums for patch " + str(i) + "... ")
        if md5s == None:
            misc.verbose_output("skipping (not available)\n")
        else:
            if misc.get_file_hash(absolute_path) == md5s[i]:
                misc.verbose_output("ok (matches)\n")
            else:
                # One retry: re-fetch and compare again; a second mismatch is fatal.
                misc.verbose_output("Mismatch! Fetching again...\n")
                misc.remove_file_or_dir(absolute_path)
                misc.fetch_file(uri, patchdir, filename)
                misc.verbose_output("Comparing checksums once more... ")
                if misc.get_file_hash(absolute_path) == md5s[i]:
                    misc.verbose_output("ok (matches)\n")
                else:
                    misc.die("Mismatch again! Bailing out...")
        i = i + 1
def build_package(phase, package):
    """Configure, make or install (phase) a program (package)."""
    # Map each build phase to its progress label and environment kind.
    phase_info = {
        "configure": ("Configuring", "configure"),
        "make": ("Building", "make"),
        "install": ("Installing", "make"),
    }
    if phase not in phase_info:
        misc.die("\nError: Unknown build phase \"" + phase + "\"! Exiting.")
    activity, env_kind = phase_info[phase]
    env = misc.prepare_env(env_kind, package)
    print(activity + " \"" + package + "\"... ", end='', flush=True)
    wrkdir = misc.get_wrkdir(package)
    # Run every configured command for this phase; any failure is fatal.
    for cmd in conf.get_config_value(phase + "_cmds", package).split(', '):
        if misc.do_shell_cmd(cmd, wrkdir, env) != 0:
            misc.die("\nError: " + activity + " failed for package \"" + package + "\"! Exiting.")
    print("ok")
def ensure_missing_packages():
    """Build and install missing packages."""
    print()
    for p in packages_missing:
        ensure_clean_wrkdir(p)
        # Package-specific preparation hooks.
        if p == "uname":
            special.prepare_uname_source()
        if p in conf.config['patches']:
            ensure_patchfiles_present(p)
            if p == "bmake":
                special.prepare_bmake_patch()
            misc.patch_source(p)
        # configure/make phases are optional; install always runs.
        if p in conf.config['configure_cmds']:
            build_package('configure', p)
        if p in conf.config['make_cmds']:
            build_package('make', p)
        build_package('install', p)
##########
# Main #
##########
# Load and validate the configuration file.
conf.assert_conf_file_present()
conf.config = configparser.ConfigParser()
conf.config.read(globalvars.CONFNAME)
conf.assert_config_valid()
# Detect the host platform and assemble the target triple.
globalvars.OSNAME = platform.get_osname()
if not globalvars.OSNAME in globalvars.OPERATING_SYSTEMS_SUPPORTED:
    misc.die("Unsupported OS: \"" + globalvars.OSNAME + "\"!")
globalvars.OSRELEASE = platform.get_os_release()
globalvars.OSVERSION = platform.get_os_version()
globalvars.OSMAJOR = platform.get_os_major()
globalvars.OSARCH = platform.get_os_arch()
globalvars.STDARCH = platform.get_stdarch()
globalvars.TGT_TRIPLE = platform.assemble_triple()
print("System: Set for " + globalvars.TGT_TRIPLE + ".")
subst.populate_substitution_map()
misc.assert_external_binaries_available()
misc.ensure_fs_hierarchy('rjail')
misc.ensure_fs_hierarchy('rbuild')
print("Filesystem: Hierarchy is in place.")
# Take stock of packages and offer to build whatever is missing.
packages_present, packages_missing = misc.detect_packages()
print_info()
if len(packages_missing) > 0:
    a = 0
    while (a != "N" and a != "Y"):
        a = input("\n\nBuild missing packages now? (Y/N) ").upper()
    if (a == "N"):
        exit(0)
    ensure_missing_packages()
print("All done!")
| kraileth/miniraven | miniraven.py | miniraven.py | py | 10,771 | python | en | code | 1 | github-code | 36 | [
{
"api_name": "globalvars.OSNAME",
"line_number": 25,
"usage_type": "attribute"
},
{
"api_name": "globalvars.OSVERSION",
"line_number": 26,
"usage_type": "attribute"
},
{
"api_name": "globalvars.OSRELEASE",
"line_number": 27,
"usage_type": "attribute"
},
{
"api_na... |
74436895785 | # Author Chaudhary Hamdan
from functools import reduce
def factors(n):
    """Return the set of all positive divisors of n.

    Walks candidate divisors up to sqrt(n) and records each divisor together
    with its cofactor, so the scan is O(sqrt(n)). Returns the same result as
    the previous reduce-based one-liner for n >= 1; for n < 1 it returns an
    empty set instead of crashing with a TypeError (reduce of an empty
    iterable / int() of a complex square root).
    """
    if n < 1:
        return set()
    divisors = set()
    for i in range(1, int(n ** 0.5) + 1):
        if n % i == 0:
            divisors.add(i)
            divisors.add(n // i)
    return divisors
# For each test case, count the divisors of n whose k-th bit (1-based) is set.
t = int(input())
for _ in range(t):
    n,k = [int(x) for x in input().split()]
    # k == 0 or n == 0: no bit to test / no divisors -> answer 0.
    if k == 0:
        print(0)
        continue
    if n == 0:
        print(0)
        continue
    setbit = 1<<(k-1)
    c = 0
    fact = list(factors(n))
    for a in fact:
        if a & setbit:
            c += 1
    print(c)
| hamdan-codes/codechef-unrated-contests | Codingo21_CODINGO01.py | Codingo21_CODINGO01.py | py | 536 | python | en | code | 2 | github-code | 36 | [
{
"api_name": "functools.reduce",
"line_number": 6,
"usage_type": "call"
}
] |
16968865127 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django import forms
from django.contrib.auth import get_user_model
from django.contrib.auth.forms import UserChangeForm, UserCreationForm
from django.utils.translation import ugettext_lazy as _
USERNAME_FIELD_HELP_TEXT = _(
'Required field. Length must not exceed 30 characters. Only letters, digits and symbols @/./+/-/_ are accepted.'
)
USERNAME_LENGTH_VALIDATION_TEXT = _('Username length must not exceed 30 characters.')
# ==============================================================================
# CustomerCreationForm
# ==============================================================================
class CustomerCreationForm(UserCreationForm):
    """Admin form for creating customers based on the project user model."""

    class Meta(UserChangeForm.Meta):
        model = get_user_model()

    def __init__(self, *args, **kwargs):
        # NOTE(review): super() is anchored at UserCreationForm, so
        # UserCreationForm.__init__ itself is skipped -- confirm intentional.
        super(UserCreationForm, self).__init__(*args, **kwargs)
        self.fields['username'].help_text = USERNAME_FIELD_HELP_TEXT

    def clean(self):
        # Enforce the legacy 30-character username limit.
        if 'username' in self.cleaned_data:
            username = self.cleaned_data['username']
            if username and len(username) > 30:
                raise forms.ValidationError(USERNAME_LENGTH_VALIDATION_TEXT)

    def save(self, commit=True):
        # New customers are flagged as staff; the commit argument is
        # deliberately ignored and the save is deferred (commit=False) --
        # presumably the admin completes it. TODO confirm.
        self.instance.is_staff = True
        return super(CustomerCreationForm, self).save(commit=False)
# ==============================================================================
# CustomerChangeForm
# ==============================================================================
class CustomerChangeForm(UserChangeForm):
    """Admin form for editing customers; adds an optional, unique e-mail."""

    email = forms.EmailField(required=False)

    class Meta(UserChangeForm.Meta):
        model = get_user_model()

    def __init__(self, *args, **kwargs):
        # Seed the form's email field from the bound instance.
        initial = kwargs.get('initial', {})
        instance = kwargs.get('instance')
        initial['email'] = instance.email or ''
        super(CustomerChangeForm, self).__init__(initial=initial, *args, **kwargs)
        self.fields['username'].help_text = USERNAME_FIELD_HELP_TEXT

    def clean(self):
        # Enforce the legacy 30-character username limit.
        if 'username' in self.cleaned_data:
            username = self.cleaned_data['username']
            if username and len(username) > 30:
                raise forms.ValidationError(USERNAME_LENGTH_VALIDATION_TEXT)

    def clean_email(self):
        # Validate uniqueness of the address; empty input becomes None.
        email = self.cleaned_data.get('email').strip()
        if not email:
            # nullify empty email field in order to prevent unique index collisions
            return None
        customers = get_user_model().objects.filter(email=email)
        # Reject if any *other* customer already owns this address.
        if len(customers) and (len(customers) > 1 or self.instance != customers[0]):
            msg = _("A customer with the e-mail address ‘{email}’ already exists.")
            raise forms.ValidationError(msg.format(email=email))
        return email

    def save(self, commit=False):
        # Propagate the cleaned e-mail onto the instance before saving.
        self.instance.email = self.cleaned_data['email']
        return super(CustomerChangeForm, self).save(commit)
| infolabs/django-edw | backend/edw/admin/customer/forms.py | forms.py | py | 2,967 | python | en | code | 6 | github-code | 36 | [
{
"api_name": "django.utils.translation.ugettext_lazy",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "django.utils.translation.ugettext_lazy",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "django.contrib.auth.forms.UserCreationForm",
"line_number": 18,
... |
41774888432 | import time
import sys
sys.path.append("../")
from Utils_1 import Util
import pymysql
from lxml import etree
import requests
import http
from Utils_1.UA import User_Agent
import random
"""
数据来源:中华人民共和国商务部
来源地址:http://femhzs.mofcom.gov.cn/fecpmvc/pages/fem/CorpJWList_nav.pageNoLink.html?session=T&sp=1&sp=S+_t1.CORP_CDE%2C+_t1.id&sp=T&sp=S
数据描述:境外投资企业(机构)备案结果公开名录列表
目标表中文名:境外投资企业公开名录列表
目标表英文名:EXT_INV_ENTP_LST_INF
数据量:3 - 4 (万条)
作者:mcg
状态:完成
记录时间:2019.08.02
备注:对于cookie值,可以再优化。
"""
class FemhzsMofcomGov:
    """Crawler for the MOFCOM registry of outbound-investment enterprises.

    Pages through the public list endpoint and stores one row per
    enterprise into the EXT_INV_ENTP_LST_INF MySQL table.
    """

    def __init__(self):
        # Paginated list endpoint; the {} placeholder receives the page number.
        self.base_url = "http://femhzs.mofcom.gov.cn/fecpmvc/pages/fem/CorpJWList_nav.pageNoLink.html?" \
                        "session=T&sp={}&sp=S+_t1.CORP_CDE%2C+_t1.id&sp=T&sp=S"
        # Headers for the paginated requests (Cookie is refreshed in run()).
        self.headers = {
            "Accept": "text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8,"
                      "application/signed-exchange;v=b3",
            "Accept-Encoding": "gzip, deflate",
            "Accept-Language": "zh-CN,zh;q=0.9",
            "Connection": "keep-alive",
            "Cookie": "JSESSIONID=ACBDC30A40FD783627A075ADB9440B4D; insert_cookie=56224592 ",
            "Host": "femhzs.mofcom.gov.cn",
            "Referer": "http://femhzs.mofcom.gov.cn/fecpmvc/pages/fem/CorpJWList.html",
            "Upgrade-Insecure-Requests": "1",
            "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) "
                          "Chrome/75.0.3770.100 Safari/537.36",
        }
        # Headers for the initial request that obtains a session cookie.
        self.f_headers = {
            "Host": "femhzs.mofcom.gov.cn",
            "Connection": "keep-alive",
            "Upgrade-Insecure-Requests": "1",
            "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/75.0.3770.100 Safari/537.36",
            "Accept": "text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3",
            "Referer": "http://www.mofcom.gov.cn/publicService.shtml",
            "Accept-Encoding": "gzip, deflate",
            "Accept-Language": "zh-CN,zh;q=0.9"
        }
        self.util = Util()
        self.conn = self.util.MySQL()
        self.page = 0

    def insert2mysql(self, sql):
        # Execute an INSERT, committing on success and rolling back on
        # duplicate keys or malformed statements.
        try:
            self.conn.cursor().execute(sql)
            self.conn.commit()
            print("插入成功")
        except pymysql.err.IntegrityError:
            print("插入失败,数据重复")
            self.conn.rollback()
        except pymysql.err.ProgrammingError:
            print("数据异常,已回滚")
            self.conn.rollback()

    def run(self):
        # Hit the plain list page first to obtain a fresh session cookie.
        first_req = requests.get(url="http://femhzs.mofcom.gov.cn/fecpmvc/pages/fem/CorpJWList.html",
                                 headers=self.f_headers)
        cookies = first_req.headers["Set-Cookie"].replace(" Path=/fecpmvc,", "").replace("; path=/", "")
        # Read the total page count, retrying once on transient failures.
        try:
            page = etree.HTML(first_req.text).xpath(
                "//em[@class=\"m-page-total-num\"]/text()")[0]
        except TimeoutError:
            time.sleep(10)
            page = etree.HTML(first_req.text).xpath(
                "//em[@class=\"m-page-total-num\"]/text()")[0]
        except http.client.RemoteDisconnected:
            time.sleep(10)
            self.headers["User-Agent"] = random.choice(User_Agent)
            page = etree.HTML(first_req.text).xpath(
                "//em[@class=\"m-page-total-num\"]/text()")[0]
        print("共有:{} 页".format(page))
        for i in range(1, int(page)):
            print(i)
            # NOTE(review): duplicate "sp" keys in this dict literal collapse
            # to the last one, so only {"session": "T", "sp": "S"} survives;
            # the page number is actually carried in the URL. Confirm intended.
            data = {
                "session": "T",
                "sp": i,
                "sp": "S _t1.CORP_CDE, _t1.id",
                "sp": "T",
                "sp": "S",
            }
            self.headers["Cookie"] = cookies
            url = self.base_url.format(i)
            try:
                res = requests.get(url=url, headers=self.headers, data=data, timeout=15)
            except TimeoutError:
                time.sleep(10)
                res = requests.get(url=url, headers=self.headers, data=data, timeout=15)
            time.sleep(2)
            if res.status_code == 200:
                print("请求成功,开始解析")
                html = etree.HTML(res.text)
                # Each table row holds one enterprise: name, investor, country.
                for tr in html.xpath("//table[@class=\"m-table\"]/tbody/tr"):
                    company_name = tr.xpath("./td[1]/text()")[0].strip()
                    investor_name = tr.xpath("./td[2]/text()")[0].strip()
                    country = tr.xpath("./td[3]/text()")[0].strip()
                    # MD5 of the company name serves as the record id.
                    md5_company = self.util.MD5(company_name)
                    # Current timestamp for the insert-date column.
                    otherStyleTime = self.util.get_now_time()
                    # NOTE(review): values are interpolated directly into the
                    # SQL string -- prefer parameterized execute() arguments.
                    sql = "insert into EXT_INV_ENTP_LST_INF(ID, OVS_INV_ENTP_NM, OVS_INV_NM, INV_CNR, INPT_DT)values('%s','%s','%s','%s','%s')" % (md5_company, company_name, investor_name, country, otherStyleTime)
                    self.insert2mysql(sql)
            else:
                print("请求失败, HTTP Code:{}".format(res.status_code))
if __name__ == '__main__':
    # Re-run the full crawl once a day, indefinitely.
    while True:
        f = FemhzsMofcomGov()
        f.run()
        time.sleep(86400)
| 921016124/Spiders | module/对外投资/femhzs_mofcom_gov.py | femhzs_mofcom_gov.py | py | 5,506 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "sys.path.append",
"line_number": 3,
"usage_type": "call"
},
{
"api_name": "sys.path",
"line_number": 3,
"usage_type": "attribute"
},
{
"api_name": "Utils_1.Util",
"line_number": 54,
"usage_type": "call"
},
{
"api_name": "pymysql.err",
"line_numb... |
2050897159 | from openpyxl import load_workbook, Workbook
from django.core.management import BaseCommand
from django.db.utils import IntegrityError
from nomenclature.models import *
# Fixed service type names seeded into ServiceType on every import run.
SERVICE_TYPES = [
    'Not defined',
    'ПРОФ',
    'Лабораторное исследование',
    'Коммерческий профиль',
    'Услуга'
]
class Command(BaseCommand):
    """One-shot importer that rebuilds the nomenclature tables from the
    bundled Excel workbooks: groups/subgroups, services, profile membership,
    test sets, tests with reference ranges, and medicine data.

    NOTE(review): existing ServiceType/Group/SubGroup/Service rows are
    deleted before re-importing, so the command is destructive by design.
    """

    def handle(self, *args, **kwargs):
        # --- Service types: recreate the fixed list from SERVICE_TYPES. ---
        ServiceType.objects.all().delete()
        for type in SERVICE_TYPES:
            new_type = ServiceType(name=type)
            new_type.save()
        # --- Groups / subgroups: rebuild from the groups workbook. ---
        Group.objects.all().delete()
        SubGroup.objects.all().delete()
        wb = load_workbook('nomenclature/data/Список групп и подгрупп.xlsx', read_only=True)
        first_sheet = wb.worksheets[0]
        groups = {}
        for row in first_sheet.rows:
            # Accumulate subgroup rows under their parent group number.
            if str(row[0].value) not in groups.keys():
                groups[str(row[0].value)] = {'name':str(row[1].value), 'subgroups':[]}
                groups[str(row[0].value)]['subgroups'].append({'number':str(row[2].value), 'name':str(row[3].value)})
                continue
            groups[str(row[0].value)]['subgroups'].append({'number':str(row[2].value), 'name':str(row[3].value)})
        for group in groups.keys():
            new_group = Group(number=group, name=groups[group]['name'])
            new_group.save()
            for sg in groups[group]['subgroups']:
                new_sg = SubGroup(number=sg['number'], name=sg['name'], group=new_group)
                new_sg.save()
        # Fallback "99" group/subgroup for rows with unknown classification.
        new_group = Group(number='99', name='не определена!')
        new_group.save()
        new_sg = SubGroup(number='99', name='не определена!', group=new_group)
        new_sg.save()
        # --- Services: rebuild from the nomenclature workbook. ---
        Service.objects.all().delete()
        wb = load_workbook('nomenclature/data/nomenclature.xlsx', read_only=True)
        first_sheet = wb.worksheets[0]
        for row in first_sheet:
            # Resolve the parent group (column 0) or fall back to "99".
            if row[0].value is not None:
                try:
                    gr = Group.objects.get(number=str(row[0].value))
                except:
                    print(f'группа {row[0].value} не найдена, для теста {row[7].value}')
            else:
                try:
                    gr = Group.objects.get(number='99')
                except:
                    print(f'группа 99 не найдена, для теста {row[7].value}')
            # Resolve the subgroup (column 1) or fall back to "99".
            if row[1].value is not None:
                try:
                    sg = SubGroup.objects.get(number=str(row[1].value), group=gr)
                except:
                    print(f'подгруппа {row[1].value} не найдена, для теста {row[7].value}')
            else:
                try:
                    sg = SubGroup.objects.get(number='99')
                except:
                    print(f'подгруппа 99 не найдена, для теста {row[7].value}')
            if row[3].value is not None:
                type = ServiceType.objects.get(name=row[3].value)
            else:
                type = ServiceType.objects.get(name='Not defined')
            # Commercial profiles become Profile records, everything else Service.
            if type.name == 'Коммерческий профиль':
                new_record = Profile()
            else:
                new_record = Service()
            try:
                new_record.subgroup=sg
                new_record.salesability=True
                new_record.clients_group=row[2].value
                new_record.type=type
                new_record.classifier_1с=row[4].value
                new_record.tcle_code=row[5].value
                new_record.tcle_abbreviation=row[6].value
                new_record.code=row[7].value
                new_record.name=row[8].value
                new_record.blanks=row[9].value
                new_record.biomaterials=row[10].value
                new_record.container=row[11].value
                new_record.result_type=row[12].value
                new_record.due_date=row[13].value
                new_record.save()
            except IntegrityError:
                # Duplicate service code: skip the second occurrence.
                print(f'код {row[7].value} - не уникальный, второй раз не добавлен')
        # --- Profile composition: link profiles to their member services. ---
        profiles = Profile.objects.all()
        for profile in profiles:
            profile.services.clear()
        wb = load_workbook('nomenclature/data/profile.xlsx', read_only=True)
        first_sheet = wb.worksheets[0]
        for row in first_sheet.rows:
            try:
                profile = Profile.objects.get(code=row[0].value)
            except:
                # NOTE(review): on a missing profile the previous value of
                # `profile` is reused below -- confirm this is intended.
                print(f'Профиля {row[0].value} нет в номенклатуре')
            try:
                service = Service.objects.get(code=row[1].value)
            except:
                print(f'Услуги {row[1].value} нет в номенклатуре')
                continue
            profile.services.add(service)
        # --- Test sets: create TestSet rows and attach them to services. ---
        wb = load_workbook('nomenclature/data/test_set.xlsx', read_only=True)
        first_sheet = wb.worksheets[0]
        for row in first_sheet.rows:
            test_set = TestSet(key_code=row[1].value, name=row[2].value, department=row[3].value, addendum_key=row[4].value)
            test_set.save()
            try:
                service = Service.objects.get(code=row[0].value)
            except:
                print(f'Услуги {row[0].value} - нет в номенклатуре')
            service.test_set=test_set
            service.save()
        # --- Tests and their reference ranges. ---
        wb = load_workbook('nomenclature/data/test.xlsx', read_only=True)
        first_sheet = wb.worksheets[0]
        for num, row in enumerate(first_sheet.rows):
            # Create the Test only on its first occurrence.
            check_test = Test.objects.filter(keycode=row[0].value)
            if not check_test:
                test = Test(
                    keycode=row[0].value,
                    name=row[1].value,
                    short_name=row[1].value[:50],
                    result_type=row[4].value,
                    decimal_places=5,
                    kdl_test_key=row[2].value,
                    measure_unit=row[3].value,
                )
                test.save()
            # A non-empty position column (10) marks a reference-range row.
            if row[10].value is not None:
                test = Test.objects.get(keycode=row[0].value)
                new_reference = Reference(
                    test=test,
                    position=int(row[10].value[:-4])
                )
                if row[5].value is None:
                    new_reference.sex = 'Любой'
                if row[6].value is not None:
                    # Ages written as "YY.MMDD" are normalised to "YY:MM:DD".
                    if '.' in row[6].value:
                        age = row[6].value.split('.')
                        yy = '00' if not age[0] else age[0]
                        mm = age[1][:2]
                        dd = age[1][2:]
                        age_from = f'{yy}:{mm}:{dd}'
                        new_reference.age_from = age_from
                    else:
                        new_reference.age_from = f'{row[6].value}:00:00'
                if row[7].value is not None:
                    if '.' in row[7].value:
                        age = row[7].value.split('.')
                        yy = '00' if not age[0] else age[0]
                        mm = age[1][:2]
                        dd = age[1][2:]
                        age_to = f'{yy}:{mm}:{dd}'
                        new_reference.age_to = age_to
                    else:
                        new_reference.age_to = f'{row[7].value}:00:00'
                if row[8].value is not None:
                    new_reference.lower_normal_value = row[8].value
                if row[9].value is not None:
                    new_reference.upper_normal_value = row[9].value
                if row[13].value is not None:
                    new_reference.normal_text = row[13].value
                # NOTE(review): the next check repeats the normal_text
                # assignment keyed on column 11 -- looks accidental.
                if row[11].value is not None:
                    new_reference.normal_text = row[13].value
                if row[11].value is not None:
                    new_reference.clinic_interpretation_key = row[11].value
                if row[12].value is not None:
                    new_reference.clinic_interpretation_text = row[12].value
                new_reference.save()
        # --- Medicine data keyed by service code. ---
        wb = load_workbook('nomenclature/data/med.xlsx', read_only=True)
        first_sheet = wb.worksheets[0]
        for row in first_sheet.rows:
            try:
                service = Service.objects.get(code=row[0].value)
            except:
                print(f'Услуги {row[0].value} - нет в номенклатуре')
                continue
            new_record = MadicineData(
                service=service,
                alter_name_KC=row[1].value,
                alter_name=row[2].value,
                note=row[3].value,
                volume_pp=row[4].value,
                container_pp=row[5].value,
                guide_pp=row[6].value,
                transport_conditions=row[7].value,
                term_assign=row[8].value,
                description=row[9].value,
                method=row[10].value,
                factors=row[11].value,
                preparation=row[12].value,
            )
            new_record.save()
| Sin93/lab | nomenclature/management/commands/import.py | import.py | py | 9,200 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "django.core.management.BaseCommand",
"line_number": 15,
"usage_type": "name"
},
{
"api_name": "openpyxl.load_workbook",
"line_number": 28,
"usage_type": "call"
},
{
"api_name": "openpyxl.load_workbook",
"line_number": 53,
"usage_type": "call"
},
{
"... |
36315576627 | from __future__ import print_function
import sys
import json
import collections
import getopt
g_debug = False
g_indent = 4
def debug(s):
    """Print a DEBUG-prefixed message when global debugging is enabled."""
    if not g_debug:
        return
    print("DEBUG> " + s)
def usage(s):
    """Write the command-line usage text for program *s* to stderr."""
    lines = (
        "Usage: %s [-t <indent>] [-d] <[-f <json file>] | txt>\n" % s,
        "\t-t: --indent\n",
        "\t-d: --debug\n",
        "\t-f: --file\n",
        "e.g.\n",
        " %s -t 8 -d -f foo.json\n" % s,
        " %s --indent=4 --debug -f foo.json\n" % s,
        " %s '{\"A\": 123, \"B\": \"bcd\"}'\n" % s,
    )
    for line in lines:
        sys.stderr.write(line)
def main(argc, argv):
    """Parse options, load JSON from a file or the command line, and
    pretty-print it with the configured indent.

    Returns 0 on success, 1 on usage errors.
    """
    json_file = None
    options, rargv = getopt.getopt(argv[1:],
                                   ":f:t:dh",
                                   ["file=", "indent=", "debug", "help"])
    for opt, arg in options:
        if opt in ("-d", "--debug"):
            global g_debug
            g_debug = True
        elif opt in ("-t", "--indent"):
            global g_indent
            g_indent = int(arg)
        elif opt in ("-f", "--file"):
            json_file = arg
        elif opt in ("-h", "--help"):
            usage(argv[0])
            return 1
        else:
            usage(argv[0])
            return 1
    # Re-count only the non-option arguments.
    argc = len(rargv)
    if json_file is None:
        # No -f: the JSON text must be the first positional argument.
        if argc == 0:
            usage(argv[0])
            return 1
        txt = rargv[0]
    else:
        with open(json_file, 'r') as f:
            txt = ''.join(f.readlines())
    # OrderedDict hook keeps the original key order in the output.
    obj = json.loads(txt, object_pairs_hook=collections.OrderedDict)
    debug(str(type(txt)))
    debug(txt)
    debug(str(type(obj)))
    debug(str(obj))
    out = json.dumps(obj, indent=g_indent)
    print(out)
    return 0
if __name__ == '__main__':
    # Exit with main()'s return code so shell pipelines see failures.
    sys.exit(main(len(sys.argv), sys.argv))
| idorax/vCodeHub | sharpsword/python/jsonfmt.py | jsonfmt.py | py | 1,840 | python | en | code | 1 | github-code | 36 | [
{
"api_name": "sys.stderr.write",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "sys.stderr",
"line_number": 17,
"usage_type": "attribute"
},
{
"api_name": "sys.stderr.write",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "sys.stderr",
"l... |
35936751903 | import covasim as cv
import pandas as pd
import sciris as sc
import pylab as pl
import numpy as np
from matplotlib import ticker
import datetime as dt
import matplotlib.patches as patches
import seaborn as sns
import matplotlib as mpl
from matplotlib.colors import LogNorm
# Filepaths
resultsfolder = 'sweeps'
sensfolder = 'sweepssens'
figsfolder = 'figs'
process = False  # set True to (re)aggregate raw sweep objects into summary files

# Parameter levels
T = sc.tic()
tlevels = [0.067, 0.1, 0.15, 0.19]
vlevels = np.arange(0, 5) / 4
mlevels = np.arange(0, 4) / 4
nt, nv, nm = len(tlevels), len(vlevels), len(mlevels)

# Fonts and sizes for all figures
font_size = 26
font_family = 'Proxima Nova'
pl.rcParams['font.size'] = font_size
pl.rcParams['font.family'] = font_family

################################################################################################
# Do processing if required
################################################################################################

if process:
    # Aggregate each saved multisim into per-(testing, venue, mask) summaries.
    for thisfig in [resultsfolder,sensfolder]:
        results = {'cum_infections': {}, 'r_eff': {}, 'new_infections':{}, 'cum_quarantined':{}}
        for future_test_prob in tlevels:
            for name in ['cum_infections', 'r_eff', 'new_infections','cum_quarantined']: results[name][future_test_prob] = {}
            for venue_trace_prob in vlevels:
                for name in ['cum_infections', 'r_eff', 'new_infections','cum_quarantined']: results[name][future_test_prob][venue_trace_prob] = []
                for mask_uptake in mlevels:
                    print(f'mask_uptake: {mask_uptake}, venue_trace_prob: {venue_trace_prob}, future_test_prob: {future_test_prob}')
                    msim = sc.loadobj(f'{thisfig}/nsw_tracingsweeps_T{int(future_test_prob * 100)}_M{int(mask_uptake * 100)}_V{int(venue_trace_prob * 100)}.obj')
                    # Cumulative counts are taken relative to day 244.
                    results['cum_quarantined'][future_test_prob][venue_trace_prob].append(msim.results['cum_quarantined'].values[-1]-msim.results['cum_quarantined'].values[244])
                    results['cum_infections'][future_test_prob][venue_trace_prob].append(msim.results['cum_infections'].values[-1]-msim.results['cum_infections'].values[244])
                    results['r_eff'][future_test_prob][venue_trace_prob].append(msim.results['r_eff'].values[-1])
                    results['new_infections'][future_test_prob][venue_trace_prob].append(msim.results['new_infections'].values)
        sc.saveobj(f'{thisfig}/nsw_sweep_results.obj', results)

#else:
#    results = sc.loadobj(f'{resultsfolder}/nsw_sweep_results.obj')
################################################################################################################
# Figure 2 and S2: grids of new infections
################################################################################################################
for thisfig in [resultsfolder, sensfolder]:
# Fonts and sizes
fig = pl.figure(figsize=(24,16))
results = sc.loadobj(f'{thisfig}/nsw_sweep_results.obj')
# Subplot sizes
xgapl = 0.05
xgapm = 0.017
xgapr = 0.05
ygapb = 0.05
ygapm = 0.017
ygapt = 0.05
nrows = nt
ncols = nv
dx = (1-(ncols-1)*xgapm-xgapl-xgapr)/ncols
dy = (1-(nrows-1)*ygapm-ygapb-ygapt)/nrows
nplots = nrows*ncols
ax = {}
colors = pl.cm.GnBu(np.array([0.4,0.6,0.8,1.]))
labels = ['0% masks', '25% masks', '50% masks', '75% masks']
epsx = 0.003
epsy = 0.008
llpad = 0.01
rlpad = 0.005
if thisfig==resultsfolder:
pl.figtext(xgapl+dx*nv+xgapm*(nv-1)+rlpad, ygapb+(ygapm+dy)*0+epsy, ' 90% testing ', rotation=90, fontsize=30, fontweight='bold', bbox={'edgecolor':'none', 'facecolor':'silver', 'alpha':0.5, 'pad':4})
pl.figtext(xgapl+dx*nv+xgapm*(nv-1)+rlpad, ygapb+(ygapm+dy)*1+epsy, ' 80% testing ', rotation=90, fontsize=30, fontweight='bold', bbox={'edgecolor':'none', 'facecolor':'silver', 'alpha':0.5, 'pad':4})
pl.figtext(xgapl+dx*nv+xgapm*(nv-1)+rlpad, ygapb+(ygapm+dy)*2+epsy, ' 65% testing ', rotation=90, fontsize=30, fontweight='bold', bbox={'edgecolor':'none', 'facecolor':'silver', 'alpha':0.5, 'pad':4})
pl.figtext(xgapl+dx*nv+xgapm*(nv-1)+rlpad, ygapb+(ygapm+dy)*3+epsy, ' 50% testing ', rotation=90, fontsize=30, fontweight='bold', bbox={'edgecolor':'none', 'facecolor':'silver', 'alpha':0.5, 'pad':4})
elif thisfig==sensfolder:
pl.figtext(xgapl+dx*nv+xgapm*(nv-1)+rlpad, ygapb+(ygapm+dy)*0+epsy, ' 90% symp. testing \n 60% contact testing ', rotation=90, fontsize=26, fontweight='bold', bbox={'edgecolor':'none', 'facecolor':'silver', 'alpha':0.5, 'pad':4})
pl.figtext(xgapl+dx*nv+xgapm*(nv-1)+rlpad, ygapb+(ygapm+dy)*1+epsy, ' 80% symp. testing \n 50% contact testing ', rotation=90, fontsize=26, fontweight='bold', bbox={'edgecolor':'none', 'facecolor':'silver', 'alpha':0.5, 'pad':4})
pl.figtext(xgapl+dx*nv+xgapm*(nv-1)+rlpad, ygapb+(ygapm+dy)*2+epsy, ' 65% symp. testing \n 40% contact testing ', rotation=90, fontsize=26, fontweight='bold', bbox={'edgecolor':'none', 'facecolor':'silver', 'alpha':0.5, 'pad':4})
pl.figtext(xgapl+dx*nv+xgapm*(nv-1)+rlpad, ygapb+(ygapm+dy)*3+epsy, ' 50% symp. testing \n 30% contact testing ', rotation=90, fontsize=26, fontweight='bold', bbox={'edgecolor':'none', 'facecolor':'silver', 'alpha':0.5, 'pad':4})
pl.figtext(xgapl+(dx+xgapm)*0+epsx, ygapb+dy*nm+ygapm*(nm-1)+llpad, ' 0% tracing ', fontsize=30, fontweight='bold', bbox={'edgecolor':'none', 'facecolor':'silver', 'alpha':0.5, 'pad':4})
pl.figtext(xgapl+(dx+xgapm)*1+epsx, ygapb+dy*nm+ygapm*(nm-1)+llpad, ' 25% tracing ', fontsize=30, fontweight='bold', bbox={'edgecolor':'none', 'facecolor':'silver', 'alpha':0.5, 'pad':4})
pl.figtext(xgapl+(dx+xgapm)*2+epsx, ygapb+dy*nm+ygapm*(nm-1)+llpad, ' 50% tracing ', fontsize=30, fontweight='bold', bbox={'edgecolor':'none', 'facecolor':'silver', 'alpha':0.5, 'pad':4})
pl.figtext(xgapl+(dx+xgapm)*3+epsx, ygapb+dy*nm+ygapm*(nm-1)+llpad, ' 75% tracing ', fontsize=30, fontweight='bold', bbox={'edgecolor':'none', 'facecolor':'silver', 'alpha':0.5, 'pad':4})
pl.figtext(xgapl+(dx+xgapm)*4+epsx, ygapb+dy*nm+ygapm*(nm-1)+llpad, ' 100% tracing ', fontsize=30, fontweight='bold', bbox={'edgecolor':'none', 'facecolor':'silver', 'alpha':0.5, 'pad':4})
# Extract plot values
def plinf(pn, what='new_infections'):
# Get series for this plot number
t = list(results['new_infections'].keys())[(nplots-1-pn)//nv]
v = list(results['new_infections'][t].keys())[pn%nv]
if what =='new_infections':
#return np.array(([results['new_infections'][t][v][mm][214:] for mm in range(nm)]))
return np.array([[results['new_infections'][t][v][mm][200+i:214+i].sum() / 14 for i in range(306-214)] for mm in range(nm)])
elif what == 'cum_infections':
return results['cum_infections'][t][v]
@ticker.FuncFormatter
def date_formatter(x, pos):
return (cv.date('2020-09-30') + dt.timedelta(days=x)).strftime('%d-%b')
for pn in range(nplots):
ax[pn] = pl.axes([xgapl+(dx+xgapm)*(pn%ncols), ygapb+(ygapm+dy)*(pn//ncols), dx, dy])
data = plinf(pn)
for mi,mval in enumerate(mlevels):
ax[pn].plot(range(len(data[mi,:])), data[mi,:], '-', lw=4, c=colors[mi], label=labels[mi], alpha=1.0)
val = sc.sigfig(plinf(pn, what='cum_infections')[mi],3) if plinf(pn, what='cum_infections')[mi]<100 else sc.sigfig(plinf(pn, what='cum_infections')[mi],2)
ax[pn].text(0.1, 180-mi*15, val.rjust(6), fontsize=20, family='monospace', color=colors[mi])
ax[pn].set_ylim(0, 200)
ax[pn].xaxis.set_major_formatter(date_formatter)
if pn==4: pl.legend(loc='upper right', frameon=False, fontsize=20)
if pn not in [0,5,10,15]:
ax[pn].set_yticklabels([])
else:
ax[pn].set_ylabel('New infections')
if pn not in range(nv):
ax[pn].set_xticklabels([])
else:
xmin, xmax = ax[pn].get_xlim()
ax[pn].set_xticks(pl.arange(xmin+5, xmax, 40))
if thisfig==resultsfolder: figname = figsfolder+'/fig2_grid.png'
elif thisfig==sensfolder: figname = figsfolder+'/figS2_grid.png'
cv.savefig(figname, dpi=100)
#d = {'testing': [0.067]*nv*nm+[0.1]*nv*nm+[0.15]*nv*nm+[0.19]*nv*nm, 'tracing': [0.0]*nm+[0.25]*nm+[0.5]*nm+[0.75]*nm+[1.0]*nm+[0.0]*nm+[0.25]*nm+[0.5]*nm+[0.75]*nm+[1.0]*nm+[0.0]*nm+[0.25]*nm+[0.5]*nm+[0.75]*nm+[1.0]*nm+[0.0]*nm+[0.25]*nm+[0.5]*nm+[0.75]*nm+[1.0]*nm, 'masks': [0.0,0.25,0.5,0.75]*nt*nv}
#d['val'] = []
#for t in tlevels:
# for v in vlevels:
# d['val'].extend(sc.sigfig(results['cum_infections'][t][v],3))
#import pandas as pd
#df = pd.DataFrame(d)
#df.to_excel('sweepresults.xlsx')
################################################################################################################
# Figure 3: bar plot of cumulative infections
################################################################################################################
mainres = sc.loadobj(f'{resultsfolder}/nsw_sweep_results.obj')
sensres = sc.loadobj(f'{sensfolder}/nsw_sweep_results.obj')
# Subplot sizes
xgapl = 0.07
xgapm = 0.02
xgapr = 0.02
ygapb = 0.1
ygapm = 0.02
ygapt = 0.08
nrows = 1
ncols = 2
dx = (1-(ncols-1)*xgapm-xgapl-xgapr)/ncols
dy = (1-(nrows-1)*ygapm-ygapb-ygapt)/nrows
nplots = nrows*ncols
ax = {}
colors = pl.cm.GnBu(np.array([0.4,0.6,0.8,1.]))
mlabels = ['0% masks', '25% masks', '50% masks', '75% masks']
tlabels = ['50%', '65%', '80%', '90%']
fig = pl.figure(figsize=(24,8*nrows))
x = np.arange(len(tlabels))
width = 0.2 # the width of the bars
# Extract data
datatoplot = {}
datatoplot[0] = np.array([[mainres['cum_infections'][t][1.0][mi] for t in tlevels] for mi in range(nm)])
datatoplot[1] = np.array([[sensres['cum_infections'][t][1.0][mi] for t in tlevels] for mi in range(nm)])
#datatoplot[2] = np.array([[mainres['cum_quarantined'][t][1.0][mi] for t in tlevels] for mi in range(nm)])
#datatoplot[3] = np.array([[sensres['cum_quarantined'][t][1.0][mi] for t in tlevels] for mi in range(nm)])
# Headings
pl.figtext(xgapl+0.001, ygapb+dy*nrows+ygapm*(nrows-1)+0.01, ' Asymptomatic testing equal to symptomatic testing ',
fontsize=30, fontweight='bold', bbox={'edgecolor':'none', 'facecolor':'silver', 'alpha':0.5, 'pad':4})
pl.figtext(xgapl+xgapm+dx+0.001, ygapb+dy*nrows+ygapm*(nrows-1)+0.01, ' Asymptomatic testing lower than symptomatic testing ',
fontsize=30, fontweight='bold', bbox={'edgecolor':'none', 'facecolor':'silver', 'alpha':0.5, 'pad':4})
# Make plots
for pn in range(nplots):
ax[pn] = pl.axes([xgapl+(dx+xgapm)*(pn%ncols), ygapb+(ygapm+dy)*(pn//ncols), dx, dy])
data = datatoplot[pn]
for mi,mval in enumerate(mlevels):
ax[pn].bar(x+width*(mval*4-1.5), data[mi,:], width, color=colors[mi], label=mlabels[mi], alpha=1.0)
ax[pn].set_xticks(x)
ax[pn].set_xticklabels(tlabels)
if pn <2:
ax[pn].set_ylim(0, 20e3)
ax[pn].set_xlabel('Symptomatic testing rate')
else:
ax[pn].set_ylim(0, 250e3)
sc.boxoff()
if pn in [0,2]:
ax[pn].set_ylabel('Cumulative infections')
if pn==1:
pl.legend(loc='upper right', frameon=False, fontsize=20)
ax[pn].set_yticklabels([])
cv.savefig(f'{figsfolder}/fig3_bars.png', dpi=100)
################################################################################################################
# Figure X: trade-off heatmaps
################################################################################################################
# Subplot sizes
xgapl = 0.07
xgapm = 0.02
xgapr = 0.02
ygapb = 0.3
ygapm = 0.02
ygapt = 0.08
nrows = 1
ncols = nv
dx = (1-(ncols-1)*xgapm-xgapl-xgapr)/ncols
dy = (1-(nrows-1)*ygapm-ygapb-ygapt)/nrows
nplots = nrows*ncols
ax = {}
# Create figure
fig = pl.figure(figsize=(24,10))
colors = pl.cm.GnBu(np.array([0.4,0.6,0.8,1.]))
mlabels = ['0% masks', '25% masks', '50% masks', '75% masks']
tlabels = ['50%', '65%', '80%', '90%']
vlabels = ['0% tracing', '25% tracing', '50% tracing', '75% tracing', '100% tracing']
M, T = np.meshgrid(mlevels, tlevels)
mt = M.reshape(nt*nm,)
tt = T.reshape(nt*nm,)
cmin, cmax = 0., 5.
lev_exp = np.arange(0., 5.1, 0.1)
levs = np.power(10, lev_exp)
# Load objects
for pn,vl in enumerate(vlevels):
# Load in scenario multisims
zi1 = np.array([mainres['cum_infections'][ti][vl] for ti in tlevels])
z = zi1.reshape(nt*nm,)
# Set axis and plot
ax[pn] = pl.axes([xgapl+(dx+xgapm)*(pn%ncols), ygapb+(ygapm+dy)*(pn//ncols), dx, dy])
im = ax[pn].imshow(zi1, cmap='Oranges')
# Annotate
for i in range(nm):
for j in range(nt):
c = sc.sigfig(zi1[j, i],3)
ax[pn].text(i, j, str(c), va='center', ha='center')
# Axis and plot labelling
if pn == 0:
ax[pn].set_ylabel('Symptomatic testing rate', fontsize=24, labelpad=20)
ax[pn].set_xlabel('Mask uptake')
ax[pn].set_title(vlabels[pn])
ax[pn*100] = pl.axes([xgapl+(dx+xgapm)*(pn%ncols), 0.05, dx, 0.1])
cbar = pl.colorbar(im, cax=ax[pn*100])
cv.savefig(f'{figsfolder}/figX_heatmaps.png', dpi=100)
sc.toc(T)
| optimamodel/covid_nsw | 1_submission/plot_nsw_sweeps.py | plot_nsw_sweeps.py | py | 13,309 | python | en | code | 2 | github-code | 36 | [
{
"api_name": "sciris.tic",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "numpy.arange",
"line_number": 22,
"usage_type": "call"
},
{
"api_name": "numpy.arange",
"line_number": 23,
"usage_type": "call"
},
{
"api_name": "pylab.rcParams",
"line_numbe... |
17793223864 | from dgl.nn.pytorch.conv import SAGEConv
import torch
import torch.nn as nn
import torch.nn.functional as F
import time
import numpy as np
from dgl import DGLGraph
from dgl.data import citation_graph as citegrh
import networkx as nx
class GraphSAGE(nn.Module):
    """GraphSAGE node-classification model built from stacked ``SAGEConv`` layers.

    Architecture: one input layer, ``n_layers - 1`` hidden layers, and one
    output layer (no activation after the last layer). Dropout is applied to
    the input features and after every activation.
    """

    def __init__(self,
                 in_feats,
                 n_hidden,
                 n_classes,
                 n_layers,
                 activation,
                 dropout,
                 aggregator_type):
        super(GraphSAGE, self).__init__()
        self.dropout = nn.Dropout(dropout)
        self.activation = activation
        # Layer widths: in_feats -> n_hidden (repeated) -> n_classes,
        # i.e. n_layers + 1 SAGEConv layers in total.
        dims = [in_feats] + [n_hidden] * n_layers + [n_classes]
        self.layers = nn.ModuleList(
            SAGEConv(dim_in, dim_out, aggregator_type)
            for dim_in, dim_out in zip(dims[:-1], dims[1:])
        )

    def forward(self, graph, inputs):
        """Run a forward pass over ``graph`` with node features ``inputs``."""
        h = self.dropout(inputs)
        last = len(self.layers) - 1
        for idx, conv in enumerate(self.layers):
            h = conv(graph, h)
            if idx != last:
                # No activation/dropout on the output layer.
                h = self.activation(h)
                h = self.dropout(h)
        return h
def load_cora_data():
    """Load the Cora citation dataset via the legacy DGL API.

    Returns a tuple ``(n_classes, graph, features, labels, train_mask)``
    ready for training.
    """
    data = citegrh.load_cora()
    g = DGLGraph(data.graph)
    return (
        data.num_classes,
        g,
        torch.FloatTensor(data.features),
        torch.LongTensor(data.labels),
        torch.BoolTensor(data.train_mask),
    )
n_classes, g, features, labels, mask = load_cora_data()

# create GraphSAGE model
model = GraphSAGE(in_feats=features.size()[1],
                  n_hidden=16,
                  n_classes=n_classes,
                  n_layers=1,
                  activation=F.relu,
                  dropout=0.5,
                  aggregator_type='gcn')

# use optimizer
optimizer = torch.optim.Adam(model.parameters(), lr=1e-3)

# initialize graph
dur = []  # per-epoch durations; the first 3 epochs are skipped as warm-up
for epoch in range(50):
    model.train()
    if epoch >= 3:
        t0 = time.time()
    logits = model(g, features)
    loss = F.cross_entropy(logits[mask], labels[mask])
    optimizer.zero_grad()
    loss.backward()
    optimizer.step()
    if epoch >= 3:
        dur.append(time.time() - t0)
    # BUG FIX: the original passed np.mean(dur) to print() instead of to
    # .format(), leaving the third {:.4f} placeholder unfilled, which raises
    # IndexError on the very first epoch. Also guard against an empty `dur`
    # (epochs 0-2) so np.mean does not warn on an empty slice.
    mean_dur = np.mean(dur) if dur else float('nan')
    print("Epoch {:05d} | Loss {:.4f} | Time(s) {:.4f}".format(epoch, loss.item(), mean_dur))
| Gabtakt/GNN-lab | GraphSAGE.py | GraphSAGE.py | py | 2,428 | python | en | code | 1 | github-code | 36 | [
{
"api_name": "torch.nn.Module",
"line_number": 15,
"usage_type": "attribute"
},
{
"api_name": "torch.nn",
"line_number": 15,
"usage_type": "name"
},
{
"api_name": "torch.nn.ModuleList",
"line_number": 25,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"lin... |
42222604118 | """ new visualizations 2020
Revision ID: 437ffc36a821
Revises: d73f1a3bccf3
Create Date: 2020-07-16 19:48:01.228630
"""
from alembic import op
from sqlalchemy import String, Integer
from sqlalchemy.sql import table, column, text
from caipirinha.migration_utils import get_enable_disable_fk_command
# revision identifiers, used by Alembic.
revision = '437ffc36a821'        # this migration's id
down_revision = 'd73f1a3bccf3'   # parent migration in the chain
branch_labels = None
depends_on = None
def insert_visualization_type():
    """Bulk-insert the new 2020 visualization types (ids 130-137)."""
    viz_table = table(
        'visualization_type',
        column('id', Integer),
        column('name', String),
        column('help', String),
        column('icon', String))
    new_types = (
        (130, 'indicator', 'Gauge', 'fa-chart'),
        (131, 'markdown', 'Markdown text', 'fa-chart'),
        (132, 'word-cloud', 'Word cloud', 'fa-chart'),
        (133, 'heatmap', 'Heatmap', 'fa-chart'),
        (134, 'bubble-chart', 'Bubble chart', 'fa-chart'),
        (135, 'force-direct', 'Network graphs', 'fa-chart'),
        (136, 'iframe', 'HTML iframe', 'fa-chart'),
        (137, 'treemap', 'Treemap', 'fa-chart'),
    )
    # Zip each value tuple with the column names to build the row dicts.
    col_names = [c.name for c in viz_table.columns]
    op.bulk_insert(viz_table,
                   [dict(zip(col_names, values)) for values in new_types])
def upgrade():
    """Insert the new 2020 visualization types inside an explicit transaction.

    Rolls back and re-raises on any failure.
    """
    try:
        op.execute(text('BEGIN'))
        insert_visualization_type()
        op.execute(text('COMMIT'))
    # Replaces a bare `except:`; behavior-identical because the exception is
    # re-raised either way, but satisfies PEP 8 / linting.
    except BaseException:
        op.execute(text('ROLLBACK'))
        raise
# noinspection PyBroadException
def downgrade():
    """Remove the visualization types added by this revision's upgrade().

    BUG FIX: the original deleted ids (123, 124), which do not belong to this
    migration; upgrade() inserts ids 130-137, so those are what must be
    removed here. Foreign-key checks are disabled around the deletes so the
    dependent `visualization` rows can be removed first.
    """
    try:
        op.execute(text('BEGIN'))
        op.execute(text(get_enable_disable_fk_command(False)))
        op.execute(text(
            "DELETE FROM visualization WHERE type_id IN "
            "(130, 131, 132, 133, 134, 135, 136, 137)"))
        op.execute(text(
            "DELETE FROM visualization_type WHERE id IN "
            "(130, 131, 132, 133, 134, 135, 136, 137)"))
        op.execute(text(get_enable_disable_fk_command(True)))
        op.execute(text('COMMIT'))
    # Behavior-identical to the original bare `except:` (re-raised either way).
    except BaseException:
        op.execute(text('ROLLBACK'))
        raise
| eubr-bigsea/caipirinha | migrations/versions/437ffc36a821_new_visualizations_2020.py | 437ffc36a821_new_visualizations_2020.py | py | 2,171 | python | en | code | 1 | github-code | 36 | [
{
"api_name": "sqlalchemy.sql.table",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "sqlalchemy.sql.column",
"line_number": 23,
"usage_type": "call"
},
{
"api_name": "sqlalchemy.Integer",
"line_number": 23,
"usage_type": "argument"
},
{
"api_name": "sql... |
27702917459 | from re import L
from flask import Flask
from flask import jsonify
from flask import request
from flask_restful import Api, Resource, reqparse
import json
import sys
from get_details import get_name
from process_swipe import process_swipe
import requests
import random
from eventlet import wsgi
import eventlet
from redis_instance import get_instance
from RecommendationEngine import get_swipe_stack
from mysql_connection import get_cursor
import traceback
import pymongo
# NOTE(review): the MongoDB URI is empty — presumably redacted before commit;
# it must be configured for the group-session store to work.
mongod = pymongo.MongoClient("")
db = mongod["tat"]           # application database
collection = db["sessions"]  # group-session documents (room codes, liked restaurants)
r = get_instance()           # module-level Redis client shared by all handlers
# 1 day
# NOTE(review): EXPIRATION_TIME appears unused below (the cache TTLs are the
# literal 7200) — confirm whether it was meant to replace those literals.
EXPIRATION_TIME = 86400
# Parser for POST /swipe payloads.
like_post_args = reqparse.RequestParser()
like_post_args.add_argument(
    "foodid", type=int, help="The ID of the food item swiped on")
like_post_args.add_argument("userid", type=str, help="Your UserID")
# NOTE(review): the misspelled key "restuarantid" is load-bearing — clients
# send this exact field name; renaming it would break the API.
like_post_args.add_argument("restuarantid", type=int, help="The restaurants ID UserID")
like_post_args.add_argument("authtoken", type=str, help="Authorisation token")
like_post_args.add_argument(
    "islike", type=bool, help="If the like was like / dislike")
like_post_args.add_argument(
    "isfavourite", type=bool, help="If it was a super like")
like_post_args.add_argument("isGroup", type=bool, help="If the swipe stack you're getting is for a group")

# Parser for POST /swipestack payloads.
swipestack_args = reqparse.RequestParser()
swipestack_args.add_argument(
    "lat", type=float, help="Lattitude of where to search recommendations"
)
swipestack_args.add_argument(
    "lng", type=float, help="Longitude of where to search recommendations"
)
swipestack_args.add_argument(
    "userid", type=str, help="The userID"
)
swipestack_args.add_argument("authtoken", type=str, help="Authorisation token")
swipestack_args.add_argument("code", type=int, help="Room code")
# NOTE(review): no type= here, so isGroup arrives as a string; the handler
# compares it to the literal "True" — confirm clients send exactly that.
swipestack_args.add_argument("isGroup", help="If the swipe stack you're getting is for a group")
swipestack_args.add_argument(
    "distance", type=float, help="Radius of the circle to search within"
)

app = Flask(__name__)
api = Api(app)
class RecommenderController(Resource):
    """REST resource serving personalised swipe stacks (POST /swipestack)."""

    # post [/swipestack]
    def post(self):
        """Authenticate the caller, then return a recommendation stack.

        Returns the auth service's status code on auth failure, 404 when no
        restaurants could be found, otherwise the stack JSON with 200.
        """
        args = swipestack_args.parse_args()
        print(args)
        payload = {"authtoken": args.authtoken, "userid": args.userid}
        # Renamed from `r`: that name shadowed the module-level Redis client,
        # the same hazard that caused a real bug in SwipeController.
        auth_response = requests.post(
            'http://devapi.trackandtaste.com/user/authcheck', json=payload)
        if auth_response.status_code != 200:
            return '', auth_response.status_code
        try:
            data = get_swipe_stack(args.lat, args.lng, args.userid,
                                   args.distance, args.isGroup == "True",
                                   str(args.code))
        except Exception as e:
            print(e)
            print(traceback.format_exc())
            return '', 404  # We couldn't find any restaurants
        return json.loads(data), 200
class SwipeController(Resource):
    """REST resource recording a user's swipe on a food item (POST /swipe)."""

    # post [/swipe]
    def post(self):
        """Authenticate, persist the swipe, and cache it for solo sessions.

        Returns the auth service's status code on auth failure, 404 when the
        swipe could not be processed, otherwise 201.
        """
        args = like_post_args.parse_args()
        payload = {"authtoken": args.authtoken, "userid": args.userid}
        res = requests.post(
            'http://devapi.trackandtaste.com/user/authcheck', json=payload)
        if res.status_code != 200:
            # BUG FIX: the original returned `r.status_code`, but `r` is the
            # module-level Redis client (which has no such attribute, so this
            # path raised AttributeError). The auth response is `res`.
            return '', res.status_code
        try:
            process_swipe(args.userid, args.foodid,
                          args.islike, args.isfavourite)
            # If the swipe doesn't come from the group page, cache it.
            # Else caching is handled by the websocket server and is saved in mongo.
            if not args.isGroup:
                print(args)
                r.lpush(f"Recommendations-{args.userid}", args.foodid)
                r.expire(f"Recommendations-{args.userid}", 7200)
                if args.islike or args.isfavourite:
                    # Likes are cached as "foodid,restaurantid" pairs,
                    # consumed later by ItemController.
                    r.lpush(f"Likes-{args.userid}", f"{args.foodid},{args.restuarantid}")
                    r.expire(f"Likes-{args.userid}", 7200)
        except Exception as e:
            print(e)
            print(traceback.format_exc())
            # Food item not found
            return '', 404
        return '', 201
class ItemController(Resource):
    """REST resource returning the food items a user liked at one restaurant."""

    #get [/likeditems]
    def get(self):
        """Collect liked food-item ids, then resolve them against MySQL.

        Group sessions read likes from the Mongo session document; solo
        sessions read the "foodid,restaurantid" pairs SwipeController cached
        in Redis. Returns 404 when nothing was liked.
        """
        args = request.args
        # If we're dealing with a group
        filtered = []
        if(args["isGroup"] == "true"):
            print("is group")
            # NOTE(review): assumes the session document has the nested shape
            # restaurantsLiked -> likes -> items; confirm against the
            # websocket server that writes these documents.
            room = collection.find_one({"code" : args["room"]}, {"restaurantsLiked": 1})
            userid = str(args["userID"])
            restaurantid = int(args["restaurantID"])
            for restaurant in room["restaurantsLiked"]:
                if restaurant["restaurantID"] == restaurantid:
                    for user in restaurant["likes"]:
                        if user["userID"] == userid:
                            for item in user["items"]:
                                filtered.append(str(item))
        else:
            print("Nota group")
            # We're dealing with individual swipe
            # Cached entries are "foodid,restaurantid" strings (see SwipeController).
            likedItems = [i.decode("UTF-8") for i in r.lrange(f"Likes-{args['userID']}", 0, -1)]
            for item in likedItems:
                if item.split(',')[1] == str(args["restaurantID"]):
                    filtered.append(item.split(',')[0])
        if len(filtered) == 0:
            return '', 404
        cursor = get_cursor()
        # NOTE(review): SQL built by string interpolation — the ids come from
        # our own cache/session store and look numeric, but this is an
        # injection hazard; a parameterized IN clause would be safer.
        cursor.execute(f"SELECT Price, FoodNameShort, FoodID FROM FoodItem WHERE FoodID IN({','.join(filtered)});")
        result = cursor.fetchall()
        items = []
        for item in result:
            items.append({"price": str(item[0]),"name": item[1], "id": item[2]})
        print(items)
        return items, 200
# Route registration: one flask-restful resource per endpoint.
api.add_resource(SwipeController, "/swipe")
api.add_resource(RecommenderController, "/swipestack")
api.add_resource(ItemController, "/likeditems")
if __name__ == "__main__":
    # Serve with eventlet's WSGI server on all interfaces, port 8000.
    wsgi.server(eventlet.listen(('', 8000)), app)
| mbruty/COMP2003-2020-O | recommender/main.py | main.py | py | 5,688 | python | en | code | 3 | github-code | 36 | [
{
"api_name": "pymongo.MongoClient",
"line_number": 22,
"usage_type": "call"
},
{
"api_name": "redis_instance.get_instance",
"line_number": 25,
"usage_type": "call"
},
{
"api_name": "flask_restful.reqparse.RequestParser",
"line_number": 30,
"usage_type": "call"
},
{
... |
28891383601 | """Initializes and checks the environment needed to run pytype."""
import logging
import sys
from typing import List
from pytype.imports import typeshed
from pytype.platform_utils import path_utils
from pytype.tools import runner
def check_pytype_or_die():
    """Exit the process if the `pytype` binary cannot be executed."""
    if runner.can_run("pytype", "-h"):
        return
    logging.critical(
        "Cannot run pytype. Check that it is installed and in your path")
    sys.exit(1)
def check_python_version(exe: List[str], required):
    """Check if exe is a python executable with the required version.

    Returns (True, None) on a match, (False, version_string) on a version
    mismatch, and (False, None) if the executable could not be run.
    """
    try:
        # python --version outputs to stderr for earlier versions
        _, out, err = runner.BinaryRun(exe + ["--version"]).communicate()  # pylint: disable=unpacking-non-sequence
        reported = (out or err).decode("utf-8")
        if not reported.startswith(f"Python {required}"):
            return False, reported.rstrip()
        return True, None
    except OSError:
        return False, None
def check_python_exe_or_die(required) -> List[str]:
    """Check if a python executable with the required version is in path.

    Tries platform-appropriate launcher names in order and returns the first
    matching command; exits the process if none match.
    """
    attempted_versions = []
    if sys.platform == "win32":
        candidates = (["py", f"-{required}"], ["py3"], ["py"])
    else:
        candidates = ([f"python{required}"], ["python3"], ["python"])
    for candidate in candidates:
        ok, reported = check_python_version(candidate, required)
        if ok:
            return candidate
        if reported:
            attempted_versions.append(reported)
    logging.critical(
        "Could not find a valid python%s interpreter in path (found %s)",
        required, ", ".join(sorted(set(attempted_versions))))
    sys.exit(1)
def initialize_typeshed_or_die():
    """Initialize a Typeshed object, exiting the process on failure.

    Returns:
      An instance of Typeshed()
    """
    try:
        return typeshed.Typeshed()
    except OSError as err:
        logging.critical(str(err))
        sys.exit(1)
def compute_pythonpath(filenames):
    """Compute a list of dependency paths for the given source files.

    For a file inside a package (its directory contains __init__.py), the
    directory *above* the topmost enclosing package is added, so that
    'from package import module' resolves. For a standalone script, its own
    directory is added, so that 'import module_in_same_directory' resolves.
    """
    roots = set()
    for filename in filenames:
        directory = path_utils.dirname(filename)
        if not path_utils.exists(path_utils.join(directory, "__init__.py")):
            # Standalone script: its containing directory is the import root.
            roots.add(directory)
            continue
        # Walk up to the first non-package ancestor: that is the parent of
        # the top-level package.
        parent = path_utils.dirname(directory)
        while path_utils.exists(path_utils.join(parent, "__init__.py")):
            parent = path_utils.dirname(parent)
        roots.add(parent)
    # Reverse-sort so child directories always precede their parents.
    # Otherwise a parent entry would become the module-name base for files in
    # a nested package: with foo/ before foo/baz/, baz/qux1.py would be
    # resolved as baz.qux1 instead of qux1, breaking 'import qux1' from
    # within foo/baz/.
    return sorted(roots, reverse=True)
| google/pytype | pytype/tools/environment.py | environment.py | py | 3,242 | python | en | code | 4,405 | github-code | 36 | [
{
"api_name": "pytype.tools.runner.can_run",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "pytype.tools.runner",
"line_number": 13,
"usage_type": "name"
},
{
"api_name": "logging.critical",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "sys.... |
42469623972 | import os
import morfeusz2
import pandas as pd
from sklearn.metrics import classification_report
def lemmatize_text(text):
    """Lemmatize Polish text with Morfeusz2 and return it as one string.

    Accepts either a string (split on whitespace) or an iterable of tokens.
    The lemma 'oko' is mapped back to 'ok', because Morfeusz lemmatizes the
    colloquial 'ok' to 'oko' ('eye').
    """
    tokens = text.split() if isinstance(text, str) else text
    analyzer = morfeusz2.Morfeusz(expand_dag=True, expand_tags=True)
    lemmas = []
    for token in tokens:
        # Take the base form of the first analysis, dropping tag qualifiers.
        lemma = analyzer.analyse(token)[0][0][1].split(':')[0]
        lemmas.append('ok' if lemma == 'oko' else lemma)
    return " ".join(lemmas)
def create_dir(directory):
    """Create `directory` (and any missing parents) if it does not exist.

    Replaces a hand-rolled implementation that split the path on backslashes
    (Windows-only) and re-joined with '//'; `os.makedirs` with
    `exist_ok=True` is portable, race-safe, and a no-op when the directory
    already exists.
    """
    os.makedirs(directory, exist_ok=True)
def mapping_from_clusters(x):
    """Map a cluster label to a sentiment class: -1 -> 'neg', anything else -> 'pos'."""
    return 'neg' if x == -1 else 'pos'
def classification_report_to_excel(y_test, y_pred, filename):
    """Write a binary (Negative/Positive) sklearn classification report to Excel."""
    report = classification_report(
        y_test, y_pred, output_dict=True, target_names=['Negative', 'Positive'])
    report_df = pd.DataFrame(report).T
    report_df.to_excel(filename)
| kingagla/reviews_classification | scripts/utils.py | utils.py | py | 1,049 | python | en | code | 3 | github-code | 36 | [
{
"api_name": "morfeusz2.Morfeusz",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "os.path.isdir",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 21,
"usage_type": "attribute"
},
{
"api_name": "os.path.abspath",
"... |
74779852584 | from unittest import TestCase
from collections import namedtuple
from P2_Sorting.HeapSort.heap_sort import heap_sort
class Task(object):
    """A unit-time task with a positive integer deadline and a positive penalty.

    Both attributes are validated at construction and exposed read-only.
    """

    def __init__(self, deadline, penalty):
        assert isinstance(deadline, int) and deadline > 0
        assert penalty > 0
        self._deadline = deadline
        self._penalty = penalty

    @property
    def deadline(self):
        """1-based time slot by which the task should finish."""
        return self._deadline

    @property
    def penalty(self):
        """Penalty incurred if the task finishes after its deadline."""
        return self._penalty
def _check_input_or_error(tasks):
    """Assert every deadline is at most len(tasks).

    With n unit-time slots, a deadline beyond n is meaningless; an empty
    input is trivially valid.
    """
    if tasks:
        n = len(tasks)
        assert all(task.deadline <= n for task in tasks)
def _check_independent(tasks, deadline_counts, indices, length):
    """
    Ex 16.5-2. O(|A|) running time algorithm to check whether a set A of tasks
    are independent, i.e. no prefix of time slots is oversubscribed.
    :param tasks: all the tasks.
    :param deadline_counts: scratch array, rewritten so deadline_counts[i]
        holds how many considered tasks have deadline exactly i + 1.
    :param indices: indices of tasks to consider.
    :param length: only indices[0:length] are considered, so |A| == length.
    :return: True iff for every slot t at most t tasks have deadline <= t.
    """
    n = len(tasks)
    for slot in range(n):
        deadline_counts[slot] = 0
    for k in range(length):
        deadline_counts[tasks[indices[k]].deadline - 1] += 1
    # Prefix sums: more than t tasks due by slot t means some task is late.
    running_total = 0
    for slot in range(n):
        running_total += deadline_counts[slot]
        if running_total > slot + 1:
            return False
    return True
def schedule_task(tasks):
    """
    O(n^2) running time algorithm to schedule unit-time tasks with deadlines and penalties to get the minimum total
    penalty.
    :param tasks: tasks to consider.
    :return: the optimal schedule of 'early' tasks, as a tuple of indices into
        the original `tasks` sequence, ordered by deadline.
    """
    _check_input_or_error(tasks)
    n = len(tasks)
    tasks = list(tasks)
    # Remember each task's original position. NOTE: this mutates the caller's
    # task objects by attaching an `index` attribute.
    for i in range(0, n):
        tasks[i].index = i
    # Greedy matroid algorithm: consider tasks in decreasing penalty order.
    heap_sort(tasks, key=lambda t: -t.penalty)
    schedule_on_sorted = [-1] * n
    early_count = 0
    deadline_counts = [0] * n  # scratch array reused by _check_independent
    for i in range(0, n):
        # Tentatively add task i (its position in sorted order); keep it only
        # if the chosen set stays independent, i.e. schedulable with no task late.
        schedule_on_sorted[early_count] = i
        if _check_independent(tasks, deadline_counts, schedule_on_sorted, early_count + 1):
            early_count += 1
    schedule = [-1] * early_count
    for i in range(0, early_count):
        schedule[i] = schedule_on_sorted[i]
    # Order the early tasks by deadline to obtain a feasible schedule.
    heap_sort(schedule, key=lambda index: tasks[index].deadline)
    # Map positions in the penalty-sorted list back to original task indices.
    for i in range(0, early_count):
        schedule[i] = tasks[schedule[i]].index
    return tuple(schedule)
class TestTaskScheduling(TestCase):
    """Unit tests for schedule_task, including the CLRS textbook examples."""

    def test_task_scheduling(self):
        # Each case lists every acceptable optimal schedule: ties in penalty
        # may legally be broken either way, so some cases accept several.
        case_class = namedtuple('Case', 'desc tasks schedules')
        cases = (
            case_class(desc='Empty', tasks=(), schedules=(
                (),
            )),
            case_class(desc='Single', tasks=(
                Task(1, 10),
            ), schedules=(
                (0,),
            )),
            case_class(desc='Two early', tasks=(
                Task(1, 10),
                Task(2, 20)
            ), schedules=(
                (0, 1),
            )),
            case_class(desc='Two late', tasks=(
                Task(1, 10),
                Task(1, 20)
            ), schedules=(
                (1,),
            )),
            case_class(desc='Example in textbook', tasks=(
                Task(4, 70),
                Task(2, 60),
                Task(4, 50),
                Task(3, 40),
                Task(1, 30),
                Task(4, 20),
                Task(6, 10),
            ), schedules=(
                (1, 3, 0, 2, 6),
            )),
            case_class(desc='Ex 16.5-1', tasks=(
                Task(4, 10),
                Task(2, 20),
                Task(4, 30),
                Task(3, 40),
                Task(1, 50),
                Task(4, 60),
                Task(6, 70),
            ), schedules=(
                (4, 3, 2, 5, 6),
                (4, 3, 5, 2, 6),
            )),
        )
        for case in cases:
            schedule = schedule_task(case.tasks)
            self.assertTrue(schedule in case.schedules, msg='%s, wrong schedule %s' % (case.desc, schedule))
{
"api_name": "P2_Sorting.HeapSort.heap_sort.heap_sort",
"line_number": 70,
"usage_type": "call"
},
{
"api_name": "P2_Sorting.HeapSort.heap_sort.heap_sort",
"line_number": 83,
"usage_type": "call"
},
{
"api_name": "unittest.TestCase",
"line_number": 90,
"usage_type": "nam... |
70942499944 | from pyspark.sql import SparkSession
from pyspark.sql.functions import col
import boto3
# NOTE(review): this boto3 session and S3 resource are created but never used
# below — presumably leftover scaffolding; the profile name also looks like a
# redacted placeholder. Confirm whether these two lines can be removed.
session = boto3.Session(profile_name="***_AdministratorAccess",region_name="us-east-1")
s3 = boto3.resource('s3')
# Initialize the Spark session
spark = SparkSession.builder.getOrCreate()
# Read the Parquet files and create the dataframes
# NOTE(review): unlike the final write, these read paths carry no "s3://"
# scheme — verify they actually resolve (they look like S3 locations).
df_imdb = spark.read.parquet("natalias-s3-bucket/Trusted/Parquet/Movies/CSV/")
df_tmdb = spark.read.parquet("natalias-s3-bucket/Trusted/Parquet/Movies/JSON/")
# Select the required columns from the IMDB dataframe
df_imdb = df_imdb.select(
    col("id").alias("idImdb"),
    col("anolancamento").alias("anoLancamento"),
    col("genero").alias("genero"),
    col("tituloprincipal").alias("tituloPrincipal"),
    col("notamedia").alias("notaMedia")
)
# Select the required columns from the TMDB dataframe
df_tmdb = df_tmdb.select(
    col("id").alias("idTmdb"),
    col("popularity").alias("popularity"),
    col("vote_average").alias("voteAverage"),
    col("vote_count").alias("voteCount"),
    col("release_date").alias("releaseDate")
)
# Build the FatoFilmes (fact) table
# NOTE(review): this joins on "idImdb", but df_tmdb has no "idImdb" column
# (its id was aliased to "idTmdb" above), so this join should fail at
# analysis time — confirm the intended join key.
df_fato_filmes = df_imdb.join(df_tmdb, "idImdb")
# Build the DimensaoTmdb (dimension) table
# NOTE(review): "genre_ids" and "original_language" were not selected into
# df_tmdb above, so these column references cannot resolve — confirm.
df_dimensao_tmdb = df_tmdb.select(
    col("idTmdb"),
    col("genre_ids").alias("generos"),
    col("original_language").alias("originalLanguage"),
    col("releaseDate")
)
# Build the DimensaoImdb (dimension) table
df_dimensao_imdb = df_imdb.select(
    col("idImdb"),
    col("anoLancamento"),
    col("genero"),
    col("tituloPrincipal")
)
# Register the resulting dataframes as temporary views
df_fato_filmes.createOrReplaceTempView("FatoFilmes")
df_dimensao_tmdb.createOrReplaceTempView("DimensaoTmdb")
df_dimensao_imdb.createOrReplaceTempView("DimensaoImdb")
# Run a query joining the fact and dimension views
# NOTE(review): the query references FatoFilmes.numeroVotos and
# DimensaoTmdb.genres, neither of which exists in the views defined above —
# verify the column list against the intended schema.
result = spark.sql("""
SELECT
FatoFilmes.idImdb,
FatoFilmes.idTmdb,
FatoFilmes.notaMedia,
FatoFilmes.numeroVotos,
FatoFilmes.popularity,
FatoFilmes.voteAverage,
FatoFilmes.voteCount,
DimensaoTmdb.genres,
DimensaoTmdb.originalLanguage,
DimensaoTmdb.releaseDate,
DimensaoImdb.anoLancamento,
DimensaoImdb.genero,
DimensaoImdb.tituloPrincipal
FROM
FatoFilmes
JOIN
DimensaoTmdb ON FatoFilmes.idTmdb = DimensaoTmdb.idTmdb
JOIN
DimensaoImdb ON FatoFilmes.idImdb = DimensaoImdb.idImdb
""")
# Save the resulting DataFrame to S3 in Parquet format
result.write.parquet("s3://natalias-s3-bucket/Processed-Trusted/Parquet/Movies/resultedparquet") | nataliasguimaraes/compassuol | sprint_09/desafio_etl/processed_trusted/proc_trusted.py | proc_trusted.py | py | 2,573 | python | pt | code | 0 | github-code | 36 | [
{
"api_name": "boto3.Session",
"line_number": 4,
"usage_type": "call"
},
{
"api_name": "boto3.resource",
"line_number": 5,
"usage_type": "call"
},
{
"api_name": "pyspark.sql.SparkSession.builder.getOrCreate",
"line_number": 8,
"usage_type": "call"
},
{
"api_name":... |
71967566185 | import subprocess
import pytest
from pipfile2req.requirements import requirement_from_pipfile
def compare_requirements(left, right):
    """Return True iff `left` and `right` contain the same set of lines.

    BUG FIX: the original only checked `left - right`, i.e. that `left` is a
    subset of `right`, so command output *missing* lines from the golden
    file still compared equal. Line order is intentionally ignored.
    """
    return set(left.splitlines()) == set(right.splitlines())
# Each case runs the pipfile2req CLI against the fixture Pipfile/Pipfile.lock
# in tests/ and compares its stdout to a golden requirements file.
@pytest.mark.parametrize(
    "command,golden_file",
    [
        ("pipfile2req -p tests", "tests/requirements.txt"),
        ("cd tests && pipfile2req", "tests/requirements.txt"),
        ("pipfile2req -p tests -d", "tests/dev-requirements.txt"),
        ("pipfile2req -p tests Pipfile", "tests/requirements-pipfile.txt"),
        ("pipfile2req -d tests/Pipfile", "tests/dev-requirements-pipfile.txt"),
        ("pipfile2req -d tests/Pipfile.lock", "tests/dev-requirements.txt"),
        ("pipfile2req -p tests --sources", "tests/requirements-sources.txt"),
        ("pipfile2req -p tests Pipfile --sources", "tests/requirements-pipfile-sources.txt"),
    ],
)
def test_convert_pipfile(command, golden_file):
    """Run the CLI via the shell and compare its output to the golden file."""
    proc = subprocess.Popen(
        command, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE
    )
    output, err = proc.communicate()
    # Normalize Windows line endings before the line-set comparison.
    with open(golden_file) as f:
        assert compare_requirements(
            output.decode("utf-8").strip().replace("\r\n", "\n"),
            f.read().strip().replace("\r\n", "\n"),
        )
def test_convert_include_hash():
    """The --hashes flag should complete successfully (exit code 0)."""
    completed = subprocess.run(
        "pipfile2req -p tests --hashes",
        shell=True,
        stdout=subprocess.PIPE,
        stderr=subprocess.PIPE,
    )
    # Surface stderr in the pytest report if the command failed.
    print(completed.stderr)
    assert completed.returncode == 0
@pytest.mark.parametrize("name,package,req", [
    ("foo", "*", "foo"),
    ("foo", {"version": "*"}, "foo"),
    ("foo", {"version": ">=1.0", "extras": ["test", "sec"]}, "foo[test,sec]>=1.0"),
    ("foo", {"file": "file:///data/demo-0.0.1.tar.gz"}, "foo @ file:///data/demo-0.0.1.tar.gz"),
    ("foo", {"file": "file:///data/demo-0.0.1.tar.gz", "extras": ["test", "sec"]}, "foo[test,sec] @ file:///data/demo-0.0.1.tar.gz"),
    ("foo", {"path": ".", "editable": True, "extras": ["test", "sec"]}, "-e .[test,sec]"),
    ("foo", {"version": ">=1.0", "markers": "os_name=='nt'", "python_version": "~='3.7'"}, 'foo>=1.0; os_name == "nt" and python_version ~= "3.7"'),
    ("foo", {"git": "https://github.com/foo/foo.git", "ref": "master", "subdirectory": "sub"}, "git+https://github.com/foo/foo.git@master#egg=foo&subdirectory=sub")
])
def test_convert_requirement(name, package, req):
    """Each Pipfile entry should render to the expected requirement line."""
    assert requirement_from_pipfile(name, package) == req
| frostming/pipfile-requirements | test_pipfile_requirements.py | test_pipfile_requirements.py | py | 2,499 | python | en | code | 49 | github-code | 36 | [
{
"api_name": "subprocess.Popen",
"line_number": 25,
"usage_type": "call"
},
{
"api_name": "subprocess.PIPE",
"line_number": 26,
"usage_type": "attribute"
},
{
"api_name": "pytest.mark.parametrize",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "pytest.... |
75071109224 | import requests
import pandas as pd
import numpy as np
import seaborn as sns
from bs4 import BeautifulSoup
import warnings
import nltk
#import surprise
import scipy as sp
from sklearn.metrics.pairwise import cosine_similarity
from sklearn.feature_extraction.text import CountVectorizer
from nltk.corpus import stopwords
from nltk import word_tokenize, RegexpTokenizer
from nltk.stem import SnowballStemmer
from sklearn.feature_extraction.text import TfidfVectorizer
from datetime import datetime, time
import movieMender
class Generos:
    """Genre-based movie recommendation helpers built on MovieLens-style CSVs.

    All dataframes are loaded eagerly in ``__init__``; every public method
    reads from those cached dataframes.
    """

    def __init__(self):
        # Load all CSVs and precompute the merged dataframes.
        self.cargaDocumentos()

    def cargaDocumentos(self):
        """Load the user/movies/ratings/tags CSVs and build merged views."""
        self.df_usuaarioO = pd.read_csv('csv/Usuario_0.csv', sep=';')
        self.df_usuaarioO = self.df_usuaarioO.drop(columns=["title"])
        for usuario_nuevo in range(len(self.df_usuaarioO["movieId"])):
            # NOTE(review): each iteration assigns whole columns, so the loop
            # is effectively redundant — confirm the intent was per-row.
            self.df_usuaarioO["userId"] = 0
            self.df_usuaarioO["timestamp"] = datetime.now()
        self.df_movies = pd.read_csv('csv/movies.csv')
        # Load the movies dataframe (includes the synopsis); drop incomplete rows.
        self.df_movies = self.df_movies.dropna()
        self.df_ratings = pd.read_csv('csv/ratings.csv')
        self.df_ratings = self.df_ratings.dropna()
        self.df_tags = pd.read_csv('csv/tags.csv')
        self.df_tags = self.df_tags.dropna()
        # Prepend the local user's ratings (userId 0) to the shared ratings.
        self.df_ratings = pd.concat([self.df_usuaarioO, self.df_ratings], axis=0)
        self.df_movies_ratings = self.df_ratings.merge(self.df_movies)[
            ['userId', 'movieId', 'title', 'rating', 'genres']]
        self.df_movies_ratings_tags = pd.merge(self.df_movies_ratings, self.df_tags, how='outer')[
            ['userId', 'movieId', 'title', 'rating', 'genres', 'tag']]
        self.df_movies_ratings_tags["tag"] = self.df_movies_ratings_tags["tag"].str.lower()
        # self.df_movies_ratings_tags.fillna("vacio", inplace = True)
        # user x title rating matrix used by collaborative methods.
        self.ratings_table = self.df_movies_ratings.pivot_table(index='userId', columns='title', values='rating')
        # replace the NaNs (unrated) with 0:
        self.ratings_table.fillna(0, inplace=True)

    def recomedacionPorGenero(self, nombrePelicula, n_similares):
        """Return up to *n_similares* titles whose genre vectors have the
        highest cosine similarity to *nombrePelicula*."""
        n_similares=int(n_similares)
        # Unique genre vocabulary across the whole catalog.
        genres = list(set([genre for genres in self.df_movies["genres"].str.split("|") for genre in genres]))
        # One binary genre-membership vector per movie.
        genre_matrix = []
        for index, row in self.df_movies.iterrows():
            genre_list = row["genres"].split("|")
            genre_vector = [1 if genre in genre_list else 0 for genre in genres]
            genre_matrix.append(genre_vector)
        genre_matrix = pd.DataFrame(genre_matrix, columns=genres)
        contador = 1
        selected_movie = self.df_movies[self.df_movies["title"] == nombrePelicula]
        selected_movie_index = selected_movie.index[0]
        # compute the genre similarities against every movie
        similarities = cosine_similarity(genre_matrix[selected_movie_index:selected_movie_index+1], genre_matrix).flatten()
        # pair each index with its similarity and sort from highest to lowest
        movie_list = [(index, similarity) for index, similarity in enumerate(similarities)]
        movie_list.sort(key=lambda x: x[1], reverse=True)
        listaSimilar = []
        for i in movie_list[0:n_similares]:
            listaSimilar.append(i)
        # The flag marks whether the searched movie itself appeared in the
        # top results. It starts False and flips to True when the searched
        # title is skipped; in that case one extra movie is appended at the
        # end so the caller still gets the requested number of results.
        listaPeliculasMostrar = []
        bandera=False
        if(n_similares>len(self.df_movies)):
            n_similares=len(self.df_movies)-1
        for movie in movie_list[0:n_similares]:
            if(nombrePelicula != self.df_movies.iloc[movie[0]]["title"]):
                listaPeliculasMostrar.append(self.df_movies.iloc[movie[0]]["title"])
                contador+=1
            else:
                bandera=True
        if(bandera):
            mov=movie_list[n_similares][0]
            listaPeliculasMostrar.append(self.df_movies.iloc[mov]["title"])
        return listaPeliculasMostrar #listaSimilar

    def predecirRatingDeUserAPeliculaPorSusGeneros(self, nombrePelicula, user_id):
        """Predict the rating *user_id* would give *nombrePelicula*: the mean
        of the user's ratings over movies sharing at least one genre with it.
        Returns the rating as a string, or "Vacio" when no data exists."""
        user_id=int(user_id)
        # If the user already rated this movie, return that rating directly.
        yaVotado = self.df_movies_ratings[(self.df_movies_ratings['title']==nombrePelicula) & (self.df_movies_ratings['userId']==user_id)]["rating"].unique()
        if(len(yaVotado)!=0):
            prediction = yaVotado[0]
            return str(prediction)
        else:
            # get the genres of the movie to predict
            movie_genres = self.df_movies_ratings[self.df_movies_ratings['title']==nombrePelicula]["genres"].unique()
            generosPeli = movie_genres[0].split("|")
            # filter the user's ratings down to movies sharing a genre
            user_ratings_ID = self.df_movies_ratings[self.df_movies_ratings['userId'] == user_id]
            user_ratings = user_ratings_ID.loc[user_ratings_ID['genres'].str.split('|').apply(lambda x: any(i in x for i in generosPeli))]
            # average the user's ratings over the genre-matching movies
            if user_ratings.empty:
                print()
                return "Vacio"
            else:
                #prediction = user_ratings_ID['rating'].mean()
                prediction = format(user_ratings['rating'].mean(), '.3f')
                return str(prediction)

    def recomendacionEnBaseGeneroPelisQueNoHaVistoUsuario(self, user_id, n_similares):
        """Recommend *n_similares* unseen movies for *user_id*, weighted by
        how often each genre appears in the user's 10 best-rated movies."""
        warnings.filterwarnings('ignore')
        user_id=int(user_id)
        n_similares=int(n_similares)
        #warnings.filterwarnings('ignore')
        df_movies_rating_user = self.df_movies_ratings[self.df_movies_ratings['userId']==user_id]
        df_movies_rating_user = df_movies_rating_user.sort_values(by='rating',ascending=False)
        # take the top 10 to see which genres the user likes most (already sorted by rating)
        genero_mejor_rating_unicos = list(set([genre for genres in df_movies_rating_user.head(10)["genres"].str.split("|") for genre in genres]))
        # build a dict of genres and how often each one repeats
        genre_count = {}
        for g in genero_mejor_rating_unicos:
            genre_count[g] = df_movies_rating_user.head(10)['genres'].str.count(g).sum()
        # sort the dict from most to least frequent
        genero_mejor_rating = dict(sorted(genre_count.items(), key=lambda x: x[1], reverse=True))
        # select the movies the user has not rated yet
        df_movies_no_rating_user = self.df_movies[self.df_movies['movieId'].isin(df_movies_rating_user['movieId']) == False]
        # add one column per liked genre holding that genre's weight
        for genre, weight in genero_mejor_rating.items():
            df_movies_no_rating_user[genre] = df_movies_no_rating_user["genres"].str.contains(genre).apply(lambda x: weight if x else 0)
        # add a column with the per-row sum to rank the candidate movies
        df_movies_no_rating_user["sumaPesos"] = df_movies_no_rating_user[genero_mejor_rating.keys()].sum(axis=1)
        # sort by the total weight: higher means more liked genres in common
        df_movies_no_rating_user = df_movies_no_rating_user.sort_values(by='sumaPesos',ascending=False)
        df_peliculas_mostrar = df_movies_no_rating_user['title'][0:n_similares]
        listaPeliculasMostrar = []
        contador = 1
        for movie in df_peliculas_mostrar:
            listaPeliculasMostrar.append(movie)
            contador+=1
        return listaPeliculasMostrar
{
"api_name": "pandas.read_csv",
"line_number": 29,
"usage_type": "call"
},
{
"api_name": "datetime.datetime.now",
"line_number": 34,
"usage_type": "call"
},
{
"api_name": "datetime.datetime",
"line_number": 34,
"usage_type": "name"
},
{
"api_name": "pandas.read_c... |
17792225774 | from __future__ import absolute_import, division, print_function, unicode_literals
import logging
import os
import re
from builtins import open
from pants.backend.codegen.antlr.java.java_antlr_library import JavaAntlrLibrary
from pants.backend.jvm.targets.java_library import JavaLibrary
from pants.backend.jvm.tasks.nailgun_task import NailgunTask
from pants.base.exceptions import TaskError
from pants.java.jar.jar_dependency import JarDependency
from pants.task.simple_codegen_task import SimpleCodegenTask
from pants.util.dirutil import safe_mkdir, safe_walk
from pants.util.memo import memoized_method
logger = logging.getLogger(__name__)


def antlr4_jar(name):
    """Build a JarDependency coordinate for an ANTLR 4.1 artifact."""
    return JarDependency(org='org.antlr', name=name, rev='4.1')


# Maps a compiler key ('antlr3'/'antlr4') to its default classpath target spec
# and the jar dependencies used to register the corresponding JVM tool.
_DEFAULT_ANTLR_DEPS = {
    'antlr3': ('//:antlr-3.4', [JarDependency(org='org.antlr', name='antlr', rev='3.4')]),
    'antlr4': ('//:antlr-4', [antlr4_jar(name='antlr4'),
                              antlr4_jar(name='antlr4-runtime')])
}
# TODO: Refactor this and AntlrPyGen to share a common base class with most of the functionality.
# See comments there for what that would take.
class AntlrJavaGen(SimpleCodegenTask, NailgunTask):
    """Generate .java source code from ANTLR grammar files."""
    gentarget_type = JavaAntlrLibrary
    sources_globs = ('**/*.java',)

    class AmbiguousPackageError(TaskError):
        """Raised when a java package cannot be unambiguously determined for a JavaAntlrLibrary."""

    # TODO: Do we need this?
    def find_sources(self, target, target_dir):
        """Restrict the generic source scan to the generated .java files."""
        sources = super(AntlrJavaGen, self).find_sources(target, target_dir)
        return [source for source in sources if source.endswith('.java')]

    @classmethod
    def register_options(cls, register):
        super(AntlrJavaGen, cls).register_options(register)
        # Register one JVM tool classpath per supported ANTLR version.
        for key, (classpath_spec, classpath) in _DEFAULT_ANTLR_DEPS.items():
            cls.register_jvm_tool(register, key, classpath=classpath, classpath_spec=classpath_spec)

    def is_gentarget(self, target):
        return isinstance(target, JavaAntlrLibrary)

    def synthetic_target_type(self, target):
        return JavaLibrary

    def execute_codegen(self, target, target_workdir):
        """Invoke the ANTLR tool for *target*, emitting Java into *target_workdir*.

        Raises TaskError for an unsupported compiler or a non-zero tool exit.
        """
        args = ['-o', target_workdir]
        compiler = target.compiler
        # Use the explicit package if given, otherwise infer it from the
        # common parent directory of the grammar sources.
        if target.package is None:
            java_package = self._get_sources_package(target)
        else:
            java_package = target.package
        if compiler == 'antlr3':
            if target.package is not None:
                logger.warn("The 'package' attribute is not supported for antlr3 and will be ignored.")
            java_main = 'org.antlr.Tool'
        elif compiler == 'antlr4':
            args.append('-visitor')  # Generate Parse Tree Visitor As Well
            # Note that this assumes that there is no package set in the antlr file itself,
            # which is considered an ANTLR best practice.
            args.append('-package')
            args.append(java_package)
            java_main = 'org.antlr.v4.Tool'
        else:
            raise TaskError('Unsupported ANTLR compiler: {}'.format(compiler))
        antlr_classpath = self.tool_classpath(compiler)
        sources = self._calculate_sources([target])
        args.extend(sources)
        result = self.runjava(classpath=antlr_classpath, main=java_main, args=args,
                              workunit_name='antlr')
        if result != 0:
            raise TaskError('java {} ... exited non-zero ({})'.format(java_main, result))
        self._rearrange_output_for_package(target_workdir, java_package)
        # antlr3 embeds a generation timestamp comment in its output, which
        # would defeat caching of otherwise identical results.
        if compiler == 'antlr3':
            self._scrub_generated_timestamps(target_workdir)

    def synthetic_target_extra_dependencies(self, target, target_workdir):
        # Fetch the right java dependency from the target's compiler option
        return self._deps(target.compiler)

    @memoized_method
    def _deps(self, compiler):
        """Resolve (once per compiler) the runtime deps for generated code."""
        spec = self.get_options()[compiler]
        return list(self.resolve_deps([spec])) if spec else []

    # This checks to make sure that all of the sources have an identical package source structure, and
    # if they do, uses that as the package. If they are different, then the user will need to set the
    # package as it cannot be correctly inferred.
    def _get_sources_package(self, target):
        parents = {os.path.dirname(source) for source in target.sources_relative_to_source_root()}
        if len(parents) != 1:
            raise self.AmbiguousPackageError('Antlr sources in multiple directories, cannot infer '
                                             'package. Please set package member in antlr target.')
        return parents.pop().replace('/', '.')

    def _calculate_sources(self, targets):
        """Collect grammar source paths from *targets* and their transitive deps."""
        sources = set()

        def collect_sources(tgt):
            if self.is_gentarget(tgt):
                sources.update(tgt.sources_relative_to_buildroot())

        for target in targets:
            target.walk(collect_sources)
        return sources

    # Matches the leading "// ... YYYY-MM-DD HH:MM:SS" comment antlr3 emits.
    _COMMENT_WITH_TIMESTAMP_RE = re.compile(r'^//.*\d\d\d\d-\d\d-\d\d \d\d:\d\d:\d\d')

    def _rearrange_output_for_package(self, target_workdir, java_package):
        """Rearrange the output files to match a standard Java structure.

        Antlr emits a directory structure based on the relative path provided
        for the grammar file. If the source root of the file is different from
        the Pants build root, then the Java files end up with undesired parent
        directories.
        """
        package_dir_rel = java_package.replace('.', os.path.sep)
        package_dir = os.path.join(target_workdir, package_dir_rel)
        safe_mkdir(package_dir)
        for root, dirs, files in safe_walk(target_workdir):
            # NOTE(review): ``root`` is a path under target_workdir while
            # ``package_dir_rel`` is relative — confirm this comparison can
            # ever match as intended.
            if root == package_dir_rel:
                # This path is already in the correct location
                continue
            for f in files:
                os.rename(
                    os.path.join(root, f),
                    os.path.join(package_dir, f)
                )
        # Remove any empty directories that were left behind
        for root, dirs, files in safe_walk(target_workdir, topdown = False):
            for d in dirs:
                full_dir = os.path.join(root, d)
                if not os.listdir(full_dir):
                    os.rmdir(full_dir)

    def _scrub_generated_timestamps(self, target_workdir):
        """Remove the first line of comment from each file if it contains a timestamp."""
        for root, _, filenames in safe_walk(target_workdir):
            for filename in filenames:
                source = os.path.join(root, filename)
                with open(source, 'r') as f:
                    lines = f.readlines()
                if len(lines) < 1:
                    return
                with open(source, 'w') as f:
                    # Keep the first line only when it is not a timestamp comment.
                    if not self._COMMENT_WITH_TIMESTAMP_RE.match(lines[0]):
                        f.write(lines[0])
                    for line in lines[1:]:
                        f.write(line)
| fakeNetflix/twitter-repo-pants | src/python/pants/backend/codegen/antlr/java/antlr_java_gen.py | antlr_java_gen.py | py | 6,475 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "logging.getLogger",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "pants.java.jar.jar_dependency.JarDependency",
"line_number": 22,
"usage_type": "call"
},
{
"api_name": "pants.java.jar.jar_dependency.JarDependency",
"line_number": 26,
"usage_typ... |
70616752744 | import constants
from flask import jsonify, make_response
def getData(request):
    """Extract the four weather features from the JSON request body and
    map each one to its numeric encoding from the constants tables.

    Raises KeyError if a feature is missing or has an unknown value.
    """
    body = request.json
    return [
        constants.OUTLOOK_VALUES[body['outlook']],
        constants.TEMP_VALUES[body['temp']],
        constants.HUMIDITY_VALUES[body['humidity']],
        constants.WIND_VALUES[body['wind']],
    ]
def makeResponse(result, modelType):
    """Wrap a model prediction in the standard JSON success envelope."""
    payload = {
        'message': 'Success',
        'data': {
            'modelType': modelType,
            'play': constants.PLAY_VALUES[result]
        }
    }
    return make_response(jsonify(payload))
{
"api_name": "constants.OUTLOOK_VALUES",
"line_number": 12,
"usage_type": "attribute"
},
{
"api_name": "constants.TEMP_VALUES",
"line_number": 13,
"usage_type": "attribute"
},
{
"api_name": "constants.HUMIDITY_VALUES",
"line_number": 14,
"usage_type": "attribute"
},
... |
32203986950 | import streamlit as st
from recipesnet.api import RecipesApi
from recipesnet.st_helpers import recip_ingr_widget
st.set_page_config("Recipes net", layout="wide")
st.title("Recipes similarity")
st.write(
"""
In this section you can search what recipes are similar to an specific one.
"""
)
with st.spinner("Loading data..."):
if "api" not in st.session_state:
st.session_state["api"] = RecipesApi()
api: RecipesApi = st.session_state.api
c1, c2 = st.columns(2)
with c1:
st.header("Similar recipes")
recipes = api.recipes
selected_recipe = st.selectbox(
"Recipes similar to ...",
recipes,
recipes.index(st.session_state.recipe) if "recipe" in st.session_state else 0,
)
st.session_state.recipe = selected_recipe
similar = api.similar_recipes(selected_recipe)
i = 0
for rec, score in similar:
if st.button(f"{score:.1%}: {rec.capitalize()}", key=f"similarity_btn_{i}"):
st.session_state.recipe = rec
i += 1
with c2:
recip_ingr_widget()
| jmorgadov/complex-recipes-net | recipesnet/pages/Similarity.py | Similarity.py | py | 1,054 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "streamlit.set_page_config",
"line_number": 6,
"usage_type": "call"
},
{
"api_name": "streamlit.title",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "streamlit.write",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "streamlit.spin... |
75084616422 | """
firebase.py
This module caches video information in Firebase using the user's id as the key.
Cached video entries include duration, title, channel name, category, and timestamp.
The timestamp acts as a TTL of 24 hours, and entries older than the TTL are updated by requesting
the video information from the YouTube API.
Functions:
- is_video_cached(): Checks if a video is cached in Firebase and not expired.
- get_uncached_video_ids(): Returns a list of uncached or expired video IDs.
- cache_video_data(): Caches video information in Firebase for a given video ID.
- cache_request(): Caches video information in Firebase if not already cached or expired.
"""
# Necessary imports
import os
import sys
import json
from datetime import datetime, timedelta
# Add the parent directory to sys.path to import local modules
sys.path.append(os.path.abspath(os.path.join(os.path.dirname(__file__), '..')))
# Local modules
from utils.imports import *
from utils.youtube_utils import get_video_information
# Credentials
from config.credentials import *
def is_video_cached(video_id, data_from_cache):
    """Return True when *video_id* has a complete cache entry newer than 24h.

    An entry counts as cached only if it carries all required fields and its
    timestamp (format "%Y-%m-%dT%H:%M:%S.%f") is less than one day old.
    """
    if not data_from_cache or video_id not in data_from_cache:
        return False
    entry = data_from_cache[video_id]
    required = ('timestamp', 'duration', 'title', 'channel_name', 'category')
    if not all(field in entry for field in required):
        return False
    cached_at = datetime.strptime(entry['timestamp'], "%Y-%m-%dT%H:%M:%S.%f")
    # The timestamp acts as a 24-hour TTL.
    return datetime.now() - cached_at < timedelta(days=1)
def get_uncached_video_ids(video_ids, data_from_cache):
    """Return the subset of *video_ids* that are missing from the cache or
    whose cached entry has expired (see is_video_cached)."""
    return [vid for vid in video_ids if not is_video_cached(vid, data_from_cache)]
def cache_video_data(user_email, video_id, video_data):
    """PUT one video's metadata into Firebase under the user's node.

    ``requests`` comes from the wildcard ``utils.imports`` import, and the
    FIREBASE_* constants from ``config.credentials``.
    """
    url = f'{FIREBASE_DB_URL}/{user_email}/{video_id}.json?auth={FIREBASE_API_KEY}'
    response = requests.put(url, json.dumps(video_data))
def cache_request(youtube, video_ids):
    """Return ``{video_id: info}`` for *video_ids*, serving fresh Firebase
    cache entries and fetching/caching any missing or expired ones from the
    YouTube API.

    Each ``info`` dict holds ``duration`` (a :class:`datetime.timedelta`),
    ``title``, ``channel_name`` and ``category``.
    """
    user_email = USER_ID.replace('@', '-').replace('.', '-')
    video_info = {}

    # Fetch the user's whole cache node from Firebase in one request.
    url = f'{FIREBASE_DB_URL}/{user_email}.json?auth={FIREBASE_API_KEY}'
    response = requests.get(url)
    data_from_cache = response.json()
    if data_from_cache is None:
        data_from_cache = {}

    uncached_video_ids = get_uncached_video_ids(video_ids, data_from_cache)

    # Bug fix: initialize so the lookup loop below never hits an undefined
    # name when everything was already cached.
    video_data = {}
    if uncached_video_ids:
        # Request the missing/expired video information from the YouTube API.
        video_data = get_video_information(youtube, uncached_video_ids)
        for video_id, data in video_data.items():
            # Firebase stores JSON only, so keep the duration as ISO 8601.
            data['duration'] = isodate.duration_isoformat(data['duration'])
            # The timestamp acts as the 24h TTL checked by is_video_cached().
            data['timestamp'] = datetime.now().strftime("%Y-%m-%dT%H:%M:%S.%f")
            data['category'] = data.get('category', 'Unknown')
            cache_video_data(user_email, video_id, data)

    # Build the result, preferring freshly fetched data. Bug fix: the old
    # code consulted the cache first, so an id that was expired (and thus
    # just re-fetched) was still answered with its stale cached values.
    for video_id in video_ids:
        if video_id in video_data:
            video_info[video_id] = {
                'duration': timedelta(seconds=isodate.parse_duration(video_data[video_id]['duration']).total_seconds()),
                'title': video_data[video_id]['title'],
                'channel_name': video_data[video_id]['channel_name'],
                'category': video_data[video_id]['category']
            }
        elif video_id in data_from_cache:
            try:
                video_info[video_id] = {
                    'duration': timedelta(seconds=isodate.parse_duration(data_from_cache[video_id]['duration']).total_seconds()),
                    'title': data_from_cache[video_id]['title'],
                    'channel_name': data_from_cache[video_id]['channel_name'],
                    # .get() guards entries written before 'category' existed.
                    'category': data_from_cache[video_id].get('category', 'Unknown')
                }
            except isodate.isoerror.ISO8601Error:
                # Corrupt duration string in the cache; skip this entry.
                pass

    return video_info
{
"api_name": "sys.path.append",
"line_number": 24,
"usage_type": "call"
},
{
"api_name": "sys.path",
"line_number": 24,
"usage_type": "attribute"
},
{
"api_name": "os.path.abspath",
"line_number": 24,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_num... |
29073464319 | import csv
import datetime
import pathlib
from typing import Generator
import click
from case_rate._types import Cases, CaseTesting, PathLike
from case_rate.sources._utilities import download_file
from case_rate.storage import InputSource
def _to_date(date: str) -> datetime.date:
'''Converts a date string into a date object.
Parameters
----------
date : str
input date string of the form "DD-MM-YYYY"
Returns
-------
datetime.date
output ``date`` object
'''
dt = datetime.datetime.strptime(date, '%d-%m-%Y')
return dt.date()
def _to_int(number: str) -> int:
'''Converts a numerical string into an integer.
This performs an extra check to see if the input string is ``''``. This is
then treated as a zero. Anything else will result in a ``ValueError``.
Parameters
----------
number : str
input string as a number
Returns
-------
int
the string's integer value
Throws
------
:exc:`ValueError`
if the string is not actually a number
'''
if len(number) == 0:
return 0
if number == 'N/A':
return 0
try:
count = int(number)
except ValueError:
count = int(float(number))
return count
class PublicHealthAgencyCanadaSource(InputSource):
    '''Uses reporting data published by the PHAC.

    This input source uses a CSV file that's regularly updated by the Public
    Health Agency of Canada (PHAC). The default source is
    https://health-infobase.canada.ca/src/data/covidLive/covid19.csv. The
    data source will link back to the original PHAC site rather than to the
    file.
    '''
    def __init__(self, path: PathLike, url: str, info: str,
                 update: bool = True):
        '''
        Parameters
        ----------
        path : path-like object
            the path (on disk) where the CSV file is located
        url : str
            the URL to the Government of Canada's COVID-19 report
        info : str optional
            the URL to the main information path (not the CSV file)
        update : bool, optional
            if ``True`` then updates an existing CSV file to the latest version
        '''
        path = pathlib.Path(path) / 'covid19.csv'
        if path.exists():
            # Refresh an existing copy only when asked to.
            if update:
                click.echo('Updating PHAC COVID-19 report.')
                download_file(url, path)
        else:
            # No local copy yet; always download one.
            click.echo('Accessing PHAC COVID-19 report.')
            download_file(url, path)

        self._info = info
        self._path = path

    @classmethod
    def name(cls) -> str:
        # Short machine-readable identifier for this source.
        return 'public-health-agency-canada'

    @classmethod
    def details(cls) -> str:
        # Human-readable description shown to users.
        return 'Public Health Agency of Canada - Current Situation'

    def url(self) -> str:
        # Link back to the PHAC information page, not the raw CSV.
        return self._info

    def cases(self) -> Generator[Cases, None, None]:
        '''Yield per-province case records parsed from the cached CSV.'''
        with self._path.open() as f:
            contents = csv.DictReader(f)
            for entry in contents:
                # Skip the country-wide aggregate row.
                if entry['prname'] == 'Canada':
                    continue

                # NOTE: PHAC doesn't report resolved cases as of 2022-08-26
                yield Cases(
                    date=_to_date(entry['date']),
                    province=entry['prname'],
                    country='Canada',
                    confirmed=_to_int(entry['totalcases']),
                    resolved=-1,
                    deceased=_to_int(entry['numdeaths'])
                )

    def testing(self) -> Generator[CaseTesting, None, None]:
        '''Yield per-province testing records (-1 marks unavailable counts).'''
        with self._path.open() as f:
            contents = csv.DictReader(f)
            for entry in contents:
                if entry['prname'] == 'Canada':
                    continue

                # NOTE: PHAC doesn't report testing counts as of 2022-08-26
                yield CaseTesting(
                    date=_to_date(entry['date']),
                    province=entry['prname'],
                    country='Canada',
                    tested=-1,
                    under_investigation=-1
                )
| richengguy/case-rate | src/case_rate/sources/public_health_agency_canada.py | public_health_agency_canada.py | py | 4,097 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "datetime.datetime.strptime",
"line_number": 26,
"usage_type": "call"
},
{
"api_name": "datetime.datetime",
"line_number": 26,
"usage_type": "attribute"
},
{
"api_name": "datetime.date",
"line_number": 13,
"usage_type": "attribute"
},
{
"api_name": "... |
73387100264 | import time
from dataclasses import dataclass
from transmitter import sendEmail
from selenium import webdriver
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.chrome.options import Options
from selenium.webdriver.firefox.options import Options as FFOptions
from selenium.webdriver.common.by import By
from selenium.webdriver.common.keys import Keys
from selenium.webdriver.support import expected_conditions as EC
@dataclass()
class AutoWeb:
    """Small Selenium wrapper that drives a Chrome or Firefox session."""
    url: str            # page opened by start_session()
    ff: bool = False    # use Firefox instead of Chrome
    headless: bool = False  # run the browser without a visible window

    def start_session(self):
        """Launch the configured browser and open ``self.url``."""
        options = FFOptions() if self.ff else Options()
        if self.headless: options.add_argument('--headless')
        # NOTE(review): driver paths are hard-coded to one machine — confirm
        # before running elsewhere.
        dr = '/Users/ashhadghazi/scripts/python/webdrivers/geckodriver' if self.ff \
            else '/Users/ashhadghazi/scripts/python/webdrivers/chromedriver'
        self.br = webdriver.Firefox(executable_path=dr, options=options) if self.ff else webdriver.Chrome(dr, chrome_options=options)
        self.br.get(self.url)

    def get(self, url):
        """Navigate the current session to *url*."""
        self.br.get(url)

    def go_back(self):
        """Go back one entry in the browser history."""
        self.br.back()

    def refresh(self):
        """Reload the current page."""
        self.br.refresh()

    def stop_session(self):
        """Close the browser and end the WebDriver session."""
        self.br.quit()

    def get_key(self, key, only_check_if_special=False):
        """Map a friendly key name to a Selenium ``Keys`` constant.

        With ``only_check_if_special=True`` just report whether *key* is one
        of the special names; otherwise return the mapped constant, or *key*
        unchanged when it is ordinary text.
        """
        key_map = {
            'up': Keys.UP,
            'right': Keys.RIGHT,
            'down': Keys.DOWN,
            'left': Keys.LEFT,
            'enter': Keys.ENTER,
            'escape': Keys.ESCAPE
        }
        if only_check_if_special:
            return True if key in key_map.keys() else False
        return key_map[key] if key in key_map.keys() else key

    def get_by(self, by):
        """Translate a short locator name ('id', 'css', ...) to a ``By`` constant."""
        by_map = {
            'id': By.ID,
            'css': By.CSS_SELECTOR,
            'name': By.NAME,
            'xpath': By.XPATH,
            'class': By.CLASS_NAME,
            'link_text': By.LINK_TEXT,
            'partial_link_text': By.PARTIAL_LINK_TEXT
        }
        return by_map[by]

    def element_exists(self, by, elem):
        """Return the match count (truthy when at least one element exists)."""
        by = self.get_by(by)
        return len(self.br.find_elements(by, elem))

    def get_element(self, by, elem):
        """Wait up to 5s for the element located by (*by*, *elem*)."""
        by = self.get_by(by)
        return WebDriverWait(self.br, 5).until(
            EC.presence_of_element_located((by, elem)))

    def get_elements(self, by, elem):
        """Wait up to 5s and return all elements matching (*by*, *elem*)."""
        by = self.get_by(by)
        return WebDriverWait(self.br, 5).until(
            EC.presence_of_all_elements_located((by, elem)))

    def get_element_text(self, elem, by):
        # NOTE(review): argument order (elem, by) is reversed relative to the
        # other helpers — confirm callers before unifying.
        by = self.get_by(by)
        return WebDriverWait(self.br, 5).until(
            EC.presence_of_element_located((by, elem))).text

    def get_table_rows(self, by, elem):
        # NOTE(review): ``by`` is converted here and then again inside
        # get_element(); the second get_by() lookup would raise KeyError —
        # the first conversion looks redundant, confirm.
        by = self.get_by(by)
        table = self.get_element(by, elem)
        return table.find_elements_by_tag_name('li')

    def notify(sbj, msg):
        # NOTE(review): missing ``self`` — when called as a bound method the
        # instance arrives as ``sbj``. Credentials are placeholders.
        email = "<email_address>"
        pwd = "<password>"
        sendEmail(email, pwd, [email], msg, sbj)

    def run_op(self, by, elem, op, op_value=''):
        """Perform one action ('send', 'clear' or 'click') on the element
        located by (*by*, *elem*); *op_value* is the text/key for 'send'."""
        by = self.get_by(by)
        # NOTE(review): same double-conversion issue as get_table_rows — the
        # converted ``by`` is passed back into get_element().
        if op == 'send':
            # Clear the field first unless sending a special key.
            if not self.get_key(op_value, only_check_if_special=True):
                self.get_element(by, elem).clear()
                time.sleep(0.2)
            self.get_element(by, elem).send_keys(self.get_key(op_value))
        elif op == 'clear':
            self.get_element(by, elem).clear()
        elif op == 'click':
            self.get_element(by, elem).click()
            time.sleep(0.2)

    def run_ops(self, ops_map):
        """Run a sequence of operations; each item is a dict with the keys
        'by', 'elem', 'op' and 'op_value'."""
        for op in ops_map:
            self.run_op(op['by'], op['elem'], op['op'], op['op_value'])
{
"api_name": "selenium.webdriver.firefox.options.Options",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "selenium.webdriver.chrome.options.Options",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "selenium.webdriver.Firefox",
"line_number": 23,
"usa... |
7431120432 | from sqlalchemy import create_engine
from constants import get_nutrient_idx
def load_cache():
    """Load basic nutrition facts for foods with id < 1100 from usda.sql3.

    Returns a dict keyed by food id; each value is a list of
    ``[id, description, group, serving_desc, gm_weight, <7 nutrient slots>]``
    where the nutrient slots are indexed via ``get_nutrient_idx()``.
    """
    db = create_engine('sqlite:///usda.sql3')
    cache = {}
    query = "SELECT food.id,food.long_desc,food_group.name,nutrient.tagname,nutrition.amount,weight.gm_weight,weight.gm_weight*nutrition.amount/100.0 as gm_amount,weight.description FROM food, food_group, nutrient, nutrition, weight where food.food_group_id = food_group.id and food.id = nutrition.food_id and nutrient.id = nutrition.nutrient_id and weight.food_id = food.id and food.id < 1100 and weight.sequence_num = 1 and nutrient.tagname in ('ENERC_KCAL','CHOCDF','PROCNT','FAT','LACS','SUGAR','CAFFN') order by food.id, nutrient.tagname"
    conn = db.connect()
    try:
        result = conn.execute(query)
        rows = result.cursor.fetchall()
    finally:
        # Bug fix: the connection was previously never released.
        conn.close()
    nidx = get_nutrient_idx()
    for row in rows:
        fid = row[0]
        desc = row[1]
        group = row[2]
        nutrient = row[3]
        gm_weight = row[5]
        gm_amount = row[6]
        serving_desc = row[7]
        if fid not in cache:
            # Initialize the record with zeroed nutrient slots.
            cache[fid] = [fid, desc, group, serving_desc, gm_weight, 0, 0, 0, 0, 0, 0, 0]
        # Place the per-serving amount at this nutrient's fixed index.
        cache[fid][nidx[nutrient]] = gm_amount
    return cache
{
"api_name": "sqlalchemy.create_engine",
"line_number": 5,
"usage_type": "call"
},
{
"api_name": "constants.get_nutrient_idx",
"line_number": 14,
"usage_type": "call"
}
] |
#Are you a robot?
from random import randint, randrange
from PIL import Image, ImageDraw, ImageFont
import os
import textwrap
class CreateCaptcha:
    """Generate simple text CAPTCHAs and render them as noisy PNG images."""

    def __init__(self):
        self.valido = False  # result of the most recent validation
        self.l = []          # characters of the most recent captcha
        self.width = 300     # image width in pixels
        self.height = 150    # image height in pixels
        self.font_size = 60  # font size used to draw the captcha text

    def Gerar(self):
        """Generate and return a random captcha string of 4 or 8 characters
        (uppercase letters A-Z and digits 1-9)."""
        # Bug fix: reset the buffer so repeated calls don't concatenate
        # previous captchas into the new one.
        self.l = []
        y = randrange(4, 12, 4)  # captcha length: 4 or 8
        cont = y
        while cont > 0:
            n = randint(0, 1)
            if n == 0:
                val = chr(randint(65, 90))   # uppercase letter A-Z
            else:
                val = chr(randint(49, 57))   # digit 1-9 (avoids ambiguous 0)
            self.l.append(val)
            cont -= 1
        return ''.join(self.l)

    def Validar(self, MeuGerador, ValorUser):
        """Return True when the user's answer matches the generated captcha.

        Bug fix: the previous version returned the stale ``self.valido``
        flag on mismatch, so once any check succeeded every later check
        also reported success. The flag is now recomputed on every call.
        """
        self.valido = (ValorUser == MeuGerador)
        return self.valido

    def GerarImagem(self, text):
        """Render *text* into static/image/element_image.png with random
        noise lines and dots to hinder automated OCR."""
        # Blank white canvas.
        image = Image.new('RGB', (self.width, self.height), color=(255, 255, 255))
        draw = ImageDraw.Draw(image)
        # NOTE(review): assumes "arial.ttf" is resolvable on this system —
        # on non-Windows hosts this raises OSError; confirm deployment target.
        # (The previous dead ImageFont.load_default() call was removed; its
        # result was immediately overwritten.)
        font = ImageFont.truetype("arial.ttf", self.font_size)
        # Center the text using its bounding box.
        text_bbox = draw.textbbox((0, 0), text, font)
        x = (self.width - text_bbox[2] - text_bbox[0]) / 2
        y = (self.height - text_bbox[3] - text_bbox[1]) / 2
        draw.text((x, y), text, fill=(0, 0, 0), font=font)
        # Strike-through lines across the image.
        for _ in range(10):
            x1 = randint(0, self.width - 1)
            y1 = randint(0, self.height - 1)
            x2 = randint(0, self.width - 1)
            y2 = randint(0, self.height - 1)
            draw.line([(x1, y1), (x2, y2)], fill=(0, 0, 0), width=2)
        # Random background dot noise.
        for _ in range(1000):
            x = randint(0, self.width - 1)
            y = randint(0, self.height - 1)
            draw.point((x, y), fill=(0, 0, 0))
        # Save the file under the static/image folder.
        d = os.getcwd()
        i = "static\\image"
        caminho = os.path.join(d, i)
        element_image_path = os.path.join(caminho, "element_image.png")
        image.save(element_image_path)
if __name__ == "__main__":
    # Manual smoke test: generate a captcha, render it to disk, then ask the
    # user to retype it and report whether the answer validates.
    c = CreateCaptcha()
    x = c.Gerar()
    c.GerarImagem(x)
    print(x)
    # The prompt string is user-facing Portuguese ("copy it here or type it wrong").
    y = input("Copie aqui ou digite errado: ").upper()
    print(c.Validar(x, y))
| Jv131103/ProjectCaptcha | cp.py | cp.py | py | 2,968 | python | pt | code | 0 | github-code | 36 | [
{
"api_name": "random.randrange",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "random.randint",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "random.randint",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "random.randint",
"... |
1395161242 | #!/usr/bin/env python
# coding: utf-8
# 1. Compare and contrast the float and Decimal classes' benefits and drawbacks.
#
# floats are faster and more memory-efficient, suitable for a wide range of values, but can have precision and rounding issues. Decimals provide precise decimal arithmetic, accurate representation of decimal numbers, but are slower and have a more limited value range. The choice between float and Decimal depends on the specific requirements of the application.
# 2. Decimal('1.200') and Decimal('1.2') are two objects to consider. In what sense are these the same object? Are these just two ways of representing the exact same value, or do they correspond to different internal states?
#
# Decimal('1.200') and Decimal('1.2') represent the same value of 1.2 mathematically. However, internally they have different representations due to the presence or absence of trailing zeros, making them distinct Decimal objects.
# 3. What happens if the equality of Decimal('1.200') and Decimal('1.2') is checked?
#
# In[6]:
from decimal import Decimal
decimal1 = Decimal('1.200')
decimal2 = Decimal('1.2')
print(decimal1 == decimal2)
# 4. Why is it preferable to start a Decimal object with a string rather than a floating-point value?
#
# In[10]:
#example
from decimal import Decimal
float_value = 0.1
decimal_float = Decimal(float_value)
decimal_string = Decimal('0.1')
# In[11]:
print(decimal_float)
# In[12]:
print(decimal_string)
# 5. In an arithmetic phrase, how simple is it to combine Decimal objects with integers?
#
# Combining Decimal objects with integers in arithmetic operations is simple and straightforward. The Decimal class seamlessly handles the interoperability between Decimal objects and integers, allowing you to use standard arithmetic operators without any additional complexity.
# 6. Can Decimal objects and floating-point values be combined easily?
#
# Combining Decimal objects with floating-point values in arithmetic operations is easy and straightforward in Python. The Decimal class seamlessly supports interoperability between Decimal objects and floating-point values, allowing you to use standard arithmetic operators without any complications.
# 7. Using the Fraction class but not the Decimal class, give an example of a quantity that can be expressed with absolute precision.
#
# In[13]:
#The Fraction class in Python allows precise representation of rational numbers without any loss of precision. Here's an example of a quantity that can be expressed with absolute precision using the Fraction class
from fractions import Fraction
fraction = Fraction(4,8)
# In[15]:
print(fraction)
# 8. Describe a quantity that can be accurately expressed by the Decimal or Fraction classes but not by a floating-point value.
#
# In[16]:
#example
from decimal import Decimal
decimal = Decimal('2')/ Decimal('8')
# In[17]:
print(decimal)
# In[19]:
#example
from fractions import Fraction
fraction = Fraction(2,8)
# In[20]:
print(fraction)
# Q9. Consider the following two Fraction objects: Fraction(1, 2) and Fraction(5, 10). Is the internal state of these two objects the same? Why do you think that is?
#
# In[25]:
# Yes, the internal state of these two objects is the same: Fraction reduces 5/10 to lowest terms (1/2) at construction time.
from fractions import Fraction
fractions1 = Fraction(1,2)
fractions2 = Fraction(5,10)
# In[26]:
print(fractions1)
# In[27]:
print(fractions2)
# Q10. How do the Fraction class and the integer type (int) relate to each other? Containment or inheritance?
#
# The Fraction class and the int type have a containment relationship. The Fraction class can work with and contain integer
# In[ ]:
| Rajn013/assignment-020 | Untitled83.py | Untitled83.py | py | 3,685 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "decimal.Decimal",
"line_number": 22,
"usage_type": "call"
},
{
"api_name": "decimal.Decimal",
"line_number": 23,
"usage_type": "call"
},
{
"api_name": "decimal.Decimal",
"line_number": 38,
"usage_type": "call"
},
{
"api_name": "decimal.Decimal",
... |
3974726049 | import json
import os
from datetime import datetime
from sys import exit as x
from typing import List
import cv2
import numpy as np
import pandas as pd
import printj # pip install printj
from jaitool.inference import D2Inferer as inferer
from jaitool.inference.models.hook import draw_info_box, draw_inference_on_hook2
from pyjeasy.file_utils import (dir_exists, file_exists, delete_dir,
make_dir, make_dir_if_not_exists)
from pyjeasy.math_utils import dist
from annotation_utils.coco.structs import COCO_Annotation, COCO_Dataset
from common_utils import path_utils
from common_utils.check_utils import check_value
from common_utils.common_types import BBox
from common_utils.common_types.bbox import BBox
from common_utils.common_types.bbox import ConstantAR_BBox as BBox
from common_utils.common_types.keypoint import Keypoint2D, Keypoint2D_List
from common_utils.cv_drawing_utils import (SimpleVideoViewer,
cv_simple_image_viewer,
draw_bbox,
draw_bool_mask,
draw_keypoints,
draw_skeleton)
# from common_utils.file_utils import (delete_dir, dir_exists, file_exists,
# make_dir, make_dir_if_not_exists)
from common_utils.path_utils import (get_all_files_in_extension_list,
get_all_files_of_extension, get_filename,
get_rootname_from_path, get_script_dir,
rel_to_abs_path)
from detectron2.config import get_cfg
from detectron2.engine import DefaultPredictor
from typing import List
# from logger import logger
from tqdm import tqdm
def infer(path: str, weights_path: str, thresh: int = 0.5, key: str = 'R', infer_dump_dir: str = '', model: str = 'mask_rcnn_R_50_FPN_1x', size: int = 1024,
          class_names: List[str]=['hook'],
          gt_path: str = '/home/jitesh/3d/data/coco_data/hook_test/json/cropped_hook.json'):
    """Run keypoint/segmentation inference on every image under *path* and
    write annotated result images to ``<infer_dump_dir>/infer_key_seg``.

    Per image, the highest-scoring ``pole`` detection supplies a diameter
    estimate (via ``compute_diameter`` on its mask) and the highest-scoring
    ``hook`` detection supplies keypoints; both are drawn on the image
    together with an info box (``draw_info_box``).

    NOTE(review): indentation was reconstructed from a whitespace-mangled
    source — verify block nesting against the original repository.
    NOTE(review): ``thresh``, ``key`` and ``model`` are accepted but never
    used below, and ``class_names`` is a mutable default argument.
    NOTE(review): ``compute_diameter`` and ``confirm_folder`` are not defined
    in this module — presumably imported elsewhere; verify.
    """
    conf_thresh = 0.001
    show_bbox_border = True
    # Ground truth is loaded but only consumed by the commented-out
    # evaluation code near the end of this function.
    gt_dataset = COCO_Dataset.load_from_path(json_path=gt_path)
    inferer_seg = inferer(
        weights_path=weights_path,
        confidence_threshold=0.1,
        # num_classes=1,
        # num_classes=2,
        class_names=class_names,
        # class_names=['hook'],
        model='keypoint_rcnn_R_50_FPN_1x',
        # model='faster_rcnn_X_101_32x8d_FPN_3x',
        # model='faster_rcnn_R_101_FPN_3x',
        # model=model,
    )
    inferer_seg.cfg.INPUT.MIN_SIZE_TEST = size
    inferer_seg.cfg.INPUT.MAX_SIZE_TEST = size
    inferer_seg.cfg.MODEL.MASK_ON = True
    # NOTE(review): these reassignments discard the caller's weights_path;
    # the argument is only used by the `inferer` constructed above.
    weights_path = '/home/jitesh/3d/data/coco_data/hook_sim_real_data7/weights/Keypoints_R_50_1x_aug_cm_seg_val_1/model_0009999.pth'
    weights_path = '/home/jitesh/3d/data/coco_data/hook_sim_real_data7_0.1/weights/Keypoints_R_50_1x_aug_cm_seg_val_3/model_0009999.pth'
    weights_path = '/home/jitesh/3d/data/coco_data/hook_sim_real_data7_0.1/weights/Keypoints_R_50_1x_aug_cm_seg_val_1/model_0007999.pth'
    weights_path = '/home/jitesh/3d/data/coco_data/hook_sim_real_data8/weights/Keypoints_R_50_1x_aug_key_seg_val_1/model_0009999.pth'
    weights_path = '/home/jitesh/3d/data/coco_data/hook_sim_real_data8/weights/Keypoints_R_50_1x_aug_key_seg_val_2/model_0004999.pth'
    # inferer_key = jDetectron2KeypointInferer(
    #     weights_path=weights_path,
    #     # ref_coco_ann_path=f'/home/jitesh/3d/data/coco_data/hook_real1/json/hook.json',
    #     # categories_path=f'/home/jitesh/3d/data/categories/hook_infer.json',
    #     # categories_path=f'/home/jitesh/3d/data/categories/hook_7ckpt.json',
    #     categories_path=f'/home/jitesh/3d/data/categories/hook_7ckpt_pole.json',
    #     target_category='hook',
    #     model_name='keypoint_rcnn_R_50_FPN_1x',
    #     bbox_threshold=bbox_thresh,
    #     kpt_threshold=kpt_thresh,
    #     key_box='hook',
    # )
    # k_size = 1024
    # inferer_key.cfg.INPUT.MIN_SIZE_TEST = k_size
    # inferer_key.cfg.INPUT.MAX_SIZE_TEST = k_size
    possible_modes = ['save', 'preview']
    mode = 'save'
    check_value(mode, valid_value_list=possible_modes)
    # make_dir_if_not_exists(infer_dump_dir)
    img_extensions = ['jpg', 'JPG', 'png', 'PNG']
    img_pathlist = get_all_files_in_extension_list(
        dir_path=f'{path}', extension_list=img_extensions)
    img_pathlist.sort()
    confirm_folder(infer_dump_dir, mode)
    # confirm_folder(f'{infer_dump_dir}/good_seg', mode)
    # confirm_folder(f'{infer_dump_dir}/good_cropped', mode)
    # confirm_folder(f'{infer_dump_dir}/good', mode)
    # confirm_folder(f'{infer_dump_dir}/G(>4D) P(>4D)', mode)
    # confirm_folder(f'{infer_dump_dir}/G(>4D) P(<4D)', mode)
    # confirm_folder(f'{infer_dump_dir}/G(<4D) P(>4D)', mode)
    # confirm_folder(f'{infer_dump_dir}/G(<4D) P(<4D)', mode)
    # confirm_folder(f'{infer_dump_dir}/bad', mode)
    confirm_folder(f'{infer_dump_dir}/infer_key_seg', mode)
    count = 0
    start = datetime.now()
    # The dataframe is only populated by the commented-out evaluation code.
    df = pd.DataFrame(data=[], columns=['gt_d', 'pred_d',
                                        'gt_ab', 'pred_ab',
                                        'gt_ratio', 'pred_ratio',
                                        'gt_ratio>4', 'pred_ratio>4',
                                        'correct_above4d_ratio', 'incorrect_above4d_ratio',
                                        'correct_below4d_ratio', 'incorrect_below4d_ratio',
                                        ])
    # 'image_path'])
    for i, img_path in enumerate(tqdm(img_pathlist, desc='Writing images',)):
        img_filename = get_filename(img_path)
        # if not '201005_70_縮小革命PB020261.jpg' in img_path:
        #     continue
        # if i > 19:
        #     continue
        printj.purple(img_path)
        img = cv2.imread(img_path)
        result = img
        # print(f'shape {img.shape}')
        # cv2.imshow('i', img)
        # cv2.waitKey(100000)
        # continue
        score_list, pred_class_list, bbox_list, pred_masks_list, pred_keypoints_list, vis_keypoints_list, kpt_confidences_list = inferer_seg.predict(
            img=img)
        # printj.blue(pred_masks_list)
        max_hook_score = -1
        max_pole_score = -1
        diameter = -1
        len_ab = -1
        found_hook = False
        found_pole = False
        # Scan all detections, keeping only the best-scoring pole and hook.
        for score, pred_class, bbox, mask, keypoints, vis_keypoints, kpt_confidences in zip(score_list, pred_class_list, bbox_list, pred_masks_list, pred_keypoints_list, vis_keypoints_list, kpt_confidences_list):
            if pred_class == 'pole':
                found_pole = True
                if max_pole_score < score:
                    # if True:
                    max_pole_score = score
                    diameter = compute_diameter(mask)
                    # result = draw_bool_mask(img=result, mask=mask, color=[
                    #     0, 255, 255],
                    #     transparent=True
                    # )
                    pole_bbox_text = f'pole {str(round(score, 2))}'
                    pole_bbox = bbox
                    pole_mask = mask
                    # result = draw_bbox(img=result, bbox=bbox,
                    #                    text=pole_bbox_text, label_only=not show_bbox_border, label_orientation='bottom')
                printj.blue(f'diameter={diameter}')
            if pred_class == 'hook':
                # printj.green.bold_on_yellow(score)
                found_hook = True
                if max_hook_score < score:
                    # if True:
                    max_hook_score = score
                    hook_bbox = BBox.buffer(bbox)
                    hook_score = round(score, 2)
                    hook_mask = mask
                    hook_keypoints = keypoints
                    hook_vis_keypoints = vis_keypoints
                    hook_kpt_confidences = kpt_confidences
                # xmin, ymin, xmax, ymax = bbox.to_int().to_list()
                # _xmin, _ymin, _xmax, _ymax = _bbox.to_int().to_list()
                # width = _xmax-_xmin
                # height = _ymax-_ymin
                # scale = 0.2
                # xmin = max(int(_xmin - width*scale), 0)
                # xmax = min(int(_xmax + width*scale), img.shape[1])
                # ymin = max(int(_ymin - height*scale), 0)
                # ymax = min(int(_ymax + height*scale), img.shape[0])
                # printj.red(score)
                # printj.red(bbox)
                # return
                # img = draw_bbox(img=img, bbox=_bbox, color=[
                #     0, 255, 255], thickness=2, text=f"{pred_class} {round(score, 3)}",
                #     label_orientation='top')
                # img = draw_bbox(img=img, bbox=_bbox, color=[
                #     0, 255, 255], thickness=2, text=f"{pred_class} {round(score, 3)}",
                #     label_orientation='bottom')
                # result = draw_bool_mask(img=result, mask=mask, color=[
                #     255, 255, 0],
                #     transparent=True
                # )
                # result = result
                # bbox_text = str(round(score, 4))
                # result = draw_bbox(img=result, bbox=bbox,
                #                    text=bbox_text, label_only=not show_bbox_border)
                bbox_label_mode = 'euler'
                # result = draw_keypoints(
                #     img=result, keypoints=vis_keypoints, radius=2, color=[0, 0, 255],
                #     # keypoint_labels=kpt_labels, show_keypoints_labels=True, label_thickness=1,
                #     # ignore_kpt_idx=conf_idx_list
                # )
                kpt_labels = ["kpt-a", "kpt-b", "kpt-cb",
                              "kpt-c", "kpt-cd", "kpt-d", "kpt-e"]
                kpt_skeleton = [[0, 1], [1, 2], [2, 3], [3, 4], [4, 5], [5, 6]]
                # Split keypoints into confident / non-confident sets by
                # comparing each keypoint confidence to conf_thresh.
                conf_idx_list = np.argwhere(
                    np.array(kpt_confidences) > conf_thresh).reshape(-1)
                not_conf_idx_list = np.argwhere(
                    np.array(kpt_confidences) <= conf_thresh).reshape(-1).astype(int)
                conf_keypoints, conf_kpt_labels = np.array(
                    vis_keypoints)[conf_idx_list], np.array(kpt_labels)[conf_idx_list]
                not_conf_keypoints, not_conf_kpt_labels = np.array(
                    vis_keypoints)[not_conf_idx_list], np.array(kpt_labels)[not_conf_idx_list]
                cleaned_keypoints = np.array(
                    vis_keypoints.copy()).astype(np.float32)
                # result = draw_bool_mask(img=result, mask=mask, color=[
                #     255, 255, 0],
                #     transparent=True
                # )
                # result, len_ab = draw_inference_on_hook2(img=result, cleaned_keypoints=cleaned_keypoints, kpt_labels=kpt_labels, kpt_skeleton=kpt_skeleton,
                #     score=score, bbox=_bbox, vis_keypoints=vis_keypoints, kpt_confidences=kpt_confidences, conf_idx_list=conf_idx_list, not_conf_idx_list=not_conf_idx_list,
                #     conf_keypoints=conf_keypoints, conf_kpt_labels=conf_kpt_labels, not_conf_keypoints=not_conf_keypoints, not_conf_kpt_labels=not_conf_kpt_labels,
                #     conf_thresh=conf_thresh, show_bbox_border=show_bbox_border, bbox_label_mode=bbox_label_mode, index_offset=0, diameter=diameter)
                # result=result
                # printj.green(_bbox)
                # printj.green(_bbox.to_int())
                # printj.green(_bbox.to_int().to_list())
        printj.green.on_white(max_hook_score)
        if found_pole:
            result = draw_bool_mask(img=result, mask=pole_mask, color=[
                0, 255, 255],
                transparent=True
            )
            result = draw_bbox(img=result, bbox=pole_bbox,
                               text=pole_bbox_text, label_only=not show_bbox_border, label_orientation='top')
            result = draw_bbox(img=result, bbox=pole_bbox,
                               text=pole_bbox_text, label_only=not show_bbox_border, label_orientation='bottom')
        if found_hook:
            result = draw_bool_mask(img=result, mask=hook_mask, color=[
                255, 255, 0],
                transparent=True
            )
            # NOTE(review): cleaned_keypoints / conf_* below come from the
            # LAST hook detection in the loop, not necessarily the
            # best-scoring one stored in hook_* — verify intent.
            result, len_ab = draw_inference_on_hook2(img=result, cleaned_keypoints=cleaned_keypoints, kpt_labels=kpt_labels, kpt_skeleton=kpt_skeleton,
                score=hook_score, bbox=hook_bbox, vis_keypoints=hook_vis_keypoints, kpt_confidences=hook_kpt_confidences, conf_idx_list=conf_idx_list, not_conf_idx_list=not_conf_idx_list,
                conf_keypoints=conf_keypoints, conf_kpt_labels=conf_kpt_labels, not_conf_keypoints=not_conf_keypoints, not_conf_kpt_labels=not_conf_kpt_labels,
                conf_thresh=conf_thresh, show_bbox_border=show_bbox_border, bbox_label_mode=bbox_label_mode, index_offset=0, diameter=diameter)
            printj.purple(len_ab)
            if len_ab == 0:
                printj.green(keypoints)
            result = draw_info_box(result, len_ab, diameter)
        # img: np.ndarray, cleaned_keypoints, kpt_labels: List[str], kpt_skeleton: List[list],
        # score: float, bbox: BBox, vis_keypoints: list, kpt_confidences: list, conf_idx_list: list, not_conf_idx_list: list,
        # conf_keypoints, conf_kpt_labels, not_conf_keypoints, not_conf_kpt_labels,
        # conf_thresh: float = 0.3, show_bbox_border: bool = False, bbox_label_mode: str = 'euler', index_offset: int = 0, diameter=1
        # cv2.imshow('i', result)
        # # cv2.imwrite('i', result)
        # cv2.waitKey(10000)
        # quit_flag = cv_simple_image_viewer(img=result, preview_width=1000)
        # if quit_flag:
        #     break
        # cv2.imwrite(f"{infer_dump_dir}/good_seg/{img_filename}", result)
        cv2.imwrite(f"{infer_dump_dir}/infer_key_seg/{img_filename}", result)
        # cv2.imwrite(f"{infer_dump_dir}/good_seg/{img_filename}", result)
        # # img3, score_list, bbox_list, len_ab = inferer_key.infer_image(img=img2, draw_hm_collage=False, show_bbox_border=True, diameter=diameter)
        # if diameter<=0:
        #     length_ratio = np.inf
        # else:
        #     length_ratio = len_ab/diameter
        # printj.purple(length_ratio)
        # img4=img0
        # img4[ymin:ymax, xmin:xmax]=img3
        # font = cv2.FONT_HERSHEY_SIMPLEX
        # TopLeftCornerOfText = (10,50)
        # fontScale = 1
        # fontColor = (255,255,255)
        # lineType = 2
        # cv2.rectangle(img4, (5,10 ), (280,180), (0,0,0), -1)
        # cv2.rectangle(img4, (5,10 ), (280,180), (200,200,0), 2)
        # cv2.putText(img4, f'Len-ab: {round(len_ab,2)}', (10,50), font, fontScale, fontColor, lineType)
        # cv2.putText(img4, f'Diameter: {round(diameter,2)}', (10,100), font, fontScale, fontColor, lineType)
        # cv2.putText(img4, str(round(length_ratio,2))+' D', (10,150), font, fontScale, fontColor, lineType)
        # printj.purple(f'img0.shape = {img0.shape}')
        # printj.purple(f'img.shape = {img.shape}')
        # printj.purple(f'img2.shape = {img2.shape}')
        # printj.purple(f'img3.shape = {img3.shape}')
        # printj.purple(f'img4.shape = {img4.shape}')
        # printj.purple(img.shape)
        # printj.purple(img2.shape)
        # printj.purple(img3.shape)
        # printj.purple(img4.shape)
        # quit_flag = cv_simple_image_viewer(img=img4, preview_width=1000)
        # if quit_flag:
        #     break
        # continue
        # if len(score_list) == 0:
        # if all(score < thresh for score in score_list):
        #     count = count +1
        #     # printj.purple(img_path)
        #     printj.purple(score_list)
        #     printj.yellow.bold_on_black(f'Good count: {i+1-count}, Bad count: {count}, Total: {i+1}')
        #     dump_path = f"{infer_dump_dir}/bad/{img_filename}"
        #     # cv2.imwrite(dump_path, img)
        # else:
        #     # # printj.purple(score_list)
        #     # pass
        #     dump_path = f"{infer_dump_dir}/good/{img_filename}"
        #     cv2.imwrite(f"{infer_dump_dir}/good_cropped/{img_filename}", img3)
        #     cv2.imwrite(f"{infer_dump_dir}/good_seg/{img_filename}", result)
        #     # dump_path = f"{infer_dump_dir}/{img_filename}"
        #     cv2.imwrite(dump_path, img4)
        #     printj.yellow(f"({i+1}/{len(img_pathlist)}): Wrote {dump_path}")
        # for image in gt_dataset.images:
        #     if image.file_name == img_filename:
        #         image_id = image.id
        # for ann in gt_dataset.annotations:
        #     if ann.image_id == image_id:
        #         keys = Keypoint2D_List.to_point_list(ann.keypoints)
        #         gt_diameter = keys[7].distance(keys[8])
        #         gt_len_ab = keys[0].distance(keys[1])
        #         # gt_ratio = round(gt_diameter/gt_len_ab, 2)
        #         if gt_diameter<=0:
        #             gt_ratio = np.inf
        #         else:
        #             gt_ratio = round(gt_len_ab/gt_diameter, 2)
        #         # correct_ratio = int((length_ratio>4)==(gt_ratio>4))
        #         # incorrect_ratio = int((length_ratio>4)!=(gt_ratio>4))
        #         correct_above4d_ratio = int((length_ratio>4)==(gt_ratio>4)==True)
        #         incorrect_below4d_ratio = int((length_ratio>4)==(gt_ratio<4)==True)
        #         correct_below4d_ratio = int((length_ratio<4)==(gt_ratio<4)==True)
        #         incorrect_above4d_ratio = int((length_ratio<4)==(gt_ratio>4)==True)
        #         if gt_diameter<=0:
        #             error_diameter = np.inf
        #         else:
        #             error_diameter = (gt_diameter-diameter)/gt_diameter*100
        #         if gt_len_ab<=0:
        #             error_len_ab = np.inf
        #         else:
        #             error_len_ab = (gt_len_ab-len_ab)/gt_len_ab*100
        #         # incorrect_below4d_ratio = int((length_ratio>4)==(gt_ratio<4)==True)
        #         # correct_below4d_ratio = int((length_ratio<4)==(gt_ratio<4)==True)
        #         # incorrect_above4d_ratio = int((length_ratio<4)==(gt_ratio>4)==True)
        #         row = {'gt_d': round(gt_diameter, 2), 'pred_d': diameter,
        #                'gt_ab': round(gt_len_ab, 2), 'pred_ab': len_ab,
        #                'error_diameter': error_diameter,
        #                'error_len_ab': error_len_ab,
        #                'gt_ratio': gt_ratio, 'pred_ratio': length_ratio,
        #                'gt_ratio>4': int(gt_ratio>4), 'pred_ratio>4': int(length_ratio>4),
        #                'correct_above4d_ratio': correct_above4d_ratio,
        #                'incorrect_above4d_ratio': incorrect_above4d_ratio,
        #                'correct_below4d_ratio': correct_below4d_ratio,
        #                'incorrect_below4d_ratio': incorrect_below4d_ratio,
        #                'image_path':img_path,
        #                }
        #         df = df.append(pd.DataFrame(row, index =[img_filename]) )
        #         if correct_above4d_ratio:
        #             cv2.imwrite(f"{infer_dump_dir}/G(>4D) P(>4D)/{img_filename}", img4)
        #         if incorrect_above4d_ratio:
        #             cv2.imwrite(f"{infer_dump_dir}/G(>4D) P(<4D)/{img_filename}", img4)
        #         if incorrect_below4d_ratio:
        #             cv2.imwrite(f"{infer_dump_dir}/G(<4D) P(>4D)/{img_filename}", img4)
        #         if correct_below4d_ratio:
        #             cv2.imwrite(f"{infer_dump_dir}/G(<4D) P(<4D)/{img_filename}", img4)
    # printj.blue(df)
    # # printj.cyan(df['correct_below4d_ratio'])
    # cm = pd.DataFrame(data=[],columns = ['p: more than 4D', 'p: less than 4D', 'Total'])
    # cm = cm.append(pd.DataFrame({'p: more than 4D':df['correct_above4d_ratio'].sum(),
    #                              'p: less than 4D':df['incorrect_above4d_ratio'].sum(),
    #                              'Total':df['correct_above4d_ratio'].sum()+df['incorrect_above4d_ratio'].sum()}, index =['g: more than 4D']) )
    # cm = cm.append(pd.DataFrame({'p: more than 4D':df['incorrect_below4d_ratio'].sum(),
    #                              'p: less than 4D':df['correct_below4d_ratio'].sum(),
    #                              'Total':df['incorrect_below4d_ratio'].sum()+df['correct_below4d_ratio'].sum()}, index =['g: less than 4D']) )
    # cm = cm.append(pd.DataFrame({'p: more than 4D':df['correct_above4d_ratio'].sum()+df['incorrect_below4d_ratio'].sum(),
    #                              'p: less than 4D':df['incorrect_above4d_ratio'].sum()+df['correct_below4d_ratio'].sum(),
    #                              'Total':df['correct_above4d_ratio'].sum()+df['incorrect_above4d_ratio'].sum()+df['incorrect_below4d_ratio'].sum()+df['correct_below4d_ratio'].sum()}, index =['Total']) )
    # printj.yellow(cm)
    # cm.to_excel(f"{os.path.abspath(f'{path}/..')}/cm_data.xlsx")
    # cm2 = pd.DataFrame(data=[],columns = ['correct', 'incorrect'])
    # cm2 = cm2.append(pd.DataFrame({'correct':df['correct_above4d_ratio'].sum(), 'incorrect':df['incorrect_above4d_ratio'].sum()}, index =['more than 4D']) )
    # cm2 = cm2.append(pd.DataFrame({'correct':df['correct_below4d_ratio'].sum(), 'incorrect':df['incorrect_below4d_ratio'].sum()}, index =['less than 4D']) )
    # printj.cyan(cm2)
    # df.to_excel(f"{os.path.abspath(f'{path}/..')}/test4d_data.xlsx") # pip install openpyx
    # cm.to_excel(f"{os.path.abspath(f'{path}/..')}/cm_data.xlsx") # pip install openpyx
    # total_time = datetime.now()-start
    # info = f'\nDetection count: {len(img_pathlist) - count}, Total: {len(img_pathlist)}'
    # info += f'\nNo detection count: {count}, Total: {len(img_pathlist)}'
    # # Starts # Write inference json
    # output_json_path = f"{infer_dump_dir}/infered_hook.json"
    # info += f'\nTotal inference time: {total_time} \nTime per image: {total_time/len(img_pathlist)}'
    # info += f'\n\nConfusion Matrix for ratio: \n{cm}'
    # printj.blue.bold_on_yellow(info)
    # text_file = f"{infer_dump_dir}/info.txt"
    # if os.path.exists(text_file):
    #     os.remove(text_file)
    # f= open(text_file,"w+")
    # f.write(info)
    # f.close()
    # printj.green.italic_on_black(infer_dump_dir)
    # from cocoeval_hook import run as evaluate
    # # evaluate(output_json_path)
    # os.system('spd-say "Folder Created"')
if __name__ == "__main__":
    # Build a timestamped output directory name and run inference with the
    # selected checkpoint.  Earlier assignments to dt_string3 / WEIGHT_PATH /
    # key are intentionally overridden by the later ones; the commented
    # alternatives record previous experiments.
    now = datetime.now()
    dt_string3 = now.strftime("%Y_%m_%d_%H_%M_%S")
    dt_string3 = now.strftime("%m_%d_%H")
    TEST_PATH = '/home/jitesh/3d/data/coco_data/hook_test/level_01'
    # TEST_PATH = '/home/jitesh/sekisui/teamviewer/sampled_data/VID_20200107_142503/img'
    # TEST_PATH = '/home/jitesh/3d/data/coco_data/hook_real1/s_good'
    # TEST_PATH = '/home/jitesh/3d/data/coco_data/hlk1_100_coco-data/img'
    # TEST_PATH = '/home/jitesh/3d/data/coco_data/hlk2_200_coco-data/img'
    # GT_PATH = f'/home/jitesh/3d/data/coco_data/hook_test/json/hook.json'
    # GT_PATH = f'/home/jitesh/3d/data/coco_data/hook_test/json/hook4.json'
    # WEIGHT_PATH='/home/jitesh/3d/data/coco_data/hook_weights/seg_hook_pole/model_0049999.pth'
    WEIGHT_PATH = '/home/jitesh/3d/data/coco_data/hlk1_100_coco-data/weights/Keypoints_R_50_1x_aug_cm_seg_val_5/model_0004999.pth'
    WEIGHT_PATH = '/home/jitesh/3d/data/coco_data/hook_sim_real_data8/weights/Keypoints_R_50_1x_aug_key_seg_val_1/model_0019999.pth'
    # WEIGHT_PATH = '/home/jitesh/3d/data/coco_data/hook_sim_real_data8/weights/Keypoints_R_50_1x_aug_key_seg_val_2/model_0099999.pth'
    WEIGHT_PATH = '/home/jitesh/3d/data/coco_data/hook_sim_real_data8/weights/Keypoints_R_50_1x_aug_key_seg_val_2/model_0049999.pth'
    WEIGHT_PATH = '/home/jitesh/3d/data/coco_data/hook_sim_real_data8/weights/Keypoints_R_101_3x_aug_key_seg_val_1/model_0099999.pth'
    # WEIGHT_PATH = '/home/jitesh/3d/data/coco_data/hook_sim_real_data8/weights/Keypoints_R_50_1x_aug_key_seg_val_3_hook-only/model_0049999.pth'
    # WEIGHT_PATH = '/home/jitesh/3d/data/coco_data/hook_sim_real_data8/weights/Keypoints_R_50_1x_aug_key_seg_val_2/model_0004999.pth'
    # KEY_WEIGHT_PATH = '/home/jitesh/3d/data/coco_data/hook_weights/seg_hook_pole/model_0049999.pth'
    # Derive labels for the output folder name from the checkpoint path.
    # NOTE(review): the '/'-index 6 below assumes this exact absolute path
    # layout — it breaks for checkpoints stored elsewhere.
    iteration = WEIGHT_PATH.split('_')[-1].split('.')[0]
    training_data_name = WEIGHT_PATH.split('/')[-2].split('_')[0] + '_'\
        + WEIGHT_PATH.split('/')[6].split('_')[-2] + '_'\
        + WEIGHT_PATH.split('/')[6].split('_')[-1]
    # training_model_name = WEIGHT_PATH.split('/')[-2].split('_')[0]
    kpt_thresh = 0.1
    bbox_thresh = 0.5
    img_size = 1024
    # key = f's{img_size}'
    key = f'hookpole'
    # key = f'hook'
    class_names=['hook', 'pole']
    # class_names=['hook']
    output_dir_path = f'{TEST_PATH}_{dt_string3}_{training_data_name}_{key}_{iteration}_{bbox_thresh}_vis_infer_output_50_1x'
    infer(path=TEST_PATH,
          weights_path=WEIGHT_PATH,
          # key='X'
          key='c',
          infer_dump_dir=output_dir_path,
          thresh=bbox_thresh,
          # model='mask_rcnn_R_50_FPN_1x',
          model='mask_rcnn_R_101_FPN_3x',
          size=img_size,
          class_names=class_names,
          # gt_path=GT_PATH,
          )
| Jitesh17/jaitool | jaitool/inference/models/hook/hook.py | hook.py | py | 25,575 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "typing.List",
"line_number": 44,
"usage_type": "name"
},
{
"api_name": "annotation_utils.coco.structs.COCO_Dataset.load_from_path",
"line_number": 50,
"usage_type": "call"
},
{
"api_name": "annotation_utils.coco.structs.COCO_Dataset",
"line_number": 50,
"us... |
10125696279 | #!/bin/bash/env python
# coding=UTF-8
# by Tarcisio marinho
# github.com/tarcisio-marinho
import requests,json,os
def minha_localizacao(frase):
    """Look up this machine's geolocation via the freegeoip web service and
    print/speak (espeak, Brazilian Portuguese) the requested piece of it.

    frase -- one of ``'pais'`` (country), ``'estado'`` (city/state) or
             ``'ip'`` (public IP address). Any other value does nothing.

    NOTE(review): freegeoip.net has been discontinued; the request will fail
    (and fall into the error branch) until the URL is pointed at a live
    replacement service.
    """
    url = 'http://freegeoip.net/json/'
    try:
        # Bug fix: reuse the `url` variable instead of repeating the literal
        # (it was previously assigned and never used).
        requisicao = requests.get(url)
        dicionario = json.loads(requisicao.text)
        if frase == u'pais':
            print('Você está no ')
            print(str(dicionario['country_name'])+', '+str(dicionario['country_code']))
            # NOTE(review): interpolating API data into a shell command is a
            # command-injection risk; prefer subprocess.run([...], shell=False).
            os.system('espeak -v pt-br -g 4 -a 100 "Você está no '+str(dicionario['country_name'])+'"')
        elif frase == u'estado':
            print('Você está em ')
            print(str(dicionario['city'])+'-'+str(dicionario['region_code'])+', '+dicionario['region_name'])
            os.system('espeak -v pt-br -g 4 -a 100 "Você está em '+str(dicionario['city'])+'"')
        elif frase == u'ip':
            print('Seu ip é: '+str(dicionario['ip']))
            os.system('espeak -v pt-br -g 4 -a 100 "Seu ipê é"')
    except Exception:
        # Bug fix: narrowed from a bare `except:` so SystemExit and
        # KeyboardInterrupt are no longer swallowed.
        print('Erro de conexão')
        os.system('espeak -v pt-br -g 4 -a 100 "Erro de conexão"')
def clima(cidade):
    """Fetch the current weather for *cidade* from OpenWeatherMap and
    print/speak (espeak, Brazilian Portuguese) the temperature and sky state.

    NOTE(review): the API key is hard-coded in the URL — move it to an
    environment variable or configuration file before publishing.
    NOTE(review): *cidade* is interpolated into both the URL and a shell
    command; sanitize it or use subprocess.run([...], shell=False).
    """
    url = 'http://api.openweathermap.org/data/2.5/weather?q='+ cidade + '&APPID=ab6ec687d641ced80cc0c935f9dd8ac9&units=metric'
    try:
        requisicao = requests.get(url)
        dicionario = json.loads(requisicao.text)
        print('A temperatura em '+str(cidade)+' é: ' + str(dicionario['main']['temp'])+ ' graus Celcius')
        os.system('espeak -v pt-br -g 4 -a 100 "A temperatura em '+str(cidade)+' é: ' + str(dicionario['main']['temp'])+ ' graus Celcius'+'"')
        # Hoist the repeated nested lookup for readability.
        condicao = dicionario['weather'][0]['main']
        if condicao == 'Clear':
            print('O clima está: Limpo/Aberto')
            os.system('espeak -v pt-br -g 4 -a 100 "O clima está: Limpo e Aberto"')
        elif condicao == 'Clouds':
            print('O clima está: Nebuloso/fechado')
            os.system('espeak -v pt-br -g 4 -a 100 "O clima está: Nebuloso e fechado"')
        elif condicao == 'Thunderstorm':
            print('O clima está muito chuvoso e com tempestade, cuidado pae')
            os.system('espeak -v pt-br -g 4 -a 100 "O clima está muito chuvoso e com tempestade, cuidado pae"')
        else:
            print('O clima está: '+ condicao)
            os.system('espeak -v pt-br -g 4 -a 100 "O clima está: '+ condicao+'"')
    except Exception:
        # Bug fix: narrowed from a bare `except:`.
        print('Erro de conexão')
        os.system('espeak -v pt-br -g 4 -a 100 "Erro de conexão"')
| tarcisio-marinho/Eliza | modulos/mapa.py | mapa.py | py | 2,562 | python | pt | code | 11 | github-code | 36 | [
{
"api_name": "requests.get",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "json.loads",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "os.system",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "os.system",
"line_number": 20,
... |
19494554547 | from jinja2 import Environment, FileSystemLoader, select_autoescape
# Shared Jinja2 environment: templates are loaded from the local "templates"
# directory and HTML/XML output is auto-escaped.
env = Environment(
    loader=FileSystemLoader('templates'),
    autoescape=select_autoescape(['html', 'xml'])
)
def render_run_plan(workout, routes, sunrise_sunset, forecast, dress):
    """Render the run-plan HTML page from its component pieces.

    Returns the rendered markup produced by the ``run_plan.html`` template,
    populated with the given workout, routes, sunrise/sunset times, weather
    forecast, and clothing recommendation.
    """
    context = {
        'workout': workout,
        'routes': routes,
        'sunrise_sunset': sunrise_sunset,
        'forecast': forecast,
        'dress': dress,
    }
    return env.get_template('run_plan.html').render(**context)
{
"api_name": "jinja2.Environment",
"line_number": 3,
"usage_type": "call"
},
{
"api_name": "jinja2.FileSystemLoader",
"line_number": 4,
"usage_type": "call"
},
{
"api_name": "jinja2.select_autoescape",
"line_number": 5,
"usage_type": "call"
}
] |
42482969055 | from __future__ import print_function
import logging
from optparse import OptionParser
import os
import re
import subprocess
import sys
import tempfile
from threading import Thread, Lock
import time
if sys.version < '3':
import Queue
else:
import queue as Queue
# Append `SPARK_HOME/dev` to the Python path so that we can import the sparktestsupport module
sys.path.append(os.path.join(os.path.dirname(os.path.realpath(__file__)), "../spark/dev/"))
from sparktestsupport.shellutils import which, subprocess_check_output # noqa
SPARK_HOME = os.environ.get("SPARK_HOME")
PYTHONPATH = os.environ.get("PYTHONPATH")
snappy_python_modules = ["pyspark-sql-snappy", "pyspark-streaming-snappy"]
def print_red(text):
    """Print *text* to stdout wrapped in ANSI red color escape codes."""
    print('\033[31m{0}\033[0m'.format(text))
LOG_FILE = os.path.join(os.path.abspath(''), "unit-tests.log")
FAILURE_REPORTING_LOCK = Lock()
LOGGER = logging.getLogger()
python_test_goals = {"pyspark-sql-snappy": "pyspark.sql.snappy.tests",
"pyspark-streaming-snappy": "pyspark.streaming.snappy.tests"}
def run_individual_python_test(test_name, pyspark_python):
    """Run one PySpark test module through ``bin/pyspark`` and wait for it.

    The test's stdout/stderr are captured in a temporary file. On failure the
    captured output is appended to LOG_FILE, echoed to stdout, and the whole
    runner process is terminated via ``os._exit`` (exits even from a worker
    thread).

    test_name -- dotted module name handed to bin/pyspark
                 (e.g. ``pyspark.sql.snappy.tests``).
    pyspark_python -- Python executable used for both driver and workers.

    NOTE(review): indentation reconstructed from a whitespace-mangled source.
    """
    env = dict(os.environ)
    env.update({
        'SPARK_TESTING': '1',
        'SPARK_PREPEND_CLASSES': '1',
        'PYSPARK_PYTHON': which(pyspark_python),
        'PYSPARK_DRIVER_PYTHON': which(pyspark_python)
    })
    LOGGER.info("Starting test(%s): %s", pyspark_python, test_name)
    start_time = time.time()
    try:
        per_test_output = tempfile.TemporaryFile()
        # Each (test, interpreter) pair gets its own working directory so
        # parallel suites do not clobber each other's scratch files.
        testDir = test_name + pyspark_python
        if not os.path.exists(testDir):
            os.makedirs(testDir)
        retcode = subprocess.Popen(
            [os.path.join(SPARK_HOME, "bin/pyspark"), test_name],
            stderr=per_test_output, stdout=per_test_output, env=env, cwd=testDir).wait()
    except:
        LOGGER.exception("Got exception while running %s with %s", test_name, pyspark_python)
        # Here, we use os._exit() instead of sys.exit() in order to force Python to exit even if
        # this code is invoked from a thread other than the main thread.
        os._exit(1)
    duration = time.time() - start_time
    # Exit on the first failure.
    if retcode != 0:
        try:
            # Serialize failure reporting across worker threads.
            with FAILURE_REPORTING_LOCK:
                with open(LOG_FILE, 'ab') as log_file:
                    per_test_output.seek(0)
                    log_file.writelines(per_test_output)
                per_test_output.seek(0)
                for line in per_test_output:
                    decoded_line = line.decode()
                    # Skip lines beginning with digits — presumably progress
                    # counters in the captured output; verify against runs.
                    if not re.match('[0-9]+', decoded_line):
                        print(decoded_line, end='')
                per_test_output.close()
        except:
            LOGGER.exception("Got an exception while trying to print failed test output")
        finally:
            print_red("\nHad test failures in %s with %s; see logs." % (test_name, pyspark_python))
            # Here, we use os._exit() instead of sys.exit() in order to force Python to exit even if
            # this code is invoked from a thread other than the main thread.
            os._exit(-1)
    else:
        per_test_output.close()
    LOGGER.info("Finished test(%s): %s (%is)", pyspark_python, test_name, duration)
def get_default_python_executables():
    """Return the default list of Python executables to test against.

    Only interpreters actually found on PATH are kept; when python2.6 is
    absent, the generic ``python`` executable is prepended as a fallback.
    """
    candidates = ["python2.6", "python3.4", "pypy"]
    python_execs = [exe for exe in candidates if which(exe)]
    if "python2.6" not in python_execs:
        LOGGER.warning("Not testing against `python2.6` because it could not be found; falling"
                       " back to `python` instead")
        python_execs.insert(0, "python")
    return python_execs
def parse_opts():
    """Build the option parser, validate the arguments and return the options."""
    parser = OptionParser(prog="run-tests")
    # (flags, keyword-arguments) table for every supported option.
    option_specs = [
        (("--python-executables",),
         dict(type="string", default=','.join(get_default_python_executables()),
              help="A comma-separated list of Python executables to test against (default: %default)")),
        (("--modules",),
         dict(type="string", default=",".join(sorted(snappy_python_modules)),
              help="A comma-separated list of Python modules to test (default: %default)")),
        (("-p", "--parallelism"),
         dict(type="int", default=4,
              help="The number of suites to test in parallel (default %default)")),
        (("--verbose",),
         dict(action="store_true", help="Enable additional debug logging")),
    ]
    for flags, kwargs in option_specs:
        parser.add_option(*flags, **kwargs)

    opts, extra_args = parser.parse_args()
    if extra_args:
        parser.error("Unsupported arguments: %s" % ' '.join(extra_args))
    if opts.parallelism < 1:
        parser.error("Parallelism cannot be less than 1")
    return opts
def main():
    """Entry point: run every (python executable, module) test combination.

    Parses options, validates the requested modules, enqueues one task per
    interpreter/module pair, then drains the queue with a pool of daemon
    worker threads. Individual failures abort the whole process from inside
    ``run_individual_python_test``.
    """
    opts = parse_opts()
    if (opts.verbose):
        log_level = logging.DEBUG
    else:
        log_level = logging.INFO
    logging.basicConfig(stream=sys.stdout, level=log_level, format="%(message)s")
    LOGGER.info("Running PySpark tests. Output is in %s", LOG_FILE)
    # Start from a clean aggregated log file.
    if os.path.exists(LOG_FILE):
        os.remove(LOG_FILE)
    python_execs = opts.python_executables.split(',')
    modules_to_test = []
    for module_name in opts.modules.split(','):
        if module_name in snappy_python_modules:
            modules_to_test.append(module_name)
        else:
            print("Error: unrecognized module '%s'. Supported modules: %s" %
                  (module_name, ", ".join(snappy_python_modules)))
            sys.exit(-1)
    LOGGER.info("Will test against the following Python executables: %s", python_execs)
    LOGGER.info("Will test the following Python modules: %s", [x for x in modules_to_test])
    task_queue = Queue.PriorityQueue()
    for python_exec in python_execs:
        # Log which implementation/version each executable resolves to.
        python_implementation = subprocess_check_output(
            [python_exec, "-c", "import platform; print(platform.python_implementation())"],
            universal_newlines=True).strip()
        LOGGER.info("%s python_implementation is %s", python_exec, python_implementation)
        LOGGER.info("%s version is: %s", python_exec, subprocess_check_output(
            [python_exec, "--version"], stderr=subprocess.STDOUT, universal_newlines=True).strip())
        for module in modules_to_test:
            test_goal = python_test_goals[module]
            task_queue.put((0, (python_exec, test_goal)))
    def process_queue(task_queue):
        # Worker loop: pop tasks until the queue is drained.
        while True:
            try:
                (priority, (python_exec, test_goal)) = task_queue.get_nowait()
            except Queue.Empty:
                break
            try:
                run_individual_python_test(test_goal, python_exec)
            finally:
                task_queue.task_done()
    start_time = time.time()
    for _ in range(opts.parallelism):
        worker = Thread(target=process_queue, args=(task_queue,))
        # Daemon threads so an interrupted main thread can exit immediately.
        worker.daemon = True
        worker.start()
    try:
        task_queue.join()
    except (KeyboardInterrupt, SystemExit):
        print_red("Exiting due to interrupt")
        sys.exit(-1)
    total_duration = time.time() - start_time
    LOGGER.info("Tests passed in %i seconds", total_duration)
if __name__ == "__main__":
    main()
| TIBCOSoftware/snappydata | python/run-snappy-tests.py | run-snappy-tests.py | py | 7,072 | python | en | code | 1,041 | github-code | 36 | [
{
"api_name": "sys.version",
"line_number": 11,
"usage_type": "attribute"
},
{
"api_name": "sys.path.append",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "sys.path",
"line_number": 18,
"usage_type": "attribute"
},
{
"api_name": "os.path.join",
"li... |
973355858 | from pathlib import Path
import unittest
from lispy import reader
from lispy import rep as step6_file
from lispy.env import Env
from lispy.mal_types import MalList, MalAtom, MalInt
from lispy.mal_types import MalSyntaxException, MalString
class TestStep6(unittest.TestCase):
    """Step-6 interpreter tests: strings, read-string/slurp/eval, and atoms."""

    def setUp(self) -> None:
        # Fresh REPL environment for every test.
        self._repl_env = step6_file.init_repl_env()

    def test_step6_string_unbalanced(self):
        # An unterminated string literal must raise a syntax error.
        with self.assertRaises(MalSyntaxException):
            step6_file.rep('"foo', self._repl_env)

    def test_step6_standard_string(self):
        # A MalString evaluates to itself; __str__ re-escapes the quotes.
        self.assertEqual(
            '"foo"', step6_file.EVAL(MalString('"foo"'), Env(None)).native()
        )
        self.assertEqual('"foo"', step6_file.rep('"foo"', self._repl_env).__str__())
        self.assertEqual('"foo"', MalString('"foo"').native())
        self.assertEqual('"\\"foo\\""', MalString('"foo"').__str__())

    def test_step6_reader_read_string(self):
        # The reader yields a MalList whose second element is the raw
        # MalString argument of (read-string ...).
        read = reader.read('(read-string "(1 2 (3 4) nil)")')
        self.assertTrue(isinstance(read, MalList))
        arg = read.native()[1]
        self.assertTrue(isinstance(arg, MalString))
        native_str = arg.native()
        self.assertEqual("(1 2 (3 4) nil)", native_str)

    def test_step6_read_string_no_escapes(self):
        self.assertEqual(
            "(1 2 (3 4) nil)",
            step6_file.rep('(read-string "(1 2 (3 4) nil)")', self._repl_env),
        )

    def test_step6_slurp(self):
        # slurp returns the file contents as a printable MAL string.
        f = Path(__file__).parent / "mal" / "tests" / "test.txt"
        self.assertEqual(
            '"A line of text\\n"', step6_file.rep(f'(slurp "{f}")', self._repl_env)
        )

    def test_step6_eval(self):
        self.assertEqual(
            "2", step6_file.rep('(eval (read-string "(+ 1 1)"))', self._repl_env)
        )

    def test_step6_str(self):
        self.assertEqual(
            '"abc2def ghi"',
            step6_file.rep('(str "abc" 2 "def" " ghi")', self._repl_env),
        )

    def test_step6_atom_type(self):
        # An atom evaluates to itself; nested native() unwraps the held int.
        atom = step6_file.EVAL(MalAtom(MalInt(1)), Env(None))
        self.assertEqual(1, atom.native().native())

    def test_step6_read_atom(self):
        atom = step6_file.EVAL(step6_file.READ("(atom 1)"), self._repl_env)
        self.assertEqual(1, atom.native().native())

    def test_step6_atom_deref(self):
        self.assertEqual("1", step6_file.rep("(deref (atom 1))", self._repl_env))

    def test_step6_atom_p(self):
        self.assertEqual("true", step6_file.rep("(atom? (atom 1))", self._repl_env))
        self.assertEqual("false", step6_file.rep("(atom? (+ 1 2))", self._repl_env))

    def test_step6_reset(self):
        self.assertEqual(
            "3", step6_file.rep("(do (def! a (atom 2)) (reset! a 3))", self._repl_env)
        )

    def test_step6_swap(self):
        # swap! applies the function to the atom's value plus extra args.
        self.assertEqual(
            "#<function>",
            step6_file.rep("(def! inc3 (fn* (a) (+ 3 a)))", self._repl_env),
        )
        self.assertEqual(
            "(atom 2)", step6_file.rep("(def! a (atom 2))", self._repl_env)
        )
        self.assertEqual("3", step6_file.rep("(swap! a + 1)", self._repl_env))
if __name__ == "__main__":
    unittest.main()
| rectalogic/lispy | tests/test_step6.py | test_step6.py | py | 3,164 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "unittest.TestCase",
"line_number": 11,
"usage_type": "attribute"
},
{
"api_name": "lispy.rep.init_repl_env",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "lispy.rep",
"line_number": 13,
"usage_type": "name"
},
{
"api_name": "lispy.mal_ty... |
15724939255 | import ast
import os
# Third party imports
from setuptools import find_packages, setup
# Absolute path of the directory containing this setup script; used to
# locate _version.py and README.rst.
HERE = os.path.abspath(os.path.dirname(__file__))
def get_version(module='spyder_reports'):
    """Parse the package version out of ``<module>/_version.py``.

    The file is read (not imported) and the ``VERSION_INFO`` tuple literal is
    evaluated with ``ast.literal_eval``, then joined with dots.

    Raises:
        RuntimeError: if no ``VERSION_INFO`` line is present. (Previously a
            missing line surfaced as an opaque UnboundLocalError, because
            ``version`` was only bound inside the loop.)
    """
    version = None
    with open(os.path.join(HERE, module, '_version.py'), 'r') as f:
        data = f.read()
    lines = data.split('\n')
    for line in lines:
        if line.startswith('VERSION_INFO'):
            version_tuple = ast.literal_eval(line.split('=')[-1].strip())
            version = '.'.join(map(str, version_tuple))
            break
    if version is None:
        raise RuntimeError('Could not find VERSION_INFO in %s/_version.py' % module)
    return version
def get_description():
    """Return the project's long description (the contents of README.rst)."""
    readme_path = os.path.join(HERE, 'README.rst')
    with open(readme_path, 'r') as readme_file:
        return readme_file.read()
# Runtime dependencies of the plugin.
REQUIREMENTS = ['spyder>=3.2.0', 'pweave', 'matplotlib']

# Package metadata; version and long description are read from the source
# tree so they live in a single place.
setup(
    name='spyder-reports',
    version=get_version(),
    keywords=['Spyder', 'Plugin'],
    url='https://github.com/spyder-ide/spyder-reports',
    license='MIT',
    author='Spyder Project Contributors',
    author_email='admin@spyder-ide.org',
    description='Spyder-IDE plugin for Markdown reports using Pweave.',
    long_description=get_description(),
    packages=find_packages(exclude=['contrib', 'docs', 'tests*']),
    install_requires=REQUIREMENTS,
    include_package_data=True,
    package_data={'spyder_reports.utils': ['*.md']},
    classifiers=[
        'Development Status :: 4 - Beta',
        'Intended Audience :: Developers',
        'Intended Audience :: Science/Research',
        'License :: OSI Approved :: MIT License',
        'Operating System :: MacOS',
        'Operating System :: Microsoft :: Windows',
        'Operating System :: POSIX :: Linux',
        'Programming Language :: Python :: 2.7',
        'Programming Language :: Python :: 3.5',
        'Programming Language :: Python :: 3.6'
    ])
| spyder-ide/spyder-reports | setup.py | setup.py | py | 1,820 | python | en | code | 72 | github-code | 36 | [
{
"api_name": "os.path.abspath",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 7,
"usage_type": "attribute"
},
{
"api_name": "os.path.dirname",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"line_nu... |
21091300951 | import streamlit as st
import scraper
# Default ticker symbols shown in the sidebar selectors.
stock = ['AAPL', 'AMZN', 'INTC', 'GOOG', 'CSCO']
search_btn = False
# Sidebar search: when the checkbox is on and a 4-character symbol is typed,
# pre-fetch its info and offer a "Buscar" button.
if st.sidebar.checkbox("Deseja procurar alguma ação?"):
    symbol = st.sidebar.text_input("Dígite o símbolo da ação desejada")
    if len(symbol) == 4:
        new_company_info = scraper.fetch_info(symbol)
        stock.append(symbol)
        search_btn = st.sidebar.button('Buscar')
def get_new(symbol):
    """Render the chart and info panel for a freshly searched symbol.

    NOTE(review): this relies on the module-level globals ``new_company_info``,
    ``chart_data`` and ``stock_symbol_info`` being set before it is called —
    it is only reachable when the search branch above ran; confirm.
    """
    if new_company_info != None:
        st.header(symbol)
        new_graph = scraper.fetch_company_data_history(chart_data, symbol)
        my_chart = st.line_chart(new_graph[chart_data], height=400, width=400)
        st.header(f'Informações: {stock_symbol_info}')
        st.text(scraper.fetch_info(symbol))
        return my_chart
# Sidebar widgets: single-stock info selector, multi-select for the chart,
# and which data column ('Volume' or 'Adj Close') to plot.
stock_symbol_info = st.sidebar.selectbox(
    'Informações da ação',
    ['', *stock],
)
stock_chart = st.sidebar.multiselect(
    'Ações para mostrar no gráfico',
    scraper.DEFAULT_COMPANIES,
    default=scraper.DEFAULT_COMPANIES
)
chart_data = st.sidebar.radio(
    'Gráfico do volume ou da cotação de fechamento ajustado', ('Volume', 'Adj Close')
)
def main():
    """Page body: show the search result if requested, otherwise charts/info."""
    if search_btn: get_new(str(symbol))
    else:
        st.header('Gráficos')
        if len(stock_chart) > 0:
            scraper.render_graph(chart_data, [*stock_chart])
        if (stock_symbol_info):
            st.header(f'Informações: {stock_symbol_info}')
            st.text(scraper.fetch_info(stock_symbol_info))
if __name__ == "__main__":
    main()
| rodrigoaqueiroz/laraia-yahoo-finance | main.py | main.py | py | 1,440 | python | pt | code | 0 | github-code | 36 | [
{
"api_name": "streamlit.sidebar.checkbox",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "streamlit.sidebar",
"line_number": 9,
"usage_type": "attribute"
},
{
"api_name": "streamlit.sidebar.text_input",
"line_number": 10,
"usage_type": "call"
},
{
"api_... |
32331250687 | #!/usr/bin/python3
"""
Script that takes in a letter and sends a POST request
to http://0.0.0.0:5000/search_user with the letter as a
parameter.
"""
from sys import argv
import requests
if __name__ == "__main__":
    # Default to an empty query when no letter is passed on the command line.
    if len(argv) < 2:
        q = ""
    else:
        q = argv[1]
    values = {'q': q}
    url = "http://0.0.0.0:5000/search_user"
    # Send the letter as form data (application/x-www-form-urlencoded).
    req = requests.post(url, values)
    try:
        js_ob = req.json()
        if js_ob:
            print("[{}] {}".format(js_ob.get("id"), js_ob.get("name")))
        else:
            print("No result")
    except ValueError:
        # Response body was not valid JSON.
        print("Not a valid JSON")
| ammartica/holbertonschool-higher_level_programming | 0x11-python-network_1/8-json_api.py | 8-json_api.py | py | 617 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "sys.argv",
"line_number": 14,
"usage_type": "argument"
},
{
"api_name": "sys.argv",
"line_number": 17,
"usage_type": "name"
},
{
"api_name": "requests.post",
"line_number": 21,
"usage_type": "call"
}
] |
15772164828 | from django.shortcuts import render,redirect
from axf.models import SlideShow, Cart,MainDescription, Product,CategorieGroup,ChildGroup,User,Address,Order
from django.contrib.auth import logout
import random
from axf.sms import send_sms
from django.http import JsonResponse
import uuid
# Create your views here.
def home(request):
    """Render the home page with carousel slides and the five main modules."""
    # Carousel (slide show) entries.
    slideList = SlideShow.objects.all()
    # The five main description modules; each stores three product ids.
    mainList = MainDescription.objects.all()
    for item in mainList:
        products = Product.objects.filter(categoryId=item.categoryId)
        # Resolve the stored product ids into Product instances for the template.
        item.product1 = products.get(productId=item.product1)
        item.product2 = products.get(productId=item.product2)
        item.product3 = products.get(productId=item.product3)
    return render(request, "home/home.html", {"slideList":slideList, "mainList":mainList})
def market(request, gid, cid, sid):
    """Render the market page filtered by group (gid), child group (cid), sort (sid)."""
    # Left-hand category groups.
    leftCategorieList = CategorieGroup.objects.all()
    # Products of the selected category group.
    products = Product.objects.filter(categoryId=gid)
    # Narrow down to a child group unless "0" (= all) was requested.
    if cid != "0":
        products = products.filter(childId=cid)
    # Sorting: "1" keeps default order, "2" price ascending, "3" price descending.
    if sid == "1":
        # products = products.order_by()
        pass
    elif sid == "2":
        products = products.order_by("price")
    elif sid == "3":
        products = products.order_by("-price")
    # Child groups of the selected category, for the sub-navigation.
    childs = ChildGroup.objects.filter(categorie__categorieId=gid)
    return render(request, "market/market.html", {"leftCategorieList":leftCategorieList, "products":products, "childs":childs, "gid":gid, "cid":cid})
def cart(request):
    """Render the shopping-cart page; redirect to /login/ without a valid token."""
    # A missing or stale 'token' cookie means the user is not logged in.
    tokenValue = request.COOKIES.get("token")
    if not tokenValue:
        # Not logged in.
        return redirect("/login/")
    try:
        user = User.objects.get(tokenValue=tokenValue)
    except User.DoesNotExist as e:
        return redirect("/login/")
    carts = Cart.objects.filter(user__tokenValue=tokenValue)
    return render(request, "cart/cart.html", {"carts":carts})
def mine(request):
    """Render the account page showing the phone number from the session."""
    phone_number = request.session.get('phoneNum', default='未登录')
    return render(request, "mine/mine.html", {'phone': phone_number})
# def login(request):
# if request.method == 'GET':
# if request.is_ajax():
# strNum = '0123456789'
# rand_str=''
# for i in range(0,6):
# rand_str += strNum[random.randrange(0,len(strNum))]
# msg ="您的验证码是:%s。请不要把验证码泄露给其他人。"%rand_str
# phone = request.GET.get('phoneNum')
# send_sms(msg,phone)
# #存入session
# request.session['code'] = rand_str
# return JsonResponse({'data':'ok'})
# else:
# return render(request,'mine/login.html')
# else:
# phone = request.POST.get('username')
# passwd = request.POST.get('passwd')
# code = request.session.get('code')
#
# if passwd == code:
# uuidStr=str(uuid.uuid4())
# try:
# user= User.objects.get(pk=phone)
# user.tokenValue = uuidStr
# user.save()
# except User.DoesNotExist as e:
# user = User.create(phone,None,uuidStr,'000000')
# user.save()
# request.session['phoneNum'] = phone
# return redirect('/mine/')
# else:
# return redirect('/login/')
def login(request):
    """Phone + SMS-code login.

    GET (AJAX): generate a 6-digit code, SMS it to ``phoneNum`` and stash it
    in the session. GET (non-AJAX): render the login form. POST: compare the
    submitted code with the session code; on success (re)create the user with
    a fresh UUID token, store the token in a cookie and redirect to /mine/.
    """
    if request.method == "GET":
        if request.is_ajax():
            # Generate the verification code: pick 6 random digits.
            strNum = '1234567890'
            rand_str = ''
            for i in range(0, 6):
                rand_str += strNum[random.randrange(0, len(strNum))]
            msg = "您的验证码是:%s。请不要把验证码泄露给其他人。"%rand_str
            phone = request.GET.get("phoneNum")
            send_sms(msg, phone)
            # print('*************',rand_str)
            # Keep the code in the session for the later POST comparison.
            request.session["code"] = rand_str
            return JsonResponse({"data":"ok"})
        else:
            return render(request, "mine/login.html")
    else:
        phone = request.POST.get("username")
        passwd = request.POST.get("passwd")
        code = request.session.get("code")
        if passwd == code:
            # Code verified successfully.
            # Check whether the user exists; refresh or create its token.
            uuidStr = str(uuid.uuid4())
            try:
                user = User.objects.get(pk=phone)
                user.tokenValue = uuidStr
                user.save()
            except User.DoesNotExist as e:
                # Register a brand-new user.
                user = User.create(phone,None,uuidStr,"sunck good")
                user.save()
            request.session["phoneNum"] = phone
            # Write the token value into a cookie for later auth checks.
            response = redirect("/mine/")
            response.set_cookie('token',uuidStr)
            return response
        else:
            # Code verification failed.
            return redirect("/login/")
def quit(request):
    """Terminate the current session and return to the account page."""
    logout(request)
    response = redirect('/mine/')
    return response
def showaddress(request):
    """List the addresses saved for the phone number stored in the session."""
    addrList= Address.objects.filter(user__phoneNum=request.session.get('phoneNum'))
    return render(request,'mine/showaddress.html',{'addrList':addrList})
def addaddr(request):
    """Render the add-address form (GET) or persist a new address (POST).

    On POST the form fields are read, the current user is resolved from the
    session phone number, the address parts are concatenated and an
    ``Address`` row is saved; finally the user is redirected to /mine/.
    """
    if request.method == 'GET':
        return render(request, 'mine/addaddr.html')
    else:
        name = request.POST.get('name')
        # BUG FIX: the original assigned ``sex = True`` unconditionally right
        # after the ``if sex == '0'`` branch, so every address ended up with
        # sex == True. Map '0' -> False, everything else -> True.
        sex = request.POST.get('sex') != '0'
        telephone = request.POST.get('phone')
        province = request.POST.get('province')
        city = request.POST.get('city')
        county = request.POST.get('county')
        street = request.POST.get('street')
        postCode = request.POST.get('postCode')
        detailAddress = request.POST.get('detailAddress')
        phone = request.session.get('phoneNum')
        user = User.objects.get(pk=phone)
        # Full address string: province + city + county + street + postCode + detail.
        alladdress = province + city + county + street + postCode + detailAddress
        address = Address.create(name, sex, telephone, postCode, alladdress,
                                 province, city, county, street, detailAddress, user)
        address.save()
        return redirect('/mine/')
def changecart(request,flag):
    """AJAX cart mutation: flag '1' decrements quantity, otherwise increments.

    Returns JSON: {'error': 1} when no token cookie, {'error': 2} when the
    token matches no user, otherwise {'error': 0, 'num': n} with the
    resulting cart quantity for the product.
    """
    # Quantity delta: '1' removes one item, anything else adds one.
    num = 1
    if flag == '1':
        num = -1
    # Check whether the user is logged in (token cookie + matching user).
    tokenValue=request.COOKIES.get('token')
    if not tokenValue:
        return JsonResponse({'error':1})
    try:
        user = User.objects.get(tokenValue=tokenValue)
    except User.DoesNotExist as e:
        return JsonResponse({'error':2})
    gid = request.POST.get('gid')
    cid = request.POST.get('cid')
    pid = request.POST.get('pid')
    product = Product.objects.filter(categoryId=gid,childId=cid).get(productId=pid)
    try:
        # Product already in this user's cart: adjust its quantity.
        cart = Cart.objects.filter(user__tokenValue=tokenValue).filter(product__categoryId=gid).filter(product__childId=cid).get(product__productId=pid)
        if flag == '2':
            # No stock left: report the unchanged quantity.
            if product.storeNums == '0':
                return JsonResponse ({'error':0,'num':cart.num})
        cart.num = cart.num + num
        # storeNums is stored as a string; mirror the delta on the inventory.
        product.storeNums = str(int(product.storeNums) - num)
        product.save()
        if cart.num == 0:
            cart.delete()
        else:
            cart.save()
    except Cart.DoesNotExist as e:
        # Product not in the cart yet; decrementing a missing row is a no-op.
        if flag == '1':
            return JsonResponse({'error':0,'num':0})
        try:
            # Reuse the user's open (flag=0) order, or create one.
            order = Order.orders2.filter(user__tokenValue=tokenValue).get(flag=0)
        except Order.DoesNotExist as e:
            orderId = str(uuid.uuid4())
            # NOTE(review): address is hard-coded to pk=3 — confirm intent.
            address = Address.objects.get(pk=3)
            order = Order.create(orderId,user,address,0)
            order.save()
        cart = Cart.create(user,product,order,1)
        cart.save()
        product.storeNums = str(int(product.storeNums) - num)
        product.save()
    return JsonResponse({'error':0,'num':cart.num})
def changecart2(request):
    """Toggle the isCheck flag of a single cart row (AJAX) and report it."""
    cart_pk = request.POST.get("cartid")
    cart_row = Cart.objects.get(pk=cart_pk)
    cart_row.isCheck = not cart_row.isCheck
    cart_row.save()
    return JsonResponse({'error': 0, 'flag': cart_row.isCheck})
def qOrder(request):
    """Submit the current open order and start a fresh one (AJAX).

    Marks the user's open (flag=0) order as placed, updates its checked cart
    rows, then creates a new empty order and re-attaches the user's cart
    rows to it.
    """
    tokenValue = request.COOKIES.get('token')
    order = Order.orders2.filter(user__tokenValue=tokenValue).get(flag=False)
    order.flag = 1
    order.save()
    carts = Cart.objects.filter(user__tokenValue=tokenValue).filter(order=order).filter(isCheck=True)
    for cart in carts:
        # NOTE(review): isOrder is set to False for submitted rows — confirm
        # whether True was intended here.
        cart.isOrder = False
        cart.save()
    # NOTE(review): address hard-coded to pk=3 — confirm intent.
    newOrder = Order.create(str(uuid.uuid4()),User.objects.get(tokenValue=tokenValue),Address.objects.get(pk=3),0)
    newOrder.save()
    oldCarts = Cart.objects.filter(user__tokenValue=tokenValue)
    for cart in oldCarts:
        cart.order = newOrder
        cart.save()
    return JsonResponse({'error':0})
| qwewangjian/Xgd | axf/views.py | views.py | py | 8,953 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "axf.models.SlideShow.objects.all",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "axf.models.SlideShow.objects",
"line_number": 12,
"usage_type": "attribute"
},
{
"api_name": "axf.models.SlideShow",
"line_number": 12,
"usage_type": "name"
},
... |
17878539903 | # emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*-
# vi: set ft=python sts=4 ts=4 sw=4 et:
"""Helper functions for the workflows"""
from distutils.version import StrictVersion
from builtins import range
def _tofloat(inlist):
if isinstance(inlist, (list, tuple)):
return [float(el) for el in inlist]
else:
return float(inlist)
def fmri_getidx(in_file, start_idx, stop_idx):
    """Heuristics to set the start and stop indices of an fMRI series.

    Reads the number of volumes from *in_file* and clamps both indices to
    ``[0, nvols - 1]``; unset or out-of-range values fall back to the full
    range. Returns the ``(start_idx, stop_idx)`` pair.
    """
    from nibabel import load
    from nipype.interfaces.base import isdefined
    nvols = load(in_file).shape[3]
    max_idx = nvols - 1
    # An unset or out-of-range start falls back to the first volume.
    if start_idx is None or not isdefined(start_idx) or start_idx < 0 or start_idx > max_idx:
        start_idx = 0
    # BUG FIX: the previous chained comparison ``max_idx < stop_idx < start_idx``
    # required stop_idx to be simultaneously above max_idx AND below start_idx,
    # so invalid stop indices were almost never corrected. Reset stop_idx when
    # it is unset, precedes start_idx, or exceeds the last volume.
    if (stop_idx is None or not isdefined(stop_idx)
            or stop_idx < start_idx or stop_idx > max_idx):
        stop_idx = max_idx
    return start_idx, stop_idx
def fwhm_dict(fwhm):
    """Map a 4-element FWHM sequence onto named x/y/z/avg keys."""
    values = [float(v) for v in fwhm]
    return {'fwhm_x': values[0], 'fwhm_y': values[1],
            'fwhm_z': values[2], 'fwhm_avg': values[3]}
def thresh_image(in_file, thres=0.5, out_file=None):
    """Binarize *in_file* at *thres* and write the result to *out_file*.

    Values below ``thres`` become 0 and every remaining positive value
    becomes 1. When ``out_file`` is not given, the output is named
    ``<stem>_thresh<ext>`` in the current working directory.
    Returns the output path.
    """
    import os.path as op
    import nibabel as nb
    if out_file is None:
        fname, ext = op.splitext(op.basename(in_file))
        # Handle double extensions such as .nii.gz.
        if ext == '.gz':
            fname, ext2 = op.splitext(fname)
            ext = ext2 + ext
        out_file = op.abspath('{}_thresh{}'.format(fname, ext))

    im = nb.load(in_file)
    # NOTE(review): ``get_data()`` is deprecated in nibabel >= 3 in favor of
    # ``get_fdata()`` (which always returns float64) — confirm before changing.
    data = im.get_data()
    data[data < thres] = 0
    data[data > 0] = 1
    nb.Nifti1Image(
        data, im.affine, im.header).to_filename(out_file)
    return out_file
def spectrum_mask(size):
    """Create a binary low-frequency mask of shape *size* for FFT filtering.

    The mask is 1 near the center pixel (normalized distance-to-center below
    0.6) and 0 elsewhere.

    FIX: import ``distance_transform_edt`` from ``scipy.ndimage`` directly —
    the ``scipy.ndimage.morphology`` namespace is deprecated and was removed
    in SciPy 1.15.
    """
    import numpy as np
    from scipy.ndimage import distance_transform_edt as distance

    ftmask = np.ones(size)
    # Seed a single zero at the center so the EDT measures distance to it.
    ftmask[size[0] // 2, size[1] // 2] = 0

    # Distance transform, normalized to [0, 1].
    ftmask = distance(ftmask)
    ftmask /= ftmask.max()

    # Invert: 1 at the center, 0 at the farthest pixel.
    ftmask *= -1.0
    ftmask += 1.0

    # Binarize: keep everything with inverted distance >= 0.4.
    ftmask[ftmask >= 0.4] = 1
    ftmask[ftmask < 1] = 0
    return ftmask
def slice_wise_fft(in_file, ftmask=None, spike_thres=3., out_prefix=None):
    """Search for spikes in fMRI slices using the 2D FFT.

    Each slice of each timepoint is FFT'd, median-filtered and masked, then
    z-scored robustly across time; slices with a large surviving cluster of
    z-scores above *spike_thres* are recorded as spikes.

    Returns ``(n_spikes, path_to_spikes_tsv, path_to_zscored_fft_nifti)``.
    """
    import os.path as op
    import numpy as np
    import nibabel as nb
    from mriqc.workflows.utils import spectrum_mask
    from scipy.ndimage.filters import median_filter
    from scipy.ndimage import generate_binary_structure, binary_erosion
    from statsmodels.robust.scale import mad
    if out_prefix is None:
        fname, ext = op.splitext(op.basename(in_file))
        if ext == '.gz':
            fname, _ = op.splitext(fname)
        out_prefix = op.abspath(fname)
    func_data = nb.load(in_file).get_data()
    # Default mask: low-frequency spectrum mask matching the in-plane shape.
    if ftmask is None:
        ftmask = spectrum_mask(tuple(func_data.shape[:2]))
    fft_data = []
    for t in range(func_data.shape[-1]):
        func_frame = func_data[..., t]
        fft_slices = []
        for z in range(func_frame.shape[2]):
            sl = func_frame[..., z]
            # Real part of the 2D FFT, smoothed and masked.
            fftsl = median_filter(np.real(np.fft.fft2(sl)).astype(np.float32),
                                  size=(5, 5), mode='constant') * ftmask
            fft_slices.append(fftsl)
        fft_data.append(np.stack(fft_slices, axis=-1))
    # Recompose the 4D FFT timeseries
    fft_data = np.stack(fft_data, -1)
    # Z-score across t, using robust statistics
    mu = np.median(fft_data, axis=3)
    sigma = np.stack([mad(fft_data, axis=3)] * fft_data.shape[-1], -1)
    # Only scale voxels with non-negligible spread to avoid division by ~0.
    idxs = np.where(np.abs(sigma) > 1e-4)
    fft_zscored = fft_data - mu[..., np.newaxis]
    fft_zscored[idxs] /= sigma[idxs]
    # save fft z-scored
    out_fft = op.abspath(out_prefix + '_zsfft.nii.gz')
    nii = nb.Nifti1Image(fft_zscored.astype(np.float32), np.eye(4), None)
    nii.to_filename(out_fft)
    # Find peaks
    spikes_list = []
    for t in range(fft_zscored.shape[-1]):
        fft_frame = fft_zscored[..., t]
        for z in range(fft_frame.shape[-1]):
            sl = fft_frame[..., z]
            if np.all(sl < spike_thres):
                continue
            # Any zscore over spike_thres will be called a spike
            sl[sl <= spike_thres] = 0
            sl[sl > 0] = 1
            # Erode peaks and see how many survive
            struc = generate_binary_structure(2, 2)
            sl = binary_erosion(sl.astype(np.uint8), structure=struc).astype(np.uint8)
            # More than 10 surviving pixels marks this (t, z) slice as a spike.
            if sl.sum() > 10:
                spikes_list.append((t, z))
    out_spikes = op.abspath(out_prefix + '_spikes.tsv')
    np.savetxt(out_spikes, spikes_list, fmt=b'%d', delimiter=b'\t', header='TR\tZ')
    return len(spikes_list), out_spikes, out_fft
def get_fwhmx():
    """Build an AFNI FWHMx interface with combine/detrend enabled.

    For AFNI releases >= 2017.2.3 the classic FWHM output must be requested
    explicitly via the ``-ShowMeClassicFWHM`` flag.
    """
    from nipype.interfaces.afni import Info, FWHMx
    fwhm_args = {"combine": True,
                 "detrend": True}
    afni_version = StrictVersion('%s.%s.%s' % Info.version())
    if afni_version >= StrictVersion("2017.2.3"):
        fwhm_args['args'] = '-ShowMeClassicFWHM'
    fwhm_interface = FWHMx(**fwhm_args)
    return fwhm_interface
| pGarciaS/PREEMACS | scripts/mriqc/mriqc/workflows/utils.py | utils.py | py | 5,355 | python | en | code | 8 | github-code | 36 | [
{
"api_name": "nibabel.load",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "nipype.interfaces.base.isdefined",
"line_number": 22,
"usage_type": "call"
},
{
"api_name": "nipype.interfaces.base.isdefined",
"line_number": 27,
"usage_type": "call"
},
{
"ap... |
35042458812 | import pickle
import argparse
import pickle
parser = argparse.ArgumentParser()
parser.add_argument('-f', '--fasta', help='fasta input file')
args = parser.parse_args()

# Map each FASTA header line to the sequence line that follows it.
# Lines are consumed in pairs; the header keeps its leading marker.
junction_id_to_seq = {}
with open(args.fasta, "r") as f:
    while True:
        line1 = f.readline()
        if not line1:
            break
        line2 = f.readline()
        junction_id_to_seq[line1.strip()] = line2.strip()

# BUG FIX: the pickle output file was opened inline and never closed,
# leaking the handle; a context manager guarantees flush and close.
with open("known_fusions.pickle", "wb") as out_handle:
    pickle.dump(junction_id_to_seq, out_handle)
| salzmanlab-admin/DEEPEST-Fusion | reference_files/create_pickle_file.py | create_pickle_file.py | py | 471 | python | en | code | 5 | github-code | 36 | [
{
"api_name": "argparse.ArgumentParser",
"line_number": 6,
"usage_type": "call"
},
{
"api_name": "pickle.dump",
"line_number": 21,
"usage_type": "call"
}
] |
3735254268 | #!/usr/bin/env python3
#
# get_ad_right_matrix.py
# Export AD User -> Group Matrix to Excel
# Written by Maximilian Thoma 2021
#
import json
import re
import ldap3
import pandas as pd
########################################################################################################################
# NOTE:
# -----
# Following packages must be installed in your python environment:
# pandas, xslxwriter, ldap3
#
# Just install them with:
# pip install pandas xslxwriter, ldap3
#
########################################################################################################################
# Settings
# LDAP server ip or fqdn
LDAP_SERVER = '10.1.1.231'
# LDAP port 389 = unencrypted, 636 = encrypted
PORT = 389
# Use SSL? True/False
USE_SSL = False
# LDAP bind user DN
BIND = 'CN=ldap bind,CN=Users,DC=lab,DC=local'
# LDAP bind user password
BIND_PW = 'Test12345!'
# Base search DN
SEARCH = 'OU=lab,DC=lab,DC=local'
# All users regardless deactivated or activated
SEARCH_FILTER = '(&(objectclass=user)(sAMAccountName=*))'
# All users who are not deactivated
#SEARCH_FILTER = '(&(objectclass=user)(sAMAccountName=*)(!(UserAccountControl:1.2.840.113556.1.4.803:=2)))'
# All users who are not deactivated and in special group
#SEARCH_FILTER = '(&(objectclass=user)(sAMAccountName=*)(!(UserAccountControl:1.2.840.113556.1.4.803:=2))(memberOf=CN=b_testgruppe und restlicher DN))'
# Output file
FILE = 'output.xlsx'
########################################################################################################################
def main():
    """Query AD over LDAP and export a user-by-group membership matrix to Excel.

    Binds with the configured credentials, searches users and their
    ``memberOf`` attributes, builds an "X"/"-" matrix (rows = users,
    columns = groups) and writes it to FILE with rotated headers, an
    autofilter, frozen panes and green highlighting of "X" cells.
    """
    # Connect to LDAP and run the configured search.
    # BUG FIX: the port was hard-coded to 389, silently ignoring the PORT
    # setting (so USE_SSL on 636 could never work).
    server = ldap3.Server(LDAP_SERVER, port=PORT, use_ssl=USE_SSL)
    conn = ldap3.Connection(server, BIND, BIND_PW, auto_bind=True)
    conn.search(SEARCH, SEARCH_FILTER, attributes=['memberOf', 'sAMAccountName'])
    response = json.loads(conn.response_to_json())

    def get_cn(cn_str):
        """Extract the leading CN component from a distinguished name."""
        cn = re.findall(r"CN=([^,]*),?", cn_str)[0]
        return cn

    # short username -> long username (CN), and group -> [short usernames].
    buffer_users = {}
    buffer_user_in_group = {}

    for entry in response['entries']:
        long_username = get_cn(entry['dn'])
        short_username = entry['attributes']['sAMAccountName'].lower()
        buffer_users[short_username] = long_username
        for group in entry['attributes']['memberOf']:
            group_name = get_cn(group)
            if group_name not in buffer_user_in_group:
                buffer_user_in_group[group_name] = []
            if short_username not in buffer_user_in_group[group_name]:
                buffer_user_in_group[group_name].append(short_username)

    # Build the X/- matrix and track the widest row label for column sizing.
    matrix = {}
    length_cell = 0
    for group, users in buffer_user_in_group.items():
        matrix[group] = {}
        for user, long_user in buffer_users.items():
            index = "%s - %s" % (user, long_user)
            index_length = len(index)
            if index_length > length_cell:
                length_cell = index_length
            matrix[group][index] = "X" if user in users else "-"

    # Data matrix via pandas, written below the manually formatted header row.
    frame = pd.DataFrame(matrix)
    writer = pd.ExcelWriter(FILE, engine='xlsxwriter')
    frame.to_excel(writer, sheet_name="Sheet1", startrow=1, header=False)

    workbook = writer.book
    worksheet = writer.sheets['Sheet1']

    # Header line: bold, bordered, 90-degree rotated group names.
    header_format = workbook.add_format(
        {
            'bold': True,
            'valign': 'bottom',
            'fg_color': '#D7E4BC',
            'border': 1,
        }
    )
    header_format.set_rotation(90)
    for col_num, value in enumerate(frame.columns.values):
        worksheet.write(0, col_num + 1, value, header_format)

    # Green highlighting applied to "X" cells via conditional formatting.
    member_format = workbook.add_format(
        {
            'bg_color': '#C6EFCE',
            'font_color': '#006100'
        }
    )

    cols_count = len(frame.columns.values)
    # Autofilter on the header row, sized columns, frozen header/label panes.
    worksheet.autofilter(0, 0, 0, cols_count)
    worksheet.set_column(0, 0, length_cell + 1)
    worksheet.set_column(1, cols_count, 3)
    worksheet.freeze_panes(1, 1)
    worksheet.conditional_format('A1:ZA65535', {
        'type': 'cell',
        'criteria': '=',
        'value': '"X"',
        'format': member_format
    })

    # BUG FIX: ``ExcelWriter.save()`` was removed in pandas 2.0; ``close()``
    # both saves and releases the file handle and exists in older pandas too.
    writer.close()


if __name__ == "__main__":
    main()
| lanbugs/get_ad_right_matrix | get_ad_right_matrix.py | get_ad_right_matrix.py | py | 4,692 | python | en | code | 3 | github-code | 36 | [
{
"api_name": "ldap3.Server",
"line_number": 50,
"usage_type": "call"
},
{
"api_name": "ldap3.Connection",
"line_number": 51,
"usage_type": "call"
},
{
"api_name": "json.loads",
"line_number": 53,
"usage_type": "call"
},
{
"api_name": "re.findall",
"line_numbe... |
25108240229 | from zigzag.classes.io.onnx.parser import Parser
from zigzag.classes.io.onnx.utils import get_node_input_output_dimension_shapes
from zigzag.classes.workload.layer_node import LayerNode
import logging
logger = logging.getLogger(__name__)
class SoftmaxParser(Parser):
    """Parser for ONNX Softmax nodes into LayerNode."""

    def __init__(self, node_id, node, nodes_outputs, mapping, onnx_model):
        super().__init__(node_id, node, nodes_outputs, mapping, onnx_model)

    def run(self):
        """Run the parser and return the created LayerNode object."""
        return self.generate_layer_node_for_softmax()

    def generate_layer_node_for_softmax(self):
        """Build a LayerNode describing this Softmax node.

        Reads the input/output dimension shapes from the ONNX model, resolves
        the hardware mapping (node-specific, falling back to "default") and
        wires the node's predecessors as the source of operand ``I``.

        Raises:
            ValueError: when neither a node-specific nor a default mapping
                is available.
        """
        def get_layer_node_input_format(B, C, K, node_mapping, nodes_outputs):
            """Generate the attribute dictionary required for LayerNode creation."""
            d = {}
            # Softmax equation over batch (b) and channel (c) dimensions.
            d["equation"] = "O[b][c] = exp(I[b][c]) / (reduce_sum(exp(I[b]), axis=1))"
            d["dimension_relations"] = []
            d["operand_precision"] = {"O": 16, "I": 8}  # Modify precision as needed
            d["operand_source"] = {"I": []}
            # Core allocation and spatial mapping come from the hw mapping.
            d["core_allocation"] = node_mapping["core_allocation"]
            d["spatial_mapping"] = node_mapping["spatial_mapping"]
            # Predecessors: any node whose outputs feed this node's inputs.
            node_inputs = self.node.input
            preds = []
            for node_input in node_inputs:
                for n in nodes_outputs:
                    if node_input in nodes_outputs[n]:
                        preds.append(n)
            d["operand_source"]["I"] = preds
            return d

        ia_dimension_shape, oa_dimension_shape = get_node_input_output_dimension_shapes(
            self.node, self.onnx_model
        )

        # Batch size, input channels and output channels (defaults when the
        # shapes are unavailable).
        B = ia_dimension_shape[0] if ia_dimension_shape else 1
        C = ia_dimension_shape[1] if ia_dimension_shape else 0
        K = oa_dimension_shape[1] if oa_dimension_shape else 0

        # Get the hw mapping of this node, falling back to the default entry.
        if self.node.name in self.mapping:
            node_mapping = self.mapping[self.node.name]
        else:
            # FIX: the original used a bare ``except:``, which would also
            # swallow KeyboardInterrupt/SystemExit; only a missing key is the
            # expected failure here, and the cause is now chained.
            try:
                node_mapping = self.mapping["default"]
            except KeyError as exc:
                raise ValueError(
                    f"There is no mapping provided for node {self.node.name}, nor a default one."
                ) from exc

        node_attrs = get_layer_node_input_format(
            B, C, K, node_mapping, self.nodes_outputs
        )
        node_obj = LayerNode(
            self.node_id,
            node_attrs,
            node_name=self.node.name,
            type=self.node.op_type.lower(),
        )
        logger.info(f"Parsed Softmax node {self.node.name}")
        return node_obj
| wangxdgg/zigzag_2 | zigzag/classes/io/onnx/softmax2.py | softmax2.py | py | 3,148 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "logging.getLogger",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "zigzag.classes.io.onnx.parser.Parser",
"line_number": 9,
"usage_type": "name"
},
{
"api_name": "zigzag.classes.io.onnx.utils.get_node_input_output_dimension_shapes",
"line_number": 50,... |
4978717366 | #!/usr/bin/python3
from PyQt5 import QtCore
from PyQt5.QtCore import QSize, QUrl
from PyQt5.QtMultimedia import QMediaContent, QMediaPlayer
from PyQt5.QtMultimediaWidgets import QVideoWidget
from PyQt5.QtWidgets import *
from PyQt5.QtWidgets import QMainWindow,QWidget, QPushButton
from PyQt5.QtGui import QIcon, QPixmap, QCursor
import sys, os, time
from playsound import playsound
dirname = os.path.dirname(os.path.abspath(__file__)) + '/'
class VideoPlayer(QMainWindow):
    """Main application window: looping background videos with a Pomodoro timer sidebar."""

    def __init__(self):
        super().__init__()
        self.setWindowTitle("Study With Me")
        self.setWindowIcon(QIcon(QPixmap(dirname+'media/icons/coding.svg')))
        self.setFixedSize(1900,1000)
        # Process pending Qt events so the window keeps repainting while the timer loop runs
        QApplication.processEvents()
        menubar = self.menuBar()
        menubar.setObjectName('menu')
        file_menu = menubar.addMenu('&File')
        help_menu = menubar.addMenu('&Help')
        # NOTE(review): `help` shadows the builtin; harmless here but worth renaming
        help = QAction(QIcon(dirname+'media/icons/information.svg'), 'ShortCuts', self)
        help.triggered.connect(self.help_info)
        help_menu.addAction(help)
        file_video = QAction(QIcon(dirname+'media/icons/video.svg'), 'Select videofile', self)
        file_video.triggered.connect(self.user_video)
        file_menu.addAction(file_video)
        #VIDEOPLAYER
        '''
        Installing VideoPlayer settings
        '''
        self.mediaPlayer = QMediaPlayer(None, QMediaPlayer.VideoSurface)
        videoWidget = QVideoWidget()
        videoWidget.setFixedSize(1700,1000)
        self.mediaPlayer.setVideoOutput(videoWidget)
        self.mediaPlayer.setMedia(QMediaContent(QUrl.fromLocalFile(dirname+'media/videos/video1.mp4')))
        self.mediaPlayer.play()
        '''
        Installing Central Widget for Window
        '''
        wid = QWidget(self)
        self.setCentralWidget(wid)
        layout = QHBoxLayout()
        #CONFIGURATION SIDEBAR
        self.sideLayout = QVBoxLayout()
        self.sideLayout.setObjectName('sideLayout')
        #CONFIGURATION TIMERBAR
        '''
        Timer_is_run variable created for run report timer
        '''
        self.timer_is_run = False
        self.timerLayout = QHBoxLayout()
        self.count_minute = QLabel('25')
        self.count_minute.setObjectName('counter')
        self.count_second = QLabel('00')
        self.count_second.setObjectName('counter')
        self.count_separater = QLabel(':')
        self.count_separater.setObjectName('counter')
        self.start_btn = QPushButton('START')
        self.start_btn.setCursor(QCursor(QtCore.Qt.PointingHandCursor))
        self.start_btn.setObjectName('start_btn')
        self.restart_btn = QPushButton()
        self.restart_btn.setCursor(QCursor(QtCore.Qt.PointingHandCursor))
        self.restart_btn.setIcon(QIcon(QPixmap(dirname+'media/icons/restart.png')))
        self.restart_btn.setIconSize(QSize(40,40))
        self.restart_btn.setObjectName('restart_btn')
        self.pause_btn = QPushButton('PAUSE')
        self.pause_btn.setCursor(QCursor(QtCore.Qt.PointingHandCursor))
        self.pause_btn.setObjectName('start_btn')
        # Stack
        '''
        Stack_btn created for switch two buttons - restart button and start button
        '''
        self.stack_btn = QStackedWidget()
        self.stack_btn.addWidget(self.start_btn)
        self.stack_btn.addWidget(self.pause_btn)
        # Selected default button for stack
        self.stack_btn.setCurrentWidget(self.start_btn)
        self.timerLayout.addWidget(self.count_minute)
        self.timerLayout.addWidget(self.count_separater)
        self.timerLayout.addWidget(self.count_second)
        '''
        Stretch created for remove empty space between timer labels and timer buttons
        '''
        self.timerLayout.addStretch()
        self.timerLayout.addWidget(self.stack_btn)
        self.timerLayout.addWidget(self.restart_btn)
        self.sideLayout.addLayout(self.timerLayout)
        self.start_btn.clicked.connect(self.start)
        self.restart_btn.clicked.connect(self.restart)
        self.pause_btn.clicked.connect(self.pause)
        #CONFIGURATION RADIO BUTTONS IN GROUPBOX
        self.radio_layout = QHBoxLayout()
        self.radio_group = QGroupBox()
        self.radio_group.setObjectName('radio_group')
        self.pomodoro_rad = QRadioButton('Pomodoro')
        self.pomodoro_rad.setCursor(QCursor(QtCore.Qt.PointingHandCursor))
        self.pomodoro_rad.setChecked(True)
        self.short_rad = QRadioButton('Short Break')
        self.short_rad.setCursor(QCursor(QtCore.Qt.PointingHandCursor))
        self.long_rad = QRadioButton('Long Break')
        self.long_rad.setCursor(QCursor(QtCore.Qt.PointingHandCursor))
        self.radio_layout.addWidget(self.pomodoro_rad)
        self.radio_layout.addWidget(self.short_rad)
        self.radio_layout.addWidget(self.long_rad)
        self.radio_group.setLayout(self.radio_layout)
        self.sideLayout.addWidget(self.radio_group)
        self.sideLayout.addStretch()
        # Each mode simply resets the displayed minutes; seconds go back to '00'
        self.pomodoro_rad.clicked.connect(lambda x: self.set_time('25'))
        self.short_rad.clicked.connect(lambda x: self.set_time('5'))
        self.long_rad.clicked.connect(lambda x: self.set_time('15'))
        #CONFIGURATION VIDEO-BUTTONS FOR SELECT BACKGROUND VIDEO
        self.grid_videos = QGridLayout()
        self.create_video_button(icon=f'{dirname}media/icons/study.svg', url=f'{dirname}media/videos/video1.mp4', row=0, column=0, tip='Study with me', cut='1')
        self.create_video_button(icon=f'{dirname}media/icons/abstract.svg', url=f'{dirname}media/videos/video2.mp4', row=0, column=1, tip='Abstaction', cut='2')
        self.create_video_button(icon=f'{dirname}media/icons/landscape.svg', url=f'{dirname}media/videos/video3.mp4', row=0, column=2, tip='River', cut='3')
        self.create_video_button(icon=f'{dirname}media/icons/forest.svg', url=f'{dirname}media/videos/video4.mp4', row=0, column=3, tip='Nature', cut='4')
        self.create_video_button(icon=f'{dirname}media/icons/mountain.svg', url=f'{dirname}media/videos/video5.mp4', row=1, column=0, tip='Mountains', cut='5')
        self.create_video_button(icon=f'{dirname}media/icons/fire.svg', url=f'{dirname}media/videos/video6.mp4', row=1, column=1, tip='Campfire', cut='6')
        self.create_video_button(icon=f'{dirname}media/icons/programming.svg', url=f'{dirname}media/videos/video7.mp4', row=1, column=2, tip='Coding Time', cut='7')
        self.create_video_button(icon=f'{dirname}media/icons/galaxy.svg', url=f'{dirname}media/videos/video8.mp4', row=1, column=3, tip='Space', cut='8')
        #CONFIGURATION VOLUME SLIDER
        self.volumeLayout = QHBoxLayout()
        self.vol_ico = QPushButton('')
        self.vol_ico.setIcon(QIcon(QPixmap('media/icons/volume.svg')))
        self.vol_ico.setCursor(QCursor(QtCore.Qt.PointingHandCursor))
        # Clicking the speaker icon mutes by dragging the slider to zero
        self.vol_ico.clicked.connect(lambda: self.vol_slider.setValue(0))
        self.vol_ico.setIconSize(QSize(40,40))
        self.vol_ico.setObjectName('vol_ico')
        self.vol_slider = QSlider()
        self.vol_slider.setOrientation(QtCore.Qt.Horizontal)
        self.vol_slider.setCursor(QCursor(QtCore.Qt.PointingHandCursor))
        # SET DEFAULT VOLUME LEVEL
        self.vol_slider.setValue(90)
        self.vol_slider.valueChanged.connect(self.change_volume)
        self.volumeLayout.addWidget(self.vol_ico)
        self.volumeLayout.addWidget(self.vol_slider)
        self.sideLayout.addLayout(self.volumeLayout)
        self.sideLayout.addStretch()
        self.sideLayout.addLayout(self.grid_videos)
        self.sideLayout.addStretch(10)
        layout.addLayout(self.sideLayout)
        layout.addWidget(videoWidget)
        wid.setLayout(layout)
        self.x = 0  # for the mouse wheel
        help.setShortcut('Ctrl+I')
        file_video.setShortcut('Ctrl+O')
        self.vol_ico.setShortcut('Ctrl+M')
        self.long_rad.setShortcut('Ctrl+L')
        self.short_rad.setShortcut('Ctrl+S')
        self.pomodoro_rad.setShortcut('Ctrl+P')
        self.restart_btn.setShortcut('Esc')
        # NOTE(review): both stacked buttons share SPACE; confirm Qt dispatches the
        # shortcut to the currently visible widget only
        self.pause_btn.setShortcut('SPACE')
        self.start_btn.setShortcut('SPACE')

    # APP LOGIC
    '''
    This functions accept five arguments for create button.
    1. Icon take the path for icon button
    2. Url take the video path
    3. Row and Column set place for object
    4. Tip tells about icon video
    '''
    def create_video_button(self, icon, url, row, column, tip, cut):
        """Create one background-video selector button and add it to the grid."""
        self.button = QPushButton()
        self.button.setShortcut(cut)
        self.button.setIcon(QIcon(QPixmap(icon)))
        self.button.setIconSize(QSize(40,40))
        self.button.setObjectName('video_button')
        self.button.setCursor(QCursor(QtCore.Qt.PointingHandCursor))
        self.button.setToolTip(tip)
        self.button.clicked.connect(lambda x: self.open_video(url))
        self.grid_videos.addWidget(self.button, row, column)

    '''
    Changing the volume with the mouse
    '''
    def wheelEvent(self, event):
        """Adjust the volume slider by 3 per mouse-wheel notch."""
        number = event.angleDelta().y()
        if number == 120:
            self.vol_slider.setValue(self.vol_slider.value() + 3)
        elif number == -120:
            self.vol_slider.setValue(self.vol_slider.value() - 3)

    '''
    This method shows the user possible keyboard shortcuts
    '''
    def help_info(self):
        """Show the keyboard-shortcut reference dialog."""
        info = '<h4>Hello, World! We have some shortcuts for you!</h4>\n \
        <p>Press <b>Ctrl+I</b> for call Help info</p>\
        <p>Press <b>Ctrl+M</b> for mute volumn</p>\
        <p>Press <b>Ctrl+L</b> for call Long Break</p>\
        <p>Press <b>Ctrl+S</b> for call Short Break</p>\
        <p>Press <b>Ctrl+P</b> for call Pomodoro method</p>\
        <p>Press <b>Ctrl+O</b> for open your videofile.</p>\
        <p>Press <b>SPACE</b> for Pause/Start timer</p>\
        <p>Press <b>Esc</b> for STOP timer</p>\
        <p>You can use numbers keyboard <b>(1-8)</b> for select video</p>'
        QMessageBox.about(self, 'About Program', info)

    '''
    When User selected RadioButton this function set right time for timer
    '''
    def set_time(self, minute):
        """Reset the countdown display to `minute`:00 and stop any running timer."""
        self.count_minute.setText(minute)
        self.count_second.setText('00')
        self.timer_is_run = False

    '''
    This function tracks changes for volume slider and set current volume video.
    '''
    def change_volume(self):
        """Sync player volume with the slider and swap the speaker icon at zero."""
        volume = self.vol_slider.value()
        if volume == 0:
            self.vol_ico.setIcon(QIcon(QPixmap('media/icons/volume-x.svg')))
            self.mediaPlayer.setVolume(volume)
        else:
            self.vol_ico.setIcon(QIcon(QPixmap('media/icons/volume.svg')))
            self.mediaPlayer.setVolume(volume)

    '''
    After user clicked button, this function opens the current video
    '''
    def open_video(self, path):
        """Load and play the video at `path` in the background widget."""
        self.mediaPlayer.setMedia(QMediaContent(QUrl.fromLocalFile(path)))
        self.mediaPlayer.play()

    '''
    When user clicked Start-button this function be:
    1. Disabled all radio_buttons
    2. Run timer
    3. Replaces start-button with pause-button
    '''
    def start(self):
        self.radio_group.setDisabled(True)
        self.timer_is_run = True
        self.stack_btn.setCurrentWidget(self.pause_btn)
        self.tik_tak()

    '''
    Timer Logic.
    First, we take the current value of the timestamps to calculate the total number of seconds.
    The total number of seconds we use to run the report cycle.
    During the loop, we always check whether the user has pressed pause.
    If pressed, we exit the loop and save the last time value to our labels.
    Otherwise, we start checking:
    If the second is not equal to zero , we subtract one from it, otherwise we look at what minutes are equal to.
    If the minutes are greater than zero, then we subtract one from the minute, and assign the number 59 to the second.
    If there are no minutes and seconds, we exit the cycle
    At the end, we start the sound signal
    '''
    def tik_tak(self):
        # NOTE(review): `min` shadows the builtin; loop nesting below was
        # reconstructed from a whitespace-stripped source -- confirm the
        # zero check is meant to sit inside the countdown loop.
        min, sec = map(int, (self.count_minute.text(), self.count_second.text()))
        len_seconds = min * 60 + sec
        for s in range(len_seconds):
            # Keep the GUI responsive while sleeping in this blocking loop
            QApplication.processEvents()
            if self.timer_is_run:
                if sec > 0:
                    sec -= 1
                    self.count_second.setText(str(sec))
                    time.sleep(1)
                    # print(self.count_minute.text(), self.count_second.text())
                else:
                    if min > 0:
                        sec = 59
                        min -= 1
                        self.count_second.setText(str(sec))
                        self.count_minute.setText(str(min))
                        time.sleep(1)
                        # print(self.count_minute.text(), self.count_second.text())
            if sec == min == 0:
                self.radio_group.setDisabled(False)
                self.stack_btn.setCurrentWidget(self.start_btn)
                playsound('media/sounds/over_sound.mp3', True)
                self.timer_is_run = False

    '''
    When user clicked restart button activated this function.ц
    Before exiting the loop, the function checks which button is currently active to replace the text on the label
    '''
    def restart(self):
        # Mode label -> minutes shown after a reset
        times = {
            'Pomodoro': '25',
            'Short Break': '5',
            'Long Break': '15'
        }
        self.radio_group.setDisabled(False)
        self.stack_btn.setCurrentWidget(self.start_btn)
        self.timer_is_run = False
        time.sleep(1)
        # Skip the first child (the layout) and find the checked radio button
        for item in self.radio_group.children()[1::]:
            if item.isChecked():
                self.count_minute.setText(times[item.text()])
        self.count_second.setText('00')

    '''
    The function interrupts the timer and saves the last time value on the label
    '''
    def pause(self):
        self.radio_group.setDisabled(False)
        self.timer_is_run = False
        self.stack_btn.setCurrentWidget(self.start_btn)

    def user_video(self):
        """Let the user pick a local .mp4/.mov file and play it as background."""
        options = QFileDialog.Options()
        options |= QFileDialog.DontUseNativeDialog
        fileName, _ = QFileDialog.getOpenFileName(self, 'Открыть файл', '',
                                                  'MP4 Files (*.mp4);; MOV Files (*.mov)', options=options)
        if fileName:
            self.mediaPlayer.setMedia(QMediaContent(QUrl.fromLocalFile(fileName)))
            self.mediaPlayer.play()

    '''
    Exit from app
    '''
    def closeEvent(self, event):
        event.accept()
        sys.exit()
if __name__ == "__main__":
    app = QApplication(sys.argv)
    videoplayer = VideoPlayer()
    # Load the application-wide stylesheet from the working directory
    style = ''
    with open('style.css', 'r') as file:
        for line in file:
            style += line
    videoplayer.setStyleSheet(style)
    videoplayer.showMaximized()
    videoplayer.show()
    sys.exit(app.exec_())
| SalomanYu/StudyWithMe | main.py | main.py | py | 15,163 | python | en | code | 1 | github-code | 36 | [
{
"api_name": "os.path.dirname",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 13,
"usage_type": "attribute"
},
{
"api_name": "os.path.abspath",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "PyQt5.QtWidgets.QMainWin... |
28853226017 | import bpy
from bpy.types import Menu
brush_icons = {}
def create_icons():
    """Load the sculpt-tool icon files shipped with Blender and cache their ids.

    Populates the module-level ``brush_icons`` dict, mapping each tool name to
    the icon id returned by ``bpy.app.icons.new_triangles_from_file``. Call
    ``release_icons`` on unregister to free them.
    """
    global brush_icons
    icons_directory = bpy.utils.system_resource('DATAFILES', path="icons")
    brushes = [
        "border_mask",
        "border_hide",
        "box_trim",
        "line_project",
    ]
    for brush in brushes:
        # Icon files follow Blender's "ops.sculpt.<tool>.dat" naming scheme.
        # (Removed an unused function-local `import os` from the original.)
        filename = f"{icons_directory}/ops.sculpt.{brush}.dat"
        brush_icons[brush] = bpy.app.icons.new_triangles_from_file(filename)
def release_icons():
    """Free every icon id previously registered by ``create_icons``."""
    global brush_icons
    for icon_id in brush_icons.values():
        bpy.app.icons.release(icon_id)
class PIE_MT_hide_mask_brushes(Menu):
    """Pie menu offering the box mask/hide/trim and line-project sculpt tools."""
    # label is displayed at the center of the pie menu.
    bl_label = "Hide/Mask Brush Menu"
    bl_idname = "PIE_MT_hide_mask_brushes"
    bl_options = {"REGISTER", "UNDO"}

    def draw(self, context):
        global brush_icons
        layout = self.layout
        pie = layout.menu_pie()
        # Each entry activates a builtin tool via wm.tool_set_by_id, using the
        # custom icons loaded by create_icons(); register() must run first.
        op = pie.operator("wm.tool_set_by_id", text=" Mask", icon_value=brush_icons["border_mask"])
        op.name = "builtin.box_mask"
        op = pie.operator("wm.tool_set_by_id", text=" Hide", icon_value=brush_icons["border_hide"])
        op.name = "builtin.box_hide"
        op = pie.operator("wm.tool_set_by_id", text=" Trim", icon_value=brush_icons["box_trim"])
        op.name = "builtin.box_trim"
        op = pie.operator("wm.tool_set_by_id", text=" Line Project", icon_value=brush_icons["line_project"])
        op.name = "builtin.line_project"
class PIE_MT_init_face_sets(Menu):
    """Pie menu exposing every "Initialize Face Sets" mode with a matching icon."""
    bl_label = "Init Face Sets"
    bl_idname = "PIE_MT_init_face_sets"
    bl_options = {"REGISTER", "UNDO"}

    # (mode, button text, icon) for each sculpt.face_sets_init entry, listed in
    # the exact order they are placed around the pie.
    _ENTRIES = (
        ('LOOSE_PARTS', 'Loose Parts', "OUTLINER_DATA_POINTCLOUD"),
        ('FACE_SET_BOUNDARIES', 'Face Set Boundaries', "PIVOT_BOUNDBOX"),
        ('MATERIALS', 'Materials', "MATERIAL"),
        ('NORMALS', 'Normals', "NORMALS_VERTEX_FACE"),
        ('UV_SEAMS', 'UV Seams', "UV_EDGESEL"),
        ('CREASES', 'Edge Creases', "EDGESEL"),
        ('BEVEL_WEIGHT', 'Edge Bevel Weight', "MOD_BEVEL"),
        ('SHARP_EDGES', 'Sharp Edges', "SHARPCURVE"),
    )

    def draw(self, context):
        pie = self.layout.menu_pie()
        for mode, text, icon in self._ENTRIES:
            pie.operator("sculpt.face_sets_init", text=text, icon=icon).mode = mode
# Classes registered by this module.
classes = (
    PIE_MT_hide_mask_brushes,
    PIE_MT_init_face_sets,
)

from my_pie_menus import utils

# Keymap definitions consumed by utils.register_keymaps: Alt+1 / Alt+2 in the
# 3D viewport's Sculpt keymap open the two pie menus defined above.
kms = [
    {
        "keymap_operator": "wm.call_menu_pie",
        "name": "Sculpt",
        "letter": "ONE",
        "shift": 0,
        "ctrl": 0,
        "alt": 1,
        "space_type": "VIEW_3D",
        "region_type": "WINDOW",
        "keywords": {"name": "PIE_MT_init_face_sets"},
    },
    {
        "keymap_operator": "wm.call_menu_pie",
        "name": "Sculpt",
        "letter": "TWO",
        "shift": 0,
        "ctrl": 0,
        "alt": 1,
        "space_type": "VIEW_3D",
        "region_type": "WINDOW",
        "keywords": {"name": "PIE_MT_hide_mask_brushes"},
    },
]

# Filled by utils.register_keymaps so entries can be removed on unregister.
addon_keymaps = []
def register():
    """Addon entry point: load icons, then register classes and keymaps."""
    create_icons()
    utils.register_classes(classes)
    utils.register_keymaps(kms, addon_keymaps)
def unregister():
    """Addon exit point: free icons, then unregister classes and keymaps."""
    release_icons()
    # NOTE(review): register() delegates to utils.register_classes; consider a
    # matching utils helper here, unregistering in reverse registration order.
    for cls in classes:
        bpy.utils.unregister_class(cls)
    utils.unregister_keymaps(kms)
| jmobley0429/my_pie_menus | menus/sculpt_mode_pies.py | sculpt_mode_pies.py | py | 3,776 | python | en | code | 1 | github-code | 36 | [
{
"api_name": "bpy.utils.system_resource",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "bpy.utils",
"line_number": 10,
"usage_type": "attribute"
},
{
"api_name": "bpy.app.icons.new_triangles_from_file",
"line_number": 22,
"usage_type": "call"
},
{
"ap... |
7696440509 | from django.conf.urls import patterns, include, url
from .views import *
# Routes for the "estudiantes" app: book reservation and lookup views.
# NOTE(review): `patterns('', ...)` was deprecated in Django 1.8 and removed in
# 1.10; on upgrade replace with a plain list of url()/path() entries.
urlpatterns = patterns('',
    url(r'^reservar/(?P<id>\d+)/$',Reservrlibros),
    # NOTE(review): the route name says "ConsultaLibros" but the URL is spelled
    # 'consultaLibos' -- presumably a typo; existing links depend on it, so confirm
    # before renaming.
    url(r'^consultaLibos/$',ConsultaLibros.as_view(), name='ConsultaLibros'),
    url(r'^reservaExitosa/$',MostrarReservas),
    #url(r'^Verreservas/$',Verreservas),
    #url(r'^busqueda_ajax/$',ReservasLibros.as_view(), name='buscarView'),  # the id is passed to a view as a parameter
)
| juanjavierlimachi/Biblioteca | Biblioteca/Biblioteca/apps/estudiantes/urls.py | urls.py | py | 426 | python | es | code | 0 | github-code | 36 | [
{
"api_name": "django.conf.urls.patterns",
"line_number": 5,
"usage_type": "call"
},
{
"api_name": "django.conf.urls.url",
"line_number": 6,
"usage_type": "call"
},
{
"api_name": "django.conf.urls.url",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "djan... |
33136179454 | from telebot import types
import telebot, wikipedia, re
from config import *
from base_bot import bot
# Test-bot
# Simple two-state machine for the bot's text handling.
IDLE = 0
LISTENING_TO_COMMANDS = 2
# Current state; reset to IDLE by the /stop handler.
bot_state = IDLE
@bot.message_handler(commands=['test'])
def start_message(message):
    """Handle /test: ask for the user's average school grade via inline buttons."""
    markup = telebot.types.InlineKeyboardMarkup()
    # callback_data must be a string per the Telegram Bot API (the original
    # passed ints); query_handler compares against '3'/'4'/'5'.
    for caption, grade in (('Три', '3'), ('Четыре', '4'), ('Пять', '5')):
        markup.add(telebot.types.InlineKeyboardButton(text=caption, callback_data=grade))
    bot.send_message(message.chat.id, text="Какая средняя оценка была у Вас в школе?", reply_markup=markup)
@bot.callback_query_handler(func=lambda call: True)
def query_handler(call):
    """Answer an inline-button press with a verdict based on the chosen grade."""
    # Dismiss the client-side loading indicator first.
    bot.answer_callback_query(callback_query_id=call.id, text='Спасибо за честный ответ!')
    answers = {
        '3': 'Вы троечник!',
        '4': 'Вы хорошист!',
        '5': 'Вы отличник!',
    }
    answer = answers.get(call.data)
    # Guard: the original sent `answer` unconditionally, so an unknown
    # callback_data produced an empty message, which the Telegram API rejects.
    if answer:
        bot.send_message(call.message.chat.id, answer)
    # Remove the inline keyboard so the question cannot be answered twice.
    bot.edit_message_reply_markup(call.message.chat.id, call.message.message_id)
@bot.message_handler(commands=['stop'])
def stop(m, res=False):
    """Handle /stop: return the bot to the idle state."""
    global bot_state
    bot_state = IDLE
def handle_text(message):
    # Reply with a wiki lookup unless the bot is idle.
    # NOTE(review): `getwiki` is not defined in this module -- presumably provided
    # via the `config`/`base_bot` star imports; confirm it exists at runtime.
    if bot_state != IDLE:
        bot.send_message(message.chat.id, getwiki(message.text), reply_markup=keyboard1)
# Persistent reply keyboard exposing the two supported commands.
keyboard1 = telebot.types.ReplyKeyboardMarkup(True, True)
keyboard1.row('/test', '/stop')

if __name__ == "__main__":
    from base_bot import main
    main()
{
"api_name": "telebot.types.InlineKeyboardMarkup",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "telebot.types",
"line_number": 13,
"usage_type": "attribute"
},
{
"api_name": "telebot.types.InlineKeyboardButton",
"line_number": 14,
"usage_type": "call"
},
... |
69822409383 | # -*- coding: utf-8 -*-
"""Subclass of ``BasisSet`` designed to represent an OpenMX configuration."""
import collections
import json
import pathlib
from typing import Sequence
from importlib_resources import files
from aiida_basis.data.basis import PaoData
from ...metadata import openmx as openmx_metadata
from ..mixins import RecommendedOrbitalConfigurationMixin
from .basis import BasisSet
__all__ = ('OpenmxConfiguration', 'OpenmxBasisSet')

# (version, protocol, hardness) triple identifying one OpenMX PAO basis set release.
OpenmxConfiguration = collections.namedtuple('OpenmxConfiguration', ['version', 'protocol', 'hardness'])
class OpenmxBasisSet(RecommendedOrbitalConfigurationMixin, BasisSet):
    """Subclass of ``BasisSet`` designed to represent a set of OpenMX PAOs.

    The `OpenmxBasisSet` is essentially a `BasisSet` with some additional constraints. It can only
    be used to contain the bases and corresponding metadata of the PAO basis sets included with
    the OpenMX source code.
    """

    _basis_types = (PaoData,)
    label_template = 'OpenMX/{version}/{protocol}/{hardness}'
    default_configuration = OpenmxConfiguration('19', 'standard', 'soft')
    valid_configurations = (
        OpenmxConfiguration('19', 'quick', 'soft'), OpenmxConfiguration('19', 'quick', 'hard'),
        OpenmxConfiguration('19', 'standard', 'soft'), OpenmxConfiguration('19', 'standard', 'hard'),
        OpenmxConfiguration('19', 'precise', 'soft'), OpenmxConfiguration('19', 'precise', 'hard')
        # FUTURE: add 2013 configurations
    )
    url_base = 'https://t-ozaki.issp.u-tokyo.ac.jp/'
    url_version = {'19': 'vps_pao2019/', '13': 'vps_pao2013/'}

    @classmethod
    def get_valid_labels(cls) -> Sequence[str]:
        """Return the tuple of labels of all valid OpenMX basis set configurations.

        :return: valid configuration labels.
        """
        configurations = set(cls.valid_configurations)
        return tuple(cls.format_configuration_label(configuration) for configuration in configurations)

    @classmethod
    def format_configuration_label(cls, configuration: OpenmxConfiguration) -> str:
        """Format a label for an `OpenmxConfiguration` with the required syntax.

        :param configuration: OpenMX basis set configuration.
        :returns: label.
        """
        return cls.label_template.format(
            version=configuration.version, protocol=configuration.protocol, hardness=configuration.hardness
        )

    @classmethod
    def get_configuration_metadata_filepath(cls, configuration: OpenmxConfiguration) -> pathlib.Path:
        """Return the filepath to the metadata JSON of a given `OpenmxConfiguration`.

        :param configuration: OpenMX basis configuration.
        :return: metadata filepath.
        """
        metadata_filename = f'{configuration.version}_{configuration.protocol}_{configuration.hardness}.json'
        return files(openmx_metadata) / metadata_filename

    @classmethod
    def get_configuration_metadata(cls, configuration: OpenmxConfiguration):
        """Return the metadata dictionary for an `OpenmxConfiguration`.

        :param configuration: OpenMX basis set configuration.
        :returns: metadata dictionary.
        """
        metadata_filepath = cls.get_configuration_metadata_filepath(configuration)
        try:
            # Explicit encoding: the metadata files are JSON and must not depend
            # on the platform's default text encoding.
            with open(metadata_filepath, 'r', encoding='utf-8') as stream:
                metadata = json.load(stream)
        except FileNotFoundError as exception:
            raise FileNotFoundError(
                f'Metadata JSON for {cls.format_configuration_label(configuration)} could not be found'
            ) from exception
        except OSError as exception:
            raise OSError(
                f'Error while opening the metadata file for {cls.format_configuration_label(configuration)}'
            ) from exception
        return metadata

    @classmethod
    def get_element_metadata(cls, element: str, configuration: OpenmxConfiguration):
        """Return the metadata dictionary for an element from an OpenMX basis set configuration.

        :param: element IUPAC element symbol.
        :configuration: OpenMX basis set configuration.
        :returns: element metadata.
        :raises: `ValueError` if the element does not exist in the configuration metadata.
        """
        configuration_metadata = cls.get_configuration_metadata(configuration)
        try:
            metadata = configuration_metadata[element]
        except KeyError as exception:
            # Fixed: the second string literal was missing its `f` prefix, so the
            # configuration label was emitted literally as '{cls.format_...}'.
            raise ValueError(
                f'The element {element} does not have an entry in the metadata of '
                f'{cls.format_configuration_label(configuration)}'
            ) from exception
        return metadata

    # @classmethod
    # def get_url_file(cls, element: str, configuration: OpenmxConfiguration):
    #     """Return the URL for the PAO file for a given basis set label and element.
    #     :param element: IUPAC element symbol.
    #     :param configuration: basis set configuration.
    #     :returns: the URL from which the PAO basis file can be downloaded.
    #     :raises: `ValueError` if the configuration or the element symbol is invalid.
    #     """
    #     if configuration not in cls.valid_configurations:
    #         raise ValueError(f'{cls.format_configuration_label(configuration)} is not a valid configuration')
    #     element_metadata = cls.get_pao_metadata(element, configuration)
    #     url = cls.url_base + cls.url_version[configuration.version] + f'{element}/' + element_metadata['filename']
    #     return url

    # @classmethod
    # def get_urls_configuration(cls, configuration: OpenmxConfiguration):
    #     """Return the URLs for all the PAO files of a given OpenMX basis set configuration.
    #     :param configuration: OpenMX basis set configuration.
    #     :returns: list of URLs
    #     :raises: `ValueError` is the configuration is invalid.
    #     """
    #     if configuration not in cls.valid_configurations:
    #         raise ValueError(f'{cls.format_configuration_label(configuration)} is not a valid configuration')
    #     configuration_metadata = cls.get_configuration_metadata(configuration)
    #     url_base = cls.url_base + cls.url_version[configuration.version]
    #     urls = [
    #         url_base + f'{element}/' + metadata['filename'] for element, metadata in configuration_metadata.items()
    #     ]
    #     return urls

    @classmethod
    def get_md5s_configuration(cls, configuration: OpenmxConfiguration):
        """Return the MD5s for all the PAO files of a given OpenMX basis set configuration.

        :param configuration: OpenMX basis set configuration.
        :returns: dictionary of MD5s
        :raises: `ValueError` is the configuration is invalid.
        """
        if configuration not in cls.valid_configurations:
            raise ValueError(f'{cls.format_configuration_label(configuration)} is not a valid configuration')
        configuration_metadata = cls.get_configuration_metadata(configuration)
        md5s = {element: metadata['md5'] for element, metadata in configuration_metadata.items()}
        return md5s

    @classmethod
    def get_orbital_configs_configuration(cls, configuration: OpenmxConfiguration):
        """Return the orbital configuration tuples for all the PAO files of a given OpenMX basis set configuration.

        :param configuration: OpenMX basis set configuration.
        :returns: dictionary of MD5s
        :raises: `ValueError` is the configuration is invalid.
        """
        if configuration not in cls.valid_configurations:
            raise ValueError(f'{cls.format_configuration_label(configuration)} is not a valid configuration')
        configuration_metadata = cls.get_configuration_metadata(configuration)
        orbital_configs = {
            element: metadata['orbital_configuration'] for element, metadata in configuration_metadata.items()
        }
        return orbital_configs

    def __init__(self, label=None, **kwargs):
        """Construct a new instance, validating that the label matches the required format."""
        if label not in self.get_valid_labels():
            raise ValueError(f'the label `{label}` is not a valid OpenMX basis set configuration label.')
        super().__init__(label=label, **kwargs)
| azadoks/aiida-basis | aiida_basis/groups/set/openmx.py | openmx.py | py | 8,289 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "collections.namedtuple",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "mixins.RecommendedOrbitalConfigurationMixin",
"line_number": 19,
"usage_type": "name"
},
{
"api_name": "basis.BasisSet",
"line_number": 19,
"usage_type": "name"
},
{
... |
72692974183 | from ast import literal_eval
import pymongo
# Handles all interactions with the database
class DbManager:
    """Thin wrapper around the MongoDB ``Trilateral`` database.

    Every query helper catches :class:`pymongo.errors.PyMongoError`, prints a
    diagnostic and implicitly returns ``None``, so callers must be prepared
    for a ``None`` result when the database is unavailable.
    """

    # Set properly in __init__; kept as a class attribute for compatibility.
    database = None

    def __init__(self):
        # Client instantiation with the MongoDB client.
        # SECURITY(review): credentials are hard-coded in the connection URI;
        # move them to configuration or environment variables.
        self.client = pymongo.MongoClient(
            "mongodb+srv://gdp:gdp@propaganda.m00hm.mongodb.net/Trilateral?retryWrites=true&w=majority")
        # Use the Trilateral database of this deployment.
        self.database = self.client.Trilateral

    def drop_collections(self):
        """Drop every collection used by the application."""
        try:
            self.database['documents_document'].drop()
            self.database['documents_claim'].drop()
            self.database['documents_graph'].drop()
            self.database['tweets_tweet'].drop()
            self.database['tweets_query'].drop()
            self.database['trends_trend'].drop()
        except pymongo.errors.PyMongoError:
            print("Collection not found in database")

    def get_all_documents(self, uid: str):
        """Return all documents stored under ``uid`` as a list."""
        try:
            return list(self.database['documents_document'].find({"uid": uid}))
        except pymongo.errors.PyMongoError:
            print("No collection documents_document found in database")

    def count_all_documents(self, uid: str):
        """Return the number of documents stored under ``uid``."""
        try:
            # Cursor.count() was deprecated in PyMongo 3.7 and removed in 4.0;
            # count_documents() is the supported replacement.
            return self.database['documents_document'].count_documents({"uid": uid})
        except pymongo.errors.PyMongoError:
            # Fixed: the old message passed `uid` as a second print argument and
            # never interpolated its %s placeholder.
            print(f"No documents found in database for uid {uid}")

    def count_all_tweets(self, uid: str):
        """Return the number of tweets stored under ``uid``."""
        try:
            return self.database['tweets_tweet'].count_documents({"uid": uid})
        except pymongo.errors.PyMongoError:
            print(f"No tweets found in database for uid {uid}")

    def get_all_cleaned_tokens(self, uid: str):
        """Return the flattened list of cleaned tokens from all documents under ``uid``.

        Each stored ``cleaned_tokens`` field is a stringified Python list and
        is parsed back with :func:`ast.literal_eval`.
        """
        try:
            rows = self.database['documents_document'].find({"uid": uid},
                                                            {"_id": 0, "cleaned_tokens": 1})
            cleaned_tokens = []
            for row in rows:
                cleaned_tokens.extend(literal_eval(row['cleaned_tokens']))
            return cleaned_tokens
        except pymongo.errors.PyMongoError:
            print("No collection documents_document found in database")

    def get_all_main_texts(self, uid: str):
        """Return the text bodies of all documents under ``uid`` joined by single spaces."""
        try:
            rows = self.database['documents_document'].find({"uid": uid},
                                                            {"_id": 0, "text_body": 1})
            return " ".join(row['text_body'] for row in rows)
        except pymongo.errors.PyMongoError:
            print("No collection documents_document found in database")

    def get_all_tweets(self, uid: str):
        """Return the raw tweet documents stored under ``uid``.

        Note: the previous implementation also built a trimmed list of dicts
        but returned the raw documents anyway; that dead code was removed.
        """
        try:
            return list(self.database['tweets_tweet'].find({"uid": uid}))
        except pymongo.errors.PyMongoError:
            print("No collection tweets_tweet found in database")

    def get_all_html_links(self, uid: str):
        """Return the flattened list of HTML links from all documents under ``uid``."""
        try:
            rows = self.database['documents_document'].find({"uid": uid},
                                                            {"_id": 0, "html_links": 1})
            html_links = []
            for row in rows:
                html_links.extend(literal_eval(row['html_links']))
            return html_links
        except pymongo.errors.PyMongoError:
            print(f"No objects for uid {uid} found in collection documents_document")

    def get_claim(self, uid: str):
        """Return the claim string stored under ``uid``."""
        try:
            c_result = self.database['documents_claim'].find({"uid": uid},
                                                             {"_id": 0, "claim": 1})
            return c_result[0]['claim']
        except pymongo.errors.PyMongoError:
            print(f"No objects for uid {uid} found in collection documents_claim")

    def get_query(self, uid: str):
        """Return the Twitter query string stored under ``uid``."""
        try:
            q_result = self.database['tweets_query'].find({"uid": uid},
                                                          {"_id": 0, "query": 1})
            return q_result[0]['query']
        except pymongo.errors.PyMongoError:
            print(f"No objects for uid {uid} found in collection tweets_query")

    def get_causal(self, uid: str):
        """Return the first causal-trend document stored under ``uid``."""
        try:
            causal = self.database['trends_trend'].find({"uid": uid})
            return causal[0]
        except pymongo.errors.PyMongoError:
            print(f"No objects for uid {uid} found in collection trends_trend")
| madeleinemvis/original_gdp | BackEnd/functions/dbmanager.py | dbmanager.py | py | 5,836 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "pymongo.MongoClient",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "pymongo.errors",
"line_number": 27,
"usage_type": "attribute"
},
{
"api_name": "pymongo.errors",
"line_number": 34,
"usage_type": "attribute"
},
{
"api_name": "pymongo.e... |
1141389329 | from werkzeug.security import check_password_hash
from db.SedmDb import SedmDB
import datetime
import os
import json
import re
import pandas as pd
import numpy as np
import requests
import glob
import time
from decimal import Decimal
from bokeh.io import curdoc
from bokeh.layouts import row, column
from bokeh.models import ColumnDataSource, Label
from bokeh.models import CDSView, GroupFilter
from bokeh.models.tools import HoverTool
from bokeh.models.ranges import Range1d
from bokeh.models.axes import LinearAxis
from bokeh.models.annotations import BoxAnnotation
from bokeh.plotting import figure
from bokeh.embed import components
from astropy.time import Time
import astropy.units as u
from astropy.coordinates import EarthLocation, SkyCoord, AltAz, get_sun,\
get_moon
from scheduler.scheduler import ScheduleNight
# Silence pandas' SettingWithCopyWarning for the whole module.
pd.options.mode.chained_assignment = None  # default='warn'
# Users allowed to see RA/DEC columns etc.  Mixed str/int entries --
# presumably matched against both username and user id; confirm callers.
superuser_list = ['SEDm_admin', 2, 20180523190352189]
# BUG FIX: -1 is deprecated (and rejected by pandas >= 2.0); None is the
# documented way to disable column-width truncation.
pd.set_option('display.max_colwidth', None)
# Column names returned by db.get_from_request, in order.
request_values = ['id', 'object_id', 'marshal_id',
                  'user_id', 'allocation_id',
                  'exptime', 'priority', 'inidate',
                  'enddate', 'maxairmass',
                  'cadence', 'phasesamples',
                  'sampletolerance', 'filters',
                  'nexposures', 'obs_seq', 'status',
                  'creationdate', 'lastmodified',
                  'max_fwhm', 'min_moon_dist',
                  'max_moon_illum', 'max_cloud_cover',
                  'seq_repeats', 'seq_completed',
                  'last_obs_jd']
# Column names returned by db.get_from_object, in order.
object_values = ['id', 'marshal_id', 'name', 'iauname',
                 'ra', 'dec', 'epoch', 'typedesig', 'magnitude']
"""
request_form_values = ['request_id', 'object_id', 'marshal_id',
                       'user_id', 'allocation', 'ifu', 'ifu_use_mag',
                       'ab', 'rc', 'do_r', 'do_g', 'do_i', 'do_u',
                       'r_exptime', 'g_exptime', 'i_exptime', 'u_exptime',
                       'r_repeats', 'g_repeats', 'i_repeats', 'u_repeats',
                       'ifu_exptime', 'priority', 'inidate', 'rc_use_mag',
                       'enddate', 'maxairmass', 'status', 'max_fwhm',
                       'min_moon_dist', 'max_moon_illum', 'max_cloud_cover',
                       'seq_repeats', 'seq_completed']
"""
# Form field names processed by process_request_form (the commented-out
# version above additionally carried the retired 'ab' field).
request_form_values = ['request_id', 'object_id', 'marshal_id',
                       'user_id', 'allocation', 'ifu', 'ifu_use_mag',
                       'rc', 'do_r', 'do_g', 'do_i', 'do_u',
                       'r_exptime', 'g_exptime', 'i_exptime', 'u_exptime',
                       'r_repeats', 'g_repeats', 'i_repeats', 'u_repeats',
                       'ifu_exptime', 'priority', 'inidate', 'rc_use_mag',
                       'enddate', 'maxairmass', 'status', 'max_fwhm',
                       'min_moon_dist', 'max_moon_illum', 'max_cloud_cover',
                       'seq_repeats', 'seq_completed']
# Photometric filters available on the rainbow camera.
rc_filter_list = ['r', 'g', 'i', 'u']
# Module-level scheduler instance shared by the request handlers.
schedule = ScheduleNight()
# this all needs to go in some sort of config file instead of changing the
# source code constantly
# Per-host data directories and database connection settings, selected by
# the machine's hostname.
computer = os.uname()[1]  # a quick fix
port = 0
host = 'none'
if computer == 'pele':
    # development box talking to the production db on pharos
    raw_dir = '/scr/rsw/sedm/raw/'
    phot_dir = '/scr/rsw/sedm/phot/'
    redux_dir = '/scr/rsw/sedm/data/redux/'
    new_phot_dir = '/scr/rsw/sedm/data/redux/phot/'
    status_dir = '/scr/rsw/'
    requests_dir = '/scr/rsw/'
    base_dir = '/scr/rsw/'
    host = 'pharos.caltech.edu'
    port = 5432
elif computer == 'pharos':
    # production server; db runs locally
    raw_dir = '/scr2/sedm/raw/'
    phot_dir = '/scr2/sedm/phot/'
    new_phot_dir = '/scr2/sedmdrp/redux/phot/'
    redux_dir = '/scr2/sedmdrp/redux/'
    status_dir = '/scr2/sedm/raw/telstatus/'
    requests_dir = '/scr2/sedm/logs/requests/'
    host = 'localhost'
    base_dir = '/scr2/sedmdrp/'
    port = 5432
elif computer == 'minar':
    raw_dir = '/data/sedmdrp/raw/'
    phot_dir = '/data/sedmdrp/redux/phot/'
    new_phot_dir = '/data/sedmdrp/redux/phot/'
    redux_dir = '/data/sedmdrp/redux/'
    status_dir = '/data/sedmdrp/raw/telstatus/'
    requests_dir = '/data/sedmdrp/logs/requests/'
    host = 'localhost'
    base_dir = '/data/sedmdrp/'
    port = 5432
elif computer == 'ether':
    # local test environment; note the non-standard db port
    raw_dir = '/home/rsw/sedm_data/raw/'
    phot_dir = '/home/rsw/sedm/phot/'
    redux_dir = '/home/rsw/sedm_data/redux/'
    new_phot_dir = '/home/rsw/sedm_data/redux/phot/'
    requests_dir = '/home/rsw/'
    base_dir = '/home/rsw/sedm_data/'
    host = 'localhost'
    port = 22222
# NOTE: on an unrecognized host, port/host stay 0/'none' and the connection
# below will fail.
print(computer, port, host, "inputs")
# Module-level database handle used by every function below.
db = SedmDB(host=host, dbname='sedmdb', port=port)
def get_from_users(user_id):
    """Look up the id and username of the given user in the database."""
    where = {'id': user_id}
    return db.get_from_users(['id', 'username'], where)
def get_object_values(objid=None):
    """Fetch one object's database row keyed by column name.

    :param objid: object database id (anything int() accepts)
    :return: dict of object values, or a dict with an 'error' key when the
        id is invalid or unknown
    """
    try:
        obj_key = int(objid)
    except Exception as exc:
        return {'error': str(exc)}
    rows = db.get_from_object(values=object_values,
                              where_dict={'id': int(obj_key)})
    if rows:
        return make_dict_from_dbget(object_values, rows[0])
    return {'error': "No object found with that id number"}
def get_object_info(name=None, ra=None, dec=None, radius=5, out_type='html'):
    """Search the object table by name or by sky coordinates.

    :param name: object name to resolve (takes precedence over coordinates)
    :param ra: right ascension, used with dec when no name is given
    :param dec: declination
    :param radius: cone-search radius for the coordinate lookup
    :param out_type: 'html' for a rendered table, anything else for raw rows
    :return: dict with a 'message' key (html mode / no match) or list of rows
    """
    # 1. Find candidate object ids by name, else by position.
    if name:
        matches = db.get_object_id_from_name(name)
    elif ra and dec:
        matches = db.get_objects_near(ra=ra, dec=dec, radius=radius)
    else:
        matches = None
    # 2. Nothing found: report it.
    if not matches:
        return {'message': 'No objects found with that name or coordinates',
                'objects': False}
    # 3. Pull the full value row for every match.
    obj_list = [db.get_from_object(values=object_values,
                                   where_dict={'id': match[0]})[0]
                for match in matches]
    # 4. Render as requested.
    if out_type != 'html':
        return obj_list
    df = pd.DataFrame(obj_list, columns=object_values)
    df['Select'] = df['id'].apply(add_link)
    return {'message': df.to_html(escape=False, classes='table',
                                  index=False)}
def fancy_request_table(df):
    """
    df: pandas dataframe
        intended for tables of requests from get_requests_for_user,
        ie with columns:
        ['allocation', 'object', 'RA', 'DEC', 'start date', 'end date',
         'priority', 'status', 'lastmodified', 'UPDATE']

    returns: html string for df with the following changes:
        -if 'RA' and 'DEC' mean it won't rise tonight, the field is red
        -'object' column now has links to the marshal
        -table width is 100%,
         which is important for the fancy tables to display right
        -priority is a float to allow finer tuning of the scheduler
    """
    def highlight_set(hrow, color='#ff9999'):
        """
        makes 'RA' and 'DEC' fields highlighted if it won't get high when
        it's dark out; meant for tables with both 'RA' and 'DEC' columns
        """
        red = 'background-color: {}'.format(color)
        try:
            # RA that peaks at longitude-based midnight, approx
            best_ra = ((Time.now().mjd - 58382.) * 360 / 365.)
            # more than 8h from midnight
            if 180 - abs(180 - (best_ra - float(hrow['RA'])) % 360) > 120:
                return [red if i == 'RA' else '' for i in hrow.index.values]
            # red if it'll never go above ~40deg
            if hrow['DEC'] < -15.:
                return [red if i == 'DEC' else '' for i in hrow.index.values]
            else:
                return ['' for _ in hrow.index.values]
        except KeyError:
            return ['' for _ in hrow.index.values]

    def improve_obs_seq(li):
        """
        takes a list like ['1ifu'], [180, 180, 180], etc and makes it take up
        less space and also be more human-readable
        """
        try:
            if type(li[0]) == str:
                # ie it's an obs_seq
                for i, val in enumerate(li):
                    if val[0] == '1':
                        li[i] = val[1:]
                    else:
                        li[i] = val[1:] + ' x' + val[0]
            else:  # ie exptime
                for i, val in enumerate(li):
                    if all([j == val for j in li[i:]]) and i < len(li) - 1:
                        # all the rest match, of which there's >1
                        li = li[:i] + ['{}ea'.format(val)]
                        break
                    else:
                        li[i] = str(val)
            return ', '.join(li)
        # BUG FIX: was a bare `except:`, which also swallowed SystemExit
        # and KeyboardInterrupt; Exception keeps the same best-effort intent.
        except Exception:
            return "ERROR,PARSING_THE_FILTER_STRING"

    df['status'] = [i.lower() for i in df['status']]
    # strip the semester prefixes to keep the allocation column narrow
    df['allocation'] = [i.replace('2018A-', '').replace('2018B-', '')
                        .replace('2019B-', '').replace('2021B-', '')
                        .replace('2022A-', '')
                        for i in df['allocation']]
    for col in ('obs_seq', 'exptime'):
        df[col] = [improve_obs_seq(i) for i in df[col]]
    # NOTE(review): the second set_table_styles() call presumably replaces
    # the first (whose dict lacks a 'selector' key anyway) -- confirm.
    styled = df.style\
        .apply(highlight_set, axis=1)\
        .format(
            {'object': '<a href="https://fritz.science/source/{0}">{0}</a>',
             'RA': '{:.3f}', 'DEC': '{:.3f}', 'priority': '{:.1f}',
             'start date': '{:%b %d}', 'end date': '{:%b %d}',
             'lastmodified': '{:%b %d %H:%M}',
             'UPDATE': '<a href="request?request_id={}">+</a>'})\
        .set_table_styles([{'text-align': 'left'}])\
        .set_table_attributes('style="width:100%" '
                              'class="dataframe_fancy table '
                              'table-striped nowrap"')\
        .set_table_styles(
            [{'selector': '.row_heading',
              'props': [('display', 'none')]},
             {'selector': '.blank.level0',
              'props': [('display', 'none')]}])
    # this .replace() thing is super bad form but it's faster for now than
    # finding the right way
    # NOTE(review): Styler.render() was removed in pandas >= 2.0
    # (use to_html) -- confirm the pinned pandas version before upgrading.
    return styled.render()\
        .replace('RA</th>', '<a href="#" data-toggle="tooltip" '
                            'title="red if peaks >8h from midnight">RA</a></th>')\
        .replace('DEC</th>', '<a href="#" data-toggle="tooltip" '
                             'title="red if peaks below 40deg">dec</a></th>')
def get_homepage(userid, username):
    """Assemble the template context for a user's homepage.

    :param userid: database id of the logged-in user
    :param username: display name used in the greeting
    :return: dict of html tables (active/pending/complete/expired/failed
        requests plus allocations), a visibility link, and a greeting
    """
    # Look 7 days (plus a night) back and 30 days forward.
    sedm_dict = {'enddate':
                 datetime.datetime.utcnow() + datetime.timedelta(days=30),
                 'inidate':
                 datetime.datetime.utcnow() - datetime.timedelta(days=7,
                                                                 hours=8)}
    # 1. Get a dataframe of all requests for the current user
    reqs = get_requests_for_user(userid, sedm_dict['inidate'],
                                 sedm_dict['enddate'])
    # organize requests into dataframes by whether they are completed or not
    # BUG FIX: the OBSERVED test was duplicated; once is enough.
    complete = reqs[(reqs['status'] == 'COMPLETED') |
                    (reqs['status'] == 'OBSERVED')]
    active = reqs[(reqs['status'] == 'ACTIVE')]
    pending = reqs[(reqs['status'] == 'PENDING')]
    expired = reqs[(reqs['status'] == 'EXPIRED')]
    failed = reqs[(reqs['status'] == 'FAILED')]
    # retrieve information about the user's allocations
    ac = get_allocations_user(userid)
    # Create html tables
    sedm_dict['active'] = {'table': fancy_request_table(active),
                           'title': 'Active Request'}
    sedm_dict['pending'] = {'table': fancy_request_table(pending),
                            'title': 'Pending Requests'}
    sedm_dict['complete'] = {'table': fancy_request_table(complete),
                             'title': 'Completed Requests in the last 7 days'}
    sedm_dict['expired'] = {'table': fancy_request_table(expired),
                            'title': 'Expired Requests in the last 7 days'}
    sedm_dict['failed'] = {'table': fancy_request_table(failed),
                           'title': 'Failed Exposures in the last 7 days'}
    sedm_dict['allocations'] = {
        'table': ac.to_html(escape=False, classes='table table-striped',
                            index=False, col_space=10),
        'title': 'Your Active Allocations'}
    sedm_dict['visibility'] = {'title': 'Visibilities for pending requests',
                               'url': '/visibility'}
    # Make a greeting statement
    sedm_dict['greeting'] = 'Hello %s!' % username
    return sedm_dict
###############################################################################
# THIS SECTION HANDLES EXPIRED TARGETS. #
# KEYWORD:EXPIRED #
###############################################################################
def show_expired(days=7):
    """Placeholder for the expired-targets view.

    Currently only echoes the requested look-back window.

    :param days: number of days to look back
    :return: None
    """
    print(days)
###############################################################################
# THIS SECTION HANDLES ALL THINGS RELATED TO THE SCHEDULER PAGE. #
# KEYWORD:SCHEDULER #
###############################################################################
def get_schedule():
    """Load the pre-rendered scheduler page for templating.

    :return: dict carrying the scheduler html, flattened to one line,
        under the 'scheduler' key
    """
    with open('static/scheduler/scheduler.html', 'r') as fh:
        contents = fh.read()
    return {'scheduler': contents.replace('\n', '')}
###############################################################################
# THIS SECTION HANDLES ALL THINGS RELATED TO THE REQUEST PAGE. #
# KEYWORD:REQUEST #
###############################################################################
def add_link(objid):
return """<input type = "button" onclick = 'addValues("%s")'
value = "Use" />""" % objid
def get_request_page(userid, form1, content=None):
    """Prepare the request form for display.

    :param userid: database id of the logged-in user
    :param form1: the WTForms request form to configure in place
    :param content: optional query-string dict used to prefill the form
    :return: (dict of values used to prefill, the configured form)
    """
    req_dict = {}
    # Build the allocation dropdown from the user's active allocations.
    alloc = get_allocations_user(userid)
    if alloc is None or len(alloc) == 0:
        choices = [(0, "You have no active allocations!")]
    else:
        choices = [(0, '------')]
        choices += list(zip(alloc['id'], alloc['allocation']))
    form1.allocation.choices = choices
    # Prefill from an object id or request id when one was supplied.
    if content:
        req_dict.update(populate_form(content, form1))
    # Default the observing window to [today, today + 2 days].
    if not form1.inidate.data:
        form1.inidate.data = datetime.datetime.today()
    if not form1.enddate.data:
        form1.enddate.data = datetime.datetime.today() + datetime.timedelta(2)
    return req_dict, form1
def populate_form(content, form):
    """
    Prefill the request form from a request id or an object id.

    :param content: parsed query-string dict (values are lists)
    :param form: the WTForms request form to populate in place
    :return: dict of the values used to fill the form; carries a 'message'
        key when there was nothing usable to process
    """
    # We look to see if there is a request id to begin with because if so then
    # the object info will automatically be tied into that. If it is not in
    # there then we should start looking at other keywords.
    if 'request_id' in content:
        data = db.get_from_request(values=request_values,
                                   where_dict={'id': content['request_id'][0]})
        ret_dict = make_dict_from_dbget(headers=request_values, data=data[0])
        obj_dict = get_object_values(objid=ret_dict['object_id'])
        # Remove the identical id so as not to get confused
        del obj_dict['id']
        del obj_dict['marshal_id']
        ret_dict.update(obj_dict)
        ret_dict['request_id'] = content['request_id'][0]
        # I am setting a status_id because when I do
        # form.status.data = x['status']
        # it doesn't set the proper select option.
        ret_dict['status_id'] = ret_dict['status']
        ret_dict.update(parse_db_target_filters(ret_dict['obs_seq'],
                                                ret_dict['exptime']))
    elif 'object_id' in content:
        ret_dict = get_object_values(objid=content['object_id'][0])
        if 'error' in ret_dict:
            ret_dict['message'] = ret_dict['error']
    else:
        ret_dict = {'message': "There was nothing to process"}
    # TODO: There has to be a better way to do this...
    if 'request_id' in ret_dict:
        form.request_id.data = ret_dict['request_id']
    if 'object_id' in ret_dict:
        form.object_id.data = ret_dict['object_id']
    if 'marshal_id' in ret_dict:
        form.marshal_id.data = ret_dict['marshal_id']
    if 'allocation_id' in ret_dict:
        form.allocation_id.data = ret_dict['allocation_id']
        form.allocation.data = str(ret_dict['allocation_id'])
    if 'ra' in ret_dict:
        form.obj_ra.data = ret_dict['ra']
    if 'dec' in ret_dict:
        form.obj_dec.data = ret_dict['dec']
    if 'epoch' in ret_dict:
        form.obj_epoch.data = ret_dict['epoch']
    if 'magnitude' in ret_dict:
        form.obj_mag.data = ret_dict['magnitude']
    if 'name' in ret_dict:
        form.obj_name.data = ret_dict['name']
    if 'inidate' in ret_dict:
        form.inidate.data = ret_dict['inidate']
    if 'enddate' in ret_dict:
        form.enddate.data = ret_dict['enddate']
    if 'min_moon_distance' in ret_dict:
        # NOTE(review): this assigns the form field object itself (no .data)
        # and the db column is named 'min_moon_dist', so this branch looks
        # dead -- confirm before relying on it.
        form.min_moon_distance = ret_dict['min_moon_distance']
    if 'priority' in ret_dict:
        form.priority.data = ret_dict['priority']
    if 'maxairmass' in ret_dict:
        form.maxairmass.data = ret_dict['maxairmass']
    if 'cadence' in ret_dict:
        form.cadence.data = ret_dict['cadence']
    if 'phasesamples' in ret_dict:
        form.phasesamples.data = ret_dict['phasesamples']
    if 'sampletolerance' in ret_dict:
        form.sampletolerance.data = ret_dict['sampletolerance']
    if 'status_id' in ret_dict:
        form.status.data = ret_dict['status_id']
    if 'max_fwhm' in ret_dict:
        form.max_fwhm.data = ret_dict['max_fwhm']
    if 'seq_repeats' in ret_dict:
        if not ret_dict['seq_repeats']:
            form.seq_repeats.data = 1
        else:
            form.seq_repeats.data = ret_dict['seq_repeats']
    # BUG FIX: a stray block here overwrote form.seq_repeats.data with
    # ret_dict['seq_completed']; seq_completed is set on its own field below.
    if 'max_moon_illum' in ret_dict:
        form.max_moon_illum.data = ret_dict['max_moon_illum']
    if 'max_cloud_cover' in ret_dict:
        form.max_cloud_cover.data = ret_dict['max_cloud_cover']
    if 'creationdate' in ret_dict:
        form.creationdate.data = ret_dict['creationdate']
    if 'lastmodified' in ret_dict:
        form.lastmodified.data = ret_dict['lastmodified']
    if 'do_ifu' in ret_dict:
        form.ifu.data = ret_dict['do_ifu']
    if 'ifu_exptime' in ret_dict:
        form.ifu_exptime.data = ret_dict['ifu_exptime']
    if 'do_rc' in ret_dict:
        form.rc.data = ret_dict['do_rc']
    if 'do_r' in ret_dict:
        form.do_r.data = ret_dict['do_r']
    if 'do_i' in ret_dict:
        form.do_i.data = ret_dict['do_i']
    if 'do_g' in ret_dict:
        form.do_g.data = ret_dict['do_g']
    if 'do_u' in ret_dict:
        form.do_u.data = ret_dict['do_u']
    if 'r_exptime' in ret_dict:
        form.r_exptime.data = ret_dict['r_exptime']
    if 'g_exptime' in ret_dict:
        form.g_exptime.data = ret_dict['g_exptime']
    if 'i_exptime' in ret_dict:
        form.i_exptime.data = ret_dict['i_exptime']
    if 'u_exptime' in ret_dict:
        form.u_exptime.data = ret_dict['u_exptime']
    # BUG FIX: the r_repeats block was duplicated; once is enough.
    if 'r_repeats' in ret_dict:
        form.r_repeats.data = ret_dict['r_repeats']
    if 'g_repeats' in ret_dict:
        form.g_repeats.data = ret_dict['g_repeats']
    if 'i_repeats' in ret_dict:
        form.i_repeats.data = ret_dict['i_repeats']
    if 'u_repeats' in ret_dict:
        form.u_repeats.data = ret_dict['u_repeats']
    if 'seq_completed' in ret_dict:
        form.seq_completed.data = ret_dict['seq_completed']
    return ret_dict
def parse_db_target_filters(obs_seq, exptime):
    """
    Parse a database target observation scheme into form-friendly fields.

    :param obs_seq: sequence of entries like '1ifu', '2r', '1g', ...
    :param exptime: matching sequence of exposure times in seconds
    :return: dict of do_* flags plus per-filter exposure times and repeats
    """
    # Prep the variables
    rc_filters = ['r', 'g', 'i', 'u']
    return_dict = {
        'do_ifu': False, 'ifu_exptime': 0,
        'do_rc': False,
        'do_r': False, 'r_exptime': 0, 'r_repeats': 1,
        'do_g': False, 'g_exptime': 0, 'g_repeats': 1,
        'do_i': False, 'i_exptime': 0, 'i_repeats': 1,
        'do_u': False, 'u_exptime': 0, 'u_repeats': 1,
    }
    # 1. First we extract the filter sequence
    seq = list(obs_seq)
    exptime = list(exptime)
    # 2. Separate out ifu entries.
    # BUG FIX: the original popped by indices collected up front while
    # mutating the lists, which mis-indexes when more than one 'ifu' entry
    # is present; a single filtering pass avoids that.
    phot_seq = []
    phot_exptime = []
    for entry, etime in zip(seq, exptime):
        if 'ifu' in entry:
            return_dict['ifu_exptime'] = int(etime)
            # a zero exposure time means the ifu is effectively disabled
            return_dict['do_ifu'] = return_dict['ifu_exptime'] != 0
        else:
            phot_seq.append(entry)
            phot_exptime.append(etime)
    # 3. If the seq list is empty then there is no photometry follow-up
    # and we should exit
    if not phot_seq:
        return return_dict
    # 4. If we are still here then we need to get the photometry sequence
    return_dict['do_rc'] = True
    for entry, etime in zip(phot_seq, phot_exptime):
        flt = entry[-1]
        flt_exptime = int(etime)
        flt_repeat = int(entry[:-1])
        # 4a. After parsing the individual elements we need to check that
        # they are valid values before accepting them
        if flt in rc_filters:
            if 0 <= flt_exptime <= 1000 and 1 <= flt_repeat <= 100:
                return_dict['do_%s' % flt] = True
                return_dict['%s_exptime' % flt] = flt_exptime
                return_dict['%s_repeats' % flt] = flt_repeat
    return return_dict
def add_object_to_db(content):
    """Create a new object row from the coordinates in a request form.

    :param content: request-form content dict (obj_ra/obj_dec/obj_name/...)
    :return: (dict carrying a 'message' string, object id or False)
    """
    result = {'message': ''}
    # Without coordinates there is nothing we can add.
    if not (content['obj_ra'] and content['obj_dec']):
        result['message'] = ("How am I suppose to add your request "
                             "if you don't give me any coordinates--")
        return result, False
    ra = content['obj_ra']
    dec = content['obj_dec']
    # Sexagesimal input gets converted to decimal degrees.
    if ":" in ra or ":" in dec:
        coord = SkyCoord(ra=ra, dec=dec, unit=(u.hourangle, u.deg))
        ra = coord.ra.degree
        dec = coord.dec.degree
    epoch = content['obj_epoch'] if content['obj_epoch'] else 2000
    objdict = {
        'name': content['obj_name'],
        'ra': ra,
        'dec': dec,
        'typedesig': 'f',
        'epoch': epoch,
    }
    if content['obj_mag']:
        objdict['magnitude'] = content['obj_mag']
    objid, msg = db.add_object(objdict)
    # -1 means the object already exists; reuse the id embedded in msg.
    if objid == -1:
        result['message'] += msg + ('--For now I am going to '
                                    'assume that you want to '
                                    'use this object and will '
                                    'go ahead with the rest '
                                    'of the request--')
        objid = msg.split()[-1]
    return result, objid
def process_request_form(content, form, userid):
    """
    Validate a submitted request form and create or update the DB request.

    :param content: submitted form values (dict-like, keyed by field name)
    :param userid: id of the logged-in user; used when no user_id was sent
    :param form: the WTForms request form (allocation choices are refreshed)
    :return: (dict carrying a 'message' string, form)
    """
    request_dict = {}
    process_dict = {'message': ''}
    obs_seq_dict = {}
    # Rebuild the allocation choices so the form re-renders correctly.
    alloc = get_allocations_user(int(userid))
    if alloc is None or len(alloc) == 0:
        choices = [(0, "You have no active allocations!")]
    else:
        choices = [z for z in zip(alloc['id'], alloc['allocation'])]
        choices.insert(0, (0, '------'))
    form.allocation.choices = choices
    # 1. Let's start by making sure we have all the information needed for
    # the object id
    if 'object_id' in content and content['object_id']:
        objid = content['object_id']
    else:
        message, objid = add_object_to_db(content)
        if not objid:
            # could not create the object; report and bail out
            return {**process_dict, **message}, form
        else:
            if 'message' in message:
                process_dict['message'] += message['message']
    request_dict['object_id'] = int(objid)
    # 2. Now let's put together the request
    # by getting all the values into a dictionary
    """
    obs_seq_key_list = ['ifu', 'rc', 'ab', 'do_r', 'do_i', 'do_u', 'do_g',
                        'r_repeats', 'g_repeats', 'i_repeats', 'u_repeats',
                        'r_exptime', 'g_exptime', 'i_exptime', 'u_exptime',
                        'ifu_use_mag', 'rc_use_mag', 'ifu_exptime']
    """
    # Form keys that describe the observation sequence rather than the
    # request row itself; they are collected separately for make_obs_seq.
    obs_seq_key_list = ['ifu', 'rc', 'do_r', 'do_i', 'do_u', 'do_g',
                        'r_repeats', 'g_repeats', 'i_repeats', 'u_repeats',
                        'r_exptime', 'g_exptime', 'i_exptime', 'u_exptime',
                        'ifu_use_mag', 'rc_use_mag', 'ifu_exptime']
    for key in request_form_values:
        try:
            # This should handle both the case when an object id has already
            # been added and when we had to generate a new one
            if key == 'object_id':
                pass
            # This case will handle new requests when a user id is not given
            elif key == 'user_id' and not content['user_id']:
                request_dict['user_id'] = userid
            elif key == 'allocation':
                request_dict['allocation_id'] = content[key]
            # This section should handle all the observation data such as if
            # we want ifu/rc follow-up and exposure times. Note that because
            # of the database format we must handle this data outside
            # the request dictionary.
            elif key in obs_seq_key_list:
                if key in content:
                    obs_seq_dict[key] = content[key]
                else:
                    obs_seq_dict[key] = False
            else:
                request_dict[key] = content[key]
        except Exception as e:
            # best-effort: skip form fields that are missing or malformed
            print(str(e), key, 't')
            pass
    # 3. Now we need to create the obs_seq and exptime entries
    # We need to also make sure and add the object magnitude
    # to calculate exposure times
    if content['obj_mag']:
        obs_seq_dict['obj_mag'] = content['obj_mag']
    else:
        # no magnitude supplied: fall back to a mid-range default
        obs_seq_dict['obj_mag'] = 17.5
    filter_dict = (make_obs_seq(obs_seq_dict))
    if 'ERROR' in filter_dict:
        process_dict['message'] += filter_dict['ERROR']
    else:
        process_dict['message'] += filter_dict.pop("proc_message")
        request_dict = {**filter_dict, **request_dict}
        if 'request_id' in content and content['request_id']:
            # updating an existing request: db expects the key 'id'
            request_dict['id'] = int(content['request_id'])
            request_dict.pop('request_id')
            # empty/falsy values are stored as -1 sentinels
            for k, v in request_dict.items():
                if not v:
                    request_dict[k] = -1
            # ret = db.update_request(request_dict)
            db.update_request(request_dict)
        else:
            # creating a brand new request
            if 'request_id' in request_dict:
                request_dict.pop('request_id')
            request_dict['user_id'] = int(request_dict['user_id'])
            # print(request_dict)
            if 'external_id' in content:
                request_dict['external_id'] = content['external_id']
            # ret = db.add_request(request_dict)
            db.add_request(request_dict)
            # print(ret)
    return process_dict, form
def get_add_csv(user_id, form, content):
    """Stub for the CSV-upload page (not yet implemented).

    :param user_id: id of the logged-in user
    :param form: the upload form
    :param content: submitted content
    :return: placeholder dict plus the inputs, echoed back
    """
    placeholder = {'test': 'test'}
    return placeholder, form, user_id, content
def process_add_csv(content, form, user_id):
    """Stub for processing an uploaded CSV (not yet implemented).

    :param content: submitted content
    :param form: the upload form
    :param user_id: id of the logged-in user
    :return: placeholder dict plus the inputs, echoed back
    """
    placeholder = {'test': 'test'}
    return placeholder, form, content, user_id
def make_obs_seq(obs_seq_dict):
    """
    Build the database obs_seq/exptime strings from form values.

    :param obs_seq_dict: dict of form fields ('ifu', 'rc', do_*/_exptime/
        _repeats per filter, *_use_mag flags, 'obj_mag', 'ifu_exptime')
    :return: dict with 'obs_seq' and 'exptime' (postgres array literals)
        plus a 'proc_message' string, or an 'ERROR' key on failure
    """
    filters_list = []
    exptime_list = []
    ret_dict = {"proc_message": ""}
    # Normalize checkbox booleans to the 'y'/'n' strings used below.
    if isinstance(obs_seq_dict['ifu'], bool):
        if obs_seq_dict['ifu']:
            obs_seq_dict['ifu'] = 'y'
        else:
            obs_seq_dict['ifu'] = 'n'
    if isinstance(obs_seq_dict['rc'], bool):
        if obs_seq_dict['rc']:
            obs_seq_dict['rc'] = 'y'
        else:
            obs_seq_dict['rc'] = 'n'
    if obs_seq_dict['ifu'].lower() in ['y', 'yes', 'true']:
        # There may be case in the future where people want more than one IFU
        # at a time. In which case this code will need to be changed.
        if obs_seq_dict['ifu_use_mag']:
            if obs_seq_dict['ifu_exptime'] and \
                    int(obs_seq_dict['ifu_exptime']) > 0:
                ret_dict["proc_message"] += ("You should know that you "
                                             "supplied a non-zero value "
                                             "in the ifu exposure time "
                                             "field. However because you "
                                             "checked the use magnitude box "
                                             "I will be ignoring the supplied "
                                             "value.--")
            try:
                mag = float(obs_seq_dict['obj_mag'])
                if mag == 0:
                    # BUG FIX: "Feel free" and "to contact" were
                    # concatenated without a space ("freeto").
                    ret_dict['proc_message'] += ("I find it hard to believe "
                                                 "that you really wanted to "
                                                 "observe something zero "
                                                 "magnitude. So I can't let "
                                                 "this go through. Feel free "
                                                 "to contact me and dispute "
                                                 "this.--")
                    ifu_exptime = False
                else:
                    ifu_exptime = get_filter_exptime('ifu', mag)
            except Exception as e:
                ret_dict['proc_message'] += ("For some reason I couldn't "
                                             "process your magnitude. If you "
                                             "didn't add one then that is on "
                                             "you. Otherwise there is something"
                                             " wrong with this '%s' value. For"
                                             " the record here is the error "
                                             "message %s--" %
                                             (obs_seq_dict['obj_mag'], str(e)))
                ifu_exptime = False
        else:
            try:
                ifu_exptime = int(obs_seq_dict['ifu_exptime'])
                if 0 <= ifu_exptime <= 7200:
                    pass
                else:
                    ret_dict['proc_message'] += ("I don't know what you are "
                                                 "trying to do but %s is not an"
                                                 " acceptable IFU exposure "
                                                 "time. It's either less than "
                                                 " 0 or more than two hours.--"
                                                 % str(ifu_exptime))
                    ifu_exptime = False
            except Exception as e:
                ret_dict['proc_message'] += ("There is something wrong with "
                                             "your exposure time value. '%s' "
                                             "is not a proper value. Here is "
                                             "the error message return: %s--" %
                                             (obs_seq_dict['ifu_exptime'],
                                              str(e)))
                ifu_exptime = False
        if ifu_exptime:
            filters_list.append("1ifu")
            exptime_list.append(str(ifu_exptime))
    if obs_seq_dict['rc'].lower() in ['y', 'yes', 'true']:
        for flt in rc_filter_list:
            if obs_seq_dict['do_%s' % flt]:
                repeats = obs_seq_dict['%s_repeats' % flt]
                if 1 <= int(repeats) <= 100:
                    pass
                else:
                    ret_dict['proc_message'] += ("There is something wrong "
                                                 "with the number of "
                                                 "repeats you have "
                                                 "requested. Forcing it to 1"
                                                 "--")
                    repeats = 1
                if obs_seq_dict['rc_use_mag']:
                    mag = obs_seq_dict['obj_mag']
                    exptime = get_filter_exptime(flt, mag)
                    filters_list.append("%s%s" % (str(repeats), flt))
                    exptime_list.append(str(exptime))
                else:
                    exptime = int(obs_seq_dict['%s_exptime' % flt])
                    if 0 <= exptime <= 1000:
                        pass
                    else:
                        # BUG FIX: "disput" -> "dispute" in the message.
                        ret_dict['proc_message'] += ("The exposure time (%s) "
                                                     "you entered for filter "
                                                     "(%s) makes no sense. If "
                                                     "you entered something "
                                                     "more than 10mins it is "
                                                     "wasting time. Feel free "
                                                     "to contact me to dispute "
                                                     "this--" % (str(exptime),
                                                                 flt))
                        exptime = False
                    if exptime:
                        filters_list.append("%s%s" % (str(repeats), flt))
                        exptime_list.append(str(exptime))
    if not filters_list:
        ret_dict["ERROR"] = "NO FILTERS COULD BE DETERMINED"
        return ret_dict
    else:
        if len(filters_list) == len(exptime_list):
            ret_dict['obs_seq'] = '{%s}' % ','.join(filters_list)
            ret_dict['exptime'] = '{%s}' % ','.join(exptime_list)
        else:
            ret_dict["ERROR"] = ("Filter and exposure time list don't match "
                                 "%s : %s" % (','.join(filters_list),
                                              ','.join(exptime_list)))
    return ret_dict
def get_allocations_user(user_id, return_type=''):
    """Return the user's active allocations.

    :param user_id: database id of the user (interpolated as an int)
    :param return_type: 'list' for just the allocation ids, anything else
        for a pandas DataFrame with id/allocation/program/group/time columns
    """
    rows = db.execute_sql(""" SELECT a.id, a.designator, p.designator,
                        g.designator, a.time_allocated, a.time_spent
                        FROM allocation a, program p, groups g, usergroups ug
                        WHERE a.program_id = p.id AND p.group_id = g.id
                        AND g.id = ug.group_id AND a.active is True AND
                        ug.user_id = %d""" % user_id)
    if return_type == 'list':
        return [row[0] for row in rows]
    return pd.DataFrame(rows, columns=['id', 'allocation', 'program',
                                       'group', 'time allocated',
                                       'time spent'])
def get_requests_for_user(user_id, inidate=None, enddate=None):
    """
    Return a DataFrame of the user's requests active in the given window.

    :param user_id: database id of the user
    :param inidate: window start; defaults to 7 days (plus a night) ago
    :param enddate: window end; defaults to tomorrow
    :return: pandas DataFrame with columns allocation/object/RA/DEC/
        start date/end date/priority/status/lastmodified/obs_seq/exptime/
        UPDATE (the request id)
    """
    if not inidate:
        inidate = datetime.datetime.utcnow() - datetime.timedelta(days=7,
                                                                  hours=8)
    if not enddate:
        enddate = datetime.datetime.utcnow() + datetime.timedelta(days=1)
    # Requests overlapping [inidate, enddate] on allocations the user can see.
    request_query = ("""SELECT a.designator, o.name, o.ra, o.dec, r.inidate,
    r.enddate, r.priority, r.status, r.lastmodified, r.obs_seq, r.exptime, r.id
    FROM request r, object o, allocation a
    WHERE o.id = r.object_id AND a.id = r.allocation_id
    AND ( r.enddate > DATE('%s') AND r.inidate <= DATE('%s') )
    AND r.allocation_id IN
    (SELECT a.id
    FROM allocation a, groups g, usergroups ug, users u, program p
    WHERE ug.user_id = u.id AND ug.group_id = g.id AND u.id = %d AND
    p.group_id = g.id AND a.program_id = p.id
    ) ORDER BY r.lastmodified DESC;""" % (inidate, enddate, user_id))
    data = db.execute_sql(request_query)
    data = pd.DataFrame(data,
                        columns=['allocation', 'object', 'RA', 'DEC',
                                 'start date', 'end date', 'priority',
                                 'status', 'lastmodified', 'obs_seq',
                                 'exptime', 'UPDATE'])
    if user_id in superuser_list:
        pass
        # data['UPDATE'] = data['UPDATE'].apply(convert_to_link)
    else:
        # NOTE(review): DataFrame.drop returns a new frame; without
        # reassignment or inplace=True this line is a no-op, so RA/DEC are
        # shown to non-superusers too.  Left unchanged because the
        # downstream table formatter expects the RA/DEC columns -- confirm
        # the intent before fixing.
        data.drop(columns=['RA', 'DEC'])
    return data
def convert_to_link(reqid):
    """Return the update-request URL for the given request id."""
    base = """http://pharos.caltech.edu/request?request_id=%s"""
    return base % reqid
###############################################################################
# THIS SECTION HANDLES ALL THINGS RELATED TO THE OBJECT PAGE. #
# KEYWORD:OBJECT #
###############################################################################
def get_object(object_name, user_id):
    """Render the object-search result for the object page.

    :param object_name: name to search for
    :param user_id: currently unused
    :return: dict with a 'message' html/table string
    """
    if user_id:
        pass
    # 1. Look the object up by name, rendered as html.
    found = get_object_info(object_name, out_type='html')
    # 2. Report the result (or the lack of one).
    if found:
        return {'message': found['message']}
    return {'message': 'Could not find any targets with that name under '
                       'your allocation'}
###############################################################################
# THIS SECTION HANDLES ALL THINGS RELATED TO THE LOGIN PAGE. #
# KEYWORD:LOGIN #
###############################################################################
def check_login(username, password):
    """Validate a username/password pair against the users table.

    :param username: login name
    :param password: clear-text password to check against the stored hash
    :return: (True, user id) on success, (False, error message) otherwise
    """
    record = db.get_from_users(['username', 'password', 'id'],
                               {'username': username})
    if not record:
        return False, 'Incorrect username or password!'
    if not check_password_hash(record[0][1], password=password):
        return False, 'Incorrect username or password!!'
    return True, record[0][2]
def password_change(form, user_id):
    """
    Change the user's password after verifying the current one.

    :param form: form carrying password (current), pass_new and pass_conf
    :param user_id: database id of the user
    :return: dict with a 'message' string describing the outcome
    """
    # check for correct password and change if true
    password = form.password.data
    new_password = form.pass_new.data
    new_password_conf = form.pass_conf.data
    user_pass = db.get_from_users(['username', 'password', 'id'],
                                  {'id': user_id})
    if not user_pass:
        return {'message': "User not found"}
    elif user_pass[0] == -1:
        message = user_pass[1]
        return {'message': message}
    elif check_password_hash(user_pass[0][1], password):
        if new_password == new_password_conf:
            db.update_user({'id': user_pass[0][2],
                            'password': new_password})
            return {'message': 'Password Changed!'}
        # BUG FIX: this branch previously fell through and returned None
        # when the two new passwords disagreed; report the mismatch.
        return {'message': 'New passwords do not match!'}
    else:
        message = "Incorrect username or password!"
        return {'message': message}
###############################################################################
# THIS SECTION HANDLES ALL THINGS RELATED TO THE STATS PAGE. #
# KEYWORD:STATS #
###############################################################################
def get_project_stats(content, user_id=""):
    """Build the bokeh script/div pair for the allocation statistics page.

    :param content: request content; may carry 'inidate' and 'enddate'
    :param user_id: database id of the user whose allocations are shown
    :return: dict with 'script' and 'div' bokeh components
    """
    # Optional date bounds from the request.
    inidate = content['inidate'] if 'inidate' in content else None
    enddate = content['enddate'] if 'enddate' in content else None
    stats = get_allocation_stats(user_id, inidate=inidate, enddate=enddate)
    script, div = components(plot_stats_allocation(stats))
    return {'script': script, 'div': div}
def get_allocation_stats(user_id, inidate=None, enddate=None):
    """
    Obtains a list of allocations that belong to the user and
    query the total allocated name and time spent for that allocation.

    If no user_id is provided, all active allocations are returned.

    :param user_id: database id of the user, or None for all allocations
    :param inidate: optional window start used with enddate
    :param enddate: optional window end
    :return: dict with 'allocations' (names) plus 'alloc_hours',
        'spent_hours' and 'free_hours' arrays, sorted by allocated hours
    """
    if user_id is None:
        # All active allocations, regardless of user.
        res = db.get_from_allocation(["designator", "time_allocated",
                                      "time_spent"], {"active": True})
        df = pd.DataFrame(res, columns=["designator", "time_allocated",
                                        "time_spent"])
        # convert the stored durations to hours (assumes timedelta-like
        # values exposing total_seconds() -- TODO confirm db column type)
        alloc_hours = np.array([ta.total_seconds() / 3600.
                                for ta in df["time_allocated"]])
        spent_hours = np.array([ts.total_seconds() / 3600.
                                for ts in df["time_spent"]])
        free_hours = alloc_hours - spent_hours
        # clamp over-spent allocations to zero free time
        free_hours[np.where(free_hours < 0)] = 0.
        df = df.assign(alloc_hours=alloc_hours, spent_hours=spent_hours,
                       free_hours=free_hours)
    else:
        if inidate is None or enddate is None:
            # No window given: use the cumulative time_spent column.
            res = db.execute_sql(""" SELECT a.designator, a.time_allocated,
                                 a.time_spent
                                 FROM allocation a, program p, groups g,
                                 usergroups ug
                                 WHERE a.program_id = p.id AND
                                 p.group_id = g.id
                                 AND g.id = ug.group_id AND a.active is True
                                 AND ug.user_id = %d""" % user_id)
            df = pd.DataFrame(res, columns=["designator", "time_allocated",
                                            "time_spent"])
            alloc_hours = np.array([ta.total_seconds() / 3600.
                                    for ta in df["time_allocated"]])
            spent_hours = np.array([ts.total_seconds() / 3600.
                                    for ts in df["time_spent"]])
            free_hours = alloc_hours - spent_hours
            free_hours[np.where(free_hours < 0)] = 0.
            df = df.assign(alloc_hours=alloc_hours, spent_hours=spent_hours,
                           free_hours=free_hours)
        else:
            # Window given: re-derive spent time per allocation in range.
            res = db.execute_sql(""" SELECT DISTINCT a.id, a.designator,
                                 a.time_allocated
                                 FROM allocation a, program p, groups g,
                                 usergroups ug
                                 WHERE a.program_id = p.id AND
                                 p.group_id = g.id
                                 AND g.id = ug.group_id AND a.active is True
                                 AND ug.user_id = %d;""" % user_id)
            allocdes = []
            spent_hours = []
            alloc = []
            for ais in res:
                spent = db.get_allocation_spent_time(ais[0], inidate, enddate)
                allocdes.append(ais[1])
                spent_hours.append(int(spent) / 3600.)
                alloc.append(ais[2])
            # NOTE(review): mixing str/duration/float lists gives an
            # object-dtype array here; total_seconds() below then relies on
            # the time_allocated elements surviving as timedelta-like
            # objects -- confirm.
            res = np.array([allocdes, alloc, spent_hours])
            df = pd.DataFrame(res.T, columns=["designator", "time_allocated",
                                              "time_spent"])
            alloc_hours = np.array([ta.total_seconds() / 3600.
                                    for ta in df["time_allocated"]])
            free_hours = alloc_hours - spent_hours
            free_hours[np.where(free_hours < 0)] = 0.
            df = df.assign(alloc_hours=alloc_hours, spent_hours=spent_hours,
                           free_hours=free_hours)
    # Largest allocations first, then split out the plotting arrays.
    df = df.sort_values(by=["alloc_hours"], ascending=False)
    alloc_names = df["designator"].values
    category = ["alloc_hours", "spent_hours", "free_hours"]
    data = {'allocations': alloc_names}
    for cat in category:
        data[cat] = df[cat]
    return data
def plot_stats_allocation(data):
    """
    Plots in the shape of bars the time available and spent for each active
    allocation.

    :param data: dict with key 'allocations' (array of allocation names) and
        numeric arrays 'alloc_hours', 'spent_hours', 'free_hours' aligned
        with it (as produced by the allocation-stats query)
    :return: bokeh row layout combining two bar charts and two pie charts
    """
    # NaNs (e.g. allocations with no time records) would break the stacking
    data = {key: np.nan_to_num(data[key]) for key in data}
    # Create the first plot with the allocation hours
    alloc_names = data['allocations']
    categories = ["spent_hours", "free_hours"]
    colors = ["#e84d60", "darkgreen"]  # "#c9d9d3"
    n_names = len(alloc_names)
    source = ColumnDataSource(data=data)
    p = figure(x_range=alloc_names, plot_height=420, plot_width=80 * 8,
               title="Time spent/available for SEDM allocations this term",
               toolbar_location=None, tools="")
    # NOTE(review): the `legend=` keyword is the pre-1.x bokeh API; newer
    # versions expect `legend_label=` — confirm against the pinned version
    p.vbar_stack(categories, x='allocations', width=0.9, color=colors,
                 source=source, legend=["Spent", "Available"])
    p.y_range.start = 0
    p.x_range.range_padding = 0.1
    p.xgrid.grid_line_color = None
    p.axis.minor_tick_line_color = None
    p.outline_line_color = None
    p.legend.location = "top_right"
    p.legend.orientation = "horizontal"
    p.yaxis.axis_label = 'Hours'
    p.xaxis.major_label_orientation = 0.3
    # Create the second plot with the % spent
    alloc_names = data['allocations']
    percentage = (data["spent_hours"] / data["alloc_hours"]) * 100
    colors = n_names * ['#084594']
    # dead code kept as a string literal by the original author:
    '''for i, p in enumerate(percentage):
        if p<50: colors[i] = '#22A784'
        elif p>50 and p<75: colors[i] = '#FD9F6C'
        else: colors[i] = '#DD4968'''
    source = ColumnDataSource(data=dict(alloc_names=alloc_names,
                                        percentage=percentage, color=colors))
    p2 = figure(x_range=alloc_names, y_range=(0, 100), plot_height=420,
                plot_width=80 * 8,
                title="Percentage of time spent",
                toolbar_location=None, tools="")
    p2.vbar(x='alloc_names', top='percentage', width=0.9, color='color',
            source=source)
    p2.xgrid.grid_line_color = None
    p2.legend.orientation = "horizontal"
    p2.legend.location = "top_center"
    p2.yaxis.axis_label = '% time spent'
    p2.xaxis.major_label_orientation = 0.3
    # Create the pie charts
    pie_colors = 10 * ["red", "green", "blue", "orange", "yellow", 'lime',
                       'brown', 'cyan', 'magenta', 'olive', 'black', 'teal',
                       'gold', 'crimson', 'moccasin', 'greenyellow', 'navy',
                       'ivory', 'lightpink']
    # First one with the time spent
    # define starts/ends for wedges from percentages of a circle
    percents_only = np.round(np.array(list(data["spent_hours"] /
                                           np.sum(data["spent_hours"])))
                             * 100, 1)
    percents = np.cumsum([0] + list(data["spent_hours"] /
                                    np.sum(data["spent_hours"])))
    starts = [per * 2 * np.pi for per in percents[:-1]]
    ends = [per * 2 * np.pi for per in percents[1:]]
    p3 = figure(x_range=(-1, 2.5), y_range=(-1.1, 1.1), plot_height=420,
                plot_width=600, title="% spent")
    # Add individual wedges:
    for i in range(n_names):
        p3.wedge(x=0, y=0, radius=.9, start_angle=starts[i], end_angle=ends[i],
                 color=pie_colors[i],
                 legend="[{0}%] {1}".format(percents_only[i], alloc_names[i]))
    p3.xgrid.grid_line_color = None
    p3.ygrid.grid_line_color = None
    p3.legend.orientation = "vertical"
    p3.legend.location = "top_right"
    p3.legend.border_line_alpha = 0
    p3.legend.background_fill_color = None
    p3.xaxis.visible = False
    p3.yaxis.visible = False
    # Second one with the time allocated
    # define starts/ends for wedges from percentages of a circle
    percents_only = np.round(np.array(list(data["alloc_hours"] /
                                           np.sum(data["alloc_hours"])))
                             * 100, 1)
    percents = np.cumsum([0] + list(data["alloc_hours"] /
                                    np.sum(data["alloc_hours"])))
    starts = [per * 2 * np.pi for per in percents[:-1]]
    ends = [per * 2 * np.pi for per in percents[1:]]
    p4 = figure(x_range=(-1, 2.5), y_range=(-1.1, 1.1), plot_height=420,
                plot_width=600,
                title="% time allocated to each program")
    # Add individual wedges:
    for i in range(n_names):
        p4.wedge(x=0, y=0, radius=.9, start_angle=starts[i], end_angle=ends[i],
                 color=pie_colors[i],
                 legend="[{0}%] {1}".format(percents_only[i], alloc_names[i]))
    p4.xgrid.grid_line_color = None
    p4.ygrid.grid_line_color = None
    p4.legend.orientation = "vertical"
    p4.legend.location = "top_right"
    p4.legend.border_line_alpha = 0
    p4.legend.background_fill_color = None
    p4.xaxis.visible = False
    p4.yaxis.visible = False
    # register the combined layout with the current bokeh document
    layout = row(column(p, p2), column(p4, p3))
    curdoc().add_root(layout)
    curdoc().title = "Allocation stats"
    return layout
###############################################################################
# THIS SECTION HANDLES ALL THINGS RELATED TO THE VIEW_DATA PAGE. #
# KEYWORD:VIEW_DATA #
###############################################################################
def get_ab_what(obsdir):
    """Build pseudo what-list entries for A/B-pair cubes in *obsdir*.

    Scans for ``e3d_crr_b_ifu*.fits`` cubes that have no matching raw
    ``crr_b_...`` frame (i.e. true A/B pairs) and formats each one like a
    line from ``what.list`` so downstream code can treat them uniformly.

    :param obsdir: observation directory to scan
    :return: list of what-list style strings, one per A/B cube
    """
    ablist = []
    cubes = glob.glob(os.path.join(obsdir, "e3d_crr_b_ifu*.fits"))
    for e3df in cubes:
        # os.path.basename instead of split('/')[-1]: portable across OSes
        base = os.path.basename(e3df)
        # get root filename: crr_b_ifu<date>_<hh>_<mm>_<ss>
        rute = '_'.join(base.split('_')[1:7])
        # is this a standard single cube? (raw frame present -> not an A/B)
        crrf = glob.glob(os.path.join(obsdir, rute + '.fit*'))
        if len(crrf) > 0:
            continue
        fname = '_'.join(base.split('_')[3:7]) + '.fits'
        targ = base.split('_')[7].split('.fit')[0]
        ablist.append(" "+fname+" (1.000/0.1/1.0 s): " + targ + " [A]")
    return ablist
def get_ifu_products(obsdir=None, user_id=None, obsdate="", show_finder=True,
                     product_type='all', camera_type='ifu'):
    """
    Collect IFU data products for one night and build the HTML to show them.

    :param obsdir: list of one observation-directory path, or None to use
        the redux dir for *obsdate*
    :param user_id: requesting user's id; controls which targets are shown
    :param obsdate: list of one 'YYYYMMDD' string, or "" for current UT date
    :param show_finder: include finder-chart thumbnails when available
    :param product_type: currently unused
    :param camera_type: currently unused
    :return: dict with 'obsdate' and HTML under 'sci_data', or a dict with a
        'message' key when the directory or what.list is missing
    """
    # ifu_dict = {}
    if product_type:
        pass
    if camera_type:
        pass
    if not obsdate:
        obsdate = datetime.datetime.utcnow().strftime("%Y%m%d")
    else:
        obsdate = obsdate[0]
    if not obsdir:
        # NOTE: glob patterns below rely on obsdir ending with '/'
        obsdir = '%s%s/' % (redux_dir, obsdate)
    else:
        obsdir = obsdir[0]
    # Look first to make sure there is a data directory.
    if not os.path.exists(obsdir):
        return {'message': 'No data directory could be located for %s UT' %
                           os.path.basename(os.path.normpath(obsdir)),
                'obsdate': obsdate}
    sedm_dict = {'obsdate': obsdate,
                 'sci_data': ''}
    # Now lets get the non-science products (i.e. calibrations)
    calib_dict = {'flat3d': os.path.join(obsdir, '%s_flat3d.png' % obsdate),
                  'wavesolution': os.path.join(obsdir,
                                               '%s_wavesolution'
                                               '_dispersionmap.png' % obsdate),
                  'cube_lambdarms': os.path.join(obsdir, 'cube_lambdarms.png'),
                  'cube_trace_sigma': os.path.join(obsdir,
                                                   'cube_trace_sigma.png')}
    # If a calibration frame doesn't exist then pop it out to avoid bad links
    # on the page
    remove_list = []
    div_str = ''
    for k, v in calib_dict.items():
        if not os.path.exists(v):
            remove_list.append(k)
    if remove_list:
        for i in remove_list:
            calib_dict.pop(i)
    # print(calib_dict, 'calib products')
    if user_id == 2:  # SEDM_admin: show raw pipeline reports
        if os.path.exists(os.path.join(obsdir, 'report.txt')):
            ext_report = """<a href="http://pharos.caltech.edu/data_r/redux/{0}/report.txt">Extraction</a>""".format(obsdate)
        else:
            ext_report = ""
        if os.path.exists(os.path.join(obsdir, 'report_ztf_fritz.txt')):
            frz_report = """<a href="http://pharos.caltech.edu/data_r/redux/{0}/report_ztf_fritz.txt">Fritz</a>""".format(obsdate)
        else:
            frz_report = ""
        if os.path.exists(os.path.join(obsdir, 'report_ztf_growth.txt')):
            grw_report = """<a href="http://pharos.caltech.edu/data_r/redux/{0}/report_ztf_growth.txt">Growth</a>""".format(obsdate)
        else:
            grw_report = ""
        if os.path.exists(os.path.join(obsdir, 'what.txt')):
            wha_report = """<a href="http://pharos.caltech.edu/data_r/redux/{0}/what.txt" type="plain/text">What</a>""".format(obsdate)
        else:
            wha_report = ""
        div_str += """<div class="row">"""
        div_str += """<h4>Reports</h4>"""
        div_str += """{0} {1} {2} {3}""".format(ext_report, frz_report,
                                                grw_report, wha_report)
        div_str += "</div>"
    div_str += """<div class="row">"""
    div_str += """<h4>Calibrations</h4>"""
    for k, v in calib_dict.items():
        impath = "/data/%s/%s" % (obsdate, os.path.basename(v))
        # prefer a pdf link when available, fall back to the png itself
        impathlink = "/data/%s/%s" % (obsdate,
                                      os.path.basename(v.replace('.png',
                                                                 '.pdf')))
        if not os.path.exists(impathlink):
            impathlink = impath
        div_str += """<div class="col-md-{0}">
        <div class="thumbnail">
        <a href="{1}">
        <img src="{2}" width="{3}px" height="{4}px">
        </a>
        </div>
        </div>""".format(2, impathlink, impath, 400, 400)
    div_str += "</div>"
    sedm_dict['sci_data'] += div_str
    # To get ifu products we first look to see if a what.list file has been
    # created. This way we will know which files to add to our dict and
    # whether the user has permissions to see the file
    if not os.path.exists(os.path.join(obsdir, 'what.list')):
        return {'message': 'Could not find summary file (what.list) for %s UT' %
                           os.path.basename(os.path.normpath(obsdir))}
    # Go through the what list and return all non-calibration entries
    with open(os.path.join(obsdir, 'what.list')) as f:
        what_list = f.read().splitlines()
    if os.path.exists(os.path.join(obsdir, 'abpairs.tab')):
        what_list.extend(get_ab_what(obsdir))
        what_list.sort()
    science_list = []
    standard_list = []
    for targ in what_list:
        if 'Calib' in targ:
            pass
        elif '[A]' in targ or '[B]' in targ or 'STD' in targ:
            # standards ('STD') are collected here too and handled by the
            # STD branch further below (a separate unreachable 'STD' elif
            # was removed)
            science_list.append(targ)
        else:
            # There shouldn't be anything here but should put something in
            # later to verify this is the case
            pass
    # Now we go through and make sure the user is allowed to see this target
    show_list = []
    if len(science_list) >= 1:
        allocation_id_list = get_allocations_user(user_id=user_id,
                                                  return_type='list')
        for sci_targ in science_list:
            object_id = False
            # use an empty list so the loop below is safe even when no
            # requests are found (a False init would raise TypeError)
            target_requests = []
            # Start by pulling up all request that match the science target
            targ_name = sci_targ.split(':')[1].split()[0]
            if 'STD' not in targ_name:
                # 1. Get the object id
                object_ids = db.get_object_id_from_name(targ_name)
                if len(object_ids) == 1:
                    object_id = object_ids[0][0]
                elif len(object_ids) > 1:
                    # TODO what really needs to happen here is that we need to
                    # TODO cont: find the id that is closest to the obsdate.
                    # TODO cont: For now I am just going to use last added
                    object_id = object_ids[-1][0]
                elif not object_ids and ('at' in targ_name.lower()
                                         or 'sn' in targ_name.lower()):
                    # sometimes it's at 2018abc not at2018abc in the db
                    targ_name = targ_name[:2] + ' ' + targ_name[2:]
                    object_ids = db.get_object_id_from_name(targ_name)
                    try:
                        object_id = object_ids[-1][0]
                    except IndexError:
                        object_id = False
                # If we are not the admin then we need to check
                # if the user can see the object
                if user_id not in [2, 20200227202025683]:
                    if object_id:
                        target_requests = db.get_from_request(
                            values=['allocation_id'],
                            where_dict={'object_id': object_id,
                                        'status': 'COMPLETED'})
                        if not target_requests:
                            target_requests = db.get_from_request(
                                values=['allocation_id'],
                                where_dict={'object_id': object_id,
                                            'status': 'OBSERVED'})
                    # Right now I am only seeing if there exists a match
                    # between allocations of all request. It's possible the
                    # request could have been made by another group as
                    # another follow-up and thus the user shouldn't be able
                    # to see it. This should be able to be fixed once all
                    # request are listed in the headers of the science images.
                    for req in target_requests:
                        if req[0] in allocation_id_list:
                            show_list.append((sci_targ, targ_name))
                        else:
                            print("You can't see this at allocation id list")
                else:
                    # admin accounts see everything
                    show_list.append((sci_targ, targ_name))
            else:
                targ_name = sci_targ.split(':')[1].split()[0].replace('STD-',
                                                                      '')
                show_list.append((sci_targ, targ_name))
    if len(standard_list) >= 1:
        for std_targ in standard_list:
            targ_name = std_targ.split(':')[1].split()[0].replace('STD-', '')
            show_list.append((std_targ, targ_name))
    # We have our list of targets that we can be shown, now lets actually find
    # the files that we will show on the web page. To make this backwards
    # compatible I have to look for two types of files
    if len(show_list) >= 1:
        science_dict = {}
        count = 0
        div_str = ''
        for targ in show_list:
            targ_params = targ[0].split()
            fits_file = targ_params[0].replace('.fits', '')
            name = targ[1]
            image_list = (glob.glob('%sifu_spaxels_*%s*.png' % (obsdir,
                                                                fits_file)) +
                          glob.glob('%simage_%s*.png' % (obsdir, name)))
            spec_list = (glob.glob('%s%s_SEDM.png' % (obsdir, name)) +
                         glob.glob('%sspec_forcepsf*%s*.png' %
                                   (obsdir, fits_file)) +
                         glob.glob('%sspec_auto*%s*.png' % (obsdir, fits_file)))
            e3d_list = (glob.glob('%se3d*%s*.fits' % (obsdir, fits_file)))
            spec_ascii_list = (glob.glob('%sspec_forcepsf*%s*.txt' %
                                         (obsdir, fits_file)) +
                               glob.glob('%sspec_auto*%s*.txt' % (obsdir,
                                                                  fits_file)))
            fluxcals = (glob.glob('%sfluxcal_*%s*.fits' % (obsdir, fits_file)))
            if name not in science_dict:
                science_dict[name] = {'image_list': image_list,
                                      'spec_list': spec_list,
                                      'e3d_list': e3d_list,
                                      'spec_ascii_list': spec_ascii_list,
                                      'fluxcals': fluxcals}
            else:
                # We do this to handle cases where there are two or more of
                # the same object name
                science_dict[name+'_xRx_%s' % str(count)] = {
                    'image_list': image_list, 'spec_list': spec_list,
                    'e3d_list': e3d_list, 'spec_ascii_list': spec_ascii_list,
                    'fluxcals': fluxcals}
                count += 1
        # Alright now we build the table that will show the spectra,
        # image file and classification.
        for obj, obj_data in science_dict.items():
            if '_xRx_' in obj:
                obj = obj.split('_xRx_')[0]
            if 'ZTF' in obj:
                obj_link = ('<a href="https://fritz.science/source/'
                            '%s">%s</a>' %
                            (obj, obj))
                div_str += """<div class="row">"""
                div_str += """<h4>%s</h4>""" % obj_link
            else:
                div_str += """<div class="row">"""
                div_str += """<h4>%s</h4>""" % obj
            # (fixed: these three templates used a literal '{2}' left over
            # from a .format call inside the col-md- class name)
            if obj_data['e3d_list']:
                for j in obj_data['e3d_list']:
                    impath = "/data/%s/%s" % (obsdate, os.path.basename(j))
                    div_str += ('<div class="col-md-2">'
                                '<a href="%s">E3D File</a>'
                                '</div>' % impath)
            if obj_data['spec_ascii_list']:
                for j in obj_data['spec_ascii_list']:
                    impath = "/data/%s/%s" % (obsdate, os.path.basename(j))
                    div_str += ('<div class="col-md-2">'
                                '<a href="%s">ASCII Spec File</a>'
                                '</div>' % impath)
            if obj_data['fluxcals']:
                for j in obj_data['fluxcals']:
                    impath = "/data/%s/%s" % (obsdate, os.path.basename(j))
                    div_str += ('<div class="col-md-2">'
                                '<a href="%s">Flux calibration file</a>'
                                '</div>' % impath)
            # ToDO: Grab data from somewhere to put in the meta data column
            if obj_data['image_list']:
                for i in obj_data['image_list']:
                    impath = "/data/%s/%s" % (obsdate, os.path.basename(i))
                    impathlink = "/data/%s/%s" % (
                        obsdate, os.path.basename(i.replace('.png', '.pdf')))
                    if not os.path.exists(impathlink):
                        impathlink = impath
                    div_str += """<div class="col-md-{0}">
                    <div class="thumbnail">
                    <a href="{1}">
                    <img src="{2}" width="{3}px" height="{4}px">
                    </a>
                    </div>
                    </div>""".format(2, impathlink, impath, 400, 400)
            if show_finder:
                # Check if finders exists in redux directory and if not then
                # log at the old phot directory location
                path1 = os.path.join(redux_dir, obsdate, 'finders')
                path2 = os.path.join(phot_dir, obsdate, 'finders')
                if os.path.exists(path1):
                    finder_path = path1
                else:
                    finder_path = path2
                if os.path.exists(finder_path):
                    finder_img = glob.glob(finder_path + '/*%s*.png' % obj)
                    if finder_img:
                        impathlink = "/data/%s/%s" % (
                            obsdate, os.path.basename(finder_img[-1]))
                        div_str += """<div class="col-md-{0}">
                        <div class="thumbnail">
                        <a href="{1}">
                        <img src="{2}" width="{3}px" height="{4}px">
                        </a>
                        </div>
                        </div>""".format(4, impathlink,
                                         impathlink, 250, 250)
            if obj_data['spec_list']:
                for i in obj_data['spec_list']:
                    impath = "/data/%s/%s" % (obsdate, os.path.basename(i))
                    impathlink = "/data/%s/%s" % (
                        obsdate, os.path.basename(i.replace('.png', '.pdf')))
                    if not os.path.exists(impathlink):
                        impathlink = impath
                    div_str += """<div class="col-lg-{0}">
                    <div class="thumbnail">
                    <a href="{1}">
                    <img src="{2}" width="{3}px" height="{4}px">
                    </a>
                    </div>
                    </div>""".format(4, impathlink, impath, 400, 400)
            # close this object's bootstrap row
            div_str += "</div>"
        sedm_dict['sci_data'] += div_str
    return sedm_dict
def get_rc_products(obsdate=None, product=None, user_id=None, camera_type='rc'):
    """
    Collect rainbow-camera (RC) image products for a night and build the
    HTML card grid to display them.

    :param obsdate: 'YYYYMMDD' string or list of one, None for current UT
    :param product: product category ('science', 'acquisition', or one of
        the raw png dirs); defaults to 'science'
    :param user_id: requesting user's id (2 = admin gets the rcwhat report)
    :param camera_type: currently unused
    :return: dict with 'obsdate' and HTML (or a not-found message) in 'data'
    """
    if user_id:
        pass
    if camera_type:
        pass
    # print(product, 'product')
    # raw-product directories that live under <night>/pngraw/
    raw_png_dir = ['acquisition', 'bias', 'dome', 'focus',
                   'guider_images', 'guider_movies', 'twilight',
                   'science_raw']
    sedm_dict = {}
    if not obsdate:
        obsdate = datetime.datetime.utcnow().strftime("%Y%m%d")
        sedm_dict['obsdate'] = obsdate
    elif isinstance(obsdate, list):
        obsdate = obsdate[0]
        sedm_dict['obsdate'] = obsdate
    if not product:
        product = 'science'
    display_dict = {}
    ext = '*.png'
    sci_path = None
    if product.lower() == 'science':
        sci_path = os.path.join(new_phot_dir, obsdate, 'reduced', 'png')
        if not os.path.exists(sci_path):
            sedm_dict['data'] = "No %s images found" % product
    elif product.lower() == 'acquisition':
        # acquisition frames share the reduced png directory
        sci_path = os.path.join(new_phot_dir, obsdate, 'reduced', 'png')
        if not os.path.exists(sci_path):
            sedm_dict['data'] = "No %s images found" % product
    elif product.lower() in raw_png_dir:
        if 'guider' in product.lower():
            # guider_movies are gifs; both map to the 'guider' directory
            p_split = product.split("_")
            if p_split[-1] == 'movies':
                ext = '*.gif'
            product = 'guider'
        sci_path = os.path.join(new_phot_dir, obsdate,
                                'pngraw', product.lower().replace('_raw', ''))
        if not os.path.exists(sci_path):
            print("Path doesn't exist")
    find_path = os.path.join(sci_path, ext)
    files = glob.glob(find_path)
    for file in files:
        base_name = os.path.basename(file).replace(".png", "")
        if product.lower() == 'science' and 'ACQ' in base_name:
            continue
        elif product.lower() == 'science':
            # keep only frames whose object filter matches the image filter
            filters = base_name.split("_")
            if filters[-1] == "0":
                objfilt = filters[-3]
                imgfilt = filters[-2]
            else:
                objfilt = filters[-2]
                imgfilt = filters[-1]
            if objfilt == imgfilt:
                if 'data' in display_dict:
                    display_dict['data'].append(file)
                else:
                    display_dict['data'] = [file]
        elif product.lower() == 'acquisition':
            # only r-band (or NA) acquisition frames are shown
            if 'ACQ' not in base_name:
                continue
            elif "_r_r" not in base_name and "_NA_r" not in base_name:
                continue
            else:
                if 'data' in display_dict:
                    display_dict['data'].append(file)
                else:
                    display_dict['data'] = [file]
        else:
            if 'data' in display_dict:
                display_dict['data'].append(file)
            else:
                display_dict['data'] = [file]
    div_str = ''
    if user_id == 2:  # SEDM_admin
        obsdir = os.path.join(new_phot_dir, obsdate)
        if os.path.exists(os.path.join(obsdir, 'rcwhat.txt')):
            wha_report = """<a href="http://pharos.caltech.edu/data_r/redux/phot/{0}/rcwhat.txt" type="plain/text">RCWhat</a>""".format(obsdate)
            div_str += """<div class="row">"""
            div_str += """<h4>{0}</h4>""".format(wha_report)
            div_str += "</div>"
    if 'data' in display_dict:
        count = 100
        for fil in sorted(display_dict['data']):
            impath = "/data_r/%s" % fil.replace(base_dir, '')
            if 'reduced' in fil:
                # link the fits (or fits.gz) that the png was made from
                fits_suffix = '.fits'
                if os.path.exists(fil.replace('/png', '').replace('.png',
                                                                  '.fits.gz')):
                    fits_suffix = '.fits.gz'
                fil = fil.replace(base_dir, '')
                impathlink = "/data_r/%s" % fil.replace('/png/', '/').replace(
                    '.png', fits_suffix)
            elif 'pngraw' in fil and '.gif' not in fil:
                base_link = fil.replace(base_dir, '').split('/pngraw/')[0]
                fits_suffix = '.fits'
                png_suffix = '_all.png'
                # bias/flat pngs are not multi-panel '_all' images
                if 'Bias' in fil or 'Flat' in fil:
                    png_suffix = '.png'
                if os.path.exists(
                        os.path.join(
                            new_phot_dir, obsdate,
                            os.path.basename(fil).replace(png_suffix,
                                                          '.fits.gz'))):
                    fits_suffix = '.fits.gz'
                fil = fil.replace(base_dir, '')
                impathlink = "/data_r/%s" % \
                             os.path.join(base_link,
                                          os.path.basename(fil).replace(
                                              png_suffix, fits_suffix))
            else:
                impathlink = "/data_r/%s" % fil.replace(base_dir, '')
            # NOTE(review): the img src uses "{1}?image{4}" (missing '='
            # after image) and format arg {3} is never used — harmless as a
            # cache buster, but confirm before touching the template
            div_str += """<div class="col-sm-4"><div class="card">
            <a href="{1}?image={4}" data-toggle="lightbox" data-gallery="example-gallery">
            <img style="width:300px" class="card-img-top" src="{1}?image{4}" alt="Card image">
            </a>
            <div class="cardbody">
            <h6 class="card-title">{2}</h6>
            <a href="http://pharos.caltech.edu{0}" class="btn btn-primary">
            Download
            </a>
            </div>
            </div></div>""".format(impathlink, impath,
                                   os.path.basename(fil), impath,
                                   count)
            count += 1
        div_str += ''
        sedm_dict['data'] = div_str
    else:
        sedm_dict['data'] = "No %s images found" % product
    # print(sedm_dict)
    return sedm_dict
###############################################################################
# THIS SECTION HANDLES THE ACTIVE_VISIBILITIES PAGE. #
# KEYWORD:VISIBILITIES                                                        #
###############################################################################
def get_pending_visibility(userid):
    """Gather the user's PENDING requests and a visibility plot for them.

    :param userid: id of the user whose pending requests are shown
    :return: dict with the date window, a fancy 'pending' table, and the
        bokeh 'script'/'div' components of the visibility plot
    """
    sedm_dict = {
        'enddate': datetime.datetime.utcnow() + datetime.timedelta(days=1),
        'inidate': datetime.datetime.utcnow() - datetime.timedelta(days=3,
                                                                   hours=8),
    }
    # 1. Get a dataframe of all requests for the current user
    user_reqs = get_requests_for_user(userid, sedm_dict['inidate'],
                                      sedm_dict['enddate'])
    # keep only the requests that are still pending
    pending_reqs = user_reqs[user_reqs['status'] == 'PENDING']
    sedm_dict['pending'] = {
        'table': fancy_request_table(pending_reqs),
        'title': 'Pending Requests',
    }
    sedm_dict['script'], sedm_dict['div'] = plot_visibility(userid, sedm_dict)
    return sedm_dict
def plot_visibility(userid, sedm_dict, obsdate=None):
    """
    plots visibilities for pending/active requests at the current date.
    Will be adapted to plot previous observations and arbitrary objects userid:
    user whose allocations will be shown in color with details. Others will be
    greyed out
    userid: <int>
    sedm_dict: <dict>
        should have ['active']['table'] and ['enddate'] and ['inidate']
    obsdate: <str> YYYYMMDD
        if "None", will use current date
    returns: components of a bokeh figure with the appropriate plot
    """
    # one distinct color per allocation the user may see
    allocpalette = ['#1f78b4', '#33a02c', '#e31a1c', '#ff7f00', '#6a3d9a',
                    '#b15928', '#a6cee3', '#b2df8a', '#fb9a99', '#fdbf6f',
                    '#cab2d6', '#ffff99']
    # query as admin (user 2) so every request in the window is retrieved
    reqs = get_requests_for_user(2,
                                 sedm_dict['inidate'],
                                 sedm_dict['enddate'])  # admin
    active = reqs[(reqs['status'] == 'PENDING') | (reqs['status'] == 'ACTIVE')]
    # ['allocation', 'object', 'RA', 'DEC', 'start date', 'end date',
    #  'priority', 'status', 'lastmodified', 'obs_seq', 'exptime', 'UPDATE']
    allowed_allocs = get_allocations_user(userid)
    # grey out allocations this user may not see by renaming them 'other'
    # NOTE(review): 'active' is a filtered slice of 'reqs'; the in-place
    # mask can trigger pandas SettingWithCopyWarning
    active['allocation'].mask(~np.in1d(active['allocation'],
                                       allowed_allocs['allocation']),
                              other='other', inplace=True)
    programs = {i['allocation']: i['program']
                for _, i in allowed_allocs.iterrows()}
    programs['other'] = 'other'
    # this needs to be alphabetical for the legend to look correct
    # NOTE(review): sort_values is not in-place and its result is discarded,
    # so this line currently has no effect — confirm intent
    active.sort_values('allocation')
    p = figure(plot_width=700, plot_height=500, toolbar_location='above',
               y_range=(0, 90), y_axis_location="right")
    # setup with axes, sun/moon, frames, background
    # TODO Dima says to never ever use SkyCoord in production code
    palomar_mountain = EarthLocation(lon=243.1361*u.deg, lat=33.3558*u.deg,
                                     height=1712*u.m)
    utcoffset = -7 * u.hour  # Pacific Daylight Time
    # plotting a single object, or the pending objects in future
    if obsdate is None:
        time = (Time.now() - utcoffset).datetime  # date is based on local time
        time = Time(datetime.datetime(time.year, time.month, time.day))
    else:  # past observations on a particular night
        time = Time(datetime.datetime(int(obsdate[:4]), int(obsdate[4:6]),
                                      int(obsdate[6:8])))
        # all_requests = reqs[reqs['status'] == 'COMPLETED']
        # all_requests = all_requests[time - 12 * u.hour
        #                             <= all_requests['startdate']
        #                             < time + 12 * u.hour]
    midnight = time - utcoffset  # 7am local time of correct date, midnight UTC
    delta_midnight = np.linspace(-8, 8, 500) * u.hour
    t = midnight + delta_midnight
    # human-readable local clock labels for the tooltips
    abstimes = np.asarray([i.datetime.strftime('%I:%M %p')
                           for i in t + utcoffset])
    frame = AltAz(obstime=t, location=palomar_mountain)
    sun_alt = get_sun(t).transform_to(frame).alt
    moon_alt = get_moon(t).transform_to(frame).alt
    # shading for nighttime and twilight
    dark_times = delta_midnight[sun_alt < 0].value
    twilit_times = delta_midnight[sun_alt < -18 * u.deg].value
    plotted_times = delta_midnight[sun_alt < 5 * u.deg].value
    twilight = BoxAnnotation(left=min(twilit_times), right=max(twilit_times),
                             bottom=0, fill_alpha=0.15, fill_color='black',
                             level='underlay')
    night = BoxAnnotation(left=min(dark_times), right=max(dark_times),
                          bottom=0, fill_alpha=0.25, fill_color='black',
                          level='underlay')
    earth = BoxAnnotation(top=0, fill_alpha=0.8, fill_color='sienna')
    p.add_layout(night)
    p.add_layout(twilight)
    p.add_layout(earth)
    # sun
    # p.line(delta_midnight, sun_alt, line_color='red', name="Sun",
    #        legend='Sun', line_dash='dashed')
    # moon
    p.line(delta_midnight, moon_alt, line_color='yellow', line_dash='dashed',
           name="Moon", legend='Moon')
    # labels and axes
    p.title.text = "Visibility for %s UTC" % midnight
    p.xaxis.axis_label = "Hours from PDT Midnight"
    p.x_range.start = min(plotted_times)
    p.x_range.end = max(plotted_times)
    p.yaxis.axis_label = "Airmass"
    # primary airmass label on right (ticks placed at equivalent altitudes)
    airmasses = (1.01, 1.1, 1.25, 1.5, 2., 3., 6.)
    ticker = [90 - np.arccos(1./i) * 180/np.pi for i in airmasses]
    p.yaxis.ticker = ticker
    p.yaxis.major_label_overrides = {tick: str(airmasses[i])
                                     for i, tick in enumerate(ticker)}
    # add supplementary alt label on left
    p.extra_y_ranges = {"altitude": Range1d(0, 90)}
    p.add_layout(LinearAxis(y_range_name="altitude",
                            axis_label='Altitude [deg]'), 'left')
    ##########################################################################
    # adding data from the actual objects
    # objs = SkyCoord(np.array(ras, dtype=np.float),
    #                 np.array(decs, dtype=np.float), unit="deg")
    approx_midnight = int(Time.now().jd - .5) + .5 - utcoffset.value/24.
    palo_sin_lat = 0.549836545
    palo_cos_lat = 0.835272275
    palo_long = 243.1362
    alloc_color = {}
    for i, val in allowed_allocs.iterrows():
        alloc_color[val['allocation']] = allocpalette[i % len(allocpalette)]
    alloc_color['other'] = 'lightgray'
    tooltipped = []  # things with tooltips
    # make it #name when we get to bokeh 0.13
    tooltips = [('obj', '@name'),
                ('time', '@abstime'),
                ('altitude', u"@alt\N{DEGREE SIGN}"),
                ('airmass', '@airmass')]
    for _, req in active.iterrows():
        # iterrows doesn't preserve datatypes and turns ra, dec into decimals?
        req['ra'] = float(req['RA'])
        req['dec'] = float(req['DEC'])
        color = alloc_color[req['allocation']]
        # vvv I got this formula from some website for the navy
        # but forgot to copy the url
        alt = 180 / np.pi * np.arcsin(palo_cos_lat *
                                      np.cos(np.pi/180 *
                                             (palo_long - req['ra'] + 15 *
                                              (18.697374558 + 24.06570982 *
                                               (delta_midnight.value/24. +
                                                approx_midnight - 2451545)))) *
                                      np.cos(req['dec'] * np.pi/180) +
                                      palo_sin_lat * np.sin(req['dec'] *
                                                            np.pi/180))
        airmass = 1./np.cos((90 - alt) * np.pi/180)
        source = ColumnDataSource(dict(times=delta_midnight, alt=alt,
                                       airmass=airmass, abstime=abstimes,
                                       priority=np.full(len(t),
                                                        int(req['priority'])),
                                       alloc=np.full(len(t),
                                                     req['allocation'][6:]),
                                       name=np.full(len(abstimes),
                                                    req['object'])))
        # delete the name when we get to bokeh 0.13
        if len(active) == 1:  # single object
            legend = req['object']
            line_width = 5
        else:
            legend = '{}'.format(programs[req['allocation']])
            # tooltips += [('priority', '@priority'),
            #              ('allocation', '@alloc')]
        # plot that highlights observed part of the night
        if req['status'] == 'COMPLETED':
            # full path of the night
            dotted = p.line('times', 'alt', color=color, source=source,
                            line_dash='2 2', name=req['object'],
                            line_width=1, legend=legend)
            # manually crop the source so only thick observed
            # part has tooltips
            endtime = req['lastmodified']
            # TODO sometimes it's 2ifu or no ifu
            exptime = {req['obs_seq'][i]: req['exptime'][i]
                       for i in range(len(req['obs_seq']))}['1ifu']
            initime = endtime - exptime * u.second
            mask = np.logical_and(
                delta_midnight + midnight + utcoffset > initime,
                delta_midnight + midnight + utcoffset < endtime)
            source = ColumnDataSource(pd.DataFrame(source.data)[mask])
            # all it changes is the line width
            line_width = int(req['priority'] + 3)
        else:
            line_width = int(req['priority'])
        # NOTE(review): ''.format(...) always yields '' — probably meant
        # '{}'.format(req['object']); confirm before changing
        path = p.line('times', 'alt', color=color, source=source,
                      name=''.format(req['object']),
                      line_width=line_width, legend=legend)
        if not req['allocation'] == 'other':
            tooltipped.append(path)
    p.legend.click_policy = 'hide'
    p.legend.location = 'bottom_right'
    p.add_tools(HoverTool(renderers=tooltipped, tooltips=tooltips))
    curdoc().add_root(p)
    curdoc().title = 'Visibility plot'
    return components(p)
###############################################################################
# THIS SECTION IS THE WEATHER STATS SECTION. #
# KEYWORD:WEATHER_STATS #
###############################################################################
def get_weather_stats(obsdate=None):
    """
    Build the weather-statistics plot and a status message for a night.

    :param obsdate: optional date string (dashes allowed, must contain
        YYYYMMDD); when omitted the most recent night with a stats log
        (searching up to 100 days back) is used
    :return: dict with bokeh 'script' and 'div' components plus a 'message'
    """
    message = ""
    if not obsdate:
        # get the weather stats for the last night that has a log
        statsfile, mydate = search_stats_file()
        if statsfile is not None and mydate is not None:
            stats_plot = plot_stats(statsfile, mydate)
            if stats_plot is None:
                message += " No statistics log found up to 100 days prior to" \
                           " today... Weather has been terrible lately!"
                script, div = None, None
            else:
                # the night's date is the grandparent directory name
                message += " Weather statistics for last opened day: %s" % (
                    os.path.basename(os.path.dirname(os.path.dirname(
                        statsfile))))
                script, div = components(stats_plot)
        else:
            script, div = None, None
    else:
        mydate_in = obsdate.replace("-", "")
        # Just making sure that we have only allowed digits in the date
        mydate = re.findall(r"(2\d{3}[0-1]\d[0-3]\d)", mydate_in)
        if len(mydate) == 0:
            message += "Incorrect format for the date! Your input is: %s." \
                       " Shall be YYYYMMDD. \n" % mydate_in
            script, div = "", ""
        else:
            mydate = mydate[0]
            message = ""
            statsfile, mydate_out = search_stats_file(mydate)
            # plot once; plot_stats falls back to P18-only data when
            # statsfile is None (the original redundantly called it a
            # second time in the else branch)
            stats_plot = plot_stats(statsfile, mydate)
            if not statsfile:
                message = message + "No statistics log found for the date %s." \
                                    " Showing P18 data." % mydate
            else:
                message = message + "Weather statistics for selected day: %s" \
                          % mydate
            script, div = components(stats_plot)
    return {'script': script, 'div': div, 'message': message}
def search_stats_file(mydate=None):
    """
    Returns the last stats file that is present in the system according to
    the present date. It also returns a message stating what date that was.

    :param mydate: optional 'YYYYMMDD' string; when given, only that night
        is checked (old phot dir first, then the new one)
    :return: (path, date-string) of a non-empty stats.log, or (None, None)
    """
    if mydate:
        # Explicit date: try the legacy phot dir first. If the legacy log
        # exists (even empty) we decide from it alone; only when it is
        # absent do we consult the new phot dir.
        legacy_log = os.path.join(phot_dir, mydate, "stats/stats.log")
        if os.path.exists(legacy_log):
            if os.path.getsize(legacy_log) > 0:
                return legacy_log, mydate
            return None, None
        new_log = os.path.join(new_phot_dir, mydate, "stats/stats.log")
        if os.path.exists(new_log):
            if os.path.getsize(new_log) > 0:
                return new_log, mydate
            return None, None
        return None, None
    # No date given: walk backwards up to 100 days looking for a
    # non-empty stats.log in either directory tree.
    probe_date = datetime.datetime.utcnow()
    for _ in range(100):
        date_str = "%d%02d%02d" % (probe_date.year, probe_date.month,
                                   probe_date.day)
        legacy_log = os.path.join(phot_dir, date_str, "stats/stats.log")
        new_log = os.path.join(new_phot_dir, date_str, "stats/stats.log")
        if os.path.exists(legacy_log):
            # a present-but-empty legacy log shadows the new one for
            # that night (matches the original elif chain)
            if os.path.getsize(legacy_log) > 0:
                return legacy_log, date_str
        elif os.path.exists(new_log):
            if os.path.getsize(new_log) > 0:
                return new_log, date_str
        probe_date -= datetime.timedelta(days=1)
    return None, None
def load_p48seeing(obsdate):
    """Return a DataFrame of P18 seeing measurements for *obsdate*.

    :param obsdate: night identifier passed through to get_p18obsdata
    :return: DataFrame with 'date' (observation times) and 'seeing' columns
    """
    times, seeing_values = get_p18obsdata(obsdate)
    return pd.DataFrame({'date': np.array(times), 'seeing': seeing_values})
def load_stats(statsfile='stats.log'):
    """Load a nightly stats.log into a DataFrame with local timestamps.

    :param statsfile: path to the comma-separated stats log (no header)
    :return: DataFrame with 'date' (local time, converted from the 'jd'
        column) plus the per-image quality columns
    """
    data = pd.read_csv(statsfile, header=None,
                       names=['path', 'obj', 'jd', 'ns', 'fwhm', 'ellipticity',
                              'bkg', 'airmass', 'in_temp', 'imtype', 'out_temp',
                              'in_hum'])
    jds = data['jd']
    t = Time(jds, format='jd', scale='utc')
    date = t.utc.datetime
    # local-vs-UTC offset, rounded up to whole seconds and expressed as a
    # fraction of a day (timedelta's first positional arg is days)
    day_frac_diff = datetime.timedelta(
        np.ceil((datetime.datetime.now() -
                 datetime.datetime.utcnow()).total_seconds()) / 3600 / 24)
    local_date = date + day_frac_diff
    data2 = data.assign(localdate=local_date)
    # (a previous data2.set_index('localdate') call was a no-op — its
    # return value was discarded — so it has been removed)
    return pd.DataFrame(
        {'date': data2['localdate'], 'ns': data2['ns'], 'fwhm': data2['fwhm'],
         'ellipticity': data2['ellipticity'], 'bkg': data2['bkg'],
         'airmass': data2['airmass'], 'in_temp': data2['in_temp'],
         'imtype': data2['imtype'], 'out_temp': data2['out_temp'],
         'in_hum': data2['in_hum']})
def plot_stats(statsfile, mydate):
    """Build the Bokeh layout of nightly statistics plots.

    :param statsfile: path to the night's stats.log; falsy when no
        statistics exist, in which case only the P18 seeing panel is drawn
    :param mydate: observation date string, forwarded to load_p48seeing
    :return: the Bokeh layout that was added to the current document
    """
    # Interactive source plus a 'static' copy: selections act on `source`
    # while the lines are drawn from `source_static` so they stay intact.
    source = ColumnDataSource(
        data=dict(date=[], ns=[], fwhm=[], ellipticity=[], bkg=[], airmass=[],
                  in_temp=[], imtype=[], out_temp=[], in_hum=[]))
    source_static = ColumnDataSource(
        data=dict(date=[], ns=[], fwhm=[], ellipticity=[], bkg=[], airmass=[],
                  in_temp=[], imtype=[], out_temp=[], in_hum=[]))
    # Per-image-type views, used below to color the FWHM points by type.
    view_science = CDSView(source=source,
                           filters=[GroupFilter(column_name='imtype',
                                                group='SCIENCE')])
    view_acquisition = CDSView(source=source,
                               filters=[GroupFilter(column_name='imtype',
                                                    group='ACQUISITION')])
    view_guider = CDSView(source=source,
                          filters=[GroupFilter(column_name='imtype',
                                               group='GUIDER')])
    view_focus = CDSView(source=source,
                         filters=[GroupFilter(column_name='imtype',
                                              group='FOCUS')])
    source_p48 = ColumnDataSource(data=dict(date=[], seeing=[]))

    def update(selected=None):
        """Refresh all data sources from the stats file and P18 seeing log."""
        if selected:
            pass
        if statsfile:
            data = load_stats(statsfile)
            source.data = source.from_df(data[['date', 'ns', 'fwhm',
                                               'ellipticity', 'bkg', 'airmass',
                                               'in_temp', 'imtype', 'out_temp',
                                               'in_hum']])
            source_static.data = source.data
        p48 = load_p48seeing(mydate)
        source_p48.data = source_p48.from_df(p48[['date', 'seeing']])
        # source_static_p48 is defined after this closure but before
        # update() is first called, so the name resolves at call time.
        source_static_p48.data = source_p48.data

    source_static_p48 = ColumnDataSource(data=dict(date=[], seeing=[]))
    tools = 'pan,box_zoom,reset'
    # P18 seeing panel -- always drawn, with or without a stats file.
    p48seeing = figure(plot_width=425, plot_height=250, tools=tools,
                       x_axis_type='datetime', active_drag="box_zoom")
    p48seeing.circle('date', 'seeing', source=source_static_p48, color="black")
    p48seeing.title.text = "P18 seeing [arcsec]"
    if statsfile:
        # One panel per statistic; every panel shares ns.x_range so that
        # zooming/panning one panel moves all of them together.
        ns = figure(plot_width=425, plot_height=250, tools=tools,
                    x_axis_type='datetime', active_drag="box_zoom")
        ns.line('date', 'ns', source=source_static)
        ns.circle('date', 'ns', size=1, source=source, color=None,
                  selection_color="orange")
        ns.title.text = "Number of bright sources extracted"
        bkg = figure(plot_width=425, plot_height=250, tools=tools,
                     x_axis_type='datetime', active_drag="box_zoom")
        bkg.x_range = ns.x_range
        bkg.line('date', 'bkg', source=source_static)
        bkg.circle('date', 'bkg', size=1, source=source, color=None,
                   selection_color="orange")
        bkg.title.text = "Background (counts)"
        temp = figure(plot_width=425, plot_height=250, tools=tools,
                      x_axis_type='datetime', active_drag="box_zoom")
        temp.x_range = ns.x_range
        temp.line('date', 'in_temp', source=source_static, color='blue',
                  legend="Inside")
        temp.line('date', 'out_temp', source=source_static, color='green',
                  legend="Outside")
        temp.circle('date', 'in_temp', size=1, source=source, color=None,
                    selection_color="orange")
        temp.title.text = "Temperature [C]"
        fwhm = figure(plot_width=425, plot_height=250, tools=tools,
                      x_axis_type='datetime', active_drag="box_zoom")
        fwhm.x_range = ns.x_range
        # FWHM colored by image type via the CDSView filters built above.
        fwhm.circle('date', 'fwhm', source=source_static, color="green",
                    legend="Focus", view=view_focus)
        fwhm.circle('date', 'fwhm', source=source_static, color="red",
                    legend="Science", view=view_science)
        fwhm.circle('date', 'fwhm', source=source_static, color="blue",
                    legend="Acquisition", view=view_acquisition)
        fwhm.circle('date', 'fwhm', source=source_static, color="black",
                    legend="Guider", view=view_guider)
        fwhm.circle('date', 'fwhm', size=1, source=source, color=None,
                    selection_color="orange")
        fwhm.title.text = "P60 FWHM [arcsec]"
        airmass = figure(plot_width=425, plot_height=250, tools=tools,
                         x_axis_type='datetime', active_drag="box_zoom")
        airmass.x_range = ns.x_range
        airmass.line('date', 'airmass', source=source_static)
        airmass.circle('date', 'airmass', size=1, source=source, color=None,
                       selection_color="orange")
        airmass.title.text = "Airmass"
        ellipticity = figure(plot_width=425, plot_height=250, tools=tools,
                             x_axis_type='datetime',
                             active_drag="box_zoom")
        ellipticity.x_range = ns.x_range
        ellipticity.line('date', 'ellipticity', source=source_static)
        ellipticity.circle('date', 'ellipticity', size=1, source=source,
                           color=None, selection_color="orange")
        ellipticity.title.text = "Ellipticity"
        humidity = figure(plot_width=425, plot_height=250, tools=tools,
                          x_axis_type='datetime', active_drag="box_zoom")
        humidity.x_range = ns.x_range
        humidity.line('date', 'in_hum', source=source_static)
        humidity.circle('date', 'in_hum', size=1, source=source, color=None,
                        selection_color="orange")
        humidity.title.text = "Inside Humidity [%]"
        p48seeing.x_range = ns.x_range
        # Three-column page layout.
        left = column(fwhm, p48seeing, airmass)
        center = column(ellipticity, ns, bkg, )
        right = column(temp, humidity)
        layout = row(left, center, right)
    else:
        # No statistics for the night: show only the seeing panel.
        layout = row(column(p48seeing))
    # initialize
    update()
    curdoc().add_root(layout)
    curdoc().title = "Stats"
    return layout
def plot_not_found_message(day):
    """Render a placeholder Bokeh page saying no statistics exist for *day*."""
    fig = figure(plot_width=900, plot_height=450, x_range=[0, 900],
                 y_range=[0, 450])
    # Flat dark background image so the message stands out.
    fig.image(image=[np.zeros([900, 450]) + 0.1], x=0, y=0, dw=900, dh=450)
    message = Label(x=50, y=225, x_units='screen', y_units='screen',
                    text='No statistics found for today \n '
                         '(likely we were weathered out...)')
    fig.add_layout(message)
    fig.title.text = "Statistics not found for day %s" % day
    page = column(fig)
    doc = curdoc()
    doc.add_root(page)
    doc.title = "Stats not found"
###############################################################################
# THIS SECTION IS A COMMON UTILITIES SECTION #
# KEYWORD:UTILITIES #
###############################################################################
def get_config_paths():
    """Return the configured SEDM data directories, nested under 'path'."""
    paths = {
        'path_archive': redux_dir,
        'path_phot': phot_dir,
        'path_redux_phot': new_phot_dir,
        'path_raw': raw_dir,
        'path_requests': requests_dir,
    }
    return {'path': paths}
def get_marshal_id(marshal='growth', request_id=None):
    """Look up the marshal id and external id recorded for a request.

    :param marshal: marshal backend; only 'growth' triggers the validity
        checks below (any other value makes the function return None)
    :param request_id: database id of the request; coerced to int
    :return: dict with 'marshal_id' and 'external_id' on success, or a
        dict with an 'error' key describing why the lookup failed
    """
    try:
        request_id = int(request_id)
    except Exception as e:
        return {'error': str(e)}
    ret = db.get_from_request(values=['marshal_id', 'external_id'],
                              where_dict={'id': request_id})
    if not ret:
        return {'error': "No object found with that id number"}
    if marshal == 'growth':
        ret = make_dict_from_dbget(['marshal_id', 'external_id'], ret[0])
        # NOTE(review): ids at or below 100 are treated as invalid growth
        # marshal ids -- the origin of this threshold is not visible here;
        # confirm against the marshal id allocation scheme.
        if isinstance(ret['marshal_id'], int) and ret['marshal_id'] <= 100:
            return {'error': "Request is not a valid growth marshal request"}
        elif isinstance(ret['marshal_id'], str):
            # A string in this column is treated as an error message.
            return {'error': ret['marshal_id']}
        elif not ret['marshal_id']:
            # NOTE(review): returns the falsy value itself (e.g. None) as the
            # error payload -- possibly a descriptive message was intended.
            return {'error': ret['marshal_id']}
        elif ret['external_id'] == 2:
            # NOTE(review): external_id 2 presumably marks a non-growth
            # source -- confirm against the requests table semantics.
            return {'error': "Not a growth request"}
        else:
            return ret
def get_user_observations(username, password, obsdate):
    """Collect the data-product web paths a user may see for one night.

    Authenticates the user, gathers calibration products from the redux
    directory for *obsdate*, filters the night's targets (from what.list)
    by the user's allocations, and returns the '/data/...' paths of every
    file the user is allowed to download.

    :param username: login name checked via check_login
    :param password: matching password
    :param obsdate: UT night directory name (e.g. '20180827')
    :return: {'data': [paths...]} on success, or {'message': ...} when
        authentication or directory/summary-file lookup fails
    """
    # print(username, type(username))
    ret = check_login(username, password)
    # print(ret)
    if not ret[0]:
        return {'message': "User name and password do not match"}
    user_id = ret[1]
    obsdir = os.path.join(redux_dir, obsdate)
    obsdir += '/'
    # Master calibration frames expected at the top of the night directory.
    calib_files = ['Xe.fits', 'Hg.fits', 'Cd.fits', 'dome.fits',
                   'bkgd_dome.fits', 'e3d_dome.fits', '%s_Flat.fits' % obsdate]
    pkl_list = (glob.glob('%s*.pkl' % obsdir))
    master_calib_list = []
    for file in calib_files:
        if os.path.exists(os.path.join(obsdir, file)):
            master_calib_list.append(os.path.join(obsdir, file))
    master_calib_list += pkl_list
    # print(master_calib_list, 'master')
    # Look first to make sure there is a data directory.
    if not obsdate:
        return {'message': 'No obsdate given in json request'}
    if not os.path.exists(obsdir):
        return {'message': 'No data directory could be located for %s UT' %
                           os.path.basename(os.path.normpath(obsdir)),
                'obsdate': obsdate}
    # sedm_dict = {'obsdate': obsdate, 'sci_data': ''}
    # Now lets get the non-science products (i.e. calibrations)
    calib_dict = {'flat3d': os.path.join(obsdir, '%s_flat3d.png' % obsdate),
                  'wavesolution': os.path.join(obsdir,
                                               '%s_wavesolution'
                                               '_dispersionmap.png' % obsdate),
                  'cube_lambdarms': os.path.join(obsdir, 'cube_lambdarms.png'),
                  'cube_trace_sigma': os.path.join(obsdir,
                                                   'cube_trace_sigma.png')}
    # If a calibration frame doesn't exist then pop it out to avoid bad links
    # on the page
    remove_list = []
    data_list = []
    for k, v in calib_dict.items():
        if not os.path.exists(v):
            remove_list.append(k)
    if remove_list:
        for i in remove_list:
            calib_dict.pop(i)
    # print(calib_dict, 'calib products')
    for v in master_calib_list:
        impath = "/data/%s/%s" % (obsdate, os.path.basename(v))
        data_list.append(impath)
    for k, v in calib_dict.items():
        impath = "/data/%s/%s" % (obsdate, os.path.basename(v))
        # Prefer a PDF version of each plot when one exists on disk.
        impathlink = "/data/%s/%s" % (obsdate,
                                      os.path.basename(v.replace('.png',
                                                                 '.pdf')))
        if not os.path.exists(impathlink):
            impathlink = impath
        data_list.append(impathlink)
    # To get ifu products we first look to see if a what.list file has been
    # created. This way we will know which files to add to our dict and
    # whether the user has permissions to see the file
    if not os.path.exists(os.path.join(obsdir, 'what.list')):
        return {'message': 'Could not find summary file (what.list) for %s UT' %
                           os.path.basename(os.path.normpath(obsdir))}
    # Go throught the what list and return all non-calibration entries
    with open(os.path.join(obsdir, 'what.list')) as f:
        what_list = f.read().splitlines()
    science_list = []
    standard_list = []
    for targ in what_list:
        if 'Calib' in targ:
            pass
        elif '[A]' in targ or '[B]' in targ or 'STD' in targ:
            science_list.append(targ)
        elif 'STD' in targ:
            # NOTE(review): unreachable -- 'STD' entries are captured by the
            # branch above, so standard_list always stays empty; confirm
            # whether standards were meant to be separated out here.
            standard_list.append(targ)
        else:
            # There shouldn't be anything here but should put something in
            # later to verify this is the case
            pass
    # Now we go through and make sure the user is allowed to see this target
    show_list = []
    if len(science_list) >= 1:
        allocation_id_list = get_allocations_user(user_id=user_id,
                                                  return_type='list')
        for sci_targ in science_list:
            # Start by pulling up all request that match the science target
            targ_name = sci_targ.split(':')[1].split()[0]
            if user_id == 2:
                # NOTE(review): user id 2 bypasses all permission checks --
                # presumably a privileged account; confirm.
                show_list.append((sci_targ, targ_name))
                continue
            if 'STD' not in targ_name:
                # 1. Get the object id
                object_ids = db.get_object_id_from_name(targ_name)
                object_id = None
                if len(object_ids) == 1:
                    object_id = object_ids[0][0]
                elif len(object_ids) > 1:
                    # TODO what really needs to happen here is that we need to
                    # TODO find the id that is closest to the obsdate.
                    # TODO For now I am just going to use last added
                    # print(object_ids)
                    object_id = object_ids[-1][0]
                elif not object_ids and ('at' in targ_name.lower()
                                         or 'sn' in targ_name.lower()):
                    # sometimes it's at 2018abc not at2018abc in the db
                    targ_name = targ_name[:2] + ' ' + targ_name[2:]
                    object_ids = db.get_object_id_from_name(targ_name)
                    try:
                        object_id = object_ids[-1][0]
                    except IndexError:
                        print("There was an error. You can't see this")
                target_requests = db.get_from_request(
                    values=['allocation_id'],
                    where_dict={'object_id': object_id, 'status': 'COMPLETED'})
                if not target_requests:
                    target_requests = db.get_from_request(
                        values=['allocation_id'],
                        where_dict={'object_id': object_id,
                                    'status': 'OBSERVED'})
                # Right now I am only seeing if there exists a match between
                # allocations of all request. It's possible the request could
                # have been made by another group as another follow-up and thus
                # the user shouldn't be able to see it. This should be able to
                # be fixed once all request are listed in the headers of the
                # science images.
                for req in target_requests:
                    if req[0] in allocation_id_list:
                        show_list.append((sci_targ, targ_name))
                    else:
                        print("You can't see this at target request")
            else:
                # Standards are always visible; strip the 'STD-' prefix.
                targ_name = sci_targ.split(':')[1].split()[0].replace('STD-',
                                                                      '')
                show_list.append((sci_targ, targ_name))
    if len(standard_list) >= 1:
        for std_targ in standard_list:
            targ_name = std_targ.split(':')[1].split()[0].replace('STD-', '')
            show_list.append((std_targ, targ_name))
    # We have our list of targets that we can be shown, now lets actually find
    # the files that we will show on the web page. To make this backwards
    # compatible I have to look for two types of files
    # print(show_list, "Show list")
    if len(show_list) >= 1:
        science_dict = {}
        count = 0
        # div_str = ''
        for targ in show_list:
            # print(targ)
            targ_params = targ[0].split()
            fits_file = targ_params[0].replace('.fits', '')
            name = targ[1]
            # print(obsdir, fits_file)
            # print('%s%s_SEDM.png' % (obsdir, name))
            # print('%sspec_forcepsf*%s*.png' % (obsdir,fits_file))
            # print('%sspec_auto*%s*.png' % (obsdir, fits_file))
            # Gather every product family for this target by glob pattern.
            image_list = (glob.glob('%sifu_spaxels_*%s*.png' % (obsdir,
                                                                fits_file)) +
                          glob.glob('%simage_%s*.png' % (obsdir, name)))
            spec_list = (glob.glob('%s%s_SEDM.png' % (obsdir, name)) +
                         glob.glob('%sspec_forcepsf*%s*.png' % (obsdir,
                                                                fits_file)) +
                         glob.glob('%sspec_auto*%s*.png' % (obsdir, fits_file)))
            spec_all_list = glob.glob("%sspec*%s*" % (obsdir, name))
            e3d_list = (glob.glob('%se3d*%s*.fits' % (obsdir, fits_file)))
            spec_ascii_list = (glob.glob('%sspec_forcepsf*%s*.txt'
                                         % (obsdir, fits_file)) +
                               glob.glob('%sspec_auto*%s*.txt' % (obsdir,
                                                                  fits_file)))
            fluxcals = (glob.glob('%sfluxcal_*%s*.fits' % (obsdir, fits_file)))
            background = (glob.glob('%sbkgd_crr_b_%s.fits' % (obsdir,
                                                              fits_file)))
            astrom_list = (glob.glob('%sguider_crr_b_%s_astrom.fits'
                                     % (obsdir, fits_file)))
            if name not in science_dict:
                science_dict[name] = {'image_list': image_list,
                                      'spec_list': spec_list,
                                      'e3d_list': e3d_list,
                                      'spec_ascii_list': spec_ascii_list,
                                      'fluxcals': fluxcals,
                                      'specall': spec_all_list,
                                      'background': background,
                                      'astrom': astrom_list}
            else:
                # We do this to handle cases where there are two or more of
                # the same object name
                science_dict[name+'_xRx_%s' % str(count)] = {
                    'image_list': image_list, 'spec_list': spec_list,
                    'e3d_list': e3d_list, 'spec_ascii_list': spec_ascii_list,
                    'fluxcals': fluxcals, 'specall': spec_all_list,
                    'background': background, 'astrom': astrom_list}
            count += 1
        # Alright now we build the table that will show the spectra, image file
        # and classification.
        # count = 0
        # print(science_dict)
        for obj, obj_data in science_dict.items():
            if '_xRx_' in obj:
                # Strip the de-duplication suffix added above.
                obj = obj.split('_xRx_')[0]
            if obj_data['e3d_list']:
                for j in obj_data['specall']:
                    if j.split('.')[-1] in ['fits', 'png', 'txt', 'pdf']:
                        data_list.append("/data/%s/%s" % (obsdate,
                                                          os.path.basename(j)))
                for j in obj_data['e3d_list']:
                    data_list.append("/data/%s/%s" % (obsdate,
                                                      os.path.basename(j)))
            if obj_data['spec_ascii_list']:
                for j in obj_data['spec_ascii_list']:
                    data_list.append("/data/%s/%s" % (obsdate,
                                                      os.path.basename(j)))
            if obj_data['fluxcals']:
                for j in obj_data['fluxcals']:
                    data_list.append("/data/%s/%s" % (obsdate,
                                                      os.path.basename(j)))
            if obj_data['background']:
                for j in obj_data['background']:
                    data_list.append("/data/%s/%s" % (obsdate,
                                                      os.path.basename(j)))
            if obj_data['astrom']:
                for j in obj_data['astrom']:
                    data_list.append("/data/%s/%s" % (obsdate,
                                                      os.path.basename(j)))
            # ToDO: Grab data from somewhere to put in the meta data column
            if obj_data['image_list']:
                for i in obj_data['image_list']:
                    impath = "/data/%s/%s" % (obsdate, os.path.basename(i))
                    impathlink = "/data/%s/%s" % \
                                 (obsdate, os.path.basename(i.replace('.png',
                                                                      '.pdf')))
                    if not os.path.exists(impathlink):
                        impathlink = impath
                    data_list.append(impathlink)
            # Check if finders exists in redux directory and if not then
            # log at the old phot directory location
            path1 = os.path.join(redux_dir, obsdate, 'finders')
            path2 = os.path.join(phot_dir, obsdate, 'finders')
            if os.path.exists(path1):
                finder_path = path1
            else:
                finder_path = path2
            if os.path.exists(finder_path):
                finder_img = glob.glob(finder_path + '/*%s*.png' % obj)
                if finder_img:
                    data_list.append("/data/%s/%s" %
                                     (obsdate,
                                      os.path.basename(finder_img[-1])))
            if obj_data['spec_list']:
                for i in obj_data['spec_list']:
                    impath = "/data/%s/%s" % (obsdate, os.path.basename(i))
                    impathlink = "/data/%s/%s" % \
                                 (obsdate, os.path.basename(i.replace('.png',
                                                                      '.pdf')))
                    if not os.path.exists(impathlink):
                        impathlink = impath
                    data_list.append(impathlink)
    return_dict = {'data': data_list}
    return return_dict
def _set_exposure_times(data, prefix):
    """Normalize <prefix>_LastStartTime/_EndExposureTime in *data* in place.

    Reformats the raw start time and derives the end time from the
    exposure length; on any missing or malformed field both values fall
    back to "NA" (matching the original behavior, but without a bare
    ``except:`` that also swallowed KeyboardInterrupt/SystemExit).
    """
    try:
        start = datetime.datetime.strptime(data['%s_LastStartTime' % prefix],
                                           '%Y-%m-%d %H:%M:%S.%f')
        end = start + datetime.timedelta(
            seconds=float(data['%s_ExposureTime' % prefix]))
        data['%s_EndExposureTime' % prefix] = end.strftime("%Y-%m-%d %H:%M:%S")
        data['%s_LastStartTime' % prefix] = start.strftime("%Y-%m-%d %H:%M:%S")
    except (KeyError, TypeError, ValueError):
        # Missing key, non-string timestamp or unparsable number.
        data['%s_EndExposureTime' % prefix] = "NA"
        data['%s_LastStartTime' % prefix] = "NA"


def get_status():
    """Read the telescope status JSON and normalize its exposure times.

    :return: the parsed telstatus.json contents, with the rc_* and ifu_*
        start/end exposure times reformatted (or "NA" when unavailable);
        an empty dict is used when the file cannot be decoded twice.
    """
    with open(status_dir + 'telstatus.json') as json_file:
        try:
            data = json.load(json_file)
        except json.decoder.JSONDecodeError:
            # The status writer may be mid-write; retry once after a pause.
            print("JSON decode error, trying again")
            json_file.close()
            time.sleep(1)
            with open(status_dir + 'telstatus.json') as json_file2:
                try:
                    data = json.load(json_file2)
                except json.decoder.JSONDecodeError:
                    print("JSON decode error")
                    data = {}
    _set_exposure_times(data, 'rc')
    _set_exposure_times(data, 'ifu')
    # print("Last IFU exp start time: %s" % data['ifu_LastStartTime'])
    return data
def get_obstimes():
    """Return tonight's observing times as JSON, with sciTime blanked out."""
    obs_times = schedule.get_observing_times(return_type='json')
    obs_times['sciTime'] = '#'
    return obs_times
def make_dict_from_dbget(headers, data, decimal_to_float=True):
    """Pair db column names with a result row, returning a dictionary.

    This function takes data from the returns of get_from_* returns and
    puts it in a dictionary form.

    :param headers: list of db header names
    :param data: tuple (or sequence) of row values from a get_from_* call
    :param decimal_to_float: when True, convert Decimal values to float
        so the result is JSON-serializable
    :return: dict mapping header -> value, or an error dict when the two
        sequences differ in length
    """
    if len(headers) != len(data):
        return {'error': 'headers and data are not of equal lengths'}
    # zip + comprehension replaces the original index-based loop.
    return {
        header: float(value)
        if decimal_to_float and isinstance(value, Decimal) else value
        for header, value in zip(headers, data)
    }
def get_filter_exptime(obsfilter, mag):
    """Return the recommended exposure time (seconds, as a string).

    :param obsfilter: one of 'ifu', 'r', 'g', 'i', 'u'; any other filter
        yields "0"
    :param mag: target magnitude (string or number; coerced to float)
    :return: exposure time in seconds, as a string

    Note: the comparisons are strict, so the exact boundary magnitudes
    13, 15 and 18 fall through to the final catch-all bucket -- this
    mirrors the original implementation.
    """
    mag = float(mag)
    # BUGFIX (dead code): the original chain contained two unreachable
    # branches -- '12 < mag < 13' (shadowed by '11 < mag < 13') and a
    # second '13 < mag < 15' (shadowed by the first). They are removed
    # here; the selected values are unchanged for every input.
    if mag > 18:
        exptimes = {'ifu': 2250, 'r': 180, 'g': 180, 'i': 180, 'u': 300}
    elif 15 < mag < 18:
        exptimes = {'ifu': 1800, 'r': 120, 'g': 120, 'i': 120, 'u': 300}
    elif 13 < mag < 15:
        exptimes = {'ifu': 1200, 'r': 1, 'g': 1, 'i': 1, 'u': 30}
    elif 11 < mag < 13:
        exptimes = {'ifu': 90, 'r': 10, 'g': 10, 'i': 10, 'u': 60}
    elif 10 < mag < 12:
        # Effectively 10 < mag <= 11: larger values matched the branch above.
        exptimes = {'ifu': 300, 'r': 30, 'g': 30, 'i': 30, 'u': 60}
    else:
        # mag <= 10 and the exact boundary values 13, 15 and 18.
        exptimes = {'ifu': 1800, 'r': 90, 'g': 90, 'i': 90, 'u': 90}
    return str(exptimes.get(obsfilter, 0))
def get_p18obsdata(obsdate):
    """Fetch the P18 seeing log for the night preceding *obsdate*.

    :param obsdate: date string in "Year-Month-Day" or "YYYYMMDD" format;
        a falsy value now defaults to the current UTC date (the original
        code attempted to strptime the falsy value and crashed)
    :return: (dates, seeing) lists of datetimes and averaged seeing; on a
        parse failure partway through the log a placeholder
        ([obsdate at 07:00], [0]) is returned
    """
    # 1. Create the URL to get the seeing for the requested night
    p18date = []
    p18seeing = []
    if not obsdate:
        # BUGFIX: this branch previously tried to parse the falsy value
        # with strptime and raised; fall back to today's UTC date instead.
        obsd = datetime.datetime.utcnow()
        f = obsd - datetime.timedelta(days=1)
    elif "-" in obsdate:
        f = datetime.datetime.strptime(obsdate,
                                       "%Y-%m-%d") - datetime.timedelta(days=1)
        obsd = datetime.datetime.strptime(obsdate, "%Y-%m-%d")
    else:
        f = datetime.datetime.strptime(obsdate,
                                       "%Y%m%d") - datetime.timedelta(days=1)
        obsd = datetime.datetime.strptime(obsdate, "%Y%m%d")
    y, m, d = [f.strftime("%Y"), int(f.strftime("%m")), int(f.strftime("%d"))]
    p18obsdate = "%s-%s-%s" % (y, m, d)
    # 2. Get the data from the link
    page = requests.get(
        'http://nera.palomar.caltech.edu/P18_seeing/seeing_log_%s.log'
        % p18obsdate)
    data = page.content.decode("ISO-8859-1")
    # 3. Split the page by newlines
    data = data.split('\n')
    # 4. Loop through the data and only keep points built from more than
    # four individual seeing values (column 5 of the log).
    for i in data:
        try:
            i = i.split()
            if len(i) > 5 and int(i[5]) > 4:
                d = '%s %s' % (i[1], i[0])
                p18date.append(datetime.datetime.strptime(d,
                                                          "%m/%d/%Y %H:%M:%S"))
                p18seeing.append(float(i[4]))
        except Exception as e:
            # NOTE(review): a single malformed line aborts the whole parse
            # and returns the placeholder below; a `continue` may have been
            # intended -- preserved as-is to avoid changing behavior.
            print(str(e))
            obsd = obsd.replace(hour=7)
            return [obsd], [0]
    return p18date, p18seeing
# if __name__ == "__main__":
# x = get_ifu_products('/scr7/rsw/sedm/redux/20180827/', 189)
# print(x)
| scizen9/sedmpy | web/model.py | model.py | py | 117,058 | python | en | code | 5 | github-code | 36 | [
{
"api_name": "pandas.options",
"line_number": 29,
"usage_type": "attribute"
},
{
"api_name": "pandas.set_option",
"line_number": 33,
"usage_type": "call"
},
{
"api_name": "scheduler.scheduler.ScheduleNight",
"line_number": 72,
"usage_type": "call"
},
{
"api_name"... |
18039525483 | from keras import layers,models,optimizers,losses
# Train a small CNN on CIFAR-10, plot the training history and report
# the model's accuracy/loss.
from keras import layers, models, optimizers, losses
from keras.datasets import cifar10
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt

(X_train, y_train), (X_test, y_test) = cifar10.load_data()
print(X_train.shape)
print(y_train.shape)

# Scale pixel values to [0, 1]; unnormalized 0-255 inputs slow down and
# destabilize training.
X_train = X_train / 255.0
X_test = X_test / 255.0

HIDDEN_SIZE = 256   # width of the fully connected layer
NUM_CLASSES = 10    # CIFAR-10 has ten classes
LEARNING_RATE = 1E-3

model = models.Sequential()
model.add(layers.Conv2D(32, (3, 3), activation='relu', input_shape=(32, 32, 3)))
model.add(layers.MaxPool2D(2, 2))
model.add(layers.Conv2D(64, (3, 3), activation='relu'))
model.add(layers.MaxPool2D(2, 2))
model.add(layers.Flatten())
model.add(layers.Dense(HIDDEN_SIZE, activation='relu'))
model.add(layers.Dense(NUM_CLASSES, activation='softmax'))

model.compile(
    optimizer=optimizers.Adam(learning_rate=LEARNING_RATE),
    # BUGFIX: the output layer already applies softmax, so the loss must
    # receive probabilities (from_logits=False); from_logits=True would
    # apply a second softmax inside the loss and skew the gradients.
    loss=losses.SparseCategoricalCrossentropy(from_logits=False),
    metrics=['accuracy']
)

EPOCH = 10
history = model.fit(X_train, y_train, epochs=EPOCH, validation_split=0.2)

pd.DataFrame(history.history).plot(figsize=(8, 5))
plt.grid(True)
plt.show()

result = model.evaluate(X_train, y_train, verbose=0)
print('卷积神经网络在cifar10数据集上的准确率为%.2f%%' % (result[1] * 100))
print('卷积神经网络在cifar10数据集上的loss为%.2f' % (result[0]))

pred = model.predict(X_test)
pred = np.argmax(pred, axis=1)[:10]
# BUGFIX: y_test holds integer class labels of shape (N, 1); argmax over
# axis 1 of a single-column array always returned 0. Flatten instead to
# get the actual labels.
label = y_test.flatten()[:10]
print(pred)
print(label)
model.save('model2.h5')
| westbchampion/Python_to_Kaggle | 手写卷积神经网络_test.py | 手写卷积神经网络_test.py | py | 1,540 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "keras.datasets.cifar10.load_data",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "keras.datasets.cifar10",
"line_number": 9,
"usage_type": "name"
},
{
"api_name": "keras.models.Sequential",
"line_number": 16,
"usage_type": "call"
},
{
"api... |
7210833797 | # -*- coding: utf-8 -*-
from odoo import api, models, fields, registry
import odoo
from odoo.tools import DEFAULT_SERVER_DATETIME_FORMAT
import json
import logging
_logger = logging.getLogger(__name__)
class pos_call_log(models.Model):
    """Cached search results ("call logs") for POS session data loads."""
    _rec_name = "call_model"
    _name = "pos.call.log"
    _description = "Log datas of pos sessions"

    # Id range of the records covered by this log slice.
    min_id = fields.Integer('Min Id', required=1, index=True, readonly=1)
    max_id = fields.Integer('Max Id', required=1, index=True, readonly=1)
    # Search domain / results / model / field list, all stored as JSON text.
    call_domain = fields.Char('Domain', required=1, index=True, readonly=1)
    call_results = fields.Char('Results', readonly=1)
    call_model = fields.Char('Model', required=1, index=True, readonly=1)
    call_fields = fields.Char('Fields', index=True, readonly=1)
    active = fields.Boolean('Active', default=True)
    write_date = fields.Datetime('Write date', readonly=1)

    @api.multi
    def compare_database_write_date(self, model, pos_write_date):
        """Return True when any log of *model* is older than *pos_write_date*."""
        last_logs = self.search([('call_model', '=', model), ('write_date', '<', pos_write_date)])
        if last_logs:
            _logger.info('POS write date is %s' % pos_write_date)
            _logger.info('Model %s write date is %s' % (model, last_logs[0].write_date))
            return True
        else:
            return False

    # NOTE(review): the name looks like a typo for 'convert_datetime'; it is
    # kept because external callers reference the method by this name.
    def covert_datetime(self, model, datas):
        """Stringify date/datetime values of *model* records (Odoo 12 only).

        On Odoo 12 read results carry datetime objects; format them with
        DEFAULT_SERVER_DATETIME_FORMAT in place and return *datas*.
        """
        all_fields = self.env[model].fields_get()
        version_info = odoo.release.version_info[0]
        if version_info == 12:
            if all_fields:
                for data in datas:
                    for field, value in data.items():
                        if field == 'model':
                            continue
                        if all_fields[field] and all_fields[field]['type'] in ['date', 'datetime'] and value:
                            data[field] = value.strftime(DEFAULT_SERVER_DATETIME_FORMAT)
        return datas

    @api.multi
    def refresh_call_logs(self):
        """Re-run every stored call and refresh its cached JSON results."""
        _logger.info('========================= BEGIN refresh_call_logs ========================================')
        cache_database_object = self.env['pos.cache.database']
        logs = self.search([])
        for log in logs:
            # Rebuild the original query: model fields + domain restricted
            # to the log's id range.
            call_fields = cache_database_object.get_fields_by_model(log.call_model)
            call_domain = cache_database_object.get_domain_by_model(log.call_model)
            call_domain.append(['id', '>=', log.min_id])
            call_domain.append(['id', '<=', log.max_id])
            _logger.info('Refresh log of model: %s' % log.call_model)
            _logger.info(call_domain)
            _logger.info('===============================')
            results = self.env[log.call_model].sudo().search_read(
                call_domain,
                call_fields)
            # Same Odoo 12 datetime stringification as covert_datetime above.
            version_info = odoo.release.version_info[0]
            if version_info == 12:
                all_fields = self.env[log.call_model].fields_get()
                if all_fields:
                    for result in results:
                        for field, value in result.items():
                            if field == 'model':
                                continue
                            if all_fields[field] and all_fields[field]['type'] in ['date', 'datetime'] and value:
                                result[field] = value.strftime(DEFAULT_SERVER_DATETIME_FORMAT)
            log.write({
                'call_results': json.dumps(results),
                'call_fields': json.dumps(call_fields),
                'call_domain': json.dumps(call_domain),
            })
        # Drop the cache entries so clients reload from the fresh logs.
        self.env['pos.cache.database'].search([]).unlink()
        _logger.info('========================= END refresh_call_logs ========================================')
        return True
| mahmohammed16881688/odoo_12 | addons/pos_retail/models/pos/pos_call_log.py | pos_call_log.py | py | 3,733 | python | en | code | 1 | github-code | 36 | [
{
"api_name": "logging.getLogger",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "odoo.models.Model",
"line_number": 11,
"usage_type": "attribute"
},
{
"api_name": "odoo.models",
"line_number": 11,
"usage_type": "name"
},
{
"api_name": "odoo.fields.Integ... |
74936854824 | from loguru import logger
import configparser as cfg
import os
def logger_handler(msg: str, mode=2) -> None:
    """Log *msg* at the requested severity.

    mode: 0 = debug (emitted with exception/traceback info),
    1 = info, anything else = error (default 2).
    """
    # Configure the sink for this severity before emitting the record.
    _log_constructor(mode)
    emit = {0: logger.exception, 1: logger.info}.get(mode, logger.error)
    emit(msg)
def _log_constructor(mode: int) -> None:
    """Register a loguru sink configured for the given severity level.

    Reads the sink settings (file name, rotation, format, compression,
    diagnose, serialize) from 'config.ini' in the current working
    directory; the file must provide a section per level.

    :param mode: 0 = DEBUG, 1 = INFO, 2 = ERROR
    """
    # read config file
    config = cfg.ConfigParser()
    config.read('config.ini')
    _log_level_mapping = {0: 'DEBUG', 1: 'INFO', 2: 'ERROR'}
    # define log levels as defined in config file
    _config_level = {0: 'LOGGING_DEBUG', 1: 'LOGGING_INFO', 2: 'LOGGING_ERROR'}
    _mode = _log_level_mapping[mode]
    _cnf_lvl = _config_level[mode]
    _file_name = os.path.expanduser(
        config.get(_cnf_lvl, 'log_file', fallback=''))
    # SECURITY/BUGFIX: the boolean options were previously fed through
    # eval(), which executes arbitrary code from the config file and only
    # accepts exact 'True'/'False' spellings. ConfigParser's boolean
    # parsing accepts true/false, yes/no, on/off and 1/0 (case-insensitive)
    # without code execution.
    _serialize = config.getboolean(_cnf_lvl, 'log_serialize')
    _diagnose = config.getboolean(_cnf_lvl, 'log_diagnose')
    logger.add(_file_name,
               rotation=config.get(_cnf_lvl, 'log_rotate', fallback=''),
               level=_mode,
               format=config.get(_cnf_lvl, 'log_format', fallback=''),
               compression=config.get(
                   _cnf_lvl, 'log_compression', fallback=''),
               diagnose=_diagnose,
               serialize=_serialize)
| Anton0Lashov/dng_extractor | _logger.py | _logger.py | py | 1,503 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "loguru.logger.exception",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "loguru.logger",
"line_number": 17,
"usage_type": "name"
},
{
"api_name": "loguru.logger.info",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "loguru.logge... |
19634743961 | """
Usage: negotiator-cli [OPTIONS] GUEST_UNIX_SOCKET
Communicate from a KVM/QEMU host system with running guest systems using a
guest agent daemon running inside the guests.
Supported options:
-c, --list-commands
List the commands that the guest exposes to its host.
-e, --execute=COMMAND
Execute the given command inside GUEST_UNIX_SOCKET. The standard output stream of
the command inside the guest is intercepted and copied to the standard
output stream on the host. If the command exits with a nonzero status code
the negotiator-host program will also exit with a nonzero status code.
-t, --timeout=SECONDS
Set the number of seconds before a remote call without a response times
out. A value of zero disables the timeout (in this case the command can
hang indefinitely). The default is 10 seconds.
-h, --help
Show this message and exit.
"""
from humanfriendly import Timer
from negotiator_common.config import DEFAULT_TIMEOUT
from negotiator_common import NegotiatorInterface
from negotiator_common.utils import TimeOut
import coloredlogs
import functools
import getopt
import logging
import os
import shlex
import socket
import sys
# Initialize a logger for this module.
logger = logging.getLogger(__name__)
class GuestChannel(NegotiatorInterface):

    """
    The host side of the channel connecting KVM/QEMU hosts and guests.

    This is a modification of negotiator_host.GuestChannel that talks to
    the guest over a plain UNIX domain socket.
    """

    def __init__(self, unix_socket):
        """Connect to a guest's UNIX domain socket.

        :param unix_socket: pathname of the guest's UNIX socket
        :raises GuestChannelInitializationError: when no pathname is
            given or the connection attempt fails
        """
        if not unix_socket:
            raise GuestChannelInitializationError("No UNIX socket pathname provided!")
        # Connect to the UNIX socket.
        logger.debug("Opening UNIX socket: %s", unix_socket)
        self.socket = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
        try:
            logger.debug("Connecting to UNIX socket: %s", unix_socket)
            self.socket.connect(unix_socket)
        except Exception:
            raise GuestChannelInitializationError("Guest refused connection attempt!")
        logger.debug("Successfully connected to UNIX socket!")
        # Initialize the super class, passing it a file like object that
        # wraps the connected socket (makefile() defaults to text read mode).
        super(GuestChannel, self).__init__(handle=self.socket.makefile(),
                                           label="UNIX socket %s" % unix_socket)

    def prepare_environment(self):
        """
        Prepare environment variables for command execution on KVM/QEMU hosts.

        The following environment variables are currently exposed to commands:

        ``$NEGOTIATOR_GUEST``
          The name of the KVM/QEMU guest that invoked the command.
        """
        # NOTE(review): self.guest_name is never assigned anywhere in this
        # class, so calling this method raises AttributeError unless a
        # subclass or caller sets the attribute first -- confirm intent.
        os.environ['NEGOTIATOR_GUEST'] = self.guest_name
class GuestChannelInitializationError(Exception):
    """Exception raised by :py:class:`GuestChannel` when socket initialization fails."""
class Context(object):

    """Enables :py:func:`main()` to inject a custom timeout into partially applied actions."""

    def __init__(self):
        """Initialize a context for executing commands on the host."""
        # Remote calls abort after this many seconds; 0 disables the limit.
        self.timeout = DEFAULT_TIMEOUT

    def print_commands(self, guest_unix_socket):
        """Print the commands supported by the guest, one per line, sorted."""
        with TimeOut(self.timeout):
            channel = GuestChannel(unix_socket=guest_unix_socket)
            print('\n'.join(sorted(channel.call_remote_method('list_commands'))))

    def execute_command(self, guest_unix_socket, command_line):
        """Execute a command inside the named guest.

        The command's captured standard output is echoed (with trailing
        whitespace stripped) and the call duration is logged at debug level.
        """
        with TimeOut(self.timeout):
            timer = Timer()
            channel = GuestChannel(unix_socket=guest_unix_socket)
            output = channel.call_remote_method('execute', *shlex.split(command_line), capture=True)
            logger.debug("Took %s to execute remote command.", timer)
            print(output.rstrip())
def main():
    """Command line interface for the ``negotiator-cli`` program."""
    # Initialize logging to the terminal and system log.
    coloredlogs.install(syslog=True)
    # Parse the command line arguments.
    actions = []
    context = Context()
    try:
        options, arguments = getopt.getopt(sys.argv[1:], 'ce:t:h', [
            'list-commands', 'execute=', 'timeout=', 'help'
        ])
        for option, value in options:
            if option in ('-c', '--list-commands'):
                _require_single_socket(arguments)
                actions.append(functools.partial(context.print_commands, arguments[0]))
            elif option in ('-e', '--execute'):
                _require_single_socket(arguments)
                actions.append(functools.partial(context.execute_command, arguments[0], value))
            elif option in ('-t', '--timeout'):
                context.timeout = int(value)
            elif option in ('-h', '--help'):
                usage()
                sys.exit(0)
        if not actions:
            usage()
            sys.exit(0)
    except Exception:
        logger.exception("Failed to parse command line arguments!")
        sys.exit(1)
    # Execute the requested action(s).
    try:
        for action in actions:
            action()
    except Exception:
        logger.exception("Caught a fatal exception! Terminating ..")
        sys.exit(1)


def _require_single_socket(arguments):
    """Validate that exactly one guest UNIX socket argument was given.

    BUGFIX: this check previously used ``assert``, which is stripped when
    Python runs with ``-O`` and would silently let malformed invocations
    through. Raising an explicit error keeps the validation in optimized
    mode; the error is caught by the surrounding handler in :func:`main`,
    preserving the original log-and-exit-1 behavior.
    """
    if len(arguments) != 1:
        raise ValueError(
            "Please provide the unix socket of a guest as the 1st and only positional argument!")
def usage():
"""Print a user friendly usage message to the terminal."""
print(__doc__.strip())
if __name__ == "__main__":
main() | htrc/HTRC-DataCapsules | backend/tools/negotiator-cli/negotiator-cli.py | negotiator-cli.py | py | 5,639 | python | en | code | 4 | github-code | 36 | [
{
"api_name": "logging.getLogger",
"line_number": 37,
"usage_type": "call"
},
{
"api_name": "negotiator_common.NegotiatorInterface",
"line_number": 39,
"usage_type": "name"
},
{
"api_name": "socket.socket",
"line_number": 52,
"usage_type": "call"
},
{
"api_name": ... |
5644011022 | import agentpy as ap
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.colors as colors
import matplotlib.patches as mpatches
import seaborn as sns
def status_stackplot(data, ax):
"""Stackplot of people's condition over time."""
x = data.index.get_level_values("t")
y = [data[var] for var in ["store", "buy", "sell"]]
color_map = {"labels": ["store", "buy", "sell"], "colors": ["blue", "orange", "green"]}
ax.stackplot(x, y, **color_map)
ax.legend()
ax.set_xlim(0, max(1, len(x) - 1))
ax.set_ylim(0, 1)
ax.set_xlabel("Time steps")
ax.set_ylabel("Percentage of population")
ax.set_title("Proportion of agents taking each action")
def cost_lineplot(data, ax):
x = data.index.get_level_values("t")[1:]
y = -data["daily_cost"][1:]
ax.plot(x, y)
# Fit a linear regression model
coeffs = np.polyfit(x, y, 1)
m = coeffs[0]
b = coeffs[1]
# Plot the regression line
ax.plot(x, m * x + b, color="black", linestyle="--")
ax.legend()
ax.set_xlim(0, max(1, len(x) - 1))
ax.set_xlabel("Time steps")
ax.set_ylabel("Daily cost (arbitrary units)")
def transfer_lineplot(data, ax):
x = data.index.get_level_values("t")[1:]
local = data["local_transfer"][1:]
grid = data["grid_transfer"][1:]
sns.set()
ax.plot(x, local, label="Local transfer")
ax.plot(x, grid, label="Grid transfer")
ax.legend()
ax.set_xlabel("Time steps")
ax.set_ylabel("Daily energy sources (arbitrary units)")
def reward_lineplot(data, ax):
x = data.index.get_level_values("t")[1:]
y = data["reward"][1:]
ax.plot(x, y)
# Fit a linear regression model
coeffs = np.polyfit(x, y, 1)
m = coeffs[0]
b = coeffs[1]
# Plot the regression line
ax.plot(x, m * x + b, color="black", linestyle="--")
ax.legend()
ax.set_xlim(0, max(1, len(x) - 1))
ax.set_xlabel("Time steps")
ax.set_ylabel("Reward (arbitrary units)")
def animation_plot(model, ax):
group_grid = model.network.attr_grid("status")
color_dict = {-1: "orange", 0: "blue", 1: "green"}
action_dict = {"buy": "orange", "sell": "green", "store": "blue"}
cmap = colors.ListedColormap([color_dict[key] for key in color_dict])
ap.gridplot(group_grid, cmap=cmap, ax=ax)
# Create legend
legend_handles = [
mpatches.Patch(color=color, label=label) for label, color in action_dict.items()
]
ax.legend(handles=legend_handles)
ax.set_title(f"Energyshed model \n Time-step: {model.t} Weather: {model.weather}")
def q_values_plot(i, q_values):
# Extract the state and action spaces from the q-values
state_space = sorted(set([key[0] for q_values in q_values for key in q_values.keys()]))
action_space = sorted(set([key[1] for q_values in q_values for key in q_values.keys()]))
q_values = q_values[i]
# Create an empty matrix to hold the q-values
q_values_matrix = np.zeros((len(state_space), len(action_space)))
for j, state in enumerate(state_space):
for k, action in enumerate(action_space):
q_values_matrix[j, k] = q_values.get((state, action), 0)
value_map = {-1: "Neg. energy bal.", 0: "Zero energy bal.", 1: "Pos. energy bal."}
state_space_labels = [
(value_map[energy], weather, store) for energy, weather, store in state_space
]
# Clear the previous plot and plot the new heat map
plt.clf()
sns.heatmap(
q_values_matrix,
annot=True,
fmt=".3g",
xticklabels=action_space,
yticklabels=state_space_labels,
norm=colors.Normalize(vmin=-50, vmax=10),
)
| jacob-evarts/energyshed-simulation | src/plots.py | plots.py | py | 3,649 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "numpy.polyfit",
"line_number": 32,
"usage_type": "call"
},
{
"api_name": "seaborn.set",
"line_number": 50,
"usage_type": "call"
},
{
"api_name": "numpy.polyfit",
"line_number": 65,
"usage_type": "call"
},
{
"api_name": "matplotlib.colors.ListedColor... |
43965888115 | import ast
from django.db.models import Q
from django.db import transaction
from django.core.exceptions import ValidationError as DjangoValidationError
from rest_framework.permissions import IsAuthenticated
from rest_framework.exceptions import ValidationError
from rest_framework.generics import ListAPIView
from common_config.api_code import HTTP_201_CREATED, HTTP_400_BAD_REQUEST, HTTP_OK, HTTP_500_INTERNAL_SERVER_ERROR
from common_config.api_message import ADD_SERVICE, UPDATE_SERVICE, INVALID_PAGE_SIZE, \
DELETE_SERVICE, EXTRA_QUERY_PARAMS, INVALID_PAGE_NUMBER, INVALID_BOOLEAN_FLAG, BLANK_PARAM, INVALID_SORT_BY, \
INVALID_SORT_BY_FIELD_PARAM, REQUIRED_PARAMS, INVALID_STATUS_FILTER, INVALID_SERVICE_IMAGE_ID
from common_config.constant import SERVICE_CATEGORY
from common_config.logger.logging_handler import logger
from common_config.generics import get_object_or_404
from utils.api_response import APIResponse
from utils.permissions import IsAuthorized
from utils.pagination import Pagination
from utils.views.service import ServiceListCreateMixin, ServiceRetrieveUpdateDeleteMixin
from services.models.service import Service
from services.serializers.service import ServiceCreateSerializer, ServiceViewSerializer, ServiceListSerializer, \
ServiceUpdateSerializer
from price_groups.tasks.store_service import linked_services_to_store_task, linked_service_and_options_to_store_task
class ServiceListCreateView(ServiceListCreateMixin):
"""
An Api View which provides a method to add new service or view list services.
Accepts the following GET/POST header parameters: access token
Returns the success/fail message.
"""
queryset = Service.objects.all()
serializer_class = ServiceCreateSerializer
pagination_class = Pagination
permission_classes = (IsAuthenticated, IsAuthorized,)
permission_required = ('add_service', 'list_service',)
query_filter_params = ["is_active", "include_deleted", "page", "page_size", "status", "sort_by", "search",
"sort_by_field"]
def __init__(self, **kwargs):
super().__init__(**kwargs)
self.errors = dict()
self.params = dict()
def validate_query_param(self, page_size, page):
# check pre define query parameter if contain extra query param then raise error message
if len(self.params) > 0 and not all(key in self.query_filter_params for key in self.params.keys()):
extra_param = [key for key in self.params if key not in self.query_filter_params]
self.errors.setdefault("message", []).append(EXTRA_QUERY_PARAMS.format(extra_param))
# check page size must number
if "page_size" in self.params and not page_size.isnumeric():
self.errors.setdefault("page_size", []).append(INVALID_PAGE_SIZE)
if "page" in self.params and not page.isnumeric():
self.errors.setdefault("page", []).append(INVALID_PAGE_NUMBER)
if "status" in self.params:
try:
self.params['status'] = ast.literal_eval(self.params['status'])
except Exception as err:
self.errors.setdefault("status", []).append(INVALID_STATUS_FILTER.format(
type(self.params['status']).__name__))
if not isinstance(self.params['status'], list):
self.errors.setdefault("status", []).append(INVALID_STATUS_FILTER.format(
type(self.params['status']).__name__))
if "is_active" in self.params:
try:
eval(self.params['is_active'])
except Exception as err:
self.errors.setdefault("is_active", []).append(
INVALID_BOOLEAN_FLAG.format("is_active", self.params['is_active']))
if "sort_by" in self.params:
if self.params['sort_by'] == "":
self.errors.setdefault("sort_by", []).append(BLANK_PARAM)
elif self.params['sort_by'].lower() not in ["asc", "desc"]:
self.errors.setdefault("sort_by", []).append(INVALID_SORT_BY.format(self.params['sort_by']))
if "search" in self.params and self.params['search'] == "":
self.errors.setdefault("search", []).append(BLANK_PARAM)
if "sort_by_field" in self.params and self.params['sort_by_field'] not in ["name", "description", "status",
"price"]:
self.errors.setdefault("sort_by_field", []).append(INVALID_SORT_BY_FIELD_PARAM)
if "sort_by_field" in self.params and "sort_by" not in self.params:
self.errors.setdefault("sort_by", []).append(REQUIRED_PARAMS)
if "include_deleted" in self.params:
try:
eval(self.params['include_deleted'])
except Exception as err:
self.errors.setdefault("include_deleted", []).append(
INVALID_BOOLEAN_FLAG.format("include_deleted", self.params['include_deleted']))
else:
if not self.errors:
# validate view soft deleted object view permission
IsAuthorized.has_include_deleted_permission(self.request, "list_service")
def filter_queryset(self, params):
filter_kwargs = {'is_active': True}
if "is_active" in params and params['is_active'] in ['False']:
filter_kwargs['is_active'] = False
if "status" in params:
filter_kwargs['status__in'] = params.get('status')
if "sort_by_field" in params:
if params['sort_by_field'] == "name":
sort_by_field = "name"
elif params['sort_by_field'] == "status":
STATUS_CHOICE = Service.STATUS_CHOICES
# sort service status
service_status = sorted(STATUS_CHOICE, key=lambda tup: tup[1], reverse=True)
# get sorted status
sorted_list = [x[0] for x in service_status]
from django.db.models import Case, When
# sort by field
sort_by_field = Case(
*[When(status=status, then=pos) for pos, status in enumerate(sorted_list)])
elif params['sort_by_field'] == "description":
sort_by_field = "description"
else:
sort_by_field = "price"
else:
sort_by_field = "created_on"
query = Q()
if "search" in params:
query = Q(name__icontains=params['search']) | Q(description__icontains=params['search']) | \
Q(Q(category_tags__name__icontains=params['search']) &
Q(category_tags__entity_type=SERVICE_CATEGORY))
for item in filter_kwargs:
query = query & Q(**{item: filter_kwargs[item]})
if "sort_by" in params and params['sort_by'] == "asc":
return self.queryset.filter(query).order_by(sort_by_field)
return self.queryset.filter(query).order_by(sort_by_field).reverse()
def get(self, request, *args, **kwargs):
"""
In this method validate request query parameters and filter and return service list.
return success/error message.
"""
self.params = request.query_params.copy()
page_size = self.params.get('page_size', None)
page = self.params.get('page', None)
# validate sales order params
self.validate_query_param(page_size, page)
if self.errors:
return APIResponse(self.errors, HTTP_400_BAD_REQUEST)
error_msg, status_code = None, None
try:
# filter and get all service based on query params
queryset = self.filter_queryset(self.params)
except DjangoValidationError as err:
error_msg, status_code = err.args[0], HTTP_400_BAD_REQUEST
except Exception as err:
logger.error("Unexpected error occurred : %s.", err.args[0])
error_msg, status_code = err.args[0], HTTP_500_INTERNAL_SERVER_ERROR
if error_msg is not None:
return APIResponse({"message": error_msg}, status_code)
is_pagination = False
# set api request page number
if page is not None:
self.paginator.page = page
is_pagination = True
# set request api page size number
if page_size is None:
page_size = 10
self.paginator.page_size = page_size
return self.paginator.generate_response(queryset, ServiceListSerializer, request, is_pagination)
@transaction.atomic
def post(self, request, *args, **kwargs):
"""
In this method validate service from data and created new service.
return success/error message.
"""
request_data = request.data.copy()
try:
# validate service and service option fields value
serializer, validate_data = self.validate(request_data)
except ValidationError as err:
return APIResponse(err.args[0], HTTP_400_BAD_REQUEST)
except Exception as err:
logger.error("Unexpected error occurred : %s.", err)
return APIResponse({"message": err.args[0]}, HTTP_400_BAD_REQUEST)
# get last transaction save point id
sid = transaction.savepoint()
try:
# add new service
instance, priceGroupServiceIdList = serializer.create(validate_data)
except ValidationError as err:
# roll back transaction if any exception occur while adding service and service option
transaction.savepoint_rollback(sid)
return APIResponse(err.args[0], HTTP_400_BAD_REQUEST)
except Exception as err:
# roll back transaction if any exception occur while adding service and service option
transaction.savepoint_rollback(sid)
logger.error("Unexpected error occurred : %s.", err.args[0])
return APIResponse({"message": err.args[0]}, HTTP_400_BAD_REQUEST)
# convert model object into json
data = ServiceViewSerializer(instance).data
data['message'] = ADD_SERVICE
if priceGroupServiceIdList:
# system user assign services to store
linked_services_to_store_task.delay({'priceGroupServiceIdList': priceGroupServiceIdList})
return APIResponse(data, HTTP_201_CREATED)
class ServiceRetrieveUpdateDeleteView(ServiceRetrieveUpdateDeleteMixin):
"""
An Api View which provides a method to get, update and delete service.
Accepts the following GET/PUT/DELETE header parameters: access token
Returns the success/fail message.
"""
queryset = Service.objects.all()
serializer_class = ServiceUpdateSerializer
permission_classes = (IsAuthenticated, IsAuthorized,)
permission_required = ('change_service', 'view_service', 'delete_service',)
lookup_field = 'pk'
def get_object(self):
queryset = self.filter_queryset(self.get_queryset())
# Perform the lookup filtering.
lookup_url_kwarg = self.lookup_url_kwarg or self.lookup_field
filter_kwargs = {self.lookup_field: self.kwargs[lookup_url_kwarg]}
# get object
obj = get_object_or_404(queryset, "service_id", **filter_kwargs)
return obj
def get(self, request, *args, **kwargs):
# get service object
instance = self.get_object()
# serialize service objects
serializer = ServiceViewSerializer(instance)
return APIResponse(serializer.data, HTTP_OK)
@transaction.atomic
def put(self, request, *args, **kwargs):
# get service object
instance = self.get_object()
# get request form data
request_data = request.data
try:
# validate service and service option fields value
serializer, validated_data = self.validate(request_data)
except ValidationError as err:
return APIResponse(err.args[0], HTTP_400_BAD_REQUEST)
except Exception as err:
logger.error("Unexpected error occurred : %s.", err)
return APIResponse({"message": err.args[0]}, HTTP_400_BAD_REQUEST)
if "del_images" in validated_data and len(validated_data['del_images']) > 0:
del_images = validated_data.get("del_images")
errors = {}
images = [x.id for x in instance.images.all()]
for x in del_images:
if x.id not in images:
errors.setdefault("del_images", []).append(INVALID_SERVICE_IMAGE_ID.format(x.id))
if len(errors) > 0:
return APIResponse(errors, HTTP_400_BAD_REQUEST)
# get last transaction save point id
sid = transaction.savepoint()
try:
# update service
instance, priceGroupServiceIdList = serializer.update(instance, validated_data)
except ValidationError as err:
logger.error("validation error occurred 1 : %s.", err.args[0])
# roll back transaction if any exception occur while adding service and service option
transaction.savepoint_rollback(sid)
return APIResponse(err.args[0], HTTP_400_BAD_REQUEST)
except Exception as err:
logger.error("Unexpected error occurred 2 : %s.", err.args[0])
# roll back transaction if any exception occur while update service and service option
transaction.savepoint_rollback(sid)
return APIResponse({"message": err.args[0]}, HTTP_400_BAD_REQUEST)
# convert model object into json
data = ServiceViewSerializer(instance).data
data['message'] = UPDATE_SERVICE
task_payload = {}
if priceGroupServiceIdList:
task_payload['priceGroupServiceIdList'] = priceGroupServiceIdList
if "createOptionIds" in request.session:
task_payload['createOptionIds'] = request.session['createOptionIds']
del request.session['createOptionIds']
if task_payload:
# system user assign services to store
linked_service_and_options_to_store_task.delay(task_payload)
return APIResponse(data, HTTP_OK)
@transaction.atomic
def delete(self, request, *args, **kwargs):
# validate and get service object
instance = self.get_object()
# get last transaction save point id
sid = transaction.savepoint()
try:
# soft delete service
instance.delete()
except Exception as err:
# roll back transaction if any exception occur while delete service
transaction.savepoint_rollback(sid)
logger.error("Unexpected error occurred : %s.", err.args[0])
return APIResponse({"message": err.args[0]}, HTTP_400_BAD_REQUEST)
return APIResponse({'message': DELETE_SERVICE}, HTTP_OK)
class UpdateServiceOptionSequenceNumber(ListAPIView):
"""
An Api View which provides a method to update service option sequence number.
Accepts the following GET header parameters: access token
Returns the success/fail message.
"""
queryset = Service.objects.all()
permission_classes = (IsAuthenticated, IsAuthorized,)
permission_required = ('change_service',)
def get(self, request, *args, **kwargs):
services = Service.objects.all()
for service_obj in services:
options = service_obj.options.all().order_by("id")
sequence = 1
for option_obj in options:
option_obj.sequence = sequence
option_obj.save()
# update price group service option
for price_list_option_obj in option_obj.price_group_options.all():
price_list_option_obj.sequence = sequence
price_list_option_obj.save()
sequence += 1
return APIResponse({'message': "Service option sequence updated successfully."}, HTTP_OK) | BharatPlutus/python-django-sample | services/views/service.py | service.py | py | 16,111 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "utils.views.service.ServiceListCreateMixin",
"line_number": 27,
"usage_type": "name"
},
{
"api_name": "services.models.service.Service.objects.all",
"line_number": 33,
"usage_type": "call"
},
{
"api_name": "services.models.service.Service.objects",
"line_number... |
26921480455 | from selenium.webdriver.support import expected_conditions as EC
from selenium.common.exceptions import NoSuchElementException
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.common.by import By
from selenium import webdriver
import time
'''
제가 실행하면 보이나요
python3 test.py
쳐보실래요??
저만 실행 되나요?? 안되는데 그러면 ㅠㅠ
맥으로 하는거라 리팩토링하면서 하고있어요
지금 날짜 찾는거까진 했거든요 이제 몇일인지 찾아야해요
ㅋㅋㅋㅋㅋㅋㅋㅋㅋㅋㅋ 함 봐볼까여
'''
'''
근데 그거 그냥 코드로 박으면 안되나여? 궁금하네여 예
'''
options = webdriver.ChromeOptions()
options.add_argument("window-size=800,600")
driver = webdriver.Chrome()
wait = WebDriverWait(driver, 10)
url = "https://ticket.interpark.com/Gate/TPLogin.asp"
driver.get(url)
def interpark_login(): # 인터파크 로그인
driver.switch_to.frame(driver.find_element(By.TAG_NAME, "iframe"))
driver.find_element(By.ID, "userId").send_keys("chlwldnjs0416")
driver.find_element(By.ID, "userPwd").send_keys("#Chl4689056")
driver.find_element(By.ID, "btn_login").click()
def booking_number_site(): # 예약번호 입력 후, 입장
driver.get(
"http://poticket.interpark.com/Book/BookSession.asp?GroupCode="
# + showcode_entry.get()
+ "23002291"
)
# def date_select():
# # Select date
# while True:
# driver.switch_to.frame(driver.find_element(By.ID, "ifrmBookStep"))
# # if int(calender_entry.get()) > 0: # 날짜 설정
# if int(1) > 0: # 날짜 설정
# # for i in range(int(calender_entry.get())): # 해당 월 아닐시 +1씩 증가하여 해당 월 찾기.
# for i in range(int(1)): # +1씩 증가하여 해당 월 찾기.:) 날짜 설정과 같은 넘버로 해야함.
# driver.find_element(
# By.XPATH, "/html/body/div/div[1]/div[1]/div/span[3]").click()
# try:
# '''
# 회차 클릭 해야함.
# '''
# time.sleep(100)
# driver.find_element(
# # By.XPATH, '(//*[@id="CellPlayDate"])' + "[" + date_entry.get() + "]" # 회차 설정
# # By.XPATH, '(//*[@id="CellPlayDate"])' + "[" + 21 + "]").click() # 회차 설정
# By.XPATH, '(//*[@id="CellPlayDate"])' + "[" + 21 + "]").click() # 회차 설정
# break
# except NoSuchElementException:
# print("NoSearch")
# # # link_go()
# # # go()
# # # Select round
# # # round_xpath = f"/html/body/div/div[3]/div[1]/div/span/ul/li[{round_entry.get()}]/a"
# # round_xpath = f"/html/body/div/div[3]/div[1]/div/span/ul/li['10']/a"
# # wait.until(EC.element_to_be_clickable((By.XPATH, round_xpath))).click()
# # # Click next button
# # driver.switch_to.default_content()
# # driver.find_element(By.ID, "LargeNextBtnImage").click()
def date_select():
day_value = 23
# 날짜
while True:
driver.switch_to.frame(driver.find_element(By.ID, "ifrmBookStep"))
if int(1) == 0:
pass
elif int(1) >= 1:
for i in range(1, int(1) + 1):
driver.find_element(
By.XPATH, "/html/body/div/div[1]/div[1]/div/span[3]"
).click()
try:
driver.find_element(
By.XPATH, '(//*[@id="CellPlayDate"])' +
"[" + day_value + "]"
).click()
break
except NoSuchElementException:
# # link_go()
# # go()
# print("Element 못찾음.")
time.sleep(1111)
# 회차
wait.until(
EC.element_to_be_clickable(
(
By.XPATH,
"/html/body/div/div[3]/div[1]/div/span/ul/li["
+ round_entry.get()
+ "]/a",
)
)
).click()
driver.switch_to.default_content()
driver.find_element(By.ID, "LargeNextBtnImage").click()
def find_random_seat(): # 좌석 무작위로 설정
driver.switch_to.default_content()
seat_frame = driver.find_element(By.NAME, "ifrmSeat")
driver.switch_to.frame(seat_frame)
# wait.until(EC.presence_of_element_located(
# ))
interpark_login() # 예약번호 입력 후, 입장
booking_number_site() # 예약번호 입력 후, 입장
date_select() # 상품 날짜 찾기.
# find_random_seat() # 좌석 무작위로 설정
| sinde530/python | interpark/test.py | test.py | py | 4,841 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "selenium.webdriver.ChromeOptions",
"line_number": 25,
"usage_type": "call"
},
{
"api_name": "selenium.webdriver",
"line_number": 25,
"usage_type": "name"
},
{
"api_name": "selenium.webdriver.Chrome",
"line_number": 28,
"usage_type": "call"
},
{
"api... |
29754454026 | import sys
from PyQt6 import QtCore, QtGui, QtWidgets
from CurConUi import Ui_MainWindow
from currency_converter import CurrencyConverter
class CurrencyConv(QtWidgets.QMainWindow):
def __init__(self):
super(CurrencyConv, self).__init__()
self.ui = Ui_MainWindow()
self.ui.setupUi(self)
self.init_ui()
def init_ui(self):
self.ui.line_new_currency.setPlaceholderText("В какую валюту перевести")
self.ui.line_old_currency.setPlaceholderText("Из какой валюты перевести")
self.ui.line_old_amount.setPlaceholderText("У вас было")
self.ui.button_convert.clicked.connect(self.convert)
# self.ui.button_convert.setObjectName()
def convert(self):
converter = CurrencyConverter()
old_currency = self.ui.line_old_currency.text().upper()
new_currency = self.ui.line_new_currency.text().upper()
old_amount = self.ui.line_old_amount.text()
if old_amount.isdigit() and old_currency and new_currency:
new_amount = round(converter.convert(int(old_amount), f"{old_currency}", f"{new_currency}"), 2)
self.ui.line_new_amount.setText(str(new_amount))
else:
self.ui.line_new_amount.setText("Ошибка ввода")
if __name__ == "__main__":
app = QtWidgets.QApplication(sys.argv)
application = CurrencyConv()
application.show()
sys.exit(app.exec())
| AdirtKa/CurrencyConverter | main.py | main.py | py | 1,476 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "PyQt6.QtWidgets.QMainWindow",
"line_number": 7,
"usage_type": "attribute"
},
{
"api_name": "PyQt6.QtWidgets",
"line_number": 7,
"usage_type": "name"
},
{
"api_name": "CurConUi.Ui_MainWindow",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": ... |
18065190929 | from __future__ import absolute_import
import logging
import numpy as np
from .import utils
from .import sampling
from sklearn.preprocessing import MultiLabelBinarizer, LabelBinarizer
from sklearn.model_selection import StratifiedShuffleSplit
logger = logging.getLogger(__name__)
class Dataset(object):
def __init__(self, inputs, labels, test_indices=None, **kwargs):
"""Encapsulates all pieces of data to run an experiment. This is basically a bag of items that makes it
easy to serialize and deserialize everything as a unit.
Args:
inputs: The raw model inputs. This can be set to None if you dont want
to serialize this value when you save the dataset.
labels: The raw output labels.
test_indices: The optional test indices to use. Ideally, this should be generated one time and reused
across experiments to make results comparable. `generate_test_indices` can be used generate first
time indices.
**kwargs: Additional key value items to store.
"""
self.X = np.array(inputs)
self.y = np.array(labels)
for key, value in kwargs.items():
setattr(self, key, value)
self._test_indices = None
self._train_indices = None
self.test_indices = test_indices
self.is_multi_label = isinstance(labels[0], (set, list, tuple))
self.label_encoder = MultiLabelBinarizer() if self.is_multi_label else LabelBinarizer()
self.y = self.label_encoder.fit_transform(self.y).flatten()
def update_test_indices(self, test_size=0.1):
"""Updates `test_indices` property with indices of `test_size` proportion.
Args:
test_size: The test proportion in [0, 1] (Default value: 0.1)
"""
if self.is_multi_label:
self._train_indices, self._test_indices = sampling.multi_label_train_test_split(self.y, test_size)
else:
sss = StratifiedShuffleSplit(n_splits=1, test_size=test_size)
self._train_indices, self._test_indices = next(sss.split(self.X, self.y))
def save(self, file_path):
"""Serializes this dataset to a file.
Args:
file_path: The file path to use.
"""
utils.dump(self, file_path)
def train_val_split(self, split_ratio=0.1):
"""Generates train and validation sets from the training indices.
Args:
split_ratio: The split proportion in [0, 1] (Default value: 0.1)
Returns:
The stratified train and val subsets. Multi-label outputs are handled as well.
"""
if self.is_multi_label:
train_indices, val_indices = sampling.multi_label_train_test_split(self.y, split_ratio)
else:
sss = StratifiedShuffleSplit(n_splits=1, test_size=split_ratio)
train_indices, val_indices = next(sss.split(self.X, self.y))
return self.X[train_indices], self.X[val_indices], self.y[train_indices], self.y[val_indices]
@staticmethod
def load(file_path):
"""Loads the dataset from a file.
Args:
file_path: The file path to use.
Returns:
The `Dataset` instance.
"""
return utils.load(file_path)
@property
def test_indices(self):
return self._test_indices
@test_indices.setter
def test_indices(self, test_indices):
if test_indices is None:
self._train_indices = np.arange(0, len(self.y))
else:
self._test_indices = test_indices
self._train_indices = np.setdiff1d(np.arange(0, len(self.y)), self.test_indices)
@property
def train_indices(self):
return self._train_indices
@property
def labels(self):
return self.label_encoder.classes_
@property
def num_classes(self):
if len(self.y.shape) == 1:
return 1
else:
return len(self.labels)
| raghakot/keras-text | keras_text/data.py | data.py | py | 4,007 | python | en | code | 422 | github-code | 36 | [
{
"api_name": "logging.getLogger",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "numpy.array",
"line_number": 31,
"usage_type": "call"
},
{
"api_name": "numpy.array",
"line_number": 32,
"usage_type": "call"
},
{
"api_name": "sklearn.preprocessing.Multi... |
37538329416 | import scrapy
class CjSpider(scrapy.Spider):
name = 'cj'
# allowed_domains = ['caijing.com']
start_urls = ['https://www.dyxhw.com/']
def parse(self, response):
typess = response.xpath('//div[@class="nav clearfix"]/a[@class="j_ch_nav _block_news_menu"]/@href').getall()
for one_type in typess:
# print(one_type)
yield scrapy.Request(url=one_type, callback=self.parse_types)
def parse_types(self, response):
news_links = response.xpath('//ul[@class="list14 ml10"]/li/a/@href').getall()
for news_link in news_links:
# print(news_link)
yield scrapy.Request(url=news_link, callback=self.parse_detial)
def parse_detial(self, response):
title = response.xpath('//h1[@class="title"]/text()').get()
contents = response.xpath('//div[@class="clearfix"]/p/text()').getall()
content = '\n'.join(x for x in contents)
recurse = response.xpath('//div[@class="info fl"]//tr/td/text()').get().strip()
pubtime = response.xpath('//div[@class="info fl"]//span[@class="pubTime"]/text()').get()
item = dict()
item['title'] = title
item['content'] = content
item['pubtime'] = pubtime
yield item
rela_article = response.xpath('//div[@class="pic-list clearfix"]//h3/a/@href').getall()
if rela_article:
for rela in rela_article:
yield scrapy.Request(url=rela, callback=self.parse_detial)
| ykallan/caijingguancha | caijingguancha/caijingguancha/spiders/cj.py | cj.py | py | 1,497 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "scrapy.Spider",
"line_number": 4,
"usage_type": "attribute"
},
{
"api_name": "scrapy.Request",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "scrapy.Request",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "scrapy.Request",
... |
24011968546 | # --------------------------------------------------------
# Licensed under The MIT License [see LICENSE for details]
# --------------------------------------------------------
import numpy as np
import os
import sys
from transforms3d.quaternions import *
from transforms3d.euler import *
from transforms3d.axangles import *
import random
from tensorboardX import SummaryWriter
from scipy.spatial import cKDTree
import scipy.io as sio
import IPython
import time
from torch import nn
from torch import optim
import torch
import torch.nn.functional as F
from torch.optim import Adam
from collections import deque
import tabulate
import cv2
import matplotlib.pyplot as plt
import yaml
import core
import copy
import math
from easydict import EasyDict as edict
from pointnet2_ops.pointnet2_utils import furthest_point_sample, gather_operation
import colorsys
import psutil
import GPUtil
from core.common_utils import *
import pybullet as p
HAS_PLANNER_INSTALLED = True
try:
from OMG.ycb_render.robotPose import robot_pykdl
except:
HAS_PLANNER_INSTALLED = False
# global variables
# global variables

# Default 4x4 homogeneous camera view matrix shared by the rendering helpers.
# NOTE(review): presumably hand-tuned world->camera extrinsics for the default
# viewpoint -- confirm against the renderer setup code.
V = cam_V = [[-0.9351, 0.3518, 0.0428, 0.3037],
             [0.2065, 0.639, -0.741, 0.132],
             [-0.2881, -0.684, -0.6702, 1.8803],
             [0.0, 0.0, 0.0, 1.0]]

# 3x6 array of key points on the gripper (rows are x/y/z, columns are points).
# NOTE(review): point semantics inferred from the name (palm origin plus
# finger base/tip offsets in the hand frame) -- confirm with callers.
hand_finger_point = np.array([ [ 0., 0., 0. , -0. , 0. , -0. ],
                               [ 0., 0., 0.053, -0.053, 0.053, -0.053],
                               [ 0., 0., 0.075, 0.075, 0.105, 0.105]])

# Bank of 7-DOF arm joint configurations.
# NOTE(review): usage inferred from the name (seed configurations for IK /
# planner restarts); callers are not visible in this chunk -- confirm.
anchor_seeds = np.array([
    [0.0, -1.285, 0, -2.356, 0.0, 1.571, 0.785],
    [2.5, 0.23, -2.89, -1.69, 0.056, 1.46, -1.27],
    [2.8, 0.23, -2.89, -1.69, 0.056, 1.46, -1.27],
    [2, 0.23, -2.89, -1.69, 0.056, 1.46, -1.27],
    [2.5, 0.83, -2.89, -1.69, 0.056, 1.46, -1.27],
    [0.049, 1.22, -1.87, -0.67, 2.12, 0.99, -0.85],
    [-2.28, -0.43, 2.47, -1.35, 0.62, 2.28, -0.27],
    [-2.02, -1.29, 2.20, -0.83, 0.22, 1.18, 0.74],
    [-2.2, 0.03, -2.89, -1.69, 0.056, 1.46, -1.27],
    [-2.5, -0.71, -2.73, -0.82, -0.7, 0.62, -0.56],
    [-2, -0.71, -2.73, -0.82, -0.7, 0.62, -0.56],
    [-2.66, -0.55, 2.06, -1.77, 0.96, 1.77, -1.35],
    [1.51, -1.48, -1.12, -1.55, -1.57, 1.15, 0.24],
    [-2.61, -0.98, 2.26, -0.85, 0.61, 1.64, 0.23]
])

# Lazily-created module-level singletons, populated on first use by the
# require_* helpers below.
renderer = None      # YCBRenderer instance (see require_renderer)
robot = None         # robot_pykdl kinematics model (see require_robot)
robot_points = None
panda = None         # list of Panda simulators (see require_panda)
panda_clients = []   # pybullet clients matching `panda` one-to-one
def require_panda(num=1):
    """Return the shared Panda simulators, creating them on first use.

    Lazily builds ``num`` independent pybullet DIRECT clients, each hosting
    one Panda gripper simulator, and caches them in the module globals.

    Note: ``num`` is only honoured on the very first call -- later calls
    return the cached lists regardless of the requested count.

    Args:
        num: number of simulator/client pairs to create on first use.

    Returns:
        (panda, panda_clients): list of Panda instances and the matching
        list of pybullet clients.
    """
    global panda, panda_clients
    if panda is None:
        from env.panda_gripper_hand_camera import Panda
        import pybullet_utils.bullet_client as bc
        panda_clients = [bc.BulletClient(connection_mode=p.DIRECT) for _ in range(num)]
        panda = [
            Panda(stepsize=1.0 / 1000.0, base_shift=[-0.05, 0.0, 10.0], bullet_client=client)
            for client in panda_clients
        ]
    return panda, panda_clients
def require_robot(new=False):
    """Return the shared pykdl robot kinematics model, creating it lazily.

    Args:
        new: when True, always build and return a fresh instance instead of
            touching the cached module-level singleton.

    Returns:
        A ``robot_pykdl.robot_kinematics`` instance.
    """
    global robot
    if new:
        return robot_pykdl.robot_kinematics(None, data_path='../../../')
    if robot is None:
        robot = robot_pykdl.robot_kinematics(None, data_path='../../../')
    return robot
def require_renderer(large_fov=False, offset=False):
    """Return the shared YCB renderer and kinematics model, creating lazily.

    On first call this builds a 640x480 ``YCBRenderer``, configures either a
    90-degree FOV or the default projection matrix, loads the Panda link
    meshes, and caches everything in module globals.  ``large_fov`` and
    ``offset`` are only honoured on that first call.

    Args:
        large_fov: use a 90-degree field of view instead of the default
            projection matrix.
        offset: forwarded to the ``YCBRenderer`` constructor.

    Returns:
        (renderer, robot): the cached renderer and kinematics singletons.
    """
    global renderer, robot
    if renderer is None:
        from OMG.ycb_render.ycb_renderer import YCBRenderer
        width, height = 640, 480
        renderer = YCBRenderer(width=width, height=height, offset=offset, gpu_id=0)
        if large_fov:
            renderer.set_fov(90)
        else:
            renderer.set_projection_matrix(
                width, height, width * 0.8, width * 0.8,
                width / 2, height / 2, 0.1, 6)
            renderer.set_camera_default()
        # Panda link meshes; the finger mesh is loaded twice (two fingers).
        link_names = ["link1", "link2", "link3", "link4", "link5",
                      "link6", "link7", "hand", "finger", "finger"]
        renderer.load_objects(
            ["data/robots/{}.DAE".format(name) for name in link_names])
        robot = require_robot()
    return renderer, robot
def truncated_normal(tensor, mean=0, std=1, trunc_std=2):
    """Fill `tensor` in place with a truncated normal (|z| <= trunc_std) and return it.

    Draws 8 candidates per element and keeps the first one inside the bound.
    """
    shape = tensor.shape
    candidates = tensor.new_empty(shape + (8,)).normal_()  # 4
    within_bound = (candidates < trunc_std) & (candidates > -trunc_std)
    first_valid = within_bound.max(-1, keepdim=True)[1]
    tensor.data.copy_(candidates.gather(-1, first_valid).squeeze(-1))
    tensor.data.mul_(std).add_(mean)
    return tensor
def sample_gaussian(size, truncate_std=None, device=None):
    """Draw a float standard-normal tensor of `size`, optionally truncated in place."""
    sample = torch.randn(*size).float()
    if device is not None:
        sample = sample.to(device)
    if truncate_std is not None:
        truncated_normal(sample, mean=0, std=1, trunc_std=truncate_std)
    return sample
def vis_network_input(xyz, xyz_features):
    """Visualize each point-feature set in `xyz_features` with the shared renderer.

    Bug fix: the loop previously iterated over the undefined name `xyzs`,
    raising NameError on every call; it now iterates over the indexed
    argument `xyz_features`.
    """
    for i in range(len(xyz_features)):
        renderer, _ = require_renderer()
        vis_point(renderer, xyz_features[i], interact=2)
def get_usage():
    """Return (max GPU memory used across GPUs, system RAM usage percent)."""
    gpu_mem = [gpu.memoryUsed for gpu in GPUtil.getGPUs()]
    ram_percent = psutil.virtual_memory().percent
    return max(gpu_mem), ram_percent
def solve_ik(joints, pose):
    """
    For simulating trajectory
    """
    # NOTE(review): relies on the module-level `robot` singleton being initialized
    # (e.g. via require_robot/require_renderer); it is None at import time — confirm
    # callers initialize it first.
    ik = robot.inverse_kinematics_trac_ik(pose[:3], ros_quat(pose[3:]), seed=joints[:7])
    if ik is not None:
        # append [0, 0.04, 0.04]: padding joint plus two open-finger joints
        joints = np.append(np.array(ik), [0, 0.04, 0.04])
    return joints
def bullet_ik(pandas, joints, poses, panda_clients): # simulate
    """Batch IK: reset each arm to its seed joints, then solve for the paired pose."""
    solved = []
    for arm, seed_joint, client, target in zip(pandas, joints, panda_clients, poses):
        arm.reset(np.array(seed_joint).flatten())
        position = target[:3]
        orientation = ros_quat(target[3:])
        ik = client.calculateInverseKinematics(arm.pandaUid,
                                               arm.pandaEndEffectorIndex, position, orientation)
        solved.append(np.array(ik))
    return np.stack(solved, axis=0)
def generate_simulated_learner_trajectory(point_state, joints, agent, remain_timestep, max_traj_num=1, vis=False, gaddpg=False):
    """
    use the current point cloud and bullet kinemetics to simulate observation and action for a trajectory
    extract the stored plan in the dataset for plan encoding
    param: 4 x N, 9

    Returns (sim_poses, traj_time_batch, sim_joints, sim_states, sim_integer_time).
    Temporarily sets agent.train_traj_feature = False during the rollout and
    restores it to True afterwards.
    """
    MAX_CLIENT_NUM = max(agent.test_traj_num, 8)
    pandas, panda_clients = require_panda(MAX_CLIENT_NUM)
    num = len(point_state)
    # batched call: per-trajectory joints/timesteps; single call: scalars
    if num > 1:
        init_joints = joints[0].flatten()
        total_step = int(remain_timestep[0])
    else:
        init_joints = joints.flatten()
        total_step = remain_timestep
    pandas = pandas[:num]
    panda_clients = panda_clients[:num]
    ef_pose = []
    # read the end-effector pose of each reset arm; bullet returns quat as xyzw,
    # reordered here to wxyz for unpack_pose
    for panda, p_client in zip(pandas, panda_clients):
        panda.reset(joints=init_joints.flatten())
        pos, orn = p_client.getLinkState(panda.pandaUid, panda.pandaEndEffectorIndex)[:2]
        ef_pose.append(unpack_pose(list(pos) + [orn[3], orn[0], orn[1], orn[2]]))
    ef_pose = np.stack(ef_pose, axis=0)
    sim_point_state = point_state[0] if len(point_state.shape) == 3 and point_state.shape[0] == 1 else point_state
    sim_pose = np.stack([np.eye(4)] * num, axis=0)
    plan, sim_states, sim_joints, sim_actions, sim_poses = [], [], [], [], [sim_pose]
    agent.train_traj_feature = False # avoid regenerating traj latent
    # rollout
    step_func = agent.batch_select_action if num > 1 else agent.select_action
    if has_check(agent, 'vis_traj') and hasattr(agent, 'gaddpg') and gaddpg: step_func = agent.gaddpg_step #
    for episode_steps in range(total_step):
        state = [[sim_point_state, np.zeros(1)], None, None, None]
        action, _, _, aux_pred = step_func(state, remain_timestep=remain_timestep - episode_steps, curr_joint=joints )
        if len(action.shape) == 1:
            action = action[None]
        # integrate the relative action into the end-effector pose, then solve IK
        action_pose = unpack_action_batch(action)
        ef_pose = np.matmul(ef_pose, action_pose) # joints
        joints = bullet_ik(pandas, joints, pack_pose_batch(ef_pose), panda_clients)
        # move the point cloud into the new end-effector frame
        sim_point_state = se3_transform_pc(se3_inverse_batch(action_pose), sim_point_state)
        plan.append(joints)
        sim_actions.append(action)
        sim_states.append(sim_point_state)
        sim_poses.append(np.matmul(sim_poses[-1], action_pose))
        sim_joints.append(joints)
    # (T+1)*num poses -> list of num arrays of shape (T+1, 7)
    sim_poses = list(pack_pose_rot_first_batch(np.concatenate(sim_poses, axis=0)).reshape(-1, num, 7).transpose((1,0,2)))
    agent.train_traj_feature = True
    sim_integer_time = np.arange(total_step, 0, -1)
    traj_time_batch = sim_integer_time[::-1] / float(total_step)
    return sim_poses, traj_time_batch, sim_joints, sim_states, sim_integer_time
def generate_simulated_expert_trajectory(state, plan, curr_joint, curr_traj_time=0, idx=0, vis=False,robot=None):
    """
    use the current point cloud to simulate observation and action for a trajectory
    extract the stored plan in the dataset for plan encoding
    param: 4 x N, T x 9, 9

    Returns (sim_states, plan, sim_actions, sim_poses, sim_goals,
    sim_integer_time, sim_traj_idx).  All poses are packed rotation-first.
    """
    arm_collision_point = get_collision_points()
    if robot is None: robot = require_robot()
    # current end-effector pose (link index 7 = hand)
    curr_ef_pose = (robot.forward_kinematics_parallel(wrap_value(curr_joint)[None], offset=False)[0][7])
    # lift the scene points (last 500 are robot points, dropped) into the base frame
    global_pc = np.matmul(curr_ef_pose[:3, :3], state[:3, :-500]) + curr_ef_pose[:3, [3]]
    pc_mask = state[[3]]
    max_len = len(plan)
    traj_len = plan_length(plan)
    plan = plan[:traj_len]
    if traj_len == 0:
        # degenerate plan: return a single identity-pose step
        return (state[None], plan, np.zeros(6)[None], pack_pose_rot_first(np.eye(4))[None],
                pack_pose_rot_first(np.eye(4))[None], [1.],
                [[idx, 0, curr_traj_time]])
    plan_link_poses = robot.forward_kinematics_parallel(wrap_values(plan), offset=False)
    # relative ef poses w.r.t. the current pose, and goals w.r.t. the final pose
    sim_poses = pack_pose_rot_first_batch(np.matmul(se3_inverse(curr_ef_pose)[None], plan_link_poses[:, 7]))
    sim_goals = pack_pose_rot_first_batch(np.matmul(se3_inverse_batch(\
                np.concatenate((curr_ef_pose[None], plan_link_poses[:, 7]), axis=0)), plan_link_poses[-1, 7][None]))
    # not used
    inv_ef_pose = se3_inverse_batch(plan_link_poses[:, 7])
    # express the global point cloud in each step's end-effector frame
    sim_states = np.matmul(inv_ef_pose[:, :3, :3], global_pc[None]) + inv_ef_pose[:, :3, [3]]
    plan_link_poses = np.matmul(inv_ef_pose[:, None], plan_link_poses)
    # transform the cached per-link collision samples by each step's link poses
    collision_point = np.matmul(plan_link_poses[...,:3,:3], arm_collision_point.swapaxes(-1, -2)[:,:3]).swapaxes(-1, -2) + \
                      plan_link_poses[...,:3,[3]].swapaxes(-1, -2)
    collision_point = collision_point.reshape([len(plan_link_poses), -1, 3]).swapaxes(-1, -2)
    sim_states = np.concatenate((sim_states, collision_point), axis=-1) # robot points
    sim_states = np.concatenate((sim_states, np.tile(pc_mask[None], (len(sim_states), 1, 1))), axis=1) # mask
    sim_joints = np.concatenate((curr_joint[None], plan), axis=0)
    sim_actions = np.zeros([len(sim_joints) - 1, 6]) # not used
    sim_traj_idx = [[idx, j / float(traj_len), curr_traj_time + (j + 1) / max_len] for j in range(traj_len + 1)]
    # prepend the current observation / joint / identity pose as step 0
    sim_states = np.concatenate((state[None], sim_states),axis=0) #
    plan = np.concatenate((curr_joint[None], plan),axis=0) #
    sim_poses = np.concatenate((pack_pose_rot_first(np.eye(4))[None], sim_poses),axis=0)
    sim_actions = np.concatenate((sim_actions, np.zeros(6)[None]),axis=0)
    sim_integer_time = np.arange(traj_len + 1, 0, -1)
    return (sim_states, plan, sim_actions, sim_poses, sim_goals, sim_integer_time, sim_traj_idx)
def vis_learner_traj(state, joints, agent, remain_timestep):
    """
    visualize rollout using the current traj_feat

    Renders the simulated learner trajectory/trajectories as gripper lines on
    top of the robot and scene point cloud.  Temporarily swaps agent.traj_feat
    for the sampler latent and restores it at the end.

    Fix: `np.int` (removed in NumPy 1.24) replaced by the builtin `int`.
    """
    remain_timestep = min(remain_timestep, 45)
    point_state = state[0][0][None]
    poses = robot.forward_kinematics_parallel(wrap_value(joints)[None], offset=False)[0]
    ef_pose = poses[7]
    packed_poses = [pack_pose(pose) for pose in poses]
    sampler_multi_traj = len(agent.traj_feat_target_test) > 1
    # make copy
    traj_feat_copy = agent.traj_feat.clone()
    agent.traj_feat = agent.traj_feat_target_test # restore sampler latent
    max_traj_num = 1
    if sampler_multi_traj:
        # tile inputs so each sampled latent gets its own rollout
        max_traj_num = agent.test_traj_num
        joints = np.tile(joints, (max_traj_num, 1))
        point_state = np.tile(point_state, (max_traj_num, 1, 1))
        remain_timestep = torch.ones(max_traj_num).cuda() * remain_timestep
        vis_traj = generate_simulated_learner_trajectory(point_state, joints, agent, remain_timestep, max_traj_num)[0]
        traj_lines = []
        # one distinct hue per trajectory
        hues = np.linspace(0., 5./6, max_traj_num )
        colors = np.stack([colorsys.hsv_to_rgb(hue, 1.0, 1.0) for hue in hues]) * 255
        lines_color = (np.repeat(colors, 2, axis=0).astype(int)).tolist()
        for i in range(max_traj_num):
            traj_lines.extend(gripper_traj_lines(ef_pose, vis_traj[i]))
        lines = traj_lines
    else:
        vis_traj = generate_simulated_learner_trajectory(point_state, joints, agent, remain_timestep, max_traj_num)[0]
        traj_line, grasp_line = gripper_traj_lines(ef_pose, vis_traj[0])
        lines = [(traj_line[0], traj_line[1]), (grasp_line[0], grasp_line[1])]
        lines_color = [[0, 0, 255], [0, 0, 255]]
    vis_point_state = state[0][0]
    vis_point_state = vis_point_state[:, 6:] # avoid hand point collision
    target_mask = get_target_mask(vis_point_state)
    point_color = get_point_color(vis_point_state)
    vis_point_state = se3_transform_pc(ef_pose, vis_point_state) # base coordinate
    renderer = require_renderer()[0]
    renderer.vis(packed_poses, range(len(poses)),
                 shifted_pose=np.eye(4),
                 interact=2,
                 V=np.array(V),
                 visualize_context={
                     "white_bg": True,
                     "project_point": [vis_point_state[:3]],
                     "project_color": [point_color],
                     "point_size": [3],
                     "reset_line_point": True,
                     "static_buffer": True,
                     "line": lines,
                     "line_color": lines_color,
                 }
                 )
    agent.traj_feat = traj_feat_copy
def joint_to_cartesian(new_joints, curr_joint):
    """
    Convert joint space action to task space action by fk
    """
    kin = require_robot()

    def ef_pose_of(q):
        return kin.forward_kinematics_parallel(wrap_value(q)[None], offset=False)[0][-3]

    current_pose = ef_pose_of(curr_joint)
    next_pose = ef_pose_of(new_joints)
    delta = se3_inverse(current_pose).dot(next_pose)
    return np.hstack([delta[:3, 3], mat2euler(delta[:3, :3])])
def check_ngc():
    """
    check for using cluster in training
    """
    totals = [gpu.memoryTotal for gpu in GPUtil.getGPUs()]
    return max(totals) > 14000
def plan_length(plan):
    """Count the non-padding steps of a plan (rows whose absolute sum is > 0).

    Accepts a numpy array or torch tensor of shape (T, dof) where padding
    rows are all-zero.  Returns an integer-like scalar.

    Bug fix: an empty plan previously returned the plan object itself rather
    than a number, which broke numeric comparison (`plan_length(p) == 0`) and
    slicing (`plan[:traj_len]`) at call sites such as
    generate_simulated_expert_trajectory; it now returns 0.
    """
    if len(plan) == 0:
        return 0
    if type(plan) is np.ndarray:
        return np.sum(np.abs(plan).sum(-1) > 0)
    else:
        return torch.sum(torch.abs(plan).sum(-1) > 0)
def pad_traj_plan(plan, max_len=50):
    """Zero-pad a (T, 9) plan to shape (max_len, 9); an empty plan yields zeros."""
    out = np.zeros((max_len, 9))
    if len(plan):
        out[:len(plan)] = plan
    return out
def update_net_args(config, spec, net_args):
    """Populate state-feature-extractor kwargs and spec from the training config."""
    net_args.update(model_scale=config.feature_input_dim / 512., group_norm=True)
    if has_check(config, 'sa_channel_concat'):
        spec["net_kwargs"]["action_concat"] = True
    if has_check(config, 'joint_point_state_input'):
        net_args["extra_latent"] = net_args["extra_latent"] + 7
    if has_check(config, 'feature_option'):
        net_args["feature_option"] = config.feature_option
    overwrite_lr = has_check(config, 'value_overwrite_lr') and config.value_overwrite_lr > 0
    if overwrite_lr:
        spec["opt_kwargs"]["lr"] = config.value_overwrite_lr
def update_traj_net_args(config, spec, net_args):
    """Populate trajectory-feature-extractor kwargs and learning rate from the config."""
    net_args.update(
        feature_extractor_class=config.traj_feature_extractor_class,
        num_inputs=config.traj_latent_size,
        hidden_dim=config.feature_input_dim,
        feat_head_dim=config.traj_latent_size,
        config=config,
        model_scale=config.traj_latent_size / 512.,
        feature_option=config.st_feature_option,
        group_norm=True,
    )
    net_args["extra_latent"] = net_args["extra_latent"] + 7
    spec["opt_kwargs"]["lr"] = config.traj_net_lr
def update_traj_sampler_net_args(config, spec, net_args):
    """Populate trajectory-sampler kwargs and learning rate from the config."""
    latent = config.traj_latent_size
    net_args.update(
        num_inputs=latent,
        hidden_dim=config.feature_input_dim,
        feat_head_dim=latent,
        config=config,
        output_model_scale=latent / 512.,
        model_scale=config.traj_sampler_latent_size / 512.,
        feature_option=config.traj_feature_option,
        group_norm=True,
    )
    extra = net_args["extra_latent"] + 7  # joint
    if config.sampler_extra_abs_time:
        extra += 1  # time
    net_args["extra_latent"] = extra
    spec["opt_kwargs"]["lr"] = config.traj_sampler_net_lr
def make_nets_opts_schedulers(model_spec, config, cuda_device="cuda"):
    """Build networks, optimizers and LR schedulers from a YAML model spec.

    Returns {net_name: {"net", "opt", "scheduler", optional encoder/value_encoder
    opts+schedulers}}.  Networks are wrapped in DataParallel and moved to CUDA.
    """
    specs = yaml.load(open(model_spec).read(), Loader=yaml.SafeLoader) #
    ret = {}
    if torch.cuda.device_count() > 1:
        print("Let's use", torch.cuda.device_count(), "GPUs!")
    for net_name, spec in specs.items():
        net_args = spec.get("net_kwargs", {})
        if net_name == "state_feature_extractor":
            if has_check(config, 'state_feature_extractor'):
                spec["class"] = config.state_feature_extractor
            net_args["input_dim"] = config.channel_num
            update_net_args(config, spec, net_args)
        # traj nets are only built when the corresponding training flag is set
        if net_name == 'traj_feature_extractor':
            if has_check(config, 'train_traj_feature'):
                net_args["input_dim"] = config.channel_num
                update_traj_net_args(config, spec, net_args)
            else:
                continue
        if net_name == 'traj_feature_sampler':
            if has_check(config, 'train_traj_sampler') :
                net_args["input_dim"] = config.channel_num
                update_traj_sampler_net_args(config, spec, net_args)
            else:
                continue
        print('net_name:', net_name)
        net_class = getattr(core.networks, spec["class"])
        net = net_class(**net_args)
        net = torch.nn.DataParallel(net).to("cuda")
        d = {
            "net": net,
        }
        if "opt" in spec:
            # optimizer / scheduler classes are looked up by name in torch.optim
            d["opt"] = getattr(optim, spec["opt"])(
                net.parameters(), **spec["opt_kwargs"]
            )
            if len(config.overwrite_feat_milestone) > 0:
                spec["scheduler_kwargs"]["milestones"] = config.overwrite_feat_milestone
            print("schedule:", spec["scheduler_kwargs"]["milestones"])
            d["scheduler"] = getattr(optim.lr_scheduler, spec["scheduler"])(
                d["opt"], **spec["scheduler_kwargs"]
            )
            # separate opt/scheduler pairs for encoder submodules when present
            if hasattr(net.module, "encoder"):
                d["encoder_opt"] = getattr(optim, spec["opt"])(
                    net.module.encoder.parameters(), **spec["opt_kwargs"]
                )
                d["encoder_scheduler"] = getattr(optim.lr_scheduler, spec["scheduler"])(
                    d["encoder_opt"], **spec["scheduler_kwargs"]
                )
            if hasattr(net.module, "value_encoder"):
                d["val_encoder_opt"] = getattr(optim, spec["opt"])(
                    net.module.value_encoder.parameters(), **spec["opt_kwargs"]
                )
                d["val_encoder_scheduler"] = getattr(
                    optim.lr_scheduler, spec["scheduler"]
                )(d["val_encoder_opt"], **spec["scheduler_kwargs"])
        ret[net_name] = d
    return ret
def get_fc_feat_head(input_dim, dim_list, output_dim, acti_func='nn.ReLU', end_with_act=False):
    """Build an MLP head: input_dim -> dim_list[...] -> output_dim.

    The first activation is always nn.ReLU (matching the historical behavior);
    `acti_func` names the activation class used between later layers, e.g.
    'nn.ReLU' or 'nn.ELU'.

    Fix: the activation class is now resolved with getattr on torch.nn instead
    of eval(), so arbitrary config strings are no longer executed.
    """
    act_cls = getattr(nn, acti_func.rsplit('.', 1)[-1])
    model_list = [nn.Linear(input_dim, dim_list[0]), nn.ReLU(True)]
    for i in range(1, len(dim_list)):
        model_list.extend([nn.Linear(dim_list[i - 1], dim_list[i]), act_cls(True)])
    model_list.append(nn.Linear(dim_list[-1], output_dim))
    if end_with_act:
        model_list.append(act_cls(True))
    return nn.Sequential(*model_list)
def get_info(state, opt="img", IMG_SIZE=(112, 112)):
    """Extract the image, camera intrinsics, or point cloud from a packed state."""
    if opt == "img":
        rgb = state[0][1][:3].T * 255
        return rgb.astype(np.uint8)
    if opt == "intr":
        cam_proj = np.reshape(state[-2][48:], [4, 4])
        return projection_to_intrinsics(cam_proj, IMG_SIZE[0], IMG_SIZE[1])[:3, :3]
    if opt == "point":
        return state[0][0]
def get_collision_points():
    """
    load collision points with the order of the link list and end effector
    """
    # cached in the module-level `robot_points`; on first use, either loads the
    # saved .npy or samples 50 points per link from the .xyz meshes and saves it
    global robot_points
    if robot_points is None:
        collision_file = 'data/robots/all_collision_pts.npy'
        if not os.path.exists(collision_file):
            collision_pts = []
            links = [
                "link1",
                "link2",
                "link3",
                "link4",
                "link5",
                "link6",
                "link7",
                "hand",
                "finger",
                "finger",
            ]
            for i in range(len(links)):
                file = "data/robots/{}.xyz".format(links[i])
                pts = np.loadtxt(file)
                # random subsample of 50 points per link (order not deterministic)
                sample_pts = pts[random.sample(range(pts.shape[0]), 50)]
                collision_pts.append(sample_pts)
            collision_pts = np.array(collision_pts)
            np.save(collision_file, collision_pts)
        else:
            collision_pts = np.load(collision_file)
        robot_points = collision_pts
    return robot_points
def sample_latent(batch_size, latent_size):
    """Sample a (batch_size, latent_size) standard-normal latent on the GPU."""
    noise = torch.randn(batch_size, latent_size)
    return noise.cuda()
def add_extra_text(img, extra_text, text_size=0.3, corner='tl'):
    """Overlay text lines on a copy of `img`, anchored at the given corner ('tl'/'tr'/'bl'/'br')."""
    canvas = img.copy()
    scale = canvas.shape[0] / 256
    gap = int(15 * scale)
    width, height = canvas.shape[:2]
    top = corner.startswith('t')
    offset_h = 0 if top else height - int(50 * scale)
    offset_w = 0 if corner.endswith('l') else int(width - 30 * scale)
    sign = 1 if top else -1
    # font scale is derived from image size; the text_size argument is overridden
    text_size = 0.3 * scale
    for line_idx, line in enumerate(extra_text): #
        cv2.putText(
            canvas, line,
            (offset_w, offset_h + sign * (gap + line_idx * gap)),
            cv2.FONT_HERSHEY_DUPLEX,
            text_size, [255,0,0] ) # 0.7
    return canvas
def write_video(
    traj,
    scene_file,
    overhead_traj=None,
    expert_traj=None,
    overhead_expert_traj=None,
    name=0,
    IMG_SIZE=(112, 112),
    output_dir="output_misc/",
    logdir="policy",
    target_name="",
    surfix="",
    use_pred_grasp=False,
    success=False,
    use_value=False,
    extra_text=None
):
    """Write rollout frames (and optionally overhead views) to .avi files.

    When an expert trajectory is provided, its frames are concatenated
    side-by-side with the policy frames (the expert's last frame is repeated
    if it is shorter).  Frames are given as RGB and written as BGR.
    """
    # double-width frames when the expert rollout is shown next to the policy one
    ratio = 1 if expert_traj is None else 2
    result = "success" if success else "failure"
    video_writer = make_video_writer(
        os.path.join(
            output_dir,
            "rl_output_video_{}/{}_rollout.avi".format(surfix, scene_file),
        ),
        int(ratio * IMG_SIZE[0]),
        int(IMG_SIZE[1]),
    )
    text_color = [255, 0, 0] if use_pred_grasp else [0, 255, 0]
    for i in range(len(traj)):
        img = traj[i][..., [2, 1, 0]]  # RGB -> BGR for the video codec
        if expert_traj is not None:
            idx = min(len(expert_traj) - 1, i)
            img = np.concatenate((img, expert_traj[idx][..., [2, 1, 0]]), axis=1)
        img = img.astype(np.uint8)
        if extra_text is not None:
            img = add_extra_text(img, extra_text)
        video_writer.write(img)
    if overhead_traj is not None:
        width, height = overhead_traj[0].shape[1], overhead_traj[0].shape[0]
        overhead_video_writer = make_video_writer(
            os.path.join( output_dir,
            "rl_output_video_{}/{}_overhead_rollout.avi".format(surfix, scene_file)), int(ratio * width), height )
        for i in range(len(overhead_traj)):
            img = overhead_traj[i][..., [2, 1, 0]]
            if overhead_expert_traj is not None:
                idx = min(len(overhead_expert_traj) - 1, i)
                img = np.concatenate((img, overhead_expert_traj[idx][..., [2, 1, 0]]), axis=1)
            overhead_video_writer.write(img.astype(np.uint8))
def append_pointcloud_time(agent, point_state, time_batch=None, traj=True, train=True):
    """Optionally concatenate a per-point absolute-time channel onto a (B, C, N) cloud."""
    if train:
        integer_time = time_batch
    elif hasattr(agent, 'timestep'):
        integer_time = agent.timestep
    else:
        integer_time = torch.Tensor([0]).float().cuda()
    if agent.sampler_extra_abs_time:
        time_channel = integer_time.view(-1, 1, 1).expand(-1, -1, point_state.shape[2])
        point_state = torch.cat((point_state, time_channel), dim=1)
    return point_state
def preprocess_points(config, state_input, curr_joint, time_batch=None, traj=False, append_pc_time=False):
    """
    process point cloud for network input
    """
    if type(curr_joint) is not torch.Tensor:
        curr_joint = torch.from_numpy(curr_joint).cuda().float()
    if type(state_input) is not torch.Tensor:
        state_input = torch.from_numpy(state_input).cuda().float()
    batch = state_input.clone()
    arm_joint = curr_joint[:, :7]
    if batch.shape[-1] > 4500: # robot point included
        batch = remove_robot_pt(batch)
    concat_joint = (has_check(config, 'traj_joint_point_state_input') if traj
                    else has_check(config, 'joint_point_state_input'))
    if concat_joint:
        joint_channels = arm_joint[..., None].expand(-1, -1, batch.shape[-1])
        batch = torch.cat((batch, joint_channels), dim=1)
    if append_pc_time and hasattr(config, 'test_mode'):
        batch = append_pointcloud_time(config, batch, time_batch, False, not config.test_mode)
    return batch
def get_point_color(vis_points):
    """Color-code points by mask: target green, obstacle/near-obstacle red, robot blue."""
    # assignment order matters: later masks overwrite earlier ones
    mask_colors = [
        (get_target_mask(vis_points), [0, 255, 0]),
        (get_obs_mask(vis_points), [255, 0, 0]),
        (get_robot_mask(vis_points), [0, 0, 255]),
        (get_near_obs_mask(vis_points), [255, 0, 0]),
    ]
    pt_color = np.zeros_like(vis_points[:3]).T
    for mask, color in mask_colors:
        pt_color[mask] = color
    return pt_color
def sample_ef(target, near=0.2, far=0.50):
    # sample a camera extrinsics
    # Rejection-samples a camera position on a half-sphere around `target`
    # and returns an IK solution for it, or None if none is found.
    count = 0
    ik = None
    outer_loop_num = 20
    inner_loop_num = 5
    robot = require_robot()
    for _ in range(outer_loop_num):
        # spherical coordinates: theta from the z axis, phi on the rear half
        theta = np.random.uniform(low=0, high=1*np.pi/2)
        phi = np.random.uniform(low=np.pi/2, high=3*np.pi/2) # half sphere
        r = np.random.uniform(low=near, high=far) # sphere radius
        pos = np.array([r*np.sin(theta)*np.cos(phi), r*np.sin(theta)*np.sin(phi), r*np.cos(theta)])
        trans = pos + target + np.random.uniform(-0.03, 0.03, 3)
        # clamp the jittered position into the reachable workspace box
        trans[2] = np.clip(trans[2], 0.2, 0.6)
        trans[1] = np.clip(trans[1], -0.3, 0.3)
        trans[0] = np.clip(trans[0], 0.0, 0.5)
        pos = trans - target
        for i in range(inner_loop_num):
            rand_up = np.array([0, 0, -1])
            rand_up = rand_up / np.linalg.norm(rand_up)
            # look-at orientation toward the target, rotated to the gripper frame
            R = inv_lookat(pos, 2 * pos, rand_up).dot(rotZ(-np.pi/2)[:3, :3])
            quat = ros_quat(mat2quat(R))
            ik = robot.inverse_kinematics(trans, quat, seed=anchor_seeds[np.random.randint(len(anchor_seeds))])
            if ik is not None:
                break
    return ik
def gripper_traj_lines(start_pose, traj_state, joint_output=False, gripper_along=False ):
    """Build renderable line segments for a trajectory of gripper poses.

    Returns [gripper_lines, ef_lines]: gripper wireframe segments (only the
    last 5 unless gripper_along=True) and consecutive end-effector waypoint
    segments.  `traj_state` holds relative poses (6d euler or 7d rot-first),
    or joint vectors when joint_output=True.
    """
    ef_lines = []
    gripper_lines = [np.zeros([3, 0]), np.zeros([3, 0])]
    draw_gripper_traj_line = gripper_along
    if joint_output:
        # joints -> ef poses via FK (fingers opened at 0.04)
        r = require_robot()
        curr_joint = np.concatenate((traj_state, 0.04 * np.ones((len(traj_state), 2))), axis=-1)
        traj_state = r.forward_kinematics_parallel(wrap_values(curr_joint ), offset=False)[:, 7]
    for grasp_idx, grasp in enumerate(traj_state):
        if not joint_output:
            # unpack relative pose: step 0 is identity, 6d is euler, else rot-first 7d
            if grasp_idx == 0:
                grasp = np.eye(4)
            elif len(grasp) == 6:
                grasp = unpack_pose_euler(grasp)
            else:
                grasp = unpack_pose_rot_first(grasp)
        grasp_pose = start_pose.dot(grasp)
        line_starts, line_ends = grasp_gripper_lines(grasp_pose[None])
        gripper_lines[0] = np.concatenate((gripper_lines[0], line_starts[0]), axis=-1)
        gripper_lines[1] = np.concatenate((gripper_lines[1], line_ends[0]), axis=-1)
        ef_lines.append(grasp_pose[:3, 3])
    if not draw_gripper_traj_line:
        # keep only the final gripper wireframe (5 segments)
        gripper_lines[0] = gripper_lines[0][:,-5:]
        gripper_lines[1] = gripper_lines[1][:,-5:]
    if len(ef_lines) > 1:
        # turn consecutive waypoints into (start, end) segment arrays
        ef_lines = [[ef_lines[idx], ef_lines[idx+1]] for idx in range(len(ef_lines) - 1)]
        ef_lines = np.array(ef_lines)
        ef_lines = [ef_lines.T[:, 0], ef_lines.T[:, 1]]
    else:
        ef_lines = []
    return [gripper_lines, ef_lines]
def vis_traj(point, curr_joint, traj_state=None, V=cam_V, interact=2, used_renderer=None, gripper_along=False):
    # visualize traj with renderer
    """Render the robot, point cloud, and optional trajectory lines; returns the RGB frame.

    Bug fix: `point.shape(1)` called the shape tuple (TypeError on any point
    cloud whose first conversion path was taken); it is now `point.shape[1]`.
    """
    renderer, robot = require_renderer( )
    if type(point) is torch.Tensor:
        point = point.detach().cpu().numpy()
        point = point[0]
    if point.shape[1] != 4096:
        point = point[:,6:-500] # remove gripper and robot point
    if type(curr_joint) is torch.Tensor:
        curr_joint = curr_joint.detach().cpu().numpy()
    if len(curr_joint) == 7:
        curr_joint = np.append(curr_joint, [0, 0])
    poses_ = robot.forward_kinematics_parallel(wrap_value(curr_joint), offset=False)[0]
    poses_2 = [pack_pose(pose) for pose in poses_]
    point_color = get_point_color(point)
    point = se3_transform_pc(poses_[7], point)
    if traj_state is not None:
        if type(traj_state) is torch.Tensor:
            traj_state = traj_state.detach().cpu().numpy()
        if type(traj_state) is list and len(traj_state) > 4:
            # multiple trajectories: one line set per trajectory
            traj_lines = []
            line_colors = get_mask_colors(len(traj_state) * 2 + 5)[5:]
            for i in range(len(traj_state)):
                traj_lines.extend(gripper_traj_lines(poses_[7], traj_state[i]))
        else:
            gripper_lines, ef_lines = gripper_traj_lines(poses_[7], traj_state, gripper_along=gripper_along)
            line_colors = [[255, 255, 0], [0, 0, 255]]
            traj_lines = [gripper_lines, ef_lines]
        rgb = renderer.vis(poses_2, list(range(10)),
                           shifted_pose=np.eye(4),
                           interact=interact,
                           V=np.array(V),
                           visualize_context={
                               "white_bg": True,
                               "project_point": [point[:3]],
                               "project_color": [point_color],
                               "static_buffer": True,
                               "reset_line_point": True,
                               "line": traj_lines,
                               "line_color": line_colors,
                           }
                           )
    else:
        rgb = renderer.vis(poses_2, list(range(10)),
                           shifted_pose=np.eye(4),
                           interact=interact,
                           V=np.array(V),
                           visualize_context={
                               "white_bg": True,
                               "project_point": [point[:3]],
                               "project_color": [point_color],
                               "static_buffer": True,
                               "reset_line_point": True,
                               "thickness": [2]
                           } )
    return rgb
def vis_point(renderer, point_state, window_name='test', interact=1, curr_joint=None, grasp=None, V=None):
    """visualize single point state """
    if type(point_state) is torch.Tensor:
        point_state = point_state.detach().cpu().numpy()
    pts = point_state.copy()
    colors = get_point_color(pts)
    if V is None:
        # default camera extrinsics
        V = [[0.3021, 0.668, 0.6801, 0.],
             [-0.7739, -0.2447, 0.5841, 0.],
             [0.5566, -0.7028, 0.4431, 1.1434],
             [0., 0., 0., 1.]]
    lines, line_colors = [], []
    if grasp is not None:
        starts, ends = grasp_gripper_lines(unpack_pose_rot_first(grasp.detach().cpu().numpy())[None])
        lines = [(starts[0], ends[0])]
        line_colors = [[255, 255, 0]]
    context = {
        "white_bg": True,
        "project_point": [pts[:3]],
        "project_color": [colors],
        "point_size": [3],
        "reset_line_point": True,
        "static_buffer": True,
        "line": lines,
        "line_color": line_colors,
    }
    return renderer.vis([], [],
                        shifted_pose=np.eye(4),
                        interact=interact,
                        V=np.array(V),
                        visualize_context=context)
def compose_state_traj(data_list, CONFIG, step=0):
    """downsampling traj

    Uniformly subsamples each array in `data_list` along time (starting at
    `step`) by CONFIG.sparsify_traj_ratio and returns float CUDA tensors.

    Fix: `np.int` (removed in NumPy 1.24) replaced by the builtin `int`.
    """
    downsample_length = (len(data_list[0]) - step) // int(CONFIG.sparsify_traj_ratio)
    idx = list(np.linspace(step, len(data_list[0]) - 1, downsample_length).astype(int))
    torch_data_list = []
    for data_idx, data in enumerate(data_list):
        torch_data_list.append(torch.from_numpy(np.stack([data[i] for i in idx], axis=0)).cuda().float())
    return torch_data_list
def update_expert_traj(agent, expert_data_list, cfg, step=0, remote=False):
    """ compute expert traj latent embedding """
    # expert_data_list layout (by index): 0 = point states, 1 = joints, 3 = goals
    expert_exec_traj = compose_state_traj(expert_data_list, cfg.RL_TRAIN, step)
    # remote=True routes through a ray actor handle (.remote), otherwise direct call
    if remote:
        recons_traj = agent.select_traj.remote(None,
                                               expert_data_list[0][step][None],
                                               None,
                                               vis=False,
                                               remain_timestep=cfg.RL_MAX_STEP,
                                               curr_joint=expert_data_list[1][step][None],
                                               gt_traj=expert_exec_traj) # generate the traj latent
    else:
        recons_traj = agent.select_traj(None,
                                        expert_data_list[0][step][None],
                                        None,
                                        vis=False,
                                        remain_timestep=cfg.RL_MAX_STEP,
                                        curr_joint=expert_data_list[1][step][None],
                                        gt_traj=expert_exec_traj) # generate the traj latent
    return expert_exec_traj, recons_traj, expert_exec_traj[3].detach().cpu().numpy()
def get_gaddpg(path=None, load_joint_trained_model=False):
    """ get pretrained GA-DDPG Models

    Loads the demo-model config, builds the networks, and returns the DDPG
    agent in evaluation mode.

    Fixes: yaml.load now passes Loader=yaml.SafeLoader (required by
    PyYAML >= 6 and consistent with make_nets_opts_schedulers), and the config
    file handle is closed via a context manager instead of leaking.
    """
    from core.ddpg import DDPG
    with open("output/demo_model/config.yaml", "r") as f:
        gaddpg_dict = edict(yaml.load(f, Loader=yaml.SafeLoader))
    net_dict = make_nets_opts_schedulers(gaddpg_dict.RL_MODEL_SPEC, gaddpg_dict.RL_TRAIN)
    gaddpg = DDPG(512, PandaTaskSpace6D(), gaddpg_dict.RL_TRAIN)
    gaddpg.setup_feature_extractor(net_dict, True)
    gaddpg.load_model('output/demo_model')
    gaddpg.set_mode(True)
    return gaddpg
def proj_point_img(img, K, offset_pose, points=None, color=(255, 0, 0),
                   vis=False, neg_y=True, traj=None, joint_output=False, last_joint=None,
                   remain_timestep=-1, gt_goal=None, traj_offset_pose=None, extra_text=None,
                   model_name=None, vis_traj_gradient_color=False):
    # draw traj lines, goal / actions predictions and texts in image plane
    """Project masked points and trajectory lines into the image plane.

    Target points are drawn green, obstacles red, robot points blue;
    trajectory line sets are projected through K and drawn with cv2.

    Fix: `.astype(np.int)` (alias removed in NumPy 1.24) replaced by
    `.astype(int)`.
    """
    target_mask = get_target_mask(points)
    obs_mask = get_obs_mask(points)
    robot_mask = get_robot_mask(points)
    colors = [[0, 255, 0], [255, 0, 0], [0, 0, 255]]
    img = img.copy()
    # point first
    for i, mask in enumerate([target_mask, obs_mask, robot_mask]):
        points_i = points[:, mask]
        points_xyz = points_i[:3]
        xyz_points = offset_pose[:3, :3].dot(points_xyz) + offset_pose[:3, [3]]
        if neg_y: xyz_points[:2] *= -1
        x, y, valid_idx_mask = valid_3d_to_2d(K, xyz_points, img)
        img[y[valid_idx_mask], x[valid_idx_mask]] = colors[i]
    if traj_offset_pose is None: traj_offset_pose = offset_pose
    if traj is not None and traj[0] is not None:
        if (remain_timestep == -1 or len(traj) == remain_timestep) and type(traj) is not list:
            # single trajectory: yellow gripper lines, cyan waypoint lines
            line_colors = [[255, 255, 0], [0, 255, 255]]
            traj_lines = gripper_traj_lines(traj_offset_pose, traj, joint_output)
        else:
            # multiple trajectories: split and give each its own hue
            if type(traj) is list:
                traj_num = len(traj)
                remain_timestep = len(traj[0])
                traj[0] = traj[0][:,:7]
                traj = np.concatenate(traj, axis=0)
            else:
                traj_num = int(len(traj) / remain_timestep)
            remain_timestep = int(remain_timestep)
            traj_lines = []
            hues = np.linspace(0., 5./6, traj_num )
            colors = np.stack([colorsys.hsv_to_rgb(hue, 1.0, 1.0) for hue in hues]) * 255
            line_colors = np.repeat(colors, 2, axis=0)
            for i in range(traj_num):
                traj_lines.extend(gripper_traj_lines(traj_offset_pose, traj[i*remain_timestep:(i+1)*remain_timestep ] ))
        for line_i, lines in enumerate(traj_lines):
            lines = np.array(lines)
            if len(lines) == 0: continue
            if neg_y: lines[:, :2] *= -1
            p_xyz = np.matmul(K, lines)
            x, y = (p_xyz[:,0] / p_xyz[:,2]).astype(int), (p_xyz[:,1] / p_xyz[:,2]).astype(int)
            x = np.clip(x, 0, img.shape[0] - 1)
            y = np.clip(y, 0, img.shape[1] - 1)
            for i in range(x.shape[1]):
                # avoid clipping issues
                color = line_colors[line_i]
                color = (int(color[0]), int(color[1]), int(color[2]))
                if np.abs(x[0, i] - x[1, i]) > 100 or np.abs(y[0, i] - y[1, i]) > 100:
                    continue
                if line_i == 1 and len(traj_lines) > 4 and not vis_traj_gradient_color:
                    cv2.line(img, (x[0, i], y[0, i]), (x[1, i], y[1, i]), color, 2)
                else:
                    cv2.line(img, (x[0, i], y[0, i]), (x[1, i], y[1, i]), color, 1)
    if extra_text is not None:
        img = add_extra_text(img, extra_text, 1.5) # 0.7
    return img
def draw_grasp_img(img, pose, K, offset_pose, color=(0, 0, 255), vis=False, neg=True ):
    """Project a 6-point gripper wireframe for `pose` into the image and draw it.

    Fix: `.astype(np.int)` (alias removed in NumPy 1.24) replaced by
    `.astype(int)`.
    """
    img_cpy = img.copy()
    # segment endpoints (indices into the 6 hand anchor points)
    line_index = [[0, 1, 1, 2, 3], [1, 2, 3, 4, 5]]
    hand_anchor_points = grasp_points_from_pose(pose, offset_pose)
    if neg: hand_anchor_points[:2] *= -1
    p_xyz = K.dot(hand_anchor_points)
    x, y = (p_xyz[0] / p_xyz[2]).astype(int), (p_xyz[1] / p_xyz[2]).astype(int)
    x = np.clip(x, 0, img.shape[0] - 1)
    y = np.clip(y, 0, img.shape[1] - 1)
    for i in range(len(line_index[0])):
        pt1 = (x[line_index[0][i]], y[line_index[0][i]])
        pt2 = (x[line_index[1][i]], y[line_index[1][i]])
        cv2.line(img_cpy, pt1, pt2, color, 2)
    return img_cpy
def remove_robot_pt(points):
    """Drop the trailing 500 robot collision points along the last axis."""
    keep = points.shape[-1] - 500
    return points[..., :keep]
def reparameterize(mu, logsigma, truncated=True, fix_eps=None):
    """VAE reparameterization trick: return mu + eps * exp(logsigma)."""
    std = torch.exp(logsigma)
    if truncated:
        eps = sample_gaussian(std.shape, truncate_std=2.).cuda()
    else:
        eps = torch.randn_like(std)
    # a caller-supplied eps overrides the sampled one (the sample is still drawn,
    # preserving the RNG stream)
    if fix_eps is not None:
        eps = fix_eps
    return mu + eps * std
def has_check(x, prop):
    """Return the attribute value if `x.prop` exists and is truthy, else a falsy value."""
    return getattr(x, prop, False) and getattr(x, prop)
def check_scene(env, state=None, start_rot=None, object_performance=None, planner=None,
                scene_name=None, run_iter=0, check_ik=False, CONFIG=None, load_test_scene=False):
    """
    check if a scene is valid by its distance, view, hand direction, target object state, and object counts
    """
    # pre-recorded test scenes only need the per-object test-count check
    if load_test_scene : return name_check(env, object_performance, run_iter)
    MAX_TEST_PER_OBJ = CONFIG.max_test_per_obj
    pose_flag = pose_check(env, state, start_rot, CONFIG)
    name_flag = name_check(env, object_performance, run_iter, MAX_TEST_PER_OBJ)
    collision_flag = not env.collided
    check_flag = pose_flag and name_flag and collision_flag
    # optional (expensive) IK feasibility check via the expert planner
    if check_flag and check_ik:
        goal_validity = planner.expert_plan(return_success=True, check_scene=True)
        if not goal_validity:
            return False
    return check_flag
def sample_scene(env, planner, object_performance=None, scene_file=None, run_iter=0, CONFIG=None, timeout=6.):
    """
    sample scenes with ik filtering

    Returns (state, flag): the last sampled environment state and whether it
    passed the scene checks (including the final IK-filtered check).  Gives up
    after `timeout` seconds and returns (state, False).
    """
    state = None
    MAX_TEST_PER_OBJ = CONFIG.max_test_per_obj
    start_time = time.time()
    outer_cnt, inner_cnt = CONFIG.scene_sample_check_ik_cnt, CONFIG.scene_sample_inner_cnt
    if CONFIG.index_file == 'filter_shapenet.json':
        inner_cnt *= 3
    for _ in range(outer_cnt):
        # inner loop: sample scenes until one passes the cheap (no-IK) checks
        for _ in range(inner_cnt):
            if time.time() - start_time > timeout:
                return state, False
            # stop early if the target object already reached its test quota
            flag = not test_cnt_check(env, object_performance, run_iter, MAX_TEST_PER_OBJ)
            if flag: break
            state = env.reset( scene_file=None, init_joints=rand_sample_joint(env, None, CONFIG.ENV_NEAR, CONFIG.ENV_FAR),
                               reset_free=True, enforce_face_target=True )
            cur_ef_pose = env._get_ef_pose(mat=True)
            flag = check_scene(env, state, cur_ef_pose[:3, :3], object_performance,
                               planner, scene_file, run_iter, False, CONFIG)
            if flag: break
        # outer loop: re-validate the candidate with the expensive IK check
        if flag and check_scene(env, state, cur_ef_pose[:3, :3], object_performance,
                                planner, scene_file, run_iter, True, CONFIG):
            break
    return state, flag
def select_target_point(state, target_pt_num=1024):
    """get target point cloud from scene point cloud """
    scene_pc = state[0][0]
    masked = scene_pc[:4, get_target_mask(scene_pc)]
    gripper_pc = masked[:4, :6]
    n_points = min(masked.shape[1], target_pt_num)
    obj_pc = regularize_pc_point_count(masked.T, n_points, False).T
    combined = np.concatenate((gripper_pc, obj_pc), axis=1)
    return [(combined, state[0][1])] + state[1:]
def gaddpg_action(gaddpg, state, action, episode_steps, max_steps, curr_joint, return_goal=False):
    """apply GA-DDPG action """
    state = select_target_point(state)
    # no target points visible: return a null action and dummy goal
    if state[0][0].shape[1] == 0:
        return np.zeros(6), np.ones(7) * 0.01
    gaddpg_remain_step = max(min(max_steps - episode_steps + 1, 30), 1)
    print('use gaddpg remaining step: {}...'.format(gaddpg_remain_step))
    action, _, _, aux_pred = gaddpg.select_action(state, remain_timestep=gaddpg_remain_step, curr_joint=curr_joint)
    if return_goal:
        return action, aux_pred
    return action
class PandaTaskSpace6D():
    """6-DoF task-space action bounds for the Panda end effector."""
    def __init__(self):
        # translation limits (0.1 m) and rotation limits (pi/6 rad) per axis
        limits = np.array([0.1, 0.1, 0.1, np.pi / 6, np.pi / 6, np.pi / 6])
        self.high = limits
        self.low = -limits
        self.shape = [6]
        self.bounds = np.vstack([self.low, self.high])
class RobotMLP(nn.Module):
    """Simple PointNet-like MLP.

    A stack of 1x1 Conv1d -> norm -> ReLU layers applied per point, followed
    by a channel-wise max-pool over the point dimension, producing one global
    feature vector per batch element.
    """

    def __init__(self, in_channels, out_channels, dim=1, gn=False, gn_num=16):
        """
        Args:
            in_channels: number of input channels per point.
            out_channels: int or list/tuple of ints, one per conv layer.
            dim: only 1 (Conv1d) is supported; anything else raises ValueError.
            gn: use GroupNorm(gn_num, C) instead of BatchNorm1d.
            gn_num: number of groups when gn is True.
        """
        super(RobotMLP, self).__init__()
        if dim == 1:
            conv = nn.Conv1d
            if gn:
                bn = lambda k: nn.GroupNorm(gn_num, k)
            else:
                bn = nn.BatchNorm1d
        else:
            raise ValueError
        if not isinstance(out_channels, (list, tuple)):
            out_channels = [out_channels]
        layers = []
        for oc in out_channels:
            layers.extend([
                conv(in_channels, oc, 1),
                bn(oc),
                nn.ReLU(True),
            ])
            in_channels = oc
        self.layers = nn.Sequential(*layers)

    def forward(self, inputs, masks=None):
        """Max-pool per-point features into a (B, C) global descriptor.

        ``masks`` is accepted for API compatibility but is currently unused.
        """
        # Flatten any leading batch dims to (B, C, N).
        # (Fix: removed the unused `inp_shape` local from the original.)
        inputs = inputs.view(-1, inputs.shape[-2], inputs.shape[-1])
        x = self.layers(inputs)
        x = torch.max(x, 2, keepdim=True)[0]
        x = x.view(len(x), -1)
        return x
def clean_dir(dst_dir):
    """Delete everything inside *dst_dir* (files, symlinks, subtrees).

    No-op when the directory does not exist. Replaces the previous
    ``os.system('rm -rf {}/*')`` call, which was shell-dependent, unsafe for
    paths containing spaces/metacharacters, and silently skipped dotfiles.
    """
    import shutil  # local import keeps the function self-contained

    if os.path.exists(dst_dir):
        for entry in os.listdir(dst_dir):
            path = os.path.join(dst_dir, entry)
            if os.path.isdir(path) and not os.path.islink(path):
                shutil.rmtree(path)
            else:
                os.remove(path)
def get_usage_and_success():
    """Return (memory_usage, gpu_usage): percent RAM used and the maximum
    GPU memory used (MB) across all visible GPUs.

    Fix: ``max(..., default=0)`` avoids a ValueError on hosts where GPUtil
    reports no GPUs.
    NOTE(review): despite the name, no "success"/performance value is
    returned here -- confirm against callers.
    """
    GPUs = GPUtil.getGPUs()
    memory_usage = psutil.virtual_memory().percent
    gpu_usage = max([GPU.memoryUsed for GPU in GPUs], default=0)
    return memory_usage, gpu_usage
def get_model_path(output_dir, name, env_name, surfix):
    """Build the five checkpoint paths (actor, critic, traj_feat,
    traj_sampler, state_feat) for a run.

    Note: the parameter is spelled ``surfix`` (sic) in callers, so the
    name is kept for compatibility.
    """
    def path_for(component):
        return "{}/{}_{}_{}_{}".format(output_dir, name, component, env_name, surfix)

    return (
        path_for("actor"),
        path_for("critic"),
        path_for("traj_feat"),
        path_for("traj_sampler"),
        path_for("state_feat"),
    )
def count_parameters(model):
    """Return the number of trainable scalar parameters in *model*."""
    total = 0
    for param in model.parameters():
        if param.requires_grad:
            total += param.numel()
    return total
def get_loss_info_dict():
    """Create the rolling-window containers used for training statistics.

    Each entry is a deque seeded with a single 0; the maxlen controls the
    averaging window (100 for critic losses, 50 for other losses, 5 for
    per-iteration diagnostics). Insertion order matches the original dict.
    """
    spec = [
        ("bc_loss", 50),
        ("policy_grasp_aux_loss", 50),
        ("critic_gaddpg_loss", 100),
        ("critic_loss", 100),
        ("kl_loss", 50),
        ("sampler_grasp_aux_loss", 50),
        ("sampler_bc_loss", 50),
        ("traj_latent_loss", 50),
        ("gaddpg_loss", 50),
        ("reward_mask_num", 5),
        ("expert_reward_mask_num", 5),
        ("value_mean", 5),
        ("return_mean", 5),
        ("gaddpg_pred_mean", 5),
        ("traj_grad", 5),
        ("traj_param", 5),
        ("policy_param", 5),
        ("sampler_mean", 5),
        ("traj_num", 5),
        ("sampler_logsigma", 5),
        ("policy_grad", 5),
        ("feat_grad", 5),
        ("feat_param", 5),
        ("val_feat_grad", 5),
        ("val_feat_param", 5),
        ("critic_grad", 5),
        ("critic_param", 5),
        ("train_batch_size", 5),
    ]
    return {name: deque([0], maxlen=window) for name, window in spec}
def flip(x, dim):
    """Reverse tensor *x* along dimension *dim* (device-preserving)."""
    reversed_index = torch.arange(x.size(dim) - 1, -1, -1, dtype=torch.long, device=x.device)
    selector = [slice(None) for _ in range(x.dim())]
    selector[dim] = reversed_index
    return x[tuple(selector)]
def create_bottleneck(input_size, latent_size):
    """Return two parallel linear heads (mean and log-variance projections)
    for a VAE-style bottleneck, packed into an nn.ModuleList."""
    heads = [nn.Linear(input_size, latent_size) for _ in range(2)]
    return nn.ModuleList(heads)
def get_policy_class(policy_net_name, args):
    """Build (policy, optimizer, lr-scheduler, target-policy) on CUDA.

    ``policy_net_name`` is looked up in ``core.networks``; ``args`` supplies
    the network sizes, action space, and optimizer hyperparameters.
    """
    policy = getattr(core.networks, policy_net_name)(
        args.num_inputs,
        args.action_dim,
        args.hidden_size,
        args.action_space,
        extra_pred_dim=args.extra_pred_dim,
        config=args,
    ).to('cuda')
    policy_optim = Adam(
        policy.parameters(), lr=args.lr, eps=1e-5, weight_decay=1e-5 )
    policy_scheduler = torch.optim.lr_scheduler.MultiStepLR(
        policy_optim, milestones=args.policy_milestones, gamma=args.lr_gamma)
    # Target network: identical architecture; presumably synced with the
    # online policy by the trainer (not shown here).
    policy_target = getattr(core.networks, policy_net_name)(
        args.num_inputs,
        args.action_dim,
        args.hidden_size,
        args.action_space,
        extra_pred_dim=args.extra_pred_dim,
        config=args,
    ).to('cuda')
    return policy, policy_optim, policy_scheduler, policy_target
def get_critic(args):
    """Build (critic, optimizer, lr-scheduler, target-critic) on CUDA.

    Uses ResidualQNetwork when args enables 'dense_critic', else QNetwork.
    """
    model = core.networks.ResidualQNetwork if has_check(args, 'dense_critic') else core.networks.QNetwork
    critic = model(
        args.critic_num_input,
        args.critic_value_dim,
        args.hidden_size,
        extra_pred_dim=args.critic_extra_pred_dim,
    ).cuda()
    critic_optim = Adam(
        critic.parameters(), lr=args.value_lr, eps=1e-5, weight_decay=1e-5 )
    critic_scheduler = torch.optim.lr_scheduler.MultiStepLR(
        critic_optim,
        milestones=args.value_milestones,
        gamma=args.value_lr_gamma,
    )
    # Target critic: identical architecture; presumably synced by the
    # trainer (not shown here).
    critic_target = model(
        args.critic_num_input,
        args.critic_value_dim,
        args.hidden_size,
        extra_pred_dim=args.critic_extra_pred_dim,
    ).cuda()
return critic, critic_optim, critic_scheduler, critic_target | liruiw/HCG | core/utils.py | utils.py | py | 48,148 | python | en | code | 13 | github-code | 36 | [
{
"api_name": "numpy.array",
"line_number": 54,
"usage_type": "call"
},
{
"api_name": "numpy.array",
"line_number": 57,
"usage_type": "call"
},
{
"api_name": "pybullet_utils.bullet_client.BulletClient",
"line_number": 85,
"usage_type": "call"
},
{
"api_name": "pyb... |
6043631950 | from .dbtest import (
DbTest,
dbconnect
)
import os
from psycopg2.extras import (
RealDictCursor,
RealDictRow
)
PATH_TO_SQL_DIR = os.path.abspath(
os.path.join(
os.path.dirname(__file__),
"..",
"sql"
)
)
class TestExample(DbTest):
    """DB integration tests: load SQL fixtures, run (PostGIS) queries, and
    assert exact result rows."""

    @dbconnect
    def test_select_organizations(self, conn):
        """A plain SELECT returns all 7 fixture organizations."""
        self.load_fixtures(
            conn,
            os.path.join(PATH_TO_SQL_DIR, "organizations.sql")
        )
        sql = """
        SELECT * FROM organizations;
        """
        with conn.cursor(cursor_factory=RealDictCursor) as cur:
            cur.execute(sql)
            organizations = cur.fetchall()
            assert len(organizations) == 7

    @dbconnect
    def test_count_the_number_of_subordinates(self, conn):
        """LEFT JOIN keeps organizations with no customers (count 0); one
        row per organization, ordered by id."""
        self.load_fixtures(
            conn,
            os.path.join(PATH_TO_SQL_DIR, "organizations.sql")
        )
        sql = """
        SELECT COUNT(enterprise_sales_enterprise_customers.sales_organization_id) as subordinates_count, organizations."id" from organizations
        LEFT JOIN enterprise_sales_enterprise_customers ON organizations.id=enterprise_sales_enterprise_customers.sales_organization_id
        GROUP BY enterprise_sales_enterprise_customers.sales_organization_id, organizations."id" ORDER BY organizations."id";
        """
        with conn.cursor(cursor_factory=RealDictCursor) as cur:
            cur.execute(sql)
            actual = cur.fetchall()
            assert len(actual) == 7
            assert actual == [
                RealDictRow(**{
                    "subordinates_count": 0,
                    "id": 1,
                })
                , RealDictRow(**{
                    "subordinates_count": 4,
                    "id": 2,
                })
                , RealDictRow(**{
                    "subordinates_count": 0,
                    "id": 3,
                })
                , RealDictRow(**{
                    "subordinates_count": 0,
                    "id": 4,
                })
                , RealDictRow(**{
                    "subordinates_count": 0,
                    "id": 5,
                })
                , RealDictRow(**{
                    "subordinates_count": 1,
                    "id": 6,
                })
                , RealDictRow(**{
                    "subordinates_count": 0,
                    "id": 7,
                })
            ]

    @dbconnect
    def test_calculate_center_of_each_segment(self, conn):
        """ST_Centroid of each segment's bounds, decomposed into
        longitude (ST_X) and latitude (ST_Y)."""
        self.load_fixtures(
            conn,
            os.path.join(PATH_TO_SQL_DIR, "japan_segments.sql")
        )
        sql = """
        SELECT sub_query.id, ST_X(sub_query.bounds_center) as longitude, ST_Y(sub_query.bounds_center) as latitude
        FROM (SELECT japan_segments.id as id, st_centroid(bounds) as bounds_center FROM japan_segments) as sub_query;
        """
        with conn.cursor(cursor_factory=RealDictCursor) as cur:
            cur.execute(sql)
            actual = cur.fetchall()
            assert len(actual) == 10
            assert actual == [
                RealDictRow(**{
                    "id": "KAGOSHIMA_1",
                    "longitude": 130.642228315775,
                    "latitude": 30.7045454545455,
                })
                , RealDictRow(**{
                    "id": "KAGOSHIMA_2",
                    "longitude": 130.694183864916,
                    "latitude": 30.7045454545455,
                })
                , RealDictRow(**{
                    "id": "KAGOSHIMA_3",
                    "longitude": 130.746139414057,
                    "latitude": 30.7045454545455,
                })
                , RealDictRow(**{
                    "id": "KAGOSHIMA_4",
                    "longitude": 129.707028431231,
                    "latitude": 30.75,
                })
                , RealDictRow(**{
                    "id": "KAGOSHIMA_5",
                    "longitude": 129.758983980373,
                    "latitude": 30.75,
                })
                , RealDictRow(**{
                    "id": "KAGOSHIMA_6",
                    "longitude": 129.810939529514,
                    "latitude": 30.75,
                })
                , RealDictRow(**{
                    "id": "KAGOSHIMA_7",
                    "longitude": 129.862895078655,
                    "latitude": 30.75,
                })
                , RealDictRow(**{
                    "id": "KAGOSHIMA_8",
                    "longitude": 129.914850627797,
                    "latitude": 30.75,
                })
                , RealDictRow(**{
                    "id": "KAGOSHIMA_9",
                    "longitude": 129.966806176937,
                    "latitude": 30.75,
                })
                , RealDictRow(**{
                    "id": "KAGOSHIMA_10",
                    "longitude": 130.018761726079,
                    "latitude": 30.75,
                })
            ]

    @dbconnect
    def test_segments_using_geojson_boundary(self, conn):
        """ST_Contains filters segments fully inside a hand-built
        SRID-4326 polygon boundary."""
        self.load_fixtures(
            conn,
            os.path.join(PATH_TO_SQL_DIR, "japan_segments.sql")
        )
        sql = """
        SELECT sub.id from (SELECT * from japan_segments, (SELECT ST_GeomFromEWKT('SRID=4326;POLYGON((130.27313232421875 30.519681272749402,131.02020263671875 30.519681272749402,
        131.02020263671875 30.80909017893796,130.27313232421875 30.80909017893796,130.27313232421875 30.519681272749402))') as boundary) as sub_query) as sub where ST_Contains(sub.boundary, sub.bounds)
        """
        with conn.cursor(cursor_factory=RealDictCursor) as cur:
            cur.execute(sql)
            actual = cur.fetchall()
            print(actual)
            assert len(actual) == 3
            assert actual == [
                RealDictRow(**{
                    "id": "KAGOSHIMA_1",
                })
                , RealDictRow(**{
                    "id": "KAGOSHIMA_2",
                })
                , RealDictRow(**{
                    "id": "KAGOSHIMA_3",
                })
            ]
| HaithamKhedrSalem/postgis-practices-solution | test/test_example.py | test_example.py | py | 6,115 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "os.path.abspath",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 13,
"usage_type": "attribute"
},
{
"api_name": "os.path.join",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number"... |
14183098505 | import pygame
import config
import math
from unit import Unit
from unit_move import UnitMove
class Enemy(Unit):
    """Base enemy unit: chases a position, deals timed contact damage, and
    owns a circular 'look' (detection) radius."""

    def __init__(self, x, y):
        super().__init__(x, y)
        self.start_position = [x, y]  # spawn point (see get_start_position)
        self.time_till_damage = 0  # contact-damage cooldown counter
        self.look = [] # [center_x, center_y, radius]
        self.score = 0  # value exposed via get_score -- usage not visible here

    def move_to_position(self, move_to_position):
        """Step toward *move_to_position* at config.BAT_VELOCITY, facing the
        direction of travel."""
        position = self.get_position()
        # Find direction vector (dx, dy) between enemy and player.
        dx, dy = move_to_position[0] - position[0], move_to_position[1] - position[1]
        dist = math.hypot(dx, dy)
        if dx < 0:
            self.set_direction(UnitMove.LEFT)
        else:
            self.set_direction(UnitMove.RIGHT)
        if dist > 1:
            dx, dy = dx / dist, dy / dist  # Normalize.
            # Move along this normalized vector towards the player at current speed.
            position[0] += dx * config.BAT_VELOCITY
            position[1] += dy * config.BAT_VELOCITY
        self.set_position(position[0], position[1])

    def contains_look(self, player):
        """Return True when the center of the player's hitbox lies inside
        this enemy's circular look radius."""
        player_corners = player.get_hitbox_corners()
        enemy_look = self.get_look()
        player_center = [player_corners[0][0] + (player_corners[1][0] - player_corners[0][0])/2,
                         player_corners[0][1] + (player_corners[3][1] - player_corners[0][1])/2]
        dx = enemy_look[0] - player_center[0]
        dy = enemy_look[1] - player_center[1]
        # squared-distance comparison avoids a sqrt
        if dx * dx + dy * dy <= enemy_look[2] * enemy_look[2]:
            return True
        return False

    def contains(self, player):
        """Return True when any corner of the player's hitbox [x, y, w, h]
        is inside this enemy's hitbox and the damage cooldown is at 0."""
        player_hitbox = player.get_hitbox()
        enemy_hitbox = self.get_hitbox()
        time_till_damage = self.get_time_till_damage()
        if time_till_damage == 0:
            # Check player top-left corner (x, y) is in enemy hitbox
            if enemy_hitbox[0] <= player_hitbox[0] <= enemy_hitbox[0] + enemy_hitbox[2]:
                if enemy_hitbox[1] <= player_hitbox[1] <= enemy_hitbox[1] + enemy_hitbox[3]:
                    self.set_time_till_damage(time_till_damage + 1)
                    return True
            # Check player bottom-left corner (x, y + h) is in enemy hitbox
            # (the original comment said "up right"; the coordinates tested
            # below are x and y + h)
            if enemy_hitbox[0] <= player_hitbox[0] <= enemy_hitbox[0] + enemy_hitbox[2]:
                if enemy_hitbox[1] <= player_hitbox[1] + player_hitbox[3] <= enemy_hitbox[1] + enemy_hitbox[3]:
                    self.set_time_till_damage(time_till_damage + 1)
                    return True
            # Check player bottom-right corner (x + w, y + h) is in enemy hitbox
            if enemy_hitbox[0] <= player_hitbox[0] + player_hitbox[2] <= enemy_hitbox[0] + enemy_hitbox[2]:
                if enemy_hitbox[1] <= player_hitbox[1] + player_hitbox[3] <= enemy_hitbox[1] + enemy_hitbox[3]:
                    self.set_time_till_damage(time_till_damage + 1)
                    return True
            # Check player top-right corner (x + w, y) is in enemy hitbox
            # (the original comment said "down left"; the coordinates tested
            # below are x + w and y)
            if enemy_hitbox[0] <= player_hitbox[0] + player_hitbox[2] <= enemy_hitbox[0] + enemy_hitbox[2]:
                if enemy_hitbox[1] <= player_hitbox[1] <= enemy_hitbox[1] + enemy_hitbox[3]:
                    self.set_time_till_damage(time_till_damage + 1)
                    return True
        else:
            # Cooldown in progress: tick up until config.TIME_TILL_DAMAGE, then reset.
            # NOTE(review): the counter advances twice per call here (+= 1 on
            # the local, then stored as + 1) -- confirm this is intended.
            time_till_damage += 1
            if time_till_damage < config.TIME_TILL_DAMAGE:
                self.set_time_till_damage(time_till_damage + 1)
            else:
                self.set_time_till_damage(0)
        return False

    def render_look(self, screen, camera):
        """Debug-draw the look circle, translated into camera space."""
        look = self.get_look()
        pygame.draw.circle(screen, (0, 0, 255), [look[0] - camera[0], look[1] - camera[1]], look[2], 1)

    def set_look(self, hitbox):
        """Center the look circle on *hitbox* with radius config.RADIUS."""
        self.look = [hitbox[0] + hitbox[2] / 2, hitbox[1] + hitbox[3] / 2, config.RADIUS]

    def get_look(self):
        return self.look

    def get_start_position(self):
        return self.start_position

    def set_time_till_damage(self, time_till_damage):
        self.time_till_damage = time_till_damage

    def get_time_till_damage(self):
        return self.time_till_damage

    def move_directrion(self, dx, dy):
        # No-op placeholder; the misspelled name ("directrion") is kept
        # as-is for caller compatibility.
        pass

    def get_score(self):
        return self.score
| EdySima/The-Lost-Penguin | enemy.py | enemy.py | py | 4,317 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "unit.Unit",
"line_number": 8,
"usage_type": "name"
},
{
"api_name": "math.hypot",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "unit_move.UnitMove.LEFT",
"line_number": 24,
"usage_type": "attribute"
},
{
"api_name": "unit_move.UnitMove",... |
17354358336 | import typing as t
import numpy as np
from emo_utils import convert_to_one_hot
from emo_utils import predict
from emo_utils import softmax
from tensorflow.keras.layers import LSTM
from tensorflow.keras.layers import Activation
from tensorflow.keras.layers import Dense
from tensorflow.keras.layers import Dropout
from tensorflow.keras.layers import Embedding
from tensorflow.keras.layers import Input
from tensorflow.keras.models import Model
def sentence_to_avg(
    sentence: str,
    word_to_vec_map: dict[str, t.Any],
) -> np.ndarray:
    """Encode *sentence* as the mean of its words' GloVe vectors.

    Words are lowercased before lookup; words missing from *word_to_vec_map*
    are skipped. When no word is known, the zero vector (with the map's
    embedding dimension) is returned.
    """
    lowered = (raw.lower() for raw in sentence.split())
    known_vectors = [word_to_vec_map[w] for w in lowered if w in word_to_vec_map]
    if not known_vectors:
        # no recognized word: fall back to a zero vector of the right size
        any_word = next(iter(word_to_vec_map))
        return np.zeros(word_to_vec_map[any_word].shape[0])
    return np.mean(known_vectors, axis=0)
def model(
    X,
    Y,
    word_to_vec_map,
    learning_rate=0.01,
    num_iterations=400,
):
    """
    Model to train word vector representations in numpy.

    Arguments:
    X -- input data, numpy array of sentences as strings, of shape (m,)
    Y -- labels, numpy array of integers between 0 and 7, numpy-array of shape (m, 1)
    word_to_vec_map -- dictionary mapping every word in a vocabulary into its
      50-dimensional vector representation
    learning_rate -- learning_rate for the stochastic gradient descent algorithm
    num_iterations -- number of iterations

    Returns:
    pred -- vector of predictions, numpy-array of shape (m, 1)
    W -- weight matrix of the softmax layer, of shape (n_y, n_h)
    b -- bias of the softmax layer, of shape (n_y,)
    """
    # Get a valid word contained in the word_to_vec_map
    any_word = list(word_to_vec_map.keys())[0]
    # number of training examples
    m = Y.shape[0]
    # number of classes
    n_y = len(np.unique(Y))
    # dimensions of the GloVe vectors
    n_h = word_to_vec_map[any_word].shape[0]
    # Initialize parameters using Xavier initialization
    W = np.random.randn(n_y, n_h) / np.sqrt(n_h)
    b = np.zeros((n_y,))
    # Convert Y to Y_one_hot with n_y classes
    Y_oh = convert_to_one_hot(Y, C=n_y)
    # Optimization loop.
    # NOTE(review): the loop variable `t` shadows the module-level
    # `import typing as t` alias for the rest of this function.
    for t in range(num_iterations):
        cost = 0
        dW = 0
        db = 0
        # Loop over the training examples
        for i in range(m):
            # Average the word vectors of the words from the i'th training example
            avg = sentence_to_avg(X[i], word_to_vec_map)
            # Forward propagate the avg through the softmax layer.
            z = W @ avg + b
            a = softmax(z)
            # Add the cost using the i'th training label's one hot representation and
            # "A" (the output of the softmax)
            cost += -np.sum(Y_oh[i] * np.log(a))
            # Compute gradients
            dz = a - Y_oh[i]
            dW += np.dot(dz.reshape(n_y, 1), avg.reshape(1, n_h))
            db += dz
        # Update parameters with Stochastic Gradient Descent
        W = W - learning_rate * dW
        b = b - learning_rate * db
        # Sanity checks: cost must be a 0-d numpy float (sum of scalars)
        assert type(cost) == np.float64, "Incorrect implementation of cost"
        assert cost.shape == (), "Incorrect implementation of cost"
        if t % 100 == 0:
            print("Epoch: " + str(t) + " --- cost = " + str(cost))
            # `pred` is (re)computed every 100 epochs; t == 0 fires first,
            # so it is always defined by the time the function returns
            pred = predict(X, Y, W, b, word_to_vec_map)
    return pred, W, b
def sentences_to_indices(
    X,
    word_to_index,
    max_len,
):
    """Map an array of sentences to a (m, max_len) matrix of word indices.

    Words are lowercased before lookup; unknown words are skipped and do not
    advance the column position, so unused trailing slots stay 0. Sentences
    are assumed to contain at most max_len known words, as in the original
    contract.
    """
    X_indices = np.zeros((X.shape[0], max_len))
    for row, sentence in enumerate(X):
        col = 0
        for raw_word in sentence.split():
            word = raw_word.lower()
            if word in word_to_index:
                X_indices[row, col] = word_to_index[word]
                col += 1
    return X_indices
def pretrained_embedding_layer(
    word_to_vec_map,
    word_to_index,
):
    """
    Creates a Keras Embedding() layer and loads in pre-trained GloVe 50-dimensional
    vectors.

    Arguments:
    word_to_vec_map -- dictionary mapping words to their GloVe vector representation.
    word_to_index -- dictionary mapping from words to their indices in
      the vocabulary (400,001 words)

    Returns:
    embedding_layer -- pretrained layer Keras instance
    """
    # adding 1 to fit Keras embedding (requirement: index 0 is reserved)
    vocab_size = len(word_to_index) + 1
    any_word = list(word_to_vec_map.keys())[0]
    # define dimensionality of your GloVe word vectors (= 50)
    emb_dim = word_to_vec_map[any_word].shape[0]
    # Initialize the embedding matrix as a numpy array of zeros.
    emb_matrix = np.zeros((vocab_size, emb_dim))
    # Set each row "idx" of the embedding matrix to be
    # the word vector representation of the idx'th word of the vocabulary
    for word, idx in word_to_index.items():
        emb_matrix[idx, :] = word_to_vec_map[word]
    # Define Keras embedding layer with the correct input and output sizes.
    # Make it non-trainable so the GloVe vectors stay fixed during training.
    embedding_layer = Embedding(
        input_dim=vocab_size,
        output_dim=emb_dim,
        trainable=False,
    )
    # build() must run before set_weights() can install the matrix
    embedding_layer.build((None,))
    # Set the weights of the embedding layer to the embedding matrix.
    embedding_layer.set_weights([emb_matrix])
    return embedding_layer
def Emojify_V2(
    input_shape,
    word_to_vec_map,
    word_to_index,
):
    """
    Function creating the Emojify-v2 model's graph.

    Arguments:
    input_shape -- shape of the input, usually (max_len,)
    word_to_vec_map -- dictionary mapping every word in a vocabulary into its
      50-dimensional vector representation
    word_to_index -- dictionary mapping from words to their indices in the
      vocabulary (400,001 words)

    Returns:
    model -- a model instance in Keras
    """
    # Define sentence_indices as the input of the graph.
    # It should be of shape input_shape and dtype 'int32'
    # (as it contains indices, which are integers).
    sentence_indices = Input(shape=input_shape, dtype="int32")
    # Create the embedding layer pretrained with GloVe Vectors
    embedding_layer = pretrained_embedding_layer(word_to_vec_map, word_to_index)
    # Propagate sentence_indices through the embedding layer
    embeddings = embedding_layer(sentence_indices)
    # Propagate the embeddings through an LSTM layer with 128-dimensional hidden state.
    # The returned output should be a batch of sequences.
    X = LSTM(units=128, return_sequences=True)(embeddings)
    # Add dropout with a probability of 0.5
    X = Dropout(rate=0.5)(X)
    # Propagate X through another LSTM layer with 128-dimensional hidden state.
    # The returned output should be a single hidden state, not a batch of sequences.
    X = LSTM(units=128, return_sequences=False)(X)
    # Add dropout with a probability of 0.5
    X = Dropout(rate=0.5)(X)
    # Propagate X through a Dense layer with 5 units (one per emoji class)
    X = Dense(units=5)(X)
    # Add a softmax activation
    X = Activation("softmax")(X)
    # Create Model instance which converts sentence_indices into X.
    model = Model(inputs=sentence_indices, outputs=X)
    return model
| HarryMWinters/ML_Coursework | Course 6, Sequence Models/Week 2/assignment_2/Emoji_v3a.py | Emoji_v3a.py | py | 8,499 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "typing.Any",
"line_number": 18,
"usage_type": "attribute"
},
{
"api_name": "numpy.zeros",
"line_number": 38,
"usage_type": "call"
},
{
"api_name": "numpy.ndarray",
"line_number": 19,
"usage_type": "attribute"
},
{
"api_name": "numpy.unique",
"li... |
9910221737 | import subprocess
import threading
import io
from fcntl import fcntl, F_GETFL, F_SETFL
from os import O_NONBLOCK
import sys
#from flask_socketio import SocketIO
command = "pintos -v -k --qemu --disk cs162proj.dsk -- -q run shell"
class Shell():
    """Drives an interactive subprocess (the Pintos shell) over
    non-blocking stdin/stdout pipes.

    ``app`` is expected to expose ``.emit(event, payload)`` (e.g. a
    SocketIO server) -- TODO confirm against callers.
    """

    def set_flags(self, pipe):
        """Switch *pipe* to non-blocking mode so reads/writes never stall."""
        flags = fcntl(pipe, F_GETFL)
        fcntl(pipe, F_SETFL, flags | O_NONBLOCK)

    def __init__(self, app, command):
        self.app = app
        self.cmd = command
        self.p = None  # subprocess handle, created lazily by run()
        self.output_thread = threading.Thread()  # placeholder thread object (unused here)

    def run(self):
        """Spawn the subprocess once; later calls are no-ops."""
        if self.p is None:  # fix: identity comparison instead of `== None` (PEP 8)
            self.p = subprocess.Popen(self.cmd.split(' '),
                                      cwd="./os_build",
                                      stdin=subprocess.PIPE,
                                      stdout=subprocess.PIPE,
                                      )
            self.set_flags(self.p.stdout)
            self.set_flags(self.p.stdin)

    def output(self):
        """Forward subprocess stdout to the app, flushing buffered text
        whenever the non-blocking read runs dry.

        NOTE(review): this loop never sleeps, so it busy-waits a full core;
        consider a short sleep or select() if CPU usage matters.
        """
        buf = io.StringIO()
        while True:
            data = self.p.stdout.read(1)
            if data:
                buf.write(data.decode('utf-8'))
            else:
                if buf.getvalue() != "":
                    self.app.emit('send_output', buf.getvalue())
                    # fix: only reallocate the buffer after emitting, instead
                    # of churning a fresh StringIO on every idle iteration
                    buf = io.StringIO()

    def input(self, command):
        """Write *command* to the subprocess stdin and flush immediately."""
        self.p.stdin.write(command.encode())
        self.p.stdin.flush()
shell = Shell(None, command)
| dietd/webpintos | shell.py | shell.py | py | 1,345 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "fcntl.fcntl",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "fcntl.F_GETFL",
"line_number": 13,
"usage_type": "argument"
},
{
"api_name": "fcntl.fcntl",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "fcntl.F_SETFL",
"line_n... |
38488846369 | #!/usr/bin/env python3
#
# 10. Bayesian History Matching technique (advanced use)
#
import os
from pathlib import Path
import matplotlib.pyplot as plt
import numpy as np
import json
from GPErks.constants import DEFAULT_TMP_OUTFILE_DIR
from GPErks.perks.history_matching import Wave
from GPErks.serialization.labels import read_labels_from_file
from GPErks.serialization.path import posix_path
from GPErks.utils.array import get_minmax
from GPErks.utils.plotting import interp_col, get_col
from GPErks.utils.sampling import Sampler
from gpytorch.kernels import MaternKernel, ScaleKernel
from gpytorch.likelihoods import GaussianLikelihood
from torchmetrics import MeanSquaredError, R2Score
from GPErks.gp.data.dataset import Dataset
from GPErks.gp.experiment import GPExperiment
from GPErks.gp.mean import LinearMean
from GPErks.log.logger import get_logger
from GPErks.train.emulator import GPEmulator
from GPErks.utils.random import set_seed
def main():
    """Run one wave of Bayesian history matching against experimental data.

    Pipeline: train one univariate GP emulator per matched output feature,
    apply the implausibility criterion to a Latin-hypercube sample of the
    input space, then save the wave and extract simulation/test points for
    the next wave. Requires the example data/datasets directories on disk.
    """
    # Set logger and enforce reproducibility
    log = get_logger()
    seed = 8
    set_seed(seed)
    # Load experimental values (mean and variance) you aim to match
    exp_data_file = posix_path(os.getcwd(), "data", "example_10", "expdata.json")
    expdata = {}
    with open(exp_data_file, "r") as f:
        expdata = json.load(f)
    exp_mean = [val["mean"] for val in expdata.values()]
    exp_var = [val["var"] for val in expdata.values()]
    # Load input parameters and output features' names
    dataset_dir = Path(posix_path(os.getcwd(), "datasets", "stefano", "8p", "sham"))
    xlabels = read_labels_from_file(dataset_dir / "xlabels.txt")
    ylabels = read_labels_from_file(dataset_dir / "ylabels.txt")
    feature_idx = {key: val for val, key in enumerate(ylabels)}
    active_features = list(expdata.keys())
    active_indices = [feature_idx[f] for f in active_features]
    # Train list of univariate emulators (one for each feature to match)
    X = np.loadtxt(dataset_dir / "X.txt", dtype=float)
    Y = np.loadtxt(dataset_dir / "Y.txt", dtype=float)
    emulators = []
    for idx, feature in zip(active_indices, active_features):
        y = Y[:, idx]
        dataset = Dataset(X, y, x_labels=xlabels, y_label=feature)
        likelihood = GaussianLikelihood()
        mean = LinearMean(degree=1, input_size=dataset.input_size, bias=True)
        covar = ScaleKernel(MaternKernel(ard_num_dims=dataset.input_size))
        metrics = [MeanSquaredError(), R2Score()]
        experiment = GPExperiment(
            dataset,
            likelihood,
            mean,
            covar,
            metrics=metrics,
            seed=seed
        )
        device = "cpu"
        emulator = GPEmulator(experiment, device)
        emulator.train_auto()
        emulators.append(emulator)
    minmax = get_minmax(X)
    waveno = 1  # number of iteration we are at (wave id if you want)
    cutoff = 3.0  # threshold value for the implausibility criterion
    maxno = 1  # explained below
    # The univariate GPE of each output feature will give for each point x_i a specific implausibility measure.
    # With the current implausibility criterion, for each x_i we take the maximum implausibility across all the output
    # features. With maxno=1, the maximum is calculated across all the output features (i.e., till the last worse
    # implausibility measure). If maxno=2 --> till the previous-to-last worse implausibility measure and so on.
    # With this criterion, the worse-performing emulator (the output feature which is the least well captured) will
    # dominate the entire analysis and thus determine if a point is non-implausible or implausible
    w = Wave(
        emulator=emulators,
        Itrain=minmax,
        cutoff=cutoff,
        maxno=maxno,
        mean=exp_mean,
        var=exp_var,
    )  # instantiate the wave object
    sampler = Sampler(design="lhs", dim=X.shape[1], seed=seed)
    n_samples = 100000
    X = sampler.sample(
        n_samples,
        l_bounds=list(minmax[:, 0]),
        u_bounds=list(minmax[:, 1]),
    )
    # Run one iteration of HM, which is: apply the implausibility criterion to detect regions of non-implausible
    # and of implausible points starting from the initial samples in X
    w.find_regions(X)
    w.print_stats()  # show statistics about the two obtained spaces
    w.plot_wave(xlabels=xlabels, display="impl")  # plot the current wave of history matching (impl. measure plot)
    w.plot_wave(xlabels=xlabels, display="var")  # we can also check the accuracy of the GPEs for the current wave
    # note: if filepath=<path_to_file> flag is provided, the plot will be saved to <path_to_file>
    # How to continue on the next wave in 5 steps
    #
    # (0) Save an exact copy of the wave. We always recommend saving each wave right on completion before manipulating
    # its internal structure as you might need it later for other purposes (see Appendix - A2)
    outfiles_dir = Path(DEFAULT_TMP_OUTFILE_DIR)
    outfiles_dir.mkdir(parents=True, exist_ok=True)
    w0 = w.copy()
    w0.print_stats()
    w0.save(outfiles_dir / f"wave_{waveno}.json")
    # (1) From the current non-implausible region, select points to be simulated and points to be used as tests
    # for the next wave
    n_tests = 10000  # number of TEST points we want for the next wave
    n_simuls = 128  # number of current NIMP points we want to simulate to augment training dataset for the next wave
    n_avail_nimps = len(w0.nimp_idx)  # we currently have available only this number of NIMP points
    if n_tests + n_simuls > n_avail_nimps:  # if they are not enough
        n_total_points = n_tests + n_simuls
        w.augment_nimp(n_total_points)  # use 'cloud technique' to generate new NIMP points starting from existing ones
    # Get the requested datasets
    X_simul, X_test = w.get_nimps(n_simuls)
    # We now have all the necessary data to run the next wave: a dataset to simulate to augment the training dataset
    # and build new emulators, and new TEST points to be evaluated with the new emulators. Saving the data to files.
    np.savetxt(outfiles_dir / f"X_simul_{waveno}.txt", X_simul, fmt="%.6f")
    np.savetxt(outfiles_dir / f"X_test_{waveno}.txt", X_test, fmt="%.6f")
    w.print_stats()  # quick check on TESTS, IMP, and NIMP sets' sizes after augmentation
    # (2) Simulate the selected points X_simul
    # (3) Add the simulated points and respective results to the training dataset used in the previous wave
    # (3) Train GPEs on the new, augmented training dataset
    # (4) Start a new wave of HM, where the initial parameter space to be split into non-implausible and
    # implausible regions is no more a Latin Hypercube design but is now the non-implausible region obtained
    # (and augmented) in the previous wave (i.e., X_test)
    # Appendix
    #
    # (A1) Visual check on the datasets generated for the next wave
    X_nimp = w.NIMP
    X_test = np.loadtxt(outfiles_dir / f"X_test_{waveno}.txt", dtype=float)
    X_simul = np.loadtxt(outfiles_dir / f"X_simul_{waveno}.txt", dtype=float)
    # We will inspect only 2 dimensions of the full 8D parameter space to keep it simple
    param = [4, 5]  # select 2 dimensions
    subset_idx = list(np.random.randint(0, X_test.shape[0], size=10*X_simul.shape[0]))  # select an example portion
    colors = interp_col(get_col("blue"), 4)  # getting some blue colour variants
    # Plotting current wave NIMP + next wave TEST + next wave SIMUL
    fig, axis = plt.subplots(1, 1)
    axis.scatter(X_nimp[:, param[0]], X_nimp[:, param[1]], fc=colors[1], ec=colors[1], label=f"X_nimp of wave {waveno}")
    axis.scatter(X_test[subset_idx, param[0]], X_test[subset_idx, param[1]], fc=colors[-1], ec=colors[-1], label=f"X_test for wave {waveno+1}")
    axis.scatter(X_simul[:, param[0]], X_simul[:, param[1]], fc='r', ec='r', label=f"X_simul for wave {waveno+1}")
    axis.set_xlabel(xlabels[param[0]], fontsize=12)
    axis.set_ylabel(xlabels[param[1]], fontsize=12)
    axis.legend()
    fig.tight_layout()
    plt.show()  # TEST + SIMUL points for NEXT wave are all within NIMP space CURRENT wave
    # (A2) Loading a wave object
    # You can load a wave object by providing the same data used to instantiate the wave (emulator, Itrain, cutoff,
    # maxno, mean, var). This is normally done when you need to re-run the wave differently. Alternatively, you can load
    # the wave object with no arguments. This is normally done when you need to examine the wave internal structure.
    # Let's try loading with no arguments.
    w = Wave()
    w.load(Path(outfiles_dir) / f"wave_{waveno}.json")
    w.print_stats()  # notice that TESTS, IMP, and NIMP sets' sizes are the same as pre-augmentation
    # You can get a list of all wave object attributes by printing:
    # print(w.__dict__.keys())
    # Noteworthy attributes are:
    # W.I: implausibility measure obtained for each point in the test set
    # W.PV: percentage emulator variance over experimental variance at each point (given as a fraction)
    # W.NIMP: non-implausible region
    # W.nimp_idx: indices of the initial test set which resulted to be non-implausible
    # W.IMP: implausible region
    # W.imp_idx: indices of the initial test set which resulted to be implausible
    # W.simul_idx: indices of W.NIMP that were selected to be simulated for the next wave
    # W.nsimul_idx: indices of W.NIMP which were not selected for simulations
    # (the respective points will appear in the test set of the next wave instead)
    # The original test set is not stored as an attribute to save space. However, this information can still be
    # retrieved from the stored attributes as:
    X_test = w.reconstruct_tests()
    print((np.equal(X_test, X)).all())  # the test set of first wave was the LHD we generated initially in this script
if __name__ == "__main__":
main()
| stelong/GPErks | examples/example_10.py | example_10.py | py | 9,883 | python | en | code | 3 | github-code | 36 | [
{
"api_name": "GPErks.log.logger.get_logger",
"line_number": 33,
"usage_type": "call"
},
{
"api_name": "GPErks.utils.random.set_seed",
"line_number": 35,
"usage_type": "call"
},
{
"api_name": "GPErks.serialization.path.posix_path",
"line_number": 38,
"usage_type": "call"
... |
41191349576 | import sys
sys.path.append('../../preprocess')
from make_pca import load_landmarks
import numpy as np
import tensorflow as tf
from pfld import predict_landmarks as pfld_predict_landmarks
from pfld_custom import predict_landmarks as pfld_custom_predict_landmarks
from skimage.color import rgb2gray
import cv2
import dlib
from skimage.transform import resize
from prepare_data import IMAGE_SIZE, view_img, resize_lmks
from skimage.transform import resize
import matplotlib
from train_pfld import normalize_data
import os
from detector import get_face_detector
matplotlib.use("TkAgg")
# IMAGE_SIZE = 224
class Rect:
    """Axis-aligned face box exposing the same accessor API as dlib's rectangle."""

    def __init__(self, t, b, l, r):
        # Edges of the box: top, bottom, left, right.
        self.t, self.b, self.l, self.r = t, b, l, r

    def top(self):
        """Top edge coordinate."""
        return self.t

    def bottom(self):
        """Bottom edge coordinate."""
        return self.b

    def right(self):
        """Right edge coordinate."""
        return self.r

    def left(self):
        """Left edge coordinate."""
        return self.l
def predict(data, model_path, predict_fn, image_size=IMAGE_SIZE, depth_multiplier=1.0, **kwargs):
    """Run TF1 landmark inference for a batch of images from a checkpoint.

    Builds the inference graph via ``predict_fn``, restores weights from the
    checkpoint at ``model_path``, and returns the raw network output.

    Args:
        data: image batch fed to the placeholder; assumed shape
            (N, image_size, image_size, 3) — matches ``input_shape`` below.
        model_path: TF1 checkpoint prefix passed to ``saver.restore``.
        predict_fn: graph builder (e.g. ``pfld_predict_landmarks``).
        image_size: square input resolution of the network.
        depth_multiplier: backbone width multiplier forwarded to ``predict_fn``.
        **kwargs: extra keyword args forwarded to ``predict_fn``.

    Returns:
        The evaluated first output of ``predict_fn`` (printed as landmarks).
    """
    input_shape = [None, image_size, image_size, 3]
    inputs = tf.placeholder(tf.float32, shape=input_shape, name='input_images')
    # predict_fn returns three tensors; only the first (predictions) is used.
    preds, _, _ = predict_fn(inputs, image_size, is_training=False, depth_multiplier=depth_multiplier, **kwargs)
    print('predict tensor = ', preds)
    saver = tf.train.Saver()
    # g = tf.get_default_graph()
    # tf.contrib.quantize.create_eval_graph(input_graph=g)
    with tf.Session() as sess:
        # Restore trained weights instead of initializing fresh variables.
        saver.restore(sess, model_path)
        # sess.run(tf.global_variables_initializer())
        results = sess.run(preds, feed_dict={inputs: data})
        print('landmarks = ', results)
        # print('S1 > ')
    return results
def predict_tflite(data, model_path):
    """Run one forward pass of a TFLite landmark model on a prepared batch.

    ``data`` must already match the model's input tensor dtype/shape.
    Returns the raw output tensor (landmarks) of the first output.
    """
    runner = tf.lite.Interpreter(model_path=model_path)
    runner.allocate_tensors()
    in_meta = runner.get_input_details()[0]
    out_meta = runner.get_output_details()[0]
    print('input_details ', in_meta, ' data shape ', data.shape)
    runner.set_tensor(in_meta['index'], data)
    runner.invoke()
    landmarks = runner.get_tensor(out_meta['index'])
    print('landmarks = ', landmarks)
    return landmarks
def crop(img, box):
    """Return the sub-image covered by *box* (rows top:bottom, cols left:right)."""
    rows = slice(box.top(), box.bottom())
    cols = slice(box.left(), box.right())
    return img[rows, cols]
def crop_landmarks(landmarks, box):
    """Translate landmark (x, y) coordinates into the crop box's local frame."""
    origin = np.array([box.left(), box.top()])
    return landmarks - origin
def predict_single(img_path, model_path,
                   image_size=IMAGE_SIZE,
                   depth_multiplier=1.0,
                   predict_fn=pfld_predict_landmarks,
                   zero_mean=True,
                   box_detector='dlib',
                   **kwargs):
    """Detect a face in one image, run landmark inference, and display the result.

    Pipeline: face box -> crop -> resize to (image_size, image_size) ->
    normalize -> TFLite or TF1 inference -> de-normalize to pixel coords ->
    plot with ``view_img``.

    Args:
        img_path: path of the image to process.
        model_path: TFLite file (``.tflite``) or TF1 checkpoint prefix.
        image_size: network input resolution; also used to de-normalize.
        depth_multiplier: backbone width multiplier (checkpoint path only).
        predict_fn: TF1 graph builder used when model_path is a checkpoint.
        zero_mean: True if the model emits coords in [-1, 1] (mapped via
            x/2 + 0.5); False if it emits [0, 1] directly.
        box_detector: 'gt' (ground-truth xml), 'tf' (project detector) or
            anything else for dlib's frontal face detector.
        **kwargs: forwarded to ``predict`` / ``predict_fn``.
    """
    img_size = image_size
    gt_landmark = None
    if box_detector == 'gt':
        # Derive the box from the annotated landmarks of the training xml.
        points, imgs_sizes, imgs = load_landmarks('/home/tamvm/Downloads/ibug_300W_large_face_landmark_dataset/labels_ibug_300W_train.xml')
        fn = os.path.basename(img_path)
        gt_landmark = None
        for idx, img in enumerate(imgs):
            if img.endswith(fn):
                gt_landmark = points[idx]
                break
        # NOTE(review): if the file is not found in the xml, `box` stays
        # undefined and crop() below raises NameError — confirm intended.
        if gt_landmark is not None:
            min_y, max_y = gt_landmark[:,1].min(), gt_landmark[:,1].max()
            min_x, max_x = gt_landmark[:,0].min(), gt_landmark[:,0].max()
            box = Rect(min_y, max_y, min_x, max_x)
            # _, gt_landmark = crop_and_resize(, gt_landmark, image_size)
    elif box_detector == 'tf':
        detector = get_face_detector()
        l, t, r, b = detector.detect(img_path)
        box = Rect(t, b, l, r)
        # get face bound
    else:
        # Default: dlib frontal face detector; take the first detection.
        img = dlib.load_rgb_image(img_path)
        detector = dlib.get_frontal_face_detector()
        box = detector(img, 1)[0]
    oridata = cv2.imread(img_path)
    # if image_size ==80:
    #     oridata = cv2.cvtColor(oridata,cv2.COLOR_BGR2RGB)
    data = crop(oridata, box)
    data = resize(data, (img_size, img_size), anti_aliasing=True, mode='reflect')
    # view_img(data, None)
    normalized_data = normalize_data(data)
    if model_path.endswith('.tflite'):
        # print('using tflite model ', model_path)
        # is_unint8 = model_path.find('uint8') >= 0
        # if is_unint8:
        #     print('int model')
        #     lmks = predict_tflite((np.reshape(data, (1, *data.shape)) * 255).astype(np.uint8), model_path)[0]
        # else:
        print('float model')
        lmks = predict_tflite(np.reshape(normalized_data, (1, *normalized_data.shape)).astype(np.float32), model_path)[0]
    else:
        lmks = predict(np.reshape(normalized_data, (1, *normalized_data.shape)), model_path, predict_fn,
                       image_size=image_size,
                       depth_multiplier=depth_multiplier,
                       **kwargs)[0]
    # print('landmark = ', lmks)
    # De-normalize the flat (x0, y0, x1, y1, ...) vector to pixel coords.
    if zero_mean:
        for i in range(0, 68):
            lmks[i*2] = (lmks[i*2]/2+0.5)* image_size# (lmks[i*2]/2+0.5)*image_size
            lmks[i*2+1] = (lmks[i*2+1]/2 + 0.5) * image_size# (lmks[i*2+1]/2 + 0.5)*image_size
    else:
        for i in range(0, 68):
            lmks[i*2] = (lmks[i*2])* image_size# (lmks[i*2]/2+0.5)*image_size
            lmks[i*2+1] = (lmks[i*2+1]) * image_size# (lmks[i*2+1]/2 + 0.5)*image_size
    # print('landmarks after denorm', lmks)
    lmks = lmks.reshape((68, 2))
    view_img(data, lmks)
if __name__ == '__main__':
    # 2960256451_1.jpg
    # '/home/tamvm/Downloads/ibug_300W_large_face_landmark_dataset/helen/testset/30427236_1.jpg'
    # Manual evaluation switchboard: pick a trained model variant by name and
    # run predict_single on a hard-coded sample image.
    use_tflite = False
    model = 'pfld-custom-80-025m-saux7-x3'
    # model = 'ailab'
    if model == 'pfld-64':
        predict_single('/home/tamvm/Downloads/test_face_tamvm_2.jpg', #'/home/tamvm/Downloads/ibug_300W_large_face_landmark_dataset/helen/trainset/2960256451_1.jpg',
                       '../../data/checkpoints-pfld-64-05m/pfld-311400' if not use_tflite else '../../data/pfld-64-quant.tflite',
                       depth_multiplier=0.5,
                       image_size=64)
    elif model == 'pfld-112':
        predict_single('/home/tamvm/Downloads/test_face_tamvm_2.jpg', #'/home/tamvm/Downloads/ibug_300W_large_face_landmark_dataset/helen/trainset/2960256451_1.jpg',
                       '../../data/checkpoints-pfld-112/pfld-1983600' if not use_tflite else '../../data/pfld-112-quant.tflite',
                       # '../../data/pfld-64.tflite',
                       image_size=112)
    elif model == 'pfld-80':
        predict_single('/home/tamvm/Downloads/ibug_300W_large_face_landmark_dataset/helen/testset/3035796193_1_mirror.jpg', #'/home/tamvm/Downloads/ibug_300W_large_face_landmark_dataset/helen/trainset/2960256451_1.jpg',
                       '../../data/checkpoints-pfld-80-025m/pfld-449100',
                       # '../../data/pfld-64.tflite',
                       zero_mean=False,
                       depth_multiplier=0.25,
                       image_size=80)
    elif model == 'pfld-custom-80':
        predict_single('/home/tamvm/Downloads/test_face_tamvm_2.jpg', #'/home/tamvm/Downloads/ibug_300W_large_face_landmark_dataset/helen/trainset/2960256451_1.jpg',
                       '../../data/checkpoints-pfld-custom/pfld-183000',
                       predict_fn=pfld_custom_predict_landmarks,
                       # '../../data/pfld-64.tflite',
                       depth_multiplier=1,
                       zero_mean=True,
                       image_size=80)
    elif model == 'pfld-custom-80-025m':
        predict_single('/home/tamvm/Downloads/test_face_tamvm_2.jpg', #'/home/tamvm/Downloads/ibug_300W_large_face_landmark_dataset/helen/trainset/2960256451_1.jpg',
                       '../../data/checkpoints-pfld-custom-80-025m/pfld-314100',
                       predict_fn=pfld_custom_predict_landmarks,
                       # '../../data/pfld-64.tflite',
                       depth_multiplier=0.25,
                       zero_mean=True,
                       image_size=80)
    elif model == 'pfld-custom-80-025m-aux7':
        predict_single('/home/tamvm/Downloads/test_face_tamvm_2.jpg', #'/home/tamvm/Downloads/ibug_300W_large_face_landmark_dataset/helen/trainset/2960256451_1.jpg',
                       '../../data/checkpoints-pfld-custom-80-025m-aux7/pfld-376500',
                       predict_fn=pfld_custom_predict_landmarks,
                       # '../../data/pfld-64.tflite',
                       depth_multiplier=0.25,
                       zero_mean=True,
                       image_size=80,
                       aux_start_layer='layer_7')
    elif model == 'pfld-custom-80-025m-aux7-x3':
        predict_single( '/home/tamvm/Downloads/ibug_300W_large_face_landmark_dataset/helen/testset/3035796193_1_mirror.jpg', #'/home/tamvm/Downloads/ibug_300W_large_face_landmark_dataset/helen/trainset/2960256451_1.jpg',
                        '../../data/checkpoints-pfld-custom-80-025m-aux7-x3/pfld-220000',
                        predict_fn=pfld_custom_predict_landmarks,
                        # '../../data/pfld-64.tflite',
                        depth_multiplier=0.25,
                        zero_mean=True,
                        image_size=80,
                        fc_x_n=3,
                        box_detector='tf',
                        aux_start_layer='layer_7')
    elif model == 'pfld-custom-80-025m-saux7-x3':
        predict_single( '/home/tamvm/Downloads/ibug_300W_large_face_landmark_dataset/helen/testset/3035796193_1_mirror.jpg', #'/home/tamvm/Downloads/ibug_300W_large_face_landmark_dataset/helen/trainset/2960256451_1.jpg',
                        '../../data/checkpoints-pfld-custom-80-025m-saux7-x3/pfld-310500',
                        predict_fn=pfld_custom_predict_landmarks,
                        # '../../data/pfld-64.tflite',
                        depth_multiplier=0.25,
                        simple_aux=True,
                        zero_mean=True,
                        image_size=80,
                        fc_x_n=3,
                        box_detector='dlib',
                        aux_start_layer='layer_7')
    elif model == 'pfld-custom-80-025m-aux7-x4-m3':
        predict_single('/home/tamvm/Downloads/test_face_tamvm_2.jpg',# '/home/tamvm/Downloads/ibug_300W_large_face_landmark_dataset/helen/testset/3035796193_1_mirror.jpg', #'/home/tamvm/Downloads/ibug_300W_large_face_landmark_dataset/helen/trainset/2960256451_1.jpg',
                       '../../data/checkpoints-pfld-custom-80-025m-aux7-x4-m3/pfld-131500',
                       predict_fn=pfld_custom_predict_landmarks,
                       # '../../data/pfld-64.tflite',
                       depth_multiplier=0.25,
                       zero_mean=True,
                       image_size=80,
                       fc_x_n=4,
                       mid_conv_n=3,
                       box_detector='tf',
                       aux_start_layer='layer_7')
    elif model == 'pfld-custom-80-025m-aux8':
        predict_single('/home/tamvm/Downloads/test_face_tamvm_2.jpg', #'/home/tamvm/Downloads/ibug_300W_large_face_landmark_dataset/helen/trainset/2960256451_1.jpg',
                       '../../data/checkpoints-pfld-custom-80-025m-aux8/pfld-445500',
                       predict_fn=pfld_custom_predict_landmarks,
                       # '../../data/pfld-64.tflite',
                       depth_multiplier=0.25,
                       zero_mean=True,
                       image_size=80,
                       aux_start_layer='layer_8')
    else:
        # Fallback: exported TFLite pose model.
        use_tflite = True
        predict_single('/home/tamvm/Downloads/ibug_300W_large_face_landmark_dataset/helen/testset/3035796193_1_mirror.jpg', #'/home/tamvm/Downloads/ibug_300W_large_face_landmark_dataset/helen/trainset/2960256451_1.jpg',
                       '../../data/landmark_80pose.tflite',
                       normalize_lmks=True,
                       # '../../data/pfld-64.tflite',
                       image_size=80)
| vuamitom/shapenet-tensorflow | model/pfld/eval_pfld.py | eval_pfld.py | py | 11,142 | python | en | code | 1 | github-code | 36 | [
{
"api_name": "sys.path.append",
"line_number": 2,
"usage_type": "call"
},
{
"api_name": "sys.path",
"line_number": 2,
"usage_type": "attribute"
},
{
"api_name": "matplotlib.use",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "prepare_data.IMAGE_SIZE",
... |
38031979552 | import click
from .core import NmapReportParser, NmapReport, CSVFileParser, JsonOutput, BateaModel, MatrixOutput
from defusedxml import ElementTree
from xml.etree.ElementTree import ParseError
from batea import build_report
import warnings
warnings.filterwarnings('ignore')
@click.command(context_settings=dict(help_option_names=['-h', '--help']))
@click.option("-c", "--read-csv", type=click.File('r'), multiple=True)
@click.option("-x", "--read-xml", type=click.File('r'), multiple=True)
@click.option("-n", "--n-output", type=int, default=5)
@click.option("-A", "--output-all", is_flag=True)
@click.option("-L", "--load-model", type=click.File('rb'), default=None)
@click.option("-D", "--dump-model", type=click.File('wb'), default=None)
@click.option("-f", "--input-format", type=str, default='xml')
@click.option('-v', '--verbose', count=True)
@click.option('-oM', "--output-matrix", type=click.File('w'), default=None)
@click.argument("nmap_reports", type=click.File('r'), nargs=-1)
def main(*, nmap_reports, input_format, dump_model, load_model,
         output_all, read_csv, read_xml, n_output, verbose, output_matrix):
    """Context-driven asset ranking based using anomaly detection"""
    report = build_report()
    csv_parser = CSVFileParser()
    xml_parser = NmapReportParser()
    # Output format: feature matrix dump vs. (default) JSON host ranking.
    if output_matrix:
        output_manager = MatrixOutput(output_matrix)
    else:
        output_manager = JsonOutput(verbose)
    # Collect hosts: positional reports use -f's format; -c/-x inputs force
    # their parser regardless of -f.
    try:
        if input_format == 'xml':
            for file in nmap_reports:
                report.hosts.extend([host for host in xml_parser.load_hosts(file)])
        if input_format == 'csv':
            for file in nmap_reports:
                report.hosts.extend([host for host in csv_parser.load_hosts(file)])
        if read_csv:
            for file in read_csv:
                report.hosts.extend([host for host in csv_parser.load_hosts(file)])
        if read_xml:
            for file in read_xml:
                report.hosts.extend([host for host in xml_parser.load_hosts(file)])
    except (ParseError, UnicodeDecodeError, ElementTree.ParseError, ValueError) as e:
        output_manager.log_parse_error(e)
        raise SystemExit
    if len(report.hosts) == 0:
        output_manager.log_empty_report()
        raise SystemExit
    report_features = report.get_feature_names()
    output_manager.add_report_info(report)
    matrix_rep = report.generate_matrix_representation()
    batea = BateaModel(report_features=report_features)
    if load_model is not None:
        batea.load_model(load_model)
    else:
        # NOTE(review): reconstructed layout assumes fitting happens only when
        # no pre-trained model is loaded — confirm against upstream.
        batea.build_model()
        batea.model.fit(matrix_rep)
    # Higher score = more anomalous (negated log-likelihood-style score).
    scores = -batea.model.score_samples(matrix_rep)
    output_manager.add_scores(scores)
    if output_all:
        n_output = len(scores)
    n_output = min(n_output, len(scores))
    # Indices of the n_output highest scores, best first.
    top_n = scores.argsort()[-n_output:][::-1]
    for i, j in enumerate(top_n):
        output_manager.add_host_info(
            rank=str(i+1),
            score=scores[j],
            host=report.hosts[j],
            features={name: value for name, value in zip(report_features, matrix_rep[j, :])}
        )
    output_manager.flush()
    if dump_model:
        batea.dump_model(dump_model)
if __name__ == "__main__":
    main()
| delvelabs/batea | batea/__main__.py | __main__.py | py | 3,254 | python | en | code | 287 | github-code | 36 | [
{
"api_name": "warnings.filterwarnings",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "batea.build_report",
"line_number": 25,
"usage_type": "call"
},
{
"api_name": "core.CSVFileParser",
"line_number": 26,
"usage_type": "call"
},
{
"api_name": "core.Nma... |
13823383640 | import numpy as np
import matplotlib.pyplot as plt
from shapely import geometry
from numpy.linalg import norm
from random import *
import pickle
def reach_set_calc(x_val, reach_range):
    """Reachability set around a point.

    :type x_val: list
    :type reach_range: float
    :return: shapely Polygon — the axis-aligned square centred at ``x_val``
        with side length ``2 * reach_range``.
    """
    cx, cy = x_val[0], x_val[1]
    corners = [
        geometry.Point(cx - reach_range, cy - reach_range),
        geometry.Point(cx + reach_range, cy - reach_range),
        geometry.Point(cx + reach_range, cy + reach_range),
        geometry.Point(cx - reach_range, cy + reach_range),
    ]
    return geometry.Polygon(corners)
def all_Q_plt(Q, node_num, color_set, line_style_set, T, plt_scale):
    """Draw every convex set Q_t^(i) as an unfilled, labelled outline.

    :param Q: dict of ConvexBody objects keyed "Q_t={t}^i={node}"
    :param node_num: int — number of graph nodes per time step
    :param color_set: list — one edge colour per time step
    :param line_style_set: list — one line style per node
    :param T: int — final time step (sets 0..T are drawn)
    :param plt_scale: axis limits passed to plt.axis
    :return: None
    """
    for t_val in range(T + 1):
        edge = color_set[t_val]
        for node in range(1, node_num + 1):
            xs, ys = Q[f"Q_t={t_val}^i={node}"].region.exterior.xy
            plt.fill(xs, ys, alpha=1, facecolor='none', edgecolor=edge,
                     linewidth=1.5, linestyle=line_style_set[node - 1],
                     label=r"$\mathcal{Q}_" + fr"{t_val}^{{({node})}}$")
    plt.legend(fontsize=14)
    plt.grid(True)
    plt.axis(plt_scale)
    return None
def set_plotter(set, plt_color, alpha_val):
    """Outline a single polygon on the current axes (no fill).

    :type set: shapely Polygon
    :type plt_color: string
    :type alpha_val: float
    :return: None
    """
    xs, ys = set.exterior.xy
    plt.fill(xs, ys, alpha=alpha_val, facecolor='none', edgecolor=plt_color)
def game_plt(full_tree, oppo_action, Q, colors, UV_dict, t, prev_x_action, R, control):
    """Plot one game step and return the player's chosen action.

    Draws the opponent's selected set Q_t^(i), the region the player may pick
    from (all of Q_t at t=0, otherwise Q_t intersected with the reach set of
    the previous player state), then marks the player's chosen point and
    connects it to the previous state.

    :param full_tree: list of State — full discretized game tree
    :param oppo_action: State — opponent node chosen at step t
    :param Q: dict of ConvexBody keyed "Q_t={t}^i={node}"
    :param colors: list — plot colour per time step
    :param UV_dict: dict of Value keyed "V_t={t} (prev_x, i_t)"
    :param t: int — current time step
    :param prev_x_action: State — player's action at step t-1
    :param R: float — reach-set half-width
    :param control: str — '1'/'2': player plays the optimal UV_dict action;
        '3': player picks a random child of oppo_action
    :return: player_action: State
    """
    prev_x_state = prev_x_action.state
    ax = plt.gca()
    ax.set_aspect(1)
    # Plot selected Qt
    Qt = Q[f"Q_t={t}^i={oppo_action.state}"].region
    set_plotter(Qt, colors[t], alpha_val=0.05)
    # Plot the set discretized over
    if t == 0:
        set = Qt
    else:
        R_set = reach_set_calc(prev_x_action.state, R)
        set = Qt.intersection(R_set)
    set_plotter(set, colors[t], alpha_val=0.1)
    # Find disc xt in the set
    # disc_x_list = [action.state for action in full_tree if action.parent_state == oppo_action]
    # Plot disc xt in the set
    # for disc_x in disc_x_list:
    #     plt.scatter(disc_x[0], disc_x[1], color=colors[t], linewidths=0.1, marker='.')
    if control in ['1', '2']:  # Opt pl vs. Opt op or Opt pl vs. Sub-opt op
        # Find optimal player action xt
        player_action = UV_dict[f"V_t={t} ({prev_x_action.state}, {oppo_action.state})"].action
        player_state = player_action.state
    else:  # Control == '3', Sub-opt pl vs. Opt op
        # Randomly pick player action xt (choice comes from `random`)
        player_action = choice([action for action in full_tree if action.parent_state == oppo_action])
        player_state = player_action.state
    # Plot optimal xt in the set
    plt.scatter(player_state[0], player_state[1], color=colors[t], linewidths=1.5, marker='.')
    # plt.scatter(player_state[0], player_state[1], color='black', linewidths=0.1, marker='.')
    if t != 0:
        # Connect optimal xt state approximation to prev_x_state
        plt.plot([prev_x_state[0], player_state[0]], [prev_x_state[1], player_state[1]], color='black')
    return player_action
# Given a rectangular set, return discrete points inside the set
# def discrete_x_calc(poly, t_node, approx_para):
def discrete_x_calc(poly, approx_para, bound_rmv):
    """Uniform approx_para x approx_para grid over the bounding box of *poly*.

    :type poly: shapely Polygon (anything with ``exterior.xy``)
    :type approx_para: int — grid resolution per axis
    :type bound_rmv: string — 'y'/'Y' drops grid points lying on the
        bounding-box boundary (and prints them), anything else keeps all
    :return discrete_x: list of [x, y] pairs
    """
    xs, ys = poly.exterior.xy  # vertex coordinates of the polygon's ring
    h_axis = np.linspace(min(xs), max(xs), approx_para)
    v_axis = np.linspace(min(ys), max(ys), approx_para)
    discrete_x = [[h, v] for h in h_axis for v in v_axis]
    if bound_rmv.lower() == 'y':
        # A point is "on the boundary" when either coordinate coincides with
        # a vertex coordinate (exact float match, as in the linspace endpoints).
        bound_x = [p for p in discrete_x if p[0] in xs or p[1] in ys]
        for p in bound_x:
            discrete_x.remove(p)
        print(bound_x)
    return discrete_x
class State:
    """A node of the game tree for either side of the game.

    Opponent states carry an int node id; player states carry a coordinate
    list. Each state remembers its parent, its time step, and the children
    reachable from it.
    """

    def __init__(self, state_value, parent_state, t_step, side):
        """
        :type state_value: int (Opponent), list (Player)
        :type parent_state: State / None (dummy_i)
        :type t_step: int, None (dummy i)
        :type side: str ('Player'/'Opponent')
        """
        self.side = side
        self.t_step = t_step
        self.state = state_value
        self.parent_state = parent_state
        self.children_state_list = []

    def add_child_state(self, child_state):
        """Register *child_state* as directly reachable from this node.

        :type child_state: State
        """
        self.children_state_list.append(child_state)
class ConvexBody:
    """Convex region Q_t^(i) attached to a time step t and a graph node i."""

    def __init__(self, t_step, node, vertices):
        """
        :type t_step: int
        :type node: int
        :type vertices: list — corner points handed straight to shapely
        """
        self.region = geometry.Polygon(vertices)
        self.t_step = t_step
        self.t_node = node
class Value:  # Value function
    """One entry of the approximated value function: the value and the
    optimizing action for a (player state, opponent state) pair at step t."""

    def __init__(self, player_state, oppo_state, t_step, side, value, action):
        """
        :type player_state: State (None for U0)
        :type oppo_state: State (None for U0)
        :type t_step: int
        :type side: string
        :type value: float
        :type action: State
        """
        self.value = value
        self.action = action
        self.side = side
        self.t_step = t_step
        self.player_state = player_state
        self.oppo_state = oppo_state
if __name__ == "__main__":
    #################################### Display ####################################
    """
    Plot trajectory result. Allow user to control opponent, while player always applies its optimal strategy by
    computer. Allow re-run functionality.
    IDEA: Separate game computation section (discretization, optimal value approximation) and game play section
    (display) as two different .py files
    """
    # Interactive driver: keep prompting for a solver variant ('new'/'old')
    # until the user types 'terminate'. Precomputed trees/value functions are
    # read back from local pickle files produced by the computation script.
    method = ''
    while method.lower() != 'terminate':
        method = input("Which method to use or terminate the program? [Old/New/Terminate]: ")
        if method.lower() == 'new':
            # Load tree info new files into the program
            tree_file = open(f'tree_info_new (epsilon = {0.15}, extra_disc_para = {5})', 'rb')
            tree_info = pickle.load(tree_file)
            tree_file.close()
            # Assign keys from tree_info to variables in this program
            Q = tree_info['Q']
            full_tree = tree_info['full_tree']
            UV_dict = tree_info['UV_dict']
            T = tree_info['T']
            num_nodes = tree_info['num_nodes']
            colors = tree_info['colors']
            line_style_list = tree_info['line_style_list']
            plt_scale = tree_info['plt_scale']
            extra_disc_para = tree_info['extra_disc_para']
            scale_para = tree_info['scale_para']
            dummy_i = tree_info['dummy_i']
            performance_bound = tree_info['performance_bound']
            R = tree_info['R']
            method = tree_info['method']
            # Plot all convex sets
            plt_scale_Q = [0, 0.8, 0, 0.8]
            all_Q_plt(Q, num_nodes, colors, line_style_list, T, plt_scale_Q)
            ax = plt.gca()
            ax.set_aspect(1)
            plt.show()
            plt_scale = [0.3, 0.4, 0.3, 0.4]
            msg = ''
            oppo_hist = dict()
            while msg.lower() != 'n':
                # Define figure and ax for result plot figure
                fig, ax = plt.subplots(figsize=(8, 8))
                for label in (ax.get_xticklabels() + ax.get_yticklabels()):
                    label.set_fontsize(22)
                tot_cost = 0
                all_Q_plt(Q, num_nodes, colors, line_style_list, T, plt_scale)
                ## Still need to add opt player vs. opt opponent
                control = input("Opt Player vs. Opt Opponent [1] / Opt Player vs. Sub-opt Opponent [2] / Sub-opt "
                                "Player vs. Opt Opponent [3]? ")
                if control not in ['1', '2', '3']:
                    print('Invalid game setting. Select again.')
                else:  # Valid game setting
                    if control == '2':  # Case of Player (PC) vs. Opponent (User)
                        # Initialize the game
                        t = 0
                        opt_player_action = dummy_i
                        opt_player_state = dummy_i.state
                        while t <= T:
                            prev_x_action = opt_player_action
                            prev_x_state = opt_player_state
                            oppo_node = int(input("Enter opponent action: "))
                            if t == 0:
                                if oppo_node not in range(num_nodes + 1):
                                    print("Invalid selection of node. Try again.")
                                else:  # oppo_node is valid with given graph
                                    oppo_hist[f"i{t}"] = oppo_node  # Store selected oppo_node to oppo_hist
                                    oppo_action = [action for action in full_tree if action.state == oppo_node and
                                                   action.parent_state == prev_x_action][0]
                                    # Plot the game process
                                    opt_player_action = game_plt(full_tree, oppo_action, Q, colors, UV_dict, t,
                                                                 prev_x_action, R, control)
                                    opt_player_state = opt_player_action.state
                                    # # Plot selected Q0
                                    # Q0 = Q[f"Q_t={t}^i={oppo_action.state}"].region
                                    # set_plotter(Q0, colors[t], alpha_val=0.25)
                                    # set_plotter(Q0, colors[t], alpha_val=0.5)
                                    #
                                    # # Find disc x0 in Q0
                                    # disc_x_list = [action.state for action in full_tree if action.parent_state ==
                                    #                oppo_action]
                                    #
                                    # # Plot disc x0 in Q0
                                    # for disc_x in disc_x_list:
                                    #     plt.scatter(disc_x[0], disc_x[1], color=colors[t], linewidths=0.5, marker='.')
                                    #
                                    # # Find optimal player action x0 (Can be made UDF)
                                    # opt_player_action = UV_dict[f"V_t={t} ({prev_x_action.state}, "
                                    #                             f"{oppo_action.state})"].action
                                    # opt_player_state = opt_player_action.state
                                    #
                                    # # Plot optimal x0 in Q0
                                    # plt.scatter(opt_player_state[0], opt_player_state[1], color='black', linewidths=0.1,
                                    #             marker='.')
                                    t += 1  # Update t value
                                    # Display
                                    print(f"Optimal Player State Approximation: {opt_player_state}")
                            else:  # t != 0
                                if oppo_node not in [action.state for action in prev_x_action.children_state_list]:
                                    print("Invalid selection of node. Try again.")
                                else:  # selected oppo_node is a reachable node
                                    oppo_hist[f"i{t}"] = oppo_node
                                    oppo_action = [action for action in full_tree if action.state == oppo_node and
                                                   action.parent_state == prev_x_action][0]
                                    # Plot the game process
                                    opt_player_action = game_plt(full_tree, oppo_action, Q, colors, UV_dict, t,
                                                                 prev_x_action, R, control)
                                    opt_player_state = opt_player_action.state
                                    # # Plot selected Qt
                                    # Qt = Q[f"Q_t={t}^i={oppo_action.state}"].region
                                    # set_plotter(Qt, colors[t], alpha_val=0.25)
                                    #
                                    # # Plot R(previous_x) intersect Qt
                                    # R_set = reach_set_calc(prev_x_state, R)
                                    # R_intersect_Q = Qt.intersection(R_set)
                                    # set_plotter(R_intersect_Q, colors[t], alpha_val=0.5)
                                    #
                                    # # Find disc xt in R(previous_x) intersect Qt
                                    # disc_x_list = [action.state for action in full_tree if action.parent_state ==
                                    #                oppo_action]
                                    #
                                    # # Plot disc xt in R(previous_x) intersect Qt
                                    # for disc_x in disc_x_list:
                                    #     plt.scatter(disc_x[0], disc_x[1], color=colors[t], linewidths=0.5, marker='.')
                                    #
                                    # # Find optimal player action xt in R(previous_x) intersect Qt
                                    # opt_player_action = UV_dict[f"V_t={t} ({prev_x_action.state}, {oppo_action.state}"
                                    #                             f")"].action
                                    # opt_player_state = opt_player_action.state
                                    #
                                    # # Plot optimal x_t in R(previous_x) intersect Qt
                                    # plt.scatter(opt_player_state[0], opt_player_state[1], color='black', linewidths=0.1,
                                    #             marker='.')
                                    #
                                    # # Connect optimal x_t state approximation to prev_x_state
                                    # plt.plot([prev_x_state[0], opt_player_state[0]],
                                    #          [prev_x_state[1], opt_player_state[1]], color='black')
                                    # Update cost
                                    tot_cost += norm(np.array(prev_x_state) - np.array(opt_player_state), 2)
                                    t += 1  # Update t value
                                    # Display
                                    print(f"Optimal Player State Approximation: {opt_player_state}")
                                    print(f"Cost: {tot_cost}")
                        print(f"Running {method} method")
                        # plt.title(fr"Sub-optimal Opponent vs. Optimal Player " + '\n' +
                        #           fr"Opponent History: $i_0={oppo_hist['i0']}$, $i_1={oppo_hist['i1']}$, "
                        #           fr"$i_2={oppo_hist['i2']}$" + "\n" + fr"$\epsilon$={performance_bound}"
                        #           fr"(Without Boundary), Total Cost={round(tot_cost, 4)}")
                    elif control == '1':  # Case of Player (PC) vs. Opponent (PC)
                        for t in range(T+1):
                            if t == 0:
                                opt_oppo_action = UV_dict[f"U_t={t} ({dummy_i.state}, {None})"].action
                                prev_x_action = opt_oppo_action.parent_state
                                ### Need to check line 846 - 848 correctness!!!! Continue Here
                                # Plot game process
                                opt_player_action = game_plt(full_tree, opt_oppo_action, Q, colors, UV_dict, t,
                                                             prev_x_action, R, control)
                                opt_player_state = opt_player_action.state
                                prev_x_action = opt_player_action  # Reassign prev_x_action for next iteration use
                                # Update oppo_hist
                                oppo_hist[f"i{t}"] = opt_oppo_action.state
                            else:  # When t != 0
                                opt_oppo_action = UV_dict[f"U_t={t} ({prev_x_action.state}, {opt_oppo_action.state})"].\
                                    action
                                # Plot game process
                                opt_player_action = game_plt(full_tree, opt_oppo_action, Q, colors, UV_dict, t,
                                                             prev_x_action, R, control)
                                opt_player_state = opt_player_action.state
                                tot_cost += norm(np.array(prev_x_action.state) - np.array(opt_player_state), 2)
                                prev_x_action = opt_player_action
                                # Update oppo_hist
                                oppo_hist[f"i{t}"] = opt_oppo_action.state
                            # Display
                            print(f"\nt={t}")
                            print(f"Optimal i{t}: {opt_oppo_action.state}")
                            print(f"Optimal Player State Approximation: {opt_player_action.state}")
                            print(f"Total Cost: {tot_cost}")
                        print(f"Running {method} method")
                        # plt.title(fr"Optimal Opponent vs. Optimal Player " + '\n' +
                        #           fr"Opponent History: $i_0={oppo_hist['i0']}$, $i_1={oppo_hist['i1']}$, "
                        #           fr"$i_2={oppo_hist['i2']}$" + "\n" + fr"$\epsilon$={performance_bound}"
                        #           fr"(Without Boundary), Total Cost={round(tot_cost, 4)}")
                    elif control == '3':
                        for t in range(T+1):
                            if t == 0:
                                opt_oppo_action = UV_dict[f"U_t={t} ({dummy_i.state}, {None})"].action
                                prev_x_action = opt_oppo_action.parent_state
                                # Plot game process
                                ram_player_action = game_plt(full_tree, opt_oppo_action, Q, colors, UV_dict, t,
                                                             prev_x_action, R, control)
                                ram_player_state = ram_player_action.state
                                prev_x_action = ram_player_action
                                # Update oppo_hist
                                oppo_hist[f"i{t}"] = opt_oppo_action.state
                            else:
                                opt_oppo_action = UV_dict[f"U_t={t} ({prev_x_action.state}, {opt_oppo_action.state}"
                                                          f")"].action
                                ram_player_action = game_plt(full_tree, opt_oppo_action, Q, colors, UV_dict, t,
                                                             prev_x_action, R, control)
                                ram_player_state = ram_player_action.state
                                tot_cost += norm(np.array(prev_x_action.state) - np.array(ram_player_state), 2)
                                prev_x_action = ram_player_action
                                # Update oppo_hist
                                oppo_hist[f"i{t}"] = opt_oppo_action.state
                            # Display
                            print(f"\nt={t}")
                            print(f"Optimal i{t}: {opt_oppo_action.state}")
                            print(f"Sub-optimal Player State Approximation: {ram_player_state}")
                            print(f"Total Cost: {tot_cost}")
                        print(f"Running {method} method")
                        # plt.title(fr"Optimal Opponent vs. Sub-optimal Player " + '\n' +
                        #           fr"Opponent History: $i_0={oppo_hist['i0']}$, $i_1={oppo_hist['i1']}$, "
                        #           fr"$i_2={oppo_hist['i2']}$" + "\n" + fr"$\epsilon$={performance_bound}"
                        #           fr"(Without Boundary), Total Cost={round(tot_cost, 4)}")
                plt.show()
                # Save Simulation Results
                sim_result = {
                    'tot_cost': tot_cost,
                    'performance_bound': performance_bound,
                    'extra_disc_para': extra_disc_para
                }
                sim_file = open(f'sim_result_new (epsilon = {performance_bound}, extra_disc_para = {extra_disc_para})',
                                'wb')
                pickle.dump(sim_result, sim_file)
                sim_file.close()
                msg = input(f"Rerun (Method: {method})? [Y/N] ")
            #################################### End Here ####################################
        #################################### Display ####################################
        elif method.lower() == 'old':
            tot_cost = 0
            # Load tree info old files into the program
            tree_file = open('tree_info_old', 'rb')
            tree_info = pickle.load(tree_file)
            tree_file.close()
            # Assign keys from tree_info to variables in this program
            Q = tree_info['Q']
            tree_no_lf_copy = tree_info['tree_no_lf_copy']
            UV_dict = tree_info['UV_dict']
            T = tree_info['T']
            num_nodes = tree_info['num_nodes']
            colors = tree_info['colors']
            line_style_list = tree_info['line_style_list']
            plt_scale = tree_info['plt_scale']
            disc_para = tree_info['disc_para']
            scale_para = tree_info['scale_para']
            dummy_i = tree_info['dummy_i']
            R = tree_info['R']
            method = tree_info['method']
            boundary_rmv = tree_info['boundary_rmv']
            msg = ''
            oppo_hist = dict()
            while msg.lower() != 'n':
                control = input("Player (PC) vs. Opponent (PC) [1] / Player (PC) vs. Opponent (User) [2]? ")
                if control not in ['1', '2']:
                    print('Invalid game setting. Select again.')
                else:  # control is in ['1', '2']
                    if control == '2':
                        # Let user be opponent, show player optimal action approximation for demo (Plot them)
                        t = 0
                        opt_player_action = None
                        opt_player_state = None
                        tot_cost = 0
                        while t <= T:
                            print(f"\nt={t}")
                            # I reassigned opt_player_action to avoid warning about potentially undefined
                            # opt_player_action in else statements
                            prev_x_action = opt_player_action
                            prev_x_state = opt_player_state  # Reassignment needed for later generation of R_intersect_Q
                            oppo_node = int(input("Enter opponent action: "))
                            if t == 0:
                                if oppo_node not in range(num_nodes + 1):
                                    print("Invalid selection of node. Try again.")
                                else:
                                    oppo_action = [action for action in tree_no_lf_copy if
                                                   action.state == oppo_node and action.t_step == t]
                                    oppo_action = oppo_action[0]
                                    oppo_hist[f"i{t}"] = oppo_action.state
                                    # Plot selected Q0
                                    Q0 = Q[f"Q_t={t}^i={oppo_node}"]
                                    set_plotter(Q0.region, colors[t], alpha_val=0.5)
                                    # Plot discrete x0 in Q0
                                    """
                                    disc_x0_list = [action.state for action in tree_no_lf_copy if
                                                    action.parent_state == oppo_action]
                                    """
                                    disc_x0_list = discrete_x_calc(Q[f'Q_t={t}^i={oppo_node}'].region, disc_para,
                                                                   bound_rmv=boundary_rmv)
                                    for disc_x0 in disc_x0_list:
                                        plt.scatter(disc_x0[0], disc_x0[1], color=colors[t], linewidths=0.5, marker='.')
                                    opt_player_action = UV_dict[
                                        f"V_t={t} ({oppo_action.parent_state.state}, {oppo_action.state})"].action
                                    opt_player_state = opt_player_action.state  # value of optimal x0 approximation
                                    print(f"Optimal Player State Approximation: {opt_player_state}")
                                    plt.scatter(opt_player_state[0], opt_player_state[1], color='black', linewidths=0.1,
                                                marker='.')
                                    t += 1
                            else:  # t != 0
                                if oppo_node not in [action.state for action in prev_x_action.children_state_list]:
                                    print("Invalid selection of node. Try again.")
                                else:
                                    oppo_action = \
                                        [state for state in tree_no_lf_copy if
                                         state.state == oppo_node and state.parent_state ==
                                         prev_x_action][0]
                                    oppo_hist[f"i{t}"] = oppo_action.state
                                    opt_player_action = UV_dict[
                                        f"V_t={t} ({prev_x_action.state}, {oppo_action.state})"].action
                                    opt_player_state = opt_player_action.state
                                    print(f"Optimal Player State Approximation: {opt_player_state}")
                                    # Plot Qt
                                    Qt = Q[f"Q_t={t}^i={oppo_action.state}"]
                                    set_plotter(Qt.region, colors[t], alpha_val=0.25)
                                    # Plot R(previous_x) intersect Q
                                    R_set = reach_set_calc(prev_x_state, R)
                                    R_intersect_Q = Q[f"Q_t={t}^i={oppo_action.state}"].region.intersection(R_set)
                                    set_plotter(R_intersect_Q, colors[t], alpha_val=0.5)
                                    # Plot discrete x in R_intersect_Q
                                    disc_x_list = discrete_x_calc(R_intersect_Q, approx_para=disc_para,
                                                                  bound_rmv=boundary_rmv)
                                    for disc_x in disc_x_list:
                                        plt.scatter(disc_x[0], disc_x[1], color=colors[t], linewidths=0.1, marker='.')
                                    # Plot optimal x_t state approximation
                                    plt.scatter(opt_player_state[0], opt_player_state[1], facecolor='black',
                                                linewidths=0.1, marker='.')
                                    # Connect optimal x_t state approximation to prev_x_state
                                    plt.plot([prev_x_state[0], opt_player_state[0]],
                                             [prev_x_state[1], opt_player_state[1]], color='black')
                                    tot_cost += norm(np.array(prev_x_state) - np.array(opt_player_state), 2)
                                    print(f"Total Cost: {tot_cost}")
                                    t += 1
                        if boundary_rmv.lower() == 'n':
                            plt.title(
                                fr"Opponent History: $i_0={oppo_hist['i0']}$, $i_1={oppo_hist['i1']}$, $i_2={oppo_hist['i2']}$ "
                                f"\n{disc_para}x{disc_para} Discretization, Total Cost={round(tot_cost, 4)}")
                        else:
                            plt.title(
                                fr"Opponent History: $i_0={oppo_hist['i0']}$, $i_1={oppo_hist['i1']}$, $i_2={oppo_hist['i2']}$ "
                                f"\n{disc_para}x{disc_para} Discretization (Without Boundary), Total Cost={round(tot_cost, 4)}")
                    elif control == '1':
                        opt_oppo_action = dummy_i
                        prev_x_action = opt_oppo_action.parent_state
                        tot_cost = 0
                        for t in range(T + 1):
                            if t == 0:
                                # Find optimal i_0
                                opt_oppo_action = UV_dict[f"U_t={t} ({dummy_i.state}, {None})"].action
                                # Plot Q0
                                Q0 = Q[f'Q_t={t}^i={opt_oppo_action.state}']
                                set_plotter(Q0.region, colors[t], alpha_val=0.25)
                                set_plotter(Q0.region, colors[t], alpha_val=0.5)
                                # Find discrete x0 in Q0
                                disc_x_list = [action.state for action in tree_no_lf_copy if action.parent_state ==
                                               opt_oppo_action]
                            else:  # when t is not 0
                                # Find optimal i_t
                                opt_oppo_action = UV_dict[
                                    f"U_t={t} ({prev_x_action.state}, {opt_oppo_action.state})"].action
                                # Plot selected Qt
                                Qt = Q[f"Q_t={t}^i={opt_oppo_action.state}"]
                                set_plotter(Qt.region, colors[t], alpha_val=0.25)
                                # Plot R(previous_x) intersect Q
                                R_set = reach_set_calc(prev_x_action.state, R)
                                R_intersect_Q = Qt.region.intersection(R_set)
                                set_plotter(R_intersect_Q, colors[t], alpha_val=0.5)
                                # Find discrete x in R_intersect_Q
                                disc_x_list = discrete_x_calc(R_intersect_Q, disc_para, bound_rmv=boundary_rmv)
                            # Output message
                            print(f"\nt={t}")
                            print(f"Optimal i{t}: {opt_oppo_action.state}")
                            # Plot discrete x in sets
                            for disc_x in disc_x_list:
                                plt.scatter(disc_x[0], disc_x[1], color=colors[t], linewidths=0.1, marker='.')
                            # Given x_t-1 and i_t, find approximation of optimal x_t
                            opt_player_action = \
                                UV_dict[
                                    f"V_t={t} ({opt_oppo_action.parent_state.state}, {opt_oppo_action.state})"].action
                            print(f"Optimal Player State Approximation: {opt_player_action.state}")
                            # Plot optimal x_t state approximation
                            plt.scatter(opt_player_action.state[0], opt_player_action.state[1], facecolor='black',
                                        linewidth=0.1,
                                        marker='.')
                            # Connect optimal x_t state approximation to prev_x_state
                            if t != 0:
                                plt.plot([prev_x_action.state[0], opt_player_action.state[0]],
                                         [prev_x_action.state[1], opt_player_action.state[1]], color='black')
                            # Update total cost
                            tot_cost += norm(np.array(prev_x_action.state) - np.array(opt_player_action.state), 2)
                            print(f"Total Cost: {tot_cost}")
                            prev_x_action = opt_player_action
                            # Store optimal opponent history
                            oppo_hist[f"i{t}"] = opt_oppo_action.state
                        # Plot display
                        if boundary_rmv.lower() == 'n':
                            plt.title(fr"Optimal Opponent History: $i_0={oppo_hist['i0']}$, $i_1={oppo_hist['i1']}$, "
                                      fr"$i_2={oppo_hist['i2']}$ "
                                      f"\n{disc_para}x{disc_para} Discretization, Total Cost={round(tot_cost, 4)}")
                        else:
                            plt.title(fr"Optimal Opponent History: $i_0={oppo_hist['i0']}$, $i_1={oppo_hist['i1']}$, "
                                      fr"$i_2={oppo_hist['i2']}$ "
                                      f"\n{disc_para}x{disc_para} Discretization (Without Boundary), Total Cost={round(tot_cost, 4)}")
                # Plot all given convex sets
                for t_val in range(T + 1):
                    for node in range(1, num_nodes + 1):
                        hcoord_q, vcoord_q = Q[f"Q_t={t_val}^i={node}"].region.exterior.xy
                        plt.fill(hcoord_q, vcoord_q, alpha=0.1, facecolor=colors[t_val], edgecolor=colors[t_val],
                                 linewidth=2,
                                 linestyle=line_style_list[node - 1], label=fr"$Q_{t_val}^{{({node})}}$")
                plt.legend(fontsize=8)
                plt.grid(True)
                plt.axis(plt_scale)
                if control == '1':
                    plt.savefig(f"Optimal Opponent History {oppo_hist['i0']}{oppo_hist['i1']}{oppo_hist['i2']}, "
                                f"disc_para={disc_para}")
                else:
                    plt.savefig(
                        f"Opponent History {oppo_hist['i0']}{oppo_hist['i1']}{oppo_hist['i2']}, disc_para={disc_para}")
                plt.show()
                msg = input("Rerun? [Y/N] ")
        pass
| DRK98519/aCBC | game_play.py | game_play.py | py | 36,021 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "shapely.geometry.Point",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "shapely.geometry",
"line_number": 17,
"usage_type": "name"
},
{
"api_name": "shapely.geometry.Point",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "shapel... |
35396451951 | #!/usr/bin/env python3
# coding=utf-8
'''MDMForm 系统配置主窗口'''
import os
import sys
from PyQt5 import QtCore, QtGui, QtWidgets
from PyQt5.QtGui import QPalette, QPixmap, QIcon
from PyQt5.QtWidgets import QMainWindow,QMessageBox,QTableWidgetItem,QFileDialog
from PyQt5 import QtSql
from PyQt5.QtSql import QSqlQuery
from openpyxl import load_workbook,Workbook
import warnings
warnings.filterwarnings('ignore')
BASE_DIR= os.path.dirname(os.path.dirname(os.path.abspath(__file__) ) )
sys.path.append( BASE_DIR )
from ui.Ui_MDMForm import Ui_MDMForm
class MDMForm(QMainWindow,Ui_MDMForm):
    """MDM system-configuration main window.

    Shows every configured master-data table (metadata rows in the
    ``xt_objects`` SQLite table), displays the table's current contents,
    and supports three Excel round-trips: import rows from a workbook,
    export rows to a new workbook, and push rows back into the bundled
    template workbook.
    """

    def __tablesql(self):
        """Build the SQL listing every configured table object.

        The result is matched against the Excel template metadata
        (file name, sheet, start/end rows).

        Returns:
            string: SELECT statement over xt_objects (object_type='T').
        """
        sql= 'SELECT object_id,  \
            object_name, \
            object_name_cn, \
            object_desc, \
            template_file, \
            template_sheet, \
            start_row, \
            end_row \
            FROM xt_objects  \
            where object_type=\'T\' \
            order by object_id ASC'
        return sql

    def __columnsql(self,id):
        """Build the SQL listing a table's columns and their Excel mappings.

        Args:
            id (int): object_id of the configured table.

        Returns:
            string: SELECT statement over xt_objects (object_type='C').
        """
        sql= 'SELECT  \
            object_name, \
            object_name_cn, \
            object_desc, \
            column_mapping \
            FROM xt_objects  \
            where object_type=\'C\' \
            and parent_object_id= '
        sql += str(id)
        sql += ' order by column_mapping asc'
        return sql

    def __columns(self,id):
        """Map a table's column names to their Chinese display names.

        Args:
            id (int): object_id of the configured table.

        Returns:
            dict: {column_name: chinese_display_name}; empty on failure.
        """
        cols = dict()
        if not self.db.isOpen():
            if not self.db.open():
                QMessageBox.critical(self, 'MDM', self.db.lastError().text())
                # Return the (empty) dict so callers can still iterate it.
                return cols
        query = QSqlQuery()
        if not query.exec(self.__columnsql(str(id))):
            QMessageBox.critical(self,'MDM', query.lastError().text())
        else:
            while query.next():
                cols[query.value('object_name')] = query.value('object_name_cn')
        return cols

    def __columnmap(self,id):
        """Map a table's column names to their Excel column letters.

        Args:
            id (int): object_id of the configured table.

        Returns:
            dict: {column_name: excel_column_letter}; columns without a
            mapping carry an empty value; empty dict on failure.
        """
        cols = dict()
        if not self.db.isOpen():
            if not self.db.open():
                QMessageBox.critical(self, 'MDM', self.db.lastError().text())
                # Return the (empty) dict so callers can still iterate it.
                return cols
        query = QSqlQuery()
        if not query.exec(self.__columnsql(str(id))):
            QMessageBox.critical(self,'MDM', query.lastError().text())
        else:
            while query.next():
                cols[query.value('object_name')] = query.value('column_mapping')
        return cols

    def __insertsql(self,columnmap,tbname):
        """Build a positional-parameter INSERT for the mapped columns.

        Args:
            columnmap (dict): {column_name: excel_column_letter}; columns
                with an empty mapping are skipped.
            tbname (string): target table name.

        Returns:
            string: 'insert into <tbname> (c1,...) values (?,...)'.
        """
        sqlk = ''
        sqlv = ''
        for k,v in columnmap.items():
            if len(str(v))>0:
                if len(sqlk) > 0:
                    sqlk +=','
                    sqlv +=','
                sqlk += str(k)
                sqlv += '?'
        sql = 'insert into '
        sql += tbname
        sql += ' ('
        sql += sqlk
        sql += ') values ('
        sql += sqlv
        sql +=')'
        return sql

    def __init__(self):
        super().__init__()
        self.setupUi(self)
        self.setupUiEx()
        self.addConnect()
        # NOTE: Windows-style relative path; the application is Windows-only
        # anyway (see the 'start' command in templateClick).
        self.db = QtSql.QSqlDatabase.addDatabase('QSQLITE')
        self.db.setDatabaseName(os.path.join(BASE_DIR,'db\\mdm.db'))
        self.initData()
        # Pre-select the first configured table so the grid is never empty.
        if self.MDMListWidget.count()>0:
            self.MDMListWidget.setCurrentItem(self.MDMListWidget.item(0))
            self.mdmListClick()

    def closeEvent(self, QCloseEvent):
        """Close the SQLite connection when the window closes."""
        # Bug fix: isOpen is a method; the original tested the bound method
        # object itself ('self.db.isOpen'), which is always truthy.
        if self.db.isOpen():
            self.db.close()

    def setupUiEx(self):
        """Extra UI setup not generated by Qt Designer (window icon)."""
        icon = QIcon()
        appPath=os.path.join(BASE_DIR,u'res\\icon\\mdmconf.ico')
        icon.addPixmap(QPixmap(appPath))
        self.setWindowIcon(icon)

    def addConnect(self):
        """Wire widget signals to their handlers."""
        self.MDMListWidget.clicked.connect(self.mdmListClick)
        self.btnTemplate.clicked.connect(self.templateClick)
        self.btnImport.clicked.connect(self.importClick)
        self.btnExport.clicked.connect(self.exportClick)
        self.btnUpdate.clicked.connect(self.updateClick)

    def initData(self):
        """Fill the left-hand list with every configured table.

        Each list item carries the full configuration row in its UserRole
        data for later retrieval by the click handlers.
        """
        self.MDMListWidget.clear()
        if not self.db.isOpen():
            if not self.db.open():
                QMessageBox.critical(self, 'MDM', self.db.lastError().text())
                return
        query = QSqlQuery()
        if query.exec(self.__tablesql()):
            while query.next():
                qItem = QtWidgets.QListWidgetItem()
                cols = dict()
                cols['object_id'] = query.value('object_id')
                cols['object_name'] = query.value('object_name')
                cols['object_name_cn'] = query.value('object_name_cn')
                cols['object_desc'] = query.value('object_desc')
                cols['template_file'] = query.value('template_file')
                cols['template_sheet'] = query.value('template_sheet')
                cols['start_row'] = query.value('start_row')
                cols['end_row'] = query.value('end_row')
                qItem.setData(QtCore.Qt.ItemDataRole.UserRole,cols)
                qItem.setText(query.value('object_name_cn'))
                self.MDMListWidget.addItem(qItem)
        else:
            QMessageBox.critical(self,'MDM', query.lastError().text())

    def showData(self,id,name):
        """Load a configured table's rows into the data grid.

        Args:
            id (int): object_id of the configured table.
            name (string): database table name.
        """
        cols = self.__columns(str(id))
        self.dataTableWidget.clear()
        self.dataTableWidget.setRowCount(0)
        self.dataTableWidget.setColumnCount(len(cols))
        self.dataTableWidget.setHorizontalHeaderLabels(cols.values())
        # Build 'select c1,c2,... from <name>' over the configured columns.
        sql = ''
        for col in cols.keys():
            if(len(sql))>0:
                sql += ','
            sql += str(col)
        sql = 'select ' + sql
        sql +=' from '
        sql += str(name)
        if not self.db.isOpen():
            if not self.db.open():
                QMessageBox.critical(self, 'MDM', self.db.lastError().text())
                return
        query = QSqlQuery()
        if not query.exec(sql):
            QMessageBox.critical(self,'MDM', query.lastError().text())
        else:
            while query.next():
                rows=self.dataTableWidget.rowCount()
                self.dataTableWidget.insertRow(rows)
                for i in range(len(cols)):
                    qtitem=QTableWidgetItem(str(query.value(list(cols.keys())[i])))
                    self.dataTableWidget.setItem(rows,i,qtitem)

    def mdmListClick(self):
        """Show the selected table's configuration summary and its data."""
        qItem=self.MDMListWidget.currentItem()
        tconfs = dict(qItem.data(QtCore.Qt.ItemDataRole.UserRole))
        self.dataLabel.setText(str(tconfs['object_name_cn']) + ' : ' + str(tconfs['object_desc']))
        self.temFile.setText(str(tconfs['template_file']))
        self.temSheet.setText(str(tconfs['template_sheet']))
        self.temStart.setText(str(tconfs['start_row']))
        self.temEnd.setText(str(tconfs['end_row']))
        self.showData(str(tconfs['object_id']),str(tconfs['object_name']))

    def templateClick(self):
        """Open the selected table's Excel template with the OS default app."""
        if self.MDMListWidget.count()<=0:
            QMessageBox.information(self,'MDM', '请先选择要打开配置文件对应的基础数据配置表')
            return
        qItem=self.MDMListWidget.currentItem()
        tconfs = dict(qItem.data(QtCore.Qt.ItemDataRole.UserRole))
        if str(tconfs['template_file']) =='':
            QMessageBox.information(self,'MDM', '当前基础数据表尚未配置对应的配置文件')
            return
        appPath=os.path.join(BASE_DIR,str(tconfs['template_file']))
        # NOTE(review): 'start' is Windows-only and breaks on paths containing
        # spaces; os.startfile(appPath) would be more robust — confirm.
        os.system('start ' + appPath)
        return

    def importClick(self):
        """Replace a configured table's rows with rows read from Excel.

        Deletes all existing rows, then inserts every non-blank row found
        between the configured start/end rows of the chosen workbook.
        """
        if self.MDMListWidget.count()<=0:
            QMessageBox.information(self,'MDM', '请先选择要重新导入数据的基础数据配置表')
            return
        fNames= QFileDialog.getOpenFileName(self,'导入基础数据', '/','Excel File (*.xlsx)')
        if not fNames[0]:
            return
        qItem=self.MDMListWidget.currentItem()
        tconfs = dict(qItem.data(QtCore.Qt.ItemDataRole.UserRole))
        sheetName = str(tconfs['template_sheet'])
        if QMessageBox.question(self, 'MDM', '确认更新模板配置表[' +sheetName + ']的数据?',QMessageBox.Yes|QMessageBox.No) == QMessageBox.No:
            return
        startRow = 2  # default: data starts on row 2 (row 1 is the header)
        if str(tconfs['start_row']).isdigit():
            startRow=int(str(tconfs['start_row']))
        endRow = 0  # 0 means "no end configured": load down to the last row
        if str(tconfs['end_row']).isdigit():
            endRow=int(str(tconfs['end_row']))
        columnMap = self.__columnmap(str(tconfs['object_id']))
        try:
            wb= load_workbook(filename=fNames[0],read_only=True,data_only=True)
            if not (wb.sheetnames.index(sheetName) >= 0):
                QMessageBox.warning(self,'MDM', '选择的文件:' + fNames[0] + ',未包含配置指定的Sheet[' +sheetName + ']')
                wb.close()
                return
            ws=wb[sheetName]
            if endRow == 0:
                endRow = ws.max_row
            sql = 'delete from ' + str(tconfs['object_name'])
            if not self.db.isOpen():
                if not self.db.open():
                    QMessageBox.critical(self, 'MDM', self.db.lastError().text())
                    return
            query = QSqlQuery()
            if not query.exec(sql):
                QMessageBox.warning(self,'MDM', '清空数据表[' + str(tconfs['object_name_cn'] + ':' + query.lastQuery() + ']失败' + query.lastError().text()))
                wb.close()
                return
            sql = self.__insertsql(columnMap,str(tconfs['object_name']) )
            query.prepare(sql)
            for iRow in range(startRow,endRow+1):
                # Collect the row's values first and bind them only once we
                # know the row is not blank.  Bug fix: the original bound
                # values *before* the blank-row check, so a skipped row left
                # stale positional bindings that shifted every later row.
                rowValues = []
                bAllEmptyflag = True
                for k,v in columnMap.items():
                    if len(str(v))<=0:
                        continue  # column has no Excel mapping
                    cell = ws[str(v)+str(iRow)].value
                    qvalue = '' if cell is None else str(cell)
                    if not(len(qvalue)==0 or qvalue.isspace()):
                        bAllEmptyflag = False
                    rowValues.append(qvalue)
                if bAllEmptyflag:
                    continue  # skip fully blank rows
                for qvalue in rowValues:
                    query.addBindValue(qvalue)
                if not query.exec():
                    QMessageBox.warning(self,'MDM', '执行语句[' + query.lastQuery() + ']失败,' + query.lastError().text())
                    wb.close()
                    return
            wb.close()
            self.showData(str(tconfs['object_id']),str(tconfs['object_name']))
            QMessageBox.information(self,'MDM', '导入数据[' + str(tconfs['object_name_cn'])+ ']完成')
        except (NameError,ZeroDivisionError):
            QMessageBox.critical(self, 'MDM', '变量名错误或除数为0')
        except OSError as reason:
            QMessageBox.critical(self, 'MDM', str(reason))
        except TypeError as reason:
            QMessageBox.critical(self, 'MDM', str(reason))
        except Exception:
            # Bug fix: the bare 'except:' also swallowed SystemExit /
            # KeyboardInterrupt, and the message said "export" not "import".
            QMessageBox.information(self,'MDM','导入数据文件失败')

    def exportClick(self):
        """Export a configured table's rows to a new Excel workbook."""
        if self.MDMListWidget.count()<=0:
            QMessageBox.information(self,'MDM', '请先选择要重新导入数据的基础数据配置表')
            return
        fNames= QFileDialog.getSaveFileName(self,'下载基础数据', '/','Excel File (*.xlsx)')
        if not fNames[0]:
            return
        qItem=self.MDMListWidget.currentItem()
        tconfs = dict(qItem.data(QtCore.Qt.ItemDataRole.UserRole))
        sheetName = str(tconfs['template_sheet'])
        startRow = 2  # default: data starts on row 2 (row 1 is the header)
        if str(tconfs['start_row']).isdigit():
            startRow=int(str(tconfs['start_row']))
        columnMap = self.__columnmap(str(tconfs['object_id']))
        column = self.__columns(str(tconfs['object_id']))
        try:
            wb = Workbook()
            ws = wb.active
            ws.title = sheetName
            # Header row: Chinese display names written into the mapped columns.
            for k,v in columnMap.items():
                if len(str(v))<=0:
                    continue
                ws[str(v)+str(startRow-1)] = column[k]
            sql = ''
            for col in columnMap.keys():
                if(len(sql))>0:
                    sql += ','
                sql += str(col)
            sql = 'select ' + sql
            sql +=' from '
            sql += str(tconfs['object_name'])
            if not self.db.isOpen():
                if not self.db.open():
                    QMessageBox.critical(self, 'MDM', self.db.lastError().text())
                    return
            query = QSqlQuery()
            if not query.exec(sql):
                QMessageBox.critical(self,'MDM', query.lastError().text())
                return
            iRow = startRow
            while query.next():
                for k,v in columnMap.items():
                    if len(str(v))<=0:
                        continue
                    ws[str(v)+str(iRow)] = str(query.value(str(k)))
                iRow += 1
            wb.save(fNames[0])
            # Bug fix: the original wrote 'wb.close' without parentheses,
            # which never actually called the method.
            wb.close()
            QMessageBox.information(self,'MDM','导出数据完成,文件名:' + fNames[0])
        except (NameError,ZeroDivisionError):
            QMessageBox.critical(self, 'MDM', '变量名错误或除数为0')
        except OSError as reason:
            QMessageBox.critical(self, 'MDM', str(reason))
        except TypeError as reason:
            QMessageBox.critical(self, 'MDM', str(reason))
        except Exception:
            QMessageBox.information(self,'MDM','导出数据文件失败')
        return

    def updateClick(self):
        """Write a configured table's rows back into its template workbook."""
        if self.MDMListWidget.count()<=0:
            QMessageBox.information(self,'MDM', '请先选择要更新模板文件数据的基础数据配置表')
            return
        qItem=self.MDMListWidget.currentItem()
        tconfs = dict(qItem.data(QtCore.Qt.ItemDataRole.UserRole))
        if str(tconfs['template_file']) =='':
            QMessageBox.information(self,'MDM', '当前基础数据表尚未配置对应的配置文件')
            return
        tempfile=os.path.join(BASE_DIR,str(tconfs['template_file']))
        sheetName = str(tconfs['template_sheet'])
        if QMessageBox.question(self, 'MDM', '确认更新本地模板文件:' + tempfile + ',配置表[' + sheetName + ']的数据?',QMessageBox.Yes|QMessageBox.No) == QMessageBox.No:
            return
        startRow = 2  # default: data starts on row 2 (row 1 is the header)
        if str(tconfs['start_row']).isdigit():
            startRow=int(str(tconfs['start_row']))
        try:
            columnMap = self.__columnmap(str(tconfs['object_id']))
            wb = load_workbook(tempfile,False)
            if not (wb.sheetnames.index(sheetName) >= 0):
                QMessageBox.warning(self,'MDM', '选择的文件:' + tempfile + ',未包含配置指定的Sheet[' + sheetName + ']')
                wb.close()
                return
            ws=wb[sheetName]
            # TODO: stale rows below the new data are not cleared — the
            # template may hold extra columns (e.g. images) that were never
            # imported, so a blind delete would be unsafe.
            sql = ''
            for col in columnMap.keys():
                if(len(sql))>0:
                    sql += ','
                sql += str(col)
            sql = 'select ' + sql
            sql +=' from '
            sql += str(tconfs['object_name'])
            if not self.db.isOpen():
                if not self.db.open():
                    QMessageBox.critical(self, 'MDM', self.db.lastError().text())
                    return
            query = QSqlQuery()
            if not query.exec(sql):
                QMessageBox.critical(self,'MDM', query.lastError().text())
                return
            iRow = startRow
            while query.next():
                for k,v in columnMap.items():
                    if len(str(v))<=0:
                        continue
                    ws[str(v)+str(iRow)] = str(query.value(str(k)))
                iRow += 1
            wb.save(tempfile)
            # Bug fix: 'wb.close' without parentheses never closed the book.
            wb.close()
            QMessageBox.information(self,'MDM','更新模板文件数据:' + tempfile + '完成')
        except (NameError,ZeroDivisionError):
            QMessageBox.critical(self, 'MDM', '变量名错误或除数为0')
        except OSError as reason:
            QMessageBox.critical(self, 'MDM', str(reason))
        except TypeError as reason:
            QMessageBox.critical(self, 'MDM', str(reason))
        except Exception:
            QMessageBox.information(self,'MDM','更新模板文件数据失败')
        return
{
"api_name": "warnings.filterwarnings",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "os.path.dirname",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 21,
"usage_type": "attribute"
},
{
"api_name": "os.path.abspath"... |
32041540540 | import torch
import drjit as dr
import mitsuba as mi
import sys,os,json
import importlib
sys.path.append(".")
import cv2
import numpy as np
# Prefer the first CUDA device, otherwise fall back to CPU.
# NOTE(review): the mitsuba variant selected below ('cuda_ad_rgb') still
# requires CUDA — confirm the CPU fallback is ever usable.
if torch.cuda.is_available():
    device = torch.device("cuda:0")
    torch.cuda.set_device(device)
else:
    device = torch.device("cpu")
from utils.logger import Logger
from utils.matcher import Matcher
from mitsuba.scalar_rgb import Transform4f as T
from tqdm.std import tqdm
mi.set_variant('cuda_ad_rgb')
log_level = 1  # 0: silent; >0: save/track images; >1: also per-iteration dumps
Pooler = torch.nn.AvgPool2d(kernel_size=2)  # 2x average downsampler used by down_res_loss
@dr.wrap_ad(source='drjit', target='torch')
def down_res_loss(st, img, img_ref):
    """Mean-squared error between *img* and *img_ref* after *st* rounds
    of 2x average pooling (multi-resolution loss, HWC inputs)."""
    pred = img[None, ...].permute(0, 3, 1, 2)
    target = img_ref[None, ...].permute(0, 3, 1, 2)
    for _ in range(st):
        pred = Pooler(pred)
        target = Pooler(target)
    if log_level > 0:
        Logger.save_img("down_res.png", pred.permute(0, 2, 3, 1)[0])
    return torch.mean((pred - target) ** 2)
if __name__=="__main__":
    # Usage: python optim.py <method> <config>
    #   method: integrator name; a "-hybrid" suffix switches to prb_reparam
    #           after `thres` iterations
    #   config: module name under exp/ holding the task settings
    method = sys.argv[1]
    config = sys.argv[2]
    Logger.init(exp_name=config+"/"+method, show=False, debug=False, path="results/",add_time=False)
    tasks = importlib.import_module(f'exp.{config}') # import specific task
    resolution = tasks.resolution #resolution
    spp = tasks.spp # samples per pixel for optimization renders
    scene = tasks.scene # scene
    thres = tasks.thres # iteration at which the hybrid scheme switches integrator
    max_depth = tasks.max_depth
    match_res = tasks.match_res
    # Build the reference image: prefer an image file, then a dedicated
    # ground-truth scene, falling back to rendering the task scene itself.
    if hasattr(tasks,"gt_img")==True:
        gt_img = torch.from_numpy(cv2.cvtColor(cv2.imread(tasks.gt_img),cv2.COLOR_BGR2RGB)).to(device)/255.0
        img_ref = mi.TensorXf(gt_img.reshape(-1,3))
    else:
        if hasattr(tasks,"gt_scene")==True:
            img_ref = mi.render(tasks.gt_scene, seed=0, spp=8192, sensor=0)
        else:
            img_ref = mi.render(scene, seed=0, spp=8192, sensor=0)
        img_ref = img_ref[...,:3]
        img_np = np.array(mi.util.convert_to_bitmap(img_ref))
        gt_img = torch.from_numpy(img_np).to(device)/255.0
        if log_level>0:
            Logger.save_img("gt_img.png",gt_img)
    # Low-resolution copy of the reference for the optimal-transport matcher.
    gt_img_low= torch.from_numpy(cv2.resize(np.array(mi.util.convert_to_bitmap(img_ref)),(match_res,match_res))).to(device)/255.0
    # pixel matcher using optimal transport(Sinkhorn)
    matcher = Matcher(match_res, device)
    # get optimized parameters and the transformation that applies them
    opt, apply_transformation, output, params = tasks.optim_settings()
    apply_transformation(params, opt)
    for key in opt.keys():
        dr.enable_grad(opt[key])
    params = mi.traverse(scene)
    # get init image (rendered before any optimization, for reference)
    img_init = mi.render(scene, params, seed=0, spp=512, sensor=0)
    init_img = torch.from_numpy(np.array( mi.util.convert_to_bitmap(img_init[...,:3]))).to(device)/255.0
    if log_level>0:
        Logger.save_img("init_img.png",init_img)
    # deal with hybrid scheme: strip the "-hybrid" suffix and prepare the
    # second-stage integrator; otherwise push the switch point out of reach
    if method.endswith("hybrid"):
        method = method[:-7]
        integrator2 = mi.load_dict({
            'type': "prb_reparam",
            'max_depth': max_depth
        })
    else:
        thres = 10000
    # define the first-stage integrator
    integrator1 = mi.load_dict({
        'type': method,
        'max_depth': max_depth
    })
    # camera settings are slightly different between EPSM and PRB.
    if method.startswith("manifold"):
        sensor_id = 1
    else:
        sensor_id = 0
    loop = tqdm(range(tasks.it))
    for it in loop:
        apply_transformation(params, opt)
        if it<thres:
            img = mi.render(scene, params, seed=it, spp=spp, integrator=integrator1, sensor=sensor_id)
        else:
            if it==thres:
                # Reset optimizer state at the integrator switch.
                for key in opt.keys():
                    opt.reset(key)
            img = mi.render(scene, params, seed=it, spp=spp, integrator=integrator2, sensor=0)
        imgs = np.array(mi.util.convert_to_bitmap(img[...,:3]))
        if log_level>0:
            Logger.save_img(f"optim.png",imgs/255.0,flip=False)
            Logger.add_image(f"optim",imgs/255.0,flip=False)
        if log_level>1:
            Logger.save_img_2(f"optim{it}.png",imgs/255.0,flip=False)
        # 5-channel renders: drive gradients with the Sinkhorn pixel matcher
        # (gradient image upsampled back to full resolution); otherwise a
        # plain L2 loss against the reference.
        if img.shape[-1]==5:
            render_img = torch.from_numpy(cv2.resize(imgs,(match_res,match_res))).to(device)/255.0
            grad_ = matcher.match_Sinkhorn(render_img[...,:3].reshape(-1,3), gt_img_low[...,:3].reshape(-1,3))
            grad_ = grad_.reshape(match_res,match_res,5)
            grad_ = grad_.repeat(resolution//match_res,resolution//match_res,1)
            grad = mi.TensorXf(grad_)
            dr.backward(img*grad)
        else:
            # whether using multi-resolution loss
            # loss = down_res_loss(6-((7*it)//tasks.it),img,img_ref[...,:3])
            loss = dr.sum(dr.sqr(img - img_ref[...,:3])) / len(img)
            dr.backward(loss)
        try:
            # remove nan in grad, then snapshot parameter values for logging
            dic = {}
            for key in opt.keys():
                x = dr.grad(opt[key])
                x[dr.isnan(x)] = 0
                dr.set_grad(opt[key],x)
                dic[key] = float(opt[key].torch().item())#.item()
            if log_level>1:
                Logger.save_param(f"param{it}.npy",dic)
        except:
            pass
        opt.step()
        loop.set_description(f"Iteration {it:02d}: error={output(opt)}")
    Logger.exit()
    # Final high-quality render of the optimized scene.
    img_final = mi.render(scene, params, seed=0, spp=8192, sensor=0)
    img_final = torch.from_numpy(np.array( mi.util.convert_to_bitmap(img_final[...,:3]))).to(device)/255.0
    if log_level>0:
        Logger.save_img(f"{sys.argv[1]}.png",img_final)
    print("finish optim")
print("finish optim")
| jkxing/EPSM_Mitsuba3 | EPSM/optim.py | optim.py | py | 5,566 | python | en | code | 4 | github-code | 36 | [
{
"api_name": "sys.path.append",
"line_number": 6,
"usage_type": "call"
},
{
"api_name": "sys.path",
"line_number": 6,
"usage_type": "attribute"
},
{
"api_name": "torch.cuda.is_available",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "torch.cuda",
"... |
4517993066 | import os
from django.contrib.auth.views import redirect_to_login
from chat.models import *
from django.db.models.query_utils import Q
from notification.models import *
from user.models import *
from post.models import *
from post.forms import *
from group.models import *
from django.shortcuts import redirect, render
from django.urls import reverse
from django.core.paginator import Paginator, EmptyPage, PageNotAnInteger
from django.contrib.auth.decorators import login_required
from pydub.silence import split_on_silence
from pydub import AudioSegment
import numpy as np
import librosa
import math
import pickle
def home(request):
    """Render the home feed for the signed-in user.

    Redirects anonymous visitors to the login page; otherwise gathers the
    user's chats, groups, online users, notifications and every post
    (newest first) into the template context.
    """
    if request.user.id is None:
        return redirect(reverse('user:login'))
    me = User.objects.get(id=request.user.id)

    personnal_chats = ChatBox.objects.filter(Q(user1=me) | Q(user2=me))
    joined_chats = [join.groupchatbox for join in JoinGroupChat.objects.filter(invitee=me)]
    group_chats = list(GroupChatBox.objects.filter(creator=me)) + joined_chats
    my_groups = set(list(Group.objects.filter(admins__in=[me])) + list(Group.objects.filter(members__in=[me])))
    online_users = User.objects.filter(Q(is_online=True) & ~Q(id=me.id))
    posts = Post.objects.all()

    post_entries = [
        {
            'view': 'list',
            'post': post,
            'reactions': Reaction.objects.filter(post=post),
            'comments': Comment.objects.filter(post=post),
        }
        for post in reversed(posts)
    ]
    chat_entries = [
        {
            'chat': chat,
            # The counterpart in a 1:1 chat is whichever side is not me.
            'receiver_id': chat.user2.id if chat.user1 == me else chat.user1.id,
        }
        for chat in personnal_chats
    ]
    group_entries = [
        {
            'chat': chat,
            'latest_msg': GroupMessage.objects.filter(chatbox=chat).order_by('-sent')[0],
        }
        for chat in group_chats
    ]
    notifications = list(reversed(PostNotification.objects.filter(recipient=me).exclude(actor=me)))

    context = {
        'posts': post_entries,
        'me': me,
        'personnal_chats': chat_entries,
        'group_chats': group_entries,
        'my_groups': my_groups,
        'online_users': online_users,
        'my_notifications': notifications,
    }
    return render(request, 'home.html', context)
def get_mfcc(file_path):
    """Load a .wav file and return its T x 36 MFCC feature matrix
    (12 mean-normalised MFCCs plus first- and second-order deltas)."""
    samples, rate = librosa.load(file_path)
    hop = math.floor(rate * 0.010)   # 10 ms hop
    win = math.floor(rate * 0.025)   # 25 ms analysis window
    # 12 x T MFCC matrix
    feats = librosa.feature.mfcc(
        samples, rate, n_mfcc=12, n_fft=1024,
        hop_length=hop, win_length=win)
    # subtract the per-coefficient mean (normalisation)
    feats = feats - np.mean(feats, axis=1).reshape((-1, 1))
    # delta features, 1st and 2nd order
    d1 = librosa.feature.delta(feats, order=1)
    d2 = librosa.feature.delta(feats, order=2)
    stacked = np.concatenate([feats, d1, d2], axis=0)  # 36 x T
    return stacked.T  # hmmlearn uses a T x N matrix
def detect_leading_silence(sound, silence_threshold=-42.0, chunk_size=10):
    """Return the duration (ms) of leading silence in *sound*.

    Scans chunk_size-millisecond chunks from the start until one is at
    least as loud as silence_threshold (dBFS), or the end is reached.

    Args:
        sound: a pydub.AudioSegment (sliced by milliseconds).
        silence_threshold: loudness in dBFS below which a chunk is silent.
        chunk_size: scan step in ms.

    Raises:
        ValueError: if chunk_size is not positive (would loop forever).
    """
    # Bug fix: the original used `assert`, which is stripped under -O and
    # would then allow an infinite loop; raise a real exception instead.
    if chunk_size <= 0:
        raise ValueError("chunk_size must be a positive number of milliseconds")
    trim_ms = 0  # ms
    # Bounds check first so we never measure an empty slice past the end.
    while trim_ms < len(sound) and sound[trim_ms:trim_ms+chunk_size].dBFS < silence_threshold:
        trim_ms += chunk_size
    return trim_ms
def search(request, filename):
    """Voice search: transcribe a recorded query and list matching posts.

    Splits the uploaded recording into per-word chunks on silence,
    classifies each chunk with per-word HMMs, then returns every post
    whose text contains any predicted word.

    Args:
        request: Django request (must be authenticated).
        filename: name of the recorded .wav inside voice_search_data/.
    """
    me = None if request.user.id is None else User.objects.get(id=request.user.id)
    my_groups = set(list(Group.objects.filter(admins__in=[me])) + list(Group.objects.filter(members__in=[me])))
    # Get file audio.
    # NOTE(review): hard-coded absolute dev path — should come from settings;
    # confirm the deployment layout before relying on this.
    abs_path = "E:/Code/Python/Django/tomo/tomo/voice_search_data/"
    audio_data = AudioSegment.from_file(abs_path+filename, format="wav")
    os.remove(abs_path+filename)
    # Split the recording into single-word chunks on silence.
    audio_chunks = split_on_silence(audio_data, min_silence_len=500, silence_thresh=-30)
    # Export each chunk for per-word processing below.
    for i, chunk in enumerate(audio_chunks):
        out_file = "tomo/voice_search_data/chunk{0}.wav".format(i)
        print("exporting", out_file)
        chunk.export(out_file, format="wav")
    # Load the per-word HMM models ONCE.  Performance fix: the original
    # re-read all 19 pickle files for every audio chunk.
    class_names = ['con', 'học', 'nhà', 'sinh', 'tuyển', 'một', 'hai', 'ba', 'bốn', 'năm', 'sáu', 'bảy', 'tám', 'chín', 'có', 'không', 'ngày', 'tháng', 'lớp']
    model = {}
    for key in class_names:
        name = f"tomo/models/model_{key}.model"
        with open(name, 'rb') as file:
            model[key] = pickle.load(file)
    predict_words = []
    # Predict each segmented chunk (os.listdir snapshots the directory,
    # so the trimmed files created below are not iterated).
    i = 0
    for audio_name in os.listdir('tomo/voice_search_data'):
        if audio_name == 'search.wav':
            continue  # ignore the original recording
        audio_data = AudioSegment.from_file(abs_path+audio_name, format="wav")
        # Trim silence from both ends of the chunk.
        start_trim = detect_leading_silence(audio_data)
        end_trim = detect_leading_silence(audio_data.reverse())
        trimmed_sound = audio_data[start_trim:len(audio_data)-end_trim]
        trimmed_sound.export(f"tomo/voice_search_data/trimmed{i}.wav", format="wav")
        # Score the chunk against every word model; keep the best word.
        record_mfcc = get_mfcc(f"tomo/voice_search_data/trimmed{i}.wav")
        scores = [model[cname].score(record_mfcc) for cname in class_names]
        predict_words.append(class_names[np.argmax(scores)])
        # Clean up the intermediate files.
        os.remove("tomo/voice_search_data/" + audio_name)
        os.remove(f"tomo/voice_search_data/trimmed{i}.wav")
        i += 1
    # Keep every post whose text contains any predicted word.
    posts_search_result = []
    all_posts = Post.objects.all()
    for post in all_posts:
        if any(str(predict_word) in post.text for predict_word in predict_words):
            posts_search_result.append(post)
    context = {
        'posts': [{
            'view': 'list',
            'post': post,
            'reactions': Reaction.objects.filter(post=post),
            'comments': Comment.objects.filter(post=post),
        } for post in reversed(posts_search_result)],
        'my_groups': my_groups,
        'predict_words': predict_words,
        'me': me,
    }
    return render(request, 'search_result.html', context)
def conv_to_num(word):
    """Translate a Vietnamese digit word ('một'..'chín') to its integer.

    Raises KeyError for any unrecognised word (same as the original
    dict-literal lookup).
    """
    digit_by_word = {
        'một': 1,
        'hai': 2,
        'ba': 3,
        'bốn': 4,
        'năm': 5,
        'sáu': 6,
        'bảy': 7,
        'tám': 8,
        'chín': 9,
    }
    return digit_by_word[word]
| longnp030/SocialNetwork-Py | tomo/views.py | views.py | py | 6,784 | python | en | code | 1 | github-code | 36 | [
{
"api_name": "django.shortcuts.redirect",
"line_number": 24,
"usage_type": "call"
},
{
"api_name": "django.urls.reverse",
"line_number": 24,
"usage_type": "call"
},
{
"api_name": "django.db.models.query_utils.Q",
"line_number": 26,
"usage_type": "call"
},
{
"api_... |
41744416176 | from st7920 import ST7920
from random import randint
from time import sleep
import curses
import collections
# Game configuration: each logical grid cell is SCALE x SCALE pixels on
# the 128x64 ST7920 display.
SCALE = 4
# Bug fix: use floor division so the grid dimensions are ints — the
# original 128/SCALE produced floats, which breaks randint() (and any
# integer-only maths downstream) on Python 3.
WIDTH = 128//SCALE
HEIGHT = 64//SCALE

score = 0      # apples eaten so far
alive = True   # cleared by dead() to stop the main loop

s = ST7920()   # the LCD frame buffer / driver
def newfoodpos():
    """Return a random [x, y] grid cell for the next piece of food."""
    col = randint(0, WIDTH - 1)
    row = randint(0, HEIGHT - 1)
    return [col, row]
def update():
    """Advance the game one tick: move the head, wrap at the edges,
    handle food/self collisions, then redraw the display."""
    global headpos, foodpos, score
    # Step the head one cell in the current direction
    # (0:east, 1:south, 2:west, 3:north).
    if headdir == 0:
        newpos = [headpos[0]+1, headpos[1]]
    elif headdir == 1:
        newpos = [headpos[0], headpos[1]+1]
    elif headdir == 2:
        newpos = [headpos[0]-1, headpos[1]]
    else:
        newpos = [headpos[0], headpos[1]-1]
    # Wrap around the screen edges.
    if newpos[0]<0: newpos[0] += WIDTH
    if newpos[0]>=WIDTH: newpos[0] = 0
    if newpos[1]<0: newpos[1] += HEIGHT
    if newpos[1]>=HEIGHT: newpos[1] = 0
    # Running into our own body ends the game (dead() exits the process).
    if (newpos in snakebits):
        dead()
    # Eating food grows the snake (tail kept) and respawns the food;
    # otherwise drop the tail so the length stays constant.
    if newpos[0]==foodpos[0] and newpos[1]==foodpos[1]:
        foodpos = newfoodpos() # don't remove if we hit the food
        score += 1
    else:
        snakebits.popleft() #remove the last tail bit
    snakebits.append(newpos)
    headpos = newpos
    draw()
    s.redraw()
def dead():
    """End the game: show the final score on the LCD and exit the process."""
    global alive
    alive = False
    s.clear()
    # Centre the text: characters are 6 px wide ("You died!" = 9*6 = 54 px).
    s.put_text("You died!", ((WIDTH*SCALE)-54)/2, ((HEIGHT*SCALE)/2)-8)
    msg = "Score: " + str(score)
    s.put_text(msg, ((WIDTH*SCALE)-(6*len(msg)))/2, ((HEIGHT*SCALE)/2))
    s.redraw()
    exit()
def draw():
    """Render one frame into the LCD buffer: the food as an outlined
    square, each snake segment as a filled square (the actual redraw
    happens in update())."""
    s.clear()
    fx, fy = foodpos
    s.rect(fx*SCALE, fy*SCALE, ((fx+1)*SCALE)-1, ((fy+1)*SCALE)-1)
    for bx, by in snakebits:
        s.fill_rect(bx*SCALE, by*SCALE, ((bx+1)*SCALE)-1, ((by+1)*SCALE)-1)
def showsplash(screen):
    """Show the title screen for 3 seconds, then drain pending keypresses
    so the game does not start with a stale direction input."""
    s.clear()
    s.put_text("SNAKE!", ((WIDTH*SCALE)-36)/2, ((HEIGHT*SCALE)/2)-16)
    s.put_text("Arrow keys", ((WIDTH*SCALE)-60)/2, ((HEIGHT*SCALE)/2))
    s.put_text("to control!", ((WIDTH*SCALE)-66)/2, ((HEIGHT*SCALE)/2)+8)
    s.redraw()
    sleep(3)
    while screen.getch() != -1: # clear the input buffer
        pass
def main(screen):
    """Curses entry point: poll arrow keys and run the game loop.

    'q' (ASCII 113) quits.  A direction change is ignored when it would
    reverse the snake straight back onto itself (e.g. right while moving
    left).  One tick every 0.05 s.
    """
    global headdir
    screen.nodelay(1)  # make getch() non-blocking
    showsplash(screen)
    while alive:
        char = screen.getch()
        if char==113: exit()
        elif char==curses.KEY_RIGHT and headdir!=2 : headdir = 0
        elif char==curses.KEY_DOWN and headdir!=3: headdir = 1
        elif char==curses.KEY_LEFT and headdir!=0: headdir = 2
        elif char==curses.KEY_UP and headdir!=1: headdir = 3
        update()
        sleep(0.05)
# --- Initial game state and launch ---
s.clear()
s.redraw()
foodpos = newfoodpos()
snakebits = collections.deque()  # body cells, oldest tail bit first, head last
headpos = [5,5]
snakebits.append([2,5])
snakebits.append([3,5])
snakebits.append([4,5])
snakebits.append(headpos)
headdir = 0 #0:east, 1:south, 2:west, 3:north
# curses.wrapper restores the terminal even if the game crashes.
curses.wrapper(main)
| JMW95/RaspiLCDGames | snake.py | snake.py | py | 2,499 | python | en | code | 3 | github-code | 36 | [
{
"api_name": "st7920.ST7920",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "random.randint",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "time.sleep",
"line_number": 67,
"usage_type": "call"
},
{
"api_name": "curses.KEY_RIGHT",
"line_... |
42243521560 | import numpy as np
import matplotlib.pyplot as plt
import bead_util as bu
import scipy.signal as ss
# Diagnostics for a spinning-bead monitor dataset: isolate the response at
# the electrical drive frequency and plot its instantaneous amplitude and
# its phase relative to the drive.
path = "/data/20180927/bead1/spinning/50s_monitor_5min_gaps"
files = bu.find_all_fnames(path)
index = 0        # which position channel to analyse
fdrive = 1210.7  # drive frequency [Hz]
bw = 0.5         # half-width of the band kept around the drive line [Hz]
bwp = 5.         # width of the zoomed plotting window [Hz]
Ns = 250000      # samples per file
Fs = 5000.       # sampling rate [Hz]
# Conversion factor: presumably k = m*(2*pi*f0)^2 with m = 1e-13 kg and
# f0 = 370 Hz, turning calibrated force into displacement — TODO confirm.
k = 1e-13*(2.*np.pi*370.)**2
df = bu.DataFile()
df.load(files[-2])
df.load_other_data()
df.diagonalize()
drive = df.other_data[2]
# Detrend and convert to apparent displacement [m].
resp = ss.detrend(df.pos_data[index])*df.conv_facs[0]/k
drive = ss.detrend(df.other_data[2])*df.conv_facs[0]/k
respft = np.fft.rfft(resp)
driveft = np.fft.rfft(drive)
freqs = np.fft.rfftfreq(Ns, d = 1./Fs)
#plot the full response spectrum with the drive line marked
plt.loglog(freqs, np.abs(respft)*2./Ns)
plt.axvline(x = fdrive, linestyle = '--', color = 'k', label = str(fdrive)+"Hz drive", alpha = 0.5)
plt.legend()
plt.xlabel("Frequency [Hz]")
plt.ylabel("Apparent Displacement [m]")
plt.show()
#plot the zoom: a bwp-wide window centred on the drive frequency
plt.semilogy(freqs, np.abs(respft)*2./Ns)
plt.axvline(x = fdrive, linestyle = '--', color = 'k', label = str(fdrive)+"Hz drive", alpha = 0.5)
plt.legend()
plt.xlabel("Frequency [Hz]")
plt.ylabel("Apparent Displacement [m]")
plt.xlim([fdrive-bwp/2., fdrive+bwp/2.])
plt.show()
#get inst amp and phase from the analytic signal of the band-limited data
tarr = np.linspace(0., 50., 250000)
# NOTE(review): these are aliases, not copies — the masking below also
# zeroes respft/driveft.  The spectrum plots above are already drawn, so
# this is currently harmless, but copy if the full spectra are needed later.
respft_line = respft
driveft_line = driveft
respft_line[np.abs(freqs - fdrive)>bw] = 0.
driveft_line[np.abs(freqs - fdrive)>bw] = 0.
anal_signal_resp = ss.hilbert(np.fft.irfft(respft_line))
anal_signal_drive = ss.hilbert(np.fft.irfft(driveft_line))
# Phase of the response relative to the drive (unwrapped).
phir = np.unwrap(np.angle(anal_signal_resp)) - np.unwrap(np.angle(anal_signal_drive))
plt.plot(tarr, np.abs(anal_signal_resp))
plt.xlabel("Time [s]")
plt.ylabel("Instantaneous Amplitude [m]")
plt.ylim([0, 4e-10])
plt.xlim([0, 50])
plt.show()
plt.plot(tarr, np.abs(phir))
plt.xlabel("Time [s]")
plt.ylabel("Drive Response Phase Difference [rad]")
plt.xlim([0, 50])
#plt.ylim([0, 3])
plt.show()
| charlesblakemore/opt_lev_analysis | scripts/spinning/old_scripts/inst_amp_phase_plot.py | inst_amp_phase_plot.py | py | 1,852 | python | en | code | 1 | github-code | 36 | [
{
"api_name": "bead_util.find_all_fnames",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "numpy.pi",
"line_number": 14,
"usage_type": "attribute"
},
{
"api_name": "bead_util.DataFile",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "scipy.signa... |
17285744603 | from abc import ABCMeta, abstractmethod
import subprocess
import io
from logging import Logger
class Action(metaclass=ABCMeta):
    """Abstract base for actions that render themselves as text.

    Subclasses implement to_text(); extra keyword arguments are accepted
    (and ignored here) so all subclasses share a uniform constructor.
    """
    def __init__(self, action_id, job, **kwargs):
        self.id = action_id  # identifier of this action
        self.job = job       # the job this action belongs to
    @abstractmethod
    def to_text(self, logger: Logger) -> str:
        """Produce this action's text output, logging via *logger*."""
        pass
class TextAction(Action):
    """Action that emits a fixed, preconfigured piece of text."""

    def __init__(self, action_id, job, **kwargs):
        super(TextAction, self).__init__(action_id, job)
        # Text to emit; defaults to a cheerful Chinese greeting.
        self.text = kwargs.get("text", "今天又是元气满满的一天")

    def to_text(self, logger: Logger) -> str:
        """Log the text length and return the configured text.

        Consistency fix: parameter renamed to ``logger`` to match the
        abstract signature in Action (was misspelled ``loger``).
        """
        logger.info(f"Text to text Length: {len(self.text)}")
        return self.text
class CommandAction(Action):
    """Action whose text is produced by running a shell command."""

    def __init__(self, action_id, job, **kwargs):
        super(CommandAction, self).__init__(action_id, job)
        # Shell command to execute; defaults to a harmless echo.
        self.command = kwargs.get("command", "echo Hello")

    def to_text(self, logger: Logger) -> str:
        """Run the command and return its output text.

        Returns stdout alone when stderr is empty, otherwise stdout and
        stderr joined by a newline.
        """
        proc = subprocess.Popen(self.command, shell=True,
                                stdout=subprocess.PIPE, stderr=subprocess.PIPE,
                                bufsize=-1)
        # Bug fix: communicate() reads both pipes while the process runs;
        # the original wait()-then-read could deadlock once a pipe buffer
        # filled up.
        out_bytes, err_bytes = proc.communicate()
        str_stdout = out_bytes.decode('utf-8').strip()
        str_stderr = err_bytes.decode('utf-8').strip()
        logger.info(f"Command to text Command {self.command} stdout: {str_stdout}")
        # Bug fix: this line logged stderr under a "stdout" label.
        logger.info(f"Command to text Command {self.command} stderr: {str_stderr}")
        # Bug fix: the original returned the *empty* stdout when stdout was
        # blank (discarding stderr) and appended stderr otherwise — the
        # condition tested the wrong stream.
        if len(str_stderr) == 0:
            return str_stdout
        return str_stdout + "\n" + str_stderr
| SuperH-0630/HelloEmail | action.py | action.py | py | 1,587 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "abc.ABCMeta",
"line_number": 7,
"usage_type": "name"
},
{
"api_name": "logging.Logger",
"line_number": 13,
"usage_type": "name"
},
{
"api_name": "abc.abstractmethod",
"line_number": 12,
"usage_type": "name"
},
{
"api_name": "logging.Logger",
"li... |
7619783575 | import time
from pathlib import Path
import torch
import torch.nn as nn
from torch.optim import RMSprop, Adam
from torch.optim.lr_scheduler import ReduceLROnPlateau
from .evaluate import evaluate
from .logger import print_logger
def train_net(net,
              dataloaders,
              device,
              result_path : Path,
              learning_rate: float = 0.1,
              epochs : int = 999,
              ):
    """Train a segmentation network with RMSprop + ReduceLROnPlateau.

    Runs up to ``epochs + 1`` epochs over ``dataloaders['Train']``, validates
    on ``dataloaders['Valid']`` after every epoch, checkpoints the weights
    whenever the validation Dice score improves, stops early after 12
    consecutive non-improving epochs, and returns ``net`` reloaded with the
    best checkpoint.
    """
    train_loader = dataloaders['Train']
    val_loader = dataloaders['Valid']
    # Early-stopping bookkeeping: counter and its threshold.
    early_stop = 0
    early_stop_criterion = 12
    best_val_score = 0
    total_start_time = time.time()
    # print_logger writes both to stdout and to <result_path>/LOG.txt.
    logger = print_logger(result_path.joinpath('LOG').with_suffix('.txt'))
    image_path = result_path.joinpath('Prediction')
    image_path.mkdir(exist_ok=True, parents = True)
    checkpoint = result_path.joinpath('Model_Weight').with_suffix('.pth')
    checkpoint.parent.mkdir(exist_ok = True, parents = True)
    optimizer = RMSprop(net.parameters(), lr=learning_rate, weight_decay=1e-8)
    # optimizer = Adam(net.parameters(), lr=learning_rate)
    scheduler = ReduceLROnPlateau(optimizer, mode = 'max', factor = 0.1, patience = 4, min_lr = 1e-5) # goal: maximize Dice score
    criterion = nn.CrossEntropyLoss()
    for epoch in range(epochs+1):
        start_time = time.time()
        net.train()
        epoch_loss = 0
        for images, true_masks, _ in train_loader :
            images = images.to(device=device, dtype=torch.float32)
            # NOTE(review): masks are cast to float32 before CrossEntropyLoss;
            # presumably they are one-hot / probability maps -- confirm.
            true_masks = true_masks.to(device=device, dtype=torch.float32)
            masks_pred = net(images)
            loss = criterion(masks_pred, true_masks)
            optimizer.zero_grad(set_to_none=True)
            loss.backward()
            optimizer.step()
            epoch_loss += loss.item()
        epoch_loss = epoch_loss / len(train_loader)
        dice_score, sensitivity, specificity = evaluate(net, val_loader, device, image_path)
        # Scheduler maximizes the Dice score (mode='max' above).
        scheduler.step(dice_score)
        if dice_score <= best_val_score :
            early_stop += 1
        else :
            early_stop = 0
            best_val_score = dice_score
            # Only the best-scoring weights are ever written to disk.
            torch.save(net.state_dict(), checkpoint)
        if early_stop == early_stop_criterion :
            break
        time_elapsed = time.time() - start_time
        total_elapsed = time.time() - total_start_time
        total_min = total_elapsed // 60
        total_sec = total_elapsed % 60
        lr = optimizer.param_groups[0]['lr']
        logger(f'[EPOCH : {epoch:3d}/{epochs:3d}] \
              | LOSS : [{epoch_loss:.4f}] \
              | DICE : [{best_val_score:.4f}] \
              | SENSI : [{sensitivity:.4f}] \
              | SPECI : [{specificity:.4f}] \
              | ES : [{early_stop}/{early_stop_criterion}] \
              | LR : [{lr:.5f}] \
              | TIME : [{int(time_elapsed):3d}S / {int(total_min):2d}M {int(total_sec):2d}S]'
            )
    # Restore the best checkpoint before the final evaluation.
    net.load_state_dict(torch.load(checkpoint))
    # NOTE(review): evaluate() returns a 3-tuple here, so the "score" below
    # is logged as (dice, sensitivity, specificity).
    final_val_score = evaluate(net, val_loader, device, image_path)
    logger(f'\n\nFINAL VALIDATION SCORE : {final_val_score}')
    return net
| kimjh0107/2022_Rayence_Medical_Image_processing | src/train.py | train.py | py | 3,042 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "pathlib.Path",
"line_number": 16,
"usage_type": "name"
},
{
"api_name": "time.time",
"line_number": 27,
"usage_type": "call"
},
{
"api_name": "logger.print_logger",
"line_number": 30,
"usage_type": "call"
},
{
"api_name": "torch.optim.RMSprop",
... |
41673811681 | import os
import shutil
from object2urdf import ObjectUrdfBuilder
from cleanup_tools import get_immediate_subdirectories
import argparse
import shapenet
from glob import glob
import point_cloud_utils as pcu
import numpy as np
import trimesh
def as_mesh(scene_or_mesh):
    """Coerce a trimesh object to a single Trimesh.

    A trimesh.Scene is flattened by concatenating every geometry it holds;
    anything else is assumed to already be a mesh and is returned unchanged.
    """
    if not isinstance(scene_or_mesh, trimesh.Scene):
        return scene_or_mesh
    parts = [
        trimesh.Trimesh(vertices=geom.vertices, faces=geom.faces)
        for geom in scene_or_mesh.geometry.values()
    ]
    return trimesh.util.concatenate(parts)
# Update file
def replace_in_file(filepath, original, replacement):
    """Replace original string with replacement string in file at filepath.
    These can be single strings or list of strings."""
    with open(filepath, "r") as handle:
        contents = handle.read()
    # Promote bare strings to one-element lists so both inputs align.
    originals = original if isinstance(original, list) else [original]
    replacements = replacement if isinstance(replacement, list) else [replacement]
    assert len(originals) == len(replacements)
    for old, new in zip(originals, replacements):
        contents = contents.replace(old, new)
    with open(filepath, "w") as handle:
        handle.write(contents)
def main(args):
    """Convert the local ShapeNetCoreV2 dump into ShapeNetCoreV2urdf.

    For every category/object, the model .obj is copied (optionally made
    watertight with point_cloud_utils when --watertight is set), a URDF is
    generated via ObjectUrdfBuilder, and the URDF is patched to reference
    the copied mesh and to allow concave collision geometry.
    """
    # Create new directory to place processed files
    new_folder = os.path.join(os.path.dirname(shapenet.__file__), 'ShapeNetCoreV2urdf')
    if not os.path.exists(new_folder):
        os.makedirs(new_folder)
    # Create __init__.py file so the output folder is importable as a package
    initfile = os.path.join(new_folder, '__init__.py')
    try:
        open(initfile, 'x')
    except FileExistsError:
        pass
    shapenet_folder = os.path.join(os.path.dirname(shapenet.__file__), 'ShapeNetCoreV2')
    subdirs = get_immediate_subdirectories(shapenet_folder)
    for subdir in subdirs:
        category_folder = os.path.join(shapenet_folder, subdir)
        # Create new directory for the ShapeNet category
        new_category_folder = os.path.join(new_folder, subdir)
        if not os.path.exists(new_category_folder):
            os.makedirs(new_category_folder)
        # copy prototype.urdf to subdir; the builder uses it as a template
        src_proto = os.path.join(shapenet_folder, '_prototype.urdf')
        dst_proto = os.path.join(new_category_folder, '_prototype.urdf')
        shutil.copy2(src_proto, dst_proto)
        builder = ObjectUrdfBuilder(new_category_folder)
        obj_paths = glob(os.path.join(category_folder, '*', 'models', '*.obj'))
        for obj_path in obj_paths:
            # Create new directory for the ShapeNet object
            new_object_folder = os.path.join(new_category_folder, obj_path.split(os.sep)[-3])
            if not os.path.exists(new_object_folder):
                os.makedirs(new_object_folder)
            if args.watertight:
                # Generate watertight mesh
                mesh = as_mesh(trimesh.load(obj_path))
                if mesh.is_watertight:
                    # Copy .obj to new directory
                    shutil.copy2(obj_path, os.path.join(new_object_folder, 'model.obj'))
                else:
                    # 50000 is the remeshing resolution passed to pcu.
                    vm, fm = pcu.make_mesh_watertight(mesh.vertices, mesh.faces, 50000)
                    watertight_path = os.path.join(new_object_folder, 'model.obj')
                    pcu.save_mesh_vf(watertight_path, vm, fm, dtype=np.float32)
            else:
                # Copy .obj to new directory
                shutil.copy2(obj_path, os.path.join(new_object_folder, 'model.obj'))
            # build urdf
            builder.build_urdf(filename=new_object_folder,
                               force_overwrite=True,
                               decompose_concave=False,
                               force_decompose=False,
                               center=None)
            # rename urdf with their .obj name; [!_]* skips _prototype.urdf
            src_urdf_path = glob(os.path.join(new_category_folder, '[!_]*.urdf'))[0]
            dst_urdf_path = os.path.join(new_object_folder, 'model.urdf')
            shutil.move(src_urdf_path, dst_urdf_path)
            # Add flag 'concave=yes' to allow concave meshes in simulators,
            # edit the new urdf with the updated mesh path
            obj_index = dst_urdf_path.split(os.sep)[-2]
            original = [f'filename=\"{obj_index}\"',
                        'collision']
            replacement = ['filename=\"model.obj\"',
                           'collision concave=\"yes\"']
            replace_in_file(dst_urdf_path, original, replacement)
if __name__ == '__main__':
    # CLI: --watertight switches on mesh repair via point_cloud_utils.
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--watertight", default=False, action='store_true', help="Extract watertight meshes and watertight URDF"
    )
    args = parser.parse_args()
    main(args)
| dexterousrobot/obj_urdfs | obj_urdfs/build_shapenet_urdfs.py | build_shapenet_urdfs.py | py | 4,788 | python | en | code | 2 | github-code | 36 | [
{
"api_name": "trimesh.Scene",
"line_number": 15,
"usage_type": "attribute"
},
{
"api_name": "trimesh.util.concatenate",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "trimesh.util",
"line_number": 16,
"usage_type": "attribute"
},
{
"api_name": "trimesh... |
73915422185 | from flask import Flask, request
import base64
from PIL import Image
from preprocess4 import Pr1
from preprocess2 import test_transforms
import torch
from torchvision import models
import torch.nn as nn
import numpy as np
import cv2
def get_net():
    """Build the fine-tuning network: a frozen pretrained ResNet-18 backbone
    plus a fresh 1000 -> 256 -> 26 classification head, placed on the CPU."""
    net = nn.Sequential()
    # Pretrained backbone; its 1000-dim ImageNet logits feed the new head.
    net.features = models.resnet18(weights='ResNet18_Weights.DEFAULT')
    net.output_new = nn.Sequential(
        nn.Linear(1000, 256),
        nn.ReLU(),
        nn.Linear(256, 26),
    )
    net = net.to('cpu')
    # Freeze the backbone so only the new head would receive gradients.
    for weight in net.features.parameters():
        weight.requires_grad = False
    return net
# Load the saved parameters; map_location keeps everything on the CPU so no
# GPU is required to serve.
saved_params = torch.load('my_model61.pt', map_location=torch.device('cpu'))
# Create a new instance of the model and load the parameters
model_test = get_net()
model_test.load_state_dict(saved_params)
model_test.eval()
# Index -> letter mapping for the 26-way classifier output.
classes = ['a','b','c','d','e','f','g','h','i','j','k','l','m','n','o','p','q','r','s','t','u','v','w','x','y','z']
app = Flask(__name__)
@app.route('/upload', methods=['POST'])
def upload():
    """Decode a base64 image from the JSON body, run hand detection and
    segmentation on it, and answer with a status dict.

    Expects JSON of the form {"image": "<base64-encoded image>"}.
    Returns {'status': 'success'} when a hand was found, otherwise an
    error status with a message. (Debug print(0/1/2) calls removed.)
    """
    try:
        # Get the base64 image string from the request
        base64_image = request.json['image']
        # Decode the base64 image string to bytes
        image_bytes = base64.b64decode(base64_image)
        # Fixed: np.fromstring is deprecated for binary data; frombuffer is
        # the supported zero-copy replacement.
        nparr = np.frombuffer(image_bytes, np.uint8)
        # Decode the numpy array to an image using OpenCV
        frame = cv2.imdecode(nparr, cv2.IMREAD_UNCHANGED)
        # Crop/segment the hand region
        p1 = Pr1(frame)
        processed_frame = p1.detect_crop_and_segment_hands(p1.image)
        if processed_frame is not None:
            cropped_hand_array = Image.fromarray(processed_frame)
            # Prediction pipeline kept disabled, as in the original:
            # img_tensor = test_transforms(cropped_hand_array)
            # prediction = model_test(img_tensor[None].to("cpu"))
            # pred_label = classes[torch.max(prediction, dim=1)[1]]
            return {'status': 'success'}
        # Fixed: previously the function fell off the end (returned None)
        # when no hand was detected, which makes Flask raise.
        return {'status': 'error', 'message': 'no hand detected'}
    except Exception as e:
        # Report any failure back to the client instead of a 500 page.
        return {'status': 'error', 'message': str(e)}
if __name__ == '__main__':
    # Flask development server on the default localhost:5000.
    app.run()
| Moezwalha/Alphabet-SL_Prediction_Service | app.py | app.py | py | 2,494 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "torch.nn.Sequential",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_number": 13,
"usage_type": "name"
},
{
"api_name": "torchvision.models.resnet18",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "torchvision.... |
24888015371 | from typing import Iterable
import torch
import numpy as np
from scipy.spatial.distance import cdist
from tqdm import tqdm
import ot
def cost_matrix(
        data: np.ndarray, cost: str = 'correlation',
        normalize_features: bool = True) -> np.ndarray:
    """Empirical ground cost: pairwise distances between the rows of *data*
    (l1-normalized rows by default), rescaled so the largest entry is 1.
    Any metric accepted by Scipy's `cdist` may be used.

    Args:
        data (np.ndarray):
            The input data, samples as columns and features as rows.
        cost (str):
            Metric name passed to `cdist`. Defaults to `correlation`.
        normalize_features (bool, optional):
            Whether to divide each row by its sum before computing
            distances. Defaults to True.

    Returns:
        np.ndarray: The pairwise cost matrix, maximum entry 1.
    """
    rows = data
    if normalize_features:
        row_sums = data.sum(1).reshape(-1, 1)
        rows = data / row_sums
    C = cdist(rows, rows, metric=cost)
    return C / C.max()
def OT_distance_matrix(
    data: np.ndarray,
    cost: np.ndarray,
    eps: float = .1,
    dtype: torch.dtype = torch.double,
    device: str = 'cuda',
    divide_max: bool = False,
    numItermax: int = 500,
    stopThr: float = 1e-5,
    batch_size: int = 200) -> tuple:
    """Compute the pairwise Optimal Transport distance matrix. We compute
    Sinkhorn Divergences using POT's implementation of the Sinkhorn algorithm.
    Computations are done using PyTorch on a specified device. But the result is
    a numpy array. This allows not saturating the GPU for large matrices.
    Args:
        data (np.ndarray):
            The input data, as a numpy array.
        cost (np.ndarray):
            The ground cost between features.
        eps (float, optional):
            The entropic regularization parameter. Small regularization requires
            more iterations and double precision. Defaults to .1.
        dtype (torch.dtype, optional):
            The torch dtype used for computations. Double is more precise but
            takes up more space. Defaults to torch.double.
        device (str, optional):
            The torch device to compute on, typically 'cpu' or 'cuda'.
            Defaults to 'cuda'.
        divide_max (bool, optional):
            Whether to divide the resulting matrix by its maximum value.
            This can be useful to compare matrices. Defaults to False.
        numItermax (int, optional):
            Used by POT, maximum number of Sinkhorn iterations. Defaults to 500.
        stopThr (float, optional):
            Used by POT, tolerance for early stopping in the Sinkhorn iterations.
            Defaults to 1e-5.
        batch_size (int, optional):
            The batch size, i.e. how many distances can be computed at the same
            time. Should be as large as possible on your hardware. Defaults to 200.
    Returns:
        tuple: (D, errors) where D is the pairwise Sinkhorn-divergence matrix
        as a np.ndarray and errors holds the last Sinkhorn error per batch.
    """
    # Move the cost to PyTorch.
    C = torch.from_numpy(cost)
    C = C.to(device=device, dtype=dtype)
    # Compute the kernel
    K = torch.exp(-C/eps)
    data_tensor = torch.from_numpy(data)
    data_tensor = data_tensor.to(device=device, dtype=dtype)
    # D accumulates on the CPU so the GPU only ever holds one batch.
    D = torch.zeros(data_tensor.shape[1], data_tensor.shape[1], device='cpu', dtype=dtype)
    pbar = tqdm(total=data_tensor.shape[1]*(data_tensor.shape[1] - 1)//2, leave=False)
    errors = []
    # Iterate over the lines.
    for i in range(data_tensor.shape[1]):
        # Targets 0..i are processed in chunks of batch_size (D is symmetric).
        for ii in np.split(range(i+1), np.arange(batch_size, i+1, batch_size)):
            # Compute the Sinkhorn dual variables
            _, wass_log = ot.sinkhorn(
                data_tensor[:,i].contiguous(), # This is the source histogram.
                data_tensor[:,ii].contiguous(), # These are the target histograms.
                C, # This is the ground cost.
                eps, # This is the regularization parameter.
                log=True, # Return the dual variables
                stopThr=stopThr,
                numItermax=numItermax
            )
            # Compute the exponential dual potentials.
            f, g = eps*wass_log['u'].log(), eps*wass_log['v'].log()
            if len(wass_log['err']) > 0:
                errors.append(wass_log['err'][-1])
            # Compute the Sinkhorn costs.
            # These will be used to compute the Sinkhorn divergences
            wass = (
                f*data_tensor[:,[i]*len(ii)] +
                g*data_tensor[:,ii] -
                eps*wass_log['u']*(K@wass_log['v'])
            ).sum(0)
            # Add them in the distance matrix (including symmetric values).
            D[i,ii] = D[ii,i] = wass.cpu()
            pbar.update(len(ii))
    pbar.close()
    # Get the diagonal terms OT_eps(a, a).
    d = torch.diagonal(D)
    # The Sinkhorn divergence is OT(a, b) - (OT(a, a) + OT(b, b))/2.
    D = D - .5*(d.view(-1, 1) + d.view(1, -1))
    # Make sure there are no negative values.
    assert((D < 0).sum() == 0)
    # Make sure the diagonal is zero.
    D.fill_diagonal_(0)
    if divide_max:
        D /= torch.max(D)
    return D.numpy(), errors
def C_index(D: np.ndarray, clusters: np.ndarray) -> float:
    """Compute the C index, a measure of how well the pairwise distances reflect
    ground truth clusters (lower is better). Implemented here for reference, but
    the silhouette score (aka Average Silhouette Width) is a more standard
    metric for this.
    Args:
        D (np.ndarray): The pairwise distances.
        clusters (np.ndarray): The ground truth clusters.
    Returns:
        float: The C index.
    """
    # Sw: sum of within-cluster distances; Nw: number of within-cluster pairs.
    Sw = Nw = 0
    for c in np.unique(clusters):
        idx = np.where(clusters == c)[0]
        # Each within-cluster distance appears twice in the submatrix.
        Sw += D[idx][:,idx].sum()/2
        Nw += int(len(idx)*(len(idx) - 1)/2)
    # Collect every pairwise distance once (strict lower triangle of D).
    els = []
    for i in range(len(D)):
        for j in range(i):
            els.append(D[i, j])
    # Smin/Smax: sums of the Nw smallest / largest pairwise distances overall.
    Smin = np.sort(np.array(els))[:Nw].sum()
    Smax = np.sort(np.array(els))[::-1][:Nw].sum()
    return (Sw - Smin)/(Smax - Smin) | cantinilab/OT-scOmics | src/otscomics/__init__.py | __init__.py | py | 5,668 | python | en | code | 31 | github-code | 36 | [
{
"api_name": "numpy.ndarray",
"line_number": 9,
"usage_type": "attribute"
},
{
"api_name": "scipy.spatial.distance.cdist",
"line_number": 29,
"usage_type": "call"
},
{
"api_name": "scipy.spatial.distance.cdist",
"line_number": 31,
"usage_type": "call"
},
{
"api_n... |
36168624616 | from enum import Enum, auto
from pathlib import Path
import numpy as np
import pandas as pd
import pendulum
import pytest
from whatsappnalysis.lib.custom_types import ChatDataset, Schema
from whatsappnalysis.lib.data_loader import WhatsappLoader
class TestWhatsappLoader:
    """ Tests for WhatsappLoader.load_from_txt """
    # Raw WhatsApp export used as the loader's input fixture.
    test_chat_txt = (
        "2/5/20, 8:38 PM - Author 1: Hello world\n"
        "2/5/20, 8:39 PM - Author 1: I like balloons\n"
        "2/5/20, 8:39 PM - Author 2: I like balloons too!\n"
        "2/5/20, 8:42 PM - Author 3: foo\n"
        "2/5/20, 8:42 PM - Author 3: Balloons are terrible\n"
        "2/5/20, 8:45 PM - Author 2: False\n"
    )
    # Expected parse of test_chat_txt (chat name comes from the file stem).
    test_chat_df = pd.DataFrame.from_dict(
        {
            "CHAT_NAME": {
                0: "test_chat",
                1: "test_chat",
                2: "test_chat",
                3: "test_chat",
                4: "test_chat",
                5: "test_chat",
            },
            "TIMESTAMP": {
                0: pendulum.parse("2020-02-05 20:38:00+0000"),
                1: pendulum.parse("2020-02-05 20:39:00+0000"),
                2: pendulum.parse("2020-02-05 20:39:00+0000"),
                3: pendulum.parse("2020-02-05 20:42:00+0000"),
                4: pendulum.parse("2020-02-05 20:42:00+0000"),
                5: pendulum.parse("2020-02-05 20:45:00+0000"),
            },
            "AUTHOR": {
                0: "Author 1",
                1: "Author 1",
                2: "Author 2",
                3: "Author 3",
                4: "Author 3",
                5: "Author 2",
            },
            "MESSAGE": {
                0: "Hello world",
                1: "I like balloons",
                2: "I like balloons too!",
                3: "foo",
                4: "Balloons are terrible",
                5: "False",
            },
            "HAS_MEDIA": {
                0: False,
                1: False,
                2: False,
                3: False,
                4: False,
                5: False,
            },
        }
    )
    class Columns(Enum):
        # Minimal column set used to build the Schema below.
        TIMESTAMP = auto()
        AUTHOR = auto()
        MESSAGE = auto()
    schema = Schema(
        columns=Columns,
        columns_to_dtypes={Columns.TIMESTAMP.name: np.dtype("datetime64[ns]")},
    )
    def test_load_from_txt(self, tmp_path: Path):
        """ Test loading from txt file"""
        # Arrange: the loader is expected to yield naive datetime64[ns].
        expected = self.test_chat_df.astype({"TIMESTAMP": np.dtype("datetime64[ns]")})
        raw_path = tmp_path / "test_chat.txt"
        with raw_path.open("w") as file:
            file.write(self.test_chat_txt)
        # NOTE(review): `dataset` is never used below -- leftover from an
        # earlier version of this test?
        dataset = ChatDataset(schema=self.schema)
        # Act
        result = WhatsappLoader().load_from_txt(raw_path)
        # Assert
        pd.testing.assert_frame_equal(result.data, expected)
    def test_load_from_txt_bad_file(self, tmp_path: Path):
        """ Test loading an empty txt file raises TypeError"""
        # Arrange
        raw_path = tmp_path / "test_chat.txt"
        with raw_path.open("w") as file:
            file.write("")
        # Act / assert
        with pytest.raises(TypeError):
            WhatsappLoader().load_from_txt(raw_path)
| lbartell/whatsappnalysis | tests/test_lib/test_data_loader/test_whatsapp_loader.py | test_whatsapp_loader.py | py | 3,175 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "pandas.DataFrame.from_dict",
"line_number": 24,
"usage_type": "call"
},
{
"api_name": "pandas.DataFrame",
"line_number": 24,
"usage_type": "attribute"
},
{
"api_name": "pendulum.parse",
"line_number": 35,
"usage_type": "call"
},
{
"api_name": "pendu... |
32505694658 | import streamlit as st
# import pandas as pd
import numpy as np
import pydeck as pdk
import plotly.express as px
from ParserXML import *
from ConverterToHTML import *
from VisualTools import *
__all__ = [st, pd, np, pdk, px]
DATE_TIME = "date/time"
local_path = ""
file_name = "Datasets/50k_cleaned_from_xml.csv"
DATA_URL = local_path + file_name
st.title("Motor Vehicle Collisions Analyzer")
st.markdown("This application is a Streamlit dashboard that can \
be used to analyze XML-file from the State Automobile Inspection ЁЯЪФЁЯТе")
st.markdown("ЁЯФ╡ Author: **Andriy Fedorych**")
st.markdown("ЁЯЯб GitHub: [**StopFuture**](https://github.com/StopFuture)")
# Guard flag: only parse a freshly uploaded file.
upload_check = False
xml_source_file = st.file_uploader("Upload XML File", type="xml")
if xml_source_file is not None and upload_check is False:
    try:
        # Parse the uploaded XML with the dict-based strategy.
        context_t = DefParserXML(XMLDictStrategy())
        context_t.strategy = XMLDictStrategy()
        imported = context_t.extract_data(xml_source_file.name)
        Converter = ConverterToHTML(xml_source_file.name)
        @st.cache(persist=True)
        def load_data(imported_data):
            # Normalize column names, drop incomplete rows, coerce dtypes.
            def lowercase(el): return str(el).lower()
            imported_data.rename(lowercase, axis='columns', inplace=True)
            imported_data.dropna(subset=["latitude", "longitude", "injured_persons", "date-time", "on_street_name"],
                                 inplace=True)
            imported_data['date-time'] = pd.to_datetime(imported_data['date-time'], format='%Y-%m-%d %H:%M:%S')
            for name in ["injured_persons", "killed_persons", "injured_pedestrians",
                         "killed_pedestrians", "injured_cyclists", "killed_cyclists", "injured_motorists",
                         "killed_motorists"]:
                imported_data[name] = imported_data[name].astype('int')
            imported_data['latitude'] = imported_data['latitude'].astype('float')
            imported_data['longitude'] = imported_data['longitude'].astype('float')
            return imported_data
        upload_check = True
    except Exception as exp:
        # NOTE(review): `x` only keeps the exception for debugging; it is
        # never read afterwards.
        x = exp
        st.markdown("тЪая╕П я╕П**The file is not from the SAI system, try upload another file**")
else:
    upload_check = False
# Main dashboard: only rendered after a successful upload/parse.
if upload_check:
    data = load_data(imported)
    # Keep an untouched copy for the final HTML export.
    origin = data
    st.header("Where are the most people injured in city?")
    injured_people = st.slider("Number of persons injured in vehicle collisions", 0, 18)
    # Map center = mean coordinate of all rows.
    midpoint = (np.average(data["latitude"].dropna(how="any")), np.average(data["longitude"].dropna(how="any")))
    # NOTE(review): `cols` is not defined in this module's visible scope --
    # presumably exported by one of the star-imports above; confirm.
    tmp_data = data.query("injured_persons >= @injured_people")[cols]
    HeatMap(data, midpoint, injured_people)
    if st.checkbox("Show Raw Data ", False):
        st.subheader('Raw Data')
        x = (st.text_input("Number of displayed rows : ", value="1"))
        st.write(tmp_data.head(int(x) if x != "" else 0))
        DownloadButton(tmp_data, Converter)
    st.header("How many collisions occur during a given time of day(60 min interval)?")
    hour = st.slider("Hour to look at", 0, 24)
    data = data[data['date-time'].dt.hour == hour]
    st.markdown(f"Vehicle collisions between {hour}:00 and {hour + 1}:00")
    HistMap(data, midpoint)
    if st.checkbox("Show Raw Data", False):
        st.subheader('Raw Data')
        x = (st.text_input("Number of displayed rows: ", value="10"))
        st.write(data.head(int(x) if x != "" else 0))
    # NOTE(review): on_click is given the RESULT of create_html(...), so the
    # export runs on every rerun rather than on the click -- verify intent.
    st.button(
        f"Extract this data as {Converter.set_source(st.text_input('Select a name:', value=Converter.source))}.html ",
        key=None, help=None, on_click=Converter.create_html(tmp_data, Converter.source))
    # Hist
    st.subheader("Breakdown by minute between %i:00 and %i:00" % (hour, (hour + 1) % 24))
    filtered = data[
        (data['date-time'].dt.hour >= hour) & (data['date-time'].dt.hour <= hour + 1)
    ]
    hist = np.histogram(filtered["date-time"].dt.minute, bins=60, range=(0, 60))[0]
    chart_data = pd.DataFrame({'minute': range(0, 60, 1), 'crashes': hist})
    fig = px.bar(chart_data, x="minute", y="crashes", hover_data=["minute", "crashes"], height=500)
    st.write(fig)
    st.markdown("The data may be inaccurate, because most of the time is rounded up to 5 minutes")
    if st.checkbox("Show raw data", False):
        st.subheader('Raw Data')
        st.write(data.head(10))
    st.header("Top dangerous streets by affected class")
    Box(data)
    st.header("Creating html file from source data")
    FinalHtmlCreator(origin, Converter)
| StopFuture/AnalyzerXML | AnalyzerXML.py | AnalyzerXML.py | py | 4,533 | python | en | code | 1 | github-code | 36 | [
{
"api_name": "plotly.express",
"line_number": 10,
"usage_type": "name"
},
{
"api_name": "streamlit.title",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "streamlit.markdown",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "streamlit.markdown"... |
32568853073 | # -*- coding: utf-8 -*-
from logbook import Logger
import numpy as np
import pandas as pd
from zipline.data.bundles import register
from zipline.utils.calendars import get_calendar
EXPORT_FOLDER = '/mnt/data/earnings_calls/export/'
log = Logger('zipline_ingest.py')
def bundle_hf_data(price_file, debug = False):
    """Return a zipline ``ingest`` callable that builds a daily-bar bundle
    from the adjusted-price HDF file at *price_file*.

    When *debug* is True only the first instrument is ingested.
    """
    def ingest(environ,
               asset_db_writer,
               minute_bar_writer,
               daily_bar_writer,
               adjustment_writer,
               calendar,
               cache,
               show_progress,
               output_dir,
               start,
               end):
        log.info("Starting bundle build from %s" % price_file)
        data = pd.read_hdf(price_file)
        # Keep only rows with both Open and Close, quoted in USD.
        data.dropna(subset = ['Open', 'Close'], inplace = True)
        data = data.loc[data.Currency == 'USD']
        data['instrument_key'] = data.instrument_key.str.upper()
        log.info("Importing %d instruments" % len(data.instrument_key.unique()))
        dfMetadata = []
        def read_instruments():
            # Generator yielding (sid, OHLCV frame) per instrument; appends
            # one metadata row per instrument to dfMetadata as a side effect.
            for sid, (instrument_key, instrument_data) in enumerate(data.groupby('instrument_key')):
                log.debug("Reading instrument %s" % instrument_key)
                log.debug("\tInstrument has %d rows" % len(instrument_data))
                if len(instrument_data) == 0:
                    log.debug("\tNo data for instrument, skipping")
                    continue
                instrument_data.drop_duplicates(subset = ['Date'], inplace = True)
                instrument_data.set_index('Date', inplace = True)
                instrument_data.sort_index(inplace = True)
                #dfData['exchange_open'] = instrument_data.index.map(calendar.is_open_on_minute)
                #dfData = dfData[dfData['exchange_open'] == True]
                start_date = instrument_data.index[0]
                log.debug("\tstart_date %s" % start_date)
                end_date = instrument_data.index[-1]
                log.debug("\tend_date %s" % end_date)
                # Auto-close one day after the last observation.
                ac_date = end_date + pd.Timedelta(days=1)
                log.debug("\tac_date %s" % ac_date)
                # NOTE(review): hard-codes the NYSE calendar and ignores the
                # `calendar` argument passed by zipline -- confirm intent.
                sessions = get_calendar('NYSE').sessions_in_range(start_date, end_date)
                instrument_data = instrument_data.reindex(sessions)
                # Update our meta data
                dfMetadata.append((sid, instrument_key, start_date, end_date, \
                    ac_date, instrument_key, "Eikon"))
                # High/Low are not available in the source; Volume defaults to 1.
                instrument_data['High'] = np.nan
                instrument_data['Low'] = np.nan
                instrument_data['Volume'].fillna(1.0, inplace = True)
                instrument_data = instrument_data.loc[:, ['Open', 'High', 'Low', 'Close', 'Volume']]
                instrument_data.columns = ['open', 'high', 'low', 'close', 'volume']
                instrument_data = instrument_data.astype(float)
                yield (sid, instrument_data)
                if debug:
                    break
        liData = read_instruments()
        log.info("calling daily_bar_writer")
        daily_bar_writer.write(liData,
                               show_progress = True)
        log.info("returned from daily_bar_writer")
        # The generator above has now run, so dfMetadata is populated.
        dfMetadata = pd.DataFrame(dfMetadata,
                                  columns=['sid', 'asset_name', 'start_date',
                                           'end_date', 'auto_close_date',
                                           'symbol', 'exchange'])\
            .set_index('sid')
        log.info("calling asset_db_writer")
        log.info(dfMetadata)
        asset_db_writer.write(equities = dfMetadata)
        log.info("returned from asset_db_writer")
        log.info("calling adjustment_writer")
        # No splits/dividends: prices are already adjusted upstream.
        adjustment_writer.write()
        log.info("returned from adjustment_writer")
    return ingest
# Register the bundle with zipline at import time.
register(
    'eikon-data-bundle',
    bundle_hf_data(price_file = EXPORT_FOLDER + "/adjusted_prices.hdf",
                   debug = False),
)
| olgsfrt/earningscall | backtest/zipline_ingest.py | zipline_ingest.py | py | 4,373 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "logbook.Logger",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "pandas.read_hdf",
"line_number": 34,
"usage_type": "call"
},
{
"api_name": "pandas.Timedelta",
"line_number": 68,
"usage_type": "call"
},
{
"api_name": "zipline.utils.calenda... |
3600506632 | import cv2
# Round-trip an image through disk and display its grayscale version.
img = cv2.imread("sample1.png")
# Fixed: imread returns None (instead of raising) for missing/unreadable
# files; fail fast with a clear error rather than crashing inside cv2 later.
if img is None:
    raise FileNotFoundError("sample1.png could not be read")
cv2.imwrite("sample2.png", img)
img2 = cv2.imread("sample2.png")
if img2 is None:
    raise FileNotFoundError("sample2.png could not be read back")
grayImg = cv2.cvtColor(img2, cv2.COLOR_BGR2GRAY)
cv2.imshow("Gray", grayImg)
cv2.waitKey(0)
cv2.destroyAllWindows()
| rohith274/AiGuide | AI/Day1/ReadImage.py | ReadImage.py | py | 226 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "cv2.imread",
"line_number": 3,
"usage_type": "call"
},
{
"api_name": "cv2.imwrite",
"line_number": 4,
"usage_type": "call"
},
{
"api_name": "cv2.imread",
"line_number": 5,
"usage_type": "call"
},
{
"api_name": "cv2.cvtColor",
"line_number": 6,
... |
36830649760 | # To add a new cell, type '# %%'
# To add a new markdown cell, type '# %% [markdown]'
# %% [markdown]
# # SciKitLearn 机器学习库
# - VScode中, `ctrl + /` 快速注释代码
# %%
# Sklearn 通用的学习模式
# 案例1. 本例鸢尾花数据集,使用KNN模块实现分类
import numpy as np
from sklearn import datasets
# from sklearn.cross_validation import train_test_split # cross_validation包早已不再使用,功能划入model_selection模块中
from sklearn.model_selection import train_test_split
from sklearn.neighbors import KNeighborsClassifier
iris = datasets.load_iris() # 加载鸢尾花数据集
iris_X = iris.data # 属性存入X变量,作为特征向量集合
iris_y = iris.target # 标签存入y变量,作为目标向量
print(iris_X[:2,:])
print(iris_y)
# %%
# Split the dataset
X_train, X_test, y_train, y_test = train_test_split(iris_X, iris_y, test_size=0.3)
# iris_X and iris_y are each split into train/test sets with a 30% test share
# Define the model used below
knn = KNeighborsClassifier() # classify the data with the KNN module
# knn = KNeighborsClassifier(n_neighbors=5) # KNN averages nearby points; n_neighbors sets how many
knn.fit(X_train, y_train) # training uses the fit function
# Check the predictions
print(knn.predict(X_test))
print(y_test)
# Visualization (added by the author)
yuc = knn.predict(X_test) # predicted labels
zhs = y_test # ground-truth labels
import numpy as np
idx = np.arange(0,len(yuc),1) # one index per element (via len), used as x coordinates
import matplotlib.pyplot as plt
plt.figure()
plt.scatter(idx,yuc,s=80,c='g',alpha=0.5) # idx as x, predictions/truth as y
plt.scatter(idx,zhs,s=80,c='r',alpha=0.5) # large markers, distinct colors, translucent
# %%
# 案例2. 本例波士顿房价数据集,使用linear_model实现线性回归预测
from sklearn import datasets
from sklearn.linear_model import LinearRegression
loaded_data = datasets.load_boston() # 加载波士顿房价数据集
data_X = loaded_data.data # 数据的data属性就是特征向量集
data_y = loaded_data.target # 数据的target属性就是目标函数
model = LinearRegression() # 使用线性回归模型
model.fit(data_X, data_y)
print(model.predict(data_X))
print(data_y)
# 可视化(自己增加)
yuc = model.predict(data_X) # 预测结果
zhs = data_y # 实际值
import numpy as np
idx = np.arange(0,len(yuc),1) # 按元素数(len取值)成索引,用于x坐标
import matplotlib.pyplot as plt
plt.figure(figsize=(12,4))
plt.plot(idx,yuc,c='g',alpha=0.5) # idx为x,预测和实际值为y
plt.plot(idx,zhs,c='r',alpha=0.5) # 设置颜色区分,有透明度
# %%
# model模块的常见属性和功能,如上述的predict预测功能(1分类2回归)
model = LinearRegression() # 指定本例所用的model
model.fit(X,y) # 对特征向量集和目标向量,用模型进行拟合
model.predict(X) # 对测试集数据X,用模型进行预测
model.coef_ # 模型的斜率
model.intercept_ # 模型的截距
model.get_params() # 获得模型选择时给模型定义的参数
model.score(X,y) # 对预测结果打分。用X预测,用y做真值进行比较。R^2方式打分
# %%
# 预处理preprocessing
# 标准化normalization、正则化、特征缩放feature scaling
# Idea: Make sure features are on a similar scale. 各特征处于相近的量级,便于学习
from sklearn import preprocessing
X = preprocessing.scale(X) # 对数据进行预处理(标准化,缩放到0-1之间的数值)
# %%
# 交叉验证(数据集分割)
# 上面案例1中的数据集分割方式,按照固定比例分割
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.3)
# 为了有效评价模型,对数据集进行多次不同模式的分割,分别测试并平均其准确率
from sklearn.model_selection import cross_val_score # cross_val_score函数也并入model_selection
knn = KNeighborsClassifier(n_neighbors=5) # 计算5个近邻点
score = cross_val_score(knn, X, y, cv=5, scoring='accuracy') # 分类问题用准确率
# 打分由多次分割评估结果平均而来,使用knn模型,对X预测,用y验证,使用5种分割方案,打分使用准确率进行
loss = -cross_val_score(knn, X, y, cv=5, scoring='neg_mean_squared_error') # 回归问题用均方差(原值时负值)
# 原mean_squared_error参数已弃用
# %%
# 学习率曲线,可视化学习的准确率变化过程
from sklearn.model_selection import learning_curve # 学习曲线也放入model_selection
from sklearn.datasets import load_digits
from sklearn.svm import SVC
import matplotlib.pyplot as plt
import numpy as np
digits = load_digits() # 加载数据集
X = digits.data # digits属性作为特征向量集
y = digits.target # 目标向量
# 学习曲线计算(指定阶段的准确率/损失值变化),输出给训练集大小、训练集损失、测试集损失等变量
# gamma是学习率(速率),阶段有数组指定,损失计算和上述交叉验证方法一样
train_sizes, train_loss, test_loss = learning_curve(
SVC(gamma=0.001),X,y,cv=10,scoring='neg_mean_squared_error',
train_sizes=[0.1,0.2,0.3,0.4,0.5,0.6,0.7,0.8,0.9,1])
train_loss_mean = -np.mean(train_loss,axis=1) # 上述cv10次分割的值求均值
test_loss_mean = -np.mean(test_loss,axis=1)
plt.plot(train_sizes, train_loss_mean, 'o-',color='r', label="training")
plt.plot(train_sizes, test_loss_mean, 'o-',color='g', label="cross-validation")
plt.xlabel('training examples')
plt.ylabel('loss')
plt.legend(loc='best')
plt.show()
# %%
# 模型调参过程,使用validation_curve评估参数取值变化过程中评估指标的变化曲线,根据是否欠拟合或过拟合来选取该参数的合适范围
from sklearn.model_selection import validation_curve # 评估曲线也放入model_selection
from sklearn.datasets import load_digits
from sklearn.svm import SVC
import matplotlib.pyplot as plt
import numpy as np
digits = load_digits()
X = digits.data
y = digits.target
param_range = np.logspace(-6, -2.3, 10) # 在区间取5个点,用于测试参数(调参)
# 评估曲线计算(指定阶段的准确率/损失值变化),输出给训练集大小、训练集损失、测试集损失等变量
# gamma是学习率(速率),阶段有数组指定,损失计算和上述交叉验证方法一样
train_loss, test_loss = validation_curve( # 改用评估曲线,返回值没有train_sizes
# SVC的固定参数去掉,后面给出参数名和取值范围(已指定)
SVC(),X,y,param_name='gamma',param_range=param_range, cv=10,scoring='neg_mean_squared_error')
train_loss_mean = -np.mean(train_loss,axis=1) # 上述cv10次分割的值求均值
test_loss_mean = -np.mean(test_loss,axis=1)
plt.plot(param_range, train_loss_mean, 'o-',color='r', label="training")
plt.plot(param_range, test_loss_mean, 'o-',color='g', label="cross-validation")
plt.xlabel('gamma')
plt.ylabel('loss')
plt.legend(loc='best')
plt.show()
# %%
# 保存model和参数
# pickle方法
import pickle
with open('/path/to/file.pickle','wb') as f: # 打开句柄-写入
pickle.dump(model,f) # 保存模型
with open('/path/to/file.pickle','rb') as f: # 打开句柄-读出
mdl = pickle.load(f) # 加载模型
print(mdl.predict(X[0:1])) # 使用模型预测
# joblib方法-sklearn
from sklearn.externals import joblib
joblib.dump(model,'/path/to/file.pkl') # 保存模型
mdl = joblib.load('/path/to/file.pkl') # 加载模型
print(mdl.predict(X[0:1])) # 使用模型预测
| oca-john/Python3-xi | Python3-ipynb/py3.sklearn.py | py3.sklearn.py | py | 7,841 | python | zh | code | 0 | github-code | 36 | [
{
"api_name": "sklearn.datasets.load_iris",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "sklearn.datasets",
"line_number": 17,
"usage_type": "name"
},
{
"api_name": "sklearn.model_selection.train_test_split",
"line_number": 27,
"usage_type": "call"
},
{
... |
5515751660 | import yaml
class Config:
    """Training configuration read from ``config/config.yml``.

    Every attribute mirrors one UPPER_CASE key of the YAML file; keys
    missing from the file load as None (``dict.get`` semantics, matching
    the original implementation).
    """

    # Single source of truth: attribute name -> YAML key. The original
    # repeated every field three times (declare, None-init, load).
    _FIELDS = {
        'load_model_epochs': 'LOAD_MODEL_EPOCH',
        'debug': 'DEBUG',
        'n_epochs': 'N_EPOCHS',
        'load_g_model_score': 'LOAD_G_MODEL_SCORE',
        'load_d_model_score': 'LOAD_D_MODEL_SCORE',
        'model_no': 'MODEL_NO',
        'batch_size': 'BATCH_SIZE',
        'n_split': 'N_SPLIT',
        'max_lr': 'MAX_LR',
        'min_lr': 'MIN_LR',
        'lambda1': 'LAMBDA1',
        'lambda2': 'LAMBDA2',
        'seed': 'SEED',
        'dataloader_seed': 'DATALOADER_SEED',
        'device': 'DEVICE',
        'size': 'SIZE',
    }

    def __init__(self):
        # Initialize every field to None, then populate from the file.
        for attr in self._FIELDS:
            setattr(self, attr, None)
        self.load()

    def load(self):
        """(Re)read the YAML file and populate all configured attributes."""
        with open('config/config.yml', 'r') as f:
            # safe_load == load(..., Loader=SafeLoader); `or {}` guards
            # against an empty file, which PyYAML parses to None.
            config = yaml.safe_load(f) or {}
        for attr, key in self._FIELDS.items():
            setattr(self, attr, config.get(key))
class TestConfig:
    """Inference-time configuration read from ``config/test_config.yml``.

    Each attribute mirrors one upper-case YAML key; keys absent from the
    file stay ``None``.
    """
    def __init__(self):
        self.load_model_epochs = None  # LOAD_MODEL_EPOCH
        self.debug = None  # DEBUG
        self.load_g_model_score = None  # LOAD_G_MODEL_SCORE
        self.batch_size = None  # BATCH_SIZE
        self.seed = None  # SEED
        self.device = None  # DEVICE
        self.size = None  # SIZE
        self.load()
    def load(self):
        """Read config/test_config.yml and copy each known key onto self."""
        with open('config/test_config.yml', 'r') as f:
            # SafeLoader: never instantiate arbitrary Python objects from YAML.
            config = yaml.load(f, Loader=yaml.SafeLoader)
            self.load_model_epochs = config.get('LOAD_MODEL_EPOCH')
            self.debug = config.get('DEBUG')
            self.load_g_model_score = config.get('LOAD_G_MODEL_SCORE')
            self.batch_size = config.get('BATCH_SIZE')
            self.seed = config.get('SEED')
            self.device = config.get('DEVICE')
self.size = config.get('SIZE') | spider-man-tm/pix2pix_gray_to_color | config/config.py | config.py | py | 2,313 | python | en | code | 3 | github-code | 36 | [
{
"api_name": "yaml.load",
"line_number": 25,
"usage_type": "call"
},
{
"api_name": "yaml.SafeLoader",
"line_number": 25,
"usage_type": "attribute"
},
{
"api_name": "yaml.load",
"line_number": 57,
"usage_type": "call"
},
{
"api_name": "yaml.SafeLoader",
"line_... |
41245539467 | from pynvml import *
import logging
from datasets import load_dataset
from datasets import ClassLabel
from transformers import LukeTokenizer, LukeModel, LukeForEntityPairClassification, TrainingArguments, Trainer
import torch
from tqdm import trange
# construir função que converta spans de relativos a frase para globais
import random
import os
import json
def print_gpu_utilization():
    """Report how much memory GPU 0 currently uses (queried via NVML)."""
    nvmlInit()
    device0 = nvmlDeviceGetHandleByIndex(0)
    mem = nvmlDeviceGetMemoryInfo(device0)
    used_mb = mem.used // 1024 ** 2
    print(f"GPU memory occupied: {used_mb} MB.")
class MyDataset(torch.utils.data.Dataset):
    """Wraps tokenizer encodings plus labels for use with a DataLoader."""

    def __init__(self, encodings, labels):
        self.encodings = encodings
        self.labels = labels

    def __len__(self):
        return len(self.labels)

    def __getitem__(self, idx):
        # Detach the tensor slices so samples carry no autograd history.
        sample = {name: tensors[idx].clone().detach()
                  for name, tensors in self.encodings.items()}
        sample['labels'] = torch.tensor(self.labels[idx])
        return sample
def convert_spans(item):
    """Convert mention positions from sentence-relative to document-global.

    Mutates ``item`` in place: every mention in ``item["vertexSet"]`` gets
    its ``pos`` offset by the total token length of all preceding sentences
    and its ``sent_id`` reset to 0.

    Returns:
        tuple: (the mutated item, list of every mention's [start, end] span).
    """
    entities = item["vertexSet"]
    sents = item["sents"]
    entity_spans = []
    for entity in entities:
        for mention in entity:
            if mention["sent_id"] != 0:
                # Tokens contributed by every sentence before this one.
                offset = sum(len(s) for s in sents[:mention["sent_id"]])
                mention["pos"] = [offset + mention["pos"][0],
                                  offset + mention["pos"][1]]
                mention["sent_id"] = 0
            entity_spans.append(mention["pos"])
    return item, entity_spans
def load_examples_test(dataset):
    """Build LUKE evaluation examples from the DocRED validation split.

    For each document: flattens the sentences into one token list, renders
    the tokens as a space-joined string while recording the character span
    of every entity mention, then pairs the gold (head, tail, relation)
    triples with those character spans.

    Returns a list of dicts with keys ``text``, ``entity_spans`` (one
    (head_span, tail_span) pair per gold mention pair) and ``labels``.
    """
    examples = []
    for i, item in enumerate(dataset["validation"]):
        concat_tokens = []
        counter = 0
        # convert_spans mutates item so all mention positions are global.
        converted_item, entity_spans = convert_spans(item)
        tokens = item["sents"]
        for j in range(len(tokens)):
            concat_tokens += tokens[j]
        del j
        tokens = concat_tokens
        del concat_tokens
        # Render tokens to text, tracking each mention's character span.
        text = ""
        cur = 0
        new_char_spans = [0]*len(entity_spans)
        entity_spans.sort(key=lambda y:y[0])
        for target_entity in entity_spans:
            tamanho_texto = len(text)  # NOTE(review): unused leftover
            text += " ".join(tokens[cur: target_entity[0]])
            if text:
                text += " "
            char_start = len(text)
            text += " ".join(tokens[target_entity[0]: target_entity[1]])
            char_end = len(text)
            new_char_spans[counter] = (char_start, char_end)
            text += " "
            cur = target_entity[1]
            counter+=1
        text += " ".join(tokens[cur:])
        text = text.rstrip()
        # Gold (head_entity, tail_entity, relation) triples for this document.
        labels_pairs = tuple(zip(item["labels"]["head"], item["labels"]["tail"], item["labels"]["relation_id"]))
        entity_spans = [tuple(l) for l in entity_spans]
        oldToNewPos = dict(zip(entity_spans, new_char_spans))
        entities = item["vertexSet"]
        correlations = []
        for pair in labels_pairs:
            for head in entities[pair[0]]:
                if tuple(head["pos"]) in oldToNewPos:
                    head["pos"]=oldToNewPos[tuple(head["pos"])]
            for tail in entities[pair[1]]:
                if tuple(tail["pos"]) in oldToNewPos:
                    tail["pos"] = oldToNewPos[tuple(tail["pos"])]
                # NOTE(review): ``head`` here is whatever the previous loop
                # left bound, i.e. only the LAST head mention is paired with
                # each tail mention -- confirm this is intended.
                pack = tuple((head["pos"], tail["pos"], pair[2]))
                correlations += (pack),
        item["vertexSet"] = entities
        examples.append(dict(
            text=text,
            entity_spans= [d[:][:-1] for d in correlations],
            labels = [d[:][-1] for d in correlations]
        ))
    return examples
def load_examples_competition(dataset):
    """Build LUKE examples for the DocRED test (competition) split.

    Like ``load_examples_test`` but gold relations are unknown, so every
    ordered entity pair (head != tail) is generated with a placeholder
    "Na" label, using one randomly chosen mention per entity.
    """
    examples = []
    for i, item in enumerate(dataset["test"]):
        concat_tokens = []
        counter = 0
        # convert_spans mutates item so all mention positions are global.
        converted_item, entity_spans = convert_spans(item)
        tokens = item["sents"]
        for j in range(len(tokens)):
            concat_tokens += tokens[j]
        del j
        tokens = concat_tokens
        del concat_tokens
        # Render tokens to text, tracking each mention's character span.
        text = ""
        cur = 0
        new_char_spans = [0]*len(entity_spans)
        entity_spans.sort(key=lambda y:y[0])
        for target_entity in entity_spans:
            tamanho_texto = len(text)  # NOTE(review): unused leftover
            text += " ".join(tokens[cur: target_entity[0]])
            if text:
                text += " "
            char_start = len(text)
            text += " ".join(tokens[target_entity[0]: target_entity[1]])
            char_end = len(text)
            new_char_spans[counter] = (char_start, char_end)
            text += " "
            cur = target_entity[1]
            counter+=1
        text += " ".join(tokens[cur:])
        text = text.rstrip()
        aux_head = 0  # NOTE(review): unused
        aux_tail = 0  # NOTE(review): unused
        labels_pairs = []
        # Enumerate every ordered entity pair with a placeholder label.
        for head_id in range(len(item["vertexSet"])):
            for tail_id in range(len(item["vertexSet"])):
                if (head_id!=tail_id):
                    labels_pair = tuple([head_id, tail_id , "Na"])
                    labels_pairs.append(labels_pair)
        entity_spans = [tuple(l) for l in entity_spans]
        oldToNewPos = dict(zip(entity_spans, new_char_spans))
        entities = item["vertexSet"]
        correlations = []
        for pair in labels_pairs:
            head = random.choice(entities[pair[0]])
            tail = random.choice(entities[pair[1]])
            entity_head_id = pair[0]
            entity_tail_id = pair[1]
            rel = pair[2]
            if tuple(head["pos"]) in oldToNewPos:
                head["pos"]=oldToNewPos[tuple(head["pos"])]
            if tuple(tail["pos"]) in oldToNewPos:
                tail["pos"] = oldToNewPos[tuple(tail["pos"])]
            pack = tuple((head["pos"], tail["pos"], pair[2], tuple([entity_head_id, entity_tail_id]), item["title"]))
        # NOTE(review): ``pack`` is rebuilt on every loop iteration above but
        # only the LAST pair survives to the append below -- each document
        # yields a single example instead of one per entity pair. This looks
        # like an indentation bug; confirm before relying on this output.
        item["vertexSet"] = entities
        examples.append(dict(
            text=text,
            entity_spans= pack[:2],
            labels = pack[2],
            idxs_entity_pair = pack[3],
            title = pack[4]
        ))
    return examples
torch.cuda.empty_cache()
dataset = load_dataset("docred")
max_value = 0
#for i, item in enumerate(dataset["train_annotated"]):
# total_text_len = 0
# tokens = item["sents"]
# num_relations = len(item["labels"]["head"])
class ModifiedClassicLuke(LukeForEntityPairClassification):
    """LUKE entity-pair classifier with its head resized to DocRED's 97 relations."""
    def __init__(self, config):
        super().__init__(config)
        # Replace the pretrained (TACRED) classification head with a 97-way one.
        # NOTE(review): in_features=2048 assumes the large model's concatenated
        # entity-pair representation size -- confirm against the checkpoint config.
        self.classifier = torch.nn.Linear(in_features = 2048, out_features = 97, bias = True)
logging.info("Loading data and finetuned dataset for CLASSIC LUKE")
# FAZER LOAD DO MODEL FINETUNED DE 3 EPOCHS
model = ModifiedClassicLuke.from_pretrained("model_finetuned_classic")
tokenizer = LukeTokenizer.from_pretrained("studio-ousia/luke-large-finetuned-tacred")
test_examples = load_examples_competition(dataset)
maximum = 0
max_seq = 0
logging.info("Memory before choosing GPU")
#torch.cuda.empty_cache()
########################## Choose GPU ########################
# set the GPU device to use
cuda_device= 0 # mudar para 0 para dar o cuda
if cuda_device < 0:
device = torch.device("cpu")
else:
device = torch.device(f"cuda:{cuda_device}")
#model = model.to(device)
#model.eval()
# Convert to inputs
# Single pass over all examples (step == len, so the body runs once);
# pulls the parallel lists out of the example dicts.
for batch_start_idx in trange(0, len(test_examples), len(test_examples)):
    batch_examples = test_examples[batch_start_idx:batch_start_idx+len(test_examples)]
    texts = [example["text"] for example in batch_examples]
    entity_spans = [example["entity_spans"] for example in batch_examples]
    idxs_entity_pair = [example["idxs_entity_pair"] for example in batch_examples]
    titles = [example["title"] for example in batch_examples]
    for i in range(len(entity_spans)):
        entity_spans[i] = list(entity_spans[i])
    del batch_examples
logging.info("Removing too big examples!!")
# Drop examples whose tokenization exceeds 500 ids, filtering all four
# parallel lists with the SAME index so they stay aligned.
num_rejected = 0
clean_texts = []
clean_ents = []
clean_idxs_entity_pairs = []
clean_titles = []
tokenizer2 = LukeTokenizer.from_pretrained("studio-ousia/luke-large")
for ix in range(len(texts)):
    encoded = tokenizer2(texts[ix])  # renamed: 'input' shadowed the builtin
    if len(encoded.data["input_ids"]) > 500:
        num_rejected += 1
        continue
    # Bug fix: the original appended texts[i] (a stale index from the loop
    # above) and the WHOLE idxs_entity_pair/titles lists instead of the
    # ix-th element, breaking parallel-list alignment.
    clean_texts.append(texts[ix])
    clean_ents.append(entity_spans[ix])
    clean_idxs_entity_pairs.append(idxs_entity_pair[ix])
    clean_titles.append(titles[ix])
texts = clean_texts
entity_spans = clean_ents
idxs_entity_pair = clean_idxs_entity_pairs
titles = clean_titles
torch.cuda.empty_cache()
relations_code_list = ["P1376",
"P607",
"P136",
"P137",
"P131",
"P527",
"P1412",
"P206",
"P205",
"P449",
"P127",
"P123",
"P86",
"P840",
"P355",
"P737",
"P740",
"P190",
"P576",
"P749",
"P112",
"P118",
"P17",
"P19",
"P3373",
"P6",
"P276",
"P1001",
"P580",
"P582",
"P585",
"P463",
"P676",
"P674",
"P264",
"P108",
"P102",
"P25",
"P27",
"P26",
"P20",
"P22",
"Na",
"P807",
"P800",
"P279",
"P1336",
"P577",
"P570",
"P571",
"P178",
"P179",
"P272",
"P170",
"P171",
"P172",
"P175",
"P176",
"P39",
"P30",
"P31",
"P36",
"P37",
"P35",
"P400",
"P403",
"P361",
"P364",
"P569",
"P710",
"P1344",
"P488",
"P241",
"P162",
"P161",
"P166",
"P40",
"P1441",
"P156",
"P155",
"P150",
"P551",
"P706",
"P159",
"P495",
"P58",
"P194",
"P54",
"P57",
"P50",
"P1366",
"P1365",
"P937",
"P140",
"P69",
"P1198",
"P1056"]
c2l = ClassLabel(num_classes = 97, names = relations_code_list)
label_list_ids = [c2l.str2int(label) for label in relations_code_list]
#gold_labels_ids = [c2l.str2int(label) for label in gold_labels]
#aa = [c2l.int2str(label) for label in gold_labels_ids] # convert ints to CODE of label!! USE IN EVAL
#inputs = tokenizer(text=texts[0], entity_spans = entity_spans[0], padding = "max_length", max_length = 1024, task = "entity_pair_classification", return_tensors = "pt")
#torch.save(inputs, 'inputs_eval.pt')
#test_dataset = MyDataset(inputs, gold_labels_ids)
logging.info("Beginning of evaluation batching")
output_dir = "evalClassic_17Out"
if not os.path.exists(output_dir):
os.makedirs(output_dir)
output_filename = os.path.join(output_dir, 'results.json')
output_file = open(output_filename, 'w')
batch_size = 10
rel2word = {
"Na": "Na",
"P6": "head of government",
"P17": "country",
"P19": "place of birth",
"P20": "place of death",
"P22": "father",
"P25": "mother",
"P26": "spouse",
"P27": "country of citizenship",
"P30": "continent",
"P31": "instance of",
"P35": "head of state",
"P36": "capital",
"P37": "official language",
"P39": "position held",
"P40": "child",
"P50": "author",
"P54": "member of sports team",
"P57": "director",
"P58": "screenwriter",
"P69": "educated at",
"P86": "composer",
"P102": "member of political party",
"P108": "employer",
"P112": "founded by",
"P118": "league",
"P123": "publisher",
"P127": "owned by",
"P131": "located in the administrative territorial entity",
"P136": "genre",
"P137": "operator",
"P140": "religion",
"P150": "contains administrative territorial entity",
"P155": "follows",
"P156": "followed by",
"P159": "headquarters location",
"P161": "cast member",
"P162": "producer",
"P166": "award received",
"P170": "creator",
"P171": "parent taxon",
"P172": "ethnic group",
"P175": "performer",
"P176": "manufacturer",
"P178": "developer",
"P179": "series",
"P190": "sister city",
"P194": "legislative body",
"P205": "basin country",
"P206": "located in or next to body of water",
"P241": "military branch",
"P264": "record label",
"P272": "production company",
"P276": "location",
"P279": "subclass of",
"P355": "subsidiary",
"P361": "part of",
"P364": "original language of work",
"P400": "platform",
"P403": "mouth of the watercourse",
"P449": "original network",
"P463": "member of",
"P488": "chairperson",
"P495": "country of origin",
"P527": "has part",
"P551": "residence",
"P569": "date of birth",
"P570": "date of death",
"P571": "inception",
"P576": "dissolved, abolished or demolished",
"P577": "publication date",
"P580": "start time",
"P582": "end time",
"P585": "point in time",
"P607": "conflict",
"P674": "characters",
"P676": "lyrics by",
"P706": "located on terrain feature",
"P710": "participant",
"P737": "influenced by",
"P740": "location of formation",
"P749": "parent organization",
"P800": "notable work",
"P807": "separated from",
"P840": "narrative location",
"P937": "work location",
"P1001": "applies to jurisdiction",
"P1056": "product or material produced",
"P1198": "unemployment rate",
"P1336": "territory claimed by",
"P1344": "participant of",
"P1365": "replaces",
"P1366": "replaced by",
"P1376": "capital of",
"P1412": "languages spoken, written or signed",
"P1441": "present in work",
"P3373": "sibling"}
# NOTE(review): these counters/lists are declared but never updated here.
num_predicted = 0
num_gold = 0
num_correct = 0
this_pair = []
all_pairs = []
list_of_dicts = []
torch.cuda.empty_cache()
logging.info("Evaluation will start now!:")
model.eval()
model.to(device)
# Batched inference over ALL test examples.
# NOTE(review): this loop re-reads test_examples directly, so the >500-token
# filtering performed earlier is effectively discarded; truncation=True in
# the tokenizer call is what actually bounds the input length.
for batch_start_idx in trange(0, len(test_examples), batch_size):# len(test_examples) 100
    batch_examples = test_examples[batch_start_idx:batch_start_idx + batch_size]
    texts = [example["text"] for example in batch_examples]
    entity_spans = [example["entity_spans"] for example in batch_examples]
    idxs_entity_pair = [example["idxs_entity_pair"] for example in batch_examples]
    titles = [example["title"] for example in batch_examples]
    #gold_labels = [example["labels"] for example in batch_examples]
    #gold_labels_ids = [c2l.str2int(label) for label in gold_labels]
    for i in range(len(entity_spans)):
        entity_spans[i] = list(entity_spans[i])
    inputs = tokenizer(text=texts, entity_spans=entity_spans, truncation=True, padding = "max_length", max_length = 512, task = "entity_pair_classification", return_tensors = "pt").to(device)
    with torch.no_grad():
        outputs = model(**inputs)
    # Highest-scoring class id per pair -> relation code -> readable name.
    predicted_indices = outputs.logits.argmax(-1)
    predicted_labels = [c2l.int2str(pred) for pred in predicted_indices.tolist()]
    predicted_relation = [rel2word.get(rel) for rel in predicted_labels]
    for i in range(len(predicted_relation)):
        list_of_dicts.append(dict(
            title=titles[i],
            h_idx=idxs_entity_pair[i][0],
            t_idx = idxs_entity_pair[i][1],
            r = predicted_relation[i]
        ))
    torch.cuda.empty_cache()
# Dump the predictions in the DocRED submission format.
json_object = json.dumps(list_of_dicts, indent = 4)
with open("results_classic.json", "w") as outfile:
    outfile.write(json_object)
| joseMalaquias/tese | DOCRED/classic_obtainJSON.py | classic_obtainJSON.py | py | 17,283 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "torch.utils",
"line_number": 20,
"usage_type": "attribute"
},
{
"api_name": "torch.tensor",
"line_number": 27,
"usage_type": "call"
},
{
"api_name": "random.choice",
"line_number": 168,
"usage_type": "call"
},
{
"api_name": "random.choice",
"lin... |
4393844283 | # 클레어와 물약
# r1 x
# BOJ 20119 - "Claire and the Potions"
# https://www.acmicpc.net/problem/20119
# https://welog.tistory.com/256
# BFS over recipes: a potion becomes craftable once every ingredient of one
# of its recipes is available; newly craftable potions are fed back in.
import sys
from collections import deque
input = sys.stdin.readline
n, m = map(int, input().split())
# graph[ingredient] -> set of potions that use it in some recipe
graph = [set() for _ in range(n + 1)]
# recipe_dict[potion] -> list of [remaining ingredients, remaining count]
recipe_dict = {}
for _ in range(m):
    data = list(map(int, input().split()))
    if data[-1] not in recipe_dict:
        recipe_dict[data[-1]] = [[data[1:-1], data[0]]]
    else:
        recipe_dict[data[-1]].append([data[1:-1], data[0]])
    for i in range(1, len(data) - 1):
        graph[data[i]].add(data[-1])
l = int(input())
l_list = list(map(int, input().split()))
check = [False] * (n + 1)  # check[i]: potion i is already obtainable
result = set()
for i in l_list:
    check[i] = True
    result.add(i)
q = deque(l_list)
while q:
    now = q.popleft()
    for i in graph[now]:
        if check[i]:
            continue
        # Strike `now` off every recipe of potion i; a recipe whose
        # remaining-ingredient count hits zero makes the potion brewable.
        for idx in range(len(recipe_dict[i])):
            recipe, cnt = recipe_dict[i][idx]
            if now in recipe:
                recipe.remove(now)
                cnt -= 1
                recipe_dict[i][idx] = [recipe, cnt]
                if cnt == 0:
                    check[i] = True
                    q.append(i)
                    result.add(i)
print(len(result))
result = list(result)
result.sort()
print(*result) | sjjam/Algorithm-Python | baekjoon/20119.py | 20119.py | py | 1,292 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "sys.stdin",
"line_number": 9,
"usage_type": "attribute"
},
{
"api_name": "collections.deque",
"line_number": 35,
"usage_type": "call"
}
] |
18347648906 | import pandas as pd
import numpy as np
import plotly.graph_objects as go
from plotly.subplots import make_subplots
import plotly.io as pio
from matplotlib import cm
# set defaults for charts
pio.templates.default = "plotly_white"
@np.vectorize
def calculate_tax(income):
    """Return 2021 US federal income tax owed on ``income`` (progressive brackets)."""
    cutoffs = [9950, 40525, 86375, 164925, 209425, 523600]
    pcts = [0.10, 0.12, 0.22, 0.24, 0.32, 0.35, 0.37]
    owed = 0
    lower = 0
    # Walk the brackets, taxing each fully-filled slice at its rate and the
    # partially-filled one at the marginal rate.
    for cutoff, pct in zip(cutoffs, pcts):
        if income > cutoff:
            owed += pct * (cutoff - lower)
            lower = cutoff
        else:
            owed += pct * (income - lower)
            break
    else:
        # Income exceeded every cutoff: tax the remainder at the top rate.
        owed += pcts[-1] * (income - cutoffs[-1])
    return owed
# Model parameters: the household columns projected forward each year.
variables = ['robert_income', 'isabel_income', 'expenses', 'assets']
# Base-year (2023) household snapshot.
data = pd.DataFrame({
    'year': [2023],
    'robert_income': [100000],
    'isabel_income': [200000],
    'expenses': [50000],
    'assets': [800000]
}).set_index('year')
# Annual growth rate applied to each variable.
growth_assumptions = {
    'robert_income': 0.0,
    'isabel_income': 0.0,
    'expenses': 0.01,
    'assets': 0.04
}
# One-off shocks: {year: {variable: (delta, description)}}.
shocks = {
    2027: {
        'robert_income': (-10000, 'Robert leaves Google'),
        'isabel_income': (-100000, 'Isabel book deals are smaller')
    },
    2030: {
        'expenses': (30000, 'Childcare')
    }
}
volatility = 0.08 # standard deviation of asset growth
# NOTE(review): `simulations` is defined but the code below hard-codes
# 10000 simulation columns -- confirm which count is intended.
simulations = 1000 # number of simulations
# 60-year projection horizon (2023-2082).
projection = pd.DataFrame(index=range(2023, 2083))
# initialize a DataFrame with simulations for assets
asset_simulations = pd.DataFrame(1 + volatility * np.random.standard_normal(size=(60,10000)),
index=projection.index,
columns=['simulation_'+str(i) for i in range(10000)]
)
# chain all
asset_simulations = asset_simulations.cumprod()
# loop over years
for year in projection.index:
if year == 2023:
# handle base year
for var in variables:
projection.loc[year, var] = data.loc[2023, var]
asset_simulations.loc[year] = data.loc[2023, 'assets']
else:
# apply growth assumptions and shocks
for var in variables:
projection.loc[year, var] = projection.loc[year - 1, var] * (1 + growth_assumptions[var])
if year in shocks and var in shocks[year]:
shock, _ = shocks[year][var]
projection.loc[year, var] += shock
# calculate household income and savings
projection.loc[year, 'household_income'] = projection.loc[year, 'robert_income'] + projection.loc[year, 'isabel_income']
projection.loc[year, 'taxes'] = calculate_tax(projection.loc[year, 'household_income'])
projection.loc[year, 'net_household_income'] = projection.loc[year, 'household_income'] - projection.loc[year, 'taxes']
# calculate savings
projection.loc[year, 'savings'] = projection.loc[year, 'net_household_income'] - projection.loc[year, 'expenses']
# add savings to assets
projection.loc[year, 'assets'] += projection.loc[year, 'savings']
# add volatility to assets
asset_simulations.loc[year] = projection.loc[year - 1, 'assets'] * (asset_simulations.loc[year])
# plot income, expenses, and savings
fig = go.Figure(layout=go.Layout(template='plotly_white'))
for var in ['robert_income', 'isabel_income', 'expenses', 'savings', 'household_income','net_household_income','taxes']:
fig.add_trace(go.Scatter(x=projection.index, y=projection[var], mode='lines', name=var))
fig.show()
# plot asset simulations as a fan chart
fig = go.Figure()
percentiles = [1, 5, 20, 50, 80, 95, 99]
colors = [cm.Blues(x) for x in np.linspace(0.01, 1, 7)]
for i in range(len(percentiles)):
percentile = percentiles[i]
color = colors[i]
asset_percentile = asset_simulations.apply(lambda x: np.percentile(x, percentile), axis=1)
fig.add_trace(go.Scatter(x=asset_percentile.index, y=asset_percentile, fill='tonexty', fillcolor='rgba'+str(color), line_color='rgba'+str(color), name=str(percentile)+'th percentile'))
fig.show()
# plot shocks
all_shock_values = []
for shock_type in ['assets', 'robert_income', 'isabel_income', 'expenses']:
for year, shocks_in_year in shocks.items():
if shock_type in shocks_in_year:
all_shock_values.append(shocks_in_year[shock_type][0])
fig = make_subplots(rows=4, cols=1, shared_xaxes=True, shared_yaxes='rows')
for shock_type, subplot in zip(['assets', 'robert_income', 'isabel_income', 'expenses'], [1, 2, 3, 4]):
shock_years = []
shock_values = []
hover_texts = [] # New list to store hover text labels
for year, shocks_in_year in shocks.items():
if shock_type in shocks_in_year:
shock_years.append(year)
shock_values.append(shocks_in_year[shock_type][0])
hover_texts.append(shocks_in_year[shock_type][1]) # Add the hover text label to the list
fig.add_trace(go.Bar(x=shock_years, y=shock_values, name=shock_type + ' shocks', text=hover_texts, textposition='outside', hovertemplate='%{text}', textfont=dict(color='rgba(0,0,0,0)')), row=subplot, col=1)
fig.update_xaxes(range=[2023, 2082])
fig.update_yaxes(range=[min(all_shock_values), max(all_shock_values)])
fig.update_layout(template='plotly_white')
fig.show()
| robert-sturrock/financial-projections | financial_projections.py | financial_projections.py | py | 5,578 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "plotly.io.templates",
"line_number": 9,
"usage_type": "attribute"
},
{
"api_name": "plotly.io",
"line_number": 9,
"usage_type": "name"
},
{
"api_name": "numpy.vectorize",
"line_number": 11,
"usage_type": "attribute"
},
{
"api_name": "pandas.DataFram... |
10731341714 | from os.path import isfile
import json
from db import normalize
from itertools import product
class Settings:
    """Per-user search and match preferences, cached as settings_<id>.json."""
    def __init__(self, user_id):
        self.user_id = user_id
        # Parameters for the VK users.search API call.
        self.search = {}
        # (field, value) pairs used to score candidate profiles.
        self.match = []
    @staticmethod
    def from_dict(settings: dict) -> tuple:
        """Split a persisted settings mapping into (search, match)."""
        return (
            settings.get('search'),
            settings.get('match'),
        )
    @staticmethod
    def to_dict(search, match) -> dict:
        """Bundle search and match into the on-disk JSON layout."""
        return {
            'search': search,
            'match': match,
        }
    def load_from_file(self) -> bool:
        """Load cached settings; return False when no cache file exists."""
        file_name = f'settings_{self.user_id}.json'
        if not isfile(file_name):
            return False
        with open(file_name, mode='r', encoding='utf-8') as file:
            self.search, self.match = self.from_dict(json.load(file))
        return True
    def save_to_file(self):
        """Persist the current settings to settings_<user_id>.json."""
        file_name = f'settings_{self.user_id}.json'
        with open(file_name, mode='w', encoding='utf-8') as file:
            json.dump(
                self.to_dict(self.search, self.match),
                file, ensure_ascii=False, indent=4)
    def load_from_vk(self, vk_user: dict) -> None:
        """Derive search params and match criteria from a VK profile.

        Mutates ``vk_user``: its 'sex' code is swapped (1 <-> 2),
        presumably to search for the opposite sex; unknown maps to 0
        (any). (Annotation fixed: this method returns nothing.)
        """
        vk_user['sex'] = {1: 2, 2: 1}.get(vk_user['sex'], 0)
        # Profile fields copied into the search query verbatim.
        search_fields = {
            'city', 'country', 'hometown',
            'sex', 'has_photo', 'religion'
        }
        # Profile list fields that map to a differently named search param
        # when the profile has exactly one entry.
        search_fields_with_fix = {
            'universities': 'university',
            'schools': 'school',
            'career': 'company'
        }
        # Fields considered when scoring candidate similarity.
        match = {
            'universities', 'schools', 'status', 'activities',
            'interests', 'music', 'movies', 'tv', 'books',
            'games', 'about', 'quotes', 'career', 'military', 'langs',
            'verified', 'sex', 'city', 'country', 'home_town', 'has_photo',
            'has_mobile', 'common_count', 'occupation', 'relation',
            'can_post', 'can_see_all_posts', 'can_see_audio',
            'can_write_private_message', 'can_send_friend_request',
            'is_hidden_from_feed', 'blacklisted', 'blacklisted_by_me',
            'political', 'religion', 'inspired_by', 'people_main',
            'life_main', 'smoking', 'alcohol'
        }
        search_params = {
            field: value for field, value in vk_user.items()
            if field in search_fields
        }
        for field, alias in search_fields_with_fix.items():
            if field in vk_user and len(vk_user[field]) == 1:
                search_params[alias] = vk_user[field][0]
        self.search = search_params.copy()
        # Keep only truthy profile values as match criteria.
        self.match = [
            (field, value) for field, value in vk_user.items()
            if field in match and value
        ]
    def load_settings(self, vk):
        """Load settings from the file cache, else build them from VK."""
        if self.load_from_file():
            return
        user = vk.get_user(self.user_id)
        normalize.normalize(user)
        self.load_from_vk(user)
    def make_flat_searc_params(self, searc_params=None):
        """Expand list/tuple-valued search params into the cartesian
        product of flat parameter dicts.

        (Name keeps the historical 'searc' typo -- callers depend on it.)
        """
        if searc_params is None:
            searc_params = self.search
        arrays = [
            product([key], value) for key, value in searc_params.items()
            if isinstance(value, (list, tuple))
        ]
        result = []
        for iteam in map(dict, product(*arrays)):
            new_iteam = searc_params.copy()
            new_iteam.update(iteam)
            result.append(new_iteam)
        return result
    def add_settings(self):
        """Extend the search with both sort orders and online states."""
        new_settings = {
            'sort': [0, 1],
            'online': [0, 1]
        }
        self.search.update(new_settings)
    def get_base_searc(self):
        """Return the minimal search query with default values filled in."""
        return {
            'sex': self.search.get('sex', [0, 1, 2]),
            'age_from': self.search.get('age_from', 18),
            'age_to': self.search.get('age_to', 25),
            'country': self.search.get('country', 1),
        }
| rychanya/vkinder | src/vkinder/settings.py | settings.py | py | 3,811 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "os.path.isfile",
"line_number": 30,
"usage_type": "call"
},
{
"api_name": "json.load",
"line_number": 33,
"usage_type": "call"
},
{
"api_name": "json.dump",
"line_number": 39,
"usage_type": "call"
},
{
"api_name": "db.normalize.normalize",
"line... |
29382149888 | import json
import boto3
from botocore.exceptions import ClientError
import os
region = os.environ['AWS_REGION']
sess = boto3.session.Session(region_name=region)
# def get_bucket_name():
# ssmClient = sess.client('ssm')
# response = ssmClient.get_parameter(
# Name = 'ProserveProject_S3BucketName',
# WithDecryption = True)
# return response['Parameter']['Value']
def lambda_handler(event, context):
    """Delete the S3 object named in the request body; respond 204 with CORS headers."""
    s3 = sess.client('s3')
    bucket = os.environ['BUCKET_NAME']
    body = json.loads(event['body'])
    key = body["objectKey"].strip()
    # VersionId="null" targets the unversioned object marker.
    s3.delete_object(Bucket=bucket, Key=key, VersionId="null")
    cors_headers = {
        'Access-Control-Allow-Headers': '*',
        'Access-Control-Allow-Origin': '*',
        'Access-Control-Allow-Methods': 'OPTIONS,POST,GET,DELETE'
    }
    return {
        'statusCode': 204,
        'headers': cors_headers,
    }
| ferozbaig96/Proserve-project | lambdas/DeleteS3Object.py | DeleteS3Object.py | py | 1,191 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "os.environ",
"line_number": 6,
"usage_type": "attribute"
},
{
"api_name": "boto3.session.Session",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "boto3.session",
"line_number": 7,
"usage_type": "attribute"
},
{
"api_name": "os.environ",
... |
20049305182 | import numpy as np
import cv2
from scipy import ndimage, interpolate
def track(I, J, input_points, total_points, window=(21, 21), min_disp=0.01):
    """Track feature points from frame I to frame J with Lucas-Kanade.

    input_points: iterable of (row, col) points to track.
    total_points: 2xN array of historical points, drawn for reference.
    Returns (J annotated with circles, 2xN int array of updated points).

    NOTE(review): ``min_disp`` is accepted but never used; ``frame`` is an
    unused leftover of the commented-out debug drawing; the print calls
    are debug output.
    """
    output = []
    I_gray = cv2.cvtColor(I, cv2.COLOR_BGR2GRAY)
    J_gray = cv2.cvtColor(J, cv2.COLOR_BGR2GRAY)
    # Normalize intensities to [0, 1] before the photometric comparison.
    I_norm = I_gray/I_gray.max()
    J_norm = J_gray/J_gray.max()
    for points in input_points:
        print('inside calculate')
        d = calculate_new_point(I_norm, J_norm, points[0], points[1], window)
        if d is not None:
            print('output '+str(output))
            output.append((points[0] + d[0], points[1] + d[1]))
    # Transpose to 2xN so the zip(*...) drawing idiom below works.
    output = np.asarray(output).T
    output = output.astype(int)
    frame = J.copy()
    # Draw the historical points, then the newly tracked ones.
    for point in zip(*total_points[::-1]):
        print('printing new points')
        print(point)
        print(type(point))
        J = cv2.circle(J, point, 3, (0, 0, 255), 10)
    for point in zip(*output[::-1]):
        print('printing new points')
        print(point)
        print(type(point))
        J = cv2.circle(J, point, 3, (0, 0, 255), 10)
    # for point in zip(*output[::-1]):
    #     frame = cv2.circle(frame, point, 3, (255, 0, 0), 4)
    return J, output
def calculate_new_point(I, J, x, y, window):
    """Estimate the Lucas-Kanade displacement of the window at (x, y).

    I, J are the normalized grayscale previous and current frames.
    Returns the accumulated [dx, dy] displacement as a numpy array, or
    None when the structure tensor is singular or refinement fails.
    """
    displ_tot = np.array([0., 0.]).T
    # The window to evaluate
    win_x = np.arange(x, x + window[0], dtype=float)
    win_y = np.arange(y, y + window[1], dtype=float)
    roi = I[x:x + window[0], y: y + window[1]]
    # Image gradients over the template window in I
    Ix = cv2.Sobel(roi, cv2.CV_64F, 1, 0, ksize=3).flatten()
    Iy = cv2.Sobel(roi, cv2.CV_64F, 0, 1, ksize=3).flatten()
    # Structure tensor (2x2 second-moment matrix of the SSD error)
    A = np.array([Ix, Iy])
    T = A.dot(A.T)
    # A singular tensor means the window has no trackable texture.
    if np.linalg.det(T) == 0:
        return None
    T_inv = np.linalg.inv(T)
    # Bilinear interpolation of J so sub-pixel displacements can be sampled.
    x_arr = np.arange(0, J.shape[1])
    y_arr = np.arange(0, J.shape[0])
    J_bilinear = interpolate.interp2d(x_arr, y_arr, J, kind='linear')
    # Iterative refinement. Bug fixes vs. the original:
    #  * the loop variable was named ``x``, shadowing the window coordinate,
    #    so the template was read from row 0 instead of row x;
    #  * ``return displ_tot`` sat inside the loop, so only one of the 35
    #    refinement steps ever ran.
    for _ in range(35):
        try:
            J_window = J_bilinear(win_x + displ_tot[0], win_y + displ_tot[1])
            D = (roi - J_window).flatten()
            e = -1 * (np.dot(A, D))
            d_temp = np.dot(T_inv, e)
            displ_tot = displ_tot + d_temp
        except Exception:
            return None
    return displ_tot
# calculate displacement
def compute_corners(img, threshold=0.5):
    """Harris-style corner detection.

    Returns (copy of img with strong responses painted red,
             indices of points where R > threshold * R.max()).

    NOTE(review): the threshold is applied two different ways -- as an
    absolute cutoff for the red overlay but relative to R.max() for the
    returned coordinates; confirm which is intended. The Harris k here
    is 0.22, well above the conventional 0.04-0.06.
    """
    img_cpy = img.copy()
    # Grayscale
    img_gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    # Image gradients (Sobel)
    Ix = cv2.Sobel(img_gray,cv2.CV_64F,1,0,ksize=3)
    Iy = cv2.Sobel(img_gray,cv2.CV_64F,0,1,ksize=3)
    Ix2 = np.square(Ix)
    Iy2 = np.square(Iy)
    Ixdy = Ix*Iy
    # Gaussian-smoothed second-moment matrix entries
    g_Ix2 = cv2.GaussianBlur(Ix2, (3,3),0)
    g_Iy2 = cv2.GaussianBlur(Iy2, (3,3),0)
    g_IxIy = cv2.GaussianBlur(Ixdy, (3,3),0)
    # Harris response: det(M) - k * trace(M)^2
    R = g_Ix2*g_Iy2 - np.square(g_IxIy) - 0.22*np.square(g_Ix2 + g_Iy2)
    # find all points above threshold
    img_cpy[R>threshold]=[255,0,0]
    return img_cpy, np.where(R > threshold*R.max())
cap = cv2.VideoCapture('Ass_img/MarinaBayatNightDrone.mp4')
# Check if camera opened successfully
if (cap.isOpened()== False):
print("Error opening video stream or file")
# Capture frame-by-frame
ret, frame = cap.read()
cv2.namedWindow('Frame',cv2.WINDOW_NORMAL)
cv2.resizeWindow('Frame', 900,600)
old_frame, points = compute_corners(frame)
points = np.asarray(points)
total_points = points
print(len(points.T))
cv2.imshow('Frame',old_frame)
# Read until video is completed
while(cap.isOpened()):
ret, new_frame = cap.read()
old_frame, points = track(old_frame, new_frame, points.T, total_points)
cv2.imshow('Frame',old_frame)
print('points and total points')
print(points)
print('total points')
print(total_points)
total_points = np.hstack((total_points, points))
# Press Q on keyboard to exit
if cv2.waitKey(25) & 0xFF == ord('q'):
break
# When everything done, release the video capture object
cap.release()
# Closes all the frames
cv2.destroyAllWindows()
#
# corner_det = Corner_Detector()
# corners = corner_det.compute_corners()
| ocinemod87/Advanced_Topics_Image_Analysis | Assignment_1/Assignment_1.py | Assignment_1.py | py | 4,477 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "cv2.cvtColor",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "cv2.COLOR_BGR2GRAY",
"line_number": 9,
"usage_type": "attribute"
},
{
"api_name": "cv2.cvtColor",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "cv2.COLOR_BGR2GRAY",
... |
30876684301 | # This is necessary to find the main code
import operator
import sys
from Bomberman.bomberman.entity import MonsterEntity
from Bomberman.bomberman.sensed_world import SensedWorld
sys.path.insert(0, '../bomberman')
# Import necessary stuff
from entity import CharacterEntity
from colorama import Fore, Back
from queue import PriorityQueue
import math
from enum import Enum
class TestCharacter(CharacterEntity):
destination = (0, 0)
expectiDepth = 3
minimaxDepth = 4
bound = 4
def do(self, wrld):
"""Choose and perform this tick's action based on the evaluated world state.

Branches: exit adjacent -> step onto it; UNSAFE -> drop a bomb and flee the
closest monster (expectimax for 'stupid', minimax otherwise); NEAR_BOMB ->
pick the safest reachable cell outside the blast line (or sit still);
BLOCKED -> A* through walls to find the closest reachable wall and bomb it;
SAFE -> A* straight to the destination.
"""
# Your code here
loc = (self.x, self.y)
wrldState = self.evaluateState(wrld)
characterState = wrldState[0]
# exit is first destination
self.destination = wrld.exitcell
# If the exit is right next to us, just pick it
if wrld.exitcell in self.getNeighbors(loc, wrld, [obstacles.EXIT, obstacles.PLAYER]):
move = self.calculateD(loc, wrld.exitcell)
self.move(move[0], move[1])
return
# There is a monster close to us
if characterState == state.UNSAFE:
self.place_bomb()
# running away from stupid
if wrldState[1][0] == 'stupid':
v, action = self.maxvalue(wrld, loc, 0, 'stupid')
next_move = self.calculateD(loc, action)
self.move(next_move[0], next_move[1])
# Running away from aggressive
if wrldState[1][0] == 'aggressive':
v, action = self.miniMaxvalue(wrld, -math.inf, math.inf, loc, 0, 'aggressive')
next_move = self.calculateD(loc, action)
self.move(next_move[0], next_move[1])
# Running away from selfpreserving
if wrldState[1][0] == 'selfpreserving':
v, action = self.miniMaxvalue(wrld, -math.inf, math.inf, loc, 0, 'selfpreserving')
next_move = self.calculateD(loc, action)
self.move(next_move[0], next_move[1])
# What to do when there is a bomb near us
if characterState == state.NEAR_BOMB:
next_move = (0, 0)
# NOTE(review): 'max' shadows the builtin; it is only a local score here.
max = 0
name = ''
flag = True
if wrldState[1]:
name = wrldState[1][0]
flag = False
if self.bomb_check(loc, wrld):
for cell in self.getNeighbors(loc, wrld, [obstacles.EXIT]):
if not self.bomb_check(cell, wrld):
# predict one step ahead
next_move = self.calculateD(loc, cell)
newWrld = SensedWorld.from_world(wrld)
character = next(iter(newWrld.characters.values()))[0]
new_move = self.calculateD((character.x, character.y), (cell[0], cell[1]))
character.move(new_move[0], new_move[1])
if name != '':
monster = self.getMonster(newWrld, name)
monster.move(0, 0)
newerWrld = newWrld.next()[0]
if flag:
test = self.exit_utility(newerWrld)
else:
test = self.utility(newerWrld, name)
if test > max:
max = test
next_move = new_move
self.move(next_move[0], next_move[1])
else:
self.move(0, 0)
# What to do if we cannot currently reach the exit
if characterState == state.BLOCKED:
walls = []
route = []
reachable = False
# Map a direct course to the exit, ignoring walls
came_from, cost_so_far = self.AStar(wrld, loc, wrld.exitcell, [obstacles.EXIT, obstacles.WALL])
path = wrld.exitcell
while path != loc:
path = came_from[path]
route.append(path)
# Find all the walls you have to go through
for stepnum, step in enumerate(route):
self.set_cell_color(step[0], step[1], Fore.RED + Back.GREEN)
if wrld.wall_at(step[0], step[1]):
walls.append(route[stepnum+1])
# Choose the closest reachable wall to the exit
closest_wall = (0,0)
for wall in (walls):
new_goal = wall
came_from, cost_so_far = self.AStar(wrld, loc, new_goal, [obstacles.EXIT])
for path in came_from:
if path == new_goal:
closest_wall = new_goal
reachable = True
break
if reachable: break
self.destination = closest_wall
# Navigate to that location
came_from, cost_so_far = self.AStar(wrld, loc, closest_wall, [obstacles.EXIT])
path = closest_wall
next_m = (0, 0)
while path != loc:
temp = path
path = came_from[path]
if path == loc:
next_m = temp
break
next_move = self.calculateD(loc, next_m)
# Place bomb at wall -- deal with diagonal!?!
if loc == closest_wall:
self.place_bomb()
else:
self.move(next_move[0], next_move[1])
# What to do if there are no monsters near us and we can reach the exit
if characterState == state.SAFE:
# Just do A star
came_from, cost_so_far = self.AStar(wrld, loc, self.destination, [obstacles.EXIT])
path = self.destination
next_m = (0, 0)
while path != loc:
temp = path
path = came_from[path]
if path == loc:
next_m = temp
break
next_move = self.calculateD(loc, next_m)
self.move(next_move[0], next_move[1])
# Max Value function of expecitmax
def maxvalue(self, wrld, curr, d, name):
"""Expectimax MAX node: pick our move maximizing expected utility.

Returns (value, action-cell).  Terminals: SAFE or depth reached -> utility;
DEAD -> -10000.  Each candidate move is simulated on a copied world with the
monster held still, then handed to expvalue().
"""
# Terminal state
if self.evaluateState(wrld)[0] == state.SAFE or d == self.expectiDepth:
return self.utility(wrld, name), curr
if self.evaluateState(wrld)[0] == state.DEAD:
return -10000, curr
v = -math.inf
action = (0, 0)
for a in self.getNeighbors(curr, wrld, [obstacles.EXIT]):
# simulate a new world where we make the move
newWrld = SensedWorld.from_world(wrld)
character = next(iter(newWrld.characters.values()))[0]
new_move = self.calculateD((character.x, character.y), (a[0], a[1]))
character.move(new_move[0], new_move[1])
monster = self.getMonster(newWrld, name)
monster.move(0, 0)
newerWrld = newWrld.next()[0]
val = self.expvalue(newerWrld, a, d + 1, name)
if val > v:
v = val
action = a
return v, action
# Expected Value part of expectimax
def expvalue(self, wrld, act, d, name):
"""Expectimax CHANCE node: average maxvalue over the monster's uniform moves.

Returns -10000 when our character no longer exists in the simulated world.
"""
if self.evaluateState(wrld)[0] == state.SAFE or d == self.expectiDepth:
return self.utility(wrld, name)
v = 0
mcurr = self.getMonster(wrld, name)
possible_moves = self.getNeighbors((mcurr.x, mcurr.y), wrld, [obstacles.PLAYER])
for a in possible_moves:
# Uniform probability over the monster's legal moves.
p = 1.0/len(possible_moves)
# Predict a step ahead using simulated world
newWrld = SensedWorld.from_world(wrld)
monster = self.getMonster(newWrld, name)
new_move = self.calculateD((monster.x, monster.y), (a[0], a[1]))
monster.move(new_move[0], new_move[1])
try:
character = next(iter(newWrld.characters.values()))[0]
except(IndexError, StopIteration):
return -10000
character.move(0, 0)
newerWrld = newWrld.next()[0]
value = self.maxvalue(newerWrld, act, d+1, name)[0]
v = v + p*value
return v
# Alpha Beta Minimax
# Max value for Alpha-Beta Pruning
def miniMaxvalue(self, wrld, alpha, beta, curr, d, name):
"""Alpha-beta MAX node (our move) against a deterministic monster.

Returns (value, action-cell); terminals mirror maxvalue().  Prunes when
v >= beta.
"""
# Terminal State is we are safe or depth reached
if self.evaluateState(wrld)[0] == state.SAFE or d == self.minimaxDepth:
return self.utility(wrld, name), curr
if self.evaluateState(wrld)[0] == state.DEAD:
return -10000, curr
v = -math.inf
action = (0, 0)
for a in self.getNeighbors(curr, wrld, [obstacles.EXIT, obstacles.PLAYER]):
# Simulate a new world where we made that action
newWrld = SensedWorld.from_world(wrld)
character = next(iter(newWrld.characters.values()))[0]
new_move = self.calculateD((character.x, character.y), (a[0], a[1]))
monster = self.getMonster(newWrld, name)
character.move(new_move[0], new_move[1])
monster.move(0, 0)
newerWrld = newWrld.next()[0]
val = self.minvalue(newerWrld, alpha, beta, a, d+1, name)
if val > v:
v = val
action = a
# Beta cutoff: the minimizer would never allow this branch.
if v >= beta:
return v, a
alpha = max(alpha, v)
return v, action
# Min value for Minimax Alpha-Beta Pruning
def minvalue(self, wrld, alpha, beta, act, d, name):
"""Alpha-beta MIN node (the monster's move).  Returns a scalar value.

Returns -10000 when our character no longer exists in the simulated world;
prunes when v <= alpha.
"""
# Terminal State is we are safe or depth reached
if self.evaluateState(wrld)[0] == state.SAFE or d == self.minimaxDepth:
return self.utility(wrld, name)
v = math.inf
mcurr = self.getMonster(wrld, name)
possible_moves = self.getNeighbors((mcurr.x, mcurr.y), wrld, [obstacles.PLAYER, obstacles.EXIT, obstacles.MONSTER])
for a in possible_moves:
# Simulate a new world where we made that action
newWrld = SensedWorld.from_world(wrld)
monster = self.getMonster(newWrld, name)
new_move = self.calculateD((monster.x, monster.y), (a[0], a[1]))
monster.move(new_move[0], new_move[1])
try:
character = next(iter(newWrld.characters.values()))[0]
except(IndexError, StopIteration):
return -10000
character.move(0, 0)
newerWrld = newWrld.next()[0]
# NOTE(review): 'act' is rebound here, discarding the caller's action cell.
val, act = self.miniMaxvalue(newerWrld, alpha, beta, act, d + 1, name)
v = min(v, val)
if v <= alpha:
return v
beta = min(beta, v)
return v
# Main utility function for terminal states
def utility(self, wrld, name):
"""Score a world: nearer to the exit is better, nearer to the monster worse.

Non-stupid monsters get heavier weights plus the directional dpangle() bonus.
"""
# Utility for stupid monster
if name == 'stupid':
return 6*(1/(1 + self.exit_utility(wrld))) - 1*(1/((1 + self.monster_utility(wrld, name))**2))
# Utility for non-stupid monster
else:
return 20 * (1 / (1 + self.exit_utility(wrld))) - 50 * (1 / ((1 + self.monster_utility(wrld, name)) ** 2)) + self.dpangle(wrld, name)
# Calculate Vector between us, the monster, and the exit
def dpangle(self, wrld, name):
"""Directional bonus from the angle between the character->exit and
character->monster vectors.

Large positive (10000) when standing on the exit, large negative (-10000)
when on the monster, small fallbacks (-10) when the character is gone or the
angle is undefined; 10 when already within 4 steps of the exit.  Otherwise
rewards exits behind >= 90 degrees from the monster (eu) and penalizes the
monster blocking the way (-mu).
"""
try:
chara = next(iter(wrld.characters.values()))
character = chara[0]
except (IndexError, StopIteration):
return -10
# Vector for character to exit
e = self.destination
loc = (character.x, character.y)
ce = tuple(map(operator.sub, e, loc))
eu = self.calculateH(e, loc)
if ce == (0, 0) or eu == 0:
return 10000
# Vector for character to monster
monster = self.getMonster(wrld, name)
mu = self.calculateH((monster.x, monster.y), loc)
cm = tuple(map(operator.sub, (monster.x, monster.y), loc))
if cm == (0, 0) or mu == 0:
return -10000
# Dot product
dp = (ce[0] * cm[0]) + (ce[1] * cm[1])
cosangle = dp / (eu * mu)
try:
angle = math.degrees(math.acos(cosangle))
except(ValueError):
return -10
if self.exit_utility(wrld) <= 4:
return 10
# Return values based on if it is higher or lower than 90 degrees
if angle >= 90:
return eu
else:
return -mu
def getMonster(self, wrld, name):
    """Return the first monster in wrld whose name matches *name*.

    Falls back to a placeholder MonsterEntity named 'dead' at (0, 0) when no
    such monster exists, so callers can test ``m.name == 'dead'``.
    """
    matches = [entry[0] for entry in wrld.monsters.values() if entry[0].name == name]
    if matches:
        return matches[0]
    return MonsterEntity('dead', [0], 0, 0)
# Utility function for the distance to the exit
def exit_utility(self, wrld):
    """Return the A* step count from our character to self.destination.

    Falls back to 10 when the character no longer exists (dead), and to the
    straight Euclidean distance when A* found no complete path (KeyError while
    walking came_from back to the character).
    """
    try:
        chara = next(iter(wrld.characters.values()))
        character = chara[0]
    except (IndexError, StopIteration):
        return 10
    loc = (character.x, character.y)
    e = self.destination
    exit_came_from, exit_cost_so_far = self.AStar(wrld, loc, (e[0], e[1]), [obstacles.EXIT])
    counter = 0
    path = (e[0], e[1])
    while path != loc:
        try:
            path = exit_came_from[path]
        except KeyError:
            return self.calculateH(loc, e)
        counter += 1
    # (Dead branch removed: counter only ever increments from 0, so the old
    # ``if counter == -1: return counter`` check could never fire.)
    return counter
# Utility function for the distance to the monster
def monster_utility(self, wrld, name):
"""A* step count from our character to the named monster.

Returns -10 when our character is gone, 100 ("far away") when the monster is
dead or unreachable.
"""
try:
chara = next(iter(wrld.characters.values()))
character = chara[0]
except (IndexError, StopIteration):
return -10
m = self.getMonster(wrld, name)
if m.name == 'dead':
return 100
loc = (character.x, character.y)
mloc = (m.x, m.y)
monster_came_from, monster_cost_so_far = self.AStar(wrld, loc, mloc, [obstacles.MONSTER, obstacles.PLAYER, obstacles.EXIT])
counter = 0
path = mloc
while path != loc:
try:
path = monster_came_from[path]
except (KeyError):
return 100
counter += 1
return counter
# A Star algorithm
def AStar(self, wrld, start, goal, list_of_e):
    """A* search from start to goal over the grid defined by getNeighbors().

    Edge cost and heuristic are both the Euclidean distance (calculateH).
    Returns (came_from, cost_so_far); follow came_from from goal back to
    start to reconstruct the path (came_from[start] is None).

    Fix: the loop variable was named ``next``, shadowing the builtin used by
    other methods of this class; renamed to ``neighbor``.
    """
    frontier = PriorityQueue()
    frontier.put((0, start))
    came_from = {start: None}
    cost_so_far = {start: 0}
    while not frontier.empty():
        current = frontier.get()[1]
        if current == goal:
            break
        for neighbor in self.getNeighbors(current, wrld, list_of_e):
            new_cost = cost_so_far[current] + self.calculateH(neighbor, current)
            if neighbor not in cost_so_far or new_cost < cost_so_far[neighbor]:
                cost_so_far[neighbor] = new_cost
                priority = new_cost + self.calculateH(goal, neighbor)
                frontier.put((priority, neighbor))
                came_from[neighbor] = current
    return came_from, cost_so_far
# Heuristic calculation - returns euclidean distance
def calculateH(self, loc1, loc2):
    """Return the Euclidean distance between two (x, y) cells.

    Fix: the original unpacked x1/y1/x2/y2 and then never used them,
    recomputing from loc1/loc2 instead; now the unpacked values are used
    via math.hypot.
    """
    (x1, y1) = loc1
    (x2, y2) = loc2
    return math.hypot(x1 - x2, y1 - y2)
def calculateD(self, loc1, loc2):
    """Return the (dx, dy) displacement that moves loc1 onto loc2."""
    return (loc2[0] - loc1[0], loc2[1] - loc1[1])
# Returns the neighbors of a particular location according to the obstacles passed in - obstacles passed in ARE AVAILABLE to be considered neighbors
def getNeighbors(self, loc, wrld, list_of_e):
    """Return the in-bounds neighbor cells of loc that may be stepped on.

    Empty cells always qualify; occupied cells qualify only when their
    occupant kind appears in list_of_e.
    """
    neighbors = []
    for dx in [-1, 0, 1]:
        x = loc[0] + dx
        # Avoid out-of-bound indexing
        if not (0 <= x < wrld.width()):
            continue
        for dy in [-1, 0, 1]:
            # Staying in place is not a neighbor
            if dx == 0 and dy == 0:
                continue
            y = loc[1] + dy
            # Avoid out-of-bound indexing
            if not (0 <= y < wrld.height()):
                continue
            if obstacles.EXIT in list_of_e and wrld.exit_at(x, y):
                neighbors.append((x, y))
            if obstacles.MONSTER in list_of_e and wrld.monsters_at(x, y):
                neighbors.append((x, y))
            if obstacles.PLAYER in list_of_e and wrld.characters_at(x, y):
                neighbors.append((x, y))
            if obstacles.WALL in list_of_e and wrld.wall_at(x, y):
                neighbors.append((x, y))
            if wrld.empty_at(x, y):
                neighbors.append((x, y))
    return neighbors
# Checks if location is in range of a bomb
def bomb_check(self, loc, wrld):
    """Return True when loc shares a row or column with a bomb within
    wrld.expl_range cells.

    Bug fix: ``range(-r, r)`` excluded +r, so a bomb exactly expl_range
    cells away in the +x or +y direction was missed; the upper bound is now
    inclusive.
    """
    bomb_range = wrld.expl_range
    for dx in range(-bomb_range, bomb_range + 1):
        x = loc[0] + dx
        # Avoid out-of-bound indexing
        if 0 <= x < wrld.width() and wrld.bomb_at(x, loc[1]):
            return True
    for dy in range(-bomb_range, bomb_range + 1):
        y = loc[1] + dy
        # Avoid out-of-bound indexing
        if 0 <= y < wrld.height() and wrld.bomb_at(loc[0], y):
            return True
    return False
# Checks if location is in range of an explosion
def expl_check(self, loc, wrld):
    """Return True when loc shares a row or column with an explosion within
    wrld.expl_range cells.

    Bug fix: ``range(-r, r)`` excluded +r, so an explosion exactly
    expl_range cells away in the +x or +y direction was missed; the upper
    bound is now inclusive.
    """
    bomb_range = wrld.expl_range
    for dx in range(-bomb_range, bomb_range + 1):
        x = loc[0] + dx
        # Avoid out-of-bound indexing
        if 0 <= x < wrld.width() and wrld.explosion_at(x, loc[1]):
            return True
    for dy in range(-bomb_range, bomb_range + 1):
        y = loc[1] + dy
        # Avoid out-of-bound indexing
        if 0 <= y < wrld.height() and wrld.explosion_at(loc[0], y):
            return True
    return False
#Returns states and potentially a list of threats
def evaluateState(self, wrld):
"""Classify the world for our character.

Returns (state, threats): DEAD when the character is gone; UNSAFE plus the
monster names sorted closest-first when any monster is within self.bound A*
steps; NEAR_BOMB when any bombs/explosions exist; SAFE when A* reaches the
exit; BLOCKED otherwise.
"""
monsters = []
try:
chara = next(iter(wrld.characters.values()))
character = chara[0]
except (IndexError, StopIteration):
return state.DEAD, []
try:
monsters = list(wrld.monsters.values())
except (StopIteration):
pass
loc = (character.x, character.y)
counters = {}
#Calculate each distance to the monster
for monster in monsters:
m = monster[0]
monsterType = m.name
mloc = (m.x, m.y)
monster_came_from, monster_cost_so_far = self.AStar(wrld, loc, mloc, [obstacles.MONSTER, obstacles.PLAYER, obstacles.EXIT])
counter = 0
path = mloc
while path != loc:
try:
path = monster_came_from[path]
except (KeyError):
# Unreachable monster: treat as far away (100 steps).
counter = 100
break
counter += 1
counters[monsterType] = counter
counts = [(k, v) for k, v in counters.items() if v <= 4]
flag = False
monsterTypes = []
for count in counts:
if count[1] <= self.bound:
flag = True
monsterTypes.append((count[0], count[1]))
threats = []
# Sort the monster list in order of closest
monsterTypes.sort(key=lambda x: x[1])
for monster in monsterTypes:
threats.append(monster[0])
if flag:
return state.UNSAFE, threats
if (wrld.bombs or wrld.explosions):
return state.NEAR_BOMB, []
# Does safe path exist?
came_from, cost_so_far = self.AStar(wrld, loc, wrld.exitcell, [obstacles.EXIT])
for path in came_from:
if (path == wrld.exitcell):
return state.SAFE, []
return state.BLOCKED, []
class state(Enum):
# High-level situation of our character, as produced by evaluateState().
SAFE = 1
UNSAFE = 2
DEAD = 3
NEAR_BOMB = 4
BLOCKED = 5
# Entity kinds; getNeighbors() treats the kinds listed in list_of_e as passable.
class obstacles(Enum):
EXIT = 1
MONSTER = 2
WALL = 3
BOMB = 4
EXPLOSION = 5
PLAYER = 6 | ifeeney/CS4341-projects | Bomberman/group10/testcharacter.py | testcharacter.py | py | 20,664 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "sys.path.insert",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "sys.path",
"line_number": 8,
"usage_type": "attribute"
},
{
"api_name": "entity.CharacterEntity",
"line_number": 17,
"usage_type": "name"
},
{
"api_name": "math.inf",
"li... |
38930582027 | import torch
from torch import nn
def conv_block(in_channel, channel, kernel_size=3, stride=1, padding=1, inplace=False):
    """Conv2d -> InstanceNorm2d(affine, momentum=0.4) -> ELU building block."""
    layers = [
        nn.Conv2d(in_channel, channel, kernel_size=kernel_size, stride=stride,
                  padding=padding, bias=True),
        nn.InstanceNorm2d(channel, affine=True, momentum=0.4),
        nn.ELU(inplace=inplace),
    ]
    return nn.Sequential(*layers)
class ResNextBlock(nn.Module):
"""ResNeXt-style block with one mixed-kernel branch per split.

(Translated from the original Chinese note:) plain ResNeXt was not great,
so the original '+' merge was changed to a concat; the residual is
unchanged.  The author suggests the name ResNextX.
"""
def __init__(self, in_channel, out_channel, split_num: int = 4, **kwargs):
super().__init__()
mid_channel = 4
self.split_num = split_num
blocks = []
# One (kh, kw) kernel shape per branch.  Only 4 shapes are defined, so
# split_num > 4 would raise IndexError below -- keep split_num <= 4.
kernel_style = [
(1, 1),
(1, 3),
(3, 1),
(3, 3)
]
# "Same" padding for each kernel shape.
padding_style = [
(k1 // 2, k2 // 2) for k1, k2 in kernel_style
]
inplace = kwargs.get("inplace", True)
for i in range(split_num):
blocks.append(nn.Sequential(
conv_block(in_channel, mid_channel, kernel_size=kernel_style[i], stride=1, padding=padding_style[i], inplace=inplace),
conv_block(mid_channel, mid_channel, kernel_size=3, stride=1, padding=1, inplace=inplace),
conv_block(mid_channel, out_channel, kernel_size=kernel_style[i], stride=1, padding=padding_style[i], inplace=inplace),
# conv_block(out_channel, mid_channel, kernel_size=1, stride=1, padding=0, inplace=inplace),
# conv_block(mid_channel, mid_channel, kernel_size=3, stride=1, padding=1, inplace=inplace),
# conv_block(mid_channel, out_channel, kernel_size=1, stride=1, padding=0, inplace=inplace)
))
# 1x1 block squeezing the concatenated branches back to out_channel.
self.scale = conv_block(out_channel * split_num, out_channel, kernel_size=1, stride=1, padding=0, inplace=inplace)
self.blocks = nn.ModuleList(blocks)
# 1x1 projection for the residual path when channel counts differ.
if in_channel != out_channel:
self.skip = nn.Conv2d(in_channel, out_channel, kernel_size=1, stride=1, padding=0)
else:
self.skip = None
def forward(self, x):
if self.skip:
res = self.skip(x)
else:
res = x
# Each branch output gets the residual added before concatenation.
outputs = []
for i in range(self.split_num):
outputs.append(torch.add(self.blocks[i](x), res))
# Concat along channels, squeeze with the 1x1 block, add the residual again.
x = self.scale(torch.cat(outputs, dim=1))
return torch.add(x, res)
| QiliangFan/Drive | models/resnetx.py | resnetx.py | py | 2,452 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "torch.nn.InstanceNorm2d",
"line_number": 5,
"usage_type": "attribute"
},
{
"api_name": "torch.nn",
"line_number": 5,
"usage_type": "name"
},
{
"api_name": "torch.nn.Sequential",
"line_number": 6,
"usage_type": "call"
},
{
"api_name": "torch.nn",
... |
2063152797 | from flask import Flask, render_template, redirect, url_for, flash, request, Blueprint, abort
from flask_login import LoginManager, current_user, login_user, logout_user, login_required
from flask_migrate import Migrate
from werkzeug.urls import url_parse
from models import *
from forms import *
from flask_admin import Admin, expose, AdminIndexView
from flask_admin.contrib.sqla import ModelView
from flask_admin.form import ImageUploadField
import os
from jinja2 import Environment
import base64
app = Flask(__name__)
# NOTE(review): secret key is hardcoded -- acceptable only for a demo; load it
# from the environment for any real deployment.
app.config['SECRET_KEY'] = 'supersecretkey'
app.config['SQLALCHEMY_DATABASE_URI'] = 'sqlite:///marketplace.db'
app.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = False
app.config['UPLOAD_FOLDER'] = 'static'
# Bind the shared SQLAlchemy handle (imported from models) and create tables.
db.init_app(app)
with app.app_context():
db.create_all()
migrate = Migrate(app, db)
# Flask-Login: unauthenticated users get redirected to the 'login' view.
login = LoginManager(app)
login.login_view = 'login'
@login.user_loader
def load_user(id):
# Flask-Login callback: reload the User for the id stored in the session.
return User.query.get(int(id))
@app.route('/')
@app.route('/home')
def index():
    """Landing page: renders every product in the marketplace."""
    # (Removed a commented-out one-off product-seeding snippet that read a
    # local image file; dead code.)
    return render_template('home.html', products=Product.query)
@app.route('/login', methods=['GET', 'POST'])
def login():
"""Login form: authenticates by email + password, then redirects to /lc."""
if current_user.is_authenticated:
return redirect(url_for('index'))
form = LoginForm()
if form.validate_on_submit():
user = User.query.filter_by(email=form.email.data).first()
if user is None or not user.check_password(form.password.data):
# Flash message is Russian for "Incorrect email or password".
flash('Неправильная почта или пароль')
return redirect(url_for('login'))
login_user(user, remember=form.remember_me.data)
return redirect(url_for('lc'))
# Disabled 'next' redirect handling kept for reference:
# next_page = request.args.get('next')
# # if not next_page or url_parse(next_page).netloc != '':
# # next_page = url_for('index')
# # return redirect(next_page)
return render_template('login.html', title='Sign In', form=form)
@app.route('/logout')
def logout():
# End the session and go back to the landing page.
logout_user()
return redirect(url_for('index'))
@app.route('/register', methods=['GET', 'POST'])
def register():
"""Registration form for anonymous users; redirects to login on success."""
if current_user.is_authenticated:
return redirect(url_for('index'))
form = RegistrationForm()
if form.validate_on_submit():
# NOTE(review): the raw password is handed to the User constructor --
# presumably the model hashes it; verify, otherwise it is stored in
# plain text.
user = User(username=form.username.data, email=form.email.data, password=form.password.data, role=form.role.data)
db.session.add(user)
db.session.commit()
return redirect(url_for('login'))
return render_template('register.html', title='Register', form=form)
@app.route('/lc')
@login_required
def lc():
# Personal cabinet page; requires an authenticated user.
return render_template('all.html')
@app.route('/uploadproduct', methods=['GET', 'POST'])
def upload():
"""Product-creation form; the uploaded image is stored base64-encoded in the DB."""
form = ProductForm()
if form.validate_on_submit():
# The whole image file is read into memory and base64-encoded.
image = Product(name=form.name.data, description=form.description.data, price=form.price.data, quantity=form.quantity.data, image=base64.b64encode(form.image.data.read()).decode(), category=form.category.data, seller_id=current_user.id)
db.session.add(image)
db.session.commit()
return redirect(url_for('lc'))
return render_template('product.html', title='nnn', form=form)
class UserView(ModelView):
    """Admin CRUD view over User rows; visible to admins only."""

    def is_accessible(self):
        # Bug fix: check is_authenticated first.  The anonymous user object
        # has no ``role`` attribute, so the bare ``current_user.role`` access
        # raised AttributeError for logged-out visitors (ProductView already
        # performs this check -- this makes the two views consistent).
        return current_user.is_authenticated and current_user.role == 'admin'
class ProductView(ModelView):
# Admin CRUD view over Product rows.
def is_accessible(self):
return current_user.is_authenticated and current_user.role == 'admin'
def get_query(self):
# NOTE(review): implicitly returns None for non-admins (the per-seller
# filter is commented out).  Unreachable in practice because
# is_accessible() already gates on the admin role -- confirm before
# re-enabling seller access.
if current_user.role == 'admin':
return self.session.query(self.model)
# else:
# return self.session.query(self.model).filter_by(seller_id=current_user.id)
def get_count_query(self):
# Same implicit-None caveat as get_query().
if current_user.role == 'admin':
return db.session.query(db.func.count(self.model.id))
class CustomAdminIndexView(AdminIndexView):
# Admin dashboard root: only authenticated admins may see it.
@expose('/')
def index(self):
if not current_user.is_authenticated or current_user.role != 'admin':
abort(404) # 404 (not 403) so the admin panel's existence stays hidden from non-admins
return super().index()
# Register the admin UI with the access-controlled index view and model views.
admin = Admin(app, index_view=CustomAdminIndexView())
admin.add_view(UserView(User, db.session))
admin.add_view(ProductView(Product, db.session, category='Products', name='Edit Products'))
if __name__ == '__main__':
app.run(debug=True) | Dimmj/market12 | market/app.py | app.py | py | 4,672 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "flask.Flask",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "flask_migrate.Migrate",
"line_number": 24,
"usage_type": "call"
},
{
"api_name": "flask_login.LoginManager",
"line_number": 25,
"usage_type": "call"
},
{
"api_name": "flask.rend... |
31243963609 | import argparse
import datetime
import pycloudlib
CI_DEFAULT_TAG = "uaclient"


def get_parser():
    """Build the CLI argument parser for the GCP cleanup script.

    Fix: the --before-date help said ``MM/DD/YY`` while the code parses the
    value with ``%m/%d/%Y`` (four-digit year); the help now matches.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "-t", "--tag", dest="tag", action="store",
        default=CI_DEFAULT_TAG,
        help=(
            "Tag to determine which instances will be deleted."
            "If the tag is present in the instance name, it will "
            "be marked for deletion. "
            "Default: {}".format(CI_DEFAULT_TAG))
    )
    parser.add_argument(
        "-b", "--before-date", dest="before_date", action="store",
        help=("Resources created before this date will be deleted."
              " Format: MM/DD/YYYY")
    )
    parser.add_argument(
        "--credentials-path", dest="credentials_path",
        help="""
        Path to json file representing the GCP credentials. That file
        must a be a json dict containing all the necessary credentials
        to manage GCP resources."""
    )
    parser.add_argument(
        "--project-id", dest="project_id",
        help="Name of the project id this script will operate on"
    )
    parser.add_argument(
        "--region", dest="region",
        help="Name of the region this script will operate on"
    )
    parser.add_argument(
        "--zone", dest="zone",
        help="Name of the zone this script will operate on"
    )
    return parser
def clean_gcp(credentials_path, project_id, tag, before_date, region, zone):
    """Delete stale GCE instances in the given project/region/zone.

    An instance is deleted when it is older than ``before_date - 2 days``
    (regardless of its name), or when its name contains *tag* and it was
    created before *before_date*.
    """
    gce = pycloudlib.GCE(
        tag='cleanup',
        credentials_path=credentials_path,
        project=project_id,
        region=region,
        zone=zone
    )
    all_instances = gce.compute.instances().list(
        project=gce.project,
        zone=gce.zone
    ).execute()
    for instance in all_instances.get('items', []):
        # Bug fix: the format string used "%Y-%M-%d" -- %M is *minute*, not
        # month, so every creation date parsed with month=January.  The date
        # part of creationTimestamp is YYYY-MM-DD, which is "%Y-%m-%d".
        created_at = datetime.datetime.strptime(
            instance["creationTimestamp"].split("T")[0], "%Y-%m-%d"
        )
        # If the machine is running for more than 2 days, we should
        # delete it, regardless of the name tag
        if created_at < before_date - datetime.timedelta(days=2):
            print("Deleting instance {} ...".format(
                instance['name']))
            instance = gce.get_instance(
                instance_id=instance['name']
            )
            instance.delete()
        elif tag in instance['name'] and created_at < before_date:
            print("Deleting instance {} ...".format(
                instance['name']))
            instance = gce.get_instance(
                instance_id=instance['name']
            )
            instance.delete()
if __name__ == '__main__':
parser = get_parser()
args = parser.parse_args()
# Cutoff: explicit --before-date (parsed as %m/%d/%Y) or "yesterday" by default.
if args.before_date:
before_date = datetime.datetime.strptime(
args.before_date, "%m/%d/%Y"
)
else:
before_date = datetime.datetime.today() - datetime.timedelta(days=1)
clean_gcp(
credentials_path=args.credentials_path,
project_id=args.project_id,
tag=args.tag,
before_date=before_date,
region=args.region,
zone=args.zone
)
| canonical/server-test-scripts | ubuntu-advantage-client/gcp_cleanup.py | gcp_cleanup.py | py | 3,138 | python | en | code | 8 | github-code | 36 | [
{
"api_name": "argparse.ArgumentParser",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "pycloudlib.GCE",
"line_number": 49,
"usage_type": "call"
},
{
"api_name": "datetime.datetime.strptime",
"line_number": 63,
"usage_type": "call"
},
{
"api_name": "dat... |
28657759948 | from torchvision import transforms
from torch.utils.data import dataset, dataloader
from torchvision.datasets.folder import default_loader
from utils.RandomErasing import RandomErasing
from utils.RandomSampler import RandomSampler
from opt import opt
import glob
import pandas as pd
import numpy as np
import os.path as osp
class Data(object):
# Bundles the train/test/query datasets and their DataLoaders for the
# AICity vehicle re-id evaluation split.
def __init__(self):
# paper is (384, 128)
train_transform = transforms.Compose([
transforms.Resize((256, 256), interpolation=3),
transforms.RandomHorizontalFlip(),
transforms.ToTensor(),
transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),
RandomErasing(probability=0.5, mean=[0.0, 0.0, 0.0])
])
test_transform = transforms.Compose([
transforms.Resize((256, 256), interpolation=3),
transforms.ToTensor(),
transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
])
self.trainset = EvaluationAICityCar(train_transform, 'train', opt.data_path)
self.testset = EvaluationAICityCar(test_transform, 'test', opt.data_path)
self.queryset = EvaluationAICityCar(test_transform, 'query', opt.data_path)
# Identity-balanced sampling: opt.batchid ids x opt.batchimage images per id.
self.train_loader = dataloader.DataLoader(self.trainset,
sampler=RandomSampler(self.trainset, batch_id=opt.batchid,
batch_image=opt.batchimage),
batch_size=opt.batchid * opt.batchimage, num_workers=8,
pin_memory=True)
self.test_loader = dataloader.DataLoader(self.testset, batch_size=opt.batchtest, num_workers=1, pin_memory=True)
self.query_loader = dataloader.DataLoader(self.queryset, batch_size=opt.batchtest, num_workers=1,
pin_memory=True)
def process(data_path, dtype, num):
"""Collect (image path, car id) pairs for a cross-validation split.

'train' uses every folder0..folder9 except *num*; 'test'/'query' read the
file list from folder *num*'s test.txt / query.txt.  Car ids come from
train_label.csv, indexed by a numeric id embedded in each file name.
"""
img = []
car_id = []
id_list = np.array(pd.read_csv(data_path + '/train_label.csv', header=None))
if dtype == 'train':
for i in range(10):
if i != num:
path = data_path + '/folder' + str(i) + '/'
dir_path = glob.glob(osp.join(path, '*.jpg'))
for j in range(len(dir_path)):
img.append(dir_path[j])
tmp = str(dir_path[j])
# assumes file names end in a 6-digit image id before '.jpg' -- TODO confirm
car_id.append(id_list[int(tmp[-10:-4])-1][0])
elif dtype == 'test':
path = data_path + '/folder' + str(num) + '/test.txt'
data = np.array(pd.read_csv(path, header=None))
for i in range(data.shape[0]):
tmp = data_path + '/folder' + str(num) + '/' + data[i]
img.append(tmp[0])
# NOTE(review): str(data[i]) stringifies a 1-element ndarray row; the
# [-12:-6] slice relies on that exact repr -- verify against the data.
tmp = str(data[i])
car_id.append(id_list[int(tmp[-12:-6]) - 1][0])
elif dtype == 'query':
path = data_path + '/folder' + str(num) + '/query.txt'
data = np.array(pd.read_csv(path, header=None))
for i in range(data.shape[0]):
tmp = data_path + '/folder' + str(num) + '/' + data[i]
img.append(tmp[0])
tmp = str(data[i])
car_id.append(id_list[int(tmp[-12:-6]) - 1][0])
return img, car_id
class EvaluationAICityCar(dataset.Dataset):
# Torch dataset over the (path, car id) pairs produced by process();
# fold 6 is hard-coded as the held-out evaluation fold.
def __init__(self, transform, dtype, data_path):
super(EvaluationAICityCar, self).__init__()
self.transform = transform
self.loader = default_loader
self.data_path = data_path
self.imgs, self.id = process(self.data_path, dtype, num=6)
def __getitem__(self, index):
path = self.imgs[index]
# Remap raw car ids to contiguous labels: ids <= 95 shift down by 1,
# larger ids subtract 146 -- presumably ids 96-145 are absent from this
# split; verify against train_label.csv.
if self.id[index] <= 95:
target = self.id[index] - 1
else:
target = self.id[index] - 146
img = self.loader(path)
if self.transform is not None:
img = self.transform(img)
return img, target
def __len__(self):
return len(self.imgs)
| DavisonHu/AICity-track2-Re-id | loader/Evaluation_AICity_data.py | Evaluation_AICity_data.py | py | 4,017 | python | en | code | 1 | github-code | 36 | [
{
"api_name": "torchvision.transforms.Compose",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "torchvision.transforms",
"line_number": 16,
"usage_type": "name"
},
{
"api_name": "torchvision.transforms.Resize",
"line_number": 17,
"usage_type": "call"
},
{
... |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.