hexsha stringlengths 40 40 | size int64 2 1.02M | ext stringclasses 10
values | lang stringclasses 1
value | max_stars_repo_path stringlengths 4 245 | max_stars_repo_name stringlengths 6 130 | max_stars_repo_head_hexsha stringlengths 40 40 | max_stars_repo_licenses listlengths 1 10 | max_stars_count int64 1 191k ⌀ | max_stars_repo_stars_event_min_datetime stringlengths 24 24 ⌀ | max_stars_repo_stars_event_max_datetime stringlengths 24 24 ⌀ | max_issues_repo_path stringlengths 4 245 | max_issues_repo_name stringlengths 6 130 | max_issues_repo_head_hexsha stringlengths 40 40 | max_issues_repo_licenses listlengths 1 10 | max_issues_count int64 1 67k ⌀ | max_issues_repo_issues_event_min_datetime stringlengths 24 24 ⌀ | max_issues_repo_issues_event_max_datetime stringlengths 24 24 ⌀ | max_forks_repo_path stringlengths 4 245 | max_forks_repo_name stringlengths 6 130 | max_forks_repo_head_hexsha stringlengths 40 40 | max_forks_repo_licenses listlengths 1 10 | max_forks_count int64 1 105k ⌀ | max_forks_repo_forks_event_min_datetime stringlengths 24 24 ⌀ | max_forks_repo_forks_event_max_datetime stringlengths 24 24 ⌀ | content stringlengths 2 1.02M | avg_line_length float64 1 417k | max_line_length int64 1 987k | alphanum_fraction float64 0 1 | content_no_comment stringlengths 0 1.01M | is_comment_constant_removed bool 1
class | is_sharp_comment_removed bool 1
class |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
1c2fb0f86f4a3e69043e5a1a430d789099947416 | 28,918 | py | Python | src/train_model.py | aaronbuhendwa/twophasePINN | 77bdcb2a07ab31dc9ab43623cf6b776a97c0b5c8 | [
"MIT"
] | 5 | 2021-06-09T07:03:40.000Z | 2021-12-27T08:43:52.000Z | src/train_model.py | aaronbuhendwa/twophasePINN | 77bdcb2a07ab31dc9ab43623cf6b776a97c0b5c8 | [
"MIT"
] | null | null | null | src/train_model.py | aaronbuhendwa/twophasePINN | 77bdcb2a07ab31dc9ab43623cf6b776a97c0b5c8 | [
"MIT"
] | 3 | 2021-02-04T15:21:32.000Z | 2021-12-14T14:34:28.000Z | import sys
sys.path.append("../utilities")
import os
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'
os.environ["CUDA_DEVICE_ORDER"]="PCI_BUS_ID"
GPU_ID = "0"
os.environ["CUDA_VISIBLE_DEVICES"]= GPU_ID
import tensorflow as tf
tf.compat.v1.logging.set_verbosity(tf.compat.v1.logging.ERROR)
from tensorflow.keras import backend as K
import numpy as np
import pandas as pd
import scipy.io
from generate_points import *
from utilities import *
import time
import math
import glob
from datetime import datetime
import shutil
import logging
np.random.seed(1234)
tf.set_random_seed(1234)
class TwoPhasePinn:
    ''' This class implements a physics-informed neural network. It approximates the incompressible two-phase Navier-Stokes equations in 2D
    using a Volume-of-Fluid approach. Thus, the neural network maps (x, y, t) -> (u, v, p, a) where a is the volume fraction field. The placeholders and
    losses have to be constructed for each case individually as they depend on the boundary conditions. The present implementation corresponds to the
    rising bubble case, see paper.

    Args:
        sess: tensorflow session
        dtype: data type
        hidden_layers: list containing number of nodes for each hidden layer
        activation_functions: dictionary assigning layers to activation function
        adaptive_activation_coeff: dictionary assigning layers to adaptive activation coeff
        adaptive_activation_init: dictionary assigning initial value to adaptive activation coeff
        adaptive_activation_n: list containing the scale factor of the adapative activation coeff for each layer - must have same length as hidden_layers
        use_ad_act: bool indicating whether to use adaptive activation coeff
        loss_weights_A: loss weight for volume fraction loss
        loss_weights_PDE: loss weights for PDEs
        mu: dynamic viscosities of the two phases [mu_phase1, mu_phase2]
        sigma: surface tension coefficient
        g: gravitational acceleration
        rho: densities of the two phases [rho_phase1, rho_phase2]
        u_ref: reference velocity for nondimensionalization
        L_ref: reference length for nondimensionalization
        checkpoint_interval: interval in epochs indicating when to save model
        epochs: list of epochs
        batch_sizes: list of batch sizes - should have same length as epochs
        learning_rates: list of learning rates - should have same length as epochs
    '''
    def __init__(self, sess, dtype, hidden_layers, activation_functions, adaptive_activation_coeff, adaptive_activation_n,
                 adaptive_activation_init, use_ad_act, loss_weights_A, loss_weights_PDE, mu, sigma, g, rho, u_ref, L_ref, checkpoint_interval, epochs, batch_sizes,
                 learning_rates):
        # CREATE OUTPUT FOLDER AND GET LOGGER
        self.dirname, logpath = self.make_output_dir()
        self.logger = self.get_logger(logpath)
        # PHYSICAL PARAMETERS
        self.mu1 = mu[0]
        self.mu2 = mu[1]
        self.sigma = sigma
        self.g = g
        self.rho1 = rho[0]
        self.rho2 = rho[1]
        self.U_ref = u_ref
        self.L_ref = L_ref
        # MEMBERS FOR SAVING CHECKPOINTS AND TRACKING
        # epoch_loss_checkpoints holds the best loss seen so far; initialized large so
        # the first checkpoint is always written
        self.epoch_loss_checkpoints = 1e10
        self.checkpoint_interval = checkpoint_interval
        self.mean_epoch_time = 0
        # SGD OPT MEMBERS - one entry per training stage
        self.learning_rates = learning_rates
        self.epochs = epochs
        self.batch_sizes = batch_sizes
        # TENSORFLOW SESSION
        self.sess = sess
        K.set_session(self.sess)
        self.print("Building Computational Graph")
        # PLACEHOLDERS - suffixes: A = labeled volume fraction points, N = north boundary,
        # E/W = east/west boundaries (periodic), NSEW = boundary points with prescribed
        # velocity, PDE = interior collocation points. f_PDE is the residual target
        # (presumably zeros; supplied by the training data - confirm in generate_points).
        x_A = tf.placeholder(dtype=dtype, shape=[None, 1], name="x_A")
        y_A = tf.placeholder(dtype=dtype, shape=[None, 1], name="y_A")
        t_A = tf.placeholder(dtype=dtype, shape=[None, 1], name="t_A")
        a_A = tf.placeholder(dtype=dtype, shape=[None, 1], name="a_A")
        x_N = tf.placeholder(dtype=dtype, shape=[None, 1], name="x_N")
        y_N = tf.placeholder(dtype=dtype, shape=[None, 1], name="y_N")
        t_N = tf.placeholder(dtype=dtype, shape=[None, 1], name="t_N")
        p_N = tf.placeholder(dtype=dtype, shape=[None, 1], name="p_N")
        x_E = tf.placeholder(dtype=dtype, shape=[None, 1], name="x_E")
        y_E = tf.placeholder(dtype=dtype, shape=[None, 1], name="y_E")
        x_W = tf.placeholder(dtype=dtype, shape=[None, 1], name="x_W")
        y_W = tf.placeholder(dtype=dtype, shape=[None, 1], name="y_W")
        t_EW = tf.placeholder(dtype=dtype, shape=[None, 1], name="t_EW")
        x_NSEW = tf.placeholder(dtype=dtype, shape=[None, 1], name="x_NSEW")
        y_NSEW = tf.placeholder(dtype=dtype, shape=[None, 1], name="y_NSEW")
        t_NSEW = tf.placeholder(dtype=dtype, shape=[None, 1], name="t_NSEW")
        u_NSEW = tf.placeholder(dtype=dtype, shape=[None, 1], name="u_NSEW")
        v_NSEW = tf.placeholder(dtype=dtype, shape=[None, 1], name="v_NSEW")
        x_PDE = tf.placeholder(dtype=dtype, shape=[None, 1], name="x_PDE")
        y_PDE = tf.placeholder(dtype=dtype, shape=[None, 1], name="y_PDE")
        t_PDE = tf.placeholder(dtype=dtype, shape=[None, 1], name="t_PDE")
        f_PDE = tf.placeholder(dtype=dtype, shape=[None, 1], name="f_PDE")
        self.learning_rate_opt = tf.placeholder(dtype=dtype, shape=[], name="learning_rate")
        # Placeholder names must match the column names of the corresponding data frames;
        # get_feed_dict relies on this ordering and naming.
        data_set_names = ["A", "PDE", "N", "EW", "NSEW"]
        self.placeholders = dict((name, []) for name in data_set_names)
        self.placeholders["A"].extend([x_A, y_A, t_A, a_A])
        self.placeholders["PDE"].extend([x_PDE, y_PDE, t_PDE, f_PDE])
        self.placeholders["N"].extend([x_N, y_N, t_N, p_N])
        self.placeholders["EW"].extend([x_E, y_E, x_W, y_W, t_EW])
        self.placeholders["NSEW"].extend([x_NSEW, y_NSEW, t_NSEW, u_NSEW, v_NSEW])
        # VARIABLES ADAPTIVE ACTIVATION FOR HIDDEN LAYERS
        self.sanity_check_activation_functions(activation_functions, adaptive_activation_coeff, adaptive_activation_n, adaptive_activation_init, hidden_layers)
        self.ad_act_coeff = {}
        if use_ad_act:
            for key in adaptive_activation_coeff:
                initial_value = adaptive_activation_init[key]
                self.ad_act_coeff[key] = tf.Variable(initial_value, name=key)
        activation_functions_dict = self.get_activation_function_dict(activation_functions, adaptive_activation_coeff, adaptive_activation_n, hidden_layers, use_ad_act)
        # NETWORK ARCHITECTURE - exponential keeps the pressure output positive,
        # sigmoid bounds the volume fraction output to (0, 1)
        outputs = ["output_u", "output_v", "output_p", "output_a"]
        activations_output = [None, None, "exponential", "sigmoid"]
        output_layer = list(zip(outputs, activations_output))
        nn = NNCreator(dtype)
        self.model = nn.get_model_dnn(3, hidden_layers, output_layer, activation_functions_dict, use_ad_act)
        # LOSSES ASSOCIATED WITH A
        # NOTE(review): loss_weights_A is accepted but not applied below; loss_a_A
        # enters the total loss with weight 1.
        output_tensors = self.model(tf.concat([x_A, y_A, t_A], 1))
        loss_a_A = tf.reduce_mean(tf.square(a_A - output_tensors[3]))
        # LOSSES ASSOCIATED WITH FIXED VALUE NORTH SOUTH EAST WEST
        start = time.time()
        output_tensors = self.model(tf.concat([x_NSEW, y_NSEW, t_NSEW], 1))
        loss_u_NSEW = tf.reduce_mean(tf.square(u_NSEW - output_tensors[0]))
        loss_v_NSEW = tf.reduce_mean(tf.square(v_NSEW - output_tensors[1]))
        loss_NSEW = tf.reduce_sum(tf.stack([loss_u_NSEW, loss_v_NSEW]))
        self.print(time.time()-start, "s")
        # LOSSES ASSOCIATED WITH FIXED PRESSURE NORTH
        start = time.time()
        output_tensors = self.model(tf.concat([x_N, y_N, t_N], 1))
        loss_p_N = tf.reduce_mean(tf.square(p_N - output_tensors[2]))
        self.print(time.time()-start, "s")
        # LOSSES ASSOCIATED WITH PERIODIC BOUNDARY EAST WEST - penalize differences
        # between the network outputs on the east and west boundary at equal times
        start = time.time()
        output_east = self.model(tf.concat([x_E, y_E, t_EW], 1))
        output_west = self.model(tf.concat([x_W, y_W, t_EW], 1))
        loss_u_EW = tf.reduce_mean(tf.square(output_east[0] - output_west[0]))
        loss_v_EW = tf.reduce_mean(tf.square(output_east[1] - output_west[1]))
        loss_p_EW = tf.reduce_mean(tf.square(output_east[2] - output_west[2]))
        loss_EW = tf.reduce_sum(tf.stack([loss_u_EW, loss_v_EW, loss_p_EW]))
        self.print(time.time()-start, "s")
        # All boundary losses are aggregated into a single scalar
        loss_NSEW = tf.reduce_sum(tf.stack([loss_p_N, loss_EW, loss_NSEW]))
        # LOSSES ASSOCIATED WITH PDEs -> PHYSICS INFORMED NEURAL NETS
        start = time.time()
        PDE_tensors = self.PDE_caller(x_PDE, y_PDE, t_PDE)
        loss_PDE_m = tf.losses.mean_squared_error(f_PDE, PDE_tensors[0])
        loss_PDE_u = tf.losses.mean_squared_error(f_PDE, PDE_tensors[1])
        loss_PDE_v = tf.losses.mean_squared_error(f_PDE, PDE_tensors[2])
        loss_PDE_a = tf.losses.mean_squared_error(f_PDE, PDE_tensors[3])
        self.print(time.time()-start, "s")
        # Weighted sum of the four PDE residual losses (weights from loss_weights_PDE)
        loss_PDE = tf.tensordot(tf.stack([loss_PDE_m, loss_PDE_u, loss_PDE_v, loss_PDE_a]), np.array(loss_weights_PDE).astype("float32"), 1)
        # TOTAL LOSS
        loss_complete = loss_a_A + loss_NSEW + loss_PDE
        # OPTIMIZERS
        start = time.time()
        self.optimizer = tf.train.AdamOptimizer(self.learning_rate_opt)
        self.minimize_op = self.optimizer.minimize(loss_complete)
        self.print(time.time()-start, "s")
        # DEFINING LISTS AND DICTIONARIES FOR TRACKING LOSSES AND SPECIFIC TENSORS
        # loss_tensor_list and loss_list must stay in the same order; assign_batch_losses
        # zips them positionally
        self.loss_tensor_list = [loss_complete, loss_a_A, loss_NSEW, loss_PDE_m, loss_PDE_u, loss_PDE_v, loss_PDE_a]
        self.loss_list = ["l", "a", "NSEW", "m", "u", "v", "PDE_a"]
        self.epoch_loss = dict.fromkeys(self.loss_list, 0)
        self.loss_history = dict((loss, []) for loss in self.loss_list)
        self.ad_act_coeff_history = dict((key, []) for key in self.ad_act_coeff)
        # INITIALIZING VARIABLES
        self.sess.run(tf.global_variables_initializer())
        # SET WEIGHTS AND OPTIMIZER STATE IF AVAILABLE
        self.set_variables()
        # FINALIZING - save initial weights, then freeze the graph so accidental graph
        # growth during training raises immediately
        self.model.save_weights(os.path.join(self.dirname, "Weights_loss_%.4e.h5" % (self.epoch_loss_checkpoints)))
        self.sess.graph.finalize()
def make_output_dir(self):
if not os.path.exists("checkpoints"):
os.mkdir("checkpoints")
dirname = os.path.abspath(os.path.join("checkpoints", datetime.now().strftime("%b-%d-%Y_%H-%M-%S")))
os.mkdir(dirname)
shutil.copyfile(__file__, os.path.join(dirname, __file__))
shutil.copyfile("generate_points.py", os.path.join(dirname, "generate_points.py"))
logpath = os.path.join(dirname, "output.log")
return dirname, logpath
def get_logger(self, logpath):
logger = logging.getLogger(__name__)
logger.setLevel(logging.DEBUG)
sh = logging.StreamHandler()
sh.setLevel(logging.DEBUG)
sh.setFormatter(logging.Formatter('%(message)s'))
fh = logging.FileHandler(logpath)
logger.addHandler(sh)
logger.addHandler(fh)
return logger
def sanity_check_activation_functions(self, activation_functions, adaptive_activations, adaptive_activation_n, adaptive_activation_init, hidden_layers):
no_layers = len(hidden_layers)
check = 0
for key, value in list(adaptive_activations.items()):
check += sum(value)
assert no_layers*(no_layers+1)/2 == check, "Not every layer has been assigned with an adaptive activation coefficient unambiguously"
check = 0
for key, value in list(activation_functions.items()):
check += sum(value)
assert no_layers*(no_layers+1)/2 == check, "Not every layer has been assigned with an activation function unambiguously"
assert no_layers == len(adaptive_activation_n), "Not every layer has an adaptive activation precoefficient"
assert adaptive_activation_init.keys() == adaptive_activations.keys(), "Not every adaptive activation coefficient has been assigned an initial value"
def get_activation_function_dict(self, activation_functions, adaptive_activation_coeff, adaptive_activation_n, hidden_layers, use_ad_act):
activation_functions_dict = dict((key, [0, 0, 0]) for key in range(1, len(hidden_layers) + 1))
for layer_no in activation_functions_dict:
activation_functions_dict[layer_no][2] = adaptive_activation_n[layer_no-1]
for func_name, layers in activation_functions.items():
if layer_no in layers:
activation_functions_dict[layer_no][0] = func_name
if use_ad_act: # if use_ad_act is False, self.ad_act_coeff is empty!
for coeff_name, layers in adaptive_activation_coeff.items():
if layer_no in layers:
activation_functions_dict[layer_no][1] = self.ad_act_coeff[coeff_name]
return activation_functions_dict
    def compute_gradients(self, x, y, t):
        ''' Evaluates the network at (x, y, t) and builds all first and second order
        derivatives of the outputs (u, v, p, a) required by the PDE residuals via
        automatic differentiation.

        Args:
            x, y, t: placeholder tensors of shape (None, 1)
        Returns:
            Four lists of tensors: [u, u_x, u_y, u_t, u_xx, u_yy],
            [v, v_x, v_y, v_t, v_xx, v_yy], [p, p_x, p_y] and
            [a, a_x, a_y, a_t, a_xx, a_yy, a_xy]
        '''
        u, v, p, a = self.model(tf.concat([x, y, t], 1))
        u_x = tf.gradients(u, x)[0]
        u_y = tf.gradients(u, y)[0]
        u_t = tf.gradients(u, t)[0]
        u_xx = tf.gradients(u_x, x)[0]
        u_yy = tf.gradients(u_y, y)[0]
        v_x = tf.gradients(v, x)[0]
        v_y = tf.gradients(v, y)[0]
        v_t = tf.gradients(v, t)[0]
        v_xx = tf.gradients(v_x, x)[0]
        v_yy = tf.gradients(v_y, y)[0]
        p_x = tf.gradients(p, x)[0]
        p_y = tf.gradients(p, y)[0]
        a_x = tf.gradients(a, x)[0]
        a_y = tf.gradients(a, y)[0]
        a_t = tf.gradients(a, t)[0]
        # Second derivatives of the volume fraction feed the curvature computation
        a_xx = tf.gradients(a_x, x)[0]
        a_yy = tf.gradients(a_y, y)[0]
        a_xy = tf.gradients(a_x, y)[0]
        return [u, u_x, u_y, u_t, u_xx, u_yy], [v, v_x, v_y, v_t, v_xx, v_yy], [p, p_x, p_y], [a, a_x, a_y, a_t, a_xx, a_yy, a_xy]
    def PDE_caller(self, x, y, t):
        ''' Builds the nondimensional PDE residuals of the incompressible two-phase
        Navier-Stokes equations (Volume-of-Fluid form) at the collocation points.

        Returns:
            PDE_m: continuity residual (u_x + v_y)
            PDE_u: x-momentum residual
            PDE_v: y-momentum residual (contains the gravity term)
            PDE_a: volume fraction advection residual
        '''
        u_gradients, v_gradients, p_gradients, a_gradients = self.compute_gradients(x, y, t)
        u, u_x, u_y, u_t, u_xx, u_yy = u_gradients[:]
        v, v_x, v_y, v_t, v_xx, v_yy = v_gradients[:]
        p, p_x, p_y = p_gradients[:]
        a, a_x, a_y, a_t, a_xx, a_yy, a_xy = a_gradients[:]
        # Material properties blended linearly by the volume fraction a (VOF mixture rule)
        mu = self.mu2 + (self.mu1 - self.mu2) * a
        mu_x = (self.mu1 - self.mu2) * a_x
        mu_y = (self.mu1 - self.mu2) * a_y
        rho = self.rho2 + (self.rho1 - self.rho2) * a
        # Magnitude of the volume fraction gradient; eps prevents division by zero
        # away from the interface where the gradient vanishes
        abs_interface_grad = tf.sqrt(tf.square(a_x) + tf.square(a_y) + np.finfo(float).eps)
        # Interface curvature computed from the volume fraction field (used for the
        # surface tension terms below)
        curvature = - ( (a_xx + a_yy)/abs_interface_grad - (a_x**2*a_xx + a_y**2*a_yy + 2*a_x*a_y*a_xy)/tf.pow(abs_interface_grad, 3) )
        rho_ref = self.rho2
        # Nondimensional prefactors: 1/Re is local since mu varies across the interface;
        # one_We = sigma/(rho_ref*U^2*L); one_Fr = g*L/U^2 -- confirm exact
        # definitions against the paper
        one_Re = mu/(rho_ref*self.U_ref*self.L_ref)
        one_Re_x = mu_x/(rho_ref*self.U_ref*self.L_ref)
        one_Re_y = mu_y/(rho_ref*self.U_ref*self.L_ref)
        one_We = self.sigma/(rho_ref*self.U_ref**2*self.L_ref)
        one_Fr = self.g*self.L_ref/self.U_ref**2
        PDE_m = u_x + v_y
        PDE_a = a_t + u*a_x + v*a_y
        PDE_u = (u_t + u*u_x + v*u_y)*rho/rho_ref + p_x - one_We*curvature*a_x - one_Re*(u_xx + u_yy) - 2.0*one_Re_x*u_x - one_Re_y*(u_y + v_x)
        PDE_v = (v_t + u*v_x + v*v_y)*rho/rho_ref + p_y - one_We*curvature*a_y - one_Re*(v_xx + v_yy) - rho/rho_ref*one_Fr - 2.0*one_Re_y*v_y - one_Re_x*(u_y + v_x)
        return PDE_m, PDE_u, PDE_v, PDE_a
    def set_variables(self):
        ''' Implements functionality to continue training from checkpoint. Loads the weights and optimizer state
        from the .h5 file and the .mat file, respectively. This is only done if the necessary files are located in
        the same folder as this script '''
        # Restart files are discovered by the "loss" substring in the current
        # working directory (not in self.dirname)
        for file in glob.glob("*loss*"):
            if file.endswith("h5"):
                self.model.load_weights(file)
                self.print("Loading weights from file", file)
            if file.endswith("mat"):
                matfile = scipy.io.loadmat(file, squeeze_me=True)
                self.print("Setting optimizer variables according to file", file)
                optimizer_state = matfile["optimizer_state"]
                optimizer_variables = self.optimizer.variables()
                assert len(optimizer_variables) == len(optimizer_state), "Loading optimizer state failed: Not as many optimizer states saved as required, check architecture/aac compatibility!"
                # scipy's squeeze_me drops singleton dimensions, so some saved arrays
                # must be restored to the exact variable shapes before assignment
                for i in range(0, len(optimizer_variables)):
                    if optimizer_variables[i].shape == (1,): # Shapes that require (1,) are loaded as floats from .mat file, thus have to be converted to np.array
                        optimizer_state[i] = np.array([optimizer_state[i]])
                    if len(optimizer_variables[i].shape) == 2:
                        if optimizer_variables[i].shape[1] == 1: # Shapes that require (?,1) are loaded as (?,) from .mat file, thus need reshaping
                            optimizer_state[i] = optimizer_state[i].reshape(len(optimizer_state[i]),1)
                    self.sess.run(optimizer_variables[i].assign(optimizer_state[i]))
                self.print("Setting adaptive activation coefficients according to file", file)
                ad_act_coeff = matfile["ad_act_coeff"]
                # Only restore coefficients when adaptive activations are in use;
                # the saved names must match the configured coefficient names exactly
                if len(self.ad_act_coeff) > 0:
                    assert list(self.ad_act_coeff.keys()) == list(ad_act_coeff.dtype.names), "Loading adaptive activation coefficients failed: Restart coefficients %s do not match input %s" %(list(ad_act_coeff.dtype.names), list(self.ad_act_coeff.keys()))
                    for key in self.ad_act_coeff:
                        self.sess.run(self.ad_act_coeff[key].assign(float(ad_act_coeff[key])))
    def train(self, data_sets):
        ''' Implements the training loop
        Args:
            data_sets: Dictionary assigning a pandas dataframe to each loss '''
        self.check_matching_keys(data_sets)
        self.print_point_distribution(data_sets)
        self.print("\nEPOCHS: ", self.epochs, " BATCH SIZES: ", self.batch_sizes, " LEARNING RATES: ", self.learning_rates)
        start_total = time.time()
        # Each (epochs, batch size, learning rate) triple defines one training stage
        for counter, epoch_value in enumerate(self.epochs):
            batch_sizes, number_of_batches = self.get_batch_sizes(counter, data_sets)
            for e in range(1, epoch_value + 1):
                start_epoch = time.time()
                # Re-shuffle all data sets each epoch and zero the loss accumulators
                data_sets = self.shuffle_data_and_reset_epoch_losses(data_sets)
                for b in range(number_of_batches):
                    batches = self.get_batches(data_sets, b, batch_sizes)
                    tf_dict = self.get_feed_dict(batches, counter)
                    _, batch_losses = self.sess.run([self.minimize_op, self.loss_tensor_list], tf_dict)
                    self.assign_batch_losses(batch_losses)
                self.append_loss_and_activation_coeff_history()
                # loss_list[0] is the total loss; checkpoint only on improvement
                self.save_model_checkpoint(self.epoch_loss[self.loss_list[0]], e, counter)
                self.print_info(e, self.epochs[counter], time.time() - start_epoch)
        self.print("\nTotal training time: %5.3fs" % (time.time() - start_total))
        # handlers[1] is the file handler added in get_logger
        self.logger.handlers[1].close()
def check_matching_keys(self, data_sets):
for key1, key2 in zip(data_sets, self.placeholders):
assert key1 == key2, "Data set key %s does not match placeholder key %s" % (key1, key2)
def print_point_distribution(self, data_sets):
no_points = 0
for key in data_sets:
no_points += data_sets[key].shape[0]
self.print("Training data %10s shape: %s" %(key, data_sets[key].shape))
self.print("Total number of points %d" % no_points)
def shuffle_data_and_reset_epoch_losses(self, data_sets):
for key in data_sets:
length = len(data_sets[key])
shuffled_indices = np.random.choice(length, length, replace=False)
data_sets[key] = pd.DataFrame(data=data_sets[key].to_numpy()[shuffled_indices,:], columns=data_sets[key].columns)
for key in self.epoch_loss:
self.epoch_loss[key] = 0
return data_sets
def get_batches(self, data, b, batch_sizes):
batches = dict.fromkeys(data.keys(), 0)
for key in data:
batches[key] = data[key][b*batch_sizes[key]:(b+1)*batch_sizes[key]]
return batches
def assign_batch_losses(self, batch_losses):
for loss_values, key in zip(batch_losses, self.epoch_loss):
self.epoch_loss[key] += loss_values
def append_loss_and_activation_coeff_history(self):
for key in self.loss_history:
self.loss_history[key].append(self.epoch_loss[key])
for key, value in self.ad_act_coeff.items():
self.ad_act_coeff_history[key].append(self.sess.run(value))
    def get_feed_dict(self, batches , counter):
        ''' Builds the tensorflow feed dict for one optimization step.

        Each placeholder is matched to its data frame column by name: placeholder.name
        carries a trailing ":0" (tensor output index) which is stripped by [:-2] before
        comparing with the column name. Each column is fed as a (N, 1) array.

        Args:
            batches: dict assigning the current batch (data frame) to each data set key
            counter: index of the current training stage (selects the learning rate)
        Returns:
            feed dict mapping placeholders (and the learning rate) to numpy arrays
        '''
        tf_dict = {self.learning_rate_opt: self.learning_rates[counter]}
        feed_dicts = []
        for i, key in enumerate(self.placeholders):
            feed_dicts.append(dict.fromkeys(self.placeholders[key], 0))
            for placeholder, column_name in zip(self.placeholders[key], batches[key].columns):
                assert placeholder.name[:-2] == column_name, "Placeholder %s does not match column %s in data %s!" % (placeholder.name[:-2], column_name, key)
                # transpose of atleast_2d turns the 1D column into the (N, 1) shape
                # the placeholders expect
                feed_dicts[i][placeholder] = np.transpose(np.atleast_2d(batches[key][column_name].to_numpy()))
        for dicts in feed_dicts:
            tf_dict.update(dicts)
        return tf_dict
def save_model_checkpoint(self, loss, epoch, counter):
''' Saves the following files in self.dirname when a checkpoint epoch is reached:
1) architecture (.json)
2) weights (.h5)
3) optimizer state, loss history, adaptive activation coefficient history (.mat)
These files may be used to restart a training run from checkpoint '''
if loss < self.epoch_loss_checkpoints and not (epoch)%self.checkpoint_interval:
for file in glob.glob(os.path.join(self.dirname, "*")):
if file.endswith("json") or file.endswith("h5") or file.endswith("mat"):
os.remove(file)
writeToJSONFile(self.dirname, "loss_%.4e_architecture" % (loss), self.model.to_json())
data = dict(loss_history=self.loss_history, ad_act_coeff_history=self.ad_act_coeff_history, optimizer_state=self.sess.run(self.optimizer.variables()),
ad_act_coeff=self.sess.run(self.ad_act_coeff), epoch=epoch, learning_rate=self.learning_rates[counter])
scipy.io.savemat(os.path.join(self.dirname, "loss_%.4e_variables.mat") % (loss), data)
self.model.save_weights(os.path.join(self.dirname, "loss_%.4e_weights.h5" % (loss)))
self.epoch_loss_checkpoints = loss
def print_info(self, current_epoch, epochs, time_for_epoch):
if current_epoch == 1: # skipping first epoch, because it takes way longer
self.mean_epoch_time = 0
else:
self.mean_epoch_time = self.mean_epoch_time*(current_epoch-2)/(current_epoch-1) + time_for_epoch/(current_epoch-1)
string = ["Epoch: %5d/%d - %7.2fms - avg: %7.2fms" % (current_epoch, epochs, time_for_epoch*1e3, self.mean_epoch_time*1e3)]
for key, value in self.epoch_loss.items():
string.append(" - %s: %.4e" % (key, value))
for key, act_coeff in self.ad_act_coeff.items():
string.append(" - %s: %.4e" % (key, self.sess.run(act_coeff)))
self.print(*string)
    def get_batch_sizes(self, counter, data_sets):
        ''' Splits the requested total batch size across the data sets proportionally to
        their sizes, so every batch contains the same mix of point types.

        Args:
            counter: index of the current training stage (selects self.batch_sizes entry)
            data_sets: dict assigning a data frame to each loss
        Returns:
            batch_sizes_datasets: dict with the per-data-set batch size
            number_of_batches: number of batches per epoch
        '''
        number_of_samples = sum([len(data_sets[key]) for key in data_sets])
        batch_sizes_datasets = dict.fromkeys(data_sets.keys(), 0)
        if self.batch_sizes[counter] >= number_of_samples:
            # Requested batch size covers everything: full-batch training
            number_of_batches = 1
            for key in data_sets:
                batch_sizes_datasets[key] = len(data_sets[key])
            self.print("Batch size is larger equal the amount of training samples, thus going full batch mode")
            self.print("Total batch size: ", number_of_samples, " - ", "Batch sizes: ", batch_sizes_datasets, " - ", "learning rate: ", self.learning_rates[counter], "\n")
        else:
            number_of_batches = math.ceil(number_of_samples/self.batch_sizes[counter])
            batch_percentages = dict.fromkeys(data_sets.keys(), 0)
            print_batches = dict.fromkeys(data_sets.keys(), "")
            for key in data_sets:
                # ceil rounds each per-set size up, so the realized total batch size may
                # slightly exceed the requested one
                batch_percentages[key] = len(data_sets[key])/number_of_samples
                batch_sizes_datasets[key] = math.ceil(self.batch_sizes[counter]*batch_percentages[key])
                # Shows "regular batch size / size of the final (remainder) batch"
                print_batches[key] = "%d/%d" % (batch_sizes_datasets[key], 0 if batch_sizes_datasets[key] == 0 else len(data_sets[key])%batch_sizes_datasets[key])
            total_batch_size = sum([batch_sizes_datasets[key] for key in batch_sizes_datasets])
            self.print("\nTotal batch size: ", total_batch_size, " - ", "number of batches: ", number_of_batches, " - ", "Batch sizes: ", print_batches, " - ", "learning rate: ", self.learning_rates[counter])
            # Guard against batch counts that would leave the last batches of a data
            # set empty (slicing past its end)
            for key in data_sets:
                if len(data_sets[key]) == 0:
                    continue
                assert (number_of_batches - 1) * batch_sizes_datasets[key] < len(data_sets[key]), "The specified batch size of %d will lead to empty batches with the present batch ratio, increase the batch size!" % (self.batch_sizes[counter])
        return batch_sizes_datasets, number_of_batches
def print(self, *args):
for word in args:
if len(args) == 1:
self.logger.info(word)
elif word != args[-1]:
for handler in self.logger.handlers:
handler.terminator = ""
if type(word) == float or type(word) == np.float64 or type(word) == np.float32:
self.logger.info("%.4e" % (word))
else:
self.logger.info(word)
else:
for handler in self.logger.handlers:
handler.terminator = "\n"
if type(word) == float or type(word) == np.float64 or type(word) == np.float32:
self.logger.info("%.4e" % (word))
else:
self.logger.info(word)
def compute_batch_size(training_data, number_of_batches):
    ''' Computes the batch size from number of batches and amount of training samples '''
    total_samples = sum(len(samples) for samples in training_data.values())
    return math.ceil(total_samples/number_of_batches)
def main():
    ''' This scripts trains a PINN for the rising bubble case in <paper_cite_TBA>. The user may define the following:
    1) Number of points for various losses (check function description)
    2) The neural network architecture, i.e. number of hidden layers and the nodes in each hidden layer
    3) The training hyperparameters, i.e. number of epochs, batch size and learning rates
    '''
    # SETTING UP SESSION
    sess = tf.Session()
    # PARAMETERS FOR THE TRAINING DATA - NUMBER OF POINTS (NOP) FOR VARIOUS LOSSES
    NOP_a = (500, 400)
    NOP_PDE = (400, 2000, 3000)
    NOP_north = (20, 20)
    NOP_south = (20, 20)
    NOP_east = (20, 20)
    NOP_west = (20, 20)
    training_data = get_training_data(NOP_a, NOP_PDE, NOP_north, NOP_south, NOP_east, NOP_west)
    # NEURAL NETWORK ARCHITECTURE - 8 hidden layers with 350 nodes each, tanh throughout
    dtype = tf.float32
    no_layers = 8
    hidden_layers = [350]*no_layers
    activation_functions = dict(tanh = range(1,no_layers+1)) # dict assigning layer activation function to layer number
    # ADAPIVE ACTIVATION COEFFICIENTS SETUP (unused here since use_ad_act is False,
    # but the configuration must still be consistent for the sanity check)
    adaptive_activation_coeff = {"aac_1": range(1,no_layers+1)} # list shows corresponding layer numbers
    adaptive_activation_init = {"aac_1": 0.1}
    adaptive_activation_n = [10]*no_layers # prefactor for activation function
    use_ad_act = False
    # PHYSICAL PARAMETERS - presumably the standard rising bubble benchmark values
    # (viscosity ratio 1:10, density ratio 100:1000); confirm against the paper
    mu = [1.0, 10.0]
    sigma = 24.5
    g = -0.98
    rho = [100, 1000]
    u_ref = 1.0
    L_ref = 0.25
    # HYPERPARAMETERS FOR TRAINING - five stages of 5000 epochs with a decreasing
    # learning rate; the batch size is derived from a fixed number of batches
    loss_weights_A = [1.0]
    loss_weights_PDE = [1.0, 10.0, 10.0, 1.0]
    epochs = [5000]*5
    number_of_batches = 20
    batch_sizes = [compute_batch_size(training_data, number_of_batches)]*5
    learning_rates = [1e-4, 5e-5, 1e-5, 5e-6, 1e-6]
    checkpoint_interval = 100
    # INSTANTIATE PINN
    PINN = TwoPhasePinn(sess, dtype, hidden_layers, activation_functions, adaptive_activation_coeff, adaptive_activation_n,
                        adaptive_activation_init, use_ad_act, loss_weights_A, loss_weights_PDE, mu, sigma, g, rho, u_ref, L_ref, checkpoint_interval, epochs,
                        batch_sizes, learning_rates)
    # TRAINING
    PINN.train(training_data)

if __name__ == "__main__":
    main()
| 51.639286 | 255 | 0.635694 | import sys
sys.path.append("../utilities")
import os
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'
os.environ["CUDA_DEVICE_ORDER"]="PCI_BUS_ID"
GPU_ID = "0"
os.environ["CUDA_VISIBLE_DEVICES"]= GPU_ID
import tensorflow as tf
tf.compat.v1.logging.set_verbosity(tf.compat.v1.logging.ERROR)
from tensorflow.keras import backend as K
import numpy as np
import pandas as pd
import scipy.io
from generate_points import *
from utilities import *
import time
import math
import glob
from datetime import datetime
import shutil
import logging
np.random.seed(1234)
tf.set_random_seed(1234)
class TwoPhasePinn:
def __init__(self, sess, dtype, hidden_layers, activation_functions, adaptive_activation_coeff, adaptive_activation_n,
adaptive_activation_init, use_ad_act, loss_weights_A, loss_weights_PDE, mu, sigma, g, rho, u_ref, L_ref, checkpoint_interval, epochs, batch_sizes,
learning_rates):
self.dirname, logpath = self.make_output_dir()
self.logger = self.get_logger(logpath)
self.mu1 = mu[0]
self.mu2 = mu[1]
self.sigma = sigma
self.g = g
self.rho1 = rho[0]
self.rho2 = rho[1]
self.U_ref = u_ref
self.L_ref = L_ref
self.epoch_loss_checkpoints = 1e10
self.checkpoint_interval = checkpoint_interval
self.mean_epoch_time = 0
self.learning_rates = learning_rates
self.epochs = epochs
self.batch_sizes = batch_sizes
self.sess = sess
K.set_session(self.sess)
self.print("Building Computational Graph")
x_A = tf.placeholder(dtype=dtype, shape=[None, 1], name="x_A")
y_A = tf.placeholder(dtype=dtype, shape=[None, 1], name="y_A")
t_A = tf.placeholder(dtype=dtype, shape=[None, 1], name="t_A")
a_A = tf.placeholder(dtype=dtype, shape=[None, 1], name="a_A")
x_N = tf.placeholder(dtype=dtype, shape=[None, 1], name="x_N")
y_N = tf.placeholder(dtype=dtype, shape=[None, 1], name="y_N")
t_N = tf.placeholder(dtype=dtype, shape=[None, 1], name="t_N")
p_N = tf.placeholder(dtype=dtype, shape=[None, 1], name="p_N")
x_E = tf.placeholder(dtype=dtype, shape=[None, 1], name="x_E")
y_E = tf.placeholder(dtype=dtype, shape=[None, 1], name="y_E")
x_W = tf.placeholder(dtype=dtype, shape=[None, 1], name="x_W")
y_W = tf.placeholder(dtype=dtype, shape=[None, 1], name="y_W")
t_EW = tf.placeholder(dtype=dtype, shape=[None, 1], name="t_EW")
x_NSEW = tf.placeholder(dtype=dtype, shape=[None, 1], name="x_NSEW")
y_NSEW = tf.placeholder(dtype=dtype, shape=[None, 1], name="y_NSEW")
t_NSEW = tf.placeholder(dtype=dtype, shape=[None, 1], name="t_NSEW")
u_NSEW = tf.placeholder(dtype=dtype, shape=[None, 1], name="u_NSEW")
v_NSEW = tf.placeholder(dtype=dtype, shape=[None, 1], name="v_NSEW")
x_PDE = tf.placeholder(dtype=dtype, shape=[None, 1], name="x_PDE")
y_PDE = tf.placeholder(dtype=dtype, shape=[None, 1], name="y_PDE")
t_PDE = tf.placeholder(dtype=dtype, shape=[None, 1], name="t_PDE")
f_PDE = tf.placeholder(dtype=dtype, shape=[None, 1], name="f_PDE")
self.learning_rate_opt = tf.placeholder(dtype=dtype, shape=[], name="learning_rate")
data_set_names = ["A", "PDE", "N", "EW", "NSEW"]
self.placeholders = dict((name, []) for name in data_set_names)
self.placeholders["A"].extend([x_A, y_A, t_A, a_A])
self.placeholders["PDE"].extend([x_PDE, y_PDE, t_PDE, f_PDE])
self.placeholders["N"].extend([x_N, y_N, t_N, p_N])
self.placeholders["EW"].extend([x_E, y_E, x_W, y_W, t_EW])
self.placeholders["NSEW"].extend([x_NSEW, y_NSEW, t_NSEW, u_NSEW, v_NSEW])
self.sanity_check_activation_functions(activation_functions, adaptive_activation_coeff, adaptive_activation_n, adaptive_activation_init, hidden_layers)
self.ad_act_coeff = {}
if use_ad_act:
for key in adaptive_activation_coeff:
initial_value = adaptive_activation_init[key]
self.ad_act_coeff[key] = tf.Variable(initial_value, name=key)
activation_functions_dict = self.get_activation_function_dict(activation_functions, adaptive_activation_coeff, adaptive_activation_n, hidden_layers, use_ad_act)
outputs = ["output_u", "output_v", "output_p", "output_a"]
activations_output = [None, None, "exponential", "sigmoid"]
output_layer = list(zip(outputs, activations_output))
nn = NNCreator(dtype)
self.model = nn.get_model_dnn(3, hidden_layers, output_layer, activation_functions_dict, use_ad_act)
output_tensors = self.model(tf.concat([x_A, y_A, t_A], 1))
loss_a_A = tf.reduce_mean(tf.square(a_A - output_tensors[3]))
start = time.time()
output_tensors = self.model(tf.concat([x_NSEW, y_NSEW, t_NSEW], 1))
loss_u_NSEW = tf.reduce_mean(tf.square(u_NSEW - output_tensors[0]))
loss_v_NSEW = tf.reduce_mean(tf.square(v_NSEW - output_tensors[1]))
loss_NSEW = tf.reduce_sum(tf.stack([loss_u_NSEW, loss_v_NSEW]))
self.print(time.time()-start, "s")
start = time.time()
output_tensors = self.model(tf.concat([x_N, y_N, t_N], 1))
loss_p_N = tf.reduce_mean(tf.square(p_N - output_tensors[2]))
self.print(time.time()-start, "s")
start = time.time()
output_east = self.model(tf.concat([x_E, y_E, t_EW], 1))
output_west = self.model(tf.concat([x_W, y_W, t_EW], 1))
loss_u_EW = tf.reduce_mean(tf.square(output_east[0] - output_west[0]))
loss_v_EW = tf.reduce_mean(tf.square(output_east[1] - output_west[1]))
loss_p_EW = tf.reduce_mean(tf.square(output_east[2] - output_west[2]))
loss_EW = tf.reduce_sum(tf.stack([loss_u_EW, loss_v_EW, loss_p_EW]))
self.print(time.time()-start, "s")
loss_NSEW = tf.reduce_sum(tf.stack([loss_p_N, loss_EW, loss_NSEW]))
start = time.time()
PDE_tensors = self.PDE_caller(x_PDE, y_PDE, t_PDE)
loss_PDE_m = tf.losses.mean_squared_error(f_PDE, PDE_tensors[0])
loss_PDE_u = tf.losses.mean_squared_error(f_PDE, PDE_tensors[1])
loss_PDE_v = tf.losses.mean_squared_error(f_PDE, PDE_tensors[2])
loss_PDE_a = tf.losses.mean_squared_error(f_PDE, PDE_tensors[3])
self.print(time.time()-start, "s")
loss_PDE = tf.tensordot(tf.stack([loss_PDE_m, loss_PDE_u, loss_PDE_v, loss_PDE_a]), np.array(loss_weights_PDE).astype("float32"), 1)
loss_complete = loss_a_A + loss_NSEW + loss_PDE
start = time.time()
self.optimizer = tf.train.AdamOptimizer(self.learning_rate_opt)
self.minimize_op = self.optimizer.minimize(loss_complete)
self.print(time.time()-start, "s")
self.loss_tensor_list = [loss_complete, loss_a_A, loss_NSEW, loss_PDE_m, loss_PDE_u, loss_PDE_v, loss_PDE_a]
self.loss_list = ["l", "a", "NSEW", "m", "u", "v", "PDE_a"]
self.epoch_loss = dict.fromkeys(self.loss_list, 0)
self.loss_history = dict((loss, []) for loss in self.loss_list)
self.ad_act_coeff_history = dict((key, []) for key in self.ad_act_coeff)
self.sess.run(tf.global_variables_initializer())
self.set_variables()
self.model.save_weights(os.path.join(self.dirname, "Weights_loss_%.4e.h5" % (self.epoch_loss_checkpoints)))
self.sess.graph.finalize()
def make_output_dir(self):
    """Create a timestamped checkpoint directory and snapshot the sources.

    Copies this script and ``generate_points.py`` into the new directory so a
    checkpoint is always reproducible with the exact code that produced it.

    Returns
    -------
    tuple of str
        ``(dirname, logpath)`` — absolute checkpoint directory and the path
        of the log file inside it.
    """
    # exist_ok avoids the check-then-create race of the original
    # os.path.exists/os.mkdir pair; behavior is otherwise unchanged.
    os.makedirs("checkpoints", exist_ok=True)
    dirname = os.path.abspath(os.path.join("checkpoints", datetime.now().strftime("%b-%d-%Y_%H-%M-%S")))
    os.mkdir(dirname)
    # Bug fix: the original joined the full __file__ path onto dirname, which
    # fails whenever __file__ contains a directory component (e.g. when the
    # script is launched as "python src/train_model.py"). Only the file name
    # belongs inside the checkpoint directory.
    shutil.copyfile(__file__, os.path.join(dirname, os.path.basename(__file__)))
    # NOTE(review): assumes generate_points.py lives in the current working
    # directory — confirm against how the script is launched.
    shutil.copyfile("generate_points.py", os.path.join(dirname, "generate_points.py"))
    logpath = os.path.join(dirname, "output.log")
    return dirname, logpath
def get_logger(self, logpath):
    """Return a DEBUG-level logger that writes to the console and to *logpath*.

    The console handler is added first and the file handler second; callers
    rely on this order (the file handler is closed via ``handlers[1]``).
    """
    logger = logging.getLogger(__name__)
    logger.setLevel(logging.DEBUG)
    console = logging.StreamHandler()
    console.setLevel(logging.DEBUG)
    console.setFormatter(logging.Formatter('%(message)s'))
    logfile = logging.FileHandler(logpath)
    for handler in (console, logfile):
        logger.addHandler(handler)
    return logger
def sanity_check_activation_functions(self, activation_functions, adaptive_activations, adaptive_activation_n, adaptive_activation_init, hidden_layers):
    """Validate that every hidden layer is assigned exactly once.

    Layers are numbered 1..no_layers, so an unambiguous one-to-one assignment
    of layers to activation functions (and to adaptive activation
    coefficients) must sum to the triangular number no_layers*(no_layers+1)/2.
    """
    no_layers = len(hidden_layers)
    expected = no_layers * (no_layers + 1) / 2
    coeff_total = sum(sum(layers) for layers in adaptive_activations.values())
    assert expected == coeff_total, "Not every layer has been assigned with an adaptive activation coefficient unambiguously"
    func_total = sum(sum(layers) for layers in activation_functions.values())
    assert expected == func_total, "Not every layer has been assigned with an activation function unambiguously"
    assert no_layers == len(adaptive_activation_n), "Not every layer has an adaptive activation precoefficient"
    assert adaptive_activation_init.keys() == adaptive_activations.keys(), "Not every adaptive activation coefficient has been assigned an initial value"
def get_activation_function_dict(self, activation_functions, adaptive_activation_coeff, adaptive_activation_n, hidden_layers, use_ad_act):
    """Build a per-layer activation description.

    Returns a dict mapping layer number (1-based) to a three-element list
    ``[activation name, adaptive coefficient variable (or 0), precoefficient n]``.
    The adaptive coefficient slot is only filled when *use_ad_act* is true.
    """
    per_layer = {}
    for layer_no in range(1, len(hidden_layers) + 1):
        entry = [0, 0, adaptive_activation_n[layer_no - 1]]
        for func_name, layers in activation_functions.items():
            if layer_no in layers:
                entry[0] = func_name
        if use_ad_act:
            for coeff_name, layers in adaptive_activation_coeff.items():
                if layer_no in layers:
                    entry[1] = self.ad_act_coeff[coeff_name]
        per_layer[layer_no] = entry
    return per_layer
def compute_gradients(self, x, y, t):
    """Evaluate the network and all derivatives required by the PDE residuals.

    The model maps the concatenated coordinates (x, y, t) to the four outputs
    (u, v, p, a); first and second derivatives with respect to the inputs are
    built with ``tf.gradients`` (TF1 graph mode).

    Returns
    -------
    tuple of lists
        ``([u, u_x, u_y, u_t, u_xx, u_yy], [v, v_x, v_y, v_t, v_xx, v_yy],
        [p, p_x, p_y], [a, a_x, a_y, a_t, a_xx, a_yy, a_xy])`` grouped per
        output variable.
    """
    # Forward pass; inputs are concatenated column-wise (one sample per row).
    u, v, p, a = self.model(tf.concat([x, y, t], 1))
    u_x = tf.gradients(u, x)[0]
    u_y = tf.gradients(u, y)[0]
    u_t = tf.gradients(u, t)[0]
    # Second derivatives are gradients of the first derivatives.
    u_xx = tf.gradients(u_x, x)[0]
    u_yy = tf.gradients(u_y, y)[0]
    v_x = tf.gradients(v, x)[0]
    v_y = tf.gradients(v, y)[0]
    v_t = tf.gradients(v, t)[0]
    v_xx = tf.gradients(v_x, x)[0]
    v_yy = tf.gradients(v_y, y)[0]
    p_x = tf.gradients(p, x)[0]
    p_y = tf.gradients(p, y)[0]
    a_x = tf.gradients(a, x)[0]
    a_y = tf.gradients(a, y)[0]
    a_t = tf.gradients(a, t)[0]
    a_xx = tf.gradients(a_x, x)[0]
    a_yy = tf.gradients(a_y, y)[0]
    # Mixed derivative of a; consumed by the curvature term in PDE_caller.
    a_xy = tf.gradients(a_x, y)[0]
    return [u, u_x, u_y, u_t, u_xx, u_yy], [v, v_x, v_y, v_t, v_xx, v_yy], [p, p_x, p_y], [a, a_x, a_y, a_t, a_xx, a_yy, a_xy]
def PDE_caller(self, x, y, t):
    """Assemble the non-dimensional two-phase Navier-Stokes residuals.

    Returns the residual tensors ``(PDE_m, PDE_u, PDE_v, PDE_a)``:
    divergence of velocity, x/y momentum, and the transport equation of the
    field ``a`` (presumably a phase indicator / volume fraction — the fluid
    properties below blend linearly in it; TODO confirm against the model
    derivation).
    """
    u_gradients, v_gradients, p_gradients, a_gradients = self.compute_gradients(x, y, t)
    u, u_x, u_y, u_t, u_xx, u_yy = u_gradients[:]
    v, v_x, v_y, v_t, v_xx, v_yy = v_gradients[:]
    p, p_x, p_y = p_gradients[:]
    a, a_x, a_y, a_t, a_xx, a_yy, a_xy = a_gradients[:]
    # Viscosity and density interpolate linearly between the two phases in a:
    # a == 0 gives (mu2, rho2), a == 1 gives (mu1, rho1).
    mu = self.mu2 + (self.mu1 - self.mu2) * a
    mu_x = (self.mu1 - self.mu2) * a_x
    mu_y = (self.mu1 - self.mu2) * a_y
    rho = self.rho2 + (self.rho1 - self.rho2) * a
    # Machine epsilon under the root keeps the gradient magnitude strictly
    # positive, avoiding division by zero away from the interface.
    abs_interface_grad = tf.sqrt(tf.square(a_x) + tf.square(a_y) + np.finfo(float).eps)
    # Divergence of the normalized gradient of a, written out explicitly —
    # presumably the interface curvature of the phase field; verify sign
    # convention against the reference derivation.
    curvature = - ( (a_xx + a_yy)/abs_interface_grad - (a_x**2*a_xx + a_y**2*a_yy + 2*a_x*a_y*a_xy)/tf.pow(abs_interface_grad, 3) )
    # Reciprocal non-dimensional groups built from the reference scales;
    # phase 2 density is used as the reference density.
    rho_ref = self.rho2
    one_Re = mu/(rho_ref*self.U_ref*self.L_ref)
    one_Re_x = mu_x/(rho_ref*self.U_ref*self.L_ref)
    one_Re_y = mu_y/(rho_ref*self.U_ref*self.L_ref)
    one_We = self.sigma/(rho_ref*self.U_ref**2*self.L_ref)
    one_Fr = self.g*self.L_ref/self.U_ref**2
    # Residuals: divergence-free velocity, advection of a, and the two
    # momentum components including the one_We*curvature surface-tension
    # terms, variable-viscosity contributions (one_Re_x/one_Re_y) and the
    # gravity term rho/rho_ref*one_Fr in the y momentum.
    PDE_m = u_x + v_y
    PDE_a = a_t + u*a_x + v*a_y
    PDE_u = (u_t + u*u_x + v*u_y)*rho/rho_ref + p_x - one_We*curvature*a_x - one_Re*(u_xx + u_yy) - 2.0*one_Re_x*u_x - one_Re_y*(u_y + v_x)
    PDE_v = (v_t + u*v_x + v*v_y)*rho/rho_ref + p_y - one_We*curvature*a_y - one_Re*(v_xx + v_yy) - rho/rho_ref*one_Fr - 2.0*one_Re_y*v_y - one_Re_x*(u_y + v_x)
    return PDE_m, PDE_u, PDE_v, PDE_a
def set_variables(self):
    """Restore a previous training state from checkpoint files, if present.

    Scans for files matching ``*loss*``: an ``.h5`` file restores the model
    weights, a ``.mat`` file restores the Adam optimizer state and the
    adaptive activation coefficients. If no such files exist this is a no-op
    (fresh training).

    NOTE(review): the glob runs in the current working directory, not in
    ``self.dirname`` — presumably restart files are placed next to the
    script on purpose; confirm.
    """
    for file in glob.glob("*loss*"):
        if file.endswith("h5"):
            self.model.load_weights(file)
            self.print("Loading weights from file", file)
        if file.endswith("mat"):
            # squeeze_me=True drops singleton dimensions on load, so the
            # shapes must be rebuilt below before assigning to variables.
            matfile = scipy.io.loadmat(file, squeeze_me=True)
            self.print("Setting optimizer variables according to file", file)
            optimizer_state = matfile["optimizer_state"]
            optimizer_variables = self.optimizer.variables()
            assert len(optimizer_variables) == len(optimizer_state), "Loading optimizer state failed: Not as many optimizer states saved as required, check architecture/aac compatibility!"
            for i in range(0, len(optimizer_variables)):
                # Scalars saved from (1,)-shaped variables come back 0-d.
                if optimizer_variables[i].shape == (1,):
                    optimizer_state[i] = np.array([optimizer_state[i]])
                # Column vectors (n, 1) come back squeezed to (n,).
                if len(optimizer_variables[i].shape) == 2:
                    if optimizer_variables[i].shape[1] == 1:
                        optimizer_state[i] = optimizer_state[i].reshape(len(optimizer_state[i]),1)
                self.sess.run(optimizer_variables[i].assign(optimizer_state[i]))
            self.print("Setting adaptive activation coefficients according to file", file)
            # Saved as a structured array whose field names are the
            # coefficient names; they must match the current configuration.
            ad_act_coeff = matfile["ad_act_coeff"]
            if len(self.ad_act_coeff) > 0:
                assert list(self.ad_act_coeff.keys()) == list(ad_act_coeff.dtype.names), "Loading adaptive activation coefficients failed: Restart coefficients %s do not match input %s" %(list(ad_act_coeff.dtype.names), list(self.ad_act_coeff.keys()))
                for key in self.ad_act_coeff:
                    self.sess.run(self.ad_act_coeff[key].assign(float(ad_act_coeff[key])))
def train(self, data_sets):
    """Run the full training schedule over all configured stages.

    Each entry of ``self.epochs`` / ``self.batch_sizes`` /
    ``self.learning_rates`` defines one stage. Per epoch the data sets are
    reshuffled, split into proportional mini-batches, and fed through one
    Adam step per batch; losses are accumulated, histories appended, and a
    checkpoint is written whenever the total epoch loss improves.

    Parameters
    ----------
    data_sets : dict of pandas.DataFrame
        Training point sets keyed consistently with ``self.placeholders``.
    """
    self.check_matching_keys(data_sets)
    self.print_point_distribution(data_sets)
    self.print("\nEPOCHS: ", self.epochs, " BATCH SIZES: ", self.batch_sizes, " LEARNING RATES: ", self.learning_rates)
    start_total = time.time()
    for counter, epoch_value in enumerate(self.epochs):
        batch_sizes, number_of_batches = self.get_batch_sizes(counter, data_sets)
        for e in range(1, epoch_value + 1):
            start_epoch = time.time()
            data_sets = self.shuffle_data_and_reset_epoch_losses(data_sets)
            for b in range(number_of_batches):
                batches = self.get_batches(data_sets, b, batch_sizes)
                tf_dict = self.get_feed_dict(batches, counter)
                # One optimizer step; also fetch all loss tensors for logging.
                _, batch_losses = self.sess.run([self.minimize_op, self.loss_tensor_list], tf_dict)
                self.assign_batch_losses(batch_losses)
            self.append_loss_and_activation_coeff_history()
            # loss_list[0] is the complete (total) loss.
            self.save_model_checkpoint(self.epoch_loss[self.loss_list[0]], e, counter)
            self.print_info(e, self.epochs[counter], time.time() - start_epoch)
    self.print("\nTotal training time: %5.3fs" % (time.time() - start_total))
    # handlers[1] is the FileHandler (added second in get_logger).
    self.logger.handlers[1].close()
def check_matching_keys(self, data_sets):
    """Assert that the data sets and the placeholders use identical keys in
    identical order.

    The original pairwise ``zip`` silently truncates when one mapping has
    more entries than the other, so a missing or extra data set would pass
    unnoticed; compare the lengths explicitly first.
    """
    assert len(data_sets) == len(self.placeholders), \
        "Number of data sets %d does not match number of placeholder groups %d" % (len(data_sets), len(self.placeholders))
    for key1, key2 in zip(data_sets, self.placeholders):
        assert key1 == key2, "Data set key %s does not match placeholder key %s" % (key1, key2)
def print_point_distribution(self, data_sets):
    """Log the shape of every training data set and the total sample count."""
    no_points = 0
    for key, samples in data_sets.items():
        no_points += samples.shape[0]
        self.print("Training data %10s shape: %s" % (key, samples.shape))
    self.print("Total number of points %d" % no_points)
def shuffle_data_and_reset_epoch_losses(self, data_sets):
    """Randomly permute the rows of every data set and zero the per-epoch
    loss accumulators. Returns the (mutated) data_sets dict."""
    for key, frame in data_sets.items():
        n_rows = len(frame)
        order = np.random.choice(n_rows, n_rows, replace=False)
        data_sets[key] = pd.DataFrame(data=frame.to_numpy()[order, :], columns=frame.columns)
    # Reset in place so existing references to the dict stay valid.
    for loss_name in self.epoch_loss:
        self.epoch_loss[loss_name] = 0
    return data_sets
def get_batches(self, data, b, batch_sizes):
    """Return the *b*-th slice of every data set, sized per batch_sizes."""
    return {
        key: frame[b * batch_sizes[key]:(b + 1) * batch_sizes[key]]
        for key, frame in data.items()
    }
def assign_batch_losses(self, batch_losses):
    """Accumulate the current batch losses into the running epoch losses.

    batch_losses is positionally aligned with the keys of self.epoch_loss.
    """
    for key, batch_value in zip(self.epoch_loss, batch_losses):
        self.epoch_loss[key] += batch_value
def append_loss_and_activation_coeff_history(self):
    """Record the finished epoch's losses and the current values of the
    adaptive activation coefficients."""
    for loss_name, history in self.loss_history.items():
        history.append(self.epoch_loss[loss_name])
    for coeff_name, variable in self.ad_act_coeff.items():
        self.ad_act_coeff_history[coeff_name].append(self.sess.run(variable))
def get_feed_dict(self, batches, counter):
    """Build the tf feed dict for one optimizer step.

    Maps every placeholder to the matching column of its batch as an (n, 1)
    array, and sets the stage's learning rate.

    Parameters
    ----------
    batches : dict of pandas.DataFrame
        One batch per data set, keyed like ``self.placeholders``.
    counter : int
        Index of the current training stage (selects the learning rate).
    """
    tf_dict = {self.learning_rate_opt: self.learning_rates[counter]}
    feed_dicts = []
    for i, key in enumerate(self.placeholders):
        feed_dicts.append(dict.fromkeys(self.placeholders[key], 0))
        for placeholder, column_name in zip(self.placeholders[key], batches[key].columns):
            # placeholder.name[:-2] strips the trailing tensor output suffix
            # (presumably ":0") so it can be compared against the column name.
            assert placeholder.name[:-2] == column_name, "Placeholder %s does not match column %s in data %s!" % (placeholder.name[:-2], column_name, key)
            # Feed as a column vector: shape (batch, 1).
            feed_dicts[i][placeholder] = np.transpose(np.atleast_2d(batches[key][column_name].to_numpy()))
    for dicts in feed_dicts:
        tf_dict.update(dicts)
    return tf_dict
def save_model_checkpoint(self, loss, epoch, counter):
    """Persist the model when the epoch loss improved at a checkpoint epoch.

    Writes the architecture (json), all session variables and histories
    (mat), and the weights (h5) into ``self.dirname``, removing the previous
    checkpoint files first so only the latest checkpoint is kept.

    Parameters
    ----------
    loss : float
        Total loss of the finished epoch; saved only if below the best seen.
    epoch : int
        Current epoch number; must hit the checkpoint interval.
    counter : int
        Current training stage (records the active learning rate).
    """
    if loss < self.epoch_loss_checkpoints and not (epoch) % self.checkpoint_interval:
        # Keep a single checkpoint: drop previously saved artifacts.
        for file in glob.glob(os.path.join(self.dirname, "*")):
            if file.endswith("json") or file.endswith("h5") or file.endswith("mat"):
                os.remove(file)
        writeToJSONFile(self.dirname, "loss_%.4e_architecture" % (loss), self.model.to_json())
        data = dict(loss_history=self.loss_history, ad_act_coeff_history=self.ad_act_coeff_history, optimizer_state=self.sess.run(self.optimizer.variables()),
                    ad_act_coeff=self.sess.run(self.ad_act_coeff), epoch=epoch, learning_rate=self.learning_rates[counter])
        # Bug fix: format the file name BEFORE joining. The original applied
        # '%' to the already-joined path, which would raise if the directory
        # name ever contained a '%' character; this also matches the weights
        # line below.
        scipy.io.savemat(os.path.join(self.dirname, "loss_%.4e_variables.mat" % (loss)), data)
        self.model.save_weights(os.path.join(self.dirname, "loss_%.4e_weights.h5" % (loss)))
        self.epoch_loss_checkpoints = loss
def print_info(self, current_epoch, epochs, time_for_epoch):
    """Log one epoch's progress: timings, losses and adaptive coefficients.

    The running mean covers epochs 2..current_epoch; the first epoch only
    resets the accumulator and is excluded from the average.
    """
    if current_epoch == 1:
        self.mean_epoch_time = 0
    else:
        self.mean_epoch_time = self.mean_epoch_time*(current_epoch-2)/(current_epoch-1) + time_for_epoch/(current_epoch-1)
    parts = ["Epoch: %5d/%d - %7.2fms - avg: %7.2fms" % (current_epoch, epochs, time_for_epoch * 1e3, self.mean_epoch_time * 1e3)]
    parts.extend(" - %s: %.4e" % (key, value) for key, value in self.epoch_loss.items())
    parts.extend(" - %s: %.4e" % (key, self.sess.run(coeff)) for key, coeff in self.ad_act_coeff.items())
    self.print(*parts)
def get_batch_sizes(self, counter, data_sets):
    """Split the stage's total batch size proportionally across data sets.

    Parameters
    ----------
    counter : int
        Index of the current training stage (selects self.batch_sizes entry).
    data_sets : dict of pandas.DataFrame
        Training point sets.

    Returns
    -------
    tuple
        (per-data-set batch sizes dict, number of batches per epoch).
    """
    number_of_samples = sum([len(data_sets[key]) for key in data_sets])
    batch_sizes_datasets = dict.fromkeys(data_sets.keys(), 0)
    if self.batch_sizes[counter] >= number_of_samples:
        # Requested batch covers everything: fall back to full-batch training.
        number_of_batches = 1
        for key in data_sets:
            batch_sizes_datasets[key] = len(data_sets[key])
        self.print("Batch size is larger equal the amount of training samples, thus going full batch mode")
        self.print("Total batch size: ", number_of_samples, " - ", "Batch sizes: ", batch_sizes_datasets, " - ", "learning rate: ", self.learning_rates[counter], "\n")
    else:
        number_of_batches = math.ceil(number_of_samples/self.batch_sizes[counter])
        batch_percentages = dict.fromkeys(data_sets.keys(), 0)
        print_batches = dict.fromkeys(data_sets.keys(), "")
        for key in data_sets:
            # Each data set contributes in proportion to its share of the
            # total samples; ceil keeps every non-empty set represented.
            batch_percentages[key] = len(data_sets[key])/number_of_samples
            batch_sizes_datasets[key] = math.ceil(self.batch_sizes[counter]*batch_percentages[key])
            # "regular/last" batch size per set; the modulo is the size of the
            # final partial batch (shown as 0 when the split is even or the
            # set is empty).
            print_batches[key] = "%d/%d" % (batch_sizes_datasets[key], 0 if batch_sizes_datasets[key] == 0 else len(data_sets[key])%batch_sizes_datasets[key])
        total_batch_size = sum([batch_sizes_datasets[key] for key in batch_sizes_datasets])
        self.print("\nTotal batch size: ", total_batch_size, " - ", "number of batches: ", number_of_batches, " - ", "Batch sizes: ", print_batches, " - ", "learning rate: ", self.learning_rates[counter])
        for key in data_sets:
            if len(data_sets[key]) == 0:
                continue
            # Guard against a split where the last batches of a set would be
            # empty (slicing past the end of the frame).
            assert (number_of_batches - 1) * batch_sizes_datasets[key] < len(data_sets[key]), "The specified batch size of %d will lead to empty batches with the present batch ratio, increase the batch size!" % (self.batch_sizes[counter])
    return batch_sizes_datasets, number_of_batches
def print(self, *args):
    """Log *args as a single line through the instance logger.

    With several arguments, every handler's line terminator is cleared for
    all but the last argument and restored to a newline for the last one, so
    the pieces appear on one line. Floats are rendered in ``%.4e`` notation.
    A single argument is logged as-is.

    Bug fix: the original detected the last argument by value
    (``word != args[-1]``), which emitted a premature newline whenever a
    value equal to the last argument occurred earlier in ``args``; iterating
    by index removes that ambiguity.
    """
    if len(args) == 1:
        self.logger.info(args[0])
        return
    last_index = len(args) - 1
    for index, word in enumerate(args):
        terminator = "\n" if index == last_index else ""
        for handler in self.logger.handlers:
            handler.terminator = terminator
        if isinstance(word, (float, np.float64, np.float32)):
            self.logger.info("%.4e" % (word))
        else:
            self.logger.info(word)
def compute_batch_size(training_data, number_of_batches):
    """Return the per-batch sample count: ceil(total samples / batch count)."""
    total_samples = sum(len(samples) for samples in training_data.values())
    return math.ceil(total_samples / number_of_batches)
def main():
    """Configure and train the two-phase PINN with the study's settings."""
    sess = tf.Session()
    # Numbers of sampled training points per region (tuples are passed
    # straight to get_training_data; their per-element meaning is defined
    # there — presumably resolution per coordinate direction, confirm).
    NOP_a = (500, 400)
    NOP_PDE = (400, 2000, 3000)
    NOP_north = (20, 20)
    NOP_south = (20, 20)
    NOP_east = (20, 20)
    NOP_west = (20, 20)
    training_data = get_training_data(NOP_a, NOP_PDE, NOP_north, NOP_south, NOP_east, NOP_west)
    # Network architecture: 8 hidden layers of width 350, tanh everywhere.
    dtype = tf.float32
    no_layers = 8
    hidden_layers = [350]*no_layers
    activation_functions = dict(tanh = range(1,no_layers+1))
    # Adaptive activation settings are configured but disabled (use_ad_act).
    adaptive_activation_coeff = {"aac_1": range(1,no_layers+1)}
    adaptive_activation_init = {"aac_1": 0.1}
    adaptive_activation_n = [10]*no_layers
    use_ad_act = False
    # Fluid properties: two-element lists hold [phase 1, phase 2] values.
    mu = [1.0, 10.0]
    sigma = 24.5
    g = -0.98
    rho = [100, 1000]
    # Reference scales for the non-dimensional PDE residuals.
    u_ref = 1.0
    L_ref = 0.25
    # Loss weighting: boundary/initial data vs. the four PDE residuals
    # (mass, u momentum, v momentum, advection of a).
    loss_weights_A = [1.0]
    loss_weights_PDE = [1.0, 10.0, 10.0, 1.0]
    # Training schedule: five stages of 5000 epochs with a decaying
    # learning rate; the batch size is derived once and reused per stage.
    epochs = [5000]*5
    number_of_batches = 20
    batch_sizes = [compute_batch_size(training_data, number_of_batches)]*5
    learning_rates = [1e-4, 5e-5, 1e-5, 5e-6, 1e-6]
    checkpoint_interval = 100
    PINN = TwoPhasePinn(sess, dtype, hidden_layers, activation_functions, adaptive_activation_coeff, adaptive_activation_n,
                        adaptive_activation_init, use_ad_act, loss_weights_A, loss_weights_PDE, mu, sigma, g, rho, u_ref, L_ref, checkpoint_interval, epochs,
                        batch_sizes, learning_rates)
    PINN.train(training_data)

if __name__ == "__main__":
    main()
| true | true |
1c2fb1a603f08b5ab90357ffe685fd73b30386e3 | 13,860 | py | Python | tests/functional/test_sphinx_ext_autodoc.py | lipro/publishing-withsphinx | 80d1f5d190e7123b73f1e1917f72ee80ad45221b | [
"MIT"
] | null | null | null | tests/functional/test_sphinx_ext_autodoc.py | lipro/publishing-withsphinx | 80d1f5d190e7123b73f1e1917f72ee80ad45221b | [
"MIT"
] | 28 | 2016-11-13T10:40:37.000Z | 2019-02-28T17:24:15.000Z | tests/functional/test_sphinx_ext_autodoc.py | lipro/publishing-withsphinx | 80d1f5d190e7123b73f1e1917f72ee80ad45221b | [
"MIT"
] | 1 | 2016-11-15T19:34:56.000Z | 2016-11-15T19:34:56.000Z | # -*- coding: utf-8 -*-
#
# Copyright (c) 2016 Stephan Linz
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
# IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
# CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
# TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
# SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
# How to write tests: http://docs.python-guide.org/en/latest/writing/tests/
#
'''
test_sphinx_ext_autodoc
~~~~~~~~~~~~~~~~~~~~~~~
This module contains basic functional tests of the sphinx.ext.autodoc extension
as part of the publishing.withsphinx package.
:copyright: Copyright 2014-2016 by Li-Pro.Net, see AUTHORS.
:license: MIT, see LICENSE for details.
'''
from __future__ import absolute_import
from tests.functional import fixtures
import re
class TestCaseSphinxExtAutoDoc(fixtures.TestCaseFunctionalPublishingSphinx):
    """Functional tests of the sphinx.ext.autodoc extension.

    Each test builds the ``ext-autodoc`` test root with one Sphinx builder
    (html, latex, text) and asserts with a single ordered regular expression
    that the generated API documentation contains the expected fragments.
    The ``(?ms)`` flags make ``.`` match newlines, so the ``'.*'`` joiners
    bridge the builder-specific markup between the literal fragments.
    """

    @fixtures.with_html_app(
        testroot='ext-autodoc',
    )
    def test_build_html(self, app, status, warning):
        '''
        FUNCTIONAL TEST: sphinx.ext.autodoc: can build html
        '''
        app.builder.build_update()
        print(status.getvalue())
        print(warning.getvalue())

        p = fixtures.path(app.outdir / 'index.html')
        self.assertTrue(p.isfile(), 'missing file ' + p)
        c = p.read_text(encoding='utf-8')
        print(c)

        # check API auto-documentation
        # (expected fragments in document order; builder-specific code markup
        # comes from the get_html_code helper of the fixtures base class)
        r = re.compile(
            '(?ms)'
            + re.escape(r'<p>A pypi demonstration vehicle.</p>') + '.*'
            + re.escape(r'<p>This is something I want to say that is not in the docstring.</p>') + '.*'
            + re.escape(r'<em class="property">class </em>')
            + re.escape(self.get_html_code(args=' class="descclassname"'))
            + re.escape(r'an_example_pypi_project.useful_1.') + re.escape(self.get_html_code(close=True))
            + re.escape(self.get_html_code(args=' class="descname"')) + re.escape(r'MyPublicClass')
            + re.escape(self.get_html_code(close=True)) + '.*'
            + re.escape(r"<em>foo</em>, <em>bar='baz'</em>") + '.*'
            + re.escape(r'<p>We use this as a public class example class.</p>') + '.*'
            + re.escape(self.get_html_code(args=' class="descname"')) + re.escape(r'get_foobar')
            + re.escape(self.get_html_code(close=True)) + '.*' + re.escape(r'<em>foo</em>, <em>bar=True</em>') + '.*'
            + re.escape(r'<p>This gets the foobar</p>') + '.*'
            + re.escape(r'<p>This really should have a full function definition, but I am too lazy.</p>') + '.*'
            + re.escape(self.get_html_code(args=' class="descclassname"'))
            + re.escape(r'an_example_pypi_project.useful_1.') + re.escape(self.get_html_code(close=True))
            + re.escape(self.get_html_code(args=' class="descname"')) + re.escape(r'public_fn_with_googley_docstring')
            + re.escape(self.get_html_code(close=True)) + '.*'
            + re.escape(r'<em>name</em>, <em>state=None</em>') + '.*'
            + re.escape(r'<p>This function does something.</p>') + '.*'
            + re.escape(self.get_html_code(args=' class="descclassname"'))
            + re.escape(r'an_example_pypi_project.useful_1.') + re.escape(self.get_html_code(close=True))
            + re.escape(self.get_html_code(args=' class="descname"')) + re.escape(r'public_fn_with_sphinxy_docstring')
            + re.escape(self.get_html_code(close=True)) + '.*'
            + re.escape(r'<em>name</em>, <em>state=None</em>') + '.*'
            + re.escape(r'<p>This function does something.</p>') + '.*'
            + re.escape(r'<p>This is something I want to say that is not in the docstring.</p>') + '.*'
            + re.escape(r'<p>A very useful module indeed.</p>') + '.*'
            + re.escape(self.get_html_code(args=' class="descclassname"'))
            + re.escape(r'an_example_pypi_project.useful_2.') + re.escape(self.get_html_code(close=True))
            + re.escape(self.get_html_code(args=' class="descname"')) + re.escape(r'public_fn_with_sphinxy_docstring')
            + re.escape(self.get_html_code(close=True)) + '.*'
            + re.escape(r'<em>name</em>, <em>state=None</em>') + '.*'
            + re.escape(r'<p>This function does something.</p>') + '.*'
            + re.escape(self.get_html_code(args=' class="descclassname"'))
            + re.escape(r'an_example_pypi_project.useful_2.') + re.escape(self.get_html_code(close=True))
            + re.escape(self.get_html_code(args=' class="descname"')) + re.escape(r'_private_fn_with_docstring')
            + re.escape(self.get_html_code(close=True)) + '.*'
            + re.escape(r"<em>foo</em>, <em>bar='baz'</em>, <em>foobarbas=None</em>") + '.*'
            + re.escape(r'<p>I have a docstring, but ') + '.*' + re.escape(r'</p>') + '.*'
            + re.escape(r'<em class="property">class </em>')
            + re.escape(self.get_html_code(args=' class="descclassname"'))
            + re.escape(r'an_example_pypi_project.useful_2.') + re.escape(self.get_html_code(close=True))
            + re.escape(self.get_html_code(args=' class="descname"')) + re.escape(r'MyPublicClass')
            + re.escape(self.get_html_code(close=True)) + '.*'
            + re.escape(r"<em>foo</em>, <em>bar='baz'</em>") + '.*'
            + re.escape(r'<p>We use this as a public class example class.</p>') + '.*'
            + re.escape(self.get_html_code(args=' class="descname"')) + re.escape(r'_get_baz')
            + re.escape(self.get_html_code(close=True)) + '.*' + re.escape(r'<em>baz=None</em>') + '.*'
            + re.escape(r'<p>A private function to get baz.</p>') + '.*'
            + re.escape(r'<p>This really should have a full function definition, but I am too lazy.</p>') + '.*'
            + re.escape(self.get_html_code(args=' class="descname"')) + re.escape(r'get_foobar')
            + re.escape(self.get_html_code(close=True)) + '.*' + re.escape(r'<em>foo</em>, <em>bar=True</em>') + '.*'
            + re.escape(r'<p>This gets the foobar</p>') + '.*'
            + re.escape(r'<p>This really should have a full function definition, but I am too lazy.</p>')
        )
        self.assertRegex(c, r)

    @fixtures.with_latex_app(
        testroot='ext-autodoc',
    )
    def test_build_latex(self, app, status, warning):
        '''
        FUNCTIONAL TEST: sphinx.ext.autodoc: can build latex
        '''
        app.builder.build_update()
        print(status.getvalue())
        print(warning.getvalue())

        p = fixtures.path(app.outdir / 'index.tex')
        self.assertTrue(p.isfile(), 'missing file ' + p)
        c = p.read_text(encoding='utf-8')
        print(c)

        # check API auto-documentation
        # (LaTeX markup helpers come from the fixtures base class)
        r = re.compile(
            '(?ms)'
            + re.escape(r'A pypi demonstration vehicle.') + '.*'
            + re.escape(r'This is something I want to say that is not in the docstring.') + '.*'
            + re.escape(self.get_latex_code_strong() + r'{class }') + '.*'
            + re.escape(self.get_latex_code() + r'{an\_example\_pypi\_project.useful\_1.}') + '.*'
            + re.escape(self.get_latex_bfcode() + r'{MyPublicClass}') + '.*'
            + re.escape(r"{\emph{foo}, \emph{bar='baz'}}") + '.*'
            + re.escape(r'We use this as a public class example class.') + '.*'
            + re.escape(self.get_latex_bfcode() + r'{get\_foobar}') + '.*'
            + re.escape(r'{\emph{foo}, \emph{bar=True}}') + '.*'
            + re.escape(r'This gets the foobar') + '.*'
            + re.escape(r'This really should have a full function definition, but I am too lazy.') + '.*'
            + re.escape(self.get_latex_code() + r'{an\_example\_pypi\_project.useful\_1.}') + '.*'
            + re.escape(self.get_latex_bfcode() + r'{public\_fn\_with\_googley\_docstring}') + '.*'
            + re.escape(r'{\emph{name}, \emph{state=None}}') + '.*'
            + re.escape(r'This function does something.') + '.*'
            + re.escape(self.get_latex_code() + r'{an\_example\_pypi\_project.useful\_1.}') + '.*'
            + re.escape(self.get_latex_bfcode() + r'{public\_fn\_with\_sphinxy\_docstring}') + '.*'
            + re.escape(r'{\emph{name}, \emph{state=None}}') + '.*'
            + re.escape(r'This function does something.') + '.*'
            + re.escape(r'This is something I want to say that is not in the docstring.') + '.*'
            + re.escape(r'A very useful module indeed.') + '.*'
            + re.escape(self.get_latex_code() + r'{an\_example\_pypi\_project.useful\_2.}') + '.*'
            + re.escape(self.get_latex_bfcode() + r'{public\_fn\_with\_sphinxy\_docstring}') + '.*'
            + re.escape(r'{\emph{name}, \emph{state=None}}') + '.*'
            + re.escape(r'This function does something.') + '.*'
            + re.escape(self.get_latex_code() + r'{an\_example\_pypi\_project.useful\_2.}') + '.*'
            + re.escape(self.get_latex_bfcode() + r'{\_private\_fn\_with\_docstring}') + '.*'
            + re.escape(r"{\emph{foo}, \emph{bar='baz'}, \emph{foobarbas=None}}") + '.*'
            + re.escape(r'I have a docstring, but ') + '.*'
            + re.escape(self.get_latex_code_strong() + r'{class }') + '.*'
            + re.escape(self.get_latex_code() + r'{an\_example\_pypi\_project.useful\_2.}') + '.*'
            + re.escape(self.get_latex_bfcode() + r'{MyPublicClass}') + '.*'
            + re.escape(r"{\emph{foo}, \emph{bar='baz'}}") + '.*'
            + re.escape(r'We use this as a public class example class.') + '.*'
            + re.escape(self.get_latex_bfcode() + r'{\_get\_baz}') + '.*'
            + re.escape(r'{\emph{baz=None}}') + '.*'
            + re.escape(r'A private function to get baz.') + '.*'
            + re.escape(r'This really should have a full function definition, but I am too lazy.') + '.*'
            + re.escape(self.get_latex_bfcode() + r'{get\_foobar}') + '.*'
            + re.escape(r'{\emph{foo}, \emph{bar=True}}') + '.*'
            + re.escape(r'This gets the foobar') + '.*'
            + re.escape(r'This really should have a full function definition, but I am too lazy.')
        )
        self.assertRegex(c, r)

    @fixtures.with_text_app(
        testroot='ext-autodoc',
    )
    def test_build_text(self, app, status, warning):
        '''
        FUNCTIONAL TEST: sphinx.ext.autodoc: can build text
        '''
        app.builder.build_update()
        print(status.getvalue())
        print(warning.getvalue())

        p = fixtures.path(app.outdir / 'index.txt')
        self.assertTrue(p.isfile(), 'missing file ' + p)
        c = p.read_text(encoding='utf-8')
        print(c)

        # check API auto-documentation
        # (leading spaces in the fragments mirror the text builder's
        # indentation of class members and docstrings)
        r = re.compile(
            '(?ms)'
            + re.escape(r'A pypi demonstration vehicle.') + '.*'
            + re.escape(r'This is something I want to say that is not in the docstring.') + '.*'
            + re.escape(r"class an_example_pypi_project.useful_1.MyPublicClass(foo, bar='baz')") + '.*'
            + re.escape(r'   We use this as a public class example class.') + '.*'
            + re.escape(r'   get_foobar(foo, bar=True)') + '.*'
            + re.escape(r'      This gets the foobar') + '.*'
            + re.escape(r'      This really should have a full function definition, but I am too') + '.*'
            + re.escape(r'      lazy.') + '.*'
            + re.escape(r'an_example_pypi_project.useful_1.public_fn_with_googley_docstring(name, state=None)') + '.*'
            + re.escape(r'   This function does something.') + '.*'
            + re.escape(r'an_example_pypi_project.useful_1.public_fn_with_sphinxy_docstring(name, state=None)') + '.*'
            + re.escape(r'   This function does something.') + '.*'
            + re.escape(r'This is something I want to say that is not in the docstring.') + '.*'
            + re.escape(r'A very useful module indeed.') + '.*'
            + re.escape(r'an_example_pypi_project.useful_2.public_fn_with_sphinxy_docstring(name, state=None)') + '.*'
            + re.escape(r'   This function does something.') + '.*'
            + re.escape(r'an_example_pypi_project.useful_2._private_fn_with_docstring')
            + re.escape(r"(foo, bar='baz', foobarbas=None)") + '.*'
            + re.escape(r'   I have a docstring, but ') + '.*'
            + re.escape(r"class an_example_pypi_project.useful_2.MyPublicClass(foo, bar='baz')") + '.*'
            + re.escape(r'   We use this as a public class example class.') + '.*'
            + re.escape(r'   _get_baz(baz=None)') + '.*'
            + re.escape(r'      A private function to get baz.') + '.*'
            + re.escape(r'      This really should have a full function definition, but I am too') + '.*'
            + re.escape(r'      lazy.') + '.*'
            + re.escape(r'   get_foobar(foo, bar=True)') + '.*'
            + re.escape(r'      This gets the foobar') + '.*'
            + re.escape(r'      This really should have a full function definition, but I am too') + '.*'
            + re.escape(r'      lazy.')
        )
        self.assertRegex(c, r)
# Allow running this test module directly; delegates to the fixtures runner.
if __name__ == "__main__":
    fixtures.main()
| 56.803279 | 118 | 0.581457 |
from __future__ import absolute_import
from tests.functional import fixtures
import re
class TestCaseSphinxExtAutoDoc(fixtures.TestCaseFunctionalPublishingSphinx):
    @fixtures.with_html_app(
        testroot='ext-autodoc',
    )
    def test_build_html(self, app, status, warning):
        """Build the ext-autodoc test root as HTML and assert, via one ordered
        regular expression, that the generated API documentation contains the
        expected fragments ((?ms): '.' also matches newlines)."""
        app.builder.build_update()
        print(status.getvalue())
        print(warning.getvalue())
        p = fixtures.path(app.outdir / 'index.html')
        self.assertTrue(p.isfile(), 'missing file ' + p)
        c = p.read_text(encoding='utf-8')
        print(c)
        # check API auto-documentation output against expected HTML fragments
        r = re.compile(
            '(?ms)'
            + re.escape(r'<p>A pypi demonstration vehicle.</p>') + '.*'
            + re.escape(r'<p>This is something I want to say that is not in the docstring.</p>') + '.*'
            + re.escape(r'<em class="property">class </em>')
            + re.escape(self.get_html_code(args=' class="descclassname"'))
            + re.escape(r'an_example_pypi_project.useful_1.') + re.escape(self.get_html_code(close=True))
            + re.escape(self.get_html_code(args=' class="descname"')) + re.escape(r'MyPublicClass')
            + re.escape(self.get_html_code(close=True)) + '.*'
            + re.escape(r"<em>foo</em>, <em>bar='baz'</em>") + '.*'
            + re.escape(r'<p>We use this as a public class example class.</p>') + '.*'
            + re.escape(self.get_html_code(args=' class="descname"')) + re.escape(r'get_foobar')
            + re.escape(self.get_html_code(close=True)) + '.*' + re.escape(r'<em>foo</em>, <em>bar=True</em>') + '.*'
            + re.escape(r'<p>This gets the foobar</p>') + '.*'
            + re.escape(r'<p>This really should have a full function definition, but I am too lazy.</p>') + '.*'
            + re.escape(self.get_html_code(args=' class="descclassname"'))
            + re.escape(r'an_example_pypi_project.useful_1.') + re.escape(self.get_html_code(close=True))
            + re.escape(self.get_html_code(args=' class="descname"')) + re.escape(r'public_fn_with_googley_docstring')
            + re.escape(self.get_html_code(close=True)) + '.*'
            + re.escape(r'<em>name</em>, <em>state=None</em>') + '.*'
            + re.escape(r'<p>This function does something.</p>') + '.*'
            + re.escape(self.get_html_code(args=' class="descclassname"'))
            + re.escape(r'an_example_pypi_project.useful_1.') + re.escape(self.get_html_code(close=True))
            + re.escape(self.get_html_code(args=' class="descname"')) + re.escape(r'public_fn_with_sphinxy_docstring')
            + re.escape(self.get_html_code(close=True)) + '.*'
            + re.escape(r'<em>name</em>, <em>state=None</em>') + '.*'
            + re.escape(r'<p>This function does something.</p>') + '.*'
            + re.escape(r'<p>This is something I want to say that is not in the docstring.</p>') + '.*'
            + re.escape(r'<p>A very useful module indeed.</p>') + '.*'
            + re.escape(self.get_html_code(args=' class="descclassname"'))
            + re.escape(r'an_example_pypi_project.useful_2.') + re.escape(self.get_html_code(close=True))
            + re.escape(self.get_html_code(args=' class="descname"')) + re.escape(r'public_fn_with_sphinxy_docstring')
            + re.escape(self.get_html_code(close=True)) + '.*'
            + re.escape(r'<em>name</em>, <em>state=None</em>') + '.*'
            + re.escape(r'<p>This function does something.</p>') + '.*'
            + re.escape(self.get_html_code(args=' class="descclassname"'))
            + re.escape(r'an_example_pypi_project.useful_2.') + re.escape(self.get_html_code(close=True))
            + re.escape(self.get_html_code(args=' class="descname"')) + re.escape(r'_private_fn_with_docstring')
            + re.escape(self.get_html_code(close=True)) + '.*'
            + re.escape(r"<em>foo</em>, <em>bar='baz'</em>, <em>foobarbas=None</em>") + '.*'
            + re.escape(r'<p>I have a docstring, but ') + '.*' + re.escape(r'</p>') + '.*'
            + re.escape(r'<em class="property">class </em>')
            + re.escape(self.get_html_code(args=' class="descclassname"'))
            + re.escape(r'an_example_pypi_project.useful_2.') + re.escape(self.get_html_code(close=True))
            + re.escape(self.get_html_code(args=' class="descname"')) + re.escape(r'MyPublicClass')
            + re.escape(self.get_html_code(close=True)) + '.*'
            + re.escape(r"<em>foo</em>, <em>bar='baz'</em>") + '.*'
            + re.escape(r'<p>We use this as a public class example class.</p>') + '.*'
            + re.escape(self.get_html_code(args=' class="descname"')) + re.escape(r'_get_baz')
            + re.escape(self.get_html_code(close=True)) + '.*' + re.escape(r'<em>baz=None</em>') + '.*'
            + re.escape(r'<p>A private function to get baz.</p>') + '.*'
            + re.escape(r'<p>This really should have a full function definition, but I am too lazy.</p>') + '.*'
            + re.escape(self.get_html_code(args=' class="descname"')) + re.escape(r'get_foobar')
            + re.escape(self.get_html_code(close=True)) + '.*' + re.escape(r'<em>foo</em>, <em>bar=True</em>') + '.*'
            + re.escape(r'<p>This gets the foobar</p>') + '.*'
            + re.escape(r'<p>This really should have a full function definition, but I am too lazy.</p>')
        )
        self.assertRegex(c, r)
    @fixtures.with_latex_app(
        testroot='ext-autodoc',
    )
    def test_build_latex(self, app, status, warning):
        """Build the autodoc test project as LaTeX and check index.tex content."""
        app.builder.build_update()
        print(status.getvalue())
        print(warning.getvalue())
        p = fixtures.path(app.outdir / 'index.tex')
        self.assertTrue(p.isfile(), 'missing file ' + p)
        c = p.read_text(encoding='utf-8')
        print(c)
        # (?ms): '.' spans newlines; the escaped literal fragments joined by
        # '.*' assert that the documented objects appear in this exact order.
        r = re.compile(
            '(?ms)'
            + re.escape(r'A pypi demonstration vehicle.') + '.*'
            + re.escape(r'This is something I want to say that is not in the docstring.') + '.*'
            + re.escape(self.get_latex_code_strong() + r'{class }') + '.*'
            + re.escape(self.get_latex_code() + r'{an\_example\_pypi\_project.useful\_1.}') + '.*'
            + re.escape(self.get_latex_bfcode() + r'{MyPublicClass}') + '.*'
            + re.escape(r"{\emph{foo}, \emph{bar='baz'}}") + '.*'
            + re.escape(r'We use this as a public class example class.') + '.*'
            + re.escape(self.get_latex_bfcode() + r'{get\_foobar}') + '.*'
            + re.escape(r'{\emph{foo}, \emph{bar=True}}') + '.*'
            + re.escape(r'This gets the foobar') + '.*'
            + re.escape(r'This really should have a full function definition, but I am too lazy.') + '.*'
            + re.escape(self.get_latex_code() + r'{an\_example\_pypi\_project.useful\_1.}') + '.*'
            + re.escape(self.get_latex_bfcode() + r'{public\_fn\_with\_googley\_docstring}') + '.*'
            + re.escape(r'{\emph{name}, \emph{state=None}}') + '.*'
            + re.escape(r'This function does something.') + '.*'
            + re.escape(self.get_latex_code() + r'{an\_example\_pypi\_project.useful\_1.}') + '.*'
            + re.escape(self.get_latex_bfcode() + r'{public\_fn\_with\_sphinxy\_docstring}') + '.*'
            + re.escape(r'{\emph{name}, \emph{state=None}}') + '.*'
            + re.escape(r'This function does something.') + '.*'
            + re.escape(r'This is something I want to say that is not in the docstring.') + '.*'
            + re.escape(r'A very useful module indeed.') + '.*'
            + re.escape(self.get_latex_code() + r'{an\_example\_pypi\_project.useful\_2.}') + '.*'
            + re.escape(self.get_latex_bfcode() + r'{public\_fn\_with\_sphinxy\_docstring}') + '.*'
            + re.escape(r'{\emph{name}, \emph{state=None}}') + '.*'
            + re.escape(r'This function does something.') + '.*'
            + re.escape(self.get_latex_code() + r'{an\_example\_pypi\_project.useful\_2.}') + '.*'
            + re.escape(self.get_latex_bfcode() + r'{\_private\_fn\_with\_docstring}') + '.*'
            + re.escape(r"{\emph{foo}, \emph{bar='baz'}, \emph{foobarbas=None}}") + '.*'
            + re.escape(r'I have a docstring, but ') + '.*'
            + re.escape(self.get_latex_code_strong() + r'{class }') + '.*'
            + re.escape(self.get_latex_code() + r'{an\_example\_pypi\_project.useful\_2.}') + '.*'
            + re.escape(self.get_latex_bfcode() + r'{MyPublicClass}') + '.*'
            + re.escape(r"{\emph{foo}, \emph{bar='baz'}}") + '.*'
            + re.escape(r'We use this as a public class example class.') + '.*'
            + re.escape(self.get_latex_bfcode() + r'{\_get\_baz}') + '.*'
            + re.escape(r'{\emph{baz=None}}') + '.*'
            + re.escape(r'A private function to get baz.') + '.*'
            + re.escape(r'This really should have a full function definition, but I am too lazy.') + '.*'
            + re.escape(self.get_latex_bfcode() + r'{get\_foobar}') + '.*'
            + re.escape(r'{\emph{foo}, \emph{bar=True}}') + '.*'
            + re.escape(r'This gets the foobar') + '.*'
            + re.escape(r'This really should have a full function definition, but I am too lazy.')
        )
        self.assertRegex(c, r)
    @fixtures.with_text_app(
        testroot='ext-autodoc',
    )
    def test_build_text(self, app, status, warning):
        """Build the autodoc test project as plain text and check index.txt content."""
        app.builder.build_update()
        print(status.getvalue())
        print(warning.getvalue())
        p = fixtures.path(app.outdir / 'index.txt')
        self.assertTrue(p.isfile(), 'missing file ' + p)
        c = p.read_text(encoding='utf-8')
        print(c)
        # (?ms): '.' spans newlines; the escaped fragments joined by '.*'
        # assert the ordering of documented objects in the text output.
        r = re.compile(
            '(?ms)'
            + re.escape(r'A pypi demonstration vehicle.') + '.*'
            + re.escape(r'This is something I want to say that is not in the docstring.') + '.*'
            + re.escape(r"class an_example_pypi_project.useful_1.MyPublicClass(foo, bar='baz')") + '.*'
            + re.escape(r' We use this as a public class example class.') + '.*'
            + re.escape(r' get_foobar(foo, bar=True)') + '.*'
            + re.escape(r' This gets the foobar') + '.*'
            + re.escape(r' This really should have a full function definition, but I am too') + '.*'
            + re.escape(r' lazy.') + '.*'
            + re.escape(r'an_example_pypi_project.useful_1.public_fn_with_googley_docstring(name, state=None)') + '.*'
            + re.escape(r' This function does something.') + '.*'
            + re.escape(r'an_example_pypi_project.useful_1.public_fn_with_sphinxy_docstring(name, state=None)') + '.*'
            + re.escape(r' This function does something.') + '.*'
            + re.escape(r'This is something I want to say that is not in the docstring.') + '.*'
            + re.escape(r'A very useful module indeed.') + '.*'
            + re.escape(r'an_example_pypi_project.useful_2.public_fn_with_sphinxy_docstring(name, state=None)') + '.*'
            + re.escape(r' This function does something.') + '.*'
            + re.escape(r'an_example_pypi_project.useful_2._private_fn_with_docstring')
            + re.escape(r"(foo, bar='baz', foobarbas=None)") + '.*'
            + re.escape(r' I have a docstring, but ') + '.*'
            + re.escape(r"class an_example_pypi_project.useful_2.MyPublicClass(foo, bar='baz')") + '.*'
            + re.escape(r' We use this as a public class example class.') + '.*'
            + re.escape(r' _get_baz(baz=None)') + '.*'
            + re.escape(r' A private function to get baz.') + '.*'
            + re.escape(r' This really should have a full function definition, but I am too') + '.*'
            + re.escape(r' lazy.') + '.*'
            + re.escape(r' get_foobar(foo, bar=True)') + '.*'
            + re.escape(r' This gets the foobar') + '.*'
            + re.escape(r' This really should have a full function definition, but I am too') + '.*'
            + re.escape(r' lazy.')
        )
        self.assertRegex(c, r)
# Entry point when the module is executed directly.
if __name__ == "__main__":
    fixtures.main()
| true | true |
1c2fb3146c4ad81957fc32a3a4da2d4c4935acf1 | 39,525 | py | Python | astropy/io/fits/tests/test_hdulist.py | reidarkind/astropy | 0d8e7dea86d39b9faad025708b852814c8d5d41a | [
"BSD-3-Clause"
] | 445 | 2019-01-26T13:50:26.000Z | 2022-03-18T05:17:38.000Z | Library/lib/python3.7/site-packages/astropy-4.0-py3.7-macosx-10.9-x86_64.egg/astropy/io/fits/tests/test_hdulist.py | gengyong/Carnets | 8930a14f69360d4db115a85ff9e0f6efa80fa2e7 | [
"BSD-3-Clause"
] | 242 | 2019-01-29T15:48:27.000Z | 2022-03-31T22:09:21.000Z | Library/lib/python3.7/site-packages/astropy-4.0-py3.7-macosx-10.9-x86_64.egg/astropy/io/fits/tests/test_hdulist.py | gengyong/Carnets | 8930a14f69360d4db115a85ff9e0f6efa80fa2e7 | [
"BSD-3-Clause"
] | 31 | 2019-03-10T09:51:27.000Z | 2022-02-14T23:11:12.000Z | # Licensed under a 3-clause BSD style license - see PYFITS.rst
import glob
import io
import os
import sys
import copy
import subprocess
import pytest
import numpy as np
from astropy.io.fits.verify import VerifyError
from astropy.io import fits
from astropy.tests.helper import raises, catch_warnings, ignore_warnings
from astropy.utils.exceptions import AstropyUserWarning, AstropyDeprecationWarning
from . import FitsTestCase
class TestHDUListFunctions(FitsTestCase):
def test_update_name(self):
with fits.open(self.data('o4sp040b0_raw.fits')) as hdul:
hdul[4].name = 'Jim'
hdul[4].ver = 9
assert hdul[('JIM', 9)].header['extname'] == 'JIM'
def test_hdu_file_bytes(self):
with fits.open(self.data('checksum.fits')) as hdul:
res = hdul[0].filebytes()
assert res == 11520
res = hdul[1].filebytes()
assert res == 8640
    def test_hdulist_file_info(self):
        """Check HDUList.fileinfo() bookkeeping before and after an insert."""
        def test_fileinfo(**kwargs):
            # Compares the most recent ``res`` (closed over from the enclosing
            # scope) against default expectations, overridable per call.
            assert res['datSpan'] == kwargs.get('datSpan', 2880)
            assert res['resized'] == kwargs.get('resized', False)
            assert res['filename'] == self.data('checksum.fits')
            assert res['datLoc'] == kwargs.get('datLoc', 8640)
            assert res['hdrLoc'] == kwargs.get('hdrLoc', 0)
            assert res['filemode'] == 'readonly'
        with fits.open(self.data('checksum.fits')) as hdul:
            res = hdul.fileinfo(0)
            res = hdul.fileinfo(1)
            test_fileinfo(datLoc=17280, hdrLoc=11520)
            hdu = fits.ImageHDU(data=hdul[0].data)
            hdul.insert(1, hdu)
            # After the insert, existing HDUs are flagged as resized; the
            # inserted HDU has no on-disk location/span yet (None values).
            res = hdul.fileinfo(0)
            test_fileinfo(resized=True)
            res = hdul.fileinfo(1)
            test_fileinfo(datSpan=None, resized=True, datLoc=None, hdrLoc=None)
            res = hdul.fileinfo(2)
            test_fileinfo(resized=1, datLoc=17280, hdrLoc=11520)
def test_create_from_multiple_primary(self):
"""
Regression test for https://aeon.stsci.edu/ssb/trac/pyfits/ticket/145
Ensure that a validation error occurs when saving an HDUList containing
multiple PrimaryHDUs.
"""
hdul = fits.HDUList([fits.PrimaryHDU(), fits.PrimaryHDU()])
pytest.raises(VerifyError, hdul.writeto, self.temp('temp.fits'),
output_verify='exception')
def test_append_primary_to_empty_list(self):
# Tests appending a Simple PrimaryHDU to an empty HDUList.
hdul = fits.HDUList()
hdu = fits.PrimaryHDU(np.arange(100, dtype=np.int32))
hdul.append(hdu)
info = [(0, 'PRIMARY', 1, 'PrimaryHDU', 5, (100,), 'int32', '')]
assert hdul.info(output=False) == info
hdul.writeto(self.temp('test-append.fits'))
assert fits.info(self.temp('test-append.fits'), output=False) == info
def test_append_extension_to_empty_list(self):
"""Tests appending a Simple ImageHDU to an empty HDUList."""
hdul = fits.HDUList()
hdu = fits.ImageHDU(np.arange(100, dtype=np.int32))
hdul.append(hdu)
info = [(0, 'PRIMARY', 1, 'PrimaryHDU', 4, (100,), 'int32', '')]
assert hdul.info(output=False) == info
hdul.writeto(self.temp('test-append.fits'))
assert fits.info(self.temp('test-append.fits'), output=False) == info
def test_append_table_extension_to_empty_list(self):
"""Tests appending a Simple Table ExtensionHDU to a empty HDUList."""
hdul = fits.HDUList()
with fits.open(self.data('tb.fits')) as hdul1:
hdul.append(hdul1[1])
info = [(0, 'PRIMARY', 1, 'PrimaryHDU', 4, (), '', ''),
(1, '', 1, 'BinTableHDU', 24, '2R x 4C', '[1J, 3A, 1E, 1L]', '')]
assert hdul.info(output=False) == info
hdul.writeto(self.temp('test-append.fits'))
assert fits.info(self.temp('test-append.fits'), output=False) == info
def test_append_groupshdu_to_empty_list(self):
"""Tests appending a Simple GroupsHDU to an empty HDUList."""
hdul = fits.HDUList()
hdu = fits.GroupsHDU()
hdul.append(hdu)
info = [(0, 'PRIMARY', 1, 'GroupsHDU', 8, (), '',
'1 Groups 0 Parameters')]
assert hdul.info(output=False) == info
hdul.writeto(self.temp('test-append.fits'))
assert fits.info(self.temp('test-append.fits'), output=False) == info
def test_append_primary_to_non_empty_list(self):
"""Tests appending a Simple PrimaryHDU to a non-empty HDUList."""
with fits.open(self.data('arange.fits')) as hdul:
hdu = fits.PrimaryHDU(np.arange(100, dtype=np.int32))
hdul.append(hdu)
info = [(0, 'PRIMARY', 1, 'PrimaryHDU', 7, (11, 10, 7), 'int32', ''),
(1, '', 1, 'ImageHDU', 6, (100,), 'int32', '')]
assert hdul.info(output=False) == info
hdul.writeto(self.temp('test-append.fits'))
assert fits.info(self.temp('test-append.fits'), output=False) == info
def test_append_extension_to_non_empty_list(self):
"""Tests appending a Simple ExtensionHDU to a non-empty HDUList."""
with fits.open(self.data('tb.fits')) as hdul:
hdul.append(hdul[1])
info = [(0, 'PRIMARY', 1, 'PrimaryHDU', 11, (), '', ''),
(1, '', 1, 'BinTableHDU', 24, '2R x 4C', '[1J, 3A, 1E, 1L]', ''),
(2, '', 1, 'BinTableHDU', 24, '2R x 4C', '[1J, 3A, 1E, 1L]', '')]
assert hdul.info(output=False) == info
hdul.writeto(self.temp('test-append.fits'))
assert fits.info(self.temp('test-append.fits'), output=False) == info
@raises(ValueError)
def test_append_groupshdu_to_non_empty_list(self):
"""Tests appending a Simple GroupsHDU to an empty HDUList."""
hdul = fits.HDUList()
hdu = fits.PrimaryHDU(np.arange(100, dtype=np.int32))
hdul.append(hdu)
hdu = fits.GroupsHDU()
hdul.append(hdu)
def test_insert_primary_to_empty_list(self):
"""Tests inserting a Simple PrimaryHDU to an empty HDUList."""
hdul = fits.HDUList()
hdu = fits.PrimaryHDU(np.arange(100, dtype=np.int32))
hdul.insert(0, hdu)
info = [(0, 'PRIMARY', 1, 'PrimaryHDU', 5, (100,), 'int32', '')]
assert hdul.info(output=False) == info
hdul.writeto(self.temp('test-insert.fits'))
assert fits.info(self.temp('test-insert.fits'), output=False) == info
def test_insert_extension_to_empty_list(self):
"""Tests inserting a Simple ImageHDU to an empty HDUList."""
hdul = fits.HDUList()
hdu = fits.ImageHDU(np.arange(100, dtype=np.int32))
hdul.insert(0, hdu)
info = [(0, 'PRIMARY', 1, 'PrimaryHDU', 4, (100,), 'int32', '')]
assert hdul.info(output=False) == info
hdul.writeto(self.temp('test-insert.fits'))
assert fits.info(self.temp('test-insert.fits'), output=False) == info
def test_insert_table_extension_to_empty_list(self):
"""Tests inserting a Simple Table ExtensionHDU to a empty HDUList."""
hdul = fits.HDUList()
with fits.open(self.data('tb.fits')) as hdul1:
hdul.insert(0, hdul1[1])
info = [(0, 'PRIMARY', 1, 'PrimaryHDU', 4, (), '', ''),
(1, '', 1, 'BinTableHDU', 24, '2R x 4C', '[1J, 3A, 1E, 1L]', '')]
assert hdul.info(output=False) == info
hdul.writeto(self.temp('test-insert.fits'))
assert fits.info(self.temp('test-insert.fits'), output=False) == info
def test_insert_groupshdu_to_empty_list(self):
"""Tests inserting a Simple GroupsHDU to an empty HDUList."""
hdul = fits.HDUList()
hdu = fits.GroupsHDU()
hdul.insert(0, hdu)
info = [(0, 'PRIMARY', 1, 'GroupsHDU', 8, (), '',
'1 Groups 0 Parameters')]
assert hdul.info(output=False) == info
hdul.writeto(self.temp('test-insert.fits'))
assert fits.info(self.temp('test-insert.fits'), output=False) == info
def test_insert_primary_to_non_empty_list(self):
"""Tests inserting a Simple PrimaryHDU to a non-empty HDUList."""
with fits.open(self.data('arange.fits')) as hdul:
hdu = fits.PrimaryHDU(np.arange(100, dtype=np.int32))
hdul.insert(1, hdu)
info = [(0, 'PRIMARY', 1, 'PrimaryHDU', 7, (11, 10, 7), 'int32', ''),
(1, '', 1, 'ImageHDU', 6, (100,), 'int32', '')]
assert hdul.info(output=False) == info
hdul.writeto(self.temp('test-insert.fits'))
assert fits.info(self.temp('test-insert.fits'), output=False) == info
def test_insert_extension_to_non_empty_list(self):
"""Tests inserting a Simple ExtensionHDU to a non-empty HDUList."""
with fits.open(self.data('tb.fits')) as hdul:
hdul.insert(1, hdul[1])
info = [(0, 'PRIMARY', 1, 'PrimaryHDU', 11, (), '', ''),
(1, '', 1, 'BinTableHDU', 24, '2R x 4C', '[1J, 3A, 1E, 1L]', ''),
(2, '', 1, 'BinTableHDU', 24, '2R x 4C', '[1J, 3A, 1E, 1L]', '')]
assert hdul.info(output=False) == info
hdul.writeto(self.temp('test-insert.fits'))
assert fits.info(self.temp('test-insert.fits'), output=False) == info
def test_insert_groupshdu_to_non_empty_list(self):
"""Tests inserting a Simple GroupsHDU to an empty HDUList."""
hdul = fits.HDUList()
hdu = fits.PrimaryHDU(np.arange(100, dtype=np.int32))
hdul.insert(0, hdu)
hdu = fits.GroupsHDU()
with pytest.raises(ValueError):
hdul.insert(1, hdu)
info = [(0, 'PRIMARY', 1, 'GroupsHDU', 8, (), '',
'1 Groups 0 Parameters'),
(1, '', 1, 'ImageHDU', 6, (100,), 'int32', '')]
hdul.insert(0, hdu)
assert hdul.info(output=False) == info
hdul.writeto(self.temp('test-insert.fits'))
assert fits.info(self.temp('test-insert.fits'), output=False) == info
@raises(ValueError)
def test_insert_groupshdu_to_begin_of_hdulist_with_groupshdu(self):
"""
Tests inserting a Simple GroupsHDU to the beginning of an HDUList
that that already contains a GroupsHDU.
"""
hdul = fits.HDUList()
hdu = fits.GroupsHDU()
hdul.insert(0, hdu)
hdul.insert(0, hdu)
def test_insert_extension_to_primary_in_non_empty_list(self):
# Tests inserting a Simple ExtensionHDU to a non-empty HDUList.
with fits.open(self.data('tb.fits')) as hdul:
hdul.insert(0, hdul[1])
info = [(0, 'PRIMARY', 1, 'PrimaryHDU', 4, (), '', ''),
(1, '', 1, 'BinTableHDU', 24, '2R x 4C', '[1J, 3A, 1E, 1L]', ''),
(2, '', 1, 'ImageHDU', 12, (), '', ''),
(3, '', 1, 'BinTableHDU', 24, '2R x 4C', '[1J, 3A, 1E, 1L]', '')]
assert hdul.info(output=False) == info
hdul.writeto(self.temp('test-insert.fits'))
assert fits.info(self.temp('test-insert.fits'), output=False) == info
def test_insert_image_extension_to_primary_in_non_empty_list(self):
"""
Tests inserting a Simple Image ExtensionHDU to a non-empty HDUList
as the primary HDU.
"""
with fits.open(self.data('tb.fits')) as hdul:
hdu = fits.ImageHDU(np.arange(100, dtype=np.int32))
hdul.insert(0, hdu)
info = [(0, 'PRIMARY', 1, 'PrimaryHDU', 5, (100,), 'int32', ''),
(1, '', 1, 'ImageHDU', 12, (), '', ''),
(2, '', 1, 'BinTableHDU', 24, '2R x 4C', '[1J, 3A, 1E, 1L]', '')]
assert hdul.info(output=False) == info
hdul.writeto(self.temp('test-insert.fits'))
assert fits.info(self.temp('test-insert.fits'), output=False) == info
def test_filename(self):
"""Tests the HDUList filename method."""
with fits.open(self.data('tb.fits')) as hdul:
name = hdul.filename()
assert name == self.data('tb.fits')
def test_file_like(self):
"""
Tests the use of a file like object with no tell or seek methods
in HDUList.writeto(), HDULIST.flush() or astropy.io.fits.writeto()
"""
hdu = fits.PrimaryHDU(np.arange(100, dtype=np.int32))
hdul = fits.HDUList()
hdul.append(hdu)
tmpfile = open(self.temp('tmpfile.fits'), 'wb')
hdul.writeto(tmpfile)
tmpfile.close()
info = [(0, 'PRIMARY', 1, 'PrimaryHDU', 5, (100,), 'int32', '')]
assert fits.info(self.temp('tmpfile.fits'), output=False) == info
def test_file_like_2(self):
hdu = fits.PrimaryHDU(np.arange(100, dtype=np.int32))
tmpfile = open(self.temp('tmpfile.fits'), 'wb')
hdul = fits.open(tmpfile, mode='ostream')
hdul.append(hdu)
hdul.flush()
tmpfile.close()
hdul.close()
info = [(0, 'PRIMARY', 1, 'PrimaryHDU', 5, (100,), 'int32', '')]
assert fits.info(self.temp('tmpfile.fits'), output=False) == info
def test_file_like_3(self):
tmpfile = open(self.temp('tmpfile.fits'), 'wb')
fits.writeto(tmpfile, np.arange(100, dtype=np.int32))
tmpfile.close()
info = [(0, 'PRIMARY', 1, 'PrimaryHDU', 5, (100,), 'int32', '')]
assert fits.info(self.temp('tmpfile.fits'), output=False) == info
def test_shallow_copy(self):
"""
Tests that `HDUList.__copy__()` and `HDUList.copy()` return a
shallow copy (regression test for #7211).
"""
n = np.arange(10.0)
primary_hdu = fits.PrimaryHDU(n)
hdu = fits.ImageHDU(n)
hdul = fits.HDUList([primary_hdu, hdu])
for hdulcopy in (hdul.copy(), copy.copy(hdul)):
assert isinstance(hdulcopy, fits.HDUList)
assert hdulcopy is not hdul
assert hdulcopy[0] is hdul[0]
assert hdulcopy[1] is hdul[1]
def test_deep_copy(self):
"""
Tests that `HDUList.__deepcopy__()` returns a deep copy.
"""
n = np.arange(10.0)
primary_hdu = fits.PrimaryHDU(n)
hdu = fits.ImageHDU(n)
hdul = fits.HDUList([primary_hdu, hdu])
hdulcopy = copy.deepcopy(hdul)
assert isinstance(hdulcopy, fits.HDUList)
assert hdulcopy is not hdul
for index in range(len(hdul)):
assert hdulcopy[index] is not hdul[index]
assert hdulcopy[index].header == hdul[index].header
np.testing.assert_array_equal(hdulcopy[index].data, hdul[index].data)
def test_new_hdu_extname(self):
"""
Tests that new extension HDUs that are added to an HDUList can be
properly indexed by their EXTNAME/EXTVER (regression test for
ticket:48).
"""
with fits.open(self.data('test0.fits')) as f:
hdul = fits.HDUList()
hdul.append(f[0].copy())
hdu = fits.ImageHDU(header=f[1].header)
hdul.append(hdu)
assert hdul[1].header['EXTNAME'] == 'SCI'
assert hdul[1].header['EXTVER'] == 1
assert hdul.index_of(('SCI', 1)) == 1
assert hdul.index_of(hdu) == len(hdul) - 1
    def test_update_filelike(self):
        """Test opening a file-like object in update mode and resizing the
        HDU.
        """
        sf = io.BytesIO()
        arr = np.zeros((100, 100))
        hdu = fits.PrimaryHDU(data=arr)
        hdu.writeto(sf)
        sf.seek(0)
        arr = np.zeros((200, 200))
        # Replace the data with a larger array, forcing the in-memory "file"
        # to be resized when flush() runs.
        hdul = fits.open(sf, mode='update')
        hdul[0].data = arr
        hdul.flush()
        sf.seek(0)
        # Re-open from the start of the buffer and verify the rewritten data.
        # NOTE(review): neither HDUList is explicitly closed here — presumably
        # harmless for a BytesIO-backed list, but confirm this is intentional.
        hdul = fits.open(sf)
        assert len(hdul) == 1
        assert (hdul[0].data == arr).all()
def test_flush_readonly(self):
"""Test flushing changes to a file opened in a read only mode."""
oldmtime = os.stat(self.data('test0.fits')).st_mtime
hdul = fits.open(self.data('test0.fits'))
hdul[0].header['FOO'] = 'BAR'
with catch_warnings(AstropyUserWarning) as w:
hdul.flush()
assert len(w) == 1
assert 'mode is not supported' in str(w[0].message)
assert oldmtime == os.stat(self.data('test0.fits')).st_mtime
def test_fix_extend_keyword(self):
hdul = fits.HDUList()
hdul.append(fits.PrimaryHDU())
hdul.append(fits.ImageHDU())
del hdul[0].header['EXTEND']
hdul.verify('silentfix')
assert 'EXTEND' in hdul[0].header
assert hdul[0].header['EXTEND'] is True
def test_fix_malformed_naxisj(self):
"""
Tests that malformed NAXISj values are fixed sensibly.
"""
hdu = fits.open(self.data('arange.fits'))
# Malform NAXISj header data
hdu[0].header['NAXIS1'] = 11.0
hdu[0].header['NAXIS2'] = '10.0'
hdu[0].header['NAXIS3'] = '7'
# Axes cache needs to be malformed as well
hdu[0]._axes = [11.0, '10.0', '7']
# Perform verification including the fix
hdu.verify('silentfix')
# Check that malformed data was converted
assert hdu[0].header['NAXIS1'] == 11
assert hdu[0].header['NAXIS2'] == 10
assert hdu[0].header['NAXIS3'] == 7
hdu.close()
def test_fix_wellformed_naxisj(self):
"""
Tests that wellformed NAXISj values are not modified.
"""
hdu = fits.open(self.data('arange.fits'))
# Fake new NAXISj header data
hdu[0].header['NAXIS1'] = 768
hdu[0].header['NAXIS2'] = 64
hdu[0].header['NAXIS3'] = 8
# Axes cache needs to be faked as well
hdu[0]._axes = [768, 64, 8]
# Perform verification including the fix
hdu.verify('silentfix')
# Check that malformed data was converted
assert hdu[0].header['NAXIS1'] == 768
assert hdu[0].header['NAXIS2'] == 64
assert hdu[0].header['NAXIS3'] == 8
hdu.close()
def test_new_hdulist_extend_keyword(self):
"""Regression test for https://aeon.stsci.edu/ssb/trac/pyfits/ticket/114
Tests that adding a PrimaryHDU to a new HDUList object updates the
EXTEND keyword on that HDU.
"""
h0 = fits.Header()
hdu = fits.PrimaryHDU(header=h0)
sci = fits.ImageHDU(data=np.array(10))
image = fits.HDUList([hdu, sci])
image.writeto(self.temp('temp.fits'))
assert 'EXTEND' in hdu.header
assert hdu.header['EXTEND'] is True
def test_replace_memmaped_array(self):
# Copy the original before we modify it
with fits.open(self.data('test0.fits')) as hdul:
hdul.writeto(self.temp('temp.fits'))
hdul = fits.open(self.temp('temp.fits'), mode='update', memmap=True)
old_data = hdul[1].data.copy()
hdul[1].data = hdul[1].data + 1
hdul.close()
with fits.open(self.temp('temp.fits'), memmap=True) as hdul:
assert ((old_data + 1) == hdul[1].data).all()
def test_open_file_with_end_padding(self):
"""Regression test for https://aeon.stsci.edu/ssb/trac/pyfits/ticket/106
Open files with end padding bytes.
"""
with fits.open(self.data('test0.fits'),
do_not_scale_image_data=True) as hdul:
info = hdul.info(output=False)
hdul.writeto(self.temp('temp.fits'))
with open(self.temp('temp.fits'), 'ab') as f:
f.seek(0, os.SEEK_END)
f.write(b'\0' * 2880)
with ignore_warnings():
assert info == fits.info(self.temp('temp.fits'), output=False,
do_not_scale_image_data=True)
    def test_open_file_with_bad_header_padding(self):
        """
        Regression test for https://aeon.stsci.edu/ssb/trac/pyfits/ticket/136

        Open files with nulls for header block padding instead of spaces.
        """
        a = np.arange(100).reshape(10, 10)
        hdu = fits.PrimaryHDU(data=a)
        hdu.writeto(self.temp('temp.fits'))
        # Figure out where the header padding begins and fill it with nulls
        end_card_pos = str(hdu.header).index('END' + ' ' * 77)
        padding_start = end_card_pos + 80
        padding_len = 2880 - padding_start
        with open(self.temp('temp.fits'), 'r+b') as f:
            f.seek(padding_start)
            f.write('\0'.encode('ascii') * padding_len)
        # The file must still open, with exactly one warning about the
        # null-byte padding, and be read back unchanged.
        with catch_warnings(AstropyUserWarning) as w:
            with fits.open(self.temp('temp.fits')) as hdul:
                assert (hdul[0].data == a).all()
            assert ('contains null bytes instead of spaces' in
                    str(w[0].message))
            assert len(w) == 1
            assert len(hdul) == 1
            assert str(hdul[0].header) == str(hdu.header)
def test_update_with_truncated_header(self):
"""
Regression test for https://aeon.stsci.edu/ssb/trac/pyfits/ticket/148
Test that saving an update where the header is shorter than the
original header doesn't leave a stump from the old header in the file.
"""
data = np.arange(100)
hdu = fits.PrimaryHDU(data=data)
idx = 1
while len(hdu.header) < 34:
hdu.header[f'TEST{idx}'] = idx
idx += 1
hdu.writeto(self.temp('temp.fits'), checksum=True)
with fits.open(self.temp('temp.fits'), mode='update') as hdul:
# Modify the header, forcing it to be rewritten
hdul[0].header['TEST1'] = 2
with fits.open(self.temp('temp.fits')) as hdul:
assert (hdul[0].data == data).all()
    # This test used to fail on Windows - if it fails again in future, see
    # https://github.com/astropy/astropy/issues/5797
    # The warning appears on Windows but cannot be explicitly caught.
    @pytest.mark.filterwarnings("ignore:Assigning the 'data' attribute is an "
                                "inherently unsafe operation")
    def test_update_resized_header(self):
        """
        Test saving updates to a file where the header is one block smaller
        than before, and in the case where the header is one block larger
        than before.
        """
        data = np.arange(100)
        hdu = fits.PrimaryHDU(data=data)
        idx = 1
        # Grow the header past one 2880-byte FITS block.
        while len(str(hdu.header)) <= 2880:
            hdu.header[f'TEST{idx}'] = idx
            idx += 1
        orig_header = hdu.header.copy()
        hdu.writeto(self.temp('temp.fits'))
        # Shrink the header back below one block...
        with fits.open(self.temp('temp.fits'), mode='update') as hdul:
            while len(str(hdul[0].header)) > 2880:
                del hdul[0].header[-1]
        # ...and verify the shrunken file still round-trips.
        with fits.open(self.temp('temp.fits')) as hdul:
            assert hdul[0].header == orig_header[:-1]
            assert (hdul[0].data == data).all()
        # Now grow the header past two blocks.
        with fits.open(self.temp('temp.fits'), mode='update') as hdul:
            idx = 101
            while len(str(hdul[0].header)) <= 2880 * 2:
                hdul[0].header[f'TEST{idx}'] = idx
                idx += 1
            # Touch something in the data too so that it has to be rewritten
            hdul[0].data[0] = 27
        with fits.open(self.temp('temp.fits')) as hdul:
            assert hdul[0].header[:-37] == orig_header[:-1]
            assert hdul[0].data[0] == 27
            assert (hdul[0].data[1:] == data[1:]).all()
    def test_update_resized_header2(self):
        """
        Regression test for https://aeon.stsci.edu/ssb/trac/pyfits/ticket/150

        This is similar to test_update_resized_header, but specifically tests a
        case of multiple consecutive flush() calls on the same HDUList object,
        where each flush() requires a resize.
        """
        data1 = np.arange(100)
        data2 = np.arange(100) + 100
        phdu = fits.PrimaryHDU(data=data1)
        hdu = fits.ImageHDU(data=data2)
        phdu.writeto(self.temp('temp.fits'))
        with fits.open(self.temp('temp.fits'), mode='append') as hdul:
            hdul.append(hdu)
        with fits.open(self.temp('temp.fits'), mode='update') as hdul:
            idx = 1
            # First resize: grow the primary header past two blocks, flush.
            while len(str(hdul[0].header)) <= 2880 * 2:
                hdul[0].header[f'TEST{idx}'] = idx
                idx += 1
            hdul.flush()
            # Second resize: append another HDU before the implicit flush on
            # close.
            hdul.append(hdu)
        with fits.open(self.temp('temp.fits')) as hdul:
            assert (hdul[0].data == data1).all()
            assert hdul[1].header == hdu.header
            assert (hdul[1].data == data2).all()
            assert (hdul[2].data == data2).all()
@ignore_warnings()
def test_hdul_fromstring(self):
"""
Test creating the HDUList structure in memory from a string containing
an entire FITS file. This is similar to test_hdu_fromstring but for an
entire multi-extension FITS file at once.
"""
# Tests HDUList.fromstring for all of Astropy's built in test files
def test_fromstring(filename):
with fits.open(filename) as hdul:
orig_info = hdul.info(output=False)
with open(filename, 'rb') as f:
dat = f.read()
hdul2 = fits.HDUList.fromstring(dat)
assert orig_info == hdul2.info(output=False)
for idx in range(len(hdul)):
assert hdul[idx].header == hdul2[idx].header
if hdul[idx].data is None or hdul2[idx].data is None:
assert hdul[idx].data == hdul2[idx].data
elif (hdul[idx].data.dtype.fields and
hdul2[idx].data.dtype.fields):
# Compare tables
for n in hdul[idx].data.names:
c1 = hdul[idx].data[n]
c2 = hdul2[idx].data[n]
assert (c1 == c2).all()
elif (any(dim == 0 for dim in hdul[idx].data.shape) or
any(dim == 0 for dim in hdul2[idx].data.shape)):
# For some reason some combinations of Python and Numpy
# on Windows result in MemoryErrors when trying to work
# on memmap arrays with more than one dimension but
# some dimensions of size zero, so include a special
# case for that
return hdul[idx].data.shape == hdul2[idx].data.shape
else:
np.testing.assert_array_equal(hdul[idx].data,
hdul2[idx].data)
for filename in glob.glob(os.path.join(self.data_dir, '*.fits')):
if sys.platform == 'win32' and filename == 'zerowidth.fits':
# Running this test on this file causes a crash in some
# versions of Numpy on Windows. See ticket:
# https://aeon.stsci.edu/ssb/trac/pyfits/ticket/174
continue
elif filename.endswith('variable_length_table.fits'):
# Comparing variable length arrays is non-trivial and thus
# skipped at this point.
# TODO: That's probably possible, so one could make it work.
continue
test_fromstring(filename)
# Test that creating an HDUList from something silly raises a TypeError
pytest.raises(TypeError, fits.HDUList.fromstring, ['a', 'b', 'c'])
    def test_save_backup(self):
        """Test for https://aeon.stsci.edu/ssb/trac/pyfits/ticket/121

        Save backup of file before flushing changes.
        """
        self.copy_file('scale.fits')
        with ignore_warnings():
            with fits.open(self.temp('scale.fits'), mode='update',
                           save_backup=True) as hdul:
                # Make some changes to the original file to force its header
                # and data to be rewritten
                hdul[0].header['TEST'] = 'TEST'
                hdul[0].data[0] = 0
        assert os.path.exists(self.temp('scale.fits.bak'))
        with fits.open(self.data('scale.fits'),
                       do_not_scale_image_data=True) as hdul1:
            with fits.open(self.temp('scale.fits.bak'),
                           do_not_scale_image_data=True) as hdul2:
                # The backup must be identical to the pristine original.
                assert hdul1[0].header == hdul2[0].header
                assert (hdul1[0].data == hdul2[0].data).all()
        with ignore_warnings():
            with fits.open(self.temp('scale.fits'), mode='update',
                           save_backup=True) as hdul:
                # One more time to see if multiple backups are made
                hdul[0].header['TEST2'] = 'TEST'
                hdul[0].data[0] = 1
        # The second flush creates a numbered backup alongside the first.
        assert os.path.exists(self.temp('scale.fits.bak'))
        assert os.path.exists(self.temp('scale.fits.bak.1'))
    def test_replace_mmap_data(self):
        """Regression test for
        https://github.com/spacetelescope/PyFITS/issues/25

        Replacing the mmap'd data of one file with mmap'd data from a
        different file should work.
        """
        arr_a = np.arange(10)
        arr_b = arr_a * 2
        def test(mmap_a, mmap_b):
            # Write both arrays, open A for update and B read-only with the
            # given memmap settings, assign B's data into A, then verify A.
            hdu_a = fits.PrimaryHDU(data=arr_a)
            hdu_a.writeto(self.temp('test_a.fits'), overwrite=True)
            hdu_b = fits.PrimaryHDU(data=arr_b)
            hdu_b.writeto(self.temp('test_b.fits'), overwrite=True)
            with fits.open(self.temp('test_a.fits'), mode='update',
                           memmap=mmap_a) as hdul_a:
                with fits.open(self.temp('test_b.fits'),
                               memmap=mmap_b) as hdul_b:
                    hdul_a[0].data = hdul_b[0].data
            with fits.open(self.temp('test_a.fits')) as hdul_a:
                assert np.all(hdul_a[0].data == arr_b)
        with ignore_warnings():
            test(True, True)
            # Repeat the same test but this time don't mmap A
            test(False, True)
            # Finally, without mmaping B
            test(True, False)
    def test_replace_mmap_data_2(self):
        """Regression test for
        https://github.com/spacetelescope/PyFITS/issues/25

        Replacing the mmap'd data of one file with mmap'd data from a
        different file should work. Like test_replace_mmap_data but with
        table data instead of image data.
        """
        arr_a = np.arange(10)
        arr_b = arr_a * 2
        def test(mmap_a, mmap_b):
            # Same pattern as test_replace_mmap_data, but with one-column
            # binary tables: A's table (column 'a') is replaced by B's
            # (column 'b'), so A must end up with only column 'b'.
            col_a = fits.Column(name='a', format='J', array=arr_a)
            col_b = fits.Column(name='b', format='J', array=arr_b)
            hdu_a = fits.BinTableHDU.from_columns([col_a])
            hdu_a.writeto(self.temp('test_a.fits'), overwrite=True)
            hdu_b = fits.BinTableHDU.from_columns([col_b])
            hdu_b.writeto(self.temp('test_b.fits'), overwrite=True)
            with fits.open(self.temp('test_a.fits'), mode='update',
                           memmap=mmap_a) as hdul_a:
                with fits.open(self.temp('test_b.fits'),
                               memmap=mmap_b) as hdul_b:
                    hdul_a[1].data = hdul_b[1].data
            with fits.open(self.temp('test_a.fits')) as hdul_a:
                assert 'b' in hdul_a[1].columns.names
                assert 'a' not in hdul_a[1].columns.names
                assert np.all(hdul_a[1].data['b'] == arr_b)
        with ignore_warnings():
            test(True, True)
            # Repeat the same test but this time don't mmap A
            test(False, True)
            # Finally, without mmaping B
            test(True, False)
def test_extname_in_hdulist(self):
"""
Tests to make sure that the 'in' operator works.
Regression test for https://github.com/astropy/astropy/issues/3060
"""
with fits.open(self.data('o4sp040b0_raw.fits')) as hdulist:
hdulist.append(fits.ImageHDU(name='a'))
assert 'a' in hdulist
assert 'A' in hdulist
assert ('a', 1) in hdulist
assert ('A', 1) in hdulist
assert 'b' not in hdulist
assert ('a', 2) not in hdulist
assert ('b', 1) not in hdulist
assert ('b', 2) not in hdulist
assert hdulist[0] in hdulist
assert fits.ImageHDU() not in hdulist
def test_overwrite_vs_clobber(self):
hdulist = fits.HDUList([fits.PrimaryHDU()])
hdulist.writeto(self.temp('test_overwrite.fits'))
hdulist.writeto(self.temp('test_overwrite.fits'), overwrite=True)
with catch_warnings(AstropyDeprecationWarning) as warning_lines:
hdulist.writeto(self.temp('test_overwrite.fits'), clobber=True)
assert warning_lines[0].category == AstropyDeprecationWarning
assert (str(warning_lines[0].message) == '"clobber" was '
'deprecated in version 2.0 and will be removed in a '
'future version. Use argument "overwrite" instead.')
def test_invalid_hdu_key_in_contains(self):
"""
Make sure invalid keys in the 'in' operator return False.
Regression test for https://github.com/astropy/astropy/issues/5583
"""
hdulist = fits.HDUList(fits.PrimaryHDU())
hdulist.append(fits.ImageHDU())
hdulist.append(fits.ImageHDU())
# A more or less random assortment of things which are not valid keys.
bad_keys = [None, 3.5, {}]
for key in bad_keys:
assert not (key in hdulist)
def test_iteration_of_lazy_loaded_hdulist(self):
"""
Regression test for https://github.com/astropy/astropy/issues/5585
"""
hdulist = fits.HDUList(fits.PrimaryHDU())
hdulist.append(fits.ImageHDU(name='SCI'))
hdulist.append(fits.ImageHDU(name='SCI'))
hdulist.append(fits.ImageHDU(name='nada'))
hdulist.append(fits.ImageHDU(name='SCI'))
filename = self.temp('many_extension.fits')
hdulist.writeto(filename)
f = fits.open(filename)
# Check that all extensions are read if f is not sliced
all_exts = [ext for ext in f]
assert len(all_exts) == 5
# Reload the file to ensure we are still lazy loading
f.close()
f = fits.open(filename)
# Try a simple slice with no conditional on the ext. This is essentially
# the reported failure.
all_exts_but_zero = [ext for ext in f[1:]]
assert len(all_exts_but_zero) == 4
# Reload the file to ensure we are still lazy loading
f.close()
f = fits.open(filename)
# Check whether behavior is proper if the upper end of the slice is not
# omitted.
read_exts = [ext for ext in f[1:4] if ext.header['EXTNAME'] == 'SCI']
assert len(read_exts) == 2
f.close()
def test_proper_error_raised_on_non_fits_file_with_unicode(self):
"""
Regression test for https://github.com/astropy/astropy/issues/5594
The failure shows up when (in python 3+) you try to open a file
with unicode content that is not actually a FITS file. See:
https://github.com/astropy/astropy/issues/5594#issuecomment-266583218
"""
import codecs
filename = self.temp('not-fits-with-unicode.fits')
with codecs.open(filename, mode='w', encoding='utf=8') as f:
f.write('Ce\xe7i ne marche pas')
# This should raise an OSError because there is no end card.
with pytest.raises(OSError):
with pytest.warns(AstropyUserWarning, match=r'non-ASCII characters '
r'are present in the FITS file header'):
fits.open(filename)
def test_no_resource_warning_raised_on_non_fits_file(self):
"""
Regression test for https://github.com/astropy/astropy/issues/6168
The ResourceWarning shows up when (in python 3+) you try to
open a non-FITS file when using a filename.
"""
# To avoid creating the file multiple times the tests are
# all included in one test file. See the discussion to the
# PR at https://github.com/astropy/astropy/issues/6168
#
filename = self.temp('not-fits.fits')
with open(filename, mode='w') as f:
f.write('# header line\n')
f.write('0.1 0.2\n')
# Opening the file should raise an OSError however the file
# is opened (there are two distinct code paths, depending on
# whether ignore_missing_end is True or False).
#
# Explicit tests are added to make sure the file handle is not
# closed when passed in to fits.open. In this case the ResourceWarning
# was not raised, but a check is still included.
#
with catch_warnings(ResourceWarning) as ws:
# Make sure that files opened by the user are not closed
with open(filename, mode='rb') as f:
with pytest.raises(OSError):
fits.open(f, ignore_missing_end=False)
assert not f.closed
with open(filename, mode='rb') as f:
with pytest.raises(OSError):
fits.open(f, ignore_missing_end=True)
assert not f.closed
with pytest.raises(OSError):
fits.open(filename, ignore_missing_end=False)
with pytest.raises(OSError):
fits.open(filename, ignore_missing_end=True)
assert len(ws) == 0
def test_pop_with_lazy_load(self):
filename = self.data('checksum.fits')
with fits.open(filename) as hdul:
# Try popping the hdulist before doing anything else. This makes sure
# that https://github.com/astropy/astropy/issues/7185 is fixed.
hdu = hdul.pop()
assert len(hdul) == 1
# Read the file again and try popping from the beginning
with fits.open(filename) as hdul2:
hdu2 = hdul2.pop(0)
assert len(hdul2) == 1
# Just a sanity check
with fits.open(filename) as hdul3:
assert len(hdul3) == 2
assert hdul3[0].header == hdu2.header
assert hdul3[1].header == hdu.header
def test_pop_extname(self):
with fits.open(self.data('o4sp040b0_raw.fits')) as hdul:
assert len(hdul) == 7
hdu1 = hdul[1]
hdu4 = hdul[4]
hdu_popped = hdul.pop(('SCI', 2))
assert len(hdul) == 6
assert hdu_popped is hdu4
hdu_popped = hdul.pop('SCI')
assert len(hdul) == 5
assert hdu_popped is hdu1
# Skip due to https://github.com/astropy/astropy/issues/8916
@pytest.mark.skipif('sys.platform.startswith("win32")')
def test_write_hdulist_to_stream(self):
"""
Unit test for https://github.com/astropy/astropy/issues/7435
to ensure that an HDUList can be written to a stream.
"""
data = np.array([[1, 2, 3], [4, 5, 6]])
hdu = fits.PrimaryHDU(data)
hdulist = fits.HDUList([hdu])
with open(self.temp('test.fits'), 'wb') as fout:
with subprocess.Popen(["cat"], stdin=subprocess.PIPE,
stdout=fout) as p:
hdulist.writeto(p.stdin)
| 37.287736 | 85 | 0.57141 |
import glob
import io
import os
import sys
import copy
import subprocess
import pytest
import numpy as np
from astropy.io.fits.verify import VerifyError
from astropy.io import fits
from astropy.tests.helper import raises, catch_warnings, ignore_warnings
from astropy.utils.exceptions import AstropyUserWarning, AstropyDeprecationWarning
from . import FitsTestCase
class TestHDUListFunctions(FitsTestCase):
    def test_update_name(self):
        """Renaming an HDU updates the (EXTNAME, EXTVER) lookup key."""
        with fits.open(self.data('o4sp040b0_raw.fits')) as hdul:
            hdul[4].name = 'Jim'
            hdul[4].ver = 9
            # The new name matches case-insensitively and the stored header
            # value is the upper-cased form.
            assert hdul[('JIM', 9)].header['extname'] == 'JIM'
    def test_hdu_file_bytes(self):
        """filebytes() reports each HDU's total on-disk size in bytes."""
        with fits.open(self.data('checksum.fits')) as hdul:
            res = hdul[0].filebytes()
            # Both sizes are whole multiples of the 2880-byte FITS block
            # (11520 = 4 blocks, 8640 = 3 blocks).
            assert res == 11520
            res = hdul[1].filebytes()
            assert res == 8640
def test_hdulist_file_info(self):
def test_fileinfo(**kwargs):
assert res['datSpan'] == kwargs.get('datSpan', 2880)
assert res['resized'] == kwargs.get('resized', False)
assert res['filename'] == self.data('checksum.fits')
assert res['datLoc'] == kwargs.get('datLoc', 8640)
assert res['hdrLoc'] == kwargs.get('hdrLoc', 0)
assert res['filemode'] == 'readonly'
with fits.open(self.data('checksum.fits')) as hdul:
res = hdul.fileinfo(0)
res = hdul.fileinfo(1)
test_fileinfo(datLoc=17280, hdrLoc=11520)
hdu = fits.ImageHDU(data=hdul[0].data)
hdul.insert(1, hdu)
res = hdul.fileinfo(0)
test_fileinfo(resized=True)
res = hdul.fileinfo(1)
test_fileinfo(datSpan=None, resized=True, datLoc=None, hdrLoc=None)
res = hdul.fileinfo(2)
test_fileinfo(resized=1, datLoc=17280, hdrLoc=11520)
def test_create_from_multiple_primary(self):
hdul = fits.HDUList([fits.PrimaryHDU(), fits.PrimaryHDU()])
pytest.raises(VerifyError, hdul.writeto, self.temp('temp.fits'),
output_verify='exception')
def test_append_primary_to_empty_list(self):
hdul = fits.HDUList()
hdu = fits.PrimaryHDU(np.arange(100, dtype=np.int32))
hdul.append(hdu)
info = [(0, 'PRIMARY', 1, 'PrimaryHDU', 5, (100,), 'int32', '')]
assert hdul.info(output=False) == info
hdul.writeto(self.temp('test-append.fits'))
assert fits.info(self.temp('test-append.fits'), output=False) == info
def test_append_extension_to_empty_list(self):
hdul = fits.HDUList()
hdu = fits.ImageHDU(np.arange(100, dtype=np.int32))
hdul.append(hdu)
info = [(0, 'PRIMARY', 1, 'PrimaryHDU', 4, (100,), 'int32', '')]
assert hdul.info(output=False) == info
hdul.writeto(self.temp('test-append.fits'))
assert fits.info(self.temp('test-append.fits'), output=False) == info
def test_append_table_extension_to_empty_list(self):
hdul = fits.HDUList()
with fits.open(self.data('tb.fits')) as hdul1:
hdul.append(hdul1[1])
info = [(0, 'PRIMARY', 1, 'PrimaryHDU', 4, (), '', ''),
(1, '', 1, 'BinTableHDU', 24, '2R x 4C', '[1J, 3A, 1E, 1L]', '')]
assert hdul.info(output=False) == info
hdul.writeto(self.temp('test-append.fits'))
assert fits.info(self.temp('test-append.fits'), output=False) == info
def test_append_groupshdu_to_empty_list(self):
hdul = fits.HDUList()
hdu = fits.GroupsHDU()
hdul.append(hdu)
info = [(0, 'PRIMARY', 1, 'GroupsHDU', 8, (), '',
'1 Groups 0 Parameters')]
assert hdul.info(output=False) == info
hdul.writeto(self.temp('test-append.fits'))
assert fits.info(self.temp('test-append.fits'), output=False) == info
def test_append_primary_to_non_empty_list(self):
with fits.open(self.data('arange.fits')) as hdul:
hdu = fits.PrimaryHDU(np.arange(100, dtype=np.int32))
hdul.append(hdu)
info = [(0, 'PRIMARY', 1, 'PrimaryHDU', 7, (11, 10, 7), 'int32', ''),
(1, '', 1, 'ImageHDU', 6, (100,), 'int32', '')]
assert hdul.info(output=False) == info
hdul.writeto(self.temp('test-append.fits'))
assert fits.info(self.temp('test-append.fits'), output=False) == info
def test_append_extension_to_non_empty_list(self):
with fits.open(self.data('tb.fits')) as hdul:
hdul.append(hdul[1])
info = [(0, 'PRIMARY', 1, 'PrimaryHDU', 11, (), '', ''),
(1, '', 1, 'BinTableHDU', 24, '2R x 4C', '[1J, 3A, 1E, 1L]', ''),
(2, '', 1, 'BinTableHDU', 24, '2R x 4C', '[1J, 3A, 1E, 1L]', '')]
assert hdul.info(output=False) == info
hdul.writeto(self.temp('test-append.fits'))
assert fits.info(self.temp('test-append.fits'), output=False) == info
@raises(ValueError)
def test_append_groupshdu_to_non_empty_list(self):
hdul = fits.HDUList()
hdu = fits.PrimaryHDU(np.arange(100, dtype=np.int32))
hdul.append(hdu)
hdu = fits.GroupsHDU()
hdul.append(hdu)
def test_insert_primary_to_empty_list(self):
hdul = fits.HDUList()
hdu = fits.PrimaryHDU(np.arange(100, dtype=np.int32))
hdul.insert(0, hdu)
info = [(0, 'PRIMARY', 1, 'PrimaryHDU', 5, (100,), 'int32', '')]
assert hdul.info(output=False) == info
hdul.writeto(self.temp('test-insert.fits'))
assert fits.info(self.temp('test-insert.fits'), output=False) == info
def test_insert_extension_to_empty_list(self):
hdul = fits.HDUList()
hdu = fits.ImageHDU(np.arange(100, dtype=np.int32))
hdul.insert(0, hdu)
info = [(0, 'PRIMARY', 1, 'PrimaryHDU', 4, (100,), 'int32', '')]
assert hdul.info(output=False) == info
hdul.writeto(self.temp('test-insert.fits'))
assert fits.info(self.temp('test-insert.fits'), output=False) == info
def test_insert_table_extension_to_empty_list(self):
hdul = fits.HDUList()
with fits.open(self.data('tb.fits')) as hdul1:
hdul.insert(0, hdul1[1])
info = [(0, 'PRIMARY', 1, 'PrimaryHDU', 4, (), '', ''),
(1, '', 1, 'BinTableHDU', 24, '2R x 4C', '[1J, 3A, 1E, 1L]', '')]
assert hdul.info(output=False) == info
hdul.writeto(self.temp('test-insert.fits'))
assert fits.info(self.temp('test-insert.fits'), output=False) == info
def test_insert_groupshdu_to_empty_list(self):
hdul = fits.HDUList()
hdu = fits.GroupsHDU()
hdul.insert(0, hdu)
info = [(0, 'PRIMARY', 1, 'GroupsHDU', 8, (), '',
'1 Groups 0 Parameters')]
assert hdul.info(output=False) == info
hdul.writeto(self.temp('test-insert.fits'))
assert fits.info(self.temp('test-insert.fits'), output=False) == info
def test_insert_primary_to_non_empty_list(self):
with fits.open(self.data('arange.fits')) as hdul:
hdu = fits.PrimaryHDU(np.arange(100, dtype=np.int32))
hdul.insert(1, hdu)
info = [(0, 'PRIMARY', 1, 'PrimaryHDU', 7, (11, 10, 7), 'int32', ''),
(1, '', 1, 'ImageHDU', 6, (100,), 'int32', '')]
assert hdul.info(output=False) == info
hdul.writeto(self.temp('test-insert.fits'))
assert fits.info(self.temp('test-insert.fits'), output=False) == info
def test_insert_extension_to_non_empty_list(self):
with fits.open(self.data('tb.fits')) as hdul:
hdul.insert(1, hdul[1])
info = [(0, 'PRIMARY', 1, 'PrimaryHDU', 11, (), '', ''),
(1, '', 1, 'BinTableHDU', 24, '2R x 4C', '[1J, 3A, 1E, 1L]', ''),
(2, '', 1, 'BinTableHDU', 24, '2R x 4C', '[1J, 3A, 1E, 1L]', '')]
assert hdul.info(output=False) == info
hdul.writeto(self.temp('test-insert.fits'))
assert fits.info(self.temp('test-insert.fits'), output=False) == info
def test_insert_groupshdu_to_non_empty_list(self):
hdul = fits.HDUList()
hdu = fits.PrimaryHDU(np.arange(100, dtype=np.int32))
hdul.insert(0, hdu)
hdu = fits.GroupsHDU()
with pytest.raises(ValueError):
hdul.insert(1, hdu)
info = [(0, 'PRIMARY', 1, 'GroupsHDU', 8, (), '',
'1 Groups 0 Parameters'),
(1, '', 1, 'ImageHDU', 6, (100,), 'int32', '')]
hdul.insert(0, hdu)
assert hdul.info(output=False) == info
hdul.writeto(self.temp('test-insert.fits'))
assert fits.info(self.temp('test-insert.fits'), output=False) == info
@raises(ValueError)
def test_insert_groupshdu_to_begin_of_hdulist_with_groupshdu(self):
hdul = fits.HDUList()
hdu = fits.GroupsHDU()
hdul.insert(0, hdu)
hdul.insert(0, hdu)
def test_insert_extension_to_primary_in_non_empty_list(self):
with fits.open(self.data('tb.fits')) as hdul:
hdul.insert(0, hdul[1])
info = [(0, 'PRIMARY', 1, 'PrimaryHDU', 4, (), '', ''),
(1, '', 1, 'BinTableHDU', 24, '2R x 4C', '[1J, 3A, 1E, 1L]', ''),
(2, '', 1, 'ImageHDU', 12, (), '', ''),
(3, '', 1, 'BinTableHDU', 24, '2R x 4C', '[1J, 3A, 1E, 1L]', '')]
assert hdul.info(output=False) == info
hdul.writeto(self.temp('test-insert.fits'))
assert fits.info(self.temp('test-insert.fits'), output=False) == info
def test_insert_image_extension_to_primary_in_non_empty_list(self):
with fits.open(self.data('tb.fits')) as hdul:
hdu = fits.ImageHDU(np.arange(100, dtype=np.int32))
hdul.insert(0, hdu)
info = [(0, 'PRIMARY', 1, 'PrimaryHDU', 5, (100,), 'int32', ''),
(1, '', 1, 'ImageHDU', 12, (), '', ''),
(2, '', 1, 'BinTableHDU', 24, '2R x 4C', '[1J, 3A, 1E, 1L]', '')]
assert hdul.info(output=False) == info
hdul.writeto(self.temp('test-insert.fits'))
assert fits.info(self.temp('test-insert.fits'), output=False) == info
def test_filename(self):
with fits.open(self.data('tb.fits')) as hdul:
name = hdul.filename()
assert name == self.data('tb.fits')
def test_file_like(self):
hdu = fits.PrimaryHDU(np.arange(100, dtype=np.int32))
hdul = fits.HDUList()
hdul.append(hdu)
tmpfile = open(self.temp('tmpfile.fits'), 'wb')
hdul.writeto(tmpfile)
tmpfile.close()
info = [(0, 'PRIMARY', 1, 'PrimaryHDU', 5, (100,), 'int32', '')]
assert fits.info(self.temp('tmpfile.fits'), output=False) == info
def test_file_like_2(self):
hdu = fits.PrimaryHDU(np.arange(100, dtype=np.int32))
tmpfile = open(self.temp('tmpfile.fits'), 'wb')
hdul = fits.open(tmpfile, mode='ostream')
hdul.append(hdu)
hdul.flush()
tmpfile.close()
hdul.close()
info = [(0, 'PRIMARY', 1, 'PrimaryHDU', 5, (100,), 'int32', '')]
assert fits.info(self.temp('tmpfile.fits'), output=False) == info
def test_file_like_3(self):
tmpfile = open(self.temp('tmpfile.fits'), 'wb')
fits.writeto(tmpfile, np.arange(100, dtype=np.int32))
tmpfile.close()
info = [(0, 'PRIMARY', 1, 'PrimaryHDU', 5, (100,), 'int32', '')]
assert fits.info(self.temp('tmpfile.fits'), output=False) == info
def test_shallow_copy(self):
n = np.arange(10.0)
primary_hdu = fits.PrimaryHDU(n)
hdu = fits.ImageHDU(n)
hdul = fits.HDUList([primary_hdu, hdu])
for hdulcopy in (hdul.copy(), copy.copy(hdul)):
assert isinstance(hdulcopy, fits.HDUList)
assert hdulcopy is not hdul
assert hdulcopy[0] is hdul[0]
assert hdulcopy[1] is hdul[1]
def test_deep_copy(self):
n = np.arange(10.0)
primary_hdu = fits.PrimaryHDU(n)
hdu = fits.ImageHDU(n)
hdul = fits.HDUList([primary_hdu, hdu])
hdulcopy = copy.deepcopy(hdul)
assert isinstance(hdulcopy, fits.HDUList)
assert hdulcopy is not hdul
for index in range(len(hdul)):
assert hdulcopy[index] is not hdul[index]
assert hdulcopy[index].header == hdul[index].header
np.testing.assert_array_equal(hdulcopy[index].data, hdul[index].data)
def test_new_hdu_extname(self):
with fits.open(self.data('test0.fits')) as f:
hdul = fits.HDUList()
hdul.append(f[0].copy())
hdu = fits.ImageHDU(header=f[1].header)
hdul.append(hdu)
assert hdul[1].header['EXTNAME'] == 'SCI'
assert hdul[1].header['EXTVER'] == 1
assert hdul.index_of(('SCI', 1)) == 1
assert hdul.index_of(hdu) == len(hdul) - 1
def test_update_filelike(self):
sf = io.BytesIO()
arr = np.zeros((100, 100))
hdu = fits.PrimaryHDU(data=arr)
hdu.writeto(sf)
sf.seek(0)
arr = np.zeros((200, 200))
hdul = fits.open(sf, mode='update')
hdul[0].data = arr
hdul.flush()
sf.seek(0)
hdul = fits.open(sf)
assert len(hdul) == 1
assert (hdul[0].data == arr).all()
def test_flush_readonly(self):
oldmtime = os.stat(self.data('test0.fits')).st_mtime
hdul = fits.open(self.data('test0.fits'))
hdul[0].header['FOO'] = 'BAR'
with catch_warnings(AstropyUserWarning) as w:
hdul.flush()
assert len(w) == 1
assert 'mode is not supported' in str(w[0].message)
assert oldmtime == os.stat(self.data('test0.fits')).st_mtime
def test_fix_extend_keyword(self):
hdul = fits.HDUList()
hdul.append(fits.PrimaryHDU())
hdul.append(fits.ImageHDU())
del hdul[0].header['EXTEND']
hdul.verify('silentfix')
assert 'EXTEND' in hdul[0].header
assert hdul[0].header['EXTEND'] is True
def test_fix_malformed_naxisj(self):
hdu = fits.open(self.data('arange.fits'))
hdu[0].header['NAXIS1'] = 11.0
hdu[0].header['NAXIS2'] = '10.0'
hdu[0].header['NAXIS3'] = '7'
hdu[0]._axes = [11.0, '10.0', '7']
hdu.verify('silentfix')
assert hdu[0].header['NAXIS1'] == 11
assert hdu[0].header['NAXIS2'] == 10
assert hdu[0].header['NAXIS3'] == 7
hdu.close()
def test_fix_wellformed_naxisj(self):
hdu = fits.open(self.data('arange.fits'))
hdu[0].header['NAXIS1'] = 768
hdu[0].header['NAXIS2'] = 64
hdu[0].header['NAXIS3'] = 8
hdu[0]._axes = [768, 64, 8]
hdu.verify('silentfix')
assert hdu[0].header['NAXIS1'] == 768
assert hdu[0].header['NAXIS2'] == 64
assert hdu[0].header['NAXIS3'] == 8
hdu.close()
def test_new_hdulist_extend_keyword(self):
h0 = fits.Header()
hdu = fits.PrimaryHDU(header=h0)
sci = fits.ImageHDU(data=np.array(10))
image = fits.HDUList([hdu, sci])
image.writeto(self.temp('temp.fits'))
assert 'EXTEND' in hdu.header
assert hdu.header['EXTEND'] is True
def test_replace_memmaped_array(self):
with fits.open(self.data('test0.fits')) as hdul:
hdul.writeto(self.temp('temp.fits'))
hdul = fits.open(self.temp('temp.fits'), mode='update', memmap=True)
old_data = hdul[1].data.copy()
hdul[1].data = hdul[1].data + 1
hdul.close()
with fits.open(self.temp('temp.fits'), memmap=True) as hdul:
assert ((old_data + 1) == hdul[1].data).all()
def test_open_file_with_end_padding(self):
with fits.open(self.data('test0.fits'),
do_not_scale_image_data=True) as hdul:
info = hdul.info(output=False)
hdul.writeto(self.temp('temp.fits'))
with open(self.temp('temp.fits'), 'ab') as f:
f.seek(0, os.SEEK_END)
f.write(b'\0' * 2880)
with ignore_warnings():
assert info == fits.info(self.temp('temp.fits'), output=False,
do_not_scale_image_data=True)
def test_open_file_with_bad_header_padding(self):
a = np.arange(100).reshape(10, 10)
hdu = fits.PrimaryHDU(data=a)
hdu.writeto(self.temp('temp.fits'))
end_card_pos = str(hdu.header).index('END' + ' ' * 77)
padding_start = end_card_pos + 80
padding_len = 2880 - padding_start
with open(self.temp('temp.fits'), 'r+b') as f:
f.seek(padding_start)
f.write('\0'.encode('ascii') * padding_len)
with catch_warnings(AstropyUserWarning) as w:
with fits.open(self.temp('temp.fits')) as hdul:
assert (hdul[0].data == a).all()
assert ('contains null bytes instead of spaces' in
str(w[0].message))
assert len(w) == 1
assert len(hdul) == 1
assert str(hdul[0].header) == str(hdu.header)
def test_update_with_truncated_header(self):
data = np.arange(100)
hdu = fits.PrimaryHDU(data=data)
idx = 1
while len(hdu.header) < 34:
hdu.header[f'TEST{idx}'] = idx
idx += 1
hdu.writeto(self.temp('temp.fits'), checksum=True)
with fits.open(self.temp('temp.fits'), mode='update') as hdul:
hdul[0].header['TEST1'] = 2
with fits.open(self.temp('temp.fits')) as hdul:
assert (hdul[0].data == data).all()
@pytest.mark.filterwarnings("ignore:Assigning the 'data' attribute is an "
"inherently unsafe operation")
def test_update_resized_header(self):
data = np.arange(100)
hdu = fits.PrimaryHDU(data=data)
idx = 1
while len(str(hdu.header)) <= 2880:
hdu.header[f'TEST{idx}'] = idx
idx += 1
orig_header = hdu.header.copy()
hdu.writeto(self.temp('temp.fits'))
with fits.open(self.temp('temp.fits'), mode='update') as hdul:
while len(str(hdul[0].header)) > 2880:
del hdul[0].header[-1]
with fits.open(self.temp('temp.fits')) as hdul:
assert hdul[0].header == orig_header[:-1]
assert (hdul[0].data == data).all()
with fits.open(self.temp('temp.fits'), mode='update') as hdul:
idx = 101
while len(str(hdul[0].header)) <= 2880 * 2:
hdul[0].header[f'TEST{idx}'] = idx
idx += 1
hdul[0].data[0] = 27
with fits.open(self.temp('temp.fits')) as hdul:
assert hdul[0].header[:-37] == orig_header[:-1]
assert hdul[0].data[0] == 27
assert (hdul[0].data[1:] == data[1:]).all()
def test_update_resized_header2(self):
data1 = np.arange(100)
data2 = np.arange(100) + 100
phdu = fits.PrimaryHDU(data=data1)
hdu = fits.ImageHDU(data=data2)
phdu.writeto(self.temp('temp.fits'))
with fits.open(self.temp('temp.fits'), mode='append') as hdul:
hdul.append(hdu)
with fits.open(self.temp('temp.fits'), mode='update') as hdul:
idx = 1
while len(str(hdul[0].header)) <= 2880 * 2:
hdul[0].header[f'TEST{idx}'] = idx
idx += 1
hdul.flush()
hdul.append(hdu)
with fits.open(self.temp('temp.fits')) as hdul:
assert (hdul[0].data == data1).all()
assert hdul[1].header == hdu.header
assert (hdul[1].data == data2).all()
assert (hdul[2].data == data2).all()
@ignore_warnings()
def test_hdul_fromstring(self):
def test_fromstring(filename):
with fits.open(filename) as hdul:
orig_info = hdul.info(output=False)
with open(filename, 'rb') as f:
dat = f.read()
hdul2 = fits.HDUList.fromstring(dat)
assert orig_info == hdul2.info(output=False)
for idx in range(len(hdul)):
assert hdul[idx].header == hdul2[idx].header
if hdul[idx].data is None or hdul2[idx].data is None:
assert hdul[idx].data == hdul2[idx].data
elif (hdul[idx].data.dtype.fields and
hdul2[idx].data.dtype.fields):
# Compare tables
for n in hdul[idx].data.names:
c1 = hdul[idx].data[n]
c2 = hdul2[idx].data[n]
assert (c1 == c2).all()
elif (any(dim == 0 for dim in hdul[idx].data.shape) or
any(dim == 0 for dim in hdul2[idx].data.shape)):
# For some reason some combinations of Python and Numpy
# on Windows result in MemoryErrors when trying to work
# on memmap arrays with more than one dimension but
# some dimensions of size zero, so include a special
# case for that
return hdul[idx].data.shape == hdul2[idx].data.shape
else:
np.testing.assert_array_equal(hdul[idx].data,
hdul2[idx].data)
for filename in glob.glob(os.path.join(self.data_dir, '*.fits')):
if sys.platform == 'win32' and filename == 'zerowidth.fits':
# Running this test on this file causes a crash in some
# versions of Numpy on Windows. See ticket:
# https://aeon.stsci.edu/ssb/trac/pyfits/ticket/174
continue
elif filename.endswith('variable_length_table.fits'):
# Comparing variable length arrays is non-trivial and thus
# skipped at this point.
# TODO: That's probably possible, so one could make it work.
continue
test_fromstring(filename)
pytest.raises(TypeError, fits.HDUList.fromstring, ['a', 'b', 'c'])
def test_save_backup(self):
self.copy_file('scale.fits')
with ignore_warnings():
with fits.open(self.temp('scale.fits'), mode='update',
save_backup=True) as hdul:
hdul[0].header['TEST'] = 'TEST'
hdul[0].data[0] = 0
assert os.path.exists(self.temp('scale.fits.bak'))
with fits.open(self.data('scale.fits'),
do_not_scale_image_data=True) as hdul1:
with fits.open(self.temp('scale.fits.bak'),
do_not_scale_image_data=True) as hdul2:
assert hdul1[0].header == hdul2[0].header
assert (hdul1[0].data == hdul2[0].data).all()
with ignore_warnings():
with fits.open(self.temp('scale.fits'), mode='update',
save_backup=True) as hdul:
hdul[0].header['TEST2'] = 'TEST'
hdul[0].data[0] = 1
assert os.path.exists(self.temp('scale.fits.bak'))
assert os.path.exists(self.temp('scale.fits.bak.1'))
def test_replace_mmap_data(self):
arr_a = np.arange(10)
arr_b = arr_a * 2
def test(mmap_a, mmap_b):
hdu_a = fits.PrimaryHDU(data=arr_a)
hdu_a.writeto(self.temp('test_a.fits'), overwrite=True)
hdu_b = fits.PrimaryHDU(data=arr_b)
hdu_b.writeto(self.temp('test_b.fits'), overwrite=True)
with fits.open(self.temp('test_a.fits'), mode='update',
memmap=mmap_a) as hdul_a:
with fits.open(self.temp('test_b.fits'),
memmap=mmap_b) as hdul_b:
hdul_a[0].data = hdul_b[0].data
with fits.open(self.temp('test_a.fits')) as hdul_a:
assert np.all(hdul_a[0].data == arr_b)
with ignore_warnings():
test(True, True)
test(False, True)
# Finally, without mmaping B
test(True, False)
def test_replace_mmap_data_2(self):
arr_a = np.arange(10)
arr_b = arr_a * 2
def test(mmap_a, mmap_b):
col_a = fits.Column(name='a', format='J', array=arr_a)
col_b = fits.Column(name='b', format='J', array=arr_b)
hdu_a = fits.BinTableHDU.from_columns([col_a])
hdu_a.writeto(self.temp('test_a.fits'), overwrite=True)
hdu_b = fits.BinTableHDU.from_columns([col_b])
hdu_b.writeto(self.temp('test_b.fits'), overwrite=True)
with fits.open(self.temp('test_a.fits'), mode='update',
memmap=mmap_a) as hdul_a:
with fits.open(self.temp('test_b.fits'),
memmap=mmap_b) as hdul_b:
hdul_a[1].data = hdul_b[1].data
with fits.open(self.temp('test_a.fits')) as hdul_a:
assert 'b' in hdul_a[1].columns.names
assert 'a' not in hdul_a[1].columns.names
assert np.all(hdul_a[1].data['b'] == arr_b)
with ignore_warnings():
test(True, True)
# Repeat the same test but this time don't mmap A
test(False, True)
test(True, False)
def test_extname_in_hdulist(self):
with fits.open(self.data('o4sp040b0_raw.fits')) as hdulist:
hdulist.append(fits.ImageHDU(name='a'))
assert 'a' in hdulist
assert 'A' in hdulist
assert ('a', 1) in hdulist
assert ('A', 1) in hdulist
assert 'b' not in hdulist
assert ('a', 2) not in hdulist
assert ('b', 1) not in hdulist
assert ('b', 2) not in hdulist
assert hdulist[0] in hdulist
assert fits.ImageHDU() not in hdulist
def test_overwrite_vs_clobber(self):
hdulist = fits.HDUList([fits.PrimaryHDU()])
hdulist.writeto(self.temp('test_overwrite.fits'))
hdulist.writeto(self.temp('test_overwrite.fits'), overwrite=True)
with catch_warnings(AstropyDeprecationWarning) as warning_lines:
hdulist.writeto(self.temp('test_overwrite.fits'), clobber=True)
assert warning_lines[0].category == AstropyDeprecationWarning
assert (str(warning_lines[0].message) == '"clobber" was '
'deprecated in version 2.0 and will be removed in a '
'future version. Use argument "overwrite" instead.')
def test_invalid_hdu_key_in_contains(self):
hdulist = fits.HDUList(fits.PrimaryHDU())
hdulist.append(fits.ImageHDU())
hdulist.append(fits.ImageHDU())
bad_keys = [None, 3.5, {}]
for key in bad_keys:
assert not (key in hdulist)
def test_iteration_of_lazy_loaded_hdulist(self):
hdulist = fits.HDUList(fits.PrimaryHDU())
hdulist.append(fits.ImageHDU(name='SCI'))
hdulist.append(fits.ImageHDU(name='SCI'))
hdulist.append(fits.ImageHDU(name='nada'))
hdulist.append(fits.ImageHDU(name='SCI'))
filename = self.temp('many_extension.fits')
hdulist.writeto(filename)
f = fits.open(filename)
all_exts = [ext for ext in f]
assert len(all_exts) == 5
f.close()
f = fits.open(filename)
all_exts_but_zero = [ext for ext in f[1:]]
assert len(all_exts_but_zero) == 4
f.close()
f = fits.open(filename)
read_exts = [ext for ext in f[1:4] if ext.header['EXTNAME'] == 'SCI']
assert len(read_exts) == 2
f.close()
def test_proper_error_raised_on_non_fits_file_with_unicode(self):
import codecs
filename = self.temp('not-fits-with-unicode.fits')
with codecs.open(filename, mode='w', encoding='utf=8') as f:
f.write('Ce\xe7i ne marche pas')
with pytest.raises(OSError):
with pytest.warns(AstropyUserWarning, match=r'non-ASCII characters '
r'are present in the FITS file header'):
fits.open(filename)
def test_no_resource_warning_raised_on_non_fits_file(self):
filename = self.temp('not-fits.fits')
with open(filename, mode='w') as f:
f.write('# header line\n')
f.write('0.1 0.2\n')
with catch_warnings(ResourceWarning) as ws:
with open(filename, mode='rb') as f:
with pytest.raises(OSError):
fits.open(f, ignore_missing_end=False)
assert not f.closed
with open(filename, mode='rb') as f:
with pytest.raises(OSError):
fits.open(f, ignore_missing_end=True)
assert not f.closed
with pytest.raises(OSError):
fits.open(filename, ignore_missing_end=False)
with pytest.raises(OSError):
fits.open(filename, ignore_missing_end=True)
assert len(ws) == 0
def test_pop_with_lazy_load(self):
filename = self.data('checksum.fits')
with fits.open(filename) as hdul:
hdu = hdul.pop()
assert len(hdul) == 1
with fits.open(filename) as hdul2:
hdu2 = hdul2.pop(0)
assert len(hdul2) == 1
with fits.open(filename) as hdul3:
assert len(hdul3) == 2
assert hdul3[0].header == hdu2.header
assert hdul3[1].header == hdu.header
def test_pop_extname(self):
with fits.open(self.data('o4sp040b0_raw.fits')) as hdul:
assert len(hdul) == 7
hdu1 = hdul[1]
hdu4 = hdul[4]
hdu_popped = hdul.pop(('SCI', 2))
assert len(hdul) == 6
assert hdu_popped is hdu4
hdu_popped = hdul.pop('SCI')
assert len(hdul) == 5
assert hdu_popped is hdu1
@pytest.mark.skipif('sys.platform.startswith("win32")')
def test_write_hdulist_to_stream(self):
data = np.array([[1, 2, 3], [4, 5, 6]])
hdu = fits.PrimaryHDU(data)
hdulist = fits.HDUList([hdu])
with open(self.temp('test.fits'), 'wb') as fout:
with subprocess.Popen(["cat"], stdin=subprocess.PIPE,
stdout=fout) as p:
hdulist.writeto(p.stdin)
| true | true |
1c2fb3bd371d853ef0b2d0d8fca7b77602f5083b | 284 | py | Python | metadata_service/entity/description.py | jacobhjkim/amundsenmetadatalibrary | 19b5c7bf0ac496a357648b1a1b28f28f4d6a9ffb | [
"Apache-2.0"
] | null | null | null | metadata_service/entity/description.py | jacobhjkim/amundsenmetadatalibrary | 19b5c7bf0ac496a357648b1a1b28f28f4d6a9ffb | [
"Apache-2.0"
] | 1 | 2020-09-24T17:05:39.000Z | 2020-09-24T17:05:39.000Z | metadata_service/entity/description.py | jacobhjkim/amundsenmetadatalibrary | 19b5c7bf0ac496a357648b1a1b28f28f4d6a9ffb | [
"Apache-2.0"
] | null | null | null | import attr
from marshmallow_annotations.ext.attrs import AttrsSchema
@attr.s(auto_attribs=True, kw_only=True)
class Description:
description: str = attr.ib()
class DescriptionSchema(AttrsSchema):
class Meta:
target = Description
register_as_scheme = True
| 20.285714 | 57 | 0.742958 | import attr
from marshmallow_annotations.ext.attrs import AttrsSchema
@attr.s(auto_attribs=True, kw_only=True)
class Description:
description: str = attr.ib()
class DescriptionSchema(AttrsSchema):
class Meta:
target = Description
register_as_scheme = True
| true | true |
1c2fb3e1ff5c2206cf4863d4a3992cc8e52cf425 | 1,640 | py | Python | setup.py | robertopreste/HmtNote | 0f2c0f684a45c0087cabc3cb15f61803fac7daf1 | [
"MIT"
] | 11 | 2019-04-11T07:06:41.000Z | 2021-03-22T09:13:40.000Z | setup.py | robertopreste/HmtNote | 0f2c0f684a45c0087cabc3cb15f61803fac7daf1 | [
"MIT"
] | 64 | 2019-03-04T11:18:25.000Z | 2022-03-31T23:03:01.000Z | setup.py | robertopreste/HmtNote | 0f2c0f684a45c0087cabc3cb15f61803fac7daf1 | [
"MIT"
] | null | null | null | #!/usr/bin/env python
# -*- coding: UTF-8 -*-
# Created by Roberto Preste
from setuptools import setup, find_packages
with open('README.rst') as readme_file:
readme = readme_file.read()
with open('HISTORY.rst') as history_file:
history = history_file.read()
requirements = ["Click==7.0", "requests==2.22.0", "numpy==1.16.4",
"pandas==0.24.2", "aiohttp==3.5.4", "aiofiles==0.4.0",
"vcfpy2==0.1.2", "scikit-allel==1.2.1"]
setup_requirements = ['pytest-runner', ]
test_requirements = ['pytest', ]
setup( # pragma: no cover
author="Roberto Preste",
author_email='robertopreste@gmail.com',
classifiers=[
'Development Status :: 4 - Beta',
'Intended Audience :: Developers',
'License :: OSI Approved :: MIT License',
'Natural Language :: English',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
],
description="Human mitochondrial variants annotation using HmtVar.",
entry_points={
'console_scripts': [
'hmtnote=hmtnote.cli:main',
],
},
install_requires=requirements,
license="MIT license",
long_description=readme + '\n\n' + history,
long_description_content_type="text/x-rst",
include_package_data=True,
keywords='hmtnote',
name='hmtnote',
packages=find_packages(include=['hmtnote']),
setup_requires=setup_requirements,
test_suite='tests',
tests_require=test_requirements,
url='https://github.com/robertopreste/hmtnote',
version='0.7.2',
zip_safe=False,
)
| 30.943396 | 72 | 0.632927 |
from setuptools import setup, find_packages
with open('README.rst') as readme_file:
readme = readme_file.read()
with open('HISTORY.rst') as history_file:
history = history_file.read()
requirements = ["Click==7.0", "requests==2.22.0", "numpy==1.16.4",
"pandas==0.24.2", "aiohttp==3.5.4", "aiofiles==0.4.0",
"vcfpy2==0.1.2", "scikit-allel==1.2.1"]
setup_requirements = ['pytest-runner', ]
test_requirements = ['pytest', ]
setup(
author="Roberto Preste",
author_email='robertopreste@gmail.com',
classifiers=[
'Development Status :: 4 - Beta',
'Intended Audience :: Developers',
'License :: OSI Approved :: MIT License',
'Natural Language :: English',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
],
description="Human mitochondrial variants annotation using HmtVar.",
entry_points={
'console_scripts': [
'hmtnote=hmtnote.cli:main',
],
},
install_requires=requirements,
license="MIT license",
long_description=readme + '\n\n' + history,
long_description_content_type="text/x-rst",
include_package_data=True,
keywords='hmtnote',
name='hmtnote',
packages=find_packages(include=['hmtnote']),
setup_requires=setup_requirements,
test_suite='tests',
tests_require=test_requirements,
url='https://github.com/robertopreste/hmtnote',
version='0.7.2',
zip_safe=False,
)
| true | true |
1c2fb49649471ec4b964ba521bb4466ad02558f5 | 1,475 | py | Python | ponyexpress/migrations/versions/334aefbc10df_.py | TelekomCloud/pony-express | a825b518687719be5dfe95692008c2129db115cd | [
"Apache-2.0"
] | null | null | null | ponyexpress/migrations/versions/334aefbc10df_.py | TelekomCloud/pony-express | a825b518687719be5dfe95692008c2129db115cd | [
"Apache-2.0"
] | null | null | null | ponyexpress/migrations/versions/334aefbc10df_.py | TelekomCloud/pony-express | a825b518687719be5dfe95692008c2129db115cd | [
"Apache-2.0"
] | null | null | null | """empty message
Revision ID: 334aefbc10df
Revises: 36b5aaa11324
Create Date: 2014-04-01 16:37:00.017980
"""
# revision identifiers, used by Alembic.
revision = '334aefbc10df'
down_revision = '36b5aaa11324'
from alembic import op
import sqlalchemy as sa
def upgrade():
### commands auto generated by Alembic - please adjust! ###
op.create_table('repositories',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('name', sa.String(length=255), nullable=True),
sa.Column('uri', sa.String(length=255), nullable=True),
sa.Column('label', sa.String(length=255), nullable=True),
sa.Column('provider', sa.String(length=12), nullable=True),
sa.PrimaryKeyConstraint('id')
)
op.create_table('repohistory',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('repo_id', sa.Integer(), nullable=True),
sa.Column('pkgsha', sa.String(length=255), nullable=True),
sa.Column('pkgname', sa.String(length=255), nullable=True),
sa.Column('pkgversion', sa.String(length=64), nullable=True),
sa.Column('pkgsource', sa.Text(), nullable=True),
sa.Column('released', sa.DATE(), nullable=True),
sa.ForeignKeyConstraint(['repo_id'], ['repositories.id'], ),
sa.PrimaryKeyConstraint('id')
)
### end Alembic commands ###
def downgrade():
### commands auto generated by Alembic - please adjust! ###
op.drop_table('repohistory')
op.drop_table('repositories')
### end Alembic commands ###
| 32.065217 | 65 | 0.677288 |
revision = '334aefbc10df'
down_revision = '36b5aaa11324'
from alembic import op
import sqlalchemy as sa
def upgrade():
ength=255), nullable=True),
sa.Column('uri', sa.String(length=255), nullable=True),
sa.Column('label', sa.String(length=255), nullable=True),
sa.Column('provider', sa.String(length=12), nullable=True),
sa.PrimaryKeyConstraint('id')
)
op.create_table('repohistory',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('repo_id', sa.Integer(), nullable=True),
sa.Column('pkgsha', sa.String(length=255), nullable=True),
sa.Column('pkgname', sa.String(length=255), nullable=True),
sa.Column('pkgversion', sa.String(length=64), nullable=True),
sa.Column('pkgsource', sa.Text(), nullable=True),
sa.Column('released', sa.DATE(), nullable=True),
sa.ForeignKeyConstraint(['repo_id'], ['repositories.id'], ),
sa.PrimaryKeyConstraint('id')
)
| true | true |
1c2fb4d2c2ee899687cfffb6413569efa4eb69d4 | 2,264 | py | Python | test/upbit_test.py | BbChip0103/kimchi_premium_notice | e812fda7bae7cd01c4ca82892e9466831ff96698 | [
"Apache-2.0"
] | null | null | null | test/upbit_test.py | BbChip0103/kimchi_premium_notice | e812fda7bae7cd01c4ca82892e9466831ff96698 | [
"Apache-2.0"
] | null | null | null | test/upbit_test.py | BbChip0103/kimchi_premium_notice | e812fda7bae7cd01c4ca82892e9466831ff96698 | [
"Apache-2.0"
] | null | null | null | import asyncio
import websockets
import json
import ccxt
async def upbit_ws_client(coin_list, callback):
uri = 'wss://api.upbit.com/websocket/v1'
async with websockets.connect(uri) as websocket:
subscribe_fmt = [
{'ticket': 'bbchip13'},
{'format': 'SIMPLE'}
]
subscribe_fmt += [
{
'type': 'ticker',
'codes': ['KRW-{}'.format(coin_name)],
'isOnlyRealtime': True
} for coin_name in coin_list
]
subscribe_data = json.dumps(subscribe_fmt)
await websocket.send(subscribe_data)
while True:
res = await websocket.recv()
res = json.loads(res)
print(res['cd'], res['tp'])
def get_upbit_coin_list():
upbit_exchange_id = 'upbit'
upbit_exchange_class = getattr(ccxt, upbit_exchange_id)
upbit_exchange = upbit_exchange_class({
'apiKey': 'YOUR_APP_KEY',
'secret': 'YOUR_SECRET',
})
upbit_coin_dict = {
k:v for k, v in upbit_exchange.load_markets().items()
if '/KRW' in k
}
upbit_coin_list = [
name.replace('/KRW', '') for name in list(upbit_coin_dict.keys())
]
return upbit_coin_list
def get_binance_coin_list():
binance_exchange_id = 'binance'
binance_exchange_class = getattr(ccxt, binance_exchange_id)
binance_exchange = binance_exchange_class({
'apiKey': 'YOUR_APP_KEY',
'secret': 'YOUR_SECRET',
})
binance_coin_dict = {
k:v for k, v in binance_exchange.load_markets().items()
if '/USDT' in k and v['active'] == True
}
binance_coin_list = [
name.replace('/USDT', '') for name in list(binance_coin_dict.keys())
]
return binance_coin_list
def upbit_callback_func():
pass
if __name__ == "__main__":
upbit_coin_list = get_upbit_coin_list()
binance_coin_list = get_binance_coin_list()
overlapped_coin_list = list(set(upbit_coin_list)&set(binance_coin_list))
tasks = [
asyncio.ensure_future(
upbit_ws_client(overlapped_coin_list, upbit_callback_func)
),
]
event_loop = asyncio.get_event_loop()
event_loop.run_until_complete(asyncio.wait(tasks))
| 27.277108 | 77 | 0.616166 | import asyncio
import websockets
import json
import ccxt
async def upbit_ws_client(coin_list, callback):
uri = 'wss://api.upbit.com/websocket/v1'
async with websockets.connect(uri) as websocket:
subscribe_fmt = [
{'ticket': 'bbchip13'},
{'format': 'SIMPLE'}
]
subscribe_fmt += [
{
'type': 'ticker',
'codes': ['KRW-{}'.format(coin_name)],
'isOnlyRealtime': True
} for coin_name in coin_list
]
subscribe_data = json.dumps(subscribe_fmt)
await websocket.send(subscribe_data)
while True:
res = await websocket.recv()
res = json.loads(res)
print(res['cd'], res['tp'])
def get_upbit_coin_list():
upbit_exchange_id = 'upbit'
upbit_exchange_class = getattr(ccxt, upbit_exchange_id)
upbit_exchange = upbit_exchange_class({
'apiKey': 'YOUR_APP_KEY',
'secret': 'YOUR_SECRET',
})
upbit_coin_dict = {
k:v for k, v in upbit_exchange.load_markets().items()
if '/KRW' in k
}
upbit_coin_list = [
name.replace('/KRW', '') for name in list(upbit_coin_dict.keys())
]
return upbit_coin_list
def get_binance_coin_list():
binance_exchange_id = 'binance'
binance_exchange_class = getattr(ccxt, binance_exchange_id)
binance_exchange = binance_exchange_class({
'apiKey': 'YOUR_APP_KEY',
'secret': 'YOUR_SECRET',
})
binance_coin_dict = {
k:v for k, v in binance_exchange.load_markets().items()
if '/USDT' in k and v['active'] == True
}
binance_coin_list = [
name.replace('/USDT', '') for name in list(binance_coin_dict.keys())
]
return binance_coin_list
def upbit_callback_func():
pass
if __name__ == "__main__":
upbit_coin_list = get_upbit_coin_list()
binance_coin_list = get_binance_coin_list()
overlapped_coin_list = list(set(upbit_coin_list)&set(binance_coin_list))
tasks = [
asyncio.ensure_future(
upbit_ws_client(overlapped_coin_list, upbit_callback_func)
),
]
event_loop = asyncio.get_event_loop()
event_loop.run_until_complete(asyncio.wait(tasks))
| true | true |
1c2fb4f74a9d9b8bbf553cbbcff20b7de2e4e7ec | 30,576 | py | Python | src/utils/imagenet_labels.py | u-shiori/DLtemplate2021 | 10d266b450a6505255c44a570c04cbc0d99a2568 | [
"MIT"
] | null | null | null | src/utils/imagenet_labels.py | u-shiori/DLtemplate2021 | 10d266b450a6505255c44a570c04cbc0d99a2568 | [
"MIT"
] | null | null | null | src/utils/imagenet_labels.py | u-shiori/DLtemplate2021 | 10d266b450a6505255c44a570c04cbc0d99a2568 | [
"MIT"
] | null | null | null | idx2label = {0: 'tench, Tinca tinca',
1: 'goldfish, Carassius auratus',
2: 'great white shark, white shark, man-eater, man-eating shark, Carcharodon carcharias',
3: 'tiger shark, Galeocerdo cuvieri',
4: 'hammerhead, hammerhead shark',
5: 'electric ray, crampfish, numbfish, torpedo',
6: 'stingray',
7: 'cock',
8: 'hen',
9: 'ostrich, Struthio camelus',
10: 'brambling, Fringilla montifringilla',
11: 'goldfinch, Carduelis carduelis',
12: 'house finch, linnet, Carpodacus mexicanus',
13: 'junco, snowbird',
14: 'indigo bunting, indigo finch, indigo bird, Passerina cyanea',
15: 'robin, American robin, Turdus migratorius',
16: 'bulbul',
17: 'jay',
18: 'magpie',
19: 'chickadee',
20: 'water ouzel, dipper',
21: 'kite',
22: 'bald eagle, American eagle, Haliaeetus leucocephalus',
23: 'vulture',
24: 'great grey owl, great gray owl, Strix nebulosa',
25: 'European fire salamander, Salamandra salamandra',
26: 'common newt, Triturus vulgaris',
27: 'eft',
28: 'spotted salamander, Ambystoma maculatum',
29: 'axolotl, mud puppy, Ambystoma mexicanum',
30: 'bullfrog, Rana catesbeiana',
31: 'tree frog, tree-frog',
32: 'tailed frog, bell toad, ribbed toad, tailed toad, Ascaphus trui',
33: 'loggerhead, loggerhead turtle, Caretta caretta',
34: 'leatherback turtle, leatherback, leathery turtle, Dermochelys coriacea',
35: 'mud turtle',
36: 'terrapin',
37: 'box turtle, box tortoise',
38: 'banded gecko',
39: 'common iguana, iguana, Iguana iguana',
40: 'American chameleon, anole, Anolis carolinensis',
41: 'whiptail, whiptail lizard',
42: 'agama',
43: 'frilled lizard, Chlamydosaurus kingi',
44: 'alligator lizard',
45: 'Gila monster, Heloderma suspectum',
46: 'green lizard, Lacerta viridis',
47: 'African chameleon, Chamaeleo chamaeleon',
48: 'Komodo dragon, Komodo lizard, dragon lizard, giant lizard, Varanus komodoensis',
49: 'African crocodile, Nile crocodile, Crocodylus niloticus',
50: 'American alligator, Alligator mississipiensis',
51: 'triceratops',
52: 'thunder snake, worm snake, Carphophis amoenus',
53: 'ringneck snake, ring-necked snake, ring snake',
54: 'hognose snake, puff adder, sand viper',
55: 'green snake, grass snake',
56: 'king snake, kingsnake',
57: 'garter snake, grass snake',
58: 'water snake',
59: 'vine snake',
60: 'night snake, Hypsiglena torquata',
61: 'boa constrictor, Constrictor constrictor',
62: 'rock python, rock snake, Python sebae',
63: 'Indian cobra, Naja naja',
64: 'green mamba',
65: 'sea snake',
66: 'horned viper, cerastes, sand viper, horned asp, Cerastes cornutus',
67: 'diamondback, diamondback rattlesnake, Crotalus adamanteus',
68: 'sidewinder, horned rattlesnake, Crotalus cerastes',
69: 'trilobite',
70: 'harvestman, daddy longlegs, Phalangium opilio',
71: 'scorpion',
72: 'black and gold garden spider, Argiope aurantia',
73: 'barn spider, Araneus cavaticus',
74: 'garden spider, Aranea diademata',
75: 'black widow, Latrodectus mactans',
76: 'tarantula',
77: 'wolf spider, hunting spider',
78: 'tick',
79: 'centipede',
80: 'black grouse',
81: 'ptarmigan',
82: 'ruffed grouse, partridge, Bonasa umbellus',
83: 'prairie chicken, prairie grouse, prairie fowl',
84: 'peacock',
85: 'quail',
86: 'partridge',
87: 'African grey, African gray, Psittacus erithacus',
88: 'macaw',
89: 'sulphur-crested cockatoo, Kakatoe galerita, Cacatua galerita',
90: 'lorikeet',
91: 'coucal',
92: 'bee eater',
93: 'hornbill',
94: 'hummingbird',
95: 'jacamar',
96: 'toucan',
97: 'drake',
98: 'red-breasted merganser, Mergus serrator',
99: 'goose',
100: 'black swan, Cygnus atratus',
101: 'tusker',
102: 'echidna, spiny anteater, anteater',
103: 'platypus, duckbill, duckbilled platypus, duck-billed platypus, Ornithorhynchus anatinus',
104: 'wallaby, brush kangaroo',
105: 'koala, koala bear, kangaroo bear, native bear, Phascolarctos cinereus',
106: 'wombat',
107: 'jellyfish',
108: 'sea anemone, anemone',
109: 'brain coral',
110: 'flatworm, platyhelminth',
111: 'nematode, nematode worm, roundworm',
112: 'conch',
113: 'snail',
114: 'slug',
115: 'sea slug, nudibranch',
116: 'chiton, coat-of-mail shell, sea cradle, polyplacophore',
117: 'chambered nautilus, pearly nautilus, nautilus',
118: 'Dungeness crab, Cancer magister',
119: 'rock crab, Cancer irroratus',
120: 'fiddler crab',
121: 'king crab, Alaska crab, Alaskan king crab, Alaska king crab, Paralithodes camtschatica',
122: 'American lobster, Northern lobster, Maine lobster, Homarus americanus',
123: 'spiny lobster, langouste, rock lobster, crawfish, crayfish, sea crawfish',
124: 'crayfish, crawfish, crawdad, crawdaddy',
125: 'hermit crab',
126: 'isopod',
127: 'white stork, Ciconia ciconia',
128: 'black stork, Ciconia nigra',
129: 'spoonbill',
130: 'flamingo',
131: 'little blue heron, Egretta caerulea',
132: 'American egret, great white heron, Egretta albus',
133: 'bittern',
134: 'crane',
135: 'limpkin, Aramus pictus',
136: 'European gallinule, Porphyrio porphyrio',
137: 'American coot, marsh hen, mud hen, water hen, Fulica americana',
138: 'bustard',
139: 'ruddy turnstone, Arenaria interpres',
140: 'red-backed sandpiper, dunlin, Erolia alpina',
141: 'redshank, Tringa totanus',
142: 'dowitcher',
143: 'oystercatcher, oyster catcher',
144: 'pelican',
145: 'king penguin, Aptenodytes patagonica',
146: 'albatross, mollymawk',
147: 'grey whale, gray whale, devilfish, Eschrichtius gibbosus, Eschrichtius robustus',
148: 'killer whale, killer, orca, grampus, sea wolf, Orcinus orca',
149: 'dugong, Dugong dugon',
150: 'sea lion',
151: 'Chihuahua',
152: 'Japanese spaniel',
153: 'Maltese dog, Maltese terrier, Maltese',
154: 'Pekinese, Pekingese, Peke',
155: 'Shih-Tzu',
156: 'Blenheim spaniel',
157: 'papillon',
158: 'toy terrier',
159: 'Rhodesian ridgeback',
160: 'Afghan hound, Afghan',
161: 'basset, basset hound',
162: 'beagle',
163: 'bloodhound, sleuthhound',
164: 'bluetick',
165: 'black-and-tan coonhound',
166: 'Walker hound, Walker foxhound',
167: 'English foxhound',
168: 'redbone',
169: 'borzoi, Russian wolfhound',
170: 'Irish wolfhound',
171: 'Italian greyhound',
172: 'whippet',
173: 'Ibizan hound, Ibizan Podenco',
174: 'Norwegian elkhound, elkhound',
175: 'otterhound, otter hound',
176: 'Saluki, gazelle hound',
177: 'Scottish deerhound, deerhound',
178: 'Weimaraner',
179: 'Staffordshire bullterrier, Staffordshire bull terrier',
180: 'American Staffordshire terrier, Staffordshire terrier, American pit bull terrier, pit bull terrier',
181: 'Bedlington terrier',
182: 'Border terrier',
183: 'Kerry blue terrier',
184: 'Irish terrier',
185: 'Norfolk terrier',
186: 'Norwich terrier',
187: 'Yorkshire terrier',
188: 'wire-haired fox terrier',
189: 'Lakeland terrier',
190: 'Sealyham terrier, Sealyham',
191: 'Airedale, Airedale terrier',
192: 'cairn, cairn terrier',
193: 'Australian terrier',
194: 'Dandie Dinmont, Dandie Dinmont terrier',
195: 'Boston bull, Boston terrier',
196: 'miniature schnauzer',
197: 'giant schnauzer',
198: 'standard schnauzer',
199: 'Scotch terrier, Scottish terrier, Scottie',
200: 'Tibetan terrier, chrysanthemum dog',
201: 'silky terrier, Sydney silky',
202: 'soft-coated wheaten terrier',
203: 'West Highland white terrier',
204: 'Lhasa, Lhasa apso',
205: 'flat-coated retriever',
206: 'curly-coated retriever',
207: 'golden retriever',
208: 'Labrador retriever',
209: 'Chesapeake Bay retriever',
210: 'German short-haired pointer',
211: 'vizsla, Hungarian pointer',
212: 'English setter',
213: 'Irish setter, red setter',
214: 'Gordon setter',
215: 'Brittany spaniel',
216: 'clumber, clumber spaniel',
217: 'English springer, English springer spaniel',
218: 'Welsh springer spaniel',
219: 'cocker spaniel, English cocker spaniel, cocker',
220: 'Sussex spaniel',
221: 'Irish water spaniel',
222: 'kuvasz',
223: 'schipperke',
224: 'groenendael',
225: 'malinois',
226: 'briard',
227: 'kelpie',
228: 'komondor',
229: 'Old English sheepdog, bobtail',
230: 'Shetland sheepdog, Shetland sheep dog, Shetland',
231: 'collie',
232: 'Border collie',
233: 'Bouvier des Flandres, Bouviers des Flandres',
234: 'Rottweiler',
235: 'German shepherd, German shepherd dog, German police dog, alsatian',
236: 'Doberman, Doberman pinscher',
237: 'miniature pinscher',
238: 'Greater Swiss Mountain dog',
239: 'Bernese mountain dog',
240: 'Appenzeller',
241: 'EntleBucher',
242: 'boxer',
243: 'bull mastiff',
244: 'Tibetan mastiff',
245: 'French bulldog',
246: 'Great Dane',
247: 'Saint Bernard, St Bernard',
248: 'Eskimo dog, husky',
249: 'malamute, malemute, Alaskan malamute',
250: 'Siberian husky',
251: 'dalmatian, coach dog, carriage dog',
252: 'affenpinscher, monkey pinscher, monkey dog',
253: 'basenji',
254: 'pug, pug-dog',
255: 'Leonberg',
256: 'Newfoundland, Newfoundland dog',
257: 'Great Pyrenees',
258: 'Samoyed, Samoyede',
259: 'Pomeranian',
260: 'chow, chow chow',
261: 'keeshond',
262: 'Brabancon griffon',
263: 'Pembroke, Pembroke Welsh corgi',
264: 'Cardigan, Cardigan Welsh corgi',
265: 'toy poodle',
266: 'miniature poodle',
267: 'standard poodle',
268: 'Mexican hairless',
269: 'timber wolf, grey wolf, gray wolf, Canis lupus',
270: 'white wolf, Arctic wolf, Canis lupus tundrarum',
271: 'red wolf, maned wolf, Canis rufus, Canis niger',
272: 'coyote, prairie wolf, brush wolf, Canis latrans',
273: 'dingo, warrigal, warragal, Canis dingo',
274: 'dhole, Cuon alpinus',
275: 'African hunting dog, hyena dog, Cape hunting dog, Lycaon pictus',
276: 'hyena, hyaena',
277: 'red fox, Vulpes vulpes',
278: 'kit fox, Vulpes macrotis',
279: 'Arctic fox, white fox, Alopex lagopus',
280: 'grey fox, gray fox, Urocyon cinereoargenteus',
281: 'tabby, tabby cat',
282: 'tiger cat',
283: 'Persian cat',
284: 'Siamese cat, Siamese',
285: 'Egyptian cat',
286: 'cougar, puma, catamount, mountain lion, painter, panther, Felis concolor',
287: 'lynx, catamount',
288: 'leopard, Panthera pardus',
289: 'snow leopard, ounce, Panthera uncia',
290: 'jaguar, panther, Panthera onca, Felis onca',
291: 'lion, king of beasts, Panthera leo',
292: 'tiger, Panthera tigris',
293: 'cheetah, chetah, Acinonyx jubatus',
294: 'brown bear, bruin, Ursus arctos',
295: 'American black bear, black bear, Ursus americanus, Euarctos americanus',
296: 'ice bear, polar bear, Ursus Maritimus, Thalarctos maritimus',
297: 'sloth bear, Melursus ursinus, Ursus ursinus',
298: 'mongoose',
299: 'meerkat, mierkat',
300: 'tiger beetle',
301: 'ladybug, ladybeetle, lady beetle, ladybird, ladybird beetle',
302: 'ground beetle, carabid beetle',
303: 'long-horned beetle, longicorn, longicorn beetle',
304: 'leaf beetle, chrysomelid',
305: 'dung beetle',
306: 'rhinoceros beetle',
307: 'weevil',
308: 'fly',
309: 'bee',
310: 'ant, emmet, pismire',
311: 'grasshopper, hopper',
312: 'cricket',
313: 'walking stick, walkingstick, stick insect',
314: 'cockroach, roach',
315: 'mantis, mantid',
316: 'cicada, cicala',
317: 'leafhopper',
318: 'lacewing, lacewing fly',
319: "dragonfly, darning needle, devil's darning needle, sewing needle, snake feeder, snake doctor, mosquito hawk, skeeter hawk",
320: 'damselfly',
321: 'admiral',
322: 'ringlet, ringlet butterfly',
323: 'monarch, monarch butterfly, milkweed butterfly, Danaus plexippus',
324: 'cabbage butterfly',
325: 'sulphur butterfly, sulfur butterfly',
326: 'lycaenid, lycaenid butterfly',
327: 'starfish, sea star',
328: 'sea urchin',
329: 'sea cucumber, holothurian',
330: 'wood rabbit, cottontail, cottontail rabbit',
331: 'hare',
332: 'Angora, Angora rabbit',
333: 'hamster',
334: 'porcupine, hedgehog',
335: 'fox squirrel, eastern fox squirrel, Sciurus niger',
336: 'marmot',
337: 'beaver',
338: 'guinea pig, Cavia cobaya',
339: 'sorrel',
340: 'zebra',
341: 'hog, pig, grunter, squealer, Sus scrofa',
342: 'wild boar, boar, Sus scrofa',
343: 'warthog',
344: 'hippopotamus, hippo, river horse, Hippopotamus amphibius',
345: 'ox',
346: 'water buffalo, water ox, Asiatic buffalo, Bubalus bubalis',
347: 'bison',
348: 'ram, tup',
349: 'bighorn, bighorn sheep, cimarron, Rocky Mountain bighorn, Rocky Mountain sheep, Ovis canadensis',
350: 'ibex, Capra ibex',
351: 'hartebeest',
352: 'impala, Aepyceros melampus',
353: 'gazelle',
354: 'Arabian camel, dromedary, Camelus dromedarius',
355: 'llama',
356: 'weasel',
357: 'mink',
358: 'polecat, fitch, foulmart, foumart, Mustela putorius',
359: 'black-footed ferret, ferret, Mustela nigripes',
360: 'otter',
361: 'skunk, polecat, wood pussy',
362: 'badger',
363: 'armadillo',
364: 'three-toed sloth, ai, Bradypus tridactylus',
365: 'orangutan, orang, orangutang, Pongo pygmaeus',
366: 'gorilla, Gorilla gorilla',
367: 'chimpanzee, chimp, Pan troglodytes',
368: 'gibbon, Hylobates lar',
369: 'siamang, Hylobates syndactylus, Symphalangus syndactylus',
370: 'guenon, guenon monkey',
371: 'patas, hussar monkey, Erythrocebus patas',
372: 'baboon',
373: 'macaque',
374: 'langur',
375: 'colobus, colobus monkey',
376: 'proboscis monkey, Nasalis larvatus',
377: 'marmoset',
378: 'capuchin, ringtail, Cebus capucinus',
379: 'howler monkey, howler',
380: 'titi, titi monkey',
381: 'spider monkey, Ateles geoffroyi',
382: 'squirrel monkey, Saimiri sciureus',
383: 'Madagascar cat, ring-tailed lemur, Lemur catta',
384: 'indri, indris, Indri indri, Indri brevicaudatus',
385: 'Indian elephant, Elephas maximus',
386: 'African elephant, Loxodonta africana',
387: 'lesser panda, red panda, panda, bear cat, cat bear, Ailurus fulgens',
388: 'giant panda, panda, panda bear, coon bear, Ailuropoda melanoleuca',
389: 'barracouta, snoek',
390: 'eel',
391: 'coho, cohoe, coho salmon, blue jack, silver salmon, Oncorhynchus kisutch',
392: 'rock beauty, Holocanthus tricolor',
393: 'anemone fish',
394: 'sturgeon',
395: 'gar, garfish, garpike, billfish, Lepisosteus osseus',
396: 'lionfish',
397: 'puffer, pufferfish, blowfish, globefish',
398: 'abacus',
399: 'abaya',
400: "academic gown, academic robe, judge's robe",
401: 'accordion, piano accordion, squeeze box',
402: 'acoustic guitar',
403: 'aircraft carrier, carrier, flattop, attack aircraft carrier',
404: 'airliner',
405: 'airship, dirigible',
406: 'altar',
407: 'ambulance',
408: 'amphibian, amphibious vehicle',
409: 'analog clock',
410: 'apiary, bee house',
411: 'apron',
412: 'ashcan, trash can, garbage can, wastebin, ash bin, ash-bin, ashbin, dustbin, trash barrel, trash bin',
413: 'assault rifle, assault gun',
414: 'backpack, back pack, knapsack, packsack, rucksack, haversack',
415: 'bakery, bakeshop, bakehouse',
416: 'balance beam, beam',
417: 'balloon',
418: 'ballpoint, ballpoint pen, ballpen, Biro',
419: 'Band Aid',
420: 'banjo',
421: 'bannister, banister, balustrade, balusters, handrail',
422: 'barbell',
423: 'barber chair',
424: 'barbershop',
425: 'barn',
426: 'barometer',
427: 'barrel, cask',
428: 'barrow, garden cart, lawn cart, wheelbarrow',
429: 'baseball',
430: 'basketball',
431: 'bassinet',
432: 'bassoon',
433: 'bathing cap, swimming cap',
434: 'bath towel',
435: 'bathtub, bathing tub, bath, tub',
436: 'beach wagon, station wagon, wagon, estate car, beach waggon, station waggon, waggon',
437: 'beacon, lighthouse, beacon light, pharos',
438: 'beaker',
439: 'bearskin, busby, shako',
440: 'beer bottle',
441: 'beer glass',
442: 'bell cote, bell cot',
443: 'bib',
444: 'bicycle-built-for-two, tandem bicycle, tandem',
445: 'bikini, two-piece',
446: 'binder, ring-binder',
447: 'binoculars, field glasses, opera glasses',
448: 'birdhouse',
449: 'boathouse',
450: 'bobsled, bobsleigh, bob',
451: 'bolo tie, bolo, bola tie, bola',
452: 'bonnet, poke bonnet',
453: 'bookcase',
454: 'bookshop, bookstore, bookstall',
455: 'bottlecap',
456: 'bow',
457: 'bow tie, bow-tie, bowtie',
458: 'brass, memorial tablet, plaque',
459: 'brassiere, bra, bandeau',
460: 'breakwater, groin, groyne, mole, bulwark, seawall, jetty',
461: 'breastplate, aegis, egis',
462: 'broom',
463: 'bucket, pail',
464: 'buckle',
465: 'bulletproof vest',
466: 'bullet train, bullet',
467: 'butcher shop, meat market',
468: 'cab, hack, taxi, taxicab',
469: 'caldron, cauldron',
470: 'candle, taper, wax light',
471: 'cannon',
472: 'canoe',
473: 'can opener, tin opener',
474: 'cardigan',
475: 'car mirror',
476: 'carousel, carrousel, merry-go-round, roundabout, whirligig',
477: "carpenter's kit, tool kit",
478: 'carton',
479: 'car wheel',
480: 'cash machine, cash dispenser, automated teller machine, automatic teller machine, automated teller, automatic teller, ATM',
481: 'cassette',
482: 'cassette player',
483: 'castle',
484: 'catamaran',
485: 'CD player',
486: 'cello, violoncello',
487: 'cellular telephone, cellular phone, cellphone, cell, mobile phone',
488: 'chain',
489: 'chainlink fence',
490: 'chain mail, ring mail, mail, chain armor, chain armour, ring armor, ring armour',
491: 'chain saw, chainsaw',
492: 'chest',
493: 'chiffonier, commode',
494: 'chime, bell, gong',
495: 'china cabinet, china closet',
496: 'Christmas stocking',
497: 'church, church building',
498: 'cinema, movie theater, movie theatre, movie house, picture palace',
499: 'cleaver, meat cleaver, chopper',
500: 'cliff dwelling',
501: 'cloak',
502: 'clog, geta, patten, sabot',
503: 'cocktail shaker',
504: 'coffee mug',
505: 'coffeepot',
506: 'coil, spiral, volute, whorl, helix',
507: 'combination lock',
508: 'computer keyboard, keypad',
509: 'confectionery, confectionary, candy store',
510: 'container ship, containership, container vessel',
511: 'convertible',
512: 'corkscrew, bottle screw',
513: 'cornet, horn, trumpet, trump',
514: 'cowboy boot',
515: 'cowboy hat, ten-gallon hat',
516: 'cradle',
517: 'crane',
518: 'crash helmet',
519: 'crate',
520: 'crib, cot',
521: 'Crock Pot',
522: 'croquet ball',
523: 'crutch',
524: 'cuirass',
525: 'dam, dike, dyke',
526: 'desk',
527: 'desktop computer',
528: 'dial telephone, dial phone',
529: 'diaper, nappy, napkin',
530: 'digital clock',
531: 'digital watch',
532: 'dining table, board',
533: 'dishrag, dishcloth',
534: 'dishwasher, dish washer, dishwashing machine',
535: 'disk brake, disc brake',
536: 'dock, dockage, docking facility',
537: 'dogsled, dog sled, dog sleigh',
538: 'dome',
539: 'doormat, welcome mat',
540: 'drilling platform, offshore rig',
541: 'drum, membranophone, tympan',
542: 'drumstick',
543: 'dumbbell',
544: 'Dutch oven',
545: 'electric fan, blower',
546: 'electric guitar',
547: 'electric locomotive',
548: 'entertainment center',
549: 'envelope',
550: 'espresso maker',
551: 'face powder',
552: 'feather boa, boa',
553: 'file, file cabinet, filing cabinet',
554: 'fireboat',
555: 'fire engine, fire truck',
556: 'fire screen, fireguard',
557: 'flagpole, flagstaff',
558: 'flute, transverse flute',
559: 'folding chair',
560: 'football helmet',
561: 'forklift',
562: 'fountain',
563: 'fountain pen',
564: 'four-poster',
565: 'freight car',
566: 'French horn, horn',
567: 'frying pan, frypan, skillet',
568: 'fur coat',
569: 'garbage truck, dustcart',
570: 'gasmask, respirator, gas helmet',
571: 'gas pump, gasoline pump, petrol pump, island dispenser',
572: 'goblet',
573: 'go-kart',
574: 'golf ball',
575: 'golfcart, golf cart',
576: 'gondola',
577: 'gong, tam-tam',
578: 'gown',
579: 'grand piano, grand',
580: 'greenhouse, nursery, glasshouse',
581: 'grille, radiator grille',
582: 'grocery store, grocery, food market, market',
583: 'guillotine',
584: 'hair slide',
585: 'hair spray',
586: 'half track',
587: 'hammer',
588: 'hamper',
589: 'hand blower, blow dryer, blow drier, hair dryer, hair drier',
590: 'hand-held computer, hand-held microcomputer',
591: 'handkerchief, hankie, hanky, hankey',
592: 'hard disc, hard disk, fixed disk',
593: 'harmonica, mouth organ, harp, mouth harp',
594: 'harp',
595: 'harvester, reaper',
596: 'hatchet',
597: 'holster',
598: 'home theater, home theatre',
599: 'honeycomb',
600: 'hook, claw',
601: 'hoopskirt, crinoline',
602: 'horizontal bar, high bar',
603: 'horse cart, horse-cart',
604: 'hourglass',
605: 'iPod',
606: 'iron, smoothing iron',
607: "jack-o'-lantern",
608: 'jean, blue jean, denim',
609: 'jeep, landrover',
610: 'jersey, T-shirt, tee shirt',
611: 'jigsaw puzzle',
612: 'jinrikisha, ricksha, rickshaw',
613: 'joystick',
614: 'kimono',
615: 'knee pad',
616: 'knot',
617: 'lab coat, laboratory coat',
618: 'ladle',
619: 'lampshade, lamp shade',
620: 'laptop, laptop computer',
621: 'lawn mower, mower',
622: 'lens cap, lens cover',
623: 'letter opener, paper knife, paperknife',
624: 'library',
625: 'lifeboat',
626: 'lighter, light, igniter, ignitor',
627: 'limousine, limo',
628: 'liner, ocean liner',
629: 'lipstick, lip rouge',
630: 'Loafer',
631: 'lotion',
632: 'loudspeaker, speaker, speaker unit, loudspeaker system, speaker system',
633: "loupe, jeweler's loupe",
634: 'lumbermill, sawmill',
635: 'magnetic compass',
636: 'mailbag, postbag',
637: 'mailbox, letter box',
638: 'maillot',
639: 'maillot, tank suit',
640: 'manhole cover',
641: 'maraca',
642: 'marimba, xylophone',
643: 'mask',
644: 'matchstick',
645: 'maypole',
646: 'maze, labyrinth',
647: 'measuring cup',
648: 'medicine chest, medicine cabinet',
649: 'megalith, megalithic structure',
650: 'microphone, mike',
651: 'microwave, microwave oven',
652: 'military uniform',
653: 'milk can',
654: 'minibus',
655: 'miniskirt, mini',
656: 'minivan',
657: 'missile',
658: 'mitten',
659: 'mixing bowl',
660: 'mobile home, manufactured home',
661: 'Model T',
662: 'modem',
663: 'monastery',
664: 'monitor',
665: 'moped',
666: 'mortar',
667: 'mortarboard',
668: 'mosque',
669: 'mosquito net',
670: 'motor scooter, scooter',
671: 'mountain bike, all-terrain bike, off-roader',
672: 'mountain tent',
673: 'mouse, computer mouse',
674: 'mousetrap',
675: 'moving van',
676: 'muzzle',
677: 'nail',
678: 'neck brace',
679: 'necklace',
680: 'nipple',
681: 'notebook, notebook computer',
682: 'obelisk',
683: 'oboe, hautboy, hautbois',
684: 'ocarina, sweet potato',
685: 'odometer, hodometer, mileometer, milometer',
686: 'oil filter',
687: 'organ, pipe organ',
688: 'oscilloscope, scope, cathode-ray oscilloscope, CRO',
689: 'overskirt',
690: 'oxcart',
691: 'oxygen mask',
692: 'packet',
693: 'paddle, boat paddle',
694: 'paddlewheel, paddle wheel',
695: 'padlock',
696: 'paintbrush',
697: "pajama, pyjama, pj's, jammies",
698: 'palace',
699: 'panpipe, pandean pipe, syrinx',
700: 'paper towel',
701: 'parachute, chute',
702: 'parallel bars, bars',
703: 'park bench',
704: 'parking meter',
705: 'passenger car, coach, carriage',
706: 'patio, terrace',
707: 'pay-phone, pay-station',
708: 'pedestal, plinth, footstall',
709: 'pencil box, pencil case',
710: 'pencil sharpener',
711: 'perfume, essence',
712: 'Petri dish',
713: 'photocopier',
714: 'pick, plectrum, plectron',
715: 'pickelhaube',
716: 'picket fence, paling',
717: 'pickup, pickup truck',
718: 'pier',
719: 'piggy bank, penny bank',
720: 'pill bottle',
721: 'pillow',
722: 'ping-pong ball',
723: 'pinwheel',
724: 'pirate, pirate ship',
725: 'pitcher, ewer',
726: "plane, carpenter's plane, woodworking plane",
727: 'planetarium',
728: 'plastic bag',
729: 'plate rack',
730: 'plow, plough',
731: "plunger, plumber's helper",
732: 'Polaroid camera, Polaroid Land camera',
733: 'pole',
734: 'police van, police wagon, paddy wagon, patrol wagon, wagon, black Maria',
735: 'poncho',
736: 'pool table, billiard table, snooker table',
737: 'pop bottle, soda bottle',
738: 'pot, flowerpot',
739: "potter's wheel",
740: 'power drill',
741: 'prayer rug, prayer mat',
742: 'printer',
743: 'prison, prison house',
744: 'projectile, missile',
745: 'projector',
746: 'puck, hockey puck',
747: 'punching bag, punch bag, punching ball, punchball',
748: 'purse',
749: 'quill, quill pen',
750: 'quilt, comforter, comfort, puff',
751: 'racer, race car, racing car',
752: 'racket, racquet',
753: 'radiator',
754: 'radio, wireless',
755: 'radio telescope, radio reflector',
756: 'rain barrel',
757: 'recreational vehicle, RV, R.V.',
758: 'reel',
759: 'reflex camera',
760: 'refrigerator, icebox',
761: 'remote control, remote',
762: 'restaurant, eating house, eating place, eatery',
763: 'revolver, six-gun, six-shooter',
764: 'rifle',
765: 'rocking chair, rocker',
766: 'rotisserie',
767: 'rubber eraser, rubber, pencil eraser',
768: 'rugby ball',
769: 'rule, ruler',
770: 'running shoe',
771: 'safe',
772: 'safety pin',
773: 'saltshaker, salt shaker',
774: 'sandal',
775: 'sarong',
776: 'sax, saxophone',
777: 'scabbard',
778: 'scale, weighing machine',
779: 'school bus',
780: 'schooner',
781: 'scoreboard',
782: 'screen, CRT screen',
783: 'screw',
784: 'screwdriver',
785: 'seat belt, seatbelt',
786: 'sewing machine',
787: 'shield, buckler',
788: 'shoe shop, shoe-shop, shoe store',
789: 'shoji',
790: 'shopping basket',
791: 'shopping cart',
792: 'shovel',
793: 'shower cap',
794: 'shower curtain',
795: 'ski',
796: 'ski mask',
797: 'sleeping bag',
798: 'slide rule, slipstick',
799: 'sliding door',
800: 'slot, one-armed bandit',
801: 'snorkel',
802: 'snowmobile',
803: 'snowplow, snowplough',
804: 'soap dispenser',
805: 'soccer ball',
806: 'sock',
807: 'solar dish, solar collector, solar furnace',
808: 'sombrero',
809: 'soup bowl',
810: 'space bar',
811: 'space heater',
812: 'space shuttle',
813: 'spatula',
814: 'speedboat',
815: "spider web, spider's web",
816: 'spindle',
817: 'sports car, sport car',
818: 'spotlight, spot',
819: 'stage',
820: 'steam locomotive',
821: 'steel arch bridge',
822: 'steel drum',
823: 'stethoscope',
824: 'stole',
825: 'stone wall',
826: 'stopwatch, stop watch',
827: 'stove',
828: 'strainer',
829: 'streetcar, tram, tramcar, trolley, trolley car',
830: 'stretcher',
831: 'studio couch, day bed',
832: 'stupa, tope',
833: 'submarine, pigboat, sub, U-boat',
834: 'suit, suit of clothes',
835: 'sundial',
836: 'sunglass',
837: 'sunglasses, dark glasses, shades',
838: 'sunscreen, sunblock, sun blocker',
839: 'suspension bridge',
840: 'swab, swob, mop',
841: 'sweatshirt',
842: 'swimming trunks, bathing trunks',
843: 'swing',
844: 'switch, electric switch, electrical switch',
845: 'syringe',
846: 'table lamp',
847: 'tank, army tank, armored combat vehicle, armoured combat vehicle',
848: 'tape player',
849: 'teapot',
850: 'teddy, teddy bear',
851: 'television, television system',
852: 'tennis ball',
853: 'thatch, thatched roof',
854: 'theater curtain, theatre curtain',
855: 'thimble',
856: 'thresher, thrasher, threshing machine',
857: 'throne',
858: 'tile roof',
859: 'toaster',
860: 'tobacco shop, tobacconist shop, tobacconist',
861: 'toilet seat',
862: 'torch',
863: 'totem pole',
864: 'tow truck, tow car, wrecker',
865: 'toyshop',
866: 'tractor',
867: 'trailer truck, tractor trailer, trucking rig, rig, articulated lorry, semi',
868: 'tray',
869: 'trench coat',
870: 'tricycle, trike, velocipede',
871: 'trimaran',
872: 'tripod',
873: 'triumphal arch',
874: 'trolleybus, trolley coach, trackless trolley',
875: 'trombone',
876: 'tub, vat',
877: 'turnstile',
878: 'typewriter keyboard',
879: 'umbrella',
880: 'unicycle, monocycle',
881: 'upright, upright piano',
882: 'vacuum, vacuum cleaner',
883: 'vase',
884: 'vault',
885: 'velvet',
886: 'vending machine',
887: 'vestment',
888: 'viaduct',
889: 'violin, fiddle',
890: 'volleyball',
891: 'waffle iron',
892: 'wall clock',
893: 'wallet, billfold, notecase, pocketbook',
894: 'wardrobe, closet, press',
895: 'warplane, military plane',
896: 'washbasin, handbasin, washbowl, lavabo, wash-hand basin',
897: 'washer, automatic washer, washing machine',
898: 'water bottle',
899: 'water jug',
900: 'water tower',
901: 'whiskey jug',
902: 'whistle',
903: 'wig',
904: 'window screen',
905: 'window shade',
906: 'Windsor tie',
907: 'wine bottle',
908: 'wing',
909: 'wok',
910: 'wooden spoon',
911: 'wool, woolen, woollen',
912: 'worm fence, snake fence, snake-rail fence, Virginia fence',
913: 'wreck',
914: 'yawl',
915: 'yurt',
916: 'web site, website, internet site, site',
917: 'comic book',
918: 'crossword puzzle, crossword',
919: 'street sign',
920: 'traffic light, traffic signal, stoplight',
921: 'book jacket, dust cover, dust jacket, dust wrapper',
922: 'menu',
923: 'plate',
924: 'guacamole',
925: 'consomme',
926: 'hot pot, hotpot',
927: 'trifle',
928: 'ice cream, icecream',
929: 'ice lolly, lolly, lollipop, popsicle',
930: 'French loaf',
931: 'bagel, beigel',
932: 'pretzel',
933: 'cheeseburger',
934: 'hotdog, hot dog, red hot',
935: 'mashed potato',
936: 'head cabbage',
937: 'broccoli',
938: 'cauliflower',
939: 'zucchini, courgette',
940: 'spaghetti squash',
941: 'acorn squash',
942: 'butternut squash',
943: 'cucumber, cuke',
944: 'artichoke, globe artichoke',
945: 'bell pepper',
946: 'cardoon',
947: 'mushroom',
948: 'Granny Smith',
949: 'strawberry',
950: 'orange',
951: 'lemon',
952: 'fig',
953: 'pineapple, ananas',
954: 'banana',
955: 'jackfruit, jak, jack',
956: 'custard apple',
957: 'pomegranate',
958: 'hay',
959: 'carbonara',
960: 'chocolate sauce, chocolate syrup',
961: 'dough',
962: 'meat loaf, meatloaf',
963: 'pizza, pizza pie',
964: 'potpie',
965: 'burrito',
966: 'red wine',
967: 'espresso',
968: 'cup',
969: 'eggnog',
970: 'alp',
971: 'bubble',
972: 'cliff, drop, drop-off',
973: 'coral reef',
974: 'geyser',
975: 'lakeside, lakeshore',
976: 'promontory, headland, head, foreland',
977: 'sandbar, sand bar',
978: 'seashore, coast, seacoast, sea-coast',
979: 'valley, vale',
980: 'volcano',
981: 'ballplayer, baseball player',
982: 'groom, bridegroom',
983: 'scuba diver',
984: 'rapeseed',
985: 'daisy',
986: "yellow lady's slipper, yellow lady-slipper, Cypripedium calceolus, Cypripedium parviflorum",
987: 'corn',
988: 'acorn',
989: 'hip, rose hip, rosehip',
990: 'buckeye, horse chestnut, conker',
991: 'coral fungus',
992: 'agaric',
993: 'gyromitra',
994: 'stinkhorn, carrion fungus',
995: 'earthstar',
996: 'hen-of-the-woods, hen of the woods, Polyporus frondosus, Grifola frondosa',
997: 'bolete',
998: 'ear, spike, capitulum',
999: 'toilet tissue, toilet paper, bathroom tissue'} | 30.576 | 130 | 0.680534 | idx2label = {0: 'tench, Tinca tinca',
1: 'goldfish, Carassius auratus',
2: 'great white shark, white shark, man-eater, man-eating shark, Carcharodon carcharias',
3: 'tiger shark, Galeocerdo cuvieri',
4: 'hammerhead, hammerhead shark',
5: 'electric ray, crampfish, numbfish, torpedo',
6: 'stingray',
7: 'cock',
8: 'hen',
9: 'ostrich, Struthio camelus',
10: 'brambling, Fringilla montifringilla',
11: 'goldfinch, Carduelis carduelis',
12: 'house finch, linnet, Carpodacus mexicanus',
13: 'junco, snowbird',
14: 'indigo bunting, indigo finch, indigo bird, Passerina cyanea',
15: 'robin, American robin, Turdus migratorius',
16: 'bulbul',
17: 'jay',
18: 'magpie',
19: 'chickadee',
20: 'water ouzel, dipper',
21: 'kite',
22: 'bald eagle, American eagle, Haliaeetus leucocephalus',
23: 'vulture',
24: 'great grey owl, great gray owl, Strix nebulosa',
25: 'European fire salamander, Salamandra salamandra',
26: 'common newt, Triturus vulgaris',
27: 'eft',
28: 'spotted salamander, Ambystoma maculatum',
29: 'axolotl, mud puppy, Ambystoma mexicanum',
30: 'bullfrog, Rana catesbeiana',
31: 'tree frog, tree-frog',
32: 'tailed frog, bell toad, ribbed toad, tailed toad, Ascaphus trui',
33: 'loggerhead, loggerhead turtle, Caretta caretta',
34: 'leatherback turtle, leatherback, leathery turtle, Dermochelys coriacea',
35: 'mud turtle',
36: 'terrapin',
37: 'box turtle, box tortoise',
38: 'banded gecko',
39: 'common iguana, iguana, Iguana iguana',
40: 'American chameleon, anole, Anolis carolinensis',
41: 'whiptail, whiptail lizard',
42: 'agama',
43: 'frilled lizard, Chlamydosaurus kingi',
44: 'alligator lizard',
45: 'Gila monster, Heloderma suspectum',
46: 'green lizard, Lacerta viridis',
47: 'African chameleon, Chamaeleo chamaeleon',
48: 'Komodo dragon, Komodo lizard, dragon lizard, giant lizard, Varanus komodoensis',
49: 'African crocodile, Nile crocodile, Crocodylus niloticus',
50: 'American alligator, Alligator mississipiensis',
51: 'triceratops',
52: 'thunder snake, worm snake, Carphophis amoenus',
53: 'ringneck snake, ring-necked snake, ring snake',
54: 'hognose snake, puff adder, sand viper',
55: 'green snake, grass snake',
56: 'king snake, kingsnake',
57: 'garter snake, grass snake',
58: 'water snake',
59: 'vine snake',
60: 'night snake, Hypsiglena torquata',
61: 'boa constrictor, Constrictor constrictor',
62: 'rock python, rock snake, Python sebae',
63: 'Indian cobra, Naja naja',
64: 'green mamba',
65: 'sea snake',
66: 'horned viper, cerastes, sand viper, horned asp, Cerastes cornutus',
67: 'diamondback, diamondback rattlesnake, Crotalus adamanteus',
68: 'sidewinder, horned rattlesnake, Crotalus cerastes',
69: 'trilobite',
70: 'harvestman, daddy longlegs, Phalangium opilio',
71: 'scorpion',
72: 'black and gold garden spider, Argiope aurantia',
73: 'barn spider, Araneus cavaticus',
74: 'garden spider, Aranea diademata',
75: 'black widow, Latrodectus mactans',
76: 'tarantula',
77: 'wolf spider, hunting spider',
78: 'tick',
79: 'centipede',
80: 'black grouse',
81: 'ptarmigan',
82: 'ruffed grouse, partridge, Bonasa umbellus',
83: 'prairie chicken, prairie grouse, prairie fowl',
84: 'peacock',
85: 'quail',
86: 'partridge',
87: 'African grey, African gray, Psittacus erithacus',
88: 'macaw',
89: 'sulphur-crested cockatoo, Kakatoe galerita, Cacatua galerita',
90: 'lorikeet',
91: 'coucal',
92: 'bee eater',
93: 'hornbill',
94: 'hummingbird',
95: 'jacamar',
96: 'toucan',
97: 'drake',
98: 'red-breasted merganser, Mergus serrator',
99: 'goose',
100: 'black swan, Cygnus atratus',
101: 'tusker',
102: 'echidna, spiny anteater, anteater',
103: 'platypus, duckbill, duckbilled platypus, duck-billed platypus, Ornithorhynchus anatinus',
104: 'wallaby, brush kangaroo',
105: 'koala, koala bear, kangaroo bear, native bear, Phascolarctos cinereus',
106: 'wombat',
107: 'jellyfish',
108: 'sea anemone, anemone',
109: 'brain coral',
110: 'flatworm, platyhelminth',
111: 'nematode, nematode worm, roundworm',
112: 'conch',
113: 'snail',
114: 'slug',
115: 'sea slug, nudibranch',
116: 'chiton, coat-of-mail shell, sea cradle, polyplacophore',
117: 'chambered nautilus, pearly nautilus, nautilus',
118: 'Dungeness crab, Cancer magister',
119: 'rock crab, Cancer irroratus',
120: 'fiddler crab',
121: 'king crab, Alaska crab, Alaskan king crab, Alaska king crab, Paralithodes camtschatica',
122: 'American lobster, Northern lobster, Maine lobster, Homarus americanus',
123: 'spiny lobster, langouste, rock lobster, crawfish, crayfish, sea crawfish',
124: 'crayfish, crawfish, crawdad, crawdaddy',
125: 'hermit crab',
126: 'isopod',
127: 'white stork, Ciconia ciconia',
128: 'black stork, Ciconia nigra',
129: 'spoonbill',
130: 'flamingo',
131: 'little blue heron, Egretta caerulea',
132: 'American egret, great white heron, Egretta albus',
133: 'bittern',
134: 'crane',
135: 'limpkin, Aramus pictus',
136: 'European gallinule, Porphyrio porphyrio',
137: 'American coot, marsh hen, mud hen, water hen, Fulica americana',
138: 'bustard',
139: 'ruddy turnstone, Arenaria interpres',
140: 'red-backed sandpiper, dunlin, Erolia alpina',
141: 'redshank, Tringa totanus',
142: 'dowitcher',
143: 'oystercatcher, oyster catcher',
144: 'pelican',
145: 'king penguin, Aptenodytes patagonica',
146: 'albatross, mollymawk',
147: 'grey whale, gray whale, devilfish, Eschrichtius gibbosus, Eschrichtius robustus',
148: 'killer whale, killer, orca, grampus, sea wolf, Orcinus orca',
149: 'dugong, Dugong dugon',
150: 'sea lion',
151: 'Chihuahua',
152: 'Japanese spaniel',
153: 'Maltese dog, Maltese terrier, Maltese',
154: 'Pekinese, Pekingese, Peke',
155: 'Shih-Tzu',
156: 'Blenheim spaniel',
157: 'papillon',
158: 'toy terrier',
159: 'Rhodesian ridgeback',
160: 'Afghan hound, Afghan',
161: 'basset, basset hound',
162: 'beagle',
163: 'bloodhound, sleuthhound',
164: 'bluetick',
165: 'black-and-tan coonhound',
166: 'Walker hound, Walker foxhound',
167: 'English foxhound',
168: 'redbone',
169: 'borzoi, Russian wolfhound',
170: 'Irish wolfhound',
171: 'Italian greyhound',
172: 'whippet',
173: 'Ibizan hound, Ibizan Podenco',
174: 'Norwegian elkhound, elkhound',
175: 'otterhound, otter hound',
176: 'Saluki, gazelle hound',
177: 'Scottish deerhound, deerhound',
178: 'Weimaraner',
179: 'Staffordshire bullterrier, Staffordshire bull terrier',
180: 'American Staffordshire terrier, Staffordshire terrier, American pit bull terrier, pit bull terrier',
181: 'Bedlington terrier',
182: 'Border terrier',
183: 'Kerry blue terrier',
184: 'Irish terrier',
185: 'Norfolk terrier',
186: 'Norwich terrier',
187: 'Yorkshire terrier',
188: 'wire-haired fox terrier',
189: 'Lakeland terrier',
190: 'Sealyham terrier, Sealyham',
191: 'Airedale, Airedale terrier',
192: 'cairn, cairn terrier',
193: 'Australian terrier',
194: 'Dandie Dinmont, Dandie Dinmont terrier',
195: 'Boston bull, Boston terrier',
196: 'miniature schnauzer',
197: 'giant schnauzer',
198: 'standard schnauzer',
199: 'Scotch terrier, Scottish terrier, Scottie',
200: 'Tibetan terrier, chrysanthemum dog',
201: 'silky terrier, Sydney silky',
202: 'soft-coated wheaten terrier',
203: 'West Highland white terrier',
204: 'Lhasa, Lhasa apso',
205: 'flat-coated retriever',
206: 'curly-coated retriever',
207: 'golden retriever',
208: 'Labrador retriever',
209: 'Chesapeake Bay retriever',
210: 'German short-haired pointer',
211: 'vizsla, Hungarian pointer',
212: 'English setter',
213: 'Irish setter, red setter',
214: 'Gordon setter',
215: 'Brittany spaniel',
216: 'clumber, clumber spaniel',
217: 'English springer, English springer spaniel',
218: 'Welsh springer spaniel',
219: 'cocker spaniel, English cocker spaniel, cocker',
220: 'Sussex spaniel',
221: 'Irish water spaniel',
222: 'kuvasz',
223: 'schipperke',
224: 'groenendael',
225: 'malinois',
226: 'briard',
227: 'kelpie',
228: 'komondor',
229: 'Old English sheepdog, bobtail',
230: 'Shetland sheepdog, Shetland sheep dog, Shetland',
231: 'collie',
232: 'Border collie',
233: 'Bouvier des Flandres, Bouviers des Flandres',
234: 'Rottweiler',
235: 'German shepherd, German shepherd dog, German police dog, alsatian',
236: 'Doberman, Doberman pinscher',
237: 'miniature pinscher',
238: 'Greater Swiss Mountain dog',
239: 'Bernese mountain dog',
240: 'Appenzeller',
241: 'EntleBucher',
242: 'boxer',
243: 'bull mastiff',
244: 'Tibetan mastiff',
245: 'French bulldog',
246: 'Great Dane',
247: 'Saint Bernard, St Bernard',
248: 'Eskimo dog, husky',
249: 'malamute, malemute, Alaskan malamute',
250: 'Siberian husky',
251: 'dalmatian, coach dog, carriage dog',
252: 'affenpinscher, monkey pinscher, monkey dog',
253: 'basenji',
254: 'pug, pug-dog',
255: 'Leonberg',
256: 'Newfoundland, Newfoundland dog',
257: 'Great Pyrenees',
258: 'Samoyed, Samoyede',
259: 'Pomeranian',
260: 'chow, chow chow',
261: 'keeshond',
262: 'Brabancon griffon',
263: 'Pembroke, Pembroke Welsh corgi',
264: 'Cardigan, Cardigan Welsh corgi',
265: 'toy poodle',
266: 'miniature poodle',
267: 'standard poodle',
268: 'Mexican hairless',
269: 'timber wolf, grey wolf, gray wolf, Canis lupus',
270: 'white wolf, Arctic wolf, Canis lupus tundrarum',
271: 'red wolf, maned wolf, Canis rufus, Canis niger',
272: 'coyote, prairie wolf, brush wolf, Canis latrans',
273: 'dingo, warrigal, warragal, Canis dingo',
274: 'dhole, Cuon alpinus',
275: 'African hunting dog, hyena dog, Cape hunting dog, Lycaon pictus',
276: 'hyena, hyaena',
277: 'red fox, Vulpes vulpes',
278: 'kit fox, Vulpes macrotis',
279: 'Arctic fox, white fox, Alopex lagopus',
280: 'grey fox, gray fox, Urocyon cinereoargenteus',
281: 'tabby, tabby cat',
282: 'tiger cat',
283: 'Persian cat',
284: 'Siamese cat, Siamese',
285: 'Egyptian cat',
286: 'cougar, puma, catamount, mountain lion, painter, panther, Felis concolor',
287: 'lynx, catamount',
288: 'leopard, Panthera pardus',
289: 'snow leopard, ounce, Panthera uncia',
290: 'jaguar, panther, Panthera onca, Felis onca',
291: 'lion, king of beasts, Panthera leo',
292: 'tiger, Panthera tigris',
293: 'cheetah, chetah, Acinonyx jubatus',
294: 'brown bear, bruin, Ursus arctos',
295: 'American black bear, black bear, Ursus americanus, Euarctos americanus',
296: 'ice bear, polar bear, Ursus Maritimus, Thalarctos maritimus',
297: 'sloth bear, Melursus ursinus, Ursus ursinus',
298: 'mongoose',
299: 'meerkat, mierkat',
300: 'tiger beetle',
301: 'ladybug, ladybeetle, lady beetle, ladybird, ladybird beetle',
302: 'ground beetle, carabid beetle',
303: 'long-horned beetle, longicorn, longicorn beetle',
304: 'leaf beetle, chrysomelid',
305: 'dung beetle',
306: 'rhinoceros beetle',
307: 'weevil',
308: 'fly',
309: 'bee',
310: 'ant, emmet, pismire',
311: 'grasshopper, hopper',
312: 'cricket',
313: 'walking stick, walkingstick, stick insect',
314: 'cockroach, roach',
315: 'mantis, mantid',
316: 'cicada, cicala',
317: 'leafhopper',
318: 'lacewing, lacewing fly',
319: "dragonfly, darning needle, devil's darning needle, sewing needle, snake feeder, snake doctor, mosquito hawk, skeeter hawk",
320: 'damselfly',
321: 'admiral',
322: 'ringlet, ringlet butterfly',
323: 'monarch, monarch butterfly, milkweed butterfly, Danaus plexippus',
324: 'cabbage butterfly',
325: 'sulphur butterfly, sulfur butterfly',
326: 'lycaenid, lycaenid butterfly',
327: 'starfish, sea star',
328: 'sea urchin',
329: 'sea cucumber, holothurian',
330: 'wood rabbit, cottontail, cottontail rabbit',
331: 'hare',
332: 'Angora, Angora rabbit',
333: 'hamster',
334: 'porcupine, hedgehog',
335: 'fox squirrel, eastern fox squirrel, Sciurus niger',
336: 'marmot',
337: 'beaver',
338: 'guinea pig, Cavia cobaya',
339: 'sorrel',
340: 'zebra',
341: 'hog, pig, grunter, squealer, Sus scrofa',
342: 'wild boar, boar, Sus scrofa',
343: 'warthog',
344: 'hippopotamus, hippo, river horse, Hippopotamus amphibius',
345: 'ox',
346: 'water buffalo, water ox, Asiatic buffalo, Bubalus bubalis',
347: 'bison',
348: 'ram, tup',
349: 'bighorn, bighorn sheep, cimarron, Rocky Mountain bighorn, Rocky Mountain sheep, Ovis canadensis',
350: 'ibex, Capra ibex',
351: 'hartebeest',
352: 'impala, Aepyceros melampus',
353: 'gazelle',
354: 'Arabian camel, dromedary, Camelus dromedarius',
355: 'llama',
356: 'weasel',
357: 'mink',
358: 'polecat, fitch, foulmart, foumart, Mustela putorius',
359: 'black-footed ferret, ferret, Mustela nigripes',
360: 'otter',
361: 'skunk, polecat, wood pussy',
362: 'badger',
363: 'armadillo',
364: 'three-toed sloth, ai, Bradypus tridactylus',
365: 'orangutan, orang, orangutang, Pongo pygmaeus',
366: 'gorilla, Gorilla gorilla',
367: 'chimpanzee, chimp, Pan troglodytes',
368: 'gibbon, Hylobates lar',
369: 'siamang, Hylobates syndactylus, Symphalangus syndactylus',
370: 'guenon, guenon monkey',
371: 'patas, hussar monkey, Erythrocebus patas',
372: 'baboon',
373: 'macaque',
374: 'langur',
375: 'colobus, colobus monkey',
376: 'proboscis monkey, Nasalis larvatus',
377: 'marmoset',
378: 'capuchin, ringtail, Cebus capucinus',
379: 'howler monkey, howler',
380: 'titi, titi monkey',
381: 'spider monkey, Ateles geoffroyi',
382: 'squirrel monkey, Saimiri sciureus',
383: 'Madagascar cat, ring-tailed lemur, Lemur catta',
384: 'indri, indris, Indri indri, Indri brevicaudatus',
385: 'Indian elephant, Elephas maximus',
386: 'African elephant, Loxodonta africana',
387: 'lesser panda, red panda, panda, bear cat, cat bear, Ailurus fulgens',
388: 'giant panda, panda, panda bear, coon bear, Ailuropoda melanoleuca',
389: 'barracouta, snoek',
390: 'eel',
391: 'coho, cohoe, coho salmon, blue jack, silver salmon, Oncorhynchus kisutch',
392: 'rock beauty, Holocanthus tricolor',
393: 'anemone fish',
394: 'sturgeon',
395: 'gar, garfish, garpike, billfish, Lepisosteus osseus',
396: 'lionfish',
397: 'puffer, pufferfish, blowfish, globefish',
398: 'abacus',
399: 'abaya',
400: "academic gown, academic robe, judge's robe",
401: 'accordion, piano accordion, squeeze box',
402: 'acoustic guitar',
403: 'aircraft carrier, carrier, flattop, attack aircraft carrier',
404: 'airliner',
405: 'airship, dirigible',
406: 'altar',
407: 'ambulance',
408: 'amphibian, amphibious vehicle',
409: 'analog clock',
410: 'apiary, bee house',
411: 'apron',
412: 'ashcan, trash can, garbage can, wastebin, ash bin, ash-bin, ashbin, dustbin, trash barrel, trash bin',
413: 'assault rifle, assault gun',
414: 'backpack, back pack, knapsack, packsack, rucksack, haversack',
415: 'bakery, bakeshop, bakehouse',
416: 'balance beam, beam',
417: 'balloon',
418: 'ballpoint, ballpoint pen, ballpen, Biro',
419: 'Band Aid',
420: 'banjo',
421: 'bannister, banister, balustrade, balusters, handrail',
422: 'barbell',
423: 'barber chair',
424: 'barbershop',
425: 'barn',
426: 'barometer',
427: 'barrel, cask',
428: 'barrow, garden cart, lawn cart, wheelbarrow',
429: 'baseball',
430: 'basketball',
431: 'bassinet',
432: 'bassoon',
433: 'bathing cap, swimming cap',
434: 'bath towel',
435: 'bathtub, bathing tub, bath, tub',
436: 'beach wagon, station wagon, wagon, estate car, beach waggon, station waggon, waggon',
437: 'beacon, lighthouse, beacon light, pharos',
438: 'beaker',
439: 'bearskin, busby, shako',
440: 'beer bottle',
441: 'beer glass',
442: 'bell cote, bell cot',
443: 'bib',
444: 'bicycle-built-for-two, tandem bicycle, tandem',
445: 'bikini, two-piece',
446: 'binder, ring-binder',
447: 'binoculars, field glasses, opera glasses',
448: 'birdhouse',
449: 'boathouse',
450: 'bobsled, bobsleigh, bob',
451: 'bolo tie, bolo, bola tie, bola',
452: 'bonnet, poke bonnet',
453: 'bookcase',
454: 'bookshop, bookstore, bookstall',
455: 'bottlecap',
456: 'bow',
457: 'bow tie, bow-tie, bowtie',
458: 'brass, memorial tablet, plaque',
459: 'brassiere, bra, bandeau',
460: 'breakwater, groin, groyne, mole, bulwark, seawall, jetty',
461: 'breastplate, aegis, egis',
462: 'broom',
463: 'bucket, pail',
464: 'buckle',
465: 'bulletproof vest',
466: 'bullet train, bullet',
467: 'butcher shop, meat market',
468: 'cab, hack, taxi, taxicab',
469: 'caldron, cauldron',
470: 'candle, taper, wax light',
471: 'cannon',
472: 'canoe',
473: 'can opener, tin opener',
474: 'cardigan',
475: 'car mirror',
476: 'carousel, carrousel, merry-go-round, roundabout, whirligig',
477: "carpenter's kit, tool kit",
478: 'carton',
479: 'car wheel',
480: 'cash machine, cash dispenser, automated teller machine, automatic teller machine, automated teller, automatic teller, ATM',
481: 'cassette',
482: 'cassette player',
483: 'castle',
484: 'catamaran',
485: 'CD player',
486: 'cello, violoncello',
487: 'cellular telephone, cellular phone, cellphone, cell, mobile phone',
488: 'chain',
489: 'chainlink fence',
490: 'chain mail, ring mail, mail, chain armor, chain armour, ring armor, ring armour',
491: 'chain saw, chainsaw',
492: 'chest',
493: 'chiffonier, commode',
494: 'chime, bell, gong',
495: 'china cabinet, china closet',
496: 'Christmas stocking',
497: 'church, church building',
498: 'cinema, movie theater, movie theatre, movie house, picture palace',
499: 'cleaver, meat cleaver, chopper',
500: 'cliff dwelling',
501: 'cloak',
502: 'clog, geta, patten, sabot',
503: 'cocktail shaker',
504: 'coffee mug',
505: 'coffeepot',
506: 'coil, spiral, volute, whorl, helix',
507: 'combination lock',
508: 'computer keyboard, keypad',
509: 'confectionery, confectionary, candy store',
510: 'container ship, containership, container vessel',
511: 'convertible',
512: 'corkscrew, bottle screw',
513: 'cornet, horn, trumpet, trump',
514: 'cowboy boot',
515: 'cowboy hat, ten-gallon hat',
516: 'cradle',
517: 'crane',
518: 'crash helmet',
519: 'crate',
520: 'crib, cot',
521: 'Crock Pot',
522: 'croquet ball',
523: 'crutch',
524: 'cuirass',
525: 'dam, dike, dyke',
526: 'desk',
527: 'desktop computer',
528: 'dial telephone, dial phone',
529: 'diaper, nappy, napkin',
530: 'digital clock',
531: 'digital watch',
532: 'dining table, board',
533: 'dishrag, dishcloth',
534: 'dishwasher, dish washer, dishwashing machine',
535: 'disk brake, disc brake',
536: 'dock, dockage, docking facility',
537: 'dogsled, dog sled, dog sleigh',
538: 'dome',
539: 'doormat, welcome mat',
540: 'drilling platform, offshore rig',
541: 'drum, membranophone, tympan',
542: 'drumstick',
543: 'dumbbell',
544: 'Dutch oven',
545: 'electric fan, blower',
546: 'electric guitar',
547: 'electric locomotive',
548: 'entertainment center',
549: 'envelope',
550: 'espresso maker',
551: 'face powder',
552: 'feather boa, boa',
553: 'file, file cabinet, filing cabinet',
554: 'fireboat',
555: 'fire engine, fire truck',
556: 'fire screen, fireguard',
557: 'flagpole, flagstaff',
558: 'flute, transverse flute',
559: 'folding chair',
560: 'football helmet',
561: 'forklift',
562: 'fountain',
563: 'fountain pen',
564: 'four-poster',
565: 'freight car',
566: 'French horn, horn',
567: 'frying pan, frypan, skillet',
568: 'fur coat',
569: 'garbage truck, dustcart',
570: 'gasmask, respirator, gas helmet',
571: 'gas pump, gasoline pump, petrol pump, island dispenser',
572: 'goblet',
573: 'go-kart',
574: 'golf ball',
575: 'golfcart, golf cart',
576: 'gondola',
577: 'gong, tam-tam',
578: 'gown',
579: 'grand piano, grand',
580: 'greenhouse, nursery, glasshouse',
581: 'grille, radiator grille',
582: 'grocery store, grocery, food market, market',
583: 'guillotine',
584: 'hair slide',
585: 'hair spray',
586: 'half track',
587: 'hammer',
588: 'hamper',
589: 'hand blower, blow dryer, blow drier, hair dryer, hair drier',
590: 'hand-held computer, hand-held microcomputer',
591: 'handkerchief, hankie, hanky, hankey',
592: 'hard disc, hard disk, fixed disk',
593: 'harmonica, mouth organ, harp, mouth harp',
594: 'harp',
595: 'harvester, reaper',
596: 'hatchet',
597: 'holster',
598: 'home theater, home theatre',
599: 'honeycomb',
600: 'hook, claw',
601: 'hoopskirt, crinoline',
602: 'horizontal bar, high bar',
603: 'horse cart, horse-cart',
604: 'hourglass',
605: 'iPod',
606: 'iron, smoothing iron',
607: "jack-o'-lantern",
608: 'jean, blue jean, denim',
609: 'jeep, landrover',
610: 'jersey, T-shirt, tee shirt',
611: 'jigsaw puzzle',
612: 'jinrikisha, ricksha, rickshaw',
613: 'joystick',
614: 'kimono',
615: 'knee pad',
616: 'knot',
617: 'lab coat, laboratory coat',
618: 'ladle',
619: 'lampshade, lamp shade',
620: 'laptop, laptop computer',
621: 'lawn mower, mower',
622: 'lens cap, lens cover',
623: 'letter opener, paper knife, paperknife',
624: 'library',
625: 'lifeboat',
626: 'lighter, light, igniter, ignitor',
627: 'limousine, limo',
628: 'liner, ocean liner',
629: 'lipstick, lip rouge',
630: 'Loafer',
631: 'lotion',
632: 'loudspeaker, speaker, speaker unit, loudspeaker system, speaker system',
633: "loupe, jeweler's loupe",
634: 'lumbermill, sawmill',
635: 'magnetic compass',
636: 'mailbag, postbag',
637: 'mailbox, letter box',
638: 'maillot',
639: 'maillot, tank suit',
640: 'manhole cover',
641: 'maraca',
642: 'marimba, xylophone',
643: 'mask',
644: 'matchstick',
645: 'maypole',
646: 'maze, labyrinth',
647: 'measuring cup',
648: 'medicine chest, medicine cabinet',
649: 'megalith, megalithic structure',
650: 'microphone, mike',
651: 'microwave, microwave oven',
652: 'military uniform',
653: 'milk can',
654: 'minibus',
655: 'miniskirt, mini',
656: 'minivan',
657: 'missile',
658: 'mitten',
659: 'mixing bowl',
660: 'mobile home, manufactured home',
661: 'Model T',
662: 'modem',
663: 'monastery',
664: 'monitor',
665: 'moped',
666: 'mortar',
667: 'mortarboard',
668: 'mosque',
669: 'mosquito net',
670: 'motor scooter, scooter',
671: 'mountain bike, all-terrain bike, off-roader',
672: 'mountain tent',
673: 'mouse, computer mouse',
674: 'mousetrap',
675: 'moving van',
676: 'muzzle',
677: 'nail',
678: 'neck brace',
679: 'necklace',
680: 'nipple',
681: 'notebook, notebook computer',
682: 'obelisk',
683: 'oboe, hautboy, hautbois',
684: 'ocarina, sweet potato',
685: 'odometer, hodometer, mileometer, milometer',
686: 'oil filter',
687: 'organ, pipe organ',
688: 'oscilloscope, scope, cathode-ray oscilloscope, CRO',
689: 'overskirt',
690: 'oxcart',
691: 'oxygen mask',
692: 'packet',
693: 'paddle, boat paddle',
694: 'paddlewheel, paddle wheel',
695: 'padlock',
696: 'paintbrush',
697: "pajama, pyjama, pj's, jammies",
698: 'palace',
699: 'panpipe, pandean pipe, syrinx',
700: 'paper towel',
701: 'parachute, chute',
702: 'parallel bars, bars',
703: 'park bench',
704: 'parking meter',
705: 'passenger car, coach, carriage',
706: 'patio, terrace',
707: 'pay-phone, pay-station',
708: 'pedestal, plinth, footstall',
709: 'pencil box, pencil case',
710: 'pencil sharpener',
711: 'perfume, essence',
712: 'Petri dish',
713: 'photocopier',
714: 'pick, plectrum, plectron',
715: 'pickelhaube',
716: 'picket fence, paling',
717: 'pickup, pickup truck',
718: 'pier',
719: 'piggy bank, penny bank',
720: 'pill bottle',
721: 'pillow',
722: 'ping-pong ball',
723: 'pinwheel',
724: 'pirate, pirate ship',
725: 'pitcher, ewer',
726: "plane, carpenter's plane, woodworking plane",
727: 'planetarium',
728: 'plastic bag',
729: 'plate rack',
730: 'plow, plough',
731: "plunger, plumber's helper",
732: 'Polaroid camera, Polaroid Land camera',
733: 'pole',
734: 'police van, police wagon, paddy wagon, patrol wagon, wagon, black Maria',
735: 'poncho',
736: 'pool table, billiard table, snooker table',
737: 'pop bottle, soda bottle',
738: 'pot, flowerpot',
739: "potter's wheel",
740: 'power drill',
741: 'prayer rug, prayer mat',
742: 'printer',
743: 'prison, prison house',
744: 'projectile, missile',
745: 'projector',
746: 'puck, hockey puck',
747: 'punching bag, punch bag, punching ball, punchball',
748: 'purse',
749: 'quill, quill pen',
750: 'quilt, comforter, comfort, puff',
751: 'racer, race car, racing car',
752: 'racket, racquet',
753: 'radiator',
754: 'radio, wireless',
755: 'radio telescope, radio reflector',
756: 'rain barrel',
757: 'recreational vehicle, RV, R.V.',
758: 'reel',
759: 'reflex camera',
760: 'refrigerator, icebox',
761: 'remote control, remote',
762: 'restaurant, eating house, eating place, eatery',
763: 'revolver, six-gun, six-shooter',
764: 'rifle',
765: 'rocking chair, rocker',
766: 'rotisserie',
767: 'rubber eraser, rubber, pencil eraser',
768: 'rugby ball',
769: 'rule, ruler',
770: 'running shoe',
771: 'safe',
772: 'safety pin',
773: 'saltshaker, salt shaker',
774: 'sandal',
775: 'sarong',
776: 'sax, saxophone',
777: 'scabbard',
778: 'scale, weighing machine',
779: 'school bus',
780: 'schooner',
781: 'scoreboard',
782: 'screen, CRT screen',
783: 'screw',
784: 'screwdriver',
785: 'seat belt, seatbelt',
786: 'sewing machine',
787: 'shield, buckler',
788: 'shoe shop, shoe-shop, shoe store',
789: 'shoji',
790: 'shopping basket',
791: 'shopping cart',
792: 'shovel',
793: 'shower cap',
794: 'shower curtain',
795: 'ski',
796: 'ski mask',
797: 'sleeping bag',
798: 'slide rule, slipstick',
799: 'sliding door',
800: 'slot, one-armed bandit',
801: 'snorkel',
802: 'snowmobile',
803: 'snowplow, snowplough',
804: 'soap dispenser',
805: 'soccer ball',
806: 'sock',
807: 'solar dish, solar collector, solar furnace',
808: 'sombrero',
809: 'soup bowl',
810: 'space bar',
811: 'space heater',
812: 'space shuttle',
813: 'spatula',
814: 'speedboat',
815: "spider web, spider's web",
816: 'spindle',
817: 'sports car, sport car',
818: 'spotlight, spot',
819: 'stage',
820: 'steam locomotive',
821: 'steel arch bridge',
822: 'steel drum',
823: 'stethoscope',
824: 'stole',
825: 'stone wall',
826: 'stopwatch, stop watch',
827: 'stove',
828: 'strainer',
829: 'streetcar, tram, tramcar, trolley, trolley car',
830: 'stretcher',
831: 'studio couch, day bed',
832: 'stupa, tope',
833: 'submarine, pigboat, sub, U-boat',
834: 'suit, suit of clothes',
835: 'sundial',
836: 'sunglass',
837: 'sunglasses, dark glasses, shades',
838: 'sunscreen, sunblock, sun blocker',
839: 'suspension bridge',
840: 'swab, swob, mop',
841: 'sweatshirt',
842: 'swimming trunks, bathing trunks',
843: 'swing',
844: 'switch, electric switch, electrical switch',
845: 'syringe',
846: 'table lamp',
847: 'tank, army tank, armored combat vehicle, armoured combat vehicle',
848: 'tape player',
849: 'teapot',
850: 'teddy, teddy bear',
851: 'television, television system',
852: 'tennis ball',
853: 'thatch, thatched roof',
854: 'theater curtain, theatre curtain',
855: 'thimble',
856: 'thresher, thrasher, threshing machine',
857: 'throne',
858: 'tile roof',
859: 'toaster',
860: 'tobacco shop, tobacconist shop, tobacconist',
861: 'toilet seat',
862: 'torch',
863: 'totem pole',
864: 'tow truck, tow car, wrecker',
865: 'toyshop',
866: 'tractor',
867: 'trailer truck, tractor trailer, trucking rig, rig, articulated lorry, semi',
868: 'tray',
869: 'trench coat',
870: 'tricycle, trike, velocipede',
871: 'trimaran',
872: 'tripod',
873: 'triumphal arch',
874: 'trolleybus, trolley coach, trackless trolley',
875: 'trombone',
876: 'tub, vat',
877: 'turnstile',
878: 'typewriter keyboard',
879: 'umbrella',
880: 'unicycle, monocycle',
881: 'upright, upright piano',
882: 'vacuum, vacuum cleaner',
883: 'vase',
884: 'vault',
885: 'velvet',
886: 'vending machine',
887: 'vestment',
888: 'viaduct',
889: 'violin, fiddle',
890: 'volleyball',
891: 'waffle iron',
892: 'wall clock',
893: 'wallet, billfold, notecase, pocketbook',
894: 'wardrobe, closet, press',
895: 'warplane, military plane',
896: 'washbasin, handbasin, washbowl, lavabo, wash-hand basin',
897: 'washer, automatic washer, washing machine',
898: 'water bottle',
899: 'water jug',
900: 'water tower',
901: 'whiskey jug',
902: 'whistle',
903: 'wig',
904: 'window screen',
905: 'window shade',
906: 'Windsor tie',
907: 'wine bottle',
908: 'wing',
909: 'wok',
910: 'wooden spoon',
911: 'wool, woolen, woollen',
912: 'worm fence, snake fence, snake-rail fence, Virginia fence',
913: 'wreck',
914: 'yawl',
915: 'yurt',
916: 'web site, website, internet site, site',
917: 'comic book',
918: 'crossword puzzle, crossword',
919: 'street sign',
920: 'traffic light, traffic signal, stoplight',
921: 'book jacket, dust cover, dust jacket, dust wrapper',
922: 'menu',
923: 'plate',
924: 'guacamole',
925: 'consomme',
926: 'hot pot, hotpot',
927: 'trifle',
928: 'ice cream, icecream',
929: 'ice lolly, lolly, lollipop, popsicle',
930: 'French loaf',
931: 'bagel, beigel',
932: 'pretzel',
933: 'cheeseburger',
934: 'hotdog, hot dog, red hot',
935: 'mashed potato',
936: 'head cabbage',
937: 'broccoli',
938: 'cauliflower',
939: 'zucchini, courgette',
940: 'spaghetti squash',
941: 'acorn squash',
942: 'butternut squash',
943: 'cucumber, cuke',
944: 'artichoke, globe artichoke',
945: 'bell pepper',
946: 'cardoon',
947: 'mushroom',
948: 'Granny Smith',
949: 'strawberry',
950: 'orange',
951: 'lemon',
952: 'fig',
953: 'pineapple, ananas',
954: 'banana',
955: 'jackfruit, jak, jack',
956: 'custard apple',
957: 'pomegranate',
958: 'hay',
959: 'carbonara',
960: 'chocolate sauce, chocolate syrup',
961: 'dough',
962: 'meat loaf, meatloaf',
963: 'pizza, pizza pie',
964: 'potpie',
965: 'burrito',
966: 'red wine',
967: 'espresso',
968: 'cup',
969: 'eggnog',
970: 'alp',
971: 'bubble',
972: 'cliff, drop, drop-off',
973: 'coral reef',
974: 'geyser',
975: 'lakeside, lakeshore',
976: 'promontory, headland, head, foreland',
977: 'sandbar, sand bar',
978: 'seashore, coast, seacoast, sea-coast',
979: 'valley, vale',
980: 'volcano',
981: 'ballplayer, baseball player',
982: 'groom, bridegroom',
983: 'scuba diver',
984: 'rapeseed',
985: 'daisy',
986: "yellow lady's slipper, yellow lady-slipper, Cypripedium calceolus, Cypripedium parviflorum",
987: 'corn',
988: 'acorn',
989: 'hip, rose hip, rosehip',
990: 'buckeye, horse chestnut, conker',
991: 'coral fungus',
992: 'agaric',
993: 'gyromitra',
994: 'stinkhorn, carrion fungus',
995: 'earthstar',
996: 'hen-of-the-woods, hen of the woods, Polyporus frondosus, Grifola frondosa',
997: 'bolete',
998: 'ear, spike, capitulum',
999: 'toilet tissue, toilet paper, bathroom tissue'} | true | true |
1c2fb54378459ed8b7825cffc34fee728bda2df1 | 378 | py | Python | lib/datasets/_init_paths.py | liuqiang3/faster-rcnn | b665b46223d8623222f4d67299c232e39b242299 | [
"MIT"
] | null | null | null | lib/datasets/_init_paths.py | liuqiang3/faster-rcnn | b665b46223d8623222f4d67299c232e39b242299 | [
"MIT"
] | null | null | null | lib/datasets/_init_paths.py | liuqiang3/faster-rcnn | b665b46223d8623222f4d67299c232e39b242299 | [
"MIT"
] | null | null | null | import os.path as osp
import sys
def add_path(path):
    """Prepend *path* to ``sys.path`` unless it is already importable from there."""
    already_known = path in sys.path
    if not already_known:
        sys.path.insert(0, path)
# Directory containing this file; all paths below are resolved relative to it.
this_dir = osp.dirname(__file__)
# Add lib to PYTHONPATH
lib_path = osp.join(this_dir, '..', 'lib')
add_path(lib_path)
# Add the bundled COCO Python API so `pycocotools` can be imported.
coco_path = osp.join(this_dir, '..', 'data', 'coco', 'PythonAPI')
add_path(coco_path)
# Also add the parent directory itself (note: intentionally reuses `lib_path`).
lib_path = osp.join(this_dir, '..')
add_path(lib_path)
| 22.235294 | 65 | 0.687831 | import os.path as osp
import sys
def add_path(path):
if path not in sys.path:
sys.path.insert(0, path)
this_dir = osp.dirname(__file__)
lib_path = osp.join(this_dir, '..', 'lib')
add_path(lib_path)
coco_path = osp.join(this_dir, '..', 'data', 'coco', 'PythonAPI')
add_path(coco_path)
lib_path = osp.join(this_dir, '..')
add_path(lib_path)
| true | true |
1c2fb5e395071cbfd684013207b19f4d5adecb86 | 18,724 | py | Python | train.py | tsis-mobile-technology/deep-text-recognition-benchmark | d742dee8b13958437ec8565e70121732669fd704 | [
"Apache-2.0"
] | null | null | null | train.py | tsis-mobile-technology/deep-text-recognition-benchmark | d742dee8b13958437ec8565e70121732669fd704 | [
"Apache-2.0"
] | null | null | null | train.py | tsis-mobile-technology/deep-text-recognition-benchmark | d742dee8b13958437ec8565e70121732669fd704 | [
"Apache-2.0"
] | null | null | null | #-*-coding:utf-8-*-
import os
import sys
import time
import random
import string
import argparse
import torch
import torch.backends.cudnn as cudnn
import torch.nn.init as init
import torch.optim as optim
import torch.utils.data
import numpy as np
from utils import CTCLabelConverter, CTCLabelConverterForBaiduWarpctc, AttnLabelConverter, Averager
from dataset import hierarchical_dataset, AlignCollate, Batch_Balanced_Dataset
from model import Model
from test import validation
# Select GPU when available; every tensor/module below is moved via .to(device).
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
## Configuration for Korean (Hangul) training (train.py)
# python train.py --train_data /Users/gotaejong/ExternHard/97_Workspace/jupyter/Text_in_the_wild/data_lmdb/train --valid_data /Users/gotaejong/ExternHard/97_Workspace/jupyter/Text_in_the_wild/data_lmdb/validation --Transformation TPS --FeatureExtraction ResNet --SequenceModeling BiLSTM --Prediction CTC --data_filtering_off --workers 0 --imgH 64 --imgW 200
def train(opt):
    """Train a scene-text recognition model according to the options in ``opt``.

    Prepares the batch-balanced training set and the validation loader, builds
    the four-stage model (Transformation / FeatureExtraction / SequenceModeling
    / Prediction), the loss and the optimizer, then runs the iteration loop
    until ``opt.num_iter``, validating every ``opt.valInterval`` iterations and
    saving the best-accuracy / best-norm-ED checkpoints under
    ``./saved_models/{opt.exp_name}/``.

    Args:
        opt: argparse.Namespace holding every CLI option (see ``__main__``).

    Side effects: writes log files and ``.pth`` checkpoints; terminates the
    process via ``sys.exit()`` when ``opt.num_iter`` is reached.
    """
    # --- dataset preparation ---
    if not opt.data_filtering_off:
        print('Filtering the images containing characters which are not in opt.character')
        print('Filtering the images whose label is longer than opt.batch_max_length')
        # see https://github.com/clovaai/deep-text-recognition-benchmark/blob/6593928855fb7abb999a99f428b3e4477d4ae356/dataset.py#L130

    opt.select_data = opt.select_data.split('-')
    opt.batch_ratio = opt.batch_ratio.split('-')
    train_dataset = Batch_Balanced_Dataset(opt)

    AlignCollate_valid = AlignCollate(imgH=opt.imgH, imgW=opt.imgW, keep_ratio_with_pad=opt.PAD)
    valid_dataset, valid_dataset_log = hierarchical_dataset(root=opt.valid_data, opt=opt)
    valid_loader = torch.utils.data.DataLoader(
        valid_dataset, batch_size=opt.batch_size,
        shuffle=True,  # 'True' to check training progress with validation function.
        num_workers=int(opt.workers),
        collate_fn=AlignCollate_valid, pin_memory=True)
    # `with` guarantees the dataset log is closed even if a write fails
    # (the original opened/closed the handle manually and could leak it).
    with open(f'./saved_models/{opt.exp_name}/log_dataset.txt', 'a') as log:
        log.write(valid_dataset_log)
        print('-' * 80)
        log.write('-' * 80 + '\n')

    # --- model configuration ---
    if 'CTC' in opt.Prediction:
        if opt.baiduCTC:
            converter = CTCLabelConverterForBaiduWarpctc(opt.character)
        else:
            converter = CTCLabelConverter(opt.character)
    else:
        converter = AttnLabelConverter(opt.character)
    opt.num_class = len(converter.character)

    if opt.rgb:
        opt.input_channel = 3
    model = Model(opt)
    print('model input parameters', opt.imgH, opt.imgW, opt.num_fiducial, opt.input_channel, opt.output_channel,
          opt.hidden_size, opt.num_class, opt.batch_max_length, opt.Transformation, opt.FeatureExtraction,
          opt.SequenceModeling, opt.Prediction)

    # weight initialization (the TPS localization head initializes itself, so skip it)
    for name, param in model.named_parameters():
        if 'localization_fc2' in name:
            print(f'Skip {name} as it is already initialized')
            continue
        try:
            if 'bias' in name:
                init.constant_(param, 0.0)
            elif 'weight' in name:
                init.kaiming_normal_(param)
        except Exception:  # for batchnorm: kaiming init rejects 1-D weights.
            if 'weight' in name:
                param.data.fill_(1)
            continue

    # data parallel for multi-GPU
    model = torch.nn.DataParallel(model).to(device)
    model.train()
    if opt.saved_model != '':
        print(f'loading pretrained model from {opt.saved_model}')
        if opt.FT:
            # map_location lets GPU-trained weights load on a CPU-only host;
            # strict=False tolerates head-size mismatches when fine-tuning.
            model.load_state_dict(torch.load(opt.saved_model, map_location=torch.device('cpu')), strict=False)
        else:
            model.load_state_dict(torch.load(opt.saved_model))
    print("Model:")
    print(model)

    # --- loss ---
    if 'CTC' in opt.Prediction:
        # Both the baiduCTC and the default branch used torch's native CTCLoss
        # (warpctc is not installable here), so the duplicated branches are merged.
        # opt.baiduCTC still changes the loss-input layout in the loop below.
        criterion = torch.nn.CTCLoss(zero_infinity=True).to(device)
    else:
        criterion = torch.nn.CrossEntropyLoss(ignore_index=0).to(device)  # ignore [GO] token = ignore index 0
    # loss averager
    loss_avg = Averager()

    # keep only parameters that require gradient descent
    filtered_parameters = []
    params_num = []
    for p in filter(lambda p: p.requires_grad, model.parameters()):
        filtered_parameters.append(p)
        params_num.append(np.prod(p.size()))
    print('Trainable params num : ', sum(params_num))

    # setup optimizer
    if opt.adam:
        optimizer = optim.Adam(filtered_parameters, lr=opt.lr, betas=(opt.beta1, 0.999))
    else:
        optimizer = optim.Adadelta(filtered_parameters, lr=opt.lr, rho=opt.rho, eps=opt.eps)
    print("Optimizer:")
    print(optimizer)

    # --- final options: record the full option namespace for reproducibility ---
    with open(f'./saved_models/{opt.exp_name}/opt.txt', 'a') as opt_file:
        opt_log = '------------ Options -------------\n'
        args = vars(opt)
        for k, v in args.items():
            opt_log += f'{str(k)}: {str(v)}\n'
        opt_log += '---------------------------------------\n'
        print(opt_log)
        opt_file.write(opt_log)

    # --- start training ---
    start_iter = 0
    if opt.saved_model != '':
        # Checkpoints are named ..._<iter>.pth; resume the counter when possible.
        # The original used a bare `except:`, which also swallowed
        # KeyboardInterrupt/SystemExit — only parsing failures are expected here.
        try:
            start_iter = int(opt.saved_model.split('_')[-1].split('.')[0])
            print(f'continue to train, start_iter: {start_iter}')
        except (IndexError, ValueError):
            pass  # filename does not encode an iteration number; start from 0

    start_time = time.time()
    best_accuracy = -1
    best_norm_ED = -1
    iteration = start_iter

    while True:
        # train part
        image_tensors, labels = train_dataset.get_batch()
        image = image_tensors.to(device)
        text, length = converter.encode(labels, batch_max_length=opt.batch_max_length)
        batch_size = image.size(0)

        if 'CTC' in opt.Prediction:
            preds = model(image, text)
            preds_size = torch.IntTensor([preds.size(1)] * batch_size)
            if opt.baiduCTC:
                preds = preds.permute(1, 0, 2)  # to use CTCLoss format (T, N, C)
                cost = criterion(preds, text, preds_size, length) / batch_size
            else:
                preds = preds.log_softmax(2).permute(1, 0, 2)
                cost = criterion(preds, text, preds_size, length)
        else:
            preds = model(image, text[:, :-1])  # align with Attention.forward
            target = text[:, 1:]  # without [GO] Symbol
            cost = criterion(preds.view(-1, preds.shape[-1]), target.contiguous().view(-1))

        model.zero_grad()
        cost.backward()
        torch.nn.utils.clip_grad_norm_(model.parameters(), opt.grad_clip)  # gradient clipping with 5 (Default)
        optimizer.step()

        loss_avg.add(cost)

        # validation part; also run at iteration 0 to see the starting point
        if (iteration + 1) % opt.valInterval == 0 or iteration == 0:
            elapsed_time = time.time() - start_time
            with open(f'./saved_models/{opt.exp_name}/log_train.txt', 'a') as log:
                model.eval()
                with torch.no_grad():
                    valid_loss, current_accuracy, current_norm_ED, preds, confidence_score, labels, infer_time, length_of_data = validation(
                        model, criterion, valid_loader, converter, opt)
                model.train()

                # training loss and validation loss
                loss_log = f'[{iteration+1}/{opt.num_iter}] Train loss: {loss_avg.val():0.5f}, Valid loss: {valid_loss:0.5f}, Elapsed_time: {elapsed_time:0.5f}'
                loss_avg.reset()

                current_model_log = f'{"Current_accuracy":17s}: {current_accuracy:0.3f}, {"Current_norm_ED":17s}: {current_norm_ED:0.2f}'

                # keep best accuracy model (on valid dataset)
                if current_accuracy > best_accuracy:
                    best_accuracy = current_accuracy
                    torch.save(model.state_dict(), f'./saved_models/{opt.exp_name}/best_accuracy.pth')
                if current_norm_ED > best_norm_ED:
                    best_norm_ED = current_norm_ED
                    torch.save(model.state_dict(), f'./saved_models/{opt.exp_name}/best_norm_ED.pth')
                best_model_log = f'{"Best_accuracy":17s}: {best_accuracy:0.3f}, {"Best_norm_ED":17s}: {best_norm_ED:0.2f}'

                loss_model_log = f'{loss_log}\n{current_model_log}\n{best_model_log}'
                print(loss_model_log)
                log.write(loss_model_log + '\n')

                # show some predicted results (first five validation samples)
                dashed_line = '-' * 80
                head = f'{"Ground Truth":25s} | {"Prediction":25s} | Confidence Score & T/F'
                predicted_result_log = f'{dashed_line}\n{head}\n{dashed_line}\n'
                for gt, pred, confidence in zip(labels[:5], preds[:5], confidence_score[:5]):
                    if 'Attn' in opt.Prediction:
                        # strip everything after the end-of-sequence token
                        gt = gt[:gt.find('[s]')]
                        pred = pred[:pred.find('[s]')]

                    predicted_result_log += f'{gt:25s} | {pred:25s} | {confidence:0.4f}\t{str(pred == gt)}\n'
                predicted_result_log += f'{dashed_line}'
                print(predicted_result_log)
                log.write(predicted_result_log + '\n')

        # save an intermediate snapshot every 100000 iterations
        # (integer modulus instead of the original float `1e+5`)
        if (iteration + 1) % 100000 == 0:
            torch.save(
                model.state_dict(), f'./saved_models/{opt.exp_name}/iter_{iteration+1}.pth')

        if (iteration + 1) == opt.num_iter:
            print('end the training')
            sys.exit()
        iteration += 1
# CLI entry point: parse every training option, fix the random seeds, adjust
# the multi-GPU batch settings, then hand the namespace to train().
if __name__ == '__main__':
    parser = argparse.ArgumentParser()
    parser.add_argument('--exp_name', help='Where to store logs and models')
    parser.add_argument('--train_data', required=True, help='path to training dataset')
    parser.add_argument('--valid_data', required=True, help='path to validation dataset')
    parser.add_argument('--manualSeed', type=int, default=1111, help='for random seed setting')
    parser.add_argument('--workers', type=int, help='number of data loading workers', default=4)
    parser.add_argument('--batch_size', type=int, default=192, help='input batch size')
    parser.add_argument('--num_iter', type=int, default=300000, help='number of iterations to train for')
    parser.add_argument('--valInterval', type=int, default=2000, help='Interval between each validation')
    parser.add_argument('--saved_model', default='', help="path to model to continue training")
    parser.add_argument('--FT', action='store_true', help='whether to do fine-tuning')
    parser.add_argument('--adam', action='store_true', help='Whether to use adam (default is Adadelta)')
    parser.add_argument('--lr', type=float, default=1, help='learning rate, default=1.0 for Adadelta')
    parser.add_argument('--beta1', type=float, default=0.9, help='beta1 for adam. default=0.9')
    parser.add_argument('--rho', type=float, default=0.95, help='decay rate rho for Adadelta. default=0.95')
    parser.add_argument('--eps', type=float, default=1e-8, help='eps for Adadelta. default=1e-8')
    parser.add_argument('--grad_clip', type=float, default=5, help='gradient clipping value. default=5')
    parser.add_argument('--baiduCTC', action='store_true', help='for data_filtering_off mode')
    """ Data processing """
    ## default (upstream MJ/ST synthetic-data configuration, kept for reference)
    # parser.add_argument('--select_data', type=str, default='MJ-ST',
    #                     help='select training data (default is MJ-ST, which means MJ and ST used as training data)')
    # parser.add_argument('--batch_ratio', type=str, default='0.5-0.5',
    #                     help='assign ratio for each selected data in the batch')
    ## Text in the Wild case: single dataset root, full batch from it
    parser.add_argument('--select_data', type=str, default='/',
                        help='select training data')
    parser.add_argument('--batch_ratio', type=str, default='1',
                        help='assign ratio for each selected data in the batch')
    parser.add_argument('--total_data_usage_ratio', type=str, default='1.0',
                        help='total data usage ratio, this ratio is multiplied to total number of data.')
    parser.add_argument('--batch_max_length', type=int, default=25, help='maximum-label-length')
    parser.add_argument('--imgH', type=int, default=32, help='the height of the input image')
    parser.add_argument('--imgW', type=int, default=100, help='the width of the input image')
    parser.add_argument('--rgb', action='store_true', help='use rgb input')
    ## default (upstream alphanumeric-only charset, kept for reference)
    # parser.add_argument('--character', type=str,
    #                     default='0123456789abcdefghijklmnopqrstuvwxyz', help='character label')
    ## Text in the Wild case: digits + punctuation + Latin + Korean syllables
    # For additional characters see: https://github.com/tsis-mobile-technology/EasyOCR/blob/master/easyocr/config.py
    parser.add_argument('--character', type=str,
                        default='0123456789!#$%&\'"()*+,-./:;<=>?@[\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZㆍ가각간갇갈감갑값갓강갖같갚갛개객걀걔거걱건걷걸검겁것겉게겨격겪견결겹경곁계고곡곤곧골곰곱곳공과관광괜괴굉교구국군굳굴굵굶굽궁권귀귓규균귤그극근글긁금급긋긍기긴길김깅깊까깍깎깐깔깜깝깡깥깨꺼꺾껌껍껏껑께껴꼬꼭꼴꼼꼽꽂꽃꽉꽤꾸꾼꿀꿈뀌끄끈끊끌끓끔끗끝끼낌나낙낚난날낡남납낫낭낮낯낱낳내냄냇냉냐냥너넉넌널넓넘넣네넥넷녀녁년념녕노녹논놀놈농높놓놔뇌뇨누눈눕뉘뉴늄느늑는늘늙능늦늬니닐님다닥닦단닫달닭닮담답닷당닿대댁댐댓더덕던덜덟덤덥덧덩덮데델도독돈돌돕돗동돼되된두둑둘둠둡둥뒤뒷드득든듣들듬듭듯등디딩딪따딱딴딸땀땅때땜떠떡떤떨떻떼또똑뚜뚫뚱뛰뜨뜩뜯뜰뜻띄라락란람랍랑랗래랜램랫략량러럭런럴럼럽럿렁렇레렉렌려력련렬렵령례로록론롬롭롯료루룩룹룻뤄류륙률륭르른름릇릎리릭린림립릿링마막만많말맑맘맙맛망맞맡맣매맥맨맵맺머먹먼멀멈멋멍멎메멘멩며면멸명몇모목몬몰몸몹못몽묘무묵묶문묻물뭄뭇뭐뭘뭣므미민믿밀밉밌및밑바박밖반받발밝밟밤밥방밭배백뱀뱃뱉버번벌범법벗베벤벨벼벽변별볍병볕보복볶본볼봄봇봉뵈뵙부북분불붉붐붓붕붙뷰브븐블비빌빔빗빚빛빠빡빨빵빼뺏뺨뻐뻔뻗뼈뼉뽑뿌뿐쁘쁨사삭산살삶삼삿상새색샌생샤서석섞선설섬섭섯성세섹센셈셋셔션소속손솔솜솟송솥쇄쇠쇼수숙순숟술숨숫숭숲쉬쉰쉽슈스슨슬슴습슷승시식신싣실싫심십싯싱싶싸싹싼쌀쌍쌓써썩썰썹쎄쏘쏟쑤쓰쓴쓸씀씌씨씩씬씹씻아악안앉않알앓암압앗앙앞애액앨야약얀얄얇양얕얗얘어억언얹얻얼엄업없엇엉엊엌엎에엔엘여역연열엷염엽엿영옆예옛오옥온올옮옳옷옹와완왕왜왠외왼요욕용우욱운울움웃웅워원월웨웬위윗유육율으윽은을음응의이익인일읽잃임입잇있잊잎자작잔잖잘잠잡잣장잦재쟁쟤저적전절젊점접젓정젖제젠젯져조족존졸좀좁종좋좌죄주죽준줄줌줍중쥐즈즉즌즐즘증지직진질짐집짓징짙짚짜짝짧째쨌쩌쩍쩐쩔쩜쪽쫓쭈쭉찌찍찢차착찬찮찰참찻창찾채책챔챙처척천철첩첫청체쳐초촉촌촛총촬최추축춘출춤춥춧충취츠측츰층치칙친칠침칫칭카칸칼캄캐캠커컨컬컴컵컷케켓켜코콘콜콤콩쾌쿄쿠퀴크큰클큼키킬타탁탄탈탑탓탕태택탤터턱턴털텅테텍텔템토톤톨톱통퇴투툴툼퉁튀튜트특튼튿틀틈티틱팀팅파팎판팔팝패팩팬퍼퍽페펜펴편펼평폐포폭폰표푸푹풀품풍퓨프플픔피픽필핏핑하학한할함합항해핵핸햄햇행향허헌험헤헬혀현혈협형혜호혹혼홀홈홉홍화확환활황회획횟횡효후훈훌훔훨휘휴흉흐흑흔흘흙흡흥흩희흰히힘',
                        help='character label')
    # 0123456789!"#$%&\'()*+,-./:;<=>?@[\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZㆍ가각간갇갈감갑값갓강갖같갚갛개객걀걔거걱건걷걸검겁것겉게겨격겪견결겹경곁계고곡곤곧골곰곱곳공과관광괜괴굉교구국군굳굴굵굶굽궁권귀귓규균귤그극근글긁금급긋긍기긴길김깅깊까깍깎깐깔깜깝깡깥깨꺼꺾껌껍껏껑께껴꼬꼭꼴꼼꼽꽂꽃꽉꽤꾸꾼꿀꿈뀌끄끈끊끌끓끔끗끝끼낌나낙낚난날낡남납낫낭낮낯낱낳내냄냇냉냐냥너넉넌널넓넘넣네넥넷녀녁년념녕노녹논놀놈농높놓놔뇌뇨누눈눕뉘뉴늄느늑는늘늙능늦늬니닐님다닥닦단닫달닭닮담답닷당닿대댁댐댓더덕던덜덟덤덥덧덩덮데델도독돈돌돕돗동돼되된두둑둘둠둡둥뒤뒷드득든듣들듬듭듯등디딩딪따딱딴딸땀땅때땜떠떡떤떨떻떼또똑뚜뚫뚱뛰뜨뜩뜯뜰뜻띄라락란람랍랑랗래랜램랫략량러럭런럴럼럽럿렁렇레렉렌려력련렬렵령례로록론롬롭롯료루룩룹룻뤄류륙률륭르른름릇릎리릭린림립릿링마막만많말맑맘맙맛망맞맡맣매맥맨맵맺머먹먼멀멈멋멍멎메멘멩며면멸명몇모목몬몰몸몹못몽묘무묵묶문묻물뭄뭇뭐뭘뭣므미민믿밀밉밌및밑바박밖반받발밝밟밤밥방밭배백뱀뱃뱉버번벌범법벗베벤벨벼벽변별볍병볕보복볶본볼봄봇봉뵈뵙부북분불붉붐붓붕붙뷰브븐블비빌빔빗빚빛빠빡빨빵빼뺏뺨뻐뻔뻗뼈뼉뽑뿌뿐쁘쁨사삭산살삶삼삿상새색샌생샤서석섞선설섬섭섯성세섹센셈셋셔션소속손솔솜솟송솥쇄쇠쇼수숙순숟술숨숫숭숲쉬쉰쉽슈스슨슬슴습슷승시식신싣실싫심십싯싱싶싸싹싼쌀쌍쌓써썩썰썹쎄쏘쏟쑤쓰쓴쓸씀씌씨씩씬씹씻아악안앉않알앓암압앗앙앞애액앨야약얀얄얇양얕얗얘어억언얹얻얼엄업없엇엉엊엌엎에엔엘여역연열엷염엽엿영옆예옛오옥온올옮옳옷옹와완왕왜왠외왼요욕용우욱운울움웃웅워원월웨웬위윗유육율으윽은을음응의이익인일읽잃임입잇있잊잎자작잔잖잘잠잡잣장잦재쟁쟤저적전절젊점접젓정젖제젠젯져조족존졸좀좁종좋좌죄주죽준줄줌줍중쥐즈즉즌즐즘증지직진질짐집짓징짙짚짜짝짧째쨌쩌쩍쩐쩔쩜쪽쫓쭈쭉찌찍찢차착찬찮찰참찻창찾채책챔챙처척천철첩첫청체쳐초촉촌촛총촬최추축춘출춤춥춧충취츠측츰층치칙친칠침칫칭카칸칼캄캐캠커컨컬컴컵컷케켓켜코콘콜콤콩쾌쿄쿠퀴크큰클큼키킬타탁탄탈탑탓탕태택탤터턱턴털텅테텍텔템토톤톨톱통퇴투툴툼퉁튀튜트특튼튿틀틈티틱팀팅파팎판팔팝패팩팬퍼퍽페펜펴편펼평폐포폭폰표푸푹풀품풍퓨프플픔피픽필핏핑하학한할함합항해핵핸햄햇행향허헌험헤헬혀현혈협형혜호혹혼홀홈홉홍화확환활황회획횟횡효후훈훌훔훨휘휴흉흐흑흔흘흙흡흥흩희흰히힘
    parser.add_argument('--sensitive', action='store_true', help='for sensitive character mode')
    parser.add_argument('--PAD', action='store_true', help='whether to keep ratio then pad for image resize')
    parser.add_argument('--data_filtering_off', action='store_true', help='for data_filtering_off mode')
    """ Model Architecture """
    parser.add_argument('--Transformation', type=str, required=True, help='Transformation stage. None|TPS')
    parser.add_argument('--FeatureExtraction', type=str, required=True,
                        help='FeatureExtraction stage. VGG|RCNN|ResNet')
    parser.add_argument('--SequenceModeling', type=str, required=True, help='SequenceModeling stage. None|BiLSTM')
    parser.add_argument('--Prediction', type=str, required=True, help='Prediction stage. CTC|Attn')
    parser.add_argument('--num_fiducial', type=int, default=20, help='number of fiducial points of TPS-STN')
    parser.add_argument('--input_channel', type=int, default=1,
                        help='the number of input channel of Feature extractor')
    parser.add_argument('--output_channel', type=int, default=512,
                        help='the number of output channel of Feature extractor')
    parser.add_argument('--hidden_size', type=int, default=256, help='the size of the LSTM hidden state')
    opt = parser.parse_args()
    # Default experiment name is derived from the chosen architecture + seed.
    if not opt.exp_name:
        opt.exp_name = f'{opt.Transformation}-{opt.FeatureExtraction}-{opt.SequenceModeling}-{opt.Prediction}'
        opt.exp_name += f'-Seed{opt.manualSeed}'
        # print(opt.exp_name)
    os.makedirs(f'./saved_models/{opt.exp_name}', exist_ok=True)
    """ vocab / character number configuration """
    if opt.sensitive:
        # opt.character += 'ABCDEFGHIJKLMNOPQRSTUVWXYZ'
        opt.character = string.printable[:-6]  # same with ASTER setting (use 94 char).
    """ Seed and GPU setting """
    # Seed every RNG source so runs are reproducible.
    # print("Random Seed: ", opt.manualSeed)
    random.seed(opt.manualSeed)
    np.random.seed(opt.manualSeed)
    torch.manual_seed(opt.manualSeed)
    torch.cuda.manual_seed(opt.manualSeed)
    cudnn.benchmark = True
    cudnn.deterministic = True
    opt.num_gpu = torch.cuda.device_count()
    # print('device count', opt.num_gpu)
    if opt.num_gpu > 1:
        print('------ Use multi-GPU setting ------')
        print('if you stuck too long time with multi-GPU setting, try to set --workers 0')
        # check multi-GPU issue https://github.com/clovaai/deep-text-recognition-benchmark/issues/1
        # Scale workers and batch size linearly with the number of GPUs.
        opt.workers = opt.workers * opt.num_gpu
        opt.batch_size = opt.batch_size * opt.num_gpu
        """ previous version
        print('To equlize batch stats to 1-GPU setting, the batch_size is multiplied with num_gpu and multiplied batch_size is ', opt.batch_size)
        opt.batch_size = opt.batch_size * opt.num_gpu
        print('To equalize the number of epochs to 1-GPU setting, num_iter is divided with num_gpu by default.')
        If you dont care about it, just commnet out these line.)
        opt.num_iter = int(opt.num_iter / opt.num_gpu)
        """
    train(opt)
| 55.233038 | 1,105 | 0.677152 |
import os
import sys
import time
import random
import string
import argparse
import torch
import torch.backends.cudnn as cudnn
import torch.nn.init as init
import torch.optim as optim
import torch.utils.data
import numpy as np
from utils import CTCLabelConverter, CTCLabelConverterForBaiduWarpctc, AttnLabelConverter, Averager
from dataset import hierarchical_dataset, AlignCollate, Batch_Balanced_Dataset
from model import Model
from test import validation
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
ta_filtering_off:
print('Filtering the images containing characters which are not in opt.character')
print('Filtering the images whose label is longer than opt.batch_max_length')
opt.select_data = opt.select_data.split('-')
opt.batch_ratio = opt.batch_ratio.split('-')
train_dataset = Batch_Balanced_Dataset(opt)
log = open(f'./saved_models/{opt.exp_name}/log_dataset.txt', 'a')
AlignCollate_valid = AlignCollate(imgH=opt.imgH, imgW=opt.imgW, keep_ratio_with_pad=opt.PAD)
valid_dataset, valid_dataset_log = hierarchical_dataset(root=opt.valid_data, opt=opt)
valid_loader = torch.utils.data.DataLoader(
valid_dataset, batch_size=opt.batch_size,
shuffle=True,
num_workers=int(opt.workers),
collate_fn=AlignCollate_valid, pin_memory=True)
log.write(valid_dataset_log)
print('-' * 80)
log.write('-' * 80 + '\n')
log.close()
if 'CTC' in opt.Prediction:
if opt.baiduCTC:
converter = CTCLabelConverterForBaiduWarpctc(opt.character)
else:
converter = CTCLabelConverter(opt.character)
else:
converter = AttnLabelConverter(opt.character)
opt.num_class = len(converter.character)
if opt.rgb:
opt.input_channel = 3
model = Model(opt)
print('model input parameters', opt.imgH, opt.imgW, opt.num_fiducial, opt.input_channel, opt.output_channel,
opt.hidden_size, opt.num_class, opt.batch_max_length, opt.Transformation, opt.FeatureExtraction,
opt.SequenceModeling, opt.Prediction)
for name, param in model.named_parameters():
if 'localization_fc2' in name:
print(f'Skip {name} as it is already initialized')
continue
try:
if 'bias' in name:
init.constant_(param, 0.0)
elif 'weight' in name:
init.kaiming_normal_(param)
except Exception as e:
if 'weight' in name:
param.data.fill_(1)
continue
model = torch.nn.DataParallel(model).to(device)
model.train()
if opt.saved_model != '':
print(f'loading pretrained model from {opt.saved_model}')
if opt.FT:
model.load_state_dict(torch.load(opt.saved_model, map_location=torch.device('cpu')), strict=False)
else:
model.load_state_dict(torch.load(opt.saved_model))
print("Model:")
print(model)
if 'CTC' in opt.Prediction:
if opt.baiduCTC:
criterion = torch.nn.CTCLoss(zero_infinity=True).to(device)
else:
criterion = torch.nn.CTCLoss(zero_infinity=True).to(device)
else:
criterion = torch.nn.CrossEntropyLoss(ignore_index=0).to(device)
loss_avg = Averager()
filtered_parameters = []
params_num = []
for p in filter(lambda p: p.requires_grad, model.parameters()):
filtered_parameters.append(p)
params_num.append(np.prod(p.size()))
print('Trainable params num : ', sum(params_num))
if opt.adam:
optimizer = optim.Adam(filtered_parameters, lr=opt.lr, betas=(opt.beta1, 0.999))
else:
optimizer = optim.Adadelta(filtered_parameters, lr=opt.lr, rho=opt.rho, eps=opt.eps)
print("Optimizer:")
print(optimizer)
with open(f'./saved_models/{opt.exp_name}/opt.txt', 'a') as opt_file:
opt_log = '------------ Options -------------\n'
args = vars(opt)
for k, v in args.items():
opt_log += f'{str(k)}: {str(v)}\n'
opt_log += '---------------------------------------\n'
print(opt_log)
opt_file.write(opt_log)
start_iter = 0
if opt.saved_model != '':
try:
start_iter = int(opt.saved_model.split('_')[-1].split('.')[0])
print(f'continue to train, start_iter: {start_iter}')
except:
pass
start_time = time.time()
best_accuracy = -1
best_norm_ED = -1
iteration = start_iter
while(True):
image_tensors, labels = train_dataset.get_batch()
image = image_tensors.to(device)
text, length = converter.encode(labels, batch_max_length=opt.batch_max_length)
batch_size = image.size(0)
if 'CTC' in opt.Prediction:
preds = model(image, text)
preds_size = torch.IntTensor([preds.size(1)] * batch_size)
if opt.baiduCTC:
preds = preds.permute(1, 0, 2)
cost = criterion(preds, text, preds_size, length) / batch_size
else:
preds = preds.log_softmax(2).permute(1, 0, 2)
cost = criterion(preds, text, preds_size, length)
else:
preds = model(image, text[:, :-1])
target = text[:, 1:]
cost = criterion(preds.view(-1, preds.shape[-1]), target.contiguous().view(-1))
model.zero_grad()
cost.backward()
torch.nn.utils.clip_grad_norm_(model.parameters(), opt.grad_clip)
optimizer.step()
loss_avg.add(cost)
if (iteration + 1) % opt.valInterval == 0 or iteration == 0:
elapsed_time = time.time() - start_time
with open(f'./saved_models/{opt.exp_name}/log_train.txt', 'a') as log:
model.eval()
with torch.no_grad():
valid_loss, current_accuracy, current_norm_ED, preds, confidence_score, labels, infer_time, length_of_data = validation(
model, criterion, valid_loader, converter, opt)
model.train()
loss_log = f'[{iteration+1}/{opt.num_iter}] Train loss: {loss_avg.val():0.5f}, Valid loss: {valid_loss:0.5f}, Elapsed_time: {elapsed_time:0.5f}'
loss_avg.reset()
current_model_log = f'{"Current_accuracy":17s}: {current_accuracy:0.3f}, {"Current_norm_ED":17s}: {current_norm_ED:0.2f}'
if current_accuracy > best_accuracy:
best_accuracy = current_accuracy
torch.save(model.state_dict(), f'./saved_models/{opt.exp_name}/best_accuracy.pth')
if current_norm_ED > best_norm_ED:
best_norm_ED = current_norm_ED
torch.save(model.state_dict(), f'./saved_models/{opt.exp_name}/best_norm_ED.pth')
best_model_log = f'{"Best_accuracy":17s}: {best_accuracy:0.3f}, {"Best_norm_ED":17s}: {best_norm_ED:0.2f}'
loss_model_log = f'{loss_log}\n{current_model_log}\n{best_model_log}'
print(loss_model_log)
log.write(loss_model_log + '\n')
dashed_line = '-' * 80
head = f'{"Ground Truth":25s} | {"Prediction":25s} | Confidence Score & T/F'
predicted_result_log = f'{dashed_line}\n{head}\n{dashed_line}\n'
for gt, pred, confidence in zip(labels[:5], preds[:5], confidence_score[:5]):
if 'Attn' in opt.Prediction:
gt = gt[:gt.find('[s]')]
pred = pred[:pred.find('[s]')]
predicted_result_log += f'{gt:25s} | {pred:25s} | {confidence:0.4f}\t{str(pred == gt)}\n'
predicted_result_log += f'{dashed_line}'
print(predicted_result_log)
log.write(predicted_result_log + '\n')
if (iteration + 1) % 1e+5 == 0:
torch.save(
model.state_dict(), f'./saved_models/{opt.exp_name}/iter_{iteration+1}.pth')
if (iteration + 1) == opt.num_iter:
print('end the training')
sys.exit()
iteration += 1
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--exp_name', help='Where to store logs and models')
parser.add_argument('--train_data', required=True, help='path to training dataset')
parser.add_argument('--valid_data', required=True, help='path to validation dataset')
parser.add_argument('--manualSeed', type=int, default=1111, help='for random seed setting')
parser.add_argument('--workers', type=int, help='number of data loading workers', default=4)
parser.add_argument('--batch_size', type=int, default=192, help='input batch size')
parser.add_argument('--num_iter', type=int, default=300000, help='number of iterations to train for')
parser.add_argument('--valInterval', type=int, default=2000, help='Interval between each validation')
parser.add_argument('--saved_model', default='', help="path to model to continue training")
parser.add_argument('--FT', action='store_true', help='whether to do fine-tuning')
parser.add_argument('--adam', action='store_true', help='Whether to use adam (default is Adadelta)')
parser.add_argument('--lr', type=float, default=1, help='learning rate, default=1.0 for Adadelta')
parser.add_argument('--beta1', type=float, default=0.9, help='beta1 for adam. default=0.9')
parser.add_argument('--rho', type=float, default=0.95, help='decay rate rho for Adadelta. default=0.95')
parser.add_argument('--eps', type=float, default=1e-8, help='eps for Adadelta. default=1e-8')
parser.add_argument('--grad_clip', type=float, default=5, help='gradient clipping value. default=5')
parser.add_argument('--baiduCTC', action='store_true', help='for data_filtering_off mode')
t('--select_data', type=str, default='/',
help='select training data')
parser.add_argument('--batch_ratio', type=str, default='1',
help='assign ratio for each selected data in the batch')
parser.add_argument('--total_data_usage_ratio', type=str, default='1.0',
help='total data usage ratio, this ratio is multiplied to total number of data.')
parser.add_argument('--batch_max_length', type=int, default=25, help='maximum-label-length')
parser.add_argument('--imgH', type=int, default=32, help='the height of the input image')
parser.add_argument('--imgW', type=int, default=100, help='the width of the input image')
parser.add_argument('--rgb', action='store_true', help='use rgb input')
gument('--character', type=str,
default='0123456789!#$%&\'"()*+,-./:;<=>?@[\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZㆍ가각간갇갈감갑값갓강갖같갚갛개객걀걔거걱건걷걸검겁것겉게겨격겪견결겹경곁계고곡곤곧골곰곱곳공과관광괜괴굉교구국군굳굴굵굶굽궁권귀귓규균귤그극근글긁금급긋긍기긴길김깅깊까깍깎깐깔깜깝깡깥깨꺼꺾껌껍껏껑께껴꼬꼭꼴꼼꼽꽂꽃꽉꽤꾸꾼꿀꿈뀌끄끈끊끌끓끔끗끝끼낌나낙낚난날낡남납낫낭낮낯낱낳내냄냇냉냐냥너넉넌널넓넘넣네넥넷녀녁년념녕노녹논놀놈농높놓놔뇌뇨누눈눕뉘뉴늄느늑는늘늙능늦늬니닐님다닥닦단닫달닭닮담답닷당닿대댁댐댓더덕던덜덟덤덥덧덩덮데델도독돈돌돕돗동돼되된두둑둘둠둡둥뒤뒷드득든듣들듬듭듯등디딩딪따딱딴딸땀땅때땜떠떡떤떨떻떼또똑뚜뚫뚱뛰뜨뜩뜯뜰뜻띄라락란람랍랑랗래랜램랫략량러럭런럴럼럽럿렁렇레렉렌려력련렬렵령례로록론롬롭롯료루룩룹룻뤄류륙률륭르른름릇릎리릭린림립릿링마막만많말맑맘맙맛망맞맡맣매맥맨맵맺머먹먼멀멈멋멍멎메멘멩며면멸명몇모목몬몰몸몹못몽묘무묵묶문묻물뭄뭇뭐뭘뭣므미민믿밀밉밌및밑바박밖반받발밝밟밤밥방밭배백뱀뱃뱉버번벌범법벗베벤벨벼벽변별볍병볕보복볶본볼봄봇봉뵈뵙부북분불붉붐붓붕붙뷰브븐블비빌빔빗빚빛빠빡빨빵빼뺏뺨뻐뻔뻗뼈뼉뽑뿌뿐쁘쁨사삭산살삶삼삿상새색샌생샤서석섞선설섬섭섯성세섹센셈셋셔션소속손솔솜솟송솥쇄쇠쇼수숙순숟술숨숫숭숲쉬쉰쉽슈스슨슬슴습슷승시식신싣실싫심십싯싱싶싸싹싼쌀쌍쌓써썩썰썹쎄쏘쏟쑤쓰쓴쓸씀씌씨씩씬씹씻아악안앉않알앓암압앗앙앞애액앨야약얀얄얇양얕얗얘어억언얹얻얼엄업없엇엉엊엌엎에엔엘여역연열엷염엽엿영옆예옛오옥온올옮옳옷옹와완왕왜왠외왼요욕용우욱운울움웃웅워원월웨웬위윗유육율으윽은을음응의이익인일읽잃임입잇있잊잎자작잔잖잘잠잡잣장잦재쟁쟤저적전절젊점접젓정젖제젠젯져조족존졸좀좁종좋좌죄주죽준줄줌줍중쥐즈즉즌즐즘증지직진질짐집짓징짙짚짜짝짧째쨌쩌쩍쩐쩔쩜쪽쫓쭈쭉찌찍찢차착찬찮찰참찻창찾채책챔챙처척천철첩첫청체쳐초촉촌촛총촬최추축춘출춤춥춧충취츠측츰층치칙친칠침칫칭카칸칼캄캐캠커컨컬컴컵컷케켓켜코콘콜콤콩쾌쿄쿠퀴크큰클큼키킬타탁탄탈탑탓탕태택탤터턱턴털텅테텍텔템토톤톨톱통퇴투툴툼퉁튀튜트특튼튿틀틈티틱팀팅파팎판팔팝패팩팬퍼퍽페펜펴편펼평폐포폭폰표푸푹풀품풍퓨프플픔피픽필핏핑하학한할함합항해핵핸햄햇행향허헌험헤헬혀현혈협형혜호혹혼홀홈홉홍화확환활황회획횟횡효후훈훌훔훨휘휴흉흐흑흔흘흙흡흥흩희흰히힘',
help='character label')
# 0123456789!"#$%&\'()*+,-./:;<=>?@[\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZㆍ가각간갇갈감갑값갓강갖같갚갛개객걀걔거걱건걷걸검겁것겉게겨격겪견결겹경곁계고곡곤곧골곰곱곳공과관광괜괴굉교구국군굳굴굵굶굽궁권귀귓규균귤그극근글긁금급긋긍기긴길김깅깊까깍깎깐깔깜깝깡깥깨꺼꺾껌껍껏껑께껴꼬꼭꼴꼼꼽꽂꽃꽉꽤꾸꾼꿀꿈뀌끄끈끊끌끓끔끗끝끼낌나낙낚난날낡남납낫낭낮낯낱낳내냄냇냉냐냥너넉넌널넓넘넣네넥넷녀녁년념녕노녹논놀놈농높놓놔뇌뇨누눈눕뉘뉴늄느늑는늘늙능늦늬니닐님다닥닦단닫달닭닮담답닷당닿대댁댐댓더덕던덜덟덤덥덧덩덮데델도독돈돌돕돗동돼되된두둑둘둠둡둥뒤뒷드득든듣들듬듭듯등디딩딪따딱딴딸땀땅때땜떠떡떤떨떻떼또똑뚜뚫뚱뛰뜨뜩뜯뜰뜻띄라락란람랍랑랗래랜램랫략량러럭런럴럼럽럿렁렇레렉렌려력련렬렵령례로록론롬롭롯료루룩룹룻뤄류륙률륭르른름릇릎리릭린림립릿링마막만많말맑맘맙맛망맞맡맣매맥맨맵맺머먹먼멀멈멋멍멎메멘멩며면멸명몇모목몬몰몸몹못몽묘무묵묶문묻물뭄뭇뭐뭘뭣므미민믿밀밉밌및밑바박밖반받발밝밟밤밥방밭배백뱀뱃뱉버번벌범법벗베벤벨벼벽변별볍병볕보복볶본볼봄봇봉뵈뵙부북분불붉붐붓붕붙뷰브븐블비빌빔빗빚빛빠빡빨빵빼뺏뺨뻐뻔뻗뼈뼉뽑뿌뿐쁘쁨사삭산살삶삼삿상새색샌생샤서석섞선설섬섭섯성세섹센셈셋셔션소속손솔솜솟송솥쇄쇠쇼수숙순숟술숨숫숭숲쉬쉰쉽슈스슨슬슴습슷승시식신싣실싫심십싯싱싶싸싹싼쌀쌍쌓써썩썰썹쎄쏘쏟쑤쓰쓴쓸씀씌씨씩씬씹씻아악안앉않알앓암압앗앙앞애액앨야약얀얄얇양얕얗얘어억언얹얻얼엄업없엇엉엊엌엎에엔엘여역연열엷염엽엿영옆예옛오옥온올옮옳옷옹와완왕왜왠외왼요욕용우욱운울움웃웅워원월웨웬위윗유육율으윽은을음응의이익인일읽잃임입잇있잊잎자작잔잖잘잠잡잣장잦재쟁쟤저적전절젊점접젓정젖제젠젯져조족존졸좀좁종좋좌죄주죽준줄줌줍중쥐즈즉즌즐즘증지직진질짐집짓징짙짚짜짝짧째쨌쩌쩍쩐쩔쩜쪽쫓쭈쭉찌찍찢차착찬찮찰참찻창찾채책챔챙처척천철첩첫청체쳐초촉촌촛총촬최추축춘출춤춥춧충취츠측츰층치칙친칠침칫칭카칸칼캄캐캠커컨컬컴컵컷케켓켜코콘콜콤콩쾌쿄쿠퀴크큰클큼키킬타탁탄탈탑탓탕태택탤터턱턴털텅테텍텔템토톤톨톱통퇴투툴툼퉁튀튜트특튼튿틀틈티틱팀팅파팎판팔팝패팩팬퍼퍽페펜펴편펼평폐포폭폰표푸푹풀품풍퓨프플픔피픽필핏핑하학한할함합항해핵핸햄햇행향허헌험헤헬혀현혈협형혜호혹혼홀홈홉홍화확환활황회획횟횡효후훈훌훔훨휘휴흉흐흑흔흘흙흡흥흩희흰히힘",
parser.add_argument('--sensitive', action='store_true', help='for sensitive character mode')
parser.add_argument('--PAD', action='store_true', help='whether to keep ratio then pad for image resize')
parser.add_argument('--data_filtering_off', action='store_true', help='for data_filtering_off mode')
parser.add_argument('--Transformation', type=str, required=True, help='Transformation stage. None|TPS')
parser.add_argument('--FeatureExtraction', type=str, required=True,
help='FeatureExtraction stage. VGG|RCNN|ResNet')
parser.add_argument('--SequenceModeling', type=str, required=True, help='SequenceModeling stage. None|BiLSTM')
parser.add_argument('--Prediction', type=str, required=True, help='Prediction stage. CTC|Attn')
parser.add_argument('--num_fiducial', type=int, default=20, help='number of fiducial points of TPS-STN')
parser.add_argument('--input_channel', type=int, default=1,
help='the number of input channel of Feature extractor')
parser.add_argument('--output_channel', type=int, default=512,
help='the number of output channel of Feature extractor')
parser.add_argument('--hidden_size', type=int, default=256, help='the size of the LSTM hidden state')
opt = parser.parse_args()
if not opt.exp_name:
opt.exp_name = f'{opt.Transformation}-{opt.FeatureExtraction}-{opt.SequenceModeling}-{opt.Prediction}'
opt.exp_name += f'-Seed{opt.manualSeed}'
# print(opt.exp_name)
os.makedirs(f'./saved_models/{opt.exp_name}', exist_ok=True)
if opt.sensitive:
# opt.character += 'ABCDEFGHIJKLMNOPQRSTUVWXYZ'
opt.character = string.printable[:-6] # same with ASTER setting (use 94 char).
# print("Random Seed: ", opt.manualSeed)
random.seed(opt.manualSeed)
np.random.seed(opt.manualSeed)
torch.manual_seed(opt.manualSeed)
torch.cuda.manual_seed(opt.manualSeed)
cudnn.benchmark = True
cudnn.deterministic = True
opt.num_gpu = torch.cuda.device_count()
# print('device count', opt.num_gpu)
if opt.num_gpu > 1:
print('------ Use multi-GPU setting ------')
print('if you stuck too long time with multi-GPU setting, try to set --workers 0')
# check multi-GPU issue https://github.com/clovaai/deep-text-recognition-benchmark/issues/1
opt.workers = opt.workers * opt.num_gpu
opt.batch_size = opt.batch_size * opt.num_gpu
train(opt)
| true | true |
1c2fb5fc10089bbacaa7a1f8c73535da8b2e736c | 1,229 | py | Python | Python3/0505-The-Maze-II/soln.py | wyaadarsh/LeetCode-Solutions | 3719f5cb059eefd66b83eb8ae990652f4b7fd124 | [
"MIT"
] | 5 | 2020-07-24T17:48:59.000Z | 2020-12-21T05:56:00.000Z | Python3/0505-The-Maze-II/soln.py | zhangyaqi1989/LeetCode-Solutions | 2655a1ffc8678ad1de6c24295071308a18c5dc6e | [
"MIT"
] | null | null | null | Python3/0505-The-Maze-II/soln.py | zhangyaqi1989/LeetCode-Solutions | 2655a1ffc8678ad1de6c24295071308a18c5dc6e | [
"MIT"
] | 2 | 2020-07-24T17:49:01.000Z | 2020-08-31T19:57:35.000Z | class Solution:
def shortestDistance(self, maze, start, destination):
"""
:type maze: List[List[int]]
:type start: List[int]
:type destination: List[int]
:rtype: int
"""
m, n = len(maze), len(maze[0])
start, dest = tuple(start), tuple(destination)
heap = []
heapq.heappush(heap, [0, start])
visited = {start : 0}
while heap:
dis, (i, j), = heapq.heappop(heap)
if (i, j) == dest:
return dis
for di, dj in ((-1, 0), (1, 0), (0, 1), (0, -1)):
newi, newj = i + di, j + dj
while 0 <= newi < m and 0 <= newj < n and maze[newi][newj] != 1:
newi, newj = newi + di, newj + dj
newi, newj = newi - di, newj - dj
if newi != i or newj != j:
distance = dis + ((newi - i) // di if newi != i else (newj - j) // dj)
else:
distance = dis
if (newi, newj) not in visited or visited[newi, newj] > distance:
visited[newi, newj] = distance
heapq.heappush(heap, (distance, (newi, newj)))
return -1 | 40.966667 | 90 | 0.441009 | class Solution:
def shortestDistance(self, maze, start, destination):
m, n = len(maze), len(maze[0])
start, dest = tuple(start), tuple(destination)
heap = []
heapq.heappush(heap, [0, start])
visited = {start : 0}
while heap:
dis, (i, j), = heapq.heappop(heap)
if (i, j) == dest:
return dis
for di, dj in ((-1, 0), (1, 0), (0, 1), (0, -1)):
newi, newj = i + di, j + dj
while 0 <= newi < m and 0 <= newj < n and maze[newi][newj] != 1:
newi, newj = newi + di, newj + dj
newi, newj = newi - di, newj - dj
if newi != i or newj != j:
distance = dis + ((newi - i) // di if newi != i else (newj - j) // dj)
else:
distance = dis
if (newi, newj) not in visited or visited[newi, newj] > distance:
visited[newi, newj] = distance
heapq.heappush(heap, (distance, (newi, newj)))
return -1 | true | true |
1c2fb67571f8019d9cbbd8c5dfe8c8c59589de44 | 3,442 | py | Python | network/wifiWidget.py | dailing/drcm | b692818ae5074611c27bff124dd41b34f0d7e64b | [
"MIT"
] | null | null | null | network/wifiWidget.py | dailing/drcm | b692818ae5074611c27bff124dd41b34f0d7e64b | [
"MIT"
] | null | null | null | network/wifiWidget.py | dailing/drcm | b692818ae5074611c27bff124dd41b34f0d7e64b | [
"MIT"
] | null | null | null | import sys
import logging
import subprocess
from PyQt4 import QtGui, QtCore
from wifiManager import wifiManager
try:
from sql.RunnableFunc import RunnableFunc
from sql.PoolWrapper import PoolWrapper
except Exception as e:
pass
from widget.LineEditDialog import LineEditDialog
def showKeyBoard():
    """Launch the matchbox on-screen keyboard, ignoring a missing binary."""
    command = ["matchbox-keyboard"]
    try:
        subprocess.Popen(command)
    except FileNotFoundError:
        # Keyboard binary not installed; the UI simply has no soft keyboard.
        pass
def hideKeyBoard():
    """Close any running on-screen keyboard by killing its process.

    Mirrors showKeyBoard(): a missing ``killall`` binary is ignored so the
    GUI keeps working on systems that do not ship it.  (The original body
    also contained a dead ``pass`` statement, removed here.)
    """
    try:
        subprocess.Popen(["killall","matchbox-keyboard"])
    except FileNotFoundError:
        pass
def visulizeSignal(wifiData):
    """Render a wifi record's signal quality as a bar string.

    ``wifiData`` is a list whose first entry is a quality fraction string
    such as "59/70"; the remaining entries are passed through unchanged,
    with None replaced by ''.  Returns a new list with the bar string in
    slot 0.
    """
    #convert to percentage representation
    quality = wifiData[0].split('/')
    quality = float(quality[0]) / float(quality[1])
    quality = int(quality * 100)
    #'|' denote ten percent, '.' denote five percent
    rem = (quality % 10) > 5
    # Floor division is required: plain '/' yields a float on Python 3 and
    # makes the string repetition below raise TypeError.
    strQuality = '|' * (quality // 10) + ('|' if rem else '.')
    res = ['' if e is None else e for e in wifiData]
    print (strQuality)
    res[0] = strQuality
    return res
class WifiTableView(QtGui.QTableWidget):
    """Table widget listing nearby wifi networks (quality / SSID / password).

    Scanning and connecting run on a worker pool (PoolWrapper); scan results
    are handed back to the GUI thread through the ``wifiQuerySignal`` Qt
    signal so all table mutation happens on the GUI thread.
    """
    # Emitted from the worker with the scan result list; connected to
    # asynFillTableCallBack in initTable.
    wifiQuerySignal = QtCore.pyqtSignal(list)
    def __init__(self):
        QtGui.QTableWidget.__init__(self)
        self.pw = PoolWrapper()
        self.initTable()
        self.setEditTriggers(QtGui.QAbstractItemView.CurrentChanged)
    def tabCellClicked(self, i, j):
        """Handle a click on row i, column j; only the SSID column connects."""
        if j != 1: # column 1 is the SSID; other columns are ignored
            return
        ssid = str(self.item(i, j).text())
        if ssid == self.wifiManager.getCurrentWifi():
            print ('is connected')
            return
        pwd = str(self.item(i, 2).text())
        pwd, isOkay = LineEditDialog.newInstance('password',pwd)
        if not isOkay:
            return
        self.item(i, 2).setText(pwd)
        # '******' is the placeholder for an already-saved password; pass
        # None so the manager falls back to stored credentials.
        if pwd == '******':
            pwd = None
        # Connect on the worker pool so the GUI stays responsive.
        pwd = self.pw.start(
            RunnableFunc(
                self.wifiManager.connectWifi,
                ssid,
                pwd
            )
        )
        print(str(self.item(i, 2).text()))
    def initTable(self):
        """Configure columns/headers, wire signals and start the async scan."""
        self.wifiManager = wifiManager()
        # table.itemClicked.connect(self.tabItemDoubleClicked)
        self.cellClicked.connect(self.tabCellClicked)
        # tableItem = QtGui.QTableWidgetItem()
        self.setWindowTitle("WIFI LIST")
        #quality, ssid, user, pwd
        self.setColumnCount(3)
        self.verticalHeader().hide()
        self.setHorizontalHeaderLabels(['quality', 'wifi', 'password'])
        self.setShowGrid(False)
        # table.setEditTriggers(QtGui.QAbstractItemView.NoEditTriggers)
        self.horizontalHeader().setResizeMode(QtGui.QHeaderView.Stretch)
        #[[quality, name, pwd]]
        # self.item(0, 2).setFlags(self.item(0, 2).flags() ^ QtCore.Qt.ItemIsEditable)
        # Scan in the background; asynFillTable emits wifiQuerySignal when done.
        self.pw.start(
            RunnableFunc(
                self.asynFillTable
            )
        )
        self.wifiQuerySignal.connect(self.asynFillTableCallBack)
    def asynFillTable(self):
        """Worker-side: scan for networks and emit the result to the GUI thread."""
        wifiList = self.wifiManager.getWifiList()
        print(wifiList)
        self.wifiQuerySignal.emit(wifiList)
        #pull data and emit signal
    def asynFillTableCallBack(self, wifiList):
        """GUI-side: fill the table, then try reconnecting to a saved network."""
        for w in wifiList:
            self.appendStrRow(visulizeSignal(w))
        self.pw.start(
            RunnableFunc(
                wifiManager().connect_saved
            )
        )
    def appendStrRow(self, data):
        """Append one row of centered, read-only string cells."""
        x = self.rowCount()
        self.insertRow(x)
        for i, v in enumerate(data) :
            item = QtGui.QTableWidgetItem(v)
            item.setTextAlignment(QtCore.Qt.AlignHCenter | QtCore.Qt.AlignVCenter)
            # XOR-ing out ItemIsEditable makes the cell read-only.
            item.setFlags(item.flags() ^ QtCore.Qt.ItemIsEditable)
            self.setItem(x, i, item)
if __name__ == '__main__':
    # Manual smoke test: only exercises the quality-bar rendering.  The Qt
    # application launch is kept below for reference but disabled.
    try:
        visulizeSignal(['59/70'])
        # app = QtGui.QApplication(sys.argv)
        # wrapper = QtGui.QMainWindow()
        # wrapper.setCentralWidget(WifiTableView())
        # wrapper.setGeometry(400, 400, 800, 480)
        # wrapper.show()
        # sys.exit(app.exec_())
    except Exception as e:
        print(e)
| 23.902778 | 80 | 0.705113 | import sys
import logging
import subprocess
from PyQt4 import QtGui, QtCore
from wifiManager import wifiManager
try:
from sql.RunnableFunc import RunnableFunc
from sql.PoolWrapper import PoolWrapper
except Exception as e:
pass
from widget.LineEditDialog import LineEditDialog
def showKeyBoard():
try:
subprocess.Popen(["matchbox-keyboard"])
except FileNotFoundError:
pass
def hideKeyBoard():
pass
subprocess.Popen(["killall","matchbox-keyboard"])
def visulizeSignal(wifiData):
quality = wifiData[0].split('/')
quality = float(quality[0]) / float(quality[1])
quality = int(quality * 100)
rem = (quality % 10) > 5
strQuality = '|' * (quality / 10) + ('|' if rem else '.')
res = ['' if e is None else e for e in wifiData]
print (strQuality)
res[0] = strQuality
return res
class WifiTableView(QtGui.QTableWidget):
wifiQuerySignal = QtCore.pyqtSignal(list)
def __init__(self):
QtGui.QTableWidget.__init__(self)
self.pw = PoolWrapper()
self.initTable()
self.setEditTriggers(QtGui.QAbstractItemView.CurrentChanged)
def tabCellClicked(self, i, j):
if j != 1:
return
ssid = str(self.item(i, j).text())
if ssid == self.wifiManager.getCurrentWifi():
print ('is connected')
return
pwd = str(self.item(i, 2).text())
pwd, isOkay = LineEditDialog.newInstance('password',pwd)
if not isOkay:
return
self.item(i, 2).setText(pwd)
if pwd == '******':
pwd = None
pwd = self.pw.start(
RunnableFunc(
self.wifiManager.connectWifi,
ssid,
pwd
)
)
print(str(self.item(i, 2).text()))
def initTable(self):
self.wifiManager = wifiManager()
self.cellClicked.connect(self.tabCellClicked)
self.setWindowTitle("WIFI LIST")
self.setColumnCount(3)
self.verticalHeader().hide()
self.setHorizontalHeaderLabels(['quality', 'wifi', 'password'])
self.setShowGrid(False)
self.horizontalHeader().setResizeMode(QtGui.QHeaderView.Stretch)
self.pw.start(
RunnableFunc(
self.asynFillTable
)
)
self.wifiQuerySignal.connect(self.asynFillTableCallBack)
def asynFillTable(self):
wifiList = self.wifiManager.getWifiList()
print(wifiList)
self.wifiQuerySignal.emit(wifiList)
def asynFillTableCallBack(self, wifiList):
for w in wifiList:
self.appendStrRow(visulizeSignal(w))
self.pw.start(
RunnableFunc(
wifiManager().connect_saved
)
)
def appendStrRow(self, data):
x = self.rowCount()
self.insertRow(x)
for i, v in enumerate(data) :
item = QtGui.QTableWidgetItem(v)
item.setTextAlignment(QtCore.Qt.AlignHCenter | QtCore.Qt.AlignVCenter)
item.setFlags(item.flags() ^ QtCore.Qt.ItemIsEditable)
self.setItem(x, i, item)
if __name__ == '__main__':
try:
visulizeSignal(['59/70'])
except Exception as e:
print(e)
| true | true |
1c2fb6a640426644549f001ce958942cb7912536 | 3,403 | py | Python | src/models/utils.py | caixhstrive/Mini_Xception | 0cf7ce88d9cbe56f0dc20b0ef2850a499c033ce3 | [
"MIT"
] | 3 | 2018-11-21T06:51:57.000Z | 2018-11-21T08:20:53.000Z | src/models/utils.py | caixhstrive/Mini_Xception | 0cf7ce88d9cbe56f0dc20b0ef2850a499c033ce3 | [
"MIT"
] | null | null | null | src/models/utils.py | caixhstrive/Mini_Xception | 0cf7ce88d9cbe56f0dc20b0ef2850a499c033ce3 | [
"MIT"
] | null | null | null | #-*- coding:utf-8 -*-
import os
from keras import backend as K
from keras.models import Model
from keras.engine.topology import get_source_inputs
from keras.layers import Activation, Add, Concatenate, Conv2D, GlobalMaxPooling2D
from keras.layers import GlobalAveragePooling2D,Input, Dense
from keras.layers import MaxPool2D,AveragePooling2D, BatchNormalization, Lambda, DepthwiseConv2D
import numpy as np
def channel_split(x, name=''):
    """Split the last (channel) axis of ``x`` into two equal halves.

    Each half is produced by a named Lambda layer so the split is visible
    in the model graph.  Returns (first half, second half).
    """
    total_channels = x.shape.as_list()[-1]
    half = total_channels // 2
    first = Lambda(lambda z: z[:, :, :, 0:half], name='%s/sp%d_slice' % (name, 0))(x)
    second = Lambda(lambda z: z[:, :, :, half:], name='%s/sp%d_slice' % (name, 1))(x)
    return first, second
def channel_shuffle(x):
    """Interleave the two channel groups (ShuffleNet-style channel shuffle).

    Reshapes (H, W, C) -> (H, W, 2, C/2), swaps the group axis with the
    per-group channel axis, and flattens back, so channels from the two
    branches end up mixed.  Assumes channels-last layout (see shuffle_unit).
    """
    height, width, channels = x.shape.as_list()[1:]
    channels_per_split = channels // 2
    x = K.reshape(x, [-1, height, width, 2, channels_per_split])
    x = K.permute_dimensions(x, (0,1,2,4,3))
    x = K.reshape(x, [-1, height, width, channels])
    return x
def shuffle_unit(inputs, out_channels, bottleneck_ratio,strides=2,stage=1,block=1):
    """Build one shuffle unit (ShuffleNetV2-style) on top of ``inputs``.

    With strides == 1 the input is channel-split: one half is passed through
    untouched (``c_hat``) while the other goes through
    1x1 conv -> depthwise 3x3 -> 1x1 conv.  With strides == 2 (spatial
    downsampling) there is no split and the second branch convolves the
    full input.  The two branches are concatenated and channel-shuffled.
    All layer names are prefixed with 'stage{stage}/block{block}'.
    """
    if K.image_data_format() == 'channels_last':
        bn_axis = -1
    else:
        raise ValueError('Only channels last supported')
    prefix = 'stage{}/block{}'.format(stage, block)
    bottleneck_channels = int(out_channels * bottleneck_ratio)
    if strides < 2:
        # Stride-1 unit: shuffle only half the channels through the convs.
        c_hat, c = channel_split(inputs, '{}/spl'.format(prefix))
        inputs = c
    # Main branch: 1x1 -> BN/ReLU -> depthwise 3x3 -> BN -> 1x1 -> BN/ReLU.
    x = Conv2D(bottleneck_channels, kernel_size=(1,1), strides=1, padding='same', name='{}/1x1conv_1'.format(prefix))(inputs)
    x = BatchNormalization(axis=bn_axis, name='{}/bn_1x1conv_1'.format(prefix))(x)
    x = Activation('relu', name='{}/relu_1x1conv_1'.format(prefix))(x)
    x = DepthwiseConv2D(kernel_size=3, strides=strides, padding='same', name='{}/3x3dwconv'.format(prefix))(x)
    x = BatchNormalization(axis=bn_axis, name='{}/bn_3x3dwconv'.format(prefix))(x)
    x = Conv2D(bottleneck_channels, kernel_size=1,strides=1,padding='same', name='{}/1x1conv_2'.format(prefix))(x)
    x = BatchNormalization(axis=bn_axis, name='{}/bn_1x1conv_2'.format(prefix))(x)
    x = Activation('relu', name='{}/relu_1x1conv_2'.format(prefix))(x)
    if strides < 2:
        ret = Concatenate(axis=bn_axis, name='{}/concat_1'.format(prefix))([x, c_hat])
    else:
        # Downsampling unit: second branch depthwise-convolves the input too.
        s2 = DepthwiseConv2D(kernel_size=3, strides=2, padding='same', name='{}/3x3dwconv_2'.format(prefix))(inputs)
        s2 = BatchNormalization(axis=bn_axis, name='{}/bn_3x3dwconv_2'.format(prefix))(s2)
        s2 = Conv2D(bottleneck_channels, kernel_size=1,strides=1,padding='same', name='{}/1x1_conv_3'.format(prefix))(s2)
        s2 = BatchNormalization(axis=bn_axis, name='{}/bn_1x1conv_3'.format(prefix))(s2)
        s2 = Activation('relu', name='{}/relu_1x1conv_3'.format(prefix))(s2)
        ret = Concatenate(axis=bn_axis, name='{}/concat_2'.format(prefix))([x, s2])
    ret = Lambda(channel_shuffle, name='{}/channel_shuffle'.format(prefix))(ret)
    return ret
def block(x, channel_map, bottleneck_ratio, repeat=1, stage=1):
    """Stack one stride-2 (downsampling) shuffle unit and ``repeat`` stride-1 units."""
    stage_channels = channel_map[stage - 1]
    x = shuffle_unit(x, out_channels=stage_channels, strides=2,
                     bottleneck_ratio=bottleneck_ratio, stage=stage, block=1)
    # Subsequent units keep the resolution; block numbers continue from 2.
    for unit_index in range(2, repeat + 2):
        x = shuffle_unit(x, out_channels=stage_channels, strides=1,
                         bottleneck_ratio=bottleneck_ratio, stage=stage, block=unit_index)
    return x
| 44.776316 | 125 | 0.675287 |
import os
from keras import backend as K
from keras.models import Model
from keras.engine.topology import get_source_inputs
from keras.layers import Activation, Add, Concatenate, Conv2D, GlobalMaxPooling2D
from keras.layers import GlobalAveragePooling2D,Input, Dense
from keras.layers import MaxPool2D,AveragePooling2D, BatchNormalization, Lambda, DepthwiseConv2D
import numpy as np
def channel_split(x, name=''):
in_channles = x.shape.as_list()[-1]
ip = in_channles // 2
c_hat = Lambda(lambda z: z[:, :, :, 0:ip], name='%s/sp%d_slice' % (name, 0))(x)
c = Lambda(lambda z: z[:, :, :, ip:], name='%s/sp%d_slice' % (name, 1))(x)
return c_hat, c
def channel_shuffle(x):
height, width, channels = x.shape.as_list()[1:]
channels_per_split = channels // 2
x = K.reshape(x, [-1, height, width, 2, channels_per_split])
x = K.permute_dimensions(x, (0,1,2,4,3))
x = K.reshape(x, [-1, height, width, channels])
return x
def shuffle_unit(inputs, out_channels, bottleneck_ratio,strides=2,stage=1,block=1):
if K.image_data_format() == 'channels_last':
bn_axis = -1
else:
raise ValueError('Only channels last supported')
prefix = 'stage{}/block{}'.format(stage, block)
bottleneck_channels = int(out_channels * bottleneck_ratio)
if strides < 2:
c_hat, c = channel_split(inputs, '{}/spl'.format(prefix))
inputs = c
x = Conv2D(bottleneck_channels, kernel_size=(1,1), strides=1, padding='same', name='{}/1x1conv_1'.format(prefix))(inputs)
x = BatchNormalization(axis=bn_axis, name='{}/bn_1x1conv_1'.format(prefix))(x)
x = Activation('relu', name='{}/relu_1x1conv_1'.format(prefix))(x)
x = DepthwiseConv2D(kernel_size=3, strides=strides, padding='same', name='{}/3x3dwconv'.format(prefix))(x)
x = BatchNormalization(axis=bn_axis, name='{}/bn_3x3dwconv'.format(prefix))(x)
x = Conv2D(bottleneck_channels, kernel_size=1,strides=1,padding='same', name='{}/1x1conv_2'.format(prefix))(x)
x = BatchNormalization(axis=bn_axis, name='{}/bn_1x1conv_2'.format(prefix))(x)
x = Activation('relu', name='{}/relu_1x1conv_2'.format(prefix))(x)
if strides < 2:
ret = Concatenate(axis=bn_axis, name='{}/concat_1'.format(prefix))([x, c_hat])
else:
s2 = DepthwiseConv2D(kernel_size=3, strides=2, padding='same', name='{}/3x3dwconv_2'.format(prefix))(inputs)
s2 = BatchNormalization(axis=bn_axis, name='{}/bn_3x3dwconv_2'.format(prefix))(s2)
s2 = Conv2D(bottleneck_channels, kernel_size=1,strides=1,padding='same', name='{}/1x1_conv_3'.format(prefix))(s2)
s2 = BatchNormalization(axis=bn_axis, name='{}/bn_1x1conv_3'.format(prefix))(s2)
s2 = Activation('relu', name='{}/relu_1x1conv_3'.format(prefix))(s2)
ret = Concatenate(axis=bn_axis, name='{}/concat_2'.format(prefix))([x, s2])
ret = Lambda(channel_shuffle, name='{}/channel_shuffle'.format(prefix))(ret)
return ret
def block(x, channel_map, bottleneck_ratio, repeat=1, stage=1):
x = shuffle_unit(x, out_channels=channel_map[stage-1],
strides=2,bottleneck_ratio=bottleneck_ratio,stage=stage,block=1)
for i in range(1, repeat+1):
x = shuffle_unit(x, out_channels=channel_map[stage-1],strides=1,
bottleneck_ratio=bottleneck_ratio,stage=stage, block=(1+i))
return x
| true | true |
1c2fb781ddcd4218fd8a81658d8b1820f7658753 | 425 | py | Python | setup.py | dhruvdcoder/allennlp-wandb | 160dceb1f4cec8e893b856d333bc302748afdd74 | [
"MIT"
] | null | null | null | setup.py | dhruvdcoder/allennlp-wandb | 160dceb1f4cec8e893b856d333bc302748afdd74 | [
"MIT"
] | null | null | null | setup.py | dhruvdcoder/allennlp-wandb | 160dceb1f4cec8e893b856d333bc302748afdd74 | [
"MIT"
] | null | null | null | from setuptools import setup, find_packages
# Runtime dependencies; wandb is pinned to the client API version this
# package was written against.
install_requires = [
    "allennlp>=0.9.0",
    "wandb==0.8.15",
]
# Standard setuptools packaging; 'py.typed' marks the package as shipping
# inline type information (PEP 561).
setup(
    name='allennlp_wandb',
    version='0.0.1',
    description='Utilities to use allennlp with wandb',
    packages=find_packages(
        exclude=["*.tests", "*.tests.*", "tests.*", "tests"]),
    package_data={'allennlp_wandb': ['py.typed']},
    install_requires=install_requires,
    zip_safe=False)
| 25 | 62 | 0.647059 | from setuptools import setup, find_packages
install_requires = [
"allennlp>=0.9.0",
"wandb==0.8.15",
]
setup(
name='allennlp_wandb',
version='0.0.1',
description='Utilities to use allennlp with wandb',
packages=find_packages(
exclude=["*.tests", "*.tests.*", "tests.*", "tests"]),
package_data={'allennlp_wandb': ['py.typed']},
install_requires=install_requires,
zip_safe=False)
| true | true |
1c2fb7ce59e423e59b7589b3cf8fd6d6bac8e56f | 10,685 | py | Python | gateware/butterstick-bitstream.py | butterstick-fpga/test-fixture-sw | 6ab19faaeaaf3368fd9cd308fa94f913fe3e54be | [
"BSD-2-Clause"
] | null | null | null | gateware/butterstick-bitstream.py | butterstick-fpga/test-fixture-sw | 6ab19faaeaaf3368fd9cd308fa94f913fe3e54be | [
"BSD-2-Clause"
] | null | null | null | gateware/butterstick-bitstream.py | butterstick-fpga/test-fixture-sw | 6ab19faaeaaf3368fd9cd308fa94f913fe3e54be | [
"BSD-2-Clause"
] | null | null | null | #!/usr/bin/env python3
# This file is Copyright (c) Greg Davill <greg.davill@gmail.com>
# License: BSD
# This variable defines all the external programs that this module
# relies on. lxbuildenv reads this variable in order to ensure
# the build will finish without exiting due to missing third-party
# programs.
LX_DEPENDENCIES = ["riscv", "nextpnr-ecp5", "yosys"]
# Import lxbuildenv to integrate the deps/ directory
import lxbuildenv
import os
import shutil
import argparse
import subprocess
from migen import *
from migen.genlib.resetsync import AsyncResetSynchronizer
from litex.build.lattice.trellis import trellis_args, trellis_argdict
from litex.soc.integration.soc_core import *
from litex.soc.integration.builder import *
from litex.soc.interconnect.csr import *
from litex.soc.cores.clock import *
from litex.soc.cores.clock.common import period_ns
from litex.soc.cores.gpio import GPIOOut, GPIOIn, GPIOTristate
from litex.soc.cores.spi_flash import SpiFlashDualQuad
from litedram.modules import MT41K256M16
from litedram.phy import ECP5DDRPHY
from liteeth.phy.ecp5rgmii import LiteEthPHYRGMII
from rtl.platform import butterstick_r1d0
from rtl.rgb import Leds
from rtl.vccio import VccIo
# CRG ---------------------------------------------------------------------------------------------
class _CRG(Module):
    """Clock/reset generator for the SoC.

    Derives the ``sys`` and ``sys2x`` clock domains from the 30 MHz board
    clock via an ECP5 PLL, with a power-on-reset counter.  ``stop`` and
    ``reset`` are driven externally (by the DDR PHY init logic in BaseSoC)
    to pause the edge clock and reset the divided domains.
    """
    def __init__(self, platform, sys_clk_freq):
        self.rst = Signal()
        self.clock_domains.cd_init = ClockDomain()
        self.clock_domains.cd_por = ClockDomain(reset_less=True)
        self.clock_domains.cd_sys = ClockDomain()
        self.clock_domains.cd_sys2x = ClockDomain()
        self.clock_domains.cd_sys2x_i = ClockDomain(reset_less=True)

        # # #

        self.stop = Signal()
        self.reset = Signal()

        # Clk / Rst
        clk30 = platform.request("clk30")
        rst_n = platform.request("user_btn", 0)
        platform.add_period_constraint(clk30, period_ns(30e6))
        platform.add_period_constraint(ClockSignal('jtag'), period_ns(20e6))

        # Power on reset: hold everything in reset until the 16-bit counter,
        # clocked by the raw 30 MHz input, has counted down to zero.
        por_count = Signal(16, reset=2**16-1)
        por_done = Signal()
        self.comb += self.cd_por.clk.eq(clk30)
        self.comb += por_done.eq(por_count == 0)
        self.sync.por += If(~por_done, por_count.eq(por_count - 1))

        # PLL: 2x system clock plus a 25 MHz init clock.
        self.submodules.pll = pll = ECP5PLL()
        self.comb += pll.reset.eq(~por_done | ~rst_n | self.rst)
        pll.register_clkin(clk30, 30e6)
        pll.create_clkout(self.cd_sys2x_i, 2*sys_clk_freq)
        pll.create_clkout(self.cd_init, 25e6)
        # ECP5 primitives: ECLKSYNCB gates the edge clock (via `stop`),
        # CLKDIVF divides sys2x by 2 to produce the sys clock.
        self.specials += [
            Instance("ECLKSYNCB",
                i_ECLKI = self.cd_sys2x_i.clk,
                i_STOP  = self.stop,
                o_ECLKO = self.cd_sys2x.clk),
            Instance("CLKDIVF",
                p_DIV     = "2.0",
                i_ALIGNWD = 0,
                i_CLKI    = self.cd_sys2x.clk,
                i_RST     = self.reset,
                o_CDIVX   = self.cd_sys.clk),
            AsyncResetSynchronizer(self.cd_sys,   ~pll.locked | self.reset),
            AsyncResetSynchronizer(self.cd_sys2x, ~pll.locked | self.reset),
        ]
# BaseSoC ------------------------------------------------------------------------------------------
class BaseSoC(SoCCore):
    """ButterStick test SoC: VexRiscv core with DDR3, SPI flash, RGMII
    Ethernet, LEDs, buttons, three tristate GPIO banks and a dedicated
    'testrom' BRAM region the test firmware boots from.
    """
    mem_map = {
        "rom":      0x00000000,  # (default shadow @0x80000000)
        "testrom":  0x08000000,  # (default shadow @0x80000000)
        "sram":     0x10000000,  # (default shadow @0xa0000000)
        "spiflash": 0x20000000,  # (default shadow @0xa0000000)
        "main_ram": 0x40000000,  # (default shadow @0xc0000000)
        "csr":      0xf0000000,  # (default shadow @0xe0000000)
        "usb":      0xf0010000,
    }
    mem_map.update(SoCCore.mem_map)

    interrupt_map = {
        "timer0": 0,
        "uart":   1,
    }
    interrupt_map.update(SoCCore.interrupt_map)

    def __init__(self, sys_clk_freq=int(60e6), toolchain="trellis", **kwargs):
        # Board Revision ---------------------------------------------------------------------------
        # NOTE(review): `revision` and `device` are read from kwargs but never
        # used below -- confirm whether they were meant to select the platform.
        revision = kwargs.get("revision", "0.2")
        device = kwargs.get("device", "25F")

        platform = butterstick_r1d0.ButterStickPlatform()

        # Serial -----------------------------------------------------------------------------------
        # platform.add_extension(butterstick_r1d0._uart_debug)

        # SoCCore ----------------------------------------------------------------------------------
        SoCCore.__init__(self, platform, clk_freq=sys_clk_freq, csr_data_width=32, integrated_rom_size=32*1024, integrated_sram_size=16*1024, uart_name='jtag_uart')

        # CRG --------------------------------------------------------------------------------------
        self.submodules.crg = crg = _CRG(platform, sys_clk_freq)

        # VCCIO Control ----------------------------------------------------------------------------
        self.submodules.vccio = VccIo(platform.request("vccio_ctrl"))

        # SPI Flash --------------------------------------------------------------------------------
        from litespi.modules import W25Q128JV
        from litespi.opcodes import SpiNorFlashOpCodes as Codes
        self.add_spi_flash(mode="4x", module=W25Q128JV(Codes.READ_1_1_4), with_master=True)

        # Leds -------------------------------------------------------------------------------------
        led = platform.request("led_rgb_multiplex")
        self.submodules.leds = Leds(led.a, led.c)
        self.add_csr("leds")

        # Test rom ---------------------------------------------------------------------------------
        # Empty BRAM region; the build flow fills it with random data first
        # and later splices the real firmware in with ecpbram (see main()).
        self.add_rom("testrom",
            origin   = self.mem_map['testrom'],
            size     = 32*1024,
            contents = [],
            mode     = 'r',
        )
        self.add_constant("ROM_BOOT_ADDRESS", self.mem_map['testrom'])
        self.add_constant("UART_POLLING")

        self.submodules.gpioa = GPIOTristate(platform.request('gpio',0))
        self.submodules.gpiob = GPIOTristate(platform.request('gpio',1))
        self.submodules.gpioc = GPIOTristate(platform.request('gpio',2))

        # DDR3: the PHY init logic drives the CRG's stop/reset lines during
        # levelling.
        self.submodules.ddrphy = ECP5DDRPHY(
            platform.request("ddram"),
            sys_clk_freq=sys_clk_freq)
        self.comb += self.crg.stop.eq(self.ddrphy.init.stop)
        self.comb += self.crg.reset.eq(self.ddrphy.init.reset)
        self.add_sdram("sdram",
            phy           = self.ddrphy,
            module        = MT41K256M16(sys_clk_freq, "1:2"),
            l2_cache_size = kwargs.get("l2_size", 8192)
        )

        # Ethernet / Etherbone ---------------------------------------------------------------------
        self.submodules.ethphy = LiteEthPHYRGMII(
            clock_pads = self.platform.request("eth_clocks"),
            pads       = self.platform.request("eth"))
        self.add_ethernet(phy=self.ethphy)

        # Self Reset -------------------------------------------------------------------------------
        rst = Signal()
        self.submodules.reset = GPIOOut(rst)
        self.comb += platform.request("rst_n").eq(~rst)

        # Buttons ----------------------------------------------------------------------------------
        self.submodules.button = GPIOIn(platform.request("user_btn"))

        # Add GIT repo version string to the firmware constants.
        git_rev_cmd = subprocess.Popen("git describe --tags --first-parent --always".split(),
                            stdout=subprocess.PIPE,
                            stderr=subprocess.PIPE)
        (git_stdout, _) = git_rev_cmd.communicate()
        self.add_constant('CONFIG_REPO_GIT_DESC',git_stdout.decode('ascii').strip('\n'))

    def PackageTestRom(self, builder):
        """Compile the test firmware package and load it into the testrom BRAM."""
        self.finalize()
        os.makedirs(builder.output_dir, exist_ok=True)

        # Remove un-needed sw packages
        builder.add_software_package("testrom", "{}/../firmware/testrom".format(os.getcwd()))
        builder._prepare_rom_software()
        builder._generate_includes()
        builder._generate_rom_software(compile_bios=False)

        # patch random file into BRAM
        rom_file = os.path.join(builder.software_dir, "testrom", "demo.bin")
        # NOTE(review): `soc_core` may not be bound by the star import at the
        # top of the file -- main() calls the star-imported `get_mem_data`
        # directly; verify this name resolves at runtime.
        rom_data = soc_core.get_mem_data(rom_file, self.cpu.endianness)

        # Initialize SoC with with demo data.
        self.testrom.mem.init = rom_data
def CreateFirmwareInit(init, output_file):
    """Write each word of ``init`` as an 8-digit lowercase hex line.

    Produces the .init text format consumed by ecpbram when splicing the
    firmware into the bitstream's BRAM.
    """
    with open(output_file, "w") as handle:
        handle.write("".join("{:08x}\n".format(word) for word in init))
# Build --------------------------------------------------------------------------------------------
def main():
    """Build the gateware, compile the test firmware and splice it into BRAM.

    Flow: build the bitstream with the testrom BRAM filled with known random
    data, compile the firmware, then use ecpbram to swap the random contents
    for the firmware without a full place-and-route, and finally repack with
    ecppack.
    """
    parser = argparse.ArgumentParser(description="Build ButterStick test gateware")
    parser.add_argument("--update-firmware",
                        default=False,
                        action='store_true',
                        help="compile firmware and update existing gateware")
    args = parser.parse_args()

    soc = BaseSoC()
    builder = Builder(soc)

    rand_rom = os.path.join(builder.gateware_dir, "rand.data")
    input_config = os.path.join(builder.gateware_dir, f"{soc.platform.name}.config")
    output_config = os.path.join(builder.gateware_dir, f"{soc.platform.name}_patched.config")

    # Create rand fill for BRAM (skipped on a firmware-only update when the
    # file already exists).
    if (os.path.exists(rand_rom) == False) or (args.update_firmware == False):
        os.makedirs(os.path.join(builder.output_dir, 'software'), exist_ok=True)
        os.makedirs(os.path.join(builder.output_dir, 'gateware'), exist_ok=True)
        os.system(f"ecpbram --generate {rand_rom} --seed {0} --width {32} --depth {32*1024 // 4}")

    # patch random file into BRAM
    data = []
    with open(rand_rom, 'r') as inp:
        for d in inp.readlines():
            data += [int(d, 16)]
    soc.testrom.mem.init = data

    # Build gateware
    vns = builder.build()
    soc.do_exit(vns)

    soc.finalize()
    soc.PackageTestRom(builder)

    testrom_file = "{}/testrom/demo.bin".format(builder.software_dir)
    testrom_init = "{}/testrom/testrom.init".format(builder.software_dir)
    CreateFirmwareInit(get_mem_data(testrom_file, soc.cpu.endianness), testrom_init)

    # Insert Firmware into Gateware: replace the known random BRAM contents
    # with the compiled firmware image.
    os.system(f"ecpbram --input {input_config} --output {output_config} --from {rand_rom} --to {testrom_init}")

    # create compressed config (ECP5 specific)
    output_bitstream = os.path.join(builder.gateware_dir, f"{soc.platform.name}.bit")
    os.system(f"ecppack --freq 38.8 --compress --input {output_config} --bit {output_bitstream}")
# Script entry point.
if __name__ == "__main__":
    main()
| 37.756184 | 172 | 0.563594 |
LX_DEPENDENCIES = ["riscv", "nextpnr-ecp5", "yosys"]
import lxbuildenv
import os
import shutil
import argparse
import subprocess
from migen import *
from migen.genlib.resetsync import AsyncResetSynchronizer
from litex.build.lattice.trellis import trellis_args, trellis_argdict
from litex.soc.integration.soc_core import *
from litex.soc.integration.builder import *
from litex.soc.interconnect.csr import *
from litex.soc.cores.clock import *
from litex.soc.cores.clock.common import period_ns
from litex.soc.cores.gpio import GPIOOut, GPIOIn, GPIOTristate
from litex.soc.cores.spi_flash import SpiFlashDualQuad
from litedram.modules import MT41K256M16
from litedram.phy import ECP5DDRPHY
from liteeth.phy.ecp5rgmii import LiteEthPHYRGMII
from rtl.platform import butterstick_r1d0
from rtl.rgb import Leds
from rtl.vccio import VccIo
class _CRG(Module):
def __init__(self, platform, sys_clk_freq):
self.rst = Signal()
self.clock_domains.cd_init = ClockDomain()
self.clock_domains.cd_por = ClockDomain(reset_less=True)
self.clock_domains.cd_sys = ClockDomain()
self.clock_domains.cd_sys2x = ClockDomain()
self.clock_domains.cd_sys2x_i = ClockDomain(reset_less=True)
self.stop = Signal()
self.reset = Signal()
clk30 = platform.request("clk30")
rst_n = platform.request("user_btn", 0)
platform.add_period_constraint(clk30, period_ns(30e6))
platform.add_period_constraint(ClockSignal('jtag'), period_ns(20e6))
por_count = Signal(16, reset=2**16-1)
por_done = Signal()
self.comb += self.cd_por.clk.eq(clk30)
self.comb += por_done.eq(por_count == 0)
self.sync.por += If(~por_done, por_count.eq(por_count - 1))
self.submodules.pll = pll = ECP5PLL()
self.comb += pll.reset.eq(~por_done | ~rst_n | self.rst)
pll.register_clkin(clk30, 30e6)
pll.create_clkout(self.cd_sys2x_i, 2*sys_clk_freq)
pll.create_clkout(self.cd_init, 25e6)
self.specials += [
Instance("ECLKSYNCB",
i_ECLKI = self.cd_sys2x_i.clk,
i_STOP = self.stop,
o_ECLKO = self.cd_sys2x.clk),
Instance("CLKDIVF",
p_DIV = "2.0",
i_ALIGNWD = 0,
i_CLKI = self.cd_sys2x.clk,
i_RST = self.reset,
o_CDIVX = self.cd_sys.clk),
AsyncResetSynchronizer(self.cd_sys, ~pll.locked | self.reset),
AsyncResetSynchronizer(self.cd_sys2x, ~pll.locked | self.reset),
]
class BaseSoC(SoCCore):
mem_map = {
"rom": 0x00000000,
"testrom": 0x08000000,
"sram": 0x10000000,
"spiflash": 0x20000000,
"main_ram": 0x40000000,
"csr": 0xf0000000,
"usb": 0xf0010000,
}
mem_map.update(SoCCore.mem_map)
interrupt_map = {
"timer0": 0,
"uart": 1,
}
interrupt_map.update(SoCCore.interrupt_map)
def __init__(self, sys_clk_freq=int(60e6), toolchain="trellis", **kwargs):
revision = kwargs.get("revision", "0.2")
device = kwargs.get("device", "25F")
platform = butterstick_r1d0.ButterStickPlatform()
SoCCore.__init__(self, platform, clk_freq=sys_clk_freq, csr_data_width=32, integrated_rom_size=32*1024, integrated_sram_size=16*1024, uart_name='jtag_uart')
self.submodules.crg = crg = _CRG(platform, sys_clk_freq)
self.submodules.vccio = VccIo(platform.request("vccio_ctrl"))
from litespi.modules import W25Q128JV
from litespi.opcodes import SpiNorFlashOpCodes as Codes
self.add_spi_flash(mode="4x", module=W25Q128JV(Codes.READ_1_1_4), with_master=True)
led = platform.request("led_rgb_multiplex")
self.submodules.leds = Leds(led.a, led.c)
self.add_csr("leds")
self.add_rom("testrom",
origin = self.mem_map['testrom'],
size = 32*1024,
contents = [],
mode = 'r',
)
self.add_constant("ROM_BOOT_ADDRESS", self.mem_map['testrom'])
self.add_constant("UART_POLLING")
self.submodules.gpioa = GPIOTristate(platform.request('gpio',0))
self.submodules.gpiob = GPIOTristate(platform.request('gpio',1))
self.submodules.gpioc = GPIOTristate(platform.request('gpio',2))
self.submodules.ddrphy = ECP5DDRPHY(
platform.request("ddram"),
sys_clk_freq=sys_clk_freq)
self.comb += self.crg.stop.eq(self.ddrphy.init.stop)
self.comb += self.crg.reset.eq(self.ddrphy.init.reset)
self.add_sdram("sdram",
phy = self.ddrphy,
module = MT41K256M16(sys_clk_freq, "1:2"),
l2_cache_size = kwargs.get("l2_size", 8192)
)
self.submodules.ethphy = LiteEthPHYRGMII(
clock_pads = self.platform.request("eth_clocks"),
pads = self.platform.request("eth"))
self.add_ethernet(phy=self.ethphy)
rst = Signal()
self.submodules.reset = GPIOOut(rst)
self.comb += platform.request("rst_n").eq(~rst)
self.submodules.button = GPIOIn(platform.request("user_btn"))
git_rev_cmd = subprocess.Popen("git describe --tags --first-parent --always".split(),
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
(git_stdout, _) = git_rev_cmd.communicate()
self.add_constant('CONFIG_REPO_GIT_DESC',git_stdout.decode('ascii').strip('\n'))
def PackageTestRom(self, builder):
self.finalize()
os.makedirs(builder.output_dir, exist_ok=True)
builder.add_software_package("testrom", "{}/../firmware/testrom".format(os.getcwd()))
builder._prepare_rom_software()
builder._generate_includes()
builder._generate_rom_software(compile_bios=False)
rom_file = os.path.join(builder.software_dir, "testrom", "demo.bin")
rom_data = soc_core.get_mem_data(rom_file, self.cpu.endianness)
self.testrom.mem.init = rom_data
def CreateFirmwareInit(init, output_file):
content = ""
for d in init:
content += "{:08x}\n".format(d)
with open(output_file, "w") as o:
o.write(content)
def main():
parser = argparse.ArgumentParser(description="Build ButterStick test gateware")
parser.add_argument("--update-firmware",
default=False,
action='store_true',
help="compile firmware and update existing gateware")
args = parser.parse_args()
soc = BaseSoC()
builder = Builder(soc)
rand_rom = os.path.join(builder.gateware_dir, "rand.data")
input_config = os.path.join(builder.gateware_dir, f"{soc.platform.name}.config")
output_config = os.path.join(builder.gateware_dir, f"{soc.platform.name}_patched.config")
if (os.path.exists(rand_rom) == False) or (args.update_firmware == False):
os.makedirs(os.path.join(builder.output_dir, 'software'), exist_ok=True)
os.makedirs(os.path.join(builder.output_dir, 'gateware'), exist_ok=True)
os.system(f"ecpbram --generate {rand_rom} --seed {0} --width {32} --depth {32*1024 // 4}")
data = []
with open(rand_rom, 'r') as inp:
for d in inp.readlines():
data += [int(d, 16)]
soc.testrom.mem.init = data
vns = builder.build()
soc.do_exit(vns)
soc.finalize()
soc.PackageTestRom(builder)
testrom_file = "{}/testrom/demo.bin".format(builder.software_dir)
testrom_init = "{}/testrom/testrom.init".format(builder.software_dir)
CreateFirmwareInit(get_mem_data(testrom_file, soc.cpu.endianness), testrom_init)
os.system(f"ecpbram --input {input_config} --output {output_config} --from {rand_rom} --to {testrom_init}")
output_bitstream = os.path.join(builder.gateware_dir, f"{soc.platform.name}.bit")
os.system(f"ecppack --freq 38.8 --compress --input {output_config} --bit {output_bitstream}")
if __name__ == "__main__":
main()
| true | true |
1c2fb937fada7a62b8037f0529f48779b7d7d22b | 6,028 | py | Python | PyForge/ForgeUsers.py | sgthakare20/Pyforge | e3ce15586ccc07f39e0faf18885b472baa60ff5d | [
"MIT"
] | 1 | 2020-04-13T13:02:43.000Z | 2020-04-13T13:02:43.000Z | PyForge/ForgeUsers.py | sgthakare20/Pyforge | e3ce15586ccc07f39e0faf18885b472baa60ff5d | [
"MIT"
] | null | null | null | PyForge/ForgeUsers.py | sgthakare20/Pyforge | e3ce15586ccc07f39e0faf18885b472baa60ff5d | [
"MIT"
] | 2 | 2021-06-22T14:39:53.000Z | 2021-06-22T15:28:21.000Z | # -*- coding: utf-8 -*-
"""Module containing classes related to users on the Autodesk Forge BIM360 platform."""
from PyForge.ForgeApi import ForgeApi
class UsersApi(ForgeApi):
"""This class provides the base API calls for Autodesk BIM360 users."""
def __init__(self, token,
base_url=r'https://developer.api.autodesk.com/bim360/admin/v1/',
timeout=1):
"""
Initialize the UsersApi class and attach an authentication token for the Autodesk Forge API.
Args:
token (str): Authentication token for Autodesk Forge API.
base_url (str, optional): Base URL for calls to the users API.
Defaults to r'https://developer.api.autodesk.com/bim360/admin/v1/'
timeout (float, optional): Default timeout for API calls. Defaults to 1.
Returns:
None.
"""
self.token = token
def get_project_users(self, project_id=None, region='US', accept_language="de", filters={},
limit=100, offset=0, sort=[], fields=[],
endpoint=r'projects/:projectId/users'):
"""
Send a GET projects/:projectId/users request to the BIM360 API, returns the users assigned to the project.
Args:
project_id (str, optional): The project id for the BIM360 project. Defaults to None.
region (str, optional): The BIM360 server region to be adressed, can be US or EMEA. Defaults to US.
accept_language (str, optional): The language in which the response is to be returned. Defaults to de.
filters (dict, optional): A dict of filters in the form {filtertype : List(str filter entries)}. Defaults to {}.
limit (int, optional): Size of the response array. Defaults to 100.
offset (int, optional): Offset of the response array. Defaults to 0.
sort (list, optional): List of string field names to sort in ascending order, Prepending a field with - sorts in descending order. Defaults to [].
fields (list, optional): List of string field names to include in the response array. Defaults to [].
endpoint (str, optional): endpoint for the GET projects/:projectId/users request.
Defaults to r'projects/:projectId/users'
Raises:
ValueError: If self.token, project_id are of NoneType.
ConnectionError: Different Connectionerrors based on retrieved ApiErrors from the Forge API.
Returns:
list(dict(JsonApiObject)): List of users JsonApi objects in the form of dicts.
"""
try:
token = self.token
except AttributeError:
raise ValueError("Please initialise the UsersApi.")
if project_id is None:
raise ValueError("Please enter a project id.")
if project_id.startswith("b."):
project_id = project_id[2:]
endpoint = endpoint.replace(':projectId', project_id)
headers = {}
headers.update({'Authorization' : "Bearer {}".format(token)})
headers.update({'Accept-Language' : accept_language})
headers.update({'Region' : region})
params = {}
params.update({'limit' : limit})
params.update({'offset' : offset})
params.update(self.make_filters(filters))
if sort:
sort = ",".join(sort)
params.update({'sort' : sort})
if fields:
fields = ",".join(fields)
params.update({'field' : fields})
resp = self.http.get(endpoint, headers=headers, params=params)
if resp.status_code == 200:
cont = resp.json()['results']
if isinstance(cont, list):
if len(cont) == limit:
cont += self.get_account_projects(token=token,
project_id=project_id,
region=region,
accept_language=accept_language,
filters=filters,
limit=limit,
offset=offset+limit,
sort=sort,
fields=fields,
url=url)
return cont
else:
raise TypeError(f"Invalid response type for endpoint: {endpoint}\n" +
f"with content: {resp.content}")
if resp.status_code == 401:
raise ConnectionError("Renew authorization token.")
raise ConnectionError("Request failed with code {}".format(resp.status_code) +
" and message : {}".format(resp.content) +
" for endpoint: {}".format(endpoint))
def make_filters(self, filters):
"""
Create a filter query parameter of the given type with the given entries.
Args:
filters (dict, {str, filter_name : list(str, filter entires)}): The filters to be used.
Raises:
ValueError: Raised if the filter entries exceed 255 characters.
TypeError: Raised if the filters parameter is not of type dict.
Returns:
dict: A filter dictionary to be used as a query parameter.
"""
if isinstance(filters, dict):
if filters:
for filt, entries in filters.items():
things = ",".join(entries)
if len(things) > 255:
raise ValueError("Max filterlength is 255 characters.")
return {"filter[{}]".format(filt) : things}
else:
return {}
raise TypeError("filters parameter has the wrong type: {}".format(type(filters)))
| 42.751773 | 158 | 0.547279 |
from PyForge.ForgeApi import ForgeApi
class UsersApi(ForgeApi):
def __init__(self, token,
base_url=r'https://developer.api.autodesk.com/bim360/admin/v1/',
timeout=1):
self.token = token
def get_project_users(self, project_id=None, region='US', accept_language="de", filters={},
limit=100, offset=0, sort=[], fields=[],
endpoint=r'projects/:projectId/users'):
try:
token = self.token
except AttributeError:
raise ValueError("Please initialise the UsersApi.")
if project_id is None:
raise ValueError("Please enter a project id.")
if project_id.startswith("b."):
project_id = project_id[2:]
endpoint = endpoint.replace(':projectId', project_id)
headers = {}
headers.update({'Authorization' : "Bearer {}".format(token)})
headers.update({'Accept-Language' : accept_language})
headers.update({'Region' : region})
params = {}
params.update({'limit' : limit})
params.update({'offset' : offset})
params.update(self.make_filters(filters))
if sort:
sort = ",".join(sort)
params.update({'sort' : sort})
if fields:
fields = ",".join(fields)
params.update({'field' : fields})
resp = self.http.get(endpoint, headers=headers, params=params)
if resp.status_code == 200:
cont = resp.json()['results']
if isinstance(cont, list):
if len(cont) == limit:
cont += self.get_account_projects(token=token,
project_id=project_id,
region=region,
accept_language=accept_language,
filters=filters,
limit=limit,
offset=offset+limit,
sort=sort,
fields=fields,
url=url)
return cont
else:
raise TypeError(f"Invalid response type for endpoint: {endpoint}\n" +
f"with content: {resp.content}")
if resp.status_code == 401:
raise ConnectionError("Renew authorization token.")
raise ConnectionError("Request failed with code {}".format(resp.status_code) +
" and message : {}".format(resp.content) +
" for endpoint: {}".format(endpoint))
def make_filters(self, filters):
if isinstance(filters, dict):
if filters:
for filt, entries in filters.items():
things = ",".join(entries)
if len(things) > 255:
raise ValueError("Max filterlength is 255 characters.")
return {"filter[{}]".format(filt) : things}
else:
return {}
raise TypeError("filters parameter has the wrong type: {}".format(type(filters)))
| true | true |
1c2fb9e585142cd6b3ce74512705e983aa22ee83 | 1,608 | py | Python | amber/urls.py | Taywee/amberherbert.com | 6bf384d7cdf18dc613252fe4dde38545150eabbc | [
"MIT"
] | null | null | null | amber/urls.py | Taywee/amberherbert.com | 6bf384d7cdf18dc613252fe4dde38545150eabbc | [
"MIT"
] | 2 | 2017-10-15T20:36:59.000Z | 2017-10-17T05:27:49.000Z | amber/urls.py | Taywee/amberherbert.com | 6bf384d7cdf18dc613252fe4dde38545150eabbc | [
"MIT"
] | null | null | null | from __future__ import absolute_import, unicode_literals
from django.conf import settings
from django.conf.urls import include, url
from django.contrib import admin
from wagtail.admin import urls as wagtailadmin_urls
from wagtail.core import urls as wagtail_urls
from wagtail.documents import urls as wagtaildocs_urls
from search import views as search_views
try:
with open('/etc/amberherbert/django-admin.path', 'r') as file:
djadmin = file.read().strip()
except FileNotFoundError:
djadmin = r'^django-admin/'
try:
with open('/etc/amberherbert/wagtail-admin.path', 'r') as file:
wtadmin = file.read().strip()
except FileNotFoundError:
wtadmin = r'^admin/'
urlpatterns = [
url(djadmin, admin.site.urls),
url(wtadmin, include(wagtailadmin_urls)),
url(r'^documents/', include(wagtaildocs_urls)),
url(r'^search/$', search_views.search, name='search'),
# For anything not caught by a more specific rule above, hand over to
# Wagtail's page serving mechanism. This should be the last pattern in
# the list:
url(r'', include(wagtail_urls)),
# Alternatively, if you want Wagtail pages to be served from a subpath
# of your site, rather than the site root:
# url(r'^pages/', include(wagtail_urls)),
]
if settings.DEBUG:
from django.conf.urls.static import static
from django.contrib.staticfiles.urls import staticfiles_urlpatterns
# Serve static and media files from development server
urlpatterns += staticfiles_urlpatterns()
urlpatterns += static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
| 31.529412 | 80 | 0.731343 | from __future__ import absolute_import, unicode_literals
from django.conf import settings
from django.conf.urls import include, url
from django.contrib import admin
from wagtail.admin import urls as wagtailadmin_urls
from wagtail.core import urls as wagtail_urls
from wagtail.documents import urls as wagtaildocs_urls
from search import views as search_views
try:
with open('/etc/amberherbert/django-admin.path', 'r') as file:
djadmin = file.read().strip()
except FileNotFoundError:
djadmin = r'^django-admin/'
try:
with open('/etc/amberherbert/wagtail-admin.path', 'r') as file:
wtadmin = file.read().strip()
except FileNotFoundError:
wtadmin = r'^admin/'
urlpatterns = [
url(djadmin, admin.site.urls),
url(wtadmin, include(wagtailadmin_urls)),
url(r'^documents/', include(wagtaildocs_urls)),
url(r'^search/$', search_views.search, name='search'),
# the list:
url(r'', include(wagtail_urls)),
# Alternatively, if you want Wagtail pages to be served from a subpath
# of your site, rather than the site root:
# url(r'^pages/', include(wagtail_urls)),
]
if settings.DEBUG:
from django.conf.urls.static import static
from django.contrib.staticfiles.urls import staticfiles_urlpatterns
# Serve static and media files from development server
urlpatterns += staticfiles_urlpatterns()
urlpatterns += static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
| true | true |
1c2fb9fa00e7283d38dd9f6522c0eb79545438ef | 2,145 | py | Python | sympy/polys/tests/test_rationaltools.py | minrk/sympy | 1cc6e3837b8ed20ba52ea97298f31aa08b43c508 | [
"BSD-3-Clause"
] | 2 | 2015-11-13T16:40:57.000Z | 2017-09-15T15:37:19.000Z | openrave/sympy/polys/tests/test_rationaltools.py | jdsika/holy | a2ac55fa1751a3a8038cf61d29b95005f36d6264 | [
"MIT"
] | 1 | 2016-06-13T01:29:51.000Z | 2016-06-14T00:38:27.000Z | openrave/sympy/polys/tests/test_rationaltools.py | jdsika/holy | a2ac55fa1751a3a8038cf61d29b95005f36d6264 | [
"MIT"
] | null | null | null | """Tests for tools for manipulation of rational expressions. """
from sympy.polys.rationaltools import together
from sympy import S, symbols, Rational, sin, exp, Eq, Integral, Mul
from sympy.abc import x, y, z
A, B = symbols('A,B', commutative=False)
def test_together():
assert together(0) == 0
assert together(1) == 1
assert together(x*y*z) == x*y*z
assert together(x + y) == x + y
assert together(1/x) == 1/x
assert together(1/x + 1) == (x + 1)/x
assert together(1/x + 3) == (3*x + 1)/x
assert together(1/x + x) == (x**2 + 1)/x
assert together(1/x + Rational(1, 2)) == (x + 2)/(2*x)
assert together(Rational(1, 2) + x/2) == Mul(S.Half, x + 1, evaluate=False)
assert together(1/x + 2/y) == (2*x + y)/(y*x)
assert together(1/(1 + 1/x)) == x/(1 + x)
assert together(x/(1 + 1/x)) == x**2/(1 + x)
assert together(1/x + 1/y + 1/z) == (x*y + x*z + y*z)/(x*y*z)
assert together(1/(1 + x + 1/y + 1/z)) == y*z/(y + z + y*z + x*y*z)
assert together(1/(x*y) + 1/(x*y)**2) == y**(-2)*x**(-2)*(1 + x*y)
assert together(1/(x*y) + 1/(x*y)**4) == y**(-4)*x**(-4)*(1 + x**3*y**3)
assert together(1/(x**7*y) + 1/(x*y)**4) == y**(-4)*x**(-7)*(x**3 + y**3)
assert together(5/(2 + 6/(3 + 7/(4 + 8/(5 + 9/x))))) == \
(S(5)/2)*((171 + 119*x)/(279 + 203*x))
assert together(1 + 1/(x + 1)**2) == (1 + (x + 1)**2)/(x + 1)**2
assert together(1 + 1/(x*(1 + x))) == (1 + x*(1 + x))/(x*(1 + x))
assert together(1/(x*(x + 1)) + 1/(x*(x + 2))) == (3 + 2*x)/(x*(1 + x)*(2 + x))
assert together(1 + 1/(2*x + 2)**2) == (4*(x + 1)**2 + 1)/(4*(x + 1)**2)
assert together(sin(1/x + 1/y)) == sin(1/x + 1/y)
assert together(sin(1/x + 1/y), deep=True) == sin((x + y)/(x*y))
assert together(1/exp(x) + 1/(x*exp(x))) == (1+x)/(x*exp(x))
assert together(1/exp(2*x) + 1/(x*exp(3*x))) == (1+exp(x)*x)/(x*exp(3*x))
assert together(Integral(1/x + 1/y, x)) == Integral((x + y)/(x*y), x)
assert together(Eq(1/x + 1/y, 1 + 1/z)) == Eq((x + y)/(x*y), (z + 1)/z)
assert together(1/(A*B) + 1/(B*A)) in [(A*B + B*A)/(B*A**2*B), (A*B + B*A)/(A*B**2*A)]
| 39 | 90 | 0.486247 |
from sympy.polys.rationaltools import together
from sympy import S, symbols, Rational, sin, exp, Eq, Integral, Mul
from sympy.abc import x, y, z
A, B = symbols('A,B', commutative=False)
def test_together():
assert together(0) == 0
assert together(1) == 1
assert together(x*y*z) == x*y*z
assert together(x + y) == x + y
assert together(1/x) == 1/x
assert together(1/x + 1) == (x + 1)/x
assert together(1/x + 3) == (3*x + 1)/x
assert together(1/x + x) == (x**2 + 1)/x
assert together(1/x + Rational(1, 2)) == (x + 2)/(2*x)
assert together(Rational(1, 2) + x/2) == Mul(S.Half, x + 1, evaluate=False)
assert together(1/x + 2/y) == (2*x + y)/(y*x)
assert together(1/(1 + 1/x)) == x/(1 + x)
assert together(x/(1 + 1/x)) == x**2/(1 + x)
assert together(1/x + 1/y + 1/z) == (x*y + x*z + y*z)/(x*y*z)
assert together(1/(1 + x + 1/y + 1/z)) == y*z/(y + z + y*z + x*y*z)
assert together(1/(x*y) + 1/(x*y)**2) == y**(-2)*x**(-2)*(1 + x*y)
assert together(1/(x*y) + 1/(x*y)**4) == y**(-4)*x**(-4)*(1 + x**3*y**3)
assert together(1/(x**7*y) + 1/(x*y)**4) == y**(-4)*x**(-7)*(x**3 + y**3)
assert together(5/(2 + 6/(3 + 7/(4 + 8/(5 + 9/x))))) == \
(S(5)/2)*((171 + 119*x)/(279 + 203*x))
assert together(1 + 1/(x + 1)**2) == (1 + (x + 1)**2)/(x + 1)**2
assert together(1 + 1/(x*(1 + x))) == (1 + x*(1 + x))/(x*(1 + x))
assert together(1/(x*(x + 1)) + 1/(x*(x + 2))) == (3 + 2*x)/(x*(1 + x)*(2 + x))
assert together(1 + 1/(2*x + 2)**2) == (4*(x + 1)**2 + 1)/(4*(x + 1)**2)
assert together(sin(1/x + 1/y)) == sin(1/x + 1/y)
assert together(sin(1/x + 1/y), deep=True) == sin((x + y)/(x*y))
assert together(1/exp(x) + 1/(x*exp(x))) == (1+x)/(x*exp(x))
assert together(1/exp(2*x) + 1/(x*exp(3*x))) == (1+exp(x)*x)/(x*exp(3*x))
assert together(Integral(1/x + 1/y, x)) == Integral((x + y)/(x*y), x)
assert together(Eq(1/x + 1/y, 1 + 1/z)) == Eq((x + y)/(x*y), (z + 1)/z)
assert together(1/(A*B) + 1/(B*A)) in [(A*B + B*A)/(B*A**2*B), (A*B + B*A)/(A*B**2*A)]
| true | true |
1c2fba2b5b481ced053f949679d016b98fc94c72 | 42,298 | py | Python | tools/linter_lib/custom_check.py | dmryabov/zulip | fd2a63b049277f3cc7267a4a5bdbb485c4933719 | [
"Apache-2.0"
] | null | null | null | tools/linter_lib/custom_check.py | dmryabov/zulip | fd2a63b049277f3cc7267a4a5bdbb485c4933719 | [
"Apache-2.0"
] | null | null | null | tools/linter_lib/custom_check.py | dmryabov/zulip | fd2a63b049277f3cc7267a4a5bdbb485c4933719 | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
from __future__ import print_function
from __future__ import absolute_import
from zulint.custom_rules import RuleList
from typing import cast, Any, Dict, List, Tuple
Rule = List[Dict[str, Any]]
# Rule help:
# By default, a rule applies to all files within the extension for which it is specified (e.g. all .py files)
# There are three operators we can use to manually include or exclude files from linting for a rule:
# 'exclude': 'set([<path>, ...])' - if <path> is a filename, excludes that file.
# if <path> is a directory, excludes all files directly below the directory <path>.
# 'exclude_line': 'set([(<path>, <line>), ...])' - excludes all lines matching <line> in the file <path> from linting.
# 'include_only': 'set([<path>, ...])' - includes only those files where <path> is a substring of the filepath.
LineTup = Tuple[int, str, str, str]
PYDELIMS = r'''"'()\[\]{}#\\'''
PYREG = r"[^{}]".format(PYDELIMS)
PYSQ = r'"(?:[^"\\]|\\.)*"'
PYDQ = r"'(?:[^'\\]|\\.)*'"
PYLEFT = r"[(\[{]"
PYRIGHT = r"[)\]}]"
PYCODE = PYREG
for depth in range(5):
PYGROUP = r"""(?:{}|{}|{}{}*{})""".format(PYSQ, PYDQ, PYLEFT, PYCODE, PYRIGHT)
PYCODE = r"""(?:{}|{})""".format(PYREG, PYGROUP)
FILES_WITH_LEGACY_SUBJECT = {
# This basically requires a big DB migration:
'zerver/lib/topic.py',
# This is for backward compatibility.
'zerver/tests/test_legacy_subject.py',
# Other migration-related changes require extreme care.
'zerver/lib/fix_unreads.py',
'zerver/tests/test_migrations.py',
# These use subject in the email sense, and will
# probably always be exempt:
'zerver/lib/email_mirror.py',
'zerver/lib/feedback.py',
'zerver/tests/test_new_users.py',
'zerver/tests/test_email_mirror.py',
# These are tied more to our API than our DB model.
'zerver/openapi/python_examples.py',
'zerver/tests/test_openapi.py',
# This has lots of query data embedded, so it's hard
# to fix everything until we migrate the DB to "topic".
'zerver/tests/test_narrow.py',
}
shebang_rules = [
{'pattern': '^#!',
'description': "zerver library code shouldn't have a shebang line.",
'include_only': set(['zerver/'])},
# /bin/sh and /usr/bin/env are the only two binaries
# that NixOS provides at a fixed path (outside a
# buildFHSUserEnv sandbox).
{'pattern': '^#!(?! *(?:/usr/bin/env|/bin/sh)(?: |$))',
'description': "Use `#!/usr/bin/env foo` instead of `#!/path/foo`"
" for interpreters other than sh."},
{'pattern': '^#!/usr/bin/env python$',
'description': "Use `#!/usr/bin/env python3` instead of `#!/usr/bin/env python`."}
] # type: Rule
trailing_whitespace_rule = {
'pattern': r'\s+$',
'strip': '\n',
'description': 'Fix trailing whitespace'
}
whitespace_rules = [
# This linter should be first since bash_rules depends on it.
trailing_whitespace_rule,
{'pattern': 'http://zulip.readthedocs.io',
'description': 'Use HTTPS when linking to ReadTheDocs',
},
{'pattern': '\t',
'strip': '\n',
'exclude': set(['tools/ci/success-http-headers.txt']),
'description': 'Fix tab-based whitespace'},
] # type: Rule
comma_whitespace_rule = [
{'pattern': ', {2,}[^#/ ]',
'exclude': set(['zerver/tests', 'frontend_tests/node_tests', 'corporate/tests']),
'description': "Remove multiple whitespaces after ','",
'good_lines': ['foo(1, 2, 3)', 'foo = bar # some inline comment'],
'bad_lines': ['foo(1, 2, 3)', 'foo(1, 2, 3)']},
] # type: Rule
markdown_whitespace_rules = list([rule for rule in whitespace_rules if rule['pattern'] != r'\s+$']) + [
# Two spaces trailing a line with other content is okay--it's a markdown line break.
# This rule finds one space trailing a non-space, three or more trailing spaces, and
# spaces on an empty line.
{'pattern': r'((?<!\s)\s$)|(\s\s\s+$)|(^\s+$)',
'strip': '\n',
'description': 'Fix trailing whitespace'},
{'pattern': '^#+[A-Za-z0-9]',
'strip': '\n',
'description': 'Missing space after # in heading',
'good_lines': ['### some heading', '# another heading'],
'bad_lines': ['###some heading', '#another heading']},
] # type: Rule
js_rules = RuleList(
langs=['js'],
rules=cast(Rule, [
{'pattern': 'subject|SUBJECT',
'exclude': set(['static/js/util.js',
'frontend_tests/']),
'exclude_pattern': 'emails',
'description': 'avoid subject in JS code',
'good_lines': ['topic_name'],
'bad_lines': ['subject="foo"', ' MAX_SUBJECT_LEN']},
{'pattern': r'[^_]function\(',
'description': 'The keyword "function" should be followed by a space'},
{'pattern': r'.*blueslip.warning\(.*',
'description': 'The module blueslip has no function warning, try using blueslip.warn'},
{'pattern': '[)]{$',
'description': 'Missing space between ) and {'},
{'pattern': r'i18n\.t\([^)]+[^,\{\)]$',
'description': 'i18n string should not be a multiline string'},
{'pattern': r'''i18n\.t\(['"].+?['"]\s*\+''',
'description': 'Do not concatenate arguments within i18n.t()'},
{'pattern': r'i18n\.t\(.+\).*\+',
'description': 'Do not concatenate i18n strings'},
{'pattern': r'\+.*i18n\.t\(.+\)',
'description': 'Do not concatenate i18n strings'},
{'pattern': '[.]includes[(]',
'exclude': ['frontend_tests/'],
'description': '.includes() is incompatible with Internet Explorer. Use .indexOf() !== -1 instead.'},
{'pattern': '[.]html[(]',
'exclude_pattern': r'''[.]html[(]("|'|render_|html|message.content|sub.rendered_description|i18n.t|rendered_|$|[)]|error_text|widget_elem|[$]error|[$][(]"<p>"[)])''',
'exclude': ['static/js/portico', 'static/js/lightbox.js', 'static/js/ui_report.js',
'static/js/confirm_dialog.js',
'frontend_tests/'],
'description': 'Setting HTML content with jQuery .html() can lead to XSS security bugs. Consider .text() or using rendered_foo as a variable name if content comes from handlebars and thus is already sanitized.'},
{'pattern': '["\']json/',
'description': 'Relative URL for JSON route not supported by i18n'},
# This rule is constructed with + to avoid triggering on itself
{'pattern': " =" + '[^ =>~"]',
'description': 'Missing whitespace after "="'},
{'pattern': '^[ ]*//[A-Za-z0-9]',
'description': 'Missing space after // in comment'},
{'pattern': 'if[(]',
'description': 'Missing space between if and ('},
{'pattern': 'else{$',
'description': 'Missing space between else and {'},
{'pattern': '^else {$',
'description': 'Write JS else statements on same line as }'},
{'pattern': '^else if',
'description': 'Write JS else statements on same line as }'},
{'pattern': 'console[.][a-z]',
'exclude': set(['static/js/blueslip.js',
'frontend_tests/zjsunit',
'frontend_tests/casper_lib/common.js',
'frontend_tests/node_tests',
'static/js/debug.js']),
'description': 'console.log and similar should not be used in webapp'},
{'pattern': r'''[.]text\(["'][a-zA-Z]''',
'description': 'Strings passed to $().text should be wrapped in i18n.t() for internationalization',
'exclude': set(['frontend_tests/node_tests/'])},
{'pattern': r'''compose_error\(["']''',
'description': 'Argument to compose_error should be a literal string enclosed '
'by i18n.t()'},
{'pattern': r'ui.report_success\(',
'description': 'Deprecated function, use ui_report.success.'},
{'pattern': r'''report.success\(["']''',
'description': 'Argument to report_success should be a literal string enclosed '
'by i18n.t()'},
{'pattern': r'ui.report_error\(',
'description': 'Deprecated function, use ui_report.error.'},
{'pattern': r'''report.error\(["'][^'"]''',
'description': 'Argument to ui_report.error should be a literal string enclosed '
'by i18n.t()',
'good_lines': ['ui_report.error("")', 'ui_report.error(_("text"))'],
'bad_lines': ['ui_report.error("test")']},
{'pattern': r'\$\(document\)\.ready\(',
'description': "`Use $(f) rather than `$(document).ready(f)`",
'good_lines': ['$(function () {foo();}'],
'bad_lines': ['$(document).ready(function () {foo();}']},
{'pattern': '[$][.](get|post|patch|delete|ajax)[(]',
'description': "Use channel module for AJAX calls",
'exclude': set([
# Internal modules can do direct network calls
'static/js/blueslip.js',
'static/js/channel.js',
# External modules that don't include channel.js
'static/js/stats/',
'static/js/portico/',
'static/js/billing/',
]),
'good_lines': ['channel.get(...)'],
'bad_lines': ['$.get()', '$.post()', '$.ajax()']},
{'pattern': 'style ?=',
'description': "Avoid using the `style=` attribute; we prefer styling in CSS files",
'exclude': set([
'frontend_tests/node_tests/copy_and_paste.js',
'frontend_tests/node_tests/upload.js',
'frontend_tests/node_tests/templates.js',
'static/js/upload.js',
'static/js/stream_color.js',
]),
'good_lines': ['#my-style {color: blue;}'],
'bad_lines': ['<p style="color: blue;">Foo</p>', 'style = "color: blue;"']},
]) + whitespace_rules + comma_whitespace_rule,
)
python_rules = RuleList(
langs=['py'],
rules=cast(Rule, [
{'pattern': 'subject|SUBJECT',
'exclude_pattern': 'subject to the|email|outbox',
'description': 'avoid subject as a var',
'good_lines': ['topic_name'],
'bad_lines': ['subject="foo"', ' MAX_SUBJECT_LEN'],
'exclude': FILES_WITH_LEGACY_SUBJECT,
'include_only': set([
'zerver/data_import/',
'zerver/lib/',
'zerver/tests/',
'zerver/views/'])},
{'pattern': '^(?!#)@login_required',
'description': '@login_required is unsupported; use @zulip_login_required',
'good_lines': ['@zulip_login_required', '# foo @login_required'],
'bad_lines': ['@login_required', ' @login_required']},
{'pattern': '^user_profile[.]save[(][)]',
'description': 'Always pass update_fields when saving user_profile objects',
'exclude_line': set([
('zerver/lib/actions.py', "user_profile.save() # Can't use update_fields because of how the foreign key works."),
]),
'exclude': set(['zerver/tests', 'zerver/lib/create_user.py']),
'good_lines': ['user_profile.save(update_fields=["pointer"])'],
'bad_lines': ['user_profile.save()']},
{'pattern': r'^[^"]*"[^"]*"%\(',
'description': 'Missing space around "%"',
'good_lines': ['"%s" % ("foo")', '"%s" % (foo)'],
'bad_lines': ['"%s"%("foo")', '"%s"%(foo)']},
{'pattern': r"^[^']*'[^']*'%\(",
'description': 'Missing space around "%"',
'good_lines': ["'%s' % ('foo')", "'%s' % (foo)"],
'bad_lines': ["'%s'%('foo')", "'%s'%(foo)"]},
{'pattern': 'self: Any',
'description': 'you can omit Any annotation for self',
'good_lines': ['def foo (self):'],
'bad_lines': ['def foo(self: Any):']},
# This rule is constructed with + to avoid triggering on itself
{'pattern': " =" + '[^ =>~"]',
'description': 'Missing whitespace after "="',
'good_lines': ['a = b', '5 == 6'],
'bad_lines': ['a =b', 'asdf =42']},
{'pattern': r'":\w[^"]*$',
'description': 'Missing whitespace after ":"',
'good_lines': ['"foo": bar', '"some:string:with:colons"'],
'bad_lines': ['"foo":bar', '"foo":1']},
{'pattern': r"':\w[^']*$",
'description': 'Missing whitespace after ":"',
'good_lines': ["'foo': bar", "'some:string:with:colons'"],
'bad_lines': ["'foo':bar", "'foo':1"]},
{'pattern': r"^\s+#\w",
'strip': '\n',
'exclude': set(['tools/droplets/create.py']),
'description': 'Missing whitespace after "#"',
'good_lines': ['a = b # some operation', '1+2 # 3 is the result'],
'bad_lines': [' #some operation', ' #not valid!!!']},
{'pattern': "assertEquals[(]",
'description': 'Use assertEqual, not assertEquals (which is deprecated).',
'good_lines': ['assertEqual(1, 2)'],
'bad_lines': ['assertEquals(1, 2)']},
{'pattern': "== None",
'description': 'Use `is None` to check whether something is None',
'good_lines': ['if foo is None'],
'bad_lines': ['foo == None']},
{'pattern': "type:[(]",
'description': 'Missing whitespace after ":" in type annotation',
'good_lines': ['# type: (Any, Any)', 'colon:separated:string:containing:type:as:keyword'],
'bad_lines': ['# type:(Any, Any)']},
{'pattern': "type: ignore$",
'exclude': set(['tools/tests',
'zerver/lib/test_runner.py',
'zerver/tests']),
'description': '"type: ignore" should always end with "# type: ignore # explanation for why"',
'good_lines': ['foo = bar # type: ignore # explanation'],
'bad_lines': ['foo = bar # type: ignore']},
{'pattern': "# type [(]",
'description': 'Missing : after type in type annotation',
'good_lines': ['foo = 42 # type: int', '# type: (str, int) -> None'],
'bad_lines': ['# type (str, int) -> None']},
{'pattern': "#type",
'description': 'Missing whitespace after "#" in type annotation',
'good_lines': ['foo = 42 # type: int'],
'bad_lines': ['foo = 42 #type: int']},
{'pattern': r'\b(if|else|while)[(]',
'description': 'Put a space between statements like if, else, etc. and (.',
'good_lines': ['if (1 == 2):', 'while (foo == bar):'],
'bad_lines': ['if(1 == 2):', 'while(foo == bar):']},
{'pattern': ", [)]",
'description': 'Unnecessary whitespace between "," and ")"',
'good_lines': ['foo = (1, 2, 3,)', 'foo(bar, 42)'],
'bad_lines': ['foo = (1, 2, 3, )']},
{'pattern': "% [(]",
'description': 'Unnecessary whitespace between "%" and "("',
'good_lines': ['"foo %s bar" % ("baz",)'],
'bad_lines': ['"foo %s bar" % ("baz",)']},
# This next check could have false positives, but it seems pretty
# rare; if we find any, they can be added to the exclude list for
# this rule.
{'pattern': r"""^(?:[^'"#\\]|{}|{})*(?:{}|{})\s*%\s*(?![\s({{\\]|dict\(|tuple\()(?:[^,{}]|{})+(?:$|[,#\\]|{})""".format(
PYSQ, PYDQ, PYSQ, PYDQ, PYDELIMS, PYGROUP, PYRIGHT),
'description': 'Used % formatting without a tuple',
'good_lines': ['"foo %s bar" % ("baz",)'],
'bad_lines': ['"foo %s bar" % "baz"']},
{'pattern': r"""^(?:[^'"#\\]|{}|{})*(?:{}|{})\s*%\s*\((?:[^,{}]|{})*\)""".format(
PYSQ, PYDQ, PYSQ, PYDQ, PYDELIMS, PYGROUP),
'description': 'Used % formatting with parentheses that do not form a tuple',
'good_lines': ['"foo %s bar" % ("baz",)"'],
'bad_lines': ['"foo %s bar" % ("baz")']},
{'pattern': 'sudo',
'include_only': set(['scripts/']),
'exclude': set(['scripts/lib/setup_venv.py']),
'exclude_line': set([
('scripts/lib/zulip_tools.py', 'sudo_args = kwargs.pop(\'sudo_args\', [])'),
('scripts/lib/zulip_tools.py', 'args = [\'sudo\'] + sudo_args + [\'--\'] + args'),
]),
'description': 'Most scripts are intended to run on systems without sudo.',
'good_lines': ['subprocess.check_call(["ls"])'],
'bad_lines': ['subprocess.check_call(["sudo", "ls"])']},
{'pattern': 'django.utils.translation',
'include_only': set(['test/', 'zerver/views/development/']),
'description': 'Test strings should not be tagged for translation',
'good_lines': [''],
'bad_lines': ['django.utils.translation']},
{'pattern': 'userid',
'description': 'We prefer user_id over userid.',
'good_lines': ['id = alice.user_id'],
'bad_lines': ['id = alice.userid']},
{'pattern': r'json_success\({}\)',
'description': 'Use json_success() to return nothing',
'good_lines': ['return json_success()'],
'bad_lines': ['return json_success({})']},
{'pattern': r'\Wjson_error\(_\(?\w+\)',
'exclude': set(['zerver/tests', 'zerver/views/development/']),
'description': 'Argument to json_error should be a literal string enclosed by _()',
'good_lines': ['return json_error(_("string"))'],
'bad_lines': ['return json_error(_variable)', 'return json_error(_(variable))']},
{'pattern': r'''\Wjson_error\(['"].+[),]$''',
'exclude': set(['zerver/tests']),
'description': 'Argument to json_error should a literal string enclosed by _()'},
# To avoid JsonableError(_variable) and JsonableError(_(variable))
{'pattern': r'\WJsonableError\(_\(?\w.+\)',
'exclude': set(['zerver/tests', 'zerver/views/development/']),
'description': 'Argument to JsonableError should be a literal string enclosed by _()'},
{'pattern': r'''\WJsonableError\(["'].+\)''',
'exclude': set(['zerver/tests', 'zerver/views/development/']),
'description': 'Argument to JsonableError should be a literal string enclosed by _()'},
{'pattern': r"""\b_\((?:\s|{}|{})*[^\s'")]""".format(PYSQ, PYDQ),
'description': 'Called _() on a computed string',
'exclude_line': set([
('zerver/lib/i18n.py', 'result = _(string)'),
]),
'good_lines': ["return json_error(_('No presence data for %s') % (target.email,))"],
'bad_lines': ["return json_error(_('No presence data for %s' % (target.email,)))"]},
{'pattern': r'''([a-zA-Z0-9_]+)=REQ\(['"]\1['"]''',
'description': 'REQ\'s first argument already defaults to parameter name'},
{'pattern': r'self\.client\.(get|post|patch|put|delete)',
'description': \
'''Do not call self.client directly for put/patch/post/get.
See WRAPPER_COMMENT in test_helpers.py for details.
'''},
# Directly fetching Message objects in e.g. views code is often a security bug.
{'pattern': '[^r]Message.objects.get',
'exclude': set(["zerver/tests",
"zerver/lib/onboarding.py",
"zilencer/management/commands/add_mock_conversation.py",
"zerver/worker/queue_processors.py",
"zerver/management/commands/export.py",
"zerver/lib/export.py"]),
'description': 'Please use access_message() to fetch Message objects',
},
{'pattern': 'Stream.objects.get',
'include_only': set(["zerver/views/"]),
'description': 'Please use access_stream_by_*() to fetch Stream objects',
},
{'pattern': 'get_stream[(]',
'include_only': set(["zerver/views/", "zerver/lib/actions.py"]),
'exclude_line': set([
# This one in check_message is kinda terrible, since it's
# how most instances are written, but better to exclude something than nothing
('zerver/lib/actions.py', 'stream = get_stream(stream_name, realm)'),
('zerver/lib/actions.py', 'get_stream(admin_realm_signup_notifications_stream, admin_realm)'),
# Here we need get_stream to access streams you've since unsubscribed from.
('zerver/views/messages.py', 'stream = get_stream(operand, self.user_profile.realm)'),
# Use stream_id to exclude mutes.
('zerver/views/messages.py', 'stream_id = get_stream(stream_name, user_profile.realm).id'),
]),
'description': 'Please use access_stream_by_*() to fetch Stream objects',
},
{'pattern': 'Stream.objects.filter',
'include_only': set(["zerver/views/"]),
'description': 'Please use access_stream_by_*() to fetch Stream objects',
},
{'pattern': '^from (zerver|analytics|confirmation)',
'include_only': set(["/migrations/"]),
'exclude': set([
'zerver/migrations/0032_verify_all_medium_avatar_images.py',
'zerver/migrations/0060_move_avatars_to_be_uid_based.py',
'zerver/migrations/0104_fix_unreads.py',
'zerver/migrations/0206_stream_rendered_description.py',
'pgroonga/migrations/0002_html_escape_subject.py',
]),
'description': "Don't import models or other code in migrations; see docs/subsystems/schema-migrations.md",
},
{'pattern': 'datetime[.](now|utcnow)',
'include_only': set(["zerver/", "analytics/"]),
'description': "Don't use datetime in backend code.\n"
"See https://zulip.readthedocs.io/en/latest/contributing/code-style.html#naive-datetime-objects",
},
{'pattern': r'render_to_response\(',
'description': "Use render() instead of render_to_response().",
},
{'pattern': 'from os.path',
'description': "Don't use from when importing from the standard library",
},
{'pattern': 'import os.path',
'description': "Use import os instead of import os.path",
},
{'pattern': r'(logging|logger)\.warn\W',
'description': "Logger.warn is a deprecated alias for Logger.warning; Use 'warning' instead of 'warn'.",
'good_lines': ["logging.warning('I am a warning.')", "logger.warning('warning')"],
'bad_lines': ["logging.warn('I am a warning.')", "logger.warn('warning')"]},
{'pattern': r'\.pk',
'exclude_pattern': '[.]_meta[.]pk',
'description': "Use `id` instead of `pk`.",
'good_lines': ['if my_django_model.id == 42', 'self.user_profile._meta.pk'],
'bad_lines': ['if my_django_model.pk == 42']},
{'pattern': r'^[ ]*# type: \(',
'exclude': set([
# These directories, especially scripts/ and puppet/,
# have tools that need to run before a Zulip environment
# is provisioned; in some of those, the `typing` module
# might not be available yet, so care is required.
'scripts/',
'tools/',
'puppet/',
# Zerver files that we should just clean.
'zerver/tests',
'zerver/openapi/python_examples.py',
'zerver/lib/request.py',
'zerver/views/streams.py',
# thumbor is (currently) python2 only
'zthumbor/',
]),
'description': 'Comment-style function type annotation. Use Python3 style annotations instead.',
},
{'pattern': r' = models[.].*null=True.*\) # type: (?!Optional)',
'include_only': {"zerver/models.py"},
'description': 'Model variable with null=true not annotated as Optional.',
'good_lines': ['desc = models.TextField(null=True) # type: Optional[Text]',
'stream = models.ForeignKey(Stream, null=True, on_delete=CASCADE) # type: Optional[Stream]',
'desc = models.TextField() # type: Text',
'stream = models.ForeignKey(Stream, on_delete=CASCADE) # type: Stream'],
'bad_lines': ['desc = models.CharField(null=True) # type: Text',
'stream = models.ForeignKey(Stream, null=True, on_delete=CASCADE) # type: Stream'],
},
{'pattern': r' = models[.](?!NullBoolean).*\) # type: Optional', # Optional tag, except NullBoolean(Field)
'exclude_pattern': 'null=True',
'include_only': {"zerver/models.py"},
'description': 'Model variable annotated with Optional but variable does not have null=true.',
'good_lines': ['desc = models.TextField(null=True) # type: Optional[Text]',
'stream = models.ForeignKey(Stream, null=True, on_delete=CASCADE) # type: Optional[Stream]',
'desc = models.TextField() # type: Text',
'stream = models.ForeignKey(Stream, on_delete=CASCADE) # type: Stream'],
'bad_lines': ['desc = models.TextField() # type: Optional[Text]',
'stream = models.ForeignKey(Stream, on_delete=CASCADE) # type: Optional[Stream]'],
},
{'pattern': r'[\s([]Text([^\s\w]|$)',
'exclude': set([
# We are likely to want to keep these dirs Python 2+3 compatible,
# since the plan includes extracting them to a separate project eventually.
'tools/lib',
# TODO: Update our migrations from Text->str.
'zerver/migrations/',
# thumbor is (currently) python2 only
'zthumbor/',
]),
'description': "Now that we're a Python 3 only codebase, we don't need to use typing.Text. Please use str instead.",
},
{'pattern': 'exit[(]1[)]',
'include_only': set(["/management/commands/"]),
'description': 'Raise CommandError to exit with failure in management commands',
},
]) + whitespace_rules + comma_whitespace_rule,
max_length=110,
shebang_rules=shebang_rules,
)
# Lint rules applied to shell scripts.  Only the first generic
# whitespace rule (trailing whitespace) is appended, since the tab rule
# is not wanted here; the shared shebang checks are applied as well.
bash_rules = RuleList(
    langs=['bash'],
    rules=cast(Rule, [
        {'pattern': '#!.*sh [-xe]',
         'description': 'Fix shebang line with proper call to /usr/bin/env for Bash path, change -x|-e switches'
                        ' to set -x|set -e'},
        # Zulip's installer/scripts must run unprivileged where possible.
        {'pattern': 'sudo',
         'description': 'Most scripts are intended to work on systems without sudo',
         'include_only': set(['scripts/']),
         'exclude': set([
             'scripts/lib/install',
             'scripts/setup/configure-rabbitmq'
         ]), },
    ]) + whitespace_rules[0:1],
    shebang_rules=shebang_rules,
)
css_rules = RuleList(
langs=['css', 'scss'],
rules=cast(Rule, [
{'pattern': r'calc\([^+]+\+[^+]+\)',
'description': "Avoid using calc with '+' operator. See #8403 : in CSS.",
'good_lines': ["width: calc(20% - -14px);"],
'bad_lines': ["width: calc(20% + 14px);"]},
{'pattern': r'^[^:]*:\S[^:]*;$',
'description': "Missing whitespace after : in CSS",
'good_lines': ["background-color: white;", "text-size: 16px;"],
'bad_lines': ["background-color:white;", "text-size:16px;"]},
{'pattern': '[a-z]{',
'description': "Missing whitespace before '{' in CSS.",
'good_lines': ["input {", "body {"],
'bad_lines': ["input{", "body{"]},
{'pattern': 'https://',
'description': "Zulip CSS should have no dependencies on external resources",
'good_lines': ['background: url(/static/images/landing-page/pycon.jpg);'],
'bad_lines': ['background: url(https://example.com/image.png);']},
{'pattern': '^[ ][ ][a-zA-Z0-9]',
'description': "Incorrect 2-space indentation in CSS",
'strip': '\n',
'good_lines': [" color: white;", "color: white;"],
'bad_lines': [" color: white;"]},
{'pattern': r'{\w',
'description': "Missing whitespace after '{' in CSS (should be newline).",
'good_lines': ["{\n"],
'bad_lines': ["{color: LightGoldenRodYellow;"]},
{'pattern': ' thin[ ;]',
'description': "thin CSS attribute is under-specified, please use 1px.",
'good_lines': ["border-width: 1px;"],
'bad_lines': ["border-width: thin;", "border-width: thin solid black;"]},
{'pattern': ' medium[ ;]',
'description': "medium CSS attribute is under-specified, please use pixels.",
'good_lines': ["border-width: 3px;"],
'bad_lines': ["border-width: medium;", "border: medium solid black;"]},
{'pattern': ' thick[ ;]',
'description': "thick CSS attribute is under-specified, please use pixels.",
'good_lines': ["border-width: 5px;"],
'bad_lines': ["border-width: thick;", "border: thick solid black;"]},
{'pattern': r'rgba?\(',
'description': 'Use of rgb(a) format is banned, Please use hsl(a) instead',
'good_lines': ['hsl(0, 0%, 0%)', 'hsla(0, 0%, 100%, 0.1)'],
'bad_lines': ['rgb(0, 0, 0)', 'rgba(255, 255, 255, 0.1)']},
]) + whitespace_rules + comma_whitespace_rule
)
# Spelling/terminology rules shared by HTML templates and Markdown prose.
prose_style_rules = cast(Rule, [
    {'pattern': r'[^\/\#\-"]([jJ]avascript)',  # exclude usage in hrefs/divs
     'exclude': set(["docs/documentation/api.md"]),
     'description': "javascript should be spelled JavaScript"},
    {'pattern': r'''[^\/\-\."'\_\=\>]([gG]ithub)[^\.\-\_"\<]''',  # exclude usage in hrefs/divs
     'description': "github should be spelled GitHub"},
    {'pattern': '[oO]rganisation',
     'description': "Organization is spelled with a z",
     'exclude_line': [('docs/translating/french.md', '* organization - **organisation**')]},
    {'pattern': '!!! warning',
     'description': "!!! warning is invalid; it's spelled '!!! warn'"},
    {'pattern': 'Terms of service',
     'description': "The S in Terms of Service is capitalized"},
    {'pattern': '[^-_]botserver(?!rc)|bot server',
     'description': "Use Botserver instead of botserver or bot server."},
]) + comma_whitespace_rule
# Rules shared by all HTML-like template files; extended below by
# handlebars_rules (.hbs) and jinja2_rules (.html).
html_rules = whitespace_rules + prose_style_rules + cast(Rule, [
    {'pattern': 'subject|SUBJECT',
     'exclude': set(['templates/zerver/email.html']),
     'exclude_pattern': 'email subject',
     'description': 'avoid subject in templates',
     'good_lines': ['topic_name'],
     'bad_lines': ['subject="foo"', ' MAX_SUBJECT_LEN']},
    {'pattern': r'placeholder="[^{#](?:(?!\.com).)+$',
     'description': "`placeholder` value should be translatable.",
     'exclude_line': [('templates/zerver/register.html', 'placeholder="acme"'),
                      ('templates/zerver/register.html', 'placeholder="Acme or Aκμή"')],
     'exclude': set(["templates/analytics/support.html"]),
     'good_lines': ['<input class="stream-list-filter" type="text" placeholder="{{ _(\'Search streams\') }}" />'],
     'bad_lines': ['<input placeholder="foo">']},
    {'pattern': "placeholder='[^{]",
     'description': "`placeholder` value should be translatable.",
     'good_lines': ['<input class="stream-list-filter" type="text" placeholder="{{ _(\'Search streams\') }}" />'],
     'bad_lines': ["<input placeholder='foo'>"]},
    {'pattern': "aria-label='[^{]",
     'description': "`aria-label` value should be translatable.",
     'good_lines': ['<button type="button" class="close close-alert-word-status" aria-label="{{t \'Close\' }}">'],
     'bad_lines': ["<button aria-label='foo'></button>"]},
    {'pattern': 'aria-label="[^{]',
     'description': "`aria-label` value should be translatable.",
     'good_lines': ['<button type="button" class="close close-alert-word-status" aria-label="{{t \'Close\' }}">'],
     'bad_lines': ['<button aria-label="foo"></button>']},
    {'pattern': 'script src="http',
     'description': "Don't directly load dependencies from CDNs. See docs/subsystems/front-end-build-process.md",
     'exclude': set(["templates/corporate/billing.html", "templates/zerver/hello.html",
                     "templates/corporate/upgrade.html"]),
     'good_lines': ["{{ render_bundle('landing-page') }}"],
     'bad_lines': ['<script src="https://ajax.googleapis.com/ajax/libs/jquery/3.2.1/jquery.min.js"></script>']},
    {'pattern': "title='[^{]",
     'description': "`title` value should be translatable.",
     'good_lines': ['<link rel="author" title="{{ _(\'About these documents\') }}" />'],
     'bad_lines': ["<p title='foo'></p>"]},
    {'pattern': r'title="[^{\:]',
     'exclude_line': set([
         ('templates/zerver/app/markdown_help.html',
          '<td class="rendered_markdown"><img alt=":heart:" class="emoji" src="/static/generated/emoji/images/emoji/heart.png" title=":heart:" /></td>')
     ]),
     'exclude': set(["templates/zerver/emails", "templates/analytics/support.html"]),
     'description': "`title` value should be translatable."},
    {'pattern': r'''\Walt=["'][^{"']''',
     'description': "alt argument should be enclosed by _() or it should be an empty string.",
     'exclude': set(['static/templates/settings/display_settings.hbs',
                     'templates/zerver/app/keyboard_shortcuts.html',
                     'templates/zerver/app/markdown_help.html']),
     # Fixed typo in the second example: it previously read '<img alg="" />';
     # the intended "good" example is an empty alt attribute.
     'good_lines': ['<img src="{{source_url}}" alt="{{ _(name) }}" />', '<img alt="" />'],
     'bad_lines': ['<img alt="Foo Image" />']},
    {'pattern': r'''\Walt=["']{{ ?["']''',
     'description': "alt argument should be enclosed by _().",
     'good_lines': ['<img src="{{source_url}}" alt="{{ _(name) }}" />'],
     'bad_lines': ['<img alt="{{ " />']},
    {'pattern': r'\bon\w+ ?=',
     'description': "Don't use inline event handlers (onclick=, etc. attributes) in HTML. Instead,"
                    "attach a jQuery event handler ($('#foo').on('click', function () {...})) when "
                    "the DOM is ready (inside a $(function () {...}) block).",
     'exclude': set(['templates/zerver/dev_login.html', 'templates/corporate/upgrade.html']),
     'good_lines': ["($('#foo').on('click', function () {}"],
     'bad_lines': ["<button id='foo' onclick='myFunction()'>Foo</button>", "<input onchange='myFunction()'>"]},
    {'pattern': 'style ?=',
     'description': "Avoid using the `style=` attribute; we prefer styling in CSS files",
     'exclude_pattern': r'.*style ?=["' + "'" + '](display: ?none|background: {{|color: {{|background-color: {{).*',
     'exclude': set([
         # KaTeX output uses style attribute
         'templates/zerver/app/markdown_help.html',
         # 5xx page doesn't have external CSS
         'static/html/5xx.html',
         # Group PMs color is dynamically calculated
         'static/templates/group_pms.hbs',
         # exclude_pattern above handles color, but have other issues:
         'static/templates/draft.hbs',
         'static/templates/subscription.hbs',
         'static/templates/single_message.hbs',
         # Old-style email templates need to use inline style
         # attributes; it should be possible to clean these up
         # when we convert these templates to use premailer.
         'templates/zerver/emails/email_base_messages.html',
         # Email log templates; should clean up.
         'templates/zerver/email.html',
         'templates/zerver/email_log.html',
         # Probably just needs to be changed to display: none so the exclude works
         'templates/zerver/app/navbar.html',
         # Needs the width cleaned up; display: none is fine
         'static/templates/settings/account_settings.hbs',
         # background image property is dynamically generated
         'static/templates/user_profile_modal.hbs',
         'static/templates/sidebar_private_message_list.hbs',
         # Inline styling for an svg; could be moved to CSS files?
         'templates/zerver/landing_nav.html',
         'templates/zerver/billing_nav.html',
         'templates/zerver/app/home.html',
         'templates/zerver/features.html',
         'templates/zerver/portico-header.html',
         'templates/corporate/billing.html',
         'templates/corporate/upgrade.html',
         # Miscellaneous violations to be cleaned up
         'static/templates/user_info_popover_title.hbs',
         'static/templates/subscription_invites_warning_modal.hbs',
         'templates/zerver/reset_confirm.html',
         'templates/zerver/config_error.html',
         'templates/zerver/dev_env_email_access_details.html',
         'templates/zerver/confirm_continue_registration.html',
         'templates/zerver/register.html',
         'templates/zerver/accounts_send_confirm.html',
         'templates/zerver/integrations/index.html',
         'templates/zerver/documentation_main.html',
         'templates/analytics/realm_summary_table.html',
         'templates/corporate/zephyr.html',
         'templates/corporate/zephyr-mirror.html',
     ]),
     'good_lines': ['#my-style {color: blue;}', 'style="display: none"', "style='display: none"],
     'bad_lines': ['<p style="color: blue;">Foo</p>', 'style = "color: blue;"']},
])
# Handlebars templates: all the shared HTML rules plus hygiene checks for
# {{t ...}}/{{#tr}} translation tags.
handlebars_rules = RuleList(
    langs=['hbs'],
    rules=html_rules + cast(Rule, [
        {'pattern': "[<]script",
         'description': "Do not use inline <script> tags here; put JavaScript in static/js instead."},
        {'pattern': '{{ t ("|\')',
         'description': 'There should be no spaces before the "t" in a translation tag.'},
        {'pattern': r"{{t '.*' }}[\.\?!]",
         'description': "Period should be part of the translatable string."},
        {'pattern': r'{{t ".*" }}[\.\?!]',
         'description': "Period should be part of the translatable string."},
        {'pattern': r"{{/tr}}[\.\?!]",
         'description': "Period should be part of the translatable string."},
        {'pattern': '{{t ("|\') ',
         'description': 'Translatable strings should not have leading spaces.'},
        {'pattern': "{{t '[^']+ ' }}",
         'description': 'Translatable strings should not have trailing spaces.'},
        {'pattern': '{{t "[^"]+ " }}',
         'description': 'Translatable strings should not have trailing spaces.'},
    ]),
)
# Jinja2 (Django-served) templates: shared HTML rules plus punctuation
# checks for {% trans %} blocks and {{ _(...) }} calls.
jinja2_rules = RuleList(
    langs=['html'],
    rules=html_rules + cast(Rule, [
        {'pattern': r"{% endtrans %}[\.\?!]",
         'description': "Period should be part of the translatable string."},
        {'pattern': r"{{ _(.+) }}[\.\?!]",
         'description': "Period should be part of the translatable string."},
    ]),
)
# Lint rules applied to JSON files (fixtures and config).
json_rules = RuleList(
    langs=['json'],
    rules=cast(Rule, [
        # Here, we don't use `whitespace_rules`, because the tab-based
        # whitespace rule flags a lot of third-party JSON fixtures
        # under zerver/webhooks that we want preserved verbatim. So
        # we just include the trailing whitespace rule and a modified
        # version of the tab-based whitespace rule (we can't just use
        # exclude in whitespace_rules, since we only want to ignore
        # JSON files with tab-based whitespace, not webhook code).
        trailing_whitespace_rule,
        {'pattern': '\t',
         'strip': '\n',
         'exclude': set(['zerver/webhooks/']),
         'description': 'Fix tab-based whitespace'},
        {'pattern': r'":["\[\{]',
         'exclude': set(['zerver/webhooks/', 'zerver/tests/fixtures/']),
         'description': 'Require space after : in JSON'},
    ])
)
# Markdown files exempted from the max_length (line length) check used by
# the markdown rule lists in this file.
markdown_docs_length_exclude = {
    # Has some example Vagrant output that's very long
    "docs/development/setup-vagrant.md",
    # Have wide output in code blocks
    "docs/subsystems/logging.md",
    "docs/subsystems/migration-renumbering.md",
    # Have curl commands with JSON that would be messy to wrap
    "zerver/webhooks/helloworld/doc.md",
    "zerver/webhooks/trello/doc.md",
    # Has a very long configuration line
    "templates/zerver/integrations/perforce.md",
    # Has some example code that could perhaps be wrapped
    "templates/zerver/api/incoming-webhooks-walkthrough.md",
    # This macro has a long indented URL
    "templates/zerver/help/include/git-webhook-url-with-branches-indented.md",
    # These two are the same file and have some too-long lines for GitHub badges
    "README.md",
    "docs/overview/readme.md",
}
# Lint rules applied to Markdown files (except templates/zerver/help/,
# which gets help_markdown_rules below).
markdown_rules = RuleList(
    langs=['md'],
    rules=markdown_whitespace_rules + prose_style_rules + cast(Rule, [
        {'pattern': r'\[(?P<url>[^\]]+)\]\((?P=url)\)',
         'description': 'Linkified markdown URLs should use cleaner <http://example.com> syntax.'},
        {'pattern': 'https://zulip.readthedocs.io/en/latest/[a-zA-Z0-9]',
         # Use a set() for `exclude`, consistent with every other rule
         # in this file (this one was a bare list).
         'exclude': set(['docs/overview/contributing.md', 'docs/overview/readme.md', 'docs/README.md']),
         'include_only': set(['docs/']),
         'description': "Use relative links (../foo/bar.html) to other documents in docs/",
         },
        {'pattern': "su zulip -c [^']",
         'include_only': set(['docs/']),
         'description': "Always quote arguments using `su zulip -c '` to avoid confusion about how su works.",
         },
        {'pattern': r'\][(][^#h]',
         'include_only': set(['README.md', 'CONTRIBUTING.md']),
         'description': "Use absolute links from docs served by GitHub",
         },
    ]),
    max_length=120,
    length_exclude=markdown_docs_length_exclude,
    exclude_files_in='templates/zerver/help/'
)
# User-facing /help documentation: all the standard Markdown rules plus
# extra prose checks scoped to templates/zerver/help/.
help_markdown_rules = RuleList(
    langs=['md'],
    rules=markdown_rules.rules + cast(Rule, [
        {'pattern': '[a-z][.][A-Z]',
         'description': "Likely missing space after end of sentence",
         'include_only': set(['templates/zerver/help/']),
         },
        {'pattern': r'\b[rR]ealm[s]?\b',
         'include_only': set(['templates/zerver/help/']),
         'good_lines': ['Organization', 'deactivate_realm', 'realm_filter'],
         'bad_lines': ['Users are in a realm', 'Realm is the best model'],
         'description': "Realms are referred to as Organizations in user-facing docs."},
    ]),
    length_exclude=markdown_docs_length_exclude,
)
# Generic text-like formats get only the whitespace hygiene rules.
txt_rules = RuleList(langs=['txt', 'text', 'yaml', 'rst'], rules=whitespace_rules)
# All the non-Python rule lists defined above, in the order the linter
# driver applies them.
non_py_rules = [
    handlebars_rules,
    jinja2_rules,
    css_rules,
    js_rules,
    json_rules,
    markdown_rules,
    help_markdown_rules,
    bash_rules,
    txt_rules,
]
| 50.717026 | 222 | 0.571327 |
from __future__ import print_function
from __future__ import absolute_import
from zulint.custom_rules import RuleList
from typing import cast, Any, Dict, List, Tuple
Rule = List[Dict[str, Any]]
LineTup = Tuple[int, str, str, str]
PYDELIMS = r'''"'()\[\]{}#\\'''
PYREG = r"[^{}]".format(PYDELIMS)
PYSQ = r'"(?:[^"\\]|\\.)*"'
PYDQ = r"'(?:[^'\\]|\\.)*'"
PYLEFT = r"[(\[{]"
PYRIGHT = r"[)\]}]"
PYCODE = PYREG
for depth in range(5):
PYGROUP = r"""(?:{}|{}|{}{}*{})""".format(PYSQ, PYDQ, PYLEFT, PYCODE, PYRIGHT)
PYCODE = r"""(?:{}|{})""".format(PYREG, PYGROUP)
FILES_WITH_LEGACY_SUBJECT = {
'zerver/lib/topic.py',
'zerver/tests/test_legacy_subject.py',
'zerver/lib/fix_unreads.py',
'zerver/tests/test_migrations.py',
'zerver/lib/email_mirror.py',
'zerver/lib/feedback.py',
'zerver/tests/test_new_users.py',
'zerver/tests/test_email_mirror.py',
'zerver/openapi/python_examples.py',
'zerver/tests/test_openapi.py',
# to fix everything until we migrate the DB to "topic".
'zerver/tests/test_narrow.py',
}
shebang_rules = [
{'pattern': '^
'description': "zerver library code shouldn't have a shebang line.",
'include_only': set(['zerver/'])},
{'pattern': '^#!(?! *(?:/usr/bin/env|/bin/sh)(?: |$))',
'description': "Use `#!/usr/bin/env foo` instead of `#!/path/foo`"
" for interpreters other than sh."},
{'pattern': '^#!/usr/bin/env python$',
'description': "Use `#!/usr/bin/env python3` instead of `#!/usr/bin/env python`."}
]
trailing_whitespace_rule = {
'pattern': r'\s+$',
'strip': '\n',
'description': 'Fix trailing whitespace'
}
whitespace_rules = [
trailing_whitespace_rule,
{'pattern': 'http://zulip.readthedocs.io',
'description': 'Use HTTPS when linking to ReadTheDocs',
},
{'pattern': '\t',
'strip': '\n',
'exclude': set(['tools/ci/success-http-headers.txt']),
'description': 'Fix tab-based whitespace'},
]
comma_whitespace_rule = [
{'pattern': ', {2,}[^#/ ]',
'exclude': set(['zerver/tests', 'frontend_tests/node_tests', 'corporate/tests']),
'description': "Remove multiple whitespaces after ','",
'good_lines': ['foo(1, 2, 3)', 'foo = bar # some inline comment'],
'bad_lines': ['foo(1, 2, 3)', 'foo(1, 2, 3)']},
]
markdown_whitespace_rules = list([rule for rule in whitespace_rules if rule['pattern'] != r'\s+$']) + [
# This rule finds one space trailing a non-space, three or more trailing spaces, and
# spaces on an empty line.
{'pattern': r'((?<!\s)\s$)|(\s\s\s+$)|(^\s+$)',
'strip': '\n',
'description': 'Fix trailing whitespace'},
{'pattern': '^
'strip': '\n',
'description': 'Missing space after
'good_lines': [' 'frontend_tests/']),
'exclude_pattern': 'emails',
'description': 'avoid subject in JS code',
'good_lines': ['topic_name'],
'bad_lines': ['subject="foo"', ' MAX_SUBJECT_LEN']},
{'pattern': r'[^_]function\(',
'description': 'The keyword "function" should be followed by a space'},
{'pattern': r'.*blueslip.warning\(.*',
'description': 'The module blueslip has no function warning, try using blueslip.warn'},
{'pattern': '[)]{$',
'description': 'Missing space between ) and {'},
{'pattern': r'i18n\.t\([^)]+[^,\{\)]$',
'description': 'i18n string should not be a multiline string'},
{'pattern': r'''i18n\.t\(['"].+?['"]\s*\+''',
'description': 'Do not concatenate arguments within i18n.t()'},
{'pattern': r'i18n\.t\(.+\).*\+',
'description': 'Do not concatenate i18n strings'},
{'pattern': r'\+.*i18n\.t\(.+\)',
'description': 'Do not concatenate i18n strings'},
{'pattern': '[.]includes[(]',
'exclude': ['frontend_tests/'],
'description': '.includes() is incompatible with Internet Explorer. Use .indexOf() !== -1 instead.'},
{'pattern': '[.]html[(]',
'exclude_pattern': r'''[.]html[(]("|'|render_|html|message.content|sub.rendered_description|i18n.t|rendered_|$|[)]|error_text|widget_elem|[$]error|[$][(]"<p>"[)])''',
'exclude': ['static/js/portico', 'static/js/lightbox.js', 'static/js/ui_report.js',
'static/js/confirm_dialog.js',
'frontend_tests/'],
'description': 'Setting HTML content with jQuery .html() can lead to XSS security bugs. Consider .text() or using rendered_foo as a variable name if content comes from handlebars and thus is already sanitized.'},
{'pattern': '["\']json/',
'description': 'Relative URL for JSON route not supported by i18n'},
# This rule is constructed with + to avoid triggering on itself
{'pattern': " =" + '[^ =>~"]',
'description': 'Missing whitespace after "="'},
{'pattern': '^[ ]*//[A-Za-z0-9]',
'description': 'Missing space after // in comment'},
{'pattern': 'if[(]',
'description': 'Missing space between if and ('},
{'pattern': 'else{$',
'description': 'Missing space between else and {'},
{'pattern': '^else {$',
'description': 'Write JS else statements on same line as }'},
{'pattern': '^else if',
'description': 'Write JS else statements on same line as }'},
{'pattern': 'console[.][a-z]',
'exclude': set(['static/js/blueslip.js',
'frontend_tests/zjsunit',
'frontend_tests/casper_lib/common.js',
'frontend_tests/node_tests',
'static/js/debug.js']),
'description': 'console.log and similar should not be used in webapp'},
{'pattern': r'''[.]text\(["'][a-zA-Z]''',
'description': 'Strings passed to $().text should be wrapped in i18n.t() for internationalization',
'exclude': set(['frontend_tests/node_tests/'])},
{'pattern': r'''compose_error\(["']''',
'description': 'Argument to compose_error should be a literal string enclosed '
'by i18n.t()'},
{'pattern': r'ui.report_success\(',
'description': 'Deprecated function, use ui_report.success.'},
{'pattern': r'''report.success\(["']''',
'description': 'Argument to report_success should be a literal string enclosed '
'by i18n.t()'},
{'pattern': r'ui.report_error\(',
'description': 'Deprecated function, use ui_report.error.'},
{'pattern': r'''report.error\(["'][^'"]''',
'description': 'Argument to ui_report.error should be a literal string enclosed '
'by i18n.t()',
'good_lines': ['ui_report.error("")', 'ui_report.error(_("text"))'],
'bad_lines': ['ui_report.error("test")']},
{'pattern': r'\$\(document\)\.ready\(',
'description': "`Use $(f) rather than `$(document).ready(f)`",
'good_lines': ['$(function () {foo();}'],
'bad_lines': ['$(document).ready(function () {foo();}']},
{'pattern': '[$][.](get|post|patch|delete|ajax)[(]',
'description': "Use channel module for AJAX calls",
'exclude': set([
'static/js/blueslip.js',
'static/js/channel.js',
'static/js/stats/',
'static/js/portico/',
'static/js/billing/',
]),
'good_lines': ['channel.get(...)'],
'bad_lines': ['$.get()', '$.post()', '$.ajax()']},
{'pattern': 'style ?=',
'description': "Avoid using the `style=` attribute; we prefer styling in CSS files",
'exclude': set([
'frontend_tests/node_tests/copy_and_paste.js',
'frontend_tests/node_tests/upload.js',
'frontend_tests/node_tests/templates.js',
'static/js/upload.js',
'static/js/stream_color.js',
]),
'good_lines': ['
'bad_lines': ['<p style="color: blue;">Foo</p>', 'style = "color: blue;"']},
]) + whitespace_rules + comma_whitespace_rule,
)
python_rules = RuleList(
langs=['py'],
rules=cast(Rule, [
{'pattern': 'subject|SUBJECT',
'exclude_pattern': 'subject to the|email|outbox',
'description': 'avoid subject as a var',
'good_lines': ['topic_name'],
'bad_lines': ['subject="foo"', ' MAX_SUBJECT_LEN'],
'exclude': FILES_WITH_LEGACY_SUBJECT,
'include_only': set([
'zerver/data_import/',
'zerver/lib/',
'zerver/tests/',
'zerver/views/'])},
{'pattern': '^(?!
'description': '@login_required is unsupported; use @zulip_login_required',
'good_lines': ['@zulip_login_required', '
'bad_lines': ['@login_required', ' @login_required']},
{'pattern': '^user_profile[.]save[(][)]',
'description': 'Always pass update_fields when saving user_profile objects',
'exclude_line': set([
('zerver/lib/actions.py', "user_profile.save() # Can't use update_fields because of how the foreign key works."),
]),
'exclude': set(['zerver/tests', 'zerver/lib/create_user.py']),
'good_lines': ['user_profile.save(update_fields=["pointer"])'],
'bad_lines': ['user_profile.save()']},
{'pattern': r'^[^"]*"[^"]*"%\(',
'description': 'Missing space around "%"',
'good_lines': ['"%s" % ("foo")', '"%s" % (foo)'],
'bad_lines': ['"%s"%("foo")', '"%s"%(foo)']},
{'pattern': r"^[^']*'[^']*'%\(",
'description': 'Missing space around "%"',
'good_lines': ["'%s' % ('foo')", "'%s' % (foo)"],
'bad_lines': ["'%s'%('foo')", "'%s'%(foo)"]},
{'pattern': 'self: Any',
'description': 'you can omit Any annotation for self',
'good_lines': ['def foo (self):'],
'bad_lines': ['def foo(self: Any):']},
{'pattern': " =" + '[^ =>~"]',
'description': 'Missing whitespace after "="',
'good_lines': ['a = b', '5 == 6'],
'bad_lines': ['a =b', 'asdf =42']},
{'pattern': r'":\w[^"]*$',
'description': 'Missing whitespace after ":"',
'good_lines': ['"foo": bar', '"some:string:with:colons"'],
'bad_lines': ['"foo":bar', '"foo":1']},
{'pattern': r"':\w[^']*$",
'description': 'Missing whitespace after ":"',
'good_lines': ["'foo': bar", "'some:string:with:colons'"],
'bad_lines': ["'foo':bar", "'foo':1"]},
{'pattern': r"^\s+
'strip': '\n',
'exclude': set(['tools/droplets/create.py']),
'description': 'Missing whitespace after "#"',
'good_lines': ['a = b # some operation', '1+2 # 3 is the result'],
'bad_lines': [' #some operation', ' #not valid!!!']},
{'pattern': "assertEquals[(]",
'description': 'Use assertEqual, not assertEquals (which is deprecated).',
'good_lines': ['assertEqual(1, 2)'],
'bad_lines': ['assertEquals(1, 2)']},
{'pattern': "== None",
'description': 'Use `is None` to check whether something is None',
'good_lines': ['if foo is None'],
'bad_lines': ['foo == None']},
{'pattern': "type:[(]",
'description': 'Missing whitespace after ":" in type annotation',
'good_lines': ['# type: (Any, Any)', 'colon:separated:string:containing:type:as:keyword'],
'bad_lines': ['# type:(Any, Any)']},
{'pattern': "type: ignore$",
'exclude': set(['tools/tests',
'zerver/lib/test_runner.py',
'zerver/tests']),
'description': '"type: ignore" should always end with "# type: ignore # explanation for why"',
'good_lines': ['foo = bar # type: ignore # explanation'],
'bad_lines': ['foo = bar # type: ignore']},
{'pattern': "
'description': 'Missing : after type in type annotation',
'good_lines': ['foo = 42 # type: int', '# type: (str, int) -> None'],
'bad_lines': ['# type (str, int) -> None']},
{'pattern': "
'description': 'Missing whitespace after "#" in type annotation',
'good_lines': ['foo = 42 # type: int'],
'bad_lines': ['foo = 42 #type: int']},
{'pattern': r'\b(if|else|while)[(]',
'description': 'Put a space between statements like if, else, etc. and (.',
'good_lines': ['if (1 == 2):', 'while (foo == bar):'],
'bad_lines': ['if(1 == 2):', 'while(foo == bar):']},
{'pattern': ", [)]",
'description': 'Unnecessary whitespace between "," and ")"',
'good_lines': ['foo = (1, 2, 3,)', 'foo(bar, 42)'],
'bad_lines': ['foo = (1, 2, 3, )']},
{'pattern': "% [(]",
'description': 'Unnecessary whitespace between "%" and "("',
'good_lines': ['"foo %s bar" % ("baz",)'],
'bad_lines': ['"foo %s bar" % ("baz",)']},
# This next check could have false positives, but it seems pretty
# rare; if we find any, they can be added to the exclude list for
# this rule.
{'pattern': r"""^(?:[^'"#\\]|{}|{})*(?:{}|{})\s*%\s*(?![\s({{\\]|dict\(|tuple\()(?:[^,{}]|{})+(?:$|[,#\\]|{})""".format(
PYSQ, PYDQ, PYSQ, PYDQ, PYDELIMS, PYGROUP, PYRIGHT),
'description': 'Used % formatting without a tuple',
'good_lines': ['"foo %s bar" % ("baz",)'],
'bad_lines': ['"foo %s bar" % "baz"']},
{'pattern': r"""^(?:[^'"#\\]|{}|{})*(?:{}|{})\s*%\s*\((?:[^,{}]|{})*\)""".format(
PYSQ, PYDQ, PYSQ, PYDQ, PYDELIMS, PYGROUP),
'description': 'Used % formatting with parentheses that do not form a tuple',
'good_lines': ['"foo %s bar" % ("baz",)"'],
'bad_lines': ['"foo %s bar" % ("baz")']},
{'pattern': 'sudo',
'include_only': set(['scripts/']),
'exclude': set(['scripts/lib/setup_venv.py']),
'exclude_line': set([
('scripts/lib/zulip_tools.py', 'sudo_args = kwargs.pop(\'sudo_args\', [])'),
('scripts/lib/zulip_tools.py', 'args = [\'sudo\'] + sudo_args + [\'--\'] + args'),
]),
'description': 'Most scripts are intended to run on systems without sudo.',
'good_lines': ['subprocess.check_call(["ls"])'],
'bad_lines': ['subprocess.check_call(["sudo", "ls"])']},
{'pattern': 'django.utils.translation',
'include_only': set(['test/', 'zerver/views/development/']),
'description': 'Test strings should not be tagged for translation',
'good_lines': [''],
'bad_lines': ['django.utils.translation']},
{'pattern': 'userid',
'description': 'We prefer user_id over userid.',
'good_lines': ['id = alice.user_id'],
'bad_lines': ['id = alice.userid']},
{'pattern': r'json_success\({}\)',
'description': 'Use json_success() to return nothing',
'good_lines': ['return json_success()'],
'bad_lines': ['return json_success({})']},
{'pattern': r'\Wjson_error\(_\(?\w+\)',
'exclude': set(['zerver/tests', 'zerver/views/development/']),
'description': 'Argument to json_error should be a literal string enclosed by _()',
'good_lines': ['return json_error(_("string"))'],
'bad_lines': ['return json_error(_variable)', 'return json_error(_(variable))']},
{'pattern': r'''\Wjson_error\(['"].+[),]$''',
'exclude': set(['zerver/tests']),
'description': 'Argument to json_error should a literal string enclosed by _()'},
# To avoid JsonableError(_variable) and JsonableError(_(variable))
{'pattern': r'\WJsonableError\(_\(?\w.+\)',
'exclude': set(['zerver/tests', 'zerver/views/development/']),
'description': 'Argument to JsonableError should be a literal string enclosed by _()'},
{'pattern': r'''\WJsonableError\(["'].+\)''',
'exclude': set(['zerver/tests', 'zerver/views/development/']),
'description': 'Argument to JsonableError should be a literal string enclosed by _()'},
{'pattern': r"""\b_\((?:\s|{}|{})*[^\s'")]""".format(PYSQ, PYDQ),
'description': 'Called _() on a computed string',
'exclude_line': set([
('zerver/lib/i18n.py', 'result = _(string)'),
]),
'good_lines': ["return json_error(_('No presence data for %s') % (target.email,))"],
'bad_lines': ["return json_error(_('No presence data for %s' % (target.email,)))"]},
{'pattern': r'''([a-zA-Z0-9_]+)=REQ\(['"]\1['"]''',
'description': 'REQ\'s first argument already defaults to parameter name'},
{'pattern': r'self\.client\.(get|post|patch|put|delete)',
'description': \
'''Do not call self.client directly for put/patch/post/get.
See WRAPPER_COMMENT in test_helpers.py for details.
'''},
# Directly fetching Message objects in e.g. views code is often a security bug.
{'pattern': '[^r]Message.objects.get',
'exclude': set(["zerver/tests",
"zerver/lib/onboarding.py",
"zilencer/management/commands/add_mock_conversation.py",
"zerver/worker/queue_processors.py",
"zerver/management/commands/export.py",
"zerver/lib/export.py"]),
'description': 'Please use access_message() to fetch Message objects',
},
{'pattern': 'Stream.objects.get',
'include_only': set(["zerver/views/"]),
'description': 'Please use access_stream_by_*() to fetch Stream objects',
},
{'pattern': 'get_stream[(]',
'include_only': set(["zerver/views/", "zerver/lib/actions.py"]),
'exclude_line': set([
# This one in check_message is kinda terrible, since it's
# how most instances are written, but better to exclude something than nothing
('zerver/lib/actions.py', 'stream = get_stream(stream_name, realm)'),
('zerver/lib/actions.py', 'get_stream(admin_realm_signup_notifications_stream, admin_realm)'),
# Here we need get_stream to access streams you've since unsubscribed from.
('zerver/views/messages.py', 'stream = get_stream(operand, self.user_profile.realm)'),
# Use stream_id to exclude mutes.
('zerver/views/messages.py', 'stream_id = get_stream(stream_name, user_profile.realm).id'),
]),
'description': 'Please use access_stream_by_*() to fetch Stream objects',
},
{'pattern': 'Stream.objects.filter',
'include_only': set(["zerver/views/"]),
'description': 'Please use access_stream_by_*() to fetch Stream objects',
},
{'pattern': '^from (zerver|analytics|confirmation)',
'include_only': set(["/migrations/"]),
'exclude': set([
'zerver/migrations/0032_verify_all_medium_avatar_images.py',
'zerver/migrations/0060_move_avatars_to_be_uid_based.py',
'zerver/migrations/0104_fix_unreads.py',
'zerver/migrations/0206_stream_rendered_description.py',
'pgroonga/migrations/0002_html_escape_subject.py',
]),
'description': "Don't import models or other code in migrations; see docs/subsystems/schema-migrations.md",
},
{'pattern': 'datetime[.](now|utcnow)',
'include_only': set(["zerver/", "analytics/"]),
'description': "Don't use datetime in backend code.\n"
"See https://zulip.readthedocs.io/en/latest/contributing/code-style.html
},
{'pattern': r'render_to_response\(',
'description': "Use render() instead of render_to_response().",
},
{'pattern': 'from os.path',
'description': "Don't use from when importing from the standard library",
},
{'pattern': 'import os.path',
'description': "Use import os instead of import os.path",
},
{'pattern': r'(logging|logger)\.warn\W',
'description': "Logger.warn is a deprecated alias for Logger.warning; Use 'warning' instead of 'warn'.",
'good_lines': ["logging.warning('I am a warning.')", "logger.warning('warning')"],
'bad_lines': ["logging.warn('I am a warning.')", "logger.warn('warning')"]},
{'pattern': r'\.pk',
'exclude_pattern': '[.]_meta[.]pk',
'description': "Use `id` instead of `pk`.",
'good_lines': ['if my_django_model.id == 42', 'self.user_profile._meta.pk'],
'bad_lines': ['if my_django_model.pk == 42']},
{'pattern': r'^[ ]*# type: \(',
'exclude': set([
# These directories, especially scripts/ and puppet/,
# have tools that need to run before a Zulip environment
# is provisioned; in some of those, the `typing` module
# might not be available yet, so care is required.
'scripts/',
'tools/',
'puppet/',
# Zerver files that we should just clean.
'zerver/tests',
'zerver/openapi/python_examples.py',
'zerver/lib/request.py',
'zerver/views/streams.py',
# thumbor is (currently) python2 only
'zthumbor/',
]),
'description': 'Comment-style function type annotation. Use Python3 style annotations instead.',
},
{'pattern': r' = models[.].*null=True.*\) # type: (?!Optional)',
'include_only': {"zerver/models.py"},
'description': 'Model variable with null=true not annotated as Optional.',
'good_lines': ['desc = models.TextField(null=True) # type: Optional[Text]',
'stream = models.ForeignKey(Stream, null=True, on_delete=CASCADE) # type: Optional[Stream]',
'desc = models.TextField() # type: Text',
'stream = models.ForeignKey(Stream, on_delete=CASCADE) # type: Stream'],
'bad_lines': ['desc = models.CharField(null=True) # type: Text',
'stream = models.ForeignKey(Stream, null=True, on_delete=CASCADE) # type: Stream'],
},
{'pattern': r' = models[.](?!NullBoolean).*\) # type: Optional', # Optional tag, except NullBoolean(Field)
'exclude_pattern': 'null=True',
'include_only': {"zerver/models.py"},
'description': 'Model variable annotated with Optional but variable does not have null=true.',
'good_lines': ['desc = models.TextField(null=True) # type: Optional[Text]',
'stream = models.ForeignKey(Stream, null=True, on_delete=CASCADE) # type: Optional[Stream]',
'desc = models.TextField() # type: Text',
'stream = models.ForeignKey(Stream, on_delete=CASCADE) # type: Stream'],
'bad_lines': ['desc = models.TextField() # type: Optional[Text]',
'stream = models.ForeignKey(Stream, on_delete=CASCADE) # type: Optional[Stream]'],
},
{'pattern': r'[\s([]Text([^\s\w]|$)',
'exclude': set([
# We are likely to want to keep these dirs Python 2+3 compatible,
# since the plan includes extracting them to a separate project eventually.
'tools/lib',
# TODO: Update our migrations from Text->str.
'zerver/migrations/',
# thumbor is (currently) python2 only
'zthumbor/',
]),
'description': "Now that we're a Python 3 only codebase, we don't need to use typing.Text. Please use str instead.",
},
{'pattern': 'exit[(]1[)]',
'include_only': set(["/management/commands/"]),
'description': 'Raise CommandError to exit with failure in management commands',
},
]) + whitespace_rules + comma_whitespace_rule,
max_length=110,
shebang_rules=shebang_rules,
)
# Lint rules applied to shell scripts (scripts/ and friends).
bash_rules = RuleList(
    langs=['bash'],
    rules=cast(Rule, [
        {'pattern': '#!.*sh [-xe]',
         'description': 'Fix shebang line with proper call to /usr/bin/env for Bash path, change -x|-e switches'
                        ' to set -x|set -e'},
        {'pattern': 'sudo',
         'description': 'Most scripts are intended to work on systems without sudo',
         'include_only': set(['scripts/']),
         'exclude': set([
             'scripts/lib/install',
             'scripts/setup/configure-rabbitmq'
         ]), },
    ]) + whitespace_rules[0:1],  # only the trailing-whitespace rule applies to shell
    shebang_rules=shebang_rules,
)
css_rules = RuleList(
langs=['css', 'scss'],
rules=cast(Rule, [
{'pattern': r'calc\([^+]+\+[^+]+\)',
'description': "Avoid using calc with '+' operator. See #8403 : in CSS.",
'good_lines': ["width: calc(20% - -14px);"],
'bad_lines': ["width: calc(20% + 14px);"]},
{'pattern': r'^[^:]*:\S[^:]*;$',
'description': "Missing whitespace after : in CSS",
'good_lines': ["background-color: white;", "text-size: 16px;"],
'bad_lines': ["background-color:white;", "text-size:16px;"]},
{'pattern': '[a-z]{',
'description': "Missing whitespace before '{' in CSS.",
'good_lines': ["input {", "body {"],
'bad_lines': ["input{", "body{"]},
{'pattern': 'https://',
'description': "Zulip CSS should have no dependencies on external resources",
'good_lines': ['background: url(/static/images/landing-page/pycon.jpg);'],
'bad_lines': ['background: url(https://example.com/image.png);']},
{'pattern': '^[ ][ ][a-zA-Z0-9]',
'description': "Incorrect 2-space indentation in CSS",
'strip': '\n',
'good_lines': [" color: white;", "color: white;"],
'bad_lines': [" color: white;"]},
{'pattern': r'{\w',
'description': "Missing whitespace after '{' in CSS (should be newline).",
'good_lines': ["{\n"],
'bad_lines': ["{color: LightGoldenRodYellow;"]},
{'pattern': ' thin[ ;]',
'description': "thin CSS attribute is under-specified, please use 1px.",
'good_lines': ["border-width: 1px;"],
'bad_lines': ["border-width: thin;", "border-width: thin solid black;"]},
{'pattern': ' medium[ ;]',
'description': "medium CSS attribute is under-specified, please use pixels.",
'good_lines': ["border-width: 3px;"],
'bad_lines': ["border-width: medium;", "border: medium solid black;"]},
{'pattern': ' thick[ ;]',
'description': "thick CSS attribute is under-specified, please use pixels.",
'good_lines': ["border-width: 5px;"],
'bad_lines': ["border-width: thick;", "border: thick solid black;"]},
{'pattern': r'rgba?\(',
'description': 'Use of rgb(a) format is banned, Please use hsl(a) instead',
'good_lines': ['hsl(0, 0%, 0%)', 'hsla(0, 0%, 100%, 0.1)'],
'bad_lines': ['rgb(0, 0, 0)', 'rgba(255, 255, 255, 0.1)']},
]) + whitespace_rules + comma_whitespace_rule
)
# Spelling/style rules shared by every prose-bearing format (HTML, Markdown).
prose_style_rules = cast(Rule, [
    {'pattern': r'[^\/\#\-"]([jJ]avascript)',  # exclude usage in hrefs/divs
     'exclude': set(["docs/documentation/api.md"]),
     'description': "javascript should be spelled JavaScript"},
    {'pattern': r'''[^\/\-\."'\_\=\>]([gG]ithub)[^\.\-\_"\<]''',
     'description': "github should be spelled GitHub"},
    {'pattern': '[oO]rganisation',
     'description': "Organization is spelled with a z",
     'exclude_line': [('docs/translating/french.md', '* organization - **organisation**')]},
    {'pattern': '!!! warning',
     'description': "!!! warning is invalid; it's spelled '!!! warn'"},
    {'pattern': 'Terms of service',
     'description': "The S in Terms of Service is capitalized"},
    {'pattern': '[^-_]botserver(?!rc)|bot server',
     'description': "Use Botserver instead of botserver or bot server."},
]) + comma_whitespace_rule
# Rules shared by both HTML template dialects (Jinja2 and Handlebars);
# mostly translatability checks plus bans on inline JS/CSS.
html_rules = whitespace_rules + prose_style_rules + cast(Rule, [
    {'pattern': 'subject|SUBJECT',
     'exclude': set(['templates/zerver/email.html']),
     'exclude_pattern': 'email subject',
     'description': 'avoid subject in templates',
     'good_lines': ['topic_name'],
     'bad_lines': ['subject="foo"', ' MAX_SUBJECT_LEN']},
    {'pattern': r'placeholder="[^{#](?:(?!\.com).)+$',
     'description': "`placeholder` value should be translatable.",
     'exclude_line': [('templates/zerver/register.html', 'placeholder="acme"'),
                      ('templates/zerver/register.html', 'placeholder="Acme or Aκμή"')],
     'exclude': set(["templates/analytics/support.html"]),
     'good_lines': ['<input class="stream-list-filter" type="text" placeholder="{{ _(\'Search streams\') }}" />'],
     'bad_lines': ['<input placeholder="foo">']},
    {'pattern': "placeholder='[^{]",
     'description': "`placeholder` value should be translatable.",
     'good_lines': ['<input class="stream-list-filter" type="text" placeholder="{{ _(\'Search streams\') }}" />'],
     'bad_lines': ["<input placeholder='foo'>"]},
    {'pattern': "aria-label='[^{]",
     'description': "`aria-label` value should be translatable.",
     'good_lines': ['<button type="button" class="close close-alert-word-status" aria-label="{{t \'Close\' }}">'],
     'bad_lines': ["<button aria-label='foo'></button>"]},
    {'pattern': 'aria-label="[^{]',
     'description': "`aria-label` value should be translatable.",
     'good_lines': ['<button type="button" class="close close-alert-word-status" aria-label="{{t \'Close\' }}">'],
     'bad_lines': ['<button aria-label="foo"></button>']},
    {'pattern': 'script src="http',
     'description': "Don't directly load dependencies from CDNs. See docs/subsystems/front-end-build-process.md",
     'exclude': set(["templates/corporate/billing.html", "templates/zerver/hello.html",
                     "templates/corporate/upgrade.html"]),
     'good_lines': ["{{ render_bundle('landing-page') }}"],
     'bad_lines': ['<script src="https://ajax.googleapis.com/ajax/libs/jquery/3.2.1/jquery.min.js"></script>']},
    {'pattern': "title='[^{]",
     'description': "`title` value should be translatable.",
     'good_lines': ['<link rel="author" title="{{ _(\'About these documents\') }}" />'],
     'bad_lines': ["<p title='foo'></p>"]},
    {'pattern': r'title="[^{\:]',
     'exclude_line': set([
         ('templates/zerver/app/markdown_help.html',
          '<td class="rendered_markdown"><img alt=":heart:" class="emoji" src="/static/generated/emoji/images/emoji/heart.png" title=":heart:" /></td>')
     ]),
     'exclude': set(["templates/zerver/emails", "templates/analytics/support.html"]),
     'description': "`title` value should be translatable."},
    {'pattern': r'''\Walt=["'][^{"']''',
     'description': "alt argument should be enclosed by _() or it should be an empty string.",
     'exclude': set(['static/templates/settings/display_settings.hbs',
                     'templates/zerver/app/keyboard_shortcuts.html',
                     'templates/zerver/app/markdown_help.html']),
     'good_lines': ['<img src="{{source_url}}" alt="{{ _(name) }}" />', '<img alg="" />'],
     'bad_lines': ['<img alt="Foo Image" />']},
    {'pattern': r'''\Walt=["']{{ ?["']''',
     'description': "alt argument should be enclosed by _().",
     'good_lines': ['<img src="{{source_url}}" alt="{{ _(name) }}" />'],
     'bad_lines': ['<img alt="{{ " />']},
    {'pattern': r'\bon\w+ ?=',
     'description': "Don't use inline event handlers (onclick=, etc. attributes) in HTML. Instead,"
                    "attach a jQuery event handler ($('#foo').on('click', function () {...})) when "
                    "the DOM is ready (inside a $(function () {...}) block).",
     'exclude': set(['templates/zerver/dev_login.html', 'templates/corporate/upgrade.html']),
     'good_lines': ["($('#foo').on('click', function () {}"],
     'bad_lines': ["<button id='foo' onclick='myFunction()'>Foo</button>", "<input onchange='myFunction()'>"]},
    {'pattern': 'style ?=',
     'description': "Avoid using the `style=` attribute; we prefer styling in CSS files",
     'exclude_pattern': r'.*style ?=["' + "'" + '](display: ?none|background: {{|color: {{|background-color: {{).*',
     'exclude': set([
         # KaTeX output uses style attribute
         'templates/zerver/app/markdown_help.html',
         # 5xx page doesn't have external CSS
         'static/html/5xx.html',
         # Group PMs color is dynamically calculated
         'static/templates/group_pms.hbs',
         # exclude_pattern above handles color, but have other issues:
         'static/templates/draft.hbs',
         'static/templates/subscription.hbs',
         'static/templates/single_message.hbs',
         # Old-style email templates need to use inline style
         # attributes; it should be possible to clean these up
         # when we convert these templates to use premailer.
         'templates/zerver/emails/email_base_messages.html',
         # Email log templates; should clean up.
         'templates/zerver/email.html',
         'templates/zerver/email_log.html',
         # Probably just needs to be changed to display: none so the exclude works
         'templates/zerver/app/navbar.html',
         # Needs the width cleaned up; display: none is fine
         'static/templates/settings/account_settings.hbs',
         # background image property is dynamically generated
         'static/templates/user_profile_modal.hbs',
         'static/templates/sidebar_private_message_list.hbs',
         # Inline styling for an svg; could be moved to CSS files?
         'templates/zerver/landing_nav.html',
         'templates/zerver/billing_nav.html',
         'templates/zerver/app/home.html',
         'templates/zerver/features.html',
         'templates/zerver/portico-header.html',
         'templates/corporate/billing.html',
         'templates/corporate/upgrade.html',
         # Miscellaneous violations to be cleaned up
         'static/templates/user_info_popover_title.hbs',
         'static/templates/subscription_invites_warning_modal.hbs',
         'templates/zerver/reset_confirm.html',
         'templates/zerver/config_error.html',
         'templates/zerver/dev_env_email_access_details.html',
         'templates/zerver/confirm_continue_registration.html',
         'templates/zerver/register.html',
         'templates/zerver/accounts_send_confirm.html',
         'templates/zerver/integrations/index.html',
         'templates/zerver/documentation_main.html',
         'templates/analytics/realm_summary_table.html',
         'templates/corporate/zephyr.html',
         'templates/corporate/zephyr-mirror.html',
     ]),
     'good_lines': ['#my-style {color: blue;}', 'style="display: none"', "style='display: none"],
     'bad_lines': ['<p style="color: blue;">Foo</p>', 'style = "color: blue;"']},
])
# Handlebars templates: all the shared HTML rules plus {{t ...}}
# translation-tag hygiene checks.
handlebars_rules = RuleList(
    langs=['hbs'],
    rules=html_rules + cast(Rule, [
        {'pattern': "[<]script",
         'description': "Do not use inline <script> tags here; put JavaScript in static/js instead."},
        {'pattern': '{{ t ("|\')',
         'description': 'There should be no spaces before the "t" in a translation tag.'},
        {'pattern': r"{{t '.*' }}[\.\?!]",
         'description': "Period should be part of the translatable string."},
        {'pattern': r'{{t ".*" }}[\.\?!]',
         'description': "Period should be part of the translatable string."},
        {'pattern': r"{{/tr}}[\.\?!]",
         'description': "Period should be part of the translatable string."},
        {'pattern': '{{t ("|\') ',
         'description': 'Translatable strings should not have leading spaces.'},
        {'pattern': "{{t '[^']+ ' }}",
         'description': 'Translatable strings should not have trailing spaces.'},
        {'pattern': '{{t "[^"]+ " }}',
         'description': 'Translatable strings should not have trailing spaces.'},
    ]),
)
# Jinja2 (server-rendered) templates: shared HTML rules plus
# {% trans %} / {{ _(...) }} punctuation checks.
jinja2_rules = RuleList(
    langs=['html'],
    rules=html_rules + cast(Rule, [
        {'pattern': r"{% endtrans %}[\.\?!]",
         'description': "Period should be part of the translatable string."},
        {'pattern': r"{{ _(.+) }}[\.\?!]",
         'description': "Period should be part of the translatable string."},
    ]),
)
json_rules = RuleList(
    langs=['json'],
    rules=cast(Rule, [
        # The standard whitespace rules flag a lot of third-party JSON fixtures
        # under zerver/webhooks that we want preserved verbatim.  So
        # we just include the trailing whitespace rule and a modified
        # version of the tab-based whitespace rule (we can't just use
        # whitespace_rules wholesale, since its tab rule would flag
        # those fixtures).
        trailing_whitespace_rule,
        {'pattern': '\t',
         'strip': '\n',
         'exclude': set(['zerver/webhooks/']),
         'description': 'Fix tab-based whitespace'},
        {'pattern': r'":["\[\{]',
         'exclude': set(['zerver/webhooks/', 'zerver/tests/fixtures/']),
         'description': 'Require space after : in JSON'},
    ])
)
# Markdown files exempted from the max-line-length check, with the
# reason each one cannot reasonably be wrapped.
markdown_docs_length_exclude = {
    "docs/development/setup-vagrant.md",
    # Have wide output in code blocks
    "docs/subsystems/logging.md",
    "docs/subsystems/migration-renumbering.md",
    # Have curl commands with JSON that would be messy to wrap
    "zerver/webhooks/helloworld/doc.md",
    "zerver/webhooks/trello/doc.md",
    # Has a very long configuration line
    "templates/zerver/integrations/perforce.md",
    # Has some example code that could perhaps be wrapped
    "templates/zerver/api/incoming-webhooks-walkthrough.md",
    # This macro has a long indented URL
    "templates/zerver/help/include/git-webhook-url-with-branches-indented.md",
    # These two are the same file and have some too-long lines for GitHub badges
    "README.md",
    "docs/overview/readme.md",
}
# Lint rules for Markdown documentation (docs/, README, webhook docs, ...).
markdown_rules = RuleList(
    langs=['md'],
    rules=markdown_whitespace_rules + prose_style_rules + cast(Rule, [
        {'pattern': r'\[(?P<url>[^\]]+)\]\((?P=url)\)',
         'description': 'Linkified markdown URLs should use cleaner <http://example.com> syntax.'},
        {'pattern': 'https://zulip.readthedocs.io/en/latest/[a-zA-Z0-9]',
         'exclude': ['docs/overview/contributing.md', 'docs/overview/readme.md', 'docs/README.md'],
         'include_only': set(['docs/']),
         'description': "Use relative links (../foo/bar.html) to other documents in docs/",
         },
        {'pattern': "su zulip -c [^']",
         'include_only': set(['docs/']),
         'description': "Always quote arguments using `su zulip -c '` to avoid confusion about how su works.",
         },
        # Matches a markdown link target that is neither an in-page anchor
        # ("#...") nor starts with "h" (http/https absolute URL); such
        # relative links break when the file is rendered on GitHub.
        # FIX: the raw string here was truncated to r'\][(][^' (an
        # unterminated literal — a syntax error); restored the `[^#h]`
        # character class implied by the rule's description.
        {'pattern': r'\][(][^#h]',
         'include_only': set(['README.md', 'CONTRIBUTING.md']),
         'description': "Use absolute links from docs served by GitHub",
         },
    ]),
    max_length=120,
    length_exclude=markdown_docs_length_exclude,
    exclude_files_in='templates/zerver/help/'
)
# User-facing help center docs: all the markdown rules plus extra
# copy-editing checks specific to templates/zerver/help/.
help_markdown_rules = RuleList(
    langs=['md'],
    rules=markdown_rules.rules + cast(Rule, [
        {'pattern': '[a-z][.][A-Z]',
         'description': "Likely missing space after end of sentence",
         'include_only': set(['templates/zerver/help/']),
         },
        {'pattern': r'\b[rR]ealm[s]?\b',
         'include_only': set(['templates/zerver/help/']),
         'good_lines': ['Organization', 'deactivate_realm', 'realm_filter'],
         'bad_lines': ['Users are in a realm', 'Realm is the best model'],
         'description': "Realms are referred to as Organizations in user-facing docs."},
    ]),
    length_exclude=markdown_docs_length_exclude,
)
# Plain-text-ish formats only get the generic whitespace checks.
txt_rules = RuleList(
    langs=['txt', 'text', 'yaml', 'rst'],
    rules=whitespace_rules,
)
# All RuleLists for non-Python files, in the order they are applied
# by the lint driver.
non_py_rules = [
    handlebars_rules,
    jinja2_rules,
    css_rules,
    js_rules,
    json_rules,
    markdown_rules,
    help_markdown_rules,
    bash_rules,
    txt_rules,
]
| true | true |
1c2fba5a7b75cab145ff9a67266c2c28d6b40e9d | 5,333 | py | Python | nlpaug/model/lang_models/language_models.py | So-AI-love/nlpaug | 3aff5754609cb6bf092709d9af2089ccd55ffc93 | [
"MIT"
] | null | null | null | nlpaug/model/lang_models/language_models.py | So-AI-love/nlpaug | 3aff5754609cb6bf092709d9af2089ccd55ffc93 | [
"MIT"
] | null | null | null | nlpaug/model/lang_models/language_models.py | So-AI-love/nlpaug | 3aff5754609cb6bf092709d9af2089ccd55ffc93 | [
"MIT"
] | null | null | null | try:
import torch
import torch.nn.functional as F
except ImportError:
# No installation required if not using this function
pass
import numpy as np
import string
import nlpaug.util.selection.filtering as filtering
class LanguageModels:
    """Base class for neural language-model wrappers used for word-level augmentation.

    Concrete subclasses must implement :meth:`predict` and :meth:`id2token`,
    and must provide ``UNKNOWN_TOKEN`` and ``SUBWORD_PREFIX`` attributes,
    which :meth:`get_candidiates` reads (they are not defined on this class).
    """
    # Keys of the `optimize` dict that init_optimize() will honor.
    OPTIMIZE_ATTRIBUTES = ['external_memory', 'return_proba']
    def __init__(self, device='cpu', temperature=1.0, top_k=100, top_p=0.01, optimize=None, silence=True):
        """Store sampling hyper-parameters.

        :param device: torch device string; any falsy value falls back to 'cpu'.
        :param temperature: logits divisor applied by control_randomness().
        :param top_k: keep only the k highest logits in filtering().
        :param top_p: nucleus-sampling threshold used in filtering().
        :param optimize: optional dict overriding the OPTIMIZE_ATTRIBUTES defaults.
        :param silence: verbosity flag (stored only; not read in this class).
        """
        try:
            import torch
        except ModuleNotFoundError:
            # NOTE(review): the message ends with a stray backtick.
            raise ModuleNotFoundError('Missed torch library. Install torch by following https://pytorch.org/get-started/locally/`')
        # self.device = 'cuda' if device is None and torch.cuda.is_available() else 'cpu'
        self.device = device if device else 'cpu'
        self.temperature = temperature
        self.top_k = top_k
        self.top_p = top_p
        self.optimize = self.init_optimize(optimize)
        self.silence = silence
    @classmethod
    def get_default_optimize_config(cls):
        """Return a fresh dict with the default optimization settings."""
        return {
            'external_memory': 1024,  # GPT2 needs either zero or non-zero. XLNet needs number of extra memory tokens.
            'return_proba': False
        }
    def init_optimize(self, optimize):
        """Merge user-supplied `optimize` overrides into the defaults.

        Only keys listed in OPTIMIZE_ATTRIBUTES are honored; anything
        else in `optimize` is silently ignored.
        """
        _optimize = self.get_default_optimize_config()
        if optimize is None:
            return _optimize
        for attr in self.OPTIMIZE_ATTRIBUTES:
            if attr in optimize:
                _optimize[attr] = optimize[attr]
        return _optimize
    def clean(self, text):
        """Return `text` with leading/trailing whitespace stripped."""
        return text.strip()
    def predict(self, text, target_word=None, n=1):
        """Abstract: subclasses return up to `n` substitute words for `text`."""
        raise NotImplementedError
    @classmethod
    def control_randomness(cls, logits, seed):
        """Scale `logits` by 1/seed['temperature'] (no-op when temperature is None)."""
        temperature = seed['temperature']
        if temperature is not None:
            return logits / temperature
        return logits
    def filtering(self, logits, seed):
        """Apply top-k and/or nucleus (top-p) filtering to `logits`.

        `seed` is a dict with 'top_k' and 'top_p' entries.  Returns the
        filtered logits together with a plain-Python list of the
        vocabulary indexes they correspond to.
        """
        top_k = seed['top_k']
        top_p = seed['top_p']
        check_top_k = False
        check_top_p = False
        # `filtering` below refers to the nlpaug.util.selection.filtering
        # module imported at module level, not to this method (bare names
        # inside a method body do not resolve to sibling methods).
        if top_k is not None and 0 < top_k < len(logits):
            logits, idxes = filtering.filter_top_k(logits, top_k, replace=-float('Inf'))
            check_top_k = True
        if top_p is not None and 0 < top_p < 1:
            logits, idxes = filtering.nucleus_sampling(logits, top_p)
            check_top_p = True
        # If top_p is not None, value will be sorted, so no need to select it again
        if not check_top_p:
            if check_top_k:
                logits = logits.index_select(0, idxes)
                # TODO: Externalize to util for checking
                if 'cuda' in self.device:
                    idxes = idxes.cpu()
                idxes = idxes.detach().numpy().tolist()
            else:
                # No filter applied: every position survives.
                idxes = np.arange(len(logits)).tolist()
        else:
            logits = logits[:len(idxes)]
            # TODO: Externalize to util for checking
            if 'cuda' in self.device:
                idxes = idxes.cpu()
            idxes = idxes.detach().numpy().tolist()
        return logits, idxes
    def pick(self, logits, idxes, target_word, n=1, include_punctuation=False):
        """Sample candidates from `logits` and return up to `n` usable (word, proba) pairs."""
        # Over-sample (n*10) so enough survive the get_candidiates() filters.
        candidate_ids, candidate_probas = self.prob_multinomial(logits, n=n*10)
        # Map positions within the filtered logits back to vocabulary ids.
        candidate_ids = [idxes[candidate_id] for candidate_id in candidate_ids]
        results = self.get_candidiates(candidate_ids, candidate_probas, target_word, n,
                                       include_punctuation)
        return results
    def id2token(self, _id):
        """Abstract: subclasses map a vocabulary id to its surface token."""
        raise NotImplementedError()
    def prob_multinomial(self, logits, n):
        """Draw up to `n` distinct positions from softmax(logits).

        Returns (ids, probas); probas is None unless the 'return_proba'
        optimization flag is enabled.
        """
        # Convert to probability
        probas = F.softmax(logits, dim=-1)
        # Draw candidates
        num_sample = min(n, torch.nonzero(probas, as_tuple=False).size(0))  # Number of potential candidate is small when top_k/ top_p are used.
        filtered_top_n_ids = torch.multinomial(probas, num_samples=num_sample, replacement=False).tolist()
        if self.optimize['return_proba']:
            top_n_probas = [probas[_id] for _id in filtered_top_n_ids]
            return filtered_top_n_ids, top_n_probas
        return filtered_top_n_ids, None
    def is_skip_candidate(self, candidate):
        """Hook for subclasses to veto a candidate word (default: keep all)."""
        return False
    def get_candidiates(self, candidate_ids, candidate_probas, target_word=None, n=1,
                        include_punctuation=False):
        """Filter sampled candidates down to at most `n` usable words.

        Drops empty/unknown/subword tokens, the original target word,
        skip-listed words and (optionally) punctuation.  Input order is
        preserved, keeping the randomness of the multinomial draw.
        """
        # To have random behavior, NO sorting for candidate_probas.
        results = []
        if candidate_probas is None:
            candidate_probas = [0] * len(candidate_ids)
        for candidate_id, candidate_proba in zip(candidate_ids, candidate_probas):
            candidate_word = self.id2token(candidate_id)
            # unable to predict word
            if candidate_word in ['', self.UNKNOWN_TOKEN, self.SUBWORD_PREFIX] or 'unused' in candidate_word:
                continue
            # predicted same word
            if target_word is not None and candidate_word.lower() == target_word.lower():
                continue
            # stop word
            if self.is_skip_candidate(candidate_word):
                continue
            # punctuation
            if not include_punctuation and candidate_word in string.punctuation:
                continue
            results.append((candidate_word, candidate_proba))
            if len(results) >= n:
                break
        return results
| 35.317881 | 144 | 0.624602 | try:
import torch
import torch.nn.functional as F
except ImportError:
pass
import numpy as np
import string
import nlpaug.util.selection.filtering as filtering
class LanguageModels:
    """Base class for neural language-model wrappers used for word-level augmentation.

    Concrete subclasses must implement :meth:`predict` and :meth:`id2token`,
    and must provide ``UNKNOWN_TOKEN`` and ``SUBWORD_PREFIX`` attributes,
    which :meth:`get_candidiates` reads (they are not defined on this class).
    """
    # Keys of the `optimize` dict that init_optimize() will honor.
    OPTIMIZE_ATTRIBUTES = ['external_memory', 'return_proba']
    def __init__(self, device='cpu', temperature=1.0, top_k=100, top_p=0.01, optimize=None, silence=True):
        """Store sampling hyper-parameters; raises if torch is missing."""
        try:
            import torch
        except ModuleNotFoundError:
            raise ModuleNotFoundError('Missed torch library. Install torch by following https://pytorch.org/get-started/locally/`')
        # A falsy device falls back to CPU.
        self.device = device if device else 'cpu'
        self.temperature = temperature
        self.top_k = top_k
        self.top_p = top_p
        self.optimize = self.init_optimize(optimize)
        self.silence = silence
    @classmethod
    def get_default_optimize_config(cls):
        """Return a fresh dict with the default optimization settings."""
        return {
            'external_memory': 1024,
            'return_proba': False
        }
    def init_optimize(self, optimize):
        """Merge `optimize` overrides (OPTIMIZE_ATTRIBUTES keys only) into defaults."""
        _optimize = self.get_default_optimize_config()
        if optimize is None:
            return _optimize
        for attr in self.OPTIMIZE_ATTRIBUTES:
            if attr in optimize:
                _optimize[attr] = optimize[attr]
        return _optimize
    def clean(self, text):
        """Return `text` with leading/trailing whitespace stripped."""
        return text.strip()
    def predict(self, text, target_word=None, n=1):
        """Abstract: subclasses return up to `n` substitute words for `text`."""
        raise NotImplementedError
    @classmethod
    def control_randomness(cls, logits, seed):
        """Scale `logits` by 1/seed['temperature'] (no-op when temperature is None)."""
        temperature = seed['temperature']
        if temperature is not None:
            return logits / temperature
        return logits
    def filtering(self, logits, seed):
        """Apply top-k and/or nucleus (top-p) filtering; return (logits, index list)."""
        top_k = seed['top_k']
        top_p = seed['top_p']
        check_top_k = False
        check_top_p = False
        # `filtering` below is the module-level nlpaug.util.selection.filtering
        # import, not this method.
        if top_k is not None and 0 < top_k < len(logits):
            logits, idxes = filtering.filter_top_k(logits, top_k, replace=-float('Inf'))
            check_top_k = True
        if top_p is not None and 0 < top_p < 1:
            logits, idxes = filtering.nucleus_sampling(logits, top_p)
            check_top_p = True
        # nucleus_sampling already sorted/selected, so no re-selection needed.
        if not check_top_p:
            if check_top_k:
                logits = logits.index_select(0, idxes)
                if 'cuda' in self.device:
                    idxes = idxes.cpu()
                idxes = idxes.detach().numpy().tolist()
            else:
                # No filter applied: every position survives.
                idxes = np.arange(len(logits)).tolist()
        else:
            logits = logits[:len(idxes)]
            if 'cuda' in self.device:
                idxes = idxes.cpu()
            idxes = idxes.detach().numpy().tolist()
        return logits, idxes
    def pick(self, logits, idxes, target_word, n=1, include_punctuation=False):
        """Sample candidates and return up to `n` usable (word, proba) pairs."""
        # Over-sample (n*10) so enough survive the get_candidiates() filters.
        candidate_ids, candidate_probas = self.prob_multinomial(logits, n=n*10)
        candidate_ids = [idxes[candidate_id] for candidate_id in candidate_ids]
        results = self.get_candidiates(candidate_ids, candidate_probas, target_word, n,
                                       include_punctuation)
        return results
    def id2token(self, _id):
        """Abstract: subclasses map a vocabulary id to its surface token."""
        raise NotImplementedError()
    def prob_multinomial(self, logits, n):
        """Draw up to `n` distinct positions from softmax(logits); probas unless disabled is None."""
        probas = F.softmax(logits, dim=-1)
        # Few non-zero entries remain once top_k / top_p filtering was applied.
        num_sample = min(n, torch.nonzero(probas, as_tuple=False).size(0))
        filtered_top_n_ids = torch.multinomial(probas, num_samples=num_sample, replacement=False).tolist()
        if self.optimize['return_proba']:
            top_n_probas = [probas[_id] for _id in filtered_top_n_ids]
            return filtered_top_n_ids, top_n_probas
        return filtered_top_n_ids, None
    def is_skip_candidate(self, candidate):
        """Hook for subclasses to veto a candidate word (default: keep all)."""
        return False
    def get_candidiates(self, candidate_ids, candidate_probas, target_word=None, n=1,
                        include_punctuation=False):
        """Filter candidates to at most `n` usable words, preserving draw order."""
        results = []
        if candidate_probas is None:
            candidate_probas = [0] * len(candidate_ids)
        for candidate_id, candidate_proba in zip(candidate_ids, candidate_probas):
            candidate_word = self.id2token(candidate_id)
            # Unusable token: empty, unknown, bare subword prefix or reserved.
            if candidate_word in ['', self.UNKNOWN_TOKEN, self.SUBWORD_PREFIX] or 'unused' in candidate_word:
                continue
            # Candidate equals the word being replaced.
            if target_word is not None and candidate_word.lower() == target_word.lower():
                continue
            # Subclass-specific skip list (e.g. stop words).
            if self.is_skip_candidate(candidate_word):
                continue
            # Punctuation, unless the caller explicitly allows it.
            if not include_punctuation and candidate_word in string.punctuation:
                continue
            results.append((candidate_word, candidate_proba))
            if len(results) >= n:
                break
        return results
| true | true |
1c2fbae5ecfebac86fe0e32e8e1486af950d1fe0 | 2,988 | py | Python | models/others/lsa.py | dahyun-kang/renet | 43a4e5af96b56c99a0cd63e35bd272db72f7f3a4 | [
"MIT"
] | 50 | 2021-08-18T23:41:16.000Z | 2022-03-09T03:08:40.000Z | models/others/lsa.py | ChiaraDom18/renet | b58ebc092fcdb40e7f534f6407512df4f109cacd | [
"MIT"
] | 6 | 2021-08-31T11:55:36.000Z | 2022-02-10T02:16:27.000Z | models/others/lsa.py | ChiaraDom18/renet | b58ebc092fcdb40e7f534f6407512df4f109cacd | [
"MIT"
] | 11 | 2021-08-30T08:36:36.000Z | 2022-03-22T07:21:45.000Z | """ code references: https://github.com/leaderj1001/Stand-Alone-Self-Attention """
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.nn.init as init
class LocalSelfAttention(nn.Module):
    """Stand-alone local self-attention over kernel_size x kernel_size windows.

    Queries come from the un-padded input, keys/values from the padded input;
    the keys receive learnable relative-position embeddings (half of the
    channels encode the vertical offset, half the horizontal one).  The
    attention output goes through a BN -> ReLU -> 1x1-conv -> BN head.
    Output is (batch, out_channels, H, W); this assumes padding/stride are
    chosen so the unfolded key windows align with the query grid — confirm
    at the call site.
    """

    def __init__(self, in_channels, out_channels, kernel_size, stride=1, padding=0, groups=1, bias=False):
        super().__init__()
        self.out_channels = out_channels
        self.kernel_size = kernel_size
        self.stride = stride
        self.padding = padding
        self.groups = groups
        assert self.out_channels % self.groups == 0, "out_channels should be divided by groups. (example: out_channels: 40, groups: 4)"
        # Relative positional embeddings: one half of the channels for the
        # row offset, the other half for the column offset.
        half_channels = out_channels // 2
        self.rel_h = nn.Parameter(torch.randn(half_channels, 1, 1, kernel_size, 1), requires_grad=True)
        self.rel_w = nn.Parameter(torch.randn(half_channels, 1, 1, 1, kernel_size), requires_grad=True)
        # 1x1 projections producing keys, queries and values.
        self.key_conv = nn.Conv2d(in_channels, out_channels, kernel_size=1, bias=bias)
        self.query_conv = nn.Conv2d(in_channels, out_channels, kernel_size=1, bias=bias)
        self.value_conv = nn.Conv2d(in_channels, out_channels, kernel_size=1, bias=bias)
        # Aggregation head applied to the attention output.
        self.agg = nn.Sequential(
            nn.BatchNorm2d(out_channels),
            nn.ReLU(inplace=True),
            nn.Conv2d(out_channels, out_channels, kernel_size=1, bias=False),
            nn.BatchNorm2d(out_channels))
        self.reset_parameters()

    def forward(self, x):
        """Attend over each local window of `x` and return the aggregated features."""
        n_batch, _, height, width = x.size()
        pad = self.padding
        padded = F.pad(x, [pad, pad, pad, pad])
        queries = self.query_conv(x)
        keys = self.key_conv(padded)
        values = self.value_conv(padded)
        # Extract one kernel_size x kernel_size neighbourhood per position.
        keys = keys.unfold(2, self.kernel_size, self.stride).unfold(3, self.kernel_size, self.stride)
        values = values.unfold(2, self.kernel_size, self.stride).unfold(3, self.kernel_size, self.stride)
        # Inject the relative positional embeddings into the keys.
        keys_h, keys_w = keys.split(self.out_channels // 2, dim=1)
        keys = torch.cat((keys_h + self.rel_h, keys_w + self.rel_w), dim=1)
        per_group = self.out_channels // self.groups
        keys = keys.contiguous().view(n_batch, self.groups, per_group, height, width, -1)
        values = values.contiguous().view(n_batch, self.groups, per_group, height, width, -1)
        queries = queries.view(n_batch, self.groups, per_group, height, width, 1)
        # Softmax over the flattened window, then a weighted sum of the values.
        attention = F.softmax(queries * keys, dim=-1)
        out = torch.einsum('bnchwk,bnchwk -> bnchw', attention, values).view(n_batch, -1, height, width)
        return self.agg(out)

    def reset_parameters(self):
        """Kaiming-init the 1x1 projections; N(0, 1) for the positional embeddings."""
        for conv in (self.key_conv, self.value_conv, self.query_conv):
            init.kaiming_normal_(conv.weight, mode='fan_out', nonlinearity='relu')
        init.normal_(self.rel_h, 0, 1)
        init.normal_(self.rel_w, 0, 1)
| 43.941176 | 135 | 0.66834 |
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.nn.init as init
class LocalSelfAttention(nn.Module):
    """2-D local (windowed) self-attention layer.

    For every spatial position, attention is computed over a
    kernel_size x kernel_size neighbourhood of the (padded) input, with
    learned relative positional embeddings split between the height and
    width halves of the channel dimension.
    """

    def __init__(self, in_channels, out_channels, kernel_size, stride=1, padding=0, groups=1, bias=False):
        super(LocalSelfAttention, self).__init__()
        self.out_channels = out_channels
        self.kernel_size = kernel_size
        self.stride = stride
        self.padding = padding
        self.groups = groups
        # Channels are split evenly across attention groups (heads).
        assert self.out_channels % self.groups == 0, "out_channels should be divided by groups. (example: out_channels: 40, groups: 4)"
        # Relative positional embeddings for the height/width axes of the
        # local window; each covers half of the output channels.
        self.rel_h = nn.Parameter(torch.randn(out_channels // 2, 1, 1, kernel_size, 1), requires_grad=True)
        self.rel_w = nn.Parameter(torch.randn(out_channels // 2, 1, 1, 1, kernel_size), requires_grad=True)
        # 1x1 convolutions producing keys, queries and values.
        self.key_conv = nn.Conv2d(in_channels, out_channels, kernel_size=1, bias=bias)
        self.query_conv = nn.Conv2d(in_channels, out_channels, kernel_size=1, bias=bias)
        self.value_conv = nn.Conv2d(in_channels, out_channels, kernel_size=1, bias=bias)
        # Output aggregation: BN -> ReLU -> 1x1 conv -> BN.
        self.agg = nn.Sequential(
            nn.BatchNorm2d(out_channels),
            nn.ReLU(inplace=True),
            nn.Conv2d(out_channels, out_channels, kernel_size=1, bias=False),
            nn.BatchNorm2d(out_channels))
        self.reset_parameters()

    def forward(self, x):
        """Apply local self-attention; (batch, C_in, H, W) -> (batch, C_out, H, W)."""
        batch, channels, height, width = x.size()
        # Pad so every position has a full kernel_size neighbourhood.
        padded_x = F.pad(x, [self.padding, self.padding, self.padding, self.padding])
        q_out = self.query_conv(x)
        k_out = self.key_conv(padded_x)
        v_out = self.value_conv(padded_x)
        # Sliding local windows: trailing dims become (k, k).
        k_out = k_out.unfold(2, self.kernel_size, self.stride).unfold(3, self.kernel_size, self.stride)
        v_out = v_out.unfold(2, self.kernel_size, self.stride).unfold(3, self.kernel_size, self.stride)
        # Add relative positional embeddings (height half / width half).
        k_out_h, k_out_w = k_out.split(self.out_channels // 2, dim=1)
        k_out = torch.cat((k_out_h + self.rel_h, k_out_w + self.rel_w), dim=1)
        # Flatten each window into one attention axis, grouped by head.
        # NOTE(review): the view back to (height, width) assumes the unfold
        # grid matches the input spatial size — confirm for stride > 1.
        k_out = k_out.contiguous().view(batch, self.groups, self.out_channels // self.groups, height, width, -1)
        v_out = v_out.contiguous().view(batch, self.groups, self.out_channels // self.groups, height, width, -1)
        q_out = q_out.view(batch, self.groups, self.out_channels // self.groups, height, width, 1)
        # Softmax over the window axis, then a weighted sum of values.
        out = q_out * k_out
        out = F.softmax(out, dim=-1)
        out = torch.einsum('bnchwk,bnchwk -> bnchw', out, v_out).view(batch, -1, height, width)
        out = self.agg(out)
        return out

    def reset_parameters(self):
        """Kaiming-init the projection convs; re-sample positional embeddings."""
        init.kaiming_normal_(self.key_conv.weight, mode='fan_out', nonlinearity='relu')
        init.kaiming_normal_(self.value_conv.weight, mode='fan_out', nonlinearity='relu')
        init.kaiming_normal_(self.query_conv.weight, mode='fan_out', nonlinearity='relu')
        init.normal_(self.rel_h, 0, 1)
        init.normal_(self.rel_w, 0, 1)
| true | true |
1c2fbb148908d1bc8211429d7a1d2e851c65fc4a | 2,081 | py | Python | autosuspend_mcstatus/activity.py | nikp123/autosuspend-mcstatus | 0fec99e9ee6fa961bfafdd677684290495fcdcbf | [
"MIT"
] | null | null | null | autosuspend_mcstatus/activity.py | nikp123/autosuspend-mcstatus | 0fec99e9ee6fa961bfafdd677684290495fcdcbf | [
"MIT"
] | null | null | null | autosuspend_mcstatus/activity.py | nikp123/autosuspend-mcstatus | 0fec99e9ee6fa961bfafdd677684290495fcdcbf | [
"MIT"
] | null | null | null | from typing import *
import configparser
import socket
from mcstatus import MinecraftServer
from autosuspend.checks import Activity, ConfigurationError
from .util import MCStatusMixin
class ServerOnline(Activity, MCStatusMixin):
    """autosuspend activity check: active while the Minecraft server answers pings."""

    @classmethod
    def create(cls, name: str, config: configparser.SectionProxy) -> "ServerOnline":
        """Build the check from an autosuspend configuration section."""
        return cls(name, **cls.collect_init_args(config))

    def __init__(self, name: str, **kwargs) -> None:
        MCStatusMixin.__init__(self, **kwargs)
        Activity.__init__(self, name)

    def check(self) -> Optional[str]:
        """Return a reason string if the server answers a ping, else None.

        A timeout or connection error means the server is unreachable,
        which autosuspend interprets as "no activity".
        """
        self.logger.debug("Sending SLP to {}".format(self._address))
        try:
            self._server.ping(tries=self._retries)
            return "Server is online"
        except socket.timeout:
            # Previously swallowed silently; log for consistency with PlayersOnline.
            self.logger.warning("SLP timed out, server is probably down")
        except ConnectionError as error:
            self.logger.warning("Connection error: {}".format(error))
        return None
class PlayersOnline(Activity, MCStatusMixin):
    """autosuspend activity check: active while more than `treshold` players
    are online on the Minecraft server.

    Note: "treshold" (sic) is the user-facing configuration key, so the
    spelling is part of the interface and must not be corrected here.
    """

    @classmethod
    def create(cls, name: str, config: configparser.SectionProxy) -> "PlayersOnline":
        """Build the check from an autosuspend configuration section.

        Raises:
            ConfigurationError: if the "treshold" option is not an integer.
        """
        try:
            treshold = config.getint("treshold", fallback=0)
        except ValueError as error:
            raise ConfigurationError("Treshold must be integer") from error
        return cls(name, treshold, **cls.collect_init_args(config))

    def __init__(self, name: str, treshold: int, **kwargs) -> None:
        MCStatusMixin.__init__(self, **kwargs)
        Activity.__init__(self, name)
        self._treshold = treshold

    def check(self) -> Optional[str]:
        """Return a reason string if player count exceeds the threshold, else None.

        Unreachable server (timeout / connection error) is logged and
        treated as "no activity".
        """
        self.logger.debug("Sending SLP to {}".format(self._address))
        try:
            status = self._server.status(tries=self._retries)
            # Strictly greater than: a count equal to the threshold is inactive.
            if status.players.online > self._treshold:
                return "{} players online on {}".format(
                    status.players.online,
                    self._address
                )
        except socket.timeout as error:
            self.logger.warning("SLP timed out, server is probably down")
        except ConnectionError as error:
            self.logger.warning("Connection error: {}".format(error))
| 35.87931 | 85 | 0.635272 | from typing import *
import configparser
import socket
from mcstatus import MinecraftServer
from autosuspend.checks import Activity, ConfigurationError
from .util import MCStatusMixin
class ServerOnline(Activity, MCStatusMixin):
@classmethod
def create(cls, name: str, config: configparser.SectionProxy) -> "ServerOnline":
return cls(name, **cls.collect_init_args(config))
def __init__(self, name: str, **kwargs) -> None:
MCStatusMixin.__init__(self, **kwargs)
Activity.__init__(self, name)
def check(self) -> Optional[str]:
self.logger.debug("Sending SLP to {}".format(self._address))
try:
self._server.ping(tries=self._retries)
return "Server is online"
except socket.timeout as error:
pass
except ConnectionError as error:
pass
class PlayersOnline(Activity, MCStatusMixin):
@classmethod
def create(cls, name: str, config: configparser.SectionProxy) -> "PlayersOnline":
try:
treshold = config.getint("treshold", fallback=0)
except ValueError as error:
raise ConfigurationError("Treshold must be integer") from error
return cls(name, treshold, **cls.collect_init_args(config))
def __init__(self, name: str, treshold: int, **kwargs) -> None:
MCStatusMixin.__init__(self, **kwargs)
Activity.__init__(self, name)
self._treshold = treshold
def check(self) -> Optional[str]:
self.logger.debug("Sending SLP to {}".format(self._address))
try:
status = self._server.status(tries=self._retries)
if status.players.online > self._treshold:
return "{} players online on {}".format(
status.players.online,
self._address
)
except socket.timeout as error:
self.logger.warning("SLP timed out, server is probably down")
except ConnectionError as error:
self.logger.warning("Connection error: {}".format(error))
| true | true |
1c2fbb6630d7d1f4de8a8b6f7db503ab9258f7c7 | 1,544 | py | Python | models/layers/linears.py | MachineWei/ChineseNer | fae4dfb0498c2f1f7dfafee70fa47c935266bfaf | [
"MIT"
] | 1 | 2021-08-28T11:45:18.000Z | 2021-08-28T11:45:18.000Z | models/layers/linears.py | MachineWei/bert-crf-for-ner | fae4dfb0498c2f1f7dfafee70fa47c935266bfaf | [
"MIT"
] | null | null | null | models/layers/linears.py | MachineWei/bert-crf-for-ner | fae4dfb0498c2f1f7dfafee70fa47c935266bfaf | [
"MIT"
] | null | null | null | import torch
import torch.nn as nn
import torch.nn.functional as F
# from torch.modeling_utils import PoolerStartLogits, PoolerEndLogits
class FeedForwardNetwork(nn.Module):
    """Two-layer MLP: linear1 -> ReLU -> dropout -> linear2."""

    def __init__(self, input_size, hidden_size, output_size, dropout_rate=0):
        super(FeedForwardNetwork, self).__init__()
        self.dropout_rate = dropout_rate
        # Layer creation order is part of the (seeded) init behaviour; keep it.
        self.linear1 = nn.Linear(input_size, hidden_size)
        self.linear2 = nn.Linear(hidden_size, output_size)

    def forward(self, x):
        """Project x of shape (..., input_size) to (..., output_size)."""
        hidden = self.linear1(x)
        hidden = F.relu(hidden)
        # Dropout is only active in training mode.
        hidden = F.dropout(hidden, p=self.dropout_rate, training=self.training)
        return self.linear2(hidden)
class PoolerStartLogits(nn.Module):
    """Linear head scoring each token position as a span start."""

    def __init__(self, hidden_size, num_classes):
        super(PoolerStartLogits, self).__init__()
        self.dense = nn.Linear(hidden_size, num_classes)

    def forward(self, hidden_states, p_mask=None):
        """Map (..., hidden_size) states to (..., num_classes) logits.

        `p_mask` is accepted for signature compatibility but unused.
        """
        return self.dense(hidden_states)
class PoolerEndLogits(nn.Module):
    """Head scoring each token position as a span end, conditioned on the
    start-position representation."""

    def __init__(self, hidden_size, num_classes):
        super(PoolerEndLogits, self).__init__()
        # NOTE(review): forward feeds cat([hidden_states, start_positions])
        # into dense_0, so `hidden_size` here must equal the concatenated
        # width (not the encoder's hidden size alone) — confirm with callers.
        self.dense_0 = nn.Linear(hidden_size, hidden_size)
        self.activation = nn.Tanh()
        self.LayerNorm = nn.LayerNorm(hidden_size)
        self.dense_1 = nn.Linear(hidden_size, num_classes)

    def forward(self, hidden_states, start_positions=None, p_mask=None):
        """Score end positions given token states and start representations.

        `start_positions` is required in practice: the default of None will
        raise inside torch.cat. `p_mask` is accepted but unused.
        """
        x = self.dense_0(torch.cat([hidden_states, start_positions], dim=-1))
        x = self.activation(x)
        x = self.LayerNorm(x)
        x = self.dense_1(x)
        return x
| 36.761905 | 96 | 0.69171 | import torch
import torch.nn as nn
import torch.nn.functional as F
class FeedForwardNetwork(nn.Module):
def __init__(self, input_size, hidden_size, output_size, dropout_rate=0):
super(FeedForwardNetwork, self).__init__()
self.dropout_rate = dropout_rate
self.linear1 = nn.Linear(input_size, hidden_size)
self.linear2 = nn.Linear(hidden_size, output_size)
def forward(self, x):
x_proj = F.dropout(F.relu(self.linear1(x)), p=self.dropout_rate, training=self.training)
x_proj = self.linear2(x_proj)
return x_proj
class PoolerStartLogits(nn.Module):
def __init__(self, hidden_size, num_classes):
super(PoolerStartLogits, self).__init__()
self.dense = nn.Linear(hidden_size, num_classes)
def forward(self, hidden_states, p_mask=None):
x = self.dense(hidden_states)
return x
class PoolerEndLogits(nn.Module):
def __init__(self, hidden_size, num_classes):
super(PoolerEndLogits, self).__init__()
self.dense_0 = nn.Linear(hidden_size, hidden_size)
self.activation = nn.Tanh()
self.LayerNorm = nn.LayerNorm(hidden_size)
self.dense_1 = nn.Linear(hidden_size, num_classes)
def forward(self, hidden_states, start_positions=None, p_mask=None):
x = self.dense_0(torch.cat([hidden_states, start_positions], dim=-1))
x = self.activation(x)
x = self.LayerNorm(x)
x = self.dense_1(x)
return x
| true | true |
1c2fbc32946418c6d20fc9cc650f8583cf072fe2 | 536 | py | Python | src/thenewboston/transactions/validation.py | achalpatel/thenewboston-python | 4044ce07cb5e0d1f92b4332bbd8c6ac8f33bcdb9 | [
"MIT"
] | 122 | 2020-07-12T23:08:49.000Z | 2021-12-18T16:14:10.000Z | src/thenewboston/transactions/validation.py | achalpatel/thenewboston-python | 4044ce07cb5e0d1f92b4332bbd8c6ac8f33bcdb9 | [
"MIT"
] | 47 | 2020-07-15T02:18:09.000Z | 2021-09-22T19:51:59.000Z | src/thenewboston/transactions/validation.py | achalpatel/thenewboston-python | 4044ce07cb5e0d1f92b4332bbd8c6ac8f33bcdb9 | [
"MIT"
] | 52 | 2020-07-13T10:49:52.000Z | 2021-10-30T03:34:55.000Z | def validate_transaction_exists(*, amount, fee, error, recipient, txs):
"""Check for the existence of a Tx"""
tx = next(
(
tx for tx in txs if
tx.get('amount') >= amount
and tx.get('fee') == fee
and tx.get('recipient') == recipient
),
None
)
if not tx:
raise error({
'error_message': 'Tx not found',
'expected_amount': amount,
'expected_fee': fee,
'expected_recipient': recipient
})
| 26.8 | 71 | 0.494403 | def validate_transaction_exists(*, amount, fee, error, recipient, txs):
tx = next(
(
tx for tx in txs if
tx.get('amount') >= amount
and tx.get('fee') == fee
and tx.get('recipient') == recipient
),
None
)
if not tx:
raise error({
'error_message': 'Tx not found',
'expected_amount': amount,
'expected_fee': fee,
'expected_recipient': recipient
})
| true | true |
1c2fbd6abc51e920a13f546a06ef5107166764c0 | 21,025 | py | Python | PC/utils/scheduler.py | StanLei52/GEBD | 5f7e722e0384f9877c75d116e1db72400d2bc58f | [
"MIT"
] | 44 | 2021-03-24T07:10:57.000Z | 2022-03-12T11:49:14.000Z | PC/utils/scheduler.py | StanLei52/GEBD | 5f7e722e0384f9877c75d116e1db72400d2bc58f | [
"MIT"
] | 2 | 2021-05-26T09:31:55.000Z | 2021-08-11T11:47:38.000Z | PC/utils/scheduler.py | StanLei52/GEBD | 5f7e722e0384f9877c75d116e1db72400d2bc58f | [
"MIT"
] | 6 | 2021-04-07T00:51:51.000Z | 2022-01-12T01:54:41.000Z | from typing import Dict, Any
import torch
import math
import logging
import numpy as np
_logger = logging.getLogger(__name__)
class Scheduler:
    """ Parameter Scheduler Base Class
    A scheduler base class that can be used to schedule any optimizer parameter groups.
    Unlike the builtin PyTorch schedulers, this is intended to be consistently called
    * At the END of each epoch, before incrementing the epoch count, to calculate next epoch's value
    * At the END of each optimizer update, after incrementing the update count, to calculate next update's value
    The schedulers built on this should try to remain as stateless as possible (for simplicity).
    This family of schedulers is attempting to avoid the confusion of the meaning of 'last_epoch'
    and -1 values for special behaviour. All epoch and update counts must be tracked in the training
    code and explicitly passed in to the schedulers on the corresponding step or step_update call.
    Based on ideas from:
    * https://github.com/pytorch/fairseq/tree/master/fairseq/optim/lr_scheduler
    * https://github.com/allenai/allennlp/tree/master/allennlp/training/learning_rate_schedulers
    """

    def __init__(self,
                 optimizer: torch.optim.Optimizer,
                 param_group_field: str,
                 noise_range_t=None,
                 noise_type='normal',
                 noise_pct=0.67,
                 noise_std=1.0,
                 noise_seed=None,
                 initialize: bool = True) -> None:
        self.optimizer = optimizer
        self.param_group_field = param_group_field
        self._initial_param_group_field = f"initial_{param_group_field}"
        if initialize:
            # Snapshot the starting value of each param group so schedules
            # always derive from the initial value, not the current one.
            for i, group in enumerate(self.optimizer.param_groups):
                if param_group_field not in group:
                    raise KeyError(f"{param_group_field} missing from param_groups[{i}]")
                group.setdefault(self._initial_param_group_field, group[param_group_field])
        else:
            # Resuming: the snapshot must already exist in every group.
            for i, group in enumerate(self.optimizer.param_groups):
                if self._initial_param_group_field not in group:
                    raise KeyError(f"{self._initial_param_group_field} missing from param_groups[{i}]")
        self.base_values = [group[self._initial_param_group_field] for group in self.optimizer.param_groups]
        self.metric = None  # any point to having this for all?
        self.noise_range_t = noise_range_t
        self.noise_pct = noise_pct
        self.noise_type = noise_type
        self.noise_std = noise_std
        self.noise_seed = noise_seed if noise_seed is not None else 42
        # Apply the base values immediately so the optimizer starts in sync.
        self.update_groups(self.base_values)

    def state_dict(self) -> Dict[str, Any]:
        # Everything except the optimizer reference is serializable state.
        return {key: value for key, value in self.__dict__.items() if key != 'optimizer'}

    def load_state_dict(self, state_dict: Dict[str, Any]) -> None:
        self.__dict__.update(state_dict)

    def get_epoch_values(self, epoch: int):
        # Subclasses scheduling per-epoch override this; None disables.
        return None

    def get_update_values(self, num_updates: int):
        # Subclasses scheduling per-update override this; None disables.
        return None

    def step(self, epoch: int, metric: float = None) -> None:
        """Advance the schedule at epoch granularity."""
        self.metric = metric
        values = self.get_epoch_values(epoch)
        if values is not None:
            values = self._add_noise(values, epoch)
            self.update_groups(values)

    def step_update(self, num_updates: int, metric: float = None):
        """Advance the schedule at optimizer-update granularity."""
        self.metric = metric
        values = self.get_update_values(num_updates)
        if values is not None:
            values = self._add_noise(values, num_updates)
            self.update_groups(values)

    def update_groups(self, values):
        """Write scheduled value(s) into every optimizer param group."""
        if not isinstance(values, (list, tuple)):
            values = [values] * len(self.optimizer.param_groups)
        for param_group, value in zip(self.optimizer.param_groups, values):
            param_group[self.param_group_field] = value

    def _add_noise(self, lrs, t):
        """Optionally perturb the scheduled values with seeded random noise.

        Noise is applied only when t falls inside noise_range_t; the
        generator is re-seeded from (noise_seed + t) so results are
        reproducible per step.
        """
        if self.noise_range_t is not None:
            if isinstance(self.noise_range_t, (list, tuple)):
                apply_noise = self.noise_range_t[0] <= t < self.noise_range_t[1]
            else:
                apply_noise = t >= self.noise_range_t
            if apply_noise:
                g = torch.Generator()
                g.manual_seed(self.noise_seed + t)
                if self.noise_type == 'normal':
                    while True:
                        # resample if noise out of percent limit, brute force but shouldn't spin much
                        noise = torch.randn(1, generator=g).item()
                        if abs(noise) < self.noise_pct:
                            break
                else:
                    # Uniform noise in (-noise_pct, noise_pct).
                    noise = 2 * (torch.rand(1, generator=g).item() - 0.5) * self.noise_pct
                lrs = [v + v * noise for v in lrs]
        return lrs
class CosineLRScheduler(Scheduler):
    """
    Cosine decay with restarts.
    This is described in the paper https://arxiv.org/abs/1608.03983.
    Inspiration from
    https://github.com/allenai/allennlp/blob/master/allennlp/training/learning_rate_schedulers/cosine.py
    """

    def __init__(self,
                 optimizer: torch.optim.Optimizer,
                 t_initial: int,
                 t_mul: float = 1.,
                 lr_min: float = 0.,
                 decay_rate: float = 1.,
                 warmup_t=0,
                 warmup_lr_init=0,
                 warmup_prefix=False,
                 cycle_limit=0,
                 t_in_epochs=True,
                 noise_range_t=None,
                 noise_pct=0.67,
                 noise_std=1.0,
                 noise_seed=42,
                 initialize=True) -> None:
        super().__init__(
            optimizer, param_group_field="lr",
            noise_range_t=noise_range_t, noise_pct=noise_pct, noise_std=noise_std, noise_seed=noise_seed,
            initialize=initialize)
        assert t_initial > 0
        assert lr_min >= 0
        if t_initial == 1 and t_mul == 1 and decay_rate == 1:
            _logger.warning("Cosine annealing scheduler will have no effect on the learning "
                            "rate since t_initial = t_mul = eta_mul = 1.")
        self.t_initial = t_initial          # length of the first cycle
        self.t_mul = t_mul                  # cycle-length multiplier per restart
        self.lr_min = lr_min
        self.decay_rate = decay_rate        # peak-LR decay per restart
        self.cycle_limit = cycle_limit      # 0 = unlimited restarts
        self.warmup_t = warmup_t
        self.warmup_lr_init = warmup_lr_init
        self.warmup_prefix = warmup_prefix  # if True, warmup doesn't count toward cycle time
        self.t_in_epochs = t_in_epochs
        if self.warmup_t:
            # Per-group linear warmup increments from warmup_lr_init to base lr.
            self.warmup_steps = [(v - warmup_lr_init) / self.warmup_t for v in self.base_values]
            super().update_groups(self.warmup_lr_init)
        else:
            self.warmup_steps = [1 for _ in self.base_values]

    def _get_lr(self, t):
        """Compute per-group LR at step t (warmup, then cosine cycles)."""
        if t < self.warmup_t:
            lrs = [self.warmup_lr_init + t * s for s in self.warmup_steps]
        else:
            if self.warmup_prefix:
                t = t - self.warmup_t
            if self.t_mul != 1:
                # Geometric cycle lengths: solve for current cycle index i,
                # its length t_i, and position t_curr within the cycle.
                i = math.floor(math.log(1 - t / self.t_initial * (1 - self.t_mul), self.t_mul))
                t_i = self.t_mul ** i * self.t_initial
                t_curr = t - (1 - self.t_mul ** i) / (1 - self.t_mul) * self.t_initial
            else:
                # Constant cycle length.
                i = t // self.t_initial
                t_i = self.t_initial
                t_curr = t - (self.t_initial * i)
            # Each restart decays the peak (and floor) by decay_rate.
            gamma = self.decay_rate ** i
            lr_min = self.lr_min * gamma
            lr_max_values = [v * gamma for v in self.base_values]
            if self.cycle_limit == 0 or (self.cycle_limit > 0 and i < self.cycle_limit):
                lrs = [
                    lr_min + 0.5 * (lr_max - lr_min) * (1 + math.cos(math.pi * t_curr / t_i)) for lr_max in lr_max_values
                ]
            else:
                # Past the last allowed cycle: hold at the floor.
                lrs = [self.lr_min for _ in self.base_values]
        return lrs

    def get_epoch_values(self, epoch: int):
        if self.t_in_epochs:
            return self._get_lr(epoch)
        else:
            return None

    def get_update_values(self, num_updates: int):
        if not self.t_in_epochs:
            return self._get_lr(num_updates)
        else:
            return None

    def get_cycle_length(self, cycles=0):
        """Total steps covered by the given number of cycles (sum of a
        geometric series when t_mul != 1)."""
        if not cycles:
            cycles = self.cycle_limit
        cycles = max(1, cycles)
        if self.t_mul == 1.0:
            return self.t_initial * cycles
        else:
            return int(math.floor(-self.t_initial * (self.t_mul ** cycles - 1) / (1 - self.t_mul)))
class TanhLRScheduler(Scheduler):
    """
    Hyberbolic-Tangent decay with restarts.
    This is described in the paper https://arxiv.org/abs/1806.01593
    """

    def __init__(self,
                 optimizer: torch.optim.Optimizer,
                 t_initial: int,
                 lb: float = -6.,
                 ub: float = 4.,
                 t_mul: float = 1.,
                 lr_min: float = 0.,
                 decay_rate: float = 1.,
                 warmup_t=0,
                 warmup_lr_init=0,
                 warmup_prefix=False,
                 cycle_limit=0,
                 t_in_epochs=True,
                 noise_range_t=None,
                 noise_pct=0.67,
                 noise_std=1.0,
                 noise_seed=42,
                 initialize=True) -> None:
        super().__init__(
            optimizer, param_group_field="lr",
            noise_range_t=noise_range_t, noise_pct=noise_pct, noise_std=noise_std, noise_seed=noise_seed,
            initialize=initialize)
        assert t_initial > 0
        assert lr_min >= 0
        assert lb < ub
        assert cycle_limit >= 0
        assert warmup_t >= 0
        assert warmup_lr_init >= 0
        self.lb = lb                        # lower bound of the tanh argument
        self.ub = ub                        # upper bound of the tanh argument
        self.t_initial = t_initial
        self.t_mul = t_mul
        self.lr_min = lr_min
        self.decay_rate = decay_rate
        self.cycle_limit = cycle_limit
        self.warmup_t = warmup_t
        self.warmup_lr_init = warmup_lr_init
        self.warmup_prefix = warmup_prefix
        self.t_in_epochs = t_in_epochs
        if self.warmup_t:
            # Warm up toward the schedule value at warmup's end (unless
            # warmup_prefix, in which case toward the base values).
            t_v = self.base_values if self.warmup_prefix else self._get_lr(self.warmup_t)
            self.warmup_steps = [(v - warmup_lr_init) / self.warmup_t for v in t_v]
            super().update_groups(self.warmup_lr_init)
        else:
            self.warmup_steps = [1 for _ in self.base_values]

    def _get_lr(self, t):
        """Compute per-group LR at step t (warmup, then tanh-shaped cycles)."""
        if t < self.warmup_t:
            lrs = [self.warmup_lr_init + t * s for s in self.warmup_steps]
        else:
            if self.warmup_prefix:
                t = t - self.warmup_t
            if self.t_mul != 1:
                # Geometric cycle lengths: cycle index i, length t_i,
                # position t_curr within the cycle.
                i = math.floor(math.log(1 - t / self.t_initial * (1 - self.t_mul), self.t_mul))
                t_i = self.t_mul ** i * self.t_initial
                t_curr = t - (1 - self.t_mul ** i) / (1 - self.t_mul) * self.t_initial
            else:
                i = t // self.t_initial
                t_i = self.t_initial
                t_curr = t - (self.t_initial * i)
            if self.cycle_limit == 0 or (self.cycle_limit > 0 and i < self.cycle_limit):
                gamma = self.decay_rate ** i
                lr_min = self.lr_min * gamma
                lr_max_values = [v * gamma for v in self.base_values]
                # tr runs 0 -> 1 over the cycle; tanh sweeps from lb to ub.
                tr = t_curr / t_i
                lrs = [
                    lr_min + 0.5 * (lr_max - lr_min) * (1 - math.tanh(self.lb * (1. - tr) + self.ub * tr))
                    for lr_max in lr_max_values
                ]
            else:
                # Past the last cycle: hold at the fully-decayed floor.
                lrs = [self.lr_min * (self.decay_rate ** self.cycle_limit) for _ in self.base_values]
        return lrs

    def get_epoch_values(self, epoch: int):
        if self.t_in_epochs:
            return self._get_lr(epoch)
        else:
            return None

    def get_update_values(self, num_updates: int):
        if not self.t_in_epochs:
            return self._get_lr(num_updates)
        else:
            return None

    def get_cycle_length(self, cycles=0):
        """Total steps covered by the given number of cycles."""
        if not cycles:
            cycles = self.cycle_limit
        cycles = max(1, cycles)
        if self.t_mul == 1.0:
            return self.t_initial * cycles
        else:
            return int(math.floor(-self.t_initial * (self.t_mul ** cycles - 1) / (1 - self.t_mul)))
class StepLRScheduler(Scheduler):
    """Step decay: optional linear warmup, then multiply the base value by
    decay_rate once every decay_t steps."""

    def __init__(self,
                 optimizer: torch.optim.Optimizer,
                 decay_t: float,
                 decay_rate: float = 1.,
                 warmup_t=0,
                 warmup_lr_init=0,
                 t_in_epochs=True,
                 noise_range_t=None,
                 noise_pct=0.67,
                 noise_std=1.0,
                 noise_seed=42,
                 initialize=True,
                 ) -> None:
        super().__init__(
            optimizer, param_group_field="lr",
            noise_range_t=noise_range_t, noise_pct=noise_pct, noise_std=noise_std, noise_seed=noise_seed,
            initialize=initialize)
        self.decay_t = decay_t
        self.decay_rate = decay_rate
        self.warmup_t = warmup_t
        self.warmup_lr_init = warmup_lr_init
        self.t_in_epochs = t_in_epochs
        if self.warmup_t:
            # Per-group increment so lr reaches its base value after warmup_t steps.
            self.warmup_steps = [(base - warmup_lr_init) / self.warmup_t for base in self.base_values]
            super().update_groups(self.warmup_lr_init)
        else:
            self.warmup_steps = [1 for _ in self.base_values]

    def _get_lr(self, t):
        """Per-group value at step t: linear warmup, then stepwise decay."""
        if t < self.warmup_t:
            return [self.warmup_lr_init + t * step for step in self.warmup_steps]
        num_decays = t // self.decay_t
        factor = self.decay_rate ** num_decays
        return [base * factor for base in self.base_values]

    def get_epoch_values(self, epoch: int):
        return self._get_lr(epoch) if self.t_in_epochs else None

    def get_update_values(self, num_updates: int):
        return None if self.t_in_epochs else self._get_lr(num_updates)
class PlateauLRScheduler(Scheduler):
    """Decay the LR by a factor every time the validation loss plateaus.

    Wraps torch.optim.lr_scheduler.ReduceLROnPlateau, adding linear warmup
    and optional reproducible LR noise on top.
    """

    def __init__(self,
                 optimizer,
                 decay_rate=0.1,
                 patience_t=10,
                 verbose=True,
                 threshold=1e-4,
                 cooldown_t=0,
                 warmup_t=0,
                 warmup_lr_init=0,
                 lr_min=0,
                 mode='max',
                 noise_range_t=None,
                 noise_type='normal',
                 noise_pct=0.67,
                 noise_std=1.0,
                 noise_seed=None,
                 initialize=True,
                 ):
        super().__init__(optimizer, 'lr', initialize=initialize)
        # Plateau detection is delegated entirely to the PyTorch scheduler.
        self.lr_scheduler = torch.optim.lr_scheduler.ReduceLROnPlateau(
            self.optimizer,
            patience=patience_t,
            factor=decay_rate,
            verbose=verbose,
            threshold=threshold,
            cooldown=cooldown_t,
            mode=mode,
            min_lr=lr_min
        )
        self.noise_range = noise_range_t
        self.noise_pct = noise_pct
        self.noise_type = noise_type
        self.noise_std = noise_std
        self.noise_seed = noise_seed if noise_seed is not None else 42
        self.warmup_t = warmup_t
        self.warmup_lr_init = warmup_lr_init
        if self.warmup_t:
            # Per-group linear warmup increments toward the base values.
            self.warmup_steps = [(v - warmup_lr_init) / self.warmup_t for v in self.base_values]
            super().update_groups(self.warmup_lr_init)
        else:
            self.warmup_steps = [1 for _ in self.base_values]
        # Caches the pre-noise LR so the base scheduler never sees noise.
        self.restore_lr = None

    def state_dict(self):
        # Only the wrapped scheduler's progress needs to be persisted.
        return {
            'best': self.lr_scheduler.best,
            'last_epoch': self.lr_scheduler.last_epoch,
        }

    def load_state_dict(self, state_dict):
        self.lr_scheduler.best = state_dict['best']
        if 'last_epoch' in state_dict:
            self.lr_scheduler.last_epoch = state_dict['last_epoch']

    # override the base class step fn completely
    def step(self, epoch, metric=None):
        if epoch <= self.warmup_t:
            lrs = [self.warmup_lr_init + epoch * s for s in self.warmup_steps]
            super().update_groups(lrs)
        else:
            if self.restore_lr is not None:
                # restore actual LR from before our last noise perturbation before stepping base
                for i, param_group in enumerate(self.optimizer.param_groups):
                    param_group['lr'] = self.restore_lr[i]
                self.restore_lr = None
            self.lr_scheduler.step(metric, epoch)  # step the base scheduler
            if self.noise_range is not None:
                if isinstance(self.noise_range, (list, tuple)):
                    apply_noise = self.noise_range[0] <= epoch < self.noise_range[1]
                else:
                    apply_noise = epoch >= self.noise_range
                if apply_noise:
                    self._apply_noise(epoch)

    def _apply_noise(self, epoch):
        """Perturb group LRs with seeded noise, caching pre-noise values."""
        g = torch.Generator()
        g.manual_seed(self.noise_seed + epoch)
        if self.noise_type == 'normal':
            while True:
                # resample if noise out of percent limit, brute force but shouldn't spin much
                noise = torch.randn(1, generator=g).item()
                if abs(noise) < self.noise_pct:
                    break
        else:
            noise = 2 * (torch.rand(1, generator=g).item() - 0.5) * self.noise_pct
        # apply the noise on top of previous LR, cache the old value so we can restore for normal
        # stepping of base scheduler
        restore_lr = []
        for i, param_group in enumerate(self.optimizer.param_groups):
            old_lr = float(param_group['lr'])
            restore_lr.append(old_lr)
            new_lr = old_lr + old_lr * noise
            param_group['lr'] = new_lr
        self.restore_lr = restore_lr
def create_scheduler(args, optimizer):
    """Build an LR scheduler from parsed training args.

    Supported args.sched values: 'cosine', 'tanh', 'step', 'plateau'
    (anything else returns (None, args.epochs)).

    Returns:
        (lr_scheduler, num_epochs) — num_epochs may exceed args.epochs for
        cosine/tanh, where it is extended to cover the full cycle length
        plus cooldown epochs.
    """
    num_epochs = args.epochs
    # Optional LR noise: the configured fractions are scaled to epoch counts.
    if getattr(args, 'lr_noise', None) is not None:
        lr_noise = getattr(args, 'lr_noise')
        if isinstance(lr_noise, (list, tuple)):
            noise_range = [n * num_epochs for n in lr_noise]
            if len(noise_range) == 1:
                noise_range = noise_range[0]
        else:
            noise_range = lr_noise * num_epochs
    else:
        noise_range = None
    lr_scheduler = None
    if args.sched == 'cosine':
        lr_scheduler = CosineLRScheduler(
            optimizer,
            t_initial=num_epochs,
            t_mul=getattr(args, 'lr_cycle_mul', 1.),
            lr_min=args.min_lr,
            decay_rate=args.decay_rate,
            warmup_lr_init=args.warmup_lr,
            warmup_t=args.warmup_epochs,
            cycle_limit=getattr(args, 'lr_cycle_limit', 1),
            t_in_epochs=True,
            noise_range_t=noise_range,
            noise_pct=getattr(args, 'lr_noise_pct', 0.67),
            noise_std=getattr(args, 'lr_noise_std', 1.),
            noise_seed=getattr(args, 'seed', 42),
        )
        num_epochs = lr_scheduler.get_cycle_length() + args.cooldown_epochs
    elif args.sched == 'tanh':
        lr_scheduler = TanhLRScheduler(
            optimizer,
            t_initial=num_epochs,
            t_mul=getattr(args, 'lr_cycle_mul', 1.),
            lr_min=args.min_lr,
            warmup_lr_init=args.warmup_lr,
            warmup_t=args.warmup_epochs,
            cycle_limit=getattr(args, 'lr_cycle_limit', 1),
            t_in_epochs=True,
            noise_range_t=noise_range,
            noise_pct=getattr(args, 'lr_noise_pct', 0.67),
            noise_std=getattr(args, 'lr_noise_std', 1.),
            noise_seed=getattr(args, 'seed', 42),
        )
        num_epochs = lr_scheduler.get_cycle_length() + args.cooldown_epochs
    elif args.sched == 'step':
        lr_scheduler = StepLRScheduler(
            optimizer,
            decay_t=args.decay_epochs,
            decay_rate=args.decay_rate,
            warmup_lr_init=args.warmup_lr,
            warmup_t=args.warmup_epochs,
            noise_range_t=noise_range,
            noise_pct=getattr(args, 'lr_noise_pct', 0.67),
            noise_std=getattr(args, 'lr_noise_std', 1.),
            noise_seed=getattr(args, 'seed', 42),
        )
    elif args.sched == 'plateau':
        # Monitor direction: loss-like metrics are minimized, others maximized.
        mode = 'min' if 'loss' in getattr(args, 'eval_metric', '') else 'max'
        lr_scheduler = PlateauLRScheduler(
            optimizer,
            decay_rate=args.decay_rate,
            patience_t=args.patience_epochs,
            lr_min=args.min_lr,
            mode=mode,
            warmup_lr_init=args.warmup_lr,
            warmup_t=args.warmup_epochs,
            cooldown_t=0,
            noise_range_t=noise_range,
            noise_pct=getattr(args, 'lr_noise_pct', 0.67),
            noise_std=getattr(args, 'lr_noise_std', 1.),
            noise_seed=getattr(args, 'seed', 42),
        )
    return lr_scheduler, num_epochs
import torch
import math
import logging
import numpy as np
_logger = logging.getLogger(__name__)
class Scheduler:
def __init__(self,
optimizer: torch.optim.Optimizer,
param_group_field: str,
noise_range_t=None,
noise_type='normal',
noise_pct=0.67,
noise_std=1.0,
noise_seed=None,
initialize: bool = True) -> None:
self.optimizer = optimizer
self.param_group_field = param_group_field
self._initial_param_group_field = f"initial_{param_group_field}"
if initialize:
for i, group in enumerate(self.optimizer.param_groups):
if param_group_field not in group:
raise KeyError(f"{param_group_field} missing from param_groups[{i}]")
group.setdefault(self._initial_param_group_field, group[param_group_field])
else:
for i, group in enumerate(self.optimizer.param_groups):
if self._initial_param_group_field not in group:
raise KeyError(f"{self._initial_param_group_field} missing from param_groups[{i}]")
self.base_values = [group[self._initial_param_group_field] for group in self.optimizer.param_groups]
self.metric = None
self.noise_range_t = noise_range_t
self.noise_pct = noise_pct
self.noise_type = noise_type
self.noise_std = noise_std
self.noise_seed = noise_seed if noise_seed is not None else 42
self.update_groups(self.base_values)
def state_dict(self) -> Dict[str, Any]:
return {key: value for key, value in self.__dict__.items() if key != 'optimizer'}
def load_state_dict(self, state_dict: Dict[str, Any]) -> None:
self.__dict__.update(state_dict)
def get_epoch_values(self, epoch: int):
return None
def get_update_values(self, num_updates: int):
return None
def step(self, epoch: int, metric: float = None) -> None:
self.metric = metric
values = self.get_epoch_values(epoch)
if values is not None:
values = self._add_noise(values, epoch)
self.update_groups(values)
def step_update(self, num_updates: int, metric: float = None):
self.metric = metric
values = self.get_update_values(num_updates)
if values is not None:
values = self._add_noise(values, num_updates)
self.update_groups(values)
def update_groups(self, values):
if not isinstance(values, (list, tuple)):
values = [values] * len(self.optimizer.param_groups)
for param_group, value in zip(self.optimizer.param_groups, values):
param_group[self.param_group_field] = value
def _add_noise(self, lrs, t):
if self.noise_range_t is not None:
if isinstance(self.noise_range_t, (list, tuple)):
apply_noise = self.noise_range_t[0] <= t < self.noise_range_t[1]
else:
apply_noise = t >= self.noise_range_t
if apply_noise:
g = torch.Generator()
g.manual_seed(self.noise_seed + t)
if self.noise_type == 'normal':
while True:
noise = torch.randn(1, generator=g).item()
if abs(noise) < self.noise_pct:
break
else:
noise = 2 * (torch.rand(1, generator=g).item() - 0.5) * self.noise_pct
lrs = [v + v * noise for v in lrs]
return lrs
class CosineLRScheduler(Scheduler):
    """Cosine LR decay with warm restarts (SGDR-style) and optional linear warmup.

    Each cycle anneals every param group's LR from its (per-cycle decayed) base
    value down to ``lr_min`` along half a cosine wave.  ``t_mul`` stretches each
    successive cycle's length, ``decay_rate`` shrinks each successive cycle's
    peak LR, and ``cycle_limit`` caps how many cycles run before the LR is
    pinned at ``lr_min``.  ``t`` counts epochs when ``t_in_epochs`` is true,
    otherwise optimizer updates.
    """

    def __init__(self,
                 optimizer: torch.optim.Optimizer,
                 t_initial: int,
                 t_mul: float = 1.,
                 lr_min: float = 0.,
                 decay_rate: float = 1.,
                 warmup_t=0,
                 warmup_lr_init=0,
                 warmup_prefix=False,
                 cycle_limit=0,
                 t_in_epochs=True,
                 noise_range_t=None,
                 noise_pct=0.67,
                 noise_std=1.0,
                 noise_seed=42,
                 initialize=True) -> None:
        super().__init__(
            optimizer, param_group_field="lr",
            noise_range_t=noise_range_t, noise_pct=noise_pct, noise_std=noise_std, noise_seed=noise_seed,
            initialize=initialize)

        assert t_initial > 0
        assert lr_min >= 0
        if t_initial == 1 and t_mul == 1 and decay_rate == 1:
            _logger.warning("Cosine annealing scheduler will have no effect on the learning "
                            "rate since t_initial = t_mul = eta_mul = 1.")
        self.t_initial = t_initial
        self.t_mul = t_mul
        self.lr_min = lr_min
        self.decay_rate = decay_rate
        self.cycle_limit = cycle_limit
        self.warmup_t = warmup_t
        self.warmup_lr_init = warmup_lr_init
        self.warmup_prefix = warmup_prefix
        self.t_in_epochs = t_in_epochs
        if self.warmup_t:
            # Per-group increment so each group ramps linearly from
            # warmup_lr_init to its base LR over warmup_t steps.
            self.warmup_steps = [(v - warmup_lr_init) / self.warmup_t for v in self.base_values]
            super().update_groups(self.warmup_lr_init)
        else:
            self.warmup_steps = [1 for _ in self.base_values]

    def _get_lr(self, t):
        """Return the list of per-param-group LRs at schedule position *t*."""
        if t < self.warmup_t:
            # Linear warmup phase.
            lrs = [self.warmup_lr_init + t * s for s in self.warmup_steps]
        else:
            if self.warmup_prefix:
                # Warmup is prepended to the schedule: restart the cosine
                # clock once it is over.
                t = t - self.warmup_t
            if self.t_mul != 1:
                # Geometric cycle lengths: solve for the current cycle index
                # i, its length t_i, and the offset t_curr within it.
                i = math.floor(math.log(1 - t / self.t_initial * (1 - self.t_mul), self.t_mul))
                t_i = self.t_mul ** i * self.t_initial
                t_curr = t - (1 - self.t_mul ** i) / (1 - self.t_mul) * self.t_initial
            else:
                i = t // self.t_initial
                t_i = self.t_initial
                t_curr = t - (self.t_initial * i)
            # Peak LR (and floor) decay geometrically per completed cycle.
            gamma = self.decay_rate ** i
            lr_min = self.lr_min * gamma
            lr_max_values = [v * gamma for v in self.base_values]
            if self.cycle_limit == 0 or (self.cycle_limit > 0 and i < self.cycle_limit):
                lrs = [
                    lr_min + 0.5 * (lr_max - lr_min) * (1 + math.cos(math.pi * t_curr / t_i)) for lr_max in lr_max_values
                ]
            else:
                # Past the last allowed cycle: hold at the floor.
                lrs = [self.lr_min for _ in self.base_values]
        return lrs

    def get_epoch_values(self, epoch: int):
        """LRs for *epoch*, or None when the schedule runs per update."""
        if self.t_in_epochs:
            return self._get_lr(epoch)
        else:
            return None

    def get_update_values(self, num_updates: int):
        """LRs for *num_updates*, or None when the schedule runs per epoch."""
        if not self.t_in_epochs:
            return self._get_lr(num_updates)
        else:
            return None

    def get_cycle_length(self, cycles=0):
        """Total schedule length (in t units) spanning *cycles* restart cycles."""
        if not cycles:
            cycles = self.cycle_limit
        cycles = max(1, cycles)
        if self.t_mul == 1.0:
            return self.t_initial * cycles
        else:
            # Sum of the geometric series of cycle lengths.
            return int(math.floor(-self.t_initial * (self.t_mul ** cycles - 1) / (1 - self.t_mul)))
class TanhLRScheduler(Scheduler):
    """Tanh-shaped LR decay with warm restarts and optional linear warmup.

    Like ``CosineLRScheduler`` but the within-cycle shape follows
    ``1 - tanh(lb * (1 - tr) + ub * tr)`` where ``tr`` is the fractional
    progress through the cycle; ``lb``/``ub`` set how steep the curve is at
    the start and end of each cycle.
    """

    def __init__(self,
                 optimizer: torch.optim.Optimizer,
                 t_initial: int,
                 lb: float = -6.,
                 ub: float = 4.,
                 t_mul: float = 1.,
                 lr_min: float = 0.,
                 decay_rate: float = 1.,
                 warmup_t=0,
                 warmup_lr_init=0,
                 warmup_prefix=False,
                 cycle_limit=0,
                 t_in_epochs=True,
                 noise_range_t=None,
                 noise_pct=0.67,
                 noise_std=1.0,
                 noise_seed=42,
                 initialize=True) -> None:
        super().__init__(
            optimizer, param_group_field="lr",
            noise_range_t=noise_range_t, noise_pct=noise_pct, noise_std=noise_std, noise_seed=noise_seed,
            initialize=initialize)

        assert t_initial > 0
        assert lr_min >= 0
        assert lb < ub
        assert cycle_limit >= 0
        assert warmup_t >= 0
        assert warmup_lr_init >= 0
        self.lb = lb
        self.ub = ub
        self.t_initial = t_initial
        self.t_mul = t_mul
        self.lr_min = lr_min
        self.decay_rate = decay_rate
        self.cycle_limit = cycle_limit
        self.warmup_t = warmup_t
        self.warmup_lr_init = warmup_lr_init
        self.warmup_prefix = warmup_prefix
        self.t_in_epochs = t_in_epochs
        if self.warmup_t:
            # When warmup is a prefix the ramp targets are the base LRs
            # themselves; otherwise ramp toward the schedule value at
            # t == warmup_t so there is no jump at the boundary.
            t_v = self.base_values if self.warmup_prefix else self._get_lr(self.warmup_t)
            self.warmup_steps = [(v - warmup_lr_init) / self.warmup_t for v in t_v]
            super().update_groups(self.warmup_lr_init)
        else:
            self.warmup_steps = [1 for _ in self.base_values]

    def _get_lr(self, t):
        """Return the list of per-param-group LRs at schedule position *t*."""
        if t < self.warmup_t:
            # Linear warmup phase.
            lrs = [self.warmup_lr_init + t * s for s in self.warmup_steps]
        else:
            if self.warmup_prefix:
                # Warmup is prepended: restart the schedule clock after it.
                t = t - self.warmup_t
            if self.t_mul != 1:
                # Geometric cycle lengths: current cycle index i, its length
                # t_i, and the offset t_curr within it.
                i = math.floor(math.log(1 - t / self.t_initial * (1 - self.t_mul), self.t_mul))
                t_i = self.t_mul ** i * self.t_initial
                t_curr = t - (1 - self.t_mul ** i) / (1 - self.t_mul) * self.t_initial
            else:
                i = t // self.t_initial
                t_i = self.t_initial
                t_curr = t - (self.t_initial * i)
            if self.cycle_limit == 0 or (self.cycle_limit > 0 and i < self.cycle_limit):
                # Peak LR (and floor) decay geometrically per completed cycle.
                gamma = self.decay_rate ** i
                lr_min = self.lr_min * gamma
                lr_max_values = [v * gamma for v in self.base_values]

                # Interpolate lb..ub across the cycle and map through tanh.
                tr = t_curr / t_i
                lrs = [
                    lr_min + 0.5 * (lr_max - lr_min) * (1 - math.tanh(self.lb * (1. - tr) + self.ub * tr))
                    for lr_max in lr_max_values
                ]
            else:
                # Past the last allowed cycle: hold at the fully decayed floor.
                lrs = [self.lr_min * (self.decay_rate ** self.cycle_limit) for _ in self.base_values]
        return lrs

    def get_epoch_values(self, epoch: int):
        """LRs for *epoch*, or None when the schedule runs per update."""
        if self.t_in_epochs:
            return self._get_lr(epoch)
        else:
            return None

    def get_update_values(self, num_updates: int):
        """LRs for *num_updates*, or None when the schedule runs per epoch."""
        if not self.t_in_epochs:
            return self._get_lr(num_updates)
        else:
            return None

    def get_cycle_length(self, cycles=0):
        """Total schedule length (in t units) spanning *cycles* restart cycles."""
        if not cycles:
            cycles = self.cycle_limit
        cycles = max(1, cycles)
        if self.t_mul == 1.0:
            return self.t_initial * cycles
        else:
            # Sum of the geometric series of cycle lengths.
            return int(math.floor(-self.t_initial * (self.t_mul ** cycles - 1) / (1 - self.t_mul)))
class StepLRScheduler(Scheduler):
    """Staircase LR decay: multiply the base LRs by ``decay_rate`` every
    ``decay_t`` steps, preceded by an optional linear warmup phase.
    """

    def __init__(self,
                 optimizer: torch.optim.Optimizer,
                 decay_t: float,
                 decay_rate: float = 1.,
                 warmup_t=0,
                 warmup_lr_init=0,
                 t_in_epochs=True,
                 noise_range_t=None,
                 noise_pct=0.67,
                 noise_std=1.0,
                 noise_seed=42,
                 initialize=True,
                 ) -> None:
        super().__init__(
            optimizer, param_group_field="lr",
            noise_range_t=noise_range_t, noise_pct=noise_pct, noise_std=noise_std, noise_seed=noise_seed,
            initialize=initialize)

        self.decay_t = decay_t
        self.decay_rate = decay_rate
        self.warmup_t = warmup_t
        self.warmup_lr_init = warmup_lr_init
        self.t_in_epochs = t_in_epochs

        if self.warmup_t:
            # Per-group increment so each LR ramps linearly from
            # warmup_lr_init to its base value over warmup_t steps.
            self.warmup_steps = [
                (base - warmup_lr_init) / self.warmup_t for base in self.base_values
            ]
            super().update_groups(self.warmup_lr_init)
        else:
            # Dummy step of 1 when warmup is disabled (never used because
            # t < warmup_t is then impossible).
            self.warmup_steps = [1] * len(self.base_values)

    def _get_lr(self, t):
        """Per-group LRs at position *t*: linear warmup, then staircase decay."""
        if t < self.warmup_t:
            return [self.warmup_lr_init + t * step for step in self.warmup_steps]
        scale = self.decay_rate ** (t // self.decay_t)
        return [base * scale for base in self.base_values]

    def get_epoch_values(self, epoch: int):
        """LRs for *epoch*, or None when the schedule runs per update."""
        return self._get_lr(epoch) if self.t_in_epochs else None

    def get_update_values(self, num_updates: int):
        """LRs for *num_updates*, or None when the schedule runs per epoch."""
        return None if self.t_in_epochs else self._get_lr(num_updates)
class PlateauLRScheduler(Scheduler):
    """Decay the LR when a monitored metric stops improving.

    Thin wrapper around ``torch.optim.lr_scheduler.ReduceLROnPlateau`` that
    adds a linear warmup phase and optional seeded LR noise.  Unlike the
    other schedulers in this module it overrides ``step`` entirely and is
    driven by a metric value rather than by the step count alone.
    """

    def __init__(self,
                 optimizer,
                 decay_rate=0.1,
                 patience_t=10,
                 verbose=True,
                 threshold=1e-4,
                 cooldown_t=0,
                 warmup_t=0,
                 warmup_lr_init=0,
                 lr_min=0,
                 mode='max',
                 noise_range_t=None,
                 noise_type='normal',
                 noise_pct=0.67,
                 noise_std=1.0,
                 noise_seed=None,
                 initialize=True,
                 ):
        super().__init__(optimizer, 'lr', initialize=initialize)

        # Delegate plateau detection and the actual decay to torch.
        self.lr_scheduler = torch.optim.lr_scheduler.ReduceLROnPlateau(
            self.optimizer,
            patience=patience_t,
            factor=decay_rate,
            verbose=verbose,
            threshold=threshold,
            cooldown=cooldown_t,
            mode=mode,
            min_lr=lr_min
        )

        self.noise_range = noise_range_t
        self.noise_pct = noise_pct
        self.noise_type = noise_type
        self.noise_std = noise_std
        self.noise_seed = noise_seed if noise_seed is not None else 42
        self.warmup_t = warmup_t
        self.warmup_lr_init = warmup_lr_init
        if self.warmup_t:
            # Per-group linear warmup increments.
            self.warmup_steps = [(v - warmup_lr_init) / self.warmup_t for v in self.base_values]
            super().update_groups(self.warmup_lr_init)
        else:
            self.warmup_steps = [1 for _ in self.base_values]
        # LRs to restore before the next base-scheduler step once noise
        # has been applied (so noise never compounds).
        self.restore_lr = None

    def state_dict(self):
        """Serialize only the wrapped torch scheduler's progress."""
        return {
            'best': self.lr_scheduler.best,
            'last_epoch': self.lr_scheduler.last_epoch,
        }

    def load_state_dict(self, state_dict):
        """Restore the wrapped torch scheduler's progress."""
        self.lr_scheduler.best = state_dict['best']
        if 'last_epoch' in state_dict:
            self.lr_scheduler.last_epoch = state_dict['last_epoch']

    # override the base class step fn completely
    def step(self, epoch, metric=None):
        """Advance one epoch: warmup first, then plateau logic plus optional noise."""
        if epoch <= self.warmup_t:
            lrs = [self.warmup_lr_init + epoch * s for s in self.warmup_steps]
            super().update_groups(lrs)
        else:
            if self.restore_lr is not None:
                # restore actual LR from before our last noise perturbation before stepping base
                for i, param_group in enumerate(self.optimizer.param_groups):
                    param_group['lr'] = self.restore_lr[i]
                self.restore_lr = None

            self.lr_scheduler.step(metric, epoch)  # step the base scheduler

            if self.noise_range is not None:
                # noise_range may be a scalar threshold or an [start, end) window.
                if isinstance(self.noise_range, (list, tuple)):
                    apply_noise = self.noise_range[0] <= epoch < self.noise_range[1]
                else:
                    apply_noise = epoch >= self.noise_range
                if apply_noise:
                    self._apply_noise(epoch)

    def _apply_noise(self, epoch):
        """Perturb each group's LR multiplicatively with seeded random noise."""
        g = torch.Generator()
        # Seeding with noise_seed + epoch makes the perturbation reproducible
        # per epoch across restarts.
        g.manual_seed(self.noise_seed + epoch)
        if self.noise_type == 'normal':
            while True:
                # resample if noise out of percent limit, brute force but shouldn't spin much
                noise = torch.randn(1, generator=g).item()
                if abs(noise) < self.noise_pct:
                    break
        else:
            # Uniform noise in (-noise_pct, noise_pct).
            noise = 2 * (torch.rand(1, generator=g).item() - 0.5) * self.noise_pct

        # Remember the pre-noise LRs so step() can undo the perturbation.
        restore_lr = []
        for i, param_group in enumerate(self.optimizer.param_groups):
            old_lr = float(param_group['lr'])
            restore_lr.append(old_lr)
            new_lr = old_lr + old_lr * noise
            param_group['lr'] = new_lr
        self.restore_lr = restore_lr
def create_scheduler(args, optimizer):
    """Create an LR scheduler for *optimizer* from an argparse-style namespace.

    ``args.sched`` selects 'cosine', 'tanh', 'step' or 'plateau' (anything
    else yields ``None``).  Produces ``(lr_scheduler, num_epochs)``; for the
    cyclic schedules num_epochs is recomputed as the full cycle length plus
    ``args.cooldown_epochs``.
    """
    num_epochs = args.epochs

    # Optional LR noise window, given as a fraction (or a [start, end] pair
    # of fractions) of the total number of epochs.
    if getattr(args, 'lr_noise', None) is not None:
        lr_noise = getattr(args, 'lr_noise')
        if isinstance(lr_noise, (list, tuple)):
            noise_range = [n * num_epochs for n in lr_noise]
            if len(noise_range) == 1:
                noise_range = noise_range[0]
        else:
            noise_range = lr_noise * num_epochs
    else:
        noise_range = None

    lr_scheduler = None
    if args.sched == 'cosine':
        lr_scheduler = CosineLRScheduler(
            optimizer,
            t_initial=num_epochs,
            t_mul=getattr(args, 'lr_cycle_mul', 1.),
            lr_min=args.min_lr,
            decay_rate=args.decay_rate,
            warmup_lr_init=args.warmup_lr,
            warmup_t=args.warmup_epochs,
            cycle_limit=getattr(args, 'lr_cycle_limit', 1),
            t_in_epochs=True,
            noise_range_t=noise_range,
            noise_pct=getattr(args, 'lr_noise_pct', 0.67),
            noise_std=getattr(args, 'lr_noise_std', 1.),
            noise_seed=getattr(args, 'seed', 42),
        )
        num_epochs = lr_scheduler.get_cycle_length() + args.cooldown_epochs
    elif args.sched == 'tanh':
        lr_scheduler = TanhLRScheduler(
            optimizer,
            t_initial=num_epochs,
            t_mul=getattr(args, 'lr_cycle_mul', 1.),
            lr_min=args.min_lr,
            warmup_lr_init=args.warmup_lr,
            warmup_t=args.warmup_epochs,
            cycle_limit=getattr(args, 'lr_cycle_limit', 1),
            t_in_epochs=True,
            noise_range_t=noise_range,
            noise_pct=getattr(args, 'lr_noise_pct', 0.67),
            noise_std=getattr(args, 'lr_noise_std', 1.),
            noise_seed=getattr(args, 'seed', 42),
        )
        num_epochs = lr_scheduler.get_cycle_length() + args.cooldown_epochs
    elif args.sched == 'step':
        lr_scheduler = StepLRScheduler(
            optimizer,
            decay_t=args.decay_epochs,
            decay_rate=args.decay_rate,
            warmup_lr_init=args.warmup_lr,
            warmup_t=args.warmup_epochs,
            noise_range_t=noise_range,
            noise_pct=getattr(args, 'lr_noise_pct', 0.67),
            noise_std=getattr(args, 'lr_noise_std', 1.),
            noise_seed=getattr(args, 'seed', 42),
        )
    elif args.sched == 'plateau':
        # Plateau mode tracks a metric: minimize when it looks like a loss,
        # otherwise maximize.
        mode = 'min' if 'loss' in getattr(args, 'eval_metric', '') else 'max'
        lr_scheduler = PlateauLRScheduler(
            optimizer,
            decay_rate=args.decay_rate,
            patience_t=args.patience_epochs,
            lr_min=args.min_lr,
            mode=mode,
            warmup_lr_init=args.warmup_lr,
            warmup_t=args.warmup_epochs,
            cooldown_t=0,
            noise_range_t=noise_range,
            noise_pct=getattr(args, 'lr_noise_pct', 0.67),
            noise_std=getattr(args, 'lr_noise_std', 1.),
            noise_seed=getattr(args, 'seed', 42),
        )
return lr_scheduler, num_epochs | true | true |
1c2fbd7e9cc84b577c89dfb84de3fe84522eb5fe | 2,636 | py | Python | Missions_to_Mars/.history/scrape_mars_20200809061221.py | ermiasgelaye/web-scraping-challenge | f99c3436dfb0169595c46dae7733d90e21385cc6 | [
"ADSL"
] | null | null | null | Missions_to_Mars/.history/scrape_mars_20200809061221.py | ermiasgelaye/web-scraping-challenge | f99c3436dfb0169595c46dae7733d90e21385cc6 | [
"ADSL"
] | null | null | null | Missions_to_Mars/.history/scrape_mars_20200809061221.py | ermiasgelaye/web-scraping-challenge | f99c3436dfb0169595c46dae7733d90e21385cc6 | [
"ADSL"
] | 2 | 2020-11-02T08:12:16.000Z | 2021-05-17T21:45:42.000Z | # Dependencies
import numpy as np
import pandas as pd
from bs4 import BeautifulSoup as bs
import requests
from splinter import Browser
import re
import time
# Initialize browser
def init_browser():
    """Return a visible (non-headless) Chrome splinter Browser.

    Assumes chromedriver is installed at /usr/local/bin/chromedriver
    (on Windows the driver path would be e.g. 'chromedriver.exe' instead).
    """
    driver_path = "/usr/local/bin/chromedriver"
    return Browser("chrome", executable_path=driver_path, headless=False)
def scrape():
    """Scrape Mars data from several sites and return it as a dict.

    Returns a dict with keys: news_title, news_p, featured_image_url,
    mars_weather, facts_html, and hemispheres (a list of
    ``{"title": ..., "img_url": ...}`` dicts).

    Fixes over the previous version: the browser is now always closed
    (it used to leak on every call), and the unused ``hemisphere_img_urls``
    local was removed.
    """
    browser = init_browser()
    try:
        # --- Latest NASA Mars news: headline and teaser paragraph ---
        browser.visit('https://mars.nasa.gov/news/')
        soup = bs(browser.html, 'html.parser')
        news_title = soup.find('div', class_='content_title').text
        news_p = soup.find('div', class_='article_teaser_body').text

        # --- JPL featured space image ---
        browser.visit('https://www.jpl.nasa.gov/spaceimages/')
        soup = bs(browser.html, 'html.parser')
        image_url = soup.find("a", class_="button fancybox")["data-fancybox-href"]
        featured_image_url = 'https://www.jpl.nasa.gov' + image_url

        # --- Latest weather report tweet (first tweet mentioning "InSight sol") ---
        browser.visit('https://twitter.com/marswxreport?lang=en')
        soup = bs(browser.html, "html.parser")
        mars_weather = soup.find(text=re.compile("InSight sol"))

        # --- Mars facts table rendered back to HTML ---
        tables = pd.read_html('https://space-facts.com/mars/')
        facts_df = tables[0]
        facts_df.columns = ['Fact', 'Value']
        facts_df['Fact'] = facts_df['Fact'].str.replace(':', '')
        facts_df.reset_index(drop=True, inplace=True)
        facts_html = facts_df.to_html()

        # --- Hemisphere titles and full-resolution image URLs ---
        base_url = 'https://astrogeology.usgs.gov/'
        browser.visit(base_url + 'search/results?q=hemisphere+enhanced&k1=target&v1=Mars')
        soup = bs(browser.html, 'html.parser')
        sites = [base_url + result.find('a', class_="itemLink product-item")['href']
                 for result in soup.find_all('div', class_="description")]

        hemispheres = []
        for site in sites:
            browser.visit(site)
            soup = bs(browser.html, 'html.parser')
            title = soup.find('h2', class_="title").text.strip()
            img_url = soup.find_all('a', target="_blank", href=True)[0]['href']
            hemispheres.append({"title": title, "img_url": img_url})

        return {
            "news_title": news_title,
            "news_p": news_p,
            "featured_image_url": featured_image_url,
            "mars_weather": mars_weather,
            "facts_html": facts_html,
            "hemispheres": hemispheres,
        }
    finally:
        # Always release the browser/driver process, even on scrape errors.
        browser.quit()
| 31.759036 | 96 | 0.649469 |
import numpy as np
import pandas as pd
from bs4 import BeautifulSoup as bs
import requests
from splinter import Browser
import re
import time
def init_browser():
executable_path = {"executable_path": "/usr/local/bin/chromedriver"}
return Browser("chrome", **executable_path, headless=False)
def scrape():
browser = init_browser()
url = 'https://mars.nasa.gov/news/'
browser.visit(url)
html = browser.html
soup = bs(html, 'html.parser')
news_title = soup.find('div', class_='content_title').text
news_p = soup.find('div', class_='article_teaser_body').text
url = 'https://www.jpl.nasa.gov/spaceimages/'
browser.visit(url)
html = browser.html
soup = bs(html, 'html.parser')
base_url = 'https://www.jpl.nasa.gov'
image_url = soup.find("a", class_="button fancybox")["data-fancybox-href"]
featured_image_url = base_url + image_url
url = 'https://twitter.com/marswxreport?lang=en'
browser.visit(url)
html = browser.html
soup = bs(html, "html.parser")
mars_weather = soup.find(text=re.compile("InSight sol"))
url = 'https://space-facts.com/mars/'
browser.visit(url)
tables = pd.read_html(url)
facts_df = tables[0]
facts_df.columns = ['Fact', 'Value']
facts_df['Fact'] = facts_df['Fact'].str.replace(':', '')
facts_df.reset_index(drop=True, inplace=True)
facts_html = facts_df.to_html()
hemisphere_img_urls = []
url = 'https://astrogeology.usgs.gov/search/results?q=hemisphere+enhanced&k1=target&v1=Mars'
browser.visit(url)
html = browser.html
soup = bs(html, 'html.parser')
results = soup.find_all('div', class_="description")
base_url = 'https://astrogeology.usgs.gov/'
sites = []
for result in results:
link = result.find('a', class_="itemLink product-item")
link_text = link['href']
hemispheres_url = base_url + link_text
sites.append(hemispheres_url)
hemispheres = []
for site in sites:
browser.visit(site)
html = browser.html
soup = bs(html, 'html.parser')
title = soup.find('h2', class_="title").text.strip()
url = soup.find_all('a', target="_blank", href=True)[0]['href']
hemispheres.append({"title": title, "img_url": url})
output = {
"news_title": news_title,
"news_p": news_p,
"featured_image_url": featured_image_url,
"mars_weather": mars_weather,
"facts_html": facts_html,
"hemispheres": hemispheres
}
return output
| true | true |
1c2fbd99e8a358645cbd1a49f7f62034df8161ae | 5,189 | py | Python | condoor/actions.py | kstaniek/condoor-ng | adbe6d37b5978e17237e41051525ab59c589adbc | [
"Apache-2.0"
] | null | null | null | condoor/actions.py | kstaniek/condoor-ng | adbe6d37b5978e17237e41051525ab59c589adbc | [
"Apache-2.0"
] | 6 | 2016-12-07T05:55:13.000Z | 2017-01-07T02:52:55.000Z | condoor/actions.py | kstaniek/condoor-ng | adbe6d37b5978e17237e41051525ab59c589adbc | [
"Apache-2.0"
] | null | null | null | """Provides predefined actions for Finite State Machines."""
import logging
from condoor.fsm import action
from condoor.exceptions import ConnectionAuthenticationError, ConnectionError, ConnectionTimeoutError
@action
def a_send_line(text, ctx):
    """Write *text* to the controller, terminated with a line separator."""
    controller = ctx.ctrl
    controller.sendline(text)
    return True
@action
def a_send(text, ctx):
    """Write *text* to the controller verbatim (no trailing line separator)."""
    controller = ctx.ctrl
    controller.send(text)
    return True
@action
def a_send_username(username, ctx):
    """Send the username; disconnect and fail if none is available.

    Raises:
        ConnectionAuthenticationError: when *username* is empty/None.
    """
    if not username:
        ctx.ctrl.disconnect()
        raise ConnectionAuthenticationError("Username not provided", ctx.ctrl.hostname)
    ctx.ctrl.sendline(username)
    return True
@action
def a_send_password(password, ctx):
    """Send the password; disconnect and fail if none is available.

    Raises:
        ConnectionAuthenticationError: when *password* is empty/None.
    """
    if not password:
        ctx.ctrl.disconnect()
        raise ConnectionAuthenticationError("Password not provided", ctx.ctrl.hostname)
    # NOTE(review): local echo is not toggled around the send; earlier code
    # had (commented-out) setecho(False)/setecho(True) calls here.
    ctx.ctrl.sendline(password)
    return True
@action
def a_authentication_error(ctx):
    """Disconnect the session and raise ConnectionAuthenticationError.

    Used when the device reported an authentication failure; the session
    cannot continue, so the controller is dropped first.
    """
    ctx.ctrl.disconnect()
    raise ConnectionAuthenticationError("Authentication failed", ctx.ctrl.hostname)
@action
def a_unable_to_connect(ctx):
    """Record the last line of session output as the connection error message.

    Stores it on both ``ctx.msg`` and ``ctx.device.last_error_msg`` and lets
    the state machine finish without raising.
    """
    session_output = "{}{}".format(ctx.ctrl.before, ctx.ctrl.after)
    last_line = session_output.strip().splitlines()[-1]
    ctx.msg = last_line
    ctx.device.last_error_msg = last_line
    return False
@action
def a_standby_console(ctx):
    """Abort: the session landed on a standby console.

    Marks the device as console-attached, then raises ConnectionError.
    """
    device = ctx.device
    device.is_console = True
    raise ConnectionError("Standby console", ctx.ctrl.hostname)
@action
def a_disconnect(ctx):
    """Drop the control session; used when the device starts reloading."""
    ctx.msg = "Device is reloading"
    controller = ctx.ctrl
    controller.disconnect()
    return True
@action
def a_reload_na(ctx):
    """Mark the FSM as failed: reload to ROMMON is not possible on this line."""
    ctx.msg = (
        "Reload to the ROM monitor disallowed from a telnet line. "
        "Set the configuration register boot bits to be non-zero."
    )
    ctx.failed = True
    return False
@action
def a_connection_closed(ctx):
    """Note that the remote host closed the connection.

    The FSM is intentionally NOT stopped so a following jump host prompt
    can still be detected.
    """
    ctx.msg = "Device disconnected"
    device = ctx.device
    device.connected = False
    return True
@action
def a_stays_connected(ctx):
    """Keep the controller connected while marking the device as not connected."""
    controller = ctx.ctrl
    device = ctx.device
    controller.connected = True
    device.connected = False
    return True
@action
def a_unexpected_prompt(ctx):
    """Fail the FSM: the jump host prompt appeared instead of the device prompt.

    Raises:
        ConnectionError: always, after recording the prompt and state.
    """
    jumphost_prompt = ctx.ctrl.after
    ctx.msg = "Received the jump host prompt: '{}'".format(jumphost_prompt)
    ctx.device.connected = False
    ctx.finished = True
    raise ConnectionError("Unable to connect to the device.", ctx.ctrl.hostname)
@action
def a_connection_timeout(ctx):
    """Fail the FSM with a timeout: the jump host prompt reappeared.

    Records the prompt, marks the device disconnected and the FSM finished,
    then raises ConnectionTimeoutError.

    Fixes over the previous version: removed a stray debug ``print(ctx.msg)``
    (the sibling a_unexpected_prompt does not print either) and replaced the
    docstring, which was copy-pasted from a_expected_prompt and described the
    wrong behavior.
    """
    prompt = ctx.ctrl.after
    ctx.msg = "Received the jump host prompt: '{}'".format(prompt)
    ctx.device.connected = False
    ctx.finished = True
    raise ConnectionTimeoutError("Unable to connect to the device.", ctx.ctrl.hostname)
@action
def a_expected_prompt(ctx):
    """Expected device prompt seen: refresh driver, config mode and hostname,
    then finish the state machine successfully.
    """
    detected_prompt = ctx.ctrl.after
    device = ctx.device
    device.update_driver(detected_prompt)
    device.update_config_mode()
    device.update_hostname()
    ctx.finished = True
    return True
@action
def a_save_last_pattern(obj, ctx):
    """Remember, on *obj*, which pattern just matched (for later inspection)."""
    matched = ctx.pattern
    obj.last_pattern = matched
    return True
@action
def a_send_boot(rommon_boot_command, ctx):
    """Issue the ROMMON boot command on the controller."""
    controller = ctx.ctrl
    controller.sendline(rommon_boot_command)
    return True
@action
def a_reconnect(ctx):
    """Re-run the device-side connection procedure on the existing controller."""
    device = ctx.device
    device.connect(ctx.ctrl)
    return True
@action
def a_return_and_reconnect(ctx):
    """Wake the session with a carriage return, then re-run connection handling.

    NOTE(review): unlike a_reconnect, this calls ``ctx.ctrl.connect(ctx.device)``
    (controller-side) rather than ``ctx.device.connect(ctx.ctrl)`` -- confirm
    this asymmetry is intentional.
    """
    controller = ctx.ctrl
    controller.send("\r")
    controller.connect(ctx.device)
    return True
@action
def a_store_cmd_result(ctx):
    """Capture the output of the last command on the device.

    Drops the first line of the buffered output when a newline exists past
    position 0 (that line is the echo of the command itself), normalizes
    CRLF line endings, and stores the result on
    ``ctx.device.last_command_result``.  Useful for complex state machines
    where one command's output is embedded inside another's.
    """
    output = ctx.ctrl.before
    newline_at = output.find('\n')
    if newline_at > 0:
        # Strip the echoed command line.
        output = output[newline_at + 1:]
    ctx.device.last_command_result = output.replace('\r', '')
    return True
@action
def a_message_callback(ctx):
    """Forward the last non-blank line of the matched output as an INFO message."""
    last_line = ctx.ctrl.after.strip().splitlines()[-1]
    connection = ctx.device.chain.connection
    connection.emit_message(last_line, log_level=logging.INFO)
    return True
| 26.747423 | 111 | 0.695895 | import logging
from condoor.fsm import action
from condoor.exceptions import ConnectionAuthenticationError, ConnectionError, ConnectionTimeoutError
@action
def a_send_line(text, ctx):
ctx.ctrl.sendline(text)
return True
@action
def a_send(text, ctx):
ctx.ctrl.send(text)
return True
@action
def a_send_username(username, ctx):
if username:
ctx.ctrl.sendline(username)
return True
else:
ctx.ctrl.disconnect()
raise ConnectionAuthenticationError("Username not provided", ctx.ctrl.hostname)
@action
def a_send_password(password, ctx):
if password:
ctx.ctrl.sendline(password)
return True
else:
ctx.ctrl.disconnect()
raise ConnectionAuthenticationError("Password not provided", ctx.ctrl.hostname)
@action
def a_authentication_error(ctx):
ctx.ctrl.disconnect()
raise ConnectionAuthenticationError("Authentication failed", ctx.ctrl.hostname)
@action
def a_unable_to_connect(ctx):
message = "{}{}".format(ctx.ctrl.before, ctx.ctrl.after)
ctx.msg = message.strip().splitlines()[-1]
ctx.device.last_error_msg = ctx.msg
return False
@action
def a_standby_console(ctx):
ctx.device.is_console = True
raise ConnectionError("Standby console", ctx.ctrl.hostname)
@action
def a_disconnect(ctx):
ctx.msg = "Device is reloading"
ctx.ctrl.disconnect()
return True
@action
def a_reload_na(ctx):
ctx.msg = "Reload to the ROM monitor disallowed from a telnet line. " \
"Set the configuration register boot bits to be non-zero."
ctx.failed = True
return False
@action
def a_connection_closed(ctx):
ctx.msg = "Device disconnected"
ctx.device.connected = False
return True
@action
def a_stays_connected(ctx):
ctx.ctrl.connected = True
ctx.device.connected = False
return True
@action
def a_unexpected_prompt(ctx):
prompt = ctx.ctrl.after
ctx.msg = "Received the jump host prompt: '{}'".format(prompt)
ctx.device.connected = False
ctx.finished = True
raise ConnectionError("Unable to connect to the device.", ctx.ctrl.hostname)
@action
def a_connection_timeout(ctx):
prompt = ctx.ctrl.after
ctx.msg = "Received the jump host prompt: '{}'".format(prompt)
print(ctx.msg)
ctx.device.connected = False
ctx.finished = True
raise ConnectionTimeoutError("Unable to connect to the device.", ctx.ctrl.hostname)
@action
def a_expected_prompt(ctx):
prompt = ctx.ctrl.after
ctx.device.update_driver(prompt)
ctx.device.update_config_mode()
ctx.device.update_hostname()
ctx.finished = True
return True
@action
def a_save_last_pattern(obj, ctx):
obj.last_pattern = ctx.pattern
return True
@action
def a_send_boot(rommon_boot_command, ctx):
ctx.ctrl.sendline(rommon_boot_command)
return True
@action
def a_reconnect(ctx):
ctx.device.connect(ctx.ctrl)
return True
@action
def a_return_and_reconnect(ctx):
ctx.ctrl.send("\r")
ctx.ctrl.connect(ctx.device)
return True
@action
def a_store_cmd_result(ctx):
result = ctx.ctrl.before
index = result.find('\n')
if index > 0:
result = result[index + 1:]
ctx.device.last_command_result = result.replace('\r', '')
return True
@action
def a_message_callback(ctx):
message = ctx.ctrl.after.strip().splitlines()[-1]
ctx.device.chain.connection.emit_message(message, log_level=logging.INFO)
return True
| true | true |
1c2fbe6b562df5257fec1a80f3d6038e91915a3f | 3,740 | py | Python | staves/runtimes/docker.py | digitalernachschub/staves | 8b96e018ebd79e18b446e931eb8a04dc5e3a8a87 | [
"Apache-2.0"
] | 11 | 2020-05-14T16:25:34.000Z | 2022-01-06T07:25:37.000Z | staves/runtimes/docker.py | digitalernachschub/staves | 8b96e018ebd79e18b446e931eb8a04dc5e3a8a87 | [
"Apache-2.0"
] | null | null | null | staves/runtimes/docker.py | digitalernachschub/staves | 8b96e018ebd79e18b446e931eb8a04dc5e3a8a87 | [
"Apache-2.0"
] | null | null | null | import io
import json
import logging
import os
import socket
import struct
import tarfile
from dataclasses import asdict
from pathlib import Path
from typing import Mapping
import docker
from docker.types import Mount
import staves.builders.gentoo as gentoo_builder
from staves.builders.gentoo import ImageSpec
logger = logging.getLogger(__name__)
logger.setLevel(logging.DEBUG)
def run(
    builder: str,
    portage: str,
    build_cache: str,
    image_spec: ImageSpec,
    image_path: Path,
    stdlib: bool = False,
    ssh: bool = False,
    netrc: bool = False,
    env: Mapping[str, str] = None,
):
    """Build a Staves image inside a Docker container.

    Starts *builder* with the Portage tree from the *portage* data container
    mounted read-only, streams the serialized *image_spec* to the builder
    over stdin, relays the build log, and writes the resulting rootfs tar
    archive to *image_path*.

    Args:
        builder: image name of the Staves builder container.
        portage: image name providing the Portage snapshot (data container).
        build_cache: name of the Docker volume used as the binpkg cache.
        image_spec: the image definition streamed to the builder.
        image_path: destination path for the rootfs tar archive.
        stdlib: pass ``--stdlib`` through to the builder.
        ssh: bind-mount the host's ~/.ssh into the build container.
        netrc: bind-mount the host's ~/.netrc into the build container.
        env: extra environment variables for the builder container.
    """
    docker_client = docker.from_env()
    mounts = [
        Mount(
            type="volume",
            source=build_cache,
            target="/var/cache/binpkgs",
        )
    ]
    if ssh:
        ssh_dir = str(Path.home().joinpath(".ssh"))
        mounts += [
            Mount(type="bind", source=ssh_dir, target="/root/.ssh", read_only=True),
            Mount(
                type="bind",
                source=ssh_dir,
                target="/var/tmp/portage/.ssh",
                read_only=True,
            ),
        ]
    if netrc:
        # Bug fix: this previously mounted ~/.ssh (a directory) as the
        # container's .netrc file; it must be the host's ~/.netrc.
        netrc_path = str(Path.home().joinpath(".netrc"))
        mounts += [
            Mount(
                type="bind", source=netrc_path, target="/root/.netrc", read_only=True
            ),
            Mount(
                type="bind",
                source=netrc_path,
                target="/var/tmp/portage/.netrc",
                read_only=True,
            ),
        ]
    logger.debug("Starting docker container with the following mounts:")
    for mount in mounts:
        logger.debug(str(mount))

    # Ensure the Portage snapshot image is available locally, then create a
    # (never started) data container so its volumes can be shared.
    for log_output in docker_client.api.pull(portage, stream=True, decode=True):
        print(log_output)
    portage_container = docker_client.containers.create(
        portage,
        auto_remove=True,
    )
    args = []
    if stdlib:
        args += ["--stdlib"]
    container = docker_client.containers.create(
        builder,
        entrypoint=["/usr/bin/python", "/staves.py"],
        command=args,
        mounts=mounts,
        detach=True,
        environment=env,
        stdin_open=True,
        volumes_from=[portage_container.id + ":ro"],
    )

    # Ship the Gentoo builder module into the container as /staves.py.
    bundle_file = io.BytesIO()
    with tarfile.TarFile(fileobj=bundle_file, mode="x") as archive:
        builder_runtime_path = os.path.abspath(gentoo_builder.__file__)
        archive.add(builder_runtime_path, arcname="staves.py")
    bundle_file.seek(0)
    container.put_archive("/", bundle_file.read())

    container.start()

    # Stream the image spec over stdin, length-prefixed with a big-endian
    # 64-bit integer so the builder knows how many bytes to read.
    container_input = container.attach_socket(params={"stdin": 1, "stream": 1})
    serialized_image_spec = json.dumps(
        dict(
            locale=asdict(image_spec.locale),
            global_env=image_spec.global_env,
            package_envs=image_spec.package_envs,
            repositories=[asdict(repository) for repository in image_spec.repositories],
            package_configs=image_spec.package_configs,
            packages_to_be_installed=image_spec.packages_to_be_installed,
        )
    ).encode()
    content_length = struct.pack(">Q", len(serialized_image_spec))
    container_input._sock.send(content_length + serialized_image_spec)
    container_input._sock.shutdown(socket.SHUT_RDWR)
    container_input.close()

    # Relay the build log until the builder exits.
    for line in container.logs(stream=True):
        print(line.decode(), end="")
    container.stop()
    container.wait()

    # Fetch the produced rootfs archive and write it to *image_path*.
    image_chunks, _ = container.get_archive("/tmp/rootfs")
    with image_path.open(mode="wb") as image_archive:
        for chunk in image_chunks:
            image_archive.write(chunk)
    container.remove()
    portage_container.remove()
| 30.655738 | 88 | 0.622727 | import io
import json
import logging
import os
import socket
import struct
import tarfile
from dataclasses import asdict
from pathlib import Path
from typing import Mapping
import docker
from docker.types import Mount
import staves.builders.gentoo as gentoo_builder
from staves.builders.gentoo import ImageSpec
logger = logging.getLogger(__name__)
logger.setLevel(logging.DEBUG)
def run(
builder: str,
portage: str,
build_cache: str,
image_spec: ImageSpec,
image_path: Path,
stdlib: bool = False,
ssh: bool = False,
netrc: bool = False,
env: Mapping[str, str] = None,
):
docker_client = docker.from_env()
mounts = [
Mount(
type="volume",
source=build_cache,
target="/var/cache/binpkgs",
)
]
if ssh:
ssh_dir = str(Path.home().joinpath(".ssh"))
mounts += [
Mount(type="bind", source=ssh_dir, target="/root/.ssh", read_only=True),
Mount(
type="bind",
source=ssh_dir,
target="/var/tmp/portage/.ssh",
read_only=True,
),
]
if netrc:
netrc_path = str(Path.home().joinpath(".ssh"))
mounts += [
Mount(
type="bind", source=netrc_path, target="/root/.netrc", read_only=True
),
Mount(
type="bind",
source=netrc_path,
target="/var/tmp/portage/.netrc",
read_only=True,
),
]
logger.debug("Starting docker container with the following mounts:")
for mount in mounts:
logger.debug(str(mount))
for log_output in docker_client.api.pull(portage, stream=True, decode=True):
print(log_output)
portage_container = docker_client.containers.create(
portage,
auto_remove=True,
)
args = []
if stdlib:
args += ["--stdlib"]
container = docker_client.containers.create(
builder,
entrypoint=["/usr/bin/python", "/staves.py"],
command=args,
mounts=mounts,
detach=True,
environment=env,
stdin_open=True,
volumes_from=[portage_container.id + ":ro"],
)
bundle_file = io.BytesIO()
with tarfile.TarFile(fileobj=bundle_file, mode="x") as archive:
builder_runtime_path = os.path.abspath(gentoo_builder.__file__)
archive.add(builder_runtime_path, arcname="staves.py")
bundle_file.seek(0)
bundle_content = bundle_file.read()
container.put_archive("/", bundle_content)
container.start()
container_input = container.attach_socket(params={"stdin": 1, "stream": 1})
serialized_image_spec = json.dumps(
dict(
locale=asdict(image_spec.locale),
global_env=image_spec.global_env,
package_envs=image_spec.package_envs,
repositories=[asdict(repository) for repository in image_spec.repositories],
package_configs=image_spec.package_configs,
packages_to_be_installed=image_spec.packages_to_be_installed,
)
).encode()
content_length = struct.pack(">Q", len(serialized_image_spec))
content = content_length + serialized_image_spec
container_input._sock.send(content)
container_input._sock.shutdown(socket.SHUT_RDWR)
container_input.close()
for line in container.logs(stream=True):
print(line.decode(), end="")
container.stop()
container.wait()
image_chunks, _ = container.get_archive("/tmp/rootfs")
with image_path.open(mode="wb") as image_archive:
for chunk in image_chunks:
image_archive.write(chunk)
container.remove()
portage_container.remove()
| true | true |
1c2fbea4b7cf369327216abf4b9b0f4d2b266cd6 | 3,661 | py | Python | build/PureCloudPlatformClientV2/models/week_shift_trade_list_response.py | cjohnson-ctl/platform-client-sdk-python | 38ce53bb8012b66e8a43cc8bd6ff00cf6cc99100 | [
"MIT"
] | 10 | 2019-02-22T00:27:08.000Z | 2021-09-12T23:23:44.000Z | libs/PureCloudPlatformClientV2/models/week_shift_trade_list_response.py | rocketbot-cl/genesysCloud | dd9d9b5ebb90a82bab98c0d88b9585c22c91f333 | [
"MIT"
] | 5 | 2018-06-07T08:32:00.000Z | 2021-07-28T17:37:26.000Z | libs/PureCloudPlatformClientV2/models/week_shift_trade_list_response.py | rocketbot-cl/genesysCloud | dd9d9b5ebb90a82bab98c0d88b9585c22c91f333 | [
"MIT"
] | 6 | 2020-04-09T17:43:07.000Z | 2022-02-17T08:48:05.000Z | # coding: utf-8
"""
Copyright 2016 SmartBear Software
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
Ref: https://github.com/swagger-api/swagger-codegen
"""
from pprint import pformat
from six import iteritems
import re
import json
from ..utils import sanitize_for_serialization
class WeekShiftTradeListResponse(object):
    """Swagger model holding a list of WeekShiftTradeResponse entities.

    NOTE: generated-model shape; the public surface (swagger_types,
    attribute_map, entities property, to_dict/to_json/to_str, equality)
    must stay stable.
    """

    def __init__(self):
        """Initialize an empty WeekShiftTradeListResponse."""
        # Attribute name -> swagger type, used by to_dict() to enumerate
        # the model's properties.
        self.swagger_types = {
            'entities': 'list[WeekShiftTradeResponse]'
        }

        # Attribute name -> JSON key in the API definition.
        self.attribute_map = {
            'entities': 'entities'
        }

        self._entities = None

    @property
    def entities(self):
        """The list of WeekShiftTradeResponse entities (or None)."""
        return self._entities

    @entities.setter
    def entities(self, entities):
        """Set the list of WeekShiftTradeResponse entities."""
        self._entities = entities

    def to_dict(self):
        """Return the model's properties as a dict, converting nested models."""
        def _convert(item):
            # Nested swagger models expose to_dict(); plain values pass through.
            return item.to_dict() if hasattr(item, "to_dict") else item

        result = {}
        for attr in self.swagger_types:
            value = getattr(self, attr)
            if isinstance(value, list):
                result[attr] = [_convert(element) for element in value]
            elif hasattr(value, "to_dict"):
                result[attr] = value.to_dict()
            elif isinstance(value, dict):
                result[attr] = {key: _convert(val) for key, val in value.items()}
            else:
                result[attr] = value
        return result

    def to_json(self):
        """Return the model serialized as a raw JSON string."""
        return json.dumps(sanitize_for_serialization(self.to_dict()))

    def to_str(self):
        """Return the pretty-printed string representation of the model."""
        return pformat(self.to_dict())

    def __repr__(self):
        """For `print` and `pprint`."""
        return self.to_str()

    def __eq__(self, other):
        """Two models are equal when all their attributes are equal."""
        return self.__dict__ == other.__dict__

    def __ne__(self, other):
        """Inverse of __eq__."""
        return not self == other
| 27.946565 | 77 | 0.580716 |
from pprint import pformat
from six import iteritems
import re
import json
from ..utils import sanitize_for_serialization
class WeekShiftTradeListResponse(object):
    """Swagger model holding a list of ``WeekShiftTradeResponse`` entities."""
    def __init__(self):
        """Initialize an empty model; ``entities`` starts as None."""
        # Attribute name -> swagger type string; drives to_dict().
        self.swagger_types = {
            'entities': 'list[WeekShiftTradeResponse]'
        }
        # Attribute name -> JSON key used on the wire.
        self.attribute_map = {
            'entities': 'entities'
        }
        self._entities = None
    @property
    def entities(self):
        """Return the wrapped entities (list[WeekShiftTradeResponse] or None)."""
        return self._entities
    @entities.setter
    def entities(self, entities):
        """Set the wrapped entities list."""
        self._entities = entities
    def to_dict(self):
        """Return the model properties as a dict, recursing into nested
        models via their own ``to_dict``."""
        result = {}
        for attr, _ in iteritems(self.swagger_types):
            value = getattr(self, attr)
            if isinstance(value, list):
                # Convert each element that is itself a model.
                result[attr] = list(map(
                    lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
                    value
                ))
            elif hasattr(value, "to_dict"):
                result[attr] = value.to_dict()
            elif isinstance(value, dict):
                # Convert model-valued dict entries.
                result[attr] = dict(map(
                    lambda item: (item[0], item[1].to_dict())
                    if hasattr(item[1], "to_dict") else item,
                    value.items()
                ))
            else:
                result[attr] = value
        return result
    def to_json(self):
        """Return the model serialized as a raw JSON string."""
        return json.dumps(sanitize_for_serialization(self.to_dict()))
    def to_str(self):
        """Return the pretty-printed string representation of the model."""
        return pformat(self.to_dict())
    def __repr__(self):
        """Used by `print` and `pprint`."""
        return self.to_str()
    def __eq__(self, other):
        """Return True if both objects' attribute dicts are equal.

        NOTE(review): assumes ``other`` has a ``__dict__``; comparing
        against e.g. an int raises AttributeError — confirm callers only
        compare model instances.
        """
        return self.__dict__ == other.__dict__
    def __ne__(self, other):
        """Return True if the objects are not equal."""
        return not self == other
| true | true |
1c2fbf246556300f28b607ea53fca3967c6adc28 | 7,777 | py | Python | build/python-env/lib/python2.7/site-packages/docker/models/networks.py | imiMoisesEducation/beatcookie-discbot | 59c8be23346d8d2fc1777a2b08856df88e2ae5c2 | [
"Apache-2.0"
] | 20 | 2018-05-08T20:41:48.000Z | 2019-08-15T02:15:58.000Z | build/python-env/lib/python2.7/site-packages/docker/models/networks.py | imiMoisesEducation/beatcookie-discbot | 59c8be23346d8d2fc1777a2b08856df88e2ae5c2 | [
"Apache-2.0"
] | 2 | 2021-02-02T22:48:24.000Z | 2021-06-02T02:04:53.000Z | build/python-env/lib/python2.7/site-packages/docker/models/networks.py | imiMoisesEducation/beatcookie-discbot | 59c8be23346d8d2fc1777a2b08856df88e2ae5c2 | [
"Apache-2.0"
] | 5 | 2018-07-03T03:15:01.000Z | 2020-09-10T06:30:27.000Z | from ..api import APIClient
from ..utils import version_gte
from .containers import Container
from .resource import Model, Collection
class Network(Model):
    """
    A Docker network.
    """

    @property
    def name(self):
        """
        The name of the network.
        """
        return self.attrs.get('Name')

    @property
    def containers(self):
        """
        The containers attached to this network, as a list of
        :py:class:`~docker.models.containers.Container` objects.
        """
        attached = self.attrs.get('Containers') or {}
        return [self.client.containers.get(container_id)
                for container_id in attached]

    def connect(self, container, *args, **kwargs):
        """
        Connect a container to this network.

        Args:
            container (str): The container to attach, given as an ID, a
                name, or a :py:class:`~docker.models.containers.Container`
                object.
            aliases (:py:class:`list`): Alias names for this endpoint.
                Names in the list can be used within the network to reach
                the container. Defaults to ``None``.
            links (:py:class:`list`): Endpoints this container should be
                linked to. Defaults to ``None``.
            ipv4_address (str): IPv4 address for this container on the
                network. Defaults to ``None``.
            ipv6_address (str): IPv6 address for this container on the
                network. Defaults to ``None``.
            link_local_ips (:py:class:`list`): Link-local (IPv4/IPv6)
                addresses.

        Raises:
            :py:class:`docker.errors.APIError`
                If the server returns an error.
        """
        target = container.id if isinstance(container, Container) else container
        return self.client.api.connect_container_to_network(
            target, self.id, *args, **kwargs
        )

    def disconnect(self, container, *args, **kwargs):
        """
        Disconnect a container from this network.

        Args:
            container (str): The container to detach, given as an ID, a
                name, or a :py:class:`~docker.models.containers.Container`
                object.
            force (bool): Force the container to disconnect.
                Default: ``False``

        Raises:
            :py:class:`docker.errors.APIError`
                If the server returns an error.
        """
        target = container.id if isinstance(container, Container) else container
        return self.client.api.disconnect_container_from_network(
            target, self.id, *args, **kwargs
        )

    def remove(self):
        """
        Remove this network from the server.

        Raises:
            :py:class:`docker.errors.APIError`
                If the server returns an error.
        """
        return self.client.api.remove_network(self.id)
class NetworkCollection(Collection):
    """
    Networks on the Docker server.
    """
    model = Network

    def create(self, name, *args, **kwargs):
        """
        Create a network, like ``docker network create``.

        Args:
            name (str): Name of the network.
            driver (str): Name of the driver used to create the network.
            options (dict): Driver options as a key-value dictionary.
            ipam (IPAMConfig): Optional custom IP scheme for the network.
            check_duplicate (bool): Ask the daemon to check for networks
                with the same name. Default: ``None``.
            internal (bool): Restrict external access to the network.
                Default ``False``.
            labels (dict): Labels to set on the network. Default ``None``.
            enable_ipv6 (bool): Enable IPv6 on the network.
                Default ``False``.
            attachable (bool): Allow non-service containers on worker
                nodes to attach when the network is in the global scope.
            scope (str): The network's scope (``local``, ``global`` or
                ``swarm``).
            ingress (bool): Create an ingress network which provides the
                routing-mesh in swarm mode.

        Returns:
            (:py:class:`Network`): The network that was created.

        Raises:
            :py:class:`docker.errors.APIError`
                If the server returns an error.
        """
        response = self.client.api.create_network(name, *args, **kwargs)
        return self.get(response['Id'])

    def get(self, network_id, *args, **kwargs):
        """
        Look up a single network by its ID.

        Args:
            network_id (str): The ID of the network.
            verbose (bool): Retrieve the service details across the
                cluster in swarm mode.
            scope (str): Filter the network by scope (``swarm``,
                ``global`` or ``local``).

        Returns:
            (:py:class:`Network`) The network.

        Raises:
            :py:class:`docker.errors.NotFound`
                If the network does not exist.
            :py:class:`docker.errors.APIError`
                If the server returns an error.
        """
        raw = self.client.api.inspect_network(network_id, *args, **kwargs)
        return self.prepare_model(raw)

    def list(self, *args, **kwargs):
        """
        List networks, like the ``docker networks ls`` command.

        Args:
            names (:py:class:`list`): Names to filter by.
            ids (:py:class:`list`): IDs to filter by.
            filters (dict): Filters applied to the network list.
                Available filters:
                - ``driver=[<driver-name>]`` Matches a network's driver.
                - ``label=[<key>]`` or ``label=[<key>=<value>]``.
                - ``type=["custom"|"builtin"]`` Filters networks by type.
            greedy (bool): Fetch full details for each network
                individually (e.g. the containers attached to it).

        Returns:
            (list of :py:class:`Network`) The networks on the server.

        Raises:
            :py:class:`docker.errors.APIError`
                If the server returns an error.
        """
        want_details = kwargs.pop('greedy', False)
        models = [
            self.prepare_model(raw)
            for raw in self.client.api.networks(*args, **kwargs)
        ]
        # Per-network reload is only supported from API version 1.28 on.
        if want_details and version_gte(self.client.api._version, '1.28'):
            for network in models:
                network.reload()
        return models

    def prune(self, filters=None):
        self.client.api.prune_networks(filters=filters)
    prune.__doc__ = APIClient.prune_networks.__doc__
| 36.00463 | 79 | 0.555098 | from ..api import APIClient
from ..utils import version_gte
from .containers import Container
from .resource import Model, Collection
class Network(Model):
    """A Docker network; ``self.attrs`` holds the raw inspect payload."""
    @property
    def name(self):
        """Return the network's name (or None if absent)."""
        return self.attrs.get('Name')
    @property
    def containers(self):
        """Return Container objects currently attached to this network."""
        # 'Containers' may be absent or None; fall back to an empty mapping.
        return [
            self.client.containers.get(cid) for cid in
            (self.attrs.get('Containers') or {}).keys()
        ]
    def connect(self, container, *args, **kwargs):
        """Connect *container* (id, name, or Container object) to this
        network; extra arguments are forwarded to the low-level API."""
        if isinstance(container, Container):
            container = container.id
        return self.client.api.connect_container_to_network(
            container, self.id, *args, **kwargs
        )
    def disconnect(self, container, *args, **kwargs):
        """Disconnect *container* (id, name, or Container object) from this
        network; extra arguments are forwarded to the low-level API."""
        if isinstance(container, Container):
            container = container.id
        return self.client.api.disconnect_container_from_network(
            container, self.id, *args, **kwargs
        )
    def remove(self):
        """Remove this network on the server."""
        return self.client.api.remove_network(self.id)
class NetworkCollection(Collection):
    """Collection accessor for the networks on a Docker server."""
    model = Network
    def create(self, name, *args, **kwargs):
        """Create a network named *name* and return it as a Network model."""
        resp = self.client.api.create_network(name, *args, **kwargs)
        return self.get(resp['Id'])
    def get(self, network_id, *args, **kwargs):
        """Fetch a single network by id and wrap it in a Network model."""
        return self.prepare_model(
            self.client.api.inspect_network(network_id, *args, **kwargs)
        )
    def list(self, *args, **kwargs):
        """List networks; with ``greedy=True`` each model is reloaded to
        fill in per-network details (API version >= 1.28 only)."""
        greedy = kwargs.pop('greedy', False)
        resp = self.client.api.networks(*args, **kwargs)
        networks = [self.prepare_model(item) for item in resp]
        if greedy and version_gte(self.client.api._version, '1.28'):
            for net in networks:
                net.reload()
        return networks
    def prune(self, filters=None):
        # Docstring is copied from the low-level API method just below.
        self.client.api.prune_networks(filters=filters)
    prune.__doc__ = APIClient.prune_networks.__doc__
| true | true |
1c2fbffd53700052b6c2bf61434722bcee5798b7 | 1,336 | py | Python | setup.py | albmarin/pcuf | 90fe58a5373d4afb46d95486ced91976e13e2f90 | [
"BSD-3-Clause"
] | null | null | null | setup.py | albmarin/pcuf | 90fe58a5373d4afb46d95486ced91976e13e2f90 | [
"BSD-3-Clause"
] | 247 | 2020-02-19T05:55:58.000Z | 2022-03-28T13:43:12.000Z | setup.py | albmarin/pcuf | 90fe58a5373d4afb46d95486ced91976e13e2f90 | [
"BSD-3-Clause"
] | null | null | null | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""The setup script."""
from setuptools import setup, find_packages
# Long-description sources, read eagerly because setup() below embeds
# their contents.  An explicit encoding avoids a locale-dependent
# UnicodeDecodeError on systems whose default encoding is not UTF-8.
with open("README.rst", encoding="utf-8") as readme_file:
    readme = readme_file.read()
with open("HISTORY.rst", encoding="utf-8") as history_file:
    history = history_file.read()
# Runtime dependencies (minimum supported versions).
requirements = [
    "xlrd>=1.2.0",
    "pandas>=0.24.1",
    "requests>=2.21.0",
    "click>=7.0",
    "openpyxl>=2.5.12",
]
# Build-time and test-time requirements.
setup_requirements = ["pytest-runner"]
test_requirements = ["pytest"]
setup(
    author="Alberto J. Marin",
    author_email="alberto@ajmar.in",
    classifiers=[
        "Development Status :: 2 - Pre-Alpha",
        "Intended Audience :: Developers",
        "License :: OSI Approved :: BSD License",
        "Natural Language :: English",
        "Programming Language :: Python :: 3.6",
        "Programming Language :: Python :: 3.7",
    ],
    description="A personal collection of useful and frequently used Python functions.",
    install_requires=requirements,
    license="BSD license",
    long_description=readme + "\n\n" + history,
    include_package_data=True,
    keywords="pcuf",
    name="pcuf",
    packages=find_packages(),
    setup_requires=setup_requirements,
    test_suite="tests",
    tests_require=test_requirements,
    url="https://github.com/git-albertomarin/pcuf",
    version="0.1.9",
    zip_safe=False,
)
| 25.692308 | 88 | 0.643713 |
from setuptools import setup, find_packages
# Long-description sources, read eagerly because setup() below embeds
# their contents.
with open("README.rst") as readme_file:
    readme = readme_file.read()
with open("HISTORY.rst") as history_file:
    history = history_file.read()
# Runtime dependencies (minimum supported versions).
requirements = [
    "xlrd>=1.2.0",
    "pandas>=0.24.1",
    "requests>=2.21.0",
    "click>=7.0",
    "openpyxl>=2.5.12",
]
# Build-time and test-time requirements.
setup_requirements = ["pytest-runner"]
test_requirements = ["pytest"]
setup(
    author="Alberto J. Marin",
    author_email="alberto@ajmar.in",
    classifiers=[
        "Development Status :: 2 - Pre-Alpha",
        "Intended Audience :: Developers",
        "License :: OSI Approved :: BSD License",
        "Natural Language :: English",
        "Programming Language :: Python :: 3.6",
        "Programming Language :: Python :: 3.7",
    ],
    description="A personal collection of useful and frequently used Python functions.",
    install_requires=requirements,
    license="BSD license",
    long_description=readme + "\n\n" + history,
    include_package_data=True,
    keywords="pcuf",
    name="pcuf",
    packages=find_packages(),
    setup_requires=setup_requirements,
    test_suite="tests",
    tests_require=test_requirements,
    url="https://github.com/git-albertomarin/pcuf",
    version="0.1.9",
    zip_safe=False,
)
| true | true |
1c2fc0173c6d37b75e054681031b636b7288fb33 | 8,657 | py | Python | 2015/day_03/day_03_part_2.py | Sancti0n/advent-of-code | 360c20f63c2308439e2e191c60b7164be86c4d4a | [
"MIT"
] | null | null | null | 2015/day_03/day_03_part_2.py | Sancti0n/advent-of-code | 360c20f63c2308439e2e191c60b7164be86c4d4a | [
"MIT"
] | null | null | null | 2015/day_03/day_03_part_2.py | Sancti0n/advent-of-code | 360c20f63c2308439e2e191c60b7164be86c4d4a | [
"MIT"
] | null | null | null | path = '^^<<v<<v><v^^<><>^^<v<v^>>^^^><^>v^>v><><><<vv^^<^>^^<v^>v>v^v>>>^<>v<^<v^><^>>>>><<v>>^>>^>v^>><<^>v>v<>^v^v^vvv><>^^>v><v<><>^><^^<vv^v<v>^v>>^v^>v><>v^<vv>^><<v^>vv^<<>v>>><<<>>^<vv<^<>^^vv>>>^><<<<vv^v^>>><><^>v<>^>v<v^v<^vv><^v^><<<<>^<>v>^v>v<v<v<<>v<^<<<v>>>>>^^v>vv^^<>^<>^^^^<^^^v<v^^>v<^^v^^>v>^v^^^^>><<v<>v<>^v^<v<>><>^^><<^^<^^>vv<>v^<^v<vv<<<>^>^^>^<>v^^vv<>>v><<<>vvv<>v<>><^<^v<>^vv>^^v<v<v><^<>>vv<^>>^>>vv^v<vv^vv<^<<>>^v^<>^>>>>vv>^^>v>vv>v><^vv^<<v>^<<^^<v<v>vv<v^^<>^^v>^>>v><^<<vv<<v^vv^^^v>>v<<v^><vv^><vv<^vv<<vv^v<<^v<^^v>><<v^>>^^<>v>><<v<>>^^<v>>^^>>vvv^><<<<<^<^vv<^<><v<<>^^^<<<^>^^^<v<<vv>vv<>^<>v<^v>^<<<v<v<v>>^v<>>v<<^<<v<<>^<<<><><>^>>>>^>v^v<<v<v<<>>vv<^vvv^^^^<vv>vv>^v^^v^<v^v><^vv<^vv>v<^>vv<>>^>^><vv<><^>v>^v>vvv<>^>^v<><>vv>><^v^<><><v>>v^v^><^<^>vv>v<^>vvv>v<<<<<^<v<<vv<^^^<<>>^v<vv<^<>v>^<v<>><><>^<<v>v^>^<vv>><><>>^>^>><^<v>^^>^^>^^v^^<^v^^>v^^>>><<><v<v<<v^vv<><><>^<v>^<<^^v^>v>><>^^^><^vvv<^^^^^v><<><v<^^v><><>>^>vv<vvvv<<>>><v<^^^^v<<^><v>^vv<v^^v^vv<^^>^^<v>><<v^>v<^^>^<^<v<^^v>^<<v>^>>>^v<>v<^^^>vvv^v<<^><>>><vvv^<^^^<^>>v>>><v>^^vvv^vvv<^^^^v^v^<vv^<v>^<<^>v^v^<<><>><^v><v<><<>><<<>^v>v<>^<v^v>^vv>>^<>v^^<<v><^v>>v<>>^v^^>><^>v^<^v^^>><>v^>^v^v<<<v^<v^^v<^>v<><>vv>>>>^>v<>v<<<>^^>vv^v<><v^<>^<<<<>>^^>^v<v^v<<><>^v<>>^v^<<^<^>>>^vv<><v<^^<>v^>>v<^^v<v>>>^>><<><<<>><vv<v>>^v>><^<v><vv>^vv<v<>>><>v^><>vv<^^v^^^v<>><^vvv<<^<>v>>>v>><v><>>><>><v^><v^v<v>^v>v<v>>^^<^>^>v><>vv>^v><<>>>>>>>^<<^vv^^vvvv<^^><<<v<<>vvv<>^><<v<v^v^<<v>v<>>^<vv^<v<v>^<<^^vv>v>^<vv<<>v<v^<>v>>^v^^vvvv>^^>>v^v^^><<^>v>>^^>^<^^<>v<v>vv^vv>v<v>>^v<><^vv^<vv<v^^^v<^v^>>^v>>>^^<^<^>^v^>^>>>^v>^>^^^>>^<>v^^<>^v<<^^>^^<vv<>v<^v^>><^v^>^<>>^vv^vv^>v^<vvvvvv^>><^^<^v<^<v^<<^^<<v^<^>><>v><^v^v^^^v>v^<>^<<v<^^vvv<v>^^>^v^^<><vv^v^>v^<<>>vv<>>>>v>v<>^>>>v<>^^><v<v^^^<>^<^><>^><<v>><>^<<>>><<^<vvv<^><v>>^vv^v>><v<>vv^<<^^<<><v><<^<v<vv<<^v^vv>v^>>>v<<<<v<<>v>^vv<^v><v<v>v<^>^^vv>v><v>><<v<<v^v>>><>^<>><><<^<<^v^v<<v>v>v<v<^^>vv<^v^^^<v^<
<<v<>v^><^v>^<^<v>>^<<<v>>v^<><>>^v<>vvv<vvvvv<^^><^>><^^>^>^v^vv<^><<^v>><^^v>^v<>^>vvvv><^>^<<v^^vv<v^^<><>v>^>>^<^<<<^v^^^>^>>^>><><<^>v^^<v>>v<<<<vvv<vvvv^<^<v^^<>^>vvv^<vv^v^v>^<<><v><^v^v^^^>^^>^vv<>v>>v^>vv^vv>v<^v^^>>^v^v<>>^^><<v<<>><>>>^>^<>^^v^^><^<>><<^<vv^^^^^>>vv^<v^<^>>>>v<<><<^>vv>vvv>^<><>>>>vv><<v^v<^^^<<^^^vv^<v<><><<<<>><<v^<>v>v^><>v^v^^><>v>v>^^v<^v<>>^^^^^<v>><v^>^^<v>><v^^>v<^<^>>>^><^^>><<>>^><>^^^>v^^^>^^v^<>^^><^>>><><^>>v<v^>v<^><v<v^<>v<^v>v^<^vv^^><<<><><^v^<v<^^>v>v^>>^^vv^<v>^v>^<^v<>^>^><^<v>^v><^<^<>v^^>^><>>><<v><<><>v<<^v^^<^><>^<><><v>v<^^<v<v>>^^<<>>^<v>><^><^<^>^^v<>v>>><><<>^>v><><<<<v^^^^v<>>^^^v>><<^v>^>>><vv^>>^vv<^<>>^<^^<^v>v<v<<<<<>^<<^<<<<<^<^>>^><<>><>v^v>^<^>v^<><vvv^>^v^v^v><^<v<>vv<<^<>^^^<>^v>^<v^^<v^v>v<>>^>v<<>v<>v^v>v<<<>>v>vv>>v<<>v<>v<^>^>^<v>>v>^>^^^<vv>v<<>>><v>^vvv^^>^^<^vv^^^^>v>^v^>v^^v^>>^v>^vv>^^v^<<<<>^<><^<^<<^^>v^^^v<>>vvv<v>>vv><v<v>^<^v>>^v<vv^<<v<vv><^^v^v>v<>^v<<<^^v^^^<^v>v^v^v>><vvv<<>v<>^v>vv^v>vv<<^v<v>^v>v>><^v<v<>v>>>><<<><vv><>^v^<^vvv>v<>><^v>^>><v>vv<><><>v><>>><^>vv>>^<>v^>>^><<<^><<>^v^>>><><>vv>^<>^>^v^^><^>>><<>v^<^vv>^<^vv>><v<>vv<v><><<^><>v<^^<^>vv^^^^vv<<v><>vv<><v>v<>>>>^><v><>^<><>v<>><<>^^vvv>^^^<><>>vvv^v>><>vv<vv>^^^v^<<>^^v<><<^^v<>^^>^<^^v>>v^v^^>>v>>>^<<^<>^>^^v>>>><vv<<>^v<<vv><<^^vv><^>vv<>>v<v>v^>v>>v^<vv<<<v><v^>vvv^^>vv^<<v>v^>>v^<>>><><<^^<^v>^>>>v>v>^v<>vv><vv<vvv<<v>v>^v<<<>><<><><>v^>>>v^>v^>>vv^^<v>^<>>><^>v^<>^^><v>v<><<<><v^v<<<v<v^>v^v>^>v<^<>v>v^^>>v>vv^v<>>^^^^<>v^>>>>>>>><v<^<<vvv<^v^>^v<^<<>>><<<^<<^>^>v^<>^<<<>v>><^vv^>^>^>>>^<vv><v^^^<v^<v<><v^vvv<>v<vvv^vv<<<v^<^<^vvvv^<<vv<^v><<>^>^<v^v^<^>v^><>>v^>v^>^>>v<>vv^v<<>^^>>vv<>vv>>^v<^vv>^v>v<v^vvv^<<^><>v^<><vv><>v^^><<<><>^>^v^<>><vv<^>v^v>v<>><v<<^>^<vv<^v>^<<v><^<^^vv^<>><v^>^vv^<>>^^^^v>v><^^^v^<<<>^<^<<>><>>v<<^v^>><><v^>>^vv^v>vv>>>>>>^^<<>v^>v^v>^^>>><vv^^^v>^v>>^^^<>><>v^<<<v<vv^^<v^<<<>v>v^^^<vv<>>^v>^v<^<<><>vv>^^^<^^vv<v<<vv>^^>vv>v<<^>^vv><^><v>^^^^v<<vv>v^<<^^>>^^vvvv^v^>vv>>v^<v>vvv<>>^><>>v
^^>>^<>>vvvv^>><v^v<^^<^vv>>v<<^<<^><v^^><v^>v^>><<<v>v>v^>^v<v^vv<^^^v<^<vvvvv<<vvv>><>v<v<v<<^v<><<>vv>><v>><^>>^^v>^>><>vv^><<>>vv<<<^<^^>^<<^>>>><v<^v<<<>>v>vv<^>^v><>>v<v^v<>v^vvvv>v^>>v><<^<v>^^v>>vv^^>v>^v>^v^^>^<^vv<v<<^>vv<<^>>^<<^^>>^<^>v^><^vv>^^v><v^>>><>v^v>^v<^><<<>vv><v>v<><>>v^<>^^>^<>^<<^>>vv^><^<v<^^vvv>>v^>>v^>v>vv><>>v<^>><<<v<<vv><v<v<v>v<v>vv^vvv^vv^>^>v><vv<v^^<>>>>vv^>^<>v<^>^<^v>vv<^<<>>^<^<vv><^^<>^<<v^v^>v<<><v>v>><^v<<^vvv>v>v<<^^<^^>v<vv<v<v^v>^^^>^>vv<v<<^^v^<v<^>^^^vv>v<>>>vv>><><^><><<<vvv<<^^v^<v^<<^>>vv>vv^v^>>><v><<v^v>>v>>vv>^^vvv^>^^>^>^>^v<<^vv^>vvv^^vv><^>^v^>^><>v<^^vv<v><v^<><^<>><v>^^v^v>v^vv<>><^v>^<^v>^<>^v>>>><<vv^^^vv^>>><vv^v>>v><^v^vv><<^v<<>^^<v><^v>vvv<><^^><<^v><>^<^v<^^<^vvvv^^>>>>vv>v>>>v<v^><<<<v>>v^><v>>vv^v<vv<>vv<>vvv>>>><>>><>^v<v^v><vvv<<v^^v^v<>>><>>^vv<<v<><<vv<v^>^^vv><^v^v<v^vvv^v>v^^^vv>^><^vvv<<>^vvv^<v<v^v>>>>^<<<><<<<<^v<^^>>>>^>^<v^^^v<vvv<vv^<>v<<<^<^>>v^<v><<><<^^vvv^>v<>>^^>v>^v>>v<v><v>>>>^<^<^>v^v<vv<>^>><>^<<^vvv^^<>^<vvv<>v^>^^<<^>^vv><vvv>>v^v^>v><v>^<^^<>^>^>>>^^vvv^<<>v^<<>><>v<^<^>v^>^vv><v<^<<<^v>^>>^<^v^<<<<^v^><v^v>v^><<v<><<v^<<^<<v<<v><v><><^^^^>v>^^<v>>v<vvv<<<>><>>^><<><^<>>^^>vv<^><^v^><vvv>>>vvv<<vv^<^^^<^>^<>>^>>^v^<^^v>^<v<<>^^v<^vv^><vvv>>^v><<^<v^<><><>>^>vv<<>^^^v^^<v<>><>>vv>v^>vvv^^v<vv<^<^>>^>>^>>v^<<<v^>v^<^v^vv^><^<^v<<v<<>v>^v^<<<v^vv<v<<>^^<v>>>^<v<^>^^v<v>>>><vv<^^<<>><<v<v>^^v^>>^^>>^v^<^v>v^v^v^v^>v^vv<><>^^<>^><^^^<<<^<v>v<<>^<^^^^^v^<^<<^^>^vv<>v^>><>>^>v>v<>^>v<v^>>><>^<><v>>>^>^>>v^><v<>v><^vv^>v<<v>v<><<vv<<v>^><^<v^>v<<v^v<<><v><>v<v><>^^<v<>><<>v>vv<<v>^v<v>vv><><>vv^<<>^>^<^>>>^v>v<^v^^^vv<>>>^<<^>>><<^^v^>v^<^v>vvv>v^^vv>^^>>v<>^<<>^<><^^v^>><>^>v>>^^^<<^^v<>^^>^<>^>><^>^vvv><^>^<^>^>>vv<^>>^v>>^<>>^^>>>v^<v>>v<<v<^>>v^^vv>v><^v^^><vv^v<^>v<<>v^^<><>^>vvv><^^^>^v^>v>>^vvv<^vv>^^>^>>v<>><<^v<<v^>^><>vv^<<^^vv><v>>^<^><^<v>^v<v>^<<>^v^^>v^>>^^^<^vv>v^>>>vv<<>v>>>^>v^^<v^v^^v^>>v<v<<v>^<<>>vv<<^v>v<<vv<<^<^v<^<><^^>v>>v>v^>><vv<^v<^>^>>v>^><<^<<>^v<v>>><^^<^<
<<v^^>^>vv<<>^<>^<v^<<^v>vv>^^^v<^v><v<<<<<vv>vv>^^^^>v>v><<^<<<^vv><^<<<><v>><v^v>v<<v^^<v^>v>^v^v^<^<^vv>vvv<^^v<>v<<<<>v<v^<vvv^^^<<^<^<<>^<<><<<>v<^>^^v<^^v^>vv>vvv>v><v^^<<>>^><^>>v<<vv>v<<^^^v<<^v^^><><<<><<>v>^<<>v<<<^v>><v^v<^v<v^vv>v>><<^<><^v^^v<v>^>^>vvvv<<><<>>^<vv>^^><v<>v>v<v^^>^><>>><^><<><<<^<>v^><vv^^^^>>^v^>v^<>>v>^^><^<^v^<v^>>v>^vvv<>>v<v^v><>^vvvv<v^<<v^<<^^vv>><<<<<<v><<<v<v^v^^<v^^<>v<<<<^v<<><<v^<^><v<vv<v^v^<v^^vv<v^v<<<>^<<>vv<v<^>^<<><vv<<vv<v<^<^<>><^^<<>>>vv>>>>>>^v<v<>>v^v^^<v^<<<<>><<^v^^^<>^<vv>>>><>v^v^vvv^>>v>><v^v<<<^v>>^^<<^^vv><<<^^^<<<v><^^>>>>vvv^v<^>^^>v<^<><vv<v<>v>>>^vv<<^<v>^v^>^>^v>v>v^v^>v<<v>><>><v^^<<^>>>><<^v^<>^v<vv><>vvv^>v>v<v<v^>^<><><>^>>><v<<<v^vv><>^>^^<<v^>>v^^>^<v>><>><>v^v^^v>>>>vv>>^v<<^v^<>^>v^^>^^<<vvvvvvv>^<v^<<^<<>><<<^^^v^^^^v<^<>v<^^<>vv^^v^<>^<<^>>v>v<<<^^^^vvv^<^<><>v<<v^<^<>>><<><<<v<v<v><vv>^^<vv<<vv<<<v<^>^^vv<v<>><<>>>^v<<>^>>>v^>v>^^<>^<vv<><^>v>^>>>><>^^>v^^v>^vv^^v^><<<>>v<>v<vv<vv^v^v<^v^<^^><<<><vv^^>^<^<<>v>>>>^<<v>v<v>vv<^><^<v><<^>v>>v><<v<<^v^<>>^>>>^v^v>v^^vv^>^<^^>>^><^vv^^vv^<>>^^^^<^^><><v<>>^>>^><vv^>^vvv<^<<v^^<<<>^><>>>^^<><v<v<><<v^^^^^<^<^<<>><<>>>>^<<>>>^<^v^>><<^>>>^<<v>^>><>^<v>^<><v>^v^^vv<><^>vv^^v^<^^^v^vvv^>><>>v<<vv<>>^<^vvv<<^^><vvv^^<v<>vv^^<<>><v>><^^vvv<<<^>^<><^>vv^><^<<>vv<<v>>vv>v>v^<vv><vv><<>^^^^v^^^^<v>^<<^><><^^v^>v>^>><^><<>v^<v>>>^vvv>>^<^<>^^v^vv^^v><<vv^<>>>v<<<>v>^<>v<<>v^>^<<><<><v<v<v<>v^>v<><^^>^<^v^^><^>vv>^>vv<v<^v>vv>^^><<>vv^>^v<<^<<^<<>v<v<^<v>v>>^><v^^v^v>>>><v^v^<<<vv<<^^<>>v^v<^v>v>^^^v<v><v^^^vv<>v^v<^<>v><><v^<>>vv>v><>v>^v<><<<<<<v<>>v^vv<<<<v<<v><^<>^>><>^^vv>^<^<<>vv>>vv<vvv>><><v<>><^<v>^><^<<v>><v><v>^<v>><>v^^^^v<v^^v<>^^vv<>v<>v>^vv^><v^<<^<>^<>^^^>v^>>>v><<^>>v<^v<>^^<v<><v^v<v>v<><v<vv><<>v<^<^>v<>v^>v>^^<<<^^vv^<><<<>>v>^^<>v>>>><v<v<^^^v<v<v^><<>v^v<>v>><<<<v^<><^<<^>^<vvv<v^^v>>v^vv^><^v^^<>^^><<v^>>vv>^<v^vv<^^v<>>vvv<^v^>>^<v<v>>^>^^<<^>^>^v><>>^<^^v>^>>^^<><>>>^^>^^vvv>v<^^<>v^v^^<v<<^<v^v^<<>v^v<v<<v<>>><<^^^>>v>^vv>^>^^v<>^^<
>v^^<><v<v<vvv^<vv<<>v^><<><v<>vv<<^vvvv><<<v>v>v^>v^<>v^>^<v<vvv^>^<>^>^^v<>><<<><v<^^>^v<v>^^v^v<<<^v^<>^<>v>^^>v<v<v>v>^^<<<><<^>v<v<^vv^v><^^<<vv>^<<v><>^>>>>><v^v<<<^>^v^v<<v<>vvv<<>v>v>>^v^v^>><<<<>v^<v<><<>>>^>>^>><<v>'
# Two walkers take turns following the move string ``path`` ('^' north,
# 'v' south, '>' east, '<' west): even-indexed moves drive one walker
# (originally x2/y2), odd-indexed moves the other (originally x1/y1).
# Each position reached is recorded as an "x/y" string in ``t`` so a
# later ``len(set(t))`` counts distinct houses visited.
# NOTE(review): the shared starting position "0/0" is only counted if a
# walker returns to it — confirm this matches the intended semantics.
STEPS = {'^': (1, 0), 'v': (-1, 0), '>': (0, 1), '<': (0, -1)}
t = []
walker_even = [0, 0]
walker_odd = [0, 0]
for index, move in enumerate(path):
    walker = walker_even if index % 2 == 0 else walker_odd
    dx, dy = STEPS.get(move, (0, 0))
    walker[0] += dx
    walker[1] += dy
    t.append(str(walker[0]) + '/' + str(walker[1]))
print(len(set(t))) | 480.944444 | 8,201 | 0.258866 | path = '^^<<v<<v><v^^<><>^^<v<v^>>^^^><^>v^>v><><><<vv^^<^>^^<v^>v>v^v>>>^<>v<^<v^><^>>>>><<v>>^>>^>v^>><<^>v>v<>^v^v^vvv><>^^>v><v<><>^><^^<vv^v<v>^v>>^v^>v><>v^<vv>^><<v^>vv^<<>v>>><<<>>^<vv<^<>^^vv>>>^><<<<vv^v^>>><><^>v<>^>v<v^v<^vv><^v^><<<<>^<>v>^v>v<v<v<<>v<^<<<v>>>>>^^v>vv^^<>^<>^^^^<^^^v<v^^>v<^^v^^>v>^v^^^^>><<v<>v<>^v^<v<>><>^^><<^^<^^>vv<>v^<^v<vv<<<>^>^^>^<>v^^vv<>>v><<<>vvv<>v<>><^<^v<>^vv>^^v<v<v><^<>>vv<^>>^>>vv^v<vv^vv<^<<>>^v^<>^>>>>vv>^^>v>vv>v><^vv^<<v>^<<^^<v<v>vv<v^^<>^^v>^>>v><^<<vv<<v^vv^^^v>>v<<v^><vv^><vv<^vv<<vv^v<<^v<^^v>><<v^>>^^<>v>><<v<>>^^<v>>^^>>vvv^><<<<<^<^vv<^<><v<<>^^^<<<^>^^^<v<<vv>vv<>^<>v<^v>^<<<v<v<v>>^v<>>v<<^<<v<<>^<<<><><>^>>>>^>v^v<<v<v<<>>vv<^vvv^^^^<vv>vv>^v^^v^<v^v><^vv<^vv>v<^>vv<>>^>^><vv<><^>v>^v>vvv<>^>^v<><>vv>><^v^<><><v>>v^v^><^<^>vv>v<^>vvv>v<<<<<^<v<<vv<^^^<<>>^v<vv<^<>v>^<v<>><><>^<<v>v^>^<vv>><><>>^>^>><^<v>^^>^^>^^v^^<^v^^>v^^>>><<><v<v<<v^vv<><><>^<v>^<<^^v^>v>><>^^^><^vvv<^^^^^v><<><v<^^v><><>>^>vv<vvvv<<>>><v<^^^^v<<^><v>^vv<v^^v^vv<^^>^^<v>><<v^>v<^^>^<^<v<^^v>^<<v>^>>>^v<>v<^^^>vvv^v<<^><>>><vvv^<^^^<^>>v>>><v>^^vvv^vvv<^^^^v^v^<vv^<v>^<<^>v^v^<<><>><^v><v<><<>><<<>^v>v<>^<v^v>^vv>>^<>v^^<<v><^v>>v<>>^v^^>><^>v^<^v^^>><>v^>^v^v<<<v^<v^^v<^>v<><>vv>>>>^>v<>v<<<>^^>vv^v<><v^<>^<<<<>>^^>^v<v^v<<><>^v<>>^v^<<^<^>>>^vv<><v<^^<>v^>>v<^^v<v>>>^>><<><<<>><vv<v>>^v>><^<v><vv>^vv<v<>>><>v^><>vv<^^v^^^v<>><^vvv<<^<>v>>>v>><v><>>><>><v^><v^v<v>^v>v<v>>^^<^>^>v><>vv>^v><<>>>>>>>^<<^vv^^vvvv<^^><<<v<<>vvv<>^><<v<v^v^<<v>v<>>^<vv^<v<v>^<<^^vv>v>^<vv<<>v<v^<>v>>^v^^vvvv>^^>>v^v^^><<^>v>>^^>^<^^<>v<v>vv^vv>v<v>>^v<><^vv^<vv<v^^^v<^v^>>^v>>>^^<^<^>^v^>^>>>^v>^>^^^>>^<>v^^<>^v<<^^>^^<vv<>v<^v^>><^v^>^<>>^vv^vv^>v^<vvvvvv^>><^^<^v<^<v^<<^^<<v^<^>><>v><^v^v^^^v>v^<>^<<v<^^vvv<v>^^>^v^^<><vv^v^>v^<<>>vv<>>>>v>v<>^>>>v<>^^><v<v^^^<>^<^><>^><<v>><>^<<>>><<^<vvv<^><v>>^vv^v>><v<>vv^<<^^<<><v><<^<v<vv<<^v^vv>v^>>>v<<<<v<<>v>^vv<^v><v<v>v<^>^^vv>v><v>><<v<<v^v>>><>^<>><><<^<<^
v^v<<v>v>v<v<^^>vv<^v^^^<v^<<<v<>v^><^v>^<^<v>>^<<<v>>v^<><>>^v<>vvv<vvvvv<^^><^>><^^>^>^v^vv<^><<^v>><^^v>^v<>^>vvvv><^>^<<v^^vv<v^^<><>v>^>>^<^<<<^v^^^>^>>^>><><<^>v^^<v>>v<<<<vvv<vvvv^<^<v^^<>^>vvv^<vv^v^v>^<<><v><^v^v^^^>^^>^vv<>v>>v^>vv^vv>v<^v^^>>^v^v<>>^^><<v<<>><>>>^>^<>^^v^^><^<>><<^<vv^^^^^>>vv^<v^<^>>>>v<<><<^>vv>vvv>^<><>>>>vv><<v^v<^^^<<^^^vv^<v<><><<<<>><<v^<>v>v^><>v^v^^><>v>v>^^v<^v<>>^^^^^<v>><v^>^^<v>><v^^>v<^<^>>>^><^^>><<>>^><>^^^>v^^^>^^v^<>^^><^>>><><^>>v<v^>v<^><v<v^<>v<^v>v^<^vv^^><<<><><^v^<v<^^>v>v^>>^^vv^<v>^v>^<^v<>^>^><^<v>^v><^<^<>v^^>^><>>><<v><<><>v<<^v^^<^><>^<><><v>v<^^<v<v>>^^<<>>^<v>><^><^<^>^^v<>v>>><><<>^>v><><<<<v^^^^v<>>^^^v>><<^v>^>>><vv^>>^vv<^<>>^<^^<^v>v<v<<<<<>^<<^<<<<<^<^>>^><<>><>v^v>^<^>v^<><vvv^>^v^v^v><^<v<>vv<<^<>^^^<>^v>^<v^^<v^v>v<>>^>v<<>v<>v^v>v<<<>>v>vv>>v<<>v<>v<^>^>^<v>>v>^>^^^<vv>v<<>>><v>^vvv^^>^^<^vv^^^^>v>^v^>v^^v^>>^v>^vv>^^v^<<<<>^<><^<^<<^^>v^^^v<>>vvv<v>>vv><v<v>^<^v>>^v<vv^<<v<vv><^^v^v>v<>^v<<<^^v^^^<^v>v^v^v>><vvv<<>v<>^v>vv^v>vv<<^v<v>^v>v>><^v<v<>v>>>><<<><vv><>^v^<^vvv>v<>><^v>^>><v>vv<><><>v><>>><^>vv>>^<>v^>>^><<<^><<>^v^>>><><>vv>^<>^>^v^^><^>>><<>v^<^vv>^<^vv>><v<>vv<v><><<^><>v<^^<^>vv^^^^vv<<v><>vv<><v>v<>>>>^><v><>^<><>v<>><<>^^vvv>^^^<><>>vvv^v>><>vv<vv>^^^v^<<>^^v<><<^^v<>^^>^<^^v>>v^v^^>>v>>>^<<^<>^>^^v>>>><vv<<>^v<<vv><<^^vv><^>vv<>>v<v>v^>v>>v^<vv<<<v><v^>vvv^^>vv^<<v>v^>>v^<>>><><<^^<^v>^>>>v>v>^v<>vv><vv<vvv<<v>v>^v<<<>><<><><>v^>>>v^>v^>>vv^^<v>^<>>><^>v^<>^^><v>v<><<<><v^v<<<v<v^>v^v>^>v<^<>v>v^^>>v>vv^v<>>^^^^<>v^>>>>>>>><v<^<<vvv<^v^>^v<^<<>>><<<^<<^>^>v^<>^<<<>v>><^vv^>^>^>>>^<vv><v^^^<v^<v<><v^vvv<>v<vvv^vv<<<v^<^<^vvvv^<<vv<^v><<>^>^<v^v^<^>v^><>>v^>v^>^>>v<>vv^v<<>^^>>vv<>vv>>^v<^vv>^v>v<v^vvv^<<^><>v^<><vv><>v^^><<<><>^>^v^<>><vv<^>v^v>v<>><v<<^>^<vv<^v>^<<v><^<^^vv^<>><v^>^vv^<>>^^^^v>v><^^^v^<<<>^<^<<>><>>v<<^v^>><><v^>>^vv^v>vv>>>>>>^^<<>v^>v^v>^^>>><vv^^^v>^v>>^^^<>><>v^<<<v<vv^^<v^<<<>v>v^^^<vv<>>^v>^v<^<<><>vv>^^^<^^vv<v<<vv>^^>vv>v<<^>^vv><^><v>^^^^v<<vv>v^<<^^>>^^v
vvv^v^>vv>>v^<v>vvv<>>^><>>v^^>>^<>>vvvv^>><v^v<^^<^vv>>v<<^<<^><v^^><v^>v^>><<<v>v>v^>^v<v^vv<^^^v<^<vvvvv<<vvv>><>v<v<v<<^v<><<>vv>><v>><^>>^^v>^>><>vv^><<>>vv<<<^<^^>^<<^>>>><v<^v<<<>>v>vv<^>^v><>>v<v^v<>v^vvvv>v^>>v><<^<v>^^v>>vv^^>v>^v>^v^^>^<^vv<v<<^>vv<<^>>^<<^^>>^<^>v^><^vv>^^v><v^>>><>v^v>^v<^><<<>vv><v>v<><>>v^<>^^>^<>^<<^>>vv^><^<v<^^vvv>>v^>>v^>v>vv><>>v<^>><<<v<<vv><v<v<v>v<v>vv^vvv^vv^>^>v><vv<v^^<>>>>vv^>^<>v<^>^<^v>vv<^<<>>^<^<vv><^^<>^<<v^v^>v<<><v>v>><^v<<^vvv>v>v<<^^<^^>v<vv<v<v^v>^^^>^>vv<v<<^^v^<v<^>^^^vv>v<>>>vv>><><^><><<<vvv<<^^v^<v^<<^>>vv>vv^v^>>><v><<v^v>>v>>vv>^^vvv^>^^>^>^>^v<<^vv^>vvv^^vv><^>^v^>^><>v<^^vv<v><v^<><^<>><v>^^v^v>v^vv<>><^v>^<^v>^<>^v>>>><<vv^^^vv^>>><vv^v>>v><^v^vv><<^v<<>^^<v><^v>vvv<><^^><<^v><>^<^v<^^<^vvvv^^>>>>vv>v>>>v<v^><<<<v>>v^><v>>vv^v<vv<>vv<>vvv>>>><>>><>^v<v^v><vvv<<v^^v^v<>>><>>^vv<<v<><<vv<v^>^^vv><^v^v<v^vvv^v>v^^^vv>^><^vvv<<>^vvv^<v<v^v>>>>^<<<><<<<<^v<^^>>>>^>^<v^^^v<vvv<vv^<>v<<<^<^>>v^<v><<><<^^vvv^>v<>>^^>v>^v>>v<v><v>>>>^<^<^>v^v<vv<>^>><>^<<^vvv^^<>^<vvv<>v^>^^<<^>^vv><vvv>>v^v^>v><v>^<^^<>^>^>>>^^vvv^<<>v^<<>><>v<^<^>v^>^vv><v<^<<<^v>^>>^<^v^<<<<^v^><v^v>v^><<v<><<v^<<^<<v<<v><v><><^^^^>v>^^<v>>v<vvv<<<>><>>^><<><^<>>^^>vv<^><^v^><vvv>>>vvv<<vv^<^^^<^>^<>>^>>^v^<^^v>^<v<<>^^v<^vv^><vvv>>^v><<^<v^<><><>>^>vv<<>^^^v^^<v<>><>>vv>v^>vvv^^v<vv<^<^>>^>>^>>v^<<<v^>v^<^v^vv^><^<^v<<v<<>v>^v^<<<v^vv<v<<>^^<v>>>^<v<^>^^v<v>>>><vv<^^<<>><<v<v>^^v^>>^^>>^v^<^v>v^v^v^v^>v^vv<><>^^<>^><^^^<<<^<v>v<<>^<^^^^^v^<^<<^^>^vv<>v^>><>>^>v>v<>^>v<v^>>><>^<><v>>>^>^>>v^><v<>v><^vv^>v<<v>v<><<vv<<v>^><^<v^>v<<v^v<<><v><>v<v><>^^<v<>><<>v>vv<<v>^v<v>vv><><>vv^<<>^>^<^>>>^v>v<^v^^^vv<>>>^<<^>>><<^^v^>v^<^v>vvv>v^^vv>^^>>v<>^<<>^<><^^v^>><>^>v>>^^^<<^^v<>^^>^<>^>><^>^vvv><^>^<^>^>>vv<^>>^v>>^<>>^^>>>v^<v>>v<<v<^>>v^^vv>v><^v^^><vv^v<^>v<<>v^^<><>^>vvv><^^^>^v^>v>>^vvv<^vv>^^>^>>v<>><<^v<<v^>^><>vv^<<^^vv><v>>^<^><^<v>^v<v>^<<>^v^^>v^>>^^^<^vv>v^>>>vv<<>v>>>^>v^^<v^v^^v^>>v<v<<v>^<<>>vv<<^v>v<<vv<<^<^v<^<><^^>v>>v>v^>><vv<^v<
^>^>>v>^><<^<<>^v<v>>><^^<^<<<v^^>^>vv<<>^<>^<v^<<^v>vv>^^^v<^v><v<<<<<vv>vv>^^^^>v>v><<^<<<^vv><^<<<><v>><v^v>v<<v^^<v^>v>^v^v^<^<^vv>vvv<^^v<>v<<<<>v<v^<vvv^^^<<^<^<<>^<<><<<>v<^>^^v<^^v^>vv>vvv>v><v^^<<>>^><^>>v<<vv>v<<^^^v<<^v^^><><<<><<>v>^<<>v<<<^v>><v^v<^v<v^vv>v>><<^<><^v^^v<v>^>^>vvvv<<><<>>^<vv>^^><v<>v>v<v^^>^><>>><^><<><<<^<>v^><vv^^^^>>^v^>v^<>>v>^^><^<^v^<v^>>v>^vvv<>>v<v^v><>^vvvv<v^<<v^<<^^vv>><<<<<<v><<<v<v^v^^<v^^<>v<<<<^v<<><<v^<^><v<vv<v^v^<v^^vv<v^v<<<>^<<>vv<v<^>^<<><vv<<vv<v<^<^<>><^^<<>>>vv>>>>>>^v<v<>>v^v^^<v^<<<<>><<^v^^^<>^<vv>>>><>v^v^vvv^>>v>><v^v<<<^v>>^^<<^^vv><<<^^^<<<v><^^>>>>vvv^v<^>^^>v<^<><vv<v<>v>>>^vv<<^<v>^v^>^>^v>v>v^v^>v<<v>><>><v^^<<^>>>><<^v^<>^v<vv><>vvv^>v>v<v<v^>^<><><>^>>><v<<<v^vv><>^>^^<<v^>>v^^>^<v>><>><>v^v^^v>>>>vv>>^v<<^v^<>^>v^^>^^<<vvvvvvv>^<v^<<^<<>><<<^^^v^^^^v<^<>v<^^<>vv^^v^<>^<<^>>v>v<<<^^^^vvv^<^<><>v<<v^<^<>>><<><<<v<v<v><vv>^^<vv<<vv<<<v<^>^^vv<v<>><<>>>^v<<>^>>>v^>v>^^<>^<vv<><^>v>^>>>><>^^>v^^v>^vv^^v^><<<>>v<>v<vv<vv^v^v<^v^<^^><<<><vv^^>^<^<<>v>>>>^<<v>v<v>vv<^><^<v><<^>v>>v><<v<<^v^<>>^>>>^v^v>v^^vv^>^<^^>>^><^vv^^vv^<>>^^^^<^^><><v<>>^>>^><vv^>^vvv<^<<v^^<<<>^><>>>^^<><v<v<><<v^^^^^<^<^<<>><<>>>>^<<>>>^<^v^>><<^>>>^<<v>^>><>^<v>^<><v>^v^^vv<><^>vv^^v^<^^^v^vvv^>><>>v<<vv<>>^<^vvv<<^^><vvv^^<v<>vv^^<<>><v>><^^vvv<<<^>^<><^>vv^><^<<>vv<<v>>vv>v>v^<vv><vv><<>^^^^v^^^^<v>^<<^><><^^v^>v>^>><^><<>v^<v>>>^vvv>>^<^<>^^v^vv^^v><<vv^<>>>v<<<>v>^<>v<<>v^>^<<><<><v<v<v<>v^>v<><^^>^<^v^^><^>vv>^>vv<v<^v>vv>^^><<>vv^>^v<<^<<^<<>v<v<^<v>v>>^><v^^v^v>>>><v^v^<<<vv<<^^<>>v^v<^v>v>^^^v<v><v^^^vv<>v^v<^<>v><><v^<>>vv>v><>v>^v<><<<<<<v<>>v^vv<<<<v<<v><^<>^>><>^^vv>^<^<<>vv>>vv<vvv>><><v<>><^<v>^><^<<v>><v><v>^<v>><>v^^^^v<v^^v<>^^vv<>v<>v>^vv^><v^<<^<>^<>^^^>v^>>>v><<^>>v<^v<>^^<v<><v^v<v>v<><v<vv><<>v<^<^>v<>v^>v>^^<<<^^vv^<><<<>>v>^^<>v>>>><v<v<^^^v<v<v^><<>v^v<>v>><<<<v^<><^<<^>^<vvv<v^^v>>v^vv^><^v^^<>^^><<v^>>vv>^<v^vv<^^v<>>vvv<^v^>>^<v<v>>^>^^<<^>^>^v><>>^<^^v>^>>^^<><>>>^^>^^vvv>v<^^<>v^v^^<v<<^<v^v^<<>v^v<v<<
v<>>><<^^^>>v>^vv>^>^^v<>^^<>v^^<><v<v<vvv^<vv<<>v^><<><v<>vv<<^vvvv><<<v>v>v^>v^<>v^>^<v<vvv^>^<>^>^^v<>><<<><v<^^>^v<v>^^v^v<<<^v^<>^<>v>^^>v<v<v>v>^^<<<><<^>v<v<^vv^v><^^<<vv>^<<v><>^>>>>><v^v<<<^>^v^v<<v<>vvv<<>v>v>>^v^v^>><<<<>v^<v<><<>>>^>>^>><<v>'
# Count houses visited by two alternating walkers over the move string
# ``path`` ('^' north, 'v' south, '>' east, '<' west); positions are
# recorded as "x/y" strings so a later len(set(t)) counts unique houses.
t = []
x1, y1, x2, y2 = 0, 0, 0, 0
for i in range(len(path)):
    # Even-indexed moves drive the (x2, y2) walker.
    if i%2 == 0:
        if path[i] == '^': x2+=1
        if path[i] == 'v': x2-=1
        if path[i] == '>': y2+=1
        if path[i] == '<': y2-=1
        t.append(str(x2)+'/'+str(y2))
    # Odd-indexed moves drive the (x1, y1) walker.
    if i%2 == 1:
        if path[i] == '^': x1+=1
        if path[i] == 'v': x1-=1
        if path[i] == '>': y1+=1
        if path[i] == '<': y1-=1
        t.append(str(x1)+'/'+str(y1))
print(len(set(t))) | true | true |
1c2fc07b459c8bfbe90b2101897c68347d65049c | 2,950 | py | Python | pyreaclib/amemass/ame_nuclide.py | jennranta/pyreaclib | bd9210153b0c01c7ce230b43b88f0a5a1e198c0f | [
"BSD-3-Clause"
] | null | null | null | pyreaclib/amemass/ame_nuclide.py | jennranta/pyreaclib | bd9210153b0c01c7ce230b43b88f0a5a1e198c0f | [
"BSD-3-Clause"
] | null | null | null | pyreaclib/amemass/ame_nuclide.py | jennranta/pyreaclib | bd9210153b0c01c7ce230b43b88f0a5a1e198c0f | [
"BSD-3-Clause"
] | null | null | null | # Common Imports
from __future__ import print_function
class AMENuclide(object):
    """A single nuclide record from an AME (Atomic Mass Evaluation) table.

    Numeric fields are parsed from their raw (typically string) form on
    construction and converted to conventional units: energy fields from
    keV to MeV and masses from micro-amu to amu.
    """
    def __init__(self, n=None, z=None, a=None, element=None, origin=None,
                 mexcess=None, d_mexcess=None, nucbind=None,
                 d_nucbind=None, decay_type=None, ebeta=None,
                 d_ebeta=None, mass=None, d_mass=None):
        """Parse and store the nuclide fields.

        Args:
            n, z, a: neutron, proton and mass numbers (int-convertible).
            element: element symbol.
            origin: origin annotation from the table.
            mexcess, d_mexcess: mass excess and its uncertainty (keV).
            nucbind, d_nucbind: binding energy per nucleon and its
                uncertainty (keV).
            decay_type: decay-mode label.
            ebeta, d_ebeta: beta-decay energy and its uncertainty (keV).
            mass, d_mass: atomic mass and its uncertainty (micro-amu).

        Missing fields may be passed as None or as blank strings; both
        are stored as None.
        """
        # Fix: the previous truthiness checks (``if n:``) silently dropped
        # legitimate zero values (e.g. n=0 for protium) and crashed on
        # whitespace-only strings; the helpers below handle both.
        self.n = self._to_int(n)
        self.z = self._to_int(z)
        self.a = self._to_int(a)
        self.element = element
        self.origin = origin
        self.mexcess = self._to_float(mexcess)
        self.d_mexcess = self._to_float(d_mexcess)
        self.nucbind = self._to_float(nucbind)
        self.d_nucbind = self._to_float(d_nucbind)
        self.decay_type = decay_type
        self.ebeta = self._to_float(ebeta)
        self.d_ebeta = self._to_float(d_ebeta)
        self.mass = self._to_float(mass)
        self.d_mass = self._to_float(d_mass)
        self.convert_MeV()
        self.convert_amu()
    @staticmethod
    def _to_int(value):
        """Return ``int(value)``, or None for a missing field
        (None or a blank/whitespace-only string)."""
        if value is None:
            return None
        if isinstance(value, str) and not value.strip():
            return None
        return int(value)
    @staticmethod
    def _to_float(value):
        """Return ``float(value)``, or None for a missing field
        (None or a blank/whitespace-only string)."""
        if value is None:
            return None
        if isinstance(value, str) and not value.strip():
            return None
        return float(value)
    def print_contents(self):
        """
        Print Contents
        """
        print('n = {}'.format(self.n))
        print('z = {}'.format(self.z))
        print('a = {}'.format(self.a))
        print('element = {}'.format(self.element))
        print('origin = {}'.format(self.origin))
        print('mexcess = {}'.format(self.mexcess))
        print('d_mexcess = {}'.format(self.d_mexcess))
        print('nucbind = {}'.format(self.nucbind))
        print('d_nucbind = {}'.format(self.d_nucbind))
        print('decay_type = {}'.format(self.decay_type))
        print('ebeta = {}'.format(self.ebeta))
        print('d_ebeta = {}'.format(self.d_ebeta))
        print('mass = {}'.format(self.mass))
        print('d_mass = {}'.format(self.d_mass))
    def convert_MeV(self):
        """
        Convert keV to MeV (in place, for all energy fields).
        """
        # ``is not None`` rather than truthiness, so an exact 0.0 value is
        # processed consistently with every other value.
        for attr in ('mexcess', 'd_mexcess', 'nucbind',
                     'd_nucbind', 'ebeta', 'd_ebeta'):
            value = getattr(self, attr)
            if value is not None:
                setattr(self, attr, value / 1.0e3)
    def convert_amu(self):
        """
        Convert micro-amu to amu (in place, for the mass fields).
        """
        for attr in ('mass', 'd_mass'):
            value = getattr(self, attr)
            if value is not None:
                setattr(self, attr, value / 1.0e6)
| 29.79798 | 73 | 0.530847 |
from __future__ import print_function
class AMENuclide(object):
def __init__(self, n=None, z=None, a=None, element=None, origin=None,
mexcess=None, d_mexcess=None, nucbind=None,
d_nucbind=None, decay_type=None, ebeta=None,
d_ebeta=None, mass=None, d_mass=None):
self.n=None
self.z=None
self.a=None
self.element=None
self.origin=None
self.mexcess=None
self.d_mexcess=None
self.nucbind=None
self.d_nucbind=None
self.decay_type=None
self.ebeta=None
self.d_ebeta=None
self.mass=None
self.d_mass=None
if n:
self.n = int(n)
if z:
self.z = int(z)
if a:
self.a = int(a)
self.element = element
self.origin = origin
if mexcess:
self.mexcess = float(mexcess)
if d_mexcess:
self.d_mexcess = float(d_mexcess)
if nucbind:
self.nucbind = float(nucbind)
if d_nucbind:
self.d_nucbind = float(d_nucbind)
self.decay_type = decay_type
if ebeta:
self.ebeta = float(ebeta)
if d_ebeta:
self.d_ebeta = float(d_ebeta)
if mass:
self.mass = float(mass)
if d_mass:
self.d_mass = float(d_mass)
self.convert_MeV()
self.convert_amu()
def print_contents(self):
print('n = {}'.format(self.n))
print('z = {}'.format(self.z))
print('a = {}'.format(self.a))
print('element = {}'.format(self.element))
print('origin = {}'.format(self.origin))
print('mexcess = {}'.format(self.mexcess))
print('d_mexcess = {}'.format(self.d_mexcess))
print('nucbind = {}'.format(self.nucbind))
print('d_nucbind = {}'.format(self.d_nucbind))
print('decay_type = {}'.format(self.decay_type))
print('ebeta = {}'.format(self.ebeta))
print('d_ebeta = {}'.format(self.d_ebeta))
print('mass = {}'.format(self.mass))
print('d_mass = {}'.format(self.d_mass))
def convert_MeV(self):
if self.mexcess:
self.mexcess = self.mexcess/1.0e3
if self.d_mexcess:
self.d_mexcess = self.d_mexcess/1.0e3
if self.nucbind:
self.nucbind = self.nucbind/1.0e3
if self.d_nucbind:
self.d_nucbind = self.d_nucbind/1.0e3
if self.ebeta:
self.ebeta = self.ebeta/1.0e3
if self.d_ebeta:
self.d_ebeta = self.d_ebeta/1.0e3
def convert_amu(self):
if self.mass:
self.mass = self.mass/1.0e6
if self.d_mass:
self.d_mass = self.d_mass/1.0e6
| true | true |
1c2fc0eaac66c2170fc9e708497eb4ba1c41f83a | 2,412 | py | Python | astropy/constants/utils.py | MatiasRepetto/astropy | 689f9d3b063145150149e592a879ee40af1fac06 | [
"BSD-3-Clause"
] | 1 | 2019-03-11T12:26:49.000Z | 2019-03-11T12:26:49.000Z | astropy/constants/utils.py | MatiasRepetto/astropy | 689f9d3b063145150149e592a879ee40af1fac06 | [
"BSD-3-Clause"
] | 1 | 2019-10-09T18:54:27.000Z | 2019-10-09T18:54:27.000Z | astropy/constants/utils.py | MatiasRepetto/astropy | 689f9d3b063145150149e592a879ee40af1fac06 | [
"BSD-3-Clause"
] | 1 | 2020-02-18T04:10:00.000Z | 2020-02-18T04:10:00.000Z | # Licensed under a 3-clause BSD style license - see LICENSE.rst
"""Utility functions for ``constants`` sub-package."""
import itertools
__all__ = []
def _get_c(codata, iaudata, module, not_in_module_only=True):
    """
    Generate the ``Constant`` objects found in the CODATA and IAU modules.

    Parameters
    ----------
    codata, iaudata : obj
        Modules containing CODATA and IAU constants of interest.
    module : obj
        Namespace module of interest.
    not_in_module_only : bool
        If ``True``, constants whose abbreviation is already present in
        the namespace of ``module`` are skipped.

    Yields
    ------
    Constant
        Each matching constant, in sorted-name order (CODATA first).
    """
    from .constant import Constant

    candidates = itertools.chain(sorted(vars(codata).items()),
                                 sorted(vars(iaudata).items()))
    for _, obj in candidates:
        if not isinstance(obj, Constant):
            continue
        if not_in_module_only and obj.abbrev in module.__dict__:
            continue
        yield obj
def _set_c(codata, iaudata, module, not_in_module_only=True, doclines=None,
           set_class=False):
    """
    Install constants from the CODATA and IAU modules into ``module``.

    Parameters
    ----------
    codata, iaudata : obj
        Modules containing CODATA and IAU constants of interest.
    module : obj
        Namespace module to populate.
    not_in_module_only : bool
        If ``True``, constants already present in ``module`` are left alone.
    doclines : list or None
        When given, a formatted summary line per installed constant is
        appended in place (useful for building the module docstring).
    set_class : bool
        If ``True``, install a freshly constructed instance of each
        constant's class instead of the shared object itself.
    """
    for const in _get_c(codata, iaudata, module,
                        not_in_module_only=not_in_module_only):
        if set_class:
            installed = const.__class__(const.abbrev, const.name, const.value,
                                        const._unit_string, const.uncertainty,
                                        const.reference)
        else:
            installed = const
        setattr(module, const.abbrev, installed)
        if doclines is not None:
            doclines.append('{:^10} {:^14.9g} {:^16} {}'.format(
                const.abbrev, const.value, const._unit_string, const.name))
| 29.777778 | 77 | 0.596186 |
import itertools
__all__ = []
def _get_c(codata, iaudata, module, not_in_module_only=True):
from .constant import Constant
for _nm, _c in itertools.chain(sorted(vars(codata).items()),
sorted(vars(iaudata).items())):
if not isinstance(_c, Constant):
continue
elif (not not_in_module_only) or (_c.abbrev not in module.__dict__):
yield _c
def _set_c(codata, iaudata, module, not_in_module_only=True, doclines=None,
set_class=False):
for _c in _get_c(codata, iaudata, module,
not_in_module_only=not_in_module_only):
if set_class:
value = _c.__class__(_c.abbrev, _c.name, _c.value,
_c._unit_string, _c.uncertainty,
_c.reference)
else:
value = _c
setattr(module, _c.abbrev, value)
if doclines is not None:
doclines.append('{:^10} {:^14.9g} {:^16} {}'.format(
_c.abbrev, _c.value, _c._unit_string, _c.name))
| true | true |
1c2fc1bdd77a98dff2241412a763ab542480c417 | 7,213 | py | Python | Lib/site-packages/openpyxl/workbook/defined_name.py | percevalm/aumyproject | b24b38005188ce9dd41ed663cf54dad5464afef3 | [
"bzip2-1.0.6"
] | 1 | 2017-10-31T02:37:37.000Z | 2017-10-31T02:37:37.000Z | Lib/site-packages/openpyxl/workbook/defined_name.py | percevalm/aumyproject | b24b38005188ce9dd41ed663cf54dad5464afef3 | [
"bzip2-1.0.6"
] | 16 | 2020-03-24T17:30:37.000Z | 2022-03-11T23:57:41.000Z | Lib/site-packages/openpyxl/workbook/defined_name.py | percevalm/aumyproject | b24b38005188ce9dd41ed663cf54dad5464afef3 | [
"bzip2-1.0.6"
] | null | null | null | from __future__ import absolute_import
# Copyright (c) 2010-2017 openpyxl
import re
from openpyxl.descriptors.serialisable import Serialisable
from openpyxl.descriptors import (
Alias,
Typed,
String,
Float,
Integer,
Bool,
NoneSet,
Set,
Sequence,
Descriptor,
)
from openpyxl.compat import safe_string
from openpyxl.formula import Tokenizer
from openpyxl.utils.cell import (
SHEETRANGE_RE,
SHEET_TITLE,
)
# Names Excel reserves for built-in defined names; in the workbook XML
# these are stored with an "_xlnm." prefix.
RESERVED = frozenset(["Print_Area", "Print_Titles", "Criteria",
                      "_FilterDatabase", "Extract", "Consolidate_Area",
                      "Sheet_Title"])

_names = "|".join(RESERVED)
# Matches a reserved defined name such as "_xlnm.Print_Area" and captures
# the short name in the "name" group.
RESERVED_REGEX = re.compile(r"^_xlnm\.(?P<name>{0})".format(_names))
# A column range like "$A:$C" (1-3 letters per side, optional "$" anchors).
COL_RANGE = r"""(?P<cols>[$]?[a-zA-Z]{1,3}:[$]?[a-zA-Z]{1,3})"""
COL_RANGE_RE = re.compile(COL_RANGE)
# A row range like "$1:$5".
ROW_RANGE = r"""(?P<rows>[$]?\d+:[$]?\d+)"""
ROW_RANGE_RE = re.compile(ROW_RANGE)
# A sheet title optionally followed by row and/or column ranges, as used
# in print-titles definitions.
TITLES_REGEX = re.compile("""{0}{1}?,?{2}?""".format(SHEET_TITLE, ROW_RANGE, COL_RANGE),
                          re.VERBOSE)
### utilities
def _unpack_print_titles(defn):
    """
    Extract the row and/or column ranges from a print-titles definition so
    that they can be assigned to a worksheet.

    Returns a ``(rows, cols)`` pair; either element is ``None`` when the
    definition does not include that range.
    """
    found = {}
    for match in TITLES_REGEX.finditer(defn.value):
        for group_name, captured in match.groupdict().items():
            if captured:
                found[group_name] = captured
    return found.get('rows'), found.get('cols')
def _unpack_print_area(defn):
    """
    Extract the cell ranges of a print-area definition.

    A definition may reference several ranges; every non-empty "cells"
    capture is collected in order.
    """
    return [match.group("cells")
            for match in SHEETRANGE_RE.finditer(defn.value)
            if match.group("cells")]
class DefinedName(Serialisable):
    """
    A single defined name (named range, print area, constant, formula...)
    of a workbook or worksheet.

    The definition itself is stored in ``attr_text`` (aliased as
    ``value``), e.g. ``'Sheet1'!$A$1:$B$2``.  ``localSheetId`` restricts
    the name to one worksheet; ``None`` means workbook (global) scope.
    """

    tagname = "definedName"

    name = String()  # unique per workbook/worksheet
    comment = String(allow_none=True)
    customMenu = String(allow_none=True)
    description = String(allow_none=True)
    help = String(allow_none=True)
    statusBar = String(allow_none=True)
    localSheetId = Integer(allow_none=True)
    hidden = Bool(allow_none=True)
    function = Bool(allow_none=True)
    vbProcedure = Bool(allow_none=True)
    xlm = Bool(allow_none=True)
    functionGroupId = Integer(allow_none=True)
    shortcutKey = String(allow_none=True)
    publishToServer = Bool(allow_none=True)
    workbookParameter = Bool(allow_none=True)
    attr_text = Descriptor()
    value = Alias("attr_text")

    def __init__(self,
                 name=None,
                 comment=None,
                 customMenu=None,
                 description=None,
                 help=None,
                 statusBar=None,
                 localSheetId=None,
                 hidden=None,
                 function=None,
                 vbProcedure=None,
                 xlm=None,
                 functionGroupId=None,
                 shortcutKey=None,
                 publishToServer=None,
                 workbookParameter=None,
                 attr_text=None
                 ):
        self.name = name
        self.comment = comment
        self.customMenu = customMenu
        self.description = description
        self.help = help
        self.statusBar = statusBar
        self.localSheetId = localSheetId
        self.hidden = hidden
        self.function = function
        self.vbProcedure = vbProcedure
        self.xlm = xlm
        self.functionGroupId = functionGroupId
        self.shortcutKey = shortcutKey
        self.publishToServer = publishToServer
        self.workbookParameter = workbookParameter
        self.attr_text = attr_text

    @property
    def type(self):
        """Token type of the definition (for operands, the subtype,
        e.g. "RANGE" or "TEXT")."""
        tok = Tokenizer("=" + self.value)
        parsed = tok.items[0]
        if parsed.type == "OPERAND":
            return parsed.subtype
        return parsed.type

    @property
    def destinations(self):
        """Yield ``(sheetname, cell_range)`` pairs for a RANGE definition."""
        if self.type == "RANGE":
            tok = Tokenizer("=" + self.value)
            for part in tok.items:
                if part.subtype == "RANGE":
                    m = SHEETRANGE_RE.match(part.value)
                    # Sheet titles may or may not be quoted in the formula.
                    sheetname = m.group('notquoted') or m.group('quoted')
                    yield sheetname, m.group('cells')

    @property
    def is_reserved(self):
        """Return the reserved short name (e.g. "Print_Area") if this is a
        built-in "_xlnm."-prefixed name, otherwise ``None``."""
        m = RESERVED_REGEX.match(self.name)
        if m:
            return m.group("name")

    @property
    def is_external(self):
        """Whether the definition refers to an external workbook
        (value starts with a bracketed index such as "[1]")."""
        return re.compile(r"^\[\d+\].*").match(self.value) is not None

    def __iter__(self):
        """Yield ``(attribute, string_value)`` pairs for XML serialisation.

        The definition text itself is skipped, and reserved attribute
        values are re-prefixed with "_xlnm." as Excel expects.
        """
        for key in self.__attrs__:
            if key == "attr_text":
                continue
            v = getattr(self, key)
            if v is not None:
                if v in RESERVED:
                    v = "_xlnm." + v
                yield key, safe_string(v)
class DefinedNameList(Serialisable):
    """
    Container for the defined names of a workbook.

    Names are unique per (name, scope) pair, where the scope is the
    ``localSheetId`` (``None`` for workbook-global names).  The dict-like
    dunder methods (``in``, ``[]``, ``del``) operate on *global* names
    only; use :meth:`get` / :meth:`delete` with ``scope`` for
    sheet-local names.
    """

    tagname = "definedNames"

    definedName = Sequence(expected_type=DefinedName)

    def __init__(self, definedName=()):
        self.definedName = definedName

    def _cleanup(self):
        """
        Strip broken or unknown definitions
        """
        self.delete("_xlnm.Print_Titles")
        self.delete("_xlnm.Print_Area")

    def _duplicate(self, defn):
        """
        Check for whether a DefinedName with the same name and scope
        already exists
        """
        for d in self.definedName:
            if d.name == defn.name and d.localSheetId == defn.localSheetId:
                return True

    def append(self, defn):
        """Add a DefinedName, rejecting duplicates of the same name/scope."""
        if not isinstance(defn, DefinedName):
            raise TypeError("""You can only append DefinedNames""")
        if self._duplicate(defn):
            raise ValueError("""DefinedName with the same name and scope already exists""")
        # Reassign a fresh list so the Sequence descriptor revalidates it.
        names = self.definedName[:]
        names.append(defn)
        self.definedName = names

    def __len__(self):
        return len(self.definedName)

    def __contains__(self, name):
        """
        See if a globally defined name exists
        """
        for defn in self.definedName:
            if defn.name == name and defn.localSheetId is None:
                return True

    def __getitem__(self, name):
        """
        Get a globally defined name
        """
        defn = self.get(name)
        if not defn:
            raise KeyError("No definition called {0}".format(name))
        return defn

    def get(self, name, scope=None):
        """
        Get the name assigned to a specific sheet, or global if
        ``scope`` is None
        """
        for defn in self.definedName:
            if defn.name == name and defn.localSheetId == scope:
                return defn

    def __delitem__(self, name):
        """
        Delete a globally defined name
        """
        if not self.delete(name):
            raise KeyError("No globally defined name {0}".format(name))

    def delete(self, name, scope=None):
        """
        Delete a name assigned to a specific sheet, or global if
        ``scope`` is None; returns True when something was removed
        """
        for idx, defn in enumerate(self.definedName):
            if defn.name == name and defn.localSheetId == scope:
                del self.definedName[idx]
                return True

    def localnames(self, scope):
        """
        Provide a list of all names for a particular worksheet
        """
        return [defn.name for defn in self.definedName if defn.localSheetId == scope]
| 27.530534 | 91 | 0.579093 | from __future__ import absolute_import
import re
from openpyxl.descriptors.serialisable import Serialisable
from openpyxl.descriptors import (
Alias,
Typed,
String,
Float,
Integer,
Bool,
NoneSet,
Set,
Sequence,
Descriptor,
)
from openpyxl.compat import safe_string
from openpyxl.formula import Tokenizer
from openpyxl.utils.cell import (
SHEETRANGE_RE,
SHEET_TITLE,
)
RESERVED = frozenset(["Print_Area", "Print_Titles", "Criteria",
"_FilterDatabase", "Extract", "Consolidate_Area",
"Sheet_Title"])
_names = "|".join(RESERVED)
RESERVED_REGEX = re.compile(r"^_xlnm\.(?P<name>{0})".format(_names))
COL_RANGE = r"""(?P<cols>[$]?[a-zA-Z]{1,3}:[$]?[a-zA-Z]{1,3})"""
COL_RANGE_RE = re.compile(COL_RANGE)
ROW_RANGE = r"""(?P<rows>[$]?\d+:[$]?\d+)"""
ROW_RANGE_RE = re.compile(ROW_RANGE)
TITLES_REGEX = re.compile("""{0}{1}?,?{2}?""".format(SHEET_TITLE, ROW_RANGE, COL_RANGE),
re.VERBOSE)
les(defn):
scanner = TITLES_REGEX.finditer(defn.value)
kw = dict((k, v) for match in scanner
for k, v in match.groupdict().items() if v)
return kw.get('rows'), kw.get('cols')
def _unpack_print_area(defn):
new = []
for m in SHEETRANGE_RE.finditer(defn.value):
coord = m.group("cells")
if coord:
new.append(coord)
return new
class DefinedName(Serialisable):
tagname = "definedName"
name = String()
comment = String(allow_none=True)
customMenu = String(allow_none=True)
description = String(allow_none=True)
help = String(allow_none=True)
statusBar = String(allow_none=True)
localSheetId = Integer(allow_none=True)
hidden = Bool(allow_none=True)
function = Bool(allow_none=True)
vbProcedure = Bool(allow_none=True)
xlm = Bool(allow_none=True)
functionGroupId = Integer(allow_none=True)
shortcutKey = String(allow_none=True)
publishToServer = Bool(allow_none=True)
workbookParameter = Bool(allow_none=True)
attr_text = Descriptor()
value = Alias("attr_text")
def __init__(self,
name=None,
comment=None,
customMenu=None,
description=None,
help=None,
statusBar=None,
localSheetId=None,
hidden=None,
function=None,
vbProcedure=None,
xlm=None,
functionGroupId=None,
shortcutKey=None,
publishToServer=None,
workbookParameter=None,
attr_text=None
):
self.name = name
self.comment = comment
self.customMenu = customMenu
self.description = description
self.help = help
self.statusBar = statusBar
self.localSheetId = localSheetId
self.hidden = hidden
self.function = function
self.vbProcedure = vbProcedure
self.xlm = xlm
self.functionGroupId = functionGroupId
self.shortcutKey = shortcutKey
self.publishToServer = publishToServer
self.workbookParameter = workbookParameter
self.attr_text = attr_text
@property
def type(self):
tok = Tokenizer("=" + self.value)
parsed = tok.items[0]
if parsed.type == "OPERAND":
return parsed.subtype
return parsed.type
@property
def destinations(self):
if self.type == "RANGE":
tok = Tokenizer("=" + self.value)
for part in tok.items:
if part.subtype == "RANGE":
m = SHEETRANGE_RE.match(part.value)
sheetname = m.group('notquoted') or m.group('quoted')
yield sheetname, m.group('cells')
@property
def is_reserved(self):
m = RESERVED_REGEX.match(self.name)
if m:
return m.group("name")
@property
def is_external(self):
return re.compile(r"^\[\d+\].*").match(self.value) is not None
def __iter__(self):
for key in self.__attrs__:
if key == "attr_text":
continue
v = getattr(self, key)
if v is not None:
if v in RESERVED:
v = "_xlnm." + v
yield key, safe_string(v)
class DefinedNameList(Serialisable):
tagname = "definedNames"
definedName = Sequence(expected_type=DefinedName)
def __init__(self, definedName=()):
self.definedName = definedName
def _cleanup(self):
self.delete("_xlnm.Print_Titles")
self.delete("_xlnm.Print_Area")
def _duplicate(self, defn):
for d in self.definedName:
if d.name == defn.name and d.localSheetId == defn.localSheetId:
return True
def append(self, defn):
if not isinstance(defn, DefinedName):
raise TypeError("""You can only append DefinedNames""")
if self._duplicate(defn):
raise ValueError("""DefinedName with the same name and scope already exists""")
names = self.definedName[:]
names.append(defn)
self.definedName = names
def __len__(self):
return len(self.definedName)
def __contains__(self, name):
for defn in self.definedName:
if defn.name == name and defn.localSheetId is None:
return True
def __getitem__(self, name):
defn = self.get(name)
if not defn:
raise KeyError("No definition called {0}".format(name))
return defn
def get(self, name, scope=None):
for defn in self.definedName:
if defn.name == name and defn.localSheetId == scope:
return defn
def __delitem__(self, name):
if not self.delete(name):
raise KeyError("No globally defined name {0}".format(name))
def delete(self, name, scope=None):
for idx, defn in enumerate(self.definedName):
if defn.name == name and defn.localSheetId == scope:
del self.definedName[idx]
return True
def localnames(self, scope):
return [defn.name for defn in self.definedName if defn.localSheetId == scope]
| true | true |
1c2fc1e3ad518f905ac5ccdd97c9c53482b06b2c | 8,778 | py | Python | danceschool/financial/handlers.py | django-danceschool/django-danceschool | 65ae09ffdcb0821e82df0e1f634fe13c0384a525 | [
"BSD-3-Clause"
] | 32 | 2017-09-12T04:25:25.000Z | 2022-03-21T10:48:07.000Z | danceschool/financial/handlers.py | django-danceschool/django-danceschool | 65ae09ffdcb0821e82df0e1f634fe13c0384a525 | [
"BSD-3-Clause"
] | 97 | 2017-09-01T02:43:08.000Z | 2022-01-03T18:20:34.000Z | danceschool/financial/handlers.py | django-danceschool/django-danceschool | 65ae09ffdcb0821e82df0e1f634fe13c0384a525 | [
"BSD-3-Clause"
] | 19 | 2017-09-26T13:34:46.000Z | 2022-03-21T10:48:10.000Z | from django.dispatch import receiver
from django.db.models import Q, Value, CharField, F
from django.db.models.query import QuerySet
from django.db.models.signals import post_save, m2m_changed
from django.utils.translation import gettext_lazy as _
from django.contrib.auth.models import User
import sys
import logging
from danceschool.core.models import (
EventStaffMember, EventOccurrence, InvoiceItem, Invoice, StaffMember,
Location, EventRegistration
)
from danceschool.core.constants import getConstant
from danceschool.core.signals import get_eventregistration_data
from .models import ExpenseItem, RevenueItem, RepeatedExpenseRule
# Define logger for this file
logger = logging.getLogger(__name__)
@receiver(m2m_changed, sender=EventStaffMember.occurrences.through)
def modifyExistingExpenseItemsForEventStaff(sender, instance, **kwargs):
    """
    Recalculate hourly expense items when the occurrences linked to an
    EventStaffMember change.

    Fires only on the m2m ``post_add`` action.  Existing hourly expense
    items for this staff member's expense rules (and for any staff member
    they replaced) get the new net hours, a recomputed total, and are
    reset to unapproved so the change is reviewed.
    """
    # Skip when loading fixtures or saving raw data.
    if 'loaddata' in sys.argv or ('raw' in kwargs and kwargs['raw']):
        return
    # Only act once the new occurrence links have been added.
    if kwargs.get('action', None) != 'post_add':
        return
    logger.debug('ExpenseItem signal fired for EventStaffMember %s.' % instance.pk)
    staff_expenses = ExpenseItem.objects.filter(
        event=instance.event,
        expenseRule__in=instance.staffMember.expenserules.all(),
        expenseRule__applyRateRule=RepeatedExpenseRule.RateRuleChoices.hourly,
    )
    if staff_expenses:
        logger.debug('Updating existing expense items for event staff member.')
        # Fill in the updated hours and the updated total. Set the expense item
        # to unapproved.
        for expense in staff_expenses:
            logger.debug('Updating expense item %s.' % expense.id)
            expense.hours = instance.netHours
            expense.total = expense.hours * expense.wageRate
            expense.approved = None
            expense.save()
    # The replaced staff member's hours shrink by whatever this member
    # now covers, so their expenses must be recomputed too.
    if hasattr(instance.replacedStaffMember, 'staffMember'):
        logger.debug('Adjusting totals for replaced event staff member.')
        replaced_expenses = ExpenseItem.objects.filter(
            event=instance.event,
            expenseRule__staffmemberwageinfo__staffMember=instance.replacedStaffMember.staffMember,
            expenseRule__applyRateRule=RepeatedExpenseRule.RateRuleChoices.hourly,
        )
        # Fill in the updated hours and the updated total. Set the expense item
        # to unapproved.
        for expense in replaced_expenses:
            logger.debug('Updating expense item %s' % expense.id)
            expense.hours = instance.replacedStaffMember.netHours
            expense.total = expense.hours * expense.wageRate
            expense.approved = None
            expense.save()
@receiver(post_save, sender=EventOccurrence)
def modifyExistingExpenseItemsForSeriesClass(sender, instance, **kwargs):
    """
    Recalculate hourly staff expense items for an event when one of its
    occurrences is saved (e.g. a class time changes).

    Updated expenses are reset to unapproved so the new totals get
    reviewed.
    """
    # Skip when loading fixtures or saving raw data.
    if 'loaddata' in sys.argv or ('raw' in kwargs and kwargs['raw']):
        return
    logger.debug('ExpenseItem signal fired for EventOccurrence %s.' % instance.id)
    staff_expenses = ExpenseItem.objects.filter(
        event=instance.event,
        expenseRule__staffmemberwageinfo__isnull=False,
        expenseRule__applyRateRule=RepeatedExpenseRule.RateRuleChoices.hourly,
    )
    # Fill in the updated hours and the updated total. Set the expense item
    # to unapproved.
    for expense in staff_expenses:
        esm_filters = Q(event=expense.event) & Q(staffMember=expense.expenseRule.staffMember)
        if expense.expenseRule.category:
            esm_filters = esm_filters & Q(category=expense.expenseRule.category)
        # In instances where the expense rule does not specify a category, there could
        # be more than one EventStaffMember object for a given staffMember at the
        # same Event. There is no easy way to identify which expense is which in this instance,
        # so when EventOccurrences are modified, these expenses will not update.
        eventstaffmembers = EventStaffMember.objects.filter(esm_filters)
        if eventstaffmembers.count() == 1:
            esm = eventstaffmembers.first()
            expense.hours = esm.netHours
            expense.total = expense.hours * expense.wageRate
            expense.approved = None
            expense.save()
@receiver(post_save, sender=InvoiceItem)
def createRevenueItemForInvoiceItem(sender, instance, **kwargs):
    """
    Create or synchronise the RevenueItem mirroring a saved InvoiceItem.

    Preliminary invoices are ignored.  When a RevenueItem already exists,
    its financial fields are compared against the invoice item (and its
    parent invoice) and saved only if something actually changed.
    """
    # Skip when loading fixtures or saving raw data.
    if 'loaddata' in sys.argv or ('raw' in kwargs and kwargs['raw']):
        return
    logger.debug('RevenueItem signal fired for InvoiceItem %s.' % instance.id)
    if instance.invoice.status == Invoice.PaymentStatus.preliminary:
        logger.debug('Preliminary invoice. No revenue item will be created.')
        return
    received_status = (not instance.invoice.unpaid)
    related_item = getattr(instance, 'revenueitem', None)
    if not related_item:
        related_item = RevenueItem.objects.create(
            invoiceItem=instance,
            invoiceNumber=instance.id,
            grossTotal=instance.grossTotal,
            total=instance.total,
            adjustments=instance.adjustments,
            fees=instance.fees,
            taxes=instance.taxes,
            buyerPaysSalesTax=instance.invoice.buyerPaysSalesTax,
            category=getConstant('financial__registrationsRevenueCat'),
            submissionUser=instance.invoice.submissionUser,
            currentlyHeldBy=instance.invoice.collectedByUser,
            received=received_status,
            paymentMethod=instance.invoice.get_payment_method(),
            description=_('Registration invoice %s' % instance.id)
        )
        logger.debug('RevenueItem created.')
    else:
        # Check that the existing revenueItem is still correct
        saveFlag = False
        # Financial fields mirrored from the InvoiceItem itself.
        for field in ['grossTotal', 'total', 'adjustments', 'fees', 'taxes']:
            if getattr(related_item, field) != getattr(instance, field):
                setattr(related_item, field, getattr(instance, field))
                saveFlag = True
        # Fields mirrored from the parent Invoice.
        for field in ['buyerPaysSalesTax', ]:
            if getattr(related_item, field) != getattr(instance.invoice, field):
                setattr(related_item, field, getattr(instance.invoice, field))
                saveFlag = True
        if related_item.received != received_status:
            related_item.received = received_status
            related_item.paymentMethod = instance.invoice.get_payment_method()
            saveFlag = True
        if saveFlag:
            related_item.save()
            logger.info('RevenueItem associated with InvoiceItem %s updated.' % instance.id)
@receiver(post_save, sender=Invoice)
def createRevenueItemsFromInvoice(sender, instance, **kwargs):
    """
    Ensure revenue items exist for every item of a non-preliminary invoice.

    An invoice can move from preliminary to non-preliminary without its
    items being edited, so the per-item handler is re-run for each item
    here.
    """
    if 'loaddata' in sys.argv or kwargs.get('raw'):
        return
    logger.debug('RevenueItem signal fired for Invoice %s.' % instance.id)
    if instance.status == Invoice.PaymentStatus.preliminary:
        logger.debug('Preliminary invoice. No revenue items will be created.')
        return
    for invoice_item in instance.invoiceitem_set.all():
        createRevenueItemForInvoiceItem(sender, invoice_item, **kwargs)
@receiver(post_save, sender=User)
@receiver(post_save, sender=StaffMember)
@receiver(post_save, sender=Location)
def updateTransactionParty(sender, instance, **kwargs):
    """
    Propagate changes of a User, StaffMember, or Location to its
    associated TransactionParty (if one exists) by re-saving the party
    with the updated source object.
    """
    if 'loaddata' in sys.argv or kwargs.get('raw'):
        return
    logger.debug('TransactionParty signal fired for %s %s.' % (instance.__class__.__name__, instance.id))
    linked_party = getattr(instance, 'transactionparty', None)
    if linked_party:
        linked_party.save(updateBy=instance)
@receiver(get_eventregistration_data)
def reportRevenue(sender, **kwargs):
    """
    Signal handler returning, per event registration, the revenue item
    linked to its invoice item, as a dict keyed by registration id.
    """
    logger.debug('Signal fired to return revenue items associated with registrations')

    regs = kwargs.pop('eventregistrations', None)
    if not (regs and isinstance(regs, QuerySet) and regs.model == EventRegistration):
        logger.warning('No/invalid EventRegistration queryset passed, so revenue items not found.')
        return

    regs = regs.filter(invoiceItem__revenueitem__isnull=False).select_related(
        'invoiceItem__revenueitem'
    )

    extras = {}
    for reg in regs:
        item = reg.invoiceItem.revenueitem
        extras[reg.id] = [{
            'id': item.id,
            'name': item.description,
            'type': 'revenueitem',
            'amount': item.total,
        }]
    return extras
| 39.719457 | 105 | 0.692755 | from django.dispatch import receiver
from django.db.models import Q, Value, CharField, F
from django.db.models.query import QuerySet
from django.db.models.signals import post_save, m2m_changed
from django.utils.translation import gettext_lazy as _
from django.contrib.auth.models import User
import sys
import logging
from danceschool.core.models import (
EventStaffMember, EventOccurrence, InvoiceItem, Invoice, StaffMember,
Location, EventRegistration
)
from danceschool.core.constants import getConstant
from danceschool.core.signals import get_eventregistration_data
from .models import ExpenseItem, RevenueItem, RepeatedExpenseRule
logger = logging.getLogger(__name__)
@receiver(m2m_changed, sender=EventStaffMember.occurrences.through)
def modifyExistingExpenseItemsForEventStaff(sender, instance, **kwargs):
if 'loaddata' in sys.argv or ('raw' in kwargs and kwargs['raw']):
return
if kwargs.get('action', None) != 'post_add':
return
logger.debug('ExpenseItem signal fired for EventStaffMember %s.' % instance.pk)
staff_expenses = ExpenseItem.objects.filter(
event=instance.event,
expenseRule__in=instance.staffMember.expenserules.all(),
expenseRule__applyRateRule=RepeatedExpenseRule.RateRuleChoices.hourly,
)
if staff_expenses:
logger.debug('Updating existing expense items for event staff member.')
for expense in staff_expenses:
logger.debug('Updating expense item %s.' % expense.id)
expense.hours = instance.netHours
expense.total = expense.hours * expense.wageRate
expense.approved = None
expense.save()
if hasattr(instance.replacedStaffMember, 'staffMember'):
logger.debug('Adjusting totals for replaced event staff member.')
replaced_expenses = ExpenseItem.objects.filter(
event=instance.event,
expenseRule__staffmemberwageinfo__staffMember=instance.replacedStaffMember.staffMember,
expenseRule__applyRateRule=RepeatedExpenseRule.RateRuleChoices.hourly,
)
for expense in replaced_expenses:
logger.debug('Updating expense item %s' % expense.id)
expense.hours = instance.replacedStaffMember.netHours
expense.total = expense.hours * expense.wageRate
expense.approved = None
expense.save()
@receiver(post_save, sender=EventOccurrence)
def modifyExistingExpenseItemsForSeriesClass(sender, instance, **kwargs):
if 'loaddata' in sys.argv or ('raw' in kwargs and kwargs['raw']):
return
logger.debug('ExpenseItem signal fired for EventOccurrence %s.' % instance.id)
staff_expenses = ExpenseItem.objects.filter(
event=instance.event,
expenseRule__staffmemberwageinfo__isnull=False,
expenseRule__applyRateRule=RepeatedExpenseRule.RateRuleChoices.hourly,
)
for expense in staff_expenses:
esm_filters = Q(event=expense.event) & Q(staffMember=expense.expenseRule.staffMember)
if expense.expenseRule.category:
esm_filters = esm_filters & Q(category=expense.expenseRule.category)
eventstaffmembers = EventStaffMember.objects.filter(esm_filters)
if eventstaffmembers.count() == 1:
esm = eventstaffmembers.first()
expense.hours = esm.netHours
expense.total = expense.hours * expense.wageRate
expense.approved = None
expense.save()
@receiver(post_save, sender=InvoiceItem)
def createRevenueItemForInvoiceItem(sender, instance, **kwargs):
if 'loaddata' in sys.argv or ('raw' in kwargs and kwargs['raw']):
return
logger.debug('RevenueItem signal fired for InvoiceItem %s.' % instance.id)
if instance.invoice.status == Invoice.PaymentStatus.preliminary:
logger.debug('Preliminary invoice. No revenue item will be created.')
return
received_status = (not instance.invoice.unpaid)
related_item = getattr(instance, 'revenueitem', None)
if not related_item:
related_item = RevenueItem.objects.create(
invoiceItem=instance,
invoiceNumber=instance.id,
grossTotal=instance.grossTotal,
total=instance.total,
adjustments=instance.adjustments,
fees=instance.fees,
taxes=instance.taxes,
buyerPaysSalesTax=instance.invoice.buyerPaysSalesTax,
category=getConstant('financial__registrationsRevenueCat'),
submissionUser=instance.invoice.submissionUser,
currentlyHeldBy=instance.invoice.collectedByUser,
received=received_status,
paymentMethod=instance.invoice.get_payment_method(),
description=_('Registration invoice %s' % instance.id)
)
logger.debug('RevenueItem created.')
else:
saveFlag = False
for field in ['grossTotal', 'total', 'adjustments', 'fees', 'taxes']:
if getattr(related_item, field) != getattr(instance, field):
setattr(related_item, field, getattr(instance, field))
saveFlag = True
for field in ['buyerPaysSalesTax', ]:
if getattr(related_item, field) != getattr(instance.invoice, field):
setattr(related_item, field, getattr(instance.invoice, field))
saveFlag = True
if related_item.received != received_status:
related_item.received = received_status
related_item.paymentMethod = instance.invoice.get_payment_method()
saveFlag = True
if saveFlag:
related_item.save()
logger.info('RevenueItem associated with InvoiceItem %s updated.' % instance.id)
@receiver(post_save, sender=Invoice)
def createRevenueItemsFromInvoice(sender, instance, **kwargs):
if 'loaddata' in sys.argv or ('raw' in kwargs and kwargs['raw']):
return
logger.debug('RevenueItem signal fired for Invoice %s.' % instance.id)
if instance.status == Invoice.PaymentStatus.preliminary:
logger.debug('Preliminary invoice. No revenue items will be created.')
return
for item in instance.invoiceitem_set.all():
createRevenueItemForInvoiceItem(sender, item, **kwargs)
@receiver(post_save, sender=User)
@receiver(post_save, sender=StaffMember)
@receiver(post_save, sender=Location)
def updateTransactionParty(sender, instance, **kwargs):
if 'loaddata' in sys.argv or ('raw' in kwargs and kwargs['raw']):
return
logger.debug('TransactionParty signal fired for %s %s.' % (instance.__class__.__name__, instance.id))
party = getattr(instance, 'transactionparty', None)
if party:
party.save(updateBy=instance)
@receiver(get_eventregistration_data)
def reportRevenue(sender, **kwargs):
    '''
    Handler for get_eventregistration_data: for each EventRegistration in the
    passed queryset that has a RevenueItem on its invoice item, report a
    summary of that revenue item.

    Returns a dict mapping registration id -> list with one summary dict
    (id, name, type, amount), or None when no valid queryset was supplied.
    '''
    logger.debug('Signal fired to return revenue items associated with registrations')

    regs = kwargs.pop('eventregistrations', None)
    # Only proceed for a genuine, non-empty EventRegistration queryset.
    if not regs or not isinstance(regs, QuerySet) or not (regs.model == EventRegistration):
        logger.warning('No/invalid EventRegistration queryset passed, so revenue items not found.')
        return

    # Restrict to registrations with an associated revenue item; select_related
    # avoids one query per registration in the loop below.
    with_revenue = regs.filter(invoiceItem__revenueitem__isnull=False).select_related(
        'invoiceItem__revenueitem'
    )

    extras = {}
    for registration in with_revenue:
        revenue_item = registration.invoiceItem.revenueitem
        extras[registration.id] = [{
            'id': revenue_item.id,
            'name': revenue_item.description,
            'type': 'revenueitem',
            'amount': revenue_item.total,
        }, ]
    return extras
| true | true |
1c2fc3209f2199d9779888529609d26bf3a2c41a | 3,245 | py | Python | twittertennis/handler_utils.py | ferencberes/twittertennis | d9a21655c3e5c3599fe4149904c967ed67139569 | [
"CC0-1.0"
] | null | null | null | twittertennis/handler_utils.py | ferencberes/twittertennis | d9a21655c3e5c3599fe4149904c967ed67139569 | [
"CC0-1.0"
] | null | null | null | twittertennis/handler_utils.py | ferencberes/twittertennis | d9a21655c3e5c3599fe4149904c967ed67139569 | [
"CC0-1.0"
] | null | null | null | import pandas as pd
import networkx as nx
from collections import Counter
### EDGES ###
def groupby_count(df, group_cols, count_col):
    """Count occurrences of each distinct combination of `group_cols` in `df`.

    Returns a new DataFrame with one row per distinct combination (in
    first-seen order), the grouping columns as columns, and the occurrence
    count in `count_col`.
    """
    combos = zip(*(df[col] for col in group_cols))
    occurrences = Counter(combos)
    # Counter preserves insertion order, so rows come out in first-seen order.
    keys, counts = zip(*occurrences.items())
    result = pd.DataFrame(list(keys), columns=group_cols)
    result[count_col] = counts
    return result
def group_edges(df, key_col="date"):
    """Split `df` into per-snapshot copies keyed by the values of `key_col`.

    Returns a dict mapping each distinct value of `key_col` (inserted in
    sorted order) to a copy of the matching rows of `df`.
    """
    return {
        snapshot: df[df[key_col] == snapshot].copy()
        for snapshot in sorted(df[key_col].unique())
    }
def get_weighted_edges(df, group_cols):
    """Collapse repeated edges into weighted ones.

    The multiplicity of each distinct `group_cols` combination becomes the
    "weight" column of the returned DataFrame.
    """
    return groupby_count(df, group_cols, "weight")
def prepare_edges(mentions, snapshot_col="date"):
    """Build weighted and raw edge structures from a mention table.

    Returns a 3-tuple:
      * weighted edges over all snapshots,
      * weighted edges grouped per snapshot,
      * raw (unweighted) edges grouped per snapshot.
    """
    cols = ["src", "trg", snapshot_col]
    weighted = get_weighted_edges(mentions, cols)
    weighted_by_snapshot = group_edges(weighted, snapshot_col)
    raw_by_snapshot = group_edges(mentions[cols], snapshot_col)
    return weighted, weighted_by_snapshot, raw_by_snapshot
### NODE REINDEXING ###
def reindex_labels(label_dict, id2account, account2index):
    """Translate label keys from raw ids to contiguous node indices.

    Each key of `label_dict` is mapped id -> account -> index; entries whose
    account has no index are dropped. The result is ordered by index.
    Raises KeyError if an id is missing from `id2account`.
    """
    remapped = {
        account2index[id2account[raw_id]]: label
        for raw_id, label in label_dict.items()
        if id2account[raw_id] in account2index
    }
    # Return the entries sorted by their new integer index.
    return dict(sorted(remapped.items()))
def reindex_edges(df, id_to_account, account_to_index=None, src_col="src_screen_str", trg_col="trg_screen_str"):
    """Map edge endpoints from raw ids to contiguous node indices.

    Parameters
    ----------
    df : pd.DataFrame
        Edge list with "src" and "trg" columns holding raw ids.
    id_to_account : dict
        Maps raw id -> account name.
    account_to_index : dict, optional
        Maps account name -> node index. When None, the original "src"/"trg"
        columns are returned unchanged.
    src_col, trg_col : str
        Names of the temporary account-name columns added to the working copy.

    Returns
    -------
    (src, trg) : pair of Series with reindexed endpoints; edges whose
    endpoints are not covered by `account_to_index` are dropped.
    """
    # Fixed: compare to None with `is not None` (PEP 8), not `!=`.
    if account_to_index is not None:
        # Set membership for isin(): O(1) lookups instead of scanning a list.
        known_accounts = set(account_to_index)
        tmp = df.copy()
        # Resolve raw ids to account names; unknown ids become None (NaN)
        # and are removed by the isin() filter below.
        tmp[src_col] = tmp["src"].apply(id_to_account.get)
        tmp[trg_col] = tmp["trg"].apply(id_to_account.get)
        tmp = tmp[tmp[src_col].isin(known_accounts) & tmp[trg_col].isin(known_accounts)]
        src = tmp[src_col].apply(account_to_index.get)
        trg = tmp[trg_col].apply(account_to_index.get)
    else:
        src = df["src"]
        trg = df["trg"]
    return src, trg
### LABELS ###
def regression_labels(df, snapshot_col):
    """Per-snapshot mention counts for every target node.

    Returns {snapshot_id: {target_node: count}} where count is the number of
    rows of `df` with that (snapshot, trg) combination.
    """
    counts = groupby_count(df, [snapshot_col, "trg"], "count")
    labels = {}
    for snapshot_id in sorted(counts[snapshot_col].unique()):
        snapshot_rows = counts[counts[snapshot_col] == snapshot_id]
        labels[snapshot_id] = dict(zip(snapshot_rows["trg"], snapshot_rows["count"]))
    return labels
### FEATURES ###
def calculate_node_features(G, total_nodes=None, degree=True, transitivity=True):
    """Calculate degree and node transitivity as node features.

    The graph nodes must have integer identifiers from 0 to N-1 where N is
    the number of nodes in G; ids absent from G get 0 for each feature.

    Parameters
    ----------
    G : networkx graph
    total_nodes : int, optional
        Number of feature rows to emit; defaults to G.number_of_nodes().
    degree, transitivity : bool
        Which feature columns to include.

    Returns
    -------
    list of tuples, one per node id in [0, total_nodes), with the selected
    feature values in (degree, transitivity) order.
    """
    # Fixed: identity comparison with None per PEP 8 (`is None`, not `== None`).
    if total_nodes is None:
        total_nodes = G.number_of_nodes()
    scores = []
    if degree:
        degs = dict(nx.degree(G))
        scores.append([degs.get(i, 0) for i in range(total_nodes)])
    if transitivity:
        trans = dict(nx.clustering(G))
        scores.append([trans.get(i, 0) for i in range(total_nodes)])
    return list(zip(*scores))
import networkx as nx
from collections import Counter
_cols, count_col):
parts = [df[col] for col in group_cols]
tuples = list(zip(*parts))
cnt = Counter(tuples)
keys, counts = zip(*list(cnt.items()))
res = pd.DataFrame(keys, columns=group_cols)
res[count_col] = counts
return res
def group_edges(df, key_col="date"):
edges_grouped = {}
keys = sorted(list(df[key_col].unique()))
for key in keys:
edges_grouped[key] = df[df[key_col]==key].copy()
return edges_grouped
def get_weighted_edges(df, group_cols):
weighted_edges = groupby_count(df, group_cols, "weight")
return weighted_edges
def prepare_edges(mentions, snapshot_col="date"):
group_cols = ["src","trg",snapshot_col]
weighted_edges = get_weighted_edges(mentions, group_cols)
weighted_edges_grouped = group_edges(weighted_edges, snapshot_col)
edges_grouped = group_edges(mentions[group_cols], snapshot_col)
return weighted_edges, weighted_edges_grouped, edges_grouped
unt2index):
tuples = []
for key, label in label_dict.items():
account = id2account[key]
if account in account2index:
new_id = account2index[account]
tuples.append((new_id, label))
new_dict = dict(tuples)
ordered_dict = dict(sorted(new_dict.items()))
return ordered_dict
def reindex_edges(df, id_to_account, account_to_index=None, src_col="src_screen_str", trg_col="trg_screen_str"):
if account_to_index != None:
accounts = list(account_to_index.keys())
tmp = df.copy()
tmp[src_col] = tmp["src"].apply(lambda x: id_to_account.get(x))
tmp[trg_col] = tmp["trg"].apply(lambda x: id_to_account.get(x))
tmp = tmp[tmp[src_col].isin(accounts) & tmp[trg_col].isin(accounts)]
src = tmp[src_col].apply(lambda x: account_to_index.get(x))
trg = tmp[trg_col].apply(lambda x: account_to_index.get(x))
else:
src = df["src"]
trg = df["trg"]
return src, trg
pshot_col):
label_records = groupby_count(df, [snapshot_col,"trg"], "count")
snapshots = sorted(list(label_records[snapshot_col].unique()))
labels = {}
for snapshot_id in snapshots:
rec_tmp = label_records[label_records[snapshot_col]==snapshot_id]
dict_tmp = dict(zip(rec_tmp["trg"],rec_tmp["count"]))
labels[snapshot_id] = dict_tmp
return labels
tal_nodes=None, degree=True, transitivity=True):
if total_nodes == None:
total_nodes = G.number_of_nodes()
scores = []
if degree:
degs = dict(nx.degree(G))
scores.append([degs.get(i,0) for i in range(total_nodes)])
if transitivity:
trans = dict(nx.clustering(G))
scores.append([trans.get(i,0) for i in range(total_nodes)])
return list(zip(*scores)) | true | true |
1c2fc51f8b3649c1bd241cd9d62b5618aa991773 | 700 | py | Python | pybind/slxos/v16r_1_00b/no/fcsp/__init__.py | shivharis/pybind | 4e1c6d54b9fd722ccec25546ba2413d79ce337e6 | [
"Apache-2.0"
] | null | null | null | pybind/slxos/v16r_1_00b/no/fcsp/__init__.py | shivharis/pybind | 4e1c6d54b9fd722ccec25546ba2413d79ce337e6 | [
"Apache-2.0"
] | null | null | null | pybind/slxos/v16r_1_00b/no/fcsp/__init__.py | shivharis/pybind | 4e1c6d54b9fd722ccec25546ba2413d79ce337e6 | [
"Apache-2.0"
] | 1 | 2021-11-05T22:15:42.000Z | 2021-11-05T22:15:42.000Z |
from operator import attrgetter
import pyangbind.lib.xpathhelper as xpathhelper
from pyangbind.lib.yangtypes import RestrictedPrecisionDecimalType, RestrictedClassType, TypedListType
from pyangbind.lib.yangtypes import YANGBool, YANGListType, YANGDynClass, ReferenceType
from pyangbind.lib.base import PybindBase
from decimal import Decimal
from bitarray import bitarray
import __builtin__
class fcsp(PybindBase):
  """
  This class was auto-generated by the PythonClass plugin for PYANG
  from YANG module brocade-common-def - based on the path /no/fcsp. Each member element of
  the container is represented as a class variable - with a specific
  YANG type.
  """
  # Mapping of YANG child-element names to pybind class variables.
  # Empty because the /no/fcsp container defines no child elements.
  _pyangbind_elements = {}
| 33.333333 | 102 | 0.808571 |
from operator import attrgetter
import pyangbind.lib.xpathhelper as xpathhelper
from pyangbind.lib.yangtypes import RestrictedPrecisionDecimalType, RestrictedClassType, TypedListType
from pyangbind.lib.yangtypes import YANGBool, YANGListType, YANGDynClass, ReferenceType
from pyangbind.lib.base import PybindBase
from decimal import Decimal
from bitarray import bitarray
import __builtin__
class fcsp(PybindBase):
_pyangbind_elements = {}
| true | true |
1c2fc5e3838e7f3766ffd4648fa5a200e09d866f | 2,394 | py | Python | 145-Binary-Tree-Postorder-Traversal/solution.py | Tanych/CodeTracking | 86f1cb98de801f58c39d9a48ce9de12df7303d20 | [
"MIT"
] | null | null | null | 145-Binary-Tree-Postorder-Traversal/solution.py | Tanych/CodeTracking | 86f1cb98de801f58c39d9a48ce9de12df7303d20 | [
"MIT"
] | null | null | null | 145-Binary-Tree-Postorder-Traversal/solution.py | Tanych/CodeTracking | 86f1cb98de801f58c39d9a48ce9de12df7303d20 | [
"MIT"
] | null | null | null | # Definition for a binary tree node.
# class TreeNode(object):
# def __init__(self, x):
# self.val = x
# self.left = None
# self.right = None
class Solution(object):
    def reverse(self,from_node,to_node):
        # Reverse the chain of .right pointers running from from_node to
        # to_node in place; afterwards to_node.right points back toward
        # from_node. No-op when the chain has a single node.
        if from_node==to_node:
            return
        a,b,c=from_node,from_node.right,None
        while True:
            c=b.right
            b.right=a
            a=b
            b=c
            if a==to_node:break
    def reverseadd(self,from_node,to_node,res):
        # Append the values on the right-chain from_node..to_node to res in
        # reverse order, then restore the chain's original direction.
        self.reverse(from_node,to_node)
        p=to_node
        while True:
            res.append(p.val)
            if p==from_node:
                break
            p=p.right
        # get back: undo the reversal so the tree structure is unchanged
        self.reverse(to_node,from_node)
    def postorderTraversal(self,root):
        """Morris (threaded) postorder traversal using O(1) extra space.

        A dummy node is made the parent of root; whenever a node's left
        subtree has been fully visited (detected via the temporary thread
        from its in-order predecessor), the right edge of that left subtree
        is emitted in reverse via reverseadd().
        :type root: TreeNode
        :rtype: List[int]
        """
        dump=TreeNode(-1)
        dump.left=root
        cur,pre=dump,None
        res=[]
        while cur:
            if not cur.left:
                cur=cur.right
            else:
                # pre becomes the rightmost node of cur's left subtree
                # (cur's in-order predecessor).
                pre=cur.left
                while pre.right and pre.right!=cur:
                    pre=pre.right
                if not pre.right:
                    # First visit: thread the predecessor back to cur and
                    # descend into the left subtree.
                    pre.right=cur
                    cur=cur.left
                else:
                    # Second visit: left subtree done. Emit its right edge in
                    # reverse order, then remove the temporary thread.
                    self.reverseadd(cur.left,pre,res)
                    pre.right=None
                    cur=cur.right
        return res
    def postorderTraversal_on(self, root):
        """Iterative postorder traversal with an explicit stack (O(n) space).

        :type root: TreeNode
        :rtype: List[int]
        """
        # using stack ways
        stck=[]
        # current node
        cur=root
        peak=last=None
        res=[]
        while cur or stck:
            if cur:
                stck.append(cur)
                cur=cur.left
            elif stck:
                # get the peak (top of stack) without popping yet
                peak=stck[-1]
                # if has right branch, we need get right first:
                # mark the peak and add the right subtree to stack;
                # once the subtree has been fully emitted, add the peak val.
                # last!=peak.right guards against revisiting the right child.
                if peak.right and last!=peak.right:
                    cur=peak.right
                # no right branch (or already done), directly add to res
                else:
                    res.append(peak.val)
                    last=peak
                    stck.pop()
        return res
| 28.164706 | 66 | 0.447786 |
class Solution(object):
def reverse(self,from_node,to_node):
if from_node==to_node:
return
a,b,c=from_node,from_node.right,None
while True:
c=b.right
b.right=a
a=b
b=c
if a==to_node:break
def reverseadd(self,from_node,to_node,res):
self.reverse(from_node,to_node)
p=to_node
while True:
res.append(p.val)
if p==from_node:
break
p=p.right
self.reverse(to_node,from_node)
def postorderTraversal(self,root):
dump=TreeNode(-1)
dump.left=root
cur,pre=dump,None
res=[]
while cur:
if not cur.left:
cur=cur.right
else:
pre=cur.left
while pre.right and pre.right!=cur:
pre=pre.right
if not pre.right:
pre.right=cur
cur=cur.left
else:
self.reverseadd(cur.left,pre,res)
pre.right=None
cur=cur.right
return res
def postorderTraversal_on(self, root):
stck=[]
cur=root
peak=last=None
res=[]
while cur or stck:
if cur:
stck.append(cur)
cur=cur.left
elif stck:
peak=stck[-1]
if peak.right and last!=peak.right:
cur=peak.right
else:
res.append(peak.val)
last=peak
stck.pop()
return res
| true | true |
1c2fc5e42bff60eaaacc0ac9a42e4f4b9325450c | 9,723 | py | Python | scripts/external_libs/jsonpickle-2.0.0/tests/pandas_test.py | GabrielGanne/trex-core | 688a0fe0adb890964691473723d70ffa98e00dd3 | [
"Apache-2.0"
] | 956 | 2015-06-24T15:04:55.000Z | 2022-03-30T06:25:04.000Z | scripts/external_libs/jsonpickle-2.0.0/tests/pandas_test.py | hjat2005/trex-core | 400f03c86c844a0096dff3f6b13e58a808aaefff | [
"Apache-2.0"
] | 782 | 2015-09-20T15:19:00.000Z | 2022-03-31T23:52:05.000Z | scripts/external_libs/jsonpickle-2.0.0/tests/pandas_test.py | hjat2005/trex-core | 400f03c86c844a0096dff3f6b13e58a808aaefff | [
"Apache-2.0"
] | 429 | 2015-06-27T19:34:21.000Z | 2022-03-23T11:02:51.000Z | from __future__ import absolute_import, division, unicode_literals
import datetime
import pytest
try:
import pandas as pd
import numpy as np
from pandas.testing import assert_series_equal
from pandas.testing import assert_frame_equal
from pandas.testing import assert_index_equal
except ImportError:
pytest.skip('numpy is not available', allow_module_level=True)
import jsonpickle
import jsonpickle.ext.pandas
@pytest.fixture(scope='module', autouse=True)
def pandas_extension():
    """Register jsonpickle's pandas extension for every test in this module."""
    jsonpickle.ext.pandas.register_handlers()
    yield  # control to the test function.
    jsonpickle.ext.pandas.unregister_handlers()
def roundtrip(obj):
    """Encode `obj` with jsonpickle and decode it back again."""
    encoded = jsonpickle.encode(obj)
    return jsonpickle.decode(encoded)
def test_series_roundtrip():
    """A Series mixing numpy scalar dtypes survives encode/decode intact."""
    ser = pd.Series(
        {
            'an_int': np.int_(1),
            'a_float': np.float_(2.5),
            'a_nan': np.nan,
            'a_minus_inf': -np.inf,
            'an_inf': np.inf,
            'a_str': np.str_('foo'),
            'a_unicode': np.unicode_('bar'),
            'date': np.datetime64('2014-01-01'),
            'complex': np.complex_(1 - 2j),
            # TODO: the following dtypes are not currently supported.
            # 'object': np.object_({'a': 'b'}),
        }
    )
    decoded_ser = roundtrip(ser)
    assert_series_equal(decoded_ser, ser)
def test_dataframe_roundtrip():
    """A DataFrame with columns of varied numpy dtypes survives encode/decode."""
    df = pd.DataFrame(
        {
            'an_int': np.int_([1, 2, 3]),
            'a_float': np.float_([2.5, 3.5, 4.5]),
            'a_nan': np.array([np.nan] * 3),
            'a_minus_inf': np.array([-np.inf] * 3),
            'an_inf': np.array([np.inf] * 3),
            'a_str': np.str_('foo'),
            'a_unicode': np.unicode_('bar'),
            'date': np.array([np.datetime64('2014-01-01')] * 3),
            'complex': np.complex_([1 - 2j, 2 - 1.2j, 3 - 1.3j]),
            # TODO: the following dtypes are not currently supported.
            # 'object': np.object_([{'a': 'b'}]*3),
        }
    )
    decoded_df = roundtrip(df)
    assert_frame_equal(decoded_df, df)
def test_multindex_dataframe_roundtrip():
    """A DataFrame with a two-level (str, int) MultiIndex survives encode/decode."""
    df = pd.DataFrame(
        {
            'idx_lvl0': ['a', 'b', 'c'],
            'idx_lvl1': np.int_([1, 1, 2]),
            'an_int': np.int_([1, 2, 3]),
            'a_float': np.float_([2.5, 3.5, 4.5]),
            'a_nan': np.array([np.nan] * 3),
            'a_minus_inf': np.array([-np.inf] * 3),
            'an_inf': np.array([np.inf] * 3),
            'a_str': np.str_('foo'),
            'a_unicode': np.unicode_('bar'),
        }
    )
    # Promote the two idx_lvl columns into a MultiIndex before round-tripping.
    df = df.set_index(['idx_lvl0', 'idx_lvl1'])

    decoded_df = roundtrip(df)
    assert_frame_equal(decoded_df, df)
def test_dataframe_with_interval_index_roundtrip():
    """A DataFrame indexed by an IntervalIndex survives encode/decode."""
    frame = pd.DataFrame(
        {'a': [1, 2], 'b': [3, 4]}, index=pd.IntervalIndex.from_breaks([1, 2, 4])
    )
    assert_frame_equal(roundtrip(frame), frame)
def test_index_roundtrip():
    """A plain range-based Index survives encode/decode."""
    original = pd.Index(range(5, 10))
    assert_index_equal(roundtrip(original), original)
def test_datetime_index_roundtrip():
    """A regular daily DatetimeIndex survives encode/decode."""
    original = pd.date_range(start='2019-01-01', end='2019-02-01', freq='D')
    assert_index_equal(roundtrip(original), original)
def test_ragged_datetime_index_roundtrip():
    """A DatetimeIndex with irregular spacing (no freq) survives encode/decode."""
    original = pd.DatetimeIndex(['2019-01-01', '2019-01-02', '2019-01-05'])
    assert_index_equal(roundtrip(original), original)
def test_timedelta_index_roundtrip():
    """A TimedeltaIndex survives encode/decode."""
    original = pd.timedelta_range(start='1 day', periods=4, closed='right')
    assert_index_equal(roundtrip(original), original)
def test_period_index_roundtrip():
    """A monthly PeriodIndex survives encode/decode."""
    original = pd.period_range(start='2017-01-01', end='2018-01-01', freq='M')
    assert_index_equal(roundtrip(original), original)
def test_int64_index_roundtrip():
    """An Int64Index (including a negative value) survives encode/decode."""
    original = pd.Int64Index([-1, 0, 3, 4])
    assert_index_equal(roundtrip(original), original)
def test_uint64_index_roundtrip():
    """A UInt64Index survives encode/decode."""
    original = pd.UInt64Index([0, 3, 4])
    assert_index_equal(roundtrip(original), original)
def test_float64_index_roundtrip():
    """A Float64Index survives encode/decode."""
    original = pd.Float64Index([0.1, 3.7, 4.2])
    assert_index_equal(roundtrip(original), original)
def test_interval_index_roundtrip():
    """An integer-breaks IntervalIndex survives encode/decode."""
    original = pd.IntervalIndex.from_breaks(range(5))
    assert_index_equal(roundtrip(original), original)
def test_datetime_interval_index_roundtrip():
    """An IntervalIndex built from datetime breaks survives encode/decode."""
    original = pd.IntervalIndex.from_breaks(pd.date_range('2019-01-01', '2019-01-10'))
    assert_index_equal(roundtrip(original), original)
def test_multi_index_roundtrip():
    """A MultiIndex built from a cartesian product survives encode/decode."""
    original = pd.MultiIndex.from_product(((1, 2, 3), ('a', 'b')))
    assert_index_equal(roundtrip(original), original)
def test_timestamp_roundtrip():
    """A pd.Timestamp scalar survives encode/decode unchanged."""
    stamp = pd.Timestamp('2019-01-01')
    assert roundtrip(stamp) == stamp
def test_period_roundtrip():
    """A pd.Period scalar survives encode/decode unchanged.

    Fixed: this test previously constructed a pd.Timestamp, making it a
    byte-for-byte duplicate of test_timestamp_roundtrip and leaving
    pd.Period untested.
    """
    obj = pd.Period('2019-01', freq='M')
    decoded_obj = roundtrip(obj)
    assert decoded_obj == obj
def test_interval_roundtrip():
    """A left-closed pd.Interval scalar survives encode/decode unchanged."""
    interval = pd.Interval(2, 4, closed=str('left'))
    assert roundtrip(interval) == interval
def test_b64():
    """A frame large enough to be stored as base64 round-trips intact."""
    values = np.random.rand(20, 10)
    n_rows, n_cols = values.shape
    row_labels = ['Row%d' % i for i in range(1, n_rows + 1)]
    col_labels = ['Col%d' % i for i in range(1, n_cols + 1)]
    frame = pd.DataFrame(values, index=row_labels, columns=col_labels)
    assert_frame_equal(roundtrip(frame), frame)
def test_series_list_index():
    """Round-trip a constant Series indexed by a plain list."""
    expect = pd.Series(0, index=[1, 2, 3])
    actual = roundtrip(expect)
    assert expect.values[0] == actual.values[0]
    assert 0 == actual.values[0]
    # Every index entry must survive the round trip.
    for pos in range(3):
        assert expect.index[pos] == actual.index[pos]
def test_series_multi_index():
    """Round-trip a Series whose index is a 3-level MultiIndex with one entry."""
    expect = pd.Series(0, index=[[1], [2], [3]])
    actual = roundtrip(expect)
    assert expect.values[0] == actual.values[0]
    assert 0 == actual.values[0]
    assert expect.index[0] == actual.index[0]
    # The single index entry is the tuple (1, 2, 3); compare level by level.
    for level in range(3):
        assert expect.index[0][level] == actual.index[0][level]
def test_series_multi_index_strings():
    """Round-trip a Series indexed by a string/string MultiIndex."""
    letters = ['A', 'B', 'C']
    digits = ['1', '2', '3']
    midx = pd.MultiIndex.from_product([letters, digits])
    expect = pd.Series(0, index=midx)
    actual = roundtrip(expect)
    assert expect.values[0] == actual.values[0]
    assert 0 == actual.values[0]
    # Every one of the 9 index entries survives the round trip...
    for pos in range(9):
        assert expect.index[pos] == actual.index[pos]
    # ...and matches the expected cartesian product, in order.
    expected_pairs = [(letter, digit) for letter in letters for digit in digits]
    for pos, pair in enumerate(expected_pairs):
        assert pair == actual.index[pos]
def test_dataframe_with_timedelta64_dtype():
    """Datetime columns and a derived timedelta64 column survive encode/decode."""
    data_frame = pd.DataFrame(
        {
            'Start': [
                '2020/12/14 00:00:01',
                '2020/12/14 00:00:04',
                '2020/12/14 00:00:06',
            ],
            'End': [
                '2020/12/14 00:00:04',
                '2020/12/14 00:00:06',
                '2020/12/14 00:00:09',
            ],
        }
    )
    for col in ('Start', 'End'):
        data_frame[col] = pd.to_datetime(data_frame[col])
    data_frame['Duration'] = data_frame['End'] - data_frame['Start']

    actual = jsonpickle.decode(jsonpickle.encode(data_frame))
    assert isinstance(actual, pd.DataFrame)

    # Datetime columns come back value-for-value...
    for col in ('Start', 'End'):
        for row in range(3):
            assert data_frame[col][row] == actual[col][row]
    # ...and the derived column is still real timedeltas with equal values.
    for row in range(3):
        assert isinstance(actual['Duration'][row], datetime.timedelta)
        assert data_frame['Duration'][row] == actual['Duration'][row]
def test_multilevel_columns():
    """A DataFrame with named multi-level columns keeps its column names."""
    iterables = [['inj', 'prod'], ['hourly', 'cumulative']]
    names = ['first', 'second']
    # build a named MultiIndex from the cartesian product of the two levels
    columns = pd.MultiIndex.from_product(iterables, names=names)
    # attach it as the column index of a random 3x4 frame
    data_frame = pd.DataFrame(
        np.random.randn(3, 4), index=['A', 'B', 'C'], columns=columns
    )
    encoded = jsonpickle.encode(data_frame)
    cloned_data_frame = jsonpickle.decode(encoded)
    assert isinstance(cloned_data_frame, pd.DataFrame)
    assert data_frame.columns.names == cloned_data_frame.columns.names
    assert_frame_equal(data_frame, cloned_data_frame)
if __name__ == '__main__':
pytest.main([__file__])
| 31.364516 | 81 | 0.622236 | from __future__ import absolute_import, division, unicode_literals
import datetime
import pytest
try:
import pandas as pd
import numpy as np
from pandas.testing import assert_series_equal
from pandas.testing import assert_frame_equal
from pandas.testing import assert_index_equal
except ImportError:
pytest.skip('numpy is not available', allow_module_level=True)
import jsonpickle
import jsonpickle.ext.pandas
@pytest.fixture(scope='module', autouse=True)
def pandas_extension():
jsonpickle.ext.pandas.register_handlers()
yield
jsonpickle.ext.pandas.unregister_handlers()
def roundtrip(obj):
return jsonpickle.decode(jsonpickle.encode(obj))
def test_series_roundtrip():
ser = pd.Series(
{
'an_int': np.int_(1),
'a_float': np.float_(2.5),
'a_nan': np.nan,
'a_minus_inf': -np.inf,
'an_inf': np.inf,
'a_str': np.str_('foo'),
'a_unicode': np.unicode_('bar'),
'date': np.datetime64('2014-01-01'),
'complex': np.complex_(1 - 2j),
}
)
decoded_ser = roundtrip(ser)
assert_series_equal(decoded_ser, ser)
def test_dataframe_roundtrip():
df = pd.DataFrame(
{
'an_int': np.int_([1, 2, 3]),
'a_float': np.float_([2.5, 3.5, 4.5]),
'a_nan': np.array([np.nan] * 3),
'a_minus_inf': np.array([-np.inf] * 3),
'an_inf': np.array([np.inf] * 3),
'a_str': np.str_('foo'),
'a_unicode': np.unicode_('bar'),
'date': np.array([np.datetime64('2014-01-01')] * 3),
'complex': np.complex_([1 - 2j, 2 - 1.2j, 3 - 1.3j]),
}
)
decoded_df = roundtrip(df)
assert_frame_equal(decoded_df, df)
def test_multindex_dataframe_roundtrip():
df = pd.DataFrame(
{
'idx_lvl0': ['a', 'b', 'c'],
'idx_lvl1': np.int_([1, 1, 2]),
'an_int': np.int_([1, 2, 3]),
'a_float': np.float_([2.5, 3.5, 4.5]),
'a_nan': np.array([np.nan] * 3),
'a_minus_inf': np.array([-np.inf] * 3),
'an_inf': np.array([np.inf] * 3),
'a_str': np.str_('foo'),
'a_unicode': np.unicode_('bar'),
}
)
df = df.set_index(['idx_lvl0', 'idx_lvl1'])
decoded_df = roundtrip(df)
assert_frame_equal(decoded_df, df)
def test_dataframe_with_interval_index_roundtrip():
df = pd.DataFrame(
{'a': [1, 2], 'b': [3, 4]}, index=pd.IntervalIndex.from_breaks([1, 2, 4])
)
decoded_df = roundtrip(df)
assert_frame_equal(decoded_df, df)
def test_index_roundtrip():
idx = pd.Index(range(5, 10))
decoded_idx = roundtrip(idx)
assert_index_equal(decoded_idx, idx)
def test_datetime_index_roundtrip():
idx = pd.date_range(start='2019-01-01', end='2019-02-01', freq='D')
decoded_idx = roundtrip(idx)
assert_index_equal(decoded_idx, idx)
def test_ragged_datetime_index_roundtrip():
idx = pd.DatetimeIndex(['2019-01-01', '2019-01-02', '2019-01-05'])
decoded_idx = roundtrip(idx)
assert_index_equal(decoded_idx, idx)
def test_timedelta_index_roundtrip():
idx = pd.timedelta_range(start='1 day', periods=4, closed='right')
decoded_idx = roundtrip(idx)
assert_index_equal(decoded_idx, idx)
def test_period_index_roundtrip():
idx = pd.period_range(start='2017-01-01', end='2018-01-01', freq='M')
decoded_idx = roundtrip(idx)
assert_index_equal(decoded_idx, idx)
def test_int64_index_roundtrip():
idx = pd.Int64Index([-1, 0, 3, 4])
decoded_idx = roundtrip(idx)
assert_index_equal(decoded_idx, idx)
def test_uint64_index_roundtrip():
idx = pd.UInt64Index([0, 3, 4])
decoded_idx = roundtrip(idx)
assert_index_equal(decoded_idx, idx)
def test_float64_index_roundtrip():
idx = pd.Float64Index([0.1, 3.7, 4.2])
decoded_idx = roundtrip(idx)
assert_index_equal(decoded_idx, idx)
def test_interval_index_roundtrip():
idx = pd.IntervalIndex.from_breaks(range(5))
decoded_idx = roundtrip(idx)
assert_index_equal(decoded_idx, idx)
def test_datetime_interval_index_roundtrip():
idx = pd.IntervalIndex.from_breaks(pd.date_range('2019-01-01', '2019-01-10'))
decoded_idx = roundtrip(idx)
assert_index_equal(decoded_idx, idx)
def test_multi_index_roundtrip():
idx = pd.MultiIndex.from_product(((1, 2, 3), ('a', 'b')))
decoded_idx = roundtrip(idx)
assert_index_equal(decoded_idx, idx)
def test_timestamp_roundtrip():
obj = pd.Timestamp('2019-01-01')
decoded_obj = roundtrip(obj)
assert decoded_obj == obj
def test_period_roundtrip():
obj = pd.Timestamp('2019-01-01')
decoded_obj = roundtrip(obj)
assert decoded_obj == obj
def test_interval_roundtrip():
obj = pd.Interval(2, 4, closed=str('left'))
decoded_obj = roundtrip(obj)
assert decoded_obj == obj
def test_b64():
a = np.random.rand(20, 10)
index = ['Row' + str(i) for i in range(1, a.shape[0] + 1)]
columns = ['Col' + str(i) for i in range(1, a.shape[1] + 1)]
df = pd.DataFrame(a, index=index, columns=columns)
decoded_df = roundtrip(df)
assert_frame_equal(decoded_df, df)
def test_series_list_index():
expect = pd.Series(0, index=[1, 2, 3])
actual = roundtrip(expect)
assert expect.values[0] == actual.values[0]
assert 0 == actual.values[0]
assert expect.index[0] == actual.index[0]
assert expect.index[1] == actual.index[1]
assert expect.index[2] == actual.index[2]
def test_series_multi_index():
expect = pd.Series(0, index=[[1], [2], [3]])
actual = roundtrip(expect)
assert expect.values[0] == actual.values[0]
assert 0 == actual.values[0]
assert expect.index[0] == actual.index[0]
assert expect.index[0][0] == actual.index[0][0]
assert expect.index[0][1] == actual.index[0][1]
assert expect.index[0][2] == actual.index[0][2]
def test_series_multi_index_strings():
lets = ['A', 'B', 'C']
nums = ['1', '2', '3']
midx = pd.MultiIndex.from_product([lets, nums])
expect = pd.Series(0, index=midx)
actual = roundtrip(expect)
assert expect.values[0] == actual.values[0]
assert 0 == actual.values[0]
assert expect.index[0] == actual.index[0]
assert expect.index[1] == actual.index[1]
assert expect.index[2] == actual.index[2]
assert expect.index[3] == actual.index[3]
assert expect.index[4] == actual.index[4]
assert expect.index[5] == actual.index[5]
assert expect.index[6] == actual.index[6]
assert expect.index[7] == actual.index[7]
assert expect.index[8] == actual.index[8]
assert ('A', '1') == actual.index[0]
assert ('A', '2') == actual.index[1]
assert ('A', '3') == actual.index[2]
assert ('B', '1') == actual.index[3]
assert ('B', '2') == actual.index[4]
assert ('B', '3') == actual.index[5]
assert ('C', '1') == actual.index[6]
assert ('C', '2') == actual.index[7]
assert ('C', '3') == actual.index[8]
def test_dataframe_with_timedelta64_dtype():
data_frame = pd.DataFrame(
{
'Start': [
'2020/12/14 00:00:01',
'2020/12/14 00:00:04',
'2020/12/14 00:00:06',
],
'End': [
'2020/12/14 00:00:04',
'2020/12/14 00:00:06',
'2020/12/14 00:00:09',
],
}
)
data_frame['Start'] = pd.to_datetime(data_frame['Start'])
data_frame['End'] = pd.to_datetime(data_frame['End'])
data_frame['Duration'] = data_frame['End'] - data_frame['Start']
encoded = jsonpickle.encode(data_frame)
actual = jsonpickle.decode(encoded)
assert isinstance(actual, pd.DataFrame)
assert data_frame['Start'][0] == actual['Start'][0]
assert data_frame['Start'][1] == actual['Start'][1]
assert data_frame['Start'][2] == actual['Start'][2]
assert data_frame['End'][0] == actual['End'][0]
assert data_frame['End'][1] == actual['End'][1]
assert data_frame['End'][2] == actual['End'][2]
assert isinstance(actual['Duration'][0], datetime.timedelta)
assert isinstance(actual['Duration'][1], datetime.timedelta)
assert isinstance(actual['Duration'][2], datetime.timedelta)
assert data_frame['Duration'][0] == actual['Duration'][0]
assert data_frame['Duration'][1] == actual['Duration'][1]
assert data_frame['Duration'][2] == actual['Duration'][2]
def test_multilevel_columns():
iterables = [['inj', 'prod'], ['hourly', 'cumulative']]
names = ['first', 'second']
columns = pd.MultiIndex.from_product(iterables, names=names)
data_frame = pd.DataFrame(
np.random.randn(3, 4), index=['A', 'B', 'C'], columns=columns
)
encoded = jsonpickle.encode(data_frame)
cloned_data_frame = jsonpickle.decode(encoded)
assert isinstance(cloned_data_frame, pd.DataFrame)
assert data_frame.columns.names == cloned_data_frame.columns.names
assert_frame_equal(data_frame, cloned_data_frame)
if __name__ == '__main__':
pytest.main([__file__])
| true | true |
1c2fc604ac3d747626eaa5c5d0cc9a69eace435f | 1,293 | py | Python | worker/tasks/db_writer.py | ZTJiu/WebsiteIpParser | 173703a03e329cb9488a9637e84b421a1ed20a19 | [
"Apache-2.0"
] | null | null | null | worker/tasks/db_writer.py | ZTJiu/WebsiteIpParser | 173703a03e329cb9488a9637e84b421a1ed20a19 | [
"Apache-2.0"
] | null | null | null | worker/tasks/db_writer.py | ZTJiu/WebsiteIpParser | 173703a03e329cb9488a9637e84b421a1ed20a19 | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python
# -*- coding=utf-8 -*-
###########################################
# File : db_writer.py
# Date : 2017-09-17
# Author: Zhang Tianjiu
# Email : zhangtianjiu@vip.qq.com
###########################################
from sqlalchemy import Table, MetaData, Column, String, create_engine
from sqlalchemy.orm import sessionmaker
from sqlalchemy.ext.declarative import declarative_base
from conf import get_db_info
# Declarative base class for ORM models:
Base = declarative_base()
# Initialize the database connection:
# NOTE(review): get_db_info is interpolated directly into the URL; presumably
# it is a preformatted "user:password@host/dbname" string -- confirm in conf.
engine = create_engine('mysql+mysqlconnector://{}'.format(get_db_info))
metadata = MetaData()
DbSession = sessionmaker(bind=engine)
def init_table():
    """Create the ip_list table (ip -> website) if it does not already exist."""
    ip_list_table = Table('ip_list',
                          metadata,
                          Column('ip', String(20), primary_key=True),
                          Column('website', String(60)))
    # Fixed: checkfirst avoids an error when the table already exists, which
    # previously made this module fail on any import after the first run.
    ip_list_table.create(engine, checkfirst=True)

init_table()
# ORM model for the ip_list table:
class Ip(Base):
    # Table name:
    __tablename__ = 'ip_list'
    # Table structure:
    ip = Column(String(20), primary_key=True)
    # Fixed: the physical table (see init_table) names this column 'website'
    # with length 60; the attribute previously mapped to a nonexistent 'url'
    # column of length 40, so ORM inserts targeted a missing column. The
    # Python attribute keeps the name `url` for compatibility with
    # insert_data(), but is explicitly mapped to the 'website' column.
    url = Column('website', String(60))
def insert_data(ip, website):
    """Insert one (ip, website) row.

    Rolls back and re-raises on failure; the session is always closed
    (previously it leaked whenever add/commit raised).
    """
    # Create a session:
    session = DbSession()
    try:
        # Create the new Ip record:
        new_ip = Ip(ip=ip, url=website)
        # Stage it on the session:
        session.add(new_ip)
        # Commit, i.e. persist to the database:
        session.commit()
    except Exception:
        # Leave the connection in a clean state before propagating the error.
        session.rollback()
        raise
    finally:
        # Close the session:
        session.close()
__all__ = ['insert_data']
| 23.509091 | 71 | 0.615623 | true | true | |
1c2fc67455b0e53a2eb879e29cb998a2ac40df9f | 3,463 | py | Python | app/tests/testDownload.py | lhorne-gavant/OpenPubArchive-Content-Server-1 | 2b7c02417a8bb37f5a627343fab7fa05dc532bf7 | [
"Apache-2.0"
] | null | null | null | app/tests/testDownload.py | lhorne-gavant/OpenPubArchive-Content-Server-1 | 2b7c02417a8bb37f5a627343fab7fa05dc532bf7 | [
"Apache-2.0"
] | null | null | null | app/tests/testDownload.py | lhorne-gavant/OpenPubArchive-Content-Server-1 | 2b7c02417a8bb37f5a627343fab7fa05dc532bf7 | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# Third-party imports...
#from nose.tools import assert_true
# This test module is in development...
import sys
import os.path
folder = os.path.basename(os.path.dirname(os.path.abspath(__file__)))
if folder == "tests": # testing from within WingIDE, default folder is tests
sys.path.append('../libs')
sys.path.append('../config')
sys.path.append('../../app')
else: # python running from should be within folder app
sys.path.append('./libs')
sys.path.append('./config')
from starlette.testclient import TestClient
import unittest
from localsecrets import TESTUSER, TESTPW, SECRET_KEY, ALGORITHM
import jwt
from datetime import datetime
from unitTestConfig import base_api, base_plus_endpoint_encoded
from main import app
client = TestClient(app)
class TestDownload(unittest.TestCase):
    """
    Tests for basic login and Download
    Note: tests are performed in alphabetical order, hence the function naming
    with forced order in the names.
    """
    def test_0_login(self):
        """Login succeeds and the JWT access token matches the session id."""
        full_URL = base_plus_endpoint_encoded(f'/v2/Session/Login/?grant_type=password&username={TESTUSER}&password={TESTPW}')
        response = client.get(full_URL)
        # Confirm that the request-response cycle completed successfully.
        assert(response.ok == True)
        r = response.json()
        access_token = r["access_token"]
        session_id = r["session_id"]
        # Decode the JWT so its embedded session id can be cross-checked.
        decoded_access_token = jwt.decode(access_token,
                                          key=SECRET_KEY,
                                          algorithms=ALGORITHM
                                          )
        # NOTE(review): expires_time is computed but never asserted.
        expires_time = datetime.fromtimestamp(decoded_access_token['exp'])
        orig_session_id = decoded_access_token['orig_session_id']
        assert(r["authenticated"] == True)
        assert(session_id == orig_session_id)
        print (decoded_access_token )

    def test_1_Download(self):
        """Original-PDF download works after logging in."""
        full_URL = base_plus_endpoint_encoded(f'/v2/Session/Login/?grant_type=password&username={TESTUSER}&password={TESTPW}')
        response = client.get(full_URL)
        # NOTE(review): the login response above is never asserted; presumably
        # the call exists only to establish the session -- confirm.
        full_URL = base_plus_endpoint_encoded(f'/v2/Documents/Downloads/PDFORIG/IJP.077.0217A/')
        # local, this works...but fails in the response.py code trying to convert self.status to int.
        response = client.get(full_URL)
        # Confirm that the request-response cycle completed successfully.
        assert(response.ok == True)

    def test_2_Download(self):
        """Generated-PDF download works."""
        full_URL = base_plus_endpoint_encoded(f'/v2/Documents/Downloads/PDF/IFP.017.0240A/')
        response = client.get(full_URL)
        # Confirm that the request-response cycle completed successfully.
        assert(response.ok == True)

    def test_3_Download(self):
        """EPUB download works."""
        full_URL = base_plus_endpoint_encoded(f'/v2/Documents/Downloads/EPUB/IJPSP.009.0324A/')
        response = client.get(full_URL)
        # Confirm that the request-response cycle completed successfully.
        assert(response.ok == True)

    def test_4_Download(self):
        """HTML download works."""
        full_URL = base_plus_endpoint_encoded(f'/v2/Documents/Downloads/HTML/IJPSP.009.0324A/')
        response = client.get(full_URL)
        # Confirm that the request-response cycle completed successfully.
        assert(response.ok == True)
if __name__ == '__main__':
unittest.main() | 38.477778 | 126 | 0.674848 |
import sys
import os.path
folder = os.path.basename(os.path.dirname(os.path.abspath(__file__)))
if folder == "tests":
sys.path.append('../libs')
sys.path.append('../config')
sys.path.append('../../app')
else:
sys.path.append('./libs')
sys.path.append('./config')
from starlette.testclient import TestClient
import unittest
from localsecrets import TESTUSER, TESTPW, SECRET_KEY, ALGORITHM
import jwt
from datetime import datetime
from unitTestConfig import base_api, base_plus_endpoint_encoded
from main import app
client = TestClient(app)
class TestDownload(unittest.TestCase):
def test_0_login(self):
full_URL = base_plus_endpoint_encoded(f'/v2/Session/Login/?grant_type=password&username={TESTUSER}&password={TESTPW}')
response = client.get(full_URL)
assert(response.ok == True)
r = response.json()
access_token = r["access_token"]
session_id = r["session_id"]
decoded_access_token = jwt.decode(access_token,
key=SECRET_KEY,
algorithms=ALGORITHM
)
expires_time = datetime.fromtimestamp(decoded_access_token['exp'])
orig_session_id = decoded_access_token['orig_session_id']
assert(r["authenticated"] == True)
assert(session_id == orig_session_id)
print (decoded_access_token )
def test_1_Download(self):
full_URL = base_plus_endpoint_encoded(f'/v2/Session/Login/?grant_type=password&username={TESTUSER}&password={TESTPW}')
response = client.get(full_URL)
full_URL = base_plus_endpoint_encoded(f'/v2/Documents/Downloads/PDFORIG/IJP.077.0217A/')
response = client.get(full_URL)
assert(response.ok == True)
def test_2_Download(self):
full_URL = base_plus_endpoint_encoded(f'/v2/Documents/Downloads/PDF/IFP.017.0240A/')
response = client.get(full_URL)
assert(response.ok == True)
def test_3_Download(self):
full_URL = base_plus_endpoint_encoded(f'/v2/Documents/Downloads/EPUB/IJPSP.009.0324A/')
response = client.get(full_URL)
assert(response.ok == True)
def test_4_Download(self):
full_URL = base_plus_endpoint_encoded(f'/v2/Documents/Downloads/HTML/IJPSP.009.0324A/')
response = client.get(full_URL)
assert(response.ok == True)
if __name__ == '__main__':
unittest.main() | true | true |
1c2fc75c25ffb9aca3dbac7b67593a66e503ab2c | 2,021 | py | Python | accounts/forms.py | Nor-Mal/django-ecommerce | e57d316cb78fbe4315fb0a4b07a79779143981dd | [
"Apache-2.0"
] | 2 | 2021-09-05T20:45:59.000Z | 2021-11-03T11:55:20.000Z | accounts/forms.py | Nor-Mal/django-ecommerce | e57d316cb78fbe4315fb0a4b07a79779143981dd | [
"Apache-2.0"
] | null | null | null | accounts/forms.py | Nor-Mal/django-ecommerce | e57d316cb78fbe4315fb0a4b07a79779143981dd | [
"Apache-2.0"
] | null | null | null | from django import forms
from django.contrib.auth.forms import UserCreationForm
from django.contrib.auth import get_user_model
from .models import Customer, Address
class UserCreateForm(UserCreationForm):
    """Sign-up form for the project user model.

    Extends Django's ``UserCreationForm`` with name/email fields and makes
    first name, last name and email mandatory.
    """

    class Meta:
        fields = ('username', 'first_name', 'last_name', 'email', 'password1', 'password2')
        model = get_user_model()

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        # Human-friendly labels for the rendered form.
        labels = {
            'username': 'Username',
            'first_name': 'First name',
            'last_name': 'Last name',
            'email': 'Email address',
        }
        for name, label in labels.items():
            self.fields[name].label = label
        # Bug fix: form fields have no ``blank`` attribute (``blank`` is a
        # *model* field option), so the original ``field.blank = False`` had
        # no effect.  ``required = True`` is what actually enforces that
        # these fields must be filled in on submit.
        for name in ('first_name', 'last_name', 'email'):
            self.fields[name].required = True
class UserUpdateForm(forms.ModelForm):
    """Profile-edit form for the project user model.

    ``date_joined`` and ``last_login`` are rendered read-only for reference.
    """

    class Meta:
        fields = ('username', 'first_name', 'last_name', 'email', 'date_joined', 'last_login')
        model = get_user_model()

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        # Human-friendly labels for the rendered form.
        labels = {
            'username': 'Username',
            'first_name': 'First name',
            'last_name': 'Last name',
            'email': 'Email address',
            'date_joined': 'Created on',
            'last_login': 'Last login',
        }
        for field_name, label in labels.items():
            self.fields[field_name].label = label
        # These timestamps are informational only and must not be editable.
        for field_name in ('date_joined', 'last_login'):
            self.fields[field_name].disabled = True
class CustomerUpdateForm(forms.ModelForm):
    """Form for editing a Customer; only the customer id is exposed."""

    class Meta:
        model = Customer
        fields = ['customer_id']
class AddressUpdateForm(forms.ModelForm):
    """Form for editing an existing Address.

    The ``customer`` relation is excluded from the form, and the
    ``address_name`` field is rendered read-only.
    """

    class Meta:
        model = Address
        exclude = ['customer']

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        # Lock the identifying name of the address against edits.
        name_field = self.fields['address_name']
        name_field.disabled = True
class AddressCreateForm(forms.ModelForm):
    """Form for creating a new Address; the ``customer`` relation is excluded."""

    class Meta:
        model = Address
        exclude = ['customer']
| 33.131148 | 95 | 0.620485 | from django import forms
from django.contrib.auth.forms import UserCreationForm
from django.contrib.auth import get_user_model
from .models import Customer, Address
class UserCreateForm(UserCreationForm):
class Meta:
fields = ('username', 'first_name', 'last_name', 'email', 'password1', 'password2')
model = get_user_model()
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.fields['username'].label = 'Username'
self.fields['first_name'].label = 'First name'
self.fields['first_name'].blank = False
self.fields['last_name'].label = 'Last name'
self.fields['last_name'].blank = False
self.fields['email'].label = 'Email address'
self.fields['email'].blank = False
class UserUpdateForm(forms.ModelForm):
class Meta:
fields = ('username', 'first_name', 'last_name', 'email', 'date_joined', 'last_login')
model = get_user_model()
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.fields['username'].label = 'Username'
self.fields['first_name'].label = 'First name'
self.fields['last_name'].label = 'Last name'
self.fields['email'].label = 'Email address'
self.fields['date_joined'].label = 'Created on'
self.fields['last_login'].label = 'Last login'
self.fields['date_joined'].disabled = True
self.fields['last_login'].disabled = True
class CustomerUpdateForm(forms.ModelForm):
class Meta:
model = Customer
fields = ['customer_id']
class AddressUpdateForm(forms.ModelForm):
class Meta:
model = Address
exclude = ['customer']
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.fields['address_name'].disabled = True
class AddressCreateForm(forms.ModelForm):
class Meta:
model = Address
exclude = ['customer']
| true | true |
1c2fc7a04f19443bfe8f1d68c70f77000c87265a | 482 | py | Python | api/models/images.py | syth0le/async_cookeat | 0cecdd44c064be6fe19c0d0ae8342d7baf5a9bb8 | [
"CC0-1.0"
] | null | null | null | api/models/images.py | syth0le/async_cookeat | 0cecdd44c064be6fe19c0d0ae8342d7baf5a9bb8 | [
"CC0-1.0"
] | null | null | null | api/models/images.py | syth0le/async_cookeat | 0cecdd44c064be6fe19c0d0ae8342d7baf5a9bb8 | [
"CC0-1.0"
] | null | null | null | from sqlalchemy import Column, String, Integer, ForeignKey
from sqlalchemy.orm import relationship
from api.utils.db_init import Base
class Images(Base):
    """SQLAlchemy model for an image attached to a recipe."""

    __tablename__ = 'images'
    # Surrogate primary key.
    id = Column(Integer, primary_key=True, unique=True)
    # Image reference string (max 50 chars) — presumably a file name or URL;
    # confirm against the upload/serving code.
    image = Column(String(50), nullable=False)
    # Owning recipe; pairs with the ``images`` relationship on Recipe.
    recipe_id = Column(Integer, ForeignKey("recipe.id"))
    recipe_image = relationship("Recipe", back_populates="images")

    def __repr__(self):
        return '<Images %r>' % self.image
| 26.777778 | 66 | 0.717842 | from sqlalchemy import Column, String, Integer, ForeignKey
from sqlalchemy.orm import relationship
from api.utils.db_init import Base
class Images(Base):
__tablename__ = 'images'
id = Column(Integer, primary_key=True, unique=True)
image = Column(String(50), nullable=False)
recipe_id = Column(Integer, ForeignKey("recipe.id"))
recipe_image = relationship("Recipe", back_populates="images")
def __repr__(self):
return '<Images %r>' % self.image
| true | true |
1c2fc82b12fbba10922a54228785c5d486281380 | 66 | py | Python | streamfig/__init__.py | TiphaineV/streamfig | 4acd92625c34bde0089b7963ec076d902d8ebba1 | [
"MIT"
] | 5 | 2019-09-19T07:11:13.000Z | 2021-12-13T11:18:41.000Z | streamfig/__init__.py | TiphaineV/streamfig | 4acd92625c34bde0089b7963ec076d902d8ebba1 | [
"MIT"
] | 3 | 2020-04-23T17:37:23.000Z | 2021-12-13T09:40:31.000Z | streamfig/__init__.py | TiphaineV/streamfig | 4acd92625c34bde0089b7963ec076d902d8ebba1 | [
"MIT"
] | 5 | 2018-12-14T13:53:33.000Z | 2020-05-18T17:22:52.000Z | from .streamfig import StreamFig
from .printers import FigPrinter
| 22 | 32 | 0.848485 | from .streamfig import StreamFig
from .printers import FigPrinter
| true | true |
1c2fc89fac73d58e54ab12a3e1de3df8b88365e0 | 26,158 | py | Python | kiali_qe/tests/test_istio_config_validation.py | Hawkular-QE/kiali-qe-python | 24e058def1efd0a509a2b599901f4179dbf37583 | [
"Apache-2.0"
] | null | null | null | kiali_qe/tests/test_istio_config_validation.py | Hawkular-QE/kiali-qe-python | 24e058def1efd0a509a2b599901f4179dbf37583 | [
"Apache-2.0"
] | 3 | 2018-03-28T17:11:13.000Z | 2018-03-28T17:55:08.000Z | kiali_qe/tests/test_istio_config_validation.py | Hawkular-QE/kiali-qe-python | 24e058def1efd0a509a2b599901f4179dbf37583 | [
"Apache-2.0"
] | 2 | 2018-02-13T10:56:03.000Z | 2018-03-20T14:07:51.000Z | import pytest
from selenium.common.exceptions import NoSuchElementException
from kiali_qe.tests import ValidationsTest, ConfigValidationObject, ServiceValidationObject
from kiali_qe.utils.path import istio_objects_validation_path
from kiali_qe.components.error_codes import (
KIA0205,
KIA0401,
KIA0301,
KIA0302,
KIA0201,
KIA0202,
KIA0203,
KIA0209,
KIA1102,
KIA0701,
KIA0601,
KIA1104,
KIA0204,
KIA0001,
KIA0004,
KIA0002,
KIA0003,
KIA1103,
KIA1004,
KIA1006,
KIA0105,
KIA1106,
KIA1107,
KIA1101
)
'''
Tests are divided into groups using different services and namespaces. This way the group of tests
can be run in parallel.
'''
# Namespaces used by the validation scenarios below.
BOOKINFO = 'bookinfo'
BOOKINFO2 = 'bookinfo2'
ISTIO_SYSTEM = 'istio-system'
# Scenario YAML files (resolved under istio_objects_validation_path); each
# file deploys the Istio config objects needed by the like-numbered test.
SCENARIO_1 = "two_gateways_same_host.yaml"
SCENARIO_2 = "no_matching_workload_gateway.yaml"
SCENARIO_3 = "more_destination_rules.yaml"
SCENARIO_4 = "no_matching_entry_registry.yaml"
SCENARIO_5 = "subset_label_not_found.yaml"
SCENARIO_6 = "missing_mesh_policy.yaml"
SCENARIO_7 = "mtls_settings_overridden.yaml"
SCENARIO_8 = "mesh_policy_permissive.yaml"
SCENARIO_9 = "mesh_policy_mtls_enable.yaml"
SCENARIO_10 = "non_existing_gateway.yaml"
SCENARIO_11 = "not_defined_protocol.yaml"
SCENARIO_12 = "destination_rule_fqdn.yaml"
SCENARIO_13 = "destination_rule_wrong_fqdn.yaml"
SCENARIO_14 = "ratings_java_svc.yaml"
SCENARIO_15 = "port_name_suffix_missing.yaml"
SCENARIO_16 = "virtual-service-less-than-100-weight.yaml"
SCENARIO_17 = "wrong-host-label-sidecar.yaml"
SCENARIO_18 = "duplicate-no-workload-sidecar.yaml"
SCENARIO_19 = "duplicate-workload-sidecar.yaml"
SCENARIO_20 = "default-sidecar-with-workload.yaml"
SCENARIO_21 = "mesh_policy_disable.yaml"
SCENARIO_22 = "auth-policy-mtls.yaml"
SCENARIO_23 = "vs_subset_service_entry.yaml"
SCENARIO_24 = "vs_wrong_subset_no_dr.yaml"
SCENARIO_25 = "duplicate-vs-gateway.yaml"
SCENARIO_26 = "vs_destination_host_not_found.yaml"
SCENARIO_27 = "request_auth_no_workload.yaml"
SCENARIO_28 = "two_gateways_different_selectors.yaml"
SCENARIO_29 = "subset_not_have_label.yaml"
@pytest.mark.p_group_last
def test_two_gateways_same_host(kiali_client, openshift_client):
    """KIA0301: more than one Gateway for the same host/port combination."""
    expected = [
        ConfigValidationObject(
            object_type='Gateway', object_name='bookinfo-gateway-auto-host',
            namespace=BOOKINFO, error_messages=[KIA0301]),
        ConfigValidationObject(
            object_type='Gateway', object_name='bookinfo-gateway-auto-host-copy',
            namespace=BOOKINFO2, error_messages=[KIA0301]),
    ]
    validations = ValidationsTest(
        openshift_client=openshift_client,
        kiali_client=kiali_client,
        objects_path=istio_objects_validation_path.strpath)
    validations.test_istio_objects(
        scenario=SCENARIO_1, namespace=None,
        config_validation_objects=expected,
        ignore_common_errors=False)
@pytest.mark.p_group_last
def test_two_gateways_different_selectors(kiali_client, openshift_client):
    """KIA0302: two Gateways share a host/port but refer to different selectors."""
    expected = [
        ConfigValidationObject(
            object_type='Gateway', object_name='istio-gateway-prv',
            namespace=ISTIO_SYSTEM, error_messages=[KIA0302]),
        ConfigValidationObject(
            object_type='Gateway', object_name='istio-gateway-pub',
            namespace=ISTIO_SYSTEM, error_messages=[KIA0302]),
    ]
    validations = ValidationsTest(
        openshift_client=openshift_client,
        kiali_client=kiali_client,
        objects_path=istio_objects_validation_path.strpath)
    validations.test_istio_objects(
        scenario=SCENARIO_28, namespace=ISTIO_SYSTEM,
        config_validation_objects=expected,
        ignore_common_errors=False)
@pytest.mark.p_group_last
def test_gateway_no_matching_workload(kiali_client, openshift_client):
    """KIA0302: no workload matches the gateway selector in this namespace."""
    expected = [
        ConfigValidationObject(
            object_type='Gateway', object_name='bookinfo-gateway-auto-not-match',
            namespace=BOOKINFO, error_messages=[KIA0302]),
    ]
    validations = ValidationsTest(
        openshift_client=openshift_client,
        kiali_client=kiali_client,
        objects_path=istio_objects_validation_path.strpath)
    validations.test_istio_objects(
        scenario=SCENARIO_2, namespace=None,
        config_validation_objects=expected)
@pytest.mark.p_group_last
def test_more_drs_same_host_port(kiali_client, openshift_client):
    """KIA0201: multiple DestinationRules for the same host/subset combination."""
    expected = [
        ConfigValidationObject(
            object_type='DestinationRule', object_name='reviews-dr1-auto',
            namespace=BOOKINFO, error_messages=[KIA0201]),
        ConfigValidationObject(
            object_type='DestinationRule', object_name='reviews-dr2-auto',
            namespace=BOOKINFO, error_messages=[KIA0201]),
    ]
    validations = ValidationsTest(
        openshift_client=openshift_client,
        kiali_client=kiali_client,
        objects_path=istio_objects_validation_path.strpath)
    validations.test_istio_objects(
        scenario=SCENARIO_3, namespace=BOOKINFO,
        config_validation_objects=expected,
        ignore_common_errors=False)
@pytest.mark.p_group_last
def test_no_matching_entry_dr(kiali_client, openshift_client):
    """KIA0202: DR host has no matching entry in the service registry
    (service, workload or service entries).
    """
    expected = [
        ConfigValidationObject(
            object_type='DestinationRule', object_name='reviews-no-match-entry-auto',
            namespace=BOOKINFO, error_messages=[KIA0202]),
    ]
    validations = ValidationsTest(
        openshift_client=openshift_client,
        kiali_client=kiali_client,
        objects_path=istio_objects_validation_path.strpath)
    validations.test_istio_objects(
        scenario=SCENARIO_4, namespace=BOOKINFO,
        config_validation_objects=expected)
@pytest.mark.p_group_last
def test_subset_label_not_found(kiali_client, openshift_client):
    """KIA0203: the subset's labels are not found in any matching host."""
    expected = [
        ConfigValidationObject(
            object_type='DestinationRule', object_name='reviews-no-subset-label-auto',
            namespace=BOOKINFO,
            # Two offending subsets, hence the error appears twice.
            error_messages=[KIA0203, KIA0203]),
    ]
    validations = ValidationsTest(
        openshift_client=openshift_client,
        kiali_client=kiali_client,
        objects_path=istio_objects_validation_path.strpath)
    validations.test_istio_objects(
        scenario=SCENARIO_5, namespace=BOOKINFO,
        config_validation_objects=expected)
@pytest.mark.p_group_last
def test_mesh_policy_not_found(kiali_client, openshift_client):
    """KIA0205: PeerAuthentication enabling mTLS is missing."""
    expected = [
        ConfigValidationObject(
            object_type='DestinationRule', object_name='default',
            namespace=ISTIO_SYSTEM, error_messages=[KIA0205]),
    ]
    validations = ValidationsTest(
        openshift_client=openshift_client,
        kiali_client=kiali_client,
        objects_path=istio_objects_validation_path.strpath)
    validations.test_istio_objects(
        scenario=SCENARIO_6, namespace=ISTIO_SYSTEM,
        config_validation_objects=expected)
@pytest.mark.p_group_last
def test_mtls_settings_overridden(kiali_client, openshift_client):
    """KIA0204: mTLS settings of a non-local DestinationRule are overridden."""
    expected = [
        ConfigValidationObject(
            object_type='DestinationRule', object_name='default',
            namespace=ISTIO_SYSTEM, error_messages=[KIA0205]),
        ConfigValidationObject(
            object_type='DestinationRule', object_name='reviews-overridden-auto',
            namespace=BOOKINFO, error_messages=[KIA0204]),
    ]
    validations = ValidationsTest(
        openshift_client=openshift_client,
        kiali_client=kiali_client,
        objects_path=istio_objects_validation_path.strpath)
    validations.test_istio_objects(
        scenario=SCENARIO_7, namespace=None,
        config_validation_objects=expected)
@pytest.mark.p_group_last
def test_meshpolicy_permissive_ok(kiali_client, openshift_client):
    """Mesh-wide PERMISSIVE PeerAuthentication: no validation errors expected."""
    expected = [
        ConfigValidationObject(
            object_type='DestinationRule', object_name='default',
            namespace=BOOKINFO, error_messages=[]),
        ConfigValidationObject(
            object_type='PeerAuthentication', object_name='default',
            namespace=ISTIO_SYSTEM, error_messages=[]),
    ]
    validations = ValidationsTest(
        openshift_client=openshift_client,
        kiali_client=kiali_client,
        objects_path=istio_objects_validation_path.strpath)
    validations.test_istio_objects(
        scenario=SCENARIO_8, namespace=None,
        config_validation_objects=expected)
@pytest.mark.p_group_last
def test_meshpolicy_mtls_enable_ok(kiali_client, openshift_client):
    """KIA0401: mesh-wide mTLS enabled while the DR does not use ISTIO_MUTUAL."""
    expected = [
        ConfigValidationObject(
            object_type='DestinationRule', object_name='default',
            namespace=BOOKINFO, error_messages=[]),
        ConfigValidationObject(
            object_type='PeerAuthentication', object_name='default',
            namespace=ISTIO_SYSTEM, error_messages=[KIA0401]),
    ]
    validations = ValidationsTest(
        openshift_client=openshift_client,
        kiali_client=kiali_client,
        objects_path=istio_objects_validation_path.strpath)
    validations.test_istio_objects(
        scenario=SCENARIO_9, namespace=None,
        config_validation_objects=expected)
@pytest.mark.p_group_last
def test_vs_to_non_existing_gateway(kiali_client, openshift_client):
    """KIA1102: VirtualService points to a non-existent gateway."""
    expected = [
        ConfigValidationObject(
            object_type='VirtualService', object_name='details-vs-non-existing-gateway-auto',
            namespace=BOOKINFO, error_messages=[KIA1102]),
    ]
    validations = ValidationsTest(
        openshift_client=openshift_client,
        kiali_client=kiali_client,
        objects_path=istio_objects_validation_path.strpath)
    validations.test_istio_objects(
        scenario=SCENARIO_10, namespace=BOOKINFO,
        config_validation_objects=expected)
@pytest.mark.p_group_last
def test_vs_not_defined_protocol(kiali_client, openshift_client):
    """KIA1103: VirtualService does not define any route protocol."""
    expected = [
        ConfigValidationObject(
            object_type='VirtualService', object_name='details-not-defined-protocol',
            namespace=BOOKINFO, error_messages=[KIA1103]),
    ]
    try:
        validations = ValidationsTest(
            openshift_client=openshift_client,
            kiali_client=kiali_client,
            objects_path=istio_objects_validation_path.strpath)
        validations.test_istio_objects(
            scenario=SCENARIO_11, namespace=BOOKINFO,
            config_validation_objects=expected)
    except NoSuchElementException:
        # Tolerated: a VS with no protocol defined may not be rendered at all.
        pass
@pytest.mark.p_group_last
def test_dr_fqdn_ok(kiali_client, openshift_client):
    """DR host given as an existing FQDN: no validation errors expected."""
    expected = [
        ConfigValidationObject(
            object_type='DestinationRule', object_name='reviews-dr-fqdn-auto',
            namespace=BOOKINFO, error_messages=[]),
    ]
    validations = ValidationsTest(
        openshift_client=openshift_client,
        kiali_client=kiali_client,
        objects_path=istio_objects_validation_path.strpath)
    validations.test_istio_objects(
        scenario=SCENARIO_12, namespace=None,
        config_validation_objects=expected)
@pytest.mark.p_group_last
def test_dr_fqdn_not_exist(kiali_client, openshift_client):
    """KIA0202: DR host given as an FQDN which does not exist."""
    expected = [
        ConfigValidationObject(
            object_type='DestinationRule', object_name='reviews-dr-wrong-fqdn-auto',
            namespace=BOOKINFO, error_messages=[KIA0202]),
    ]
    validations = ValidationsTest(
        openshift_client=openshift_client,
        kiali_client=kiali_client,
        objects_path=istio_objects_validation_path.strpath)
    validations.test_istio_objects(
        scenario=SCENARIO_13, namespace=None,
        config_validation_objects=expected)
@pytest.mark.p_group_last
def __test_deployment_port_not_found(kiali_client, openshift_client):
    """KIA0701: no Deployment exposes the same port as the Service.

    NOTE: the double-underscore prefix keeps pytest from collecting this test.
    """
    expected = [ServiceValidationObject(error_message=KIA0701)]
    validations = ValidationsTest(
        openshift_client=openshift_client,
        kiali_client=kiali_client,
        objects_path=istio_objects_validation_path.strpath)
    validations.test_service_validation(
        scenario=SCENARIO_14, service_name='ratings-java',
        namespace='bookinfo',
        service_validation_objects=expected)
@pytest.mark.p_group_last
def __test_port_name_suffix(kiali_client, openshift_client):
    """KIA0601: port name must follow the <protocol>[-suffix] form.

    NOTE: the double-underscore prefix keeps pytest from collecting this test.
    """
    expected = [ServiceValidationObject(error_message=KIA0601)]
    validations = ValidationsTest(
        openshift_client=openshift_client,
        kiali_client=kiali_client,
        objects_path=istio_objects_validation_path.strpath)
    validations.test_service_validation(
        scenario=SCENARIO_15, service_name='ratings-java-svc-suffix',
        namespace='bookinfo',
        service_validation_objects=expected)
@pytest.mark.p_group_last
def test_vs_less_than_100_weight(kiali_client, openshift_client):
    """KIA1104: VirtualService route weights sum to less than 100."""
    expected = [
        ConfigValidationObject(
            object_type='VirtualService', object_name='virtual-service-less-100-weight-auto',
            namespace=BOOKINFO, error_messages=[KIA1104]),
    ]
    validations = ValidationsTest(
        openshift_client=openshift_client,
        kiali_client=kiali_client,
        objects_path=istio_objects_validation_path.strpath)
    validations.test_istio_objects(
        scenario=SCENARIO_16, namespace=BOOKINFO,
        config_validation_objects=expected)
@pytest.mark.p_group_last
def test_sidecar_errors(kiali_client, openshift_client):
    """Sidecar with a wrong host raises multiple errors (KIA0004 + 4x KIA1004)."""
    expected = [
        ConfigValidationObject(
            object_type='Sidecar', object_name='wrong-host-sidecar-auto',
            namespace=BOOKINFO,
            error_messages=[KIA0004] + [KIA1004] * 4),
    ]
    validations = ValidationsTest(
        openshift_client=openshift_client,
        kiali_client=kiali_client,
        objects_path=istio_objects_validation_path.strpath)
    validations.test_istio_objects(
        scenario=SCENARIO_17, namespace=BOOKINFO,
        config_validation_objects=expected)
@pytest.mark.p_group_last
def test_duplicate_sidecar_errors(kiali_client, openshift_client):
    """KIA0002: more than one selector-less Sidecar in the same namespace."""
    # Both duplicates report the same set of errors.
    duplicate_errors = [KIA0002, KIA1004, KIA1004]
    expected = [
        ConfigValidationObject(
            object_type='Sidecar', object_name=name,
            namespace=BOOKINFO, error_messages=list(duplicate_errors))
        for name in ('dupliacate-sidecar1-auto', 'dupliacate-sidecar2-auto')
    ]
    validations = ValidationsTest(
        openshift_client=openshift_client,
        kiali_client=kiali_client,
        objects_path=istio_objects_validation_path.strpath)
    validations.test_istio_objects(
        scenario=SCENARIO_18, namespace=BOOKINFO,
        config_validation_objects=expected)
@pytest.mark.p_group_last
def test_duplicate_workload_sidecar_errors(kiali_client, openshift_client):
    """KIA0003: more than one Sidecar selecting the same workload."""
    # Both duplicates report the same set of errors.
    duplicate_errors = [KIA1004, KIA1004, KIA0003]
    expected = [
        ConfigValidationObject(
            object_type='Sidecar', object_name=name,
            namespace=BOOKINFO, error_messages=list(duplicate_errors))
        for name in ('dupliacate-workload-sidecar1-auto',
                     'dupliacate-workload-sidecar2-auto')
    ]
    validations = ValidationsTest(
        openshift_client=openshift_client,
        kiali_client=kiali_client,
        objects_path=istio_objects_validation_path.strpath)
    validations.test_istio_objects(
        scenario=SCENARIO_19, namespace=BOOKINFO,
        config_validation_objects=expected)
@pytest.mark.p_group_last
def test_default_workload_sidecar(kiali_client, openshift_client):
    """KIA1006: global default Sidecar should not have a workloadSelector."""
    expected = [
        ConfigValidationObject(
            object_type='Sidecar', object_name='default-sidecar-workload-auto',
            namespace=ISTIO_SYSTEM, error_messages=[KIA1006, KIA0004]),
    ]
    validations = ValidationsTest(
        openshift_client=openshift_client,
        kiali_client=kiali_client,
        objects_path=istio_objects_validation_path.strpath)
    validations.test_istio_objects(
        scenario=SCENARIO_20, namespace=ISTIO_SYSTEM,
        config_validation_objects=expected)
@pytest.mark.p_group_last
def test_meshpolicy_disabled_ok(kiali_client, openshift_client):
    """mTLS disabled (mode = DISABLE) consistently on PA and DR: no errors."""
    expected = [
        ConfigValidationObject(
            object_type='DestinationRule', object_name='disable-mtls',
            namespace=BOOKINFO, error_messages=[]),
        ConfigValidationObject(
            object_type='PeerAuthentication', object_name='default',
            namespace=BOOKINFO, error_messages=[]),
    ]
    validations = ValidationsTest(
        openshift_client=openshift_client,
        kiali_client=kiali_client,
        objects_path=istio_objects_validation_path.strpath)
    validations.test_istio_objects(
        scenario=SCENARIO_21, namespace=None,
        config_validation_objects=expected)
@pytest.mark.p_group_last
def test_authpolicy_validations_mtls(kiali_client, openshift_client):
    """KIA0105: AuthorizationPolicy fields that require mTLS to be enabled
    (from.source.{namespaces | notNamespaces | principals | notPrincipals},
    when.key = {source.principal | source.namespace | connection.sni}).
    """
    # Seven rule fields each raise KIA0105 — unless auto-mTLS is active.
    messages = [] if openshift_client.is_auto_mtls() else [KIA0105] * 7
    expected = [
        ConfigValidationObject(
            object_type='AuthorizationPolicy', object_name='authpolicymtls',
            namespace=BOOKINFO2, error_messages=messages),
    ]
    validations = ValidationsTest(
        openshift_client=openshift_client,
        kiali_client=kiali_client,
        objects_path=istio_objects_validation_path.strpath)
    validations.test_istio_objects(
        scenario=SCENARIO_22, namespace=BOOKINFO2,
        config_validation_objects=expected)
@pytest.mark.p_group_last
def test_vs_subset_validations_service_entry(kiali_client, openshift_client):
    """Subset resolvable via an existing ServiceEntry; only KIA1104 reported."""
    expected = [
        ConfigValidationObject(
            object_type='VirtualService', object_name='orahub-vs',
            namespace=BOOKINFO, error_messages=[KIA1104, KIA1104]),
    ]
    validations = ValidationsTest(
        openshift_client=openshift_client,
        kiali_client=kiali_client,
        objects_path=istio_objects_validation_path.strpath)
    validations.test_istio_objects(
        scenario=SCENARIO_23, namespace=BOOKINFO,
        config_validation_objects=expected)
@pytest.mark.p_group_last
def test_vs_subset_validations_no_service_entry(kiali_client, openshift_client):
    """KIA1107: subset not found because the backing ServiceEntry is missing."""
    expected = [
        ConfigValidationObject(
            object_type='VirtualService', object_name='orahub-vs-no-dr',
            namespace=BOOKINFO,
            error_messages=[KIA1104, KIA1104, KIA1107, KIA1107]),
    ]
    validations = ValidationsTest(
        openshift_client=openshift_client,
        kiali_client=kiali_client,
        objects_path=istio_objects_validation_path.strpath)
    validations.test_istio_objects(
        scenario=SCENARIO_24, namespace=BOOKINFO,
        config_validation_objects=expected)
@pytest.mark.p_group_last
def test_vs_duplicate_gateway(kiali_client, openshift_client):
    """KIA1106 scenario: several VirtualServices for the same host — here all
    four objects are expected to validate cleanly.
    """
    expected = [
        ConfigValidationObject(
            object_type='VirtualService', object_name=name,
            namespace=BOOKINFO, error_messages=[])
        for name in ('admin-vs-2', 'admin-vs', 'user-vs-2', 'user-vs')
    ]
    validations = ValidationsTest(
        openshift_client=openshift_client,
        kiali_client=kiali_client,
        objects_path=istio_objects_validation_path.strpath)
    validations.test_istio_objects(
        scenario=SCENARIO_25, namespace=BOOKINFO,
        config_validation_objects=expected)
@pytest.mark.p_group_last
def test_vs_destination_host_not_found(kiali_client, openshift_client):
    """KIA1101: DestinationWeight on a route has no valid service (host not found)."""
    expected = [
        ConfigValidationObject(
            object_type='VirtualService', object_name='foo-dev',
            namespace=ISTIO_SYSTEM, error_messages=[KIA1101]),
    ]
    validations = ValidationsTest(
        openshift_client=openshift_client,
        kiali_client=kiali_client,
        objects_path=istio_objects_validation_path.strpath)
    validations.test_istio_objects(
        scenario=SCENARIO_26, namespace=ISTIO_SYSTEM,
        config_validation_objects=expected)
@pytest.mark.p_group_last
def test_request_auth_workload_not_found(kiali_client, openshift_client):
    """RequestAuthentication validations: KIA0003, KIA0004 and KIA0002."""
    # (object name, expected error messages) in verification order.
    cases = [
        ('httpbin-dup-1', [KIA0003]),
        ('httpbin-dup-2', [KIA0003]),
        ('httpbin-matching', []),
        ('httpbin-no-matching', [KIA0004]),
        ('httpbin-ns-wise', [KIA0002]),
        ('httpbin-ns-wise-1', [KIA0002]),
    ]
    expected = [
        ConfigValidationObject(
            object_type='RequestAuthentication', object_name=name,
            namespace=BOOKINFO, error_messages=messages)
        for name, messages in cases
    ]
    validations = ValidationsTest(
        openshift_client=openshift_client,
        kiali_client=kiali_client,
        objects_path=istio_objects_validation_path.strpath)
    validations.test_istio_objects(
        scenario=SCENARIO_27, namespace=BOOKINFO,
        config_validation_objects=expected)
@pytest.mark.p_group_last
def test_subset_no_label(kiali_client, openshift_client):
    """KIA0209: DestinationRule subset defined without any label."""
    expected = [
        ConfigValidationObject(
            object_type='DestinationRule', object_name='reviews-subset-not-label-auto',
            namespace=BOOKINFO, error_messages=[KIA0209]),
    ]
    validations = ValidationsTest(
        openshift_client=openshift_client,
        kiali_client=kiali_client,
        objects_path=istio_objects_validation_path.strpath)
    validations.test_istio_objects(
        scenario=SCENARIO_29, namespace=BOOKINFO,
        config_validation_objects=expected)
| 36.08 | 98 | 0.652038 | import pytest
from selenium.common.exceptions import NoSuchElementException
from kiali_qe.tests import ValidationsTest, ConfigValidationObject, ServiceValidationObject
from kiali_qe.utils.path import istio_objects_validation_path
from kiali_qe.components.error_codes import (
KIA0205,
KIA0401,
KIA0301,
KIA0302,
KIA0201,
KIA0202,
KIA0203,
KIA0209,
KIA1102,
KIA0701,
KIA0601,
KIA1104,
KIA0204,
KIA0001,
KIA0004,
KIA0002,
KIA0003,
KIA1103,
KIA1004,
KIA1006,
KIA0105,
KIA1106,
KIA1107,
KIA1101
)
BOOKINFO = 'bookinfo'
BOOKINFO2 = 'bookinfo2'
ISTIO_SYSTEM = 'istio-system'
SCENARIO_1 = "two_gateways_same_host.yaml"
SCENARIO_2 = "no_matching_workload_gateway.yaml"
SCENARIO_3 = "more_destination_rules.yaml"
SCENARIO_4 = "no_matching_entry_registry.yaml"
SCENARIO_5 = "subset_label_not_found.yaml"
SCENARIO_6 = "missing_mesh_policy.yaml"
SCENARIO_7 = "mtls_settings_overridden.yaml"
SCENARIO_8 = "mesh_policy_permissive.yaml"
SCENARIO_9 = "mesh_policy_mtls_enable.yaml"
SCENARIO_10 = "non_existing_gateway.yaml"
SCENARIO_11 = "not_defined_protocol.yaml"
SCENARIO_12 = "destination_rule_fqdn.yaml"
SCENARIO_13 = "destination_rule_wrong_fqdn.yaml"
SCENARIO_14 = "ratings_java_svc.yaml"
SCENARIO_15 = "port_name_suffix_missing.yaml"
SCENARIO_16 = "virtual-service-less-than-100-weight.yaml"
SCENARIO_17 = "wrong-host-label-sidecar.yaml"
SCENARIO_18 = "duplicate-no-workload-sidecar.yaml"
SCENARIO_19 = "duplicate-workload-sidecar.yaml"
SCENARIO_20 = "default-sidecar-with-workload.yaml"
SCENARIO_21 = "mesh_policy_disable.yaml"
SCENARIO_22 = "auth-policy-mtls.yaml"
SCENARIO_23 = "vs_subset_service_entry.yaml"
SCENARIO_24 = "vs_wrong_subset_no_dr.yaml"
SCENARIO_25 = "duplicate-vs-gateway.yaml"
SCENARIO_26 = "vs_destination_host_not_found.yaml"
SCENARIO_27 = "request_auth_no_workload.yaml"
SCENARIO_28 = "two_gateways_different_selectors.yaml"
SCENARIO_29 = "subset_not_have_label.yaml"
@pytest.mark.p_group_last
def test_two_gateways_same_host(kiali_client, openshift_client):
    """Two Gateways sharing the same host should each be flagged with KIA0301."""
    expected_validations = [
        ConfigValidationObject(
            object_type='Gateway',
            object_name='bookinfo-gateway-auto-host',
            namespace=BOOKINFO,
            error_messages=[KIA0301]),
        ConfigValidationObject(
            object_type='Gateway',
            object_name='bookinfo-gateway-auto-host-copy',
            namespace=BOOKINFO2,
            error_messages=[KIA0301]),
    ]
    runner = ValidationsTest(
        kiali_client=kiali_client,
        openshift_client=openshift_client,
        objects_path=istio_objects_validation_path.strpath)
    runner.test_istio_objects(
        scenario=SCENARIO_1, namespace=None,
        config_validation_objects=expected_validations,
        ignore_common_errors=False)
@pytest.mark.p_group_last
def test_two_gateways_different_selectors(kiali_client, openshift_client):
tests = ValidationsTest(
kiali_client=kiali_client,
openshift_client=openshift_client,
objects_path=istio_objects_validation_path.strpath)
tests.test_istio_objects(
scenario=SCENARIO_28, namespace=ISTIO_SYSTEM,
config_validation_objects=[
ConfigValidationObject(
object_type='Gateway',
object_name='istio-gateway-prv',
namespace=ISTIO_SYSTEM,
error_messages=[KIA0302]),
ConfigValidationObject(
object_type='Gateway',
object_name='istio-gateway-pub',
namespace=ISTIO_SYSTEM,
error_messages=[KIA0302])
],
ignore_common_errors=False)
@pytest.mark.p_group_last
def test_gateway_no_matching_workload(kiali_client, openshift_client):
tests = ValidationsTest(
kiali_client=kiali_client,
openshift_client=openshift_client,
objects_path=istio_objects_validation_path.strpath)
tests.test_istio_objects(
scenario=SCENARIO_2, namespace=None,
config_validation_objects=[
ConfigValidationObject(
object_type='Gateway',
object_name='bookinfo-gateway-auto-not-match',
namespace=BOOKINFO,
error_messages=[KIA0302])
])
@pytest.mark.p_group_last
def test_more_drs_same_host_port(kiali_client, openshift_client):
    """DestinationRules targeting the same host/port should each raise KIA0201."""
    expected_validations = [
        ConfigValidationObject(
            object_type='DestinationRule',
            object_name=rule_name,
            namespace=BOOKINFO,
            error_messages=[KIA0201])
        for rule_name in ('reviews-dr1-auto', 'reviews-dr2-auto')
    ]
    runner = ValidationsTest(
        kiali_client=kiali_client,
        openshift_client=openshift_client,
        objects_path=istio_objects_validation_path.strpath)
    runner.test_istio_objects(
        scenario=SCENARIO_3, namespace=BOOKINFO,
        config_validation_objects=expected_validations,
        ignore_common_errors=False)
@pytest.mark.p_group_last
def test_no_matching_entry_dr(kiali_client, openshift_client):
tests = ValidationsTest(
kiali_client=kiali_client,
openshift_client=openshift_client,
objects_path=istio_objects_validation_path.strpath)
tests.test_istio_objects(
scenario=SCENARIO_4, namespace=BOOKINFO,
config_validation_objects=[
ConfigValidationObject(
object_type='DestinationRule',
object_name='reviews-no-match-entry-auto',
namespace=BOOKINFO,
error_messages=[KIA0202])
])
@pytest.mark.p_group_last
def test_subset_label_not_found(kiali_client, openshift_client):
tests = ValidationsTest(
kiali_client=kiali_client,
openshift_client=openshift_client,
objects_path=istio_objects_validation_path.strpath)
tests.test_istio_objects(
scenario=SCENARIO_5, namespace=BOOKINFO,
config_validation_objects=[
ConfigValidationObject(
object_type='DestinationRule',
object_name='reviews-no-subset-label-auto',
namespace=BOOKINFO,
error_messages=[KIA0203,
KIA0203])
])
@pytest.mark.p_group_last
def test_mesh_policy_not_found(kiali_client, openshift_client):
tests = ValidationsTest(
kiali_client=kiali_client,
openshift_client=openshift_client,
objects_path=istio_objects_validation_path.strpath)
tests.test_istio_objects(
scenario=SCENARIO_6, namespace=ISTIO_SYSTEM,
config_validation_objects=[
ConfigValidationObject(
object_type='DestinationRule',
object_name='default',
namespace=ISTIO_SYSTEM,
error_messages=[KIA0205])
])
@pytest.mark.p_group_last
def test_mtls_settings_overridden(kiali_client, openshift_client):
tests = ValidationsTest(
kiali_client=kiali_client,
openshift_client=openshift_client,
objects_path=istio_objects_validation_path.strpath)
tests.test_istio_objects(
scenario=SCENARIO_7, namespace=None,
config_validation_objects=[
ConfigValidationObject(
object_type='DestinationRule',
object_name='default',
namespace=ISTIO_SYSTEM,
error_messages=[KIA0205]),
ConfigValidationObject(
object_type='DestinationRule',
object_name='reviews-overridden-auto',
namespace=BOOKINFO,
error_messages=[KIA0204])
])
@pytest.mark.p_group_last
def test_meshpolicy_permissive_ok(kiali_client, openshift_client):
tests = ValidationsTest(
kiali_client=kiali_client,
openshift_client=openshift_client,
objects_path=istio_objects_validation_path.strpath)
tests.test_istio_objects(
scenario=SCENARIO_8, namespace=None,
config_validation_objects=[
ConfigValidationObject(
object_type='DestinationRule',
object_name='default',
namespace=BOOKINFO,
error_messages=[]),
ConfigValidationObject(
object_type='PeerAuthentication',
object_name='default',
namespace=ISTIO_SYSTEM,
error_messages=[])
])
@pytest.mark.p_group_last
def test_meshpolicy_mtls_enable_ok(kiali_client, openshift_client):
tests = ValidationsTest(
kiali_client=kiali_client,
openshift_client=openshift_client,
objects_path=istio_objects_validation_path.strpath)
tests.test_istio_objects(
scenario=SCENARIO_9, namespace=None,
config_validation_objects=[
ConfigValidationObject(
object_type='DestinationRule',
object_name='default',
namespace=BOOKINFO,
error_messages=[]),
ConfigValidationObject(
object_type='PeerAuthentication',
object_name='default',
namespace=ISTIO_SYSTEM,
error_messages=[KIA0401])
])
@pytest.mark.p_group_last
def test_vs_to_non_existing_gateway(kiali_client, openshift_client):
tests = ValidationsTest(
kiali_client=kiali_client,
openshift_client=openshift_client,
objects_path=istio_objects_validation_path.strpath)
tests.test_istio_objects(
scenario=SCENARIO_10, namespace=BOOKINFO,
config_validation_objects=[
ConfigValidationObject(
object_type='VirtualService',
object_name='details-vs-non-existing-gateway-auto',
namespace=BOOKINFO,
error_messages=[KIA1102])
])
@pytest.mark.p_group_last
def test_vs_not_defined_protocol(kiali_client, openshift_client):
    """A VirtualService route without a defined protocol should raise KIA1103.

    A ``NoSuchElementException`` from the UI is tolerated, mirroring the
    original best-effort behavior of this check.
    """
    expected_validations = [
        ConfigValidationObject(
            object_type='VirtualService',
            object_name='details-not-defined-protocol',
            namespace=BOOKINFO,
            error_messages=[KIA1103])
    ]
    try:
        runner = ValidationsTest(
            kiali_client=kiali_client,
            openshift_client=openshift_client,
            objects_path=istio_objects_validation_path.strpath)
        runner.test_istio_objects(
            scenario=SCENARIO_11, namespace=BOOKINFO,
            config_validation_objects=expected_validations)
    except NoSuchElementException:
        # Swallowed deliberately; the element may be absent in some UIs.
        pass
@pytest.mark.p_group_last
def test_dr_fqdn_ok(kiali_client, openshift_client):
tests = ValidationsTest(
kiali_client=kiali_client,
openshift_client=openshift_client,
objects_path=istio_objects_validation_path.strpath)
tests.test_istio_objects(
scenario=SCENARIO_12, namespace=None,
config_validation_objects=[
ConfigValidationObject(
object_type='DestinationRule',
object_name='reviews-dr-fqdn-auto',
namespace=BOOKINFO,
error_messages=[])
])
@pytest.mark.p_group_last
def test_dr_fqdn_not_exist(kiali_client, openshift_client):
tests = ValidationsTest(
kiali_client=kiali_client,
openshift_client=openshift_client,
objects_path=istio_objects_validation_path.strpath)
tests.test_istio_objects(
scenario=SCENARIO_13, namespace=None,
config_validation_objects=[
ConfigValidationObject(
object_type='DestinationRule',
object_name='reviews-dr-wrong-fqdn-auto',
namespace=BOOKINFO,
error_messages=[KIA0202])
])
@pytest.mark.p_group_last
def __test_deployment_port_not_found(kiali_client, openshift_client):
tests = ValidationsTest(
kiali_client=kiali_client,
openshift_client=openshift_client,
objects_path=istio_objects_validation_path.strpath)
tests.test_service_validation(
scenario=SCENARIO_14, service_name='ratings-java',
namespace='bookinfo',
service_validation_objects=[
ServiceValidationObject(
error_message=KIA0701)])
@pytest.mark.p_group_last
def __test_port_name_suffix(kiali_client, openshift_client):
tests = ValidationsTest(
kiali_client=kiali_client,
openshift_client=openshift_client,
objects_path=istio_objects_validation_path.strpath)
tests.test_service_validation(
scenario=SCENARIO_15, service_name='ratings-java-svc-suffix',
namespace='bookinfo',
service_validation_objects=[
ServiceValidationObject(
error_message=KIA0601)])
@pytest.mark.p_group_last
def test_vs_less_than_100_weight(kiali_client, openshift_client):
tests = ValidationsTest(
kiali_client=kiali_client,
openshift_client=openshift_client,
objects_path=istio_objects_validation_path.strpath)
tests.test_istio_objects(
scenario=SCENARIO_16, namespace=BOOKINFO,
config_validation_objects=[
ConfigValidationObject(
object_type='VirtualService',
object_name='virtual-service-less-100-weight-auto',
namespace=BOOKINFO,
error_messages=[KIA1104])
])
@pytest.mark.p_group_last
def test_sidecar_errors(kiali_client, openshift_client):
tests = ValidationsTest(
kiali_client=kiali_client,
openshift_client=openshift_client,
objects_path=istio_objects_validation_path.strpath)
tests.test_istio_objects(
scenario=SCENARIO_17, namespace=BOOKINFO,
config_validation_objects=[
ConfigValidationObject(
object_type='Sidecar',
object_name='wrong-host-sidecar-auto',
namespace=BOOKINFO,
error_messages=[KIA0004,
KIA1004,
KIA1004,
KIA1004,
KIA1004])
])
@pytest.mark.p_group_last
def test_duplicate_sidecar_errors(kiali_client, openshift_client):
tests = ValidationsTest(
kiali_client=kiali_client,
openshift_client=openshift_client,
objects_path=istio_objects_validation_path.strpath)
tests.test_istio_objects(
scenario=SCENARIO_18, namespace=BOOKINFO,
config_validation_objects=[
ConfigValidationObject(
object_type='Sidecar',
object_name='dupliacate-sidecar1-auto',
namespace=BOOKINFO,
error_messages=[KIA0002,
KIA1004,
KIA1004]),
ConfigValidationObject(
object_type='Sidecar',
object_name='dupliacate-sidecar2-auto',
namespace=BOOKINFO,
error_messages=[KIA0002,
KIA1004,
KIA1004])
])
@pytest.mark.p_group_last
def test_duplicate_workload_sidecar_errors(kiali_client, openshift_client):
tests = ValidationsTest(
kiali_client=kiali_client,
openshift_client=openshift_client,
objects_path=istio_objects_validation_path.strpath)
tests.test_istio_objects(
scenario=SCENARIO_19, namespace=BOOKINFO,
config_validation_objects=[
ConfigValidationObject(
object_type='Sidecar',
object_name='dupliacate-workload-sidecar1-auto',
namespace=BOOKINFO,
error_messages=[KIA1004, KIA1004, KIA0003]),
ConfigValidationObject(
object_type='Sidecar',
object_name='dupliacate-workload-sidecar2-auto',
namespace=BOOKINFO,
error_messages=[KIA1004, KIA1004, KIA0003])
])
@pytest.mark.p_group_last
def test_default_workload_sidecar(kiali_client, openshift_client):
tests = ValidationsTest(
kiali_client=kiali_client,
openshift_client=openshift_client,
objects_path=istio_objects_validation_path.strpath)
tests.test_istio_objects(
scenario=SCENARIO_20, namespace=ISTIO_SYSTEM,
config_validation_objects=[
ConfigValidationObject(
object_type='Sidecar',
object_name='default-sidecar-workload-auto',
namespace=ISTIO_SYSTEM,
error_messages=[KIA1006, KIA0004])
])
@pytest.mark.p_group_last
def test_meshpolicy_disabled_ok(kiali_client, openshift_client):
tests = ValidationsTest(
kiali_client=kiali_client,
openshift_client=openshift_client,
objects_path=istio_objects_validation_path.strpath)
tests.test_istio_objects(
scenario=SCENARIO_21, namespace=None,
config_validation_objects=[
ConfigValidationObject(
object_type='DestinationRule',
object_name='disable-mtls',
namespace=BOOKINFO,
error_messages=[]),
ConfigValidationObject(
object_type='PeerAuthentication',
object_name='default',
namespace=BOOKINFO,
error_messages=[])
])
@pytest.mark.p_group_last
def test_authpolicy_validations_mtls(kiali_client, openshift_client):
tests = ValidationsTest(
kiali_client=kiali_client,
openshift_client=openshift_client,
objects_path=istio_objects_validation_path.strpath)
tests.test_istio_objects(
scenario=SCENARIO_22, namespace=BOOKINFO2,
config_validation_objects=[
ConfigValidationObject(
object_type='AuthorizationPolicy',
object_name='authpolicymtls',
namespace=BOOKINFO2,
error_messages=([KIA0105, KIA0105, KIA0105, KIA0105, KIA0105, KIA0105, KIA0105]
if not openshift_client.is_auto_mtls() else []))
])
@pytest.mark.p_group_last
def test_vs_subset_validations_service_entry(kiali_client, openshift_client):
tests = ValidationsTest(
kiali_client=kiali_client,
openshift_client=openshift_client,
objects_path=istio_objects_validation_path.strpath)
tests.test_istio_objects(
scenario=SCENARIO_23, namespace=BOOKINFO,
config_validation_objects=[
ConfigValidationObject(
object_type='VirtualService',
object_name='orahub-vs',
namespace=BOOKINFO,
error_messages=[KIA1104, KIA1104])
])
@pytest.mark.p_group_last
def test_vs_subset_validations_no_service_entry(kiali_client, openshift_client):
tests = ValidationsTest(
kiali_client=kiali_client,
openshift_client=openshift_client,
objects_path=istio_objects_validation_path.strpath)
tests.test_istio_objects(
scenario=SCENARIO_24, namespace=BOOKINFO,
config_validation_objects=[
ConfigValidationObject(
object_type='VirtualService',
object_name='orahub-vs-no-dr',
namespace=BOOKINFO,
error_messages=[KIA1104, KIA1104, KIA1107, KIA1107])
])
@pytest.mark.p_group_last
def test_vs_duplicate_gateway(kiali_client, openshift_client):
tests = ValidationsTest(
kiali_client=kiali_client,
openshift_client=openshift_client,
objects_path=istio_objects_validation_path.strpath)
tests.test_istio_objects(
scenario=SCENARIO_25, namespace=BOOKINFO,
config_validation_objects=[
ConfigValidationObject(
object_type='VirtualService',
object_name='admin-vs-2',
namespace=BOOKINFO,
error_messages=[]),
ConfigValidationObject(
object_type='VirtualService',
object_name='admin-vs',
namespace=BOOKINFO,
error_messages=[]),
ConfigValidationObject(
object_type='VirtualService',
object_name='user-vs-2',
namespace=BOOKINFO,
error_messages=[]),
ConfigValidationObject(
object_type='VirtualService',
object_name='user-vs',
namespace=BOOKINFO,
error_messages=[])
])
@pytest.mark.p_group_last
def test_vs_destination_host_not_found(kiali_client, openshift_client):
tests = ValidationsTest(
kiali_client=kiali_client,
openshift_client=openshift_client,
objects_path=istio_objects_validation_path.strpath)
tests.test_istio_objects(
scenario=SCENARIO_26, namespace=ISTIO_SYSTEM,
config_validation_objects=[
ConfigValidationObject(
object_type='VirtualService',
object_name='foo-dev',
namespace=ISTIO_SYSTEM,
error_messages=[KIA1101])
])
@pytest.mark.p_group_last
def test_request_auth_workload_not_found(kiali_client, openshift_client):
tests = ValidationsTest(
kiali_client=kiali_client,
openshift_client=openshift_client,
objects_path=istio_objects_validation_path.strpath)
tests.test_istio_objects(
scenario=SCENARIO_27, namespace=BOOKINFO,
config_validation_objects=[
ConfigValidationObject(
object_type='RequestAuthentication',
object_name='httpbin-dup-1',
namespace=BOOKINFO,
error_messages=[KIA0003]),
ConfigValidationObject(
object_type='RequestAuthentication',
object_name='httpbin-dup-2',
namespace=BOOKINFO,
error_messages=[KIA0003]),
ConfigValidationObject(
object_type='RequestAuthentication',
object_name='httpbin-matching',
namespace=BOOKINFO,
error_messages=[]),
ConfigValidationObject(
object_type='RequestAuthentication',
object_name='httpbin-no-matching',
namespace=BOOKINFO,
error_messages=[KIA0004]),
ConfigValidationObject(
object_type='RequestAuthentication',
object_name='httpbin-ns-wise',
namespace=BOOKINFO,
error_messages=[KIA0002]),
ConfigValidationObject(
object_type='RequestAuthentication',
object_name='httpbin-ns-wise-1',
namespace=BOOKINFO,
error_messages=[KIA0002])
])
@pytest.mark.p_group_last
def test_subset_no_label(kiali_client, openshift_client):
    """A DestinationRule subset without a matching label should raise KIA0209."""
    expected_validations = [
        ConfigValidationObject(
            object_type='DestinationRule',
            object_name='reviews-subset-not-label-auto',
            namespace=BOOKINFO,
            error_messages=[KIA0209])
    ]
    runner = ValidationsTest(
        kiali_client=kiali_client,
        openshift_client=openshift_client,
        objects_path=istio_objects_validation_path.strpath)
    runner.test_istio_objects(
        scenario=SCENARIO_29, namespace=BOOKINFO,
        config_validation_objects=expected_validations)
| true | true |
1c2fc8f9475fca07fd19d084ffbe32021b3ce474 | 7,459 | py | Python | probatus/utils/shap_helpers.py | PaulZhutovsky/probatus | d8f85dc0eac65a7fec64b76f265693c845afcbe2 | [
"MIT"
] | null | null | null | probatus/utils/shap_helpers.py | PaulZhutovsky/probatus | d8f85dc0eac65a7fec64b76f265693c845afcbe2 | [
"MIT"
] | null | null | null | probatus/utils/shap_helpers.py | PaulZhutovsky/probatus | d8f85dc0eac65a7fec64b76f265693c845afcbe2 | [
"MIT"
] | null | null | null | # Copyright (c) 2020 ING Bank N.V.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy of
# this software and associated documentation files (the "Software"), to deal in
# the Software without restriction, including without limitation the rights to
# use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
# the Software, and to permit persons to whom the Software is furnished to do so,
# subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
# FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
# COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
# IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
import warnings
import numpy as np
import pandas as pd
from shap import Explainer
from shap.explainers._tree import Tree
from shap.utils import sample
from sklearn.pipeline import Pipeline
def shap_calc(
    model,
    X,
    return_explainer=False,
    verbose=0,
    sample_size=100,
    approximate=False,
    check_additivity=True,
    **shap_kwargs,
):
    """
    Helper function to calculate the shapley values for a given model.

    Args:
        model (binary model):
            Trained model.

        X (pd.DataFrame or np.ndarray):
            features set.

        return_explainer (boolean):
            if True, returns a tuple (shap_values, explainer).

        verbose (int, optional):
            Controls verbosity of the output:

            - 0 - neither prints nor warnings are shown
            - 1 - 50 - only most important warnings
            - 51 - 100 - shows other warnings and prints
            - above 100 - presents all prints and all warnings (including SHAP warnings).

        sample_size (int, optional):
            Number of background samples drawn from X for non-tree explainers.

        approximate (boolean):
            if True uses shap approximations - less accurate, but very fast. It applies to tree-based explainers only.

        check_additivity (boolean):
            if False SHAP will disable the additivity check for tree-based models.

        **shap_kwargs: kwargs of the shap.Explainer

    Returns:
        (np.ndarray or tuple(np.ndarray, shap.Explainer)):
            shapley_values for the model, optionally also returns the explainer.
    """
    if isinstance(model, Pipeline):
        raise TypeError(
            "The provided model is a Pipeline. Unfortunately, the features based on SHAP do not support "
            "pipelines, because they cannot be used in combination with shap.Explainer. Please apply any "
            "data transformations before running the probatus module."
        )
    # Suppress warnings regarding XGboost and Lightgbm models.
    with warnings.catch_warnings():
        if verbose <= 100:
            warnings.simplefilter("ignore")
        # For tree explainers, do not pass masker when feature_perturbation is
        # tree_path_dependent, or when X contains categorical features
        # related to issue:
        # https://github.com/slundberg/shap/issues/480
        # NOTE(review): select_dtypes assumes X is a DataFrame here, although
        # the docstring also permits np.ndarray - confirm upstream callers.
        if shap_kwargs.get("feature_perturbation") == "tree_path_dependent" or X.select_dtypes("category").shape[1] > 0:
            # Calculate Shap values.
            explainer = Explainer(model, **shap_kwargs)
        else:
            # Create the background data, required for non tree based models.
            # A single datapoint can be passed as mask
            # (https://github.com/slundberg/shap/issues/955#issuecomment-569837201)
            if X.shape[0] < sample_size:
                # Fall back to 20% of the available rows when X holds fewer
                # rows than the requested background sample size.
                sample_size = int(np.ceil(X.shape[0] * 0.2))
            mask = sample(X, sample_size)
            explainer = Explainer(model, masker=mask, **shap_kwargs)

        # For tree-explainers allow for using check_additivity and approximate arguments
        if isinstance(explainer, Tree):
            # Calculate Shap values
            shap_values = explainer.shap_values(X, check_additivity=check_additivity, approximate=approximate)
        else:
            # Calculate Shap values
            shap_values = explainer.shap_values(X)

        if isinstance(shap_values, list) and len(shap_values) == 2:
            # Binary classifiers may return one array per class; keep class 1.
            warnings.warn(
                "Shap values are related to the output probabilities of class 1 for this model, instead of " "log odds."
            )
            shap_values = shap_values[1]

    if return_explainer:
        return shap_values, explainer
    return shap_values
def shap_to_df(model, X, precalc_shap=None, **kwargs):
    """
    Compute SHAP values for ``X`` and wrap them in a DataFrame mirroring
    the columns and index of the input.

    Args:
        model (binary model):
            Pretrained model (Random Forest of XGBoost at the moment).

        X (pd.DataFrame or np.ndarray):
            Dataset on which the SHAP importance is calculated.

        precalc_shap (np.array):
            Precalculated SHAP values. If None, they are computed.

        **kwargs: for the function shap_calc

    Returns:
        (pd.DataFrame):
            Dataframe with SHAP feature importance per features on X dataset.
    """
    # Only compute SHAP values when no precomputed array was supplied.
    if precalc_shap is None:
        shap_values = shap_calc(model, X, **kwargs)
    else:
        shap_values = precalc_shap

    if isinstance(X, pd.DataFrame):
        # Preserve the original column labels and row index.
        return pd.DataFrame(shap_values, columns=X.columns, index=X.index)
    if isinstance(X, np.ndarray) and len(X.shape) == 2:
        # Synthesize column names for a plain 2D array.
        generated_columns = [f"col_{ix}" for ix in range(X.shape[1])]
        return pd.DataFrame(shap_values, columns=generated_columns)
    raise NotImplementedError("X must be a dataframe or a 2d array")
def calculate_shap_importance(shap_values, columns, output_columns_suffix=""):
    """
    Aggregate SHAP values per feature.

    Produces the mean absolute SHAP value (magnitude of importance) and
    the plain mean SHAP value (direction of effect) for every column,
    sorted by decreasing mean absolute value.

    Args:
        shap_values (np.array):
            Shap values.

        columns (list of str):
            Feature names.

        output_columns_suffix (str, optional):
            Suffix to be added at the end of column names in the output.

    Returns:
        (pd.DataFrame):
            Mean absolute shap values and Mean shap values of features.
    """
    abs_col = f"mean_abs_shap_value{output_columns_suffix}"
    mean_col = f"mean_shap_value{output_columns_suffix}"

    values = np.asarray(shap_values)
    importance_df = pd.DataFrame(
        {
            abs_col: np.abs(values).mean(axis=0).tolist(),
            mean_col: values.mean(axis=0).tolist(),
        },
        index=columns,
    )

    # Ensure float dtype regardless of the input array's dtype.
    importance_df[abs_col] = importance_df[abs_col].astype(float)
    importance_df[mean_col] = importance_df[mean_col].astype(float)

    return importance_df.sort_values(abs_col, ascending=False)
| 36.925743 | 120 | 0.667114 |
import warnings
import numpy as np
import pandas as pd
from shap import Explainer
from shap.explainers._tree import Tree
from shap.utils import sample
from sklearn.pipeline import Pipeline
def shap_calc(
model,
X,
return_explainer=False,
verbose=0,
sample_size=100,
approximate=False,
check_additivity=True,
**shap_kwargs,
):
if isinstance(model, Pipeline):
raise (
TypeError(
"The provided model is a Pipeline. Unfortunately, the features based on SHAP do not support "
"pipelines, because they cannot be used in combination with shap.Explainer. Please apply any "
"data transformations before running the probatus module."
)
)
with warnings.catch_warnings():
if verbose <= 100:
warnings.simplefilter("ignore")
if shap_kwargs.get("feature_perturbation") == "tree_path_dependent" or X.select_dtypes("category").shape[1] > 0:
explainer = Explainer(model, **shap_kwargs)
else:
0] < sample_size:
sample_size = int(np.ceil(X.shape[0] * 0.2))
else:
pass
mask = sample(X, sample_size)
explainer = Explainer(model, masker=mask, **shap_kwargs)
if isinstance(explainer, Tree):
shap_values = explainer.shap_values(X, check_additivity=check_additivity, approximate=approximate)
else:
shap_values = explainer.shap_values(X)
if isinstance(shap_values, list) and len(shap_values) == 2:
warnings.warn(
"Shap values are related to the output probabilities of class 1 for this model, instead of " "log odds."
)
shap_values = shap_values[1]
if return_explainer:
return shap_values, explainer
return shap_values
def shap_to_df(model, X, precalc_shap=None, **kwargs):
if precalc_shap is not None:
shap_values = precalc_shap
else:
shap_values = shap_calc(model, X, **kwargs)
if isinstance(X, pd.DataFrame):
return pd.DataFrame(shap_values, columns=X.columns, index=X.index)
elif isinstance(X, np.ndarray) and len(X.shape) == 2:
return pd.DataFrame(shap_values, columns=[f"col_{ix}" for ix in range(X.shape[1])])
else:
raise NotImplementedError("X must be a dataframe or a 2d array")
def calculate_shap_importance(shap_values, columns, output_columns_suffix=""):
shap_abs_mean = np.mean(np.abs(shap_values), axis=0)
shap_mean = np.mean(shap_values, axis=0)
importance_df = pd.DataFrame(
{
f"mean_abs_shap_value{output_columns_suffix}": shap_abs_mean.tolist(),
f"mean_shap_value{output_columns_suffix}": shap_mean.tolist(),
},
index=columns,
)
importance_df[f"mean_abs_shap_value{output_columns_suffix}"] = importance_df[
f"mean_abs_shap_value{output_columns_suffix}"
].astype(float)
importance_df[f"mean_shap_value{output_columns_suffix}"] = importance_df[
f"mean_shap_value{output_columns_suffix}"
].astype(float)
importance_df = importance_df.sort_values(f"mean_abs_shap_value{output_columns_suffix}", ascending=False)
return importance_df
| true | true |
1c2fc8fb4a8080d320c1086f19b7f02983c690e3 | 12,910 | py | Python | panel/io/server.py | nritsche/panel | 15aa31b1c78988d107b3ace765d3c0fec36188c8 | [
"BSD-3-Clause"
] | null | null | null | panel/io/server.py | nritsche/panel | 15aa31b1c78988d107b3ace765d3c0fec36188c8 | [
"BSD-3-Clause"
] | null | null | null | panel/io/server.py | nritsche/panel | 15aa31b1c78988d107b3ace765d3c0fec36188c8 | [
"BSD-3-Clause"
] | null | null | null | """
Utilities for creating bokeh Server instances.
"""
from __future__ import absolute_import, division, unicode_literals
import os
import signal
import sys
import threading
import uuid
from contextlib import contextmanager
from functools import partial
from types import FunctionType
from bokeh.document.events import ModelChangedEvent
from bokeh.server.server import Server
from tornado.websocket import WebSocketHandler
from tornado.web import RequestHandler, StaticFileHandler
from tornado.wsgi import WSGIContainer
from .state import state
#---------------------------------------------------------------------
# Private API
#---------------------------------------------------------------------
INDEX_HTML = os.path.join(os.path.dirname(__file__), '..', '_templates', "index.html")
def _origin_url(url):
if url.startswith("http"):
url = url.split("//")[1]
return url
def _server_url(url, port):
if url.startswith("http"):
return '%s:%d%s' % (url.rsplit(':', 1)[0], port, "/")
else:
return 'http://%s:%d%s' % (url.split(':')[0], port, "/")
@contextmanager
def set_curdoc(doc):
    """
    Context manager which sets ``state.curdoc`` to the supplied Document
    for the duration of the block, resetting it to None on exit.

    Arguments
    ---------
    doc: bokeh.document.Document
        The Document to make current while the block runs.
    """
    state.curdoc = doc
    try:
        yield
    finally:
        # Reset in a finally block so an exception raised inside the
        # context cannot leave a stale curdoc behind (the original
        # implementation skipped the reset when the body raised).
        state.curdoc = None
def _eval_panel(panel, server_id, title, location, doc):
    """
    Render ``panel`` into the given Bokeh Document and return it.

    Arguments
    ---------
    panel: Viewable, BaseTemplate or function
        The object to serve; a function is called with no arguments to
        produce the actual panel (allowing per-session construction).
    server_id: str
        ID of the server the Document is rendered for.
    title: str
        HTML title for the application.
    location: boolean or panel.io.location.Location
        Whether to create a Location component for the Document.
    doc: bokeh.document.Document
        The Document to populate.

    Returns
    -------
    The modified Bokeh Document.
    """
    # Imported here rather than at module level, presumably to avoid a
    # circular import - TODO confirm.
    from ..template import BaseTemplate
    from ..pane import panel as as_panel
    with set_curdoc(doc):
        # Evaluate callables first so each session can build a fresh app.
        if isinstance(panel, FunctionType):
            panel = panel()
        if isinstance(panel, BaseTemplate):
            doc = panel._modify_doc(server_id, title, doc, location)
        else:
            # Wrap arbitrary objects in a Panel pane before rendering.
            doc = as_panel(panel)._modify_doc(server_id, title, doc, location)
        return doc
#---------------------------------------------------------------------
# Public API
#---------------------------------------------------------------------
@contextmanager
def unlocked():
    """
    Context manager which unlocks a Document and dispatches
    ModelChangedEvents triggered in the context body to all sockets
    on current sessions.

    Outside a server session context this is a no-op. Events that
    cannot be written directly (socket locked or not writable) are
    re-queued on the Document instead.
    """
    curdoc = state.curdoc
    # No-op when there is no current Document or it is not attached
    # to a live server session.
    if curdoc is None or curdoc.session_context is None:
        yield
        return
    connections = curdoc.session_context.session._subscribed_connections

    hold = curdoc._hold
    if hold:
        # Document already holding: remember pre-existing events so we
        # only dispatch events produced inside this context.
        old_events = list(curdoc._held_events)
    else:
        old_events = []
        curdoc.hold()
    try:
        yield
        events = []
        for conn in connections:
            socket = conn._socket
            # A write_lock with value 0 means the socket is mid-write;
            # mark it locked so we do not write to it concurrently.
            if hasattr(socket, 'write_lock') and socket.write_lock._block._value == 0:
                state._locks.add(socket)
            locked = socket in state._locks
            for event in curdoc._held_events:
                if (isinstance(event, ModelChangedEvent) and event not in old_events
                    and hasattr(socket, 'write_message') and not locked):
                    # Dispatch the patch directly over the websocket,
                    # bypassing the Document's normal (locked) dispatch.
                    msg = conn.protocol.create('PATCH-DOC', [event])
                    WebSocketHandler.write_message(socket, msg.header_json)
                    WebSocketHandler.write_message(socket, msg.metadata_json)
                    WebSocketHandler.write_message(socket, msg.content_json)
                    for header, payload in msg._buffers:
                        WebSocketHandler.write_message(socket, header)
                        WebSocketHandler.write_message(socket, payload, binary=True)
                elif event not in events:
                    # Could not write directly; re-queue for normal dispatch.
                    events.append(event)
        curdoc._held_events = events
    finally:
        # Only release the hold we acquired ourselves.
        if not hold:
            curdoc.unhold()
def serve(panels, port=0, address=None, websocket_origin=None, loop=None,
          show=True, start=True, title=None, verbose=True, location=True,
          **kwargs):
    """
    Serve one or more panel objects from a single Bokeh server.

    ``panels`` may be a Panel object, a function returning one, or a
    dictionary mapping URL slugs to either; e.g.
    ``serve({'app': panel1, 'app2': panel2})`` serves the apps at
    /app and /app2 on the server.

    Arguments
    ---------
    panel: Viewable, function or {str: Viewable or function}
        A Panel object, a function returning a Panel object or a
        dictionary mapping from the URL slug to either.
    port: int (optional, default=0)
        Allows specifying a specific port.
    address : str
        The address the server should listen on for HTTP requests.
    websocket_origin: str or list(str) (optional)
        Hosts allowed to connect to the websocket; typically required
        when embedding a server app in an external web site. If None,
        "localhost" is used.
    loop : tornado.ioloop.IOLoop (optional, default=IOLoop.current())
        The tornado IOLoop to run the Server on.
    show : boolean (optional, default=True)
        Whether to open the server in a new browser tab on start.
    start : boolean (optional, default=True)
        Whether to start the Server.
    title: str or {str: str} (optional, default=None)
        An HTML title for the application or a dictionary mapping
        from the URL slug to a customized title.
    verbose: boolean (optional, default=True)
        Whether to print the address and port.
    location : boolean or panel.io.location.Location
        Whether to create a Location component to observe and
        set the URL location.
    kwargs: dict
        Additional keyword arguments to pass to Server instance.
    """
    # Thin wrapper: all the work happens in get_server.
    return get_server(panels, port=port, address=address,
                      websocket_origin=websocket_origin, loop=loop,
                      show=show, start=start, title=title,
                      verbose=verbose, location=location, **kwargs)
class ProxyFallbackHandler(RequestHandler):
    """A `RequestHandler` that wraps another HTTP server callback and
    proxies the subpath.
    """

    def initialize(self, fallback, proxy=None):
        # Store the wrapped callback and the optional path prefix to strip.
        self.fallback = fallback
        self.proxy = proxy

    def prepare(self):
        path_prefix = self.proxy
        if path_prefix:
            # Remove the proxy prefix before delegating the request.
            self.request.path = self.request.path.replace(path_prefix, '')
        self.fallback(self.request)
        # Mark the request handled so tornado does not try to finish it.
        self._finished = True
        self.on_finish()
def get_static_routes(static_dirs):
    """
    Build tornado StaticFileHandler routes from a mapping of URL slugs
    to local directory paths.

    Raises
    ------
    ValueError
        If a slug resolves to the reserved '/static' route or a path
        does not exist on disk.
    """
    patterns = []
    for slug, path in static_dirs.items():
        # Normalize the slug to always carry a leading slash.
        route = slug if slug.startswith('/') else '/' + slug
        if route == '/static':
            raise ValueError("Static file route may not use /static "
                             "this is reserved for internal use.")
        directory = os.path.abspath(path)
        if not os.path.isdir(directory):
            raise ValueError("Cannot serve non-existent path %s" % directory)
        patterns.append(
            (r"%s/(.*)" % route, StaticFileHandler, {"path": directory})
        )
    return patterns
def get_server(panel, port=0, address=None, websocket_origin=None,
               loop=None, show=False, start=False, title=None,
               verbose=False, location=True, static_dirs={}, **kwargs):
    """
    Returns a Server instance with this panel attached as the root
    app.

    Arguments
    ---------
    panel: Viewable, function or {str: Viewable}
      A Panel object, a function returning a Panel object or a
      dictionary mapping from the URL slug to either.
    port: int (optional, default=0)
      Allows specifying a specific port
    address : str
      The address the server should listen on for HTTP requests.
    websocket_origin: str or list(str) (optional)
      A list of hosts that can connect to the websocket.
      This is typically required when embedding a server app in
      an external web site.
      If None, "localhost" is used.
    loop : tornado.ioloop.IOLoop (optional, default=IOLoop.current())
      The tornado IOLoop to run the Server on.
    show : boolean (optional, default=False)
      Whether to open the server in a new browser tab on start.
    start : boolean(optional, default=False)
      Whether to start the Server.
    title : str or {str: str} (optional, default=None)
      An HTML title for the application or a dictionary mapping
      from the URL slug to a customized title.
    verbose: boolean (optional, default=False)
      Whether to report the address and port.
    location : boolean or panel.io.location.Location
      Whether to create a Location component to observe and
      set the URL location.
    static_dirs: dict (optional, default={})
      A dictionary of routes and local paths to serve as static file
      directories on those routes.
    kwargs: dict
      Additional keyword arguments to pass to Server instance.

    Returns
    -------
    server : bokeh.server.server.Server
      Bokeh Server instance running this panel
    """
    from tornado.ioloop import IOLoop
    # NOTE(review): static_dirs={} is a mutable default argument; it is
    # only iterated below (never mutated) so it is safe here, but a None
    # sentinel would be more conventional.
    server_id = kwargs.pop('server_id', uuid.uuid4().hex)
    # extra_patterns is shared: routes appended below also end up in kwargs.
    kwargs['extra_patterns'] = extra_patterns = kwargs.get('extra_patterns', [])
    if isinstance(panel, dict):
        apps = {}
        for slug, app in panel.items():
            # Per-app title lookup when a title dict was supplied.
            if isinstance(title, dict):
                try:
                    title_ = title[slug]
                except KeyError:
                    raise KeyError(
                        "Keys of the title dictionnary and of the apps "
                        f"dictionary must match. No {slug} key found in the "
                        "title dictionnary.")
            else:
                title_ = title
            slug = slug if slug.startswith('/') else '/'+slug
            # Flask apps are mounted through a WSGI proxy route instead of
            # being treated as Panel apps; only checked if flask is imported.
            if 'flask' in sys.modules:
                from flask import Flask
                if isinstance(app, Flask):
                    wsgi = WSGIContainer(app)
                    if slug == '/':
                        raise ValueError('Flask apps must be served on a subpath.')
                    if not slug.endswith('/'):
                        slug += '/'
                    extra_patterns.append(('^'+slug+'.*', ProxyFallbackHandler,
                                           dict(fallback=wsgi, proxy=slug)))
                    continue
            apps[slug] = partial(_eval_panel, app, server_id, title_, location)
    else:
        apps = {'/': partial(_eval_panel, panel, server_id, title, location)}
    extra_patterns += get_static_routes(static_dirs)
    opts = dict(kwargs)
    if loop:
        # Bind the provided loop to the current thread before handing it
        # to the Server.
        loop.make_current()
        opts['io_loop'] = loop
    elif opts.get('num_procs', 1) == 1:
        # Multi-process servers must create their own loops per process.
        opts['io_loop'] = IOLoop.current()
    if 'index' not in opts:
        opts['index'] = INDEX_HTML
    if address is not None:
        opts['address'] = address
    if websocket_origin:
        if not isinstance(websocket_origin, list):
            websocket_origin = [websocket_origin]
        opts['allow_websocket_origin'] = websocket_origin
    server = Server(apps, port=port, **opts)
    if verbose:
        address = server.address or 'localhost'
        print("Launching server at http://%s:%s" % (address, server.port))
    # Register the server so it can be looked up/stopped later.
    state._servers[server_id] = (server, panel, [])
    if show:
        def show_callback():
            server.show('/')
        server.io_loop.add_callback(show_callback)
    def sig_exit(*args, **kwargs):
        # Signal handlers must not touch the loop directly; schedule stop.
        server.io_loop.add_callback_from_signal(do_stop)
    def do_stop(*args, **kwargs):
        server.io_loop.stop()
    try:
        signal.signal(signal.SIGINT, sig_exit)
    except ValueError:
        pass # Can't use signal on a thread
    if start:
        server.start()
        try:
            server.io_loop.start()
        except RuntimeError:
            pass
    return server
class StoppableThread(threading.Thread):
    """Thread class with a stop() method."""

    def __init__(self, io_loop=None, timeout=1000, **kwargs):
        from tornado import ioloop
        super(StoppableThread, self).__init__(**kwargs)
        self._stop_event = threading.Event()
        self.io_loop = io_loop
        # Poll the stop flag from inside the loop every `timeout` ms.
        self._cb = ioloop.PeriodicCallback(self._check_stopped, timeout)
        self._cb.start()

    def _check_stopped(self):
        if self.stopped:
            self._cb.stop()
            self.io_loop.stop()

    def run(self):
        # Python 3 stores the target as _target; older versions used the
        # name-mangled _Thread__target attributes.
        if hasattr(self, '_target'):
            target = self._target
            args = self._args
            kwargs = self._kwargs
        else:
            target = self._Thread__target
            args = self._Thread__args
            kwargs = self._Thread__kwargs
        if not target:
            return
        bokeh_server = None
        try:
            bokeh_server = target(*args, **kwargs)
        finally:
            # Stop any Server the target returned, then drop the
            # references so the thread does not pin them alive.
            if isinstance(bokeh_server, Server):
                bokeh_server.stop()
            if hasattr(self, '_target'):
                del self._target, self._args, self._kwargs
            else:
                del self._Thread__target, self._Thread__args, self._Thread__kwargs

    def stop(self):
        self._stop_event.set()

    @property
    def stopped(self):
        return self._stop_event.is_set()
| 34.518717 | 97 | 0.609992 | from __future__ import absolute_import, division, unicode_literals
import os
import signal
import sys
import threading
import uuid
from contextlib import contextmanager
from functools import partial
from types import FunctionType
from bokeh.document.events import ModelChangedEvent
from bokeh.server.server import Server
from tornado.websocket import WebSocketHandler
from tornado.web import RequestHandler, StaticFileHandler
from tornado.wsgi import WSGIContainer
from .state import state
INDEX_HTML = os.path.join(os.path.dirname(__file__), '..', '_templates', "index.html")
def _origin_url(url):
if url.startswith("http"):
url = url.split("//")[1]
return url
def _server_url(url, port):
if url.startswith("http"):
return '%s:%d%s' % (url.rsplit(':', 1)[0], port, "/")
else:
return 'http://%s:%d%s' % (url.split(':')[0], port, "/")
@contextmanager
def set_curdoc(doc):
state.curdoc = doc
yield
state.curdoc = None
def _eval_panel(panel, server_id, title, location, doc):
from ..template import BaseTemplate
from ..pane import panel as as_panel
with set_curdoc(doc):
if isinstance(panel, FunctionType):
panel = panel()
if isinstance(panel, BaseTemplate):
doc = panel._modify_doc(server_id, title, doc, location)
else:
doc = as_panel(panel)._modify_doc(server_id, title, doc, location)
return doc
@contextmanager
def unlocked():
curdoc = state.curdoc
if curdoc is None or curdoc.session_context is None:
yield
return
connections = curdoc.session_context.session._subscribed_connections
hold = curdoc._hold
if hold:
old_events = list(curdoc._held_events)
else:
old_events = []
curdoc.hold()
try:
yield
events = []
for conn in connections:
socket = conn._socket
if hasattr(socket, 'write_lock') and socket.write_lock._block._value == 0:
state._locks.add(socket)
locked = socket in state._locks
for event in curdoc._held_events:
if (isinstance(event, ModelChangedEvent) and event not in old_events
and hasattr(socket, 'write_message') and not locked):
msg = conn.protocol.create('PATCH-DOC', [event])
WebSocketHandler.write_message(socket, msg.header_json)
WebSocketHandler.write_message(socket, msg.metadata_json)
WebSocketHandler.write_message(socket, msg.content_json)
for header, payload in msg._buffers:
WebSocketHandler.write_message(socket, header)
WebSocketHandler.write_message(socket, payload, binary=True)
elif event not in events:
events.append(event)
curdoc._held_events = events
finally:
if not hold:
curdoc.unhold()
def serve(panels, port=0, address=None, websocket_origin=None, loop=None,
show=True, start=True, title=None, verbose=True, location=True,
**kwargs):
return get_server(panels, port, address, websocket_origin, loop,
show, start, title, verbose, location, **kwargs)
class ProxyFallbackHandler(RequestHandler):
def initialize(self, fallback, proxy=None):
self.fallback = fallback
self.proxy = proxy
def prepare(self):
if self.proxy:
self.request.path = self.request.path.replace(self.proxy, '')
self.fallback(self.request)
self._finished = True
self.on_finish()
def get_static_routes(static_dirs):
patterns = []
for slug, path in static_dirs.items():
if not slug.startswith('/'):
slug = '/' + slug
if slug == '/static':
raise ValueError("Static file route may not use /static "
"this is reserved for internal use.")
path = os.path.abspath(path)
if not os.path.isdir(path):
raise ValueError("Cannot serve non-existent path %s" % path)
patterns.append(
(r"%s/(.*)" % slug, StaticFileHandler, {"path": path})
)
return patterns
def get_server(panel, port=0, address=None, websocket_origin=None,
loop=None, show=False, start=False, title=None,
verbose=False, location=True, static_dirs={}, **kwargs):
from tornado.ioloop import IOLoop
server_id = kwargs.pop('server_id', uuid.uuid4().hex)
kwargs['extra_patterns'] = extra_patterns = kwargs.get('extra_patterns', [])
if isinstance(panel, dict):
apps = {}
for slug, app in panel.items():
if isinstance(title, dict):
try:
title_ = title[slug]
except KeyError:
raise KeyError(
"Keys of the title dictionnary and of the apps "
f"dictionary must match. No {slug} key found in the "
"title dictionnary.")
else:
title_ = title
slug = slug if slug.startswith('/') else '/'+slug
if 'flask' in sys.modules:
from flask import Flask
if isinstance(app, Flask):
wsgi = WSGIContainer(app)
if slug == '/':
raise ValueError('Flask apps must be served on a subpath.')
if not slug.endswith('/'):
slug += '/'
extra_patterns.append(('^'+slug+'.*', ProxyFallbackHandler,
dict(fallback=wsgi, proxy=slug)))
continue
apps[slug] = partial(_eval_panel, app, server_id, title_, location)
else:
apps = {'/': partial(_eval_panel, panel, server_id, title, location)}
extra_patterns += get_static_routes(static_dirs)
opts = dict(kwargs)
if loop:
loop.make_current()
opts['io_loop'] = loop
elif opts.get('num_procs', 1) == 1:
opts['io_loop'] = IOLoop.current()
if 'index' not in opts:
opts['index'] = INDEX_HTML
if address is not None:
opts['address'] = address
if websocket_origin:
if not isinstance(websocket_origin, list):
websocket_origin = [websocket_origin]
opts['allow_websocket_origin'] = websocket_origin
server = Server(apps, port=port, **opts)
if verbose:
address = server.address or 'localhost'
print("Launching server at http://%s:%s" % (address, server.port))
state._servers[server_id] = (server, panel, [])
if show:
def show_callback():
server.show('/')
server.io_loop.add_callback(show_callback)
def sig_exit(*args, **kwargs):
server.io_loop.add_callback_from_signal(do_stop)
def do_stop(*args, **kwargs):
server.io_loop.stop()
try:
signal.signal(signal.SIGINT, sig_exit)
except ValueError:
pass
if start:
server.start()
try:
server.io_loop.start()
except RuntimeError:
pass
return server
class StoppableThread(threading.Thread):
def __init__(self, io_loop=None, timeout=1000, **kwargs):
from tornado import ioloop
super(StoppableThread, self).__init__(**kwargs)
self._stop_event = threading.Event()
self.io_loop = io_loop
self._cb = ioloop.PeriodicCallback(self._check_stopped, timeout)
self._cb.start()
def _check_stopped(self):
if self.stopped:
self._cb.stop()
self.io_loop.stop()
def run(self):
if hasattr(self, '_target'):
target, args, kwargs = self._target, self._args, self._kwargs
else:
target, args, kwargs = self._Thread__target, self._Thread__args, self._Thread__kwargs
if not target:
return
bokeh_server = None
try:
bokeh_server = target(*args, **kwargs)
finally:
if isinstance(bokeh_server, Server):
bokeh_server.stop()
if hasattr(self, '_target'):
del self._target, self._args, self._kwargs
else:
del self._Thread__target, self._Thread__args, self._Thread__kwargs
def stop(self):
self._stop_event.set()
@property
def stopped(self):
return self._stop_event.is_set()
| true | true |
1c2fc92aa9611edafd68b1d93acc7f621dcebf02 | 3,955 | py | Python | mpa/cls/exporter.py | openvinotoolkit/model_preparation_algorithm | 8d36bf5944837b7a3d22fc2c3a4cb93423619fc2 | [
"Apache-2.0"
] | null | null | null | mpa/cls/exporter.py | openvinotoolkit/model_preparation_algorithm | 8d36bf5944837b7a3d22fc2c3a4cb93423619fc2 | [
"Apache-2.0"
] | null | null | null | mpa/cls/exporter.py | openvinotoolkit/model_preparation_algorithm | 8d36bf5944837b7a3d22fc2c3a4cb93423619fc2 | [
"Apache-2.0"
] | null | null | null | # Copyright (C) 2022 Intel Corporation
# SPDX-License-Identifier: Apache-2.0
#
import os
import torch.onnx
from functools import partial
from mmcv.runner import load_checkpoint, wrap_fp16_model
from mmcls.models import build_classifier
from mpa.registry import STAGES
from .stage import ClsStage
from mpa.utils import mo_wrapper
from mpa.utils.logger import get_logger
import numpy as np
import torch
from mmcls.datasets.pipelines import Compose
logger = get_logger()
@STAGES.register_module()
class ClsExporter(ClsStage):
    """Stage that exports a trained mmcls classifier: traces it to ONNX
    and converts the ONNX file to OpenVINO IR (.xml/.bin) via the model
    optimizer wrapper."""

    def __init__(self, **kwargs):
        super().__init__(**kwargs)

    def get_fake_input(self, cfg, orig_img_shape=(128, 128, 3)):
        # Push a zero uint8 image through the test pipeline so the dummy
        # tensor used for ONNX tracing is preprocessed exactly like real
        # inference inputs.
        pipeline = cfg.data.test.pipeline
        pipeline = Compose(pipeline)
        data = dict(img=np.zeros(orig_img_shape, dtype=np.uint8))
        data = pipeline(data)
        return data

    def get_norm_values(self, cfg):
        # Extract mean/std from the first 'Normalize' step of the test
        # pipeline; fall back to identity normalization if none is found.
        pipeline = cfg.data.test.pipeline
        mean_values = [0, 0, 0]
        scale_values = [1, 1, 1]
        for pipeline_step in pipeline:
            if pipeline_step.type == 'Normalize':
                mean_values = pipeline_step.mean
                scale_values = pipeline_step.std
                break
        return mean_values, scale_values

    def run(self, model_cfg, model_ckpt, data_cfg, **kwargs):
        """Run exporter stage
        """
        self._init_logger()
        mode = kwargs.get('mode', 'train')
        if mode not in self.mode:
            logger.warning(f'mode for this stage {mode}')
            return {}
        cfg = self.configure(model_cfg, model_ckpt, data_cfg, training=False, **kwargs)
        output_path = os.path.join(cfg.work_dir, 'export')
        onnx_path = output_path+'/model.onnx'
        os.makedirs(output_path, exist_ok=True)
        # build the model and load checkpoint
        model = build_classifier(cfg.model)
        fp16_cfg = cfg.get('fp16', None)
        if fp16_cfg is not None:
            wrap_fp16_model(model)
        logger.info('load checkpoint from ' + cfg.load_from)
        _ = load_checkpoint(model, cfg.load_from, map_location='cpu')
        if hasattr(model, 'is_export'):
            model.is_export = True
        model.eval()
        # Freeze the non-tensor forward arguments so tracing sees a
        # single-tensor signature.
        model.forward = partial(model.forward, img_metas={}, return_loss=False)
        data = self.get_fake_input(cfg)
        fake_img = data['img'].unsqueeze(0)
        try:
            torch.onnx.export(model,
                              fake_img,
                              onnx_path,
                              verbose=False,
                              export_params=True,
                              input_names=['data'],
                              output_names=['logits', 'features', 'vector'],
                              dynamic_axes={},
                              opset_version=11,
                              operator_export_type=torch.onnx.OperatorExportTypes.ONNX
                              )
            # Normalization is folded into the IR via model-optimizer args.
            mean_values, scale_values = self.get_norm_values(cfg)
            mo_args = {
                'input_model': onnx_path,
                'mean_values': mean_values,
                'scale_values': scale_values,
                'data_type': 'FP32',
                'model_name': 'model',
                'reverse_input_channels': None,
            }
            # NOTE(review): ret/msg from generate_ir are ignored; a failed
            # conversion is only caught if it raises.
            ret, msg = mo_wrapper.generate_ir(output_path, output_path, silent=True, **mo_args)
            os.remove(onnx_path)
        except Exception as ex:
            # NOTE(review): broad except loses the exception message; only
            # the type is reported back to the caller.
            return {'outputs': None, 'msg': f'exception {type(ex)}'}
        bin_file = [f for f in os.listdir(output_path) if f.endswith('.bin')][0]
        xml_file = [f for f in os.listdir(output_path) if f.endswith('.xml')][0]
        logger.info('Exporting completed')
        return {
            'outputs': {
                'bin': os.path.join(output_path, bin_file),
                'xml': os.path.join(output_path, xml_file)
            },
            'msg': ''
        }
| 34.391304 | 95 | 0.570923 |
import os
import torch.onnx
from functools import partial
from mmcv.runner import load_checkpoint, wrap_fp16_model
from mmcls.models import build_classifier
from mpa.registry import STAGES
from .stage import ClsStage
from mpa.utils import mo_wrapper
from mpa.utils.logger import get_logger
import numpy as np
import torch
from mmcls.datasets.pipelines import Compose
logger = get_logger()
@STAGES.register_module()
class ClsExporter(ClsStage):
def __init__(self, **kwargs):
super().__init__(**kwargs)
def get_fake_input(self, cfg, orig_img_shape=(128, 128, 3)):
pipeline = cfg.data.test.pipeline
pipeline = Compose(pipeline)
data = dict(img=np.zeros(orig_img_shape, dtype=np.uint8))
data = pipeline(data)
return data
def get_norm_values(self, cfg):
pipeline = cfg.data.test.pipeline
mean_values = [0, 0, 0]
scale_values = [1, 1, 1]
for pipeline_step in pipeline:
if pipeline_step.type == 'Normalize':
mean_values = pipeline_step.mean
scale_values = pipeline_step.std
break
return mean_values, scale_values
def run(self, model_cfg, model_ckpt, data_cfg, **kwargs):
self._init_logger()
mode = kwargs.get('mode', 'train')
if mode not in self.mode:
logger.warning(f'mode for this stage {mode}')
return {}
cfg = self.configure(model_cfg, model_ckpt, data_cfg, training=False, **kwargs)
output_path = os.path.join(cfg.work_dir, 'export')
onnx_path = output_path+'/model.onnx'
os.makedirs(output_path, exist_ok=True)
model = build_classifier(cfg.model)
fp16_cfg = cfg.get('fp16', None)
if fp16_cfg is not None:
wrap_fp16_model(model)
logger.info('load checkpoint from ' + cfg.load_from)
_ = load_checkpoint(model, cfg.load_from, map_location='cpu')
if hasattr(model, 'is_export'):
model.is_export = True
model.eval()
model.forward = partial(model.forward, img_metas={}, return_loss=False)
data = self.get_fake_input(cfg)
fake_img = data['img'].unsqueeze(0)
try:
torch.onnx.export(model,
fake_img,
onnx_path,
verbose=False,
export_params=True,
input_names=['data'],
output_names=['logits', 'features', 'vector'],
dynamic_axes={},
opset_version=11,
operator_export_type=torch.onnx.OperatorExportTypes.ONNX
)
mean_values, scale_values = self.get_norm_values(cfg)
mo_args = {
'input_model': onnx_path,
'mean_values': mean_values,
'scale_values': scale_values,
'data_type': 'FP32',
'model_name': 'model',
'reverse_input_channels': None,
}
ret, msg = mo_wrapper.generate_ir(output_path, output_path, silent=True, **mo_args)
os.remove(onnx_path)
except Exception as ex:
return {'outputs': None, 'msg': f'exception {type(ex)}'}
bin_file = [f for f in os.listdir(output_path) if f.endswith('.bin')][0]
xml_file = [f for f in os.listdir(output_path) if f.endswith('.xml')][0]
logger.info('Exporting completed')
return {
'outputs': {
'bin': os.path.join(output_path, bin_file),
'xml': os.path.join(output_path, xml_file)
},
'msg': ''
}
| true | true |
1c2fca169471f7b3ec70ce30effce80a1c395acd | 747 | py | Python | micropsi_core/tests/test_statuslogger.py | Doik/micropsi2 | 35ef3b48d9da255939e8e7af0e00bbcc98597602 | [
"MIT"
] | null | null | null | micropsi_core/tests/test_statuslogger.py | Doik/micropsi2 | 35ef3b48d9da255939e8e7af0e00bbcc98597602 | [
"MIT"
] | null | null | null | micropsi_core/tests/test_statuslogger.py | Doik/micropsi2 | 35ef3b48d9da255939e8e7af0e00bbcc98597602 | [
"MIT"
] | 1 | 2019-01-07T21:33:18.000Z | 2019-01-07T21:33:18.000Z |
def test_statuslogger_does_not_overwrite_children(runtime, test_nodenet):
    """Logging a status on a parent key must keep its existing children."""
    nodenet = runtime.get_nodenet(test_nodenet)
    status = nodenet.netapi.statuslogger
    status.info("Learning.Foo", status.ACTIVE, progress=(5, 23))
    status.info("Learning", status.SUCCESS, "Learning complete")
    _, tree = runtime.get_status_tree(test_nodenet)
    learning = tree['Learning']
    assert learning['level'] == "info"
    assert learning['state'] == "success"
    assert learning['msg'] == "Learning complete"
    foo = learning['children']['Foo']
    assert foo['level'] == "info"
    assert foo['state'] == "active"
    status.remove("Learning.Foo")
    _, tree = runtime.get_status_tree(test_nodenet)
    assert 'Foo' not in tree['Learning']['children']
def test_statuslogger_does_not_overwrite_children(runtime, test_nodenet):
net = runtime.get_nodenet(test_nodenet)
sl = net.netapi.statuslogger
sl.info("Learning.Foo", sl.ACTIVE, progress=(5, 23))
sl.info("Learning", sl.SUCCESS, "Learning complete")
res, tree = runtime.get_status_tree(test_nodenet)
assert tree['Learning']['level'] == "info"
assert tree['Learning']['state'] == "success"
assert tree['Learning']['msg'] == "Learning complete"
assert tree['Learning']['children']['Foo']['level'] == "info"
assert tree['Learning']['children']['Foo']['state'] == "active"
sl.remove("Learning.Foo")
res, tree = runtime.get_status_tree(test_nodenet)
assert 'Foo' not in tree['Learning']['children']
| true | true |
1c2fca5f62fd0ebc9291106c3ba7ee9313876a22 | 21,639 | py | Python | manim/scene/scene_file_writer.py | EpicEricEE/manim | 66d26380e526b44d10a405b474356acbbf1f6434 | [
"MIT"
] | 1 | 2021-04-19T18:01:55.000Z | 2021-04-19T18:01:55.000Z | manim/scene/scene_file_writer.py | EpicEricEE/manim | 66d26380e526b44d10a405b474356acbbf1f6434 | [
"MIT"
] | null | null | null | manim/scene/scene_file_writer.py | EpicEricEE/manim | 66d26380e526b44d10a405b474356acbbf1f6434 | [
"MIT"
] | 1 | 2021-03-31T20:46:51.000Z | 2021-03-31T20:46:51.000Z | """The interface between scenes and ffmpeg."""
__all__ = ["SceneFileWriter"]
import datetime
import os
import shutil
import subprocess
from pathlib import Path
from time import sleep
import numpy as np
from PIL import Image
from pydub import AudioSegment
from manim import __version__
from .. import config, logger
from ..constants import FFMPEG_BIN, GIF_FILE_EXTENSION
from ..utils.file_ops import (
add_extension_if_not_present,
add_version_before_extension,
guarantee_existence,
is_gif_format,
is_png_format,
is_webm_format,
modify_atime,
write_to_movie,
)
from ..utils.sounds import get_full_sound_file_path
class SceneFileWriter:
"""
SceneFileWriter is the object that actually writes the animations
played, into video files, using FFMPEG.
This is mostly for Manim's internal use. You will rarely, if ever,
have to use the methods for this class, unless tinkering with the very
fabric of Manim's reality.
Some useful attributes are:
"write_to_movie" (bool=False)
Whether or not to write the animations into a video file.
"movie_file_extension" (str=".mp4")
The file-type extension of the outputted video.
"partial_movie_files"
List of all the partial-movie files.
"""
    def __init__(self, renderer, scene_name, **kwargs):
        """Set up the writer for a scene.

        Parameters
        ----------
        renderer
            The renderer producing the frames to write.
        scene_name : str
            Name of the scene; used to derive the output paths.
        """
        self.renderer = renderer
        self.stream_lock = False
        # Creates image/movie/partial-movie directories as configured.
        self.init_output_directories(scene_name)
        self.init_audio()
        # Counter for numbered PNG frames written by write_frame().
        self.frame_count = 0
        # One entry per play(); entries may be None for skipped animations.
        self.partial_movie_files = []
    def init_output_directories(self, scene_name):
        """Initialise output directories.

        Notes
        -----
        The directories are read from ``config``, for example
        ``config['media_dir']``. If the target directories don't already
        exist, they will be created.
        """
        if config["dry_run"]: # in dry-run mode there is no output
            return
        if config["input_file"]:
            module_name = config.get_dir("input_file").stem
        else:
            module_name = ""
        # An explicit output file name wins unless every scene is rendered.
        if config["output_file"] and not config["write_all"]:
            default_name = config.get_dir("output_file")
        else:
            default_name = Path(scene_name)
        if config["media_dir"]:
            image_dir = guarantee_existence(
                config.get_dir("images_dir", module_name=module_name)
            )
            self.image_file_path = os.path.join(
                image_dir, add_extension_if_not_present(default_name, ".png")
            )
        if write_to_movie():
            movie_dir = guarantee_existence(
                config.get_dir("video_dir", module_name=module_name)
            )
            self.movie_file_path = os.path.join(
                movie_dir,
                add_extension_if_not_present(
                    default_name, config["movie_file_extension"]
                ),
            )
            if is_gif_format():
                self.gif_file_path = os.path.join(
                    movie_dir,
                    add_extension_if_not_present(default_name, GIF_FILE_EXTENSION),
                )
            # Per-animation clips land here before being concatenated.
            self.partial_movie_directory = guarantee_existence(
                config.get_dir(
                    "partial_movie_dir",
                    scene_name=scene_name,
                    module_name=module_name,
                )
            )
def add_partial_movie_file(self, hash_animation):
"""Adds a new partial movie file path to scene.partial_movie_files from an hash. This method will compute the path from the hash.
Parameters
----------
hash_animation : str
Hash of the animation.
"""
if not hasattr(self, "partial_movie_directory") or not write_to_movie():
return
# None has to be added to partial_movie_files to keep the right index with scene.num_plays.
# i.e if an animation is skipped, scene.num_plays is still incremented and we add an element to partial_movie_file be even with num_plays.
if hash_animation is None:
self.partial_movie_files.append(None)
return
new_partial_movie_file = os.path.join(
self.partial_movie_directory,
f"{hash_animation}{config['movie_file_extension']}",
)
self.partial_movie_files.append(new_partial_movie_file)
def get_resolution_directory(self):
"""Get the name of the resolution directory directly containing
the video file.
This method gets the name of the directory that immediately contains the
video file. This name is ``<height_in_pixels_of_video>p<frame_rate>``.
For example, if you are rendering an 854x480 px animation at 15fps,
the name of the directory that immediately contains the video, file
will be ``480p15``.
The file structure should look something like::
MEDIA_DIR
|--Tex
|--texts
|--videos
|--<name_of_file_containing_scene>
|--<height_in_pixels_of_video>p<frame_rate>
|--<scene_name>.mp4
Returns
-------
:class:`str`
The name of the directory.
"""
pixel_height = config["pixel_height"]
frame_rate = config["frame_rate"]
return f"{pixel_height}p{frame_rate}"
# Sound
def init_audio(self):
"""
Preps the writer for adding audio to the movie.
"""
self.includes_sound = False
def create_audio_segment(self):
"""
Creates an empty, silent, Audio Segment.
"""
self.audio_segment = AudioSegment.silent()
def add_audio_segment(self, new_segment, time=None, gain_to_background=None):
"""
This method adds an audio segment from an
AudioSegment type object and suitable parameters.
Parameters
----------
new_segment : AudioSegment
The audio segment to add
time : int, float, optional
the timestamp at which the
sound should be added.
gain_to_background : optional
The gain of the segment from the background.
"""
if not self.includes_sound:
self.includes_sound = True
self.create_audio_segment()
segment = self.audio_segment
curr_end = segment.duration_seconds
if time is None:
time = curr_end
if time < 0:
raise ValueError("Adding sound at timestamp < 0")
new_end = time + new_segment.duration_seconds
diff = new_end - curr_end
if diff > 0:
segment = segment.append(
AudioSegment.silent(int(np.ceil(diff * 1000))),
crossfade=0,
)
self.audio_segment = segment.overlay(
new_segment,
position=int(1000 * time),
gain_during_overlay=gain_to_background,
)
def add_sound(self, sound_file, time=None, gain=None, **kwargs):
"""
This method adds an audio segment from a sound file.
Parameters
----------
sound_file : str
The path to the sound file.
time : float or int, optional
The timestamp at which the audio should be added.
gain : optional
The gain of the given audio segment.
**kwargs
This method uses add_audio_segment, so any keyword arguments
used there can be referenced here.
"""
file_path = get_full_sound_file_path(sound_file)
new_segment = AudioSegment.from_file(file_path)
if gain:
new_segment = new_segment.apply_gain(gain)
self.add_audio_segment(new_segment, time, **kwargs)
# Writers
def begin_animation(self, allow_write=False, file_path=None):
"""
Used internally by manim to stream the animation to FFMPEG for
displaying or writing to a file.
Parameters
----------
allow_write : bool, optional
Whether or not to write to a video file.
"""
if write_to_movie() and allow_write:
self.open_movie_pipe(file_path=file_path)
def end_animation(self, allow_write=False):
"""
Internally used by Manim to stop streaming to
FFMPEG gracefully.
Parameters
----------
allow_write : bool, optional
Whether or not to write to a video file.
"""
if write_to_movie() and allow_write:
self.close_movie_pipe()
def write_frame(self, frame_or_renderer):
"""
Used internally by Manim to write a frame to
the FFMPEG input buffer.
Parameters
----------
frame : np.array
Pixel array of the frame.
"""
if config.renderer == "opengl":
renderer = frame_or_renderer
self.writing_process.stdin.write(
renderer.get_raw_frame_buffer_object_data()
)
else:
frame = frame_or_renderer
if write_to_movie():
self.writing_process.stdin.write(frame.tobytes())
if is_png_format() and not config["dry_run"]:
target_dir, extension = os.path.splitext(self.image_file_path)
if config["zero_pad"]:
Image.fromarray(frame).save(
f"{target_dir}{str(self.frame_count).zfill(config['zero_pad'])}{extension}"
)
else:
Image.fromarray(frame).save(
f"{target_dir}{self.frame_count}{extension}"
)
self.frame_count += 1
def save_final_image(self, image):
"""
The name is a misnomer. This method saves the image
passed to it as an in the default image directory.
Parameters
----------
image : np.array
The pixel array of the image to save.
"""
if config["dry_run"]:
return
if not config["output_file"]:
self.image_file_path = add_version_before_extension(self.image_file_path)
image.save(self.image_file_path)
self.print_file_ready_message(self.image_file_path)
def idle_stream(self):
"""
Doesn't write anything to the FFMPEG frame buffer.
"""
while self.stream_lock:
a = datetime.datetime.now()
# self.update_frame()
self.renderer.update_frame()
n_frames = 1
# frame = self.get_frame()
frame = self.renderer.get_frame()
# self.add_frame(*[frame] * n_frames)
self.renderer.add_frame(*[frame] * n_frames)
b = datetime.datetime.now()
time_diff = (b - a).total_seconds()
frame_duration = 1 / config["frame_rate"]
if time_diff < frame_duration:
sleep(frame_duration - time_diff)
    def finish(self, partial_movie_files=None):
        """
        Finishes writing to the FFMPEG buffer or writing images
        to output directory.
        Combines the partial movie files into the
        whole scene.
        If save_last_frame is True, saves the last
        frame in the default image directory.
        """
        if write_to_movie():
            # Terminate the pipe before concatenating the partial files.
            if hasattr(self, "writing_process"):
                self.writing_process.terminate()
            self.combine_movie_files(partial_movie_files=partial_movie_files)
            if config["flush_cache"]:
                self.flush_cache_directory()
            else:
                self.clean_cache()
        elif is_png_format() and not config["dry_run"]:
            target_dir, _ = os.path.splitext(self.image_file_path)
            logger.info("\n%i images ready at %s\n", self.frame_count, target_dir)
    def open_movie_pipe(self, file_path=None):
        """
        Used internally by Manim to initialise
        FFMPEG and begin writing to FFMPEG's input
        buffer.
        """
        if file_path is None:
            file_path = self.partial_movie_files[self.renderer.num_plays]
        self.partial_movie_file_path = file_path
        fps = config["frame_rate"]
        if fps == int(fps): # fps is integer
            fps = int(fps)
        if config.renderer == "opengl":
            width, height = self.renderer.get_pixel_shape()
        else:
            height = config["pixel_height"]
            width = config["pixel_width"]
        # Raw RGBA frames are streamed on stdin; the argument order matters
        # to the ffmpeg CLI (input options before -i, output options after).
        command = [
            FFMPEG_BIN,
            "-y", # overwrite output file if it exists
            "-f",
            "rawvideo",
            "-s",
            "%dx%d" % (width, height), # size of one frame
            "-pix_fmt",
            "rgba",
            "-r",
            str(fps), # frames per second
            "-i",
            "-", # The input comes from a pipe
            "-an", # Tells FFMPEG not to expect any audio
            "-loglevel",
            config["ffmpeg_loglevel"].lower(),
            "-metadata",
            f"comment=Rendered with Manim Community v{__version__}",
        ]
        if config.renderer == "opengl":
            # OpenGL frame buffers are bottom-up; flip vertically.
            command += ["-vf", "vflip"]
        if is_webm_format():
            command += ["-vcodec", "libvpx-vp9", "-auto-alt-ref", "0"]
        # .mov format
        elif config["transparent"]:
            command += ["-vcodec", "qtrle"]
        else:
            command += ["-vcodec", "libx264", "-pix_fmt", "yuv420p"]
        command += [file_path]
        self.writing_process = subprocess.Popen(command, stdin=subprocess.PIPE)
    def close_movie_pipe(self):
        """
        Used internally by Manim to gracefully stop writing to FFMPEG's input buffer
        """
        # Closing stdin signals EOF to ffmpeg; wait() lets it finalize the file.
        self.writing_process.stdin.close()
        self.writing_process.wait()
        # NOTE(review): the message mixes an f-string with logging's lazy
        # %(path)s substitution; output is correct but the style is unusual.
        logger.info(
            f"Animation {self.renderer.num_plays} : Partial movie file written in %(path)s",
            {"path": f"'{self.partial_movie_file_path}'"},
        )
def is_already_cached(self, hash_invocation):
"""Will check if a file named with `hash_invocation` exists.
Parameters
----------
hash_invocation : :class:`str`
The hash corresponding to an invocation to either `scene.play` or `scene.wait`.
Returns
-------
:class:`bool`
Whether the file exists.
"""
if not hasattr(self, "partial_movie_directory") or not write_to_movie():
return False
path = os.path.join(
self.partial_movie_directory,
f"{hash_invocation}{config['movie_file_extension']}",
)
return os.path.exists(path)
def combine_movie_files(self, partial_movie_files=None):
"""
Used internally by Manim to combine the separate
partial movie files that make up a Scene into a single
video file for that Scene.
"""
# Manim renders the scene as many smaller movie files which are then
# concatenated to a larger one. The reason for this is that sometimes
# video-editing is made easier when one works with the broken up scene,
# which effectively has cuts at all the places you might want. But for
# viewing the scene as a whole, one of course wants to see it as a
# single piece.
partial_movie_files = [el for el in self.partial_movie_files if el is not None]
# NOTE : Here we should do a check and raise an exception if partial
# movie file is empty. We can't, as a lot of stuff (in particular, in
# tests) use scene initialization, and this error would be raised as
# it's just an empty scene initialized.
# Write a file partial_file_list.txt containing all partial movie
# files. This is used by FFMPEG.
file_list = os.path.join(
self.partial_movie_directory, "partial_movie_file_list.txt"
)
logger.debug(
f"Partial movie files to combine ({len(partial_movie_files)} files): %(p)s",
{"p": partial_movie_files[:5]},
)
with open(file_list, "w") as fp:
fp.write("# This file is used internally by FFMPEG.\n")
for pf_path in partial_movie_files:
if os.name == "nt":
pf_path = pf_path.replace("\\", "/")
fp.write(f"file 'file:{pf_path}'\n")
movie_file_path = self.movie_file_path
commands = [
FFMPEG_BIN,
"-y", # overwrite output file if it exists
"-f",
"concat",
"-safe",
"0",
"-i",
file_list,
"-loglevel",
config["ffmpeg_loglevel"].lower(),
"-metadata",
f"comment=Rendered with Manim Community v{__version__}",
"-nostdin",
]
if write_to_movie() and not is_gif_format():
commands += ["-c", "copy", movie_file_path]
if is_gif_format():
if not config["output_file"]:
self.gif_file_path = str(
add_version_before_extension(self.gif_file_path)
)
commands += [
"-vf",
f"fps={np.clip(config['frame_rate'], 1, 50)},split[s0][s1];[s0]palettegen=stats_mode=diff[p];[s1][p]paletteuse=dither=bayer:bayer_scale=5:diff_mode=rectangle",
self.gif_file_path,
]
if not self.includes_sound:
commands.insert(-1, "-an")
combine_process = subprocess.Popen(commands)
combine_process.wait()
if self.includes_sound:
extension = config["movie_file_extension"]
sound_file_path = movie_file_path.replace(extension, ".wav")
# Makes sure sound file length will match video file
self.add_audio_segment(AudioSegment.silent(0))
self.audio_segment.export(
sound_file_path,
bitrate="312k",
)
temp_file_path = movie_file_path.replace(extension, f"_temp{extension}")
commands = [
FFMPEG_BIN,
"-i",
movie_file_path,
"-i",
sound_file_path,
"-y", # overwrite output file if it exists
"-c:v",
"copy",
"-c:a",
"aac",
"-b:a",
"320k",
# select video stream from first file
"-map",
"0:v:0",
# select audio stream from second file
"-map",
"1:a:0",
"-loglevel",
config["ffmpeg_loglevel"].lower(),
"-metadata",
f"comment=Rendered with Manim Community v{__version__}",
# "-shortest",
temp_file_path,
]
subprocess.call(commands)
shutil.move(temp_file_path, movie_file_path)
os.remove(sound_file_path)
self.print_file_ready_message(
self.gif_file_path if is_gif_format() else movie_file_path
)
if write_to_movie():
for file_path in partial_movie_files:
# We have to modify the accessed time so if we have to clean the cache we remove the one used the longest.
modify_atime(file_path)
def clean_cache(self):
"""Will clean the cache by removing the partial_movie_files used by manim the longest ago."""
cached_partial_movies = [
os.path.join(self.partial_movie_directory, file_name)
for file_name in os.listdir(self.partial_movie_directory)
if file_name != "partial_movie_file_list.txt"
]
if len(cached_partial_movies) > config["max_files_cached"]:
number_files_to_delete = (
len(cached_partial_movies) - config["max_files_cached"]
)
oldest_files_to_delete = sorted(
cached_partial_movies,
key=os.path.getatime,
)[:number_files_to_delete]
# oldest_file_path = min(cached_partial_movies, key=os.path.getatime)
for file_to_delete in oldest_files_to_delete:
os.remove(file_to_delete)
logger.info(
f"The partial movie directory is full (> {config['max_files_cached']} files). Therefore, manim has removed {number_files_to_delete} file(s) used by it the longest ago."
+ "You can change this behaviour by changing max_files_cached in config."
)
def flush_cache_directory(self):
"""Delete all the cached partial movie files"""
cached_partial_movies = [
os.path.join(self.partial_movie_directory, file_name)
for file_name in os.listdir(self.partial_movie_directory)
if file_name != "partial_movie_file_list.txt"
]
for f in cached_partial_movies:
os.remove(f)
logger.info(
f"Cache flushed. {len(cached_partial_movies)} file(s) deleted in %(par_dir)s.",
{"par_dir": self.partial_movie_directory},
)
def print_file_ready_message(self, file_path):
"""Prints the "File Ready" message to STDOUT."""
config["output_file"] = file_path
logger.info("\nFile ready at %(file_path)s\n", {"file_path": f"'{file_path}'"})
| 35.885572 | 184 | 0.575674 |
__all__ = ["SceneFileWriter"]
import datetime
import os
import shutil
import subprocess
from pathlib import Path
from time import sleep
import numpy as np
from PIL import Image
from pydub import AudioSegment
from manim import __version__
from .. import config, logger
from ..constants import FFMPEG_BIN, GIF_FILE_EXTENSION
from ..utils.file_ops import (
add_extension_if_not_present,
add_version_before_extension,
guarantee_existence,
is_gif_format,
is_png_format,
is_webm_format,
modify_atime,
write_to_movie,
)
from ..utils.sounds import get_full_sound_file_path
class SceneFileWriter:
def __init__(self, renderer, scene_name, **kwargs):
self.renderer = renderer
self.stream_lock = False
self.init_output_directories(scene_name)
self.init_audio()
self.frame_count = 0
self.partial_movie_files = []
def init_output_directories(self, scene_name):
if config["dry_run"]:
return
if config["input_file"]:
module_name = config.get_dir("input_file").stem
else:
module_name = ""
if config["output_file"] and not config["write_all"]:
default_name = config.get_dir("output_file")
else:
default_name = Path(scene_name)
if config["media_dir"]:
image_dir = guarantee_existence(
config.get_dir("images_dir", module_name=module_name)
)
self.image_file_path = os.path.join(
image_dir, add_extension_if_not_present(default_name, ".png")
)
if write_to_movie():
movie_dir = guarantee_existence(
config.get_dir("video_dir", module_name=module_name)
)
self.movie_file_path = os.path.join(
movie_dir,
add_extension_if_not_present(
default_name, config["movie_file_extension"]
),
)
if is_gif_format():
self.gif_file_path = os.path.join(
movie_dir,
add_extension_if_not_present(default_name, GIF_FILE_EXTENSION),
)
self.partial_movie_directory = guarantee_existence(
config.get_dir(
"partial_movie_dir",
scene_name=scene_name,
module_name=module_name,
)
)
def add_partial_movie_file(self, hash_animation):
if not hasattr(self, "partial_movie_directory") or not write_to_movie():
return
if hash_animation is None:
self.partial_movie_files.append(None)
return
new_partial_movie_file = os.path.join(
self.partial_movie_directory,
f"{hash_animation}{config['movie_file_extension']}",
)
self.partial_movie_files.append(new_partial_movie_file)
def get_resolution_directory(self):
pixel_height = config["pixel_height"]
frame_rate = config["frame_rate"]
return f"{pixel_height}p{frame_rate}"
def init_audio(self):
self.includes_sound = False
def create_audio_segment(self):
self.audio_segment = AudioSegment.silent()
def add_audio_segment(self, new_segment, time=None, gain_to_background=None):
if not self.includes_sound:
self.includes_sound = True
self.create_audio_segment()
segment = self.audio_segment
curr_end = segment.duration_seconds
if time is None:
time = curr_end
if time < 0:
raise ValueError("Adding sound at timestamp < 0")
new_end = time + new_segment.duration_seconds
diff = new_end - curr_end
if diff > 0:
segment = segment.append(
AudioSegment.silent(int(np.ceil(diff * 1000))),
crossfade=0,
)
self.audio_segment = segment.overlay(
new_segment,
position=int(1000 * time),
gain_during_overlay=gain_to_background,
)
def add_sound(self, sound_file, time=None, gain=None, **kwargs):
file_path = get_full_sound_file_path(sound_file)
new_segment = AudioSegment.from_file(file_path)
if gain:
new_segment = new_segment.apply_gain(gain)
self.add_audio_segment(new_segment, time, **kwargs)
def begin_animation(self, allow_write=False, file_path=None):
if write_to_movie() and allow_write:
self.open_movie_pipe(file_path=file_path)
def end_animation(self, allow_write=False):
if write_to_movie() and allow_write:
self.close_movie_pipe()
def write_frame(self, frame_or_renderer):
if config.renderer == "opengl":
renderer = frame_or_renderer
self.writing_process.stdin.write(
renderer.get_raw_frame_buffer_object_data()
)
else:
frame = frame_or_renderer
if write_to_movie():
self.writing_process.stdin.write(frame.tobytes())
if is_png_format() and not config["dry_run"]:
target_dir, extension = os.path.splitext(self.image_file_path)
if config["zero_pad"]:
Image.fromarray(frame).save(
f"{target_dir}{str(self.frame_count).zfill(config['zero_pad'])}{extension}"
)
else:
Image.fromarray(frame).save(
f"{target_dir}{self.frame_count}{extension}"
)
self.frame_count += 1
def save_final_image(self, image):
if config["dry_run"]:
return
if not config["output_file"]:
self.image_file_path = add_version_before_extension(self.image_file_path)
image.save(self.image_file_path)
self.print_file_ready_message(self.image_file_path)
def idle_stream(self):
while self.stream_lock:
a = datetime.datetime.now()
self.renderer.update_frame()
n_frames = 1
frame = self.renderer.get_frame()
self.renderer.add_frame(*[frame] * n_frames)
b = datetime.datetime.now()
time_diff = (b - a).total_seconds()
frame_duration = 1 / config["frame_rate"]
if time_diff < frame_duration:
sleep(frame_duration - time_diff)
def finish(self, partial_movie_files=None):
if write_to_movie():
if hasattr(self, "writing_process"):
self.writing_process.terminate()
self.combine_movie_files(partial_movie_files=partial_movie_files)
if config["flush_cache"]:
self.flush_cache_directory()
else:
self.clean_cache()
elif is_png_format() and not config["dry_run"]:
target_dir, _ = os.path.splitext(self.image_file_path)
logger.info("\n%i images ready at %s\n", self.frame_count, target_dir)
def open_movie_pipe(self, file_path=None):
if file_path is None:
file_path = self.partial_movie_files[self.renderer.num_plays]
self.partial_movie_file_path = file_path
fps = config["frame_rate"]
if fps == int(fps):
fps = int(fps)
if config.renderer == "opengl":
width, height = self.renderer.get_pixel_shape()
else:
height = config["pixel_height"]
width = config["pixel_width"]
command = [
FFMPEG_BIN,
"-y",
"-f",
"rawvideo",
"-s",
"%dx%d" % (width, height),
"-pix_fmt",
"rgba",
"-r",
str(fps),
"-i",
"-",
"-an",
"-loglevel",
config["ffmpeg_loglevel"].lower(),
"-metadata",
f"comment=Rendered with Manim Community v{__version__}",
]
if config.renderer == "opengl":
command += ["-vf", "vflip"]
if is_webm_format():
command += ["-vcodec", "libvpx-vp9", "-auto-alt-ref", "0"]
elif config["transparent"]:
command += ["-vcodec", "qtrle"]
else:
command += ["-vcodec", "libx264", "-pix_fmt", "yuv420p"]
command += [file_path]
self.writing_process = subprocess.Popen(command, stdin=subprocess.PIPE)
def close_movie_pipe(self):
self.writing_process.stdin.close()
self.writing_process.wait()
logger.info(
f"Animation {self.renderer.num_plays} : Partial movie file written in %(path)s",
{"path": f"'{self.partial_movie_file_path}'"},
)
def is_already_cached(self, hash_invocation):
if not hasattr(self, "partial_movie_directory") or not write_to_movie():
return False
path = os.path.join(
self.partial_movie_directory,
f"{hash_invocation}{config['movie_file_extension']}",
)
return os.path.exists(path)
def combine_movie_files(self, partial_movie_files=None):
partial_movie_files = [el for el in self.partial_movie_files if el is not None]
# tests) use scene initialization, and this error would be raised as
# it's just an empty scene initialized.
file_list = os.path.join(
self.partial_movie_directory, "partial_movie_file_list.txt"
)
logger.debug(
f"Partial movie files to combine ({len(partial_movie_files)} files): %(p)s",
{"p": partial_movie_files[:5]},
)
with open(file_list, "w") as fp:
fp.write("# This file is used internally by FFMPEG.\n")
for pf_path in partial_movie_files:
if os.name == "nt":
pf_path = pf_path.replace("\\", "/")
fp.write(f"file 'file:{pf_path}'\n")
movie_file_path = self.movie_file_path
commands = [
FFMPEG_BIN,
"-y",
"-f",
"concat",
"-safe",
"0",
"-i",
file_list,
"-loglevel",
config["ffmpeg_loglevel"].lower(),
"-metadata",
f"comment=Rendered with Manim Community v{__version__}",
"-nostdin",
]
if write_to_movie() and not is_gif_format():
commands += ["-c", "copy", movie_file_path]
if is_gif_format():
if not config["output_file"]:
self.gif_file_path = str(
add_version_before_extension(self.gif_file_path)
)
commands += [
"-vf",
f"fps={np.clip(config['frame_rate'], 1, 50)},split[s0][s1];[s0]palettegen=stats_mode=diff[p];[s1][p]paletteuse=dither=bayer:bayer_scale=5:diff_mode=rectangle",
self.gif_file_path,
]
if not self.includes_sound:
commands.insert(-1, "-an")
combine_process = subprocess.Popen(commands)
combine_process.wait()
if self.includes_sound:
extension = config["movie_file_extension"]
sound_file_path = movie_file_path.replace(extension, ".wav")
self.add_audio_segment(AudioSegment.silent(0))
self.audio_segment.export(
sound_file_path,
bitrate="312k",
)
temp_file_path = movie_file_path.replace(extension, f"_temp{extension}")
commands = [
FFMPEG_BIN,
"-i",
movie_file_path,
"-i",
sound_file_path,
"-y",
"-c:v",
"copy",
"-c:a",
"aac",
"-b:a",
"320k",
"-map",
"0:v:0",
"-map",
"1:a:0",
"-loglevel",
config["ffmpeg_loglevel"].lower(),
"-metadata",
f"comment=Rendered with Manim Community v{__version__}",
temp_file_path,
]
subprocess.call(commands)
shutil.move(temp_file_path, movie_file_path)
os.remove(sound_file_path)
self.print_file_ready_message(
self.gif_file_path if is_gif_format() else movie_file_path
)
if write_to_movie():
for file_path in partial_movie_files:
modify_atime(file_path)
def clean_cache(self):
cached_partial_movies = [
os.path.join(self.partial_movie_directory, file_name)
for file_name in os.listdir(self.partial_movie_directory)
if file_name != "partial_movie_file_list.txt"
]
if len(cached_partial_movies) > config["max_files_cached"]:
number_files_to_delete = (
len(cached_partial_movies) - config["max_files_cached"]
)
oldest_files_to_delete = sorted(
cached_partial_movies,
key=os.path.getatime,
)[:number_files_to_delete]
for file_to_delete in oldest_files_to_delete:
os.remove(file_to_delete)
logger.info(
f"The partial movie directory is full (> {config['max_files_cached']} files). Therefore, manim has removed {number_files_to_delete} file(s) used by it the longest ago."
+ "You can change this behaviour by changing max_files_cached in config."
)
def flush_cache_directory(self):
cached_partial_movies = [
os.path.join(self.partial_movie_directory, file_name)
for file_name in os.listdir(self.partial_movie_directory)
if file_name != "partial_movie_file_list.txt"
]
for f in cached_partial_movies:
os.remove(f)
logger.info(
f"Cache flushed. {len(cached_partial_movies)} file(s) deleted in %(par_dir)s.",
{"par_dir": self.partial_movie_directory},
)
def print_file_ready_message(self, file_path):
config["output_file"] = file_path
logger.info("\nFile ready at %(file_path)s\n", {"file_path": f"'{file_path}'"})
| true | true |
1c2fcab9e0c5719bddabf9a517978dbf7b670dfb | 5,708 | py | Python | openGaussBase/testcase/SQL/DML/copy/Opengauss_Function_DML_Copy_Case0113.py | opengauss-mirror/Yat | aef107a8304b94e5d99b4f1f36eb46755eb8919e | [
"MulanPSL-1.0"
] | null | null | null | openGaussBase/testcase/SQL/DML/copy/Opengauss_Function_DML_Copy_Case0113.py | opengauss-mirror/Yat | aef107a8304b94e5d99b4f1f36eb46755eb8919e | [
"MulanPSL-1.0"
] | null | null | null | openGaussBase/testcase/SQL/DML/copy/Opengauss_Function_DML_Copy_Case0113.py | opengauss-mirror/Yat | aef107a8304b94e5d99b4f1f36eb46755eb8919e | [
"MulanPSL-1.0"
] | null | null | null | """
Copyright (c) 2022 Huawei Technologies Co.,Ltd.
openGauss is licensed under Mulan PSL v2.
You can use this software according to the terms and conditions of the Mulan PSL v2.
You may obtain a copy of Mulan PSL v2 at:
http://license.coscl.org.cn/MulanPSL2
THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND,
EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT,
MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE.
See the Mulan PSL v2 for more details.
"""
"""
Case Type : 拷贝数据
Case Name : copy from模式下指定smalldatetime_format参数,不带时区
Description :
1.创建测试表并插入数据
2.构造数据文件
3.copy from模式下指定smalldatetime_format不合法的smalldatetime格式
4.copy from模式,binary格式下指定smalldatetime_format
5.copy from模式下指定smalldatetime_format合法的smalldatetime格式
6.清理环境
Expect :
1.创建测试表并插入数据成功
2.构造数据文件成功
3.copy失败
4.copy失败
5.copy成功
6.清理环境成功
History :
"""
import unittest
import os
from yat.test import Node
from yat.test import macro
from testcase.utils.Common import Common
from testcase.utils.CommonSH import CommonSH
from testcase.utils.Logger import Logger
from testcase.utils.Constant import Constant
class CopyFile(unittest.TestCase):
def setUp(self):
self.log = Logger()
self.log.info(f'-----{os.path.basename(__file__)} start-----')
self.pri_sh = CommonSH('PrimaryDbUser')
self.pri_user = Node(node='PrimaryDbUser')
self.common = Common()
self.Constant = Constant()
self.tb_name = 't_copy_113'
self.file_name = 'testcopy113.dat'
self.copy_dir_path = os.path.join(macro.DB_INSTANCE_PATH,
'pg_copydir')
def test_copy_file(self):
text = '-----step1:创建测试表并对测试表插入数据' \
'Expect:创建测试表并插入数据成功-----'
self.log.info(text)
sql_cmd = f"drop table if exists {self.tb_name};" \
f"create table {self.tb_name} (sk integer,id varchar(16)," \
f"name varchar(20),create_smalldatetime smalldatetime);" \
f"insert into {self.tb_name} values " \
f"(001,'sk1','tt1','2021-11-01 01:02:30');" \
f"insert into {self.tb_name} values " \
f"(002,'sk2','tt2','2022-01-23 02:02:30');" \
f"insert into {self.tb_name} values" \
f" (003,'sk3','tt3','2000-12-23 03:02:30');"
self.log.info(sql_cmd)
sql_res = self.pri_sh.execut_db_sql(sql_cmd)
self.log.info(sql_res)
self.assertIn(self.Constant.CREATE_TABLE_SUCCESS, sql_res,
'执行失败:' + text)
self.assertIn(self.Constant.INSERT_SUCCESS_MSG, sql_res,
'执行失败:' + text)
self.assertNotIn(self.Constant.SQL_WRONG_MSG[1], sql_res,
'执行失败:' + text)
text = '-----step2:构造数据文件 Expect:构造数据文件成功-----'
self.log.info(text)
excute_cmd = f'''mkdir {self.copy_dir_path};
touch {os.path.join(self.copy_dir_path, self.file_name)};'''
self.log.info(excute_cmd)
msg = self.common.get_sh_result(self.pri_user, excute_cmd)
self.log.info(msg)
self.assertEqual(len(msg), 0, '执行失败:' + text)
sql_cmd = self.pri_sh.execut_db_sql(
f"copy {self.tb_name} to '"
f"{os.path.join(self.copy_dir_path, self.file_name)}';")
self.assertIn('COPY 3', sql_cmd, '执行失败:' + text)
text = '-----step3:copy from模式下指定smalldatetime_format不合法的' \
'smalldatetime格式 Expect:copy失败-----'
self.log.info(text)
sql_cmd = f"copy {self.tb_name} from '" \
f"{os.path.join(self.copy_dir_path, self.file_name)}' " \
f"with(format 'text',smalldatetime_format 'YYYY-M-D HH:MI:SS');"
self.log.info(sql_cmd)
sql_res = self.pri_sh.execut_db_sql(sql_cmd)
self.log.info(sql_res)
self.assertIn('invalid data for match in format string', sql_res,
'执行失败:' + text)
text = '-----step4:copy from模式,binary格式下指定smalldatetime_format' \
'Expect:copy失败-----'
self.log.info(text)
sql_cmd = f"copy {self.tb_name} from '" \
f"{os.path.join(self.copy_dir_path, self.file_name)}'" \
f"with(format 'binary',smalldatetime_format " \
f"'YYYY-MM-DD HH:MI:SS');"
self.log.info(sql_cmd)
sql_res = self.pri_sh.execut_db_sql(sql_cmd)
self.log.info(sql_res)
self.assertIn('compatibility options in BINARY mode', sql_res,
'执行失败:' + text)
text = '-----step5:copy from模式下指定smalldatetime_format合法的' \
'smalldatetime格式 Expect:copy成功-----'
self.log.info(text)
sql_cmd = f"copy {self.tb_name} from '" \
f"{os.path.join(self.copy_dir_path, self.file_name)}'" \
f"with(format 'text',smalldatetime_format 'YYYY-MM-DD HH:MI:SS');"
self.log.info(sql_cmd)
sql_res = self.pri_sh.execut_db_sql(sql_cmd)
self.log.info(sql_res)
self.assertIn('COPY 3', sql_res, '执行失败:' + text)
def tearDown(self):
text = '-----step6:清理环境 Expect:清理环境成功-----'
self.log.info(text)
sql_cmd = self.pri_sh.execut_db_sql(
f"drop table if exists {self.tb_name};")
self.log.info(sql_cmd)
excute_cmd = f'''rm -rf {self.copy_dir_path}'''
self.log.info(excute_cmd)
msg = self.common.get_sh_result(self.pri_user, excute_cmd)
self.log.info(msg)
self.assertEqual(len(msg), 0, '执行失败:' + text)
self.assertIn(self.Constant.TABLE_DROP_SUCCESS, sql_cmd)
self.log.info(f'-----{os.path.basename(__file__)} end-----')
| 39.638889 | 84 | 0.61405 |
import unittest
import os
from yat.test import Node
from yat.test import macro
from testcase.utils.Common import Common
from testcase.utils.CommonSH import CommonSH
from testcase.utils.Logger import Logger
from testcase.utils.Constant import Constant
class CopyFile(unittest.TestCase):
def setUp(self):
self.log = Logger()
self.log.info(f'-----{os.path.basename(__file__)} start-----')
self.pri_sh = CommonSH('PrimaryDbUser')
self.pri_user = Node(node='PrimaryDbUser')
self.common = Common()
self.Constant = Constant()
self.tb_name = 't_copy_113'
self.file_name = 'testcopy113.dat'
self.copy_dir_path = os.path.join(macro.DB_INSTANCE_PATH,
'pg_copydir')
def test_copy_file(self):
text = '-----step1:创建测试表并对测试表插入数据' \
'Expect:创建测试表并插入数据成功-----'
self.log.info(text)
sql_cmd = f"drop table if exists {self.tb_name};" \
f"create table {self.tb_name} (sk integer,id varchar(16)," \
f"name varchar(20),create_smalldatetime smalldatetime);" \
f"insert into {self.tb_name} values " \
f"(001,'sk1','tt1','2021-11-01 01:02:30');" \
f"insert into {self.tb_name} values " \
f"(002,'sk2','tt2','2022-01-23 02:02:30');" \
f"insert into {self.tb_name} values" \
f" (003,'sk3','tt3','2000-12-23 03:02:30');"
self.log.info(sql_cmd)
sql_res = self.pri_sh.execut_db_sql(sql_cmd)
self.log.info(sql_res)
self.assertIn(self.Constant.CREATE_TABLE_SUCCESS, sql_res,
'执行失败:' + text)
self.assertIn(self.Constant.INSERT_SUCCESS_MSG, sql_res,
'执行失败:' + text)
self.assertNotIn(self.Constant.SQL_WRONG_MSG[1], sql_res,
'执行失败:' + text)
text = '-----step2:构造数据文件 Expect:构造数据文件成功-----'
self.log.info(text)
excute_cmd = f'''mkdir {self.copy_dir_path};
touch {os.path.join(self.copy_dir_path, self.file_name)};'''
self.log.info(excute_cmd)
msg = self.common.get_sh_result(self.pri_user, excute_cmd)
self.log.info(msg)
self.assertEqual(len(msg), 0, '执行失败:' + text)
sql_cmd = self.pri_sh.execut_db_sql(
f"copy {self.tb_name} to '"
f"{os.path.join(self.copy_dir_path, self.file_name)}';")
self.assertIn('COPY 3', sql_cmd, '执行失败:' + text)
text = '-----step3:copy from模式下指定smalldatetime_format不合法的' \
'smalldatetime格式 Expect:copy失败-----'
self.log.info(text)
sql_cmd = f"copy {self.tb_name} from '" \
f"{os.path.join(self.copy_dir_path, self.file_name)}' " \
f"with(format 'text',smalldatetime_format 'YYYY-M-D HH:MI:SS');"
self.log.info(sql_cmd)
sql_res = self.pri_sh.execut_db_sql(sql_cmd)
self.log.info(sql_res)
self.assertIn('invalid data for match in format string', sql_res,
'执行失败:' + text)
text = '-----step4:copy from模式,binary格式下指定smalldatetime_format' \
'Expect:copy失败-----'
self.log.info(text)
sql_cmd = f"copy {self.tb_name} from '" \
f"{os.path.join(self.copy_dir_path, self.file_name)}'" \
f"with(format 'binary',smalldatetime_format " \
f"'YYYY-MM-DD HH:MI:SS');"
self.log.info(sql_cmd)
sql_res = self.pri_sh.execut_db_sql(sql_cmd)
self.log.info(sql_res)
self.assertIn('compatibility options in BINARY mode', sql_res,
'执行失败:' + text)
text = '-----step5:copy from模式下指定smalldatetime_format合法的' \
'smalldatetime格式 Expect:copy成功-----'
self.log.info(text)
sql_cmd = f"copy {self.tb_name} from '" \
f"{os.path.join(self.copy_dir_path, self.file_name)}'" \
f"with(format 'text',smalldatetime_format 'YYYY-MM-DD HH:MI:SS');"
self.log.info(sql_cmd)
sql_res = self.pri_sh.execut_db_sql(sql_cmd)
self.log.info(sql_res)
self.assertIn('COPY 3', sql_res, '执行失败:' + text)
def tearDown(self):
text = '-----step6:清理环境 Expect:清理环境成功-----'
self.log.info(text)
sql_cmd = self.pri_sh.execut_db_sql(
f"drop table if exists {self.tb_name};")
self.log.info(sql_cmd)
excute_cmd = f'''rm -rf {self.copy_dir_path}'''
self.log.info(excute_cmd)
msg = self.common.get_sh_result(self.pri_user, excute_cmd)
self.log.info(msg)
self.assertEqual(len(msg), 0, '执行失败:' + text)
self.assertIn(self.Constant.TABLE_DROP_SUCCESS, sql_cmd)
self.log.info(f'-----{os.path.basename(__file__)} end-----')
| true | true |
1c2fcaee24f6b4fdf6f998f4d35d716b58107bbf | 356 | py | Python | src/admin_utils/brief.py | ionelmc/django-admin-utils | d1c1d64a6d97f3589746c06352b21500d234209d | [
"BSD-2-Clause"
] | 14 | 2015-02-05T04:18:37.000Z | 2022-01-29T08:35:23.000Z | src/admin_utils/brief.py | mattcaldwell/django-admin-utils | a1bf8638111fa90dada9d39e515972668630f7be | [
"BSD-2-Clause"
] | null | null | null | src/admin_utils/brief.py | mattcaldwell/django-admin-utils | a1bf8638111fa90dada9d39e515972668630f7be | [
"BSD-2-Clause"
] | 3 | 2015-03-30T18:44:26.000Z | 2021-01-05T18:49:03.000Z | from django.contrib import admin
def register(model, site=admin.site):
def decorator(klass):
site.register(model, klass)
return klass
return decorator
def inline(model, klass=admin.TabularInline, **options):
return type(
"%sInlineAdmin" % model.__name__,
(klass,),
dict(model=model, **options)
)
| 20.941176 | 56 | 0.63764 | from django.contrib import admin
def register(model, site=admin.site):
def decorator(klass):
site.register(model, klass)
return klass
return decorator
def inline(model, klass=admin.TabularInline, **options):
return type(
"%sInlineAdmin" % model.__name__,
(klass,),
dict(model=model, **options)
)
| true | true |
1c2fcb490335c2e2fc92adb3893eecaaa49d33f2 | 1,436 | py | Python | pytext/loss/tests/ctc_loss_test.py | dmitryvinn/pytext | 43373462d1b9bada3ba02072aed78338d3bb3a12 | [
"BSD-3-Clause"
] | 6,199 | 2018-12-13T15:34:51.000Z | 2022-03-26T04:08:58.000Z | pytext/loss/tests/ctc_loss_test.py | dmitryvinn/pytext | 43373462d1b9bada3ba02072aed78338d3bb3a12 | [
"BSD-3-Clause"
] | 1,356 | 2018-12-13T15:50:33.000Z | 2022-03-03T20:45:58.000Z | pytext/loss/tests/ctc_loss_test.py | dmitryvinn/pytext | 43373462d1b9bada3ba02072aed78338d3bb3a12 | [
"BSD-3-Clause"
] | 842 | 2018-12-13T15:35:13.000Z | 2022-03-23T13:27:00.000Z | #!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
import unittest
import torch
import torch.nn.functional as F
from pytext.loss.loss import CTCLoss
class CTCLossTest(unittest.TestCase):
def test_ctc_loss(self):
torch.manual_seed(0)
N = 16 # Batch size
T = 50 # Input sequence length
C = 20 # Number of classes (including blank)
S = 30 # Target sequence length of longest target in batch (padding length)
S_min = 10 # Minimum target length (only for testing)
logits = torch.randn(N, T, C)
targets = torch.randint(1, C, (N, S), dtype=torch.long)
input_lengths = torch.full((N,), T, dtype=torch.long)
target_lengths = torch.randint(S_min, S, (N,), dtype=torch.long)
config = CTCLoss.Config()
config.blank = 0 # Needs to be set to 0 for CuDNN support.
ctc_loss_fn = CTCLoss(config=config)
ctc_loss_val = ctc_loss_fn(
logits,
targets,
input_lengths,
target_lengths,
)
# PyTorch CTC loss
log_probs = logits.permute(1, 0, 2).log_softmax(
2
) # permute to conform to CTC loss input tensor (T,N,C) in PyTorch.
lib_ctc_loss_val = F.ctc_loss(log_probs, targets, input_lengths, target_lengths)
self.assertAlmostEqual(ctc_loss_val.item(), lib_ctc_loss_val.item())
| 32.636364 | 88 | 0.629526 |
import unittest
import torch
import torch.nn.functional as F
from pytext.loss.loss import CTCLoss
class CTCLossTest(unittest.TestCase):
def test_ctc_loss(self):
torch.manual_seed(0)
N = 16
T = 50
C = 20
S = 30
S_min = 10
logits = torch.randn(N, T, C)
targets = torch.randint(1, C, (N, S), dtype=torch.long)
input_lengths = torch.full((N,), T, dtype=torch.long)
target_lengths = torch.randint(S_min, S, (N,), dtype=torch.long)
config = CTCLoss.Config()
config.blank = 0
ctc_loss_fn = CTCLoss(config=config)
ctc_loss_val = ctc_loss_fn(
logits,
targets,
input_lengths,
target_lengths,
)
log_probs = logits.permute(1, 0, 2).log_softmax(
2
)
lib_ctc_loss_val = F.ctc_loss(log_probs, targets, input_lengths, target_lengths)
self.assertAlmostEqual(ctc_loss_val.item(), lib_ctc_loss_val.item())
| true | true |
1c2fcf462232fd130a11832b921637407dc6a8a4 | 14,515 | py | Python | core/common.py | tangb/cleep-desktop | 7e333b0ce8445fad86216c4b51b1ade8c21695fd | [
"MIT"
] | 2 | 2020-07-31T13:24:05.000Z | 2022-03-10T08:44:06.000Z | core/common.py | tangb/cleep-desktop | 7e333b0ce8445fad86216c4b51b1ade8c21695fd | [
"MIT"
] | 6 | 2020-04-09T16:44:28.000Z | 2022-02-22T11:26:24.000Z | core/common.py | tangb/cleep-desktop | 7e333b0ce8445fad86216c4b51b1ade8c21695fd | [
"MIT"
] | null | null | null | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
This file shares some constants and classes
"""
from core.exception import InvalidMessage
import copy
__all__ = ['CORE_MODULES', 'CATEGORIES', 'PeerInfos',
'ExecutionStep', 'MessageResponse', 'MessageRequest']
"""
CONSTANTS
"""
CORE_MODULES = [
'system',
'update',
'audio',
'network',
'cleepbus',
'parameters'
]
class CATEGORIES(object):
"""
Cleep application categories
"""
#generic application
APPLICATION = 'APPLICATION'
#mobile application for car, bike, hiking...
MOBILE = 'MOBILE'
#application to configure and use hardware (soundcard, display...)
DRIVER = 'DRIVER'
#home automation application (shutter, light...)
HOMEAUTOMATION = 'HOMEAUTOMATION'
#media application (music player, video player...)
MEDIA = 'MEDIA'
#application based on online service (sms broker, weather provider...)
SERVICE = 'SERVICE'
ALL = ['APPLICATION', 'MOBILE', 'DRIVER', 'HOMEAUTOMATION', 'MEDIA', 'SERVICE']
class ExecutionStep(object):
"""
Cleep execution steps
"""
#boot step (init logger, brokers...)
BOOT = 0
#init modules (constructor)
INIT = 1
#configure modules (_configure)
CONFIG = 2
#application and all modules are running
RUN = 3
#stopping cleep
STOP = 4
def __init__(self):
self.step = self.BOOT
class PeerInfos():
"""
Stores peer informations
"""
def __init__(self,
uuid=None,
ident=None,
hostname=None,
ip=None,
port=80,
ssl=False,
macs=None,
cleepdesktop=False,
extra={},
):
"""
Constructor
Args:
uuid (string): peer uuid provided by cleep
ident (string): peer identifier provided by external bus
hostname (string): peer hostname
ip (string): peer ip
port (int): peer access port
ssl (bool): peer has ssl enabled
macs (list): list of macs addresses
cleepdesktop (bool): is cleepdesktop peer
extra (dict): extra peer informations (about hardware...)
Note:
Uuid is mandatory because device can change identifier after each connection.
Id is the identifier provided by your external bus implementation.
Hostname is mandatory because it is used to display user friendly peer name
Mac addresses are mandatory because they are used to identify a peer that has been reinstalled (and
has lost its previous uuid)
"""
self.uuid = uuid
self.ident = ident
self.hostname = hostname
self.ip = ip
self.port = port
self.ssl = ssl
self.macs = macs
self.cleepdesktop = cleepdesktop
self.online = False
self.extra = extra
def to_dict(self, with_extra=False):
"""
Return peer infos as dict
Args:
with_extra (bool): add extra data
Returns:
dict: peer infos
"""
out = {
'uuid': self.uuid,
'ident': self.ident,
'hostname': self.hostname,
'ip': self.ip,
'port': self.port,
'ssl': self.ssl,
'macs': self.macs,
'cleepdesktop': self.cleepdesktop,
'online': self.online,
'extra': self.extra,
}
with_extra and out.update({'extra': self.extra})
return out
def __str__(self):
"""
To string method
Returns:
string: peer infos as string
"""
return 'PeerInfos(uuid:%s, ident:%s, hostname:%s, ip:%s port:%s, ssl:%s, macs:%s, cleepdesktop:%s, online:%s, extra:%s)' % (
self.uuid,
self.ident,
self.hostname,
self.ip,
self.port,
self.ssl,
self.macs,
self.cleepdesktop,
self.online,
self.extra,
)
def fill_from_dict(self, peer_infos):
"""
Fill infos from dict
Args:
peer_infos (dict): peer informations
"""
if not isinstance(peer_infos, dict):
raise Exception('Parameter "peer_infos" must be a dict')
self.uuid = peer_infos.get('uuid', None)
self.ident = peer_infos.get('ident', None)
self.hostname = peer_infos.get('hostname', None)
self.ip = peer_infos.get('ip', None)
self.port = peer_infos.get('port', None)
self.ssl = peer_infos.get('ssl', False)
self.macs = peer_infos.get('macs', None)
self.cleepdesktop = peer_infos.get('cleepdesktop', False)
self.extra = copy.deepcopy(peer_infos.get('extra', {}))
class MessageResponse(object):
"""
Object that holds message response
A response is composed of:
* an error flag: True if error, False otherwise
* a message: a message about request
* some data: data returned by the request
"""
def __init__(self, error=False, message='', data=None, broadcast=False):
"""
Constructor
Args:
error (bool): error flag (default False)
message (string): response message (default empty string)
data (any): response data (default None)
broadcast (bool): response comes from broadcast (default False)
"""
self.error = error
self.message = message
self.data = data
self.broadcast = broadcast
def __str__(self):
"""
Stringify
"""
return 'MessageResponse(error:%r, message:"%s", data:%s, broadcast:%r)' % (
self.error,
self.message,
str(self.data),
self.broadcast
)
def to_dict(self):
"""
Return message response
"""
return {'error':self.error, 'message':self.message, 'data':self.data}
def fill_from_response(self, response):
"""
Fill from other response
Args:
response (MessageResponse): message response instance
"""
if not isinstance(response, MessageResponse):
raise Exception('Parameter "response" must be a MessageResponse instance')
self.error = response.error
self.message = response.message
self.data = copy.deepcopy(response.data)
self.broadcast = response.broadcast
def fill_from_dict(self, response):
"""
Fill from dict
Args:
response (dict): response as dict
"""
if not isinstance(response, dict):
raise Exception('Parameter "response" must be a dict')
self.error = response.get('error', False)
self.broadcast = response.get('broadcast', False)
self.message = response.get('message', '')
self.data = response.get('data', None)
class MessageRequest(object):
"""
Object that holds message request
A message request is composed of:
* in case of a command:
* a command name
* command parameters
* the command sender
* in case of an event:
* an event name
* event parameters
* propagate flag to say if event can be propagated out of the device
* a device id
* a startup flag that indicates this event was sent during cleep startup
Attribute peer_infos is filled when message comes from oustide. This field must also be filled when
message is intented to be sent to outside.
Members:
command (string): command name
event (string): event name
propagate (bool): True if event can be propagated out of the device [event only]
params (dict): list of event or command parameters
to (string): message module recipient
sender (string): message sender [command only]
device_id (string): internal virtual device identifier [event only]
peer_infos (PeerInfos): peer informations. Must be filled if message comes from outside the device
Note:
A message cannot be a command and an event, priority to command if both are specified.
"""
def __init__(self, command=None, event=None, params={}, to=None):
"""
Constructor
Args:
command (string): request command
event (string): request event
params (dict): message parameter if any
to (string): message recipient if any
"""
self.command = command
self.event = event
self.params = params
self.to = to
self.propagate = False
self.sender = None
self.device_id = None
self.peer_infos = None
self.command_uuid = None
self.timeout = None
def __str__(self):
"""
Stringify function
"""
if self.command:
return 'MessageRequest(command:%s, params:%s, to:%s, sender:%s, device_id:%s, peer_infos:%s, command_uuid:%s, timeout:%s)' % (
self.command,
str(self.params),
self.to,
self.sender,
self.device_id,
self.peer_infos.to_dict() if self.peer_infos else None,
self.command_uuid,
self.timeout,
)
elif self.event:
return 'MessageRequest(event:%s, propagate:%s, params:%s, to:%s, device_id:%s, peer_infos:%s, command_uuid:%s)' % (
self.event,
self.propagate,
str(self.params),
self.to,
self.device_id,
self.peer_infos.to_dict() if self.peer_infos else None,
self.command_uuid,
)
return 'MessageRequest(Invalid message)'
def is_broadcast(self):
"""
Return broadcast status
Returns:
bool: True if the request is broadcast
"""
return True if self.to is None else False
def is_command(self):
"""
Return true if message is a command. If not it is an event
Returns:
bool: True if message is a command, otherwise it is an event
"""
return True if self.command else False
def is_external_event(self):
"""
Return True if event comes from external device
Returns:
bool: True if event comes from external device
"""
return True if self.peer_infos is not None else False
def to_dict(self, startup=False, external_sender=None):
"""
Convert message request to dict object
Params:
startup (bool): True if the message is startup message
external_sender (string): specify module name that handles message from external bus
Raise:
InvalidMessage if message is not valid
"""
if self.command and not self.peer_infos:
# internal command
return {
'command': self.command,
'params': self.params,
'to': self.to,
'sender': self.sender,
'broadcast': self.is_broadcast(),
}
elif self.event and not self.peer_infos:
# internal event
return {
'event': self.event,
'to': self.to,
'params': self.params,
'startup': startup,
'device_id': self.device_id,
'sender': self.sender,
}
elif self.command and self.peer_infos:
# external command
return {
'command': self.command,
'params': self.params,
'to': self.to,
'sender': external_sender or self.sender,
'broadcast': self.is_broadcast(),
'peer_infos': self.peer_infos.to_dict(),
'command_uuid': self.command_uuid,
'timeout': self.timeout,
}
elif self.event and self.peer_infos:
# external event
return {
'event': self.event,
'params': self.params,
'startup': False,
'device_id': None,
'sender': external_sender or self.sender,
'peer_infos': self.peer_infos.to_dict(),
'command_uuid': self.command_uuid,
}
else:
raise InvalidMessage()
def fill_from_request(self, request):
"""
Fill instance from other request
Args:
request (MessageRequest): message request instance
"""
if not isinstance(request, MessageRequest):
raise Exception('Parameter "request" must be a MessageRequest instance')
self.command = request.command
self.event = request.event
self.propagate = request.propagate
self.params = copy.deepcopy(request.params)
self.to = request.to
self.sender = request.sender
self.device_id = request.device_id
self.peer_infos = None
self.command_uuid = request.command_uuid
if request.peer_infos:
self.peer_infos = PeerInfos()
self.peer_infos.fill_from_dict(request.peer_infos.to_dict(True))
def fill_from_dict(self, request):
"""
Fill instance from other request
Args:
request (dict): message request infos
"""
if not isinstance(request, dict):
raise Exception('Parameter "request" must be a dict')
self.command = request.get('command', None)
self.event = request.get('event', None)
self.propagate = request.get('propagate', False)
self.params = copy.deepcopy(request.get('params', {}))
self.to = request.get('to', None)
self.sender = request.get('sender', None)
self.device_id = request.get('device_id', None)
self.command_uuid = request.get('command_uuid', None)
self.timeout = request.get('timeout', 5.0)
self.peer_infos = None
if request.get('peer_infos', None):
self.peer_infos = PeerInfos()
self.peer_infos.fill_from_dict(request.get('peer_infos'))
| 29.989669 | 138 | 0.559283 |
from core.exception import InvalidMessage
import copy
__all__ = ['CORE_MODULES', 'CATEGORIES', 'PeerInfos',
'ExecutionStep', 'MessageResponse', 'MessageRequest']
CORE_MODULES = [
'system',
'update',
'audio',
'network',
'cleepbus',
'parameters'
]
class CATEGORIES(object):
APPLICATION = 'APPLICATION'
MOBILE = 'MOBILE'
DRIVER = 'DRIVER'
HOMEAUTOMATION = 'HOMEAUTOMATION'
MEDIA = 'MEDIA'
SERVICE = 'SERVICE'
ALL = ['APPLICATION', 'MOBILE', 'DRIVER', 'HOMEAUTOMATION', 'MEDIA', 'SERVICE']
class ExecutionStep(object):
BOOT = 0
INIT = 1
CONFIG = 2
RUN = 3
STOP = 4
def __init__(self):
self.step = self.BOOT
class PeerInfos():
def __init__(self,
uuid=None,
ident=None,
hostname=None,
ip=None,
port=80,
ssl=False,
macs=None,
cleepdesktop=False,
extra={},
):
self.uuid = uuid
self.ident = ident
self.hostname = hostname
self.ip = ip
self.port = port
self.ssl = ssl
self.macs = macs
self.cleepdesktop = cleepdesktop
self.online = False
self.extra = extra
def to_dict(self, with_extra=False):
out = {
'uuid': self.uuid,
'ident': self.ident,
'hostname': self.hostname,
'ip': self.ip,
'port': self.port,
'ssl': self.ssl,
'macs': self.macs,
'cleepdesktop': self.cleepdesktop,
'online': self.online,
'extra': self.extra,
}
with_extra and out.update({'extra': self.extra})
return out
def __str__(self):
return 'PeerInfos(uuid:%s, ident:%s, hostname:%s, ip:%s port:%s, ssl:%s, macs:%s, cleepdesktop:%s, online:%s, extra:%s)' % (
self.uuid,
self.ident,
self.hostname,
self.ip,
self.port,
self.ssl,
self.macs,
self.cleepdesktop,
self.online,
self.extra,
)
def fill_from_dict(self, peer_infos):
if not isinstance(peer_infos, dict):
raise Exception('Parameter "peer_infos" must be a dict')
self.uuid = peer_infos.get('uuid', None)
self.ident = peer_infos.get('ident', None)
self.hostname = peer_infos.get('hostname', None)
self.ip = peer_infos.get('ip', None)
self.port = peer_infos.get('port', None)
self.ssl = peer_infos.get('ssl', False)
self.macs = peer_infos.get('macs', None)
self.cleepdesktop = peer_infos.get('cleepdesktop', False)
self.extra = copy.deepcopy(peer_infos.get('extra', {}))
class MessageResponse(object):
def __init__(self, error=False, message='', data=None, broadcast=False):
self.error = error
self.message = message
self.data = data
self.broadcast = broadcast
def __str__(self):
return 'MessageResponse(error:%r, message:"%s", data:%s, broadcast:%r)' % (
self.error,
self.message,
str(self.data),
self.broadcast
)
def to_dict(self):
return {'error':self.error, 'message':self.message, 'data':self.data}
def fill_from_response(self, response):
if not isinstance(response, MessageResponse):
raise Exception('Parameter "response" must be a MessageResponse instance')
self.error = response.error
self.message = response.message
self.data = copy.deepcopy(response.data)
self.broadcast = response.broadcast
def fill_from_dict(self, response):
if not isinstance(response, dict):
raise Exception('Parameter "response" must be a dict')
self.error = response.get('error', False)
self.broadcast = response.get('broadcast', False)
self.message = response.get('message', '')
self.data = response.get('data', None)
class MessageRequest(object):
def __init__(self, command=None, event=None, params={}, to=None):
self.command = command
self.event = event
self.params = params
self.to = to
self.propagate = False
self.sender = None
self.device_id = None
self.peer_infos = None
self.command_uuid = None
self.timeout = None
def __str__(self):
if self.command:
return 'MessageRequest(command:%s, params:%s, to:%s, sender:%s, device_id:%s, peer_infos:%s, command_uuid:%s, timeout:%s)' % (
self.command,
str(self.params),
self.to,
self.sender,
self.device_id,
self.peer_infos.to_dict() if self.peer_infos else None,
self.command_uuid,
self.timeout,
)
elif self.event:
return 'MessageRequest(event:%s, propagate:%s, params:%s, to:%s, device_id:%s, peer_infos:%s, command_uuid:%s)' % (
self.event,
self.propagate,
str(self.params),
self.to,
self.device_id,
self.peer_infos.to_dict() if self.peer_infos else None,
self.command_uuid,
)
return 'MessageRequest(Invalid message)'
def is_broadcast(self):
return True if self.to is None else False
def is_command(self):
return True if self.command else False
def is_external_event(self):
return True if self.peer_infos is not None else False
def to_dict(self, startup=False, external_sender=None):
if self.command and not self.peer_infos:
return {
'command': self.command,
'params': self.params,
'to': self.to,
'sender': self.sender,
'broadcast': self.is_broadcast(),
}
elif self.event and not self.peer_infos:
return {
'event': self.event,
'to': self.to,
'params': self.params,
'startup': startup,
'device_id': self.device_id,
'sender': self.sender,
}
elif self.command and self.peer_infos:
return {
'command': self.command,
'params': self.params,
'to': self.to,
'sender': external_sender or self.sender,
'broadcast': self.is_broadcast(),
'peer_infos': self.peer_infos.to_dict(),
'command_uuid': self.command_uuid,
'timeout': self.timeout,
}
elif self.event and self.peer_infos:
return {
'event': self.event,
'params': self.params,
'startup': False,
'device_id': None,
'sender': external_sender or self.sender,
'peer_infos': self.peer_infos.to_dict(),
'command_uuid': self.command_uuid,
}
else:
raise InvalidMessage()
def fill_from_request(self, request):
if not isinstance(request, MessageRequest):
raise Exception('Parameter "request" must be a MessageRequest instance')
self.command = request.command
self.event = request.event
self.propagate = request.propagate
self.params = copy.deepcopy(request.params)
self.to = request.to
self.sender = request.sender
self.device_id = request.device_id
self.peer_infos = None
self.command_uuid = request.command_uuid
if request.peer_infos:
self.peer_infos = PeerInfos()
self.peer_infos.fill_from_dict(request.peer_infos.to_dict(True))
def fill_from_dict(self, request):
if not isinstance(request, dict):
raise Exception('Parameter "request" must be a dict')
self.command = request.get('command', None)
self.event = request.get('event', None)
self.propagate = request.get('propagate', False)
self.params = copy.deepcopy(request.get('params', {}))
self.to = request.get('to', None)
self.sender = request.get('sender', None)
self.device_id = request.get('device_id', None)
self.command_uuid = request.get('command_uuid', None)
self.timeout = request.get('timeout', 5.0)
self.peer_infos = None
if request.get('peer_infos', None):
self.peer_infos = PeerInfos()
self.peer_infos.fill_from_dict(request.get('peer_infos'))
| true | true |
1c2fd346542e78afce69ecd41b77a94c70950c89 | 244 | py | Python | catalog/bindings/csw/exception.py | NIVANorge/s-enda-playground | 56ae0a8978f0ba8a5546330786c882c31e17757a | [
"Apache-2.0"
] | null | null | null | catalog/bindings/csw/exception.py | NIVANorge/s-enda-playground | 56ae0a8978f0ba8a5546330786c882c31e17757a | [
"Apache-2.0"
] | null | null | null | catalog/bindings/csw/exception.py | NIVANorge/s-enda-playground | 56ae0a8978f0ba8a5546330786c882c31e17757a | [
"Apache-2.0"
] | null | null | null | from dataclasses import dataclass
from bindings.csw.exception_type import ExceptionType
__NAMESPACE__ = "http://www.opengis.net/ows"
@dataclass
class Exception(ExceptionType):
class Meta:
namespace = "http://www.opengis.net/ows"
| 22.181818 | 53 | 0.758197 | from dataclasses import dataclass
from bindings.csw.exception_type import ExceptionType
__NAMESPACE__ = "http://www.opengis.net/ows"
@dataclass
class Exception(ExceptionType):
class Meta:
namespace = "http://www.opengis.net/ows"
| true | true |
1c2fd3a10430ee151538db6ca511a9c5c6a5b7f2 | 36,429 | py | Python | test.py | Miraclelwk/learn-notes | 65ee8713a9477b6fe13ca93f787438f206fe6fd7 | [
"MIT"
] | null | null | null | test.py | Miraclelwk/learn-notes | 65ee8713a9477b6fe13ca93f787438f206fe6fd7 | [
"MIT"
] | null | null | null | test.py | Miraclelwk/learn-notes | 65ee8713a9477b6fe13ca93f787438f206fe6fd7 | [
"MIT"
] | null | null | null | """
# 多行注释
第一个注释
第二个注释
第三个注释
"""
"""
print("Hello,Python!!!")
# 缩进不同导致代码报错
if True:
print("1")
print("true")
else:
print("0")
print("false")
# 数字过长使用分隔符
print(100_100_100)
# 二进制表示
a = 0b10
print(a)
# 浮点型不能直接计算,会得到一个不精确的结果
b = 0.1 + 0.2
print(b)
# 引号指定字符串
s = 'Hello'
print(s)
# 不同引号之间才能嵌套
s = '子曰:"学而时习之,不亦说乎"'
print(s)
# 三引号指定多行字符串
s = '''锄禾日当午,
汗滴禾下土。
谁知盘中餐,
粒粒皆辛苦。'''
print(s)
# 转义符
s = '子曰:"学而时习之,\n不亦说乎"'
print(s)
# 格式化字符串
a = 123
print('a=',a)
# 指定单个占位符
b = 'hello %s'%'孙悟空'
print(b)
# 指定多个占位符
c = 'hello %s 你好 %s'%('tom','孙悟空')
print(c)
# 指定占位符最小字符位数
d = 'hello %3s'%'abcde'
print(d)
# 占位不够最小字符数时空格补位
e = 'hello %5s'%'abc'
print(e)
# 占位字符数区间
f = 'hello %3.5s'%'abcdefg'
print(f)
# 指定占位符最大字符位数
g = 'hello %.3s'%'abcde'
print(g)
# %f指定小数的位数
h = 'hello %.3f'%123.45678
print(h)
# 整数占位符
i = 'hello %d'%123.123
print(i)
# 字符串嵌入变量
a = '123'
b = '六儿'
c = f'hello {a} {b}'
print(f'c= {c}')
print(f'a= {a}')
# 练习:创建变量保存名字,使用四种方式输出,欢迎xxx光临
name = '李4'
#拼串
print('欢迎'+name+'光临!')
#多个参数
print('欢迎',name,'光临!')
#占位符
print('欢迎%s光临!'%name)
#格式化字符串
print(f'欢迎{name}光临!')
#复制字符串
k = 'abc'
k = k*20
print(k)
#布尔值
a = True
print('a = ',a)
#布尔值属于整型
print(1+False)
#空值
c = None
print(c)
#类型检查
a = 123
b = '123'
#方式一
type(123)
c = type(123)
print(c)
#方式二
d = type(a)
print(d)
#方式三
print(type(b))
# 直接查看值的type
print(type(1))
print(type(1.5))
print(type(True))
print(type('hello'))
print(type(None))
# 查找值的id
id(123)
# 变量与对象
a = 123
id(a)
b = a
id(b)
#变量相互独立
a = 10
b = a
print(b)
a = 20
print(a,b)
# 定义一个变量a的类型
a = True
print('a = ',a)
print('a的类型为',type(a))
# 重新赋值a
a = 'True'
# int(a) 若执行结果a仍为bool,不会产生影响
a = int(a) # 重新赋值a
print('a = ',a)
print('a的类型为',type(a))
# 需要字符串和其他类型拼串时
b = 123
print('hello',str(b))
# 加法运算符
a = 10 + 5
print('a =',a) # 计算
b = 'hello' + 'world' # 拼串
print('b =',b)
# 除法运算符
a = 10 / 3
print('a = ',a)
b =10 // 3 # 整除
print('b = ',b)
#幂运算和开方
a = 2**3
print('a = ',a)
b = 16**0.5 # 求开方
print('b = ',b)
#取模
a = 10 % 5
print('a = ',a)
b = 10 % 4
print('b = ',b)
c = 10 % 3
print('c = ',c)
d = 10 % 2
print('d = ',d)
# 关系运算符
10 > 20
print(int('2') > int('11'))
result = 'qqq' is not 'aaa'
print('result = ',result)
#非运算
a = True
a = not a #对a进行非运算
b = 1
b = not b
c = ''
c = not c
print('a = ',a)
print('b = ',b)
print('c = ',c)
# 练习
# 布尔值逻辑运算
a = True
a = not a
b = None
b = not b
print('a = ',a)
print('b = ',b)
result = True and False
print('result = ',result)
result1 = True or False
print('result1 = ',result1)
#非布尔值逻辑运算
c = 10
c = not c
print('c = ',c)
d = 10 and 20
print('d = ',d)
e = 'hello' and 'world'
print('e = ',e)
f = True or 'world'
print('f = ',f)
g = 'hello' or 123
print('g = ',g)
# 非布尔值的逻辑运算
# True and True
result1 = 1 and 2 # 2
# True and False
result2 = 1 and 0 # 0
# False and True
result3 = 0 and 1 # 0
# False and False
result4 = 0 and None # 0
print('result1 = ',result1)
print('result2 = ',result2)
print('result3 = ',result3)
print('result4 = ',result4)
# True or True
result5 = 1 or 2 # 1
# True or False
result6 = 1 or 0 # 1
# False or True
result7 = 0 or 1 # 1
# False or False
result8 = 0 or None # None
print('result5 = ',result5)
print('result6 = ',result6)
print('result7 = ',result7)
print('result8 = ',result8)
# 比较两个值的大小
a = 10
b = 20
print('a大') if a > b else print('b大')
c = 66
d = 38
max = c if c > d else d
print('max = ',max)
# 练习
# 三个值的最大值
a = 100
b = 200
c = 300
mid = a if a > b else b
max = mid if mid > c else c
print('三者中最大值是',max)
# 运算符可以用括号控制优先级
a = 1 or 2 and 3
b = (1 or 2) and 3
print(a,b)
# 条件判断语句
num = 20
if num > 10 : print('num比10大')
# if 后面跟代码块
if True :
print('123456')
print('789')
print('abc')
print('hello')
# 练习:在命令行让用户输入一个用户名,获取用户输入,并进行判断:
如果用户输入的用户名是admin,则显示欢迎管理员光临;
如果用户输入的是其他用户名,则什么也不做。
str_input = input('\n\n请输入用户名:')
if str_input == 'admin':
print('欢迎管理员光临')
# input()函数
a = input()
print('用户输入的内容是:',a)
input('请输入用户名:')
# 练习:用户输入一个年龄,如果年龄大于18岁,则显示你已经成年了。
# age = input('请输入用户的年龄:')
# age = int(age)
# age是字符串不能直接和数字比较,也可以用下面语句:
age = int(input('请输入用户的年龄:'))
if age >= 18:
print('你已经成年了~~~')
# if-else
username = input('输入用户名:')
if username == 'admin':
print('欢迎管理员光临!')
else :
print('欢迎用户光临!')
# if-elif-else
age = int(input('输入年龄:'))
if age <= 20:
print('青年')
elif age <= 30:
print('青年')
elif age <= 60:
print('老年')
else:
print('小孩')
# if语句练习
1.获取用户输入的整数,判断数字的奇偶性
num = int(input('输入一个整数:'))
if num % 2 == 1:
print('这个数是奇数')
else:
print('这个数是偶数')
# 2.编写一个程序判断年份是否为闰年。如果年份可以不能被100整除或者可以被400整除,那么年份为闰年
year = int(input('输入年份判断是否为闰年:'))
if year % 100 != 0 or year % 400 == 0:
print('闰年')
else:
print('平年')
# 3.狗的前两年每一年相当于人的10.5岁,之后每增加一年就增加4岁。编写一个程序,获取用户输入的狗的年龄,显示其相当于人类的年龄。
dog_age = int(input('输入狗的年龄:'))
if dog_age < 0:
if dog_age <= 2:
print('狗的年龄相当于人的:', dog_age * 10.5,'岁')
else:
print('狗的年龄相当于人的:', (dog_age - 2) * 4 + 21, '岁')
else:
print(请输入合法的数字:)
4.从键盘输入小明的期末成绩:
当成绩为10时,’奖励一辆BMW‘;
当成绩为[80-99]时,’奖励一台iphone‘;
当成绩为[60-79]时,’奖励一本参考书‘;
其他时什么奖励都没有
def customFun():
acheviementReport = int(input('请输入你的成绩:\n'))
if acheviementReport==10:
print('奖励一辆BMW')
elif 80 <= acheviementReport <= 90:
print('奖励一台iphone')
elif 60 <= acheviementReport < 80:
print('奖励一本参考书')
else:
print("什么都没有")
customFun()
customFun()
# 5.女方家长嫁女儿的条件: 高:180cm以上;富:1000以上;帅:500以上;
# 如果三个条件同时满足,则'我一定要嫁给他';
# 如果三个条件有为真的情况,则'嫁吧,比上不足比下有余'
# 如果三个条件都不满足,则'不嫁'
high = int(input('身高:\n'))
price = int(input('请输入家产:\n'))
handsome = int(input('请输入颜值:\n'))
if high >= 180 and price >= 1000 and handsome >= 500:
print('我一定要嫁给他!')
elif high >= 180 or price >= 1000 or handsome >= 500:
print('嫁吧,比上不足比下有余')
else:
print('不嫁')
# while循环语句
i = 0 # 初始化表达式
while i < 10: # 初始化表达式
i += 1 # 更新表达式
print('hello')
# while语句练习
# 1.求100以内所有的奇数之和
# 获取100内所有的数
i = 0
# 创建一个变量保存结果
result = 0
while i < 100:
i += 1
if i % 2 == 1:
result += i
print('100内所有奇数之和为:', result)
# 获取100以内所有的奇数
i = 1
while i < 100:
print(i)
i += 1
# 2.求100以内所有7的倍数之和以及个数
i = 0
num = 0
result = 0
while i < 100:
i += 1
if i % 7 == 0:
num += 1
result += i
print('100以内所有7的倍数之和为:\n', result)
print('100以内所有7的倍数的个数:\n', num)
# 3.水仙花数是指一个n位数(n>=3),它的每个位上的数字的n次幂之和等于它本身
# (例如1**3 + 5**3 + 3**3 = 153) 求1000以内所有的水仙花数
i = 100
while i < 1000:
# 设a为i的百位数
a = i // 100
# 设b为i的十位数
b = i // 10 % 10 # b = i - a * 100 // 10
# 设c为i的个位数
c = i % 10
if a ** 3 + b ** 3 + c ** 3 == i:
print(i)
i += 1
# 4.获取用户输入的任意数,判断其是否为质数
i = 2
num = int(input('输入任意整数:'))
flag = True
while i < num:
if num % i == 0:
flag = False
i += 1
if flag:
print(num, '是质数')
else:
print(num, '不是质数')
# 步骤:
# 获取用户输入的任意数
num = int(input('输入任意整数:'))
# 举例有可能成为9的因数的数- 2、3、4、5、6、7、8
# 获取所有可能整除num的整数
# i = 2 # 不包含1
# flag = True # 传递判断结果
# while i < num: # 不包含自身
# # 判断num能否被i整除
# # if num % i != 0: i还没递增,num不能被一个i整除,不能判断为质数
# # 逆向思维num能被i整除,num不是质数
# if num % i == 0:
# flag = False # num不是质数则结果输出为False
# i += 1
# 循环嵌套练习
# 1.打印99乘法表
i = 0
while i < 9:
j = 0
while j < i + 1:
a = i + 1
b = j + 1
c = a * b
print(a, '*', b, '=', c, '', end='')
j += 1
print('')
i += 1
i = 0
while i < 9:
j = 0
while j < i + 1:
print('*', end='')
j += 1
print('')
i += 1
# 2.求100以内所有质数
i = 2
# j = 2 # j没有放进循环不会重置,j应从2开始
# result = True #result没有放进循环不会重置,result应该默认为True开始
while i <= 100:
j = 2
result = True
while j < i:
if i % j == 0:
result = False
j += 1
if result:
print(i)
i += 1
#创建循环求1-100
i = 2
while i <= 100:
# print(i)
# 创建一个变量,记录i的状态,默认i是质数
flag = True
# 判断i是否质数
# 获取所有可能是i的因数
j = 2
while j < i:
# 判断i能否被j整除
if i % j == 0:
flag = False
j += 1
# 验证结果并输出
if flag:
print(i)
i += 1
# 3.倒三角形
i = 0
while i < 9:
j = 9
while j > i:
print('*', end='')
j -= 1
i += 1
print('')
# break
i = 0
while i < 5:
if i == 3:
break
print(i)
i += 1
else:
print('hello')
# continue
i = 0
while i < 5:
i += 1
if i == 3:
continue
print(i)
else:
print('hello')
# 质数练习优化
from time import *
begin = time()
i = 2
while i <= 10000:
j = 2
result = True
# while j < i:
while j <= i ** 0.5:
if i % j == 0:
result = False
break
j += 1
if result:
# print(i)
pass
i += 1
end = time()
print('程序执行花费了:', end - begin, '秒')
# 综合练习
# 小游戏《唐僧大战白骨精》
# 1. 身份选择
# - 显示提示信息:欢迎来到《xxx》
# - 请选择你的身份:1. xxx 2. xxx
# - 根据用户选择来分配身份(显示不同提示信息):你已经选择唐僧,恭喜你将以唐僧的身份进行游戏!
# - 你居然选择白骨精,太不要脸了。系统已为你自动分配角色为唐僧
# - 选项错误,系统已自动为你分配角色为唐僧。
#
# 2. 游戏进行
# - 显示玩家基本信息(攻击力、生命值)
# - 显示玩家可以进行的操作(练级 打boss 逃跑)
# - 练级:提升玩家攻击力和生命值
# - 打boss:玩家攻击boss,boss反击,计算boss是否被玩家消灭,计算玩家是否已经被boss消灭。
# 身份选择
# 欢迎语
print('='*15, '欢迎来到20年前的小游戏', '='*15)
# 游戏身份选择
identity = int(input('请选择你的身份:\n 1.唐僧\n 2.白骨精\n'))
# 打印分割线
print('-'*50)
if identity == 1:
print('你已经选择->唐僧<-,恭喜你将以->唐僧<-的身份进行游戏!')
elif identity == 2:
print('你居然选择白骨精,太不要脸了。系统已为你自动分配角色为->唐僧<-')
else:
print(' 选项错误,系统已自动为你分配角色为唐僧。')
# 进入游戏
# 显示玩家信息
# print('******当前角色是:唐僧\t 生命值:100\t 攻击力:20******')
# 创建变量保存信息
# operation = int(input('请选择下一步操作:\n 1.练级\n 2.打boss\n 3.睡大觉'))
play_life = 100 # 生命值
play_attack = 20 # 攻击力
play_grade = 1 # 等级
boss_life = 1000
boss_attack = 200
print('*' * 10, f'当前角色是:唐僧\t 生命值:{play_life}\t', f'攻击力:{play_attack}\t', f'等级: {play_grade}\t', '*' * 10)
# 游戏选项需要反复出现,写到死循环中
while True:
operation = int(input('请选择下一步操作:\n 1.练级\n 2.打boss\n 3.逃跑\n'))
# 增加玩家生命值和攻击力
if operation == 1:
play_life += 50
play_attack += 100
play_grade += 1
print('*'*20, '练级成功!当前等级为:', play_grade, '当前生命值为:', play_life, '当前攻击力为:', play_attack, '*'*20)
# 玩家攻击boss,boss减去的生命值等于玩家攻击力
elif operation == 2:
# 玩家攻击boss,boss减去的生命值等于玩家攻击力
boss_life -= play_attack
print('->唐僧<- 攻击了 ->白骨精<-')
# 检查玩家是否赢了,赢则游戏结束,没赢则受到反击
if boss_life <= 0:
print(f'->白骨精<-受到了{play_attack}点伤害,打败boss,->唐僧<-赢得对局')
break
else:
# boss反击,唐僧受到boss攻击力等额伤害
play_life -= boss_attack
print(' ->白骨精<-攻击了->唐僧<- ')
if play_life <= 0:
print(f'->唐僧<-受到了{ boss_attack } 点伤害,挑战失败')
break
# 逃跑,退出游戏
elif operation == 3:
print('->唐僧<-扭头撒腿就跑!game over')
break
else:
break
print('选项错误,退出游戏')
# 字符串截取
a = 'hello world'
print(a[0:6]) # 截取第一个到第六个字符:hello (空格也算)
print(a[2:-2]) # 截取第三个到倒数第三个字符:hello wor
print(a[0]) # 截取第一个字符:h
print(a[1:]) # 截取第二个后的全部字符:ello world
# 列表创建
list1 = ['如果可以作弊', '我会想你念你', 1, 2, 3]
print(list1)
list2 = ['我', '曾将', '青春', '翻涌', '成', '她']
print(list2[1]) # 曾将
print(list2[-1]) # 成
# 列表截取(切片)
nums = [10, 20, 30, 40, 50, 60, 70, 80]
print(nums[2:7]) # 30,40,50,60,70
print(nums[1:-2]) # 20,30,40,50,60
# 列表长度
nums = [10, 20, 30, 40, 50, 60, 70, 80]
print('列表长度为: ', len(nums))
# 练习:在列表中保存5个名字,通过索引获取每个名字。
names = ['小a', '小b', '小c', '小d', '小e']
print(names[0])
print(names[1])
print(names[2])
print(names[-2])
print(names[-1])
# 列表单个元素修改
list3 = ['如果可以作弊', '我会想你念你', 1, 2, 3]
list3[2] = '到最后的荼蘼'
print(list3)
# 列表切片修改
list3 = ['如果可以作弊', '我会想你念你', 1, 2, 3]
list3[2:] = ['到最后的荼蘼','如果回忆容易', '我会想你念你']
print(list3)
# 设置步长修改列表
list3 = [1, '如果可以作弊', 2, '我会想你念你', 3]
list3[::2] = ['到最后的荼蘼', '如果回忆容易', '我会想你念你']
print(list3)
# 列表添加和删除
list3 = ['如果可以作弊', '我会想你念你', 1, 2, 3]
list3.append('到最后的荼蘼')
del list3[3]
print(list3)
# 通过切片删除列表元素
casual_list = [1, 123, 213, 12321, 3242213, 223, 123, 423, 324, 32133]
del casual_list[0:2]
print(casual_list)
del casual_list[::2]
print(casual_list)
# 列表拼接和重复
squares = [1, 2, 3]
squares += [7, 8, 9]
print(squares)
print(squares * 3)
# 列表函数&方法
# 函数
# in & not in
casual_list = [1, 123, 213, 12321, 3242213]
print(123 in casual_list)
print('hello' not in casual_list)
# len() & min() & max()
casual_list = [1, 123, 213, 12321, 3242213]
print(len(casual_list))
print(min(casual_list))
print(max(casual_list))
# 方法
# index()
casual_list = [1, 123, 213, 123, 3242213]
print(casual_list.index(213))
print(casual_list.index(213, 1)) # 第二个参数表示查找的起始位置
print(casual_list.index(213, 1, 3)) # 第三个参数表示查找的终点位置
# count()
casual_list = [1, 123, 213, 123, 3242213]
print(casual_list.count(123))
print(casual_list.count(213))
print(casual_list.count(111))
# 列表的方法
# insert
stus = ['孙悟空', '猪八戒', '牛魔王', '白骨精']
print('原列表:', stus)
stus.insert(2, '唐僧')
print('新列表:', stus)
# extend
stus = ['孙悟空', '猪八戒', '牛魔王', '白骨精']
print('原列表:', stus)
stus.extend(['dddd', '唐僧'])
print('新列表:', stus)
# clear
stus = ['孙悟空', '猪八戒', '牛魔王', '白骨精']
stus.clear()
print(stus)
# pop
stus = ['孙悟空', '猪八戒', '牛魔王', '白骨精']
result = stus.pop(2)
print('返回值:', result)
# remove
stus = ['孙悟空', '猪八戒', '牛魔王', '白骨精', '猪八戒']
stus.remove('猪八戒')
print(stus)
# reverse
casual_list = [1, 123, 213, 12321, 3242213]
casual_list.reverse()
print(casual_list)
# sort
casual_list = [1, 123, 213, 12321, 3242213]
casual_list.sort()
print(casual_list)
casual_list.sort(reverse=True)
print(casual_list)
# 遍历列表
# while循环
stus = ['孙悟空', '猪八戒', '牛魔王', '白骨精', '猪八戒']
i = 0
while i < 4:
print(stus[i])
i += 1
# for循环
stus = ['孙悟空', '猪八戒', '牛魔王', '白骨精', '猪八戒']
for s in stus:
print(s)
# 嵌套列表
a = ['a', 'b', 'c']
n = [1, 2, 3]
list = [a, n]
print(list)
# 索引子表
print(list[0]) # ['a', 'b', 'c']
# 索引子表的元素
print(list[0][1]) # b
# 列表练习
# EMS(Employee Manager System 员工管理系统)
# 显示系统欢迎信息
print('-'*20, '欢迎进入员工管理系统', '-'*20)
# 创建列表,保存员工信息,字符串形式
emps = ['\t孙悟空\t16', '\t猪八戒\t15']
# 创建死循环
while True:
# 显示用户选项
print('-' * 60)
print('请选择你要进行的操作:')
print('\t1.查询员工\t')
print('\t2.添加员工\t')
print('\t3.删除员工\t')
print('\t4.退出\t')
user_code = input('请选择1-4:\n')
print('-'*60)
if user_code == '1': # 查询员工
# 创建变量
print('\t序号\t\t姓名\t\t年龄')
# 创建变量表示序号
n = 1
for emp in emps:
print(f'\t{n}\t{emp}')
n += 1
elif user_code == '2': # 添加员工
# 获取员工信息
emp_name = input('请输入员工姓名:')
emp_age = input('请输入员工年龄:')
emp = f'\t{emp_name}\t\t{emp_age}'
print('-' * 60)
# 提示
print('员工:', emp, '将被添加到系统中')
user_confirm = input('是否继续添加[Y/N]:')
print('-' * 60)
if user_confirm == 'Y':
emps.append(emp)
print('插入成功')
elif user_confirm == 'N':
print('取消成功')
pass
# 删除员工
elif user_code == '3':
del_num = int(input('请输入删除员工序号:'))
if 0 < del_num <= len(emps):
del_index = del_num - 1
else:
print('输入有误')
print('员工:', emps[del_index], '将被删除')
print('\t序号\t姓名\t\t年龄')
print(f'\t{del_num}\t{emps[del_index]}')
user_confirm = input('是否继续删除[Y/N]:')
if user_confirm == 'Y':
del emps[del_index]
print('删除成功')
elif user_confirm == 'N':
print('操作取消')
pass
elif user_code == '4':
input('欢迎使用,点击回车键退出')
break
else:
print('您的输入有误请重新输入')
# range
# 创建元组
tuple = ('Google', 'Runoob', 1997, 2000)
tup1 = (1, 2, 3, 4, 5)
tup2 = 3, 5, 6 # 不用小括号也可以
tup3 = ()
print(type(tuple))
print(type(tup1))
print(type(tup2))
print(tup1)
print(tup2)
print(tup3)
# 元组只有单个元素,加逗号
tup1 = (50)
tup2 = (50,)
print(type(tup1))
print(type(tup2))
# 元组解包
# 利用解包交换变量的值
tup1 = (10, 20, 30, 40)
a, b, c, d = tup1
print(a, b, c, d)
a, b = b, a
print(a, b)
# 利用解包分配元素给相对位置的变量
tup1 = (10, 20, 30, 40, 50)
a, b, *c = tup1
print(a, b, c)
a, *b, c = tup1
print(a, b, c)
*a, b, c = tup1
print(a, b, c)
# 元组索引
tup1 = (10, 20, 30, 40, 50, 60)
print(tup1[0]) # 10
print(tup1[1:5]) # (20, 30, 40 ,50)
print(tup1[2:-1]) # ( 30, 40, 50)
# 元组拼接
tup1 = ('a', 'b', 'c')
tup2 = (1, 2, 3)
tup3 = tup1 + tup2
print(tup3)
# 元组的删除
tup1 = (1, 2, 3)
del tup1
print(tup1)
# 元组不可变
tup1 = (1, 2, 3)
print(id(tup1))
tup1 = ('a', 'b', 'c')
print(id(tup1))
# 创建字典
tinydict = {'a': 1, 'b': 2, 'c': 3}
emptydict = {}
print(tinydict)
print(emptydict)
print('length=', len(tinydict))
print('length=', len(emptydict))
print(type(tinydict))
# 访问字典
tinydict = {'Name': '大傻春', 'Age': 2, 'Class': '你要干什么'}
print(tinydict['Name'])
print(tinydict['Class'])
# 字典新增、修改
tinydict = {'Name': '大傻春', 'Age': 2, 'Class': '你要干什么'}
tinydict['Age'] = '你个大傻子'
tinydict['Word'] = '滚就滚'
print(tinydict['Word'])
print(tinydict)
# 删除字典元素
tinydict = {'Name': '大傻春', 'Age': 2, 'Class': '你要干什么', 'Word': '滚就滚'}
del(tinydict['Age'], tinydict['Word'])
print(tinydict)
tinydict.clear()
print(tinydict)
# 删除一个字典
tinydict = {'a': 1, 'b': 2, 'c': 3}
del tinydict
print(tinydict)
# 字典键不允许被赋值两次
tinydict = {'a': 1, 'b': 2, 'c': 3}
tinydict['b'] = 30
print(tinydict)
# 字典键不允许用可变的数据类型
tinydict = {[a]: 1, 'b': 2, 'c': 3}
print(tinydict)
# rang
r = range(5) # 生成一个序列【0,1,2,3,4】
s = range(3, 10, 2)
print(list(r))
print(list(s))
# 通过range可以创建一个指定次数的for循环
# for循环除了创建方式以外,其余和while一样,包括break、continue都可以在for循环中使用。
for i in range(30):
print(i)
# 可变对象
a = [1, 2, 3]
print('修改前:', a, id(a))
# 通过索引改变对象[1,2,3]的值,不会改变变量所指向的对象
a[0] = 10
print('修改后:', a, id(a))
# 修改对象的值时,如果有其他变量也指向该对象,则修改也会在其他的变量中体现
a = [1, 2, 3]
print('修改前:', a, id(a))
b = a
a[0] = 10
print('修改后:', a, id(a))
print('修改后:', b, id(b))
# 为变量重新赋值,改变变量所指向的对象
a = [1, 2, 3]
print('修改前:', a, id(a))
a = [4, 5, 6]
print('修改后:', a, id(a))
# dict函数创建字典
d = dict(name='孙悟空', age='8')
print(d, type(d))
# 双值子序列转换为字典
e = dict([('name', '孙悟饭'), ('age', 18)])
print(e, type(e))
# 获取字典的长度
d = dict(name='孙悟空', age='8')
print(len(d))
# in & not in
d = {'name': '孙悟空', 'age': 18}
print('name' in d)
# get(key,[default])获取指定键的值
e = dict([('name', '孙悟饭'), ('age', 18)])
print(d.get('name'))
print(d.get('abc'))
print(d.get('hello', '返回默认值'))
print(d)
# setdefault(key,[, default])添加字典的值
d = {'name': '孙悟空', 'age': 18}
result = d.setdefault('name')
result1 = d.setdefault('abc')
result2 = d.setdefault('address', '花果山')
print(result, result1, result2)
print(d)
# update()
d = {'a': 1, 'b': 2, 'c': 3}
d2 = {'d': 4, 'e': 5, 'f': 6, 'a': 8}
d.update(d2)
print(d)
# popitem删除字典的键值对
d = {'a': 1, 'b': 2, 'c': 3}
result = d.popitem()
print(result)
print(d)
e = {}
print(e.popitem())
# pop删除字典的键值对
d = {'a': 1, 'b': 2, 'c': 3}
result1 = d.pop('c')
result2 = d.pop('e', '返回默认值')
print('result1=', result1)
print('result2= ', result2)
print(d)
# copy()字典浅复制
d = {'a': 1, 'b': 2, 'c': 3, 'e': {'name': '孙悟空', 'age': 18}}
d2 = d.copy()
print(d, id(d))
print(d2, id(d2))
d2['e']['name'] = '猪八戒' # 修改可变对象时,原对象也会改变
print(d, id(d))
print(d2, id(d2))
# 字典遍历
# keys()
d = {'name': '孙悟空', 'age': 18, 'address': '花果山'}
for k in d.keys():
print(k)
# values()
d = {'name': '孙悟空', 'age': 18, 'address': '花果山'}
for v in d.values():
print(v)
# items()
d = {'name': '孙悟空', 'age': 18, 'address': '花果山'}
for k, v in d.items():
print(k, '=', v)
# 创建集合
s = {1, 2, 3, 4, 5, 6, 7, 8}
print(s)
p = {1, 1, 1, 1, 1, 2, 2, 2, 10, 20, 30}
print(p)
q = set('hello') # 字符串转换为集合
t = set({'a': 1, 'b': 2, 'c': 3}) # 字典转换为集合
print(q)
print(t)
# in & not in
s = {1, 2, 3, 4, 5, 6, 7, 8}
print(1 in s)
print('hello' not in s)
# len()
s = {1, 2, 3, 4, 5, 6, 7, 8}
print(len(s))
# 集合添加元素
# add()
s = {1, 2, 3, 4, 5, 6, 7, 8}
s.add(10)
print(s)
# update()
s = {1, 2, 3, 4, 5, 6, 7, 8}
s2 = set('hello')
print(s)
s.update((10, 20, 30))
print(s)
s.update({'a': 1, 'b': 2, 'c': 3}) # update字典类型只会插入键
print(s)
# 删除集合元素
# pop()
s = {1, 2, 3, 4, 5, 6, 7, 8}
result = s.pop()
print(result, s)
# remove()
s = {1, 2, 3, 4, 5, 6, 7, 8}
s.remove(7)
print(s)
# clear()
s = {1, 2, 3, 4, 5, 6, 7, 8}
s.clear()
print(s)
# copy()
s = {1, 2, 3, 4, 5, 6, 7, 8}
s2 = s.copy()
print(s, id(s))
print(s2, id(s2))
# 集合的运算
s1 = {1, 2, 3, 4, 5}
s2 = {3, 4, 5, 6, 7, 8, 9}
# & 交集运算
result1 = s1 & s2
print(result1)
# | 并集运算
s1 = {1, 2, 3, 4, 5}
s2 = {3, 4, 5, 6, 7, 8, 9}
result2 = s1 | s2
print(result2)
# - 差集运算
s1 = {1, 2, 3, 4, 5}
s2 = {3, 4, 5, 6, 7, 8, 9}
result3 = s1 - s2
print(result3)
# ^ 异或集运算
s1 = {1, 2, 3, 4, 5}
s2 = {3, 4, 5, 6, 7, 8, 9}
result4 = s1 ^ s2
print(result4)
# <= 检查一个集合是否为另一个的子集
s1 = {1, 2, 3}
s2 = {1, 2, 3, 4, 5, 6}
result = s1 <= s2
print(result)
# < 检查一个集合是否为另一个的真子集
s1 = {1, 2, 3}
s2 = {1, 2, 3, 4, 5, 6}
result = s1 <= s2
print(result)
# >= 检查一个集合是否为另一个的超集
s1 = {1, 2, 3}
s2 = {1, 2, 3}
s3 = {123, 213}
result1 = s1 >= s2
result2 = s1 >= s3
print('result1=', result1)
print('result2=', result2)
# > 检查一个集合是否为另一个的真超集
s1 = {1, 2, 3}
s2 = {1, 2, 3}
s3 = {1, 2}
result1 = s1 > s2
result2 = s1 > s3
print('result1=', result1)
print('result2=', result2)
# 函数
# 定义函数
def fun():
print('这是我的第一个函数')
print('hello')
print('world')
fun()
print(type(fun))
print(id(fun))
# 函数的参数
def add_sum(a, b): # 形参相当于在函数内写上:a = None b = None
print(a, '+', b, '=', a+b)
add_sum(10, 20)
add_sum(123, 233)
add_sum(12, 21)
# 定义函数练习1:定义一个函数,可以用来求任意三个数的乘积
def product(a, b, c):
print(a, '*', b, '*', c, '=', a*b*c)
product(10, 20, 30)
product(123, 345, 789)
# 定义函数练习2:定义一个函数,可以根据不同的用户名显示不同的欢迎信息
def welcome(a):
print('欢迎', a, '光临')
welcome('孙悟空')
# 实参的类型
def fn():
print('这是我的第一个函数')
print('hello')
print('world')
def fn2(a):
print('a = ', a)
b = 123
c = True
d = 'hello'
e = [1, 2, 3]
fn2(b)
fn2(c)
fn2(d)
fn2(e)
fn2(fn)
# 形参重新赋值对其他变量不产生影响
def fn3(a):
a = 20
print('a = ', a)
c = 10
fn3(c)
print('c = ', c)
# 修改形参指向的对象会对其他变量产生影响
def fn4(a):
a[0] = 10
print('a = ', a, id(a))
c = [1, 2, 3]
fn4(c)
print('c = ', c, id(c))
# 不定长参数
def fn(*a):
print('a= ', a, type(a))
fn(123, 213, 323)
# 定义一个函数可以令任意数字相加
def fun(*nums):
result = 0
for n in nums:
result += n
print('sum=', result)
fun(123456, 123, 123123, 123123, 12)
# 可变参数的使用
def fn(a, b, *c):
print('a= ', a)
print('b= ', b)
print('c= ', c)
fn(1, 2, 3, 4, 5)
# 可变参数后的参数必须关键字传参
def fn2(a, *b, c):
print('a= ', a)
print('b= ', b)
print('c= ', c)
fn2(1, 2, 3, 4, 5, c=6)
# 全部参数以关键字传参
def fn2(*, a, b, c):
print('a= ', a)
print('b= ', b)
print('c= ', c)
fn2(a=1, b=2, c=3)
# **形参
def fn2(b, c, **a):
print('a= ', a, type(a))
print('b= ', b)
print('c= ', c)
fn2(b=2, c=3, d=1, e=5, f=6)
# 参数解包
# 对序列解包
def fn2(a, b, c):
print('a= ', a)
print('b= ', b)
print('c= ', c)
# 创建一个元组或者列表
t = (1, 2, 3)
# 传统方式传参: fn2(t[0], t[1], t[2])
fn2(*t)
# 对字典解包
def fn2(a, b, c):
print('a= ', a)
print('b= ', b)
print('c= ', c)
# 创建一个字典
t = {'a': 1, 'b': 2, 'c': 3}
# 传统方式传参: fn2(t[0], t[1], t[2])
fn2(**t)
# 返回值
def fn():
def fn2():
print('hello world')
return fn2()
# return 100
# return 'hello'
# return [1,2,3]
fn()
print(fn())
def fun(*nums):
result = 0
for n in nums:
result += n
return result
r = fun(123, 123)
print(r)
# 函数后加不加()的区别
def fn5():
return 10
print(fn5)
print(fn5())
# 文档字符串
# help()函数
help(print)
# 文档字符串
def fn():
'''
这是一个文档字符串的示例
函数的作用:.....
函数的参数:
a:作用,类型,默认值......
b:作用,类型,默认值......
c:作用,类型,默认值......
'''
return 10
# 说明参数和返回值类型
def fn(a: int, b: bool, c: str = 10) -> int:
# 此处说明参数a为int,b为bool,c为str,返回值为int
return 10
# 作用域
# 全局作用域
b = 20
def fn():
a = 10
print('函数内a:', a)
print('函数外b:', b)
fn()
# 函数作用域
a = 20
def fn():
a = 10
print('函数内a:', a)
fn()
print('函数外a:', a)
# 变量的查找
a = 10
def fn():
global a
a = 20
print('修改后的a:', a)
fn()
print('全局变量的a:', a)
# 命名空间
scope = locals() # 获得当前命名空间
print(scope, type(scope)) # 返回一个字典
scope['c'] = 10 # 向字典中添加一个key-value相当于创建一个全局变量(一般不建议这么做)
print('c=', c)
def fn():
a = 10
scope = locals() # 获得当前函数命名空间
print(scope, type(scope)) # 返回一个字典
scope['b'] = 20 # 通过操作函数的命名空间(一般不建议这么做)
print('b = ', b)
fn()
# globals()查看全局命名空间
b = 10
def fn():
global_scope = globals() # 获得全局命名空间
print(global_scope) # 查看全局命名空间
global_scope['b'] = 20 # 修改全局变量
print('b = ', b)
fn()
print('函数外b:', b)
# 递归
# 创建一个函数求10!
def factorial():
    """Print 10! (3628800) to stdout."""
    # Multiply 10 by every factor from 2 through 9; starting the range at 2
    # skips the redundant multiplication by 1.
    product = 10
    for factor in range(2, 10):
        product *= factor
    print(product)
factorial()
# 创建一个函数求任意数的阶乘
def factorial2(n):
    """Compute and return n! for a positive integer n.

    Args:
        n: the number whose factorial is computed (n >= 1).

    Returns:
        n! as an int.
    """
    result = n
    for i in range(1, n):
        result = result * i
    # Bug fix: the original placed this print AFTER `return result`, making it
    # unreachable dead code; print before returning so the message is shown.
    print(n, '的阶乘为:', result)
    return result
factorial2(5)
# 递归例子:求任意数的阶乘
# 10! = 10 *9!
# 9! = 9 * 8!
# 8! = 8 *7!
...
# 1!= 1
def factorial2(n):
    """Recursively compute n! for a positive integer n.

    Base case: 1! == 1 ends the recursion.
    Recursive case: n! == n * (n - 1)!.
    """
    return 1 if n == 1 else n * factorial2(n - 1)
print(factorial2(10))
# 递归练习
# 练习1:创建一个函数power来求任意数字做幂运算 n ** i
def power(n, i):
    """Return n raised to the non-negative integer power i.

    Args:
        n: base value.
        i: exponent. The original only handled i >= 1 and recursed forever
           for i == 0; a zero-exponent base case (n ** 0 == 1) is added,
           which is backward compatible for all previously working inputs.
    """
    # Base cases terminate the recursion.
    if i == 0:
        return 1
    if i == 1:
        return n
    # Recursive case: n ** i == n * n ** (i - 1).
    return n * power(n, i - 1)
print(power(10, 3))
# 练习2:创建一个函数,用来检查一个任意字符串是否是回文字符串,如果是返回True,否则返回False
def pal(pal_str):
    """Return True if pal_str is a palindrome, False otherwise.

    A string is a palindrome exactly when it equals its own reverse, so a
    single slice comparison replaces the original strip-the-ends recursion
    (which also treated strings shorter than 2 characters as palindromes).
    """
    return pal_str == pal_str[::-1]
print(pal('abcdedcba'))
# 高阶函数
l1 = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
l2 = []
def fn2(i):
if i % 2 == 0:
return True
return False
def fn(func, lst):
'''
该函数用于将指定的元素输出到新的列表中
参数 lst:用来保存新表
'''
for n in lst:
if func(n):
l2.append(n)
return l2
print(fn(fn2, l1))
# 匿名函数
# filter
l1 = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
l2 = []
def fn2(i):
if i % 2 == 0:
return True
return False
def fn(func, lst):
'''
该函数用于将指定的元素输出到新的列表中
参数 lst:用来保存新表
'''
for n in lst:
if func(n):
l2.append(n)
return l2
r = filter(fn2, l1)
print(list(r))
# def fn(a, b):
# return a+b
# 等价于:
lambda a, b: a + b
print(lambda a, b: a + b)
print((lambda a, b: a + b)(10, 20))
l1 = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
l2 = []
# 匿名函数
def fn(func, lst):
'''
该函数用于将指定的元素输出到新的列表中
参数 lst:用来保存新表
'''
for n in lst:
if func(n):
l2.append(n)
return l2
r = filter(lambda i: i % 2 == 0, l1)
print(list(r))
# map()
l1 = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
r = map(lambda i: i + 1, l1)
print(r)
print(list(r))
# sort()方法
l3 = ['kkk', 'cc', 'aa', 'hhh']
l3.sort()
print(l3)
l3 = ['kkk', 'c', 'aa', 'hhhhh']
l3.sort(key=len)
print(l3)
l3 = [5, '1', 3, 8, 6]
l3.sort(key=int)
print(l3)
# 闭包
a = 20
def fn():
# 在函数内部定义一个变量
a = 10
# 在函数fn()内部定义一个函数fn2()
def fn2():
print('我是fn2', a)
# 将函数fn2()作为返回值返回
return fn2()
# r是调用fn()后返回的函数,在函数内部定义,并不是全局函数
# 因此外部无法访问到函数fn()内部的变量,r能访问到内部变量
r = fn()
r
print(a)
# 求平均值
nums = []
# 创建一个函数来计算平均值
def averager(n):
# 将n添加到列表中
nums.append(n)
# 计算平均值
return sum(nums)/len(nums)
print(averager(10))
print(averager(20))
print(averager(30))
# 平均数优化
def make_averager():
    """Return a closure that accumulates numbers and reports their running mean."""
    seen = []
    def averager(n):
        # The list lives in the enclosing scope, so it persists across calls.
        seen.append(n)
        return sum(seen) / len(seen)
    return averager
averager = make_averager()
print(averager(10))
print(averager(20))
print(averager(20))
print(averager(20))
# 装饰器引入
# 新需求:在函数执行前后增加提示
# 直接修改原函数较为麻烦且违反ocp原则
def add(a, b):
'''
求两个数的和
'''
r = a + b
return r
def mul(a , b):
'''
求两个数的乘积
'''
r = a * b
return r
# 可根据现有函数创建一个新的函数
def new_add(a, b):
print('加法开始计算~~~')
c = add(a, b)
print('加法计算完成~~~')
return c
r = new_add(10, 20)
print(r)
# 装饰器使用
def begin_end(old):
    """Decorator: announce start and end around each call to *old*.

    Args:
        old: the callable being wrapped.

    Returns:
        A wrapper that forwards any positional/keyword arguments to *old*
        and returns its result unchanged.
    """
    def wrapper(*args, **kwargs):
        print('程序开始执行')
        outcome = old(*args, **kwargs)
        print('程序执行完毕')
        return outcome
    return wrapper
f1 = begin_end(add)
f2 = begin_end(mul)
r = f1(2, 3)
p = f2(5, 6)
print(r)
print(p)
def fn3(old):
'''
用来对其他函数进行扩展
参数:
old 要扩展的函数对象
'''
# 创建一个新的函数,扩展被调用的函数
# 参数采用不定长参数,可以根据被调用函数的参数数量自动接收
def new_function(*args, **kwargs):
print('这里是fn3装饰器~~~')
# 调用被扩展的函数
result = old(*args, **kwargs)
print('这里是fn3装饰器~~~')
# 返回函数的执行结果
return result
# 返回新函数
return new_function
@begin_end
@fn3
def say_helllo():
print('hello')
say_helllo()
# 类与对象
# 类
class MyClass:
pass
print(MyClass)
# 使用类来创建对象,就像调用一个函数
mc1 = MyClass() # mc就是通过MyClass创建的对象,也是MyClass的实例
mc2 = MyClass()
mc3 = MyClass()
print(mc1, type(mc1))
# 对象创建流程
class MyClass:
pass
mc = MyClass()
mc.name = '孙悟空' # 对象属性赋值为“孙悟空”
print(mc.name) # 输出对象属性与变量类似
# 类的定义
class Person:
# 在类的代码块中,可以定义变量和函数
# 在类中所定义的变量,将会成为所有实例的公共属性,所有实例都可以访问这些变量
name = '孙悟空' # 公共属性,所有实例都可以访问
# 在类中定义的函数称为方法,这些方法可以通过该类所有实例来访问
def say_hello(a):
print('你好')
# 创建Person类的实例
p1 = Person()
p2 = Person()
print(p1.name)
print(p2.name)
# 方法和函数调用的区别:
# 如果是函数调用,则调用时传几个参数,就会由几个实参;
# 但如果是方法调用,默认传递一个参数,所以方法中至少要定义一个形参。
p1.say_hello()
p2.say_hello()
# 属性和方法
class Person:
name = '孙悟空'
def say_hello(a):
print('你好')
p1 = Person()
p2 = Person()
print(p1.name) # 孙悟空
# 修改p1的name属性
p1.name = '猪八戒'
# 实例化对象p1中原来没有name属性,查找到类person中的name属性
# 修改p1name属性后,在对象p1的内存中添加name属性
print(p1.name) # 猪八戒
print(p2.name) # 孙悟空
# self
class Person:
name = '孙悟空'
def say_hello(self):
# say_hello()方法实现如下格式:你好,我是xxx
# 在方法中不能直接调用类中的属性如:print('你好,我是%s'% name)
# 第一个参数就是调用方法的对象本身
# 如果是p1调用,则第一个参数就是p1对象
# 如果是p2调用,则第一个参数就是p2对象
# 一般将这个参数命名为self。
print('你好,我是%s' % self.name)
p1 = Person()
p2 = Person()
p1.name = '孙悟空'
p2.name = '猪八戒'
p1.say_hello() # 你好,我是孙悟空
p2.say_hello() # 你好,我是猪八戒
# 对象初始化
class Person:
def say_hello(self):
print('你好,我是%s' % self.name)
# 目前来讲,对于Person类来说name属性是必须的,而且每个对象的name属性都是不同的
# 而现在是定义对象之后,手动将name属性添加到对象中,这种方式容易被忽略或出现错误
# 我们希望在创建对象时,必须设置name属性,如果不设置对象将无法创建
# 属性的创建应该是自动完成的,而不是创建对象后手动添加
def __init__(self, name):
# 通过self向新建的对象中初始化属性
# 每调用一次init方法就会复制实例化对象一个name属性
self.name = name
# 调用一个Person相当于调用init,传参到init中
p1 = Person('孙悟空')
p2 = Person('猪八戒')
p1.say_hello()
p2.say_hello()
# 练习:自定义一个表示狗的类(Dog)
# 属性:name,age,gender,height
# 方法:call(),bite(),run()
class Dog:
def __init__(self, name, age, gender, height):
self.name = name
self.age = age
self.gender = gender
self.height = height
def call(self):
print('狗在叫')
def bite(self):
print('狗在咬')
def run(self):
print('狗在跑')
d1 = Dog('小5', 23, '男', 167)
d1.call()
d1.bite()
d1.run()
# 封装
class Dog:
def __init__(self, name):
# 没有一种方法可以完全隐藏属性,封装仅仅是将属性名设置为不常用的,防君子不防小人。
self.hidden_name = name
def say_hello(self):
print('hello, 这里是狗%s' % self.hidden_name)
def get_name(self):
'''
函数用来获取属性
'''
# 获取属性的同时进行其他操作
print('用户属性已经被获取')
return self.hidden_name
def set_name(self, name):
'''
函数用来修改属性
'''
print('用户属性已经被修改')
self.hidden_name = name
d1 = Dog('小5')
d1.say_hello()
# getter和setter方法
class Person:
def __init__(self, name):
self._name = name
# getter方法装饰器
@property
def name(self):
print('getter方法执行了')
return self._name
# setter方法装饰器: @属性名(???).setter
# 属性名还是getter的方法名
@name.setter
def set_name(self, name):
print('setter方法执行了')
self._name = name
# 此处可将方法像属性一样调用: 实例化对象.方法
p1 = Person('孙悟空')
p1.set_name = '猪八戒'
print(p1.name)
# 继承
# 定义一个类Animal,这个类需要两个方法:run() sleep()、
class Animal:
def run(self):
print('动物会跑~~~')
def sleep(self):
print('动物会睡觉~~~')
# 定义一个类Dog,这个类需要三个方法:run() sleep() bark()
# 有一个类能实现大部分功能,但是不能实现全部功能
# 如何让这个类实现全部功能?
# 1.直接修改这个类,在这个类中添加需要的功能 --修改麻烦并且违反OCP原则
# 2.直接创建一个新的类 --创建比较麻烦,需要复制粘贴,会出现大量的重复性代码
# 3.直接从Animal类中继承属性和方法
class Dog(Animal):
def bark(self):
print('狗会嚎叫~~~')
d = Dog()
d.run()
d.sleep()
d.bark()
# isinstance检查一个对象是否一个类的实例,如果这个类是这个对象的父类,也会返回True
print(isinstance(d, Dog))
print(isinstance(d, Animal))
# 所有的对象都是object的实例
print(isinstance(d, object))
# 检查一个类是否为一个类的子类
print(issubclass(Dog, Animal))
print(issubclass(Dog, object))
print(issubclass(Animal, object))
print(issubclass(print, object))
# 方法的重写
class A(object):
def AA(self):
print('AAA')
class B(A):
def AA(self):
print('bbb')
class C(B):
def AA(self):
print('ccc')
c = C()
c.AA()
# super()
class Animal:
def __init__(self, name):
self._name = name
@property
def name(self):
return self._name
@name.setter
def name(self, name):
self._name = name
def run(self):
print('动物会跑~~~')
def sleep(self):
print('动物会睡觉~~~')
# 父类中的所有方法都会被子类继承,包括特殊方法,也可以重写特殊方法。
class Dog(Animal):
def __init__(self, name, age):
# 希望可以直接调用父类的__init__来初始化父类中定义的属性
super().__init__(name)
self._age = age
@property
def age(self):
return self._age
@age.setter
def age(self, age):
self._age = age
d = Dog('小5', 23)
print(d.name)
print(d.age)
# 多重继承
class A(object):
def test(self):
print('AAA')
class B(object):
def test2(self):
print('BBB')
class C(A, B):
pass
c = C()
c.test()
c.test2()
print(A.__bases__)
print(B.__bases__)
print(C.__bases__)
class A(object):
def test(self):
print('AAA')
class B(object):
def test2(self):
print('BBB')
class C(A, B):
pass
c = C()
c.test()
# 多重继承的复杂性
class A(object):
def test(self):
print('这是A的test方法')
class B(object):
def test(self):
print('这是B的test方法')
class C(A, B):
pass
c = C()
c.test()
# 多态
class A:
def __init__(self, name):
self._name = name
@property
def name(self):
return self._name
@name.setter
def name(self, name):
self._name = name
class B:
def __init__(self, name):
self._name = name
@property
def name(self):
return self._name
@name.setter
def name(self, name):
self._name = name
class C:
pass
a = A('孙悟空')
b = B('猪八戒')
# 对于函数say_hello()来说,只要对象中含有name属性,就可以作为参数传递
# 这个函数不会考虑对象的类型,只要有name属性即可
def say_hello(obj):
print('hello,我是%s' % obj.name)
say_hello(a)
say_hello(b)
# 在say_hello2()中做了一个类型检查,也就是只有obj是A类型的对象时,才可以正常使用
# 其他类型的对象都无法使用该函数,这个函数就违反了多态
# 违反了多态的函数,只适用于一种类型的对象,无法处理其他类型对象,这样导致函数的适应性非常差
def say_hello2(obj):
# 类型检查
# 注意:像isinstance()这种函数在开发中一般不会使用,因为这意味着函数可能违反了多态
if isinstance(obj, A):
print('hello,我是%s' % obj.name)
else:
print('此类型对象无法使用该函数')
say_hello2(a)
say_hello2(b)
# 类和方法总结
# 定义一个类
class A:
# 类属性,直接在类中定义的属性是类属性
# 类属性可以通过类或类的实例化对象访问
# 但类属性只能通过类对象修改,无法通过实例化对象修改
count = 0
a = A()
print(a.count)
print(A.count)
a.count = 100
A.count = 10
print(a.count)
print(A.count)
class B:
# 实例属性,通过实例化对象添加的属性属于实例属性
# 实例属性只能通过实例对象来访问和修改,类对象无法访问修改
def __init__(self):
self.name = '孙悟空'
b = B()
# print('B,', B.name) # 报错
print('b:', b.name)
class C:
# 实例方法:在类中定义,以第一个参数的方法都是实例方法
# 实例方法在调用时,Python会将调用对象作为self传入
# 实例方法可以通过实例和类调用。当通过实例调用时,会自动将当前对象作为self传入
# 当通过类调用时,不会自动传递self,此时需要手动传递self
def test(self):
print('这是test方法~~~')
c = C()
c.test()
# 类调用方法时,需要手动传入实例化对象
C.test(c) # 等价于c.test()
# 类方法
class D:
@classmethod
def test(cls):
print('这是一个类方法~~~')
print('类方法', cls)
d = D()
D.test()
d.test()
# 静态方法
class E:
@staticmethod
def test():
print('这是一个静态方法~~~')
e = E()
E.test()
e.test()
"""
| 15.229515 | 105 | 0.556342 | true | true | |
1c2fd423183c48e273f5b744cb621c5dd8b65e84 | 1,751 | py | Python | NasAssesment/manage.py | siddshadab/Django_Assesment | a06ebf73bccd4e83b78391a1f70792cb5979ba8e | [
"MIT"
] | null | null | null | NasAssesment/manage.py | siddshadab/Django_Assesment | a06ebf73bccd4e83b78391a1f70792cb5979ba8e | [
"MIT"
] | null | null | null | NasAssesment/manage.py | siddshadab/Django_Assesment | a06ebf73bccd4e83b78391a1f70792cb5979ba8e | [
"MIT"
] | null | null | null | #!/usr/bin/env python
"""Django's command-line utility for administrative tasks."""
import os
import sys
import sqlite3
from sqlite3 import Error
import environ
from NasAssesment.settings import BASE_DIR
def create_connection(db_file):
    """Open a SQLite connection to *db_file*.

    Returns the Connection on success; on failure prints the error and
    returns None (matching the original best-effort behaviour).
    """
    try:
        return sqlite3.connect(db_file)
    except Error as exc:
        print(exc)
        return None
def update_task(conn, id):
    """Insert a row with the given id into restApi_slotMaster if absent.

    Args:
        conn: open sqlite3 connection.
        id: integer slot id to seed.

    The original built the SQL by string concatenation and printed debug
    output; this uses a parameterized query instead (safe against injection
    and correctly typed) and commits the idempotent insert.
    """
    sql = (
        "INSERT INTO restApi_slotMaster (id) "
        "SELECT ? WHERE NOT EXISTS "
        "(SELECT 1 FROM restApi_slotMaster WHERE id = ?)"
    )
    cur = conn.cursor()
    cur.execute(sql, (id, id))
    conn.commit()
def main():
    """Run administrative tasks.

    On startup, seed restApi_slotMaster with SLOT_NUMBER rows (idempotent
    inserts via update_task) before handing control to Django's CLI.
    """
    database = BASE_DIR / 'db.sqlite3'
    print(database)
    os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'NasAssesment.settings')
    conn = create_connection(database)
    # Slot count comes from the .env file; seeding is best-effort on the
    # first server run.
    env = environ.Env()
    env.read_env(env.str('BASE_DIR', '.env'))
    SLOT_NUMBER = env('SLOT_NUMBER')
    try:
        with conn:
            for x in range(int(SLOT_NUMBER)):
                update_task(conn, x + 1)
    except Exception as exc:
        # Bug fix: was a bare `except:`, which also swallowed SystemExit and
        # KeyboardInterrupt; narrowed and the cause is now reported.
        print("An exception occurred On first Time Server Run", exc)
    try:
        from django.core.management import execute_from_command_line
    except ImportError as exc:
        raise ImportError(
            "Couldn't import Django. Are you sure it's installed and "
            "available on your PYTHONPATH environment variable? Did you "
            "forget to activate a virtual environment?"
        ) from exc
    execute_from_command_line(sys.argv)
if __name__ == '__main__':
main()
| 28.241935 | 144 | 0.632781 |
import os
import sys
import sqlite3
from sqlite3 import Error
import environ
from NasAssesment.settings import BASE_DIR
def create_connection(db_file):
conn = None
try:
conn = sqlite3.connect(db_file)
except Error as e:
print(e)
return conn
def update_task(conn,id):
sql = 'INSERT INTO restApi_slotMaster (id) SELECT ' +str(id) + ' WHERE NOT EXISTS (SELECT * FROM restApi_slotMaster WHERE id ='+str(id)+');'
print(id)
print(sql)
cur = conn.cursor()
cur.execute(sql)
conn.commit()
def main():
database = BASE_DIR / 'db.sqlite3'
print(database)
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'NasAssesment.settings')
conn = create_connection(database)
env = environ.Env()
env.read_env(env.str('BASE_DIR', '.env'))
SLOT_NUMBER = env('SLOT_NUMBER')
try:
with conn:
for x in range(int(SLOT_NUMBER)):
update_task(conn,x + 1)
except:
print("An exception occurred On first Time Server Run")
try:
from django.core.management import execute_from_command_line
except ImportError as exc:
raise ImportError(
"Couldn't import Django. Are you sure it's installed and "
"available on your PYTHONPATH environment variable? Did you "
"forget to activate a virtual environment?"
) from exc
execute_from_command_line(sys.argv)
if __name__ == '__main__':
main()
| true | true |
1c2fd450d2aa0c4510a90e6e5a7df1a0957949f4 | 1,575 | py | Python | tests/create_images.py | neurodata/ndex | c4d84e3be16de1ff53028d3bb1efd770790759af | [
"Apache-2.0"
] | 4 | 2018-12-03T14:08:35.000Z | 2020-07-24T06:19:10.000Z | tests/create_images.py | neurodata/ndex | c4d84e3be16de1ff53028d3bb1efd770790759af | [
"Apache-2.0"
] | null | null | null | tests/create_images.py | neurodata/ndex | c4d84e3be16de1ff53028d3bb1efd770790759af | [
"Apache-2.0"
] | null | null | null | import math
import os
import numpy as np
import png
import tifffile as tiff
def create_img_file(x_size, y_size, dtype, file_format, img_fname, intensity_range=None):
    """Write a random greyscale test image of x_size by y_size pixels.

    Args:
        x_size, y_size: image width/height in pixels.
        dtype: numpy integer dtype name, e.g. 'uint16'; its bit width bounds
            the random intensities unless intensity_range is given.
        file_format: 'tif' or 'png' (any other value writes nothing).
        img_fname: output path; missing parent directories are created.
        intensity_range: optional intensity bound; its log2 becomes the bit
            depth used for both the random data and the PNG writer.
    """
    if intensity_range is None:
        # Derive bit depth from the digits in the dtype name ('uint16' -> 16).
        bit_width = int(''.join(filter(str.isdigit, dtype)))
    else:
        bit_width = round(math.log(intensity_range, 2))
    ar = np.random.randint(
        1, 2**bit_width, size=(y_size, x_size), dtype=dtype)
    directory = os.path.dirname(img_fname)
    # Bug fix: for a bare filename dirname() returns '' and os.makedirs('')
    # raises FileNotFoundError, so only create the directory when non-empty.
    if directory and not os.path.isdir(directory):
        os.makedirs(directory)
    if file_format == 'tif':
        tiff.imsave(img_fname, ar)
    elif file_format == 'png':
        with open(img_fname, 'wb') as f:
            writer = png.Writer(width=x_size, height=y_size,
                                bitdepth=bit_width, greyscale=True)
            writer.write(f, ar.tolist())
def gen_images(ingest_job, intensity_range=None):
    """Generate one test image per z-slice described by *ingest_job*.

    Falls back to the job's x/y/z extents when img_size is not set.
    """
    z_start, z_stop = ingest_job.z_range[0], ingest_job.z_range[1]
    for z in range(z_start, z_stop, ingest_job.z_step):
        fname = ingest_job.get_img_fname(z)
        size = ingest_job.img_size
        if size is None:
            size = [ingest_job.x_extent[1],
                    ingest_job.y_extent[1],
                    ingest_job.z_extent[1]]
        create_img_file(size[0], size[1], ingest_job.datatype,
                        ingest_job.extension, fname, intensity_range)
def del_test_images(ingest_job):
    """Delete every generated test image in the job's z range."""
    z_start, z_stop = ingest_job.z_range[0], ingest_job.z_range[1]
    for z in range(z_start, z_stop, ingest_job.z_step):
        os.remove(ingest_job.get_img_fname(z))
| 34.23913 | 89 | 0.641905 | import math
import os
import numpy as np
import png
import tifffile as tiff
def create_img_file(x_size, y_size, dtype, file_format, img_fname, intensity_range=None):
if intensity_range is None:
bit_width = int(''.join(filter(str.isdigit, dtype)))
else:
bit_width = round(math.log(intensity_range, 2))
ar = np.random.randint(
1, 2**bit_width, size=(y_size, x_size), dtype=dtype)
directory = os.path.dirname(img_fname)
if not os.path.isdir(directory):
os.makedirs(directory)
if file_format == 'tif':
tiff.imsave(img_fname, ar)
elif file_format == 'png':
with open(img_fname, 'wb') as f:
writer = png.Writer(width=x_size, height=y_size,
bitdepth=bit_width, greyscale=True)
writer.write(f, ar.tolist())
def gen_images(ingest_job, intensity_range=None):
for z in range(ingest_job.z_range[0], ingest_job.z_range[1], ingest_job.z_step):
img_fname = ingest_job.get_img_fname(z)
img_size = ingest_job.img_size
if img_size is None:
img_size = [ingest_job.x_extent[1],
ingest_job.y_extent[1],
ingest_job.z_extent[1]]
create_img_file(img_size[0], img_size[1], ingest_job.datatype,
ingest_job.extension, img_fname, intensity_range)
def del_test_images(ingest_job):
for z in range(ingest_job.z_range[0], ingest_job.z_range[1], ingest_job.z_step):
img_fname = ingest_job.get_img_fname(z)
os.remove(img_fname)
| true | true |
1c2fd45f0ec11984bef0ea3fcbfb99c970afdb9e | 11,684 | py | Python | client/modules/Calendar.py | archeltaneka/jasper-finalproject | 88151554f0ced1e7e8c592584ccfe1b79493b71f | [
"MIT"
] | 3 | 2019-05-29T15:21:53.000Z | 2022-01-19T12:48:47.000Z | client/modules/Calendar.py | archeltaneka/jasper-finalproject | 88151554f0ced1e7e8c592584ccfe1b79493b71f | [
"MIT"
] | null | null | null | client/modules/Calendar.py | archeltaneka/jasper-finalproject | 88151554f0ced1e7e8c592584ccfe1b79493b71f | [
"MIT"
] | 2 | 2018-09-24T12:54:38.000Z | 2018-10-02T15:04:39.000Z | import httplib2
import sys
import datetime
import re
import gflags
import calendar
import jasperpath
import logging
import requests
from client.app_utils import getTimezone
from dateutil import tz
from dateutil import parser
from dateutil.relativedelta import relativedelta
from apiclient.discovery import build
from oauth2client.file import Storage
from oauth2client.client import AccessTokenRefreshError
from oauth2client.client import OAuth2WebServerFlow
from oauth2client.tools import *
logging.getLogger('googleapiclient.discovery_cache').setLevel(logging.ERROR)
# Written by Marc Poul Joseph Laventure
FLAGS = gflags.FLAGS
WORDS = [ "Calendar", "Events", "Check", "My" ]
# The scope URL for read/write access to a user's calendar data
scope = 'https://www.googleapis.com/auth/calendar'
if bool(re.search('--noauth_local_webserver', str(sys.argv), re.IGNORECASE)):
argv = FLAGS(sys.argv[1])
def convertDateToGoogleStr(timezone, d):
    """Convert naive local datetime *d* to a UTC ISO-8601 string for the API.

    NOTE(review): localize()/normalize() are pytz-style timezone methods, so
    *timezone* is presumably a pytz timezone from getTimezone() — confirm.
    """
    dateStr = timezone.normalize(timezone.localize(d)).astimezone(tz.tzutc()).isoformat('T')
    return dateStr
def getStartOfDay( dayOfInterest ):
    """Midnight (00:00) on the given day, as a naive datetime."""
    y, m, d = dayOfInterest.year, dayOfInterest.month, dayOfInterest.day
    return datetime.datetime(y, m, d)
def getEndOfDay(dayOfInterest):
    """23:59 on the given day (one minute before the next midnight)."""
    midnight = datetime.datetime(dayOfInterest.year, dayOfInterest.month,
                                 dayOfInterest.day)
    return midnight + datetime.timedelta(hours=23, minutes=59)
def convertGoogleDateStr( dateStr, tz ):
    """Parse an RFC3339 timestamp from the Calendar API and convert it to *tz*.

    Uses dateutil's parser; *tz* must be a tzinfo acceptable to astimezone().
    """
    date = parser.parse(dateStr)
    return date.astimezone( tz )
def addEvent(profile, mic, service):
    """Interactively add a calendar event from dictated text.

    Uses the Calendar API's quickAdd natural-language parser, reads the
    created event back to the user, and deletes it if they reject it.
    On KeyError (quickAdd response missing fields, e.g. network/parse
    failure) the user is offered a retry; answering "No" exits the loop.
    """
    while True:
        try:
            mic.say("What would you like to add?")
            eventData = mic.activeListen()
            createdEvent = service.events().quickAdd(calendarId='primary', text=eventData).execute()
            mic.say("Added event " + createdEvent['summary'] + " on " + getReadableDateFromEvent(createdEvent, getTimezone(profile)) +
            " " + getReadableTimeFromEvent(createdEvent, getTimezone(profile)))
            # Capture the spoken summary/date/time for the POST payload below.
            ev = createdEvent['summary']
            dt = getReadableDateFromEvent(createdEvent, getTimezone(profile))
            tm = getReadableTimeFromEvent(createdEvent, getTimezone(profile))
            mic.say("Is this what you wanted?")
            if bool(re.search(r'\bYes\b', mic.activeListen(), re.IGNORECASE)):
                mic.say("Okay, it's on your calendar")
                # NOTE(review): user_id and the server IP are hardcoded and
                # the request result `r` is unchecked — confirm intent.
                payload = {'user_id':3, 'event':ev, 'date':dt, 'time':tm}
                r = requests.post("http://178.128.62.29/api/schedule/createNew", params=payload)
            else:
                # Rejected: remove the just-created event again.
                mic.say("My mistake, english is my second language.")
                service.events().delete(calendarId='primary', eventId=createdEvent['id']).execute()
            return
        except KeyError:
            mic.say("Could not add event to your calender; check if internet issue.")
            mic.say("Would you like to attempt again?")
            responseRedo = mic.activeListen()
            if bool(re.search(r'\bNo\b', responseRedo, re.IGNORECASE)):
                return
# Gets all events today.
def getEventsToday(profile, mic, service):
    """Speak all of today's events in the user's configured timezone."""
    tz = getTimezone(profile)
    d = datetime.datetime.now(tz=tz)
    getEventsOn(d, tz, mic, "today", service)
# Gets all events tomorrow.
def getEventsTomorrow(profile, mic, service):
    """Speak all of tomorrow's events in the user's configured timezone."""
    tz = getTimezone(profile)
    d = datetime.datetime.now(tz=tz) + datetime.timedelta(days=1)
    getEventsOn(d, tz, mic, "tomorrow", service)
# Gets all events on the provided next day of week (Monday, Tuesday, etc.).
def getEventsOnNextDayOfWeek(profile, mic, dayOfWeekStr, service ):
    """Speak the events on the next occurrence of the named weekday.

    Args:
        profile: user profile (provides the timezone).
        mic: speech interface.
        dayOfWeekStr: English weekday name, e.g. "Monday".
        service: authorized Calendar API service.
    """
    tz = getTimezone(profile)
    d = datetime.datetime.now(tz=tz)
    dayOfWeek = list(calendar.day_name).index(dayOfWeekStr)
    # Days until the next occurrence of the target weekday; 0 would mean
    # "today", which this feature treats as next week, so map it to 7.
    # Bug fix: the original used `7 - dayOfWeek` when the target weekday was
    # earlier in the week than today, which landed on the wrong day
    # (e.g. "Monday" asked on a Wednesday jumped 7 days to next Wednesday).
    days_ahead = (dayOfWeek - d.weekday()) % 7
    if days_ahead == 0:
        days_ahead = 7
    timediff = datetime.timedelta(days=days_ahead)
    getEventsOn(d+timediff, tz, mic, "next " + dayOfWeekStr, service)
# Gets all events on the provided day.
def getEventsOn( day, tz, mic, keyword, service ):
    """Speak every event scheduled on *day*; *keyword* names the day aloud
    (e.g. "today", "tomorrow", "next Monday")."""
    events = queryEvents(convertDateToGoogleStr(tz, getStartOfDay(day)), convertDateToGoogleStr(tz, getEndOfDay(day)), service)
    if(len(events) == 0):
        mic.say( "You have no events scheduled for " + keyword )
        return
    # Prefix every event after the first with "and ".
    sep=""
    for event in events:
        eventTitle = getSummaryFromEvent(event)
        mic.say( sep + eventTitle + getReadableTimeFromEvent(event,tz) )
        sep = "and "
# Gets all events in the next month whose text matches the keywords.
def getEventsBySummary( profile, mic, keyWords, service ):
    """Search the next month of the calendar for *keyWords* and speak matches."""
    tz = getTimezone(profile)
    today = getStartOfDay(datetime.datetime.now(tz=tz))
    oneMonthFromToday = today + relativedelta(months=1)
    events = queryEvents(convertDateToGoogleStr(tz, today), convertDateToGoogleStr(tz, oneMonthFromToday), service, keyWords)
    if len(events) == 0:
        mic.say("You don't have any events like that")
        return
    # Prefix every match after the first with "and".
    sep=""
    for event in events:
        eventTitle = getSummaryFromEvent(event)
        mic.say( sep + " on " + getReadableDateFromEvent(event, tz) + " " + eventTitle + getReadableTimeFromEvent(event, tz) )
        sep="and"
#returns a readable title from Google event
def getSummaryFromEvent(event):
    """Return the event's title as a string, or "An Event" when untitled."""
    try:
        return str(event['summary'])
    except KeyError:
        return "An Event"
#returns a readable date phrase from Google event
def getReadableDateFromEvent(event, tz):
    """Speakable event date: " next <weekday>" within a week, else "Month D"."""
    start = event['start']
    if "dateTime" in start:
        # Timed event: RFC3339 timestamp.
        date = convertGoogleDateStr(start['dateTime'], tz)
    else:
        # All-day event: plain YYYY-MM-DD date string.
        year, month, day = (int(part) for part in start['date'].split("-"))
        date = datetime.datetime(year=year, month=month, day=day, tzinfo=tz)
    # Within a week of now: speak the weekday name.
    if (date - datetime.datetime.now(tz=tz)).days <= 7:
        return " next " + calendar.day_name[date.weekday()]
    # Otherwise: "Month DayNumber".
    return calendar.month_name[date.month] + " " + str(date.day)
#returns a readable time phrase from Google event
def getReadableTimeFromEvent(event, tz):
    """Speakable start time (" at H:MM am/pm") or " all day" for date-only events.

    Minutes below 10 are spoken as "OH <m>" and :00 is omitted entirely,
    matching the original speech style.
    """
    start = event['start']
    if "dateTime" not in start:
        # Date-only events carry no time component.
        return " all day"
    date = convertGoogleDateStr(start['dateTime'], tz)
    # Bug fix: proper 12-hour conversion. The original tested `hour - 12 > 0`,
    # which labelled noon (12:xx) as "am" and spoke midnight as hour 0.
    appendingTime = "pm" if date.hour >= 12 else "am"
    startHour = date.hour % 12
    if startHour == 0:
        startHour = 12
    if date.minute == 0:
        startMinute = ""
    elif date.minute < 10:
        startMinute = " OH " + str(date.minute)
    else:
        startMinute = ":" + str(date.minute)
    return " at " + str(startHour) + startMinute + " " + appendingTime
# Queries Google events; start and end must already be RFC3339 strings.
def queryEvents(start, end, service, keyWords=None, ):
    """Page through primary-calendar events between *start* and *end*.

    When *keyWords* is truthy it is passed as the API's free-text query.
    Returns the accumulated list of event resources across all pages.
    """
    gathered = []
    token = None
    while True:
        params = dict(calendarId='primary', pageToken=token, timeMin=start,
                      timeMax=end, singleEvents=True, orderBy="startTime")
        if keyWords:
            params['q'] = keyWords
        page = service.events().list(**params).execute()
        gathered.extend(page['items'])
        token = page.get('nextPageToken')
        if not token:
            return gathered
def handle(text, mic, profile, recursive=False):
    """Dispatch a spoken calendar command.

    Args:
        text: transcribed command text.
        mic: speech interface providing say() and activeListen().
        profile: user profile (timezone, Google credentials).
        recursive: True when invoked from the re-prompt path below.
    """
    print ("****")
    if not text and recursive:
        mic.say("Okay nevermind then")
        # Bug fix: the original fell through here, so an empty re-prompt said
        # "Okay nevermind then" twice (and crashed on None text in re.search).
        return
    if bool(re.search(r'\b(Add|Create|Set)\b', text, re.IGNORECASE)):
        addEvent(profile, mic, getService(profile))
    elif bool(re.search(r'\bToday\b', text, re.IGNORECASE)):
        getEventsToday(profile, mic, getService(profile))
    elif bool(re.search(r'\bTomorrow\b', text, re.IGNORECASE)):
        getEventsTomorrow(profile, mic, getService(profile))
    elif bool(re.search(r'\b(Monday|Tuesday|Wednesday|Thursday|Friday|Saturday|Sunday)\b', text, re.IGNORECASE)):
        # Find which weekday was mentioned and report that day's events.
        for day in list(calendar.day_name):
            if re.search(r'\b%s\b' % day, text, re.IGNORECASE):
                getEventsOnNextDayOfWeek(profile, mic, day, getService(profile))
                break
    elif bool(re.search(r'\b(Search)\b', text, re.IGNORECASE)):
        # "search calendar for X" carries the query inline; otherwise ask.
        if bool(re.search(r'\b(calendar for)\b', text, re.IGNORECASE)):
            text = str(text).lower().replace("search calendar for", "")
            if len(str.strip(text)) > 0:
                mic.say("I am searching for " + text)
                getEventsBySummary(profile, mic, text, getService(profile))
                return
        mic.say("What events would you like to search for?")
        getEventsBySummary(profile, mic, mic.activeListen(), getService(profile))
    elif not recursive:
        # Nothing matched on the first pass: re-prompt once.
        mic.say("Did you want to do something with your calendar?")
        handle(mic.activeListen(), mic, profile, True)
    else:
        mic.say("Okay nevermind then")
def getService(profile):
    """Build an authorized Google Calendar v3 service for this user.

    Runs the OAuth2 installed-app flow when no valid credentials exist and
    caches them under the Jasper config directory, so only the first call
    requires browser interaction.

    Args:
        profile: Jasper profile dict with google_calendar id/secret.
    """
    print ("TESTTEST")  # NOTE(review): leftover debug output
    client_id = profile["google_calendar"]["id"]
    client_secret = profile["google_calendar"]["secret"]
    print ("TEST")  # NOTE(review): leftover debug output
    # Flow object holds client_id, client_secret and scope, and drives the
    # OAuth 2.0 steps to obtain user authorization and credentials.
    flow = OAuth2WebServerFlow(client_id, client_secret, scope)
    # Storage persists the single user's credentials; the file is created
    # on first use if it does not exist.
    print( jasperpath.config('calendar/credentials.dat') )
    storage = Storage(jasperpath.config('calendar/credentials.dat'))
    # get() returns the stored credentials, or None when none were found.
    credentials = storage.get()
    # Missing or expired credentials: run the browser-based authorization
    # flow; run_flow() also writes the new credentials back into `storage`.
    if credentials is None or credentials.invalid:
        credentials = run_flow(flow, storage)
    # Authorize an Http object so subsequent API calls carry the token.
    http = httplib2.Http()
    http = credentials.authorize(http)
    # build('calendar', 'v3', ...) returns the Calendar API service object
    # whose methods issue the actual API calls over the authorized http.
    return build('calendar', 'v3', http=http)
def isValid(text):
    """True when the utterance mentions the word "calendar" (any case)."""
    return re.search(r'\bCalendar\b', text, re.IGNORECASE) is not None
| 43.114391 | 176 | 0.676566 | import httplib2
import sys
import datetime
import re
import gflags
import calendar
import jasperpath
import logging
import requests
from client.app_utils import getTimezone
from dateutil import tz
from dateutil import parser
from dateutil.relativedelta import relativedelta
from apiclient.discovery import build
from oauth2client.file import Storage
from oauth2client.client import AccessTokenRefreshError
from oauth2client.client import OAuth2WebServerFlow
from oauth2client.tools import *
logging.getLogger('googleapiclient.discovery_cache').setLevel(logging.ERROR)
FLAGS = gflags.FLAGS
WORDS = [ "Calendar", "Events", "Check", "My" ]
scope = 'https://www.googleapis.com/auth/calendar'
if bool(re.search('--noauth_local_webserver', str(sys.argv), re.IGNORECASE)):
argv = FLAGS(sys.argv[1])
def convertDateToGoogleStr(timezone, d):
dateStr = timezone.normalize(timezone.localize(d)).astimezone(tz.tzutc()).isoformat('T')
return dateStr
def getStartOfDay( dayOfInterest ):
return datetime.datetime(dayOfInterest.year, dayOfInterest.month, dayOfInterest.day )
def getEndOfDay(dayOfInterest):
return getStartOfDay(dayOfInterest) + datetime.timedelta(days=1, minutes=-1 )
def convertGoogleDateStr( dateStr, tz ):
date = parser.parse(dateStr)
return date.astimezone( tz )
def addEvent(profile, mic, service):
while True:
try:
mic.say("What would you like to add?")
eventData = mic.activeListen()
createdEvent = service.events().quickAdd(calendarId='primary', text=eventData).execute()
mic.say("Added event " + createdEvent['summary'] + " on " + getReadableDateFromEvent(createdEvent, getTimezone(profile)) +
" " + getReadableTimeFromEvent(createdEvent, getTimezone(profile)))
# Create a variable for POST
ev = createdEvent['summary']
dt = getReadableDateFromEvent(createdEvent, getTimezone(profile))
tm = getReadableTimeFromEvent(createdEvent, getTimezone(profile))
mic.say("Is this what you wanted?")
if bool(re.search(r'\bYes\b', mic.activeListen(), re.IGNORECASE)):
mic.say("Okay, it's on your calendar")
payload = {'user_id':3, 'event':ev, 'date':dt, 'time':tm}
r = requests.post("http://178.128.62.29/api/schedule/createNew", params=payload)
else:
mic.say("My mistake, english is my second language.")
service.events().delete(calendarId='primary', eventId=createdEvent['id']).execute()
return
except KeyError:
mic.say("Could not add event to your calender; check if internet issue.")
mic.say("Would you like to attempt again?")
responseRedo = mic.activeListen()
if bool(re.search(r'\bNo\b', responseRedo, re.IGNORECASE)):
return
def getEventsToday(profile, mic, service):
tz = getTimezone(profile)
d = datetime.datetime.now(tz=tz)
getEventsOn(d, tz, mic, "today", service)
def getEventsTomorrow(profile, mic, service):
tz = getTimezone(profile)
d = datetime.datetime.now(tz=tz) + datetime.timedelta(days=1)
getEventsOn(d, tz, mic, "tomorrow", service)
def getEventsOnNextDayOfWeek(profile, mic, dayOfWeekStr, service ):
tz = getTimezone(profile)
d = datetime.datetime.now(tz=tz)
dayOfWeek = list(calendar.day_name).index(dayOfWeekStr)
if ( dayOfWeek == d.weekday() ):
timediff = datetime.timedelta(days=7)
elif ( dayOfWeek <= d.weekday() ):
timediff = datetime.timedelta(days=(7-dayOfWeek))
else:
timediff = datetime.timedelta(days=(dayOfWeek-d.weekday()))
getEventsOn(d+timediff, tz, mic, "next " + dayOfWeekStr, service)
def getEventsOn( day, tz, mic, keyword, service ):
events = queryEvents(convertDateToGoogleStr(tz, getStartOfDay(day)), convertDateToGoogleStr(tz, getEndOfDay(day)), service)
if(len(events) == 0):
mic.say( "You have no events scheduled for " + keyword )
return
sep=""
for event in events:
eventTitle = getSummaryFromEvent(event)
mic.say( sep + eventTitle + getReadableTimeFromEvent(event,tz) )
sep = "and "
def getEventsBySummary( profile, mic, keyWords, service ):
tz = getTimezone(profile)
today = getStartOfDay(datetime.datetime.now(tz=tz))
oneMonthFromToday = today + relativedelta(months=1)
events = queryEvents(convertDateToGoogleStr(tz, today), convertDateToGoogleStr(tz, oneMonthFromToday), service, keyWords)
if len(events) == 0:
mic.say("You don't have any events like that")
return
sep=""
for event in events:
eventTitle = getSummaryFromEvent(event)
mic.say( sep + " on " + getReadableDateFromEvent(event, tz) + " " + eventTitle + getReadableTimeFromEvent(event, tz) )
sep="and"
#returns a readable title from Google event
def getSummaryFromEvent(event):
if 'summary' in event:
return str(event['summary'])
return "An Event"
#returns a readable date phrase from Google event
def getReadableDateFromEvent(event, tz):
eventRawStartTime = event['start']
if "dateTime" in eventRawStartTime:
date = convertGoogleDateStr(eventRawStartTime['dateTime'], tz)
else:
date = eventRawStartTime['date'].split("-")
date = datetime.datetime(year=int(date[0]), month=int(date[1]), day=int(date[2]), tzinfo=tz)
#if it's with 7 days, say the name of day
if (date - datetime.datetime.now(tz=tz)).days <= 7:
return " next " + calendar.day_name[date.weekday()]
return calendar.month_name[date.month] + " " + str(date.day)
def getReadableTimeFromEvent(event, tz):
eventRawStartTime = event['start']
if "dateTime" in eventRawStartTime:
date = convertGoogleDateStr(eventRawStartTime['dateTime'], tz)
startMinute = ":" + str(date.minute)
startHour = date.hour
appendingTime = "am"
if ((date.hour - 12) > 0 ):
startHour = date.hour - 12
appendingTime = "pm"
if date.minute == 0:
startMinute = ""
elif (date.minute < 10):
startMinute = " OH " + str(date.minute)
return " at " + str(startHour) + startMinute + " " + appendingTime
return " all day"
def queryEvents(start, end, service, keyWords=None, ):
page_token = None
myEvents = []
while True:
if not keyWords:
events = service.events().list(calendarId='primary', pageToken=page_token, timeMin=start, timeMax=end, singleEvents=True, orderBy="startTime").execute()
else:
events = service.events().list(calendarId='primary', pageToken=page_token, timeMin=start, timeMax=end, q=keyWords, singleEvents=True, orderBy="startTime").execute()
myEvents.extend(events['items'])
page_token = events.get('nextPageToken')
if not page_token:
break
return myEvents
def handle(text, mic, profile, recursive=False):
print ("****")
if not text and recursive:
mic.say("Okay nevermind then")
if bool(re.search(r'\b(Add|Create|Set)\b', text, re.IGNORECASE)):
addEvent(profile,mic, getService(profile))
elif bool(re.search(r'\bToday\b', text, re.IGNORECASE)):
getEventsToday(profile,mic, getService(profile))
elif bool(re.search(r'\bTomorrow\b', text, re.IGNORECASE)):
getEventsTomorrow(profile,mic, getService(profile))
elif bool(re.search(r'\b(Monday|Tuesday|Wednesday|Thursday|Friday|Saturday|Sunday)\b', text, re.IGNORECASE)):
for day in list(calendar.day_name):
if ( re.search(r'\b%s\b' % day, text, re.IGNORECASE) ):
getEventsOnNextDayOfWeek(profile, mic, day, getService(profile))
break;
elif bool(re.search(r'\b(Search)\b', text, re.IGNORECASE)):
if bool(re.search(r'\b(calendar for)\b', text, re.IGNORECASE)):
text = str(text).lower().replace("search calendar for","")
if len(str.strip(text)) > 0:
mic.say("I am searching for " + text)
getEventsBySummary( profile, mic, text, getService(profile) )
return
mic.say("What events would you like to search for?")
getEventsBySummary( profile, mic, mic.activeListen(), getService(profile) )
elif not recursive:
mic.say("Did you want to do something with your calendar?")
handle( mic.activeListen(), mic, profile, True )
else:
mic.say("Okay nevermind then")
def getService(profile):
print ("TESTTEST")
client_id = profile["google_calendar"]["id"]
client_secret = profile["google_calendar"]["secret"]
print ("TEST")
flow = OAuth2WebServerFlow(client_id, client_secret, scope)
# credentials file is provided. If the file does not exist, it is
# created. This object can only hold credentials for a single user, so
# as-written, this script can only handle a single user.
print( jasperpath.config('calendar/credentials.dat') )
storage = Storage(jasperpath.config('calendar/credentials.dat'))
# storage = Storage('credentials.dat')
# The get() function returns the credentials for the Storage object. If no
# credentials were found, None is returned.
credentials = storage.get()
# If no credentials are found or the credentials are invalid due to
# expiration, new credentials need to be obtained from the authorization
# server. The oauth2client.tools.run_flow() function attempts to open an
# authorization server page in your default web browser. The server
# asks the user to grant your application access to the user's data.
if credentials is None or credentials.invalid:
credentials = run_flow(flow, storage)
http = httplib2.Http()
http = credentials.authorize(http)
return build('calendar', 'v3', http=http)
def isValid(text):
return bool(re.search(r'\bCalendar\b', text, re.IGNORECASE))
| true | true |
1c2fd5f5966cf7d51a1860d24e47594d7de8d44f | 6,104 | py | Python | tests/unit/modules/test_drbd.py | ifraixedes/saltstack-salt | b54becb8b43cc9b7c00b2c0bc637ac534dc62896 | [
"Apache-2.0"
] | 9,425 | 2015-01-01T05:59:24.000Z | 2022-03-31T20:44:05.000Z | tests/unit/modules/test_drbd.py | ifraixedes/saltstack-salt | b54becb8b43cc9b7c00b2c0bc637ac534dc62896 | [
"Apache-2.0"
] | 33,507 | 2015-01-01T00:19:56.000Z | 2022-03-31T23:48:20.000Z | tests/unit/modules/test_drbd.py | ifraixedes/saltstack-salt | b54becb8b43cc9b7c00b2c0bc637ac534dc62896 | [
"Apache-2.0"
] | 5,810 | 2015-01-01T19:11:45.000Z | 2022-03-31T02:37:20.000Z | """
:codeauthor: Jayesh Kariya <jayeshk@saltstack.com>
"""
import salt.modules.drbd as drbd
from tests.support.mixins import LoaderModuleMockMixin
from tests.support.mock import MagicMock, patch
from tests.support.unit import TestCase
class DrbdTestCase(TestCase, LoaderModuleMockMixin):
"""
Test cases for salt.modules.drbd
"""
def setup_loader_modules(self):
return {drbd: {}}
# 'overview' function tests: 1
def test_overview(self):
"""
Test if it shows status of the DRBD devices
"""
ret = {
"connection state": "True",
"device": "Stack",
"fs": "None",
"local disk state": "UpToDate",
"local role": "master",
"minor number": "Salt",
"mountpoint": "True",
"partner disk state": "UpToDate",
"partner role": "minion",
"percent": "888",
"remains": "666",
"total size": "50",
"used": "50",
}
mock = MagicMock(
return_value=(
"Salt:Stack True master/minion UpToDate/UpToDate True None 50 50 666 888"
)
)
with patch.dict(drbd.__salt__, {"cmd.run": mock}):
self.assertDictEqual(drbd.overview(), ret)
ret = {
"connection state": "True",
"device": "Stack",
"local disk state": "UpToDate",
"local role": "master",
"minor number": "Salt",
"partner disk state": "partner",
"partner role": "minion",
"synched": "5050",
"synchronisation: ": "syncbar",
}
mock = MagicMock(
return_value=(
"Salt:Stack True master/minion UpToDate/partner syncbar None 50 50"
)
)
with patch.dict(drbd.__salt__, {"cmd.run": mock}):
self.assertDictEqual(drbd.overview(), ret)
def test_status(self):
"""
Test if it shows status of the DRBD resources via drbdadm
"""
ret = [
{
"local role": "Primary",
"local volumes": [{"disk": "UpToDate"}],
"peer nodes": [
{
"peer volumes": [
{
"done": "96.47",
"peer-disk": "Inconsistent",
"replication": "SyncSource",
}
],
"peernode name": "opensuse-node2",
"role": "Secondary",
}
],
"resource name": "single",
}
]
mock = MagicMock(
return_value="""
single role:Primary
disk:UpToDate
opensuse-node2 role:Secondary
replication:SyncSource peer-disk:Inconsistent done:96.47
"""
)
with patch.dict(drbd.__salt__, {"cmd.run": mock}):
try: # python2
self.assertItemsEqual(drbd.status(), ret)
except AttributeError: # python3
self.assertCountEqual(drbd.status(), ret)
ret = [
{
"local role": "Primary",
"local volumes": [
{"disk": "UpToDate", "volume": "0"},
{"disk": "UpToDate", "volume": "1"},
],
"peer nodes": [
{
"peer volumes": [
{"peer-disk": "UpToDate", "volume": "0"},
{"peer-disk": "UpToDate", "volume": "1"},
],
"peernode name": "node2",
"role": "Secondary",
},
{
"peer volumes": [
{"peer-disk": "UpToDate", "volume": "0"},
{"peer-disk": "UpToDate", "volume": "1"},
],
"peernode name": "node3",
"role": "Secondary",
},
],
"resource name": "test",
},
{
"local role": "Primary",
"local volumes": [
{"disk": "UpToDate", "volume": "0"},
{"disk": "UpToDate", "volume": "1"},
],
"peer nodes": [
{
"peer volumes": [
{"peer-disk": "UpToDate", "volume": "0"},
{"peer-disk": "UpToDate", "volume": "1"},
],
"peernode name": "node2",
"role": "Secondary",
},
{
"peer volumes": [
{"peer-disk": "UpToDate", "volume": "0"},
{"peer-disk": "UpToDate", "volume": "1"},
],
"peernode name": "node3",
"role": "Secondary",
},
],
"resource name": "res",
},
]
mock = MagicMock(
return_value="""
res role:Primary
volume:0 disk:UpToDate
volume:1 disk:UpToDate
node2 role:Secondary
volume:0 peer-disk:UpToDate
volume:1 peer-disk:UpToDate
node3 role:Secondary
volume:0 peer-disk:UpToDate
volume:1 peer-disk:UpToDate
test role:Primary
volume:0 disk:UpToDate
volume:1 disk:UpToDate
node2 role:Secondary
volume:0 peer-disk:UpToDate
volume:1 peer-disk:UpToDate
node3 role:Secondary
volume:0 peer-disk:UpToDate
volume:1 peer-disk:UpToDate
"""
)
with patch.dict(drbd.__salt__, {"cmd.run": mock}):
try: # python2
self.assertItemsEqual(drbd.status(), ret)
except AttributeError: # python3
self.assertCountEqual(drbd.status(), ret)
| 31.791667 | 89 | 0.422182 |
import salt.modules.drbd as drbd
from tests.support.mixins import LoaderModuleMockMixin
from tests.support.mock import MagicMock, patch
from tests.support.unit import TestCase
class DrbdTestCase(TestCase, LoaderModuleMockMixin):
def setup_loader_modules(self):
return {drbd: {}}
def test_overview(self):
ret = {
"connection state": "True",
"device": "Stack",
"fs": "None",
"local disk state": "UpToDate",
"local role": "master",
"minor number": "Salt",
"mountpoint": "True",
"partner disk state": "UpToDate",
"partner role": "minion",
"percent": "888",
"remains": "666",
"total size": "50",
"used": "50",
}
mock = MagicMock(
return_value=(
"Salt:Stack True master/minion UpToDate/UpToDate True None 50 50 666 888"
)
)
with patch.dict(drbd.__salt__, {"cmd.run": mock}):
self.assertDictEqual(drbd.overview(), ret)
ret = {
"connection state": "True",
"device": "Stack",
"local disk state": "UpToDate",
"local role": "master",
"minor number": "Salt",
"partner disk state": "partner",
"partner role": "minion",
"synched": "5050",
"synchronisation: ": "syncbar",
}
mock = MagicMock(
return_value=(
"Salt:Stack True master/minion UpToDate/partner syncbar None 50 50"
)
)
with patch.dict(drbd.__salt__, {"cmd.run": mock}):
self.assertDictEqual(drbd.overview(), ret)
def test_status(self):
ret = [
{
"local role": "Primary",
"local volumes": [{"disk": "UpToDate"}],
"peer nodes": [
{
"peer volumes": [
{
"done": "96.47",
"peer-disk": "Inconsistent",
"replication": "SyncSource",
}
],
"peernode name": "opensuse-node2",
"role": "Secondary",
}
],
"resource name": "single",
}
]
mock = MagicMock(
return_value="""
single role:Primary
disk:UpToDate
opensuse-node2 role:Secondary
replication:SyncSource peer-disk:Inconsistent done:96.47
"""
)
with patch.dict(drbd.__salt__, {"cmd.run": mock}):
try:
self.assertItemsEqual(drbd.status(), ret)
except AttributeError:
self.assertCountEqual(drbd.status(), ret)
ret = [
{
"local role": "Primary",
"local volumes": [
{"disk": "UpToDate", "volume": "0"},
{"disk": "UpToDate", "volume": "1"},
],
"peer nodes": [
{
"peer volumes": [
{"peer-disk": "UpToDate", "volume": "0"},
{"peer-disk": "UpToDate", "volume": "1"},
],
"peernode name": "node2",
"role": "Secondary",
},
{
"peer volumes": [
{"peer-disk": "UpToDate", "volume": "0"},
{"peer-disk": "UpToDate", "volume": "1"},
],
"peernode name": "node3",
"role": "Secondary",
},
],
"resource name": "test",
},
{
"local role": "Primary",
"local volumes": [
{"disk": "UpToDate", "volume": "0"},
{"disk": "UpToDate", "volume": "1"},
],
"peer nodes": [
{
"peer volumes": [
{"peer-disk": "UpToDate", "volume": "0"},
{"peer-disk": "UpToDate", "volume": "1"},
],
"peernode name": "node2",
"role": "Secondary",
},
{
"peer volumes": [
{"peer-disk": "UpToDate", "volume": "0"},
{"peer-disk": "UpToDate", "volume": "1"},
],
"peernode name": "node3",
"role": "Secondary",
},
],
"resource name": "res",
},
]
mock = MagicMock(
return_value="""
res role:Primary
volume:0 disk:UpToDate
volume:1 disk:UpToDate
node2 role:Secondary
volume:0 peer-disk:UpToDate
volume:1 peer-disk:UpToDate
node3 role:Secondary
volume:0 peer-disk:UpToDate
volume:1 peer-disk:UpToDate
test role:Primary
volume:0 disk:UpToDate
volume:1 disk:UpToDate
node2 role:Secondary
volume:0 peer-disk:UpToDate
volume:1 peer-disk:UpToDate
node3 role:Secondary
volume:0 peer-disk:UpToDate
volume:1 peer-disk:UpToDate
"""
)
with patch.dict(drbd.__salt__, {"cmd.run": mock}):
try:
self.assertItemsEqual(drbd.status(), ret)
except AttributeError:
self.assertCountEqual(drbd.status(), ret)
| true | true |
1c2fd66b33bfdead4ee11a93556cf890ac8cb385 | 214 | py | Python | peruintercorp/peruintercorp/doctype/proyectos/test_proyectos.py | aaguirrek/pii-peruintercorp | 027d4c5f1fb79a1b16937bcf0938c4739f26b52a | [
"MIT"
] | null | null | null | peruintercorp/peruintercorp/doctype/proyectos/test_proyectos.py | aaguirrek/pii-peruintercorp | 027d4c5f1fb79a1b16937bcf0938c4739f26b52a | [
"MIT"
] | null | null | null | peruintercorp/peruintercorp/doctype/proyectos/test_proyectos.py | aaguirrek/pii-peruintercorp | 027d4c5f1fb79a1b16937bcf0938c4739f26b52a | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
# Copyright (c) 2020, Peru Intercorp and Contributors
# See license.txt
from __future__ import unicode_literals
import frappe
import unittest
class TestProyectos(unittest.TestCase):
pass
| 19.454545 | 53 | 0.771028 |
from __future__ import unicode_literals
import frappe
import unittest
class TestProyectos(unittest.TestCase):
pass
| true | true |
1c2fd86a3f1225beeae650437858e61c423f2ef8 | 1,143 | py | Python | test/pyaz/postgres/flexible_server/deploy/__init__.py | bigdatamoore/py-az-cli | 54383a4ee7cc77556f6183e74e992eec95b28e01 | [
"MIT"
] | null | null | null | test/pyaz/postgres/flexible_server/deploy/__init__.py | bigdatamoore/py-az-cli | 54383a4ee7cc77556f6183e74e992eec95b28e01 | [
"MIT"
] | 9 | 2021-09-24T16:37:24.000Z | 2021-12-24T00:39:19.000Z | test/pyaz/postgres/flexible_server/deploy/__init__.py | bigdatamoore/py-az-cli | 54383a4ee7cc77556f6183e74e992eec95b28e01 | [
"MIT"
] | null | null | null | import json, subprocess
from .... pyaz_utils import get_cli_name, get_params
def setup(resource_group, server_name, database_name, admin_user, admin_password, sql_file, repo, action_name=None, branch=None, allow_push=None):
params = get_params(locals())
command = "az postgres flexible-server deploy setup " + params
print(command)
output = subprocess.run(command, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
stdout = output.stdout.decode("utf-8")
stderr = output.stderr.decode("utf-8")
if stdout:
return json.loads(stdout)
print(stdout)
else:
raise Exception(stderr)
print(stderr)
def run(action_name, branch):
params = get_params(locals())
command = "az postgres flexible-server deploy run " + params
print(command)
output = subprocess.run(command, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
stdout = output.stdout.decode("utf-8")
stderr = output.stderr.decode("utf-8")
if stdout:
return json.loads(stdout)
print(stdout)
else:
raise Exception(stderr)
print(stderr)
| 35.71875 | 146 | 0.682415 | import json, subprocess
from .... pyaz_utils import get_cli_name, get_params
def setup(resource_group, server_name, database_name, admin_user, admin_password, sql_file, repo, action_name=None, branch=None, allow_push=None):
params = get_params(locals())
command = "az postgres flexible-server deploy setup " + params
print(command)
output = subprocess.run(command, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
stdout = output.stdout.decode("utf-8")
stderr = output.stderr.decode("utf-8")
if stdout:
return json.loads(stdout)
print(stdout)
else:
raise Exception(stderr)
print(stderr)
def run(action_name, branch):
params = get_params(locals())
command = "az postgres flexible-server deploy run " + params
print(command)
output = subprocess.run(command, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
stdout = output.stdout.decode("utf-8")
stderr = output.stderr.decode("utf-8")
if stdout:
return json.loads(stdout)
print(stdout)
else:
raise Exception(stderr)
print(stderr)
| true | true |
1c2fd86c5a584ac6d9a5926e64b58842e9791db0 | 977 | py | Python | general_itests/steps/shared_steps.py | sobolevn/paasta | 8b87e0b13816c09b3d063b6d3271e6c7627fd264 | [
"Apache-2.0"
] | 1,711 | 2015-11-10T18:04:56.000Z | 2022-03-23T08:53:16.000Z | general_itests/steps/shared_steps.py | sobolevn/paasta | 8b87e0b13816c09b3d063b6d3271e6c7627fd264 | [
"Apache-2.0"
] | 1,689 | 2015-11-10T17:59:04.000Z | 2022-03-31T20:46:46.000Z | general_itests/steps/shared_steps.py | sobolevn/paasta | 8b87e0b13816c09b3d063b6d3271e6c7627fd264 | [
"Apache-2.0"
] | 267 | 2015-11-10T19:17:16.000Z | 2022-02-08T20:59:52.000Z | # Copyright 2015-2016 Yelp Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from behave import then
@then('it should have a return code of "{code:d}"')
def see_expected_return_code(context, code):
print(context.output)
print(context.return_code)
print()
assert context.return_code == code
@then('the output should contain "{output_string}"')
def output_contains(context, output_string):
print(output_string)
assert output_string in context.output
| 33.689655 | 74 | 0.752303 |
from behave import then
@then('it should have a return code of "{code:d}"')
def see_expected_return_code(context, code):
print(context.output)
print(context.return_code)
print()
assert context.return_code == code
@then('the output should contain "{output_string}"')
def output_contains(context, output_string):
print(output_string)
assert output_string in context.output
| true | true |
1c2fd9a22e506269dd9c789c4afcef4614f97997 | 328 | py | Python | test/filter_lol_test.py | zhenggc1/guietta | 2eb78b7d0a30d145a248c6eac27cab2bb907d64c | [
"MIT"
] | 1 | 2020-07-22T17:30:10.000Z | 2020-07-22T17:30:10.000Z | test/filter_lol_test.py | zhenggc1/guietta | 2eb78b7d0a30d145a248c6eac27cab2bb907d64c | [
"MIT"
] | null | null | null | test/filter_lol_test.py | zhenggc1/guietta | 2eb78b7d0a30d145a248c6eac27cab2bb907d64c | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
import unittest
from guietta.guietta import _filter_lol
class FilterLolTest(unittest.TestCase):
def test_filter_lol(self):
lol = [[1, 3, 10], [3.14, 0, -2]]
def func(x):
return x * 2
_filter_lol(lol, func)
assert lol == [[2, 6, 20], [6.28, 0, -4]]
| 17.263158 | 49 | 0.545732 |
import unittest
from guietta.guietta import _filter_lol
class FilterLolTest(unittest.TestCase):
def test_filter_lol(self):
lol = [[1, 3, 10], [3.14, 0, -2]]
def func(x):
return x * 2
_filter_lol(lol, func)
assert lol == [[2, 6, 20], [6.28, 0, -4]]
| true | true |
1c2fda6177765b7906214bb4b8231a55632b2a0e | 22,030 | py | Python | tests/test_s3boto3.py | danielholmes/django-storages | 45d8235ebd62da29bcca6b1e012a143009b2fb0c | [
"BSD-3-Clause"
] | null | null | null | tests/test_s3boto3.py | danielholmes/django-storages | 45d8235ebd62da29bcca6b1e012a143009b2fb0c | [
"BSD-3-Clause"
] | null | null | null | tests/test_s3boto3.py | danielholmes/django-storages | 45d8235ebd62da29bcca6b1e012a143009b2fb0c | [
"BSD-3-Clause"
] | null | null | null | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
import gzip
import pickle
import threading
import warnings
from datetime import datetime
from unittest import skipIf
from botocore.exceptions import ClientError
from django.conf import settings
from django.core.exceptions import ImproperlyConfigured
from django.core.files.base import ContentFile
from django.test import TestCase, override_settings
from django.utils.six.moves.urllib import parse as urlparse
from django.utils.timezone import is_aware, utc
from storages.backends import s3boto3
try:
from unittest import mock
except ImportError: # Python 3.2 and below
import mock
class S3Boto3TestCase(TestCase):
    """Base case giving each test an ``S3Boto3Storage`` with a mocked connection."""

    def setUp(self):
        storage = s3boto3.S3Boto3Storage()
        # Swap the real boto3 connection for a MagicMock so that no test
        # ever talks to AWS.
        storage._connections.connection = mock.MagicMock()
        self.storage = storage
class S3Boto3StorageTests(S3Boto3TestCase):
def test_clean_name(self):
"""
Test the base case of _clean_name
"""
path = self.storage._clean_name("path/to/somewhere")
self.assertEqual(path, "path/to/somewhere")
def test_clean_name_normalize(self):
"""
Test the normalization of _clean_name
"""
path = self.storage._clean_name("path/to/../somewhere")
self.assertEqual(path, "path/somewhere")
def test_clean_name_trailing_slash(self):
"""
Test the _clean_name when the path has a trailing slash
"""
path = self.storage._clean_name("path/to/somewhere/")
self.assertEqual(path, "path/to/somewhere/")
def test_clean_name_windows(self):
"""
Test the _clean_name when the path has a trailing slash
"""
path = self.storage._clean_name("path\\to\\somewhere")
self.assertEqual(path, "path/to/somewhere")
def test_pickle_with_bucket(self):
"""
Test that the storage can be pickled with a bucket attached
"""
# Ensure the bucket has been used
self.storage.bucket
self.assertIsNotNone(self.storage._bucket)
# Can't pickle MagicMock, but you can't pickle a real Bucket object either
p = pickle.dumps(self.storage)
new_storage = pickle.loads(p)
self.assertIsInstance(new_storage._connections, threading.local)
# Put the mock connection back in
new_storage._connections.connection = mock.MagicMock()
self.assertIsNone(new_storage._bucket)
new_storage.bucket
self.assertIsNotNone(new_storage._bucket)
def test_pickle_without_bucket(self):
"""
Test that the storage can be pickled, without a bucket instance
"""
# Can't pickle a threadlocal
p = pickle.dumps(self.storage)
new_storage = pickle.loads(p)
self.assertIsInstance(new_storage._connections, threading.local)
def test_storage_url_slashes(self):
"""
Test URL generation.
"""
self.storage.custom_domain = 'example.com'
# We expect no leading slashes in the path,
# and trailing slashes should be preserved.
self.assertEqual(self.storage.url(''), 'https://example.com/')
self.assertEqual(self.storage.url('path'), 'https://example.com/path')
self.assertEqual(self.storage.url('path/'), 'https://example.com/path/')
self.assertEqual(self.storage.url('path/1'), 'https://example.com/path/1')
self.assertEqual(self.storage.url('path/1/'), 'https://example.com/path/1/')
def test_storage_save(self):
"""
Test saving a file
"""
name = 'test_storage_save.txt'
content = ContentFile('new content')
self.storage.save(name, content)
self.storage.bucket.Object.assert_called_once_with(name)
obj = self.storage.bucket.Object.return_value
obj.upload_fileobj.assert_called_with(
content.file,
ExtraArgs={
'ContentType': 'text/plain',
'ACL': self.storage.default_acl,
}
)
def test_storage_save_with_acl(self):
"""
Test saving a file with user defined ACL.
"""
name = 'test_storage_save.txt'
content = ContentFile('new content')
self.storage.default_acl = 'private'
self.storage.save(name, content)
self.storage.bucket.Object.assert_called_once_with(name)
obj = self.storage.bucket.Object.return_value
obj.upload_fileobj.assert_called_with(
content.file,
ExtraArgs={
'ContentType': 'text/plain',
'ACL': 'private',
}
)
def test_content_type(self):
"""
Test saving a file with a None content type.
"""
name = 'test_image.jpg'
content = ContentFile('data')
content.content_type = None
self.storage.save(name, content)
self.storage.bucket.Object.assert_called_once_with(name)
obj = self.storage.bucket.Object.return_value
obj.upload_fileobj.assert_called_with(
content.file,
ExtraArgs={
'ContentType': 'image/jpeg',
'ACL': self.storage.default_acl,
}
)
def test_storage_save_gzipped(self):
"""
Test saving a gzipped file
"""
name = 'test_storage_save.gz'
content = ContentFile("I am gzip'd")
self.storage.save(name, content)
obj = self.storage.bucket.Object.return_value
obj.upload_fileobj.assert_called_with(
content.file,
ExtraArgs={
'ContentType': 'application/octet-stream',
'ContentEncoding': 'gzip',
'ACL': self.storage.default_acl,
}
)
def test_storage_save_gzip(self):
"""
Test saving a file with gzip enabled.
"""
self.storage.gzip = True
name = 'test_storage_save.css'
content = ContentFile("I should be gzip'd")
self.storage.save(name, content)
obj = self.storage.bucket.Object.return_value
obj.upload_fileobj.assert_called_with(
mock.ANY,
ExtraArgs={
'ContentType': 'text/css',
'ContentEncoding': 'gzip',
'ACL': self.storage.default_acl,
}
)
args, kwargs = obj.upload_fileobj.call_args
content = args[0]
zfile = gzip.GzipFile(mode='rb', fileobj=content)
self.assertEqual(zfile.read(), b"I should be gzip'd")
def test_storage_save_gzip_twice(self):
"""
Test saving the same file content twice with gzip enabled.
"""
# Given
self.storage.gzip = True
name = 'test_storage_save.css'
content = ContentFile("I should be gzip'd")
# When
self.storage.save(name, content)
self.storage.save('test_storage_save_2.css', content)
# Then
obj = self.storage.bucket.Object.return_value
obj.upload_fileobj.assert_called_with(
mock.ANY,
ExtraArgs={
'ContentType': 'text/css',
'ContentEncoding': 'gzip',
'ACL': self.storage.default_acl,
}
)
args, kwargs = obj.upload_fileobj.call_args
content = args[0]
zfile = gzip.GzipFile(mode='rb', fileobj=content)
self.assertEqual(zfile.read(), b"I should be gzip'd")
def test_compress_content_len(self):
"""
Test that file returned by _compress_content() is readable.
"""
self.storage.gzip = True
content = ContentFile("I should be gzip'd")
content = self.storage._compress_content(content)
self.assertTrue(len(content.read()) > 0)
def test_storage_open_write(self):
"""
Test opening a file in write mode
"""
name = 'test_open_for_writïng.txt'
content = 'new content'
# Set the encryption flag used for multipart uploads
self.storage.encryption = True
self.storage.reduced_redundancy = True
self.storage.default_acl = 'public-read'
file = self.storage.open(name, 'w')
self.storage.bucket.Object.assert_called_with(name)
obj = self.storage.bucket.Object.return_value
# Set the name of the mock object
obj.key = name
file.write(content)
obj.initiate_multipart_upload.assert_called_with(
ACL='public-read',
ContentType='text/plain',
ServerSideEncryption='AES256',
StorageClass='REDUCED_REDUNDANCY'
)
# Save the internal file before closing
multipart = obj.initiate_multipart_upload.return_value
multipart.parts.all.return_value = [mock.MagicMock(e_tag='123', part_number=1)]
file.close()
multipart.Part.assert_called_with(1)
part = multipart.Part.return_value
part.upload.assert_called_with(Body=content.encode('utf-8'))
multipart.complete.assert_called_once_with(
MultipartUpload={'Parts': [{'ETag': '123', 'PartNumber': 1}]})
    def test_storage_write_beyond_buffer_size(self):
        """
        Test writing content that exceeds the buffer size

        Content longer than ``buffer_size`` must be flushed as multiple
        multipart-upload parts, and the concatenation of every uploaded
        part body must equal exactly what was written.
        """
        name = 'test_open_for_writïng_beyond_buffer_size.txt'
        # Set the encryption flag used for multipart uploads
        self.storage.encryption = True
        self.storage.reduced_redundancy = True
        self.storage.default_acl = 'public-read'
        file = self.storage.open(name, 'w')
        self.storage.bucket.Object.assert_called_with(name)
        obj = self.storage.bucket.Object.return_value
        # Set the name of the mock object
        obj.key = name
        # Initiate the multipart upload
        file.write('')
        obj.initiate_multipart_upload.assert_called_with(
            ACL='public-read',
            ContentType='text/plain',
            ServerSideEncryption='AES256',
            StorageClass='REDUCED_REDUNDANCY'
        )
        multipart = obj.initiate_multipart_upload.return_value
        # Write content at least twice as long as the buffer size
        written_content = ''
        counter = 1
        while len(written_content) < 2 * file.buffer_size:
            content = 'hello, aws {counter}\n'.format(counter=counter)
            # Write more than just a few bytes in each iteration to keep the
            # test reasonably fast
            content += '*' * int(file.buffer_size / 10)
            file.write(content)
            written_content += content
            counter += 1
        # Save the internal file before closing; close() consults this part
        # listing to build the completion request.
        multipart.parts.all.return_value = [
            mock.MagicMock(e_tag='123', part_number=1),
            mock.MagicMock(e_tag='456', part_number=2)
        ]
        file.close()
        # Exactly two parts, created in ascending order.
        self.assertListEqual(
            multipart.Part.call_args_list,
            [mock.call(1), mock.call(2)]
        )
        part = multipart.Part.return_value
        # Reassemble the bodies passed to Part.upload() and compare them with
        # everything written through the file API.
        uploaded_content = ''.join(
            (args_list[1]['Body'].decode('utf-8')
             for args_list in part.upload.call_args_list)
        )
        self.assertEqual(uploaded_content, written_content)
        multipart.complete.assert_called_once_with(
            MultipartUpload={'Parts': [
                {'ETag': '123', 'PartNumber': 1},
                {'ETag': '456', 'PartNumber': 2},
            ]}
        )
def test_auto_creating_bucket(self):
self.storage.auto_create_bucket = True
Bucket = mock.MagicMock()
self.storage._connections.connection.Bucket.return_value = Bucket
self.storage._connections.connection.meta.client.meta.region_name = 'sa-east-1'
Bucket.meta.client.head_bucket.side_effect = ClientError({'Error': {},
'ResponseMetadata': {'HTTPStatusCode': 404}},
'head_bucket')
self.storage._get_or_create_bucket('testbucketname')
Bucket.create.assert_called_once_with(
ACL='public-read',
CreateBucketConfiguration={
'LocationConstraint': 'sa-east-1',
}
)
    def test_auto_creating_bucket_with_acl(self):
        """Auto-created buckets are created with the configured bucket ACL."""
        self.storage.auto_create_bucket = True
        self.storage.bucket_acl = 'public-read'
        Bucket = mock.MagicMock()
        self.storage._connections.connection.Bucket.return_value = Bucket
        self.storage._connections.connection.meta.client.meta.region_name = 'sa-east-1'
        # head_bucket raising a 404 means the bucket does not exist yet.
        Bucket.meta.client.head_bucket.side_effect = ClientError({'Error': {},
                                                                  'ResponseMetadata': {'HTTPStatusCode': 404}},
                                                                 'head_bucket')
        self.storage._get_or_create_bucket('testbucketname')
        Bucket.create.assert_called_once_with(
            ACL='public-read',
            CreateBucketConfiguration={
                'LocationConstraint': 'sa-east-1',
            }
        )
    def test_storage_exists(self):
        """exists() issues head_object against the bucket/key."""
        self.assertTrue(self.storage.exists("file.txt"))
        self.storage.connection.meta.client.head_object.assert_called_with(
            Bucket=self.storage.bucket_name,
            Key="file.txt",
        )
    def test_storage_exists_false(self):
        """A 404 ClientError from head_object makes exists() return False."""
        self.storage.connection.meta.client.head_object.side_effect = ClientError(
            {'Error': {'Code': '404', 'Message': 'Not Found'}},
            'HeadObject',
        )
        self.assertFalse(self.storage.exists("file.txt"))
        self.storage.connection.meta.client.head_object.assert_called_with(
            Bucket=self.storage.bucket_name,
            Key='file.txt',
        )
    def test_storage_exists_doesnt_create_bucket(self):
        """exists() must never trigger bucket auto-creation."""
        with mock.patch.object(self.storage, '_get_or_create_bucket') as method:
            self.storage.exists('file.txt')
            self.assertFalse(method.called)
    def test_storage_delete(self):
        """delete() removes the S3 object at the given key."""
        self.storage.delete("path/to/file.txt")
        self.storage.bucket.Object.assert_called_with('path/to/file.txt')
        self.storage.bucket.Object.return_value.delete.assert_called_with()
    def test_storage_listdir_base(self):
        """listdir('') maps paginated CommonPrefixes to dirs, Contents to files."""
        # Files:
        #   some/path/1.txt
        #   2.txt
        #   other/path/3.txt
        #   4.txt
        pages = [
            {
                'CommonPrefixes': [
                    {'Prefix': 'some'},
                    {'Prefix': 'other'},
                ],
                'Contents': [
                    {'Key': '2.txt'},
                    {'Key': '4.txt'},
                ],
            },
        ]
        paginator = mock.MagicMock()
        paginator.paginate.return_value = pages
        self.storage._connections.connection.meta.client.get_paginator.return_value = paginator
        dirs, files = self.storage.listdir('')
        paginator.paginate.assert_called_with(Bucket=None, Delimiter='/', Prefix='')
        self.assertEqual(dirs, ['some', 'other'])
        self.assertEqual(files, ['2.txt', '4.txt'])
    def test_storage_listdir_subdir(self):
        """listdir() of a sub-prefix strips the prefix from dir/file names."""
        # Files:
        #   some/path/1.txt
        #   some/2.txt
        pages = [
            {
                'CommonPrefixes': [
                    {'Prefix': 'some/path'},
                ],
                'Contents': [
                    {'Key': 'some/2.txt'},
                ],
            },
        ]
        paginator = mock.MagicMock()
        paginator.paginate.return_value = pages
        self.storage._connections.connection.meta.client.get_paginator.return_value = paginator
        dirs, files = self.storage.listdir('some/')
        paginator.paginate.assert_called_with(Bucket=None, Delimiter='/', Prefix='some/')
        self.assertEqual(dirs, ['path'])
        self.assertEqual(files, ['2.txt'])
    def test_storage_size(self):
        """size() reports the object's content_length."""
        obj = self.storage.bucket.Object.return_value
        obj.content_length = 4098
        name = 'file.txt'
        self.assertEqual(self.storage.size(name), obj.content_length)
    def test_storage_mtime(self):
        """Run the modification-time checks under both USE_TZ settings."""
        # Test both USE_TZ cases
        for use_tz in (True, False):
            with self.settings(USE_TZ=use_tz):
                self._test_storage_mtime(use_tz)
    def _test_storage_mtime(self, use_tz):
        """modified_time() is always naive; get_modified_time() follows USE_TZ."""
        obj = self.storage.bucket.Object.return_value
        obj.last_modified = datetime.now(utc)
        name = 'file.txt'
        self.assertFalse(
            is_aware(self.storage.modified_time(name)),
            'Naive datetime object expected from modified_time()'
        )
        self.assertIs(
            settings.USE_TZ,
            is_aware(self.storage.get_modified_time(name)),
            '%s datetime object expected from get_modified_time() when USE_TZ=%s' % (
                ('Naive', 'Aware')[settings.USE_TZ],
                settings.USE_TZ
            )
        )
    def test_storage_url(self):
        """url() presigns get_object with the default and a custom expiry."""
        name = 'test_storage_size.txt'
        url = 'http://aws.amazon.com/%s' % name
        self.storage.bucket.meta.client.generate_presigned_url.return_value = url
        self.storage.bucket.name = 'bucket'
        self.assertEqual(self.storage.url(name), url)
        self.storage.bucket.meta.client.generate_presigned_url.assert_called_with(
            'get_object',
            Params={'Bucket': self.storage.bucket.name, 'Key': name},
            ExpiresIn=self.storage.querystring_expire
        )
        # A per-call expiry overrides the configured querystring_expire.
        custom_expire = 123
        self.assertEqual(self.storage.url(name, expire=custom_expire), url)
        self.storage.bucket.meta.client.generate_presigned_url.assert_called_with(
            'get_object',
            Params={'Bucket': self.storage.bucket.name, 'Key': name},
            ExpiresIn=custom_expire
        )
    def test_generated_url_is_encoded(self):
        """Custom-domain URLs are percent-encoded and skip presigning."""
        self.storage.custom_domain = "mock.cloudfront.net"
        filename = "whacky & filename'.mp4"
        url = self.storage.url(filename)
        parsed_url = urlparse.urlparse(url)
        self.assertEqual(parsed_url.path,
                         "/whacky%20%26%20filename%27.mp4")
        self.assertFalse(self.storage.bucket.meta.client.generate_presigned_url.called)
    def test_special_characters(self):
        """Non-ASCII names save cleanly and URL-encode as UTF-8."""
        self.storage.custom_domain = "mock.cloudfront.net"
        name = "ãlöhâ.jpg"
        content = ContentFile('new content')
        self.storage.save(name, content)
        self.storage.bucket.Object.assert_called_once_with(name)
        url = self.storage.url(name)
        parsed_url = urlparse.urlparse(url)
        self.assertEqual(parsed_url.path, "/%C3%A3l%C3%B6h%C3%A2.jpg")
    def test_strip_signing_parameters(self):
        """Both SigV4 and legacy signing query parameters are stripped."""
        expected = 'http://bucket.s3-aws-region.amazonaws.com/foo/bar'
        self.assertEqual(self.storage._strip_signing_parameters(
            '%s?X-Amz-Date=12345678&X-Amz-Signature=Signature' % expected), expected)
        self.assertEqual(self.storage._strip_signing_parameters(
            '%s?expires=12345678&signature=Signature' % expected), expected)
    @skipIf(threading is None, 'Test requires threading')
    def test_connection_threading(self):
        """Each thread must get its own boto3 connection object."""
        connections = []
        def thread_storage_connection():
            connections.append(self.storage.connection)
        for x in range(2):
            t = threading.Thread(target=thread_storage_connection)
            t.start()
            t.join()
        # Connection for each thread needs to be unique
        self.assertIsNot(connections[0], connections[1])
    def test_location_leading_slash(self):
        """A leading slash in `location` raises ImproperlyConfigured."""
        msg = (
            "S3Boto3Storage.location cannot begin with a leading slash. "
            "Found '/'. Use '' instead."
        )
        with self.assertRaises(ImproperlyConfigured, msg=msg):
            s3boto3.S3Boto3Storage(location='/')
    def test_deprecated_acl(self):
        """The `acl` constructor argument emits a DeprecationWarning."""
        with override_settings(AWS_DEFAULT_ACL=None), warnings.catch_warnings(record=True) as w:
            s3boto3.S3Boto3Storage(acl='private')
        assert len(w) == 1
        assert issubclass(w[-1].category, DeprecationWarning)
        message = (
            "The acl argument of S3Boto3Storage is deprecated. Use argument "
            "default_acl or setting AWS_DEFAULT_ACL instead. The acl argument "
            "will be removed in version 2.0."
        )
        assert str(w[-1].message) == message
    def test_deprecated_bucket(self):
        """The `bucket` constructor argument emits a DeprecationWarning."""
        with override_settings(AWS_DEFAULT_ACL=None), warnings.catch_warnings(record=True) as w:
            s3boto3.S3Boto3Storage(bucket='django')
        assert len(w) == 1
        assert issubclass(w[-1].category, DeprecationWarning)
        message = (
            "The bucket argument of S3Boto3Storage is deprecated. Use argument "
            "bucket_name or setting AWS_STORAGE_BUCKET_NAME instead. The bucket "
            "argument will be removed in version 2.0."
        )
        assert str(w[-1].message) == message
    def test_deprecated_default_acl(self):
        """Relying on the implicit 'public-read' default ACL warns."""
        with warnings.catch_warnings(record=True) as w:
            s3boto3.S3Boto3Storage()
        assert len(w) == 1
        message = (
            "The default behavior of S3Boto3Storage is insecure and will change "
            "in django-storages 2.0. By default files and new buckets are saved "
            "with an ACL of 'public-read' (globally publicly readable). Version 2.0 will "
            "default to using the bucket's ACL. To opt into the new behavior set "
            "AWS_DEFAULT_ACL = None, otherwise to silence this warning explicitly "
            "set AWS_DEFAULT_ACL."
        )
        assert str(w[-1].message) == message
    def test_deprecated_default_acl_override_class_variable(self):
        """Overriding default_acl on a subclass silences the warning."""
        class MyStorage(s3boto3.S3Boto3Storage):
            default_acl = "private"
        with warnings.catch_warnings(record=True) as w:
            MyStorage()
        assert len(w) == 0
| 36.47351 | 111 | 0.607354 |
from __future__ import unicode_literals
import gzip
import pickle
import threading
import warnings
from datetime import datetime
from unittest import skipIf
from botocore.exceptions import ClientError
from django.conf import settings
from django.core.exceptions import ImproperlyConfigured
from django.core.files.base import ContentFile
from django.test import TestCase, override_settings
from django.utils.six.moves.urllib import parse as urlparse
from django.utils.timezone import is_aware, utc
from storages.backends import s3boto3
try:
from unittest import mock
except ImportError:
import mock
class S3Boto3TestCase(TestCase):
    """Base test case: an S3Boto3Storage wired to a mocked boto3 connection."""
    def setUp(self):
        # Replace the per-thread boto3 connection so no network I/O happens.
        self.storage = s3boto3.S3Boto3Storage()
        self.storage._connections.connection = mock.MagicMock()
class S3Boto3StorageTests(S3Boto3TestCase):
    def test_clean_name(self):
        """A normal relative path passes through _clean_name unchanged."""
        path = self.storage._clean_name("path/to/somewhere")
        self.assertEqual(path, "path/to/somewhere")
    def test_clean_name_normalize(self):
        """'..' segments are collapsed by _clean_name."""
        path = self.storage._clean_name("path/to/../somewhere")
        self.assertEqual(path, "path/somewhere")
    def test_clean_name_trailing_slash(self):
        """A trailing slash is preserved by _clean_name."""
        path = self.storage._clean_name("path/to/somewhere/")
        self.assertEqual(path, "path/to/somewhere/")
    def test_clean_name_windows(self):
        """Windows backslash separators are normalized to forward slashes."""
        path = self.storage._clean_name("path\\to\\somewhere")
        self.assertEqual(path, "path/to/somewhere")
    def test_pickle_with_bucket(self):
        """Pickling drops the cached bucket; both bucket and connection are lazily rebuilt."""
        # Touch the bucket so the cache is populated before pickling.
        self.storage.bucket
        self.assertIsNotNone(self.storage._bucket)
        p = pickle.dumps(self.storage)
        new_storage = pickle.loads(p)
        self.assertIsInstance(new_storage._connections, threading.local)
        # Re-mock the connection on the unpickled instance.
        new_storage._connections.connection = mock.MagicMock()
        self.assertIsNone(new_storage._bucket)
        new_storage.bucket
        self.assertIsNotNone(new_storage._bucket)
    def test_pickle_without_bucket(self):
        """A storage that never touched its bucket round-trips through pickle."""
        p = pickle.dumps(self.storage)
        new_storage = pickle.loads(p)
        self.assertIsInstance(new_storage._connections, threading.local)
    def test_storage_url_slashes(self):
        """Custom-domain url() never emits a leading slash in the path."""
        self.storage.custom_domain = 'example.com'
        # We expect no leading slashes in the path,
        # and trailing slashes should be preserved.
        self.assertEqual(self.storage.url(''), 'https://example.com/')
        self.assertEqual(self.storage.url('path'), 'https://example.com/path')
        self.assertEqual(self.storage.url('path/'), 'https://example.com/path/')
        self.assertEqual(self.storage.url('path/1'), 'https://example.com/path/1')
        self.assertEqual(self.storage.url('path/1/'), 'https://example.com/path/1/')
    def test_storage_save(self):
        """save() uploads via upload_fileobj with content type and default ACL."""
        name = 'test_storage_save.txt'
        content = ContentFile('new content')
        self.storage.save(name, content)
        self.storage.bucket.Object.assert_called_once_with(name)
        obj = self.storage.bucket.Object.return_value
        obj.upload_fileobj.assert_called_with(
            content.file,
            ExtraArgs={
                'ContentType': 'text/plain',
                'ACL': self.storage.default_acl,
            }
        )
    def test_storage_save_with_acl(self):
        """save() honors an overridden default_acl."""
        name = 'test_storage_save.txt'
        content = ContentFile('new content')
        self.storage.default_acl = 'private'
        self.storage.save(name, content)
        self.storage.bucket.Object.assert_called_once_with(name)
        obj = self.storage.bucket.Object.return_value
        obj.upload_fileobj.assert_called_with(
            content.file,
            ExtraArgs={
                'ContentType': 'text/plain',
                'ACL': 'private',
            }
        )
    def test_content_type(self):
        """With no explicit content type, the type is guessed from the name."""
        name = 'test_image.jpg'
        content = ContentFile('data')
        content.content_type = None
        self.storage.save(name, content)
        self.storage.bucket.Object.assert_called_once_with(name)
        obj = self.storage.bucket.Object.return_value
        obj.upload_fileobj.assert_called_with(
            content.file,
            ExtraArgs={
                'ContentType': 'image/jpeg',
                'ACL': self.storage.default_acl,
            }
        )
    def test_storage_save_gzipped(self):
        """Saving a .gz file sets octet-stream type and gzip encoding."""
        name = 'test_storage_save.gz'
        content = ContentFile("I am gzip'd")
        self.storage.save(name, content)
        obj = self.storage.bucket.Object.return_value
        obj.upload_fileobj.assert_called_with(
            content.file,
            ExtraArgs={
                'ContentType': 'application/octet-stream',
                'ContentEncoding': 'gzip',
                'ACL': self.storage.default_acl,
            }
        )
    def test_storage_save_gzip(self):
        """With gzip enabled, a compressible type is gzipped before upload."""
        self.storage.gzip = True
        name = 'test_storage_save.css'
        content = ContentFile("I should be gzip'd")
        self.storage.save(name, content)
        obj = self.storage.bucket.Object.return_value
        obj.upload_fileobj.assert_called_with(
            mock.ANY,
            ExtraArgs={
                'ContentType': 'text/css',
                'ContentEncoding': 'gzip',
                'ACL': self.storage.default_acl,
            }
        )
        # Decompress the uploaded payload to verify it round-trips.
        args, kwargs = obj.upload_fileobj.call_args
        content = args[0]
        zfile = gzip.GzipFile(mode='rb', fileobj=content)
        self.assertEqual(zfile.read(), b"I should be gzip'd")
    def test_storage_save_gzip_twice(self):
        """Saving the same ContentFile twice still uploads valid gzip data."""
        # Given
        self.storage.gzip = True
        name = 'test_storage_save.css'
        content = ContentFile("I should be gzip'd")
        # When
        self.storage.save(name, content)
        self.storage.save('test_storage_save_2.css', content)
        # Then
        obj = self.storage.bucket.Object.return_value
        obj.upload_fileobj.assert_called_with(
            mock.ANY,
            ExtraArgs={
                'ContentType': 'text/css',
                'ContentEncoding': 'gzip',
                'ACL': self.storage.default_acl,
            }
        )
        args, kwargs = obj.upload_fileobj.call_args
        content = args[0]
        zfile = gzip.GzipFile(mode='rb', fileobj=content)
        self.assertEqual(zfile.read(), b"I should be gzip'd")
    def test_compress_content_len(self):
        """_compress_content returns a non-empty, readable stream."""
        self.storage.gzip = True
        content = ContentFile("I should be gzip'd")
        content = self.storage._compress_content(content)
        self.assertTrue(len(content.read()) > 0)
    def test_storage_open_write(self):
        """Opening for write starts a multipart upload and completes it on close."""
        name = 'test_open_for_writïng.txt'
        content = 'new content'
        # Set the encryption flag used for multipart uploads
        self.storage.encryption = True
        self.storage.reduced_redundancy = True
        self.storage.default_acl = 'public-read'
        file = self.storage.open(name, 'w')
        self.storage.bucket.Object.assert_called_with(name)
        obj = self.storage.bucket.Object.return_value
        # Set the name of the mock object
        obj.key = name
        file.write(content)
        obj.initiate_multipart_upload.assert_called_with(
            ACL='public-read',
            ContentType='text/plain',
            ServerSideEncryption='AES256',
            StorageClass='REDUCED_REDUNDANCY'
        )
        # Save the internal file before closing
        multipart = obj.initiate_multipart_upload.return_value
        multipart.parts.all.return_value = [mock.MagicMock(e_tag='123', part_number=1)]
        file.close()
        multipart.Part.assert_called_with(1)
        part = multipart.Part.return_value
        part.upload.assert_called_with(Body=content.encode('utf-8'))
        multipart.complete.assert_called_once_with(
            MultipartUpload={'Parts': [{'ETag': '123', 'PartNumber': 1}]})
    def test_storage_write_beyond_buffer_size(self):
        """Writing more than buffer_size uploads multiple multipart parts in order."""
        name = 'test_open_for_writïng_beyond_buffer_size.txt'
        # Set the encryption flag used for multipart uploads
        self.storage.encryption = True
        self.storage.reduced_redundancy = True
        self.storage.default_acl = 'public-read'
        file = self.storage.open(name, 'w')
        self.storage.bucket.Object.assert_called_with(name)
        obj = self.storage.bucket.Object.return_value
        # Set the name of the mock object
        obj.key = name
        # Initiate the multipart upload
        file.write('')
        obj.initiate_multipart_upload.assert_called_with(
            ACL='public-read',
            ContentType='text/plain',
            ServerSideEncryption='AES256',
            StorageClass='REDUCED_REDUNDANCY'
        )
        multipart = obj.initiate_multipart_upload.return_value
        # Write content at least twice as long as the buffer size
        written_content = ''
        counter = 1
        while len(written_content) < 2 * file.buffer_size:
            content = 'hello, aws {counter}\n'.format(counter=counter)
            # Write more than just a few bytes in each iteration to keep the
            # test reasonably fast
            content += '*' * int(file.buffer_size / 10)
            file.write(content)
            written_content += content
            counter += 1
        # Save the internal file before closing
        multipart.parts.all.return_value = [
            mock.MagicMock(e_tag='123', part_number=1),
            mock.MagicMock(e_tag='456', part_number=2)
        ]
        file.close()
        self.assertListEqual(
            multipart.Part.call_args_list,
            [mock.call(1), mock.call(2)]
        )
        # All uploaded part bodies concatenated must equal what was written.
        part = multipart.Part.return_value
        uploaded_content = ''.join(
            (args_list[1]['Body'].decode('utf-8')
             for args_list in part.upload.call_args_list)
        )
        self.assertEqual(uploaded_content, written_content)
        multipart.complete.assert_called_once_with(
            MultipartUpload={'Parts': [
                {'ETag': '123', 'PartNumber': 1},
                {'ETag': '456', 'PartNumber': 2},
            ]}
        )
def test_auto_creating_bucket(self):
self.storage.auto_create_bucket = True
Bucket = mock.MagicMock()
self.storage._connections.connection.Bucket.return_value = Bucket
self.storage._connections.connection.meta.client.meta.region_name = 'sa-east-1'
Bucket.meta.client.head_bucket.side_effect = ClientError({'Error': {},
'ResponseMetadata': {'HTTPStatusCode': 404}},
'head_bucket')
self.storage._get_or_create_bucket('testbucketname')
Bucket.create.assert_called_once_with(
ACL='public-read',
CreateBucketConfiguration={
'LocationConstraint': 'sa-east-1',
}
)
def test_auto_creating_bucket_with_acl(self):
self.storage.auto_create_bucket = True
self.storage.bucket_acl = 'public-read'
Bucket = mock.MagicMock()
self.storage._connections.connection.Bucket.return_value = Bucket
self.storage._connections.connection.meta.client.meta.region_name = 'sa-east-1'
Bucket.meta.client.head_bucket.side_effect = ClientError({'Error': {},
'ResponseMetadata': {'HTTPStatusCode': 404}},
'head_bucket')
self.storage._get_or_create_bucket('testbucketname')
Bucket.create.assert_called_once_with(
ACL='public-read',
CreateBucketConfiguration={
'LocationConstraint': 'sa-east-1',
}
)
def test_storage_exists(self):
self.assertTrue(self.storage.exists("file.txt"))
self.storage.connection.meta.client.head_object.assert_called_with(
Bucket=self.storage.bucket_name,
Key="file.txt",
)
def test_storage_exists_false(self):
self.storage.connection.meta.client.head_object.side_effect = ClientError(
{'Error': {'Code': '404', 'Message': 'Not Found'}},
'HeadObject',
)
self.assertFalse(self.storage.exists("file.txt"))
self.storage.connection.meta.client.head_object.assert_called_with(
Bucket=self.storage.bucket_name,
Key='file.txt',
)
def test_storage_exists_doesnt_create_bucket(self):
with mock.patch.object(self.storage, '_get_or_create_bucket') as method:
self.storage.exists('file.txt')
self.assertFalse(method.called)
def test_storage_delete(self):
self.storage.delete("path/to/file.txt")
self.storage.bucket.Object.assert_called_with('path/to/file.txt')
self.storage.bucket.Object.return_value.delete.assert_called_with()
def test_storage_listdir_base(self):
# Files:
# some/path/1.txt
# 2.txt
# other/path/3.txt
# 4.txt
pages = [
{
'CommonPrefixes': [
{'Prefix': 'some'},
{'Prefix': 'other'},
],
'Contents': [
{'Key': '2.txt'},
{'Key': '4.txt'},
],
},
]
paginator = mock.MagicMock()
paginator.paginate.return_value = pages
self.storage._connections.connection.meta.client.get_paginator.return_value = paginator
dirs, files = self.storage.listdir('')
paginator.paginate.assert_called_with(Bucket=None, Delimiter='/', Prefix='')
self.assertEqual(dirs, ['some', 'other'])
self.assertEqual(files, ['2.txt', '4.txt'])
def test_storage_listdir_subdir(self):
# Files:
# some/path/1.txt
# some/2.txt
pages = [
{
'CommonPrefixes': [
{'Prefix': 'some/path'},
],
'Contents': [
{'Key': 'some/2.txt'},
],
},
]
paginator = mock.MagicMock()
paginator.paginate.return_value = pages
self.storage._connections.connection.meta.client.get_paginator.return_value = paginator
dirs, files = self.storage.listdir('some/')
paginator.paginate.assert_called_with(Bucket=None, Delimiter='/', Prefix='some/')
self.assertEqual(dirs, ['path'])
self.assertEqual(files, ['2.txt'])
def test_storage_size(self):
obj = self.storage.bucket.Object.return_value
obj.content_length = 4098
name = 'file.txt'
self.assertEqual(self.storage.size(name), obj.content_length)
def test_storage_mtime(self):
# Test both USE_TZ cases
for use_tz in (True, False):
with self.settings(USE_TZ=use_tz):
self._test_storage_mtime(use_tz)
def _test_storage_mtime(self, use_tz):
obj = self.storage.bucket.Object.return_value
obj.last_modified = datetime.now(utc)
name = 'file.txt'
self.assertFalse(
is_aware(self.storage.modified_time(name)),
'Naive datetime object expected from modified_time()'
)
self.assertIs(
settings.USE_TZ,
is_aware(self.storage.get_modified_time(name)),
'%s datetime object expected from get_modified_time() when USE_TZ=%s' % (
('Naive', 'Aware')[settings.USE_TZ],
settings.USE_TZ
)
)
def test_storage_url(self):
name = 'test_storage_size.txt'
url = 'http://aws.amazon.com/%s' % name
self.storage.bucket.meta.client.generate_presigned_url.return_value = url
self.storage.bucket.name = 'bucket'
self.assertEqual(self.storage.url(name), url)
self.storage.bucket.meta.client.generate_presigned_url.assert_called_with(
'get_object',
Params={'Bucket': self.storage.bucket.name, 'Key': name},
ExpiresIn=self.storage.querystring_expire
)
custom_expire = 123
self.assertEqual(self.storage.url(name, expire=custom_expire), url)
self.storage.bucket.meta.client.generate_presigned_url.assert_called_with(
'get_object',
Params={'Bucket': self.storage.bucket.name, 'Key': name},
ExpiresIn=custom_expire
)
def test_generated_url_is_encoded(self):
self.storage.custom_domain = "mock.cloudfront.net"
filename = "whacky & filename'.mp4"
url = self.storage.url(filename)
parsed_url = urlparse.urlparse(url)
self.assertEqual(parsed_url.path,
"/whacky%20%26%20filename%27.mp4")
self.assertFalse(self.storage.bucket.meta.client.generate_presigned_url.called)
def test_special_characters(self):
self.storage.custom_domain = "mock.cloudfront.net"
name = "ãlöhâ.jpg"
content = ContentFile('new content')
self.storage.save(name, content)
self.storage.bucket.Object.assert_called_once_with(name)
url = self.storage.url(name)
parsed_url = urlparse.urlparse(url)
self.assertEqual(parsed_url.path, "/%C3%A3l%C3%B6h%C3%A2.jpg")
def test_strip_signing_parameters(self):
expected = 'http://bucket.s3-aws-region.amazonaws.com/foo/bar'
self.assertEqual(self.storage._strip_signing_parameters(
'%s?X-Amz-Date=12345678&X-Amz-Signature=Signature' % expected), expected)
self.assertEqual(self.storage._strip_signing_parameters(
'%s?expires=12345678&signature=Signature' % expected), expected)
@skipIf(threading is None, 'Test requires threading')
def test_connection_threading(self):
connections = []
def thread_storage_connection():
connections.append(self.storage.connection)
for x in range(2):
t = threading.Thread(target=thread_storage_connection)
t.start()
t.join()
self.assertIsNot(connections[0], connections[1])
def test_location_leading_slash(self):
msg = (
"S3Boto3Storage.location cannot begin with a leading slash. "
"Found '/'. Use '' instead."
)
with self.assertRaises(ImproperlyConfigured, msg=msg):
s3boto3.S3Boto3Storage(location='/')
def test_deprecated_acl(self):
with override_settings(AWS_DEFAULT_ACL=None), warnings.catch_warnings(record=True) as w:
s3boto3.S3Boto3Storage(acl='private')
assert len(w) == 1
assert issubclass(w[-1].category, DeprecationWarning)
message = (
"The acl argument of S3Boto3Storage is deprecated. Use argument "
"default_acl or setting AWS_DEFAULT_ACL instead. The acl argument "
"will be removed in version 2.0."
)
assert str(w[-1].message) == message
def test_deprecated_bucket(self):
with override_settings(AWS_DEFAULT_ACL=None), warnings.catch_warnings(record=True) as w:
s3boto3.S3Boto3Storage(bucket='django')
assert len(w) == 1
assert issubclass(w[-1].category, DeprecationWarning)
message = (
"The bucket argument of S3Boto3Storage is deprecated. Use argument "
"bucket_name or setting AWS_STORAGE_BUCKET_NAME instead. The bucket "
"argument will be removed in version 2.0."
)
assert str(w[-1].message) == message
def test_deprecated_default_acl(self):
with warnings.catch_warnings(record=True) as w:
s3boto3.S3Boto3Storage()
assert len(w) == 1
message = (
"The default behavior of S3Boto3Storage is insecure and will change "
"in django-storages 2.0. By default files and new buckets are saved "
"with an ACL of 'public-read' (globally publicly readable). Version 2.0 will "
"default to using the bucket's ACL. To opt into the new behavior set "
"AWS_DEFAULT_ACL = None, otherwise to silence this warning explicitly "
"set AWS_DEFAULT_ACL."
)
assert str(w[-1].message) == message
def test_deprecated_default_acl_override_class_variable(self):
class MyStorage(s3boto3.S3Boto3Storage):
default_acl = "private"
with warnings.catch_warnings(record=True) as w:
MyStorage()
assert len(w) == 0
| true | true |
1c2fda91b51d903a1fb662774f2ddb659efb16c0 | 3,680 | py | Python | selfdrive/controls/lib/lane_planner.py | azure93/openpilot_079_neokii | 7ac7c327527e8ab7a1b9dc42463ce02be81c444d | [
"MIT"
] | null | null | null | selfdrive/controls/lib/lane_planner.py | azure93/openpilot_079_neokii | 7ac7c327527e8ab7a1b9dc42463ce02be81c444d | [
"MIT"
] | null | null | null | selfdrive/controls/lib/lane_planner.py | azure93/openpilot_079_neokii | 7ac7c327527e8ab7a1b9dc42463ce02be81c444d | [
"MIT"
] | 2 | 2021-05-19T12:34:17.000Z | 2021-06-12T11:32:55.000Z | from common.numpy_fast import interp
import numpy as np
from cereal import log
from selfdrive.ntune import ntune_get
CAMERA_OFFSET = 0.06 # m from center car to camera
def compute_path_pinv(l=50):
  """Pseudo-inverse of the cubic Vandermonde matrix for x = 0..l-1.

  Multiplying the returned (4, l) matrix by l sampled y-values gives the
  least-squares cubic coefficients, ordered highest power first.
  """
  degree = 3
  xs = np.arange(float(l))
  # np.vander's default decreasing powers reproduce the x**3..x**0 columns.
  vandermonde = np.vander(xs, degree + 1)
  return np.linalg.pinv(vandermonde)
def model_polyfit(points, path_pinv):
  """Least-squares cubic fit of sampled points using a precomputed pseudo-inverse.

  Returns coefficients ordered highest power first (see compute_path_pinv).
  """
  samples = np.asarray([float(p) for p in points])
  return np.dot(path_pinv, samples)
def eval_poly(poly, x):
  """Evaluate a cubic whose coefficients are ordered highest power first."""
  cubic, quad, lin, const = poly[0], poly[1], poly[2], poly[3]
  return const + lin * x + quad * x**2 + cubic * x**3
def calc_d_poly(l_poly, r_poly, p_poly, l_prob, r_prob, lane_width, v_ego):
  """Blend the lane-line polys and the model path poly into a desired path.

  All polys are cubic coefficients, highest power first; l_prob/r_prob are
  line probabilities.  v_ego is the vehicle speed (presumably m/s, given the
  t_check * (v_ego + 7) lookahead -- confirm with callers).  Returns the
  desired-path polynomial.
  """
  # This will improve behaviour when lanes suddenly widen
  # these numbers were tested on 2000segments and found to work well
  lane_width = min(4.0, lane_width)
  width_poly = l_poly - r_poly
  prob_mods = []
  # Fade line trust to zero as the measured width, checked at three
  # lookahead times, grows from 4.0 to 5.0.
  for t_check in [0.0, 1.5, 3.0]:
    width_at_t = eval_poly(width_poly, t_check * (v_ego + 7))
    prob_mods.append(interp(width_at_t, [4.0, 5.0], [1.0, 0.0]))
  mod = min(prob_mods)
  l_prob = mod * l_prob
  r_prob = mod * r_prob
  # Lane-center candidates: shift each line's constant (lateral offset)
  # coefficient by half a lane width.
  path_from_left_lane = l_poly.copy()
  path_from_left_lane[3] -= lane_width / 2.0
  path_from_right_lane = r_poly.copy()
  path_from_right_lane[3] += lane_width / 2.0
  # Probability that at least one line is usable.
  lr_prob = l_prob + r_prob - l_prob * r_prob
  # neokii fork tweak: boost strong combined line confidence.
  if lr_prob > 0.65:
    lr_prob = min(lr_prob * 1.35, 1.0)
  # Probability-weighted mix of the line paths, then blended with the
  # model's predicted path by lr_prob.
  d_poly_lane = (l_prob * path_from_left_lane + r_prob * path_from_right_lane) / (l_prob + r_prob + 0.0001)
  return lr_prob * d_poly_lane + (1.0 - lr_prob) * p_poly
class LanePlanner():
  """Tracks lane-line/path polynomials from model output and derives d_poly,
  the desired-path polynomial (cubic coefficients, highest power first)."""
  def __init__(self):
    # Cubic coefficient lists, highest power first.
    self.l_poly = [0., 0., 0., 0.]
    self.r_poly = [0., 0., 0., 0.]
    self.p_poly = [0., 0., 0., 0.]
    self.d_poly = [0., 0., 0., 0.]
    # Running lane-width filter state.
    self.lane_width_estimate = 3.7
    self.lane_width_certainty = 1.0
    self.lane_width = 3.7
    # Line probabilities and lane-change desire probabilities from the model.
    self.l_prob = 0.
    self.r_prob = 0.
    self.l_lane_change_prob = 0.
    self.r_lane_change_prob = 0.
    # Pseudo-inverse for fitting cubics to 50 sampled model points.
    self._path_pinv = compute_path_pinv()
    self.x_points = np.arange(50)
  def parse_model(self, md):
    """Pull lane/path polys, line probs and lane-change desires from the
    model message md (provides leftLane/rightLane/path and meta)."""
    if len(md.leftLane.poly):
      # Model already provides polynomial coefficients.
      self.l_poly = np.array(md.leftLane.poly)
      self.r_poly = np.array(md.rightLane.poly)
      self.p_poly = np.array(md.path.poly)
    else:
      # Older model output: fit cubics to the sampled points ourselves.
      self.l_poly = model_polyfit(md.leftLane.points, self._path_pinv)  # left line
      self.r_poly = model_polyfit(md.rightLane.points, self._path_pinv)  # right line
      self.p_poly = model_polyfit(md.path.points, self._path_pinv)  # predicted path
    self.l_prob = md.leftLane.prob  # left line prob
    self.r_prob = md.rightLane.prob  # right line prob
    if len(md.meta.desireState):
      self.l_lane_change_prob = md.meta.desireState[log.PathPlan.Desire.laneChangeLeft - 1]
      self.r_lane_change_prob = md.meta.desireState[log.PathPlan.Desire.laneChangeRight - 1]
  def update_d_poly(self, v_ego):
    """Apply the (live-tunable) camera offset, update the lane-width filter
    and recompute d_poly for the current speed v_ego."""
    # only offset left and right lane lines; offsetting p_poly does not make sense
    cameraOffset = ntune_get("cameraOffset")
    self.l_poly[3] += cameraOffset
    self.r_poly[3] += cameraOffset
    # Find current lanewidth
    self.lane_width_certainty += 0.05 * (self.l_prob * self.r_prob - self.lane_width_certainty)
    current_lane_width = abs(self.l_poly[3] - self.r_poly[3])
    self.lane_width_estimate += 0.005 * (current_lane_width - self.lane_width_estimate)
    # Fall back toward a speed-based default width when certainty is low.
    speed_lane_width = interp(v_ego, [0., 31.], [2.8, 3.5])
    self.lane_width = self.lane_width_certainty * self.lane_width_estimate + \
                      (1 - self.lane_width_certainty) * speed_lane_width
    self.d_poly = calc_d_poly(self.l_poly, self.r_poly, self.p_poly, self.l_prob, self.r_prob, self.lane_width, v_ego)
  def update(self, v_ego, md):
    """Convenience wrapper: parse the model message, then refresh d_poly."""
    self.parse_model(md)
    self.update_d_poly(v_ego)
| 33.454545 | 118 | 0.682609 | from common.numpy_fast import interp
import numpy as np
from cereal import log
from selfdrive.ntune import ntune_get
CAMERA_OFFSET = 0.06
def compute_path_pinv(l=50):
  """Pseudo-inverse of the cubic Vandermonde matrix for x = 0..l-1.

  Multiplying the returned (4, l) matrix by l sampled y-values gives the
  least-squares cubic coefficients, ordered highest power first.
  """
  degree = 3
  xs = np.arange(float(l))
  # np.vander's default decreasing powers reproduce the x**3..x**0 columns.
  vandermonde = np.vander(xs, degree + 1)
  return np.linalg.pinv(vandermonde)
def model_polyfit(points, path_pinv):
  """Least-squares cubic fit of sampled points using a precomputed pseudo-inverse.

  Returns coefficients ordered highest power first (see compute_path_pinv).
  """
  samples = np.asarray([float(p) for p in points])
  return np.dot(path_pinv, samples)
def eval_poly(poly, x):
  """Evaluate a cubic whose coefficients are ordered highest power first."""
  cubic, quad, lin, const = poly[0], poly[1], poly[2], poly[3]
  return const + lin * x + quad * x**2 + cubic * x**3
def calc_d_poly(l_poly, r_poly, p_poly, l_prob, r_prob, lane_width, v_ego):
  """Blend the lane-line polys and the model path poly into a desired path.

  All polys are cubic coefficients, highest power first; l_prob/r_prob are
  line probabilities.  v_ego is the vehicle speed (presumably m/s, given the
  t_check * (v_ego + 7) lookahead -- confirm with callers).  Returns the
  desired-path polynomial.
  """
  # Cap the width used for centering; helps when lanes suddenly widen.
  lane_width = min(4.0, lane_width)
  width_poly = l_poly - r_poly
  prob_mods = []
  # Fade line trust to zero as the measured width, checked at three
  # lookahead times, grows from 4.0 to 5.0.
  for t_check in [0.0, 1.5, 3.0]:
    width_at_t = eval_poly(width_poly, t_check * (v_ego + 7))
    prob_mods.append(interp(width_at_t, [4.0, 5.0], [1.0, 0.0]))
  mod = min(prob_mods)
  l_prob = mod * l_prob
  r_prob = mod * r_prob
  # Lane-center candidates: shift each line's constant (lateral offset)
  # coefficient by half a lane width.
  path_from_left_lane = l_poly.copy()
  path_from_left_lane[3] -= lane_width / 2.0
  path_from_right_lane = r_poly.copy()
  path_from_right_lane[3] += lane_width / 2.0
  # Probability that at least one line is usable.
  lr_prob = l_prob + r_prob - l_prob * r_prob
  # Fork tweak: boost strong combined line confidence.
  if lr_prob > 0.65:
    lr_prob = min(lr_prob * 1.35, 1.0)
  # Probability-weighted mix of the line paths, then blended with the
  # model's predicted path by lr_prob.
  d_poly_lane = (l_prob * path_from_left_lane + r_prob * path_from_right_lane) / (l_prob + r_prob + 0.0001)
  return lr_prob * d_poly_lane + (1.0 - lr_prob) * p_poly
class LanePlanner():
  """Tracks lane-line/path polynomials from model output and derives d_poly,
  the desired-path polynomial (cubic coefficients, highest power first)."""
  def __init__(self):
    # Cubic coefficient lists, highest power first.
    self.l_poly = [0., 0., 0., 0.]
    self.r_poly = [0., 0., 0., 0.]
    self.p_poly = [0., 0., 0., 0.]
    self.d_poly = [0., 0., 0., 0.]
    # Running lane-width filter state.
    self.lane_width_estimate = 3.7
    self.lane_width_certainty = 1.0
    self.lane_width = 3.7
    # Line probabilities and lane-change desire probabilities from the model.
    self.l_prob = 0.
    self.r_prob = 0.
    self.l_lane_change_prob = 0.
    self.r_lane_change_prob = 0.
    # Pseudo-inverse for fitting cubics to 50 sampled model points.
    self._path_pinv = compute_path_pinv()
    self.x_points = np.arange(50)
  def parse_model(self, md):
    """Pull lane/path polys, line probs and lane-change desires from the
    model message md (provides leftLane/rightLane/path and meta)."""
    if len(md.leftLane.poly):
      # Model already provides polynomial coefficients.
      self.l_poly = np.array(md.leftLane.poly)
      self.r_poly = np.array(md.rightLane.poly)
      self.p_poly = np.array(md.path.poly)
    else:
      # Older model output: fit cubics to the sampled points ourselves.
      self.l_poly = model_polyfit(md.leftLane.points, self._path_pinv)
      self.r_poly = model_polyfit(md.rightLane.points, self._path_pinv)
      self.p_poly = model_polyfit(md.path.points, self._path_pinv)
    self.l_prob = md.leftLane.prob
    self.r_prob = md.rightLane.prob
    if len(md.meta.desireState):
      self.l_lane_change_prob = md.meta.desireState[log.PathPlan.Desire.laneChangeLeft - 1]
      self.r_lane_change_prob = md.meta.desireState[log.PathPlan.Desire.laneChangeRight - 1]
  def update_d_poly(self, v_ego):
    """Apply the (live-tunable) camera offset, update the lane-width filter
    and recompute d_poly for the current speed v_ego."""
    # Only offset the left/right lines; offsetting p_poly makes no sense.
    cameraOffset = ntune_get("cameraOffset")
    self.l_poly[3] += cameraOffset
    self.r_poly[3] += cameraOffset
    # Low-pass filter the lane width and how certain we are about it.
    self.lane_width_certainty += 0.05 * (self.l_prob * self.r_prob - self.lane_width_certainty)
    current_lane_width = abs(self.l_poly[3] - self.r_poly[3])
    self.lane_width_estimate += 0.005 * (current_lane_width - self.lane_width_estimate)
    # Fall back toward a speed-based default width when certainty is low.
    speed_lane_width = interp(v_ego, [0., 31.], [2.8, 3.5])
    self.lane_width = self.lane_width_certainty * self.lane_width_estimate + \
                      (1 - self.lane_width_certainty) * speed_lane_width
    self.d_poly = calc_d_poly(self.l_poly, self.r_poly, self.p_poly, self.l_prob, self.r_prob, self.lane_width, v_ego)
  def update(self, v_ego, md):
    """Convenience wrapper: parse the model message, then refresh d_poly."""
    self.parse_model(md)
    self.update_d_poly(v_ego)
| true | true |
1c2fdac8dcd7093d69110e668617a1a1d89c8df3 | 1,144 | py | Python | vsphere/objects_queue.py | SumoLogic/sumologic-vmware | 6c19d48b208cec7a69e726dfad0a5e7aa16ad220 | [
"Apache-2.0"
] | 1 | 2022-02-12T02:01:09.000Z | 2022-02-12T02:01:09.000Z | vsphere/objects_queue.py | SumoLogic/sumologic-vmware | 6c19d48b208cec7a69e726dfad0a5e7aa16ad220 | [
"Apache-2.0"
] | null | null | null | vsphere/objects_queue.py | SumoLogic/sumologic-vmware | 6c19d48b208cec7a69e726dfad0a5e7aa16ad220 | [
"Apache-2.0"
] | null | null | null | import threading
class ObjectsQueue:
    """
    Thread-safe store of Mor objects of any type, keyed by instance key
    and resource type.

    Fix vs. original: size() and pop() no longer raise KeyError when the
    key was never filled; they report an empty queue (0 / None) instead,
    matching pop()'s documented "return None when nothing is available"
    contract.
    """
    def __init__(self):
        # Maps key -> {resource_type: [mor, ...]}
        self._objects_queue = {}
        # RLock so a thread already holding the lock may re-enter safely.
        self._objects_queue_lock = threading.RLock()

    def fill(self, key, mor_dict):
        """
        Set a dict mapping (resource_type --> objects[]) for a given key.
        """
        with self._objects_queue_lock:
            self._objects_queue[key] = mor_dict

    def contains(self, key):
        """
        Return True if a mor dict has been filled for `key`.
        """
        with self._objects_queue_lock:
            return key in self._objects_queue

    def size(self, key, resource_type):
        """
        Return the size of the queue for a given key and resource type.
        Returns 0 when the key or resource type is unknown.
        """
        with self._objects_queue_lock:
            return len(self._objects_queue.get(key, {}).get(resource_type, []))

    def pop(self, key, resource_type):
        """
        Extract an object from the list.
        If the list is empty (or the key is unknown), return None.
        """
        with self._objects_queue_lock:
            objects = self._objects_queue.get(key, {}).get(resource_type, [])
            return objects.pop() if objects else None
| 30.105263 | 74 | 0.611014 | import threading
class ObjectsQueue:
    """Thread-safe, per-key store of Mor objects grouped by resource type."""
    def __init__(self):
        # key -> {resource_type: [objects]}; all access guarded by an RLock
        self._objects_queue = {}
        self._objects_queue_lock = threading.RLock()
    def fill(self, key, mor_dict):
        """Set the dict mapping (resource_type -> objects[]) for ``key``."""
        with self._objects_queue_lock:
            self._objects_queue[key] = mor_dict
    def contains(self, key):
        """Return True if ``key`` has previously been filled."""
        with self._objects_queue_lock:
            return key in self._objects_queue
    def size(self, key, resource_type):
        """Return the number of queued objects for ``key``/``resource_type``."""
        with self._objects_queue_lock:
            return len(self._objects_queue[key].get(resource_type, []))
    def pop(self, key, resource_type):
        """Remove and return one object, or None if the list is empty."""
        with self._objects_queue_lock:
            objects = self._objects_queue[key].get(resource_type, [])
            return objects.pop() if objects else None
| true | true |
1c2fdade7e9808dec5d8d44a9e50dd624d2cefb4 | 1,958 | py | Python | covsirphy/regression/param_decision_tree.py | ardhanii/covid19-sir | 87881963c49a2fc5b6235c8b21269d216acaa941 | [
"Apache-2.0"
] | 97 | 2020-05-15T15:20:15.000Z | 2022-03-18T02:55:54.000Z | covsirphy/regression/param_decision_tree.py | ardhanii/covid19-sir | 87881963c49a2fc5b6235c8b21269d216acaa941 | [
"Apache-2.0"
] | 970 | 2020-06-01T13:48:34.000Z | 2022-03-29T08:20:49.000Z | covsirphy/regression/param_decision_tree.py | ardhani31/Covid19-SIRV-v3 | 59d95156b375c41259c46ce4e656b86903f92ec2 | [
"Apache-2.0"
] | 36 | 2020-05-15T15:36:43.000Z | 2022-02-25T17:59:08.000Z | #!/usr/bin/env python
# -*- coding: utf-8 -*-
from sklearn.decomposition import PCA
from sklearn.model_selection import GridSearchCV, TimeSeriesSplit
from sklearn.pipeline import Pipeline
from sklearn.preprocessing import MinMaxScaler
from sklearn.tree import DecisionTreeRegressor
from covsirphy.regression.regbase import _RegressorBase
from covsirphy.regression.reg_rate_converter import _RateConverter
class _ParamDecisionTreeRegressor(_RegressorBase):
    """
    Regressor predicting ODE model parameter values from indicators using a
    decision tree.

    Args:
        - X_train (pandas.DataFrame): X for training with time index
        - X_test (pandas.DataFrame): X for test with time index
        - Y_train (pandas.DataFrame): Y for training with time index
        - Y_test (pandas.DataFrame): Y for test with time index
        - X_target (pandas.DataFrame): X for prediction with time index
    """
    # Human-readable description of this regressor
    DESC = "Indicators -> Parameters with Decision Tree Regressor"

    def _fit(self):
        """
        Fit the regression pipeline with the training dataset, updating
        self._pipeline and self._param.
        """
        # Pipeline: rate conversion -> min/max scaling -> PCA -> decision tree
        pipeline_steps = [
            ("converter", _RateConverter()),
            ("scaler", MinMaxScaler()),
            ("pca", PCA(random_state=0)),
            ("regressor", DecisionTreeRegressor(random_state=0)),
        ]
        # Candidate hyper-parameters explored by the grid search
        search_space = {
            "converter__to_convert": [True, False],
            "pca__n_components": [0.3, 0.5, 0.7, 0.9],
            "regressor__max_depth": list(range(1, 10)),
        }
        # Time-ordered folds: validation data is never earlier than training data
        cv_splitter = TimeSeriesSplit(n_splits=5).split(self._X_train)
        searcher = GridSearchCV(
            Pipeline(steps=pipeline_steps), search_space, n_jobs=-1, cv=cv_splitter
        )
        searcher.fit(self._X_train, self._Y_train)
        # Expose the fitted search object and record each pipeline step's class
        self._pipeline = searcher
        self._param.update(**{name: type(step) for (name, step) in pipeline_steps})
| 38.392157 | 90 | 0.660368 |
from sklearn.decomposition import PCA
from sklearn.model_selection import GridSearchCV, TimeSeriesSplit
from sklearn.pipeline import Pipeline
from sklearn.preprocessing import MinMaxScaler
from sklearn.tree import DecisionTreeRegressor
from covsirphy.regression.regbase import _RegressorBase
from covsirphy.regression.reg_rate_converter import _RateConverter
class _ParamDecisionTreeRegressor(_RegressorBase):
    """Predict ODE model parameter values from indicators with a decision tree."""
    # Human-readable description of this regressor
    DESC = "Indicators -> Parameters with Decision Tree Regressor"
    def _fit(self):
        """Grid-search and fit the pipeline; updates self._pipeline and self._param."""
        # Hyper-parameter grid searched over the pipeline steps below
        param_grid = {
            "converter__to_convert": [True, False],
            "pca__n_components": [0.3, 0.5, 0.7, 0.9],
            "regressor__max_depth": list(range(1, 10)),
        }
        # Pipeline: rate conversion -> min/max scaling -> PCA -> decision tree
        steps = [
            ("converter", _RateConverter()),
            ("scaler", MinMaxScaler()),
            ("pca", PCA(random_state=0)),
            ("regressor", DecisionTreeRegressor(random_state=0)),
        ]
        # Time-series aware cross validation (no shuffling across time)
        tscv = TimeSeriesSplit(n_splits=5).split(self._X_train)
        pipeline = GridSearchCV(Pipeline(steps=steps), param_grid, n_jobs=-1, cv=tscv)
        pipeline.fit(self._X_train, self._Y_train)
        self._pipeline = pipeline
        # Record the class of each pipeline step
        self._param.update(**{k: type(v) for (k, v) in steps})
| true | true |
1c2fdaeab3d1bc770885a1c1c6fb65b25d46fa6d | 29,037 | py | Python | lightbus/transports/redis/event.py | apollo13/lightbus | ad9bb5e376e7aabb400d01307345e00fd07e4677 | [
"Apache-2.0"
] | null | null | null | lightbus/transports/redis/event.py | apollo13/lightbus | ad9bb5e376e7aabb400d01307345e00fd07e4677 | [
"Apache-2.0"
] | null | null | null | lightbus/transports/redis/event.py | apollo13/lightbus | ad9bb5e376e7aabb400d01307345e00fd07e4677 | [
"Apache-2.0"
] | null | null | null | import asyncio
import logging
import time
from collections import OrderedDict
from datetime import datetime
from enum import Enum
from typing import (
Mapping,
Optional,
List,
Tuple,
Union,
Sequence,
AsyncGenerator,
Iterable,
TYPE_CHECKING,
)
from aioredis import ConnectionClosedError, ReplyError
from aioredis.util import decode
from lightbus.transports.base import EventTransport, EventMessage
from lightbus.log import LBullets, L, Bold
from lightbus.serializers import ByFieldMessageSerializer, ByFieldMessageDeserializer
from lightbus.transports.redis.utilities import (
RedisEventMessage,
RedisTransportMixin,
normalise_since_value,
datetime_to_redis_steam_id,
redis_stream_id_add_one,
redis_stream_id_subtract_one,
)
from lightbus.utilities.async_tools import make_exception_checker, cancel
from lightbus.utilities.frozendict import frozendict
from lightbus.utilities.human import human_time
from lightbus.utilities.importing import import_from_string
if TYPE_CHECKING:
# pylint: disable=unused-import,cyclic-import
from lightbus.config import Config
from lightbus.client import BusClient
logger = logging.getLogger("lightbus.transports.redis")
Since = Union[str, datetime, None]
class StreamUse(Enum):
    """How events are mapped onto Redis streams.

    PER_API: one stream per API (the stream may carry several event types).
    PER_EVENT: one stream per individual event.
    """

    PER_API = "per_api"
    PER_EVENT = "per_event"

    def __eq__(self, other):
        # Allow comparison against the raw string value, so config values
        # such as "per_api" compare equal to StreamUse.PER_API.
        # pylint: disable=comparison-with-callable
        if isinstance(other, str):
            return self.value == other
        else:
            return super().__eq__(other)

    def __hash__(self):
        # Bug fix: defining __eq__ implicitly sets __hash__ to None, which
        # made members unhashable (unusable in sets or as dict keys). Hash by
        # value to stay consistent with the string-equality above.
        return hash(self.value)
class RedisEventTransport(RedisTransportMixin, EventTransport):
"""Redis Event Transport
For a description of the protocol see https://lightbus.org/reference/protocols/event/
"""
    def __init__(
        self,
        redis_pool=None,
        *,
        service_name: str,
        consumer_name: str,
        url=None,
        serializer=ByFieldMessageSerializer(),
        deserializer=ByFieldMessageDeserializer(RedisEventMessage),
        connection_parameters: Mapping = frozendict(maxsize=100),
        batch_size=10,
        reclaim_batch_size: int = None,
        acknowledgement_timeout: float = 60,
        max_stream_length: Optional[int] = 100_000,
        stream_use: StreamUse = StreamUse.PER_API,
        consumption_restart_delay: int = 5,
        consumer_ttl: int = 2_592_000,
    ):
        """Initialise the Redis event transport.

        Args:
            redis_pool: Existing Redis pool to use; otherwise one is set up from ``url``.
            service_name: Name of this service; combined with the listener name to
                form the Redis consumer group name.
            consumer_name: Name identifying this consumer within its group.
            url: Redis connection URL, used when ``redis_pool`` is not given.
            serializer: Turns an EventMessage into Redis stream fields.
            deserializer: Turns Redis stream fields back into a RedisEventMessage.
            connection_parameters: Extra options for the Redis connection pool.
            batch_size: Maximum messages fetched per XREADGROUP call.
            reclaim_batch_size: Maximum pending entries examined per XPENDING call;
                defaults to ``batch_size * 10``.
            acknowledgement_timeout: Seconds before an unacknowledged message may
                be claimed from another consumer.
            max_stream_length: Approximate cap on stream length (Redis MAXLEN).
            stream_use: Whether streams are shared per API or one per event.
            consumption_restart_delay: Seconds to wait before reconnecting after
                a dropped Redis connection during consumption.
            consumer_ttl: Seconds of idleness after which a consumer is cleaned up.
        """
        self.set_redis_pool(redis_pool, url, connection_parameters)
        self.batch_size = batch_size
        # Default the reclaim batch to 10x the regular read batch
        self.reclaim_batch_size = reclaim_batch_size if reclaim_batch_size else batch_size * 10
        self.service_name = service_name
        self.consumer_name = consumer_name
        self.acknowledgement_timeout = acknowledgement_timeout
        self.max_stream_length = max_stream_length
        self.stream_use = stream_use
        self.consumption_restart_delay = consumption_restart_delay
        self.consumer_ttl = consumer_ttl
        super().__init__(serializer=serializer, deserializer=deserializer)
    @classmethod
    def from_config(
        cls,
        config: "Config",
        service_name: str = None,
        consumer_name: str = None,
        url: str = "redis://127.0.0.1:6379/0",
        connection_parameters: Mapping = frozendict(maxsize=100),
        batch_size: int = 10,
        reclaim_batch_size: int = None,
        serializer: str = "lightbus.serializers.ByFieldMessageSerializer",
        deserializer: str = "lightbus.serializers.ByFieldMessageDeserializer",
        acknowledgement_timeout: float = 60,
        max_stream_length: Optional[int] = 100_000,
        stream_use: StreamUse = StreamUse.PER_API,
        consumption_restart_delay: int = 5,
        consumer_ttl: int = 2_592_000,
    ):
        """Build a transport from configuration values.

        Unlike ``__init__``, the serializer/deserializer are given as dotted-path
        strings and ``stream_use`` may be a plain string; both are resolved here.
        ``service_name``/``consumer_name`` fall back to the bus config's
        service/process names when not supplied.
        """
        # Resolve dotted-path strings into serializer/deserializer instances
        serializer = import_from_string(serializer)()
        deserializer = import_from_string(deserializer)(RedisEventMessage)
        service_name = service_name or config.service_name
        consumer_name = consumer_name or config.process_name
        if isinstance(stream_use, str):
            # Accept the enum name as a (case-insensitive) string from config
            stream_use = StreamUse[stream_use.upper()]
        return cls(
            redis_pool=None,
            service_name=service_name,
            consumer_name=consumer_name,
            url=url,
            connection_parameters=connection_parameters,
            batch_size=batch_size,
            reclaim_batch_size=reclaim_batch_size,
            serializer=serializer,
            deserializer=deserializer,
            acknowledgement_timeout=acknowledgement_timeout,
            max_stream_length=max_stream_length or None,
            stream_use=stream_use,
            consumption_restart_delay=consumption_restart_delay,
            consumer_ttl=consumer_ttl,
        )
    async def send_event(self, event_message: EventMessage, options: dict, bus_client: "BusClient"):
        """Publish an event message onto its Redis stream via XADD.

        The stream is derived from the message's api/event name; the stream is
        capped at roughly ``max_stream_length`` entries (inexact MAXLEN).
        """
        stream = self._get_stream_names(
            listen_for=[(event_message.api_name, event_message.event_name)]
        )[0]
        logger.debug(
            LBullets(
                L(
                    "Enqueuing event message {} in Redis stream {}",
                    Bold(event_message),
                    Bold(stream),
                ),
                items=dict(**event_message.get_metadata(), kwargs=event_message.get_kwargs()),
            )
        )
        # Performance: I suspect getting a connection from the connection manager each time is causing
        # performance issues. Need to confirm.
        with await self.connection_manager() as redis:
            start_time = time.time()
            await redis.xadd(
                stream=stream,
                fields=self.serializer(event_message),
                # Approximate cap on the stream length (exact trimming is slower)
                max_len=self.max_stream_length or None,
                exact_len=False,
            )
            logger.debug(
                L(
                    "Enqueued event message {} in Redis in {} stream {}",
                    Bold(event_message.canonical_name),
                    human_time(time.time() - start_time),
                    Bold(stream),
                )
            )
    async def consume(
        self,
        listen_for: List[Tuple[str, str]],
        listener_name: str,
        bus_client: "BusClient",
        since: Union[Since, Sequence[Since]] = "$",
        forever=True,
    ) -> AsyncGenerator[List[RedisEventMessage], None]:
        """Consume events for the given APIs.

        Yields batches of messages produced by two concurrent tasks: one
        fetching new messages, and one reclaiming messages abandoned by other
        consumers in the group. The consumer group name is derived from the
        service name plus ``listener_name``.
        """
        self._sanity_check_listen_for(listen_for)
        consumer_group = f"{self.service_name}-{listener_name}"
        if not isinstance(since, (list, tuple)):
            # Since has been specified as a single value. Normalise it into
            # the value-per-listener format.
            since = [since] * len(listen_for)
        since = map(normalise_since_value, since)
        stream_names = self._get_stream_names(listen_for)
        # Keys are stream names, values are the latest ID consumed from that stream
        streams = OrderedDict(zip(stream_names, since))
        expected_events = {event_name for _, event_name in listen_for}
        logger.debug(
            LBullets(
                L(
                    "Consuming events as consumer {} in group {} on streams",
                    Bold(self.consumer_name),
                    Bold(consumer_group),
                ),
                items={"{} ({})".format(*v) for v in streams.items()},
            )
        )
        # Cleanup any old groups & consumers
        await self._cleanup(stream_names)
        # Here we use a queue to combine messages coming from both the
        # fetch messages loop and the reclaim messages loop.
        queue = asyncio.Queue(maxsize=1)
        async def consume_loop():
            """Regular event consuming. See _fetch_new_messages()"""
            while True:
                try:
                    async for messages in self._fetch_new_messages(
                        streams, consumer_group, expected_events, forever
                    ):
                        await queue.put(messages)
                        # Wait for the queue to empty before trying to get another message
                        await queue.join()
                except (ConnectionClosedError, ConnectionResetError):
                    # ConnectionClosedError is from aioredis. However, sometimes the connection
                    # can die outside of aioredis, in which case we get a builtin ConnectionResetError.
                    logger.warning(
                        f"Redis connection lost while consuming events, reconnecting "
                        f"in {self.consumption_restart_delay} seconds..."
                    )
                    await asyncio.sleep(self.consumption_restart_delay)
        async def reclaim_loop():
            """
            Reclaim messages which other consumers have failed to
            process in reasonable time. See _reclaim_lost_messages()
            """
            # Nothing can be pending-for-too-long until at least one timeout has elapsed
            await asyncio.sleep(self.acknowledgement_timeout)
            async for messages in self._reclaim_lost_messages(
                stream_names, consumer_group, expected_events
            ):
                await queue.put(messages)
                # Wait for the queue to empty before trying to get another message
                await queue.join()
        consume_task = None
        reclaim_task = None
        try:
            # Run the two above coroutines in their own tasks
            consume_task = asyncio.ensure_future(consume_loop())
            reclaim_task = asyncio.ensure_future(reclaim_loop())
            # Make sure we surface any exceptions that occur in either task
            consume_task.add_done_callback(make_exception_checker(bus_client))
            reclaim_task.add_done_callback(make_exception_checker(bus_client))
            while True:
                try:
                    messages = await queue.get()
                    yield messages
                    queue.task_done()
                except GeneratorExit:
                    return
        finally:
            # Make sure we cleanup the tasks we created
            await cancel(consume_task, reclaim_task)
    async def _fetch_new_messages(
        self, streams, consumer_group, expected_events, forever
    ) -> AsyncGenerator[List[EventMessage], None]:
        """Coroutine to consume new messages.

        The consumption has two stages:

          1. Fetch and yield any messages this consumer is responsible for
             processing but has yet to successfully process. This can happen in
             cases where a message was previously consumed but not acknowledged
             (i.e. due to an error). This is a one-off startup stage.
          2. Wait for new messages to arrive. Yield these messages when they
             arrive, then resume waiting for messages.

        See Also:
            _reclaim_lost_messages() - Another coroutine which reclaims messages which timed out
                while being processed by other consumers in this group
        """
        with await self.connection_manager() as redis:
            # Firstly create the consumer group if we need to
            await self._create_consumer_groups(streams, redis, consumer_group)
            # Get any messages that this consumer has yet to process.
            # This can happen in the case where the processes died before acknowledging.
            pending_messages = await redis.xread_group(
                group_name=consumer_group,
                consumer_name=self.consumer_name,
                streams=list(streams.keys()),
                # Using ID '0' indicates we want unacked pending messages
                latest_ids=["0"] * len(streams),
                timeout=None,  # Don't block, return immediately
            )
            event_messages = []
            for stream, message_id, fields in pending_messages:
                message_id = decode(message_id, "utf8")
                stream = decode(stream, "utf8")
                event_message = self._fields_to_message(
                    fields,
                    expected_events,
                    stream=stream,
                    native_id=message_id,
                    consumer_group=consumer_group,
                )
                if not event_message:
                    # noop message, or message for an event we don't care about
                    continue
                logger.debug(
                    LBullets(
                        L(
                            "⬅ Receiving pending event {} on stream {}",
                            Bold(message_id),
                            Bold(stream),
                        ),
                        items=dict(
                            **event_message.get_metadata(), kwargs=event_message.get_kwargs()
                        ),
                    )
                )
                event_messages.append(event_message)
            if event_messages:
                yield event_messages
            # We've now cleaned up any old messages that were hanging around.
            # Now we get on to the main loop which blocks and waits for new messages
            while True:
                # Fetch some messages.
                # This will block until there are some messages available
                stream_messages = await redis.xread_group(
                    group_name=consumer_group,
                    consumer_name=self.consumer_name,
                    streams=list(streams.keys()),
                    # Using ID '>' indicates we only want new messages which have not
                    # been passed to other consumers in this group
                    latest_ids=[">"] * len(streams),
                    count=self.batch_size,
                )
                # Handle the messages we have received
                event_messages = []
                for stream, message_id, fields in stream_messages:
                    message_id = decode(message_id, "utf8")
                    stream = decode(stream, "utf8")
                    event_message = self._fields_to_message(
                        fields,
                        expected_events,
                        stream=stream,
                        native_id=message_id,
                        consumer_group=consumer_group,
                    )
                    if not event_message:
                        # noop message, or message for an event we don't care about
                        continue
                    logger.debug(
                        LBullets(
                            L(
                                "⬅ Received new event {} on stream {}",
                                Bold(message_id),
                                Bold(stream),
                            ),
                            items=dict(
                                **event_message.get_metadata(), kwargs=event_message.get_kwargs()
                            ),
                        )
                    )
                    # NOTE: YIELD ALL MESSAGES, NOT JUST ONE
                    event_messages.append(event_message)
                if event_messages:
                    yield event_messages
                if not forever:
                    return
    async def _reclaim_lost_messages(
        self, stream_names: List[str], consumer_group: str, expected_events: set
    ) -> AsyncGenerator[List[EventMessage], None]:
        """Reclaim batches of messages that other consumers in the group failed
        to acknowledge within a timeout.

        The timeout period is specified by the `acknowledgement_timeout` option.
        Uses XPENDING to page through the group's pending entries and XCLAIM to
        take ownership of the timed-out ones.
        """
        with await self.connection_manager() as redis:
            for stream in stream_names:
                old_messages = True
                reclaim_from = None
                # Keep pulling reclaimable messages from Redis until there are none left
                while old_messages:
                    # reclaim_from keeps track of where we are up to in our fetching
                    # of messages
                    if not reclaim_from:
                        # This is our first iteration, so fetch from the start of time
                        reclaim_from = "-"
                    else:
                        # This is a subsequent iteration. XPENDING's 'start' parameter is inclusive,
                        # so we need to add one to the reclaim_from value to ensure we don't get a message
                        # we've already seen
                        reclaim_from = redis_stream_id_add_one(reclaim_from)
                    # Fetch the next batch of messages
                    old_messages = await redis.xpending(
                        stream, consumer_group, reclaim_from, "+", count=self.reclaim_batch_size
                    )
                    # XPENDING/XCLAIM idle times are in milliseconds
                    timeout = self.acknowledgement_timeout * 1000
                    event_messages = []
                    # Try to claim each message
                    for (
                        message_id,
                        consumer_name,
                        ms_since_last_delivery,
                        num_deliveries,
                    ) in old_messages:
                        message_id = decode(message_id, "utf8")
                        consumer_name = decode(consumer_name, "utf8")
                        reclaim_from = message_id
                        # This 'if' is not strictly required as the subsequent call to xclaim
                        # will honor the timeout parameter. However, using this if here allows
                        # for more sane logging from the point of view of the user. Without it
                        # we would report that we were trying to claim messages which were
                        # clearly not timed out yet.
                        if ms_since_last_delivery > timeout:
                            logger.info(
                                L(
                                    "Found timed out event {} in stream {}. Abandoned by {}. Attempting to reclaim...",
                                    Bold(message_id),
                                    Bold(stream),
                                    Bold(consumer_name),
                                )
                            )
                            # *Try* to claim the messages...
                            result = await redis.xclaim(
                                stream, consumer_group, self.consumer_name, int(timeout), message_id
                            )
                            # Parse each message we managed to claim
                            for claimed_message_id, fields in result:
                                claimed_message_id = decode(claimed_message_id, "utf8")
                                event_message = self._fields_to_message(
                                    fields,
                                    expected_events,
                                    stream=stream,
                                    native_id=claimed_message_id,
                                    consumer_group=consumer_group,
                                )
                                if not event_message:
                                    # noop message, or message for an event we don't care about
                                    continue
                                logger.debug(
                                    LBullets(
                                        L(
                                            "⬅ Reclaimed timed out event {} on stream {}. Abandoned by {}.",
                                            Bold(message_id),
                                            Bold(stream),
                                            Bold(consumer_name),
                                        ),
                                        items=dict(
                                            **event_message.get_metadata(),
                                            kwargs=event_message.get_kwargs(),
                                        ),
                                    )
                                )
                                event_messages.append(event_message)
                    # And yield our batch of messages
                    if event_messages:
                        yield event_messages
async def acknowledge(self, *event_messages: RedisEventMessage, bus_client: "BusClient"):
"""Acknowledge that a message has been successfully processed
"""
with await self.connection_manager() as redis:
p = redis.pipeline()
for event_message in event_messages:
p.xack(event_message.stream, event_message.consumer_group, event_message.native_id)
logging.debug(
f"Preparing to acknowledge message {event_message.id} (Native ID: {event_message.native_id})"
)
logger.debug(
f"Batch acknowledging successful processing of {len(event_messages)} message."
)
await p.execute()
    async def history(
        self,
        api_name,
        event_name,
        start: datetime = None,
        stop: datetime = None,
        start_inclusive: bool = True,
        batch_size: int = 100,
    ) -> AsyncGenerator[EventMessage, None]:
        """Retrieve historical events for the given API, newest first.

        Will not have any impact on existing consumer groups. Iterates the
        stream in reverse (XREVRANGE) in batches of ``batch_size``, between the
        ``start``/``stop`` datetimes (converted to Redis stream IDs).
        """
        redis_start = datetime_to_redis_steam_id(start) if start else "-"
        redis_stop = datetime_to_redis_steam_id(stop) if stop else "+"
        if start and not start_inclusive:
            # Stream ranges are inclusive, so step past the start ID to exclude it
            redis_start = redis_stream_id_add_one(redis_start)
        stream_name = self._get_stream_names([(api_name, event_name)])[0]
        logger.debug(
            f"Getting history for stream {stream_name} from {redis_start} ({start}) "
            f"to {redis_stop} ({stop}) in batches of {batch_size}"
        )
        with await self.connection_manager() as redis:
            messages = True
            while messages:
                messages = await redis.xrevrange(
                    stream_name, redis_stop, redis_start, count=batch_size
                )
                if not messages:
                    return
                for message_id, fields in messages:
                    message_id = decode(message_id, "utf8")
                    # Move the window down past this message for the next batch
                    redis_stop = redis_stream_id_subtract_one(message_id)
                    event_message = self._fields_to_message(
                        fields,
                        expected_event_names={event_name},
                        stream=stream_name,
                        native_id=message_id,
                        consumer_group=None,
                    )
                    if event_message:
                        yield event_message
    async def _create_consumer_groups(self, streams, redis, consumer_group):
        """Ensure the consumer group exists for every stream.

        Creating a group requires the stream to exist, so empty streams are
        first created by writing a noop message into them.
        """
        for stream, since in streams.items():
            if not await redis.exists(stream):
                # Add a noop to ensure the stream exists
                # TODO: We can now use MKSTREAM, change this logic
                # Documented here: https://redis.io/topics/streams-intro
                await redis.xadd(stream, fields={"": ""})
            try:
                # Create the group (it may already exist)
                await redis.xgroup_create(stream, consumer_group, latest_id=since)
            except ReplyError as e:
                if "BUSYGROUP" in str(e):
                    # Already exists
                    pass
                else:
                    raise
async def _cleanup(self, stream_names: List[str]):
"""Cleanup old consumers and groups
A group will be deleted if it contains no consumers.
A consumer will be deleted if it has been idle for more than consumer_ttl.
"""
if not self.consumer_ttl:
# Don't do the cleanup if no TTL is given, consider this to mean
# cleanup is disabled
return
with await self.connection_manager() as redis:
# For every stream key...
for stream_name in stream_names:
consumers: List[Tuple[str, str]] = []
# Get all the groups for that key...
try:
groups = await redis.xinfo_groups(stream_name)
except ReplyError as e:
if "ERR no such key" in str(e):
# Steam doesn't exist yet
groups = []
else:
raise
for group in groups:
active_consumers = 0
group_name = group[b"name"]
# Get all the consumers for that group
for consumer in await redis.xinfo_consumers(stream_name, group_name):
consumer_name = consumer[b"name"]
idle_seconds = consumer[b"idle"] / 1000
# And delete the consumer if they have not re-started
# listening for self.consumer_ttl seconds
if idle_seconds >= self.consumer_ttl:
logger.debug(
f"Cleaning up consumer {consumer_name} in group {group_name} on stream {stream_name}. "
f"The consumer has been idle for {idle_seconds} seconds, which is more than the "
f"consumer TTL of {self.consumer_ttl}"
)
await redis.xgroup_delconsumer(stream_name, group_name, consumer_name)
else:
active_consumers += 1
# If no active consumers were found for this group, then delete the entire group
# on the grounds that it is no longer used and can be cleaned up.
if not active_consumers:
# We do this atomically using a lua script. This avoids race conditions
# whereby a new consumer comes into existence the moment before we delete the group
try:
await redis.eval(
ATOMIC_DESTROY_CONSUMER_GROUP, [stream_name], [group_name]
)
except ReplyError as e:
if "NOGROUP" in str(e):
# Already deleted
pass
def _fields_to_message(
self,
fields: dict,
expected_event_names: Iterable[str],
stream: str,
native_id: str,
consumer_group: Optional[str],
) -> Optional[RedisEventMessage]:
"""Convert a dict of Redis message fields into a RedisEventMessage"""
if tuple(fields.items()) == ((b"", b""),):
# Is a noop message, ignore
return None
message = self.deserializer(
fields, stream=stream, native_id=native_id, consumer_group=consumer_group
)
want_message = ("*" in expected_event_names) or (message.event_name in expected_event_names)
if self.stream_use == StreamUse.PER_API and not want_message:
# Only care about events we are listening for. If we have one stream
# per API then we're probably going to receive some events we don't care about.
logger.debug(
f"Ignoring message for unneeded event: {message}. "
f"Only listening for {', '.join(expected_event_names)}"
)
return None
return message
def _get_stream_names(self, listen_for):
"""Convert a list of api names & event names into stream names
The format of these names will vary based on the stream_use setting.
"""
stream_names = []
for api_name, event_name in listen_for:
if self.stream_use == StreamUse.PER_EVENT:
stream_name = f"{api_name}.{event_name}:stream"
elif self.stream_use == StreamUse.PER_API:
stream_name = f"{api_name}.*:stream"
else:
raise ValueError(
"Invalid value for stream_use config option. This should have been caught "
"during config validation."
)
if stream_name not in stream_names:
stream_names.append(stream_name)
return stream_names
# See RedisEventTransport._cleanup()
ATOMIC_DESTROY_CONSUMER_GROUP = """
local stream_name = KEYS[1]
local group_name = ARGV[1]
local consumers = redis.call('xinfo', 'consumers', stream_name, group_name)
if table.getn(consumers) == 0 then
redis.call('xgroup', 'destroy', stream_name, group_name)
end
"""
| 41.719828 | 119 | 0.5431 | import asyncio
import logging
import time
from collections import OrderedDict
from datetime import datetime
from enum import Enum
from typing import (
Mapping,
Optional,
List,
Tuple,
Union,
Sequence,
AsyncGenerator,
Iterable,
TYPE_CHECKING,
)
from aioredis import ConnectionClosedError, ReplyError
from aioredis.util import decode
from lightbus.transports.base import EventTransport, EventMessage
from lightbus.log import LBullets, L, Bold
from lightbus.serializers import ByFieldMessageSerializer, ByFieldMessageDeserializer
from lightbus.transports.redis.utilities import (
RedisEventMessage,
RedisTransportMixin,
normalise_since_value,
datetime_to_redis_steam_id,
redis_stream_id_add_one,
redis_stream_id_subtract_one,
)
from lightbus.utilities.async_tools import make_exception_checker, cancel
from lightbus.utilities.frozendict import frozendict
from lightbus.utilities.human import human_time
from lightbus.utilities.importing import import_from_string
if TYPE_CHECKING:
from lightbus.config import Config
from lightbus.client import BusClient
logger = logging.getLogger("lightbus.transports.redis")
Since = Union[str, datetime, None]
class StreamUse(Enum):
    """How events are mapped onto Redis streams (one per API or one per event)."""

    PER_API = "per_api"
    PER_EVENT = "per_event"

    def __eq__(self, other):
        # Allow comparison against the raw string value (e.g. from config)
        if isinstance(other, str):
            return self.value == other
        else:
            return super().__eq__(other)

    def __hash__(self):
        # Bug fix: defining __eq__ implicitly sets __hash__ to None, making
        # members unhashable. Hash by value, consistent with __eq__ above.
        return hash(self.value)
class RedisEventTransport(RedisTransportMixin, EventTransport):
def __init__(
self,
redis_pool=None,
*,
service_name: str,
consumer_name: str,
url=None,
serializer=ByFieldMessageSerializer(),
deserializer=ByFieldMessageDeserializer(RedisEventMessage),
connection_parameters: Mapping = frozendict(maxsize=100),
batch_size=10,
reclaim_batch_size: int = None,
acknowledgement_timeout: float = 60,
max_stream_length: Optional[int] = 100_000,
stream_use: StreamUse = StreamUse.PER_API,
consumption_restart_delay: int = 5,
consumer_ttl: int = 2_592_000,
):
self.set_redis_pool(redis_pool, url, connection_parameters)
self.batch_size = batch_size
self.reclaim_batch_size = reclaim_batch_size if reclaim_batch_size else batch_size * 10
self.service_name = service_name
self.consumer_name = consumer_name
self.acknowledgement_timeout = acknowledgement_timeout
self.max_stream_length = max_stream_length
self.stream_use = stream_use
self.consumption_restart_delay = consumption_restart_delay
self.consumer_ttl = consumer_ttl
super().__init__(serializer=serializer, deserializer=deserializer)
@classmethod
def from_config(
cls,
config: "Config",
service_name: str = None,
consumer_name: str = None,
url: str = "redis://127.0.0.1:6379/0",
connection_parameters: Mapping = frozendict(maxsize=100),
batch_size: int = 10,
reclaim_batch_size: int = None,
serializer: str = "lightbus.serializers.ByFieldMessageSerializer",
deserializer: str = "lightbus.serializers.ByFieldMessageDeserializer",
acknowledgement_timeout: float = 60,
max_stream_length: Optional[int] = 100_000,
stream_use: StreamUse = StreamUse.PER_API,
consumption_restart_delay: int = 5,
consumer_ttl: int = 2_592_000,
):
serializer = import_from_string(serializer)()
deserializer = import_from_string(deserializer)(RedisEventMessage)
service_name = service_name or config.service_name
consumer_name = consumer_name or config.process_name
if isinstance(stream_use, str):
stream_use = StreamUse[stream_use.upper()]
return cls(
redis_pool=None,
service_name=service_name,
consumer_name=consumer_name,
url=url,
connection_parameters=connection_parameters,
batch_size=batch_size,
reclaim_batch_size=reclaim_batch_size,
serializer=serializer,
deserializer=deserializer,
acknowledgement_timeout=acknowledgement_timeout,
max_stream_length=max_stream_length or None,
stream_use=stream_use,
consumption_restart_delay=consumption_restart_delay,
consumer_ttl=consumer_ttl,
)
async def send_event(self, event_message: EventMessage, options: dict, bus_client: "BusClient"):
stream = self._get_stream_names(
listen_for=[(event_message.api_name, event_message.event_name)]
)[0]
logger.debug(
LBullets(
L(
"Enqueuing event message {} in Redis stream {}",
Bold(event_message),
Bold(stream),
),
items=dict(**event_message.get_metadata(), kwargs=event_message.get_kwargs()),
)
)
with await self.connection_manager() as redis:
start_time = time.time()
await redis.xadd(
stream=stream,
fields=self.serializer(event_message),
max_len=self.max_stream_length or None,
exact_len=False,
)
logger.debug(
L(
"Enqueued event message {} in Redis in {} stream {}",
Bold(event_message.canonical_name),
human_time(time.time() - start_time),
Bold(stream),
)
)
async def consume(
self,
listen_for: List[Tuple[str, str]],
listener_name: str,
bus_client: "BusClient",
since: Union[Since, Sequence[Since]] = "$",
forever=True,
) -> AsyncGenerator[List[RedisEventMessage], None]:
self._sanity_check_listen_for(listen_for)
consumer_group = f"{self.service_name}-{listener_name}"
if not isinstance(since, (list, tuple)):
since = [since] * len(listen_for)
since = map(normalise_since_value, since)
stream_names = self._get_stream_names(listen_for)
streams = OrderedDict(zip(stream_names, since))
expected_events = {event_name for _, event_name in listen_for}
logger.debug(
LBullets(
L(
"Consuming events as consumer {} in group {} on streams",
Bold(self.consumer_name),
Bold(consumer_group),
),
items={"{} ({})".format(*v) for v in streams.items()},
)
)
await self._cleanup(stream_names)
queue = asyncio.Queue(maxsize=1)
async def consume_loop():
while True:
try:
async for messages in self._fetch_new_messages(
streams, consumer_group, expected_events, forever
):
await queue.put(messages)
await queue.join()
except (ConnectionClosedError, ConnectionResetError):
logger.warning(
f"Redis connection lost while consuming events, reconnecting "
f"in {self.consumption_restart_delay} seconds..."
)
await asyncio.sleep(self.consumption_restart_delay)
async def reclaim_loop():
await asyncio.sleep(self.acknowledgement_timeout)
async for messages in self._reclaim_lost_messages(
stream_names, consumer_group, expected_events
):
await queue.put(messages)
await queue.join()
consume_task = None
reclaim_task = None
try:
consume_task = asyncio.ensure_future(consume_loop())
reclaim_task = asyncio.ensure_future(reclaim_loop())
consume_task.add_done_callback(make_exception_checker(bus_client))
reclaim_task.add_done_callback(make_exception_checker(bus_client))
while True:
try:
messages = await queue.get()
yield messages
queue.task_done()
except GeneratorExit:
return
finally:
await cancel(consume_task, reclaim_task)
    async def _fetch_new_messages(
        self, streams, consumer_group, expected_events, forever
    ) -> AsyncGenerator[List[EventMessage], None]:
        """Yield batches of messages delivered to this consumer.

        First drains messages previously delivered to this consumer but never
        acknowledged (XREADGROUP with ID "0", e.g. after an unclean shutdown),
        then enters a blocking loop reading only new messages (ID ">").

        Stops after the first new-message batch when ``forever`` is False.
        """
        with await self.connection_manager() as redis:
            # Groups must exist before we can read from them
            await self._create_consumer_groups(streams, redis, consumer_group)
            # ID "0" re-reads this consumer's own pending (delivered but
            # unacknowledged) messages.
            pending_messages = await redis.xread_group(
                group_name=consumer_group,
                consumer_name=self.consumer_name,
                streams=list(streams.keys()),
                latest_ids=["0"] * len(streams),
                timeout=None,
            )
            event_messages = []
            for stream, message_id, fields in pending_messages:
                message_id = decode(message_id, "utf8")
                stream = decode(stream, "utf8")
                event_message = self._fields_to_message(
                    fields,
                    expected_events,
                    stream=stream,
                    native_id=message_id,
                    consumer_group=consumer_group,
                )
                if not event_message:
                    # noop message, or message an event we don't care about
                    continue
                logger.debug(
                    LBullets(
                        L(
                            "⬅ Receiving pending event {} on stream {}",
                            Bold(message_id),
                            Bold(stream),
                        ),
                        items=dict(
                            **event_message.get_metadata(), kwargs=event_message.get_kwargs()
                        ),
                    )
                )
                event_messages.append(event_message)
            if event_messages:
                yield event_messages
            # Now we get on to the main loop which blocks and waits for new messages
            while True:
                # Fetch some messages.
                # This will block until there are some messages available
                stream_messages = await redis.xread_group(
                    group_name=consumer_group,
                    consumer_name=self.consumer_name,
                    streams=list(streams.keys()),
                    # Using ID '>' indicates we only want new messages which have not
                    # been passed to other consumers in this group
                    latest_ids=[">"] * len(streams),
                    count=self.batch_size,
                )
                # Handle the messages we have received
                event_messages = []
                for stream, message_id, fields in stream_messages:
                    message_id = decode(message_id, "utf8")
                    stream = decode(stream, "utf8")
                    event_message = self._fields_to_message(
                        fields,
                        expected_events,
                        stream=stream,
                        native_id=message_id,
                        consumer_group=consumer_group,
                    )
                    if not event_message:
                        # noop message, or message an event we don't care about
                        continue
                    logger.debug(
                        LBullets(
                            L(
                                "⬅ Received new event {} on stream {}",
                                Bold(message_id),
                                Bold(stream),
                            ),
                            items=dict(
                                **event_message.get_metadata(), kwargs=event_message.get_kwargs()
                            ),
                        )
                    )
                    event_messages.append(event_message)
                if event_messages:
                    yield event_messages
                if not forever:
                    return
    async def _reclaim_lost_messages(
        self, stream_names: List[str], consumer_group: str, expected_events: set
    ) -> AsyncGenerator[List[EventMessage], None]:
        """Yield batches of messages abandoned by timed-out consumers.

        Walks each stream's pending-entries list (XPENDING) in batches of
        ``reclaim_batch_size`` and XCLAIMs, for this consumer, any message
        that has been idle longer than ``acknowledgement_timeout``.
        """
        with await self.connection_manager() as redis:
            for stream in stream_names:
                old_messages = True
                reclaim_from = None
                while old_messages:
                    if not reclaim_from:
                        # "-" = start from the very beginning of the PEL
                        reclaim_from = "-"
                    else:
                        # so we need to add one to the reclaim_from value to ensure we don't get a message
                        reclaim_from = redis_stream_id_add_one(reclaim_from)
                    # Fetch the next batch of messages
                    old_messages = await redis.xpending(
                        stream, consumer_group, reclaim_from, "+", count=self.reclaim_batch_size
                    )
                    # Redis reports idle time in milliseconds
                    timeout = self.acknowledgement_timeout * 1000
                    event_messages = []
                    # Try to claim each messages
                    for (
                        message_id,
                        consumer_name,
                        ms_since_last_delivery,
                        num_deliveries,
                    ) in old_messages:
                        message_id = decode(message_id, "utf8")
                        consumer_name = decode(consumer_name, "utf8")
                        # Remember where this batch ended so the next
                        # XPENDING call resumes just past it.
                        reclaim_from = message_id
                        # This 'if' is not strictly required as the subsequent call to xclaim
                        # will honor the timeout parameter. However, using this if here allows
                        # for more sane logging from the point of view of the user. Without it
                        # we would report that we were trying to claim messages which were
                        # clearly not timed out yet.
                        if ms_since_last_delivery > timeout:
                            logger.info(
                                L(
                                    "Found timed out event {} in stream {}. Abandoned by {}. Attempting to reclaim...",
                                    Bold(message_id),
                                    Bold(stream),
                                    Bold(consumer_name),
                                )
                            )
                        # *Try* to claim the messages...
                        result = await redis.xclaim(
                            stream, consumer_group, self.consumer_name, int(timeout), message_id
                        )
                        # Parse each message we managed to claim
                        for claimed_message_id, fields in result:
                            claimed_message_id = decode(claimed_message_id, "utf8")
                            event_message = self._fields_to_message(
                                fields,
                                expected_events,
                                stream=stream,
                                native_id=claimed_message_id,
                                consumer_group=consumer_group,
                            )
                            if not event_message:
                                # noop message, or message an event we don't care about
                                continue
                            logger.debug(
                                LBullets(
                                    L(
                                        "⬅ Reclaimed timed out event {} on stream {}. Abandoned by {}.",
                                        Bold(message_id),
                                        Bold(stream),
                                        Bold(consumer_name),
                                    ),
                                    items=dict(
                                        **event_message.get_metadata(),
                                        kwargs=event_message.get_kwargs(),
                                    ),
                                )
                            )
                            event_messages.append(event_message)
                    if event_messages:
                        yield event_messages
async def acknowledge(self, *event_messages: RedisEventMessage, bus_client: "BusClient"):
with await self.connection_manager() as redis:
p = redis.pipeline()
for event_message in event_messages:
p.xack(event_message.stream, event_message.consumer_group, event_message.native_id)
logging.debug(
f"Preparing to acknowledge message {event_message.id} (Native ID: {event_message.native_id})"
)
logger.debug(
f"Batch acknowledging successful processing of {len(event_messages)} message."
)
await p.execute()
    async def history(
        self,
        api_name,
        event_name,
        start: datetime = None,
        stop: datetime = None,
        start_inclusive: bool = True,
        batch_size: int = 100,
    ) -> AsyncGenerator[EventMessage, None]:
        """Yield stored events for api_name.event_name, newest first.

        Pages backwards through the stream with XREVRANGE in batches of
        ``batch_size``, between ``start`` and ``stop`` (both default to the
        whole stream). ``stop`` is always inclusive; ``start`` is inclusive
        unless ``start_inclusive`` is False.
        """
        redis_start = datetime_to_redis_steam_id(start) if start else "-"
        redis_stop = datetime_to_redis_steam_id(stop) if stop else "+"
        if start and not start_inclusive:
            # Shift the lower bound past the start ID to exclude it
            redis_start = redis_stream_id_add_one(redis_start)
        stream_name = self._get_stream_names([(api_name, event_name)])[0]
        logger.debug(
            f"Getting history for stream {stream_name} from {redis_start} ({start}) "
            f"to {redis_stop} ({stop}) in batches of {batch_size}"
        )
        with await self.connection_manager() as redis:
            messages = True
            while messages:
                messages = await redis.xrevrange(
                    stream_name, redis_stop, redis_start, count=batch_size
                )
                if not messages:
                    return
                for message_id, fields in messages:
                    message_id = decode(message_id, "utf8")
                    # Move the upper bound below the oldest message seen so
                    # the next page continues further back in time.
                    redis_stop = redis_stream_id_subtract_one(message_id)
                    event_message = self._fields_to_message(
                        fields,
                        expected_event_names={event_name},
                        stream=stream_name,
                        native_id=message_id,
                        consumer_group=None,
                    )
                    if event_message:
                        yield event_message
    async def _create_consumer_groups(self, streams, redis, consumer_group):
        """Ensure ``consumer_group`` exists on every stream in ``streams``.

        Streams that do not yet exist are first created by inserting an
        empty placeholder ("noop") entry — presumably because group creation
        requires the stream to exist; the noop is filtered out again by
        ``_fields_to_message``. A BUSYGROUP reply means the group already
        exists and is ignored; other errors are re-raised.
        """
        for stream, since in streams.items():
            if not await redis.exists(stream):
                # Create the stream via a placeholder noop entry
                await redis.xadd(stream, fields={"": ""})
            try:
                await redis.xgroup_create(stream, consumer_group, latest_id=since)
            except ReplyError as e:
                if "BUSYGROUP" in str(e):
                    # Group already exists; nothing to do
                    pass
                else:
                    raise
async def _cleanup(self, stream_names: List[str]):
if not self.consumer_ttl:
# cleanup is disabled
return
with await self.connection_manager() as redis:
# For every stream key...
for stream_name in stream_names:
consumers: List[Tuple[str, str]] = []
# Get all the groups for that key...
try:
groups = await redis.xinfo_groups(stream_name)
except ReplyError as e:
if "ERR no such key" in str(e):
# Steam doesn't exist yet
groups = []
else:
raise
for group in groups:
active_consumers = 0
group_name = group[b"name"]
for consumer in await redis.xinfo_consumers(stream_name, group_name):
consumer_name = consumer[b"name"]
idle_seconds = consumer[b"idle"] / 1000
if idle_seconds >= self.consumer_ttl:
logger.debug(
f"Cleaning up consumer {consumer_name} in group {group_name} on stream {stream_name}. "
f"The consumer has been idle for {idle_seconds} seconds, which is more than the "
f"consumer TTL of {self.consumer_ttl}"
)
await redis.xgroup_delconsumer(stream_name, group_name, consumer_name)
else:
active_consumers += 1
if not active_consumers:
try:
await redis.eval(
ATOMIC_DESTROY_CONSUMER_GROUP, [stream_name], [group_name]
)
except ReplyError as e:
if "NOGROUP" in str(e):
pass
def _fields_to_message(
self,
fields: dict,
expected_event_names: Iterable[str],
stream: str,
native_id: str,
consumer_group: Optional[str],
) -> Optional[RedisEventMessage]:
if tuple(fields.items()) == ((b"", b""),):
return None
message = self.deserializer(
fields, stream=stream, native_id=native_id, consumer_group=consumer_group
)
want_message = ("*" in expected_event_names) or (message.event_name in expected_event_names)
if self.stream_use == StreamUse.PER_API and not want_message:
logger.debug(
f"Ignoring message for unneeded event: {message}. "
f"Only listening for {', '.join(expected_event_names)}"
)
return None
return message
def _get_stream_names(self, listen_for):
stream_names = []
for api_name, event_name in listen_for:
if self.stream_use == StreamUse.PER_EVENT:
stream_name = f"{api_name}.{event_name}:stream"
elif self.stream_use == StreamUse.PER_API:
stream_name = f"{api_name}.*:stream"
else:
raise ValueError(
"Invalid value for stream_use config option. This should have been caught "
"during config validation."
)
if stream_name not in stream_names:
stream_names.append(stream_name)
return stream_names
# Lua script: destroy a consumer group only if it (still) has no consumers.
# Running the check and the destroy inside a single script makes the pair
# atomic with respect to other Redis clients (a consumer joining in between
# would otherwise be silently destroyed along with the group).
ATOMIC_DESTROY_CONSUMER_GROUP = """
local stream_name = KEYS[1]
local group_name = ARGV[1]
local consumers = redis.call('xinfo', 'consumers', stream_name, group_name)
if table.getn(consumers) == 0 then
redis.call('xgroup', 'destroy', stream_name, group_name)
end
"""
| true | true |
1c2fdb0210a8225d09724a1dc46d1be23dc02305 | 1,206 | py | Python | twkit/__init__.py | evaperon/twAwler | 8e9f2064cad846177ed6547b9f56f053226a2d5e | [
"Apache-2.0"
] | 5 | 2018-12-06T16:14:14.000Z | 2020-05-22T07:36:45.000Z | twkit/__init__.py | evaperon/twAwler | 8e9f2064cad846177ed6547b9f56f053226a2d5e | [
"Apache-2.0"
] | null | null | null | twkit/__init__.py | evaperon/twAwler | 8e9f2064cad846177ed6547b9f56f053226a2d5e | [
"Apache-2.0"
] | 3 | 2020-04-20T07:20:18.000Z | 2021-08-19T17:31:38.000Z | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
###########################################
# (c) 2016-2020 Polyvios Pratikakis
# polyvios@ics.forth.gr
###########################################
"""A library for crawling twitter and analyzing crawled tweets and relations.
By Polyvios Pratikakis <polyvios@ics.forth.gr>.
For support, use the github repository contact methods
(https://www.github.com/polyvios/twAwler).
Currently extracts 6 kinds of relations:
* follow: unweighted, directed graph
* favorite: weighted, directed graph
* reply: weighted, directed graph
* retweet: weighted, directed graph
* quote: weighted, directed graph
* listsim: weighted, undirected graph
* avatar: undirected graph
Currently extracts around 2000 features per user.
"""
__author__ = 'Polyvios Pratikakis'
__email__ = 'polyvios@ics.forth.gr'
__copyright__ = '''
Copyright (c) 2016-present Polyvios Pratikakis, FORTH. All rights reserved.'''
__license__ = 'Apache License 2.0'
__version__ = '0.0.3'
__url__ = 'https://github.com/polyvios/twAwler'
__description__ = 'A Twitter API crawler and feature extraction library'
from twkit.utils import init_state, verbose
| 32.594595 | 78 | 0.677446 | true | true | |
1c2fdbc47064bff8c963b841458330ffba157b64 | 273 | py | Python | topCoder/srms/200s/srm209/div2/moving_averages.py | gauravsingh58/algo | 397859a53429e7a585e5f6964ad24146c6261326 | [
"WTFPL"
] | 1 | 2020-09-30T19:53:08.000Z | 2020-09-30T19:53:08.000Z | topCoder/srms/200s/srm209/div2/moving_averages.py | gauravsingh58/algo | 397859a53429e7a585e5f6964ad24146c6261326 | [
"WTFPL"
] | null | null | null | topCoder/srms/200s/srm209/div2/moving_averages.py | gauravsingh58/algo | 397859a53429e7a585e5f6964ad24146c6261326 | [
"WTFPL"
] | 1 | 2020-10-15T09:10:57.000Z | 2020-10-15T09:10:57.000Z | class MovingAverages:
def calculate(self, times, n):
def as_second(s):
h, m, s = map(int, s.split(':'))
return h * 3600 + m * 60 + s
s = map(as_second, times)
return map(lambda i: sum(s[i:i+n])/n, xrange(len(times)-n+1))
| 34.125 | 69 | 0.520147 | class MovingAverages:
def calculate(self, times, n):
def as_second(s):
h, m, s = map(int, s.split(':'))
return h * 3600 + m * 60 + s
s = map(as_second, times)
return map(lambda i: sum(s[i:i+n])/n, xrange(len(times)-n+1))
| true | true |
1c2fdd77a2b8772f3f77fe224a8336b4a07e1707 | 7,571 | py | Python | tests_unit/test__init__.py | daltonmatos/BarterDude | 9f7eb049711d688d61061036e886c33d855e563a | [
"Apache-2.0"
] | 12 | 2020-02-14T20:30:38.000Z | 2022-03-08T17:53:55.000Z | tests_unit/test__init__.py | daltonmatos/BarterDude | 9f7eb049711d688d61061036e886c33d855e563a | [
"Apache-2.0"
] | 11 | 2020-02-29T15:06:25.000Z | 2021-05-03T15:23:12.000Z | tests_unit/test__init__.py | daltonmatos/BarterDude | 9f7eb049711d688d61061036e886c33d855e563a | [
"Apache-2.0"
] | 3 | 2020-02-28T20:43:11.000Z | 2022-02-07T21:56:34.000Z | from asynctest import Mock, TestCase, CoroutineMock, patch, call
from asyncworker import Options, RouteTypes
from barterdude import BarterDude
from barterdude.message import Message
from tests_unit.helpers import load_fixture
class TestBarterDude(TestCase):
    """Unit tests for BarterDude's AMQP consume/publish wiring.

    The asyncworker App and AMQPConnection are patched out in setUp, so
    each test exercises only BarterDude's own glue code.
    """

    @patch("barterdude.App")
    @patch("barterdude.AMQPConnection")
    def setUp(self, AMQPConnection, App):
        """Build a BarterDude instance against mocked App/AMQPConnection."""
        self.monitor = Mock()
        self.monitor.dispatch_before_consume = CoroutineMock()
        self.monitor.dispatch_on_success = CoroutineMock()
        self.monitor.dispatch_on_fail = CoroutineMock()
        self.callback = CoroutineMock()
        self.messages = [Mock(value=i) for i in range(10)]
        self.calls = [call(message) for message in self.messages]
        self.AMQPConnection = AMQPConnection
        self.connection = self.AMQPConnection.return_value
        self.App = App
        self.app = self.App.return_value
        self.app.startup = CoroutineMock()
        self.app.shutdown = CoroutineMock()
        # The object returned by app.route(...) acts as the decorator that
        # receives the wrapped consumer function.
        self.decorator = self.app.route.return_value
        self.schema = load_fixture("schema.json")
        self.barterdude = BarterDude()

    def test_should_create_connection(self):
        """BarterDude() builds a default AMQP connection and hands it to App."""
        self.AMQPConnection.assert_called_once_with(  # nosec
            hostname="127.0.0.1",
            username="guest",
            password="guest",
            prefetch=10,
            name="default",
        )
        self.App.assert_called_once_with(connections=[self.connection])

    def test_should_call_route_when_created(self):
        """consume_amqp registers an AMQP route with the expected options."""
        monitor = Mock()
        self.barterdude.consume_amqp(
            ["queue"], monitor=monitor
        )(CoroutineMock())
        self.app.route.assert_called_once_with(
            ["queue"],
            type=RouteTypes.AMQP_RABBITMQ,
            options={
                Options.BULK_SIZE: 10,
                Options.BULK_FLUSH_INTERVAL: 60,
                Options.CONNECTION_FAIL_CALLBACK:
                    monitor.dispatch_on_connection_fail,
            }
        )

    def test_should_call_route_when_adding_endpoint(self):
        """add_endpoint registers an HTTP route decorated with the hook."""
        hook = Mock()
        self.barterdude.add_endpoint(['/my_route'], ['GET'], hook)
        self.app.route.assert_called_once_with(
            routes=['/my_route'],
            methods=['GET'],
            type=RouteTypes.HTTP
        )
        self.decorator.assert_called_once_with(hook)

    async def test_should_call_callback_for_each_message(self):
        """The wrapper invokes the callback once per message, wrapped as Message."""
        self.barterdude.consume_amqp(["queue"], self.monitor)(self.callback)
        self.decorator.assert_called_once()
        wrapper = self.decorator.call_args[0][0]
        await wrapper(self.messages)
        messages = []
        for message in self.callback.mock_calls:
            self.assertEqual(Message, type(message[1][0]))
            messages.append(message[1][0]._message)
        self.assertListEqual(
            sorted(messages, key=lambda x: x.value),
            sorted(self.messages, key=lambda x: x.value))

    async def test_should_call_reject_when_callback_fail(self):
        """Messages are rejected when the callback raises."""
        self.callback.side_effect = Exception('Boom!')
        self.barterdude.consume_amqp(["queue"], self.monitor)(self.callback)
        wrapper = self.decorator.call_args[0][0]
        await wrapper(self.messages)
        for message in self.messages:
            message.reject.assert_called_once()

    async def test_should_call_monitor_for_each_success_message(self):
        """Monitor's before-consume and on-success hooks fire; on-fail does not."""
        self.barterdude.consume_amqp(["queue"], self.monitor)(self.callback)
        wrapper = self.decorator.call_args[0][0]
        await wrapper(self.messages)
        self.monitor.dispatch_before_consume.assert_has_calls(
            self.calls, any_order=True)
        self.monitor.dispatch_on_success.assert_has_calls(
            self.calls, any_order=True)
        self.monitor.dispatch_on_fail.assert_not_called()

    async def test_should_call_callback_for_valid_message(self):
        """A message matching the validation schema reaches the callback."""
        self.barterdude.consume_amqp(
            ["queue"], self.monitor, validation_schema=self.schema
        )(self.callback)
        self.decorator.assert_called_once()
        wrapper = self.decorator.call_args[0][0]
        message = Mock(Message)
        message.body = {"key": 'ok'}
        await wrapper([message])
        self.callback.assert_called_once()
        self.assertEqual(
            self.callback.await_args[0][0].body["key"],
            message.body["key"]
        )

    # Fix: renamed from test_should_not_call_callback_for_valid_message —
    # this test feeds a message that does NOT match the schema.
    async def test_should_not_call_callback_for_invalid_message(self):
        """A message failing schema validation never reaches the callback."""
        self.barterdude.consume_amqp(
            ["queue"], self.monitor, validation_schema=self.schema
        )(self.callback)
        self.decorator.assert_called_once()
        wrapper = self.decorator.call_args[0][0]
        message = Mock(Message)
        message.body = {"wrong": 'ok'}
        await wrapper([message])
        self.callback.assert_not_called()

    async def test_should_call_monitor_for_each_fail_message(self):
        """Monitor's on-fail hook fires with the error; on-success does not."""
        error = Exception('Boom!')
        self.callback.side_effect = error
        self.barterdude.consume_amqp(["queue"], self.monitor)(self.callback)
        wrapper = self.decorator.call_args[0][0]
        await wrapper(self.messages)
        self.monitor.dispatch_before_consume.assert_has_calls(
            self.calls, any_order=True)
        error_calls = [call(message, error) for message in self.messages]
        self.monitor.dispatch_on_fail.assert_has_calls(
            error_calls, any_order=True)
        self.monitor.dispatch_on_success.assert_not_called()

    async def test_should_call_put_when_publish(self):
        """publish_amqp forwards its arguments to the connection's put()."""
        data = Mock()
        self.connection.put = CoroutineMock()
        await self.barterdude.publish_amqp(
            'exchange',
            data,
            vhost="vhost",
            routing_key="routing_key"
        )
        self.connection.put.assert_called_once_with(
            exchange='exchange',
            data=data,
            vhost="vhost",
            routing_key="routing_key",
            properties=None
        )

    async def test_should_call_startup_and_shutdown(self):
        """startup/shutdown delegate to the wrapped app."""
        await self.barterdude.startup()
        self.app.startup.assert_called_once_with()
        await self.barterdude.shutdown()
        self.app.shutdown.assert_called_once_with()

    def test_should_call_run(self):
        """run delegates to the wrapped app."""
        self.barterdude.run()
        self.app.run.assert_called_once_with()
class TestAppSharedProperties(TestCase):
    """Tests that BarterDude proxies mapping operations to the wrapped App."""

    def setUp(self):
        self.barterdude = BarterDude()

    def test_setitem_changes_state(self):
        """Setting a key stores the value."""
        self.barterdude["foo"] = foo = Mock()
        self.assertEqual(foo, self.barterdude["foo"])

    async def test_getitem_returns_internal_state_value(self):
        """Getting a key returns the stored value."""
        self.barterdude["foo"] = "bar"
        self.assertEqual("bar", self.barterdude["foo"])

    def test_delitem_changes_state(self):
        """Deleting a key removes it; further access raises KeyError."""
        self.barterdude["foo"] = foo = Mock()
        self.assertEqual(foo, self.barterdude["foo"])
        del self.barterdude["foo"]
        with self.assertRaises(KeyError):
            self.assertIsNone(self.barterdude["foo"])

    def test_len_returns_state_len(self):
        """len() matches the number of keys held by the wrapped app."""
        test_data = {"foo": 1, "bar": 2}
        for k, v in test_data.items():
            self.barterdude[k] = v
        self.assertEqual(
            len(self.barterdude),
            len(dict(self.barterdude._BarterDude__app))
        )

    async def test_iter_iters_through_internal_state_value(self):
        """Iteration exposes all stored key/value pairs."""
        test_data = {"foo": 1, "bar": 2}
        for k, v in test_data.items():
            self.barterdude[k] = v
        state = dict(**self.barterdude)
        # Fix: assertDictContainsSubset is deprecated since Python 3.2 and
        # removed in 3.12; compare item views directly instead.
        self.assertTrue(test_data.items() <= state.items())
| 37.666667 | 76 | 0.645093 | from asynctest import Mock, TestCase, CoroutineMock, patch, call
from asyncworker import Options, RouteTypes
from barterdude import BarterDude
from barterdude.message import Message
from tests_unit.helpers import load_fixture
class TestBarterDude(TestCase):
@patch("barterdude.App")
@patch("barterdude.AMQPConnection")
def setUp(self, AMQPConnection, App):
self.monitor = Mock()
self.monitor.dispatch_before_consume = CoroutineMock()
self.monitor.dispatch_on_success = CoroutineMock()
self.monitor.dispatch_on_fail = CoroutineMock()
self.callback = CoroutineMock()
self.messages = [Mock(value=i) for i in range(10)]
self.calls = [call(message) for message in self.messages]
self.AMQPConnection = AMQPConnection
self.connection = self.AMQPConnection.return_value
self.App = App
self.app = self.App.return_value
self.app.startup = CoroutineMock()
self.app.shutdown = CoroutineMock()
self.decorator = self.app.route.return_value
self.schema = load_fixture("schema.json")
self.barterdude = BarterDude()
def test_should_create_connection(self):
self.AMQPConnection.assert_called_once_with(
hostname="127.0.0.1",
username="guest",
password="guest",
prefetch=10,
name="default",
)
self.App.assert_called_once_with(connections=[self.connection])
def test_should_call_route_when_created(self):
monitor = Mock()
self.barterdude.consume_amqp(
["queue"], monitor=monitor
)(CoroutineMock())
self.app.route.assert_called_once_with(
["queue"],
type=RouteTypes.AMQP_RABBITMQ,
options={
Options.BULK_SIZE: 10,
Options.BULK_FLUSH_INTERVAL: 60,
Options.CONNECTION_FAIL_CALLBACK:
monitor.dispatch_on_connection_fail,
}
)
def test_should_call_route_when_adding_endpoint(self):
hook = Mock()
self.barterdude.add_endpoint(['/my_route'], ['GET'], hook)
self.app.route.assert_called_once_with(
routes=['/my_route'],
methods=['GET'],
type=RouteTypes.HTTP
)
self.decorator.assert_called_once_with(hook)
async def test_should_call_callback_for_each_message(self):
self.barterdude.consume_amqp(["queue"], self.monitor)(self.callback)
self.decorator.assert_called_once()
wrapper = self.decorator.call_args[0][0]
await wrapper(self.messages)
messages = []
for message in self.callback.mock_calls:
self.assertEqual(Message, type(message[1][0]))
messages.append(message[1][0]._message)
self.assertListEqual(
sorted(messages, key=lambda x: x.value),
sorted(self.messages, key=lambda x: x.value))
async def test_should_call_reject_when_callback_fail(self):
self.callback.side_effect = Exception('Boom!')
self.barterdude.consume_amqp(["queue"], self.monitor)(self.callback)
wrapper = self.decorator.call_args[0][0]
await wrapper(self.messages)
for message in self.messages:
message.reject.assert_called_once()
async def test_should_call_monitor_for_each_success_message(self):
self.barterdude.consume_amqp(["queue"], self.monitor)(self.callback)
wrapper = self.decorator.call_args[0][0]
await wrapper(self.messages)
self.monitor.dispatch_before_consume.assert_has_calls(
self.calls, any_order=True)
self.monitor.dispatch_on_success.assert_has_calls(
self.calls, any_order=True)
self.monitor.dispatch_on_fail.assert_not_called()
async def test_should_call_callback_for_valid_message(self):
self.barterdude.consume_amqp(
["queue"], self.monitor, validation_schema=self.schema
)(self.callback)
self.decorator.assert_called_once()
wrapper = self.decorator.call_args[0][0]
message = Mock(Message)
message.body = {"key": 'ok'}
await wrapper([message])
self.callback.assert_called_once()
self.assertEqual(
self.callback.await_args[0][0].body["key"],
message.body["key"]
)
async def test_should_not_call_callback_for_valid_message(self):
self.barterdude.consume_amqp(
["queue"], self.monitor, validation_schema=self.schema
)(self.callback)
self.decorator.assert_called_once()
wrapper = self.decorator.call_args[0][0]
message = Mock(Message)
message.body = {"wrong": 'ok'}
await wrapper([message])
self.callback.assert_not_called()
async def test_should_call_monitor_for_each_fail_message(self):
error = Exception('Boom!')
self.callback.side_effect = error
self.barterdude.consume_amqp(["queue"], self.monitor)(self.callback)
wrapper = self.decorator.call_args[0][0]
await wrapper(self.messages)
self.monitor.dispatch_before_consume.assert_has_calls(
self.calls, any_order=True)
error_calls = [call(message, error) for message in self.messages]
self.monitor.dispatch_on_fail.assert_has_calls(
error_calls, any_order=True)
self.monitor.dispatch_on_success.assert_not_called()
async def test_should_call_put_when_publish(self):
data = Mock()
self.connection.put = CoroutineMock()
await self.barterdude.publish_amqp(
'exchange',
data,
vhost="vhost",
routing_key="routing_key"
)
self.connection.put.assert_called_once_with(
exchange='exchange',
data=data,
vhost="vhost",
routing_key="routing_key",
properties=None
)
async def test_should_call_startup_and_shutdown(self):
await self.barterdude.startup()
self.app.startup.assert_called_once_with()
await self.barterdude.shutdown()
self.app.shutdown.assert_called_once_with()
def test_should_call_run(self):
self.barterdude.run()
self.app.run.assert_called_once_with()
class TestAppSharedProperties(TestCase):
def setUp(self):
self.barterdude = BarterDude()
def test_setitem_changes_state(self):
self.barterdude["foo"] = foo = Mock()
self.assertEqual(foo, self.barterdude["foo"])
async def test_getitem_returns_internal_state_value(self):
self.barterdude["foo"] = "bar"
self.assertEqual("bar", self.barterdude["foo"])
def test_delitem_changes_state(self):
self.barterdude["foo"] = foo = Mock()
self.assertEqual(foo, self.barterdude["foo"])
del self.barterdude["foo"]
with self.assertRaises(KeyError):
self.assertIsNone(self.barterdude["foo"])
def test_len_returns_state_len(self):
test_data = {"foo": 1, "bar": 2}
for k, v in test_data.items():
self.barterdude[k] = v
self.assertEqual(
len(self.barterdude),
len(dict(self.barterdude._BarterDude__app))
)
async def test_iter_iters_through_internal_state_value(self):
test_data = {"foo": 1, "bar": 2}
for k, v in test_data.items():
self.barterdude[k] = v
state = dict(**self.barterdude)
self.assertDictContainsSubset(test_data, state)
| true | true |
1c2fddcbe5eda8f0b8a7640f59c56bbb9a809a56 | 5,745 | py | Python | RTO_comp/RTO_Bayes_runs.py | OptiMaL-PSE-Lab/Expensive-Black-Box-Optim-ChemEng | 19c34dcff8c983926df501b93152fa3b3b0305d6 | [
"MIT"
] | null | null | null | RTO_comp/RTO_Bayes_runs.py | OptiMaL-PSE-Lab/Expensive-Black-Box-Optim-ChemEng | 19c34dcff8c983926df501b93152fa3b3b0305d6 | [
"MIT"
] | null | null | null | RTO_comp/RTO_Bayes_runs.py | OptiMaL-PSE-Lab/Expensive-Black-Box-Optim-ChemEng | 19c34dcff8c983926df501b93152fa3b3b0305d6 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
"""
Created on Fri Apr 2 15:49:18 2021
@author: dv516
"""
from algorithms.Bayesian_opt_Pyro.utilities_full import BayesOpt
from case_studies.RTO.systems import *
import numpy as np
import pickle
import pyro
pyro.enable_validation(True) # can help with debugging
def RTO(x):
    """Noise-free Williams-Otto objective and constraint values at x.

    Returns (objective, [g1, g2]) using the plant's noise-less models.
    """
    plant = WO_system()
    objective = plant.WO_obj_sys_ca_noise_less(x)
    constraints = [
        plant.WO_con1_sys_ca_noise_less(x),
        plant.WO_con2_sys_ca_noise_less(x),
    ]
    return objective, constraints
def RTO_rand(x):
    """Single noisy evaluation of the Williams-Otto objective and constraints.

    Returns (objective, [g1, g2]) using the plant's stochastic models.
    """
    plant = WO_system()
    objective = plant.WO_obj_sys_ca(x)
    constraints = [plant.WO_con1_sys_ca(x), plant.WO_con2_sys_ca(x)]
    return objective, constraints
def RTO_SAA(x):
    """Sample-average approximation of the noisy Williams-Otto problem.

    Averages the objective and takes the worst-case (max) constraint values
    over N_SAA = 5 noisy evaluations. Calls are made in the order
    f, g1, g2 per sample so any shared RNG is consumed identically to the
    original implementation.
    """
    N_SAA = 5
    plant = WO_system()
    f = plant.WO_obj_sys_ca
    g1 = plant.WO_con1_sys_ca
    g2 = plant.WO_con2_sys_ca
    obj_mean = 0
    worst_g1 = -np.inf
    worst_g2 = -np.inf
    for _ in range(N_SAA):
        obj_mean += f(x) / N_SAA
        worst_g1 = max(worst_g1, g1(x))
        worst_g2 = max(worst_g2, g2(x))
    return obj_mean, [worst_g1, worst_g2]
def RTO_Noise(x, noise, N_SAA):
    """Noise-less plant evaluations corrupted by additive Gaussian noise.

    Adds N(0, noise) noise (scaled by 5e-1 on the objective and 5e-4 on the
    constraints) to each of N_SAA samples, averaging the objective and
    taking the worst-case constraints. RNG draws happen in the order
    f, g1, g2 per sample, identical to the original implementation.
    """
    plant = WO_system()
    f = plant.WO_obj_sys_ca_noise_less
    g1 = plant.WO_con1_sys_ca_noise_less
    g2 = plant.WO_con2_sys_ca_noise_less
    obj_mean = 0
    worst_g1 = -np.inf
    worst_g2 = -np.inf
    for _ in range(N_SAA):
        obj_mean += (f(x) + 5e-1 * np.random.normal(0., noise)) / N_SAA
        worst_g1 = max(worst_g1, g1(x) + 5e-4 * np.random.normal(0., noise))
        worst_g2 = max(worst_g2, g2(x) + 5e-4 * np.random.normal(0., noise))
    return obj_mean, [worst_g1, worst_g2]
# Starting point and box bounds for the two decision variables of the
# Williams-Otto case study (semantics defined by WO_system — confirm there).
x0 = [6.9, 83]
bounds = np.array([[4., 7.], [70., 100.]])
# max_f_eval = 100
# max_it = 50
nbr_feval = 30
N = 10
# --- Deterministic (noise-free) runs: N independent BayesOpt repetitions ---
RTO_Bayes_list = []
for i in range(N):
    Bayes = BayesOpt()
    # Seed pyro per repetition so each run is reproducible
    pyro.set_rng_seed(i)
    RTO_Bayes = Bayes.solve(RTO, x0, acquisition='EI',bounds=bounds.T, \
                        print_iteration = True, constraints=2, casadi=True, \
                        maxfun = nbr_feval, ).output_dict
    RTO_Bayes_list.append(RTO_Bayes)
print('10 BayesOpt deterministic iterations completed')
with open('BayesRTO_list.pickle', 'wb') as handle:
    pickle.dump(RTO_Bayes_list, handle, protocol=pickle.HIGHEST_PROTOCOL)
# --- Stochastic runs: same setup, but on the noisy plant model ---
N = 10
RTORand_Bayes_list = []
for i in range(N):
    Bayes = BayesOpt()
    pyro.set_rng_seed(i)
    RTORand_Bayes = Bayes.solve(RTO_rand, x0, acquisition='EI',bounds=bounds.T, \
                        print_iteration = True, constraints=2, casadi=True, \
                        maxfun = nbr_feval, ).output_dict
    RTORand_Bayes_list.append(RTORand_Bayes)
print('10 BayesOpt random iterations completed')
with open('BayesRTO_listRand.pickle', 'wb') as handle:
    pickle.dump(RTORand_Bayes_list, handle, protocol=pickle.HIGHEST_PROTOCOL)
# --- SAA runs: each evaluation averages 5 noisy samples (see RTO_SAA) ---
N = 10
RTOSAA_Bayes_list = []
for i in range(N):
    Bayes = BayesOpt()
    pyro.set_rng_seed(i)
    RTOSAA_Bayes = Bayes.solve(RTO_SAA, x0, acquisition='EI',bounds=bounds.T, \
                        print_iteration = True, constraints=2, casadi=True, \
                        maxfun = nbr_feval, ).output_dict
    RTOSAA_Bayes_list.append(RTOSAA_Bayes)
print('10 BayesOpt SAA iterations completed')
with open('BayesRTO_listRandSAA.pickle', 'wb') as handle:
    pickle.dump(RTOSAA_Bayes_list, handle, protocol=pickle.HIGHEST_PROTOCOL)
# --- Noise sweep: noise levels 0, 1/3, ..., 5/3 ---
n_noise = 6
noise_mat = np.zeros(n_noise)
for i in range(n_noise):
    noise_mat[i] = 1/3*i
x0 = [6.9, 83]
bounds = np.array([[4., 7.], [70., 100.]])
# NOTE(review): N_SAA is assigned twice and max_f_eval is never used below —
# the solve calls use nbr_feval (30). Likely leftovers; confirm intended budget.
max_f_eval = 50 ; N_SAA = 1
N_SAA = 1
N_samples = 20
RTOnoise_list_Bayes = []
RTOconstraint_list_Bayes = []
for i in range(n_noise):
    print('Outer Iteration ', i+1, ' out of ', n_noise,' of BayesOpt')
    best = []
    best_constr = []
    Bayes = BayesOpt()
    f = lambda x: RTO_Noise(x, noise_mat[i], N_SAA)
    for j in range(N_samples):
        sol = Bayes.solve(f, x0, acquisition='EI',bounds=bounds.T, \
                        print_iteration = True, constraints=2, casadi=True, \
                        maxfun = nbr_feval, ).output_dict
        best.append(sol['f_best_so_far'][-1])
        # Re-evaluate the returned best point at zero noise to measure the
        # true constraint violation
        _, g = RTO_Noise(sol['x_best_so_far'][-1], 0, N_SAA)
        best_constr.append(np.sum(np.maximum(g, 0)))
    RTOnoise_list_Bayes.append(best)
    RTOconstraint_list_Bayes.append(best_constr)
with open('BayesRTO_listNoiseConv.pickle', 'wb') as handle:
    pickle.dump(RTOnoise_list_Bayes, handle, protocol=pickle.HIGHEST_PROTOCOL)
with open('BayesRTO_listNoiseConstr.pickle', 'wb') as handle:
    pickle.dump(RTOconstraint_list_Bayes, handle, protocol=pickle.HIGHEST_PROTOCOL)
# --- SAA noise sweep: average over N_SAA = 2 noisy samples per evaluation,
# with a correspondingly reduced evaluation budget ---
nbr_feval = 25
N_SAA = 2
N_samples = 20
RTOnoiseSAA_list_Bayes = []
RTOconstraintSAA_list_Bayes = []
for i in range(n_noise):
    print('Outer Iteration ', i+1, ' out of ', n_noise,' of BayesOpt')
    best = []
    best_constr = []
    # Fix: instantiate a fresh solver per noise level, mirroring the non-SAA
    # sweep above. Previously the stale `Bayes` instance left over from the
    # previous loop was reused for every run here.
    Bayes = BayesOpt()
    f = lambda x: RTO_Noise(x, noise_mat[i], N_SAA)
    for j in range(N_samples):
        sol = Bayes.solve(f, x0, acquisition='EI',bounds=bounds.T, \
                        print_iteration = True, constraints=2, casadi=True, \
                        maxfun = nbr_feval, ).output_dict
        best.append(sol['f_best_so_far'][-1])
        # Re-evaluate the returned best point at zero noise to measure the
        # true constraint violation
        _, g = RTO_Noise(sol['x_best_so_far'][-1], 0, N_SAA)
        best_constr.append(np.sum(np.maximum(g, 0)))
    RTOnoiseSAA_list_Bayes.append(best)
    RTOconstraintSAA_list_Bayes.append(best_constr)
with open('BayesRTO_listNoiseConvSAA.pickle', 'wb') as handle:
    pickle.dump(RTOnoiseSAA_list_Bayes, handle, protocol=pickle.HIGHEST_PROTOCOL)
with open('BayesRTO_listNoiseConstrSAA.pickle', 'wb') as handle:
    pickle.dump(RTOconstraintSAA_list_Bayes, handle, protocol=pickle.HIGHEST_PROTOCOL)
| 29.921875 | 86 | 0.641775 |
from algorithms.Bayesian_opt_Pyro.utilities_full import BayesOpt
from case_studies.RTO.systems import *
import numpy as np
import pickle
import pyro
pyro.enable_validation(True)
def RTO(x):
plant = WO_system()
f = plant.WO_obj_sys_ca_noise_less
g1 = plant.WO_con1_sys_ca_noise_less
g2 = plant.WO_con2_sys_ca_noise_less
return f(x), [g1(x), g2(x)]
def RTO_rand(x):
plant = WO_system()
f = plant.WO_obj_sys_ca
g1 = plant.WO_con1_sys_ca
g2 = plant.WO_con2_sys_ca
return f(x), [g1(x), g2(x)]
def RTO_SAA(x):
N_SAA = 5
plant = WO_system()
f = plant.WO_obj_sys_ca
g1 = plant.WO_con1_sys_ca
g2 = plant.WO_con2_sys_ca
f_SAA = 0
g1_SAA, g2_SAA = - np.inf, - np.inf
for i in range(N_SAA):
f_SAA += f(x)/N_SAA
g1_SAA = max(g1_SAA, g1(x))
g2_SAA = max(g2_SAA, g2(x))
return f_SAA, [g1_SAA, g2_SAA]
def RTO_Noise(x, noise, N_SAA):
plant = WO_system()
f = plant.WO_obj_sys_ca_noise_less
g1 = plant.WO_con1_sys_ca_noise_less
g2 = plant.WO_con2_sys_ca_noise_less
f_SAA = 0
g1_SAA, g2_SAA = - np.inf, - np.inf
for i in range(N_SAA):
f_SAA += (f(x) + 5e-1 * np.random.normal(0., noise))/N_SAA
g1_SAA = max(g1_SAA, g1(x) + 5e-4 * np.random.normal(0., noise))
g2_SAA = max(g2_SAA, g2(x) + 5e-4 * np.random.normal(0., noise))
return f_SAA, [g1_SAA, g2_SAA]
x0 = [6.9, 83]
bounds = np.array([[4., 7.], [70., 100.]])
nbr_feval = 30
N = 10
RTO_Bayes_list = []
for i in range(N):
Bayes = BayesOpt()
pyro.set_rng_seed(i)
RTO_Bayes = Bayes.solve(RTO, x0, acquisition='EI',bounds=bounds.T, \
print_iteration = True, constraints=2, casadi=True, \
maxfun = nbr_feval, ).output_dict
RTO_Bayes_list.append(RTO_Bayes)
print('10 BayesOpt deterministic iterations completed')
with open('BayesRTO_list.pickle', 'wb') as handle:
pickle.dump(RTO_Bayes_list, handle, protocol=pickle.HIGHEST_PROTOCOL)
N = 10
RTORand_Bayes_list = []
for i in range(N):
Bayes = BayesOpt()
pyro.set_rng_seed(i)
RTORand_Bayes = Bayes.solve(RTO_rand, x0, acquisition='EI',bounds=bounds.T, \
print_iteration = True, constraints=2, casadi=True, \
maxfun = nbr_feval, ).output_dict
RTORand_Bayes_list.append(RTORand_Bayes)
print('10 BayesOpt random iterations completed')
with open('BayesRTO_listRand.pickle', 'wb') as handle:
pickle.dump(RTORand_Bayes_list, handle, protocol=pickle.HIGHEST_PROTOCOL)
N = 10
RTOSAA_Bayes_list = []
for i in range(N):
Bayes = BayesOpt()
pyro.set_rng_seed(i)
RTOSAA_Bayes = Bayes.solve(RTO_SAA, x0, acquisition='EI',bounds=bounds.T, \
print_iteration = True, constraints=2, casadi=True, \
maxfun = nbr_feval, ).output_dict
RTOSAA_Bayes_list.append(RTOSAA_Bayes)
print('10 BayesOpt SAA iterations completed')
with open('BayesRTO_listRandSAA.pickle', 'wb') as handle:
pickle.dump(RTOSAA_Bayes_list, handle, protocol=pickle.HIGHEST_PROTOCOL)
n_noise = 6
noise_mat = np.zeros(n_noise)
for i in range(n_noise):
noise_mat[i] = 1/3*i
x0 = [6.9, 83]
bounds = np.array([[4., 7.], [70., 100.]])
max_f_eval = 50 ; N_SAA = 1
N_SAA = 1
N_samples = 20
RTOnoise_list_Bayes = []
RTOconstraint_list_Bayes = []
for i in range(n_noise):
print('Outer Iteration ', i+1, ' out of ', n_noise,' of BayesOpt')
best = []
best_constr = []
Bayes = BayesOpt()
f = lambda x: RTO_Noise(x, noise_mat[i], N_SAA)
for j in range(N_samples):
sol = Bayes.solve(f, x0, acquisition='EI',bounds=bounds.T, \
print_iteration = True, constraints=2, casadi=True, \
maxfun = nbr_feval, ).output_dict
best.append(sol['f_best_so_far'][-1])
_, g = RTO_Noise(sol['x_best_so_far'][-1], 0, N_SAA)
best_constr.append(np.sum(np.maximum(g, 0)))
RTOnoise_list_Bayes.append(best)
RTOconstraint_list_Bayes.append(best_constr)
with open('BayesRTO_listNoiseConv.pickle', 'wb') as handle:
pickle.dump(RTOnoise_list_Bayes, handle, protocol=pickle.HIGHEST_PROTOCOL)
with open('BayesRTO_listNoiseConstr.pickle', 'wb') as handle:
pickle.dump(RTOconstraint_list_Bayes, handle, protocol=pickle.HIGHEST_PROTOCOL)
nbr_feval = 25
N_SAA = 2
N_samples = 20
RTOnoiseSAA_list_Bayes = []
RTOconstraintSAA_list_Bayes = []
for i in range(n_noise):
print('Outer Iteration ', i+1, ' out of ', n_noise,' of BayesOpt')
best = []
best_constr = []
for j in range(N_samples):
f = lambda x: RTO_Noise(x, noise_mat[i], N_SAA)
sol = Bayes.solve(f, x0, acquisition='EI',bounds=bounds.T, \
print_iteration = True, constraints=2, casadi=True, \
maxfun = nbr_feval, ).output_dict
best.append(sol['f_best_so_far'][-1])
_, g = RTO_Noise(sol['x_best_so_far'][-1], 0, N_SAA)
best_constr.append(np.sum(np.maximum(g, 0)))
RTOnoiseSAA_list_Bayes.append(best)
RTOconstraintSAA_list_Bayes.append(best_constr)
with open('BayesRTO_listNoiseConvSAA.pickle', 'wb') as handle:
pickle.dump(RTOnoiseSAA_list_Bayes, handle, protocol=pickle.HIGHEST_PROTOCOL)
with open('BayesRTO_listNoiseConstrSAA.pickle', 'wb') as handle:
pickle.dump(RTOconstraintSAA_list_Bayes, handle, protocol=pickle.HIGHEST_PROTOCOL)
| true | true |
1c2fdebdb580e69cba2adc7acdd3c5b2ac28b500 | 12,914 | py | Python | polus-image-assembler-plugin/src/main.py | blowekamp/polus-plugins | 87f9c36647b4cf95cf107cfede3a5a1d749415a5 | [
"MIT"
] | null | null | null | polus-image-assembler-plugin/src/main.py | blowekamp/polus-plugins | 87f9c36647b4cf95cf107cfede3a5a1d749415a5 | [
"MIT"
] | null | null | null | polus-image-assembler-plugin/src/main.py | blowekamp/polus-plugins | 87f9c36647b4cf95cf107cfede3a5a1d749415a5 | [
"MIT"
] | null | null | null | import argparse, logging, multiprocessing, re
from bfio import BioReader,BioWriter
import numpy as np
from concurrent.futures import ThreadPoolExecutor
from pathlib import Path
STITCH_VARS = ['file','correlation','posX','posY','gridX','gridY'] # image stitching values
STITCH_LINE = "file: {}; corr: {}; position: ({}, {}); grid: ({}, {});\n"
def buffer_image(image_path,supertile_buffer,Xi,Yi,Xt,Yt):
    """Read one tile image and copy it into the shared supertile buffer.

    Intended to run as a worker inside a thread pool so that multiple file
    reads overlap while a supertile is being assembled.

    Args:
        image_path: Path of the image file to read.
        supertile_buffer: Numpy array holding the supertile being assembled.
        Xi: [xmin, xmax] pixel range to read from the image.
        Yi: [ymin, ymax] pixel range to read from the image.
        Xt: [xmin, xmax] destination range inside the buffer.
        Yt: [ymin, ymax] destination range inside the buffer.
    """
    reader = BioReader(image_path, max_workers=2)
    tile = reader.read_image(X=Xi, Y=Yi)  # first z, c, t layer only
    supertile_buffer[Yt[0]:Yt[1], Xt[0]:Xt[1], ...] = tile
def make_tile(x_min,x_max,y_min,y_max,stitchPath):
    """make_tile Create a supertile

    This method identifies images that have stitching vector positions
    within the bounds of the supertile defined by the x and y input
    arguments. It then spawns threads to load images and store in the
    supertile buffer. Finally it returns the assembled supertile to
    allow the main thread to generate the write thread.

    Note: relies on the module-level global ``imgPath`` assigned in the
    ``__main__`` section instead of taking the image directory as an
    argument.

    Args:
        x_min ([int]): Minimum x bound of the tile
        x_max ([int]): Maximum x bound of the tile
        y_min ([int]): Minimum y bound of the tile
        y_max ([int]): Maximum y bound of the tile
        stitchPath ([str]): Path to the stitching vector

    Returns:
        numpy.ndarray: 5D array (y, x, z, c, t) containing the assembled
        supertile pixels for the requested bounds.
    """
    # Parse the stitching vector (timepointName=True only affects the
    # generated output name, which is unused here)
    outvals = _parse_stitch(stitchPath,imgPath,True)
    # Get the data type from the first listed image
    # NOTE(review): reads the private BioReader attribute _pix -- confirm a
    # public accessor exists in the installed bfio version.
    br = BioReader(str(Path(imgPath).joinpath(outvals['filePos'][0]['file'])))
    dtype = br._pix['type']
    # initialize the supertile
    template = np.zeros((y_max-y_min,x_max-x_min,1,1,1),dtype=dtype)
    # get images in bounds of current super tile: an image is included when
    # either of its x edges AND either of its y edges fall inside the tile
    with ThreadPoolExecutor(max([multiprocessing.cpu_count(),2])) as executor:
        for f in outvals['filePos']:
            if (f['posX'] >= x_min and f['posX'] <= x_max) or (f['posX']+f['width'] >= x_min and f['posX']+f['width'] <= x_max):
                if (f['posY'] >= y_min and f['posY'] <= y_max) or (f['posY']+f['height'] >= y_min and f['posY']+f['height'] <= y_max):
                    # get bounds of image within the tile
                    Xt = [max(0,f['posX']-x_min)]
                    Xt.append(min(x_max-x_min,f['posX']+f['width']-x_min))
                    Yt = [max(0,f['posY']-y_min)]
                    Yt.append(min(y_max-y_min,f['posY']+f['height']-y_min))
                    # get bounds of image within the image
                    Xi = [max(0,x_min - f['posX'])]
                    Xi.append(min(f['width'],x_max - f['posX']))
                    Yi = [max(0,y_min - f['posY'])]
                    Yi.append(min(f['height'],y_max - f['posY']))
                    # load asynchronously; buffer_image writes into template
                    executor.submit(buffer_image,str(Path(imgPath).joinpath(f['file'])),template,Xi,Yi,Xt,Yt)
    return template
def get_number(s):
    """Coerce a string to an int when possible.

    Used while parsing stitching vectors and file-name characters: values
    that represent integers are compared numerically, anything else is
    kept as the original value.

    Inputs:
        s - An input string or number
    Outputs:
        int(s) if s parses as an integer, otherwise s unchanged
    """
    try:
        value = int(s)
    except ValueError:
        return s
    return value
def _parse_stitch(stitchPath,imagePath,timepointName=False):
    """ Load and parse image stitching vectors

    This function creates a list of file dictionaries that include the filename and
    pixel position and dimensions within a stitched image. It also determines the
    size of the final stitched image and the suggested name of the output image based
    on differences in file names in the stitching vector.

    Inputs:
        stitchPath - A path to stitching vectors
        imagePath - A path to tiled tiff images
        timepointName - Use the vector timeslice as the image name
    Outputs:
        out_dict - Dictionary with keys (width, height, name, filePos)
    """
    # Initialize the output
    out_dict = { 'width': int(0),
                 'height': int(0),
                 'name': '',
                 'filePos': []}
    # Set the regular expression used to parse each line of the stitching vector
    line_regex = r"file: (.*); corr: (.*); position: \((.*), (.*)\); grid: \((.*), (.*)\);"
    # Get a list of all images in imagePath
    images = [p.name for p in Path(imagePath).iterdir()]
    # Open each stitching vector
    fpath = str(Path(stitchPath).absolute())
    # name_pos maps a character index in the file name to the set of
    # characters/digits observed at that index across all listed files
    name_pos = {}
    with open(fpath,'r') as fr:
        # Read the first line to get the filename for comparison to all other filenames
        line = fr.readline()
        stitch_groups = re.match(line_regex,line)
        stitch_groups = {key:val for key,val in zip(STITCH_VARS,stitch_groups.groups())}
        name = stitch_groups['file']
        name_ind = [i for i in range(len(name))]
        fr.seek(0) # reset to the first line
        # Read each line in the stitching vector
        for line in fr:
            # Read and parse values from the current line
            stitch_groups = re.match(line_regex,line)
            stitch_groups = {key:get_number(val) for key,val in zip(STITCH_VARS,stitch_groups.groups())}
            # If an image in the vector doesn't match an image in the collection, then skip it
            if stitch_groups['file'] not in images:
                continue
            # Get the image size
            stitch_groups['width'], stitch_groups['height'] = BioReader.image_size(str(Path(imagePath).joinpath(stitch_groups['file']).absolute()))
            # Grow the output canvas so that every placed image fits
            if out_dict['width'] < stitch_groups['width']+stitch_groups['posX']:
                out_dict['width'] = stitch_groups['width']+stitch_groups['posX']
            if out_dict['height'] < stitch_groups['height']+stitch_groups['posY']:
                out_dict['height'] = stitch_groups['height']+stitch_groups['posY']
            # Set the stitching vector values in the file dictionary
            out_dict['filePos'].append(stitch_groups)
            # Determine the difference between first name and current name
            if not timepointName:
                for i in name_ind:
                    if name[i] != stitch_groups['file'][i]:
                        if i not in name_pos.keys():
                            name_pos[i] = set()
                            name_pos[i].update([get_number(stitch_groups['file'][i])])
                            name_pos[i].update([get_number(name[i])])
                        else:
                            name_pos[i].update([get_number(stitch_groups['file'][i])])
    # Generate the output file name
    # NOTE: This should be rewritten later to determine numeric values rather than position values.
    # Output file names should be
    indices = sorted(name_pos.keys())
    if timepointName:
        # Use the timeslice number embedded in the vector's own file name
        global_regex = ".*global-positions-([0-9]+).txt"
        name = re.match(global_regex,Path(stitchPath).name).groups()[0]
        name += '.ome.tif'
        out_dict['name'] = name
    elif len(indices) > 0:
        # Collapse each run of adjacent differing character positions into a
        # <min-max> range token inside the first file's name
        out_dict['name'] = name[0:indices[0]]
        minvals = []
        maxvals = []
        for v,i in enumerate(indices):
            if len(minvals)==0:
                out_dict['name'] += '<'
            minvals.append(min(name_pos[i]))
            maxvals.append(max(name_pos[i]))
            # close the range at the last index or when the next differing
            # position is not adjacent to the current one
            if i == indices[-1] or indices[v+1] - i > 1:
                out_dict['name'] += ''.join([str(ind) for ind in minvals])
                out_dict['name'] += '-'
                out_dict['name'] += ''.join([str(ind) for ind in maxvals])
                out_dict['name'] += '>'
                if i == indices[-1]:
                    out_dict['name'] += name[indices[-1]+1:]
                else:
                    out_dict['name'] += name[indices[v]+1:indices[v+1]]
                minvals = []
                maxvals = []
    else:
        # All file names are identical: reuse the first file's name
        out_dict['name'] = name
    return out_dict
if __name__=="__main__":
    # Initialize the logger
    logging.basicConfig(format='%(asctime)s - %(name)-8s - %(levelname)-8s - %(message)s',
                        datefmt='%d-%b-%y %H:%M:%S')
    logger = logging.getLogger("main")
    logger.setLevel(logging.INFO)
    # Setup the argument parsing
    parser = argparse.ArgumentParser(prog='main', description='Assemble images from a single stitching vector.')
    parser.add_argument('--stitchPath', dest='stitchPath', type=str,
                        help='Complete path to a stitching vector', required=True)
    parser.add_argument('--imgPath', dest='imgPath', type=str,
                        help='Input image collection to be processed by this plugin', required=True)
    parser.add_argument('--outDir', dest='outDir', type=str,
                        help='Output collection', required=True)
    parser.add_argument('--timesliceNaming', dest='timesliceNaming', type=str,
                        help='Use timeslice number as image name', required=False)
    # Parse the arguments
    args = parser.parse_args()
    imgPath = args.imgPath
    # WIPP collections may nest the actual images in an 'images' subfolder
    if Path(imgPath).joinpath('images').is_dir():
        imgPath = str(Path(imgPath).joinpath('images').absolute())
    outDir = args.outDir
    logger.info('outDir: {}'.format(outDir))
    timesliceNaming = args.timesliceNaming == 'true'
    logger.info('timesliceNaming: {}'.format(timesliceNaming))
    stitchPath = args.stitchPath
    # Get a list of stitching vectors
    vectors = [str(p.absolute()) for p in Path(stitchPath).iterdir() if p.is_file() and "".join(p.suffixes)=='.txt']
    logger.info('imgPath: {}'.format(imgPath))
    logger.info('stitchPath: {}'.format(stitchPath))
    vectors.sort()
    # Variables for image building processes
    img_processes = []
    img_paths = []
    for v in vectors:
        # Check to see if the file is a stitching vector
        if 'img-global-positions' not in Path(v).name:
            continue
        # Parse the stitching vector
        logger.info('Analyzing vector: {}'.format(Path(v).name))
        outvals = _parse_stitch(v,imgPath,timesliceNaming)
        logger.info('Building image: {}'.format(outvals['name']))
        logger.info('Output image size (width, height): {},{}'.format(outvals['width'],outvals['height']))
        # Variables for tile building processes
        # NOTE(review): pnum/ptotal appear unused after the threaded rewrite below
        pnum = 0
        ptotal = np.ceil(outvals['width']/10240) * np.ceil(outvals['height']/10240)
        ptotal = 1/ptotal * 100
        # Initialize the output image: metadata is copied from the first input
        logger.info('Initializing output file: {}'.format(outvals['name']))
        refImg = str(Path(imgPath).joinpath(outvals['filePos'][0]['file']).absolute())
        outFile = str(Path(outDir).joinpath(outvals['name']).absolute())
        br = BioReader(str(Path(refImg).absolute()))
        bw = BioWriter(str(Path(outFile).absolute()),metadata=br.read_metadata(),max_workers=max([multiprocessing.cpu_count(),2]))
        bw.num_x(outvals['width'])
        bw.num_y(outvals['height'])
        del br
        # Assemble the images: tiles are built serially (make_tile spawns its
        # own read threads) while writes are queued on the executor
        logger.info('Generating tiles...')
        threads = []
        with ThreadPoolExecutor(max([multiprocessing.cpu_count()//2,2])) as executor:
            for x in range(0, outvals['width'], 10240):
                X_range = min(x+10240,outvals['width']) # max x-pixel index in the assembled image
                for y in range(0, outvals['height'], 10240):
                    Y_range = min(y+10240,outvals['height']) # max y-pixel index in the assembled image
                    image_buffer = make_tile(x,X_range,y,Y_range,v)
                    threads.append(executor.submit(bw.write_image,image_buffer,X=[x],Y=[y]))
                    # bw.write_image(image_buffer,X=[x],Y=[y])
        logger.info('{:.2f} finished...'.format(0)) # NOTE(review): missing '%' vs. the per-tile message below
        for ind,thread in enumerate(threads):
            thread.result()
            logger.info('{:.2f}% finished...'.format(100*(ind+1)/len(threads)))
        logger.info('Closing image...')
        bw.close_image()
| 43.92517 | 147 | 0.594471 | import argparse, logging, multiprocessing, re
from bfio import BioReader,BioWriter
import numpy as np
from concurrent.futures import ThreadPoolExecutor
from pathlib import Path
STITCH_VARS = ['file','correlation','posX','posY','gridX','gridY']
STITCH_LINE = "file: {}; corr: {}; position: ({}, {}); grid: ({}, {});\n"
def buffer_image(image_path,supertile_buffer,Xi,Yi,Xt,Yt):
br = BioReader(image_path,max_workers=2)
image = br.read_image(X=Xi,Y=Yi)
supertile_buffer[Yt[0]:Yt[1],Xt[0]:Xt[1],...] = image
def make_tile(x_min,x_max,y_min,y_max,stitchPath):
outvals = _parse_stitch(stitchPath,imgPath,True)
br = BioReader(str(Path(imgPath).joinpath(outvals['filePos'][0]['file'])))
dtype = br._pix['type']
template = np.zeros((y_max-y_min,x_max-x_min,1,1,1),dtype=dtype)
with ThreadPoolExecutor(max([multiprocessing.cpu_count(),2])) as executor:
for f in outvals['filePos']:
if (f['posX'] >= x_min and f['posX'] <= x_max) or (f['posX']+f['width'] >= x_min and f['posX']+f['width'] <= x_max):
if (f['posY'] >= y_min and f['posY'] <= y_max) or (f['posY']+f['height'] >= y_min and f['posY']+f['height'] <= y_max):
Xt = [max(0,f['posX']-x_min)]
Xt.append(min(x_max-x_min,f['posX']+f['width']-x_min))
Yt = [max(0,f['posY']-y_min)]
Yt.append(min(y_max-y_min,f['posY']+f['height']-y_min))
Xi = [max(0,x_min - f['posX'])]
Xi.append(min(f['width'],x_max - f['posX']))
Yi = [max(0,y_min - f['posY'])]
Yi.append(min(f['height'],y_max - f['posY']))
executor.submit(buffer_image,str(Path(imgPath).joinpath(f['file'])),template,Xi,Yi,Xt,Yt)
return template
def get_number(s):
try:
return int(s)
except ValueError:
return s
def _parse_stitch(stitchPath,imagePath,timepointName=False):
out_dict = { 'width': int(0),
'height': int(0),
'name': '',
'filePos': []}
line_regex = r"file: (.*); corr: (.*); position: \((.*), (.*)\); grid: \((.*), (.*)\);"
images = [p.name for p in Path(imagePath).iterdir()]
fpath = str(Path(stitchPath).absolute())
name_pos = {}
with open(fpath,'r') as fr:
line = fr.readline()
stitch_groups = re.match(line_regex,line)
stitch_groups = {key:val for key,val in zip(STITCH_VARS,stitch_groups.groups())}
name = stitch_groups['file']
name_ind = [i for i in range(len(name))]
fr.seek(0)
for line in fr:
stitch_groups = re.match(line_regex,line)
stitch_groups = {key:get_number(val) for key,val in zip(STITCH_VARS,stitch_groups.groups())}
if stitch_groups['file'] not in images:
continue
# Get the image size
stitch_groups['width'], stitch_groups['height'] = BioReader.image_size(str(Path(imagePath).joinpath(stitch_groups['file']).absolute()))
if out_dict['width'] < stitch_groups['width']+stitch_groups['posX']:
out_dict['width'] = stitch_groups['width']+stitch_groups['posX']
if out_dict['height'] < stitch_groups['height']+stitch_groups['posY']:
out_dict['height'] = stitch_groups['height']+stitch_groups['posY']
# Set the stitching vector values in the file dictionary
out_dict['filePos'].append(stitch_groups)
# Determine the difference between first name and current name
if not timepointName:
for i in name_ind:
if name[i] != stitch_groups['file'][i]:
if i not in name_pos.keys():
name_pos[i] = set()
name_pos[i].update([get_number(stitch_groups['file'][i])])
name_pos[i].update([get_number(name[i])])
else:
name_pos[i].update([get_number(stitch_groups['file'][i])])
# Generate the output file name
# NOTE: This should be rewritten later to determine numeric values rather than position values.
# Output file names should be
indices = sorted(name_pos.keys())
if timepointName:
global_regex = ".*global-positions-([0-9]+).txt"
name = re.match(global_regex,Path(stitchPath).name).groups()[0]
name += '.ome.tif'
out_dict['name'] = name
elif len(indices) > 0:
out_dict['name'] = name[0:indices[0]]
minvals = []
maxvals = []
for v,i in enumerate(indices):
if len(minvals)==0:
out_dict['name'] += '<'
minvals.append(min(name_pos[i]))
maxvals.append(max(name_pos[i]))
if i == indices[-1] or indices[v+1] - i > 1:
out_dict['name'] += ''.join([str(ind) for ind in minvals])
out_dict['name'] += '-'
out_dict['name'] += ''.join([str(ind) for ind in maxvals])
out_dict['name'] += '>'
if i == indices[-1]:
out_dict['name'] += name[indices[-1]+1:]
else:
out_dict['name'] += name[indices[v]+1:indices[v+1]]
minvals = []
maxvals = []
else:
out_dict['name'] = name
return out_dict
if __name__=="__main__":
# Initialize the logger
logging.basicConfig(format='%(asctime)s - %(name)-8s - %(levelname)-8s - %(message)s',
datefmt='%d-%b-%y %H:%M:%S')
logger = logging.getLogger("main")
logger.setLevel(logging.INFO)
# Setup the argument parsing
parser = argparse.ArgumentParser(prog='main', description='Assemble images from a single stitching vector.')
parser.add_argument('--stitchPath', dest='stitchPath', type=str,
help='Complete path to a stitching vector', required=True)
parser.add_argument('--imgPath', dest='imgPath', type=str,
help='Input image collection to be processed by this plugin', required=True)
parser.add_argument('--outDir', dest='outDir', type=str,
help='Output collection', required=True)
parser.add_argument('--timesliceNaming', dest='timesliceNaming', type=str,
help='Use timeslice number as image name', required=False)
# Parse the arguments
args = parser.parse_args()
imgPath = args.imgPath
if Path(imgPath).joinpath('images').is_dir():
imgPath = str(Path(imgPath).joinpath('images').absolute())
outDir = args.outDir
logger.info('outDir: {}'.format(outDir))
timesliceNaming = args.timesliceNaming == 'true'
logger.info('timesliceNaming: {}'.format(timesliceNaming))
stitchPath = args.stitchPath
# Get a list of stitching vectors
vectors = [str(p.absolute()) for p in Path(stitchPath).iterdir() if p.is_file() and "".join(p.suffixes)=='.txt']
logger.info('imgPath: {}'.format(imgPath))
logger.info('stitchPath: {}'.format(stitchPath))
vectors.sort()
# Variables for image building processes
img_processes = []
img_paths = []
for v in vectors:
# Check to see if the file is a stitching vector
if 'img-global-positions' not in Path(v).name:
continue
# Parse the stitching vector
logger.info('Analyzing vector: {}'.format(Path(v).name))
outvals = _parse_stitch(v,imgPath,timesliceNaming)
logger.info('Building image: {}'.format(outvals['name']))
logger.info('Output image size (width, height): {},{}'.format(outvals['width'],outvals['height']))
# Variables for tile building processes
pnum = 0
ptotal = np.ceil(outvals['width']/10240) * np.ceil(outvals['height']/10240)
ptotal = 1/ptotal * 100
# Initialize the output image
logger.info('Initializing output file: {}'.format(outvals['name']))
refImg = str(Path(imgPath).joinpath(outvals['filePos'][0]['file']).absolute())
outFile = str(Path(outDir).joinpath(outvals['name']).absolute())
br = BioReader(str(Path(refImg).absolute()))
bw = BioWriter(str(Path(outFile).absolute()),metadata=br.read_metadata(),max_workers=max([multiprocessing.cpu_count(),2]))
bw.num_x(outvals['width'])
bw.num_y(outvals['height'])
del br
# Assemble the images
logger.info('Generating tiles...')
threads = []
with ThreadPoolExecutor(max([multiprocessing.cpu_count()//2,2])) as executor:
for x in range(0, outvals['width'], 10240):
X_range = min(x+10240,outvals['width']) # max x-pixel index in the assembled image
for y in range(0, outvals['height'], 10240):
Y_range = min(y+10240,outvals['height']) # max y-pixel index in the assembled image
image_buffer = make_tile(x,X_range,y,Y_range,v)
threads.append(executor.submit(bw.write_image,image_buffer,X=[x],Y=[y]))
# bw.write_image(image_buffer,X=[x],Y=[y])
logger.info('{:.2f} finished...'.format(0))
for ind,thread in enumerate(threads):
thread.result()
logger.info('{:.2f}% finished...'.format(100*(ind+1)/len(threads)))
logger.info('Closing image...')
bw.close_image()
| true | true |
1c2fdece658ee45e7c59df804f24c5c925f8d7d9 | 17,589 | py | Python | gaia.py | 0x7c2/cpme2 | 09ee443ca7193d1566ae300fc0f9707aa5d042e0 | [
"Apache-2.0"
] | null | null | null | gaia.py | 0x7c2/cpme2 | 09ee443ca7193d1566ae300fc0f9707aa5d042e0 | [
"Apache-2.0"
] | null | null | null | gaia.py | 0x7c2/cpme2 | 09ee443ca7193d1566ae300fc0f9707aa5d042e0 | [
"Apache-2.0"
] | 2 | 2020-12-17T08:11:45.000Z | 2021-02-25T17:25:43.000Z | #
# Copyright 2020 by 0x7c2, Simon Brecht.
# All rights reserved.
# This file is part of the Report/Analytic Tool - CPme,
# and is released under the "Apache License 2.0". Please see the LICENSE
# file that should have been included as part of this package.
#
from templates import check
import func
class check_gaia_hwinfo(check):
	# CPme check: dump `cpstat -f hw_info os` key/value pairs as INFO rows.
	page = "GAiA.0verview"
	category = "Appliance"
	title = ""
	isFirewall = True
	isManagement = True
	minVersion = 8020
	command = "cpstat -f hw_info os"
	isCommand = True
	def run_check(self):
		"""Emit one INFO result per 'key: value' line of the cpstat output.

		Fix vs. original: the `else` branch assigning an empty value was
		unreachable -- split(':') always yields at least two parts when
		':' is present in the line.
		"""
		for line in self.commandOut:
			if ":" not in line:
				continue
			parts = line.split(':')
			# the value may legitimately be empty after strip()
			self.add_result(parts[0].strip(), "INFO", parts[1].strip())
class check_gaia_scheduled_backup(check):
	# CPme check: is a scheduled backup configured in GAiA?
	page = "GAiA.0verview"
	category = "GAiA Settings"
	title = "Scheduled Backup Config"
	isFirewall = True
	isManagement = True
	minVersion = 8020
	command = "func.gaia_get_value('backup-scheduled')"
	isCommand = False
	def run_check(self):
		"""PASS when the GAiA database has a backup-scheduled entry."""
		if self.commandOut:
			status, detail = 'PASS', ''
		else:
			status, detail = 'WARN', 'not configured'
		self.add_result(self.title, status, detail)
class check_gaia_check_snapshots(check):
	# CPme check: report LVM snapshot volumes (non-active LVs from `lvs`).
	page = "GAiA.0verview"
	category = "Environment"
	title = "Existing GAiA Snapshots"
	isFirewall = True
	isManagement = True
	minVersion = 8020
	command = "lvs | grep -v 'wi-ao' | tail -n +2"
	isCommand = True
	def run_check(self):
		"""WARN for user-created snapshots; factory volumes are INFO only."""
		found_any = False
		for line in self.commandOut:
			cols = line.split()
			if len(cols) > 1:
				found_any = True
				name = cols[0]
				vg = cols[1]
				size = cols[3]
				detail = vg + " / " + name + " (" + size + ")"
				# hwdiag / fcd_GAIA are factory volumes shipped on appliances
				severity = 'INFO' if ("hwdiag" in name or "fcd_GAIA" in name) else 'WARN'
				self.add_result(self.title, severity, detail)
		if not found_any:
			self.add_result(self.title, 'INFO', '')
class check_gaia_check_cpuse_agent_version(check):
	# CPme check: is the CPUSE deployment agent build current?
	page = "GAiA.CPUSE"
	category = "Agent"
	title = "Deployment Agent Version"
	isFirewall = True
	isManagement = True
	minVersion = 8020
	command = "$DADIR/bin/da_cli da_status"
	isCommand = True
	def run_check(self):
		"""PASS when da_status reports the agent is up to date.

		Fix vs. original: the loop kept scanning after a match and emitted
		one duplicate PASS row per matching line; a single membership test
		now yields exactly one result either way.
		"""
		if any('up to date' in line for line in self.commandOut):
			self.add_result(self.title, 'PASS', '')
		else:
			self.add_result(self.title, 'WARN', 'new version available')
class check_gaia_check_cpuse_agent_pending_reboot(check):
	# CPme check: does CPUSE require a reboot to finish an installation?
	page = "GAiA.CPUSE"
	category = "Agent"
	title = "Deployment Agent Pending Reboot"
	isFirewall = True
	isManagement = True
	minVersion = 8020
	command = "$DADIR/bin/da_cli is_pending_reboot"
	isCommand = True
	def run_check(self):
		"""PASS when the agent reports no pending reboot.

		Fix vs. original: the loop kept scanning after a match and emitted
		one duplicate PASS row per matching line; a single membership test
		now yields exactly one result either way.
		"""
		if any('no reboot' in line for line in self.commandOut):
			self.add_result(self.title, 'PASS', '')
		else:
			self.add_result(self.title, 'WARN', 'Reboot pending!')
class check_gaia_check_cpuse_agent_packages(check):
	# CPme check: list CPUSE packages that are available for installation.
	page = "GAiA.CPUSE"
	category = "Packages"
	title = "Packages available for install"
	isFirewall = True
	isManagement = True
	minVersion = 8020
	command = "$DADIR/bin/da_cli packages_info status=available"
	isCommand = True
	def run_check(self):
		"""WARN once per available package, PASS when nothing is pending."""
		available = False
		for line in self.commandOut:
			if 'filename' not in line:
				continue
			# strip quotes/commas from JSON-style:  "filename": "<pkg>.tgz",
			pkg = line.split(':')[1].replace('"', '').replace(',', '')
			self.add_result(self.title, 'WARN', pkg)
			available = True
		if not available:
			self.add_result(self.title, 'PASS', '')
class check_gaia_check_proxy_settings(check):
	# CPme check: report the system-wide HTTP proxy configured in GAiA.
	page = "GAiA.0verview"
	category = "GAiA Settings"
	title = "Proxy Configuration"
	isFirewall = True
	isManagement = True
	minVersion = 8020
	command = "func.gaia_get_value('proxy:ip-address')"
	isCommand = False
	def run_check(self):
		"""Report the configured proxy as ip:port, or 'direct' when unset."""
		if not self.commandOut:
			self.add_result(self.title, 'INFO', 'direct')
			return
		proxy_port = func.gaia_get_value('proxy:port')
		self.add_result(self.title, 'INFO', self.commandOut + ':' + proxy_port)
class check_gaia_ntp(check):
	# CPme check: verify the system clock is NTP-synchronised.
	page = "GAiA.0verview"
	category = "GAiA Settings"
	title = "NTP - Time and Date"
	isFirewall = True
	isManagement = True
	minVersion = 8020
	command = "ntpstat"
	isCommand = True
	def run_check(self):
		"""PASS when ntpstat reports 'synchronised to', FAIL otherwise."""
		synced = False
		for line in self.commandOut:
			if 'synchronised to' in line:
				self.add_result(self.title, "PASS", "")
				synced = True
		if not synced:
			self.add_result(self.title, "FAIL", "")
class check_gaia_dns_external_checkpoint(check):
	# CPme check: external DNS sanity probe using checkpoint.com.
	page = "GAiA.Connectivity"
	category = "DNS Resolver"
	title = "DNS Lookup [checkpoint.com]"
	isFirewall = True
	isManagement = True
	minVersion = 8020
	command = "nslookup checkpoint.com | awk 'NR>3 { print $0 }'"
	isCommand = True
	def run_check(self):
		"""PASS when an answer record resolves into a 209.* network.

		The awk filter in `command` drops the resolver's own Server/Address
		header lines, so only answer records reach this loop.

		Fix vs. original: match on the address prefix instead of the bare
		substring '209' anywhere in the line, which could also hit
		unrelated octets (e.g. x.209.x.x) and produce a false PASS.
		"""
		passme = False
		detail = ""
		for line in self.commandOut:
			if 'Address:' in line:
				detail = line.strip()
				address = line.split('Address:')[-1].strip()
				if address.startswith('209.'):
					passme = True
		if passme:
			self.add_result(self.title, 'PASS', detail)
		else:
			self.add_result(self.title, 'FAIL', detail)
class check_gaia_dns_external_heise(check):
	# CPme check: second external DNS sanity probe using heise.de.
	page = "GAiA.Connectivity"
	category = "DNS Resolver"
	title = "DNS Lookup [heise.de]"
	isFirewall = True
	isManagement = True
	minVersion = 8020
	command = "nslookup heise.de | awk 'NR>3 { print $0 }'"
	isCommand = True
	def run_check(self):
		"""PASS when an answer record resolves into a 193.* network.

		The awk filter in `command` drops the resolver's own Server/Address
		header lines, so only answer records reach this loop.

		Fix vs. original: match on the address prefix instead of the bare
		substring '193' anywhere in the line, which could also hit
		unrelated octets and produce a false PASS.
		"""
		passme = False
		detail = ""
		for line in self.commandOut:
			if 'Address:' in line:
				detail = line.strip()
				address = line.split('Address:')[-1].strip()
				if address.startswith('193.'):
					passme = True
		if passme:
			self.add_result(self.title, 'PASS', detail)
		else:
			self.add_result(self.title, 'FAIL', detail)
class check_gaia_z_check_connectivity(check):
	# CPme check: probe reachability of Check Point cloud service URLs.
	page = "GAiA.Connectivity"
	category = "Check Point Services"
	title = "Connection"
	isFirewall = True
	isManagement = True
	minVersion = 8020
	command = "ls"	# output is never read by run_check; cheap no-op command
	isCommand = True
	# probes are skipped (WAIT rows) until set_command() flips this flag
	runOnStartup = False
	def run_check(self):
		"""Emit one PASS/FAIL row per Check Point service URL.

		Each URL is fetched with `curl_cli -Lisk` and only the first line
		of the HTTP response (the status line) is inspected. While
		runOnStartup is False, WAIT placeholder rows are emitted instead
		of performing any network I/O.
		"""
		proxy = ""	# reserved for curl proxy arguments (currently always empty)
		urls = []
		# [url, human-readable service label] pairs
		urls.append(['http://cws.checkpoint.com/APPI/SystemStatus/type/short','Social Media Widget Detection'])
		urls.append(['http://cws.checkpoint.com/URLF/SystemStatus/type/short','URL Filtering Cloud Categorization'])
		urls.append(['http://cws.checkpoint.com/AntiVirus/SystemStatus/type/short','Virus Detection'])
		urls.append(['http://cws.checkpoint.com/Malware/SystemStatus/type/short','Bot Detection'])
		urls.append(['https://updates.checkpoint.com/','IPS Updates'])
		urls.append(['http://dl3.checkpoint.com','Download Service Updates '])
		urls.append(['https://usercenter.checkpoint.com/usercenter/services/ProductCoverageService','Contract Entitlement '])
		urls.append(['https://usercenter.checkpoint.com/usercenter/services/BladesManagerService','Software Blades Manager Service'])
		urls.append(['http://resolver1.chkp.ctmail.com','Suspicious Mail Outbreaks'])
		urls.append(['http://download.ctmail.com','Anti-Spam'])
		urls.append(['http://te.checkpoint.com','Threat Emulatin'])	# NOTE(review): label typo ("Emulatin") is a runtime string, left unchanged here
		urls.append(['http://teadv.checkpoint.com','Threat Emulation Advanced'])
		urls.append(['http://kav8.zonealarm.com/version.txt','Deep inspection'])
		urls.append(['http://kav8.checkpoint.com','Traditional Anti-Virus'])
		urls.append(['http://avupdates.checkpoint.com/UrlList.txt','Traditional Anti-Virus, Legacy URL Filtering'])
		urls.append(['http://sigcheck.checkpoint.com/Siglist2.txt','Download of signature updates'])
		urls.append(['http://secureupdates.checkpoint.com','Manage Security Gateways'])
		urls.append(['https://productcoverage.checkpoint.com/ProductCoverageService','Makes sure the machines contracts are up-to-date'])
		urls.append(['https://sc1.checkpoint.com/sc/images/checkmark.gif','Download of icons and screenshots from Check Point media storage servers'])
		urls.append(['https://sc1.checkpoint.com/za/images/facetime/large_png/60342479_lrg.png','Download of icons and screenshots from Check Point media storage servers'])
		urls.append(['https://sc1.checkpoint.com/za/images/facetime/large_png/60096017_lrg.png','Download of icons and screenshots from Check Point media storage servers'])
		urls.append(['https://push.checkpoint.com','Push Notifications '])
		urls.append(['http://downloads.checkpoint.com','Download of Endpoint Compliance Updates'])
		for url in urls:
			if self.runOnStartup:
				out, err = func.execute_command('curl_cli -Lisk ' + proxy + url[0] + ' | head -n1')
				data = out.read().strip('\n').strip(' ')
				# status-line heuristics: 'OK' (200), 'Found'/'Moved' (redirects),
				# or an upstream proxy's 'Connection established'
				if "OK" in data or "Found" in data or "Moved" in data or "Connection established" in data:
					state = "PASS"
					detail = ""
				else:
					state = "FAIL"
					detail = data
				self.add_result(self.title + " [" + url[1] + "]", state, detail)
			else:
				self.add_result(self.title + " [" + url[1] + "]", 'WAIT', '')
	def set_command(self):
		# arms the (slow) network probes for the next run_check invocation
		self.runOnStartup = True
class check_gaia_interface_bonds(check):
	# CPme check: report bond interface / slave health via cphaprob.
	page = "GAiA.Networking"
	category = "Bonding"
	title = "Bond"
	isFirewall = True
	isManagement = True
	minVersion = 8020
	command = "ifconfig | grep -c bond"
	isCommand = True
	def run_check(self):
		"""Parse `cphaprob show_bond` when bond interfaces exist.

		`command` counts interfaces named bond*; the detailed slave state
		is only fetched when that count is non-zero.
		"""
		if int(self.commandOut[0]) > 0:
			out, err = func.execute_command("cphaprob show_bond")
			for line in out:
				# table rows look like: name | mode | state | cfg | up | required
				if "|" not in line or "bond" not in line:
					continue
				fields = [col.strip() for col in line.split("|")]
				name, mode, status = fields[0], fields[1], fields[2]
				slaves_cfg, slaves_up, slaves_req = fields[3], fields[4], fields[5]
				state = "PASS" if status == "UP" else "WARN"
				self.add_result(self.title + " [" + name + ", " + mode + "]", state, slaves_up + "/" + slaves_cfg + " , Required: " + slaves_req)
		else:
			self.add_result("No bonding found", "PASS", "")
class check_gaia_interface_buffers(check):
    """Check NIC ring buffer sizes via `ethtool -g`; warns unless RX=256 and TX=1024."""
    page = "GAiA.Networking"
    category = "Ring Buffer"
    title = "Buffer Size"
    isFirewall = True
    isManagement = True
    isClusterXL = False
    minVersion = 8020
    # Lists physical interfaces (lines containing a MAC address).
    command = "ifconfig | grep HWaddr"
    isCommand = True

    def run_check(self):
        for line in self.commandOut:
            b_rx = ""
            b_tx = ""
            state = "PASS"
            nic = line.split()[0].strip()
            b_out, b_err = func.execute_command('ethtool -g ' + nic)
            # ethtool prints RX:/TX: twice (maximums first, then current);
            # the last occurrence — the current setting — wins.
            for data in b_out:
                if "RX:" in data: b_rx = data.split()[1].strip()
                if "TX:" in data: b_tx = data.split()[1].strip()
            if b_rx != "256": state = "WARN"
            if b_tx != "1024": state = "WARN"
            detail = "RX: " + b_rx + ", TX: " + b_tx
            # VLAN sub-interfaces (names with a dot) are skipped.
            if not "." in nic:
                self.add_result(self.title + " [" + nic + "]", state, detail)
class check_gaia_interface_stats(check):
    """Report per-NIC error/drop counters from /sys/class/net/<nic>/statistics.

    Every rx_*/tx_* error counter of each physical interface is read; a
    non-zero value is a FAIL. When all counters of one direction are zero,
    the individual rows are collapsed into a single "<dir>/all" PASS row.
    """
    page = "GAiA.Networking"
    category = "Statistics"
    title = "Interface statistics"
    isFirewall = True
    isManagement = True
    isClusterXL = False
    minVersion = 8020
    command = "ls"
    isCommand = True

    def _check_counters(self, interface, counters, direction):
        """Read the given sysfs counters of `interface` and add one result
        per counter; collapse to one PASS row when all of them are zero."""
        error = False
        for counter in counters:
            read, err = func.execute_command('cat /sys/class/net/' + interface + '/statistics/' + counter)
            val = read.read().strip('\n')
            state = "PASS"
            detail = ""
            if val != "0":
                state = "FAIL"
                detail = val
                error = True
            self.add_result(self.title + " (" + interface + " - " + counter + ")", state, detail)
        if not error:
            # Drop the per-counter PASS rows and replace them with one summary row.
            for _ in counters:
                self.results.pop()
            self.add_result(self.title + " (" + interface + " - " + direction + "/all" + ")", "PASS", "")

    def run_check(self):
        values_rx = ["rx_dropped", "rx_crc_errors", "rx_errors", "rx_fifo_errors", "rx_frame_errors", "rx_length_errors", "rx_missed_errors", "rx_over_errors"]
        values_tx = ["tx_aborted_errors", "tx_carrier_errors", "tx_dropped", "tx_errors", "tx_fifo_errors", "tx_heartbeat_errors", "tx_window_errors"]
        # Physical interfaces only (no loopback, bonds, VPN/sit tunnels, VLANs).
        out, err = func.execute_command('ls -1 /sys/class/net | grep -vE "(lo|bond|vpn|sit|\.)"')
        for line in out:
            interface = line.strip('\n')
            self._check_counters(interface, values_rx, "rx")
            # Bug fix: the original TX loop labeled its results with the RX
            # counter names (values_rx[i]); TX counters are now labeled correctly.
            self._check_counters(interface, values_tx, "tx")
class check_gaia_disk_space(check):
    """Classify free disk space per mount point from `df -h` output."""
    page = "GAiA.0verview"
    category = "Harddisk"
    title = "Disk Space"
    isFirewall = True
    isManagement = True
    isClusterXL = False
    minVersion = 8020
    # Emits lines of the form "<available>;<mountpoint>" (df fields 4 and 6).
    command = "df -h | sed s/\ \ */\;/g | cut -d ';' -f 6,4 | awk 'NR>1 {print $1}'"
    isCommand = True

    def run_check(self):
        for line in self.commandOut:
            state = "FAIL"  # default: free space only in K or bytes
            data = str(line).strip('\n').split(";")
            if len(data) < 2:
                continue
            if "M" in data[0]:  # megabytes free
                state = "WARN"
            if "G" in data[0]:  # gigabytes free
                state = "PASS"
            if data[1] == "/boot" or data[1] == "/dev/shm":
                # Small/special filesystems are expected to have little space.
                state = "PASS"
            self.add_result(self.title + " (" + data[1] + ")", state, data[0])
class check_gaia_cpu_smt(check):
    """Report the SMT/hyperthreading state of the appliance as an INFO row."""
    page = "GAiA.0verview"
    category = "CPU"
    title = "Hyperthreading/SMT"
    isFirewall = True
    isManagement = False
    minVersion = 8020
    command = 'if [ ! -f "/proc/smt_status" ] ; then echo "Not available" ; else cat /proc/smt_status ; fi'
    isCommand = True

    def run_check(self):
        status = self.commandOut[0].strip()
        # "Unsupported" from /proc/smt_status is shown as "Disabled".
        detail = "Disabled" if "Unsupported" in status else status
        self.add_result(self.title, "INFO", detail)
class check_gaia_cpu_usage(check):
    """Report peak and average CPU load per core from the GAiA monitoring DB,
    annotated with what runs on that core (firewall workers, NIC queues,
    daemons) according to `fw ctl affinity -l` on gateways."""
    page = "GAiA.0verview"
    category = "CPU"
    title = "CPU Usage"
    isFirewall = True
    isManagement = True
    isClusterXL = False
    minVersion = 8020
    command = "ls"
    isCommand = True

    @staticmethod
    def _affinity_for_cpu(affinity, cpu):
        """Return (daemon, nic, worker) label strings for one CPU, parsed
        from `fw ctl affinity -l` output. Extracted from the previously
        duplicated peak/average loops."""
        worker = ""
        nic = ""
        daemon = ""
        for line in affinity.split('\n'):
            # The '#' sentinel makes "CPU 1" not match "CPU 10".
            if "CPU "+str(cpu)+'#' in line+'#':
                if "Kernel" in line:
                    if worker != "":
                        worker = worker + ", "
                    worker = worker + line.split(":")[0].replace("Kernel ", "")
                elif "Daemon" in line:
                    daemon = "Daemon(s), "
                else:
                    if nic != "":
                        nic = nic + ", "
                    nic = nic + line.split(":")[0]
        return daemon, nic, worker

    def run_check(self):
        if func.isFirewall():
            out, err = func.execute_command("fw ctl affinity -l")
            affinity = out.read()
        else:
            affinity = ""
        # Peak usage per CPU: >85% is FAIL on NIC-handling cores, WARN otherwise.
        dbcur = func.execute_sqlite_query("select name_of_cpu,max(cpu_usage) from UM_STAT_UM_CPU_UM_CPU_ORDERED_TABLE group by name_of_cpu;")
        for row in dbcur:
            cpu = row[0]
            daemon, nic, worker = self._affinity_for_cpu(affinity, cpu)
            load = str(row[1]).split(".")[0]
            state = "PASS"
            if int(load) > 85 and nic != "":
                state = "FAIL"
            elif int(load) > 85 and nic == "":
                state = "WARN"
            if nic != "":
                nic = nic + ", "
            self.add_result(self.title + " (peak - CPU " + str(cpu) + "): " + daemon + nic + worker, state, load + "%")
        # Average usage per CPU: stricter thresholds than the peak check.
        dbcur = func.execute_sqlite_query("select name_of_cpu,avg(cpu_usage) from UM_STAT_UM_CPU_UM_CPU_ORDERED_TABLE group by name_of_cpu;")
        for row in dbcur:
            cpu = row[0]
            daemon, nic, worker = self._affinity_for_cpu(affinity, cpu)
            load = str(row[1]).split(".")[0]
            state = "PASS"
            if int(load) > 50:
                state = "WARN"
            if int(load) > 50 and nic != "":
                state = "FAIL"
            if int(load) > 85 and worker != "":
                state = "FAIL"
            if nic != "":
                nic = nic + ", "
            self.add_result(self.title + " (avg - CPU " + str(cpu) + "): " + daemon + nic + worker, state, load + "%")
        dbcur.close()
class check_gaia_memory_usage(check):
    """Evaluate RAM and swap usage.

    Average/peak RAM percentages come from the UM_STAT_UM_MEMORY sqlite
    table; swap usage is sampled live via `free -g`.
    """
    page = "GAiA.0verview"
    category = "Memory"
    title = "Memory Usage"
    isFirewall = True
    isManagement = True
    isClusterXL = False
    minVersion = 8020
    command = "ls"
    isCommand = True

    def run_check(self):
        mem_total = 0
        mem_avg = 0
        mem_peak = 0
        dbcur = func.execute_sqlite_query("select max(real_total) from UM_STAT_UM_MEMORY;")
        for row in dbcur:
            mem_total = row[0]
        dbcur = func.execute_sqlite_query("select avg(real_used) from UM_STAT_UM_MEMORY;")
        for row in dbcur:
            mem_avg = row[0]
        dbcur = func.execute_sqlite_query("select max(real_used) from UM_STAT_UM_MEMORY;")
        for row in dbcur:
            mem_peak = row[0]
        dbcur.close()
        # Integer percentage via truncation of the formatted float.
        mem_avg_used = int(str(mem_avg/mem_total*100).split(".")[0])
        mem_peak_used = int(str(mem_peak/mem_total*100).split(".")[0])
        state = "PASS"
        if mem_avg_used > 70:
            state = "WARN"
        if mem_avg_used > 90:
            state = "FAIL"
        self.add_result(self.title + " (average)", state, str(mem_avg_used)+"%")
        state = "PASS"
        if mem_peak_used > 80:
            state = "WARN"
        self.add_result(self.title + " (peak)", state, str(mem_peak_used)+"%")
        # Swap row of free -g: $3 = used GiB, $4 = free GiB.
        out, err = func.execute_command("free -g | grep -i swap | awk '{print $3,$4}'")
        data = out.read().strip('\n').split(" ")
        used = data[0]
        avail = data[1]
        # Bug fix: with `free -g` a small swap area can report 0 GiB free,
        # which previously raised ZeroDivisionError.
        if int(avail) > 0:
            percent = str(int(used) / int(avail) * 100).split(".")[0]
        else:
            percent = "0" if used == "0" else "100"
        state = "WARN"
        if percent == "0":
            state = "PASS"
        self.add_result(self.title + " (swap)", state, percent + "%")
| 30.642857 | 166 | 0.627722 |
from templates import check
import func
class check_gaia_hwinfo(check):
    """Dump hardware/OS fields from `cpstat -f hw_info os` as INFO rows."""
    page = "GAiA.0verview"
    category = "Appliance"
    title = ""
    isFirewall = True
    isManagement = True
    minVersion = 8020
    command = "cpstat -f hw_info os"
    isCommand = True

    def run_check(self):
        for line in self.commandOut:
            # Each "Field: value" line becomes one informational result.
            if ":" in line:
                data = line.split(':')
                a_field = data[0].strip()
                if len(data) > 1:
                    a_val = data[1].strip()
                else:
                    a_val = ""
                self.add_result(a_field, "INFO", a_val)
class check_gaia_scheduled_backup(check):
    """PASS when a scheduled backup is configured in the GAiA database."""
    page = "GAiA.0verview"
    category = "GAiA Settings"
    title = "Scheduled Backup Config"
    isFirewall = True
    isManagement = True
    minVersion = 8020
    # isCommand is False: the string is evaluated via func.gaia_get_value,
    # so commandOut holds the config value (truthy when configured).
    command = "func.gaia_get_value('backup-scheduled')"
    isCommand = False

    def run_check(self):
        if self.commandOut:
            self.add_result(self.title, 'PASS', '')
        else:
            self.add_result(self.title, 'WARN', 'not configured')
class check_gaia_check_snapshots(check):
    """Warn about LVM snapshot volumes (they consume disk and hurt performance);
    factory-default volumes (hwdiag / fcd_GAIA) are only informational."""
    page = "GAiA.0verview"
    category = "Environment"
    title = "Existing GAiA Snapshots"
    isFirewall = True
    isManagement = True
    minVersion = 8020
    # lvs without the normally active 'wi-ao' volumes, header stripped.
    command = "lvs | grep -v 'wi-ao' | tail -n +2"
    isCommand = True

    def run_check(self):
        found = False
        for o in self.commandOut:
            # Collapse whitespace to get stable column positions.
            temp = ' '.join(o.split())
            cols = temp.split(' ')
            if len(cols)>1:
                found = True
                name = cols[0].strip(' ').strip('\n')
                vg = cols[1].strip(' ').strip('\n')
                attr = cols[2].strip(' ').strip('\n')  # parsed but unused
                size = cols[3].strip(' ').strip('\n')
                detail = vg + " / " + name + " (" + size + ")"
                if "hwdiag" in name or "fcd_GAIA" in name:
                    self.add_result(self.title, 'INFO', detail)
                else:
                    self.add_result(self.title, 'WARN', detail)
        if not found:
            self.add_result(self.title, 'INFO', '')
class check_gaia_check_cpuse_agent_version(check):
    """PASS when the CPUSE deployment agent reports itself as up to date."""
    page = "GAiA.CPUSE"
    category = "Agent"
    title = "Deployment Agent Version"
    isFirewall = True
    isManagement = True
    minVersion = 8020
    command = "$DADIR/bin/da_cli da_status"
    isCommand = True

    def run_check(self):
        found = False
        for o in self.commandOut:
            if 'up to date' in o:
                found = True
                self.add_result(self.title, 'PASS', '')
        if not found:
            self.add_result(self.title, 'WARN', 'new version available')
class check_gaia_check_cpuse_agent_pending_reboot(check):
    """WARN when the CPUSE deployment agent reports a pending reboot."""
    page = "GAiA.CPUSE"
    category = "Agent"
    title = "Deployment Agent Pending Reboot"
    isFirewall = True
    isManagement = True
    minVersion = 8020
    command = "$DADIR/bin/da_cli is_pending_reboot"
    isCommand = True

    def run_check(self):
        found = False
        for o in self.commandOut:
            if 'no reboot' in o:
                found = True
                self.add_result(self.title, 'PASS', '')
        if not found:
            self.add_result(self.title, 'WARN', 'Reboot pending!')
class check_gaia_check_cpuse_agent_packages(check):
    """WARN for every CPUSE package that is available but not installed."""
    page = "GAiA.CPUSE"
    category = "Packages"
    title = "Packages available for install"
    isFirewall = True
    isManagement = True
    minVersion = 8020
    command = "$DADIR/bin/da_cli packages_info status=available"
    isCommand = True

    def run_check(self):
        found = False
        for o in self.commandOut:
            # Extract the bare filename from a JSON-ish '"filename": "...",' line.
            if 'filename' in o:
                tmp = o.split(':')[1].replace('"','').replace(',','')
                self.add_result(self.title, 'WARN', tmp)
                found = True
        if not found:
            self.add_result(self.title, 'PASS', '')
class check_gaia_check_proxy_settings(check):
    """Report the configured HTTP proxy (or 'direct' when none is set)."""
    page = "GAiA.0verview"
    category = "GAiA Settings"
    title = "Proxy Configuration"
    isFirewall = True
    isManagement = True
    minVersion = 8020
    # isCommand False: commandOut is the proxy IP string from the GAiA DB.
    command = "func.gaia_get_value('proxy:ip-address')"
    isCommand = False

    def run_check(self):
        if self.commandOut:
            proxy_port = func.gaia_get_value('proxy:port')
            self.add_result(self.title, 'INFO', self.commandOut + ':' + proxy_port)
        else:
            self.add_result(self.title, 'INFO', 'direct')
class check_gaia_ntp(check):
    """PASS when ntpstat reports the clock as synchronised to a server."""
    page = "GAiA.0verview"
    category = "GAiA Settings"
    title = "NTP - Time and Date"
    isFirewall = True
    isManagement = True
    minVersion = 8020
    command = "ntpstat"
    isCommand = True

    def run_check(self):
        found = False
        for o in self.commandOut:
            if 'synchronised to' in o:
                self.add_result(self.title, "PASS", "")
                found = True
        if not found:
            self.add_result(self.title, "FAIL", "")
class check_gaia_dns_external_checkpoint(check):
    """Resolve checkpoint.com and require an answer in 209.x (Check Point's range)."""
    page = "GAiA.Connectivity"
    category = "DNS Resolver"
    title = "DNS Lookup [checkpoint.com]"
    isFirewall = True
    isManagement = True
    minVersion = 8020
    # Skip the first 3 nslookup lines (server info) and keep the answer section.
    command = "nslookup checkpoint.com | awk 'NR>3 { print $0 }'"
    isCommand = True

    def run_check(self):
        passme = False
        detail = ""
        for line in self.commandOut:
            if 'Address:' in line:
                # Loose match: any answer containing "209" counts as expected.
                if '209' in line:
                    passme = True
                detail = line.strip()
        if passme:
            self.add_result(self.title, 'PASS', detail)
        else:
            self.add_result(self.title, 'FAIL', detail)
class check_gaia_dns_external_heise(check):
    """Resolve heise.de and require an answer in 193.x (its known address range)."""
    page = "GAiA.Connectivity"
    category = "DNS Resolver"
    title = "DNS Lookup [heise.de]"
    isFirewall = True
    isManagement = True
    minVersion = 8020
    # Skip the first 3 nslookup lines (server info) and keep the answer section.
    command = "nslookup heise.de | awk 'NR>3 { print $0 }'"
    isCommand = True

    def run_check(self):
        passme = False
        detail = ""
        for line in self.commandOut:
            if 'Address:' in line:
                # Loose match: any answer containing "193" counts as expected.
                if '193' in line:
                    passme = True
                detail = line.strip()
        if passme:
            self.add_result(self.title, 'PASS', detail)
        else:
            self.add_result(self.title, 'FAIL', detail)
class check_gaia_z_check_connectivity(check):
    """Probe reachability of Check Point cloud service URLs with curl_cli.

    Results start as 'WAIT'; set_command() arms the check so that the next
    run actually performs the HTTP probes.
    """
    page = "GAiA.Connectivity"
    category = "Check Point Services"
    title = "Connection"
    isFirewall = True
    isManagement = True
    minVersion = 8020
    command = "ls"
    isCommand = True
    runOnStartup = False

    def run_check(self):
        proxy = ""
        # Each entry: [URL to probe, human-readable service description].
        urls = []
        urls.append(['http://cws.checkpoint.com/APPI/SystemStatus/type/short','Social Media Widget Detection'])
        urls.append(['http://cws.checkpoint.com/URLF/SystemStatus/type/short','URL Filtering Cloud Categorization'])
        urls.append(['http://cws.checkpoint.com/AntiVirus/SystemStatus/type/short','Virus Detection'])
        urls.append(['http://cws.checkpoint.com/Malware/SystemStatus/type/short','Bot Detection'])
        urls.append(['https://updates.checkpoint.com/','IPS Updates'])
        urls.append(['http://dl3.checkpoint.com','Download Service Updates '])
        urls.append(['https://usercenter.checkpoint.com/usercenter/services/ProductCoverageService','Contract Entitlement '])
        urls.append(['https://usercenter.checkpoint.com/usercenter/services/BladesManagerService','Software Blades Manager Service'])
        urls.append(['http://resolver1.chkp.ctmail.com','Suspicious Mail Outbreaks'])
        urls.append(['http://download.ctmail.com','Anti-Spam'])
        # Typo fix: label read "Threat Emulatin".
        urls.append(['http://te.checkpoint.com','Threat Emulation'])
        urls.append(['http://teadv.checkpoint.com','Threat Emulation Advanced'])
        urls.append(['http://kav8.zonealarm.com/version.txt','Deep inspection'])
        urls.append(['http://kav8.checkpoint.com','Traditional Anti-Virus'])
        urls.append(['http://avupdates.checkpoint.com/UrlList.txt','Traditional Anti-Virus, Legacy URL Filtering'])
        urls.append(['http://sigcheck.checkpoint.com/Siglist2.txt','Download of signature updates'])
        urls.append(['http://secureupdates.checkpoint.com','Manage Security Gateways'])
        urls.append(['https://productcoverage.checkpoint.com/ProductCoverageService','Makes sure the machines contracts are up-to-date'])
        urls.append(['https://sc1.checkpoint.com/sc/images/checkmark.gif','Download of icons and screenshots from Check Point media storage servers'])
        urls.append(['https://sc1.checkpoint.com/za/images/facetime/large_png/60342479_lrg.png','Download of icons and screenshots from Check Point media storage servers'])
        urls.append(['https://sc1.checkpoint.com/za/images/facetime/large_png/60096017_lrg.png','Download of icons and screenshots from Check Point media storage servers'])
        urls.append(['https://push.checkpoint.com','Push Notifications '])
        urls.append(['http://downloads.checkpoint.com','Download of Endpoint Compliance Updates'])
        for url in urls:
            if self.runOnStartup:
                # Only the first line of the (redirect-following) response matters.
                out, err = func.execute_command('curl_cli -Lisk ' + proxy + url[0] + ' | head -n1')
                data = out.read().strip('\n').strip(' ')
                # Any 2xx/3xx status line or a proxy CONNECT success is reachable.
                if "OK" in data or "Found" in data or "Moved" in data or "Connection established" in data:
                    state = "PASS"
                    detail = ""
                else:
                    state = "FAIL"
                    detail = data
                self.add_result(self.title + " [" + url[1] + "]", state, detail)
            else:
                self.add_result(self.title + " [" + url[1] + "]", 'WAIT', '')

    def set_command(self):
        """Arm the check: the next run_check() performs the HTTP probes."""
        self.runOnStartup = True
class check_gaia_interface_bonds(check):
    """Report the state of every configured bond via `cphaprob show_bond`."""
    page = "GAiA.Networking"
    category = "Bonding"
    title = "Bond"
    isFirewall = True
    isManagement = True
    minVersion = 8020
    # Counts interfaces whose name contains "bond".
    command = "ifconfig | grep -c bond"
    isCommand = True

    def run_check(self):
        # Only query bond details when at least one bond interface exists.
        if int(self.commandOut[0]) > 0:
            cmd = "cphaprob show_bond"
            b_out, b_err = func.execute_command(cmd)
            for data in b_out:
                # Per the column names below, table rows appear to be
                # name | mode | state | configured | up | required — TODO confirm.
                if "|" in data and "bond" in data:
                    cols = data.split("|")
                    b_name = cols[0].strip()
                    b_mode = cols[1].strip()
                    b_stat = cols[2].strip()
                    b_cfg = cols[3].strip()
                    b_up = cols[4].strip()
                    b_req = cols[5].strip()
                    state = "PASS"
                    if b_stat != "UP":
                        state = "WARN"
                    self.add_result(self.title + " [" + b_name + ", " + b_mode + "]", state, b_up + "/" + b_cfg + " , Required: " + b_req)
        else:
            self.add_result("No bonding found", "PASS", "")
class check_gaia_interface_buffers(check):
    """Check NIC ring buffer sizes via `ethtool -g`; warns unless RX=256 and TX=1024."""
    page = "GAiA.Networking"
    category = "Ring Buffer"
    title = "Buffer Size"
    isFirewall = True
    isManagement = True
    isClusterXL = False
    minVersion = 8020
    # Lists physical interfaces (lines containing a MAC address).
    command = "ifconfig | grep HWaddr"
    isCommand = True

    def run_check(self):
        for line in self.commandOut:
            b_rx = ""
            b_tx = ""
            state = "PASS"
            nic = line.split()[0].strip()
            b_out, b_err = func.execute_command('ethtool -g ' + nic)
            # ethtool prints RX:/TX: twice (maximums first, then current);
            # the last occurrence — the current setting — wins.
            for data in b_out:
                if "RX:" in data: b_rx = data.split()[1].strip()
                if "TX:" in data: b_tx = data.split()[1].strip()
            if b_rx != "256": state = "WARN"
            if b_tx != "1024": state = "WARN"
            detail = "RX: " + b_rx + ", TX: " + b_tx
            # VLAN sub-interfaces (names with a dot) are skipped.
            if not "." in nic:
                self.add_result(self.title + " [" + nic + "]", state, detail)
class check_gaia_interface_stats(check):
    """Report per-NIC error/drop counters from /sys/class/net/<nic>/statistics.

    Every rx_*/tx_* error counter of each physical interface is read; a
    non-zero value is a FAIL. When all counters of one direction are zero,
    the individual rows are collapsed into a single "<dir>/all" PASS row.
    """
    page = "GAiA.Networking"
    category = "Statistics"
    title = "Interface statistics"
    isFirewall = True
    isManagement = True
    isClusterXL = False
    minVersion = 8020
    command = "ls"
    isCommand = True

    def _check_counters(self, interface, counters, direction):
        """Read the given sysfs counters of `interface` and add one result
        per counter; collapse to one PASS row when all of them are zero."""
        error = False
        for counter in counters:
            read, err = func.execute_command('cat /sys/class/net/' + interface + '/statistics/' + counter)
            val = read.read().strip('\n')
            state = "PASS"
            detail = ""
            if val != "0":
                state = "FAIL"
                detail = val
                error = True
            self.add_result(self.title + " (" + interface + " - " + counter + ")", state, detail)
        if not error:
            # Drop the per-counter PASS rows and replace them with one summary row.
            for _ in counters:
                self.results.pop()
            self.add_result(self.title + " (" + interface + " - " + direction + "/all" + ")", "PASS", "")

    def run_check(self):
        values_rx = ["rx_dropped", "rx_crc_errors", "rx_errors", "rx_fifo_errors", "rx_frame_errors", "rx_length_errors", "rx_missed_errors", "rx_over_errors"]
        values_tx = ["tx_aborted_errors", "tx_carrier_errors", "tx_dropped", "tx_errors", "tx_fifo_errors", "tx_heartbeat_errors", "tx_window_errors"]
        # Physical interfaces only (no loopback, bonds, VPN/sit tunnels, VLANs).
        out, err = func.execute_command('ls -1 /sys/class/net | grep -vE "(lo|bond|vpn|sit|\.)"')
        for line in out:
            interface = line.strip('\n')
            self._check_counters(interface, values_rx, "rx")
            # Bug fix: the original TX loop labeled its results with the RX
            # counter names (values_rx[i]); TX counters are now labeled correctly.
            self._check_counters(interface, values_tx, "tx")
class check_gaia_disk_space(check):
    """Classify free disk space per mount point from `df -h` output."""
    page = "GAiA.0verview"
    category = "Harddisk"
    title = "Disk Space"
    isFirewall = True
    isManagement = True
    isClusterXL = False
    minVersion = 8020
    # Emits lines of the form "<available>;<mountpoint>" (df fields 4 and 6).
    command = "df -h | sed s/\ \ */\;/g | cut -d ';' -f 6,4 | awk 'NR>1 {print $1}'"
    isCommand = True

    def run_check(self):
        for line in self.commandOut:
            state = "FAIL"  # default: free space only in K or bytes
            data = str(line).strip('\n').split(";")
            if len(data) < 2:
                continue
            if "M" in data[0]:  # megabytes free
                state = "WARN"
            if "G" in data[0]:  # gigabytes free
                state = "PASS"
            if data[1] == "/boot" or data[1] == "/dev/shm":
                # Small/special filesystems are expected to have little space.
                state = "PASS"
            self.add_result(self.title + " (" + data[1] + ")", state, data[0])
class check_gaia_cpu_smt(check):
    """Report the SMT/hyperthreading state of the appliance as an INFO row."""
    page = "GAiA.0verview"
    category = "CPU"
    title = "Hyperthreading/SMT"
    isFirewall = True
    isManagement = False
    minVersion = 8020
    command = 'if [ ! -f "/proc/smt_status" ] ; then echo "Not available" ; else cat /proc/smt_status ; fi'
    isCommand = True

    def run_check(self):
        data = self.commandOut[0].strip()
        # "Unsupported" from /proc/smt_status is shown as "Disabled".
        if "Unsupported" in data:
            self.add_result(self.title, "INFO", "Disabled")
        else:
            self.add_result(self.title, "INFO", data)
class check_gaia_cpu_usage(check):
    """Report peak and average CPU load per core from the GAiA monitoring DB,
    annotated with what runs on that core (workers, NIC queues, daemons)."""
    page = "GAiA.0verview"
    category = "CPU"
    title = "CPU Usage"
    isFirewall = True
    isManagement = True
    isClusterXL = False
    minVersion = 8020
    command = "ls"
    isCommand = True

    def run_check(self):
        # On gateways, map CPUs to firewall workers / NIC queues / daemons.
        if func.isFirewall():
            out, err = func.execute_command("fw ctl affinity -l")
            affinity = out.read()
        else:
            affinity = ""
        # Peak usage per CPU.
        dbcur = func.execute_sqlite_query("select name_of_cpu,max(cpu_usage) from UM_STAT_UM_CPU_UM_CPU_ORDERED_TABLE group by name_of_cpu;")
        for row in dbcur:
            worker = ""
            nic = ""
            daemon = ""
            cpu = row[0]
            for line in affinity.split('\n'):
                # The '#' sentinel makes "CPU 1" not match "CPU 10".
                if "CPU "+str(cpu)+'#' in line +'#':
                    if "Kernel" in line:
                        if worker != "":
                            worker = worker + ", "
                        worker = worker + line.split(":")[0].replace("Kernel ", "")
                    elif "Daemon" in line:
                        daemon = "Daemon(s), "
                    else:
                        if nic != "":
                            nic = nic + ", "
                        nic = nic + line.split(":")[0]
            load = str(row[1]).split(".")[0]
            state = "PASS"
            if int(load) > 85 and nic != "":
                state = "FAIL"
            elif int(load) > 85 and nic == "":
                state = "WARN"
            if nic != "":
                nic = nic + ", "
            self.add_result(self.title + " (peak - CPU " + str(cpu) + "): " + daemon + nic + worker, state, load + "%")
        # Average usage per CPU (stricter thresholds than the peak check).
        dbcur = func.execute_sqlite_query("select name_of_cpu,avg(cpu_usage) from UM_STAT_UM_CPU_UM_CPU_ORDERED_TABLE group by name_of_cpu;")
        for row in dbcur:
            worker = ""
            nic = ""
            daemon = ""
            cpu = row[0]
            for line in affinity.split('\n'):
                if "CPU "+str(cpu)+'#' in line+'#':
                    if "Kernel" in line:
                        if worker != "":
                            worker = worker + ", "
                        worker = worker + line.split(":")[0].replace("Kernel ", "")
                    elif "Daemon" in line:
                        daemon = "Daemon(s), "
                    else:
                        if nic != "":
                            nic = nic + ", "
                        nic = nic + line.split(":")[0]
            load = str(row[1]).split(".")[0]
            state = "PASS"
            if int(load) > 50:
                state = "WARN"
            if int(load) > 50 and nic != "":
                state = "FAIL"
            if int(load) > 85 and worker != "":
                state = "FAIL"
            if nic != "":
                nic = nic + ", "
            self.add_result(self.title + " (avg - CPU " + str(cpu) + "): " + daemon + nic + worker, state, load + "%")
        dbcur.close()
class check_gaia_memory_usage(check):
    """Evaluate RAM and swap usage.

    Average/peak RAM percentages come from the UM_STAT_UM_MEMORY sqlite
    table; swap usage is sampled live via `free -g`.
    """
    page = "GAiA.0verview"
    category = "Memory"
    title = "Memory Usage"
    isFirewall = True
    isManagement = True
    isClusterXL = False
    minVersion = 8020
    command = "ls"
    isCommand = True

    def run_check(self):
        mem_total = 0
        mem_avg = 0
        mem_peak = 0
        dbcur = func.execute_sqlite_query("select max(real_total) from UM_STAT_UM_MEMORY;")
        for row in dbcur:
            mem_total = row[0]
        dbcur = func.execute_sqlite_query("select avg(real_used) from UM_STAT_UM_MEMORY;")
        for row in dbcur:
            mem_avg = row[0]
        dbcur = func.execute_sqlite_query("select max(real_used) from UM_STAT_UM_MEMORY;")
        for row in dbcur:
            mem_peak = row[0]
        dbcur.close()
        # Integer percentage via truncation of the formatted float.
        mem_avg_used = int(str(mem_avg/mem_total*100).split(".")[0])
        mem_peak_used = int(str(mem_peak/mem_total*100).split(".")[0])
        state = "PASS"
        if mem_avg_used > 70:
            state = "WARN"
        if mem_avg_used > 90:
            state = "FAIL"
        self.add_result(self.title + " (average)", state, str(mem_avg_used)+"%")
        state = "PASS"
        if mem_peak_used > 80:
            state = "WARN"
        self.add_result(self.title + " (peak)", state, str(mem_peak_used)+"%")
        # Swap row of free -g: $3 = used GiB, $4 = free GiB.
        out, err = func.execute_command("free -g | grep -i swap | awk '{print $3,$4}'")
        data = out.read().strip('\n').split(" ")
        used = data[0]
        avail = data[1]
        # Bug fix: with `free -g` a small swap area can report 0 GiB free,
        # which previously raised ZeroDivisionError.
        if int(avail) > 0:
            percent = str(int(used) / int(avail) * 100).split(".")[0]
        else:
            percent = "0" if used == "0" else "100"
        state = "WARN"
        if percent == "0":
            state = "PASS"
        self.add_result(self.title + " (swap)", state, percent + "%")
| true | true |
1c2fdfdfa6ead222afd8b8eb59e9c9e529e8da83 | 3,000 | py | Python | parton/cli.py | peterstangl/parton | a7adefee7e8372e9b046a51b263d6a06165ff098 | [
"MIT"
] | 5 | 2018-12-25T20:56:32.000Z | 2022-03-22T00:16:38.000Z | parton/cli.py | peterstangl/parton | a7adefee7e8372e9b046a51b263d6a06165ff098 | [
"MIT"
] | 1 | 2022-01-18T07:13:04.000Z | 2022-01-28T05:42:29.000Z | parton/cli.py | peterstangl/parton | a7adefee7e8372e9b046a51b263d6a06165ff098 | [
"MIT"
] | 3 | 2019-09-20T14:52:16.000Z | 2022-03-28T15:27:09.000Z | """Command line interface."""
import argparse
from fnmatch import fnmatch
from . import io
import tarfile
import logging
logging.basicConfig(level=logging.INFO)
def main(argv=None):
    """Entry point of the ``parton`` command line interface.

    Args:
        argv: list of arguments to parse (defaults to ``sys.argv[1:]``).
    """
    parser = argparse.ArgumentParser(prog='parton',
                                     description="Command line interface to download parton distribution functions.")
    subparsers = parser.add_subparsers(title='subcommands')
    defaultdir = io.data_dir()
    parser.add_argument("--listdir", default=defaultdir,
                        help="Directory where the index of PDF sets is stored (default: {}).".format(defaultdir))
    parser.add_argument("--pdfdir", default=defaultdir,
                        help="Directory where the PDF sets are stored (default: {}).".format(defaultdir))
    # 'update' subcommand: refresh the PDF set index.
    parser_update = subparsers.add_parser('update',
                                          description="Command line script to update the list of PDF sets.",
                                          help="Update the list of parton distribution functions.")
    parser_update.set_defaults(func=update)
    # 'list' subcommand: show available (or installed) PDF sets.
    parser_list = subparsers.add_parser('list',
                                        description="Command line script to list the PDF sets.",
                                        help="Show list of parton distribution functions.")
    parser_list.add_argument('--installed', action='store_true')
    parser_list.set_defaults(func=listpdf)
    # 'install' subcommand: download one or more PDF sets.
    parser_install = subparsers.add_parser('install',
                                           description="Command line script to install a PDF set.",
                                           help="Install a PDF set.")
    parser_install.add_argument('name')
    parser_install.add_argument('-y', action='store_true')
    parser_install.set_defaults(func=install)
    args = parser.parse_args(argv)
    # Without a subcommand no 'func' default was set: print the help text.
    # Bug fix: checking hasattr (instead of catching AttributeError around
    # the call) no longer masks AttributeErrors raised inside a subcommand.
    if hasattr(args, 'func'):
        args.func(args)
    else:
        parser.print_help()
def update(args):
    """Download a fresh index of available PDF sets into ``args.listdir``."""
    io.download_index(args.listdir)
def install(args):
    """Install all PDF sets matching the glob pattern ``args.name``.

    Already-installed sets are skipped; unless ``-y`` was given, the user
    is asked for confirmation before downloading.
    """
    pdfs_av = io.list_available(args.listdir)
    to_install = [pdf for pdf in pdfs_av if fnmatch(pdf, args.name)]
    if not to_install:
        print("No PDF sets matching the pattern {} found.".format(args.name))
        return
    pdfs_in = set(io.list_installed(args.pdfdir, args.listdir))
    to_install = [pdf for pdf in to_install if pdf not in pdfs_in]
    if not to_install:
        return
    print("The following PDF sets will be installed:")
    print('\n'.join(to_install))
    # Bug fix: the answer was previously used as a bare truthy string, so
    # ANY non-empty input (including "n") proceeded. Require an explicit yes.
    yes = args.y or input("Proceed? (y/n): ").strip().lower() in ("y", "yes")
    if yes:
        for pdf in to_install:
            try:
                io.download_pdfset(pdf, args.pdfdir)
            except tarfile.TarError:
                logging.error("Unable to extract archive for PDF set {}".format(pdf))
def listpdf(args):
    """Print one PDF set name per line: installed sets when ``--installed``
    was given, otherwise all sets known from the index."""
    if args.installed:
        names = io.list_installed(args.pdfdir, args.listdir)
    else:
        names = io.list_available(args.listdir)
    for name in names:
        print(name)
| 37.5 | 120 | 0.618 |
import argparse
from fnmatch import fnmatch
from . import io
import tarfile
import logging
logging.basicConfig(level=logging.INFO)
def main(argv=None):
    """Entry point of the ``parton`` command line interface.

    Args:
        argv: list of arguments to parse (defaults to ``sys.argv[1:]``).
    """
    parser = argparse.ArgumentParser(prog='parton',
                                     description="Command line interface to download parton distribution functions.")
    subparsers = parser.add_subparsers(title='subcommands')
    defaultdir = io.data_dir()
    parser.add_argument("--listdir", default=defaultdir,
                        help="Directory where the index of PDF sets is stored (default: {}).".format(defaultdir))
    parser.add_argument("--pdfdir", default=defaultdir,
                        help="Directory where the PDF sets are stored (default: {}).".format(defaultdir))
    # 'update' subcommand: refresh the PDF set index.
    parser_update = subparsers.add_parser('update',
                                          description="Command line script to update the list of PDF sets.",
                                          help="Update the list of parton distribution functions.")
    parser_update.set_defaults(func=update)
    # 'list' subcommand: show available (or installed) PDF sets.
    parser_list = subparsers.add_parser('list',
                                        description="Command line script to list the PDF sets.",
                                        help="Show list of parton distribution functions.")
    parser_list.add_argument('--installed', action='store_true')
    parser_list.set_defaults(func=listpdf)
    # 'install' subcommand: download one or more PDF sets.
    parser_install = subparsers.add_parser('install',
                                           description="Command line script to install a PDF set.",
                                           help="Install a PDF set.")
    parser_install.add_argument('name')
    parser_install.add_argument('-y', action='store_true')
    parser_install.set_defaults(func=install)
    args = parser.parse_args(argv)
    # Without a subcommand no 'func' default was set: print the help text.
    # Bug fix: checking hasattr (instead of catching AttributeError around
    # the call) no longer masks AttributeErrors raised inside a subcommand.
    if hasattr(args, 'func'):
        args.func(args)
    else:
        parser.print_help()
def update(args):
    """Download a fresh index of available PDF sets into ``args.listdir``."""
    io.download_index(args.listdir)
def install(args):
    """Install all PDF sets matching the glob pattern ``args.name``.

    Already-installed sets are skipped; unless ``-y`` was given, the user
    is asked for confirmation before downloading.
    """
    pdfs_av = io.list_available(args.listdir)
    to_install = [pdf for pdf in pdfs_av if fnmatch(pdf, args.name)]
    if not to_install:
        print("No PDF sets matching the pattern {} found.".format(args.name))
        return
    pdfs_in = set(io.list_installed(args.pdfdir, args.listdir))
    to_install = [pdf for pdf in to_install if pdf not in pdfs_in]
    if not to_install:
        return
    print("The following PDF sets will be installed:")
    print('\n'.join(to_install))
    # Bug fix: the answer was previously used as a bare truthy string, so
    # ANY non-empty input (including "n") proceeded. Require an explicit yes.
    yes = args.y or input("Proceed? (y/n): ").strip().lower() in ("y", "yes")
    if yes:
        for pdf in to_install:
            try:
                io.download_pdfset(pdf, args.pdfdir)
            except tarfile.TarError:
                logging.error("Unable to extract archive for PDF set {}".format(pdf))
def listpdf(args):
    """Print one PDF set name per line (installed sets when --installed)."""
    if args.installed:
        pdfs = io.list_installed(args.pdfdir, args.listdir)
    else:
        pdfs = io.list_available(args.listdir)
    for pdf in pdfs:
        print(pdf)
| true | true |
1c2fe051fc50416987d34e6e97aa237660a84ae8 | 5,535 | py | Python | kornia/geometry/epipolar/projection.py | FGeri/kornia | 92fa259601679031dc59c82ffe6862a1b5c8878a | [
"ECL-2.0",
"Apache-2.0"
] | 1 | 2020-06-17T16:57:14.000Z | 2020-06-17T16:57:14.000Z | kornia/geometry/epipolar/projection.py | FGeri/kornia | 92fa259601679031dc59c82ffe6862a1b5c8878a | [
"ECL-2.0",
"Apache-2.0"
] | null | null | null | kornia/geometry/epipolar/projection.py | FGeri/kornia | 92fa259601679031dc59c82ffe6862a1b5c8878a | [
"ECL-2.0",
"Apache-2.0"
] | 1 | 2022-01-26T13:39:34.000Z | 2022-01-26T13:39:34.000Z | """Module for image projections."""
from typing import Union
import torch
from kornia.geometry.epipolar import numeric
def intrinsics_like(focal: float, input: torch.Tensor) -> torch.Tensor:
    r"""Build a camera matrix with the given focal length for ``input``.

    The principal point is placed at the image center derived from the
    spatial size of ``input``.

    Args:
        focal (float): the focal length (must be positive).
        input (torch.Tensor): reference image tensor of shape :math:`(B, C, H, W)`
            determining batch size and image dimensions.

    Returns:
        torch.Tensor: camera matrix of shape :math:`(B, 3, 3)`.
    """
    assert len(input.shape) == 4, input.shape
    assert focal > 0, focal
    _, _, height, width = input.shape
    K = numeric.eye_like(3, input)
    K[..., 0, 0] *= focal
    K[..., 1, 1] *= focal
    K[..., 0, 2] += width / 2.0
    K[..., 1, 2] += height / 2.0
    return K
def random_intrinsics(low: Union[float, torch.Tensor],
                      high: Union[float, torch.Tensor]) -> torch.Tensor:
    r"""Sample a random pinhole camera matrix.

    The entries ``fx``, ``fy``, ``cx`` and ``cy`` are drawn independently
    from :math:`U(low, high)`.

    Args:
        low (Union[float, torch.Tensor]): lower range (inclusive).
        high (Union[float, torch.Tensor]): upper range (exclusive).

    Returns:
        torch.Tensor: camera matrix of shape :math:`(1, 3, 3)`.
    """
    sampler = torch.distributions.Uniform(low, high)
    fx, fy, cx, cy = (sampler.sample((1,)) for _ in range(4))
    zero = torch.zeros_like(fx)
    one = torch.ones_like(fx)
    entries = [fx, zero, cx,
               zero, fy, cy,
               zero, zero, one]
    return torch.cat(entries).view(1, 3, 3)
def scale_intrinsics(
        camera_matrix: torch.Tensor, scale_factor: Union[float, torch.Tensor]) -> torch.Tensor:
    r"""Apply an image-resize scaling to a camera intrinsics matrix.

    Focal lengths (``fx``, ``fy``) and the principal point (``cx``, ``cy``)
    are multiplied by ``scale_factor``; the input tensor is not modified.

    Args:
        camera_matrix (torch.Tensor): intrinsics of shape :math:`(B, 3, 3)`.
        scale_factor (Union[float, torch.Tensor]): the scaling factor.

    Returns:
        torch.Tensor: the scaled camera matrix, same shape as the input.
    """
    scaled = camera_matrix.clone()
    for row, col in ((0, 0), (1, 1), (0, 2), (1, 2)):
        scaled[..., row, col] *= scale_factor
    return scaled
def projection_from_KRt(K: torch.Tensor, R: torch.Tensor, t: torch.Tensor) -> torch.Tensor:
    r"""Get the projection matrix P from K, R and t.

    This function estimates the projection matrix as :math:`P = K \cdot [R|t]`.

    Args:
        K (torch.Tensor): the camera matrix with the intrinsics with shape :math:`(B, 3, 3)`.
        R (torch.Tensor): The rotation matrix with shape :math:`(B, 3, 3)`.
        t (torch.Tensor): The translation vector with shape :math:`(B, 3, 1)`.

    Returns:
        torch.Tensor: The projection matrix P with shape :math:`(B, 3, 4)`.
    """
    # Fixes: the docstring previously claimed a (B, 4, 4) result while the
    # function returned K @ Rt (B, 3, 4); the homogeneous 4x4 matrices that
    # were built with F.pad were never used and have been removed.
    assert K.shape[-2:] == (3, 3), K.shape
    assert R.shape[-2:] == (3, 3), R.shape
    assert t.shape[-2:] == (3, 1), t.shape
    assert len(K.shape) == len(R.shape) == len(t.shape)

    Rt: torch.Tensor = torch.cat([R, t], dim=-1)  # Bx3x4
    return K @ Rt
def depth(R: torch.Tensor, t: torch.Tensor, X: torch.Tensor) -> torch.Tensor:
    r"""Return the depth of 3d points after a rigid transform.

    The depth is the z-component of :math:`R X + t`.

    Args:
        R (torch.Tensor): rotation matrix of shape :math:`(*, 3, 3)`.
        t (torch.Tensor): translation vector of shape :math:`(*, 3, 1)`.
        X (torch.Tensor): 3d points of shape :math:`(*, 3)`.

    Returns:
        torch.Tensor: depth per point with shape :math:`(*, 1)`.
    """
    rotated = R @ X.transpose(-2, -1)
    return rotated[..., 2, :] + t[..., 2, :]
# adapted from:
# https://github.com/opencv/opencv_contrib/blob/master/modules/sfm/src/fundamental.cpp#L61
# https://github.com/mapillary/OpenSfM/blob/master/opensfm/multiview.py#L14
def _nullspace(A):
'''Compute the null space of A.
Return the smallest singular value and the corresponding vector.
'''
u, s, vh = torch.svd(A)
return s[..., -1], vh[..., -1]
def projections_from_fundamental(F_mat: torch.Tensor) -> torch.Tensor:
    r"""Get the projection matrices from the Fundamental Matrix.
    Args:
        F_mat (torch.Tensor): the fundamental matrix with the shape :math:`(*, 3, 3)`.
    Returns:
        torch.Tensor: The projection matrices with shape :math:`(*, 3, 4, 2)`.
    """
    assert len(F_mat.shape) >= 2, F_mat.shape
    assert F_mat.shape[-2:] == (3, 3), F_mat.shape
    R1 = numeric.eye_like(3, F_mat)  # Bx3x3
    t1 = numeric.vec_like(3, F_mat)  # Bx3
    # The second epipole is the null vector of F^T (it satisfies F^T * e2 = 0).
    Ft_mat = F_mat.transpose(-2, -1)
    _, e2 = _nullspace(Ft_mat)
    R2 = numeric.cross_product_matrix(e2) @ F_mat  # Bx3x3
    t2 = e2[..., :, None]  # Bx3x1
    P1 = torch.cat([R1, t1], dim=-1)  # Bx3x4
    P2 = torch.cat([R2, t2], dim=-1)  # Bx3x4
    # DOCFIX: the stacked [R|t] matrices are Bx3x4, so the result has shape
    # (*, 3, 4, 2) — not (*, 4, 4, 2) as previously documented.
    return torch.stack([P1, P2], dim=-1)
| 32.946429 | 106 | 0.61897 | from typing import Union
import torch
from kornia.geometry.epipolar import numeric
def intrinsics_like(focal: float, input: torch.Tensor) -> torch.Tensor:
    """Build a 3x3 camera matrix with focal length ``focal`` and the
    principal point at the center of the BxCxHxW image batch ``input``."""
    assert len(input.shape) == 4, input.shape
    assert focal > 0, focal
    _, _, height, width = input.shape
    camera_matrix = numeric.eye_like(3, input)
    camera_matrix[..., 0, 0] *= focal
    camera_matrix[..., 1, 1] *= focal
    camera_matrix[..., 0, 2] += 1. * width / 2
    camera_matrix[..., 1, 2] += 1. * height / 2
    return camera_matrix
def random_intrinsics(low: Union[float, torch.Tensor],
                      high: Union[float, torch.Tensor]) -> torch.Tensor:
    """Generate a random 1x3x3 camera matrix whose focal lengths (fx, fy)
    and principal point (cx, cy) are drawn uniformly from [low, high)."""
    dist = torch.distributions.Uniform(low, high)
    fx, fy, cx, cy = (dist.sample((1,)) for _ in range(4))
    zero = torch.zeros_like(fx)
    one = torch.ones_like(fx)
    flat_entries = torch.cat([
        fx, zero, cx,
        zero, fy, cy,
        zero, zero, one,
    ])
    return flat_entries.view(1, 3, 3)
def scale_intrinsics(
        camera_matrix: torch.Tensor, scale_factor: Union[float, torch.Tensor]) -> torch.Tensor:
    """Return a copy of ``camera_matrix`` with the focal lengths and the
    principal point multiplied by ``scale_factor`` (input left untouched)."""
    scaled = camera_matrix.clone()
    # fx, fy, cx, cy live at (0,0), (1,1), (0,2), (1,2) of the 3x3 matrix.
    for row, col in ((0, 0), (1, 1), (0, 2), (1, 2)):
        scaled[..., row, col] *= scale_factor
    return scaled
def projection_from_KRt(K: torch.Tensor, R: torch.Tensor, t: torch.Tensor) -> torch.Tensor:
    """Compose the homogeneous (B, 4, 4) projection matrix P = K_h @ [R|t]_h
    from intrinsics K (B, 3, 3), rotation R (B, 3, 3) and translation t (B, 3, 1)."""
    assert K.shape[-2:] == (3, 3), K.shape
    assert R.shape[-2:] == (3, 3), R.shape
    assert t.shape[-2:] == (3, 1), t.shape
    assert len(K.shape) == len(R.shape) == len(t.shape)
    Rt: torch.Tensor = torch.cat([R, t], dim=-1)  # Bx3x4
    # Homogenize both factors to 4x4 by appending a [0, 0, 0, 1] row / column.
    Rt_h = torch.nn.functional.pad(Rt, [0, 0, 0, 1], "constant", 0.)
    Rt_h[..., -1, -1] += 1.
    K_h: torch.Tensor = torch.nn.functional.pad(K, [0, 1, 0, 1], "constant", 0.)
    K_h[..., -1, -1] += 1.
    # BUGFIX: previously returned ``K @ Rt`` (Bx3x4), which left K_h and Rt_h
    # unused dead code; the homogeneous product is the intended result.
    return K_h @ Rt_h
def depth(R: torch.Tensor, t: torch.Tensor, X: torch.Tensor) -> torch.Tensor:
    """Depth (z-coordinate) of the points ``X`` under the rigid transform (R, t)."""
    z_after_rotation = (R @ X.transpose(-2, -1))[..., 2, :]
    return z_after_rotation + t[..., 2, :]
_nullspace(A):
u, s, vh = torch.svd(A)
return s[..., -1], vh[..., -1]
def projections_from_fundamental(F_mat: torch.Tensor) -> torch.Tensor:
    """Recover a canonical pair of camera projection matrices, stacked along
    the last axis, from a fundamental matrix of shape (*, 3, 3)."""
    assert len(F_mat.shape) >= 2, F_mat.shape
    assert F_mat.shape[-2:] == (3, 3), F_mat.shape
    # First camera: canonical [I | 0].
    eye3 = numeric.eye_like(3, F_mat)
    zero_vec = numeric.vec_like(3, F_mat)
    # Second epipole: null vector of F^T.
    _, epipole2 = _nullspace(F_mat.transpose(-2, -1))
    rot2 = numeric.cross_product_matrix(epipole2) @ F_mat
    trans2 = epipole2[..., :, None]
    P1 = torch.cat([eye3, zero_vec], dim=-1)
    P2 = torch.cat([rot2, trans2], dim=-1)
    return torch.stack([P1, P2], dim=-1)
| true | true |
1c2fe086337950aa673e79642215c2f1a374b0ea | 30,570 | py | Python | app/grandchallenge/evaluation/migrations/0001_initial.py | kaczmarj/grand-challenge.org | 8dc8a2170e51072354f7e94f2a22578805a67b94 | [
"Apache-2.0"
] | 7 | 2016-11-05T07:16:30.000Z | 2017-11-23T03:38:03.000Z | app/grandchallenge/evaluation/migrations/0001_initial.py | kaczmarj/grand-challenge.org | 8dc8a2170e51072354f7e94f2a22578805a67b94 | [
"Apache-2.0"
] | 113 | 2015-05-26T09:27:59.000Z | 2018-03-21T10:45:56.000Z | app/grandchallenge/evaluation/migrations/0001_initial.py | kaczmarj/grand-challenge.org | 8dc8a2170e51072354f7e94f2a22578805a67b94 | [
"Apache-2.0"
] | 7 | 2015-07-16T20:11:22.000Z | 2017-06-06T02:41:24.000Z | # Generated by Django 3.1.1 on 2020-12-02 13:26
import uuid
from decimal import Decimal
import django.core.validators
import django.db.models.deletion
import django_extensions.db.fields
from django.conf import settings
from django.db import migrations, models
import grandchallenge.components.models
import grandchallenge.core.storage
import grandchallenge.core.validators
import grandchallenge.evaluation.models
class Migration(migrations.Migration):
    """Initial schema for the evaluation app.

    Auto-generated by Django (makemigrations); creates the Phase, Submission,
    Method, Evaluation and AlgorithmEvaluation models. Field definitions
    should not be hand-edited — make model changes and generate a follow-up
    migration instead.
    """

    initial = True
    # Apps whose initial migrations must be applied first (FK/M2M targets).
    dependencies = [
        ("algorithms", "0001_initial"),
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
        ("challenges", "0001_initial"),
        ("components", "0001_initial"),
        ("archives", "0001_initial"),
    ]
    operations = [
        # Phase: per-challenge configuration for one submission/evaluation
        # phase (scoring, leaderboard display, submission limits and windows).
        migrations.CreateModel(
            name="Phase",
            fields=[
                (
                    "id",
                    models.UUIDField(
                        default=uuid.uuid4,
                        editable=False,
                        primary_key=True,
                        serialize=False,
                    ),
                ),
                ("created", models.DateTimeField(auto_now_add=True)),
                ("modified", models.DateTimeField(auto_now=True)),
                (
                    "title",
                    models.CharField(
                        default="Challenge",
                        help_text="The title of this phase.",
                        max_length=64,
                    ),
                ),
                (
                    "slug",
                    django_extensions.db.fields.AutoSlugField(
                        blank=True,
                        editable=False,
                        max_length=64,
                        populate_from="title",
                    ),
                ),
                (
                    "score_title",
                    models.CharField(
                        default="Score",
                        help_text="The name that will be displayed for the scores column, for instance: Score (log-loss)",
                        max_length=32,
                    ),
                ),
                (
                    "score_jsonpath",
                    models.CharField(
                        blank=True,
                        help_text="The jsonpath of the field in metrics.json that will be used for the overall scores on the results page. See http://goessner.net/articles/JsonPath/ for syntax. For example: dice.mean",
                        max_length=255,
                    ),
                ),
                (
                    "score_error_jsonpath",
                    models.CharField(
                        blank=True,
                        help_text="The jsonpath for the field in metrics.json that contains the error of the score, eg: dice.std",
                        max_length=255,
                    ),
                ),
                (
                    "score_default_sort",
                    models.CharField(
                        choices=[("asc", "Ascending"), ("desc", "Descending")],
                        default="desc",
                        help_text="The default sorting to use for the scores on the results page.",
                        max_length=4,
                    ),
                ),
                (
                    "score_decimal_places",
                    models.PositiveSmallIntegerField(
                        default=4,
                        help_text="The number of decimal places to display for the score",
                    ),
                ),
                (
                    # JSON list validated against an inline JSON-Schema: each
                    # entry needs "title", "path" and "order" ("asc"/"desc").
                    "extra_results_columns",
                    models.JSONField(
                        blank=True,
                        default=list,
                        help_text="A JSON object that contains the extra columns from metrics.json that will be displayed on the results page. ",
                        validators=[
                            grandchallenge.core.validators.JSONValidator(
                                schema={
                                    "$schema": "http://json-schema.org/draft-06/schema#",
                                    "definitions": {},
                                    "items": {
                                        "$id": "#/items",
                                        "additionalProperties": False,
                                        "properties": {
                                            "error_path": {
                                                "$id": "#/items/properties/error_path",
                                                "default": "",
                                                "examples": [
                                                    "aggregates.dice.std"
                                                ],
                                                "pattern": "^(.*)$",
                                                "title": "The Error Path Schema",
                                                "type": "string",
                                            },
                                            "order": {
                                                "$id": "#/items/properties/order",
                                                "default": "",
                                                "enum": ["asc", "desc"],
                                                "examples": ["asc"],
                                                "pattern": "^(asc|desc)$",
                                                "title": "The Order Schema",
                                                "type": "string",
                                            },
                                            "path": {
                                                "$id": "#/items/properties/path",
                                                "default": "",
                                                "examples": [
                                                    "aggregates.dice.mean"
                                                ],
                                                "pattern": "^(.*)$",
                                                "title": "The Path Schema",
                                                "type": "string",
                                            },
                                            "title": {
                                                "$id": "#/items/properties/title",
                                                "default": "",
                                                "examples": ["Mean Dice"],
                                                "pattern": "^(.*)$",
                                                "title": "The Title Schema",
                                                "type": "string",
                                            },
                                        },
                                        "required": ["title", "path", "order"],
                                        "title": "The Items Schema",
                                        "type": "object",
                                    },
                                    "title": "The Extra Results Columns Schema",
                                    "type": "array",
                                }
                            )
                        ],
                    ),
                ),
                (
                    "scoring_method_choice",
                    models.CharField(
                        choices=[
                            (
                                "abs",
                                "Use the absolute value of the score column",
                            ),
                            (
                                "avg",
                                "Use the mean of the relative ranks of the score and extra result columns",
                            ),
                            (
                                "med",
                                "Use the median of the relative ranks of the score and extra result columns",
                            ),
                        ],
                        default="abs",
                        help_text="How should the rank of each result be calculated?",
                        max_length=3,
                    ),
                ),
                (
                    "result_display_choice",
                    models.CharField(
                        choices=[
                            ("all", "Display all results"),
                            (
                                "rec",
                                "Only display each users most recent result",
                            ),
                            ("bst", "Only display each users best result"),
                        ],
                        default="all",
                        help_text="Which results should be displayed on the leaderboard?",
                        max_length=3,
                    ),
                ),
                (
                    "submission_kind",
                    models.PositiveSmallIntegerField(
                        choices=[(1, "CSV"), (2, "ZIP"), (3, "Algorithm")],
                        default=1,
                        help_text="Should participants submit a .csv/.zip file of predictions, or an algorithm?",
                    ),
                ),
                (
                    "allow_submission_comments",
                    models.BooleanField(
                        default=False,
                        help_text="Allow users to submit comments as part of their submission.",
                    ),
                ),
                (
                    "display_submission_comments",
                    models.BooleanField(
                        default=False,
                        help_text="If true, submission comments are shown on the results page.",
                    ),
                ),
                (
                    "supplementary_file_choice",
                    models.CharField(
                        choices=[
                            ("off", "Off"),
                            ("opt", "Optional"),
                            ("req", "Required"),
                        ],
                        default="off",
                        help_text="Show a supplementary file field on the submissions page so that users can upload an additional file along with their predictions file as part of their submission (eg, include a pdf description of their method). Off turns this feature off, Optional means that including the file is optional for the user, Required means that the user must upload a supplementary file.",
                        max_length=3,
                    ),
                ),
                (
                    "supplementary_file_label",
                    models.CharField(
                        blank=True,
                        default="Supplementary File",
                        help_text="The label that will be used on the submission and results page for the supplementary file. For example: Algorithm Description.",
                        max_length=32,
                    ),
                ),
                (
                    "supplementary_file_help_text",
                    models.CharField(
                        blank=True,
                        default="",
                        help_text='The help text to include on the submissions page to describe the submissions file. Eg: "A PDF description of the method.".',
                        max_length=128,
                    ),
                ),
                (
                    "show_supplementary_file_link",
                    models.BooleanField(
                        default=False,
                        help_text="Show a link to download the supplementary file on the results page.",
                    ),
                ),
                (
                    "publication_url_choice",
                    models.CharField(
                        choices=[
                            ("off", "Off"),
                            ("opt", "Optional"),
                            ("req", "Required"),
                        ],
                        default="off",
                        help_text="Show a supplementary url field on the submission page so that users can submit a link to a publication that corresponds to their submission. Off turns this feature off, Optional means that including the url is optional for the user, Required means that the user must provide an url.",
                        max_length=3,
                    ),
                ),
                (
                    "show_publication_url",
                    models.BooleanField(
                        default=False,
                        help_text="Show a link to the supplementary url on the results page",
                    ),
                ),
                (
                    # Despite the name, the window is the "submission limit
                    # period" per the help text; 0 closes submissions.
                    "daily_submission_limit",
                    models.PositiveIntegerField(
                        default=10,
                        help_text="The limit on the number of times that a user can make a submission over the submission limit period. Set this to 0 to close submissions for this phase.",
                    ),
                ),
                (
                    "submissions_open",
                    models.DateTimeField(
                        blank=True,
                        help_text="If set, participants will not be able to make submissions to this phase before this time.",
                        null=True,
                    ),
                ),
                (
                    "submissions_close",
                    models.DateTimeField(
                        blank=True,
                        help_text="If set, participants will not be able to make submissions to this phase after this time.",
                        null=True,
                    ),
                ),
                (
                    "submission_page_html",
                    models.TextField(
                        blank=True,
                        help_text="HTML to include on the submission page for this challenge.",
                    ),
                ),
                (
                    "auto_publish_new_results",
                    models.BooleanField(
                        default=True,
                        help_text="If true, new results are automatically made public. If false, the challenge administrator must manually publish each new result.",
                    ),
                ),
                (
                    "display_all_metrics",
                    models.BooleanField(
                        default=True,
                        help_text="Should all of the metrics be displayed on the Result detail page?",
                    ),
                ),
                (
                    "evaluation_detail_observable_url",
                    models.URLField(
                        blank=True,
                        help_text="The URL of the embeddable observable notebook for viewing individual results. Must be of the form https://observablehq.com/embed/@user/notebook?cell=...",
                        max_length=2000,
                        validators=[
                            django.core.validators.RegexValidator(
                                "^https\\:\\/\\/observablehq\\.com\\/embed\\/\\@[^\\/]+\\/[^\\?\\.]+\\?cell\\=.*$",
                                "URL must be of the form https://observablehq.com/embed/@user/notebook?cell=*",
                            )
                        ],
                    ),
                ),
                (
                    "evaluation_comparison_observable_url",
                    models.URLField(
                        blank=True,
                        help_text="The URL of the embeddable observable notebook for comparingresults. Must be of the form https://observablehq.com/embed/@user/notebook?cell=...",
                        max_length=2000,
                        validators=[
                            django.core.validators.RegexValidator(
                                "^https\\:\\/\\/observablehq\\.com\\/embed\\/\\@[^\\/]+\\/[^\\?\\.]+\\?cell\\=.*$",
                                "URL must be of the form https://observablehq.com/embed/@user/notebook?cell=*",
                            )
                        ],
                    ),
                ),
                (
                    "archive",
                    models.ForeignKey(
                        blank=True,
                        help_text="Which archive should be used as the source dataset for this phase?",
                        null=True,
                        on_delete=django.db.models.deletion.SET_NULL,
                        to="archives.archive",
                    ),
                ),
                (
                    "challenge",
                    models.ForeignKey(
                        editable=False,
                        on_delete=django.db.models.deletion.CASCADE,
                        to="challenges.challenge",
                    ),
                ),
                (
                    "inputs",
                    models.ManyToManyField(
                        related_name="evaluation_inputs",
                        to="components.ComponentInterface",
                    ),
                ),
                (
                    "outputs",
                    models.ManyToManyField(
                        related_name="evaluation_outputs",
                        to="components.ComponentInterface",
                    ),
                ),
            ],
            options={
                "ordering": ("challenge", "submissions_open", "created"),
                "permissions": (
                    ("create_phase_submission", "Create Phase Submission"),
                ),
                "unique_together": {
                    ("challenge", "slug"),
                    ("challenge", "title"),
                },
            },
        ),
        # Submission: a user's predictions upload (.csv/.zip file and/or an
        # algorithm image) for a phase, plus optional supplementary material.
        migrations.CreateModel(
            name="Submission",
            fields=[
                (
                    "id",
                    models.UUIDField(
                        default=uuid.uuid4,
                        editable=False,
                        primary_key=True,
                        serialize=False,
                    ),
                ),
                ("created", models.DateTimeField(auto_now_add=True)),
                ("modified", models.DateTimeField(auto_now=True)),
                (
                    "creators_ip",
                    models.GenericIPAddressField(
                        default=None, editable=False, null=True
                    ),
                ),
                (
                    "creators_user_agent",
                    models.TextField(blank=True, default="", editable=False),
                ),
                (
                    "predictions_file",
                    models.FileField(
                        blank=True,
                        storage=grandchallenge.core.storage.ProtectedS3Storage(),
                        upload_to=grandchallenge.evaluation.models.submission_file_path,
                        validators=[
                            grandchallenge.core.validators.MimeTypeValidator(
                                allowed_types=("application/zip", "text/plain")
                            ),
                            grandchallenge.core.validators.ExtensionValidator(
                                allowed_extensions=(".zip", ".csv")
                            ),
                        ],
                    ),
                ),
                (
                    "supplementary_file",
                    models.FileField(
                        blank=True,
                        storage=grandchallenge.core.storage.PublicS3Storage(),
                        upload_to=grandchallenge.evaluation.models.submission_supplementary_file_path,
                        validators=[
                            grandchallenge.core.validators.MimeTypeValidator(
                                allowed_types=("text/plain", "application/pdf")
                            )
                        ],
                    ),
                ),
                (
                    "comment",
                    models.CharField(
                        blank=True,
                        default="",
                        help_text="You can add a comment here to help you keep track of your submissions.",
                        max_length=128,
                    ),
                ),
                (
                    "publication_url",
                    models.URLField(
                        blank=True,
                        help_text="A URL associated with this submission.",
                    ),
                ),
                (
                    "algorithm_image",
                    models.ForeignKey(
                        null=True,
                        on_delete=django.db.models.deletion.SET_NULL,
                        to="algorithms.algorithmimage",
                    ),
                ),
                (
                    "creator",
                    models.ForeignKey(
                        null=True,
                        on_delete=django.db.models.deletion.SET_NULL,
                        to=settings.AUTH_USER_MODEL,
                    ),
                ),
                (
                    "phase",
                    models.ForeignKey(
                        null=True,
                        on_delete=django.db.models.deletion.CASCADE,
                        to="evaluation.phase",
                    ),
                ),
            ],
            options={
                "unique_together": {
                    ("phase", "predictions_file", "algorithm_image")
                }
            },
        ),
        # Method: the Docker container image (tar archive on private S3) that
        # evaluates submissions for a phase, with its resource requirements.
        migrations.CreateModel(
            name="Method",
            fields=[
                (
                    "id",
                    models.UUIDField(
                        default=uuid.uuid4,
                        editable=False,
                        primary_key=True,
                        serialize=False,
                    ),
                ),
                ("created", models.DateTimeField(auto_now_add=True)),
                ("modified", models.DateTimeField(auto_now=True)),
                (
                    "staged_image_uuid",
                    models.UUIDField(blank=True, editable=False, null=True),
                ),
                (
                    "image",
                    models.FileField(
                        blank=True,
                        help_text=".tar.xz archive of the container image produced from the command 'docker save IMAGE | xz -c > IMAGE.tar.xz'. See https://docs.docker.com/engine/reference/commandline/save/",
                        storage=grandchallenge.core.storage.PrivateS3Storage(),
                        upload_to=grandchallenge.components.models.docker_image_path,
                        validators=[
                            grandchallenge.core.validators.ExtensionValidator(
                                allowed_extensions=(
                                    ".tar",
                                    ".tar.gz",
                                    ".tar.xz",
                                )
                            )
                        ],
                    ),
                ),
                (
                    "image_sha256",
                    models.CharField(editable=False, max_length=71),
                ),
                (
                    "ready",
                    models.BooleanField(
                        default=False,
                        editable=False,
                        help_text="Is this image ready to be used?",
                    ),
                ),
                ("status", models.TextField(editable=False)),
                ("requires_gpu", models.BooleanField(default=False)),
                (
                    "requires_gpu_memory_gb",
                    models.PositiveIntegerField(default=4),
                ),
                ("requires_memory_gb", models.PositiveIntegerField(default=4)),
                (
                    "requires_cpu_cores",
                    models.DecimalField(
                        decimal_places=2, default=Decimal("1.0"), max_digits=4
                    ),
                ),
                (
                    "creator",
                    models.ForeignKey(
                        null=True,
                        on_delete=django.db.models.deletion.SET_NULL,
                        to=settings.AUTH_USER_MODEL,
                    ),
                ),
                (
                    "phase",
                    models.ForeignKey(
                        null=True,
                        on_delete=django.db.models.deletion.CASCADE,
                        to="evaluation.phase",
                    ),
                ),
            ],
            options={"abstract": False},
        ),
        # Evaluation: one run of a Method over a Submission, with job status,
        # logs, component inputs/outputs, and leaderboard rank fields.
        migrations.CreateModel(
            name="Evaluation",
            fields=[
                (
                    "id",
                    models.UUIDField(
                        default=uuid.uuid4,
                        editable=False,
                        primary_key=True,
                        serialize=False,
                    ),
                ),
                ("created", models.DateTimeField(auto_now_add=True)),
                ("modified", models.DateTimeField(auto_now=True)),
                (
                    "status",
                    models.PositiveSmallIntegerField(
                        choices=[
                            (0, "Queued"),
                            (1, "Started"),
                            (2, "Re-Queued"),
                            (3, "Failed"),
                            (4, "Succeeded"),
                            (5, "Cancelled"),
                        ],
                        default=0,
                    ),
                ),
                ("stdout", models.TextField()),
                ("stderr", models.TextField(default="")),
                (
                    "error_message",
                    models.CharField(default="", max_length=1024),
                ),
                ("started_at", models.DateTimeField(null=True)),
                ("completed_at", models.DateTimeField(null=True)),
                ("published", models.BooleanField(default=True)),
                (
                    "rank",
                    models.PositiveIntegerField(
                        default=0,
                        help_text="The position of this result on the leaderboard. If the value is zero, then the result is unranked.",
                    ),
                ),
                ("rank_score", models.FloatField(default=0.0)),
                ("rank_per_metric", models.JSONField(default=dict)),
                (
                    "inputs",
                    models.ManyToManyField(
                        related_name="evaluation_evaluations_as_input",
                        to="components.ComponentInterfaceValue",
                    ),
                ),
                (
                    "method",
                    models.ForeignKey(
                        on_delete=django.db.models.deletion.CASCADE,
                        to="evaluation.method",
                    ),
                ),
                (
                    "outputs",
                    models.ManyToManyField(
                        related_name="evaluation_evaluations_as_output",
                        to="components.ComponentInterfaceValue",
                    ),
                ),
                (
                    "submission",
                    models.ForeignKey(
                        on_delete=django.db.models.deletion.CASCADE,
                        to="evaluation.submission",
                    ),
                ),
            ],
            options={"abstract": False},
        ),
        # AlgorithmEvaluation: job status/logs and component inputs/outputs
        # tied to a submission (presumably one job per algorithm execution —
        # NOTE(review): confirm against the evaluation app's models/tasks).
        migrations.CreateModel(
            name="AlgorithmEvaluation",
            fields=[
                (
                    "status",
                    models.PositiveSmallIntegerField(
                        choices=[
                            (0, "Queued"),
                            (1, "Started"),
                            (2, "Re-Queued"),
                            (3, "Failed"),
                            (4, "Succeeded"),
                            (5, "Cancelled"),
                        ],
                        default=0,
                    ),
                ),
                ("stdout", models.TextField()),
                ("stderr", models.TextField(default="")),
                (
                    "error_message",
                    models.CharField(default="", max_length=1024),
                ),
                ("started_at", models.DateTimeField(null=True)),
                ("completed_at", models.DateTimeField(null=True)),
                ("id", models.BigAutoField(primary_key=True, serialize=False)),
                ("created", models.DateTimeField(auto_now_add=True)),
                ("modified", models.DateTimeField(auto_now=True)),
                (
                    "inputs",
                    models.ManyToManyField(
                        related_name="evaluation_algorithmevaluations_as_input",
                        to="components.ComponentInterfaceValue",
                    ),
                ),
                (
                    "outputs",
                    models.ManyToManyField(
                        related_name="evaluation_algorithmevaluations_as_output",
                        to="components.ComponentInterfaceValue",
                    ),
                ),
                (
                    "submission",
                    models.ForeignKey(
                        on_delete=django.db.models.deletion.CASCADE,
                        to="evaluation.submission",
                    ),
                ),
            ],
            options={"abstract": False},
        ),
    ]
| 43.056338 | 403 | 0.367648 |
import uuid
from decimal import Decimal
import django.core.validators
import django.db.models.deletion
import django_extensions.db.fields
from django.conf import settings
from django.db import migrations, models
import grandchallenge.components.models
import grandchallenge.core.storage
import grandchallenge.core.validators
import grandchallenge.evaluation.models
class Migration(migrations.Migration):
initial = True
dependencies = [
("algorithms", "0001_initial"),
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
("challenges", "0001_initial"),
("components", "0001_initial"),
("archives", "0001_initial"),
]
operations = [
migrations.CreateModel(
name="Phase",
fields=[
(
"id",
models.UUIDField(
default=uuid.uuid4,
editable=False,
primary_key=True,
serialize=False,
),
),
("created", models.DateTimeField(auto_now_add=True)),
("modified", models.DateTimeField(auto_now=True)),
(
"title",
models.CharField(
default="Challenge",
help_text="The title of this phase.",
max_length=64,
),
),
(
"slug",
django_extensions.db.fields.AutoSlugField(
blank=True,
editable=False,
max_length=64,
populate_from="title",
),
),
(
"score_title",
models.CharField(
default="Score",
help_text="The name that will be displayed for the scores column, for instance: Score (log-loss)",
max_length=32,
),
),
(
"score_jsonpath",
models.CharField(
blank=True,
help_text="The jsonpath of the field in metrics.json that will be used for the overall scores on the results page. See http://goessner.net/articles/JsonPath/ for syntax. For example: dice.mean",
max_length=255,
),
),
(
"score_error_jsonpath",
models.CharField(
blank=True,
help_text="The jsonpath for the field in metrics.json that contains the error of the score, eg: dice.std",
max_length=255,
),
),
(
"score_default_sort",
models.CharField(
choices=[("asc", "Ascending"), ("desc", "Descending")],
default="desc",
help_text="The default sorting to use for the scores on the results page.",
max_length=4,
),
),
(
"score_decimal_places",
models.PositiveSmallIntegerField(
default=4,
help_text="The number of decimal places to display for the score",
),
),
(
"extra_results_columns",
models.JSONField(
blank=True,
default=list,
help_text="A JSON object that contains the extra columns from metrics.json that will be displayed on the results page. ",
validators=[
grandchallenge.core.validators.JSONValidator(
schema={
"$schema": "http://json-schema.org/draft-06/schema#",
"definitions": {},
"items": {
"$id": "#/items",
"additionalProperties": False,
"properties": {
"error_path": {
"$id": "#/items/properties/error_path",
"default": "",
"examples": [
"aggregates.dice.std"
],
"pattern": "^(.*)$",
"title": "The Error Path Schema",
"type": "string",
},
"order": {
"$id": "#/items/properties/order",
"default": "",
"enum": ["asc", "desc"],
"examples": ["asc"],
"pattern": "^(asc|desc)$",
"title": "The Order Schema",
"type": "string",
},
"path": {
"$id": "#/items/properties/path",
"default": "",
"examples": [
"aggregates.dice.mean"
],
"pattern": "^(.*)$",
"title": "The Path Schema",
"type": "string",
},
"title": {
"$id": "#/items/properties/title",
"default": "",
"examples": ["Mean Dice"],
"pattern": "^(.*)$",
"title": "The Title Schema",
"type": "string",
},
},
"required": ["title", "path", "order"],
"title": "The Items Schema",
"type": "object",
},
"title": "The Extra Results Columns Schema",
"type": "array",
}
)
],
),
),
(
"scoring_method_choice",
models.CharField(
choices=[
(
"abs",
"Use the absolute value of the score column",
),
(
"avg",
"Use the mean of the relative ranks of the score and extra result columns",
),
(
"med",
"Use the median of the relative ranks of the score and extra result columns",
),
],
default="abs",
help_text="How should the rank of each result be calculated?",
max_length=3,
),
),
(
"result_display_choice",
models.CharField(
choices=[
("all", "Display all results"),
(
"rec",
"Only display each users most recent result",
),
("bst", "Only display each users best result"),
],
default="all",
help_text="Which results should be displayed on the leaderboard?",
max_length=3,
),
),
(
"submission_kind",
models.PositiveSmallIntegerField(
choices=[(1, "CSV"), (2, "ZIP"), (3, "Algorithm")],
default=1,
help_text="Should participants submit a .csv/.zip file of predictions, or an algorithm?",
),
),
(
"allow_submission_comments",
models.BooleanField(
default=False,
help_text="Allow users to submit comments as part of their submission.",
),
),
(
"display_submission_comments",
models.BooleanField(
default=False,
help_text="If true, submission comments are shown on the results page.",
),
),
(
"supplementary_file_choice",
models.CharField(
choices=[
("off", "Off"),
("opt", "Optional"),
("req", "Required"),
],
default="off",
help_text="Show a supplementary file field on the submissions page so that users can upload an additional file along with their predictions file as part of their submission (eg, include a pdf description of their method). Off turns this feature off, Optional means that including the file is optional for the user, Required means that the user must upload a supplementary file.",
max_length=3,
),
),
(
"supplementary_file_label",
models.CharField(
blank=True,
default="Supplementary File",
help_text="The label that will be used on the submission and results page for the supplementary file. For example: Algorithm Description.",
max_length=32,
),
),
(
"supplementary_file_help_text",
models.CharField(
blank=True,
default="",
help_text='The help text to include on the submissions page to describe the submissions file. Eg: "A PDF description of the method.".',
max_length=128,
),
),
(
"show_supplementary_file_link",
models.BooleanField(
default=False,
help_text="Show a link to download the supplementary file on the results page.",
),
),
(
"publication_url_choice",
models.CharField(
choices=[
("off", "Off"),
("opt", "Optional"),
("req", "Required"),
],
default="off",
help_text="Show a supplementary url field on the submission page so that users can submit a link to a publication that corresponds to their submission. Off turns this feature off, Optional means that including the url is optional for the user, Required means that the user must provide an url.",
max_length=3,
),
),
(
"show_publication_url",
models.BooleanField(
default=False,
help_text="Show a link to the supplementary url on the results page",
),
),
(
"daily_submission_limit",
models.PositiveIntegerField(
default=10,
help_text="The limit on the number of times that a user can make a submission over the submission limit period. Set this to 0 to close submissions for this phase.",
),
),
(
"submissions_open",
models.DateTimeField(
blank=True,
help_text="If set, participants will not be able to make submissions to this phase before this time.",
null=True,
),
),
(
"submissions_close",
models.DateTimeField(
blank=True,
help_text="If set, participants will not be able to make submissions to this phase after this time.",
null=True,
),
),
(
"submission_page_html",
models.TextField(
blank=True,
help_text="HTML to include on the submission page for this challenge.",
),
),
(
"auto_publish_new_results",
models.BooleanField(
default=True,
help_text="If true, new results are automatically made public. If false, the challenge administrator must manually publish each new result.",
),
),
(
"display_all_metrics",
models.BooleanField(
default=True,
help_text="Should all of the metrics be displayed on the Result detail page?",
),
),
(
"evaluation_detail_observable_url",
models.URLField(
blank=True,
help_text="The URL of the embeddable observable notebook for viewing individual results. Must be of the form https://observablehq.com/embed/@user/notebook?cell=...",
max_length=2000,
validators=[
django.core.validators.RegexValidator(
"^https\\:\\/\\/observablehq\\.com\\/embed\\/\\@[^\\/]+\\/[^\\?\\.]+\\?cell\\=.*$",
"URL must be of the form https://observablehq.com/embed/@user/notebook?cell=*",
)
],
),
),
(
"evaluation_comparison_observable_url",
models.URLField(
blank=True,
help_text="The URL of the embeddable observable notebook for comparingresults. Must be of the form https://observablehq.com/embed/@user/notebook?cell=...",
max_length=2000,
validators=[
django.core.validators.RegexValidator(
"^https\\:\\/\\/observablehq\\.com\\/embed\\/\\@[^\\/]+\\/[^\\?\\.]+\\?cell\\=.*$",
"URL must be of the form https://observablehq.com/embed/@user/notebook?cell=*",
)
],
),
),
(
"archive",
models.ForeignKey(
blank=True,
help_text="Which archive should be used as the source dataset for this phase?",
null=True,
on_delete=django.db.models.deletion.SET_NULL,
to="archives.archive",
),
),
(
"challenge",
models.ForeignKey(
editable=False,
on_delete=django.db.models.deletion.CASCADE,
to="challenges.challenge",
),
),
(
"inputs",
models.ManyToManyField(
related_name="evaluation_inputs",
to="components.ComponentInterface",
),
),
(
"outputs",
models.ManyToManyField(
related_name="evaluation_outputs",
to="components.ComponentInterface",
),
),
],
options={
"ordering": ("challenge", "submissions_open", "created"),
"permissions": (
("create_phase_submission", "Create Phase Submission"),
),
"unique_together": {
("challenge", "slug"),
("challenge", "title"),
},
},
),
migrations.CreateModel(
name="Submission",
fields=[
(
"id",
models.UUIDField(
default=uuid.uuid4,
editable=False,
primary_key=True,
serialize=False,
),
),
("created", models.DateTimeField(auto_now_add=True)),
("modified", models.DateTimeField(auto_now=True)),
(
"creators_ip",
models.GenericIPAddressField(
default=None, editable=False, null=True
),
),
(
"creators_user_agent",
models.TextField(blank=True, default="", editable=False),
),
(
"predictions_file",
models.FileField(
blank=True,
storage=grandchallenge.core.storage.ProtectedS3Storage(),
upload_to=grandchallenge.evaluation.models.submission_file_path,
validators=[
grandchallenge.core.validators.MimeTypeValidator(
allowed_types=("application/zip", "text/plain")
),
grandchallenge.core.validators.ExtensionValidator(
allowed_extensions=(".zip", ".csv")
),
],
),
),
(
"supplementary_file",
models.FileField(
blank=True,
storage=grandchallenge.core.storage.PublicS3Storage(),
upload_to=grandchallenge.evaluation.models.submission_supplementary_file_path,
validators=[
grandchallenge.core.validators.MimeTypeValidator(
allowed_types=("text/plain", "application/pdf")
)
],
),
),
(
"comment",
models.CharField(
blank=True,
default="",
help_text="You can add a comment here to help you keep track of your submissions.",
max_length=128,
),
),
(
"publication_url",
models.URLField(
blank=True,
help_text="A URL associated with this submission.",
),
),
(
"algorithm_image",
models.ForeignKey(
null=True,
on_delete=django.db.models.deletion.SET_NULL,
to="algorithms.algorithmimage",
),
),
(
"creator",
models.ForeignKey(
null=True,
on_delete=django.db.models.deletion.SET_NULL,
to=settings.AUTH_USER_MODEL,
),
),
(
"phase",
models.ForeignKey(
null=True,
on_delete=django.db.models.deletion.CASCADE,
to="evaluation.phase",
),
),
],
options={
"unique_together": {
("phase", "predictions_file", "algorithm_image")
}
},
),
migrations.CreateModel(
name="Method",
fields=[
(
"id",
models.UUIDField(
default=uuid.uuid4,
editable=False,
primary_key=True,
serialize=False,
),
),
("created", models.DateTimeField(auto_now_add=True)),
("modified", models.DateTimeField(auto_now=True)),
(
"staged_image_uuid",
models.UUIDField(blank=True, editable=False, null=True),
),
(
"image",
models.FileField(
blank=True,
help_text=".tar.xz archive of the container image produced from the command 'docker save IMAGE | xz -c > IMAGE.tar.xz'. See https://docs.docker.com/engine/reference/commandline/save/",
storage=grandchallenge.core.storage.PrivateS3Storage(),
upload_to=grandchallenge.components.models.docker_image_path,
validators=[
grandchallenge.core.validators.ExtensionValidator(
allowed_extensions=(
".tar",
".tar.gz",
".tar.xz",
)
)
],
),
),
(
"image_sha256",
models.CharField(editable=False, max_length=71),
),
(
"ready",
models.BooleanField(
default=False,
editable=False,
help_text="Is this image ready to be used?",
),
),
("status", models.TextField(editable=False)),
("requires_gpu", models.BooleanField(default=False)),
(
"requires_gpu_memory_gb",
models.PositiveIntegerField(default=4),
),
("requires_memory_gb", models.PositiveIntegerField(default=4)),
(
"requires_cpu_cores",
models.DecimalField(
decimal_places=2, default=Decimal("1.0"), max_digits=4
),
),
(
"creator",
models.ForeignKey(
null=True,
on_delete=django.db.models.deletion.SET_NULL,
to=settings.AUTH_USER_MODEL,
),
),
(
"phase",
models.ForeignKey(
null=True,
on_delete=django.db.models.deletion.CASCADE,
to="evaluation.phase",
),
),
],
options={"abstract": False},
),
migrations.CreateModel(
name="Evaluation",
fields=[
(
"id",
models.UUIDField(
default=uuid.uuid4,
editable=False,
primary_key=True,
serialize=False,
),
),
("created", models.DateTimeField(auto_now_add=True)),
("modified", models.DateTimeField(auto_now=True)),
(
"status",
models.PositiveSmallIntegerField(
choices=[
(0, "Queued"),
(1, "Started"),
(2, "Re-Queued"),
(3, "Failed"),
(4, "Succeeded"),
(5, "Cancelled"),
],
default=0,
),
),
("stdout", models.TextField()),
("stderr", models.TextField(default="")),
(
"error_message",
models.CharField(default="", max_length=1024),
),
("started_at", models.DateTimeField(null=True)),
("completed_at", models.DateTimeField(null=True)),
("published", models.BooleanField(default=True)),
(
"rank",
models.PositiveIntegerField(
default=0,
help_text="The position of this result on the leaderboard. If the value is zero, then the result is unranked.",
),
),
("rank_score", models.FloatField(default=0.0)),
("rank_per_metric", models.JSONField(default=dict)),
(
"inputs",
models.ManyToManyField(
related_name="evaluation_evaluations_as_input",
to="components.ComponentInterfaceValue",
),
),
(
"method",
models.ForeignKey(
on_delete=django.db.models.deletion.CASCADE,
to="evaluation.method",
),
),
(
"outputs",
models.ManyToManyField(
related_name="evaluation_evaluations_as_output",
to="components.ComponentInterfaceValue",
),
),
(
"submission",
models.ForeignKey(
on_delete=django.db.models.deletion.CASCADE,
to="evaluation.submission",
),
),
],
options={"abstract": False},
),
migrations.CreateModel(
name="AlgorithmEvaluation",
fields=[
(
"status",
models.PositiveSmallIntegerField(
choices=[
(0, "Queued"),
(1, "Started"),
(2, "Re-Queued"),
(3, "Failed"),
(4, "Succeeded"),
(5, "Cancelled"),
],
default=0,
),
),
("stdout", models.TextField()),
("stderr", models.TextField(default="")),
(
"error_message",
models.CharField(default="", max_length=1024),
),
("started_at", models.DateTimeField(null=True)),
("completed_at", models.DateTimeField(null=True)),
("id", models.BigAutoField(primary_key=True, serialize=False)),
("created", models.DateTimeField(auto_now_add=True)),
("modified", models.DateTimeField(auto_now=True)),
(
"inputs",
models.ManyToManyField(
related_name="evaluation_algorithmevaluations_as_input",
to="components.ComponentInterfaceValue",
),
),
(
"outputs",
models.ManyToManyField(
related_name="evaluation_algorithmevaluations_as_output",
to="components.ComponentInterfaceValue",
),
),
(
"submission",
models.ForeignKey(
on_delete=django.db.models.deletion.CASCADE,
to="evaluation.submission",
),
),
],
options={"abstract": False},
),
]
| true | true |
1c2fe087748a1234df572bee756c776a9b182f2d | 363 | py | Python | my_classes/Tuples/.history/name_tuples_20210721190506.py | minefarmer/deep-Dive-1 | b0675b853180c5b5781888266ea63a3793b8d855 | [
"Unlicense"
] | null | null | null | my_classes/Tuples/.history/name_tuples_20210721190506.py | minefarmer/deep-Dive-1 | b0675b853180c5b5781888266ea63a3793b8d855 | [
"Unlicense"
] | null | null | null | my_classes/Tuples/.history/name_tuples_20210721190506.py | minefarmer/deep-Dive-1 | b0675b853180c5b5781888266ea63a3793b8d855 | [
"Unlicense"
] | null | null | null | """ Tuple as Data Structure
We have seen how we interpreted tuples as data structures
The position of the object contained in the tuple gives it meaning
For example, we can represent a 2D coordinate as: (10, 20)
x y
If pt is a position tuple, we can retrieve the x and y coordinates using:
""" | 30.25 | 73 | 0.61708 | true | true | |
1c2fe11c6ac3fa508db890bc4ec79ab09cf86292 | 4,779 | py | Python | Programs/sent_processing/processing_dwe_17.py | mikepackard415/Scientific-Environmental-Discourse | f8d08734f7c2ce98e088479ac7b58c7b348c0401 | [
"MIT"
] | null | null | null | Programs/sent_processing/processing_dwe_17.py | mikepackard415/Scientific-Environmental-Discourse | f8d08734f7c2ce98e088479ac7b58c7b348c0401 | [
"MIT"
] | null | null | null | Programs/sent_processing/processing_dwe_17.py | mikepackard415/Scientific-Environmental-Discourse | f8d08734f7c2ce98e088479ac7b58c7b348c0401 | [
"MIT"
] | null | null | null | import pandas as pd
import ast
import dask.dataframe as dd
from gensim.utils import effective_n_jobs
import spacy
# Load a spaCy English pipeline: try the legacy "en" shortcut first and fall
# back to the explicitly packaged small model when the shortcut is absent.
try:
    nlp = spacy.load("en")
except OSError:
    nlp = spacy.load("en_core_web_sm")
# Corpus location and input frame; this script processes only the 2017 slice
# (the "_17" shard of the sentence-processing pipeline, see output filenames).
path = 'Environmental-Discourse'
env = pd.read_pickle('../../Data/'+path+'/env_0.pkl')
env = env[env.year == 2017]
def word_tokenize(word_list, model=nlp, MAX_LEN=1500000):
    """Tokenize text with spaCy, keeping only non-punctuation, non-blank tokens.

    Parameters
    ----------
    word_list : str or list
        Text to tokenize. A list is joined into one space-separated string
        (each element passed through ``str``).
    model : spacy.Language
        Loaded spaCy pipeline used for tokenization.
    MAX_LEN : int
        Assigned to ``model.max_length`` so very long documents are accepted.

    Returns
    -------
    list of str
        Surface forms of the retained tokens.
    """
    if isinstance(word_list, list):
        # Joining also covers the single-element case ([x] -> str(x)).
        word_list = ' '.join(str(elem) for elem in word_list)
    # Only tokenizing: disable the RAM-intensive pipeline stages and raise the
    # document size cap.
    model.max_length = MAX_LEN
    doc = model(word_list, disable=["parser", "tagger", "ner", "lemmatizer"])
    return [token.text for token in doc
            if not token.is_punct and token.text.strip()]
def normalizeTokens(word_list, extra_stop=(), model=nlp, lemma=True, MAX_LEN=1500000):
    """Tokenize and normalize text: lowercase, then drop stop words,
    punctuation, numbers and blank tokens.

    Parameters
    ----------
    word_list : str or list
        Text to normalize. A list is joined into one space-separated string.
    extra_stop : iterable of str
        Additional words registered as stop words before filtering.
        (Default changed from a mutable ``[]`` to ``()``; it is only iterated,
        so behaviour is identical.)
    model : spacy.Language
        Loaded spaCy pipeline.
    lemma : bool
        If True return lemmas, otherwise the stripped surface forms.
    MAX_LEN : int
        Assigned to ``model.max_length``.

    Returns
    -------
    list of str
    """
    if isinstance(word_list, list):
        word_list = ' '.join(str(elem) for elem in word_list)
    # Only normalizing: keep the expensive parser/NER disabled and raise the
    # document size cap.
    model.max_length = MAX_LEN
    doc = model(word_list.lower(), disable=["parser", "ner"])
    for stopword in extra_stop:
        # NOTE(review): this mutates the *global* nlp vocab, not model.vocab,
        # matching the original behaviour -- confirm that is intended when a
        # non-default model is passed.
        nlp.vocab[stopword].is_stop = True

    def _keep(w):
        # Shared filter for both branches (was duplicated verbatim before).
        return (w.text != '\n' and not w.is_stop and not w.is_punct
                and not w.like_num and len(w.text.strip()) > 0)

    if lemma:
        return [str(w.lemma_) for w in doc if _keep(w)]
    return [str(w.text.strip()) for w in doc if _keep(w)]
def _merge_ngrams(tokens, grams, size):
    """Single greedy left-to-right pass: any window of `size` tokens whose
    underscore-joined form appears in `grams` is replaced by that joined token;
    otherwise the current token is copied through unchanged."""
    merged = []
    i = 0
    n = len(tokens)
    while i < n:
        candidate = '_'.join(tokens[i:i + size])
        if candidate in grams:
            merged.append(candidate)
            i += size
        else:
            merged.append(tokens[i])
            i += 1
    return merged


def ngram_tagger(tokens):
    """Collapse known n-grams into single underscore-joined tokens.

    Uses the module-level ``quadgrams``, ``trigrams`` and ``bigrams`` lists.
    Longer n-grams are merged first so that, e.g., a tagged quadgram cannot be
    broken apart by a later bigram pass -- same order as the original three
    hand-unrolled loops, now expressed via one shared helper.
    """
    tokens = _merge_ngrams(tokens, quadgrams, 4)
    tokens = _merge_ngrams(tokens, trigrams, 3)
    return _merge_ngrams(tokens, bigrams, 2)
def sent_tokenize(word_list, model=nlp):
    """Split *word_list* into a list of stripped sentence strings."""
    return [sentence.text.strip() for sentence in model(word_list).sents]
# Hand-curated 4-grams to collapse (joined with '_' below).
quadgrams = [('intergovernmental', 'panel', 'climate', 'change'),
             ('natural', 'resources', 'defense', 'council'),
             ('coal', 'fired', 'power', 'plants'),
             ('national', 'oceanic', 'atmospheric', 'administration')]
# Tri-/bi-grams come from pre-tagged CSVs; the first (unnamed) column holds a
# tuple literal, parsed back with ast.literal_eval. Only rows tagged 1 are kept.
tr = pd.read_csv('../../Data/' + path + '/trigrams.csv', converters={'Unnamed: 0': ast.literal_eval})
tr.columns = ['trigram', 'freq', 'tag']
trigrams = [t for t in tr[tr.tag == 1].trigram]
b = pd.read_csv('../../Data/' + path + '/bigrams.csv', converters={'Unnamed: 0': ast.literal_eval})
b.columns = ['bigram', 'freq', 'tag']
bigrams = [t for t in b[b.tag == 1].bigram]
# Join each n-gram tuple into the single-token form ngram_tagger matches on.
quadgrams = ['_'.join(t) for t in quadgrams]
trigrams = ['_'.join(t) for t in trigrams]
bigrams = ['_'.join(t) for t in bigrams]
# Parallelize over one dask partition per available core: per article, split
# into sentences, tokenize/normalize each sentence (no lemmatization), merge
# n-grams, then drop sentences that became empty after filtering.
d_env = dd.from_pandas(env, npartitions=effective_n_jobs(-1))
d_env['sents'] = d_env.text.map(lambda x: [ngram_tagger(
                                           normalizeTokens(
                                           word_tokenize(s), lemma=False)) for s in sent_tokenize(x)])
d_env['sents'] = d_env.sents.map(lambda x: [s for s in x if len(s)>0])
env = d_env.compute()
# Persist the 2017 shard in both formats.
env.to_pickle('../../Data/'+path+'/sent_processing/env_processed_sent_17.pkl')
env.to_csv('../../Data/'+path+'/sent_processing/env_processed_sent_17.csv')
import ast
import dask.dataframe as dd
from gensim.utils import effective_n_jobs
import spacy
try:
nlp = spacy.load("en")
except OSError:
nlp = spacy.load("en_core_web_sm")
path = 'Environmental-Discourse'
env = pd.read_pickle('../../Data/'+path+'/env_0.pkl')
env = env[env.year == 2017]
def word_tokenize(word_list, model=nlp, MAX_LEN=1500000):
tokenized = []
if type(word_list) == list and len(word_list) == 1:
word_list = word_list[0]
if type(word_list) == list:
word_list = ' '.join([str(elem) for elem in word_list])
model.max_length = MAX_LEN
doc = model(word_list, disable=["parser", "tagger", "ner", "lemmatizer"])
for token in doc:
if not token.is_punct and len(token.text.strip()) > 0:
tokenized.append(token.text)
return tokenized
def normalizeTokens(word_list, extra_stop=[], model=nlp, lemma=True, MAX_LEN=1500000):
#We can use a generator here as we just need to iterate over it
normalized = []
if type(word_list) == list and len(word_list) == 1:
word_list = word_list[0]
if type(word_list) == list:
word_list = ' '.join([str(elem) for elem in word_list])
# since we're only normalizing, I remove RAM intensive operations and increase max text size
model.max_length = MAX_LEN
doc = model(word_list.lower(), disable=["parser", "ner"])
if len(extra_stop) > 0:
for stopword in extra_stop:
lexeme = nlp.vocab[stopword]
lexeme.is_stop = True
if lemma:
for w in doc:
if w.text != '\n' and not w.is_stop and not w.is_punct and not w.like_num and len(w.text.strip()) > 0:
# we add the lematized version of the word
normalized.append(str(w.lemma_))
else:
for w in doc:
# if it's not a stop word or punctuation mark, add it to our article
if w.text != '\n' and not w.is_stop and not w.is_punct and not w.like_num and len(w.text.strip()) > 0:
normalized.append(str(w.text.strip()))
return normalized
def ngram_tagger(tokens):
n = len(tokens)
i = 0
tokens_q = []
tokens_qt = []
tokens_qtb = []
while i < n:
words = '_'.join(tokens[i:i+4])
if words in quadgrams:
tokens_q.append(words)
i += 4
else:
tokens_q.append(tokens[i])
i += 1
n = len(tokens_q)
i = 0
while i < n:
words = '_'.join(tokens_q[i:i+3])
if words in trigrams:
tokens_qt.append(words)
i += 3
else:
tokens_qt.append(tokens_q[i])
i += 1
n = len(tokens_qt)
i = 0
while i < n:
words = '_'.join(tokens_qt[i:i+2])
if words in bigrams:
tokens_qtb.append(words)
i += 2
else:
tokens_qtb.append(tokens_qt[i])
i += 1
return tokens_qtb
def sent_tokenize(word_list, model=nlp):
doc = model(word_list)
sentences = [sent.text.strip() for sent in doc.sents]
return sentences
quadgrams = [('intergovernmental', 'panel', 'climate', 'change'),
('natural', 'resources', 'defense', 'council'),
('coal', 'fired', 'power', 'plants'),
('national', 'oceanic', 'atmospheric', 'administration')]
tr = pd.read_csv('../../Data/' + path + '/trigrams.csv', converters={'Unnamed: 0': ast.literal_eval})
tr.columns = ['trigram', 'freq', 'tag']
trigrams = [t for t in tr[tr.tag == 1].trigram]
b = pd.read_csv('../../Data/' + path + '/bigrams.csv', converters={'Unnamed: 0': ast.literal_eval})
b.columns = ['bigram', 'freq', 'tag']
bigrams = [t for t in b[b.tag == 1].bigram]
quadgrams = ['_'.join(t) for t in quadgrams]
trigrams = ['_'.join(t) for t in trigrams]
bigrams = ['_'.join(t) for t in bigrams]
d_env = dd.from_pandas(env, npartitions=effective_n_jobs(-1))
d_env['sents'] = d_env.text.map(lambda x: [ngram_tagger(
normalizeTokens(
word_tokenize(s), lemma=False)) for s in sent_tokenize(x)])
d_env['sents'] = d_env.sents.map(lambda x: [s for s in x if len(s)>0])
env = d_env.compute()
env.to_pickle('../../Data/'+path+'/sent_processing/env_processed_sent_17.pkl')
env.to_csv('../../Data/'+path+'/sent_processing/env_processed_sent_17.csv') | true | true |
1c2fe2e453be6b526576ae046a9baaf0afe5582d | 2,990 | py | Python | linter.py | yubchen/SublimeLinter-for-QJS | c386b5ad5de89d7570c9fb29ea2992e95d2f0666 | [
"MIT"
] | null | null | null | linter.py | yubchen/SublimeLinter-for-QJS | c386b5ad5de89d7570c9fb29ea2992e95d2f0666 | [
"MIT"
] | null | null | null | linter.py | yubchen/SublimeLinter-for-QJS | c386b5ad5de89d7570c9fb29ea2992e95d2f0666 | [
"MIT"
] | null | null | null | #
# linter.py
# Linter for SublimeLinter3, a code checking framework for Sublime Text 3
#
# Written by roadhump
# Copyright (c) 2014 roadhump
#
# License: MIT
#
"""This module exports the ESLint plugin class."""
import sublime
import os
import re
import sys
from .lint import NodeLinter
class ESLint(NodeLinter):
    """Provides an interface to the eslint executable."""

    cwd = os.path.split(os.path.realpath(__file__))[0]
    syntax = ('javascript', 'html', 'javascriptnext', 'javascript (babel)', 'javascript (jsx)', 'jsx-real')
    npm_name = 'eslint'
    # The bundled eslint is run through node; the placeholder filename is
    # rewritten to a project-relative path in communicate() below.
    cmd = ('node', cwd+'/eslint/bin/eslint.js', '--format', 'compact', '--stdin', '--stdin-filename', '__RELATIVE_TO_FOLDER__')
    version_args = '--version'
    version_re = r'v(?P<version>\d+\.\d+\.\d+)'
    version_requirement = '>= 0.20.0'
    regex = (
        r'^.+?: line (?P<line>\d+), col (?P<col>\d+), '
        r'(?:(?P<error>Error)|(?P<warning>Warning)) - '
        r'(?P<message>.+)'
    )
    config_fail_regex = re.compile(r'^Cannot read config file: .*\r?\n')
    crash_regex = re.compile(
        r'^(.*?)\r?\n\w*Error: \1',
        re.MULTILINE
    )
    line_col_base = (1, 0)
    selectors = {
        'html': 'source.js.embedded.html'
    }

    def find_errors(self, output):
        """Parse errors from the linter's output.

        Overridden to surface eslint config failures and crashes as a single
        reported error instead of silently producing no results.
        """
        match = self.config_fail_regex.match(output)
        if match:
            return [(match, 0, None, "Error", "", match.group(0), None)]
        match = self.crash_regex.match(output)
        if match:
            msg = "ESLint crashed: %s" % match.group(1)
            return [(match, 0, None, "Error", "", msg, None)]
        return super().find_errors(output)

    def split_match(self, match):
        """Extract and return values from match.

        Overridden to silence the warning eslint emits for files excluded by
        the user's .eslintignore settings.
        """
        match, line, col, error, warning, message, near = super().split_match(match)
        if message and message == 'File ignored because of your .eslintignore file. Use --no-ignore to override.':
            return match, None, None, None, None, '', None
        return match, line, col, error, warning, message, near

    def communicate(self, cmd, code=None):
        """Run an external executable using stdin to pass code and return its output."""
        if '__RELATIVE_TO_FOLDER__' in cmd:
            relfilename = self.filename
            # Sublime Text >= 3080 exposes window variables, so we can lint
            # relative to the project folder (nested .eslintrc files apply).
            if int(sublime.version()) >= 3080:
                window = self.view.window()
                window_vars = window.extract_variables()  # renamed: was shadowing builtin vars()
                if 'folder' in window_vars:
                    relfilename = os.path.relpath(self.filename, window_vars['folder'])
            cmd[cmd.index('__RELATIVE_TO_FOLDER__')] = relfilename
        elif not code:
            cmd.append(self.filename)
        # Bug fix: previously the linter was invoked twice -- once just to
        # dump its output to stderr -- doubling lint time on every run.
        return super().communicate(cmd, code)
| 30.824742 | 127 | 0.59398 |
import sublime
import os
import re
import sys
from .lint import NodeLinter
class ESLint(NodeLinter):
cwd = os.path.split(os.path.realpath(__file__))[0]
syntax = ('javascript', 'html', 'javascriptnext', 'javascript (babel)', 'javascript (jsx)', 'jsx-real')
npm_name = 'eslint'
cmd = ('node', cwd+'/eslint/bin/eslint.js', '--format', 'compact', '--stdin', '--stdin-filename', '__RELATIVE_TO_FOLDER__')
version_args = '--version'
version_re = r'v(?P<version>\d+\.\d+\.\d+)'
version_requirement = '>= 0.20.0'
regex = (
r'^.+?: line (?P<line>\d+), col (?P<col>\d+), '
r'(?:(?P<error>Error)|(?P<warning>Warning)) - '
r'(?P<message>.+)'
)
config_fail_regex = re.compile(r'^Cannot read config file: .*\r?\n')
crash_regex = re.compile(
r'^(.*?)\r?\n\w*Error: \1',
re.MULTILINE
)
line_col_base = (1, 0)
selectors = {
'html': 'source.js.embedded.html'
}
def find_errors(self, output):
match = self.config_fail_regex.match(output)
if match:
return [(match, 0, None, "Error", "", match.group(0), None)]
match = self.crash_regex.match(output)
if match:
msg = "ESLint crashed: %s" % match.group(1)
return [(match, 0, None, "Error", "", msg, None)]
return super().find_errors(output)
def split_match(self, match):
match, line, col, error, warning, message, near = super().split_match(match)
if message and message == 'File ignored because of your .eslintignore file. Use --no-ignore to override.':
return match, None, None, None, None, '', None
return match, line, col, error, warning, message, near
def communicate(self, cmd, code=None):
if '__RELATIVE_TO_FOLDER__' in cmd:
relfilename = self.filename
if int(sublime.version()) >= 3080:
window = self.view.window()
vars = window.extract_variables()
if 'folder' in vars:
relfilename = os.path.relpath(self.filename, vars['folder'])
cmd[cmd.index('__RELATIVE_TO_FOLDER__')] = relfilename
elif not code:
cmd.append(self.filename)
sys.stderr.write(super().communicate(cmd, code));
return super().communicate(cmd, code)
| true | true |
1c2fe3e3373768dfde7d9a8b16225d720e95fca2 | 597 | py | Python | var/spack/repos/builtin/packages/pslib/package.py | kkauder/spack | 6ae8d5c380c1f42094b05d38be26b03650aafb39 | [
"ECL-2.0",
"Apache-2.0",
"MIT-0",
"MIT"
] | 2,360 | 2017-11-06T08:47:01.000Z | 2022-03-31T14:45:33.000Z | var/spack/repos/builtin/packages/pslib/package.py | kkauder/spack | 6ae8d5c380c1f42094b05d38be26b03650aafb39 | [
"ECL-2.0",
"Apache-2.0",
"MIT-0",
"MIT"
] | 13,838 | 2017-11-04T07:49:45.000Z | 2022-03-31T23:38:39.000Z | var/spack/repos/builtin/packages/pslib/package.py | kkauder/spack | 6ae8d5c380c1f42094b05d38be26b03650aafb39 | [
"ECL-2.0",
"Apache-2.0",
"MIT-0",
"MIT"
] | 1,793 | 2017-11-04T07:45:50.000Z | 2022-03-30T14:31:53.000Z | # Copyright 2013-2021 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class Pslib(AutotoolsPackage):
    """C-library to create PostScript files on the fly."""
    homepage = "http://pslib.sourceforge.net/"
    url      = "https://sourceforge.net/projects/pslib/files/pslib/0.4.5/pslib-0.4.5.tar.gz"
    version('0.4.5', sha256='7a33928982b281660206bb3749a4a563e3ac987eea64f41696f212df345212be')
    # Image libraries pslib links against (presumably for embedding JPEG/PNG
    # images into PostScript output -- confirm against pslib build docs).
    depends_on('jpeg')
    depends_on('libpng')
| 31.421053 | 95 | 0.730318 |
from spack import *
class Pslib(AutotoolsPackage):
homepage = "http://pslib.sourceforge.net/"
url = "https://sourceforge.net/projects/pslib/files/pslib/0.4.5/pslib-0.4.5.tar.gz"
version('0.4.5', sha256='7a33928982b281660206bb3749a4a563e3ac987eea64f41696f212df345212be')
depends_on('jpeg')
depends_on('libpng')
| true | true |
1c2fe3faf32bb6c742fd8ded31cfd83c6c15abcb | 20,258 | py | Python | lib/rucio/tests/test_api_external_representation.py | fno2010/rucio | 47e93cfbe5887071c70de4ba815c1bbdddfac2ce | [
"Apache-2.0"
] | null | null | null | lib/rucio/tests/test_api_external_representation.py | fno2010/rucio | 47e93cfbe5887071c70de4ba815c1bbdddfac2ce | [
"Apache-2.0"
] | null | null | null | lib/rucio/tests/test_api_external_representation.py | fno2010/rucio | 47e93cfbe5887071c70de4ba815c1bbdddfac2ce | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
# Copyright CERN since 2020
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import random
import string
import unittest
from datetime import datetime
from json import loads
import pytest
import rucio.api.account_limit as api_acc_lim
import rucio.api.rse as api_rse
import rucio.core.account_counter as account_counter
from rucio.api.account import add_account, get_account_info, list_accounts
from rucio.api.did import add_did, add_did_to_followed, attach_dids_to_dids, get_users_following_did, scope_list
from rucio.api.exporter import export_data
from rucio.api.identity import add_account_identity, list_accounts_for_identity
from rucio.api.replica import add_replicas, get_did_from_pfns, list_replicas
from rucio.api.request import get_request_by_did, list_requests, queue_requests
from rucio.api.rule import add_replication_rule
from rucio.api.scope import add_scope, list_scopes, get_scopes
from rucio.api.subscription import add_subscription, list_subscriptions, list_subscription_rule_states, \
get_subscription_by_id
from rucio.common.config import config_get_bool
from rucio.common.types import InternalAccount, InternalScope
from rucio.common.utils import api_update_return_dict, generate_uuid
from rucio.core.rse import get_rse_id
from rucio.core.vo import add_vo, vo_exists
from rucio.daemons.abacus import rse as abacus_rse
from rucio.daemons.judge import cleaner
from rucio.daemons.reaper import reaper
from rucio.db.sqla import constants
from rucio.tests.common import rse_name_generator
from rucio.tests.common_server import get_vo
@pytest.mark.noparallel(reason='uses pre-defined RSE, fails when run in parallel')
class TestApiExternalRepresentation(unittest.TestCase):
    @classmethod
    def setUpClass(cls):
        """Create the account, scope and RSEs shared by every test.

        In multi-VO deployments two VO mappings are prepared: ``cls.vo`` (the
        server's own VO) and ``cls.new_vo`` (a second VO named 'new', created
        on demand) so tests can check data does not leak between VOs. In
        single-VO mode both are empty kwargs dicts.
        """
        if config_get_bool('common', 'multi_vo', raise_exception=False, default=False):
            cls.vo = {'vo': get_vo()}
            cls.new_vo = {'vo': 'new'}
            cls.multi_vo = True
            if not vo_exists(**cls.new_vo):
                add_vo(description='Test', email='rucio@email.com', **cls.new_vo)
        else:
            cls.vo = {}
            cls.new_vo = {}
            cls.multi_vo = False
        # Add test account (random 10-letter name, kept in both external
        # string form and InternalAccount form for leak checks)
        cls.account_name = ''.join(random.choice(string.ascii_lowercase) for x in range(10))
        add_account(account=cls.account_name, type_='user', email='rucio@email.com', issuer='root', **cls.vo)
        cls.account = InternalAccount(cls.account_name, **cls.vo)
        # Add test scope
        cls.scope_name = ''.join(random.choice(string.ascii_lowercase) for x in range(10))
        add_scope(scope=cls.scope_name, account=cls.account_name, issuer='root', **cls.vo)
        cls.scope = InternalScope(cls.scope_name, **cls.vo)
        # Get test RSEs: MOCK/MOCK2 are pre-defined on cls.vo; rse3/rse4 are
        # freshly created on the second VO, with a distance between them.
        cls.rse_name = 'MOCK'
        cls.rse_id = get_rse_id(rse=cls.rse_name, **cls.vo)
        cls.rse2_name = 'MOCK2'
        cls.rse2_id = get_rse_id(rse=cls.rse2_name, **cls.vo)
        cls.rse3_name = rse_name_generator()
        cls.rse3_id = api_rse.add_rse(cls.rse3_name, 'root', **cls.new_vo)
        cls.rse4_name = rse_name_generator()
        cls.rse4_id = api_rse.add_rse(cls.rse4_name, 'root', **cls.new_vo)
        api_rse.add_distance(cls.rse3_name, cls.rse4_name, issuer='root', distance=3, **cls.new_vo)
def test_api_update_return_dict(self):
""" API: Test the conversion of dictionaries to external representation """
test_dict = {'account': self.account,
'scope': self.scope,
'rse_expression': 'MOCK|MOCK2',
'rse_id': self.rse_id,
'src_rse_id': self.rse_id,
'source_rse_id': self.rse_id,
'dest_rse_id': self.rse_id,
'destination_rse_id': self.rse_id}
value = api_update_return_dict(test_dict)
expected = {'account': self.account_name, 'scope': self.scope_name, 'rse_expression': 'MOCK|MOCK2',
'rse_id': self.rse_id, 'rse': self.rse_name,
'src_rse_id': self.rse_id, 'src_rse': self.rse_name,
'source_rse_id': self.rse_id, 'source_rse': self.rse_name,
'dest_rse_id': self.rse_id, 'dest_rse': self.rse_name,
'destination_rse_id': self.rse_id, 'destination_rse': self.rse_name}
assert value == expected
def test_api_account(self):
""" ACCOUNT (API): Test external representation of account information """
out = get_account_info(self.account_name, **self.vo)
assert self.account_name == out['account']
out = [acc['account'] for acc in list_accounts(**self.vo)]
assert self.account_name in out
if self.multi_vo:
assert self.account.internal not in out
assert '@' not in ' '.join(out)
    def test_api_account_limit(self):
        """ ACCOUNT_LIMIT (API): Test external representation of account limits """
        # Add mock account limits
        rse_expr = '{}|{}'.format(self.rse_name, self.rse2_name)
        api_acc_lim.set_local_account_limit(self.account_name, self.rse_name, 10000, issuer='root', **self.vo)
        api_acc_lim.set_global_account_limit(self.account_name, rse_expr, 20000, issuer='root', **self.vo)
        # Local limits must be keyed by external RSE name, never by internal id.
        out = api_acc_lim.get_local_account_limits(self.account_name, **self.vo)
        assert self.rse_name in out
        assert self.rse_id not in out
        out = api_acc_lim.get_local_account_limit(self.account_name, self.rse_name, **self.vo)
        assert self.rse_name in out
        assert self.rse_id not in out
        # Global limits must be keyed by the plain expression, without the
        # internal 'vo=...' prefix.
        out = api_acc_lim.get_global_account_limits(self.account_name, **self.vo)
        assert rse_expr in out
        if self.multi_vo:
            assert 'vo={}&({})'.format(self.vo['vo'], rse_expr) not in out
        out = api_acc_lim.get_global_account_limit(self.account_name, rse_expr, **self.vo)
        assert rse_expr in out
        if self.multi_vo:
            assert 'vo={}&({})'.format(self.vo['vo'], rse_expr) not in out
        # Usage entries carrying an 'rse_id' must also carry the resolved
        # external 'rse' name.
        out = api_acc_lim.get_local_account_usage(self.account_name, self.rse_name, issuer='root', **self.vo)
        out = list(out)
        assert 0 != len(out)
        assert self.rse_id in [usage['rse_id'] for usage in out if 'rse_id' in usage]
        for usage in out:
            if 'rse_id' in usage:
                assert 'rse' in usage
                if usage['rse_id'] == self.rse_id:
                    assert self.rse_name == usage["rse"]
        out = api_acc_lim.get_global_account_usage(self.account_name, rse_expr, issuer='root', **self.vo)
        out = list(out)
        assert 0 != len(out)
        assert rse_expr in [usage['rse_expression'] for usage in out if 'rse_expression' in usage]
    def test_api_did(self):
        """ DID (API): Test external representation of DIDs """
        # add some dids: a container with one dataset attached
        add_did(self.scope_name, 'ext_parent', 'container', issuer='root', account=self.account_name, **self.vo)
        add_did(self.scope_name, 'ext_child', 'dataset', issuer='root', account=self.account_name, **self.vo)
        attachment = {'scope': self.scope_name, 'name': 'ext_parent',
                      'dids': [{'scope': self.scope_name, 'name': 'ext_child', 'type': 'DATASET'}]}
        attach_dids_to_dids([attachment], issuer='root', **self.vo)
        # test scope_list: both DIDs and their parent references must use the
        # external scope name
        out = scope_list(self.scope_name, recursive=True, **self.vo)
        out = list(out)
        assert 0 != len(out)
        parent_found = False
        for did in out:
            assert did['scope'] == self.scope_name
            if did['parent'] is not None:
                parent_found = True
                assert did['parent']['scope'] == self.scope_name
        assert parent_found
        # test get_users_following_did: followers must be reported with the
        # external account name
        add_did_to_followed(self.scope_name, 'ext_parent', self.account_name, **self.vo)
        out = get_users_following_did('ext_parent', self.scope_name, **self.vo)
        out = list(out)
        assert 0 != len(out)
        for user in out:
            assert user['user'] == self.account_name
def test_api_exporter(self):
""" EXPORTER (API): Test external representation of exported data """
out = export_data('root', **self.new_vo)
rses = out['rses']
assert self.rse3_name in rses
assert self.rse3_id not in rses
distances = out['distances']
assert self.rse3_name in distances
assert self.rse3_id not in distances
assert self.rse4_name in distances[self.rse3_name]
assert self.rse4_id not in distances[self.rse3_name]
# check for interference from other VOs
if self.multi_vo:
assert self.rse_name not in rses
assert self.rse_id not in rses
assert self.rse2_name not in rses
assert self.rse2_id not in rses
assert self.rse_name not in distances
assert self.rse_id not in distances
assert self.rse2_name not in distances
assert self.rse2_id not in distances
def test_api_identity(self):
""" IDENTITY (API): Test external representation of identity accounts """
id_key = ''.join(random.choice(string.ascii_lowercase) for x in range(10))
add_account_identity(id_key, 'userpass', self.account_name, 'rucio_test@test.com', 'root', default=True, password='ext_pass', **self.vo)
out = list_accounts_for_identity(id_key, 'userpass')
assert self.account_name in out
if self.multi_vo:
assert self.account.internal not in out
    def test_api_replica(self):
        """ REPLICA (API): Test external representation of replicas """
        # register one file replica on MOCK2 and attach it to a new dataset
        did = 'ext_' + str(generate_uuid())
        pfn = 'srm://mock2.com:8443/srm/managerv2?SFN=/rucio/tmpdisk/rucio_tests/%s/%s' % (self.scope_name, generate_uuid())
        add_replicas(self.rse2_name, files=[{'scope': self.scope_name, 'name': did, 'bytes': 100, 'pfn': pfn}], issuer='root', **self.vo)
        add_did(self.scope_name, 'ext_parent_2', 'dataset', issuer='root', account=self.account_name, **self.vo)
        attachment = {'scope': self.scope_name, 'name': 'ext_parent_2',
                      'dids': [{'scope': self.scope_name, 'name': did}]}
        attach_dids_to_dids([attachment], issuer='root', **self.vo)
        # resolving the PFN back to a DID must report the external scope
        out = get_did_from_pfns([pfn], self.rse2_name, **self.vo)
        out = list(out)
        assert 0 != len(out)
        did_found = False
        for p in out:
            for key in p:
                if p[key]['name'] == did:
                    did_found = True
                    assert self.scope_name == p[key]['scope']
        assert did_found
        # listed replicas and their resolved parents must use the external
        # scope representation, never the internal 'scope@vo' form
        out = list_replicas(dids=[{'scope': self.scope_name, 'name': did}], resolve_parents=True, **self.vo)
        out = list(out)
        assert 0 != len(out)
        parents_found = False
        for rep in out:
            assert rep['scope'] == self.scope_name
            if 'parents' in rep:
                parents_found = True
                for parent in rep['parents']:
                    assert self.scope_name in parent
                    if self.multi_vo:
                        assert self.scope.internal not in parent
        assert parents_found
    def test_api_request(self):
        """ REQUEST (API): Test external representation of requests """
        # queue one MOCK -> MOCK2 transfer request for a fresh dataset
        did = generate_uuid()
        add_did(self.scope_name, did, 'dataset', issuer='root', account=self.account_name, rse=self.rse_name, **self.vo)
        requests = [{
            'dest_rse_id': self.rse2_id,
            'source_rse_id': self.rse_id,
            'request_type': constants.RequestType.TRANSFER,
            'request_id': generate_uuid(),
            'name': did,
            'scope': self.scope_name,
            'account': self.account_name,
            'rule_id': generate_uuid(),
            'retry_count': 1,
            'requested_at': datetime.now(),
            'attributes': {
                'activity': 'User Subscription',
                'bytes': 10,
                'md5': '',
                'adler32': ''
            }
        }]
        # queued requests must come back with external account/scope names and
        # resolved source/dest RSE names
        reqs = queue_requests(requests, issuer='root', **self.vo)  # this does not pass in the source rse
        reqs = list(reqs)
        assert 0 != len(reqs)
        for r in reqs:
            assert r['scope'] == self.scope_name
            assert r['account'] == self.account_name
            assert r['source_rse'] == self.rse_name
            assert r['dest_rse'] == self.rse2_name
        # single-request lookup by DID must use external representations too
        out = get_request_by_did(self.scope_name, did, self.rse2_name, issuer='root', **self.vo)
        assert out['scope'] == self.scope_name
        assert out['account'] == self.account_name
        assert out['dest_rse'] == self.rse2_name
        assert out['source_rse'] == self.rse_name
        # listing queued requests between the two RSEs must include our
        # request, again in external representation
        out = list_requests([self.rse_name], [self.rse2_name], [constants.RequestState.QUEUED], issuer='root', **self.vo)
        out = list(out)
        assert 0 != len(out)
        assert self.scope_name in [req['scope'] for req in out]
        for req in out:
            if req['scope'] == self.scope_name:
                assert req['scope'] == self.scope_name
                assert req['account'] == self.account_name
                assert req['dest_rse'] == self.rse2_name
                assert req['source_rse'] == self.rse_name
    @pytest.mark.noparallel(reason='runs the reaper on a pre-defined rse, might interfere with other tests')
    def test_api_rse(self):
        """ RSE (API): Test external representation of RSEs """
        # single-RSE lookup must expose both external name and id
        out = api_rse.get_rse(self.rse_name, **self.vo)
        assert out['rse'] == self.rse_name
        assert out['id'] == self.rse_id
        # listing on the second VO must resolve rse3/rse4 names from their ids
        out = api_rse.list_rses(**self.new_vo)
        out = list(out)
        assert 0 != len(out)
        rse_ids = [rse['id'] for rse in out]
        assert self.rse3_id in rse_ids
        assert self.rse4_id in rse_ids
        for rse in out:
            assert 'rse' in rse
            if rse['id'] == self.rse3_id:
                assert rse['rse'] == self.rse3_name
            elif rse['id'] == self.rse4_id:
                assert rse['rse'] == self.rse4_name
        # attribute-based lookup must also return external names
        key = "KEY_" + generate_uuid()
        api_rse.add_rse_attribute(self.rse_name, key, 1, issuer='root', **self.vo)
        out = api_rse.get_rses_with_attribute(key)
        out = list(out)
        assert 0 != len(out)
        for rse in out:
            assert rse['rse'] == self.rse_name
        out = api_rse.get_rse_protocols(self.rse_name, issuer='root', **self.vo)
        assert out['rse'] == self.rse_name
        # add some account and RSE counters
        rse_mock = 'MOCK4'
        rse_mock_id = get_rse_id(rse_mock, **self.vo)
        account_counter.del_counter(rse_id=rse_mock_id, account=self.account)
        account_counter.add_counter(rse_id=rse_mock_id, account=self.account)
        account_counter.increase(rse_id=rse_mock_id, account=self.account, files=1, bytes_=10)
        account_counter.update_account_counter(self.account, rse_mock_id)
        did = 'file_' + generate_uuid()
        add_did(self.scope_name, did, 'DATASET', 'root', account=self.account_name, rse=rse_mock, **self.vo)
        # run abacus once so the usage counters are materialized
        abacus_rse.run(once=True)
        # per-account usage must resolve RSE name and external account names
        out = api_rse.get_rse_usage(rse_mock, per_account=True, issuer='root', **self.vo)
        assert rse_mock_id in [o['rse_id'] for o in out]
        for usage in out:
            if usage['rse_id'] == rse_mock_id:
                assert usage['rse'] == rse_mock
                accounts = [u['account'] for u in usage['account_usages']]
                assert self.account_name in accounts
                if self.multi_vo:
                    assert self.account.internal not in accounts
        # clean up files (judge cleaner then reaper; in multi-VO the reaper
        # needs the internal 'vo=...' expression to target the right RSE)
        cleaner.run(once=True)
        if self.multi_vo:
            reaper.run(once=True, include_rses='vo=%s&(%s)' % (self.vo['vo'], rse_mock), greedy=True)
        else:
            reaper.run(once=True, include_rses=rse_mock, greedy=True)
        abacus_rse.run(once=True)
        # expression parsing must return external names only
        out = api_rse.parse_rse_expression('%s|%s' % (self.rse_name, self.rse2_name), **self.vo)
        assert self.rse_name in out
        assert self.rse2_name in out
        assert self.rse_id not in out
        assert self.rse2_id not in out
def test_api_scope(self):
""" SCOPE (API): Test external representation of scopes """
out = list_scopes()
assert self.scope_name in out
if self.multi_vo:
assert self.scope.internal not in out
out = get_scopes(self.account_name, **self.vo)
assert self.scope_name in out
if self.multi_vo:
assert self.scope.internal not in out
    def test_api_subscription(self):
        """ SUBSCRIPTION (API): Test external representation of subscriptions """
        # Random names for the subscription and the DID under test.
        sub = 'ext_' + generate_uuid()
        did = 'ext_' + generate_uuid()
        # Fresh account and scope so the test is isolated; created in the
        # "new" VO when running in multi-VO mode.
        new_acc_name = ''.join(random.choice(string.ascii_lowercase) for x in range(10))
        new_scope_name = ''.join(random.choice(string.ascii_lowercase) for x in range(10))
        add_account(new_acc_name, 'USER', 'test@test.com', 'root', **self.new_vo)
        add_scope(new_scope_name, new_acc_name, 'root', **self.new_vo)
        # Grant quota on both RSEs so the replication rule below can be placed.
        api_acc_lim.set_local_account_limit(new_acc_name, self.rse3_name, 10, 'root', **self.new_vo)
        api_acc_lim.set_local_account_limit(new_acc_name, self.rse4_name, 10, 'root', **self.new_vo)
        add_did(new_scope_name, did, 'DATASET', 'root', account=new_acc_name, rse=self.rse3_name, **self.new_vo)
        # Subscription filtering on the new account/scope, replicating to
        # rse3 with rse4 as source replica expression.
        sub_id = add_subscription(sub, new_acc_name, {'account': [new_acc_name], 'scope': [new_scope_name]},
                                  [{'copies': 1, 'rse_expression': self.rse3_name, 'weight': 0, 'activity': 'User Subscriptions',
                                    'source_replica_expression': self.rse4_name}],
                                  '', False, 0, 0, 3, 'root', **self.new_vo)
        add_replication_rule(dids=[{'scope': new_scope_name, 'name': did}], copies=1, rse_expression=self.rse3_name, weight=None,
                             lifetime=180, grouping='DATASET', account=new_acc_name, locked=False, subscription_id=sub_id,
                             source_replica_expression=self.rse4_name, activity='User Subscriptions', notify=None,
                             purge_replicas=False, ignore_availability=False, comment='', ask_approval=False, asynchronous=False,
                             delay_injection=None, priority=0, split_container=False, meta='', issuer='root', **self.new_vo)
        # list_subscriptions must report external names in the 'account',
        # replication-rule and filter fields.
        out = list_subscriptions(sub, **self.new_vo)
        out = list(out)
        assert 0 != len(out)
        assert sub_id in [o['id'] for o in out]
        for o in out:
            if o['id'] == sub_id:
                assert o['account'] == new_acc_name
                rules = loads(o['replication_rules'])[0]
                assert rules['rse_expression'] == self.rse3_name
                assert rules['source_replica_expression'] == self.rse4_name
                fil = loads(o['filter'])
                assert fil['account'] == [new_acc_name]
                assert fil['scope'] == [new_scope_name]
        # Rule states are keyed by the external account name as well.
        out = list_subscription_rule_states(sub, **self.new_vo)
        out = list(out)
        assert 0 != len(out)
        for o in out:
            assert o.account == new_acc_name
        # Lookup by id must use the same external representation.
        out = get_subscription_by_id(sub_id, **self.new_vo)
        assert out['account'] == new_acc_name
        rules = loads(out['replication_rules'])[0]
        assert rules['rse_expression'] == self.rse3_name
        assert rules['source_replica_expression'] == self.rse4_name
        fil = loads(out['filter'])
        assert fil['account'] == [new_acc_name]
        assert fil['scope'] == [new_scope_name]
| 45.626126 | 144 | 0.623655 |
import random
import string
import unittest
from datetime import datetime
from json import loads
import pytest
import rucio.api.account_limit as api_acc_lim
import rucio.api.rse as api_rse
import rucio.core.account_counter as account_counter
from rucio.api.account import add_account, get_account_info, list_accounts
from rucio.api.did import add_did, add_did_to_followed, attach_dids_to_dids, get_users_following_did, scope_list
from rucio.api.exporter import export_data
from rucio.api.identity import add_account_identity, list_accounts_for_identity
from rucio.api.replica import add_replicas, get_did_from_pfns, list_replicas
from rucio.api.request import get_request_by_did, list_requests, queue_requests
from rucio.api.rule import add_replication_rule
from rucio.api.scope import add_scope, list_scopes, get_scopes
from rucio.api.subscription import add_subscription, list_subscriptions, list_subscription_rule_states, \
get_subscription_by_id
from rucio.common.config import config_get_bool
from rucio.common.types import InternalAccount, InternalScope
from rucio.common.utils import api_update_return_dict, generate_uuid
from rucio.core.rse import get_rse_id
from rucio.core.vo import add_vo, vo_exists
from rucio.daemons.abacus import rse as abacus_rse
from rucio.daemons.judge import cleaner
from rucio.daemons.reaper import reaper
from rucio.db.sqla import constants
from rucio.tests.common import rse_name_generator
from rucio.tests.common_server import get_vo
@pytest.mark.noparallel(reason='uses pre-defined RSE, fails when run in parallel')
class TestApiExternalRepresentation(unittest.TestCase):
@classmethod
def setUpClass(cls):
if config_get_bool('common', 'multi_vo', raise_exception=False, default=False):
cls.vo = {'vo': get_vo()}
cls.new_vo = {'vo': 'new'}
cls.multi_vo = True
if not vo_exists(**cls.new_vo):
add_vo(description='Test', email='rucio@email.com', **cls.new_vo)
else:
cls.vo = {}
cls.new_vo = {}
cls.multi_vo = False
cls.account_name = ''.join(random.choice(string.ascii_lowercase) for x in range(10))
add_account(account=cls.account_name, type_='user', email='rucio@email.com', issuer='root', **cls.vo)
cls.account = InternalAccount(cls.account_name, **cls.vo)
cls.scope_name = ''.join(random.choice(string.ascii_lowercase) for x in range(10))
add_scope(scope=cls.scope_name, account=cls.account_name, issuer='root', **cls.vo)
cls.scope = InternalScope(cls.scope_name, **cls.vo)
cls.rse_name = 'MOCK'
cls.rse_id = get_rse_id(rse=cls.rse_name, **cls.vo)
cls.rse2_name = 'MOCK2'
cls.rse2_id = get_rse_id(rse=cls.rse2_name, **cls.vo)
cls.rse3_name = rse_name_generator()
cls.rse3_id = api_rse.add_rse(cls.rse3_name, 'root', **cls.new_vo)
cls.rse4_name = rse_name_generator()
cls.rse4_id = api_rse.add_rse(cls.rse4_name, 'root', **cls.new_vo)
api_rse.add_distance(cls.rse3_name, cls.rse4_name, issuer='root', distance=3, **cls.new_vo)
def test_api_update_return_dict(self):
test_dict = {'account': self.account,
'scope': self.scope,
'rse_expression': 'MOCK|MOCK2',
'rse_id': self.rse_id,
'src_rse_id': self.rse_id,
'source_rse_id': self.rse_id,
'dest_rse_id': self.rse_id,
'destination_rse_id': self.rse_id}
value = api_update_return_dict(test_dict)
expected = {'account': self.account_name, 'scope': self.scope_name, 'rse_expression': 'MOCK|MOCK2',
'rse_id': self.rse_id, 'rse': self.rse_name,
'src_rse_id': self.rse_id, 'src_rse': self.rse_name,
'source_rse_id': self.rse_id, 'source_rse': self.rse_name,
'dest_rse_id': self.rse_id, 'dest_rse': self.rse_name,
'destination_rse_id': self.rse_id, 'destination_rse': self.rse_name}
assert value == expected
def test_api_account(self):
out = get_account_info(self.account_name, **self.vo)
assert self.account_name == out['account']
out = [acc['account'] for acc in list_accounts(**self.vo)]
assert self.account_name in out
if self.multi_vo:
assert self.account.internal not in out
assert '@' not in ' '.join(out)
def test_api_account_limit(self):
rse_expr = '{}|{}'.format(self.rse_name, self.rse2_name)
api_acc_lim.set_local_account_limit(self.account_name, self.rse_name, 10000, issuer='root', **self.vo)
api_acc_lim.set_global_account_limit(self.account_name, rse_expr, 20000, issuer='root', **self.vo)
out = api_acc_lim.get_local_account_limits(self.account_name, **self.vo)
assert self.rse_name in out
assert self.rse_id not in out
out = api_acc_lim.get_local_account_limit(self.account_name, self.rse_name, **self.vo)
assert self.rse_name in out
assert self.rse_id not in out
out = api_acc_lim.get_global_account_limits(self.account_name, **self.vo)
assert rse_expr in out
if self.multi_vo:
assert 'vo={}&({})'.format(self.vo['vo'], rse_expr) not in out
out = api_acc_lim.get_global_account_limit(self.account_name, rse_expr, **self.vo)
assert rse_expr in out
if self.multi_vo:
assert 'vo={}&({})'.format(self.vo['vo'], rse_expr) not in out
out = api_acc_lim.get_local_account_usage(self.account_name, self.rse_name, issuer='root', **self.vo)
out = list(out)
assert 0 != len(out)
assert self.rse_id in [usage['rse_id'] for usage in out if 'rse_id' in usage]
for usage in out:
if 'rse_id' in usage:
assert 'rse' in usage
if usage['rse_id'] == self.rse_id:
assert self.rse_name == usage["rse"]
out = api_acc_lim.get_global_account_usage(self.account_name, rse_expr, issuer='root', **self.vo)
out = list(out)
assert 0 != len(out)
assert rse_expr in [usage['rse_expression'] for usage in out if 'rse_expression' in usage]
def test_api_did(self):
add_did(self.scope_name, 'ext_parent', 'container', issuer='root', account=self.account_name, **self.vo)
add_did(self.scope_name, 'ext_child', 'dataset', issuer='root', account=self.account_name, **self.vo)
attachment = {'scope': self.scope_name, 'name': 'ext_parent',
'dids': [{'scope': self.scope_name, 'name': 'ext_child', 'type': 'DATASET'}]}
attach_dids_to_dids([attachment], issuer='root', **self.vo)
out = scope_list(self.scope_name, recursive=True, **self.vo)
out = list(out)
assert 0 != len(out)
parent_found = False
for did in out:
assert did['scope'] == self.scope_name
if did['parent'] is not None:
parent_found = True
assert did['parent']['scope'] == self.scope_name
assert parent_found
add_did_to_followed(self.scope_name, 'ext_parent', self.account_name, **self.vo)
out = get_users_following_did('ext_parent', self.scope_name, **self.vo)
out = list(out)
assert 0 != len(out)
for user in out:
assert user['user'] == self.account_name
def test_api_exporter(self):
out = export_data('root', **self.new_vo)
rses = out['rses']
assert self.rse3_name in rses
assert self.rse3_id not in rses
distances = out['distances']
assert self.rse3_name in distances
assert self.rse3_id not in distances
assert self.rse4_name in distances[self.rse3_name]
assert self.rse4_id not in distances[self.rse3_name]
if self.multi_vo:
assert self.rse_name not in rses
assert self.rse_id not in rses
assert self.rse2_name not in rses
assert self.rse2_id not in rses
assert self.rse_name not in distances
assert self.rse_id not in distances
assert self.rse2_name not in distances
assert self.rse2_id not in distances
def test_api_identity(self):
id_key = ''.join(random.choice(string.ascii_lowercase) for x in range(10))
add_account_identity(id_key, 'userpass', self.account_name, 'rucio_test@test.com', 'root', default=True, password='ext_pass', **self.vo)
out = list_accounts_for_identity(id_key, 'userpass')
assert self.account_name in out
if self.multi_vo:
assert self.account.internal not in out
def test_api_replica(self):
did = 'ext_' + str(generate_uuid())
pfn = 'srm://mock2.com:8443/srm/managerv2?SFN=/rucio/tmpdisk/rucio_tests/%s/%s' % (self.scope_name, generate_uuid())
add_replicas(self.rse2_name, files=[{'scope': self.scope_name, 'name': did, 'bytes': 100, 'pfn': pfn}], issuer='root', **self.vo)
add_did(self.scope_name, 'ext_parent_2', 'dataset', issuer='root', account=self.account_name, **self.vo)
attachment = {'scope': self.scope_name, 'name': 'ext_parent_2',
'dids': [{'scope': self.scope_name, 'name': did}]}
attach_dids_to_dids([attachment], issuer='root', **self.vo)
out = get_did_from_pfns([pfn], self.rse2_name, **self.vo)
out = list(out)
assert 0 != len(out)
did_found = False
for p in out:
for key in p:
if p[key]['name'] == did:
did_found = True
assert self.scope_name == p[key]['scope']
assert did_found
out = list_replicas(dids=[{'scope': self.scope_name, 'name': did}], resolve_parents=True, **self.vo)
out = list(out)
assert 0 != len(out)
parents_found = False
for rep in out:
assert rep['scope'] == self.scope_name
if 'parents' in rep:
parents_found = True
for parent in rep['parents']:
assert self.scope_name in parent
if self.multi_vo:
assert self.scope.internal not in parent
assert parents_found
def test_api_request(self):
did = generate_uuid()
add_did(self.scope_name, did, 'dataset', issuer='root', account=self.account_name, rse=self.rse_name, **self.vo)
requests = [{
'dest_rse_id': self.rse2_id,
'source_rse_id': self.rse_id,
'request_type': constants.RequestType.TRANSFER,
'request_id': generate_uuid(),
'name': did,
'scope': self.scope_name,
'account': self.account_name,
'rule_id': generate_uuid(),
'retry_count': 1,
'requested_at': datetime.now(),
'attributes': {
'activity': 'User Subscription',
'bytes': 10,
'md5': '',
'adler32': ''
}
}]
reqs = queue_requests(requests, issuer='root', **self.vo)
reqs = list(reqs)
assert 0 != len(reqs)
for r in reqs:
assert r['scope'] == self.scope_name
assert r['account'] == self.account_name
assert r['source_rse'] == self.rse_name
assert r['dest_rse'] == self.rse2_name
out = get_request_by_did(self.scope_name, did, self.rse2_name, issuer='root', **self.vo)
assert out['scope'] == self.scope_name
assert out['account'] == self.account_name
assert out['dest_rse'] == self.rse2_name
assert out['source_rse'] == self.rse_name
out = list_requests([self.rse_name], [self.rse2_name], [constants.RequestState.QUEUED], issuer='root', **self.vo)
out = list(out)
assert 0 != len(out)
assert self.scope_name in [req['scope'] for req in out]
for req in out:
if req['scope'] == self.scope_name:
assert req['scope'] == self.scope_name
assert req['account'] == self.account_name
assert req['dest_rse'] == self.rse2_name
assert req['source_rse'] == self.rse_name
@pytest.mark.noparallel(reason='runs the reaper on a pre-defined rse, might interfere with other tests')
def test_api_rse(self):
out = api_rse.get_rse(self.rse_name, **self.vo)
assert out['rse'] == self.rse_name
assert out['id'] == self.rse_id
out = api_rse.list_rses(**self.new_vo)
out = list(out)
assert 0 != len(out)
rse_ids = [rse['id'] for rse in out]
assert self.rse3_id in rse_ids
assert self.rse4_id in rse_ids
for rse in out:
assert 'rse' in rse
if rse['id'] == self.rse3_id:
assert rse['rse'] == self.rse3_name
elif rse['id'] == self.rse4_id:
assert rse['rse'] == self.rse4_name
key = "KEY_" + generate_uuid()
api_rse.add_rse_attribute(self.rse_name, key, 1, issuer='root', **self.vo)
out = api_rse.get_rses_with_attribute(key)
out = list(out)
assert 0 != len(out)
for rse in out:
assert rse['rse'] == self.rse_name
out = api_rse.get_rse_protocols(self.rse_name, issuer='root', **self.vo)
assert out['rse'] == self.rse_name
rse_mock = 'MOCK4'
rse_mock_id = get_rse_id(rse_mock, **self.vo)
account_counter.del_counter(rse_id=rse_mock_id, account=self.account)
account_counter.add_counter(rse_id=rse_mock_id, account=self.account)
account_counter.increase(rse_id=rse_mock_id, account=self.account, files=1, bytes_=10)
account_counter.update_account_counter(self.account, rse_mock_id)
did = 'file_' + generate_uuid()
add_did(self.scope_name, did, 'DATASET', 'root', account=self.account_name, rse=rse_mock, **self.vo)
abacus_rse.run(once=True)
out = api_rse.get_rse_usage(rse_mock, per_account=True, issuer='root', **self.vo)
assert rse_mock_id in [o['rse_id'] for o in out]
for usage in out:
if usage['rse_id'] == rse_mock_id:
assert usage['rse'] == rse_mock
accounts = [u['account'] for u in usage['account_usages']]
assert self.account_name in accounts
if self.multi_vo:
assert self.account.internal not in accounts
cleaner.run(once=True)
if self.multi_vo:
reaper.run(once=True, include_rses='vo=%s&(%s)' % (self.vo['vo'], rse_mock), greedy=True)
else:
reaper.run(once=True, include_rses=rse_mock, greedy=True)
abacus_rse.run(once=True)
out = api_rse.parse_rse_expression('%s|%s' % (self.rse_name, self.rse2_name), **self.vo)
assert self.rse_name in out
assert self.rse2_name in out
assert self.rse_id not in out
assert self.rse2_id not in out
def test_api_scope(self):
out = list_scopes()
assert self.scope_name in out
if self.multi_vo:
assert self.scope.internal not in out
out = get_scopes(self.account_name, **self.vo)
assert self.scope_name in out
if self.multi_vo:
assert self.scope.internal not in out
def test_api_subscription(self):
sub = 'ext_' + generate_uuid()
did = 'ext_' + generate_uuid()
new_acc_name = ''.join(random.choice(string.ascii_lowercase) for x in range(10))
new_scope_name = ''.join(random.choice(string.ascii_lowercase) for x in range(10))
add_account(new_acc_name, 'USER', 'test@test.com', 'root', **self.new_vo)
add_scope(new_scope_name, new_acc_name, 'root', **self.new_vo)
api_acc_lim.set_local_account_limit(new_acc_name, self.rse3_name, 10, 'root', **self.new_vo)
api_acc_lim.set_local_account_limit(new_acc_name, self.rse4_name, 10, 'root', **self.new_vo)
add_did(new_scope_name, did, 'DATASET', 'root', account=new_acc_name, rse=self.rse3_name, **self.new_vo)
sub_id = add_subscription(sub, new_acc_name, {'account': [new_acc_name], 'scope': [new_scope_name]},
[{'copies': 1, 'rse_expression': self.rse3_name, 'weight': 0, 'activity': 'User Subscriptions',
'source_replica_expression': self.rse4_name}],
'', False, 0, 0, 3, 'root', **self.new_vo)
add_replication_rule(dids=[{'scope': new_scope_name, 'name': did}], copies=1, rse_expression=self.rse3_name, weight=None,
lifetime=180, grouping='DATASET', account=new_acc_name, locked=False, subscription_id=sub_id,
source_replica_expression=self.rse4_name, activity='User Subscriptions', notify=None,
purge_replicas=False, ignore_availability=False, comment='', ask_approval=False, asynchronous=False,
delay_injection=None, priority=0, split_container=False, meta='', issuer='root', **self.new_vo)
out = list_subscriptions(sub, **self.new_vo)
out = list(out)
assert 0 != len(out)
assert sub_id in [o['id'] for o in out]
for o in out:
if o['id'] == sub_id:
assert o['account'] == new_acc_name
rules = loads(o['replication_rules'])[0]
assert rules['rse_expression'] == self.rse3_name
assert rules['source_replica_expression'] == self.rse4_name
fil = loads(o['filter'])
assert fil['account'] == [new_acc_name]
assert fil['scope'] == [new_scope_name]
out = list_subscription_rule_states(sub, **self.new_vo)
out = list(out)
assert 0 != len(out)
for o in out:
assert o.account == new_acc_name
out = get_subscription_by_id(sub_id, **self.new_vo)
assert out['account'] == new_acc_name
rules = loads(out['replication_rules'])[0]
assert rules['rse_expression'] == self.rse3_name
assert rules['source_replica_expression'] == self.rse4_name
fil = loads(out['filter'])
assert fil['account'] == [new_acc_name]
assert fil['scope'] == [new_scope_name]
| true | true |
1c2fe45d995e3b53e075541991a3ec3d5009d8ad | 439 | py | Python | users_manage_api/urls.py | OscarMCV/prueba_backend | 893d68c0f3d9bb2dc7bea701e50eed44df4df87f | [
"MIT"
] | null | null | null | users_manage_api/urls.py | OscarMCV/prueba_backend | 893d68c0f3d9bb2dc7bea701e50eed44df4df87f | [
"MIT"
] | null | null | null | users_manage_api/urls.py | OscarMCV/prueba_backend | 893d68c0f3d9bb2dc7bea701e50eed44df4df87f | [
"MIT"
] | null | null | null | #Django
# Django
from django.urls import path
# Django REST framework
from rest_framework.urlpatterns import format_suffix_patterns
# Views: the module is aliased so the patterns below read as
# ``user_views.<ViewClass>`` (renamed to make URL management clearer).
from users_manage_api import views as user_views

# URL routes exposed by the users_manage_api app.
urlpatterns = [
    path('login/', user_views.UserAPIView.as_view()),
    path('logon/', user_views.CreateUser.as_view()),
]

# Allow clients to append format suffixes (e.g. ``.json``) to the routes.
urlpatterns = format_suffix_patterns(urlpatterns)
| 27.4375 | 82 | 0.781321 |
from django.urls import path
from rest_framework.urlpatterns import format_suffix_patterns
from users_manage_api import views as user_views
urlpatterns = [
path('login/', user_views.UserAPIView.as_view()),
path('logon/', user_views.CreateUser.as_view()),
]
urlpatterns = format_suffix_patterns(urlpatterns)
| true | true |
1c2fe5cf35fa1b453b2fe317770a5af30692455d | 486 | py | Python | app/backend/registries/migrations/0013_auto_20180712_2107.py | stephenhillier/gwells | 235d35f1f40dd845f8fecd0d7c3371c4564567c6 | [
"Apache-2.0"
] | null | null | null | app/backend/registries/migrations/0013_auto_20180712_2107.py | stephenhillier/gwells | 235d35f1f40dd845f8fecd0d7c3371c4564567c6 | [
"Apache-2.0"
] | null | null | null | app/backend/registries/migrations/0013_auto_20180712_2107.py | stephenhillier/gwells | 235d35f1f40dd845f8fecd0d7c3371c4564567c6 | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
# Generated by Django 1.11.14 on 2018-07-12 21:07
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
    """Adjust RegistriesApplication model options: order rows by their
    primary certificate number and fix the plural label shown in admin."""

    dependencies = [
        # Must run after the previous registries migration.
        ('registries', '0012_auto_20180704_2105'),
    ]

    operations = [
        migrations.AlterModelOptions(
            name='registriesapplication',
            options={'ordering': ['primary_certificate_no'],
                     'verbose_name_plural': 'Applications'},
        ),
    ]
| 24.3 | 100 | 0.654321 |
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('registries', '0012_auto_20180704_2105'),
]
operations = [
migrations.AlterModelOptions(
name='registriesapplication',
options={'ordering': ['primary_certificate_no'], 'verbose_name_plural': 'Applications'},
),
]
| true | true |
1c2fe5f15190b2d43bdd5dc69c9fd74b7a7bebe8 | 6,082 | py | Python | owlbot.py | HoangDinhTho/nodejs-firestore | 58ed6d6acff6ebefbd0609257ccf5a78c9dec46c | [
"Apache-2.0"
] | 1 | 2019-10-18T22:44:00.000Z | 2019-10-18T22:44:00.000Z | owlbot.py | renovate-bot/nodejs-firestore | 1dda1bdb53818299fcaefe606d82777ce74dafd2 | [
"Apache-2.0"
] | null | null | null | owlbot.py | renovate-bot/nodejs-firestore | 1dda1bdb53818299fcaefe606d82777ce74dafd2 | [
"Apache-2.0"
] | null | null | null | import synthtool as s
import synthtool.gcp as gcp
import synthtool.languages.node as node
import logging
import os
import subprocess
from pathlib import Path
from synthtool import _tracked_paths
import shutil
logging.basicConfig(level=logging.DEBUG)

# `owl-bot-staging` holds freshly generated client code; it is merged into
# `dev/` below and must never itself be committed.
staging = Path("owl-bot-staging")

if staging.is_dir():
    try:
        v1_admin_library = staging / "admin/v1"
        v1beta1_library = staging / "v1beta1"
        v1_library = staging / "v1"
        _tracked_paths.add(v1_admin_library)
        _tracked_paths.add(v1beta1_library)
        _tracked_paths.add(v1_library)
        # skip index, protos, package.json, and README.md
        s.copy(v1_admin_library, "dev", excludes=["package.json", "README.md", "src/index.ts", "src/v1/index.ts",
                                                  "tsconfig.json", "linkinator.config.json", "webpack.config.js"])
        s.copy(v1beta1_library, "dev", excludes=["package.json", "README.md", "src/index.ts", "src/v1beta1/index.ts",
                                                 "tsconfig.json", "linkinator.config.json", "webpack.config.js"])
        s.copy(v1_library, "dev", excludes=["package.json", "README.md", "src/index.ts", "src/v1/index.ts",
                                            "tsconfig.json", "linkinator.config.json", "webpack.config.js"])

        # Fix dropping of google-cloud-resource-header
        # See: https://github.com/googleapis/nodejs-firestore/pull/375
        # NOTE: the *search* argument of s.replace() is a regex. All patterns
        # containing backslash escapes are written as raw strings so the
        # escapes reach the regex engine without Python emitting
        # invalid-escape-sequence warnings (values are unchanged).
        s.replace(
            "dev/src/v1beta1/firestore_client.ts",
            r"return this\.innerApiCalls\.listen\(options\);",
            "return this.innerApiCalls.listen({}, options);",
        )
        s.replace(
            "dev/src/v1/firestore_client.ts",
            r"return this\.innerApiCalls\.listen\(options\);",
            "return this.innerApiCalls.listen({}, options);",
        )
        s.replace(
            "dev/test/gapic_firestore_v1beta1.ts",
            r"calledWithExactly\(undefined\)",
            "calledWithExactly({}, undefined)",
        )
        s.replace(
            "dev/src/v1beta1/firestore_client.ts",
            r"return this\.innerApiCalls\.write\(options\);",
            "return this.innerApiCalls.write({}, options);",
        )
        s.replace(
            "dev/src/v1/firestore_client.ts",
            r"return this\.innerApiCalls\.write\(options\);",
            "return this.innerApiCalls.write({}, options);",
        )
        s.replace(
            "dev/test/gapic_firestore_v1.ts",
            r"calledWithExactly\(undefined\)",
            "calledWithExactly({}, undefined)",
        )
        # use the existing proto .js / .d.ts files
        s.replace(
            "dev/src/v1/firestore_client.ts",
            "/protos/protos'",
            "/protos/firestore_v1_proto_api'"
        )
        s.replace(
            "dev/test/gapic_firestore_v1.ts",
            "/protos/protos'",
            "/protos/firestore_v1_proto_api'"
        )
        s.replace(
            "dev/test/gapic_firestore_v1.ts",
            r"import \* as firestoreModule from '\.\./src';",
            "import * as firestoreModule from '../src/v1';"
        )
        s.replace(
            "dev/test/gapic_firestore_v1.ts",
            r"firestoreModule\.v1",
            "firestoreModule"
        )
        s.replace(
            "dev/src/v1/firestore_admin_client.ts",
            "/protos/protos'",
            "/protos/firestore_admin_v1_proto_api'"
        )
        s.replace(
            "dev/test/gapic_firestore_admin_v1.ts",
            "/protos/protos'",
            "/protos/firestore_admin_v1_proto_api'"
        )
        s.replace(
            "dev/test/gapic_firestore_admin_v1.ts",
            r"import \* as firestoreadminModule from '\.\./src';",
            "import * as firestoreadminModule from '../src/v1';"
        )
        s.replace(
            "dev/test/gapic_firestore_admin_v1.ts",
            r"firestoreadminModule\.v1",
            "firestoreadminModule"
        )
        s.replace(
            "dev/src/v1beta1/firestore_client.ts",
            "/protos/protos'",
            "/protos/firestore_v1beta1_proto_api'"
        )
        s.replace(
            "dev/test/gapic_firestore_v1beta1.ts",
            "/protos/protos'",
            "/protos/firestore_v1beta1_proto_api'"
        )
        s.replace(
            "dev/test/gapic_firestore_v1beta1.ts",
            r"import \* as firestoreModule from '../src';",
            "import * as firestoreModule from '../src/v1beta1';"
        )
        s.replace(
            "dev/test/gapic_firestore_v1beta1.ts",
            r"firestoreModule\.v1beta1",
            "firestoreModule"
        )
        # Mark v1beta1 as deprecated
        s.replace(
            "dev/src/v1beta1/firestore_client.ts",
            "@class",
            "@class\n * @deprecated Use v1/firestore_client instead."
        )
        s.replace(
            "dev/src/v1beta1/firestore_client.ts",
            "const version",
            "// tslint:disable deprecation\n\nconst version",
            1
        )
        # Dot-files live at the repository root, not under dev/.
        os.rename("dev/.gitignore", ".gitignore")
        os.rename("dev/.eslintignore", ".eslintignore")
        os.rename("dev/.mocharc.js", ".mocharc.js")
        os.rename("dev/.jsdoc.js", ".jsdoc.js")
        os.rename("dev/.prettierrc.js", ".prettierrc.js")
        os.unlink("dev/.eslintrc.json")
        s.replace(".jsdoc.js", "protos", "build/protos", 1)

        # Remove auto-generated packaging tests
        os.system('rm -rf dev/system-test/fixtures dev/system-test/install.ts')

        # Regenerate the protos with the pinned toolchain, then swap in the
        # repository's hand-maintained proto artifacts.
        os.chdir("dev")
        node.compile_protos_hermetic()
        os.chdir("protos")
        os.unlink('protos.js')
        os.unlink('protos.d.ts')
        subprocess.run('./update.sh', shell=True)
        os.chdir("../../")

        # Copy types into types/
        # These files were generated by node.compile_protos_hermetic() above.
        os.system("cp build/src/v1/firestore*.d.ts types/v1")
        os.system("cp build/src/v1beta1/firestore_client.d.ts types/v1beta1")
        os.system("cp build/protos/firestore*.d.ts types/protos")
        s.replace(
            "types/v1/firestore_client.d.ts",
            "../../protos",
            "../protos"
        )
        s.replace(
            "types/v1/firestore_admin_client.d.ts",
            "../../protos",
            "../protos"
        )
        s.replace(
            "types/v1beta1/firestore_client.d.ts",
            "../../protos",
            "../protos"
        )
    finally:
        # The staging directory should never be merged into the main branch.
        shutil.rmtree(staging)
# Copy template files shared by all Node.js client libraries, keeping the
# files this repository maintains itself (lint config, CI, code owners).
common_templates = gcp.CommonTemplates()
templates = common_templates.node_library(
    source_location="build/src", test_project="node-gcloud-ci"
)
s.copy(templates, excludes=[".eslintrc.json", ".kokoro/**/*", ".github/CODEOWNERS"])
node.fix_hermetic() # fix formatting | 31.350515 | 113 | 0.634988 | import synthtool as s
import synthtool.gcp as gcp
import synthtool.languages.node as node
import logging
import os
import subprocess
from pathlib import Path
from synthtool import _tracked_paths
import shutil
logging.basicConfig(level=logging.DEBUG)
staging = Path("owl-bot-staging")
if staging.is_dir():
try:
v1_admin_library = staging / "admin/v1"
v1beta1_library = staging / "v1beta1"
v1_library = staging / "v1"
_tracked_paths.add(v1_admin_library)
_tracked_paths.add(v1beta1_library)
_tracked_paths.add(v1_library)
s.copy(v1_admin_library, "dev", excludes=["package.json", "README.md", "src/index.ts", "src/v1/index.ts",
"tsconfig.json", "linkinator.config.json", "webpack.config.js"])
s.copy(v1beta1_library, "dev", excludes=["package.json", "README.md", "src/index.ts", "src/v1beta1/index.ts",
"tsconfig.json", "linkinator.config.json", "webpack.config.js"])
s.copy(v1_library, "dev", excludes=["package.json", "README.md", "src/index.ts", "src/v1/index.ts",
"tsconfig.json", "linkinator.config.json", "webpack.config.js"])
s.replace(
"dev/src/v1beta1/firestore_client.ts",
"return this\.innerApiCalls\.listen\(options\);",
"return this.innerApiCalls.listen({}, options);",
)
s.replace(
"dev/src/v1/firestore_client.ts",
"return this\.innerApiCalls\.listen\(options\);",
"return this.innerApiCalls.listen({}, options);",
)
s.replace(
"dev/test/gapic_firestore_v1beta1.ts",
"calledWithExactly\(undefined\)",
"calledWithExactly({}, undefined)",
)
s.replace(
"dev/src/v1beta1/firestore_client.ts",
"return this\.innerApiCalls\.write\(options\);",
"return this.innerApiCalls.write({}, options);",
)
s.replace(
"dev/src/v1/firestore_client.ts",
"return this\.innerApiCalls\.write\(options\);",
"return this.innerApiCalls.write({}, options);",
)
s.replace(
"dev/test/gapic_firestore_v1.ts",
"calledWithExactly\(undefined\)",
"calledWithExactly({}, undefined)",
)
s.replace(
"dev/src/v1/firestore_client.ts",
"/protos/protos'",
"/protos/firestore_v1_proto_api'"
)
s.replace(
"dev/test/gapic_firestore_v1.ts",
"/protos/protos'",
"/protos/firestore_v1_proto_api'"
)
s.replace(
"dev/test/gapic_firestore_v1.ts",
"import \* as firestoreModule from '\.\./src';",
"import * as firestoreModule from '../src/v1';"
)
s.replace(
"dev/test/gapic_firestore_v1.ts",
"firestoreModule\.v1",
"firestoreModule"
)
s.replace(
"dev/src/v1/firestore_admin_client.ts",
"/protos/protos'",
"/protos/firestore_admin_v1_proto_api'"
)
s.replace(
"dev/test/gapic_firestore_admin_v1.ts",
"/protos/protos'",
"/protos/firestore_admin_v1_proto_api'"
)
s.replace(
"dev/test/gapic_firestore_admin_v1.ts",
"import \* as firestoreadminModule from '\.\./src';",
"import * as firestoreadminModule from '../src/v1';"
)
s.replace(
"dev/test/gapic_firestore_admin_v1.ts",
"firestoreadminModule\.v1",
"firestoreadminModule"
)
s.replace(
"dev/src/v1beta1/firestore_client.ts",
"/protos/protos'",
"/protos/firestore_v1beta1_proto_api'"
)
s.replace(
"dev/test/gapic_firestore_v1beta1.ts",
"/protos/protos'",
"/protos/firestore_v1beta1_proto_api'"
)
s.replace(
"dev/test/gapic_firestore_v1beta1.ts",
"import \* as firestoreModule from \'../src\';",
"import * as firestoreModule from '../src/v1beta1';"
)
s.replace(
"dev/test/gapic_firestore_v1beta1.ts",
"firestoreModule\.v1beta1",
"firestoreModule"
)
s.replace(
"dev/src/v1beta1/firestore_client.ts",
"@class",
"@class\n * @deprecated Use v1/firestore_client instead."
)
s.replace(
"dev/src/v1beta1/firestore_client.ts",
"const version",
"// tslint:disable deprecation\n\nconst version",
1
)
os.rename("dev/.gitignore", ".gitignore")
os.rename("dev/.eslintignore", ".eslintignore")
os.rename("dev/.mocharc.js", ".mocharc.js")
os.rename("dev/.jsdoc.js", ".jsdoc.js")
os.rename("dev/.prettierrc.js", ".prettierrc.js")
os.unlink("dev/.eslintrc.json")
s.replace(".jsdoc.js", "protos", "build/protos", 1)
os.system('rm -rf dev/system-test/fixtures dev/system-test/install.ts')
os.chdir("dev")
node.compile_protos_hermetic()
os.chdir("protos")
os.unlink('protos.js')
os.unlink('protos.d.ts')
subprocess.run('./update.sh', shell=True)
os.chdir("../../")
os.system("cp build/src/v1/firestore*.d.ts types/v1")
os.system("cp build/src/v1beta1/firestore_client.d.ts types/v1beta1")
os.system("cp build/protos/firestore*.d.ts types/protos")
s.replace(
"types/v1/firestore_client.d.ts",
"../../protos",
"../protos"
)
s.replace(
"types/v1/firestore_admin_client.d.ts",
"../../protos",
"../protos"
)
s.replace(
"types/v1beta1/firestore_client.d.ts",
"../../protos",
"../protos"
)
finally:
shutil.rmtree(staging)
common_templates = gcp.CommonTemplates()
templates = common_templates.node_library(
source_location="build/src", test_project="node-gcloud-ci"
)
s.copy(templates, excludes=[".eslintrc.json", ".kokoro/**/*", ".github/CODEOWNERS"])
node.fix_hermetic() | true | true |
1c2fe7bccf9324aea7bb17a9739be1ad6a210a33 | 2,658 | py | Python | servicios_profesionales/photologue_custom/tests.py | acs-um/ServiciosProfesionales | b29d67cda42f3d975a8abaf58203d92c9d1a3f57 | [
"MIT"
] | 1 | 2018-05-24T23:33:02.000Z | 2018-05-24T23:33:02.000Z | servicios_profesionales/photologue_custom/tests.py | acs-um/ServiciosProfesionales | b29d67cda42f3d975a8abaf58203d92c9d1a3f57 | [
"MIT"
] | 22 | 2018-05-07T20:46:27.000Z | 2018-06-10T23:59:49.000Z | servicios_profesionales/photologue_custom/tests.py | acs-um/ServiciosProfesionales | b29d67cda42f3d975a8abaf58203d92c9d1a3f57 | [
"MIT"
] | null | null | null | from django.test import TestCase, RequestFactory
from django.urls import reverse
from django.apps import apps
from .apps import PhotologueCustomConfig
from .form import GalleryExtendedForm
from Categorias.models import Categoria
from .models import GalleryExtended
from usuarios.models import MyUser
from servicios.models import Service
from photologue.models import Photo
class GalleryViewTests(TestCase):
    """Integration tests for the photologue_custom gallery model, form and view."""

    def setUp(self):
        """Create one user, one category and two services shared by the tests."""
        self.factory = RequestFactory()
        self.user = MyUser.objects.create_user(
            email=" test@g.com ",
            date_of_birth="1995-01-02",
            password=" 123123b ",
            first_name="Test",
            last_name="Apellido"
        )
        self.categoria = Categoria.objects.create(
            name='Construcción',
            description='Trabajos de construcción'
        )
        self.service = Service.objects.create(
            name='Albañil',
            description='Servicios generales',
            category=self.categoria
        )
        self.service2 = Service.objects.create(
            name='Yesero',
            description='Yesería en general',
            category=self.categoria
        )

    def test_apps(self):
        """The app config is registered under the expected app name."""
        self.assertEqual(PhotologueCustomConfig.name, 'photologue_custom')
        self.assertEqual(apps.get_app_config('photologue_custom').name, 'photologue_custom')

    def test_gallery(self):
        """GalleryExtended.nuevo titles the gallery '<first_name>-<service>'."""
        custom_g = GalleryExtended.nuevo(self.service, self.user)
        expected_title = self.user.first_name + '-' + self.service.name
        # assertEquals is a deprecated alias of assertEqual.
        self.assertEqual(custom_g.gallery.title, expected_title)
        # The form requires more fields than title/slug/description, so this
        # partial payload must not validate.
        form = GalleryExtendedForm(data={
            'title': custom_g.gallery.title, 'slug': custom_g.gallery.slug,
            'description': custom_g.gallery.description,
        })
        self.assertFalse(form.is_valid())

    def test_update_gallery(self):
        """POSTing to updateGallery adds the photo and redirects (302)."""
        galeria = GalleryExtended.nuevo(self.service2, self.user)
        photo_count_before = galeria.gallery.photo_count()
        p1 = Photo.objects.create(title='test photo 1')
        galeria.gallery.photos.add(p1)
        # (Removed an unused local that shadowed the builtin `str`.)
        response = self.client.post(
            reverse('updateGallery', kwargs={'pk': galeria.gallery.id}),
            {'photos': p1})
        self.assertEqual(response.status_code, 302)
        galeria.refresh_from_db()
        self.assertEqual(photo_count_before + 1, galeria.gallery.photo_count())
        description = "The Catcher in the Rye"
        response = self.client.post(
            reverse('updateGallery', kwargs={'pk': galeria.gallery.id}),
            {'description': description, 'photos': p1})
        self.assertEqual(response.status_code, 302)
| 37.43662 | 92 | 0.647856 | from django.test import TestCase, RequestFactory
from django.urls import reverse
from django.apps import apps
from .apps import PhotologueCustomConfig
from .form import GalleryExtendedForm
from Categorias.models import Categoria
from .models import GalleryExtended
from usuarios.models import MyUser
from servicios.models import Service
from photologue.models import Photo
class GalleryViewTests(TestCase):
    """Integration tests for the photologue_custom gallery model, form and view."""
    def setUp(self):
        """Create one user, one category and two services shared by the tests."""
        self.factory = RequestFactory()
        self.user = MyUser.objects.create_user(
            email=" test@g.com ",
            date_of_birth="1995-01-02",
            password=" 123123b ",
            first_name="Test",
            last_name="Apellido"
        )
        self.categoria = Categoria.objects.create(
            name='Construcción',
            description='Trabajos de construcción'
        )
        self.service = Service.objects.create(
            name='Albañil',
            description='Servicios generales',
            category=self.categoria
        )
        self.service2 = Service.objects.create(
            name='Yesero',
            description='Yesería en general',
            category=self.categoria
        )
    def test_apps(self):
        """The app config is registered under the expected app name."""
        self.assertEqual(PhotologueCustomConfig.name, 'photologue_custom')
        self.assertEqual(apps.get_app_config('photologue_custom').name, 'photologue_custom')
    def test_gallery(self):
        """GalleryExtended.nuevo titles the gallery '<first_name>-<service>'."""
        custom_g = GalleryExtended.nuevo(self.service, self.user)
        string = self.user.first_name + '-' + self.service.name
        # NOTE(review): assertEquals is a deprecated alias of assertEqual.
        self.assertEquals(custom_g.gallery.title, string)
        # Partial payload: the form requires more fields, so it must not validate.
        form = GalleryExtendedForm(data={
            'title': custom_g.gallery.title, 'slug': custom_g.gallery.slug,
            'description': custom_g.gallery.description,
        })
        self.assertFalse(form.is_valid())
    def test_update_gallery(self):
        """POSTing to updateGallery adds the photo and redirects (302)."""
        galeria = GalleryExtended.nuevo(self.service2, self.user)
        pc = galeria.gallery.photo_count()
        p1 = Photo.objects.create(title='test photo 1')
        galeria.gallery.photos.add(p1)
        # NOTE(review): this local is unused and shadows the builtin `str`.
        str = galeria.gallery.photo_count()
        response = self.client.post(
            reverse('updateGallery', kwargs={'pk': galeria.gallery.id}),
            {'photos': p1})
        self.assertEqual(response.status_code, 302)
        galeria.refresh_from_db()
        self.assertEqual(pc + 1, galeria.gallery.photo_count())
        description = "The Catcher in the Rye"
        response = self.client.post(
            reverse('updateGallery', kwargs={'pk': galeria.gallery.id}),
            {'description': description, 'photos': p1})
        self.assertEqual(response.status_code, 302)
| true | true |
1c2fe86730e711e6fb182397ce9e735878b5b04b | 1,670 | py | Python | alipay/aop/api/domain/CloudbusCommonResult.py | antopen/alipay-sdk-python-all | 8e51c54409b9452f8d46c7bb10eea7c8f7e8d30c | [
"Apache-2.0"
] | 213 | 2018-08-27T16:49:32.000Z | 2021-12-29T04:34:12.000Z | alipay/aop/api/domain/CloudbusCommonResult.py | antopen/alipay-sdk-python-all | 8e51c54409b9452f8d46c7bb10eea7c8f7e8d30c | [
"Apache-2.0"
] | 29 | 2018-09-29T06:43:00.000Z | 2021-09-02T03:27:32.000Z | alipay/aop/api/domain/CloudbusCommonResult.py | antopen/alipay-sdk-python-all | 8e51c54409b9452f8d46c7bb10eea7c8f7e8d30c | [
"Apache-2.0"
] | 59 | 2018-08-27T16:59:26.000Z | 2022-03-25T10:08:15.000Z | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import json
from alipay.aop.api.constant.ParamConstants import *
class CloudbusCommonResult(object):
    """Generic Alipay Cloudbus API result: a code, a payload and a message.

    Values are held in private attributes and exposed via properties;
    (de)serialization follows the SDK's alipay-dict convention, where falsy
    fields are omitted and nested objects expose ``to_alipay_dict``.
    """

    _FIELDS = ('code', 'data', 'message')

    def __init__(self):
        self._code = None
        self._data = None
        self._message = None

    @property
    def code(self):
        return self._code

    @code.setter
    def code(self, value):
        self._code = value

    @property
    def data(self):
        return self._data

    @data.setter
    def data(self, value):
        self._data = value

    @property
    def message(self):
        return self._message

    @message.setter
    def message(self, value):
        self._message = value

    def to_alipay_dict(self):
        """Serialize the populated (truthy) fields into a plain dict."""
        params = dict()
        for field in self._FIELDS:
            value = getattr(self, field)
            if value:
                if hasattr(value, 'to_alipay_dict'):
                    params[field] = value.to_alipay_dict()
                else:
                    params[field] = value
        return params

    @staticmethod
    def from_alipay_dict(d):
        """Build a result object from a dict; empty/None input yields None."""
        if not d:
            return None
        result = CloudbusCommonResult()
        for field in CloudbusCommonResult._FIELDS:
            if field in d:
                setattr(result, field, d[field])
        return result
| 23.521127 | 65 | 0.534132 |
import json
from alipay.aop.api.constant.ParamConstants import *
class CloudbusCommonResult(object):
    """Generic Alipay Cloudbus API result: a code, a payload and a message."""
    def __init__(self):
        self._code = None
        self._data = None
        self._message = None
    @property
    def code(self):
        return self._code
    @code.setter
    def code(self, value):
        self._code = value
    @property
    def data(self):
        return self._data
    @data.setter
    def data(self, value):
        self._data = value
    @property
    def message(self):
        return self._message
    @message.setter
    def message(self, value):
        self._message = value
    def to_alipay_dict(self):
        """Serialize the populated (truthy) fields into a plain dict.

        Nested objects that expose ``to_alipay_dict`` are serialized
        recursively, per the SDK convention.
        """
        params = dict()
        if self.code:
            if hasattr(self.code, 'to_alipay_dict'):
                params['code'] = self.code.to_alipay_dict()
            else:
                params['code'] = self.code
        if self.data:
            if hasattr(self.data, 'to_alipay_dict'):
                params['data'] = self.data.to_alipay_dict()
            else:
                params['data'] = self.data
        if self.message:
            if hasattr(self.message, 'to_alipay_dict'):
                params['message'] = self.message.to_alipay_dict()
            else:
                params['message'] = self.message
        return params
    @staticmethod
    def from_alipay_dict(d):
        """Build a result object from a dict; empty/None input yields None."""
        if not d:
            return None
        o = CloudbusCommonResult()
        if 'code' in d:
            o.code = d['code']
        if 'data' in d:
            o.data = d['data']
        if 'message' in d:
            o.message = d['message']
        return o
| true | true |
1c2fe965d8089210f836636560e36b0573e31f72 | 6,563 | py | Python | Script/clf_pre_ln_tf.py | ywu94/Tencent-Ads-Algo-Comp-2020 | 8f008fc1cc21c832e6bdb76056d12ad357da5475 | [
"MIT"
] | 27 | 2020-06-09T18:33:45.000Z | 2021-11-15T11:49:54.000Z | Script/clf_pre_ln_tf.py | Wannaman/Tencent-Ads-Algo-Comp-2020 | 8f008fc1cc21c832e6bdb76056d12ad357da5475 | [
"MIT"
] | 2 | 2020-06-21T01:58:56.000Z | 2020-11-12T18:12:40.000Z | Script/clf_pre_ln_tf.py | Wannaman/Tencent-Ads-Algo-Comp-2020 | 8f008fc1cc21c832e6bdb76056d12ad357da5475 | [
"MIT"
] | 15 | 2020-06-07T14:19:57.000Z | 2020-07-16T08:27:42.000Z | import numpy as np
import torch
from torch import nn
import torch.nn.functional as F
from torch.nn.init import xavier_uniform_, kaiming_normal_
class Pre_LN_Transformer_Encoder_Layer(nn.Module):
    """
    Encoder layer for Pre-LN Transformer

    Pre-LN: LayerNorm is applied *before* the attention / feed-forward
    sub-blocks, and each sub-block's output is added back residually.

    Parameters:
        d_model: embedding / model dimension.
        n_head: number of attention heads.
        intermediate_size: hidden width of the position-wise feed-forward block.
        device: torch device for the padding mask (default CPU).
        dropout: dropout probability used throughout the layer.
    """
    def __init__(self, d_model, n_head, intermediate_size=2048, device=None, dropout=0.1, **kwargs):
        super(Pre_LN_Transformer_Encoder_Layer, self).__init__(**kwargs)
        self.d_model = d_model
        self.n_head = n_head
        self.intermediate_size = intermediate_size
        self.device = device if device else torch.device('cpu')
        self.dropout = dropout
        self.ln_layer_1 = nn.LayerNorm(d_model)
        self.mha_layer = nn.MultiheadAttention(d_model, n_head, dropout=dropout)
        self.attn_dropout = nn.Dropout(p=dropout)
        self.ln_layer_2 = nn.LayerNorm(d_model)
        self.ffn_layer_1 = nn.Linear(d_model, intermediate_size)
        self.dropout_1 = nn.Dropout(p=dropout)
        self.ffn_layer_2 = nn.Linear(intermediate_size, d_model)
        self.dropout_2 = nn.Dropout(p=dropout)
    def _get_padding_mask(self, batch_size, seq_len, inp_len):
        """Boolean mask, True at padded positions (position >= sample length)."""
        padding_mask = np.ones((batch_size, seq_len))
        for index, l in enumerate(inp_len):
            padding_mask[index,:l] = 0
        return torch.from_numpy(padding_mask).bool().to(self.device)
    def forward(self, inp, inp_len):
        """Apply self-attention then FFN, each with pre-LN and residual add.

        inp: (batch_size, seq_len, d_model); inp_len: per-sample valid lengths.
        """
        batch_size, seq_len, _ = inp.shape
        padding_mask = self._get_padding_mask(batch_size, seq_len, inp_len) # (batch_size, seq_len)
        # nn.MultiheadAttention expects (seq_len, batch_size, d_model)
        inp1 = self.ln_layer_1(inp).permute(1,0,2) # (seq_len, batch_size, d_model)
        inp2 = self.mha_layer(inp1, inp1, inp1, key_padding_mask=padding_mask)[0].permute(1,0,2) # (batch_size, seq_len, d_model)
        inp = inp + self.attn_dropout(inp2)
        inp1 = self.ln_layer_2(inp)
        inp2 = self.ffn_layer_2(self.dropout_1(F.relu(self.ffn_layer_1(inp1))))
        inp = inp + self.dropout_2(inp2)
        return inp
class Pre_LN_Transformer_Encoder(nn.Module):
    """
    Stack of ``n_layer`` Pre-LN Transformer encoder layers.

    Parameters:
        n_layer: number of stacked encoder layers.
        d_model: embedding / model dimension fed to every layer.
        n_head: number of attention heads per layer.
        intermediate_size: hidden width of each layer's feed-forward block.
        device: torch device for the padding masks (default CPU).
        dropout: dropout probability applied inside every layer.
    """
    def __init__(self, n_layer, d_model, n_head, intermediate_size=2048, device=None, dropout=0.1, **kwargs):
        super(Pre_LN_Transformer_Encoder, self).__init__(**kwargs)
        self.n_layer = n_layer
        self.d_model = d_model
        self.n_head = n_head
        self.intermediate_size = intermediate_size
        self.device = device if device else torch.device('cpu')
        self.dropout = dropout
        for index in range(n_layer):
            # Bug fix: the configured `dropout` was previously ignored and a
            # hard-coded 0.1 was passed to every sub-layer.
            setattr(self, 'pre_ln_tf_encoder_{}'.format(index),
                    Pre_LN_Transformer_Encoder_Layer(d_model, n_head, intermediate_size=intermediate_size, device=self.device, dropout=dropout))
    def forward(self, inp, inp_len):
        """Apply all encoder layers in order.

        inp: (batch_size, seq_len, d_model); inp_len: per-sample valid lengths.
        """
        for index in range(self.n_layer):
            inp = getattr(self, 'pre_ln_tf_encoder_{}'.format(index))(inp, inp_len)
        return inp
class MLP_Classification_Layer(nn.Module):
    """
    Multilayer perceptron classification head.

    Two (Linear -> BatchNorm -> ReLU -> Dropout) stages of width 4096 and
    2048, followed by a final Linear projection to ``out_size`` logits.
    """
    def __init__(self, inp_size, out_size, dropout=0.5, **kwargs):
        super(MLP_Classification_Layer, self).__init__(**kwargs)
        self.inp_size = inp_size
        self.out_size = out_size
        self.dropout = dropout
        # Submodules are created in the same order as before so that seeded
        # parameter initialisation stays reproducible.
        self.mlp_1 = nn.Linear(inp_size, 4096)
        self.batchnorm_1 = nn.BatchNorm1d(4096)
        self.mlp_dropout_1 = nn.Dropout(p=dropout)
        self.mlp_2 = nn.Linear(4096, 2048)
        self.batchnorm_2 = nn.BatchNorm1d(2048)
        self.mlp_dropout_2 = nn.Dropout(p=dropout)
        self.mlp_3 = nn.Linear(2048, out_size)
    def forward(self, inp):
        """Map (batch_size, inp_size) to (batch_size, out_size) logits."""
        hidden = self.mlp_dropout_1(F.relu(self.batchnorm_1(self.mlp_1(inp))))
        hidden = self.mlp_dropout_2(F.relu(self.batchnorm_2(self.mlp_2(hidden))))
        return self.mlp_3(hidden)
class Multi_Seq_Pre_LN_Transformer_Encoder_Classifier(nn.Module):
    """Classifier over several parallel sequences.

    Each input sequence i gets its own Pre-LN Transformer encoder + LayerNorm
    + bidirectional LSTM; from each LSTM output both the last valid timestep
    and a max-pool over time are collected (2*hidden each, hence 4*hidden per
    sequence), concatenated, batch-normed and fed to an MLP head.

    NOTE(review): `rnn_dropout` is stored but never wired into the LSTMs
    (single-layer LSTMs ignore dropout anyway) — presumably intentional;
    confirm.
    """
    def __init__(self, embed_size, hidden_size, n_layer, n_head, out_size, intermediate_size=2048, max_seq_len=100, device=None, tf_dropout=0.1, rnn_dropout=0.2, dnn_dropout=0.5, **kwargs):
        super(Multi_Seq_Pre_LN_Transformer_Encoder_Classifier, self).__init__(**kwargs)
        self.embed_size = embed_size
        self.hidden_size = hidden_size
        self.n_layer = n_layer
        self.n_head = n_head
        self.out_size = out_size
        self.intermediate_size = intermediate_size
        self.max_seq_len = max_seq_len
        self.device = device if device else torch.device('cpu')
        self.tf_dropout = tf_dropout
        self.rnn_dropout = rnn_dropout
        self.dnn_dropout = dnn_dropout
        self.n_extraction = len(embed_size)
        # 4*h per sequence: last-state (2h, bidirectional) + max-pool (2h).
        self.mlp_inp_size = sum(map(lambda x:4*x, hidden_size))
        for index, e_size in enumerate(embed_size):
            setattr(self, 'pre_ln_tf_encoder_{}'.format(index), Pre_LN_Transformer_Encoder(n_layer, e_size, n_head, intermediate_size=intermediate_size, device=self.device, dropout=tf_dropout))
            setattr(self, 'ln_{}'.format(index), nn.LayerNorm(e_size))
        for index, (e_size, h_size) in enumerate(zip(embed_size, hidden_size)):
            setattr(self, 'lstm_{}'.format(index), nn.LSTM(input_size=e_size, hidden_size=h_size, bias=True, bidirectional=True))
        self.max_pooling = nn.MaxPool1d(max_seq_len)
        self.inp_bn = nn.BatchNorm1d(self.mlp_inp_size)
        self.inp_dropout = nn.Dropout(p=dnn_dropout)
        self.mlp_layer = MLP_Classification_Layer(self.mlp_inp_size, out_size, dropout=dnn_dropout)
    def forward(self, *args):
        """args = one tensor per sequence plus, last, the shared length array."""
        assert len(args)==self.n_extraction+1
        buf, inp_len = [], args[-1]
        for index, inp in enumerate(args[:-1]):
            inp = getattr(self, 'pre_ln_tf_encoder_{}'.format(index))(inp, inp_len) # (batch_size, seq_len, embed_size)
            inp = getattr(self, 'ln_{}'.format(index))(inp) # (batch_size, seq_len, embed_size)
            inp = getattr(self, 'lstm_{}'.format(index))(inp.permute(1,0,2))[0].permute(1,0,2) # (batch_size, seq_len, 2*hidden_size)
            buf.append(inp[np.arange(len(inp_len)), inp_len-1, :]) # (batch_size, 2*hidden_size)
            buf.append(self.max_pooling(inp.permute(0,2,1)).squeeze(2)) # (batch_size, 2*hidden_size)
        out = self.inp_bn(torch.cat(buf, dim=1)) # (batch_size, Σ4*hidden_size)
        out = self.mlp_layer(self.inp_dropout(F.relu(out))) # (batch_size, out_size)
        return out
| 45.576389 | 186 | 0.689928 | import numpy as np
import torch
from torch import nn
import torch.nn.functional as F
from torch.nn.init import xavier_uniform_, kaiming_normal_
class Pre_LN_Transformer_Encoder_Layer(nn.Module):
    """Pre-LN Transformer encoder layer: LayerNorm precedes each sub-block,
    with residual connections around self-attention and the feed-forward net.
    """
    def __init__(self, d_model, n_head, intermediate_size=2048, device=None, dropout=0.1, **kwargs):
        super(Pre_LN_Transformer_Encoder_Layer, self).__init__(**kwargs)
        self.d_model = d_model
        self.n_head = n_head
        self.intermediate_size = intermediate_size
        self.device = device if device else torch.device('cpu')
        self.dropout = dropout
        self.ln_layer_1 = nn.LayerNorm(d_model)
        self.mha_layer = nn.MultiheadAttention(d_model, n_head, dropout=dropout)
        self.attn_dropout = nn.Dropout(p=dropout)
        self.ln_layer_2 = nn.LayerNorm(d_model)
        self.ffn_layer_1 = nn.Linear(d_model, intermediate_size)
        self.dropout_1 = nn.Dropout(p=dropout)
        self.ffn_layer_2 = nn.Linear(intermediate_size, d_model)
        self.dropout_2 = nn.Dropout(p=dropout)
    def _get_padding_mask(self, batch_size, seq_len, inp_len):
        # True marks padded positions (index >= the sample's valid length).
        padding_mask = np.ones((batch_size, seq_len))
        for index, l in enumerate(inp_len):
            padding_mask[index,:l] = 0
        return torch.from_numpy(padding_mask).bool().to(self.device)
    def forward(self, inp, inp_len):
        """inp: (batch, seq, d_model); inp_len: per-sample valid lengths."""
        batch_size, seq_len, _ = inp.shape
        padding_mask = self._get_padding_mask(batch_size, seq_len, inp_len)
        # nn.MultiheadAttention expects (seq_len, batch, d_model)
        inp1 = self.ln_layer_1(inp).permute(1,0,2)
        inp2 = self.mha_layer(inp1, inp1, inp1, key_padding_mask=padding_mask)[0].permute(1,0,2)
        inp = inp + self.attn_dropout(inp2)
        inp1 = self.ln_layer_2(inp)
        inp2 = self.ffn_layer_2(self.dropout_1(F.relu(self.ffn_layer_1(inp1))))
        inp = inp + self.dropout_2(inp2)
        return inp
class Pre_LN_Transformer_Encoder(nn.Module):
    """Stack of ``n_layer`` Pre-LN Transformer encoder layers."""
    def __init__(self, n_layer, d_model, n_head, intermediate_size=2048, device=None, dropout=0.1, **kwargs):
        super(Pre_LN_Transformer_Encoder, self).__init__(**kwargs)
        self.n_layer = n_layer
        self.d_model = d_model
        self.n_head = n_head
        self.intermediate_size = intermediate_size
        self.device = device if device else torch.device('cpu')
        self.dropout = dropout
        for index in range(n_layer):
            # Bug fix: the configured `dropout` was previously ignored and a
            # hard-coded 0.1 was passed to every sub-layer.
            setattr(self, 'pre_ln_tf_encoder_{}'.format(index),
                    Pre_LN_Transformer_Encoder_Layer(d_model, n_head, intermediate_size=intermediate_size, device=self.device, dropout=dropout))
    def forward(self, inp, inp_len):
        """Apply all encoder layers in order on (batch, seq, d_model) input."""
        for index in range(self.n_layer):
            inp = getattr(self, 'pre_ln_tf_encoder_{}'.format(index))(inp, inp_len)
        return inp
class MLP_Classification_Layer(nn.Module):
    """MLP head: two (Linear -> BatchNorm -> ReLU -> Dropout) stages of width
    4096 and 2048, then a final Linear projection to ``out_size`` logits.
    """
    def __init__(self, inp_size, out_size, dropout=0.5, **kwargs):
        super(MLP_Classification_Layer, self).__init__(**kwargs)
        self.inp_size = inp_size
        self.out_size = out_size
        self.dropout = dropout
        self.mlp_1 = nn.Linear(inp_size, 4096)
        self.batchnorm_1 = nn.BatchNorm1d(4096)
        self.mlp_dropout_1 = nn.Dropout(p=dropout)
        self.mlp_2 = nn.Linear(4096, 2048)
        self.batchnorm_2 = nn.BatchNorm1d(2048)
        self.mlp_dropout_2 = nn.Dropout(p=dropout)
        self.mlp_3 = nn.Linear(2048, out_size)
    def forward(self, inp):
        """Map (batch_size, inp_size) to (batch_size, out_size) logits."""
        mlp_out = self.mlp_1(inp)
        mlp_out = self.mlp_dropout_1(F.relu(self.batchnorm_1(mlp_out)))
        mlp_out = self.mlp_2(mlp_out)
        mlp_out = self.mlp_dropout_2(F.relu(self.batchnorm_2(mlp_out)))
        mlp_out = self.mlp_3(mlp_out)
        return mlp_out
class Multi_Seq_Pre_LN_Transformer_Encoder_Classifier(nn.Module):
    """Classifier over several parallel sequences: per-sequence Transformer
    encoder + LayerNorm + BiLSTM, pooled features concatenated into an MLP.

    NOTE(review): `rnn_dropout` is stored but never wired into the LSTMs —
    presumably intentional; confirm.
    """
    def __init__(self, embed_size, hidden_size, n_layer, n_head, out_size, intermediate_size=2048, max_seq_len=100, device=None, tf_dropout=0.1, rnn_dropout=0.2, dnn_dropout=0.5, **kwargs):
        super(Multi_Seq_Pre_LN_Transformer_Encoder_Classifier, self).__init__(**kwargs)
        self.embed_size = embed_size
        self.hidden_size = hidden_size
        self.n_layer = n_layer
        self.n_head = n_head
        self.out_size = out_size
        self.intermediate_size = intermediate_size
        self.max_seq_len = max_seq_len
        self.device = device if device else torch.device('cpu')
        self.tf_dropout = tf_dropout
        self.rnn_dropout = rnn_dropout
        self.dnn_dropout = dnn_dropout
        self.n_extraction = len(embed_size)
        # 4*h per sequence: BiLSTM last-state (2h) + max-pool over time (2h).
        self.mlp_inp_size = sum(map(lambda x:4*x, hidden_size))
        for index, e_size in enumerate(embed_size):
            setattr(self, 'pre_ln_tf_encoder_{}'.format(index), Pre_LN_Transformer_Encoder(n_layer, e_size, n_head, intermediate_size=intermediate_size, device=self.device, dropout=tf_dropout))
            setattr(self, 'ln_{}'.format(index), nn.LayerNorm(e_size))
        for index, (e_size, h_size) in enumerate(zip(embed_size, hidden_size)):
            setattr(self, 'lstm_{}'.format(index), nn.LSTM(input_size=e_size, hidden_size=h_size, bias=True, bidirectional=True))
        self.max_pooling = nn.MaxPool1d(max_seq_len)
        self.inp_bn = nn.BatchNorm1d(self.mlp_inp_size)
        self.inp_dropout = nn.Dropout(p=dnn_dropout)
        self.mlp_layer = MLP_Classification_Layer(self.mlp_inp_size, out_size, dropout=dnn_dropout)
    def forward(self, *args):
        """args = one tensor per sequence plus, last, the shared length array."""
        assert len(args)==self.n_extraction+1
        buf, inp_len = [], args[-1]
        for index, inp in enumerate(args[:-1]):
            inp = getattr(self, 'pre_ln_tf_encoder_{}'.format(index))(inp, inp_len)
            inp = getattr(self, 'ln_{}'.format(index))(inp)
            inp = getattr(self, 'lstm_{}'.format(index))(inp.permute(1,0,2))[0].permute(1,0,2)
            buf.append(inp[np.arange(len(inp_len)), inp_len-1, :])
            buf.append(self.max_pooling(inp.permute(0,2,1)).squeeze(2))
        out = self.inp_bn(torch.cat(buf, dim=1))
        out = self.mlp_layer(self.inp_dropout(F.relu(out)))
        return out
| true | true |
1c2fe96ba751a5bd946a6277f0b8f492d52c2402 | 2,126 | py | Python | src/openvino_dnn_detector.py | FenixFly/UNN_HPC_SCHOOL_2019_OPENVINO | 5e5ce1fa14d56549c7809d1a24bc03353ffadcbb | [
"Apache-2.0"
] | null | null | null | src/openvino_dnn_detector.py | FenixFly/UNN_HPC_SCHOOL_2019_OPENVINO | 5e5ce1fa14d56549c7809d1a24bc03353ffadcbb | [
"Apache-2.0"
] | 2 | 2019-11-12T09:03:18.000Z | 2019-11-18T18:19:56.000Z | src/openvino_dnn_detector.py | FenixFly/UNN_HPC_SCHOOL_2019_OPENVINO | 5e5ce1fa14d56549c7809d1a24bc03353ffadcbb | [
"Apache-2.0"
] | null | null | null | import cv2
import numpy
from openvino.inference_engine import IENetwork, IECore
class OpenvinoDnnDetector:
    """Object detector backed by the OpenVINO inference engine (CPU device).

    Expects an SSD-style detection output of shape (1, 1, N, 7); boxes with
    confidence > 0.5 are drawn onto the input image.
    """
    def __init__(self, weightsPath=None, configPath=None,
                 task_type=None, cpu_extension = None):
        self.weights = weightsPath
        self.config = configPath
        self.task_type = task_type
        # Create net
        #self.net = cv2.dnn.readNet(self.weights, self.config)
        self.ie = IECore()
        self.net = IENetwork(model=configPath, weights=weightsPath)
        if cpu_extension:
            self.ie.add_extension(cpu_extension, 'CPU')
        self.exec_net = self.ie.load_network(network=self.net, device_name='CPU')
    def _output_detection(self, output, img):
        """Draw boxes with confidence > 0.5 (coords are relative, scaled to img)."""
        (h, w) = img.shape[:2]
        for i in range(0, output.shape[2]):
            confidence = output[0, 0, i, 2]
            if confidence > 0.5:
                print(i, confidence)
                box = output[0, 0, i, 3:7] * numpy.array([w, h, w, h])
                print(box)
                (startX, startY, endX, endY) = box.astype("int")
                text = "{:.2f}%".format(confidence * 100)
                # Keep the label inside the frame when the box touches the top.
                y = startY - 10 if startY - 10 > 10 else startY + 10
                cv2.rectangle(img, (startX, startY), (endX, endY),
                    (0, 255, 0), 2)
                cv2.putText(img, text, (startX, y),
                    cv2.FONT_HERSHEY_COMPLEX, 0.45, (0, 0, 255), 1)
        return img
    def prepare_image(self, image, h, w):
        """Resize to the network's (h, w) and switch layout HWC -> CHW."""
        if image.shape[:-1] != (h, w):
            image = cv2.resize(image, (w, h))
        image = image.transpose((2, 0, 1))  # Change data layout from HWC to CHW
        return image
    def detect(self, image):
        """Run inference on one image and return it annotated with detections."""
        input_blob = next(iter(self.net.inputs))
        out_blob = next(iter(self.net.outputs))
        n, c, h, w = self.net.inputs[input_blob].shape
        blob = self.prepare_image(image, h, w)
        output = self.exec_net.infer(inputs={input_blob: blob})
        output = output[out_blob]
        print(output.shape, output)
        return self._output_detection(output, image) | 40.113208 | 81 | 0.557855 | import cv2
import numpy
from openvino.inference_engine import IENetwork, IECore
class OpenvinoDnnDetector:
    """Object detector backed by the OpenVINO inference engine (CPU device)."""
    def __init__(self, weightsPath=None, configPath=None,
                 task_type=None, cpu_extension = None):
        self.weights = weightsPath
        self.config = configPath
        self.task_type = task_type
        self.ie = IECore()
        self.net = IENetwork(model=configPath, weights=weightsPath)
        if cpu_extension:
            self.ie.add_extension(cpu_extension, 'CPU')
        self.exec_net = self.ie.load_network(network=self.net, device_name='CPU')
    def _output_detection(self, output, img):
        # Draw boxes with confidence > 0.5; coords are relative, scaled to img.
        (h, w) = img.shape[:2]
        for i in range(0, output.shape[2]):
            confidence = output[0, 0, i, 2]
            if confidence > 0.5:
                print(i, confidence)
                box = output[0, 0, i, 3:7] * numpy.array([w, h, w, h])
                print(box)
                (startX, startY, endX, endY) = box.astype("int")
                text = "{:.2f}%".format(confidence * 100)
                # Keep the label inside the frame when the box touches the top.
                y = startY - 10 if startY - 10 > 10 else startY + 10
                cv2.rectangle(img, (startX, startY), (endX, endY),
                    (0, 255, 0), 2)
                cv2.putText(img, text, (startX, y),
                    cv2.FONT_HERSHEY_COMPLEX, 0.45, (0, 0, 255), 1)
        return img
    def prepare_image(self, image, h, w):
        # Resize to the network's (h, w) and switch layout HWC -> CHW.
        if image.shape[:-1] != (h, w):
            image = cv2.resize(image, (w, h))
        image = image.transpose((2, 0, 1))
        return image
    def detect(self, image):
        # Run inference on one image and return it annotated with detections.
        input_blob = next(iter(self.net.inputs))
        out_blob = next(iter(self.net.outputs))
        n, c, h, w = self.net.inputs[input_blob].shape
        blob = self.prepare_image(image, h, w)
        output = self.exec_net.infer(inputs={input_blob: blob})
        output = output[out_blob]
        print(output.shape, output)
        return self._output_detection(output, image) | true | true |
1c2fe973c5f524536ba18e5ee4f9dbd3bd74c8da | 800 | py | Python | pong/RandomUtils.py | FireFlyForLife/Python-Pong | bb0c7e2173f87a379fc5426c2ab24df859238ace | [
"MIT"
] | null | null | null | pong/RandomUtils.py | FireFlyForLife/Python-Pong | bb0c7e2173f87a379fc5426c2ab24df859238ace | [
"MIT"
] | null | null | null | pong/RandomUtils.py | FireFlyForLife/Python-Pong | bb0c7e2173f87a379fc5426c2ab24df859238ace | [
"MIT"
] | null | null | null | def randomRanges(ranges):
index = int( random(len(ranges)) )
val = random(float(ranges[index][0]), float(ranges[index][1]))
return val
lastTime = 0
def deltaTime():
    """Milliseconds elapsed since the previous call (Processing's millis()).

    Note: millis() is read twice, exactly as in the original implementation.
    """
    global lastTime
    elapsed = millis() - lastTime
    lastTime = millis()
    return elapsed
#http://stackoverflow.com/questions/401847/circle-rectangle-collision-detection-intersection
def intersects(ball, platform):
    """Circle/axis-aligned-rectangle overlap test (closest-point method)."""
    # Closest point on the rectangle to the circle centre.
    closestX = constrain(ball.pos.x, platform.pos.x, platform.pos.x + platform.w);
    closestY = constrain(ball.pos.y, platform.pos.y, platform.pos.y + platform.h);
    distanceX = ball.pos.x - closestX;
    distanceY = ball.pos.y - closestY;
    # NOTE(review): radius is ball.r / 2 — confirm whether ball.r is a diameter.
    radius = ball.r / 2
    # Compare squared distances to avoid a sqrt.
    distanceSquared = (distanceX * distanceX) + (distanceY * distanceY);
    return distanceSquared < (radius * radius); | 33.333333 | 92 | 0.6775 | def randomRanges(ranges):
index = int( random(len(ranges)) )
val = random(float(ranges[index][0]), float(ranges[index][1]))
return val
# Timestamp of the previous deltaTime() call, in milliseconds.
lastTime = 0
def deltaTime():
    """Milliseconds elapsed since the previous call (Processing's millis())."""
    global lastTime
    delta = millis() - lastTime
    lastTime = millis()
    return delta
def intersects(ball, platform):
    """Circle/axis-aligned-rectangle overlap test (closest-point method)."""
    closestX = constrain(ball.pos.x, platform.pos.x, platform.pos.x + platform.w);
    closestY = constrain(ball.pos.y, platform.pos.y, platform.pos.y + platform.h);
    distanceX = ball.pos.x - closestX;
    distanceY = ball.pos.y - closestY;
    # NOTE(review): radius is ball.r / 2 — confirm whether ball.r is a diameter.
    radius = ball.r / 2
    distanceSquared = (distanceX * distanceX) + (distanceY * distanceY);
    return distanceSquared < (radius * radius); | true | true |
1c2fe9c8f31aa841529840a16b5969ee18f61dd2 | 1,522 | py | Python | CreateNetworks.py | yukimasano/nw_SEIR | af9d1298861eba8aadd1517a92e176f76a7218a2 | [
"MIT"
] | 7 | 2019-02-13T18:04:34.000Z | 2021-01-17T15:49:40.000Z | CreateNetworks.py | yukimasano/nw_SEIR | af9d1298861eba8aadd1517a92e176f76a7218a2 | [
"MIT"
] | 2 | 2017-11-15T21:52:33.000Z | 2020-02-10T08:33:25.000Z | CreateNetworks.py | yukimasano/nw_SEIR | af9d1298861eba8aadd1517a92e176f76a7218a2 | [
"MIT"
] | 5 | 2019-05-05T01:38:48.000Z | 2020-04-03T08:32:48.000Z | # -*- coding: utf-8 -*-
"""
CreateNetworks.py
This algorithm aggregates the temporally resolved network data
default= 20min aggregation, by setting mins=1/3 we get 20sec resolution.
Output: Tensor A20 for day 1, tensor B20 for day 2 along with the the times as vectors.
@author:
Yuki M. Asano
"""
import numpy as np
def createnw(data, metadata, mins):
    """Aggregate temporally resolved contact events into a contact tensor.

    Parameters
    ----------
    data : int array of shape (n_events, 3)
        Columns are [timestamp, id1, id2], rows sorted by timestamp.
    metadata : 2-D string array
        First column holds node ids (as strings); row order defines indices.
    mins : number
        Aggregation window in minutes; timestamps advance in 20-second
        ticks, so one window spans 20*3*mins time units.

    Returns
    -------
    (A, times) : A is an (N+1, N+1, T+1) int tensor, A[i, j, t] counting
        contacts of pair (i, j) (stored lower-triangular, i >= j) in slot t;
        times is range(last_slot).
    """
    tt = -1
    window = 20 * 3 * mins  # slot length in the data's time units
    # int() because np.zeros needs integer dimensions; np.int is removed
    # from modern numpy, plain int is the correct dtype alias.
    maxtime = int(np.ceil((data[-1, 0] - data[0, 0]) / window))
    numIndividuals = len(metadata[:, 0])
    # Bug fix: first dimension previously used the undefined name
    # `numchildren`, which raised NameError; both dims index individuals.
    A = np.zeros((numIndividuals + 1, numIndividuals + 1, maxtime + 1), dtype=int)
    told = 0
    for row in range(len(data[:, 0])):
        t = data[row, 0]
        # Map raw ids to tensor indices via the metadata id column.
        id1 = int(np.argwhere(str(data[row, 1]) == metadata[:, 0]))
        id2 = int(np.argwhere(str(data[row, 2]) == metadata[:, 0]))
        if (t >= (told + window)) and t != told:  # start a new time slot
            tt += 1
            told = t
        if id1 > id2:  # fill lower triangular part only
            A[id1][id2][tt] += 1
        else:
            A[id2][id1][tt] += 1
    return A, range(tt)
# Load both days of the primary-school contact data and aggregate in
# 20-minute windows.  dtype=int: np.int is removed from modern numpy.
data = np.loadtxt('primaryschool_wo_class.csv', delimiter=',', dtype=int)
firstday = data[0:60623, :]
secondday = data[60623:, :]
metadata = np.loadtxt('metadata_primaryschool.txt', delimiter='\t', dtype='S16')
# create 20min aggregated data
[A20, time] = createnw(firstday, metadata, 20)
[B20, time2] = createnw(secondday, metadata, 20)
# save data as numpy objects.  Bug fix: the script previously saved the
# undefined names A, B and `no` (NameError); the computed tensors are
# A20/B20, and the nonexistent numbers.npy dump was dropped.
np.save('day1.npy', A20)
np.save('day2.npy', B20)
np.save('times1.npy', time)
np.save('times2.npy', time2)
| 29.843137 | 87 | 0.639947 |
import numpy as np
def createnw(data,metadata,mins):
    """Aggregate contact events [t, id1, id2] into an (N+1, N+1, T+1)
    lower-triangular contact-count tensor with 20*3*mins-tick time slots.
    Returns (A, range(last_slot)).
    """
    tt=-1
    maxtime=np.ceil((data[-1,0] - data[0,0] )/(20*3*mins))
    numIndividuals=len(metadata[:, 0])
    # NOTE(review): startid is unused.
    startid=int(metadata[0][0])
    # NOTE(review): `numchildren` is undefined here (NameError) — almost
    # certainly meant numIndividuals; np.int is removed from modern numpy.
    A= np.zeros((numchildren+1,numIndividuals+1, maxtime+1),dtype=np.int)
    told=0
    for row in range(len(data[:,0])):
        t=data[row,0]
        # Map raw ids to tensor indices via the metadata id column.
        id1=int(np.argwhere(str(data[row,1])== metadata[:,0]))
        id2=int(np.argwhere(str(data[row,2])== metadata[:,0]))
        if (t>= (told+(20*3*mins))) and t!=told:  # start a new time slot
            tt+=1
            told=t
        if id1>id2:  # fill lower triangular part only
            A[id1][id2][tt]+=1
        else:
            A[id2][id1][tt]+=1
    return A, range(tt)
# Load both days of the primary-school contact data and aggregate in
# 20-minute windows.  NOTE(review): np.int is removed from modern numpy.
data=np.loadtxt('primaryschool_wo_class.csv', delimiter=',', dtype=np.int)
firstday=data[0:60623,:]
secondday=data[60623:,:]
metadata=np.loadtxt('metadata_primaryschool.txt', delimiter='\t', dtype='S16')
[A20,time] =createnw(firstday,metadata,20)
[B20,time2] =createnw(secondday,metadata,20)
# NOTE(review): A, B and `no` are undefined (NameError) — the computed
# tensors above are named A20/B20 and nothing defines `no`.
np.save('day1.npy', A)
np.save('day2.npy', B)
np.save('numbers.npy',no)
np.save('times1.npy',time)
np.save('times2.npy',time2)
| true | true |
1c2fea21b8f9ccc7b4e55871b2cc9d33d28e9fd9 | 1,619 | py | Python | t.py | lucascbarbosa/real-pokedex | 1c0dc26fa6a923db5d7f525c303d85644f632ccd | [
"MIT"
] | null | null | null | t.py | lucascbarbosa/real-pokedex | 1c0dc26fa6a923db5d7f525c303d85644f632ccd | [
"MIT"
] | null | null | null | t.py | lucascbarbosa/real-pokedex | 1c0dc26fa6a923db5d7f525c303d85644f632ccd | [
"MIT"
] | null | null | null | a = ['Abra', 'Aerodactyl', 'Alakazam', 'Arbok', 'Arcanine', 'Articuno', 'Beedrill', 'Bellsprout', 'Blastoise', 'bulbasaur', 'Butterfree', 'Caterpie', 'Chansey', 'Charizard', 'Charmander', 'Charmeleon', 'Clefable', 'Clefairy', 'Cloyster', 'Cubone', 'desktop.ini', 'Dewgong', 'Diglett', 'Ditto', 'Dodrio', 'Doduo', 'Dragonair', 'Dragonite', 'Dratini', 'Drowzee', 'Dugtrio', 'Eevee', 'Ekans', 'Electabuzz', 'Electrode', 'Exeggcute', 'Exeggutor', 'Farfetchd', 'Fearow', 'Flareon', 'Gastly', 'Gengar', 'Geodude', 'Gloom', 'Golbat', 'Goldeen', 'Golduck', 'Golem', 'Graveler', 'Grimer', 'Growlithe', 'Gyarados', 'Haunter', 'Hitmonchan', 'Hitmonlee', 'Horsea', 'Hypno', 'Ivysaur', 'Jigglypuff', 'Jolteon', 'Jynx', 'Kabuto', 'Kabutops', 'Kadabra', 'Kakuna', 'Kangaskhan', 'Kingler', 'Koffing', 'Krabby', 'Lapras', 'Lickitung', 'Machamp', 'Machoke', 'Machop', 'Magikarp', 'Magmar', 'Magnemite', 'Magneton', 'Mankey', 'Marowak', 'Meowth',
'Metapod', 'Mew', 'Mewtwo', 'Moltres', 'MrMime', 'Muk', 'Nidoking', 'Nidoqueen', 'Nidoran Female', 'Nidoran Male', 'Nidorina', 'Nidorino', 'Ninetales', 'Oddish', 'Omanyte', 'Omastar', 'Onix', 'Paras', 'Parasect', 'Persian', 'Pidgeot', 'Pidgeotto', 'Pidgey', 'Pikachu', 'Pinsir', 'Poliwag', 'Poliwhirl', 'Poliwrath', 'Ponyta', 'Porygon', 'Primeape', 'Psyduck', 'Raichu', 'Rapidash', 'Raticate', 'Rattata', 'Rhydon', 'Rhyhorn', 'Sandshrew', 'Sandslash', 'Scyther', 'Seadra', 'Seaking', 'Seel', 'Shellder', 'Slowbro', 'Slowpoke', 'Snorlax', 'Spearow', 'Squirtle', 'Starmie', 'Staryu', 'Tangela', 'Tauros', 'Tentacool', 'Tentacruel', 'Vaporeon', 'Venomoth', 'Venonat', 'Venusaur']
print(len(a)) | 539.666667 | 928 | 0.644225 | a = ['Abra', 'Aerodactyl', 'Alakazam', 'Arbok', 'Arcanine', 'Articuno', 'Beedrill', 'Bellsprout', 'Blastoise', 'bulbasaur', 'Butterfree', 'Caterpie', 'Chansey', 'Charizard', 'Charmander', 'Charmeleon', 'Clefable', 'Clefairy', 'Cloyster', 'Cubone', 'desktop.ini', 'Dewgong', 'Diglett', 'Ditto', 'Dodrio', 'Doduo', 'Dragonair', 'Dragonite', 'Dratini', 'Drowzee', 'Dugtrio', 'Eevee', 'Ekans', 'Electabuzz', 'Electrode', 'Exeggcute', 'Exeggutor', 'Farfetchd', 'Fearow', 'Flareon', 'Gastly', 'Gengar', 'Geodude', 'Gloom', 'Golbat', 'Goldeen', 'Golduck', 'Golem', 'Graveler', 'Grimer', 'Growlithe', 'Gyarados', 'Haunter', 'Hitmonchan', 'Hitmonlee', 'Horsea', 'Hypno', 'Ivysaur', 'Jigglypuff', 'Jolteon', 'Jynx', 'Kabuto', 'Kabutops', 'Kadabra', 'Kakuna', 'Kangaskhan', 'Kingler', 'Koffing', 'Krabby', 'Lapras', 'Lickitung', 'Machamp', 'Machoke', 'Machop', 'Magikarp', 'Magmar', 'Magnemite', 'Magneton', 'Mankey', 'Marowak', 'Meowth',
'Metapod', 'Mew', 'Mewtwo', 'Moltres', 'MrMime', 'Muk', 'Nidoking', 'Nidoqueen', 'Nidoran Female', 'Nidoran Male', 'Nidorina', 'Nidorino', 'Ninetales', 'Oddish', 'Omanyte', 'Omastar', 'Onix', 'Paras', 'Parasect', 'Persian', 'Pidgeot', 'Pidgeotto', 'Pidgey', 'Pikachu', 'Pinsir', 'Poliwag', 'Poliwhirl', 'Poliwrath', 'Ponyta', 'Porygon', 'Primeape', 'Psyduck', 'Raichu', 'Rapidash', 'Raticate', 'Rattata', 'Rhydon', 'Rhyhorn', 'Sandshrew', 'Sandslash', 'Scyther', 'Seadra', 'Seaking', 'Seel', 'Shellder', 'Slowbro', 'Slowpoke', 'Snorlax', 'Spearow', 'Squirtle', 'Starmie', 'Staryu', 'Tangela', 'Tauros', 'Tentacool', 'Tentacruel', 'Vaporeon', 'Venomoth', 'Venonat', 'Venusaur']
print(len(a)) | true | true |
1c2fec35942e7d1eb8b16707c437ec0877f05d70 | 17,276 | py | Python | py-polars/tests/test_datelike.py | tamasfe/polars | 709b8d57e32f61c57191cb8ab435a200e3ae6df7 | [
"MIT"
] | 3 | 2022-03-06T12:45:47.000Z | 2022-03-26T08:43:31.000Z | py-polars/tests/test_datelike.py | webclinic017/polars | 5d342a6474754e47baa4f10d64201a4ae015e6c7 | [
"MIT"
] | null | null | null | py-polars/tests/test_datelike.py | webclinic017/polars | 5d342a6474754e47baa4f10d64201a4ae015e6c7 | [
"MIT"
] | null | null | null | import io
from datetime import date, datetime, timedelta
import numpy as np
import pandas as pd
import pyarrow as pa
import pytest
from test_series import verify_series_and_expr_api
import polars as pl
def test_fill_null() -> None:
    """fill_null accepts both a plain value and a polars literal expression."""
    stamp = datetime.strptime("2021-01-01", "%Y-%m-%d")
    series = pl.Series("A", [stamp, None])
    for filler in (stamp, pl.lit(stamp)):
        filled = series.fill_null(filler)  # type: ignore
        assert filled.null_count() == 0
        assert filled.dt[0] == stamp
        assert filled.dt[1] == stamp
    first, second, third = date(2001, 1, 1), date(2001, 1, 2), date(2001, 1, 3)
    series = pl.Series("a", [first, second, third, None])
    replacement = date(2001, 1, 4)
    for filler in (replacement, pl.lit(replacement)):
        filled = series.fill_null(filler)  # type: ignore
        assert filled.null_count() == 0
        assert filled.dt[0] == first
        assert filled.dt[1] == second
        assert filled.dt[-1] == replacement
def test_filter_date() -> None:
    """Filtering a Date column against datetime literals respects date boundaries.

    Fix: the original repeated the identical three assertions twice
    (copy-paste); the duplicate block is removed.
    """
    dataset = pl.DataFrame(
        {"date": ["2020-01-02", "2020-01-03", "2020-01-04"], "index": [1, 2, 3]}
    )
    df = dataset.with_column(pl.col("date").str.strptime(pl.Date, "%Y-%m-%d"))
    # All rows are after 2019-01-03, so this filter yields an empty frame.
    assert df.filter(pl.col("date") <= pl.lit(datetime(2019, 1, 3))).is_empty()
    # Strict upper bounds: two rows fall before Jan 4th, all three before Jan 5th.
    assert df.filter(pl.col("date") < pl.lit(datetime(2020, 1, 4))).shape[0] == 2
    assert df.filter(pl.col("date") < pl.lit(datetime(2020, 1, 5))).shape[0] == 3
def test_series_add_timedelta() -> None:
    """Adding a timedelta to a datetime Series shifts every element by it."""
    offset = timedelta(days=10_000)
    start = pl.Series(
        [datetime(2000, 1, 1), datetime(2027, 5, 19), datetime(2054, 10, 4)]
    )
    expected = pl.Series(
        [datetime(2027, 5, 19), datetime(2054, 10, 4), datetime(2082, 2, 19)]
    )
    assert (start + offset).series_equal(expected)
def test_series_add_datetime() -> None:
    """Adding a datetime Series to a duration Series produces shifted datetimes.

    Fix: the original asserted on ``(deltas + series) == out`` — ``==`` between
    two Series is element-wise, so the assertion tested the truthiness of a
    multi-element boolean Series, which is ambiguous.  Compare with
    ``series_equal`` instead, consistent with test_series_add_timedelta.
    """
    deltas = pl.Series([timedelta(10_000), timedelta(20_000), timedelta(30_000)])
    expected = pl.Series(
        [datetime(2027, 5, 19), datetime(2054, 10, 4), datetime(2082, 2, 19)]
    )
    assert (deltas + pl.Series([datetime(2000, 1, 1)])).series_equal(expected)
def test_diff_datetime() -> None:
    """diff over a window yields identical lists for rows sharing the window key."""
    frame = pl.DataFrame(
        {
            "timestamp": ["2021-02-01", "2021-03-1", "2850-04-1"],
            "guild": [1, 2, 3],
            "char": ["a", "a", "b"],
        }
    )
    parsed = frame.with_columns(
        [
            pl.col("timestamp").str.strptime(pl.Date, fmt="%Y-%m-%d"),
        ]
    )
    diffed = parsed.with_columns(
        [pl.col("timestamp").diff().list().over("char")]
    )["timestamp"]
    # Both "a" rows belong to the same window, so they carry the same list.
    assert diffed[0] == diffed[1]
def test_timestamp() -> None:
    """Round-trip microsecond timestamps through python datetimes and back to ms."""
    millis = [10000, 20000, 30000]
    series = pl.Series("a", [m * 1000_000 for m in millis], dtype=pl.Datetime)
    assert series.dt.timestamp("ms") == millis
    as_datetimes = series.dt.to_python_datetime()
    assert isinstance(as_datetimes[0], datetime)
    # min/max agree with the first/last converted values.
    assert series.dt.min() == as_datetimes[0]
    assert series.dt.max() == as_datetimes[2]
    # DataFrame.row hands back the python datetime objects unchanged.
    frame = pl.DataFrame([as_datetimes])
    assert isinstance(frame.row(0)[0], datetime)
def test_from_pydatetime() -> None:
    """Series built from python datetimes/dates get the right dtype and null count."""
    stamps = [
        datetime(2021, 1, 1),
        datetime(2021, 1, 2),
        datetime(2021, 1, 3),
        datetime(2021, 1, 4, 12, 12),
        None,
    ]
    s = pl.Series("name", stamps)
    assert s.dtype == pl.Datetime
    assert s.name == "name"
    assert s.null_count() == 1
    assert s.dt[0] == stamps[0]
    # Same construction from python dates yields a Date series.
    days = [date(2021, 1, 1), date(2021, 1, 2), date(2021, 1, 3), None]
    s = pl.Series("name", days)
    assert s.dtype == pl.Date
    assert s.name == "name"
    assert s.null_count() == 1
    assert s.dt[0] == days[0]
def test_to_python_datetime() -> None:
    """Converting to python datetimes gives Object dtype; timestamps stay Int64."""
    frame = pl.DataFrame({"a": [1, 2, 3]})
    as_objects = frame.select(pl.col("a").cast(pl.Datetime).dt.to_python_datetime())
    assert as_objects["a"].dtype == pl.Object
    as_ints = frame.select(pl.col("a").cast(pl.Datetime).dt.timestamp())
    assert as_ints["a"].dtype == pl.Int64
def test_from_numpy() -> None:
    """A datetime64 numpy array round-trips element-wise into a Series."""
    # numpy support is limited; values end up stored as objects.
    arr = np.asarray(range(100_000, 200_000, 10_000), dtype="datetime64[s]")
    series = pl.Series(arr)
    assert series[0] == arr[0]
    assert len(series) == 10
def test_datetime_consistency() -> None:
    """Millisecond-precision datetimes survive both the Series and literal paths."""
    # Use a microsecond component that is a whole number of milliseconds
    # (the sub-ms variant 123456 would be truncated at ms precision).
    dt = datetime(2021, 1, 1, 10, 30, 45, 123000)
    frame = pl.DataFrame({"date": [dt]})
    assert frame["date"].dt[0] == dt
    assert frame.select(pl.lit(dt))["literal"].dt[0] == dt
def test_timezone() -> None:
    """Timezone metadata on arrow timestamps is dropped with a warning."""
    naive = pl.from_arrow(pa.array([1000, 2000], type=pa.timestamp("s")))  # type: ignore
    zoned_type = pa.timestamp("s", tz="America/New_York")
    # Importing zone-aware timestamps warns that the zone is discarded ...
    with pytest.warns(Warning):
        zoned = pl.from_arrow(pa.array([1000, 2000], type=zoned_type))  # type: ignore
    # ... after which both series hold identical values.
    assert naive.series_equal(zoned)
def test_to_list() -> None:
    """to_list converts physical Date/Datetime values back to python objects."""
    days = pl.Series("date", [123543, 283478, 1243]).cast(pl.Date)
    assert days.to_list()[0] == date(2308, 4, 2)
    stamps = pl.Series(
        "datetime", [v * 1_000_000 for v in [123543, 283478, 1243]]
    ).cast(pl.Datetime)
    assert stamps.to_list()[0] == datetime(1970, 1, 2, 10, 19, 3)
def test_rows() -> None:
    """DataFrame.rows converts Date and ns-Datetime columns to python objects."""
    day_col = pl.Series("date", [123543, 283478, 1243]).cast(pl.Date)
    ns_col = (
        pl.Series("datetime", [v * 1_000_000 for v in [123543, 283478, 1243]])
        .cast(pl.Datetime)
        .dt.and_time_unit("ns")
    )
    first_row = pl.DataFrame([day_col, ns_col]).rows()[0]
    assert first_row[0] == date(2308, 4, 2)
    assert first_row[1] == datetime(1970, 1, 1, 0, 2, 3, 543000)
def test_to_numpy() -> None:
    """Exporting temporal Series to numpy keeps the matching datetime64 units."""
    s0 = pl.Series("date", [123543, 283478, 1243]).cast(pl.Date)
    s1 = pl.Series(
        "datetime", [datetime(2021, 1, 2, 3, 4, 5), datetime(2021, 2, 3, 4, 5, 6)]
    )
    s2 = pl.date_range(
        datetime(2021, 1, 1, 0), datetime(2021, 1, 1, 1), interval="1h", time_unit="ms"
    )
    # Date values render as day-resolution strings.
    assert str(s0.to_numpy()) == "['2308-04-02' '2746-02-20' '1973-05-28']"
    # Microsecond-precision datetimes keep six fractional digits ...
    assert (
        str(s1.to_numpy()[:2])
        == "['2021-01-02T03:04:05.000000' '2021-02-03T04:05:06.000000']"
    )
    # ... while an ms-unit range keeps three.
    assert (
        str(s2.to_numpy()[:2])
        == "['2021-01-01T00:00:00.000' '2021-01-01T01:00:00.000']"
    )
    # Durations map onto timedelta64[ns].
    s3 = pl.Series([timedelta(hours=1), timedelta(hours=-2)])
    out = np.array([3_600_000_000_000, -7_200_000_000_000], dtype="timedelta64[ns]")
    assert (s3.to_numpy() == out).all()
def test_truncate() -> None:
    """dt.truncate floors datetimes to the bucket start, for string and timedelta windows."""
    start = datetime(2001, 1, 1)
    stop = datetime(2001, 1, 2)
    every_30min = timedelta(minutes=30)
    ms_series = pl.date_range(start, stop, every_30min, name="dates", time_unit="ms")
    ns_series = pl.date_range(start, stop, every_30min, name="dates", time_unit="ns")
    # The window may be given as a duration string or as a timedelta.
    truncations = (ms_series.dt.truncate("1h"), ns_series.dt.truncate(timedelta(hours=1)))
    for truncated in truncations:
        # Two half-hour samples collapse into each hourly bucket.
        assert truncated.dt[0] == start
        assert truncated.dt[1] == start
        assert truncated.dt[2] == start + timedelta(hours=1)
        assert truncated.dt[3] == start + timedelta(hours=1)
        assert truncated.dt[-3] == stop - timedelta(hours=1)
        assert truncated.dt[-2] == stop - timedelta(hours=1)
        assert truncated.dt[-1] == stop
def test_date_range() -> None:
    """date_range produces evenly spaced datetimes for timedelta and string intervals."""
    spaced = pl.date_range(
        datetime(1985, 1, 1), datetime(2015, 7, 1), timedelta(days=1, hours=12)
    )
    assert len(spaced) == 7426
    assert spaced.dt[0] == datetime(1985, 1, 1)
    assert spaced.dt[1] == datetime(1985, 1, 2, 12, 0)
    assert spaced.dt[2] == datetime(1985, 1, 4, 0, 0)
    assert spaced.dt[-1] == datetime(2015, 6, 30, 12, 0)
    # The requested time unit is preserved on the resulting series.
    for unit in ("ns", "ms"):
        hourly = pl.date_range(
            datetime(2020, 1, 1), datetime(2020, 1, 2), "2h", time_unit=unit
        )
        assert hourly.time_unit == unit
        assert hourly.shape == (13,)
        assert hourly.dt[0] == datetime(2020, 1, 1)
        assert hourly.dt[-1] == datetime(2020, 1, 2)
def test_date_comp() -> None:
    """Comparison operators on temporal Series work for datetimes and dates.

    Fix: the original repeated the same six-assertion set verbatim for the
    datetime and date cases (and reused variables, needing ``type: ignore``);
    the shared assertions are factored into a local helper.
    """

    def check_ordering(one, two) -> None:
        # Full comparison matrix of the series [one, two] against `one`.
        a = pl.Series("a", [one, two])
        assert (a == one).to_list() == [True, False]
        assert (a != one).to_list() == [False, True]
        assert (a > one).to_list() == [False, True]
        assert (a >= one).to_list() == [True, True]
        assert (a < one).to_list() == [False, False]
        assert (a <= one).to_list() == [True, False]

    check_ordering(datetime(2001, 1, 1), datetime(2001, 1, 2))
    check_ordering(date(2001, 1, 1), date(2001, 1, 2))

    # The conversion must also stay correct for dates far outside the
    # unix-era range (both before and after).
    for year in (201, 5001):
        one = date(year, 1, 1)
        two = date(year, 1, 2)
        a = pl.Series("a", [one, two])
        assert (a == one).to_list() == [True, False]
        assert (a == two).to_list() == [False, True]
def test_truncate_negative_offset() -> None:
    """groupby_dynamic with a negative offset truncates window starts correctly."""
    df = pl.DataFrame(
        {
            "event_date": [
                datetime(2021, 4, 11),
                datetime(2021, 4, 29),
                datetime(2021, 5, 29),
            ],
            "adm1_code": [1, 2, 1],
        }
    )
    # Monthly windows of two months' length, shifted back by one month.
    out = df.groupby_dynamic(
        index_column="event_date",
        every="1mo",
        period="2mo",
        offset="-1mo",
        include_boundaries=True,
    ).agg(
        [
            pl.col("adm1_code"),
        ]
    )
    assert out["event_date"].to_list() == [
        datetime(2021, 4, 1),
        datetime(2021, 4, 1),
        datetime(2021, 5, 1),
    ]
    # Same window truncation, combined with additional "by" grouping keys.
    df = pl.DataFrame(
        {
            "event_date": [
                datetime(2021, 4, 11),
                datetime(2021, 4, 29),
                datetime(2021, 5, 29),
            ],
            "adm1_code": [1, 2, 1],
            "five_type": ["a", "b", "a"],
            "actor": ["a", "a", "a"],
            "admin": ["a", "a", "a"],
            "fatalities": [10, 20, 30],
        }
    )
    out = df.groupby_dynamic(
        index_column="event_date",
        every="1mo",
        by=["admin", "five_type", "actor"],
    ).agg([pl.col("adm1_code").unique(), (pl.col("fatalities") > 0).sum()])
    assert out["event_date"].to_list() == [
        datetime(2021, 4, 1),
        datetime(2021, 5, 1),
        datetime(2021, 4, 1),
    ]
    # Integer index columns (Int32/Int64) are supported with "i" durations.
    for dt in [pl.Int32, pl.Int64]:
        df = pl.DataFrame(
            {
                "idx": np.arange(6),
                "A": ["A", "A", "B", "B", "B", "C"],
            }
        ).with_columns(pl.col("idx").cast(dt))
        out = df.groupby_dynamic(
            "idx", every="2i", period="3i", include_boundaries=True
        ).agg(pl.col("A").list())
        assert out.shape == (3, 4)
def test_to_arrow() -> None:
    """A Date series exports to arrow as date32."""
    parsed = pl.Series("dates", ["2022-01-16", "2022-01-17"]).str.strptime(
        pl.Date, "%Y-%m-%d"
    )
    assert parsed.to_arrow().type == pa.date32()
def test_non_exact_strptime() -> None:
    """strptime's `exact` flag controls whether embedded dates are extracted."""
    raw = pl.Series("a", ["2022-01-16", "2022-01-17", "foo2022-01-18", "b2022-01-19ar"])
    fmt = "%Y-%m-%d"

    # exact=True: strings with surrounding junk become null.
    exact_expected = pl.Series("a", [date(2022, 1, 16), date(2022, 1, 17), None, None])
    verify_series_and_expr_api(
        raw, exact_expected, "str.strptime", pl.Date, fmt, strict=False, exact=True
    )

    # exact=False: the date is located anywhere inside the string.
    fuzzy_expected = pl.Series(
        "a",
        [date(2022, 1, 16), date(2022, 1, 17), date(2022, 1, 18), date(2022, 1, 19)],
    )
    verify_series_and_expr_api(
        raw, fuzzy_expected, "str.strptime", pl.Date, fmt, strict=False, exact=False
    )

    # Strict parsing raises on the malformed entries.
    with pytest.raises(Exception):
        raw.str.strptime(pl.Date, fmt, strict=True, exact=True)
def test_explode_date() -> None:
    """Exploding list columns after a groupby works for Date and Datetime keys."""
    same_day = date(2021, 12, 1)
    same_moment = datetime(2021, 12, 1, 0, 0)
    # Exercise both temporal dtypes as the "a" column (dates first,
    # matching the original iteration order).
    for key_values in ([same_day] * 4, [same_moment] * 4):
        frame = pl.DataFrame(
            {
                "a": key_values,
                "b": ["a", "b", "a", "b"],
                "c": [1.0, 2.0, 1.1, 2.2],
            }
        )
        exploded = (
            frame.groupby("b")
            .agg([pl.col("a"), pl.col("c").pct_change()])
            .explode(["a", "c"])
        )
        assert exploded.shape == (4, 3)
def test_rolling() -> None:
    """groupby_rolling aggregates sum/min/max over a trailing 2-day window."""
    stamps = [
        "2020-01-01 13:45:48",
        "2020-01-01 16:42:13",
        "2020-01-01 16:45:09",
        "2020-01-02 18:12:48",
        "2020-01-03 19:45:32",
        "2020-01-08 23:16:43",
    ]
    frame = pl.DataFrame({"dt": stamps, "a": [3, 7, 5, 9, 2, 1]}).with_column(
        pl.col("dt").str.strptime(pl.Datetime)
    )
    rolled = frame.groupby_rolling(index_column="dt", period="2d").agg(
        [
            pl.sum("a").alias("sum_a"),
            pl.min("a").alias("min_a"),
            pl.max("a").alias("max_a"),
        ]
    )
    # Each output row aggregates the 2 days ending at that row's timestamp.
    assert rolled["sum_a"].to_list() == [3, 10, 15, 24, 11, 1]
    assert rolled["max_a"].to_list() == [3, 7, 7, 9, 9, 1]
    assert rolled["min_a"].to_list() == [3, 3, 3, 3, 2, 1]
def test_upsample() -> None:
    """Per-group monthly upsampling inserts missing rows which forward-fill."""
    df = pl.DataFrame(
        {
            "time": [
                datetime(2021, 2, 1),
                datetime(2021, 4, 1),
                datetime(2021, 5, 1),
                datetime(2021, 6, 1),
            ],
            "admin": ["Åland", "Netherlands", "Åland", "Netherlands"],
            "test2": [0, 1, 2, 3],
        }
    )
    up = df.upsample(
        time_column="time", every="1mo", by="admin", maintain_order=True
    ).select(pl.all().forward_fill())
    # Gap months within each admin group are filled with the previous
    # row's values (hence the repeated "test2" entries).
    expected = pl.DataFrame(
        {
            "time": [
                datetime(2021, 2, 1, 0, 0),
                datetime(2021, 3, 1, 0, 0),
                datetime(2021, 4, 1, 0, 0),
                datetime(2021, 5, 1, 0, 0),
                datetime(2021, 4, 1, 0, 0),
                datetime(2021, 5, 1, 0, 0),
                datetime(2021, 6, 1, 0, 0),
            ],
            "admin": [
                "Åland",
                "Åland",
                "Åland",
                "Åland",
                "Netherlands",
                "Netherlands",
                "Netherlands",
            ],
            "test2": [0, 0, 0, 2, 1, 1, 3],
        }
    )
    assert up.frame_equal(expected)
def test_microseconds_accuracy() -> None:
    """Microsecond timestamps far in the future survive the arrow import intact."""
    timestamps = [
        datetime(2600, 1, 1, 0, 0, 0, 123456),
        datetime(2800, 1, 1, 0, 0, 0, 456789),
    ]
    schema = pa.schema(
        [
            ("timestamp", pa.timestamp("us")),
            ("value", pa.int16()),
        ]
    )
    table = pa.Table.from_arrays(arrays=[timestamps, [128, 256]], schema=schema)
    assert pl.from_arrow(table)["timestamp"].to_list() == timestamps  # type: ignore
def test_cast_time_units() -> None:
    """cast_time_unit rescales the underlying integers between ns, us and ms."""
    stamps = pl.Series("dates", [datetime(2001, 1, 1), datetime(2001, 2, 1, 10, 8, 9)])
    ns_values = np.array([978307200000000000, 981022089000000000])
    expected = {
        "ns": ns_values,
        "us": ns_values // 1_000,
        "ms": ns_values // 1_000_000,
    }
    for unit, values in expected.items():
        assert stamps.dt.cast_time_unit(unit).cast(int).to_list() == list(values)
def test_read_utc_times_parquet() -> None:
    """UTC timestamps written by pandas to parquet read back as naive datetimes."""
    hourly = pd.date_range(
        "2022-01-01T00:00+00:00", "2022-01-01T10:00+00:00", freq="H"
    )
    buffer = io.BytesIO()
    pd.DataFrame(data={"Timestamp": hourly}).to_parquet(buffer)
    buffer.seek(0)
    loaded = pl.read_parquet(buffer)
    assert loaded["Timestamp"][0] == datetime(2022, 1, 1, 0, 0)
def test_epoch() -> None:
    """dt.epoch matches dt.timestamp for ns/us/ms and scales correctly for s/d."""
    stamps = pl.Series("dates", [datetime(2001, 1, 1), datetime(2001, 2, 1, 10, 8, 9)])
    for unit in ("ns", "us", "ms"):
        assert stamps.dt.epoch(unit).series_equal(stamps.dt.timestamp(unit))
    # Seconds and days are derived from the millisecond timestamp.
    assert stamps.dt.epoch("s").series_equal(stamps.dt.timestamp("ms") // 1000)
    assert stamps.dt.epoch("d").series_equal(
        (stamps.dt.timestamp("ms") // (1000 * 3600 * 24)).cast(pl.Int32)
    )
def test_default_negative_every_offset_dynamic_groupby() -> None:
    # Regression test for polars issue #2791: the default (negative) offset
    # of groupby_dynamic with closed="right" window labels.
    dts = [
        datetime(2020, 1, 1),
        datetime(2020, 1, 2),
        datetime(2020, 2, 1),
        datetime(2020, 3, 1),
    ]
    df = pl.DataFrame({"dt": dts, "idx": range(len(dts))})
    out = df.groupby_dynamic(index_column="dt", every="1mo", closed="right").agg(
        pl.col("idx")
    )
    # Expected window labels and their member row indices.
    expected = pl.DataFrame(
        {
            "dt": [
                datetime(2020, 1, 1, 0, 0),
                datetime(2020, 1, 1, 0, 0),
                datetime(2020, 3, 1, 0, 0),
            ],
            "idx": [[0], [1, 2], [3]],
        }
    )
    assert out.frame_equal(expected)
| 30.415493 | 88 | 0.522054 | import io
from datetime import date, datetime, timedelta
import numpy as np
import pandas as pd
import pyarrow as pa
import pytest
from test_series import verify_series_and_expr_api
import polars as pl
def test_fill_null() -> None:
dt = datetime.strptime("2021-01-01", "%Y-%m-%d")
s = pl.Series("A", [dt, None])
for fill_val in (dt, pl.lit(dt)):
out = s.fill_null(fill_val)
assert out.null_count() == 0
assert out.dt[0] == dt
assert out.dt[1] == dt
dt1 = date(2001, 1, 1)
dt2 = date(2001, 1, 2)
dt3 = date(2001, 1, 3)
s = pl.Series("a", [dt1, dt2, dt3, None])
dt_2 = date(2001, 1, 4)
for fill_val in (dt_2, pl.lit(dt_2)):
out = s.fill_null(fill_val)
assert out.null_count() == 0
assert out.dt[0] == dt1
assert out.dt[1] == dt2
assert out.dt[-1] == dt_2
def test_filter_date() -> None:
dataset = pl.DataFrame(
{"date": ["2020-01-02", "2020-01-03", "2020-01-04"], "index": [1, 2, 3]}
)
df = dataset.with_column(pl.col("date").str.strptime(pl.Date, "%Y-%m-%d"))
assert df.filter(pl.col("date") <= pl.lit(datetime(2019, 1, 3))).is_empty()
assert df.filter(pl.col("date") < pl.lit(datetime(2020, 1, 4))).shape[0] == 2
assert df.filter(pl.col("date") < pl.lit(datetime(2020, 1, 5))).shape[0] == 3
assert df.filter(pl.col("date") <= pl.lit(datetime(2019, 1, 3))).is_empty()
assert df.filter(pl.col("date") < pl.lit(datetime(2020, 1, 4))).shape[0] == 2
assert df.filter(pl.col("date") < pl.lit(datetime(2020, 1, 5))).shape[0] == 3
def test_series_add_timedelta() -> None:
dates = pl.Series(
[datetime(2000, 1, 1), datetime(2027, 5, 19), datetime(2054, 10, 4)]
)
out = pl.Series(
[datetime(2027, 5, 19), datetime(2054, 10, 4), datetime(2082, 2, 19)]
)
assert (dates + timedelta(days=10_000)).series_equal(out)
def test_series_add_datetime() -> None:
deltas = pl.Series([timedelta(10_000), timedelta(20_000), timedelta(30_000)])
out = pl.Series(
[datetime(2027, 5, 19), datetime(2054, 10, 4), datetime(2082, 2, 19)]
)
assert (deltas + pl.Series([datetime(2000, 1, 1)])) == out
def test_diff_datetime() -> None:
df = pl.DataFrame(
{
"timestamp": ["2021-02-01", "2021-03-1", "2850-04-1"],
"guild": [1, 2, 3],
"char": ["a", "a", "b"],
}
)
out = (
df.with_columns(
[
pl.col("timestamp").str.strptime(pl.Date, fmt="%Y-%m-%d"),
]
).with_columns([pl.col("timestamp").diff().list().over("char")])
)["timestamp"]
assert out[0] == out[1]
def test_timestamp() -> None:
a = pl.Series("a", [a * 1000_000 for a in [10000, 20000, 30000]], dtype=pl.Datetime)
assert a.dt.timestamp("ms") == [10000, 20000, 30000]
out = a.dt.to_python_datetime()
assert isinstance(out[0], datetime)
assert a.dt.min() == out[0]
assert a.dt.max() == out[2]
df = pl.DataFrame([out])
assert isinstance(df.row(0)[0], datetime)
def test_from_pydatetime() -> None:
dates = [
datetime(2021, 1, 1),
datetime(2021, 1, 2),
datetime(2021, 1, 3),
datetime(2021, 1, 4, 12, 12),
None,
]
s = pl.Series("name", dates)
assert s.dtype == pl.Datetime
assert s.name == "name"
assert s.null_count() == 1
assert s.dt[0] == dates[0]
dates = [date(2021, 1, 1), date(2021, 1, 2), date(2021, 1, 3), None]
s = pl.Series("name", dates)
assert s.dtype == pl.Date
assert s.name == "name"
assert s.null_count() == 1
assert s.dt[0] == dates[0]
def test_to_python_datetime() -> None:
df = pl.DataFrame({"a": [1, 2, 3]})
assert (
df.select(pl.col("a").cast(pl.Datetime).dt.to_python_datetime())["a"].dtype
== pl.Object
)
assert (
df.select(pl.col("a").cast(pl.Datetime).dt.timestamp())["a"].dtype == pl.Int64
)
def test_from_numpy() -> None:
x = np.asarray(range(100_000, 200_000, 10_000), dtype="datetime64[s]")
s = pl.Series(x)
assert s[0] == x[0]
assert len(s) == 10
def test_datetime_consistency() -> None:
dt = datetime(2021, 1, 1, 10, 30, 45, 123000)
df = pl.DataFrame({"date": [dt]})
assert df["date"].dt[0] == dt
assert df.select(pl.lit(dt))["literal"].dt[0] == dt
def test_timezone() -> None:
ts = pa.timestamp("s")
data = pa.array([1000, 2000], type=ts)
s: pl.Series = pl.from_arrow(data)
tz_ts = pa.timestamp("s", tz="America/New_York")
tz_data = pa.array([1000, 2000], type=tz_ts)
with pytest.warns(Warning):
tz_s: pl.Series = pl.from_arrow(tz_data)
assert s.series_equal(tz_s)
def test_to_list() -> None:
s = pl.Series("date", [123543, 283478, 1243]).cast(pl.Date)
out = s.to_list()
assert out[0] == date(2308, 4, 2)
s = pl.Series("datetime", [a * 1_000_000 for a in [123543, 283478, 1243]]).cast(
pl.Datetime
)
out = s.to_list()
assert out[0] == datetime(1970, 1, 2, 10, 19, 3)
def test_rows() -> None:
s0 = pl.Series("date", [123543, 283478, 1243]).cast(pl.Date)
s1 = (
pl.Series("datetime", [a * 1_000_000 for a in [123543, 283478, 1243]])
.cast(pl.Datetime)
.dt.and_time_unit("ns")
)
df = pl.DataFrame([s0, s1])
rows = df.rows()
assert rows[0][0] == date(2308, 4, 2)
assert rows[0][1] == datetime(1970, 1, 1, 0, 2, 3, 543000)
def test_to_numpy() -> None:
s0 = pl.Series("date", [123543, 283478, 1243]).cast(pl.Date)
s1 = pl.Series(
"datetime", [datetime(2021, 1, 2, 3, 4, 5), datetime(2021, 2, 3, 4, 5, 6)]
)
s2 = pl.date_range(
datetime(2021, 1, 1, 0), datetime(2021, 1, 1, 1), interval="1h", time_unit="ms"
)
assert str(s0.to_numpy()) == "['2308-04-02' '2746-02-20' '1973-05-28']"
assert (
str(s1.to_numpy()[:2])
== "['2021-01-02T03:04:05.000000' '2021-02-03T04:05:06.000000']"
)
assert (
str(s2.to_numpy()[:2])
== "['2021-01-01T00:00:00.000' '2021-01-01T01:00:00.000']"
)
s3 = pl.Series([timedelta(hours=1), timedelta(hours=-2)])
out = np.array([3_600_000_000_000, -7_200_000_000_000], dtype="timedelta64[ns]")
assert (s3.to_numpy() == out).all()
def test_truncate() -> None:
start = datetime(2001, 1, 1)
stop = datetime(2001, 1, 2)
s1 = pl.date_range(start, stop, timedelta(minutes=30), name="dates", time_unit="ms")
s2 = pl.date_range(start, stop, timedelta(minutes=30), name="dates", time_unit="ns")
for out in [s1.dt.truncate("1h"), s2.dt.truncate(timedelta(hours=1))]:
assert out.dt[0] == start
assert out.dt[1] == start
assert out.dt[2] == start + timedelta(hours=1)
assert out.dt[3] == start + timedelta(hours=1)
assert out.dt[-3] == stop - timedelta(hours=1)
assert out.dt[-2] == stop - timedelta(hours=1)
assert out.dt[-1] == stop
def test_date_range() -> None:
result = pl.date_range(
datetime(1985, 1, 1), datetime(2015, 7, 1), timedelta(days=1, hours=12)
)
assert len(result) == 7426
assert result.dt[0] == datetime(1985, 1, 1)
assert result.dt[1] == datetime(1985, 1, 2, 12, 0)
assert result.dt[2] == datetime(1985, 1, 4, 0, 0)
assert result.dt[-1] == datetime(2015, 6, 30, 12, 0)
for tu in ["ns", "ms"]:
rng = pl.date_range(
datetime(2020, 1, 1), datetime(2020, 1, 2), "2h", time_unit=tu
)
assert rng.time_unit == tu
assert rng.shape == (13,)
assert rng.dt[0] == datetime(2020, 1, 1)
assert rng.dt[-1] == datetime(2020, 1, 2)
def test_date_comp() -> None:
one = datetime(2001, 1, 1)
two = datetime(2001, 1, 2)
a = pl.Series("a", [one, two])
assert (a == one).to_list() == [True, False]
assert (a != one).to_list() == [False, True]
assert (a > one).to_list() == [False, True]
assert (a >= one).to_list() == [True, True]
assert (a < one).to_list() == [False, False]
assert (a <= one).to_list() == [True, False]
one = date(2001, 1, 1)
two = date(2001, 1, 2)
a = pl.Series("a", [one, two])
assert (a == one).to_list() == [True, False]
assert (a != one).to_list() == [False, True]
assert (a > one).to_list() == [False, True]
assert (a >= one).to_list() == [True, True]
assert (a < one).to_list() == [False, False]
assert (a <= one).to_list() == [True, False]
one = date(201, 1, 1)
two = date(201, 1, 2)
a = pl.Series("a", [one, two])
assert (a == one).to_list() == [True, False]
assert (a == two).to_list() == [False, True]
one = date(5001, 1, 1)
two = date(5001, 1, 2)
a = pl.Series("a", [one, two])
assert (a == one).to_list() == [True, False]
assert (a == two).to_list() == [False, True]
def test_truncate_negative_offset() -> None:
df = pl.DataFrame(
{
"event_date": [
datetime(2021, 4, 11),
datetime(2021, 4, 29),
datetime(2021, 5, 29),
],
"adm1_code": [1, 2, 1],
}
)
out = df.groupby_dynamic(
index_column="event_date",
every="1mo",
period="2mo",
offset="-1mo",
include_boundaries=True,
).agg(
[
pl.col("adm1_code"),
]
)
assert out["event_date"].to_list() == [
datetime(2021, 4, 1),
datetime(2021, 4, 1),
datetime(2021, 5, 1),
]
df = pl.DataFrame(
{
"event_date": [
datetime(2021, 4, 11),
datetime(2021, 4, 29),
datetime(2021, 5, 29),
],
"adm1_code": [1, 2, 1],
"five_type": ["a", "b", "a"],
"actor": ["a", "a", "a"],
"admin": ["a", "a", "a"],
"fatalities": [10, 20, 30],
}
)
out = df.groupby_dynamic(
index_column="event_date",
every="1mo",
by=["admin", "five_type", "actor"],
).agg([pl.col("adm1_code").unique(), (pl.col("fatalities") > 0).sum()])
assert out["event_date"].to_list() == [
datetime(2021, 4, 1),
datetime(2021, 5, 1),
datetime(2021, 4, 1),
]
for dt in [pl.Int32, pl.Int64]:
df = pl.DataFrame(
{
"idx": np.arange(6),
"A": ["A", "A", "B", "B", "B", "C"],
}
).with_columns(pl.col("idx").cast(dt))
out = df.groupby_dynamic(
"idx", every="2i", period="3i", include_boundaries=True
).agg(pl.col("A").list())
assert out.shape == (3, 4)
def test_to_arrow() -> None:
date_series = pl.Series("dates", ["2022-01-16", "2022-01-17"]).str.strptime(
pl.Date, "%Y-%m-%d"
)
arr = date_series.to_arrow()
assert arr.type == pa.date32()
def test_non_exact_strptime() -> None:
a = pl.Series("a", ["2022-01-16", "2022-01-17", "foo2022-01-18", "b2022-01-19ar"])
fmt = "%Y-%m-%d"
expected = pl.Series("a", [date(2022, 1, 16), date(2022, 1, 17), None, None])
verify_series_and_expr_api(
a, expected, "str.strptime", pl.Date, fmt, strict=False, exact=True
)
expected = pl.Series(
"a",
[date(2022, 1, 16), date(2022, 1, 17), date(2022, 1, 18), date(2022, 1, 19)],
)
verify_series_and_expr_api(
a, expected, "str.strptime", pl.Date, fmt, strict=False, exact=False
)
with pytest.raises(Exception):
a.str.strptime(pl.Date, fmt, strict=True, exact=True)
def test_explode_date() -> None:
datetimes = [
datetime(2021, 12, 1, 0, 0),
datetime(2021, 12, 1, 0, 0),
datetime(2021, 12, 1, 0, 0),
datetime(2021, 12, 1, 0, 0),
]
dates = [
date(2021, 12, 1),
date(2021, 12, 1),
date(2021, 12, 1),
date(2021, 12, 1),
]
for d in [dates, datetimes]:
df = pl.DataFrame(
{
"a": d,
"b": ["a", "b", "a", "b"],
"c": [1.0, 2.0, 1.1, 2.2],
}
)
out = (
df.groupby("b")
.agg([pl.col("a"), pl.col("c").pct_change()])
.explode(["a", "c"])
)
assert out.shape == (4, 3)
def test_rolling() -> None:
dates = [
"2020-01-01 13:45:48",
"2020-01-01 16:42:13",
"2020-01-01 16:45:09",
"2020-01-02 18:12:48",
"2020-01-03 19:45:32",
"2020-01-08 23:16:43",
]
df = pl.DataFrame({"dt": dates, "a": [3, 7, 5, 9, 2, 1]}).with_column(
pl.col("dt").str.strptime(pl.Datetime)
)
out = df.groupby_rolling(index_column="dt", period="2d").agg(
[
pl.sum("a").alias("sum_a"),
pl.min("a").alias("min_a"),
pl.max("a").alias("max_a"),
]
)
assert out["sum_a"].to_list() == [3, 10, 15, 24, 11, 1]
assert out["max_a"].to_list() == [3, 7, 7, 9, 9, 1]
assert out["min_a"].to_list() == [3, 3, 3, 3, 2, 1]
def test_upsample() -> None:
df = pl.DataFrame(
{
"time": [
datetime(2021, 2, 1),
datetime(2021, 4, 1),
datetime(2021, 5, 1),
datetime(2021, 6, 1),
],
"admin": ["Åland", "Netherlands", "Åland", "Netherlands"],
"test2": [0, 1, 2, 3],
}
)
up = df.upsample(
time_column="time", every="1mo", by="admin", maintain_order=True
).select(pl.all().forward_fill())
expected = pl.DataFrame(
{
"time": [
datetime(2021, 2, 1, 0, 0),
datetime(2021, 3, 1, 0, 0),
datetime(2021, 4, 1, 0, 0),
datetime(2021, 5, 1, 0, 0),
datetime(2021, 4, 1, 0, 0),
datetime(2021, 5, 1, 0, 0),
datetime(2021, 6, 1, 0, 0),
],
"admin": [
"Åland",
"Åland",
"Åland",
"Åland",
"Netherlands",
"Netherlands",
"Netherlands",
],
"test2": [0, 0, 0, 2, 1, 1, 3],
}
)
assert up.frame_equal(expected)
def test_microseconds_accuracy() -> None:
timestamps = [
datetime(2600, 1, 1, 0, 0, 0, 123456),
datetime(2800, 1, 1, 0, 0, 0, 456789),
]
a = pa.Table.from_arrays(
arrays=[timestamps, [128, 256]],
schema=pa.schema(
[
("timestamp", pa.timestamp("us")),
("value", pa.int16()),
]
),
)
assert pl.from_arrow(a)["timestamp"].to_list() == timestamps
def test_cast_time_units() -> None:
dates = pl.Series("dates", [datetime(2001, 1, 1), datetime(2001, 2, 1, 10, 8, 9)])
dates_in_ns = np.array([978307200000000000, 981022089000000000])
assert dates.dt.cast_time_unit("ns").cast(int).to_list() == list(dates_in_ns)
assert dates.dt.cast_time_unit("us").cast(int).to_list() == list(
dates_in_ns // 1_000
)
assert dates.dt.cast_time_unit("ms").cast(int).to_list() == list(
dates_in_ns // 1_000_000
)
def test_read_utc_times_parquet() -> None:
df = pd.DataFrame(
data={
"Timestamp": pd.date_range(
"2022-01-01T00:00+00:00", "2022-01-01T10:00+00:00", freq="H"
)
}
)
f = io.BytesIO()
df.to_parquet(f)
f.seek(0)
df_in = pl.read_parquet(f)
assert df_in["Timestamp"][0] == datetime(2022, 1, 1, 0, 0)
def test_epoch() -> None:
dates = pl.Series("dates", [datetime(2001, 1, 1), datetime(2001, 2, 1, 10, 8, 9)])
for unit in ["ns", "us", "ms"]:
assert dates.dt.epoch(unit).series_equal(dates.dt.timestamp(unit))
assert dates.dt.epoch("s").series_equal(dates.dt.timestamp("ms") // 1000)
assert dates.dt.epoch("d").series_equal(
(dates.dt.timestamp("ms") // (1000 * 3600 * 24)).cast(pl.Int32)
)
def test_default_negative_every_offset_dynamic_groupby() -> None:
dts = [
datetime(2020, 1, 1),
datetime(2020, 1, 2),
datetime(2020, 2, 1),
datetime(2020, 3, 1),
]
df = pl.DataFrame({"dt": dts, "idx": range(len(dts))})
out = df.groupby_dynamic(index_column="dt", every="1mo", closed="right").agg(
pl.col("idx")
)
expected = pl.DataFrame(
{
"dt": [
datetime(2020, 1, 1, 0, 0),
datetime(2020, 1, 1, 0, 0),
datetime(2020, 3, 1, 0, 0),
],
"idx": [[0], [1, 2], [3]],
}
)
assert out.frame_equal(expected)
| true | true |
1c2fecc3e4fcae7bb5d8e4e51a3b7a4a48038d87 | 9,247 | py | Python | networkx/algorithms/tests/test_clique.py | AaronOpfer/networkx | f04ca835c3503f04f9b3e933270575980e44205b | [
"BSD-3-Clause"
] | 1 | 2020-05-13T01:08:42.000Z | 2020-05-13T01:08:42.000Z | networkx/algorithms/tests/test_clique.py | AaronOpfer/networkx | f04ca835c3503f04f9b3e933270575980e44205b | [
"BSD-3-Clause"
] | 1 | 2019-11-28T21:08:50.000Z | 2019-11-28T21:08:50.000Z | networkx/algorithms/tests/test_clique.py | AaronOpfer/networkx | f04ca835c3503f04f9b3e933270575980e44205b | [
"BSD-3-Clause"
] | 1 | 2021-01-27T12:09:05.000Z | 2021-01-27T12:09:05.000Z | #!/usr/bin/env python
from nose.tools import *
import networkx as nx
from networkx import convert_node_labels_to_integers as cnlti
class TestCliques:
def setUp(self):
z = [3, 4, 3, 4, 2, 4, 2, 1, 1, 1, 1]
self.G = cnlti(nx.generators.havel_hakimi_graph(z), first_label=1)
self.cl = list(nx.find_cliques(self.G))
H = nx.complete_graph(6)
H = nx.relabel_nodes(H, dict([(i, i + 1) for i in range(6)]))
H.remove_edges_from([(2, 6), (2, 5), (2, 4), (1, 3), (5, 3)])
self.H = H
def test_find_cliques1(self):
cl = list(nx.find_cliques(self.G))
rcl = nx.find_cliques_recursive(self.G)
expected = [[2, 6, 1, 3], [2, 6, 4], [5, 4, 7], [8, 9], [10, 11]]
assert_equal(sorted(map(sorted, cl)), sorted(map(sorted, rcl)))
assert_equal(sorted(map(sorted, cl)), sorted(map(sorted, expected)))
def test_selfloops(self):
self.G.add_edge(1, 1)
cl = list(nx.find_cliques(self.G))
rcl = list(nx.find_cliques_recursive(self.G))
assert_equal(set(map(frozenset, cl)), set(map(frozenset, rcl)))
answer = [{2, 6, 1, 3}, {2, 6, 4}, {5, 4, 7}, {8, 9}, {10, 11}]
assert_equal(len(answer), len(cl))
assert_true(all(set(c) in answer for c in cl))
def test_find_cliques2(self):
hcl = list(nx.find_cliques(self.H))
assert_equal(sorted(map(sorted, hcl)),
[[1, 2], [1, 4, 5, 6], [2, 3], [3, 4, 6]])
def test_clique_number(self):
G = self.G
assert_equal(nx.graph_clique_number(G), 4)
assert_equal(nx.graph_clique_number(G, cliques=self.cl), 4)
def test_clique_number2(self):
G = nx.Graph()
G.add_nodes_from([1, 2, 3])
assert_equal(nx.graph_clique_number(G), 1)
def test_clique_number3(self):
G = nx.Graph()
assert_equal(nx.graph_clique_number(G), 0)
def test_number_of_cliques(self):
G = self.G
assert_equal(nx.graph_number_of_cliques(G), 5)
assert_equal(nx.graph_number_of_cliques(G, cliques=self.cl), 5)
assert_equal(nx.number_of_cliques(G, 1), 1)
assert_equal(list(nx.number_of_cliques(G, [1]).values()), [1])
assert_equal(list(nx.number_of_cliques(G, [1, 2]).values()), [1, 2])
assert_equal(nx.number_of_cliques(G, [1, 2]), {1: 1, 2: 2})
assert_equal(nx.number_of_cliques(G, 2), 2)
assert_equal(nx.number_of_cliques(G),
{1: 1, 2: 2, 3: 1, 4: 2, 5: 1,
6: 2, 7: 1, 8: 1, 9: 1, 10: 1, 11: 1})
assert_equal(nx.number_of_cliques(G, nodes=list(G)),
{1: 1, 2: 2, 3: 1, 4: 2, 5: 1,
6: 2, 7: 1, 8: 1, 9: 1, 10: 1, 11: 1})
assert_equal(nx.number_of_cliques(G, nodes=[2, 3, 4]),
{2: 2, 3: 1, 4: 2})
assert_equal(nx.number_of_cliques(G, cliques=self.cl),
{1: 1, 2: 2, 3: 1, 4: 2, 5: 1,
6: 2, 7: 1, 8: 1, 9: 1, 10: 1, 11: 1})
assert_equal(nx.number_of_cliques(G, list(G), cliques=self.cl),
{1: 1, 2: 2, 3: 1, 4: 2, 5: 1,
6: 2, 7: 1, 8: 1, 9: 1, 10: 1, 11: 1})
def test_node_clique_number(self):
G = self.G
assert_equal(nx.node_clique_number(G, 1), 4)
assert_equal(list(nx.node_clique_number(G, [1]).values()), [4])
assert_equal(list(nx.node_clique_number(G, [1, 2]).values()), [4, 4])
assert_equal(nx.node_clique_number(G, [1, 2]), {1: 4, 2: 4})
assert_equal(nx.node_clique_number(G, 1), 4)
assert_equal(nx.node_clique_number(G),
{1: 4, 2: 4, 3: 4, 4: 3, 5: 3, 6: 4,
7: 3, 8: 2, 9: 2, 10: 2, 11: 2})
assert_equal(nx.node_clique_number(G, cliques=self.cl),
{1: 4, 2: 4, 3: 4, 4: 3, 5: 3, 6: 4,
7: 3, 8: 2, 9: 2, 10: 2, 11: 2})
def test_cliques_containing_node(self):
G = self.G
assert_equal(nx.cliques_containing_node(G, 1),
[[2, 6, 1, 3]])
assert_equal(list(nx.cliques_containing_node(G, [1]).values()),
[[[2, 6, 1, 3]]])
assert_equal([sorted(c) for c in list(nx.cliques_containing_node(G, [1, 2]).values())],
[[[2, 6, 1, 3]], [[2, 6, 1, 3], [2, 6, 4]]])
result = nx.cliques_containing_node(G, [1, 2])
for k, v in result.items():
result[k] = sorted(v)
assert_equal(result,
{1: [[2, 6, 1, 3]], 2: [[2, 6, 1, 3], [2, 6, 4]]})
assert_equal(nx.cliques_containing_node(G, 1),
[[2, 6, 1, 3]])
expected = [{2, 6, 1, 3}, {2, 6, 4}]
answer = [set(c) for c in nx.cliques_containing_node(G, 2)]
assert_in(answer, (expected, list(reversed(expected))))
answer = [set(c) for c in nx.cliques_containing_node(G, 2, cliques=self.cl)]
assert_in(answer, (expected, list(reversed(expected))))
assert_equal(len(nx.cliques_containing_node(G)), 11)
def test_make_clique_bipartite(self):
G = self.G
B = nx.make_clique_bipartite(G)
assert_equal(sorted(B),
[-5, -4, -3, -2, -1, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11])
# Project onto the nodes of the original graph.
H = nx.project(B, range(1, 12))
assert_equal(H.adj, G.adj)
# Project onto the nodes representing the cliques.
H1 = nx.project(B, range(-5, 0))
# Relabel the negative numbers as positive ones.
H1 = nx.relabel_nodes(H1, {-v: v for v in range(1, 6)})
assert_equal(sorted(H1), [1, 2, 3, 4, 5])
def test_make_max_clique_graph(self):
    """Tests that the maximal clique graph is the same as the bipartite
    clique graph after being projected onto the nodes representing the
    cliques.
    """
    graph = self.G
    bipartite = nx.make_clique_bipartite(graph)
    # Clique representatives carry negative labels; shift them to 0..4 so
    # they line up with make_max_clique_graph's numbering.
    projected = nx.project(bipartite, range(-5, 0))
    projected = nx.relabel_nodes(projected, {-v: v - 1 for v in range(1, 6)})
    direct = nx.make_max_clique_graph(graph)
    assert_equal(projected.adj, direct.adj)
@raises(nx.NetworkXNotImplemented)
def test_directed(self):
    """find_cliques must reject directed graphs."""
    nx.find_cliques(nx.DiGraph())
class TestEnumerateAllCliques:

    def test_paper_figure_4(self):
        """Reproduce Fig. 4 of the paper enumerate_all_cliques is based on.

        http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=1559964&isnumber=33129
        """
        graph = nx.Graph()
        graph.add_edges_from([
            ('a', 'b'), ('a', 'c'), ('a', 'd'), ('a', 'e'),
            ('b', 'c'), ('b', 'd'), ('b', 'e'),
            ('c', 'd'), ('c', 'e'),
            ('d', 'e'),
            ('f', 'b'), ('f', 'c'), ('f', 'g'),
            ('g', 'f'), ('g', 'c'), ('g', 'd'), ('g', 'e'),
        ])
        cliques = list(nx.enumerate_all_cliques(graph))
        # Cliques must be emitted in non-decreasing size order.
        sizes = [len(c) for c in cliques]
        assert_equal(sorted(sizes), sizes)
        expected = [
            ['a'], ['b'], ['c'], ['d'], ['e'], ['f'], ['g'],
            ['a', 'b'], ['a', 'b', 'd'], ['a', 'b', 'd', 'e'],
            ['a', 'b', 'e'],
            ['a', 'c'], ['a', 'c', 'd'], ['a', 'c', 'd', 'e'],
            ['a', 'c', 'e'],
            ['a', 'd'], ['a', 'd', 'e'], ['a', 'e'],
            ['b', 'c'], ['b', 'c', 'd'], ['b', 'c', 'd', 'e'],
            ['b', 'c', 'e'], ['b', 'c', 'f'],
            ['b', 'd'], ['b', 'd', 'e'], ['b', 'e'], ['b', 'f'],
            ['c', 'd'], ['c', 'd', 'e'], ['c', 'd', 'e', 'g'],
            ['c', 'd', 'g'],
            ['c', 'e'], ['c', 'e', 'g'], ['c', 'f'], ['c', 'f', 'g'],
            ['c', 'g'],
            ['d', 'e'], ['d', 'e', 'g'], ['d', 'g'],
            ['e', 'g'], ['f', 'g'],
            ['a', 'b', 'c'], ['a', 'b', 'c', 'd'],
            ['a', 'b', 'c', 'd', 'e'], ['a', 'b', 'c', 'e'],
        ]
        # Compare as canonically sorted collections; enumeration order of
        # same-size cliques is unspecified.
        assert_equal(sorted(map(sorted, cliques)),
                     sorted(map(sorted, expected)))
| 42.810185 | 95 | 0.43798 |
from nose.tools import *
import networkx as nx
from networkx import convert_node_labels_to_integers as cnlti
class TestCliques:
    """Tests for find_cliques and the clique helper functions.

    The fixture ``self.G`` is a Havel-Hakimi graph whose maximal cliques
    are known in advance (kept in ``self.cl``); ``self.H`` is K6 with a
    few edges removed.
    """

    def setUp(self):
        # Degree sequence realised by havel_hakimi_graph; relabelled 1..11.
        z = [3, 4, 3, 4, 2, 4, 2, 1, 1, 1, 1]
        self.G = cnlti(nx.generators.havel_hakimi_graph(z), first_label=1)
        # Precomputed maximal cliques, reused by the cliques= keyword tests.
        self.cl = list(nx.find_cliques(self.G))
        H = nx.complete_graph(6)
        H = nx.relabel_nodes(H, dict([(i, i + 1) for i in range(6)]))
        H.remove_edges_from([(2, 6), (2, 5), (2, 4), (1, 3), (5, 3)])
        self.H = H

    def test_find_cliques1(self):
        # Iterative and recursive enumerations must agree with each other
        # and with the known clique list.
        cl = list(nx.find_cliques(self.G))
        rcl = nx.find_cliques_recursive(self.G)
        expected = [[2, 6, 1, 3], [2, 6, 4], [5, 4, 7], [8, 9], [10, 11]]
        assert_equal(sorted(map(sorted, cl)), sorted(map(sorted, rcl)))
        assert_equal(sorted(map(sorted, cl)), sorted(map(sorted, expected)))

    def test_selfloops(self):
        # A self-loop must not change the maximal cliques.
        self.G.add_edge(1, 1)
        cl = list(nx.find_cliques(self.G))
        rcl = list(nx.find_cliques_recursive(self.G))
        assert_equal(set(map(frozenset, cl)), set(map(frozenset, rcl)))
        answer = [{2, 6, 1, 3}, {2, 6, 4}, {5, 4, 7}, {8, 9}, {10, 11}]
        assert_equal(len(answer), len(cl))
        assert_true(all(set(c) in answer for c in cl))

    def test_find_cliques2(self):
        hcl = list(nx.find_cliques(self.H))
        assert_equal(sorted(map(sorted, hcl)),
                     [[1, 2], [1, 4, 5, 6], [2, 3], [3, 4, 6]])

    def test_clique_number(self):
        G = self.G
        assert_equal(nx.graph_clique_number(G), 4)
        assert_equal(nx.graph_clique_number(G, cliques=self.cl), 4)

    def test_clique_number2(self):
        # Isolated nodes: clique number is 1.
        G = nx.Graph()
        G.add_nodes_from([1, 2, 3])
        assert_equal(nx.graph_clique_number(G), 1)

    def test_clique_number3(self):
        # Empty graph: clique number is 0.
        G = nx.Graph()
        assert_equal(nx.graph_clique_number(G), 0)

    def test_number_of_cliques(self):
        G = self.G
        assert_equal(nx.graph_number_of_cliques(G), 5)
        assert_equal(nx.graph_number_of_cliques(G, cliques=self.cl), 5)
        # Scalar, list, and dict return forms.
        assert_equal(nx.number_of_cliques(G, 1), 1)
        assert_equal(list(nx.number_of_cliques(G, [1]).values()), [1])
        assert_equal(list(nx.number_of_cliques(G, [1, 2]).values()), [1, 2])
        assert_equal(nx.number_of_cliques(G, [1, 2]), {1: 1, 2: 2})
        assert_equal(nx.number_of_cliques(G, 2), 2)
        # Whole-graph form, with explicit nodes= and cliques= keywords.
        assert_equal(nx.number_of_cliques(G),
                     {1: 1, 2: 2, 3: 1, 4: 2, 5: 1,
                      6: 2, 7: 1, 8: 1, 9: 1, 10: 1, 11: 1})
        assert_equal(nx.number_of_cliques(G, nodes=list(G)),
                     {1: 1, 2: 2, 3: 1, 4: 2, 5: 1,
                      6: 2, 7: 1, 8: 1, 9: 1, 10: 1, 11: 1})
        assert_equal(nx.number_of_cliques(G, nodes=[2, 3, 4]),
                     {2: 2, 3: 1, 4: 2})
        assert_equal(nx.number_of_cliques(G, cliques=self.cl),
                     {1: 1, 2: 2, 3: 1, 4: 2, 5: 1,
                      6: 2, 7: 1, 8: 1, 9: 1, 10: 1, 11: 1})
        assert_equal(nx.number_of_cliques(G, list(G), cliques=self.cl),
                     {1: 1, 2: 2, 3: 1, 4: 2, 5: 1,
                      6: 2, 7: 1, 8: 1, 9: 1, 10: 1, 11: 1})

    def test_node_clique_number(self):
        G = self.G
        assert_equal(nx.node_clique_number(G, 1), 4)
        assert_equal(list(nx.node_clique_number(G, [1]).values()), [4])
        assert_equal(list(nx.node_clique_number(G, [1, 2]).values()), [4, 4])
        assert_equal(nx.node_clique_number(G, [1, 2]), {1: 4, 2: 4})
        assert_equal(nx.node_clique_number(G, 1), 4)
        assert_equal(nx.node_clique_number(G),
                     {1: 4, 2: 4, 3: 4, 4: 3, 5: 3, 6: 4,
                      7: 3, 8: 2, 9: 2, 10: 2, 11: 2})
        assert_equal(nx.node_clique_number(G, cliques=self.cl),
                     {1: 4, 2: 4, 3: 4, 4: 3, 5: 3, 6: 4,
                      7: 3, 8: 2, 9: 2, 10: 2, 11: 2})

    def test_cliques_containing_node(self):
        G = self.G
        assert_equal(nx.cliques_containing_node(G, 1),
                     [[2, 6, 1, 3]])
        assert_equal(list(nx.cliques_containing_node(G, [1]).values()),
                     [[[2, 6, 1, 3]]])
        assert_equal([sorted(c) for c in list(nx.cliques_containing_node(G, [1, 2]).values())],
                     [[[2, 6, 1, 3]], [[2, 6, 1, 3], [2, 6, 4]]])
        result = nx.cliques_containing_node(G, [1, 2])
        # Canonicalise each node's clique list before comparison.
        for k, v in result.items():
            result[k] = sorted(v)
        assert_equal(result,
                     {1: [[2, 6, 1, 3]], 2: [[2, 6, 1, 3], [2, 6, 4]]})
        assert_equal(nx.cliques_containing_node(G, 1),
                     [[2, 6, 1, 3]])
        # Node 2 belongs to two cliques; either enumeration order is valid.
        expected = [{2, 6, 1, 3}, {2, 6, 4}]
        answer = [set(c) for c in nx.cliques_containing_node(G, 2)]
        assert_in(answer, (expected, list(reversed(expected))))
        answer = [set(c) for c in nx.cliques_containing_node(G, 2, cliques=self.cl)]
        assert_in(answer, (expected, list(reversed(expected))))
        assert_equal(len(nx.cliques_containing_node(G)), 11)

    def test_make_clique_bipartite(self):
        G = self.G
        B = nx.make_clique_bipartite(G)
        # Clique representatives get negative labels, graph nodes positive.
        assert_equal(sorted(B),
                     [-5, -4, -3, -2, -1, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11])
        # Project onto the nodes of the original graph.
        H = nx.project(B, range(1, 12))
        assert_equal(H.adj, G.adj)
        # Project onto the nodes representing the cliques and relabel the
        # negative ids positive for a readable comparison.
        H1 = nx.project(B, range(-5, 0))
        H1 = nx.relabel_nodes(H1, {-v: v for v in range(1, 6)})
        assert_equal(sorted(H1), [1, 2, 3, 4, 5])

    def test_make_max_clique_graph(self):
        """The max-clique graph equals the bipartite clique graph projected
        onto its clique-representative nodes."""
        G = self.G
        B = nx.make_clique_bipartite(G)
        # Relabel the negative clique ids to 0..4 to match
        # make_max_clique_graph's numbering.
        H1 = nx.project(B, range(-5, 0))
        H1 = nx.relabel_nodes(H1, {-v: v - 1 for v in range(1, 6)})
        H2 = nx.make_max_clique_graph(G)
        assert_equal(H1.adj, H2.adj)

    @raises(nx.NetworkXNotImplemented)
    def test_directed(self):
        # find_cliques must reject directed graphs.
        cliques = nx.find_cliques(nx.DiGraph())
class TestEnumerateAllCliques:
    """Tests for enumerate_all_cliques (all cliques, not just maximal)."""

    def test_paper_figure_4(self):
        # Same graph as given in Fig. 4 of the paper enumerate_all_cliques
        # is based on:
        # http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=1559964&isnumber=33129
        G = nx.Graph()
        edges_fig_4 = [('a', 'b'), ('a', 'c'), ('a', 'd'), ('a', 'e'),
                       ('b', 'c'), ('b', 'd'), ('b', 'e'),
                       ('c', 'd'), ('c', 'e'),
                       ('d', 'e'),
                       ('f', 'b'), ('f', 'c'), ('f', 'g'),
                       ('g', 'f'), ('g', 'c'), ('g', 'd'), ('g', 'e')]
        G.add_edges_from(edges_fig_4)
        cliques = list(nx.enumerate_all_cliques(G))
        # Cliques must be emitted in non-decreasing size order.
        clique_sizes = list(map(len, cliques))
        assert_equal(sorted(clique_sizes), clique_sizes)
        expected_cliques = [['a'],
                            ['b'],
                            ['c'],
                            ['d'],
                            ['e'],
                            ['f'],
                            ['g'],
                            ['a', 'b'],
                            ['a', 'b', 'd'],
                            ['a', 'b', 'd', 'e'],
                            ['a', 'b', 'e'],
                            ['a', 'c'],
                            ['a', 'c', 'd'],
                            ['a', 'c', 'd', 'e'],
                            ['a', 'c', 'e'],
                            ['a', 'd'],
                            ['a', 'd', 'e'],
                            ['a', 'e'],
                            ['b', 'c'],
                            ['b', 'c', 'd'],
                            ['b', 'c', 'd', 'e'],
                            ['b', 'c', 'e'],
                            ['b', 'c', 'f'],
                            ['b', 'd'],
                            ['b', 'd', 'e'],
                            ['b', 'e'],
                            ['b', 'f'],
                            ['c', 'd'],
                            ['c', 'd', 'e'],
                            ['c', 'd', 'e', 'g'],
                            ['c', 'd', 'g'],
                            ['c', 'e'],
                            ['c', 'e', 'g'],
                            ['c', 'f'],
                            ['c', 'f', 'g'],
                            ['c', 'g'],
                            ['d', 'e'],
                            ['d', 'e', 'g'],
                            ['d', 'g'],
                            ['e', 'g'],
                            ['f', 'g'],
                            ['a', 'b', 'c'],
                            ['a', 'b', 'c', 'd'],
                            ['a', 'b', 'c', 'd', 'e'],
                            ['a', 'b', 'c', 'e']]
        # Compare as canonically sorted collections; the order of
        # same-size cliques is unspecified.
        assert_equal(sorted(map(sorted, cliques)),
                     sorted(map(sorted, expected_cliques)))
| true | true |
1c2feccef9baf99c4cf758dcacd8533f5c4d54ca | 747 | py | Python | gsextract/gsextract.py | ssloxford/gsextract | f892161767f994f291ffd13a45417dfe7184d409 | [
"Unlicense"
] | 35 | 2020-09-30T11:18:13.000Z | 2022-03-20T13:05:24.000Z | gsextract/gsextract.py | ssloxford/gsextract | f892161767f994f291ffd13a45417dfe7184d409 | [
"Unlicense"
] | 2 | 2020-11-30T22:06:58.000Z | 2021-01-01T15:19:43.000Z | gsextract/gsextract.py | ssloxford/gsextract | f892161767f994f291ffd13a45417dfe7184d409 | [
"Unlicense"
] | 4 | 2020-11-19T22:20:25.000Z | 2021-10-09T01:34:58.000Z | import click
import gsextract.gse_parser as gse_parser
@click.command()
@click.argument('input_file', type=click.Path(exists=True))
@click.argument('output_file', type=click.Path())
@click.option('--stream/--no-stream', default=False, help='Stream continuously from the file. Use the --stream flag to dump to a pcap from a real time GSE recording.')
@click.option('--reliable/--no-reliable', default=True, help='Add the --no-reliable flag to attempt to brute force IP headers in certain situations. Increases recovery but also can result in fake packets.')
def gsextract(input_file, output_file, stream, reliable):
    """Click entry point: extract IP traffic from a GSE capture into a pcap.

    This wrapper only declares the CLI; all parsing work is delegated to
    gse_parser.gse_parse.
    """
    gse_parser.gse_parse(file=input_file, outfile=output_file, stream=stream, reliable=reliable)
def cli_runner():
gsextract() | 57.461538 | 206 | 0.768407 | import click
import gsextract.gse_parser as gse_parser
@click.command()
@click.argument('input_file', type=click.Path(exists=True))
@click.argument('output_file', type=click.Path())
@click.option('--stream/--no-stream', default=False, help='Stream continuously from the file. Use the --stream flag to dump to a pcap from a real time GSE recording.')
@click.option('--reliable/--no-reliable', default=True, help='Add the --no-reliable flag to attempt to brute force IP headers in certain situations. Increases recovery but also can result in fake packets.')
def gsextract(input_file, output_file, stream, reliable):
    """Click entry point: extract IP traffic from a GSE capture into a pcap.

    This wrapper only declares the CLI; all parsing work is delegated to
    gse_parser.gse_parse.
    """
    gse_parser.gse_parse(file=input_file, outfile=output_file, stream=stream, reliable=reliable)
def cli_runner():
gsextract() | true | true |
1c2fed3ccbdce6442d7a0f5d1ce30c9fad72def6 | 728 | py | Python | fcuser/admin.py | hwanseok-dev/the-fast | 089952047f7228385e655153c094d0fce9c5e1da | [
"MIT"
] | null | null | null | fcuser/admin.py | hwanseok-dev/the-fast | 089952047f7228385e655153c094d0fce9c5e1da | [
"MIT"
] | null | null | null | fcuser/admin.py | hwanseok-dev/the-fast | 089952047f7228385e655153c094d0fce9c5e1da | [
"MIT"
] | null | null | null | from django.contrib import admin
from .models import Fcuser
class FcuserAdmin(admin.ModelAdmin):
list_display = ('email', 'password')
def changelist_view(self, request, extra_context=None):
extra_context = {'title': '사용자 목록'}
return super().changelist_view(request, extra_context)
def changeform_view(self, request, object_id=None, form_url='', extra_context=None):
fcuser = Fcuser.objects.get(pk=object_id)
extra_context = {'title': f'{fcuser.name} 수정'}
return super().changeform_view(request, object_id, form_url, extra_context)
admin.site.register(Fcuser, FcuserAdmin)
admin.site.site_header = "HwanSeok's BackOffice"
admin.site.index_title = "HwanSeok's BackOffice"
| 34.666667 | 88 | 0.723901 | from django.contrib import admin
from .models import Fcuser
class FcuserAdmin(admin.ModelAdmin):
list_display = ('email', 'password')
def changelist_view(self, request, extra_context=None):
extra_context = {'title': '사용자 목록'}
return super().changelist_view(request, extra_context)
def changeform_view(self, request, object_id=None, form_url='', extra_context=None):
fcuser = Fcuser.objects.get(pk=object_id)
extra_context = {'title': f'{fcuser.name} 수정'}
return super().changeform_view(request, object_id, form_url, extra_context)
admin.site.register(Fcuser, FcuserAdmin)
admin.site.site_header = "HwanSeok's BackOffice"
admin.site.index_title = "HwanSeok's BackOffice"
| true | true |
1c2fed7afd2f122fb62bb2d4f069b59c2f3a2420 | 4,225 | py | Python | yatube/yatube/settings.py | Andrey11995/yatube_project | 5f053803e6deb42f1e75e69ecb3d2b94cbb255e5 | [
"MIT"
] | null | null | null | yatube/yatube/settings.py | Andrey11995/yatube_project | 5f053803e6deb42f1e75e69ecb3d2b94cbb255e5 | [
"MIT"
] | null | null | null | yatube/yatube/settings.py | Andrey11995/yatube_project | 5f053803e6deb42f1e75e69ecb3d2b94cbb255e5 | [
"MIT"
] | null | null | null | """
Django settings for yatube project.
Generated by 'django-admin startproject' using Django 2.2.19.
For more information on this file, see
https://docs.djangoproject.com/en/2.2/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/2.2/ref/settings/
"""
import os

# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))

# SECURITY WARNING: keep the secret key used in production secret!
# NOTE(review): the key is committed to the repository — rotate it and
# load it from the environment before any production deployment.
SECRET_KEY = 'c+utti0k^6xo3=@hpoehw_%jteq#492km*@k-h6q6qci+()c96'

# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True

ALLOWED_HOSTS = [
    'localhost',
    '127.0.0.1',
    '[::1]',
    'testserver',
    'www.andrey11995.pythonanywhere.com',
    'andrey11995.pythonanywhere.com',
]

# Application definition

INSTALLED_APPS = [
    'django.contrib.admin',
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.messages',
    'django.contrib.staticfiles',
    'posts.apps.PostsConfig',
    'users.apps.UsersConfig',
    'core.apps.CoreConfig',
    'about.apps.AboutConfig',
    'sorl.thumbnail',
    'debug_toolbar',
]

# Bug fix: DebugToolbarMiddleware was listed here unconditionally AND
# appended again in the `if DEBUG` block below, so the toolbar middleware
# ran twice per request. It is now added only via the DEBUG-guarded
# append, which also keeps it out of non-debug deployments.
MIDDLEWARE = [
    'django.middleware.security.SecurityMiddleware',
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.middleware.common.CommonMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
    'django.middleware.clickjacking.XFrameOptionsMiddleware',
]

if DEBUG:
    MIDDLEWARE += [
        'debug_toolbar.middleware.DebugToolbarMiddleware',
    ]

ROOT_URLCONF = 'yatube.urls'

TEMPLATES_DIR = os.path.join(BASE_DIR, 'templates')
TEMPLATES = [
    {
        'BACKEND': 'django.template.backends.django.DjangoTemplates',
        'DIRS': [TEMPLATES_DIR],
        'APP_DIRS': True,
        'OPTIONS': {
            'context_processors': [
                'django.template.context_processors.debug',
                'django.template.context_processors.request',
                'django.contrib.auth.context_processors.auth',
                'django.contrib.messages.context_processors.messages',
                # Project processor injecting the current year into templates.
                'core.context_processors.year.year'
            ],
        },
    },
]

STATICFILES_DIRS = [os.path.join(BASE_DIR, 'static')]

WSGI_APPLICATION = 'yatube.wsgi.application'

# Database
# https://docs.djangoproject.com/en/2.2/ref/settings/#databases
DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.sqlite3',
        'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
    }
}

# Password validation
# https://docs.djangoproject.com/en/2.2/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
    {
        'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
    },
]

# Internationalization
# https://docs.djangoproject.com/en/2.2/topics/i18n/
LANGUAGE_CODE = 'ru-ru'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True

# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.2/howto/static-files/
STATIC_URL = '/static/'
# NOTE(review): an absolute '/static/' collect-to directory is unusual —
# confirm it matches the PythonAnywhere deployment layout.
STATIC_ROOT = '/static/'

LOGIN_URL = 'users:login'
LOGIN_REDIRECT_URL = 'posts:index'
# LOGOUT_REDIRECT_URL = 'posts:index'

# During development, outgoing mail is dumped to files under sent_emails/.
EMAIL_BACKEND = 'django.core.mail.backends.filebased.EmailBackend'
EMAIL_FILE_PATH = os.path.join(BASE_DIR, 'sent_emails')

CSRF_FAILURE_VIEW = 'core.views.csrf_failure'

MEDIA_URL = '/media/'
MEDIA_ROOT = os.path.join(BASE_DIR, 'media')

CACHES = {
    'default': {
        'BACKEND': 'django.core.cache.backends.locmem.LocMemCache',
    }
}

# Hosts allowed to see the debug toolbar.
INTERNAL_IPS = [
    '127.0.0.1',
]
| 25.299401 | 91 | 0.692308 |
import os

# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))

# SECURITY WARNING: keep the secret key used in production secret!
# NOTE(review): the key is committed to the repository — rotate it and
# load it from the environment before any production deployment.
SECRET_KEY = 'c+utti0k^6xo3=@hpoehw_%jteq#492km*@k-h6q6qci+()c96'

# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True

ALLOWED_HOSTS = [
    'localhost',
    '127.0.0.1',
    '[::1]',
    'testserver',
    'www.andrey11995.pythonanywhere.com',
    'andrey11995.pythonanywhere.com',
]

# Application definition

INSTALLED_APPS = [
    'django.contrib.admin',
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.messages',
    'django.contrib.staticfiles',
    'posts.apps.PostsConfig',
    'users.apps.UsersConfig',
    'core.apps.CoreConfig',
    'about.apps.AboutConfig',
    'sorl.thumbnail',
    'debug_toolbar',
]

# Bug fix: DebugToolbarMiddleware was listed here unconditionally AND
# appended again in the `if DEBUG` block below, so the toolbar middleware
# ran twice per request. It is now added only via the DEBUG-guarded
# append, which also keeps it out of non-debug deployments.
MIDDLEWARE = [
    'django.middleware.security.SecurityMiddleware',
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.middleware.common.CommonMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
    'django.middleware.clickjacking.XFrameOptionsMiddleware',
]

if DEBUG:
    MIDDLEWARE += [
        'debug_toolbar.middleware.DebugToolbarMiddleware',
    ]

ROOT_URLCONF = 'yatube.urls'

TEMPLATES_DIR = os.path.join(BASE_DIR, 'templates')
TEMPLATES = [
    {
        'BACKEND': 'django.template.backends.django.DjangoTemplates',
        'DIRS': [TEMPLATES_DIR],
        'APP_DIRS': True,
        'OPTIONS': {
            'context_processors': [
                'django.template.context_processors.debug',
                'django.template.context_processors.request',
                'django.contrib.auth.context_processors.auth',
                'django.contrib.messages.context_processors.messages',
                # Project processor injecting the current year into templates.
                'core.context_processors.year.year'
            ],
        },
    },
]

STATICFILES_DIRS = [os.path.join(BASE_DIR, 'static')]

WSGI_APPLICATION = 'yatube.wsgi.application'

# Database
# https://docs.djangoproject.com/en/2.2/ref/settings/#databases
DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.sqlite3',
        'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
    }
}

# Password validation
# https://docs.djangoproject.com/en/2.2/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
    {
        'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
    },
]

# Internationalization
# https://docs.djangoproject.com/en/2.2/topics/i18n/
LANGUAGE_CODE = 'ru-ru'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True

# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.2/howto/static-files/
STATIC_URL = '/static/'
# NOTE(review): an absolute '/static/' collect-to directory is unusual —
# confirm it matches the PythonAnywhere deployment layout.
STATIC_ROOT = '/static/'

LOGIN_URL = 'users:login'
LOGIN_REDIRECT_URL = 'posts:index'
# LOGOUT_REDIRECT_URL = 'posts:index'

# During development, outgoing mail is dumped to files under sent_emails/.
EMAIL_BACKEND = 'django.core.mail.backends.filebased.EmailBackend'
EMAIL_FILE_PATH = os.path.join(BASE_DIR, 'sent_emails')

CSRF_FAILURE_VIEW = 'core.views.csrf_failure'

MEDIA_URL = '/media/'
MEDIA_ROOT = os.path.join(BASE_DIR, 'media')

CACHES = {
    'default': {
        'BACKEND': 'django.core.cache.backends.locmem.LocMemCache',
    }
}

# Hosts allowed to see the debug toolbar.
INTERNAL_IPS = [
    '127.0.0.1',
]
| true | true |
1c2fee1ff86668a2fd7b4d181ef083791a47ddcd | 6,726 | py | Python | modulo_ht_validate_transform/ht_validate_transform/src/datasets_collections_files/metadata.py | regulondbunam/3RegulonDB-Santana | 7a9076ea3b4a26dc5445f4ac181bc26993d9ec1c | [
"MIT"
] | null | null | null | modulo_ht_validate_transform/ht_validate_transform/src/datasets_collections_files/metadata.py | regulondbunam/3RegulonDB-Santana | 7a9076ea3b4a26dc5445f4ac181bc26993d9ec1c | [
"MIT"
] | null | null | null | modulo_ht_validate_transform/ht_validate_transform/src/datasets_collections_files/metadata.py | regulondbunam/3RegulonDB-Santana | 7a9076ea3b4a26dc5445f4ac181bc26993d9ec1c | [
"MIT"
] | null | null | null | import pandas as pd
import json
'''#
# name: Metadata.py Version [1.0]
Clase que se encargara de recibir un dataframe con datos del metadata y se encargara
de manipularlos y poder crear un dataframe con los datos del metadadata estructurados como
se requieren.
```python
program_name [options list] arguments
```
## examples
```python
put here your code example
```
## description
Manipulacion de datos atraves de dataframes para poder crear el metadata
## arguments
No necesita de argumentos para la ejecuion de dicha clase
## requirements
Sin requerimientos
## softwareRequirements
Se necesita la libreria de python llamadas
pandas - Es un paquete de Python que proporciona estructuras de datos similares a los dataframes de R.
Pandas depende de Numpy, la librería que añade un potente tipo matricial a Python.
json - Es un formato de intercambio de datos ligero inspirado en la sintaxis literal de objetos de JavaScript
## memoryRequirements
Se recomienda al menos tener 8gb de ram para que el proceso se ejecute a una velocidad
estandar a la hora de correrlo.
#'''
class Metadata():
    """Extracts the HT metadata fields from a two-column dataframe.

    The dataframe is expected to be indexed by the '#...' header labels
    declared below, with the field contents in a 'Value' column. Each
    accessor property receives the dataframe on assignment and stores the
    looked-up value, or None when that row is absent.

    Calling the instance returns the extracted fields as a plain dict.
    """

    # Row labels used as the dataframe index for each metadata field.
    DATASET_TITLE = "#DATASET TITLE"
    PMID = "#PMID:"
    CORRESPONDING_AUTHOR = "#CORRESPONDING AUTHOR (EMAIL):"
    STRAIN = "#STRAIN:"
    REFERENCE_GENOME = "#REFERENCE GENOME:"
    DATASET_ACCESION_NUMBER = "#DATASET ACCESION NUMBER[DATABASE]:"
    EXPERIMENTAL_DETAILS = "#EXPERIMENTAL DETAILS"
    METHOD = "#METHOD:"
    METHOD_DETAILS = "#METHOD DETAILS:"
    INSTRUMENT = "#INSTRUMENT:"
    EVIDENCE = "#EVIDENCE:"
    STATISTICAL_MODEL = "#STATISTICAL MODEL"
    # Column holding the field contents.
    VALUE_COLUMN = 'Value'

    def __init__(self, dataframe):
        self.dataframe = dataframe
        # Each assignment funnels through the corresponding property
        # setter below, which performs the dataframe lookup.
        self.title = dataframe
        self.pmid = dataframe
        self.corresponding_author = dataframe
        self.strain = dataframe
        self.reference_genome = dataframe
        self.dataset_accesion_number = dataframe
        self.experiment_details = dataframe
        self.method = dataframe
        self.method_details = dataframe
        self.instrument = dataframe
        self.evidence = dataframe
        self.statistical_model = dataframe

    @staticmethod
    def _lookup(dataframe, row_label):
        """Return ``dataframe.at[row_label, VALUE_COLUMN]`` or None if missing.

        Deduplicates the identical try/except block the original repeated
        in all twelve property setters (pandas ``.at`` raises KeyError for
        an absent label).
        """
        try:
            return dataframe.at[row_label, Metadata.VALUE_COLUMN]
        except KeyError:
            return None

    @property
    def title(self):
        return self._title

    @title.setter
    def title(self, dataframe):
        self._title = self._lookup(dataframe, Metadata.DATASET_TITLE)

    @property
    def pmid(self):
        return self._pmid

    @pmid.setter
    def pmid(self, dataframe):
        self._pmid = self._lookup(dataframe, Metadata.PMID)

    @property
    def corresponding_author(self):
        return self._corresponding_author

    @corresponding_author.setter
    def corresponding_author(self, dataframe):
        self._corresponding_author = self._lookup(dataframe, Metadata.CORRESPONDING_AUTHOR)

    @property
    def strain(self):
        return self._strain

    @strain.setter
    def strain(self, dataframe):
        self._strain = self._lookup(dataframe, Metadata.STRAIN)

    @property
    def reference_genome(self):
        return self._reference_genome

    @reference_genome.setter
    def reference_genome(self, dataframe):
        self._reference_genome = self._lookup(dataframe, Metadata.REFERENCE_GENOME)

    @property
    def dataset_accesion_number(self):
        return self._dataset_accesion_number

    @dataset_accesion_number.setter
    def dataset_accesion_number(self, dataframe):
        self._dataset_accesion_number = self._lookup(dataframe, Metadata.DATASET_ACCESION_NUMBER)

    @property
    def experiment_details(self):
        return self._experiment_details

    @experiment_details.setter
    def experiment_details(self, dataframe):
        self._experiment_details = self._lookup(dataframe, Metadata.EXPERIMENTAL_DETAILS)

    @property
    def method(self):
        return self._method

    @method.setter
    def method(self, dataframe):
        self._method = self._lookup(dataframe, Metadata.METHOD)

    @property
    def method_details(self):
        return self._method_details

    @method_details.setter
    def method_details(self, dataframe):
        self._method_details = self._lookup(dataframe, Metadata.METHOD_DETAILS)

    @property
    def instrument(self):
        return self._instrument

    @instrument.setter
    def instrument(self, dataframe):
        self._instrument = self._lookup(dataframe, Metadata.INSTRUMENT)

    @property
    def evidence(self):
        return self._evidence

    @evidence.setter
    def evidence(self, dataframe):
        self._evidence = self._lookup(dataframe, Metadata.EVIDENCE)

    @property
    def statistical_model(self):
        return self._statistical_model

    @statistical_model.setter
    def statistical_model(self, dataframe):
        self._statistical_model = self._lookup(dataframe, Metadata.STATISTICAL_MODEL)

    def __call__(self):
        """Return the extracted metadata fields as a plain dict."""
        metadata_frame = {
            'title': self.title,
            'pmid': self.pmid,
            'author': self.corresponding_author,
            'strain': self.strain,
            'reference genome': self.reference_genome,
            'dataset accesion number': self.dataset_accesion_number,
            'experiment details': self.experiment_details,
            'method': self.method,
            'method details': self.method_details,
            'instrument': self.instrument,
            'evidence': self.evidence,
            'statistical model': self.statistical_model
        }
        return metadata_frame
'''#
dateCreated: [2020-12-22] - author: [Santana Estrada Hernandez]
dateModified [2021-01-07] - contributor: [Se realizo una optimizacion de codigo]
#'''
| 28.5 | 113 | 0.666667 | import pandas as pd
import json
class Metadata():
    """Pulls the HT metadata fields out of a two-column dataframe.

    The dataframe is indexed by the '#...' header labels below, with the
    field contents in the 'Value' column. Assigning a dataframe to any of
    the field properties stores the looked-up value (None when the row is
    missing); calling the instance returns everything as a plain dict.
    """

    # Row labels used as the dataframe index for each metadata field.
    DATASET_TITLE = "#DATASET TITLE"
    PMID = "#PMID:"
    CORRESPONDING_AUTHOR = "#CORRESPONDING AUTHOR (EMAIL):"
    STRAIN = "#STRAIN:"
    REFERENCE_GENOME = "#REFERENCE GENOME:"
    DATASET_ACCESION_NUMBER = "#DATASET ACCESION NUMBER[DATABASE]:"
    EXPERIMENTAL_DETAILS = "#EXPERIMENTAL DETAILS"
    METHOD = "#METHOD:"
    METHOD_DETAILS = "#METHOD DETAILS:"
    INSTRUMENT = "#INSTRUMENT:"
    EVIDENCE = "#EVIDENCE:"
    STATISTICAL_MODEL = "#STATISTICAL MODEL"
    # Column holding the field contents.
    VALUE_COLUMN = 'Value'

    def __init__(self, dataframe):
        self.dataframe = dataframe
        # Assigning the dataframe to each property triggers its setter,
        # which performs the lookup and stores the value (or None).
        for field in ('title', 'pmid', 'corresponding_author', 'strain',
                      'reference_genome', 'dataset_accesion_number',
                      'experiment_details', 'method', 'method_details',
                      'instrument', 'evidence', 'statistical_model'):
            setattr(self, field, dataframe)

    def _field(storage, row_label):
        """Class-body helper: build a property whose setter looks
        *row_label* up in an assigned dataframe and stores the result in
        the *storage* attribute (None when the row is absent)."""
        def read(self):
            return getattr(self, storage)

        def write(self, dataframe):
            try:
                value = dataframe.at[row_label, Metadata.VALUE_COLUMN]
            except KeyError:
                value = None
            setattr(self, storage, value)

        return property(read, write)

    title = _field('_title', DATASET_TITLE)
    pmid = _field('_pmid', PMID)
    corresponding_author = _field('_corresponding_author', CORRESPONDING_AUTHOR)
    strain = _field('_strain', STRAIN)
    reference_genome = _field('_reference_genome', REFERENCE_GENOME)
    dataset_accesion_number = _field('_dataset_accesion_number', DATASET_ACCESION_NUMBER)
    experiment_details = _field('_experiment_details', EXPERIMENTAL_DETAILS)
    method = _field('_method', METHOD)
    method_details = _field('_method_details', METHOD_DETAILS)
    instrument = _field('_instrument', INSTRUMENT)
    evidence = _field('_evidence', EVIDENCE)
    statistical_model = _field('_statistical_model', STATISTICAL_MODEL)

    # The factory is only needed while the class body executes.
    del _field

    def __call__(self):
        """Return the extracted metadata fields as a plain dict."""
        return {
            'title': self.title,
            'pmid': self.pmid,
            'author': self.corresponding_author,
            'strain': self.strain,
            'reference genome': self.reference_genome,
            'dataset accesion number': self.dataset_accesion_number,
            'experiment details': self.experiment_details,
            'method': self.method,
            'method details': self.method_details,
            'instrument': self.instrument,
            'evidence': self.evidence,
            'statistical model': self.statistical_model
        }
| true | true |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.