# --- id: 132827 ---
import torch
import torch.nn as nn
from ltprg.model.seq import sort_seq_tensors, unsort_seq_tensors, SequenceModel
from torch.autograd import Variable
class ObservationModel(nn.Module):
def __init__(self):
super(ObservationModel, self).__init__()
def forward(self, observation):
""" Computes batch of transformed observations """
pass
def on_gpu(self):
return next(self.parameters()).is_cuda
def save(self, model_path):
init_params = self._get_init_params()
model_obj = dict()
model_obj["init_params"] = init_params
model_obj["state_dict"] = self.state_dict()
model_obj["obs_type"] = type(self).__name__
torch.save(model_obj, model_path)
@staticmethod
def load(model_path):
model_obj = torch.load(model_path)
init_params = model_obj["init_params"]
state_dict = model_obj["state_dict"]
obs_type = model_obj["obs_type"]
model = None
if obs_type == "ObservationModelIndexedSequential":
model = ObservationModelIndexedSequential.make(init_params)
        elif obs_type == "ObservationModelReorderedSequential":
model = ObservationModelReorderedSequential.make(init_params)
model.load_state_dict(state_dict)
return model
class ObservationModelIndexed(ObservationModel):
def __init__(self, indexed_obs_size, num_indices):
super(ObservationModelIndexed, self).__init__()
self._indexed_obs_size = indexed_obs_size
self._num_indices = num_indices
# observations: batch x input observation
# indices (one-hots): batch x (num_indices) x (num_indices)
    # return batch x num_indices x indexed_obs_size
def _forward_for_indices(self, observation, indices):
"""
Computes batch of transformed observations from input observations
and indexed index indicators
"""
pass
def forward(self, observation):
indices = torch.eye(self._num_indices).unsqueeze(0).expand(observation[0].size(0), self._num_indices, self._num_indices)
if self.on_gpu():
device = 0
if isinstance(observation, tuple):
device = observation[0].get_device()
else:
device = observation.get_device()
indices = indices.cuda(device)
indices = Variable(indices, requires_grad=False)
transformed = self._forward_for_indices(observation, indices)
transformed = torch.cat((transformed, indices), 2)
return transformed.view(indices.size(0), self._num_indices*(self._num_indices+self._indexed_obs_size))
class ObservationModelIndexedSequential(ObservationModelIndexed):
def __init__(self, indexed_obs_size, num_indices, seq_model):
super(ObservationModelIndexedSequential, self).__init__(indexed_obs_size, num_indices)
self._init_params = dict()
self._init_params["indexed_obs_size"] = indexed_obs_size
self._init_params["num_indices"] = num_indices
self._init_params["arch_type"] = type(seq_model).__name__
self._init_params["seq_model"] = seq_model._get_init_params()
self._seq_model = seq_model
self._decoder = nn.Linear(seq_model.get_hidden_size()*seq_model.get_directions(), indexed_obs_size)
self._decoder_nl = nn.Tanh()
def _get_init_params(self):
return self._init_params
@staticmethod
def make(init_params):
indexed_obs_size = init_params["indexed_obs_size"]
        num_indices = init_params["num_indices"]
seq_model = SequenceModel.make(init_params["seq_model"], init_params["arch_type"])
return ObservationModelIndexedSequential(indexed_obs_size, num_indices, seq_model)
def get_seq_model(self):
return self._seq_model
# observations: batch x input observation
# indices (one-hots): batch x (num_indices) x (num_indices)
# return batch x num_indices x indexed_obs_size
def _forward_for_indices(self, observation, indices):
num_indices = indices.size(2)
batch_size = indices.size(0)
max_len = observation[0].size(1)
seq = observation[0].transpose(0,1) # After transpose: Length x batch
seq_length = observation[1] # Batch
# length, indices*batch
if len(seq.size()) == 2:
seq = seq.unsqueeze(1).expand(max_len, num_indices, batch_size).contiguous().view(-1, num_indices*batch_size)
else:
seq = seq.unsqueeze(1).expand(max_len, num_indices, batch_size, seq.size(2)).contiguous().view(-1, num_indices*batch_size, seq.size(2)).float()
seq_length = seq_length.unsqueeze(1).expand(batch_size, num_indices).contiguous().view(-1, num_indices*batch_size).squeeze()
indices = indices.contiguous().view(-1, num_indices)
sorted_seq, sorted_length, sorted_inputs, sorted_indices = sort_seq_tensors(seq, seq_length, inputs=[indices], on_gpu=self.on_gpu())
output, hidden = self._seq_model(seq_part=sorted_seq, seq_length=sorted_length, input=sorted_inputs[0])
if isinstance(hidden, tuple): # Handle LSTM
hidden = hidden[0]
decoded = self._decoder(hidden.transpose(0,1).contiguous().view(-1, hidden.size(0)*hidden.size(2)))
output = self._decoder_nl(decoded)
unsorted_output = unsort_seq_tensors(sorted_indices, [output])[0]
unsorted_output = unsorted_output.view(batch_size, num_indices, self._indexed_obs_size)
return unsorted_output
class ObservationModelReordered(ObservationModel):
def __init__(self, indexed_obs_size, num_indices):
super(ObservationModelReordered, self).__init__()
self._indexed_obs_size = indexed_obs_size
self._num_indices = num_indices
# observations: batch x input observation
# indices (one-hots): batch x num_indices
# return batch x num_indices x indexed_obs_size
def _forward_for_indices(self, observation, indices):
"""
Computes batch of transformed observations from input observations
and indexed index indicators
"""
pass
def forward(self, observation):
indices = torch.arange(0, self._num_indices).unsqueeze(0).expand(observation[0].size(0), self._num_indices).long()
if self.on_gpu():
device = 0
if isinstance(observation, tuple):
device = observation[0].get_device()
else:
device = observation.get_device()
indices = indices.cuda(device)
indices = Variable(indices, requires_grad=False)
transformed = self._forward_for_indices(observation, indices)
return transformed.view(indices.size(0), self._num_indices*self._indexed_obs_size)
class ObservationModelReorderedSequential(ObservationModelReordered):
def __init__(self, indexed_obs_size, num_indices, seq_model):
super(ObservationModelReorderedSequential, self).__init__(indexed_obs_size, num_indices)
self._init_params = dict()
self._init_params["indexed_obs_size"] = indexed_obs_size
self._init_params["num_indices"] = num_indices
self._init_params["arch_type"] = type(seq_model).__name__
self._init_params["seq_model"] = seq_model._get_init_params()
self._seq_model = seq_model
if indexed_obs_size != seq_model.get_hidden_size()*seq_model.get_directions():
raise ValueError("indxed_obs_size must be the same as the seq_model hidden size")
def _get_init_params(self):
return self._init_params
@staticmethod
def make(init_params):
indexed_obs_size = init_params["indexed_obs_size"]
        num_indices = init_params["num_indices"]
seq_model = SequenceModel.make(init_params["seq_model"], init_params["arch_type"])
return ObservationModelReorderedSequential(indexed_obs_size, num_indices, seq_model)
def get_seq_model(self):
return self._seq_model
# observations: batch x input observation
# indices (one-hots): batch x num_indices
# return batch x num_indices x indexed_obs_size
def _forward_for_indices(self, observation, indices):
num_indices = indices.size(1)
batch_size = indices.size(0)
max_len = observation[0].size(1)
obj_size = observation[0].size(2)
reordered_obs = self._make_obs_reorderings(indices, observation)
seq = reordered_obs[0].view(batch_size*num_indices, max_len, obj_size).transpose(0,1).float()
seq_length = reordered_obs[1].contiguous().view(batch_size*num_indices)
sorted_seq, sorted_length, sorted_indices = sort_seq_tensors(seq, seq_length, inputs=None, on_gpu=self.on_gpu())
output, hidden = self._seq_model(seq_part=sorted_seq, seq_length=sorted_length, input=None)
if isinstance(hidden, tuple): # Handle LSTM
hidden = hidden[0]
output = hidden.transpose(0,1).contiguous().view(-1, hidden.size(0)*hidden.size(2))
unsorted_output = unsort_seq_tensors(sorted_indices, [output])[0]
unsorted_output = unsorted_output.view(batch_size, num_indices, self._indexed_obs_size)
return unsorted_output
# observation: batch x length x obj size
# indices: batch x num_indices
# return: batch x num_indices x length x obj size (reorderings)
def _make_obs_reorderings(self, indices, observation):
batch_size = observation[0].size(0)
num_indices = indices.size(1)
max_len = observation[0].size(1)
obj_size = observation[0].size(2)
seq_len = observation[1]
indexed_objs = self._get_indexed_obs_obj(indices, observation)
last_objs = self._get_last_obs_obj(num_indices, observation)
seq = observation[0].unsqueeze(1).expand(batch_size, num_indices, max_len, obj_size).contiguous().view(batch_size*num_indices*max_len, obj_size)
seq_clone = seq.clone()
offset = Variable(torch.arange(0,batch_size*num_indices).long(), requires_grad=False)*max_len
last_indices = Variable(torch.ones(batch_size, num_indices).long()*(observation[1].unsqueeze(1).expand(batch_size,num_indices)-1), requires_grad=False).view(batch_size*num_indices)
if self.on_gpu():
device = observation[0].get_device()
offset = offset.cuda(device)
last_indices = last_indices.cuda(device)
offset_indices = offset+indices.view(batch_size*num_indices)
offset_last_indices = offset + last_indices
seq_clone[offset_indices] = last_objs.view(batch_size*num_indices, obj_size)
seq_clone[offset_last_indices] = indexed_objs.view(batch_size*num_indices, obj_size)
seq_clone = seq_clone.view(batch_size, num_indices, max_len, obj_size)
seq_len = seq_len.unsqueeze(1).expand(batch_size, num_indices)
#print "seq", seq_clone[0]
#print "obs", observation[0][0]
#print "last", last_objs[0]
#print "indexed", indexed_objs[0]
return (seq_clone, seq_len)
# observation: batch x length x obj size
# indices: batch x num_indices
# return: batch x num_indices x obj size, num_indices*batch_size (indices)
def _get_indexed_obs_obj(self, indices, observation):
batch_size = observation[0].size(0)
seq_length = observation[0].size(1)
obj_size = observation[0].size(2)
num_indices = indices.size(1)
offset = Variable(torch.arange(0,batch_size).unsqueeze(0).expand(num_indices, batch_size).transpose(0,1).contiguous().view(num_indices*batch_size).long(), requires_grad=False)*num_indices
if self.on_gpu():
device = observation[0].get_device()
offset = offset.cuda(device)
offset_indices = offset+indices.view(batch_size*num_indices)
indexed_obs = observation[0].contiguous().view(batch_size*seq_length, obj_size)[offset_indices]
return indexed_obs.view(batch_size, num_indices, obj_size)
# observation: batch x length x obj size
# return: batch x num_indices x obj size, num_indices*batch_size (indices)
def _get_last_obs_obj(self, num_indices, observation):
batch_size = observation[0].size(0)
indices = Variable(torch.ones(batch_size, num_indices).long()*(observation[1].unsqueeze(1).expand(batch_size,num_indices)-1), requires_grad=False)
if self.on_gpu():
device = observation[0].get_device()
indices = indices.cuda(device)
return self._get_indexed_obs_obj(indices, observation)
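# --- Illustrative usage (added sketch, not from the original module) ---
# Assuming `seq_model` is a SequenceModel whose hidden size times number of
# directions matches `indexed_obs_size`, a model can be saved and reloaded
# polymorphically via ObservationModel.load, which dispatches on "obs_type":
#
#   model = ObservationModelIndexedSequential(indexed_obs_size=32,
#                                             num_indices=3,
#                                             seq_model=seq_model)
#   model.save("obs_model.pt")
#   model = ObservationModel.load("obs_model.pt")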
# --- id: 132902 ---
import numpy as np
import pandas as pd
from sklearn.decomposition import PCA
from numpy.linalg import norm
import matplotlib.pyplot as plt
from sklearn import preprocessing
import seaborn as sns; sns.set_theme()
seed = 0
np.random.seed(seed)
"""We test without normalization"""
def normalize(data, shift = 'z-score'):
if shift not in ['mean', 'min', 'z-score', 'pca']:
raise ValueError("please enter a correct shift parameter.")
    if shift == 'min':
        _mu = data.min(axis=0)
        cdata = data - _mu  # shift so the data lies in the positive orthant
        _scl = cdata.std()
        cdata = cdata / _scl
elif shift == 'mean':
_mu = data.mean(axis=0)
cdata = data - _mu
_scl = cdata.std()
cdata = cdata / _scl
elif shift == 'pca':
_mu = data.mean(axis=0)
cdata = data - _mu # mean center
        rds = norm(cdata, axis=1) # distance of each (already centered) data point from 0
_scl = np.median(rds) # 50% of data points are within that radius
cdata = cdata / _scl
else: # shift == 'z-score':
_mu = data.mean(axis=0)
_scl = data.std(axis=0)
cdata = (data - _mu) / _scl
return cdata, (_mu, _scl)
def sorting(data, sorting='pca'):
if sorting=='norm-mean':
data, parameters = normalize(data, shift='mean')
size = np.linalg.norm(data, ord=2, axis=1)
ind = np.argsort(size)
if sorting=='norm-orthant':
data, parameters = normalize(data, shift='min')
size = np.linalg.norm(data, ord=2, axis=1)
ind = np.argsort(size)
if sorting=='pca':
# data, parameters = normalize(data, shift='pca')
pca = PCA(n_components=1)
size = pca.fit_transform(data).reshape(-1)
ind = np.argsort(size)
return data[ind], size
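# Illustrative example (added): order the rows of an array by their first
# principal-component score.
#   X = np.random.rand(100, 5)
#   X_sorted, score = sorting(X, sorting='pca')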
def rn_wine_dataset():
plt.style.use('ggplot')
data = pd.read_csv("data/Real_data/Wine.csv")
X = data.drop(['14'],axis=1).values
font_scale = 3
dist_corr = np.zeros((len(X), len(X)))
for i in range(len(X)):
for j in range(i, len(X)):
dist_corr[j,i] = dist_corr[i,j] = np.linalg.norm(X[i]-X[j], ord=2, axis=0)
sns.set(rc={'figure.figsize':(12,10)}, font_scale=font_scale)
fig, ax = plt.subplots()
im = ax.imshow(dist_corr, cmap='YlGnBu', aspect='auto')
fig.colorbar(im, ax=ax)
plt.xticks([0, 25, 50, 75, 100, 125, 150, 175])
plt.yticks([0, 25, 50, 75, 100, 125, 150, 175])
plt.savefig('results/original_wine.pdf', bbox_inches='tight')
# plt.show()
ndata, size_pca = sorting(X, sorting='pca')
dist_corr_sort = np.zeros((len(ndata), len(ndata)))
for i in range(len(ndata)):
for j in range(i, len(ndata)):
dist_corr_sort[j,i] = dist_corr_sort[i,j] = np.linalg.norm(ndata[i]-ndata[j], ord=2, axis=0)
sns.set(rc={'figure.figsize':(12,10)}, font_scale=font_scale)
fig, ax = plt.subplots()
im = ax.imshow(dist_corr_sort, cmap='YlGnBu', aspect='auto')
fig.colorbar(im, ax=ax)
plt.xticks([0, 25, 50, 75, 100, 125, 150, 175])
plt.yticks([0, 25, 50, 75, 100, 125, 150, 175])
plt.savefig('results/pca_wine.pdf', bbox_inches='tight')
# plt.show()
ndata, size_no = sorting(X, sorting='norm-orthant')
dist_corr_sort = np.zeros((len(ndata), len(ndata)))
for i in range(len(ndata)):
for j in range(i, len(ndata)):
dist_corr_sort[j,i] = dist_corr_sort[i,j] = np.linalg.norm(ndata[i]-ndata[j], ord=2, axis=0)
sns.set(rc={'figure.figsize':(12,10)}, font_scale=font_scale)
fig, ax = plt.subplots()
im = ax.imshow(dist_corr_sort, cmap='YlGnBu', aspect='auto')
fig.colorbar(im, ax=ax)
plt.xticks([0, 25, 50, 75, 100, 125, 150, 175])
plt.yticks([0, 25, 50, 75, 100, 125, 150, 175])
plt.savefig('results/norm-orthant_wine.pdf', bbox_inches='tight')
# plt.show()
ndata, size_nm = sorting(X, sorting='norm-mean')
dist_corr_sort = np.zeros((len(ndata), len(ndata)))
for i in range(len(ndata)):
for j in range(i, len(ndata)):
dist_corr_sort[j,i] = dist_corr_sort[i,j] = np.linalg.norm(ndata[i]-ndata[j], ord=2, axis=0)
sns.set(rc={'figure.figsize':(12,10)}, font_scale=font_scale)
fig, ax = plt.subplots()
im = ax.imshow(dist_corr_sort, cmap='YlGnBu', aspect='auto')
fig.colorbar(im, ax=ax)
plt.xticks([0, 25, 50, 75, 100, 125, 150, 175])
plt.yticks([0, 25, 50, 75, 100, 125, 150, 175])
plt.savefig('results/norm-mean_wine.pdf', bbox_inches='tight')
# plt.show()
sorting_df = pd.DataFrame()
sorting_df['PCA'] = size_pca
sorting_df['Norm-mean'] = size_nm
sorting_df['Norm-orthant'] = size_no
sns.set(style='ticks', color_codes=True, font_scale=3)
g = sns.pairplot(sorting_df, corner=True, height=4.2, aspect=1)
plt.savefig('results/sort_pair_plot_wine.pdf', bbox_inches='tight')
# plt.show()
def rn_iris_dataset():
plt.style.use('ggplot')
data = pd.read_csv("data/Real_data/Iris.csv")
le = preprocessing.LabelEncoder()
data['Species'] = le.fit_transform(data['Species'])
X = data.drop(['Species','Id'],axis=1).values
y = data['Species'].values
font_scale = 3
dist_corr = np.zeros((len(X), len(X)))
for i in range(len(X)):
for j in range(i, len(X)):
dist_corr[j,i] = dist_corr[i,j] = np.linalg.norm(X[i]-X[j], ord=2, axis=0)
sns.set(rc={'figure.figsize':(12,10)}, font_scale=font_scale)
fig, ax = plt.subplots()
im = ax.imshow(dist_corr, cmap='YlGnBu', aspect='auto')
fig.colorbar(im, ax=ax)
plt.xticks([0, 20, 40, 60, 80, 100, 120, 140])
plt.yticks([0, 20, 40, 60, 80, 100, 120, 140])
plt.savefig('results/original_iris.pdf', bbox_inches='tight')
# plt.show()
ndata, size_pca = sorting(X, sorting='pca')
dist_corr_sort = np.zeros((len(ndata), len(ndata)))
for i in range(len(ndata)):
for j in range(i, len(ndata)):
dist_corr_sort[j,i] = dist_corr_sort[i,j] = np.linalg.norm(ndata[i]-ndata[j], ord=2, axis=0)
sns.set(rc={'figure.figsize':(12,10)}, font_scale=font_scale)
fig, ax = plt.subplots()
im = ax.imshow(dist_corr_sort, cmap='YlGnBu', aspect='auto')
fig.colorbar(im, ax=ax)
plt.xticks([0, 20, 40, 60, 80, 100, 120, 140])
plt.yticks([0, 20, 40, 60, 80, 100, 120, 140])
plt.savefig('results/pca_iris.pdf', bbox_inches='tight')
# plt.show()
ndata, size_no = sorting(X, sorting='norm-orthant')
dist_corr_sort = np.zeros((len(ndata), len(ndata)))
for i in range(len(ndata)):
for j in range(i, len(ndata)):
dist_corr_sort[j,i] = dist_corr_sort[i,j] = np.linalg.norm(ndata[i]-ndata[j], ord=2, axis=0)
sns.set(rc={'figure.figsize':(12,10)}, font_scale=font_scale)
fig, ax = plt.subplots()
im = ax.imshow(dist_corr_sort, cmap='YlGnBu', aspect='auto')
fig.colorbar(im, ax=ax)
plt.xticks([0, 20, 40, 60, 80, 100, 120, 140])
plt.yticks([0, 20, 40, 60, 80, 100, 120, 140])
plt.savefig('results/norm-orthant_iris.pdf', bbox_inches='tight')
# plt.show()
ndata, size_nm = sorting(X, sorting='norm-mean')
dist_corr_sort = np.zeros((len(ndata), len(ndata)))
for i in range(len(ndata)):
for j in range(i, len(ndata)):
dist_corr_sort[j,i] = dist_corr_sort[i,j] = np.linalg.norm(ndata[i]-ndata[j], ord=2, axis=0)
sns.set(rc={'figure.figsize':(12,10)}, font_scale=font_scale)
fig, ax = plt.subplots()
im = ax.imshow(dist_corr_sort, cmap='YlGnBu', aspect='auto')
fig.colorbar(im, ax=ax)
plt.xticks([0, 20, 40, 60, 80, 100, 120, 140])
plt.yticks([0, 20, 40, 60, 80, 100, 120, 140])
plt.savefig('results/norm-mean_iris.pdf', bbox_inches='tight')
# plt.show()
sorting_df = pd.DataFrame()
sorting_df['PCA'] = size_pca
sorting_df['Norm-mean'] = size_nm
sorting_df['Norm-orthant'] = size_no
sns.set(style='ticks', color_codes=True, font_scale=3)
g = sns.pairplot(sorting_df, corner=True, height=4.2, aspect=1)
plt.savefig('results/sort_pair_plot_iris.pdf', bbox_inches='tight')
# plt.show()
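# Added sketch (illustrative): the pairwise-distance heatmap drawn repeatedly
# above, written once as a helper.
# def distance_heatmap(points, ticks, out_path, font_scale=3):
#     d = np.linalg.norm(points[:, None] - points[None, :], axis=-1)
#     sns.set(rc={'figure.figsize': (12, 10)}, font_scale=font_scale)
#     fig, ax = plt.subplots()
#     im = ax.imshow(d, cmap='YlGnBu', aspect='auto')
#     fig.colorbar(im, ax=ax)
#     plt.xticks(ticks)
#     plt.yticks(ticks)
#     plt.savefig(out_path, bbox_inches='tight')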
# --- id: 132904 ---
from meta_policy_search.utils import logger
import numpy as np
import tensorflow as tf
from collections import OrderedDict
from meta_policy_search.optimizers.base import Optimizer
class FiniteDifferenceHvp(Optimizer):
def __init__(self, base_eps=1e-5, symmetric=True, grad_clip=None):
self.base_eps = np.cast['float32'](base_eps)
self.symmetric = symmetric
self.grad_clip = grad_clip
self._target = None
self.reg_coeff = None
self._constraint_gradient = None
self._input_ph_dict = None
def build_graph(self, constraint_obj, target, input_val_dict, reg_coeff):
"""
Sets the objective function and target weights for the optimize function
Args:
constraint_obj (tf_op) : constraint objective
target (Policy) : Policy whose values we are optimizing over
            input_val_dict (dict) : dict of tf.placeholders for input data, which may be subsampled. The first dimension corresponds to the number of data points
reg_coeff (float): regularization coefficient
"""
self._target = target
self.reg_coeff = reg_coeff
self._input_ph_dict = input_val_dict
params = list(target.get_params().values())
constraint_grads = tf.gradients(constraint_obj, xs=params)
for idx, (grad, param) in enumerate(zip(constraint_grads, params)):
if grad is None:
constraint_grads[idx] = tf.zeros_like(param)
constraint_gradient = tf.concat([tf.reshape(grad, [-1]) for grad in constraint_grads], axis=0)
self._constraint_gradient = constraint_gradient
def constraint_gradient(self, input_val_dict):
"""
Computes the gradient of the constraint objective
Args:
            input_val_dict (dict): inputs needed to compute the gradient
Returns:
(np.ndarray): flattened gradient
"""
sess = tf.get_default_session()
feed_dict = self.create_feed_dict(input_val_dict)
constraint_gradient = sess.run(self._constraint_gradient, feed_dict)
return constraint_gradient
def Hx(self, input_val_dict, x):
"""
Compute the second derivative of the constraint val in the direction of the vector x
Args:
            input_val_dict (dict): inputs needed to compute the gradient of the constraint objective
x (np.ndarray): vector indicating the direction on which the Hessian has to be computed
Returns: (np.ndarray): second derivative in the direction of x
"""
assert isinstance(x, np.ndarray)
param_vals = self._target.get_param_values().copy()
flat_param_vals = _flatten_params(param_vals)
eps = self.base_eps
params_plus_eps_vals = _unflatten_params(flat_param_vals + eps * x, params_example=param_vals)
self._target.set_params(params_plus_eps_vals)
constraint_grad_plus_eps = self.constraint_gradient(input_val_dict)
self._target.set_params(param_vals)
if self.symmetric:
params_minus_eps_vals = _unflatten_params(flat_param_vals - eps * x, params_example=param_vals)
self._target.set_params(params_minus_eps_vals)
constraint_grad_minus_eps = self.constraint_gradient(input_val_dict)
self._target.set_params(param_vals)
hx = (constraint_grad_plus_eps - constraint_grad_minus_eps)/(2 * eps)
else:
constraint_grad = self.constraint_gradient(input_val_dict)
hx = (constraint_grad_plus_eps - constraint_grad)/eps
return hx
def build_eval(self, inputs):
"""
        Build the Hessian evaluation function. It lets you evaluate the Hessian of the constraint objective
in any direction.
Args:
inputs (list): inputs needed to compute the gradient of the constraint objective
Returns:
(function): function that evaluates the Hessian of the constraint objective in the input direction
"""
def evaluate_hessian(x):
return self.Hx(inputs, x) + self.reg_coeff * x
return evaluate_hessian
class ConjugateGradientOptimizer(Optimizer):
"""
Performs constrained optimization via line search. The search direction is computed using a conjugate gradient
algorithm, which gives x = A^{-1}g, where A is a second order approximation of the constraint and g is the gradient
of the loss function.
Args:
cg_iters (int) : The number of conjugate gradients iterations used to calculate A^-1 g
reg_coeff (float) : A small value so that A -> A + reg*I
        subsample_factor (float) : Subsampling factor to reduce samples when using conjugate gradient. Since the computation time for the descent direction dominates, this can greatly reduce the overall computation time.
backtrack_ratio (float) : ratio for decreasing the step size for the line search
max_backtracks (int) : maximum number of backtracking iterations for the line search
debug_nan (bool) : if set to True, NanGuard will be added to the compilation, and ipdb will be invoked when nan is detected
accept_violation (bool) : whether to accept the descent step if it violates the line search condition after exhausting all backtracking budgets
hvp_approach (obj) : Hessian vector product approach
"""
def __init__(
self,
cg_iters=10,
reg_coeff=0,
subsample_factor=1.,
backtrack_ratio=0.8,
max_backtracks=15,
debug_nan=False,
accept_violation=False,
hvp_approach=FiniteDifferenceHvp(),
):
self._cg_iters = cg_iters
self._reg_coeff = reg_coeff
self._subsample_factor = subsample_factor
self._backtrack_ratio = backtrack_ratio
self._max_backtracks = max_backtracks
self._target = None
self._max_constraint_val = None
self._constraint_name = "kl-div"
self._debug_nan = debug_nan
self._accept_violation = accept_violation
self._hvp_approach = hvp_approach
self._loss = None
self._gradient = None
self._constraint_objective = None
self._input_ph_dict = None
def build_graph(self, loss, target, input_ph_dict, leq_constraint):
"""
Sets the objective function and target weights for the optimize function
Args:
loss (tf_op) : minimization objective
target (Policy) : Policy whose values we are optimizing over
            input_ph_dict (dict) : dict of tf.placeholders for input data, which may be subsampled. The first dimension corresponds to the number of data points
            leq_constraint (tuple) : A constraint provided as a tuple (f, epsilon), of the form f(*inputs) <= epsilon.
"""
assert isinstance(loss, tf.Tensor)
assert hasattr(target, 'get_params')
assert isinstance(input_ph_dict, dict)
constraint_objective, constraint_value = leq_constraint
self._target = target
self._constraint_objective = constraint_objective
self._max_constraint_val = constraint_value
self._input_ph_dict = input_ph_dict
self._loss = loss
# build the graph of the hessian vector product (hvp)
self._hvp_approach.build_graph(constraint_objective, target, self._input_ph_dict, self._reg_coeff)
# build the graph of the gradients
params = list(target.get_params().values())
grads = tf.gradients(loss, xs=params)
for idx, (grad, param) in enumerate(zip(grads, params)):
if grad is None:
grads[idx] = tf.zeros_like(param)
gradient = tf.concat([tf.reshape(grad, [-1]) for grad in grads], axis=0)
self._gradient = gradient
def loss(self, input_val_dict):
"""
Computes the value of the loss for given inputs
Args:
            input_val_dict (dict): inputs needed to compute the loss function
Returns:
(float): value of the loss
"""
sess = tf.get_default_session()
feed_dict = self.create_feed_dict(input_val_dict)
loss = sess.run(self._loss, feed_dict=feed_dict)
return loss
def constraint_val(self, input_val_dict):
"""
Computes the value of the KL-divergence between pre-update policies for given inputs
Args:
            input_val_dict (dict): inputs needed to compute the inner KL
        Returns:
            (float): value of the constraint
"""
sess = tf.get_default_session()
feed_dict = self.create_feed_dict(input_val_dict)
        constraint_value = sess.run(self._constraint_objective, feed_dict)
        return constraint_value
def gradient(self, input_val_dict):
"""
Computes the gradient of the loss function
Args:
            input_val_dict (dict): inputs needed to compute the gradient
Returns:
(np.ndarray): flattened gradient
"""
sess = tf.get_default_session()
feed_dict = self.create_feed_dict(input_val_dict)
gradient = sess.run(self._gradient, feed_dict)
return gradient
def optimize(self, input_val_dict):
"""
Carries out the optimization step
Args:
            input_val_dict (dict): inputs for the optimization
"""
logger.log("Start CG optimization")
logger.log("computing loss before")
loss_before = self.loss(input_val_dict)
logger.log("performing update")
logger.log("computing gradient")
gradient = self.gradient(input_val_dict)
logger.log("gradient computed")
logger.log("computing descent direction")
Hx = self._hvp_approach.build_eval(input_val_dict)
descent_direction = conjugate_gradients(Hx, gradient, cg_iters=self._cg_iters)
initial_step_size = np.sqrt(2.0 * self._max_constraint_val *
(1. / (descent_direction.dot(Hx(descent_direction)) + 1e-8)))
if np.isnan(initial_step_size):
logger.log("Initial step size is NaN! Rejecting the step!")
return
initial_descent_step = initial_step_size * descent_direction
logger.log("descent direction computed")
prev_params = self._target.get_param_values()
prev_params_values = _flatten_params(prev_params)
loss, constraint_val, n_iter, violated = 0, 0, 0, False
for n_iter, ratio in enumerate(self._backtrack_ratio ** np.arange(self._max_backtracks)):
cur_step = ratio * initial_descent_step
cur_params_values = prev_params_values - cur_step
cur_params = _unflatten_params(cur_params_values, params_example=prev_params)
self._target.set_params(cur_params)
loss, constraint_val = self.loss(input_val_dict), self.constraint_val(input_val_dict)
if loss < loss_before and constraint_val <= self._max_constraint_val:
break
""" ------------------- Logging Stuff -------------------------- """
if np.isnan(loss):
violated = True
logger.log("Line search violated because loss is NaN")
if np.isnan(constraint_val):
violated = True
logger.log("Line search violated because constraint %s is NaN" % self._constraint_name)
if loss >= loss_before:
violated = True
logger.log("Line search violated because loss not improving")
if constraint_val >= self._max_constraint_val:
violated = True
logger.log("Line search violated because constraint %s is violated" % self._constraint_name)
if violated and not self._accept_violation:
logger.log("Line search condition violated. Rejecting the step!")
self._target.set_params(prev_params)
logger.log("backtrack iters: %d" % n_iter)
logger.log("computing loss after")
logger.log("optimization finished")
def _unflatten_params(flat_params, params_example):
unflat_params = []
idx = 0
for key, param in params_example.items():
size_param = np.prod(param.shape)
reshaped_param = np.reshape(flat_params[idx:idx+size_param], newshape=param.shape)
unflat_params.append((key, reshaped_param))
idx += size_param
return OrderedDict(unflat_params)
def _flatten_params(params):
return np.concatenate([param.reshape(-1) for param in params.values()])
def conjugate_gradients(f_Ax, b, cg_iters=10, verbose=False, residual_tol=1e-10):
"""
Demmel p 312
"""
p = b.copy()
r = b.copy()
x = np.zeros_like(b, dtype=np.float32)
rdotr = r.dot(r)
fmtstr = "%10i %10.3g %10.3g"
titlestr = "%10s %10s %10s"
if verbose: print(titlestr % ("iter", "residual norm", "soln norm"))
for i in range(cg_iters):
if verbose: print(fmtstr % (i, rdotr, np.linalg.norm(x)))
z = f_Ax(p)
v = rdotr / p.dot(z)
x += v * p
r -= v * z
newrdotr = r.dot(r)
mu = newrdotr / rdotr
p = r + mu * p
rdotr = newrdotr
if rdotr < residual_tol:
break
if verbose: print(fmtstr % (i + 1, rdotr, np.linalg.norm(x)))
return x
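# Added sketch (illustrative, not part of the original module): a quick
# sanity check that conjugate_gradients solves A x = b for a small SPD matrix.
if __name__ == '__main__':
    A = np.array([[4., 1.], [1., 3.]], dtype=np.float32)
    b = np.array([1., 2.], dtype=np.float32)
    x = conjugate_gradients(lambda v: A.dot(v), b)
    assert np.allclose(A.dot(x), b, atol=1e-4)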
# --- id: 132905 ---
t = int(input())
ans = []
for testcase in range(t):
n, m = [int(i) for i in input().split()]
if (n == 2) and (m == 2):
folds = []
for i in range(n):
folds += [int(i) for i in input().split()]
for i in range(n - 1):
folds += [int(i) for i in input().split()]
        if (folds.count(1) == 1) or (folds.count(0) == 1):
ans.append(1)
else: ans.append(0)
else:
ver_folds = [[int(i) for i in input().split()] for i in range(n)]
hor_folds = [[int(i) for i in input().split()] for i in range(n - 1)]
answer = 1
# print(ver_folds, hor_folds)
for i in range(n-1):
tmp = hor_folds[i]
# print('[*]', tmp)
order = None
for idx, j in enumerate(tmp):
# print((idx, j))
if idx == 0:
order = not j
if j == order:
answer = 0
break
# print(int(order), j)
order = j
else: continue
break
ans.append(answer)
print(*ans)
# --- id: 132937 ---
import argparse
import logging
import json
import sys
sys.path.insert(0, '.')
from tools.constants import JSON_FORMAT_KWARGS
from tools.utils import get_json_files
def reserialize(file_):
"""Reformat json file"""
with open(file_) as fp:
try:
data = json.load(fp)
except ValueError:
logging.error('Json syntax error in file {}'.format(file_))
raise
with open(file_, 'w') as fp:
json.dump(data, fp, **JSON_FORMAT_KWARGS)
fp.write("\n")
def main():
"""Convert json file(s) to the project format standards"""
logging.basicConfig(level=logging.WARNING)
parser = argparse.ArgumentParser()
parser.add_argument("path",
help="path to file(s) to reserialize")
parser.add_argument("-a", "--all",
action="store_true",
help="reserialize all JSON files under path")
args = parser.parse_args()
if args.all:
category_paths, video_paths = get_json_files(args.path)
paths = category_paths + video_paths
for path in paths:
reserialize(path)
else:
reserialize(args.path)
if __name__ == '__main__':
main()
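# Example invocation (illustrative; the script path is an assumption):
#   python tools/reserialize.py data/videos/example.json
#   python tools/reserialize.py data --all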
# --- id: 132939 ---
from bot.bot import Friendo
from discord import errors
from discord.ext.commands import Cog, Context, group
from bot.settings import AOC_JOIN_CODE, AOC_LEADERBOARD_LINK, AOC_SESSION_COOKIE
class AdventOfCode(Cog):
"""Cog for AOC 2021 for small features."""
def __init__(self, bot: Friendo):
self.bot = bot
@staticmethod
def sort_stats(stat_d: dict) -> dict:
"""Staticmethod for making a sorted dictionary of the leaderboard."""
stats = dict()
for ms in stat_d['members']:
if stat_d['members'][ms]['name'] is None:
stat_d['members'][ms]['name'] = 'Anonymous'
stats.update({(stat_d['members'][ms]['name'],
stat_d['members'][ms]['stars']): stat_d['members'][ms]['local_score']})
stats = {k: stats[k] for k in sorted(stats, key=lambda y: stats[y])[::-1]}
return stats
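    # The private-leaderboard JSON consumed above looks roughly like this
    # (illustrative):
    #   {"members": {"123": {"name": "Ada", "stars": 14, "local_score": 90}}}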
@group(name="AdventofCode",
aliases=('aoc', 'aoc2021'),
brief="Small commands for AoC 2021",
usage=".aoc [command]")
async def aoc_group(self, ctx: Context) -> None:
"""Group for advent of code commands."""
if not ctx.invoked_subcommand:
await ctx.send("Please enter a valid command")
@aoc_group.command(brief="Get the leaderboard join code in your DM's",
usage=".aoc join",
aliases=("join", "join_lb", "j"))
async def join_leaderboard(self, ctx: Context) -> None:
"""Dms the author the join code and link for the leaderboard."""
info = [
"To join our leaderboard, follow these steps:",
"• Log in on https://adventofcode.com",
"• Head over to https://adventofcode.com/leaderboard/private",
f"• Use this code `{AOC_JOIN_CODE}` to join the Code Collective leaderboard!"]
error_msg = f":x: {ctx.author.mention}, please (temporarily) enable DMs to receive the join code"
await ctx.message.add_reaction("📨")
try:
await ctx.author.send('\n'.join(info))
except errors.Forbidden:
await ctx.send(error_msg)
@aoc_group.command(brief="Get the leaderboard of AoC 2021 for the Code Collective server",
usage=".aoc leaderboard",
aliases=('lb', 'board'))
async def leaderboard(self, ctx: Context) -> None:
"""Shows the leaderboard of code collective server for AoC 2021."""
async with ctx.channel.typing():
cookies = {'session': AOC_SESSION_COOKIE}
async with self.bot.session.get(AOC_LEADERBOARD_LINK, cookies=cookies) as stats:
stats = await stats.json()
sorted_stats = self.sort_stats(stats)
msg = []
count = 1
for name_star, score in sorted_stats.items():
msg.append(
f"{count} | {name_star[0] + ' '*(16-len(name_star[0]))} "
f"| {name_star[1]} ★ | {score}")
count += 1
msg = '\n'.join(msg)
await ctx.send("🎄 Advent of Code 2021 leaderboard for Code Collective 🎄")
await ctx.send(f"``` | Name {' '*(16-4)}| Stars | Score\n{msg}```")
def setup(bot: Friendo) -> None:
"""Sets up the AdventOfCode cog."""
bot.add_cog(AdventOfCode(bot))
# --- id: 132951 ---
import random
import colorsys
import numpy as np
import matplotlib.pyplot as plt
from matplotlib import patches, lines
def random_colors(N, bright=True):
'''
Generate random colors.
To get visually distinct colors, generate them in HSV space then
convert to RGB.
'''
brightness = 1.0 if bright else 0.7
hsv = [(i / N, 1, brightness) for i in range(N)]
colors = list(map(lambda c: colorsys.hsv_to_rgb(*c), hsv))
random.shuffle(colors)
return colors
def display_instances(image, boxes, class_ids, class_names,
scores=None, title="",
figsize=(16, 16), ax=None):
'''
    boxes: [num_instances, (y1, x1, y2, x2)] in image coordinates.
class_ids: [num_instances]
class_names: list of class names of the dataset
scores: (optional) confidence scores for each box
figsize: (optional) the size of the image.
'''
# Number of instances
N = boxes.shape[0]
if not N:
print("\n*** No instances to display *** \n")
# else:
# assert boxes.shape[0] == masks.shape[-1] == class_ids.shape[0]
if not ax:
_, ax = plt.subplots(1, figsize=figsize)
# Generate random colors
colors = random_colors(N)
# Show area outside image boundaries.
height, width = image.shape[:2]
ax.set_ylim(height + 10, -10)
ax.set_xlim(-10, width + 10)
ax.axis('off')
ax.set_title(title)
masked_image = image.astype(np.uint32).copy()
for i in range(N):
color = colors[i]
# Bounding box
if not np.any(boxes[i]):
# Skip this instance. Has no bbox. Likely lost in image cropping.
continue
y1, x1, y2, x2 = boxes[i]
y1, x1, y2, x2 = int(y1), int(x1), int(y2), int(x2)
p = patches.Rectangle((x1, y1), x2 - x1, y2 - y1, linewidth=2,
alpha=0.7, linestyle="dashed",
edgecolor=color, facecolor='none')
ax.add_patch(p)
# Label
class_id = class_ids[i]
score = scores[i] if scores is not None else None
label = class_names[class_id]
x = random.randint(x1, (x1 + x2) // 2)
caption = "{} {:.3f}".format(label, score) if score else label
ax.text(x1, y1 + 8, caption,
color='w', size=11, backgroundcolor="none")
plt.imshow(image.astype(np.uint8))
def draw_boxes(image, boxes=None, refined_boxes=None,
captions=None, visibilities=None,
title="", ax=None):
    '''Draw bounding boxes and segmentation masks with different
    customizations.
    boxes: [N, (y1, x1, y2, x2)] in image coordinates.
    refined_boxes: Like boxes, but drawn with solid lines to show
        that they're the result of refining 'boxes'.
    captions: List of N titles to display on each box
    visibilities: (optional) List of values of 0, 1, or 2. Determines how
        prominent each bounding box should be.
title: An optional title to show over the image
ax: (optional) Matplotlib axis to draw on.
'''
# Number of boxes
N = boxes.shape[0] if boxes is not None else refined_boxes.shape[0]
# Matplotlib Axis
if not ax:
_, ax = plt.subplots(1, figsize=(16, 16))
# Generate random colors
colors = random_colors(N)
# Show area outside image boundaries.
margin = image.shape[0] // 10
ax.set_ylim(image.shape[0] + margin, -margin)
ax.set_xlim(-margin, image.shape[1] + margin)
ax.axis('off')
ax.set_title(title)
for i in range(N):
# Box visibility
visibility = visibilities[i] if visibilities is not None else 1
if visibility == 0:
color = "gray"
style = "dotted"
alpha = 0.5
elif visibility == 1:
color = colors[i]
style = "dotted"
alpha = 1
elif visibility == 2:
color = colors[i]
style = "solid"
alpha = 1
# Boxes
if boxes is not None:
if not np.any(boxes[i]):
# Skip this instance. Has no bbox. Likely lost in cropping.
continue
y1, x1, y2, x2 = boxes[i]
p = patches.Rectangle((x1, y1), x2 - x1, y2 - y1, linewidth=2,
alpha=alpha, linestyle=style,
edgecolor=color, facecolor='none')
ax.add_patch(p)
# Refined boxes
if refined_boxes is not None and visibility > 0:
ry1, rx1, ry2, rx2 = refined_boxes[i].astype(np.int32)
p = patches.Rectangle((rx1, ry1), rx2 - rx1, ry2 - ry1, linewidth=2,
edgecolor=color, facecolor='none')
ax.add_patch(p)
# Connect the top-left corners of the anchor and proposal
if boxes is not None:
ax.add_line(lines.Line2D([x1, rx1], [y1, ry1], color=color))
# Captions
if captions is not None:
caption = captions[i]
# If there are refined boxes, display captions on them
if refined_boxes is not None:
y1, x1, y2, x2 = ry1, rx1, ry2, rx2
x = random.randint(x1, (x1 + x2) // 2)
ax.text(x1, y1, caption, size=11, verticalalignment='top',
color='w', backgroundcolor="none",
bbox={'facecolor': color, 'alpha': 0.5,
'pad': 2, 'edgecolor': 'none'})
ax.imshow(image.astype(np.uint8))
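# Illustrative usage (added sketch; boxes are (y1, x1, y2, x2) pixel coords):
#   img = np.zeros((200, 300, 3), dtype=np.uint8)
#   display_instances(img, np.array([[20, 30, 120, 200]]),
#                     class_ids=np.array([1]), class_names=['BG', 'thing'])
#   plt.show()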
# --- id: 133021 ---
import argparse
import numpy as np
from tqdm import tqdm
from os.path import join, isfile
from data import Labels
from joblib import Parallel, delayed
labels = Labels()
def job(text_path, numpy_path):
with open(text_path, 'r', encoding='utf8') as file:
text = file.read()
if not labels.is_accepted(text):
return None
required_frames = labels.required_frames(text)
actual_frames = len(np.load(numpy_path))
if required_frames > actual_frames:
return None
return '%s,%d,%s' % (numpy_path, actual_frames, text)
parser = argparse.ArgumentParser(description='Collect utterances')
parser.add_argument('--manifest', type=str)
parser.add_argument('--jobs', type=int, default=8)
args = parser.parse_args()
prefix = args.manifest.replace('.csv', '')
print(prefix)
files = dict()
with open(args.manifest) as f:
progress = tqdm(f.readlines())
for line in progress:
path = line.split(',')[0]
text_path = join(prefix, path.replace('.wav', '.txt'))
if not isfile(text_path):
continue
numpy_path = join(prefix, path.replace('.wav', '.npy'))
if not isfile(numpy_path):
continue
files[text_path] = numpy_path
tasks = []
for text_path, numpy_path in files.items():
tasks.append(delayed(job)(text_path, numpy_path))
print('Tasks:', len(tasks))
results = Parallel(n_jobs=args.jobs, backend='multiprocessing', verbose=1)(tasks)
utterances = sorted([r for r in results if r is not None])
print('Success:', len(utterances))
with open(prefix + '.txt', 'w', encoding='utf8') as file:
file.write('path,frames,text\n')
file.writelines(utterances)
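# The resulting <prefix>.txt is a CSV with header "path,frames,text"; each
# accepted utterance contributes one line such as (illustrative):
#   clips/0001.npy,412,hello world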
# --- id: 133043 ---
import requests
import urllib3
from datetime import datetime, timedelta
from requests.auth import HTTPDigestAuth
import voluptuous as vol
from homeassistant.components.sensor import PLATFORM_SCHEMA, ENTITY_ID_FORMAT
from homeassistant.const import CONF_ID, CONF_NAME
import homeassistant.helpers.config_validation as cv
from homeassistant.helpers.entity import Entity
from homeassistant.helpers.entity import async_generate_entity_id
CONF_STOPS = 'stops'
CONF_LINES = 'lines'
CONF_DIRECTIONS = 'directions'
DEFAULT_NAME = 'iMPK'
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend({
vol.Optional(CONF_NAME, default=DEFAULT_NAME): cv.string,
vol.Required(CONF_STOPS): vol.All(cv.ensure_list, [
vol.Schema({
vol.Required(CONF_ID): cv.positive_int,
vol.Optional(CONF_NAME): cv.string,
vol.Optional(CONF_LINES, default=[]): cv.ensure_list,
vol.Optional(CONF_DIRECTIONS, default=[]): cv.ensure_list
})])
})
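# Example configuration.yaml entry (illustrative; the platform name depends on
# the directory this component is installed under):
#
# sensor:
#   - platform: impk
#     stops:
#       - id: 1234
#         name: Rynek
#         lines: ['33']
#         directions: ['LEŚNICA']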
def setup_platform(hass, config, add_entities, discovery_info=None):
name = config.get(CONF_NAME)
stops = config.get(CONF_STOPS)
available_stops = IMPKSensor.get_stops()
dev = []
for stop in stops:
stop_id = str(stop.get(CONF_ID))
lines = stop.get(CONF_LINES)
directions = stop.get(CONF_DIRECTIONS)
real_stop_name = IMPKSensor.get_stop_name(stop_id, available_stops)
if real_stop_name is None:
raise Exception("Invalid stop id: {}".format(stop_id))
stop_name = stop.get(CONF_NAME) or stop_id
uid = '{}_{}'.format(name, stop_name)
entity_id = async_generate_entity_id(ENTITY_ID_FORMAT, uid, hass=hass)
dev.append(IMPKSensor(entity_id, name, stop_id, stop_name, real_stop_name, lines, directions))
add_entities(dev, True)
class IMPKSensor(Entity):
def __init__(self, entity_id, name, stop_id, stop_name, real_stop_name, watched_lines, watched_directions):
self.entity_id = entity_id
self._name = name
self._stop_id = stop_id
self._watched_lines = watched_lines
self._watched_directions = watched_directions
self._stop_name = stop_name
self._real_stop_name = real_stop_name
self._departures = []
self._departures_number = 0
self._departures_by_line = dict()
@property
def name(self):
return '{} - {}'.format(self._name, self._stop_name)
@property
def state(self):
if self._departures_number is not None and self._departures_number > 0:
dep = self._departures[0]
return IMPKSensor.departure_to_str(dep)
return None
@property
def unit_of_measurement(self):
return None
@property
def device_state_attributes(self):
attr = dict()
attr['stop_name'] = self._real_stop_name
if self._departures is not None:
attr['list'] = self._departures
attr['html_timetable'] = self.get_html_timetable()
attr['html_departures'] = self.get_html_departures()
if self._departures_number > 0:
dep = self._departures[0]
attr['line'] = dep["line"]
attr['direction'] = dep["direction"]
attr['departure'] = dep["departure"]
attr['time_to_departure'] = dep["time_to_departure"]
attr['original_departure'] = dep["original_departure"]
attr['delay'] = dep["delay"]
return attr
@property
def icon(self):
return "mdi:bus-clock"
def update(self):
now = datetime.now()
departures = IMPKSensor.get_departures(self._stop_id)
if departures is None:
return
positions = IMPKSensor.get_positions()
courses = list(map(lambda d: d["c"], departures))
delays = IMPKSensor.get_delays(courses, positions)
stops = IMPKSensor.get_stops()
parsed_departures = []
for departure_details in departures:
line = departure_details["l"]
time = departure_details["t"]
course = departure_details["c"]
direction = IMPKSensor.get_stop_name(departure_details["d"], stops)
if len(self._watched_lines) > 0 and line not in self._watched_lines \
or len(self._watched_directions) > 0 and direction not in self._watched_directions:
continue
delay = delays[course] if course in delays else 0
original_departure = datetime.strptime(time, '%Y-%m-%d %H:%M:%S')
departure = original_departure + timedelta(milliseconds=delay)
time_to_departure = (departure - now).total_seconds() // 60
parsed_departures.append(
{
"line": line,
"direction": direction,
"departure": "{:02}:{:02}".format(departure.hour, departure.minute),
"original_departure": "{:02}:{:02}".format(original_departure.hour, original_departure.minute),
"time_to_departure": int(time_to_departure),
"delay": int(delay // 1000)
})
self._departures = parsed_departures
self._departures_number = len(parsed_departures)
self._departures_by_line = IMPKSensor.group_by_line(self._departures)
def get_html_timetable(self):
html = '<table width="100%" border=1 style="border: 1px black solid; border-collapse: collapse;">\n'
lines = list(self._departures_by_line.keys())
lines.sort()
for line in lines:
directions = list(self._departures_by_line[line].keys())
directions.sort()
for direction in directions:
if len(direction) == 0:
continue
html = html + '<tr><td style="text-align: center; padding: 4px"><big>{}, kier. {}</big></td>'.format(
line, direction)
departures = ', '.join(map(lambda x: x["departure"], self._departures_by_line[line][direction]))
html = html + '<td style="text-align: right; padding: 4px">{}</td></tr>\n'.format(departures)
        if len(lines) == 0:
            html = html + '<tr><td style="text-align: center; padding: 4px">Brak połączeń</td></tr>'  # "Brak połączeń" = "no connections"
html = html + '</table>'
return html
def get_html_departures(self):
html = '<table width="100%" border=1 style="border: 1px black solid; border-collapse: collapse;">\n'
for departure in self._departures:
html = html + '<tr><td style="text-align: center; padding: 4px">{}</td></tr>\n'.format(
IMPKSensor.departure_to_str(departure))
html = html + '</table>'
return html
@staticmethod
    def departure_to_str(dep):
        # "kier." abbreviates Polish "kierunek" (direction)
        return '{}, kier. {}: {} ({}m)'.format(dep["line"], dep["direction"], dep["departure"],
                                               dep["time_to_departure"])
@staticmethod
def group_by_line(departures):
departures_by_line = dict()
for departure in departures:
line = departure["line"]
direction = departure["direction"]
if line not in departures_by_line:
departures_by_line[line] = dict()
if direction not in departures_by_line[line]:
departures_by_line[line][direction] = []
departures_by_line[line][direction].append(departure)
return departures_by_line
@staticmethod
def get_stop_name(stop_id, stops):
found = list(filter(lambda stop: stop_id in map(lambda post: post["s"], stop['p']), stops))
return found[0]["n"] if found else None
@staticmethod
def get_delays(courses, positions):
delays = dict()
for position in positions:
if len(delays) == len(courses):
break
if "course" not in position or position["course"] not in courses:
continue
course = position["course"]
delay = position["delay"]
delays[course] = int(delay)
return delays
@staticmethod
def get_positions():
return IMPKSensor.get_data("getPositions")
@staticmethod
def get_stops():
return IMPKSensor.get_data("getPosts")
@staticmethod
def get_departures(stop_id):
return IMPKSensor.get_data("getPostInfo&symbol={}".format(stop_id))
@staticmethod
def get_data(function):
urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning)
address = 'https://192.168.127.12:8088/mobile?function={}'.format(function)
response = requests.get(address, auth=HTTPDigestAuth('android-mpk', 'g5crehAfUCh4Wust'), verify=False)
if response.status_code == 200 and response.content.__len__() > 0:
return response.json()
return None
# --- id: 133105 ---
import torch
from vedacore.misc import registry
from vedadet.bridge import build_converter, build_meshgrid
from vedadet.misc.bbox import bbox2result, multiclass_nms
from .base_engine import BaseEngine
@registry.register_module('engine')
class InferEngine(BaseEngine):
def __init__(self, model, meshgrid, converter, num_classes, use_sigmoid,
test_cfg):
super().__init__(model)
self.meshgrid = build_meshgrid(meshgrid)
self.converter = build_converter(converter)
if use_sigmoid:
self.cls_out_channels = num_classes
else:
self.cls_out_channels = num_classes + 1
self.test_cfg = test_cfg
def extract_feats(self, img):
feats = self.model(img, train=False)
return feats
def _get_raw_dets(self, img, img_metas):
"""
Args:
img(torch.Tensor): shape N*3*H*W, N is batch size
img_metas(list): len(img_metas) = N
        Returns:
            dets(list): len(dets) is the batch size; dets[ii] is a tuple of
                (bboxes, scores, score_factors) for the ii-th image
"""
feats = self.extract_feats(img)
featmap_sizes = [feat.shape[-2:] for feat in feats[0]]
dtype = feats[0][0].dtype
device = feats[0][0].device
anchor_mesh = self.meshgrid.gen_anchor_mesh(featmap_sizes, img_metas,
dtype, device)
# bboxes, scores, score_factor
dets = self.converter.get_bboxes(anchor_mesh, img_metas, *feats)
return dets
def _simple_infer(self, img, img_metas):
"""
Args:
img(torch.Tensor): shape N*3*H*W, N is batch size
img_metas(list): len(img_metas) = N
Returns:
dets(list): len(dets) is the batch size, len(dets[ii]) = #classes,
dets[ii][jj] is an np.array whose shape is N*5
"""
dets = self._get_raw_dets(img, img_metas)
batch_size = len(dets)
result_list = []
for ii in range(batch_size):
bboxes, scores, centerness = dets[ii]
det_bboxes, det_labels = multiclass_nms(
bboxes,
scores,
self.test_cfg.score_thr,
self.test_cfg.nms,
self.test_cfg.max_per_img,
score_factors=centerness)
bbox_result = bbox2result(det_bboxes, det_labels,
self.cls_out_channels)
result_list.append(bbox_result)
return result_list
def _aug_infer(self, img_list, img_metas_list):
assert len(img_list) == len(img_metas_list)
dets = []
ntransforms = len(img_list)
for idx in range(len(img_list)):
img = img_list[idx]
img_metas = img_metas_list[idx]
tdets = self._get_raw_dets(img, img_metas)
dets.append(tdets)
batch_size = len(dets[0])
nclasses = len(dets[0][0])
merged_dets = []
for ii in range(batch_size):
single_image = []
for kk in range(nclasses):
single_class = []
for jj in range(ntransforms):
single_class.append(dets[jj][ii][kk])
                single_image.append(torch.cat(single_class, dim=0))
merged_dets.append(single_image)
result_list = []
for ii in range(batch_size):
bboxes, scores, centerness = merged_dets[ii]
det_bboxes, det_labels = multiclass_nms(
bboxes,
scores,
self.test_cfg.score_thr,
self.test_cfg.nms,
self.test_cfg.max_per_img,
score_factors=centerness)
bbox_result = bbox2result(det_bboxes, det_labels,
self.cls_out_channels)
result_list.append(bbox_result)
return result_list
def infer(self, img, img_metas):
if len(img) == 1:
return self._simple_infer(img[0], img_metas[0])
else:
return self._aug_infer(img, img_metas)
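# Illustrative call shape (assumption: the engine was built through the
# registry with a test config): `img` is a list of batched tensors, one per
# test-time augmentation, and `img_metas` the matching list of per-batch
# meta lists:
#   results = engine.infer([batch_tensor], [batch_metas])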
# --- id: 133107 ---
from datetime import datetime
import unittest
from pypushwoosh import constants
from pypushwoosh.exceptions import PushwooshFilterInvalidOperatorException, PushwooshFilterInvalidOperandException
from pypushwoosh.filter import ApplicationFilter, ApplicationGroupFilter, IntegerTagFilter, StringTagFilter, \
ListTagFilter, DateTagFilter, DaysTagFilter, IntegerTagFilterByApplication, StringTagFilterByApplication, \
DateTagFilterByApplication, DaysTagFilterByApplication, BooleanTagFilter, BooleanTagFilterByApplication
HTTP_200_OK = 200
STATUS_OK = 'OK'
class TestApplicationFilter(unittest.TestCase):
def setUp(self):
self.test_code = '0000-0000'
self.prefix = 'A'
self.pwfilter = ApplicationFilter
def test_valid_filter(self):
expected_result = '%s("%s")' % (self.prefix, self.test_code)
result = self.pwfilter(self.test_code)
self.assertEqual(result.__str__(), expected_result)
def test_valid_filter_with_platforms(self):
expected_result = '%s("%s", ["%s", "%s"])' % (self.prefix,
self.test_code,
constants.PLATFORM_NAMES[constants.PLATFORM_IOS],
constants.PLATFORM_NAMES[constants.PLATFORM_ANDROID])
result = self.pwfilter(self.test_code, [constants.PLATFORM_IOS, constants.PLATFORM_ANDROID])
self.assertEqual(result.__str__(), expected_result)
    def test_filter_invalid_platform(self):
        with self.assertRaises(TypeError):
            self.pwfilter(self.test_code, 'Invalid Platform')
class TestApplicationGroupFilter(TestApplicationFilter):
def setUp(self):
self.test_code = '0000-0000'
self.prefix = 'G'
self.pwfilter = ApplicationGroupFilter
class TestInvalidOperatorForOperand(unittest.TestCase):
def setUp(self):
self.pwfilter = IntegerTagFilter
self.tag_name = 'testInt'
def filter_with_invalid_operator_for_operand(self, value, operator):
args = [self.tag_name, operator, value]
self.assertRaises(PushwooshFilterInvalidOperandException, self.pwfilter, *args)
def test_invalid_operator_type(self):
self.filter_with_invalid_operator_for_operand([1, 2], constants.TAG_FILTER_OPERATOR_GTE)
self.filter_with_invalid_operator_for_operand([1, 2], constants.TAG_FILTER_OPERATOR_LTE)
self.filter_with_invalid_operator_for_operand(1, constants.TAG_FILTER_OPERATOR_BETWEEN)
self.filter_with_invalid_operator_for_operand('1', constants.TAG_FILTER_OPERATOR_BETWEEN)
self.filter_with_invalid_operator_for_operand(1, constants.TAG_FILTER_OPERATOR_IN)
self.filter_with_invalid_operator_for_operand('1', constants.TAG_FILTER_OPERATOR_IN)
class TestInvalidOperand(unittest.TestCase):
def filter_with_invalid_operand_type(self, value, operator, tag_name):
args = [tag_name, operator, value]
self.assertRaises(PushwooshFilterInvalidOperandException, self.pwfilter, *args)
def test_invalid_operand_type_int(self):
tag_name = 'testInt'
str_value = 'Invalid value for int'
between_value = ['invalid_min', 'invalid_max']
in_value = ['1', '2', '3']
self.pwfilter = IntegerTagFilter
self.filter_with_invalid_operand_type(str_value, constants.TAG_FILTER_OPERATOR_EQ, tag_name)
self.filter_with_invalid_operand_type(between_value, constants.TAG_FILTER_OPERATOR_BETWEEN, tag_name)
self.filter_with_invalid_operand_type(in_value, constants.TAG_FILTER_OPERATOR_IN, tag_name)
def test_invalid_operand_type_str(self):
tag_name = 'testString'
list_value_in = [[1, 2], [1, 2]]
self.pwfilter = StringTagFilter
self.filter_with_invalid_operand_type(list_value_in, constants.TAG_FILTER_OPERATOR_IN, tag_name)
def test_invalid_operand_type_list(self):
tag_name = 'testString'
list_value_in = [[1, 2], [1, 2]]
self.pwfilter = ListTagFilter
self.filter_with_invalid_operand_type(list_value_in, constants.TAG_FILTER_OPERATOR_IN, tag_name)
def test_invalid_operand_type_date(self):
tag_name = 'testDate'
self.pwfilter = DateTagFilter
self.filter_with_invalid_operand_type(1, constants.TAG_FILTER_OPERATOR_EQ, tag_name)
self.filter_with_invalid_operand_type([1, 'str'], constants.TAG_FILTER_OPERATOR_BETWEEN, tag_name)
self.filter_with_invalid_operand_type(['str', 'str', 1], constants.TAG_FILTER_OPERATOR_IN, tag_name)
def test_invalid_operand_type_days(self):
tag_name = 'testDays'
str_value = 'Invalid value for days'
between_value = ['invalid_min', 'invalid_max']
self.pwfilter = DaysTagFilter
self.filter_with_invalid_operand_type(str_value, constants.TAG_FILTER_OPERATOR_EQ, tag_name)
self.filter_with_invalid_operand_type(between_value, constants.TAG_FILTER_OPERATOR_BETWEEN, tag_name)
class TestInvalidOperator(unittest.TestCase):
def filter_with_invalid_operator(self, value, operator, tag_name):
args = [tag_name, operator, value]
self.assertRaises(PushwooshFilterInvalidOperatorException, self.pwfilter, *args)
def test_invalid_operator(self):
tag_name = 'testInt'
value = [1, 2, 3]
self.pwfilter = IntegerTagFilter
self.filter_with_invalid_operator(value, 'Invalid Operator', tag_name)
def test_invalid_operator_type_str(self):
tag_name = 'testString'
value = 1
list_value = ['123', 'asd']
self.pwfilter = StringTagFilter
self.filter_with_invalid_operator(value, constants.TAG_FILTER_OPERATOR_GTE, tag_name)
self.filter_with_invalid_operator(value, constants.TAG_FILTER_OPERATOR_LTE, tag_name)
self.filter_with_invalid_operator(list_value, constants.TAG_FILTER_OPERATOR_BETWEEN, tag_name)
def test_invalid_operator_type_list(self):
tag_name = 'testList'
value = 1
list_value = ['123', 'asd']
self.pwfilter = ListTagFilter
self.filter_with_invalid_operator(value, constants.TAG_FILTER_OPERATOR_GTE, tag_name)
self.filter_with_invalid_operator(value, constants.TAG_FILTER_OPERATOR_LTE, tag_name)
self.filter_with_invalid_operator(list_value, constants.TAG_FILTER_OPERATOR_BETWEEN, tag_name)
def test_invalid_operator_type_boolean(self):
tag_name = 'testBool'
value = 1
list_value = [0, 1]
self.pwfilter = BooleanTagFilter
self.filter_with_invalid_operator(value, constants.TAG_FILTER_OPERATOR_GTE, tag_name)
self.filter_with_invalid_operator(value, constants.TAG_FILTER_OPERATOR_LTE, tag_name)
self.filter_with_invalid_operator(list_value, constants.TAG_FILTER_OPERATOR_BETWEEN, tag_name)
self.filter_with_invalid_operator(list_value, constants.TAG_FILTER_OPERATOR_IN, tag_name)
class TestInvalidOperandLength(unittest.TestCase):
def filter_with_invalid_operator_len(self, value, operator, tag_name):
args = [tag_name, operator, value]
self.assertRaises(PushwooshFilterInvalidOperandException, self.pwfilter, *args)
def test_invalid_len_in(self):
tag_name = 'testStr'
value = []
self.pwfilter = ListTagFilter
self.filter_with_invalid_operator_len(value, constants.TAG_FILTER_OPERATOR_IN, tag_name)
def test_invalid_len_between(self):
tag_name = 'testInt'
value_lt = [1]
value_gt = [1, 2, 3]
self.pwfilter = IntegerTagFilter
self.filter_with_invalid_operator_len(value_lt, constants.TAG_FILTER_OPERATOR_BETWEEN, tag_name)
self.filter_with_invalid_operator_len(value_gt, constants.TAG_FILTER_OPERATOR_BETWEEN, tag_name)
class TestIntegerTagFilter(unittest.TestCase):
def setUp(self):
self.pwfilter = IntegerTagFilter
self.tag_name = 'testInt'
def test_valid_filter(self):
expected_result = 'T("%s", %s, 1)' % (self.tag_name, constants.TAG_FILTER_OPERATOR_GTE)
result = self.pwfilter(self.tag_name, constants.TAG_FILTER_OPERATOR_GTE, 1)
self.assertEqual(expected_result, result.__str__())
def test_valid_filter_between_operator(self):
expected_result = 'T("%s", %s, [1, 2])' % (self.tag_name, constants.TAG_FILTER_OPERATOR_BETWEEN)
result = self.pwfilter(self.tag_name, constants.TAG_FILTER_OPERATOR_BETWEEN, [1, 2])
self.assertEqual(expected_result, result.__str__())
def test_valid_filter_in_operator(self):
expected_result = 'T("%s", %s, [1, 2, 3])' % (self.tag_name, constants.TAG_FILTER_OPERATOR_IN)
result = self.pwfilter(self.tag_name, constants.TAG_FILTER_OPERATOR_IN, [1, 2, 3])
self.assertEqual(expected_result, result.__str__())
class TestStringTagFilter(unittest.TestCase):
pwfilter = StringTagFilter
tag_name = 'testStr'
def test_valid_filter(self):
expected_result = 'T("%s", %s, "test value")' % (self.tag_name, constants.TAG_FILTER_OPERATOR_EQ)
result = self.pwfilter(self.tag_name, constants.TAG_FILTER_OPERATOR_EQ, 'test value')
self.assertEqual(expected_result, result.__str__())
def test_valid_filter_in_operator(self):
expected_result = 'T("%s", %s, ["1", "2"])' % (self.tag_name, constants.TAG_FILTER_OPERATOR_IN)
result = self.pwfilter(self.tag_name, constants.TAG_FILTER_OPERATOR_IN, ['1', '2'])
self.assertEqual(expected_result, result.__str__())
class TestListTagFilter(unittest.TestCase):
def setUp(self):
self.pwfilter = ListTagFilter
self.tag_name = 'testList'
def test_valid_filter_in_operator(self):
expected_result = 'T("%s", %s, [1, 2, "2"])' % (self.tag_name, constants.TAG_FILTER_OPERATOR_IN)
result = self.pwfilter(self.tag_name, constants.TAG_FILTER_OPERATOR_IN, [1, 2, '2'])
self.assertEqual(expected_result, result.__str__())
class TestDateTagFilter(unittest.TestCase):
def setUp(self):
self.pwfilter = DateTagFilter
self.tag_name = 'testDate'
def invalid_date_format(self, operator, value):
args = [self.tag_name, operator, value]
self.assertRaises(PushwooshFilterInvalidOperandException, self.pwfilter, *args)
def test_valid_filter(self):
values = [
'2014-12-05 22:22:22',
'2014-12-05 22:22',
'2014-12-05',
'2014-12-05T22:22:22',
'2014-12-05T22:22',
]
for value in values:
expected_result = 'T("%s", %s, "%s")' % (self.tag_name, constants.TAG_FILTER_OPERATOR_EQ, value)
result = self.pwfilter(self.tag_name, constants.TAG_FILTER_OPERATOR_EQ, value)
self.assertEqual(result.__str__(), expected_result)
def test_valid_filter_between(self):
value = ['2013-06-22 00:00:00', '2013-06-25']
expected_result = 'T("%s", %s, ["%s", "%s"])' % (self.tag_name, constants.TAG_FILTER_OPERATOR_BETWEEN, value[0], value[1])
result = self.pwfilter(self.tag_name, constants.TAG_FILTER_OPERATOR_BETWEEN, value)
self.assertEqual(result.__str__(), expected_result)
def test_valid_datetime_object(self):
value = datetime.strptime('2013-06-22 00:00:00', '%Y-%m-%d %H:%M:%S')
expected_result = 'T("%s", %s, "%s")' % (self.tag_name, constants.TAG_FILTER_OPERATOR_EQ, value)
result = self.pwfilter(self.tag_name, constants.TAG_FILTER_OPERATOR_EQ, value)
self.assertEqual(result.__str__(), expected_result)
def test_invalid_date_format(self):
self.invalid_date_format(constants.TAG_FILTER_OPERATOR_GTE, '2')
self.invalid_date_format(constants.TAG_FILTER_OPERATOR_BETWEEN, ['2013-06-25', '1'])
class TestDaysTagFilter(unittest.TestCase):
def setUp(self):
self.pwfilter = DaysTagFilter
self.tag_name = 'testDays'
def test_valid_filter(self):
value = 1
expected_result = 'T("%s", %s, %d)' % (self.tag_name, constants.TAG_FILTER_OPERATOR_EQ, value)
result = self.pwfilter(self.tag_name, constants.TAG_FILTER_OPERATOR_EQ, value)
self.assertEqual(result.__str__(), expected_result)
def test_valid_filter_between(self):
value = [1, 3]
expected_result = 'T("%s", %s, [%s, %s])' % (self.tag_name, constants.TAG_FILTER_OPERATOR_BETWEEN, value[0], value[1])
result = self.pwfilter(self.tag_name, constants.TAG_FILTER_OPERATOR_BETWEEN, value)
self.assertEqual(result.__str__(), expected_result)
def test_invalid_days(self):
args = [self.tag_name, constants.TAG_FILTER_OPERATOR_BETWEEN, [-1, 3]]
self.assertRaises(PushwooshFilterInvalidOperandException, self.pwfilter, *args)
class TestBooleanTagFilter(unittest.TestCase):
def setUp(self):
self.pwfilter = BooleanTagFilter
self.tag_name = 'testBool'
def invalid_boolean(self, operator, value):
args = [self.tag_name, operator, value]
self.assertRaises(PushwooshFilterInvalidOperandException, self.pwfilter, *args)
def test_valid_filter_in_operator(self):
expected_result = 'T("%s", %s, "true")' % (self.tag_name, constants.TAG_FILTER_OPERATOR_EQ)
result = self.pwfilter(self.tag_name, constants.TAG_FILTER_OPERATOR_EQ, 'true')
self.assertEqual(expected_result, result.__str__())
def test_invalid_boolean(self):
self.invalid_boolean(constants.TAG_FILTER_OPERATOR_EQ, 'invalid value')
self.invalid_boolean(constants.TAG_FILTER_OPERATOR_EQ, 2)
class TestOperatorsFilter(unittest.TestCase):
def test_valid_filter(self):
app_code = '0000-0000'
tag_name = 'test_string'
tag_value = 'test value'
expected_result = '(((A("%s") + T("%s", EQ, "%s")) * A("%s")) \\ T("%s", EQ, "%s"))' % \
    (app_code, tag_name, tag_value, app_code, tag_name, tag_value)
tag_filter = StringTagFilter(tag_name, constants.TAG_FILTER_OPERATOR_EQ, tag_value)
app_filter = ApplicationFilter(app_code)
union_filter = app_filter.union(tag_filter)
intersect_filter = union_filter.intersect(app_filter)
subtract_filter = intersect_filter.subtract(tag_filter)
self.assertEqual(subtract_filter.__str__(), expected_result)
class TestApplicationTagFilter(unittest.TestCase):
def setUp(self):
self.tag_name = 'testApplicationTag'
self.code = '0000-0000'
def test_valid_filter_int(self):
value = 1
expected_result = 'AT("%s", "%s", %s, %d)' % (self.code, self.tag_name, constants.TAG_FILTER_OPERATOR_EQ, value)
result = IntegerTagFilterByApplication(self.tag_name, constants.TAG_FILTER_OPERATOR_EQ, value, self.code)
self.assertEqual(result.__str__(), expected_result)
def test_valid_filter_string(self):
value = 1
expected_result = 'AT("%s", "%s", %s, %d)' % (self.code, self.tag_name, constants.TAG_FILTER_OPERATOR_EQ, value)
result = StringTagFilterByApplication(self.tag_name, constants.TAG_FILTER_OPERATOR_EQ, value, self.code)
self.assertEqual(result.__str__(), expected_result)
def test_valid_filter_list(self):
expected_result = 'AT("%s", "%s", %s, [1, 2])' % (self.code, self.tag_name, constants.TAG_FILTER_OPERATOR_EQ)
result = DaysTagFilterByApplication(self.tag_name, constants.TAG_FILTER_OPERATOR_EQ, [1, 2], self.code)
self.assertEqual(result.__str__(), expected_result)
def test_valid_filter_days(self):
value = 1
expected_result = 'AT("%s", "%s", %s, %d)' % (self.code, self.tag_name, constants.TAG_FILTER_OPERATOR_EQ, value)
result = DaysTagFilterByApplication(self.tag_name, constants.TAG_FILTER_OPERATOR_EQ, value, self.code)
self.assertEqual(result.__str__(), expected_result)
def test_valid_filter_date(self):
value = '2014-02-02 00:15:10'
expected_result = 'AT("%s", "%s", %s, "%s")' % (self.code, self.tag_name, constants.TAG_FILTER_OPERATOR_EQ, value)
result = DateTagFilterByApplication(self.tag_name, constants.TAG_FILTER_OPERATOR_EQ, value, self.code)
self.assertEqual(result.__str__(), expected_result)
def test_valid_filter_boolean(self):
value = 'False'
expected_result = 'AT("%s", "%s", %s, "%s")' % (self.code, self.tag_name, constants.TAG_FILTER_OPERATOR_EQ, value)
result = BooleanTagFilterByApplication(self.tag_name, constants.TAG_FILTER_OPERATOR_EQ, value, self.code)
self.assertEqual(result.__str__(), expected_result)
|
133145
|
import numpy
from .eval_splines import eval_cubic
## the functions in this file provide backward-compatibility calls
##
## they can optionally allocate memory for the result
## they work for any dimension, except the functions which compute the gradient
#######################
# Compatibility calls #
#######################
from numba import generated_jit
from .codegen import source_to_function
@generated_jit
def get_grid(a, b, n, C):
d = C.ndim
s = "({},)".format(str.join(", ", [f"(a[{k}],b[{k}],n[{k}])" for k in range(d)]))
txt = "def get_grid(a,b,n,C): return {}".format(s)
f = source_to_function(txt)
return f
def eval_cubic_spline(a, b, orders, coefs, point):
"""Evaluates a cubic spline at one point
    Parameters
    ----------
a : array of size d (float)
Lower bounds of the cartesian grid.
b : array of size d (float)
Upper bounds of the cartesian grid.
orders : array of size d (int)
Number of nodes along each dimension (=(n1,...,nd) )
coefs : array of dimension d, and size (n1+2, ..., nd+2)
Filtered coefficients.
point : array of size d
Coordinate of the point where the splines must be interpolated.
Returns
-------
value : float
Interpolated value.
"""
grid = get_grid(a, b, orders, coefs)
return eval_cubic(grid, coefs, point)
def vec_eval_cubic_spline(a, b, orders, coefs, points, values=None):
"""Evaluates a cubic spline at many points
    Parameters
    ----------
a : array of size d (float)
Lower bounds of the cartesian grid.
b : array of size d (float)
Upper bounds of the cartesian grid.
orders : array of size d (int)
Number of nodes along each dimension. (=(n1,...,nd))
coefs : array of dimension d, and size (n1+2, ..., nd+2)
Filtered coefficients.
points : array of size N x d
List of points where the splines must be interpolated.
values (optional) : array of size (N)
If not None, contains the result.
Returns
-------
values : array of size (N)
Interpolated values. values[i] contains spline evaluated at point points[i,:].
"""
grid = get_grid(a, b, orders, coefs)
if values is None:
return eval_cubic(grid, coefs, points)
    else:
        eval_cubic(grid, coefs, points, values)
        return values
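# Usage sketch for the two functions above (hypothetical values; in practice
# `coefs` comes from a prefiltering step that this compatibility module does
# not provide, so zeros stand in for real filtered coefficients here):
#
#     import numpy as np
#     a, b, orders = np.array([0.0]), np.array([1.0]), np.array([10])
#     coefs = np.zeros(orders[0] + 2)              # (n1+2,) filtered coefficients
#     value = eval_cubic_spline(a, b, orders, coefs, np.array([0.5]))
#     points = np.random.rand(100, 1)
#     values = vec_eval_cubic_spline(a, b, orders, coefs, points)   # shape (100,)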
def eval_cubic_splines(a, b, orders, mcoefs, point, values=None):
"""Evaluates multi-splines at one point.
    Parameters
    ----------
a : array of size d (float)
Lower bounds of the cartesian grid.
b : array of size d (float)
Upper bounds of the cartesian grid.
orders : array of size d (int)
Number of nodes along each dimension.
    mcoefs : array of dimension d+1, and size (n1+2, ..., nd+2, p)
        Filtered coefficients. mcoefs[..., i] contains the coefficients
        of spline number i.
point : array of size d
Point where the spline must be interpolated.
values (optional) : array of size (p)
If not None, contains the result.
Returns
-------
values : array of size (p)
        Interpolated values. values[j] contains spline number j evaluated at point `point`.
"""
grid = get_grid(a, b, orders, mcoefs[..., 0])
if values is None:
return eval_cubic(grid, mcoefs, point)
    else:
        eval_cubic(grid, mcoefs, point, values)
        return values
def vec_eval_cubic_splines(a, b, orders, mcoefs, points, values=None):
"""Evaluates multi-splines on a series of points.
    Parameters
    ----------
a : array of size d (float)
Lower bounds of the cartesian grid.
b : array of size d (float)
Upper bounds of the cartesian grid.
orders : array of size d (int)
Number of nodes along each dimension. ( =(n1,...nd) )
mcoefs : array of dimension d+1, and size (n1+2, ..., nd+2, p)
        Filtered coefficients. mcoefs[..., i] contains the coefficients of spline number i.
points : array of size N x d
List of points where the splines must be interpolated.
values (optional) : array of size (N x p)
If not None, contains the result.
Returns
-------
values : array of size (N x p)
        Interpolated values. values[i,j] contains spline number j evaluated at point points[i,:].
"""
grid = get_grid(a, b, orders, mcoefs[..., 0])
if values is None:
return eval_cubic(grid, mcoefs, points)
    else:
        eval_cubic(grid, mcoefs, points, values)
        return values
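# The multi-spline variants expect the spline index on the *last* axis of
# `mcoefs`, matching the `mcoefs[..., 0]` calls above. A sketch with
# hypothetical values, continuing the 1d example:
#
#     mcoefs = np.zeros((orders[0] + 2, 3))        # (n1+2, p) for p=3 splines
#     values = vec_eval_cubic_splines(a, b, orders, mcoefs, points)   # (N, p)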
#########
from .eval_cubic_numba import (
vec_eval_cubic_splines_G_1,
vec_eval_cubic_splines_G_2,
vec_eval_cubic_splines_G_3,
vec_eval_cubic_splines_G_4,
)
def vec_eval_cubic_splines_G(a, b, orders, mcoefs, points, values=None, dvalues=None):
a = numpy.array(a, dtype=float)
b = numpy.array(b, dtype=float)
orders = numpy.array(orders, dtype=int)
d = a.shape[0]
N = points.shape[0]
n_sp = mcoefs.shape[-1]
if values is None:
values = numpy.empty((N, n_sp))
if dvalues is None:
dvalues = numpy.empty((N, d, n_sp))
if d == 1:
vec_eval_cubic_splines_G_1(a, b, orders, mcoefs, points, values, dvalues)
elif d == 2:
vec_eval_cubic_splines_G_2(a, b, orders, mcoefs, points, values, dvalues)
elif d == 3:
vec_eval_cubic_splines_G_3(a, b, orders, mcoefs, points, values, dvalues)
    elif d == 4:
        vec_eval_cubic_splines_G_4(a, b, orders, mcoefs, points, values, dvalues)
    else:
        raise ValueError(f'vec_eval_cubic_splines_G supports only d <= 4 (got d = {d}).')
return [values, dvalues]
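# Usage sketch for the gradient version (hypothetical shapes, d=2 and p=3):
#
#     a, b, orders = np.array([0., 0.]), np.array([1., 1.]), np.array([10, 10])
#     mcoefs = np.zeros((12, 12, 3))               # (n1+2, n2+2, p)
#     points = np.random.rand(50, 2)
#     values, dvalues = vec_eval_cubic_splines_G(a, b, orders, mcoefs, points)
#     # values: (50, 3), dvalues: (50, 2, 3)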
|
133261
|
import itertools
from runtests.mpi import MPITest
import pybnb
from .common import mpi_available
def left_child(i):
return 2 * i + 1
def right_child(i):
return 2 * i + 2
def log2floor(n):
assert n > 0
return n.bit_length() - 1
def height(size):
return log2floor(size)
def set_none(heap, i):
if i < len(heap):
heap[i] = None
set_none(heap, left_child(i))
set_none(heap, right_child(i))
def set_one(heap, i):
if i < len(heap):
if heap[i] is not None:
heap[i] = 1
set_one(heap, left_child(i))
set_one(heap, right_child(i))
def is_terminal(heap, i):
N = len(heap)
c1 = left_child(i)
c2 = right_child(i)
return ((c1 >= N) or (heap[c1] is None)) and ((c2 >= N) or (heap[c2] is None))
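# The bound of a heap is the best bound among its terminal (leaf-like) nodes:
# min() for minimization problems, max() for maximization (passed via `how`).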
def get_bound(heap, how=min):
N = len(heap)
assert N >= 1
assert heap[0] is not None
terminal_bounds = []
for i in range(N):
if (heap[i] is not None) and is_terminal(heap, i):
terminal_bounds.append(heap[i])
return how(terminal_bounds)
def powerset(iterable):
s = list(iterable)
return itertools.chain.from_iterable(
itertools.combinations(s, r) for r in range(len(s) + 1)
)
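# Enumerate small test heaps of 0/1/None entries: for every heap size below
# 2 ** (k + 1), prune proper subsets of the deepest-level subtrees (set_none)
# and then, level by level, overwrite subsets of the surviving subtrees with
# ones (set_one). These drive the Discrete problem below through every bound
# shape the dispatcher queue can encounter.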
def gen_heaps(k):
for heap_size in range(1, 2 ** (k + 1)):
h = height(heap_size)
for level_none in range(h, h + 1):
level_nodes = list(
filter(
lambda x: x < heap_size,
range((2 ** level_none) - 1, (2 ** (level_none + 1)) - 1),
)
)
for none_list in sorted(powerset(level_nodes)):
if len(none_list) == len(level_nodes):
continue
heap_master = [0] * heap_size
for i in none_list:
set_none(heap_master, i)
for level in range(0, h + 1):
nodes = filter(
lambda x: (x < heap_size) and (heap_master[x] is not None),
range((2 ** level) - 1, (2 ** (level + 1)) - 1),
)
for nodes_list in sorted(powerset(nodes)):
if (len(nodes_list) == 0) and level != 0:
continue
heap = [0] * heap_size
for i in none_list:
set_none(heap, i)
for i in nodes_list:
set_one(heap, i)
yield heap
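# A synthetic branch-and-bound problem whose search tree is a binary heap:
# node i branches to children 2*i + 1 and 2*i + 2 (skipping None entries),
# bound() reads straight from `bound_bheap`, and objective() comes from the
# `objectives` dict with `default_objective` as the fallback.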
class Discrete(pybnb.Problem):
def __init__(self, sense, objectives, bound_bheap, default_objective):
assert len(bound_bheap) >= 1
self._sense = sense
self._objectives = objectives
self._bound_bheap = bound_bheap
self._default_objective = default_objective
self._heap_idx = 0
#
# Implement Problem abstract methods
#
def sense(self):
return self._sense
def objective(self):
return self._objectives.get(self._heap_idx, self._default_objective)
def bound(self):
return self._bound_bheap[self._heap_idx]
def save_state(self, node):
node.state = self._heap_idx
def load_state(self, node):
self._heap_idx = node.state
def branch(self):
i = self._heap_idx
assert i >= 0
assert i < len(self._bound_bheap)
left_idx = 2 * i + 1
if (left_idx < len(self._bound_bheap)) and (
self._bound_bheap[left_idx] is not None
):
child = pybnb.Node()
child.state = left_idx
yield child
right_idx = 2 * i + 2
if (right_idx < len(self._bound_bheap)) and (
self._bound_bheap[right_idx] is not None
):
child = pybnb.Node()
child.state = right_idx
yield child
class SmallHeap(pybnb.Problem):
def __init__(self):
self._heap_index = 0
self._max_heap_index = 4
def sense(self):
return pybnb.minimize
def objective(self):
if self._heap_index == 0:
return 3
elif self._heap_index in (1, 2):
return 2
elif self._heap_index == 3:
return 0
else:
assert self._heap_index == 4
return 1
def bound(self):
if self._heap_index == 0:
return -3
elif self._heap_index in (1, 2):
return -2
else:
assert self._heap_index in (3, 4)
return -1
def save_state(self, node):
node.state = self._heap_index
def load_state(self, node):
self._heap_index = node.state
def branch(self):
i = self._heap_index
assert 0 <= i <= self._max_heap_index
left_index = 2 * i + 1
if left_index <= self._max_heap_index:
child = pybnb.Node()
child.state = left_index
yield child
right_index = 2 * i + 2
if right_index <= self._max_heap_index:
child = pybnb.Node()
child.state = right_index
yield child
def _test_heaps(comm):
solver = pybnb.Solver(comm=comm)
problem = SmallHeap()
results = solver.solve(problem, queue_strategy="breadth")
assert results.solution_status == "feasible"
assert results.termination_condition == "queue_empty"
assert results.objective == 0
assert results.bound == -2
assert results.best_node.objective == 0
assert results.best_node.bound == -1
assert results.best_node.state == 3
_uuid = results.best_node._uuid
queue = solver.save_dispatcher_queue()
if solver.is_dispatcher:
assert queue.bound() == -2
assert queue.worst_terminal_bound == -2
assert len(queue.nodes) == 0
results = solver.solve(problem, initialize_queue=queue, best_node=results.best_node)
assert results.solution_status == "feasible"
assert results.termination_condition == "queue_empty"
assert results.objective == 0
assert results.bound == -2
assert results.best_node.objective == 0
assert results.best_node.bound == -1
assert results.best_node.state == 3
assert results.best_node._uuid == _uuid
queue = solver.save_dispatcher_queue()
if solver.is_dispatcher:
assert queue.bound() == -2
assert queue.worst_terminal_bound == -2
assert len(queue.nodes) == 0
for heap in gen_heaps(2):
heap_bound = get_bound(heap)
node_list = [None, len(heap)] + [
i for i in range(len(heap)) if heap[i] is not None
]
# min
for objective_node in node_list:
if objective_node is not None:
problem = Discrete(
pybnb.minimize, {objective_node: 1}, heap, default_objective=2
)
else:
problem = Discrete(pybnb.minimize, {}, heap, default_objective=1)
results = solver.solve(problem, log=None)
if objective_node == len(heap):
assert results.objective == 2
else:
assert results.objective == 1
assert results.bound == heap_bound
# max
heap_bound = -heap_bound
heap = [-b_ if (b_ is not None) else None for b_ in heap]
for objective_node in node_list:
if objective_node is not None:
problem = Discrete(
pybnb.maximize, {objective_node: -1}, heap, default_objective=-2
)
else:
problem = Discrete(pybnb.maximize, {}, heap, default_objective=-1)
results = solver.solve(problem, log=None)
if objective_node == len(heap):
assert results.objective == -2
else:
assert results.objective == -1
assert results.bound == heap_bound
def test_heaps_nocomm():
_test_heaps(None)
if mpi_available:
@MPITest(commsize=[1, 2, 4])
def test_heaps_comm(comm):
_test_heaps(comm)
|
133310
|
import FWCore.ParameterSet.Config as cms
tccFlatToDigi = cms.EDProducer("EcalFEtoDigi",
FileEventOffset = cms.untracked.int32(0),
UseIdentityLUT = cms.untracked.bool(False),
SuperModuleId = cms.untracked.int32(-1),
debugPrintFlag = cms.untracked.bool(False),
FlatBaseName = cms.untracked.string('ecal_tcc_')
)
|
133366
|
from django.conf.urls import url
from apps.myadmin.views import login, user, team, role, teamUserRelation, userRole, adminUser, businessLine, \
interfaceModule, interfacePermission, moduleManage, source, changeLog, businessLineModule, configService, \
jiraModule, modulePlatform, jiraBusinessLine, jiraBusinessLinePlatform, configUri, configHttp, httpInterfaceDebug, \
httpTestcaseDebug, exePython, standardTask, openApiBusinessLine, openApiUri, unitTestService, uiMobileServer, \
    versionManage, userLog, adminManagePermission, standardEnv, cacheManage, webportalBusinessLine, dataStorage, adminServiceConf
'''URL rule: myadmin/***/check; the middleware filters on the module segment in the middle.'''
urlpatterns = [
url(r'^myadmin/$', login.loginPage, name="admin_login"),
    url(r'^myadmin/login$', login.loginPage, name="admin_login"),
    url(r'^myadmin/doLogin$', login.doLogin, name="admin_doLogin"),
url(r'^myadmin/changePassword$', login.changePassword, name="admin_changePassword"),
url(r'^myadmin/logout$', login.logout, name="admin_logout"),
url(r'^myadmin/home$', login.home, name="admin_home"),
#user
url(r'^myadmin/user/check$', user.userCheckPage, name="admin_user_check"),
url(r'^myadmin/user/getUserSubPage$', user.getUser, name="admin_user_get_user_sub_page"),
url(r'^myadmin/user/addUser$', user.addUser, name="admin_add_user"),
url(r'^myadmin/user/getUserForId$', user.getUserForId, name="getUserForId"),
url(r'^myadmin/user/editUser$', user.editUser, name="admin_edit_user"),
url(r'^myadmin/user/delUser$', user.delUser, name="admin_del_user"),
url(r'^myadmin/user/addPermissionsToUser$', user.addPermissionsToUser, name="admin_add_permissions_to_user"),
url(r'^myadmin/user/addPermissionsToAllUsers$', user.addPermissionsToAllUsers, name="admin_add_permissions_to_all_users"),
url(r'^myadmin/interfacePermission/getUserPermissionKeys$', user.getUserPermission),
#team
url(r'^myadmin/team/check$', team.teamCheckPage, name="admin_team_check"),
url(r'^myadmin/team/getTeamSubPage$', team.getTeam, name="admin_team_get_team_sub_page"),
url(r'^myadmin/team/addTeam$', team.addTeam, name="admin_add_Team"),
url(r'^myadmin/team/getTeamForId$', team.getTeamForId, name="getTeamForId"),
url(r'^myadmin/team/editTeam$', team.editTeam, name="admin_edit_team"),
url(r'^myadmin/team/delTeam$', team.delTeam, name="admin_del_team"),
url(r'^myadmin/team/resetTeam$', team.resetTeam, name="admin_reset_team"),
url(r'^myadmin/team/getAllUsers$', team.getAllUsers, name="admin_get_all_users"),
url(r'^myadmin/team/addUsersToTeam$', team.addUsersToTeam, name="admin_add_users_to_team"),
url(r'^myadmin/team/getAllSelectedUsers$', team.getAllSelectedUsers, name="admin_get_all_selected_users"),
url(r'^myadmin/team/deleteSelectedUsers$', team.deleteSelectedUsers, name="admin_delete_selected_users"),
url(r'^myadmin/team/addPermissionsToTeam$', team.addPermissionsToTeam, name="admin_add_permissions_to_team"),
url(r'^myadmin/team/getTeammates$', team.getTeammates, name="admin_get_teammates"),
url(r'^myadmin/team/tansferData$', team.tansferData, name="admin_tansferData"),
url(r'^myadmin/interfacePermission/getTeamPermissionKeys$', team.getTeamPermission),
url(r'^myadmin/interfacePermission/reload$', team.permissionReload),
#role
url(r'^myadmin/role/check$', role.roleCheckPage, name="admin_role_check"),
url(r'^myadmin/role/getRoleSubPage$', role.getRole, name="admin_role_get_role_sub_page"),
url(r'^myadmin/role/addRole$', role.addRole, name="admin_add_Role"),
url(r'^myadmin/role/getRoleForId$', role.getRoleForId, name="getRoleForId"),
url(r'^myadmin/role/editRole$', role.editRole, name="admin_edit_role"),
url(r'^myadmin/role/delRole$', role.delRole, name="admin_del_role"),
url(r'^myadmin/role/resetRole$', role.resetRole, name="admin_reset_role"),
url(r'^myadmin/role/addUsersToRole$', role.addUsersToRole, name="admin_add_user_to_role"),
#adminManagePermission
url(r'^myadmin/adminManagePermission/check$', adminManagePermission.permissionCheckPage, name="admin_adminManagePermission_check"),
url(r'^myadmin/adminManagePermission/getPermissionSubPage$', adminManagePermission.getPermission, name="admin_permission_get_permission_sub_page"),
url(r'^myadmin/adminManagePermission/addPermission$', adminManagePermission.addPermission, name="admin_add_Permission"),
url(r'^myadmin/adminManagePermission/getPermissionForId$', adminManagePermission.getPermissionForId, name="getPermissionForId"),
url(r'^myadmin/adminManagePermission/editPermission$', adminManagePermission.editPermission, name="admin_edit_permission"),
url(r'^myadmin/adminManagePermission/delPermission$', adminManagePermission.delPermission, name="admin_del_permission"),
url(r'^myadmin/adminManagePermission/resetPermission$', adminManagePermission.resetPermission, name="admin_reset_permission"),
url(r'^myadmin/adminManagePermission/getAllPermissions$', adminManagePermission.getAllPermissions, name="admin_get_all_permissions"),
url(r'^myadmin/adminManagePermission/getAllSelectedPermissions$', adminManagePermission.getAllSelectedPermissions, name="admin_get_all_selected_permissions"),
url(r'^myadmin/adminManagePermission/getAllSelectedTeamPermissions$', adminManagePermission.getAllSelectedTeamPermissions, name="admin_get_all_selected_team_permissions"),
url(r'^myadmin/adminManagePermission/getAllUsersSelectedPermissions$', adminManagePermission.getAllUsersSelectedPermissions, name="admin_get_all_users_selected_team_permissions"),
#teamUser
url(r'^myadmin/team/getTeammateSubPage$', teamUserRelation.getAllTeammates, name="admin_team_get_teammate_sub_page"),
#userRole
url(r'^myadmin/userRole/userRoleCheckPage$', userRole.userRoleCheckPage, name="admin_user_role_check_page"),
    url(r'^myadmin/userRole/getUserRoleSubPage$', userRole.getUserRole, name="admin_get_user_role_sub_page"),
url(r'^myadmin/userRole/setTeamLeader$', userRole.setTeamLeader, name="admin_set_team_leader"),
url(r'^myadmin/userRole/delTeamLeader$', userRole.delTeamLeader, name="admin_del_team_leader"),
#adminUser
url(r'^myadmin/admin/check$', adminUser.adminUserCheckPage, name="admin_admin_user_check"),
url(r'^myadmin/admin/getAdminUserSubPage$', adminUser.getAdminUser, name="admin_get_admin_user_sub_page"),
url(r'^myadmin/admin/addAdminUser$', adminUser.addAdminUser, name="admin_add_adminUser"),
url(r'^myadmin/admin/getAdminUserForId$', adminUser.getAdminUserForId, name="admin_get_adminUser_for_id"),
url(r'^myadmin/admin/editAdminUser$', adminUser.editAdminUser, name="admin_edit_adminUser"),
url(r'^myadmin/admin/delAdminUser$', adminUser.delAdminUser, name="admin_del_adminUser"),
url(r'^myadmin/admin/resetAdminUser$', adminUser.resetAdminUser, name="admin_reset_adminUser"),
    url(r'^myadmin/admin/addPermissionsToUser$', adminUser.addPermissionsToUser, name="admin_add_permissions_to_adminUser"),
#businessLine
url(r'^myadmin/businessLine/check$', businessLine.businessLineCheckPage, name="admin_business_line_check"),
url(r'^myadmin/businessLine/getBusinessLineSubPage$', businessLine.getBusinessLine, name="admin_get_business_line_sub_page"),
url(r'^myadmin/businessLine/addBusinessLine$', businessLine.addBusinessLine, name="admin_add_business_line"),
url(r'^myadmin/businessLine/getBusinessLineForId$', businessLine.getBusinessLineForId, name="admin_get_businessLine_for_id"),
url(r'^myadmin/businessLine/editBusinessLine$', businessLine.editBusinessLine, name="admin_edit_businessLine"),
url(r'^myadmin/businessLine/delBusinessLine$', businessLine.delBusinessLine, name="admin_del_businessLine"),
url(r'^myadmin/businessLine/resetBusinessLine$', businessLine.resetBusinessLine, name="admin_reset_businessLine"),
# webportalBusinessLine
url(r'^myadmin/webportalBusinessLine/check$', webportalBusinessLine.businessLineCheckPage, name="admin_webportalBusinessLine_line_check"),
url(r'^myadmin/webportalBusinessLine/getBusinessLineSubPage$', webportalBusinessLine.getBusinessLine, name="admin_get_webportalBusinessLine_line_sub_page"),
url(r'^myadmin/webportalBusinessLine/addBusinessLine$', webportalBusinessLine.addBusinessLine, name="admin_add_webportalBusinessLine_line"),
url(r'^myadmin/webportalBusinessLine/getBusinessLineForId$', webportalBusinessLine.getBusinessLineForId, name="admin_get_webportalBusinessLine_for_id"),
url(r'^myadmin/webportalBusinessLine/editBusinessLine$', webportalBusinessLine.editBusinessLine, name="admin_edit_webportalBusinessLine"),
url(r'^myadmin/webportalBusinessLine/delBusinessLine$', webportalBusinessLine.delBusinessLine, name="admin_del_webportalBusinessLine"),
url(r'^myadmin/webportalBusinessLine/resetBusinessLine$', webportalBusinessLine.resetBusinessLine, name="admin_reset_webportalBusinessLine"),
url(r'^myadmin/webportalBusinessLine/getAllBusinessLines$', webportalBusinessLine.getAllBusinessLines, name="admin_get_allBusinessLines"),
#interfaceModule
# url(r'^myadmin/interfaceModule/check$', interfaceModule.interfaceModuleCheckPage, name="admin_interface_module_check"),
# url(r'^myadmin/interfaceModule/getInterfaceModuleSubPage$', interfaceModule.getInterfaceModule, name="admin_get_interface_module_sub_page"),
# url(r'^myadmin/interfaceModule/addInterfaceModule$', interfaceModule.addInterfaceModule, name="admin_add_interface_module"),
# url(r'^myadmin/interfaceModule/getInterfaceModuleForId$', interfaceModule.getInterfaceModuleForId, name="admin_get_interface_module_for_id"),
# url(r'^myadmin/interfaceModule/editInterfaceModule$', interfaceModule.editInterfaceModule, name="admin_edit_interfaceModule"),
# url(r'^myadmin/interfaceModule/delInterfaceModule$', interfaceModule.delInterfaceModule, name="admin_del_interfaceModule"),
# url(r'^myadmin/interfaceModule/resetInterfaceModule$', interfaceModule.resetInterfaceModule, name="admin_reset_interfaceModule"),
#interfacePermission
url(r'^myadmin/interfacePermission/check$', interfacePermission.interfacePermissionCheckPage, name="admin_interface_permission_check"),
url(r'^myadmin/interfacePermission/getInterfacePermissionSubPage$', interfacePermission.getInterfacePermission, name="admin_get_interface_permission_sub_page"),
url(r'^myadmin/interfacePermission/addInterfacePermission$', interfacePermission.addInterfacePermission, name="admin_add_interface_permission"),
url(r'^myadmin/interfacePermission/getInterfacePermissionForId$', interfacePermission.getInterfacePermissionForId, name="admin_get_interface_permission_for_id"),
url(r'^myadmin/interfacePermission/editInterfacePermission$', interfacePermission.editInterfacePermission, name="admin_edit_interfacePermission"),
url(r'^myadmin/interfacePermission/delInterfacePermission$', interfacePermission.delInterfacePermission, name="admin_del_interfacePermission"),
    url(r'^myadmin/interfacePermission/resetInterfacePermission$', interfacePermission.resetInterfacePermission, name="admin_reset_interfacePermission"),
# url(r'^myadmin/interfacePermission/getAllInterface$', interfacePermission.getAllInterface, name="admin_get_allInterface"),
url(r'^myadmin/interfacePermission/getAllPermissionKeys$', interfacePermission.getAllPermissionKeys, name="admin_get_allPermissionKeys"),
#module
url(r'^myadmin/moduleManage/check$', moduleManage.moduleManageCheckPage, name="admin_module_manage_check"),
url(r'^myadmin/moduleManage/getModuleManageSubPage$', moduleManage.getModuleManage, name="admin_get_module_manage_sub_page"),
url(r'^myadmin/moduleManage/addModuleManage$', moduleManage.addModuleManage, name="admin_add_module_manage"),
url(r'^myadmin/moduleManage/getModuleManageForId$', moduleManage.getModuleManageForId, name="admin_get_module_manage_for_id"),
url(r'^myadmin/moduleManage/editModuleManage$', moduleManage.editModuleManage, name="admin_edit_moduleManage"),
url(r'^myadmin/moduleManage/delModuleManage$', moduleManage.delModuleManage, name="admin_del_moduleManage"),
    url(r'^myadmin/moduleManage/resetModuleManage$', moduleManage.resetModuleManage, name="admin_reset_moduleManage"),
# source
url(r'^myadmin/source/check$', source.sourceCheckPage, name="admin_source_check"),
url(r'^myadmin/source/getSourceSubPage$', source.getSource, name="admin_get_source_sub_page"),
url(r'^myadmin/source/addSource$', source.addSource, name="admin_add_source"),
url(r'^myadmin/source/getSourceForId$', source.getSourceForId, name="admin_get_source_for_id"),
url(r'^myadmin/source/editSource$', source.editSource, name="admin_edit_source"),
url(r'^myadmin/source/delSource$', source.delSource, name="admin_del_source"),
url(r'^myadmin/source/resetSource$', source.resetSource, name="admin_reset_source"),
#changeLog
url(r'^myadmin/changeLog/check$', changeLog.changeLogCheckPage, name="admin_changeLog_check"),
url(r'^myadmin/changeLog/getChangeLogSubPage$', changeLog.getChangeLog, name="admin_get_changeLog_sub_page"),
url(r'^myadmin/changeLog/getChangeLogDataForId$', changeLog.getChangeLogDataForId, name="admin_get_changeLogData_for_id"),
#businessLineModule
url(r'^myadmin/businessLineModule/check$', businessLineModule.businessLineModuleCheckPage, name="admin_businessLine_module_check"),
url(r'^myadmin/businessLineModule/getBusinessLineModule$', businessLineModule.getBusinessLineModule, name="admin_get_businessLine_module"),
url(r'^myadmin/businessLineModule/addBusinessLineModule$', businessLineModule.addBusinessLineModule, name="admin_add_businessLine_module"),
url(r'^myadmin/businessLineModule/getAllBusinessLines$', businessLineModule.getAllBusinessLines, name="admin_get_all_businessLine"),
url(r'^myadmin/businessLineModule/getAllModuleNames$', businessLineModule.getAllModuleNames, name="admin_get_all_moduleNames"),
url(r'^myadmin/businessLineModule/getBusinessLineModuleForId$', businessLineModule.getBusinessLineModuleForId, name="admin_get_businessLineModule_for_id"),
url(r'^myadmin/businessLineModule/delBusinessLineModule$', businessLineModule.delBusinessLineModule, name="admin_del_businessLineModule"),
url(r'^myadmin/businessLineModule/editBusinessLineModule$', businessLineModule.editBusinessLineModule, name="admin_edit_businessLineModule"),
url(r'^myadmin/businessLineModule/getBusinessLineId$', businessLineModule.getBusinessLineId, name="admin_get_businessLineId"),
url(r'^myadmin/businessLineModule/getModuleId$', businessLineModule.getModuleId, name="admin_get_moduleId"),
#configService
url(r'^myadmin/configService/check$', configService.configServiceCheckPage, name="admin_configService_check"),
url(r'^myadmin/configService/getConfigServiceSubPage$', configService.getConfigService, name="admin_get_configService_sub_page"),
url(r'^myadmin/configService/addConfigService$', configService.addConfigService, name="admin_add_configService"),
url(r'^myadmin/configService/getConfigServiceForId$', configService.getConfigServiceForId, name="admin_get_configService_for_id"),
    url(r'^myadmin/configService/editConfigService$', configService.editConfigService, name="admin_edit_configService"),
url(r'^myadmin/configService/delConfigService$', configService.delConfigService, name="admin_del_configService"),
url(r'^myadmin/configService/resetConfigService$', configService.resetConfigService, name="admin_reset_configService"),
# configUri
url(r'^myadmin/configUri/check$', configUri.configUriCheckPage, name="admin_configURI_check"),
url(r'^myadmin/configUri/getConfigUriSubPage$', configUri.getConfigUri, name="admin_get_configUri_sub_page"),
url(r'^myadmin/configUri/addConfigUri$', configUri.addConfigUri, name="admin_add_configUri"),
url(r'^myadmin/configUri/getConfigUriForId$', configUri.getConfigUriForId, name="admin_get_configUri_for_id"),
url(r'^myadmin/configUri/editConfigUri$', configUri.editConfigUri, name="admin_edit_configUri"),
url(r'^myadmin/configUri/delConfigUri$', configUri.delConfigUri, name="admin_del_configUri"),
url(r'^myadmin/configUri/resetConfigUri$', configUri.resetConfigUri, name="admin_reset_configUri"),
#jiraModule
url(r'^myadmin/jiraModule/check$', jiraModule.jiraModuleCheckPage, name="admin_jiraModule_check"),
url(r'^myadmin/jiraModule/getJiraModuleSubPage$', jiraModule.getJiraModule, name="admin_get_jiraModule_sub_page"),
url(r'^myadmin/jiraModule/addJiraModule$', jiraModule.addJiraModule, name="admin_add_jiraModule"),
url(r'^myadmin/jiraModule/getJiraModuleForId$', jiraModule.getJiraModuleForId, name="admin_get_jiraModule_for_id"),
    url(r'^myadmin/jiraModule/editJiraModule$', jiraModule.editJiraModule, name="admin_edit_jiraModule"),
url(r'^myadmin/jiraModule/delJiraModule$', jiraModule.delJiraModule, name="admin_del_jiraModule"),
url(r'^myadmin/jiraModule/resetJiraModule$', jiraModule.resetJiraModule, name="admin_reset_jiraModule"),
#modulePlatform
url(r'^myadmin/modulePlatform/check$', modulePlatform.modulePlatformCheckPage, name="admin_modulePlatform_check"),
url(r'^myadmin/modulePlatform/getModulePlatform$', modulePlatform.getModulePlatform, name="admin_get_modulePlatform_sub_page"),
url(r'^myadmin/modulePlatform/getAllJiraModules$', modulePlatform.getAllJiraModules, name="admin_get_all_jiraModules"),
url(r'^myadmin/modulePlatform/addModulePlatform$', modulePlatform.addModulePlatform, name="admin_add_modulePlatform"),
url(r'^myadmin/modulePlatform/getModulePlatformForId$', modulePlatform.getModulePlatformForId, name="admin_get_modulePlatform_for_id"),
url(r'^myadmin/modulePlatform/editModulePlatform$', modulePlatform.editModulePlatform, name="admin_edit_modulePlatform"),
url(r'^myadmin/modulePlatform/deleteModulePlatform$', modulePlatform.deleteModulePlatform, name="admin_del_modulePlatform"),
url(r'^myadmin/modulePlatform/getJiraModuleId$', modulePlatform.getJiraModuleId, name="admin_get_jiraModuleId"),
url(r'^myadmin/modulePlatform/getModuleId$', modulePlatform.getModuleId, name="admin_get_moduleId"),
#jiraBusinessLine
url(r'^myadmin/jiraBusinessLine/check$', jiraBusinessLine.jiraBusinessLineCheckPage, name="admin_jiraBusinessLine_check"),
url(r'^myadmin/jiraBusinessLine/getJiraBusinessLineSubPage$', jiraBusinessLine.getJiraBusinessLine, name="admin_get_jiraBusinessLine_sub_page"),
url(r'^myadmin/jiraBusinessLine/addJiraBusinessLine$', jiraBusinessLine.addJiraBusinessLine, name="admin_add_jiraBusinessLine"),
url(r'^myadmin/jiraBusinessLine/getJiraBusinessLineForId$', jiraBusinessLine.getJiraBusinessLineForId, name="admin_get_jiraBusinessLine_for_id"),
    url(r'^myadmin/jiraBusinessLine/editJiraBusinessLine$', jiraBusinessLine.editJiraBusinessLine, name="admin_edit_jiraBusinessLine"),
url(r'^myadmin/jiraBusinessLine/delJiraBusinessLine$', jiraBusinessLine.delJiraBusinessLine, name="admin_del_jiraBusinessLine"),
url(r'^myadmin/jiraBusinessLine/resetJiraBusinessLine$', jiraBusinessLine.resetJiraBusinessLine, name="admin_reset_jiraBusinessLine"),
# jiraBusinessLinePlatform
url(r'^myadmin/jiraBusinessLinePlatform/check$', jiraBusinessLinePlatform.jiraBusinessLinePlatformCheckPage, name="admin_jiraBusinessLinePlatform_check"),
url(r'^myadmin/jiraBusinessLinePlatform/getJiraBusinessLinePlatform$', jiraBusinessLinePlatform.getJiraBusinessLinePlatform, name="admin_get_jiraBusinessLinePlatform_sub_page"),
url(r'^myadmin/jiraBusinessLinePlatform/getAllPlatformBusinessLines$', jiraBusinessLinePlatform.getAllPlatformBusinessLines, name="admin_get_all_platformBusinessLines"),
    url(r'^myadmin/jiraBusinessLinePlatform/getAllJiraBusinessLines$', jiraBusinessLinePlatform.getAllJiraBusinessLines, name="admin_get_all_jiraBusinessLines"),
url(r'^myadmin/jiraBusinessLinePlatform/addJiraBusinessLinePlatform$', jiraBusinessLinePlatform.addJiraBusinessLinePlatform, name="admin_add_jiraBusinessLinePlatform"),
url(r'^myadmin/jiraBusinessLinePlatform/getJiraBusinessLinePlatformForId$', jiraBusinessLinePlatform.getJiraBusinessLinePlatformForId, name="admin_get_jiraBusinessLinePlatform_for_id"),
url(r'^myadmin/jiraBusinessLinePlatform/editJiraBusinessLinePlatform$', jiraBusinessLinePlatform.editJiraBusinessLinePlatform, name="admin_edit_jiraBusinessLinePlatform"),
url(r'^myadmin/jiraBusinessLinePlatform/deleteJiraBusinessLinePlatform$', jiraBusinessLinePlatform.deleteJiraBusinessLinePlatform, name="admin_del_jiraBusinessLinePlatform"),
url(r'^myadmin/jiraBusinessLinePlatform/getJiraBusinessLineId$', jiraBusinessLinePlatform.getJiraBusinessLineId, name="admin_get_jiraBusinessLineId"),
#configHttp
url(r'^myadmin/configHttp/check$', configHttp.configHttpCheckPage, name="admin_configHttp_check"),
url(r'^myadmin/configHttp/getConfigHttpSubPage$', configHttp.getConfigHttp, name="admin_get_configHttp_sub_page"),
url(r'^myadmin/configHttp/getAllServiceConfKeys$', configHttp.getAllServiceConfKeys, name="admin_get_all_serviceConfKeys"),
url(r'^myadmin/configHttp/addConfigHttp$', configHttp.addConfigHttp, name="admin_add_configHttp"),
url(r'^myadmin/configHttp/getConfigHttpForId$', configHttp.getConfigHttpForId, name="admin_get_configHttp_for_id"),
url(r'^myadmin/configHttp/editConfigHttp$', configHttp.editConfigHttp, name="admin_edit_configHttp"),
url(r'^myadmin/configHttp/delConfigHttp$', configHttp.delConfigHttp, name="admin_del_configHttp"),
url(r'^myadmin/configHttp/resetConfigHttp$', configHttp.resetConfigHttp, name="admin_reset_configHttp"),
# httpInterfaceDebug
url(r'^myadmin/httpInterfaceDebug/check$', httpInterfaceDebug.httpInterfaceDebugCheckPage, name="admin_httpInterfaceDebug_check"),
url(r'^myadmin/httpInterfaceDebug/getHttpInterfaceDebugSubPage$', httpInterfaceDebug.getHttpInterfaceDebug, name="admin_get_httpInterfaceDebug_sub_page"),
url(r'^myadmin/httpInterfaceDebug/getAllBusinessLines$', httpInterfaceDebug.getAllBusinessLines, name="admin_get_all_businessLines"),
url(r'^myadmin/httpInterfaceDebug/getAllModuleNames$', httpInterfaceDebug.getAllModuleNames, name="admin_get_all_moduleNames"),
url(r'^myadmin/httpInterfaceDebug/getAllSourceNames$', httpInterfaceDebug.getAllSourceNames, name="admin_get_all_sourceNames"),
url(r'^myadmin/httpInterfaceDebug/getAllHttpConfKeys$', httpInterfaceDebug.getAllHttpConfKeys, name="admin_get_all_httpConfKeys"),
url(r'^myadmin/httpInterfaceDebug/getAllUsers$', httpInterfaceDebug.getAllUsers, name="admin_get_all_users"),
url(r'^myadmin/httpInterfaceDebug/addHttpInterfaceDebug$', httpInterfaceDebug.addHttpInterfaceDebug, name="admin_add_httpInterfaceDebug"),
url(r'^myadmin/httpInterfaceDebug/getHttpInterfaceDebugForId$', httpInterfaceDebug.getHttpInterfaceDebugForId, name="admin_get_httpInterfaceDebug_for_id"),
url(r'^myadmin/httpInterfaceDebug/editHttpInterfaceDebug$', httpInterfaceDebug.editHttpInterfaceDebug, name="admin_edit_httpInterfaceDebug"),
url(r'^myadmin/httpInterfaceDebug/delHttpInterfaceDebug$', httpInterfaceDebug.delHttpInterfaceDebug, name="admin_del_httpInterfaceDebug"),
url(r'^myadmin/httpInterfaceDebug/resetHttpInterfaceDebug$', httpInterfaceDebug.resetHttpInterfaceDebug, name="admin_reset_httpInterfaceDebug"),
# httpTestcaseDebug
url(r'^myadmin/httpTestcaseDebug/check$', httpTestcaseDebug.httpTestcaseDebugCheckPage, name="admin_httpTestcaseDebug_check"),
url(r'^myadmin/httpTestcaseDebug/getHttpTestcaseDebugSubPage$', httpTestcaseDebug.getHttpTestcaseDebug, name="admin_get_httpTestcaseDebug_sub_page"),
url(r'^myadmin/httpTestcaseDebug/getAllBusinessLines$', httpTestcaseDebug.getAllBusinessLines, name="admin_get_all_businessLines"),
url(r'^myadmin/httpTestcaseDebug/getAllModuleNames$', httpTestcaseDebug.getAllModuleNames, name="admin_get_all_moduleNames"),
url(r'^myadmin/httpTestcaseDebug/getAllSourceNames$', httpTestcaseDebug.getAllSourceNames, name="admin_get_all_sourceNames"),
url(r'^myadmin/httpTestcaseDebug/getAllHttpConfKeys$', httpTestcaseDebug.getAllHttpConfKeys, name="admin_get_all_httpConfKeys"),
url(r'^myadmin/httpTestcaseDebug/getAllUsers$', httpTestcaseDebug.getAllUsers, name="admin_get_all_users"),
url(r'^myadmin/httpTestcaseDebug/addHttpTestcaseDebug$', httpTestcaseDebug.addHttpTestcaseDebug, name="admin_add_httpTestcaseDebug"),
url(r'^myadmin/httpTestcaseDebug/getHttpTestcaseDebugForId$', httpTestcaseDebug.getHttpTestcaseDebugForId, name="admin_get_httpTestcaseDebug_for_id"),
url(r'^myadmin/httpTestcaseDebug/editHttpTestcaseDebug$', httpTestcaseDebug.editHttpTestcaseDebug, name="admin_edit_httptestcaseDebug"),
url(r'^myadmin/httpTestcaseDebug/delHttpTestcaseDebug$', httpTestcaseDebug.delHttpTestcaseDebug, name="admin_del_httptestcaseDebug"),
url(r'^myadmin/httpTestcaseDebug/resetHttpTestcaseDebug$', httpTestcaseDebug.resetHttpTestcaseDebug, name="admin_reset_httptestcaseDebug"),
# exePython
url(r'^myadmin/exePython/check$', exePython.exePythonCheckPage, name="admin_exePython_check"),
    url(r'^myadmin/exePython/getExePythonSubPage$', exePython.getExePython, name="admin_get_exePython_sub_page"),
url(r'^myadmin/exePython/addExePython$', exePython.addExePython, name="admin_add_exePython"),
url(r'^myadmin/exePython/getExePythonForId$', exePython.getExePythonForId, name="admin_get_exePython_for_id"),
url(r'^myadmin/exePython/editExePython$', exePython.editExePython, name="admin_edit_exePython"),
    url(r'^myadmin/exePython/delExePython$', exePython.delExePython, name="admin_del_exePython"),
url(r'^myadmin/exePython/delRedisKey$', exePython.delRedisKey, name="admin_del_redisKey"),
url(r'^myadmin/exePython/resetExePython$', exePython.resetExePython, name="admin_reset_exePython"),
# standardTask
url(r'^myadmin/standardTask/check$', standardTask.standardTaskCheckPage, name="admin_standardTask_check"),
url(r'^myadmin/standardTask/getStandardTaskSubPage$', standardTask.getStandardTask, name="admin_get_standardTask_sub_page"),
    url(r'^myadmin/standardTask/addStandardTask$', standardTask.addStandardTask, name="admin_add_standardTask"),
url(r'^myadmin/standardTask/getStandardTaskForId$', standardTask.getStandardTaskForId, name="admin_get_standardTask_for_id"),
url(r'^myadmin/standardTask/editStandardTask$', standardTask.editStandardTask, name="admin_edit_standardTask"),
url(r'^myadmin/standardTask/delStandardTask$', standardTask.delStandardTask, name="admin_del_standardTask"),
url(r'^myadmin/standardTask/resetStandardTask$', standardTask.resetStandardTask, name="admin_reset_standardTask"),
url(r'^myadmin/standardTask/getAllVersions$', standardTask.getAllVersions, name="admin_get_all_versions"),
url(r'^myadmin/standardTask/copyTaskToOtherVersion$', standardTask.copyTaskToOtherVersion, name="admin_copy_task"),
    # openApiBusinessLine
url(r'^myadmin/openApiBusinessLine/check$', openApiBusinessLine.openApiBusinessLineCheckPage, name="admin_openApiBusinessLine_check"),
url(r'^myadmin/openApiBusinessLine/getOpenApiBusinessLineSubPage$', openApiBusinessLine.getOpenApiBusinessLine, name="admin_get_openApiBusinessLine_sub_page"),
url(r'^myadmin/openApiBusinessLine/addOpenApiBusinessLine$', openApiBusinessLine.addOpenApiBusinessLine, name="admin_add_openApiBusinessLine"),
url(r'^myadmin/openApiBusinessLine/getOpenApiBusinessLineForId$', openApiBusinessLine.getOpenApiBusinessLineForId, name="admin_get_openApiBusinessLine_for_id"),
url(r'^myadmin/openApiBusinessLine/editOpenApiBusinessLine$', openApiBusinessLine.editOpenApiBusinessLine, name="admin_edit_openApiBusinessLine"),
url(r'^myadmin/openApiBusinessLine/delOpenApiBusinessLine$', openApiBusinessLine.delOpenApiBusinessLine, name="admin_del_openApiBusinessLine"),
url(r'^myadmin/openApiBusinessLine/resetOpenApiBusinessLine$', openApiBusinessLine.resetOpenApiBusinessLine, name="admin_reset_openApiBusinessLine"),
# openApiUri
url(r'^myadmin/openApiUri/check$', openApiUri.openApiUriCheckPage, name="admin_openApiUri_check"),
url(r'^myadmin/openApiUri/getOpenApiUriSubPage$', openApiUri.getOpenApiUri, name="admin_get_openApiUri_sub_page"),
url(r'^myadmin/openApiUri/addOpenApiUri$', openApiUri.addOpenApiUri, name="admin_add_openApiUri"),
url(r'^myadmin/openApiUri/getOpenApiUriForId$', openApiUri.getOpenApiUriForId, name="admin_get_openApiUri_for_id"),
url(r'^myadmin/openApiUri/editOpenApiUri$', openApiUri.editOpenApiUri, name="admin_edit_openApiUri"),
url(r'^myadmin/openApiUri/deleteOpenApiUri$', openApiUri.deleteOpenApiUri, name="admin_del_openApiUri"),
url(r'^myadmin/openApiUri/resetOpenApiUri$', openApiUri.resetOpenApiUri, name="admin_reset_openApiUri"),
# unitTestService
url(r'^myadmin/unitTestService/check$', unitTestService.unitTestServiceCheckPage, name="admin_unitTestService_check"),
url(r'^myadmin/unitTestService/getUnitTestServiceSubPage$', unitTestService.getUnitTestService, name="admin_get_unitTestService_sub_page"),
url(r'^myadmin/unitTestService/addUnitTestService$', unitTestService.addUnitTestService, name="admin_add_unitTestService"),
url(r'^myadmin/unitTestService/getUnitTestServiceForId$', unitTestService.getUnitTestServiceForId, name="admin_get_unitTestService_for_id"),
url(r'^myadmin/unitTestService/editUnitTestService$', unitTestService.editUnitTestService, name="admin_edit_unitTestService"),
url(r'^myadmin/unitTestService/deleteUnitTestService$', unitTestService.deleteUnitTestService, name="admin_del_unitTestService"),
url(r'^myadmin/unitTestService/resetUnitTestService$', unitTestService.resetUnitTestService, name="admin_reset_unitTestService"),
# uiMobileServer
url(r'^myadmin/uiMobileServer/check$', uiMobileServer.uiMobileServerCheckPage, name="admin_uiMobileServer_check"),
url(r'^myadmin/uiMobileServer/getUiMobileServerSubPage$', uiMobileServer.getUiMobileServer, name="admin_get_uiMobileServer_sub_page"),
url(r'^myadmin/uiMobileServer/addUiMobileServer$', uiMobileServer.addUiMobileServer, name="admin_add_uiMobileServer"),
    url(r'^myadmin/uiMobileServer/getUiMobileServerForId$', uiMobileServer.getUiMobileServerForId, name="admin_get_uiMobileServer_for_id"),
url(r'^myadmin/uiMobileServer/editUiMobileServer$', uiMobileServer.editUiMobileServer, name="admin_edit_uiMobileServer"),
url(r'^myadmin/uiMobileServer/deleteUiMobileServer$', uiMobileServer.deleteUiMobileServer, name="admin_del_uiMobileServer"),
url(r'^myadmin/uiMobileServer/resetUiMobileServer$', uiMobileServer.resetUiMobileServer, name="admin_reset_uiMobileServer"),
# versionManage
url(r'^myadmin/versionManage/check$', versionManage.versionManageCheckPage, name="admin_versionManage_check"),
url(r'^myadmin/versionManage/getVersionManageSubPage$', versionManage.getVersionManage, name="admin_get_versionManage_sub_page"),
url(r'^myadmin/versionManage/addVersionManage$', versionManage.addVersionManage, name="admin_add_versionManage"),
url(r'^myadmin/versionManage/getVersionManageForId$', versionManage.getVersionManageForId, name="admin_get_versionManage_for_id"),
url(r'^myadmin/versionManage/editVersionManage$', versionManage.editVersionManage, name="admin_edit_versionManage"),
url(r'^myadmin/versionManage/deleteVersionManage$', versionManage.deleteVersionManage, name="admin_del_versionManage"),
url(r'^myadmin/versionManage/resetVersionManage$', versionManage.resetVersionManage, name="admin_reset_versionManage"),
# userLog
url(r'^myadmin/userLog/check$', userLog.userLogCheckPage, name="admin_userLog_check"),
url(r'^myadmin/userLog/getUserLogSubPage$', userLog.getUserLog, name="admin_get_userLog_sub_page"),
url(r'^myadmin/userLog/addUserLog$', userLog.addUserLog, name="admin_add_userLog"),
url(r'^myadmin/userLog/getUserLogForId$', userLog.getUserLogForId, name="admin_get_userLog_for_id"),
url(r'^myadmin/userLog/editUserLog$', userLog.editUserLog, name="admin_edit_userLog"),
url(r'^myadmin/userLog/deleteUserLog$', userLog.deleteUserLog, name="admin_del_userLog"),
url(r'^myadmin/userLog/resetUserLog$', userLog.resetUserLog, name="admin_reset_userLog"),
# standardEnv
url(r'^myadmin/standardEnv/check$', standardEnv.standardEnvCheckPage, name="admin_standardEnv_check"),
url(r'^myadmin/standardEnv/getStandardEnvSubPage$', standardEnv.getStandardEnv, name="admin_get_standardEnv_sub_page"),
url(r'^myadmin/standardEnv/addStandardEnv$', standardEnv.addStandardEnv, name="admin_add_standardEnv"),
url(r'^myadmin/standardEnv/getStandardEnvForId$', standardEnv.getStandardEnvForId, name="admin_get_standardEnv_for_id"),
url(r'^myadmin/standardEnv/editStandardEnv$', standardEnv.editStandardEnv, name="admin_edit_standardEnv"),
url(r'^myadmin/standardEnv/deleteStandardEnv$', standardEnv.deleteStandardEnv, name="admin_del_standardEnv"),
url(r'^myadmin/standardEnv/resetStandardEnv$', standardEnv.resetStandardEnv, name="admin_reset_standardEnv"),
# cacheManage
url(r'^myadmin/cacheManage/check$', cacheManage.cacheManageCheckPage, name="admin_cacheManage_check"),
url(r'^myadmin/cacheManage/getCacheManageSubPage$', cacheManage.getCacheManage, name="admin_get_cacheManage_sub_page"),
url(r'^myadmin/cacheManage/deleteCacheData$', cacheManage.deleteCacheData, name="admin_delete_cacheData_sub_page"),
url(r'^myadmin/cacheManage/flushAllDatas$', cacheManage.flushAllDatas, name="admin_flush_allDatas_sub_page"),
url(r'^myadmin/cacheManage/addCacheData$', cacheManage.addCacheData, name="admin_add_cacheData"),
url(r'^myadmin/cacheManage/getCacheValueForCacheKey$', cacheManage.getCacheValueForCacheKey, name="admin_getCacheValue_for_cacheKey"),
url(r'^myadmin/cacheManage/editCacheData$', cacheManage.editCacheData, name="admin_edit_cacheData"),
# dataStorage
url(r'^myadmin/dataStorage/check$', dataStorage.dataStorageCheckPage, name="admin_dataStorage_check"),
url(r'^myadmin/dataStorage/getCacheManageSubPage$', dataStorage.getdataStorage, name="admin_get_dataStorage_sub_page"),
#serverConf
url(r'^myadmin/serviceConf/check$', adminServiceConf.adminServiceConf, name="admin_service_conf_page"),
url(r'^myadmin/serviceConf/getAdminServiceConfForId$', adminServiceConf.getAdminServiceConfForId, name="admin_get_service_conf_for_id"),
    url(r'^myadmin/serviceConf/getServiceConfSubPage$', adminServiceConf.getAdminServiceConf, name="admin_service_conf_sub_page"),
    url(r'^myadmin/serviceConf/getServiceTaskConfSubPage$', adminServiceConf.getAdminServiceTaskConf, name="admin_service_task_conf_sub_page"),
    url(r'^myadmin/serviceConf/saveEditServiceConf$', adminServiceConf.editAdminServiceConf, name="admin_edit_service_conf"),
    url(r'^myadmin/serviceConf/queueDeleteTask$', adminServiceConf.queueDeleteTask, name="admin_queue_delete_task"),
]
|
133392
|
import logging
import requests
import yaml
from bot.listeners import TelegramListener, AlertListener
from bot.protocol import SendExpedition
from ogame.game.const import Ship, CoordsType, Resource
from ogame.game.model import Coordinates
from ogame.util import find_unique
def parse_bot_config(config):
""" @return Parameters to initialize OGameBot. """
bot_config = config.get('bot', {})
sleep_min = bot_config.get('sleep_min')
sleep_max = bot_config.get('sleep_max')
min_time_before_attack_to_act = bot_config.get('min_time_before_attack_to_act')
max_time_before_attack_to_act = bot_config.get('max_time_before_attack_to_act')
try_recalling_saved_fleet = bot_config.get('try_recalling_saved_fleet')
max_return_flight_time = bot_config.get('max_return_flight_time')
harvest_expedition_debris = bot_config.get('harvest_expedition_debris')
harvest_speed = bot_config.get('harvest_speed')
return _remove_empty_values({
'sleep_min': sleep_min,
'sleep_max': sleep_max,
'min_time_before_attack_to_act': min_time_before_attack_to_act,
'max_time_before_attack_to_act': max_time_before_attack_to_act,
'try_recalling_saved_fleet': try_recalling_saved_fleet,
'max_return_flight_time': max_return_flight_time,
'harvest_expedition_debris': harvest_expedition_debris,
'harvest_speed': harvest_speed
})
def parse_client_config(config):
""" @return Parameters to initialize OGame client. """
# Parse account information.
account_config = _require('account', config)
username = _require('username', account_config)
password = _require('password', account_config)
universe = _require('universe', account_config)
language = _require('language', account_config)
country = _require('country', account_config)
if isinstance(universe, int): # universe is server number
server_number = universe
else: # universe is server name so we have to find the corresponding number
servers = get_servers(timeout=10)
def get_server_data(data): return data['name'].casefold(), data['language'].casefold()
server = find_unique(
item=(universe.casefold(), language.casefold()),
iterable=servers,
key=get_server_data)
if not server:
raise ValueError(f'Failed to match {universe} ({language}) to any server.')
server_number = server['number']
logging.debug(f'Matched {universe} ({language}) to server {server_number}.')
variations = {"us": "en"}
if language in variations:
locale = f'{variations[language]}_{country}'
else:
locale = f'{language}_{country}'
# Parse client parameters
bot_config = config.get('bot', {})
request_timeout = bot_config.get('request_timeout')
delay_between_requests = bot_config.get('delay_between_requests')
return _remove_empty_values({
'username': username,
'password': password,
'language': language,
'server_number': server_number,
'locale': locale,
'request_timeout': request_timeout,
'delay_between_requests': delay_between_requests
})
def parse_listener_config(config):
""" @return List of listeners. """
bot_config = config.get('bot', {})
listeners_config = config.get('listeners', {})
active_listeners = bot_config.get('listeners', [])
listeners = [_initialize_listener(name, listeners_config.get(name))
for name in active_listeners]
return listeners
def parse_expedition_config(config):
""" @return List of expeditions. """
bot_config = config.get('bot', {})
expeditions_config = config.get('expeditions', {})
active_expeditions = bot_config.get('expeditions', [])
expeditions = [_initialize_expedition(id, expeditions_config.get(id))
for id in active_expeditions]
return expeditions
def get_servers(**kwargs):
""" @return List of all available servers. We use it for matching server name with its number. """
return requests.get('https://lobby.ogame.gameforge.com/api/servers', **kwargs).json()
def load_config(file):
""" Load configuration from yaml file. """
with open(file, 'r') as stream:
return yaml.safe_load(stream)
def _initialize_listener(name, config):
if name == 'telegram':
return TelegramListener(**config)
elif name == 'alert':
return AlertListener(**config)
else:
raise ValueError(f'Unknown listener: {name}')
def _initialize_expedition(id, config):
origin_galaxy, origin_system, origin_position = _require('origin', config)
origin_type_name = config.get('origin_type', 'planet')
origin_type = CoordsType.from_name(origin_type_name)
if not origin_type:
raise ValueError(f'Unknown origin type: {origin_type_name}')
dest_galaxy, dest_system, dest_position = config.get('dest', [origin_galaxy, origin_system, 16])
ships = {}
for ship_name, amount in _require('ships', config).items():
ship = Ship.from_name(ship_name)
if not ship:
raise ValueError(f'Unknown ship: {ship_name}')
ships[ship] = amount
cargo = {}
for resource_name, amount in config.get('cargo', {}).items():
resource = Resource.from_name(resource_name)
if not resource:
raise ValueError(f'Unknown resource: {resource_name}')
cargo[resource] = amount
speed = config.get('speed', 10)
holding_time = config.get('holding_time', 1)
repeat = config.get('repeat', 'forever')
origin = Coordinates(
galaxy=origin_galaxy,
system=origin_system,
position=origin_position,
type=origin_type)
dest = Coordinates(
galaxy=dest_galaxy,
system=dest_system,
position=dest_position,
type=CoordsType.planet)
expedition = SendExpedition(
id=id,
origin=origin,
dest=dest,
ships=ships,
speed=speed,
holding_time=holding_time,
repeat=repeat,
cargo=cargo)
return expedition
def _require(key, cfg, error_msg=None):
""" Ensures that `key` is in the config `cfg`. """
error_msg = error_msg or f'Missing field `{key}` in the config file.'
val = cfg.get(key)
if not val:
raise ValueError(error_msg)
return val
def _remove_empty_values(dictionary):
""" Remove None values from a dictionary. """
return {k: v for k, v in dictionary.items() if v is not None}
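# --- Usage sketch (illustrative, not part of the module) ---
# A minimal example of how the parsers above fit together, assuming a YAML
# config shaped like the keys they read; all values here are hypothetical.
if __name__ == '__main__':
    import textwrap
    sample = textwrap.dedent("""
        account:
          username: alice
          password: secret
          universe: 123
          language: en
          country: US
        bot:
          sleep_min: 600
          sleep_max: 1200
    """)
    config = yaml.safe_load(sample)
    print(parse_bot_config(config))     # {'sleep_min': 600, 'sleep_max': 1200}
    print(parse_client_config(config))  # username, password, locale 'en_US', ...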
|
133397
|
from pandas_datareader import data
start_date = '2014-01-01'
end_date = '2018-01-01'
goog_data = data.DataReader('GOOG', 'yahoo', start_date, end_date)
import numpy as np
import pandas as pd
goog_data_signal = pd.DataFrame(index=goog_data.index)
goog_data_signal['price'] = goog_data['Adj Close']
goog_data_signal['daily_difference'] = goog_data_signal['price'].diff()
goog_data_signal['signal'] = 0.0
goog_data_signal['signal'] = np.where(goog_data_signal['daily_difference'] > 0, 1.0, 0.0)
goog_data_signal['positions'] = goog_data_signal['signal'].diff()
import matplotlib.pyplot as plt
fig = plt.figure()
ax1 = fig.add_subplot(111, ylabel='Google price in $')
goog_data_signal['price'].plot(ax=ax1, color='r', lw=2.)
ax1.plot(goog_data_signal.loc[goog_data_signal.positions == 1.0].index,
goog_data_signal.price[goog_data_signal.positions == 1.0],
'^', markersize=5, color='m')
ax1.plot(goog_data_signal.loc[goog_data_signal.positions == -1.0].index,
goog_data_signal.price[goog_data_signal.positions == -1.0],
'v', markersize=5, color='k')
#plt.show()
# Set the initial capital
initial_capital = 1000.0
positions = pd.DataFrame(index=goog_data_signal.index).fillna(0.0)
portfolio = pd.DataFrame(index=goog_data_signal.index).fillna(0.0)
positions['GOOG'] = goog_data_signal['signal']
portfolio['positions'] = (positions.multiply(goog_data_signal['price'], axis=0))
portfolio['cash'] = initial_capital - (positions.diff().multiply(goog_data_signal['price'], axis=0)).cumsum()
portfolio['total'] = portfolio['positions'] + portfolio['cash']
portfolio.plot()
plt.show()
fig = plt.figure()
ax1 = fig.add_subplot(111, ylabel='Portfolio value in $')
portfolio['total'].plot(ax=ax1, lw=2.)
ax1.plot(portfolio.loc[goog_data_signal.positions == 1.0].index,portfolio.total[goog_data_signal.positions == 1.0],'^', markersize=10, color='m')
ax1.plot(portfolio.loc[goog_data_signal.positions == -1.0].index,portfolio.total[goog_data_signal.positions == -1.0],'v', markersize=10, color='k')
plt.show()
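# --- Bookkeeping sketch (illustrative) ---
# The portfolio arithmetic above maintains the identity
# total = holdings_value + cash, with cash_t = initial_capital -
# cumsum(delta_shares_t * price_t). A tiny synthetic check of that identity,
# independent of the downloaded data:
toy_price = pd.Series([10.0, 11.0, 12.0, 11.5])
toy_signal = pd.Series([0.0, 1.0, 1.0, 0.0])  # hold one share while signal is 1
toy_holdings = toy_signal * toy_price
toy_cash = 1000.0 - (toy_signal.diff().fillna(0.0) * toy_price).cumsum()
toy_total = toy_holdings + toy_cash
assert abs(toy_total.iloc[-1] - 1000.5) < 1e-9  # bought at 11, sold at 11.5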
|
133401
|
import sys
import numpy as np
from starfish import ImageStack
from starfish.spots import FindSpots
from starfish.types import Axes
def test_lmpf_uniform_peak():
data_array = np.zeros(shape=(1, 1, 1, 100, 100), dtype=np.float32)
data_array[0, 0, 0, 45:55, 45:55] = 1
imagestack = ImageStack.from_numpy(data_array)
# standard local max peak finder, should find spots for all the evenly illuminated pixels.
lmpf_no_kwarg = FindSpots.LocalMaxPeakFinder(1, 1, 1, sys.maxsize)
peaks = lmpf_no_kwarg.run(imagestack)
results_no_kwarg = peaks[{Axes.ROUND: 0, Axes.CH: 0}]
assert len(results_no_kwarg.spot_attrs.data) == 100
# local max peak finder, capped at one peak per label.
lmpf_kwarg = FindSpots.LocalMaxPeakFinder(1, 1, 1, sys.maxsize, num_peaks_per_label=1)
peaks = lmpf_kwarg.run(imagestack)
results_kwarg = peaks[{Axes.ROUND: 0, Axes.CH: 0}]
assert len(results_kwarg.spot_attrs.data) == 1
|
133437
|
import pytest
# Code that uses this is commented-out below.
# from ..types import TrackingItem
pytestmark = [pytest.mark.setone, pytest.mark.working, pytest.mark.schema]
@pytest.fixture
def tracking_item():
return {"tracking_type": "other", "other_tracking": {"extra_field": "extra_value"}}
def test_insert_and_get_tracking_item(testapp, tracking_item):
res = testapp.post_json('/tracking-items', tracking_item, status=201)
assert res.json['@graph'][0]['tracking_type'] == tracking_item['tracking_type']
res_uuid = res.json['@graph'][0]['uuid']
get_res = testapp.get('/tracking-items/' + res_uuid).follow()
assert get_res.json['other_tracking']['extra_field'] == tracking_item['other_tracking']['extra_field']
assert get_res.json.get('date_created')
# def test_tracking_item_create_and_commit(testapp, dummy_request):
# test_body = {
# "tracking_type": "other",
# "other_tracking": {"key1": "val1"},
# "submitted_by": "<EMAIL>"
# }
# res = TrackingItem.create_and_commit(dummy_request, test_body)
# assert res['status'] == 'success'
# res_path = res['@graph'][0]
# app_res = testapp.get(res_path)
# assert app_res.json['tracking_type'] == test_body['tracking_type']
# assert app_res.json['other_tracking']['key1'] == test_body['other_tracking']['key1']
# # should not have date created in this case (no validators run)
# assert 'date_created' not in app_res.json
# # however status is added automatically when using create_and_commit fxn
# assert app_res.json['status'] == 'in review by lab'
|
133454
|
import numpy as np
import pandas as pd
import statsmodels.api as sm
from statsmodels.imputation.bayes_mi import BayesGaussMI, MI
from numpy.testing import assert_allclose
def test_pat():
x = np.asarray([[1, np.nan, 3], [np.nan, 2, np.nan], [3, np.nan, 0],
[np.nan, 1, np.nan], [3, 2, 1]])
bm = BayesGaussMI(x)
assert_allclose(bm.patterns[0], np.r_[0, 2])
assert_allclose(bm.patterns[1], np.r_[1, 3])
def test_2x2():
    # Generate correlated bivariate data with chosen means and variances
np.random.seed(3434)
x = np.random.normal(size=(1000, 2))
r = 0.5
x[:, 1] = r*x[:, 0] + np.sqrt(1-r**2)*x[:, 1]
x[:, 0] *= 2
x[:, 1] *= 3
x[:, 0] += 1
x[:, 1] -= 2
# Introduce some missing values
u = np.random.normal(size=x.shape[0])
x[u > 1, 0] = np.nan
u = np.random.normal(size=x.shape[0])
x[u > 1, 1] = np.nan
bm = BayesGaussMI(x)
# Burn-in
for k in range(500):
bm.update()
# Estimate the posterior mean
mean = 0
cov = 0
dmean = 0
dcov = 0
for k in range(500):
bm.update()
mean += bm.mean
cov += bm.cov
dmean += bm.data.mean(0)
dcov += np.cov(bm.data.T)
mean /= 500
cov /= 500
dmean /= 500
dcov /= 500
assert_allclose(mean, np.r_[1, -2], 0.1)
assert_allclose(dmean, np.r_[1, -2], 0.1)
assert_allclose(cov, np.asarray([[4, 6*r], [6*r, 9]]), 0.1)
assert_allclose(dcov, np.asarray([[4, 6*r], [6*r, 9]]), 0.1)
def test_MI():
np.random.seed(414)
x = np.random.normal(size=(200, 4))
x[[1, 3, 9], 0] = np.nan
x[[1, 4, 3], 1] = np.nan
x[[2, 11, 21], 2] = np.nan
x[[11, 22, 99], 3] = np.nan
def model_args(x):
# Return endog, exog
# Regress x0 on x1 and x2
return (x[:, 0], x[:, 1:])
for j in (0, 1):
np.random.seed(2342)
imp = BayesGaussMI(x.copy())
mi = MI(imp, sm.OLS, model_args, burn=0)
r = mi.fit()
r.summary() # smoke test
# TODO: why does the test tolerance need to be so slack?
# There is unexpected variation across versions on travis.
assert_allclose(r.params, np.r_[
-0.05347919, -0.02479701, 0.10075517], 0.25, 0)
c = np.asarray([[0.00418232, 0.00029746, -0.00035057],
[0.00029746, 0.00407264, 0.00019496],
[-0.00035057, 0.00019496, 0.00509413]])
assert_allclose(r.cov_params(), c, 0.3, 0)
# Test with ndarray and pandas input
x = pd.DataFrame(x)
def test_MI_stat():
# Test for MI where we know statistically what should happen. The
# analysis model is x0 ~ x1 with standard error 1/sqrt(n) for the
# slope parameter. The nominal n is 1000, but half of the cases
# have missing x1. Then we introduce x2 that is either
# independent of x1, or almost perfectly correlated with x1. In
# the first case the SE is 1/sqrt(500), in the second case the SE
# is 1/sqrt(1000).
np.random.seed(414)
z = np.random.normal(size=(1000, 3))
z[:, 0] += 0.5*z[:, 1]
# Control the degree to which x2 proxies for x1
exp = [1/np.sqrt(500), 1/np.sqrt(1000)]
fmi = [0.5, 0]
for j, r in enumerate((0, 0.9999)):
x = z.copy()
x[:, 2] = r*x[:, 1] + np.sqrt(1 - r**2)*x[:, 2]
x[0:500, 1] = np.nan
def model_args(x):
# Return endog, exog
            # Regress x0 on x1
return (x[:, 0], x[:, 1])
np.random.seed(2342)
imp = BayesGaussMI(x.copy())
mi = MI(imp, sm.OLS, model_args, nrep=100, skip=10)
r = mi.fit()
# Check the SE
d = np.abs(r.bse[0] - exp[j]) / exp[j]
assert(d < 0.03)
# Check the FMI
d = np.abs(r.fmi[0] - fmi[j])
assert(d < 0.05)
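# --- Arithmetic behind test_MI_stat (illustrative) ---
# With n fully observed cases the OLS slope SE scales as 1/sqrt(n): 500
# effective rows when x2 carries no information about the missing x1, and
# close to 1000 when x2 is an almost perfect proxy for it.
if __name__ == '__main__':
    print(1 / np.sqrt(500))   # ~0.0447, SE when half the cases are lost
    print(1 / np.sqrt(1000))  # ~0.0316, SE when imputation recovers x1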
|
133498
|
import transformers as trans
import torch
import pytorch_lightning as pl
from torch.nn import CrossEntropyLoss, MSELoss
from transformers.models.auto.configuration_auto import AutoConfig
from transformers import AutoTokenizer
from openue.data.utils import get_labels_ner, get_labels_seq, OutputExample
from typing import Dict
class BertForRelationClassification(trans.BertPreTrainedModel):
def __init__(self, config):
super().__init__(config)
self.num_labels = config.num_labels
self.bert = trans.BertModel(config)
self.relation_classification = torch.nn.Linear(config.hidden_size, config.num_labels)
self.loss_fn = torch.nn.BCEWithLogitsLoss()
self.init_weights()
def forward(
self,
input_ids=None,
attention_mask=None,
token_type_ids=None,
position_ids=None,
head_mask=None,
inputs_embeds=None,
label_ids_seq=None,
output_attentions=None,
output_hidden_states=None,
return_dict=None,
):
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
outputs = self.bert(
input_ids,
attention_mask=attention_mask,
token_type_ids=token_type_ids,
position_ids=position_ids,
head_mask=head_mask,
inputs_embeds=inputs_embeds,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
)
sequence_output = outputs[0]
cls_output = sequence_output[:, 0, :]
relation_output = self.relation_classification(cls_output)
relation_output_sigmoid = torch.sigmoid(relation_output)
if label_ids_seq is None:
return (relation_output_sigmoid, relation_output, cls_output)
else:
loss = self.loss_fn(relation_output, label_ids_seq)
return (loss, relation_output_sigmoid, relation_output, cls_output)
    @staticmethod
    def add_to_argparse(parser):
        parser.add_argument("--model_type", type=str, default="bert")
class BertForNER(trans.BertPreTrainedModel):
def __init__(self, config, **model_kwargs):
super().__init__(config)
self.num_labels = config.num_labels
self.bert = trans.BertModel(config)
self.dropout = torch.nn.Dropout(config.hidden_dropout_prob)
self.token_classification = torch.nn.Linear(config.hidden_size, config.num_labels)
self.init_weights()
def forward(
self,
input_ids=None,
attention_mask=None,
token_type_ids=None,
position_ids=None,
head_mask=None,
inputs_embeds=None,
# labels=None,
output_attentions=None,
output_hidden_states=None,
return_dict=None,
label_ids_seq=None,
label_ids_ner=None
):
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
outputs = self.bert(
input_ids,
attention_mask=attention_mask,
token_type_ids=token_type_ids,
position_ids=position_ids,
head_mask=head_mask,
inputs_embeds=inputs_embeds,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
)
# batch_size * 107 * hidden_size
sequence_poolout_output = self.dropout(outputs[0])
# batch_size * 107 * 6
logits = self.token_classification(sequence_poolout_output)
if label_ids_ner is None:
return logits ,outputs[1]
loss_fct = CrossEntropyLoss()
# Only keep active parts of the loss
if attention_mask is not None:
active_loss = attention_mask.view(-1) == 1
active_logits = logits.view(-1, self.num_labels)
active_labels = torch.where(
active_loss, label_ids_ner.view(-1), torch.tensor(loss_fct.ignore_index).type_as(label_ids_ner)
)
loss = loss_fct(active_logits, active_labels)
else:
loss = loss_fct(logits.view(-1, self.num_labels), label_ids_ner.view(-1))
# if not return_dict:
output = (logits,) + outputs[2:]
return ((loss,) + output) if loss is not None else output
    @staticmethod
    def add_to_argparse(parser):
        parser.add_argument("--model_type", type=str, default="bert")
class Inference(pl.LightningModule):
"""
input the text,
return the triples
"""
def __init__(self, args):
super().__init__()
self.args = args
# init the labels
self._init_labels()
self._init_models()
self.mode = "event" if "event" in args.task_name else "triple"
self.start_idx = self.tokenizer("[relation0]", add_special_tokens=False)['input_ids'][0]
if self.mode == "event":
self.process = self.event_process
else:
self.process = self.normal_process
def _init_labels(self):
self.labels_ner = get_labels_ner()
self.label_map_ner: Dict[int, str] = {i: label for i, label in enumerate(self.labels_ner)}
self.num_labels_ner = len(self.labels_ner)
        # Load the sequence-classification labels
self.labels_seq = get_labels_seq(self.args)
self.label_map_seq: Dict[int, str] = {i: label for i, label in enumerate(self.labels_seq)}
self.num_labels_seq = len(self.labels_seq)
def _init_models(self):
model_name_or_path = self.args.seq_model_name_or_path
config = AutoConfig.from_pretrained(
model_name_or_path,
num_labels=self.num_labels_seq,
label2id={label: i for i, label in enumerate(self.labels_seq)},
)
self.model_seq = BertForRelationClassification.from_pretrained(
model_name_or_path,
config=config,
)
model_name_or_path = self.args.ner_model_name_or_path
        # Load the NER model to be trained
config = AutoConfig.from_pretrained(
model_name_or_path,
num_labels=self.num_labels_ner,
id2label=self.label_map_ner,
label2id={label: i for i, label in enumerate(self.labels_ner)},
)
self.model_ner = BertForNER.from_pretrained(
model_name_or_path,
config=config,
)
self.tokenizer = AutoTokenizer.from_pretrained(
model_name_or_path,
use_fast=False,
)
def forward(self, inputs):
"""
        Two approaches: run every relation at once, or use a dynamic batch size and forward only the relations that actually occur.
        First, model_seq predicts which relations each sample in the batch contains.
        Relations scoring above the threshold (0.5) are selected, and their inputs are gathered into a [batch_size*num_relation, seq_length] tensor, together with the number of relations per sample.
        After the relation-type embedding is added, this is fed into model_ner to classify every token in input_ids, followed by standard entity recognition.
"""
# for k, v in inputs.items():
# if isinstance(v, torch.Tensor):
# inputs[k] = v.to(self.device)
inputs_seq = {'input_ids': inputs['input_ids'],
'token_type_ids': inputs['token_type_ids'],
'attention_mask': inputs['attention_mask'],
}
with torch.no_grad():
outputs_seq = self.model_seq(**inputs_seq)
batch_size = inputs_seq['input_ids'].shape[0]
num_relations = len(self.label_map_seq.keys())
max_length = inputs_seq['input_ids'].shape[1]
# [batch_size, num_relation]
relation_output_sigmoid = outputs_seq[0]
        # Multi-relation prediction
mask_relation_output_sigmoid = relation_output_sigmoid > 0.5
        # The 0.5 threshold is a hyperparameter.
        # If no relation passes the threshold, fall back to extracting the single most probable relation.
for i in range(batch_size):
if torch.sum(mask_relation_output_sigmoid[i]) == 0:
max_relation_idx = torch.max(relation_output_sigmoid[i], dim=0)[1].item()
mask_relation_output_sigmoid[i][max_relation_idx] = 1
mask_relation_output_sigmoid = mask_relation_output_sigmoid.long()
        # mask_output [batch_size*num_relation] marks which inputs are needed
mask_output = mask_relation_output_sigmoid.view(-1)
        # Special relation representation; pieces to append: input_ids: [SEP relation], attention_mask: [1 1], token_type_ids: [1 1]
# relation_index shape : [batch_size, num_relations]
relation_index = torch.arange(self.start_idx, self.start_idx+num_relations).to(self.device).expand(batch_size, num_relations)
        # Piece 1 to append: REL ids; the selected entries form a variable-length [batch_size * n] tensor
relation_ids = torch.masked_select(relation_index, mask_relation_output_sigmoid.bool())
        # Piece 2 to append: SEP
cat_sep = torch.full((relation_ids.shape[0], 1), 102).long().to(self.device)
        # Piece 3 to append: [1]
cat_one = torch.full((relation_ids.shape[0], 1), 1).long().to(self.device)
        # Piece 4 to append: [0]
cat_zero = torch.full((relation_ids.shape[0], 1), 0).long().to(self.device)
        # Expand the original input_ids along the relation dimension.
input_ids_ner = torch.unsqueeze(inputs['input_ids'], 1) # [batch_size, 1, seq_length]
        # [batch_size, 50, max_length]: replicated 50 times, once per relation
input_ids_ner = input_ids_ner.expand(-1, len(self.label_map_seq.keys()), -1)
# [batch_size * 50, max_length]
input_ids_ner_reshape = input_ids_ner.reshape(batch_size * num_relations, max_length)
        # Select all predicted relations
mask = mask_output.unsqueeze(dim=1).expand(-1, max_length) # [batch_size * num_relations, max_length]
        # Gather the input_ids for the selected relations
input_ids = torch.masked_select(input_ids_ner_reshape, mask.bool()).view(-1, max_length)
        # n (the number of selected relations) * max_length
        # n >> batch_size, since one sentence can contain several relations
        # Append the SEP / relation_ids pieces prepared above
input_ids = torch.cat((input_ids, cat_zero), 1)
input_ids_ner = torch.cat((input_ids, cat_zero), 1)
        # Sum the 1s in the attention mask to locate rel_pos (where the relation token goes)
attention_mask_ner = torch.unsqueeze(inputs['attention_mask'], 1)
        # [batch_size, 50, max_length]: replicated 50 times, once per relation
attention_mask_ner = attention_mask_ner.expand(-1, len(self.label_map_seq.keys()), -1)
# [batch_size * 50, max_length]
attention_mask_ner_reshape = attention_mask_ner.reshape(batch_size * num_relations, max_length)
        # Select all predicted relations
tmp1 = mask_output.unsqueeze(dim=1) # [200, 1]
mask = tmp1.expand(-1, max_length) # [200, 79]
tmp2 = torch.masked_select(attention_mask_ner_reshape, mask.bool())
        # n (the number of selected relations) * max_length
        # n >> batch_size, since one sentence can contain several relations
tmp3 = tmp2.view(-1, max_length)
        # Sum the 1s in the attention mask to locate rel_pos
rel_pos = torch.sum(tmp3, dim=1)
(rel_number_find, max_length_find) = input_ids_ner.shape
one_hot = torch.sparse.torch.eye(max_length_find).long().to(self.device)
rel_pos_mask = one_hot.index_select(0, rel_pos)
rel_pos_mask_plus = one_hot.index_select(0, rel_pos+1)
        # Assemble the input_ids input
input_ids_ner[rel_pos_mask.bool()] = relation_ids
input_ids_ner[rel_pos_mask_plus.bool()] = cat_sep.squeeze()
        # Assemble the token_type_ids input
token_type_ids_ner = torch.zeros(rel_number_find, max_length_find).to(self.device)
token_type_ids_ner[rel_pos_mask.bool()] = 1
token_type_ids_ner[rel_pos_mask_plus.bool()] = 1
token_type_ids_ner = token_type_ids_ner.long()
        # Assemble the attention_mask input
        # Append a 0
tmp4 = torch.cat((tmp3, cat_zero), dim=1)
        # Append a 0
tmp5 = torch.cat((tmp4, cat_zero), dim=1)
tmp5[rel_pos_mask.bool()] = 1
tmp5[rel_pos_mask_plus.bool()] = 1
attention_mask_ner_tmp = tmp5
inputs_ner = {
'input_ids': input_ids_ner,
'token_type_ids': token_type_ids_ner,
'attention_mask': attention_mask_ner_tmp,
}
        outputs_ner = self.model_ner(**inputs_ner)[0]
_, results = torch.max(outputs_ner, dim=2)
results = results.cpu().tolist()
        results = [[self.label_map_ner[label_id] for label_id in row] for row in results]
attention_position_np = rel_pos.cpu().numpy()
attention_position_list = attention_position_np.tolist()
predict_relation_list = relation_ids.long().tolist()
input_ids_list = input_ids_ner.cpu().tolist()
output = []
input_ids = []
for idx, result in enumerate(results):
tmp1 = result[0: attention_position_list[idx]-1]
tmp2 = input_ids_list[idx][0: attention_position_list[idx]-1]
output.append(tmp1)
input_ids.append(tmp2)
input_split = torch.sum(mask_relation_output_sigmoid, dim=1)
for i in range(1, batch_size):
input_split[i] += input_split[i-1]
tmp_input_ids = [input_ids[:input_split[0]]]
tmp_output = [output[:input_split[0]]]
for i in range(1, batch_size):
tmp_input_ids.append(input_ids[input_split[i-1]:input_split[i]])
tmp_output.append(output[input_split[i-1]:input_split[i]])
output = tmp_output
input_ids = tmp_input_ids
        # Convert the NER output to BIOES labels, then pull out the entities
# processed_results_list_BIO = []
# for result in processed_results_list:
# processed_results_list_BIO.append([self.label_map_ner[token] for token in result])
        # Unpack the results per sample
index = 0
triple_output = [[] for _ in range(batch_size)]
# for each relation type or event type
# by default, extract the first head and tail to construct the triples
if self.mode == "triple":
cnt = 0
for ids_list, BIOS_list in zip(input_ids, output):
for ids, BIOS in zip(ids_list, BIOS_list):
labels = self.process(ids, BIOS)
# r = label_map_seq[predict_relation_list[index]]
r = predict_relation_list[index] - self.start_idx
if len(labels['subject']) == 0:
h = None
else:
h = labels['subject']
# h = ''.join(tokenizer.convert_ids_to_tokens(h))
if len(labels['object']) == 0:
t = None
else:
t = labels['object']
# t = ''.join(tokenizer.convert_ids_to_tokens(t))
# greedy select the head and tail
if h and t:
for hh in h:
for tt in t:
triple_output[cnt].append([hh, r, tt])
index = index + 1
cnt += 1
        # Not handled for now
# elif self.mode == "event":
# for ids, BIOS in zip(processed_input_ids_list, processed_results_list_BIO):
# triple_output.append(dict(event_type=predict_relation_list[index], argument=self.process(ids, BIOS)))
return triple_output
@staticmethod
def normal_process(text, result):
index = 0
start = None
labels = {}
labels['subject'] = []
labels['object'] = []
indicator = ''
for w, t in zip(text, result):
# ["O", "B-SUB", "I-SUB", "B-OBJ", "I-OBJ", "Relation"
if start is None:
if t == 'B-SUB':
start = index
indicator = 'subject'
elif t == 'B-OBJ':
start = index
indicator = 'object'
else:
# if t == 'I-SUB' or t == 'I-OBJ':
# continue
if t == "O":
# print(result[start: index])
labels[indicator].append(text[start: index])
start = None
index += 1
# print(labels)
return labels
@staticmethod
def event_process(text, result):
"""
return List[Dict(text, label)]
"""
index = 0
start = None
labels = []
indicator = ''
for w, t in zip(text, result):
# ["O", "B-SUB", "I-SUB", "B-OBJ", "I-OBJ", "Relation"
if start is None:
if "B-" in t:
# get the label name
indicator = t.split("-")[-1]
start = index
else:
if t.split("-")[-1] != indicator or "B-" in t:
# B-a I-b wrong, B-a B-a wrong
start = None
elif t == "O":
# print(result[start: index])
labels.append(dict(text=text[start: index], label=indicator))
start = None
index += 1
# print(labels)
return labels
@staticmethod
def add_to_argparse(parser):
parser.add_argument("--seq_model_name_or_path", type=str, default="seq_model")
parser.add_argument("--ner_model_name_or_path", type=str, default="ner_model")
return parser
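# --- Gather mechanics sketch (illustrative, standalone) ---
# Inference.forward above replicates each sentence once per relation and keeps
# only the (sentence, relation) pairs whose sigmoid score passed the threshold,
# via masked_select. A toy version of that selection:
if __name__ == '__main__':
    toy_batch, toy_rels, toy_len = 2, 3, 4
    toy_ids = torch.arange(toy_batch * toy_len).view(toy_batch, toy_len)
    keep = torch.tensor([[1, 0, 0], [0, 1, 1]])  # relations kept per sample
    expanded = toy_ids.unsqueeze(1).expand(-1, toy_rels, -1)
    expanded = expanded.reshape(toy_batch * toy_rels, toy_len)
    sel_mask = keep.view(-1).unsqueeze(1).expand(-1, toy_len)
    selected = torch.masked_select(expanded, sel_mask.bool()).view(-1, toy_len)
    print(selected)  # 3 rows: sample 0 once, sample 1 twice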
|
133507
|
from pathlib import Path
from setuptools import setup
VERSION = "0.1.6"
def get_long_description():
readme_path = Path(__file__).parent / "README.md"
with open(readme_path.absolute(), mode="r", encoding="utf8") as fp:
return fp.read()
setup(
name="datasette-dashboards",
description="Datasette plugin providing data dashboards from metadata",
long_description=get_long_description(),
long_description_content_type="text/markdown",
author="<NAME>",
url="https://github.com/rclement/datasette-dashboards",
project_urls={
"Changelog": "https://github.com/rclement/datasette-dashboards/blob/master/CHANGELOG.md"
},
license="Apache License, Version 2.0",
version=VERSION,
packages=["datasette_dashboards"],
entry_points={"datasette": ["dashboards = datasette_dashboards"]},
install_requires=["datasette", "datasette-render-markdown"],
extras_require={"test": ["faker", "pytest", "pytest-asyncio", "sqlite-utils"]},
tests_require=["datasette_dashboards[test]"],
package_data={"datasette_dashboards": ["templates/*.html", "static/*"]},
)
|
133512
|
import datetime
import io
import lzma
import pickle
from mongoengine import signals
def now():
return datetime.datetime.now()
def to_pickle(obj):
buff = pickle.dumps(obj, protocol=pickle.HIGHEST_PROTOCOL)
cbuff = lzma.compress(buff, format=lzma.FORMAT_XZ)
return io.BytesIO(cbuff)
def from_pickle(buff):
return pickle.loads(lzma.decompress(buff.read(), format=lzma.FORMAT_XZ))
def handler(event):
"""Signal decorator to allow use of callback functions as class decorators."""
def decorator(fn):
def apply(cls):
event.connect(fn, sender=cls)
return cls
fn.apply = apply
return fn
return decorator
@handler(signals.pre_save)
def update_modified(sender, document, **kwargs):
if kwargs.get('finished', False):
document.finished_time = now()
document.modified_time = now()
@handler(signals.pre_delete)
def delete_children(sender, document):
for attrname in ['datafield']:
attr = getattr(document, attrname, None)
if attr is not None:
attr.delete()
for child in document.children:
child.delete()
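# --- Usage sketch (illustrative) ---
# `handler` attaches an `apply` hook so a signal callback can double as a
# class decorator; the document class below is hypothetical.
if __name__ == '__main__':
    import mongoengine

    @update_modified.apply
    class Job(mongoengine.Document):
        modified_time = mongoengine.DateTimeField()
        finished_time = mongoengine.DateTimeField()

    # signals.pre_save is now connected for Job only; saving a Job (with a
    # live connection) stamps modified_time via update_modified above.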
|
133565
|
import torch.nn as nn
class MaskL1Loss(nn.Module):
"""
Loss from paper <Pose Guided Person Image Generation> Sec3.1 pose mask loss
"""
def __init__(self, ratio=1):
super(MaskL1Loss, self).__init__()
self.criterion = nn.L1Loss()
self.ratio = ratio
def forward(self, generated_img, target_img, mask):
pose_mask_l1 = self.criterion(generated_img * mask, target_img * mask)
return self.criterion(generated_img, target_img) + pose_mask_l1 * self.ratio
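# --- Usage sketch (illustrative) ---
# Shapes are hypothetical; the single-channel mask broadcasts over RGB.
if __name__ == '__main__':
    import torch
    loss_fn = MaskL1Loss(ratio=10)
    generated = torch.rand(2, 3, 64, 64)
    target = torch.rand(2, 3, 64, 64)
    mask = (torch.rand(2, 1, 64, 64) > 0.5).float()  # 1 inside the pose region
    print(loss_fn(generated, target, mask).item())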
|
133576
|
import re
from ..exceptions import RouteConfigurationError
class PatternParser:
PARAM_REGEX = re.compile(b'<.*?>')
DYNAMIC_CHARS = bytearray(b'*?.[]()')
CAST = {
str: lambda x: x.decode('utf-8'),
int: lambda x: int(x),
float: lambda x: float(x)
}
@classmethod
def validate_param_name(cls, name: bytes):
# TODO:
if b':' in name:
raise RouteConfigurationError('Special characters are not allowed in param name. '
'Use type hints in function parameters to cast the variable '
'or regexes with named groups to ensure only a specific URL matches.')
@classmethod
def extract_params(cls, pattern: bytes) -> tuple:
"""
:param pattern:
:return:
"""
params = []
new_pattern = pattern
simplified_pattern = pattern
groups = cls.PARAM_REGEX.findall(pattern)
for group in groups:
name = group[1:-1] # Removing <> chars
cls.validate_param_name(name)
simplified_pattern = simplified_pattern.replace(group, b'$' + name)
params.append(name.decode())
new_pattern = new_pattern.replace(group, b'(?P<' + name + b'>[^/]+)')
return re.compile(new_pattern), params, simplified_pattern
@classmethod
def is_dynamic_pattern(cls, pattern: bytes) -> bool:
for index, char in enumerate(pattern):
if char in cls.DYNAMIC_CHARS:
                # bytes iteration yields ints, so compare with ord('\\')
                if index > 0 and pattern[index - 1] == ord('\\'):
continue
return True
return False
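# --- What extract_params produces (illustrative) ---
# For a route pattern like b'/users/<id>', extract_params yields a regex with
# one named group per parameter; the equivalent compiled form behaves as:
if __name__ == '__main__':
    demo = re.compile(b'/users/(?P<id>[^/]+)')  # mirrors b'/users/<id>'
    print(demo.match(b'/users/42').group('id'))  # b'42'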
|
133595
|
import math
import torch
from torch import nn
from torch.nn import Parameter
import torch.nn.functional as F
from .encdec_attention_func import encdec_attn_func
import onmt
class EncdecMultiheadAttn(nn.Module):
"""Multi-headed encoder-decoder attention.
See "Attention Is All You Need" for more details.
"""
def __init__(self, num_heads, embed_dim, attn_drop=0.):
super().__init__()
self.embed_dim = embed_dim
self.num_heads = num_heads
self.dropout = attn_drop
self.head_dim = embed_dim // num_heads
assert self.head_dim * num_heads == self.embed_dim, "embed_dim must be divisible by num_heads"
self.bias = False
self.scaling = self.head_dim ** -0.5 # this value is hardcoded in the "fast" implementation
self.in_proj_weight_q = Parameter(torch.Tensor(embed_dim, embed_dim))
self.in_proj_weight_kv = Parameter(torch.Tensor(2 * embed_dim, embed_dim))
self.out_proj_weight = Parameter(torch.Tensor(embed_dim, embed_dim))
self.register_parameter('in_proj_bias_q', None)
self.register_parameter('in_proj_bias_kv', None)
self.in_proj_bias_q = None
self.in_proj_bias_kv = None
self.out_proj_bias = None
self.attn_func = encdec_attn_func
self.reset_parameters()
self.autograd = False
def convert_autograd(self):
if self.autograd:
return
with torch.no_grad():
self.autograd = True
self.linear_q = torch.nn.Linear(self.embed_dim, self.embed_dim, bias=False)
self.linear_kv = torch.nn.Linear(self.embed_dim, 2 * self.embed_dim, bias=False)
self.out_linear = torch.nn.Linear(self.embed_dim, self.embed_dim, bias=False)
self.linear_q.weight.copy_(self.in_proj_weight_q)
self.linear_kv.weight.copy_(self.in_proj_weight_kv)
self.out_linear.weight.copy_(self.out_proj_weight)
del self.in_proj_weight_q
del self.in_proj_weight_kv
del self.out_proj_weight
def reset_parameters(self, init='normal'):
if init == 'normal': # xavier normal
std_ = math.sqrt(2.0 / (self.embed_dim + self.embed_dim))
nn.init.normal_(self.in_proj_weight_q, 0.0, std_)
nn.init.normal_(self.in_proj_weight_kv, 0.0, std_)
nn.init.normal_(self.out_proj_weight, 0.0, std_)
else: # xavier uniform
std_ = math.sqrt(6.0 / (self.embed_dim + self.embed_dim))
nn.init.uniform_(self.in_proj_weight_q, -std_, std_)
nn.init.uniform_(self.in_proj_weight_kv, -std_, std_)
nn.init.uniform_(self.out_proj_weight, -std_, std_)
def forward(self, query, key, value,
attn_mask=None, incremental=False, incremental_cache=None,
rotary_pos_enc=False, pos_emb_q=None, pos_emb_k=None,
**kwargs):
assert value is key, "ERROR: Keys and values must be the same."
is_training = self.training
if self.autograd:
# assert not self.training
mask = attn_mask
if mask is not None:
# Self Attention Pad Mask
mask = mask.to(torch.bool)
if len(mask.shape) == 3:
mask = mask.unsqueeze(1) # for the head dimension
else:
mask = mask.unsqueeze(1).unsqueeze(2) # for the head and query dimension
len_q = query.size(0)
len_k = key.size(0)
bsz = query.size(1)
heads = self.num_heads
head_dim = self.head_dim
scale_t = torch.tensor([head_dim ** -0.5])
input_lin_q_results = self.linear_q(query)
queries = input_lin_q_results.view(len_q, bsz * heads, head_dim)
if incremental and ('c_k' in incremental_cache and 'c_v' in incremental_cache):
keys = incremental_cache['c_k']
values = incremental_cache['c_v']
keys = keys.view(len_k, bsz * heads, head_dim)
values = values.view(len_k, bsz * heads, head_dim)
else:
input_lin_kv_results = self.linear_kv(key)
input_lin_kv_results = input_lin_kv_results.view(len_k, bsz * heads, 2, head_dim)
keys = input_lin_kv_results[:, :, 0, :]
values = input_lin_kv_results[:, :, 1, :]
if incremental:
keys = keys.contiguous().view(len_k, bsz, heads * head_dim)
values = values.contiguous().view(len_k, bsz, heads * head_dim)
incremental_cache['c_k'] = keys
incremental_cache['c_v'] = values
keys = keys.view(len_k, bsz * heads, head_dim)
values = values.view(len_k, bsz * heads, head_dim)
matmul1_results = torch.matmul(queries.transpose(0, 1), keys.transpose(0, 1).transpose(1, 2))
matmul1_results.mul_(scale_t[0])
if mask is not None:
matmul1_results = matmul1_results.view(bsz, heads, len_q, len_k)
# after unsqueezing the mask should have size [bsz x 1 x 1 x seql_k]
matmul1_results = matmul1_results.masked_fill_(mask, float('-inf'))
matmul1_results = matmul1_results.view(bsz * heads, len_q, len_k)
softmax_results = F.softmax(matmul1_results, dim=-1, dtype=torch.float32).type_as(matmul1_results)
dropout_results = F.dropout(softmax_results, self.dropout, training=self.training)
matmul2_results = torch.matmul(dropout_results, values.transpose(0, 1)).transpose(0, 1)
matmul2_results = matmul2_results.contiguous().view(len_q, bsz, self.embed_dim)
outputs = self.out_linear(matmul2_results)
return outputs, softmax_results
else:
recompute = onmt.constants.recompute
outputs, coverage = self.attn_func(recompute, is_training,
self.num_heads, query, key,
self.in_proj_weight_q, self.in_proj_weight_kv,
self.out_proj_weight, attn_mask, self.dropout,
incremental, incremental_cache,
rotary_pos_enc, pos_emb_q, pos_emb_k,
False, True) # double precision False and return coverage True
return outputs, coverage
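# --- Attention core sketch (illustrative, standalone) ---
# The autograd branch above is plain scaled dot-product attention; a minimal
# rendition with toy sizes (no projections, heads, or masking):
if __name__ == '__main__':
    len_q, len_k, dim = 3, 5, 8
    q, k, v = torch.randn(len_q, dim), torch.randn(len_k, dim), torch.randn(len_k, dim)
    scores = torch.matmul(q, k.transpose(0, 1)) * dim ** -0.5
    attn = F.softmax(scores, dim=-1)
    print(torch.matmul(attn, v).shape)  # torch.Size([3, 8])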
|
133603
|
from .nlp.JsonFromFiles import JsonFromFilesDataset
dataset_list = {
"JsonFromFiles": JsonFromFilesDataset
}
|
133607
|
import logging
from django.conf import settings
from django.contrib import auth
from django.core.exceptions import PermissionDenied
from django.http import HttpResponse, HttpResponseRedirect, HttpResponseServerError
from django.views.decorators.cache import never_cache
from django.views.decorators.csrf import csrf_exempt
from onelogin.saml2.auth import OneLogin_Saml2_Auth
from onelogin.saml2.utils import OneLogin_Saml2_Utils
logger = logging.getLogger('django_saml')
def prepare_django_request(request):
"""Extract data from a Django request in the way that OneLogin expects."""
result = {
'https': 'on' if request.is_secure() else 'off',
'http_host': request.META['HTTP_HOST'],
'script_name': request.META['PATH_INFO'],
'server_port': request.META['SERVER_PORT'],
'get_data': request.GET.copy(),
'post_data': request.POST.copy()
}
if settings.SAML_DESTINATION_HOST is not None:
result['http_host'] = settings.SAML_DESTINATION_HOST
if settings.SAML_DESTINATION_HTTPS is not None:
result['https'] = settings.SAML_DESTINATION_HTTPS
        result['server_port'] = '443' if result['https'] == 'on' else '80'
if settings.SAML_DESTINATION_PORT is not None:
result['server_port'] = settings.SAML_DESTINATION_PORT
return result
@never_cache
def login(request):
"""Kick off a SAML login request."""
req = prepare_django_request(request)
saml_auth = OneLogin_Saml2_Auth(req, old_settings=settings.ONELOGIN_SAML_SETTINGS)
if 'next' in request.GET:
redirect_to = OneLogin_Saml2_Utils.get_self_url(req) + request.GET['next']
else:
redirect_to = OneLogin_Saml2_Utils.get_self_url(req) + settings.SAML_LOGIN_REDIRECT
url = saml_auth.login(redirect_to)
request.session['AuthNRequestID'] = saml_auth.get_last_request_id()
return HttpResponseRedirect(url)
@never_cache
def logout(request):
"""Kick off a SAML logout request."""
req = prepare_django_request(request)
saml_auth = OneLogin_Saml2_Auth(req, old_settings=settings.ONELOGIN_SAML_SETTINGS)
name_id = request.session.get('samlNameId', None)
session_index = request.session.get('samlSessionIndex', None)
name_id_format = request.session.get('samlNameIdFormat', None)
name_id_nq = request.session.get('samlNameIdNameQualifier', None)
name_id_spnq = request.session.get('samlNameIdSPNameQualifier', None)
auth.logout(request)
url = saml_auth.logout(
name_id=name_id, session_index=session_index, nq=name_id_nq, name_id_format=name_id_format, spnq=name_id_spnq,
return_to=OneLogin_Saml2_Utils.get_self_url(req) + settings.SAML_LOGOUT_REDIRECT
)
request.session['LogoutRequestID'] = saml_auth.get_last_request_id()
return HttpResponseRedirect(url)
@never_cache
def saml_sls(request):
"""Handle a LogoutResponse from the IdP."""
if request.method != 'GET':
return HttpResponse('Method not allowed.', status=405)
req = prepare_django_request(request)
saml_auth = OneLogin_Saml2_Auth(req, old_settings=settings.ONELOGIN_SAML_SETTINGS)
request_id = request.session.get('LogoutRequestID', None)
try:
url = saml_auth.process_slo(request_id=request_id, delete_session_cb=lambda: request.session.flush())
errors = saml_auth.get_errors()
if len(errors) == 0:
auth.logout(request)
redirect_to = url or settings.SAML_LOGOUT_REDIRECT
return HttpResponseRedirect(redirect_to)
else:
logger.exception(saml_auth.get_last_error_reason())
return HttpResponse("Invalid request", status=400)
except UnicodeDecodeError:
# Happens when someone messes with the response in the URL. No need to log an exception.
return HttpResponse("Invalid request - Unable to decode response", status=400)
except Exception as e:
logger.exception(e)
return HttpResponse("Invalid request", status=400)
@never_cache
@csrf_exempt
def saml_acs(request):
"""Handle an AuthenticationResponse from the IdP."""
if request.method != 'POST':
return HttpResponse('Method not allowed.', status=405)
try:
req = prepare_django_request(request)
saml_auth = OneLogin_Saml2_Auth(req, old_settings=settings.ONELOGIN_SAML_SETTINGS)
request_id = request.session.get('AuthNRequestID', None)
saml_auth.process_response(request_id=request_id)
errors = saml_auth.get_errors()
if not errors:
user = auth.authenticate(session_data=saml_auth.get_attributes())
if user is None:
if settings.SAML_NO_USER_REDIRECT:
return HttpResponseRedirect(settings.SAML_NO_USER_REDIRECT)
raise PermissionDenied()
auth.login(request, user)
# This data is used during Single Log Out
request.session['samlNameId'] = saml_auth.get_nameid()
request.session['samlNameIdFormat'] = saml_auth.get_nameid_format()
request.session['samlNameIdNameQualifier'] = saml_auth.get_nameid_nq()
request.session['samlNameIdSPNameQualifier'] = saml_auth.get_nameid_spnq()
request.session['samlSessionIndex'] = saml_auth.get_session_index()
if 'RelayState' in req['post_data'] \
and OneLogin_Saml2_Utils.get_self_url(req) != req['post_data']['RelayState']:
url = saml_auth.redirect_to(req['post_data']['RelayState'])
return HttpResponseRedirect(url)
else:
return HttpResponseRedirect(settings.SAML_LOGIN_REDIRECT)
logger.exception(saml_auth.get_last_error_reason())
return HttpResponse(content="Invalid Response", status=400)
except PermissionDenied:
raise
except Exception as e:
logger.exception(e)
return HttpResponse(content="Invalid Response", status=400)
def metadata(request):
"""Render the metadata of this service."""
metadata_dict = settings.ONELOGIN_SAML_SETTINGS.get_sp_metadata()
errors = settings.ONELOGIN_SAML_SETTINGS.validate_metadata(metadata_dict)
if len(errors) == 0:
resp = HttpResponse(content=metadata_dict, content_type='text/xml')
else:
resp = HttpResponseServerError(content=', '.join(errors))
return resp
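# --- Settings sketch (illustrative) ---
# The Django settings consumed above, with hypothetical values;
# ONELOGIN_SAML_SETTINGS is expected to be an initialized OneLogin settings object.
#
#     SAML_DESTINATION_HOST = 'sso.example.com'  # or None to use the request host
#     SAML_DESTINATION_HTTPS = 'on'              # or None
#     SAML_DESTINATION_PORT = None
#     SAML_LOGIN_REDIRECT = '/'
#     SAML_LOGOUT_REDIRECT = '/logged-out/'
#     SAML_NO_USER_REDIRECT = None               # raise PermissionDenied instead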
|
133617
|
import torchvision
from torchvision import models
import torch
class DeepLabV3Wrapper(torch.nn.Module):
def __init__(self, model):
super(DeepLabV3Wrapper, self).__init__()
self.model = model
def forward(self, input):
output = self.model(input)['out']
return output
def initialize_model(num_classes, keep_feature_extract=False, use_pretrained=True):
""" DeepLabV3 pretrained on a subset of COCO train2017, on the 20 categories that are present in the Pascal VOC dataset.
"""
model_deeplabv3 = models.segmentation.deeplabv3_resnet101(pretrained=use_pretrained, progress=True)
model_deeplabv3.aux_classifier = None
if keep_feature_extract:
for param in model_deeplabv3.parameters():
param.requires_grad = False
input_size = 224
model_deeplabv3.classifier = torchvision.models.segmentation.deeplabv3.DeepLabHead(2048, num_classes)
return model_deeplabv3, input_size
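# --- Usage sketch (illustrative) ---
# use_pretrained=False avoids the weight download; the input shape is hypothetical.
if __name__ == '__main__':
    model, input_size = initialize_model(num_classes=2, use_pretrained=False)
    wrapper = DeepLabV3Wrapper(model).eval()
    with torch.no_grad():
        out = wrapper(torch.rand(1, 3, input_size, input_size))
    print(out.shape)  # torch.Size([1, 2, 224, 224])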
|
133638
|
import pytest
from .utils import template_test, resolve_param_values_and_ids
def pytest_generate_tests(metafunc):
param_values, param_ids = resolve_param_values_and_ids(
schema_version='http://json-schema.org/draft-07/schema',
suite_dir='JSON-Schema-Test-Suite/tests/draft7',
ignored_suite_files=[
# Optional.
'ecmascript-regex.json',
'idn-hostname.json',
'iri.json',
],
)
metafunc.parametrize(['schema_version', 'schema', 'data', 'is_valid'], param_values, ids=param_ids)
# Real test function to be used with parametrization by previous hook function.
test = template_test
|
133721
|
import pytest
from reactivated import forms
from sample.server.apps.samples import models
@pytest.mark.django_db
@pytest.mark.urls("tests.urls")
def test_autocomplete(client):
composer = models.Composer.objects.create(name="<NAME>")
models.Composer.objects.create(name="<NAME>")
assert client.get("/autocomplete-view/").status_code == 200
assert (
client.post(
"/autocomplete-view/",
{"name": "Zarzuela", "style": "BUFFA", "composer": composer.pk},
).status_code
== 302
)
response = client.get(
"/autocomplete-view/", {"autocomplete": "name", "query": "Wagner"}
)
assert "Rendered form" in str(response.content)
response = client.get(
"/autocomplete-view/", {"autocomplete": "composer", "query": "Wagner"}
)
assert response.json()["results"][0]["label"] == "<NAME>"
@pytest.mark.django_db
@pytest.mark.urls("tests.urls")
def test_invalid_value(client):
response = client.post(
"/autocomplete-view/", {"name": "Zarzuela", "composer": "21s7"}
)
assert "Select a valid choice" in response.context["form"].errors["composer"][0]
assert response.context["form"]["composer"].value() == "21s7"
@pytest.mark.django_db
@pytest.mark.urls("tests.urls")
def test_typed_autocomplete(client):
composer = models.Composer.objects.create(name="<NAME>")
models.Composer.objects.create(name="<NAME>")
assert client.get("/typed-autocomplete-view/").status_code == 200
assert (
client.post(
"/typed-autocomplete-view/", {"name": "Zarzuela", "composer": composer.pk}
).status_code
== 302
)
response = client.get(
"/typed-autocomplete-view/", {"autocomplete": "name", "query": "Wagner"}
)
assert "" in str(response.content)
response = client.get(
"/typed-autocomplete-view/", {"autocomplete": "composer", "query": "Wagner"}
)
assert response.json()["results"][0]["label"] == "<NAME>"
def test_prefix_calculation(client):
assert forms.get_form_or_form_set_descriptor("opera_form_set-0-composer_field") == (
"opera_form_set",
"composer_field",
)
assert forms.get_form_or_form_set_descriptor("opera_form-composer_field") == (
"opera_form",
"composer_field",
)
assert forms.get_form_or_form_set_descriptor("composer_field") == (
None,
"composer_field",
)
|
133783
|
from ..meshio import form_mesh
import numpy as np
import logging
def merge_meshes(input_meshes):
""" Merge multiple meshes into a single mesh.
Args:
input_meshes (``list``): a list of input :class:`Mesh` objects.
Returns:
        A :py:class:`Mesh` consisting of all vertices, faces and voxels
        from ``input_meshes``. The following mesh attributes are defined:

        * ``vertex_sources``: For each vertex, the index of the input mesh it came from.
        * ``face_sources``: For each face, the index of the input mesh it came from,
          present if the output contains at least 1 face.
        * ``voxel_sources``: For each voxel, the index of the input mesh it came from,
          present if the output contains at least 1 voxel.
"""
logger = logging.getLogger(__name__)
vertices = []
faces = []
voxels = []
vertex_count = 0
vertex_sources = []
face_sources = []
voxel_sources = []
for i,mesh in enumerate(input_meshes):
vertices.append(mesh.vertices)
vertex_sources.append(np.ones(mesh.num_vertices) * i)
if mesh.num_faces > 0:
faces.append(mesh.faces + vertex_count)
face_sources.append(np.ones(mesh.num_faces) * i)
if mesh.num_voxels > 0:
voxels.append(mesh.voxels + vertex_count)
voxel_sources.append(np.ones(mesh.num_voxels) * i)
vertex_count += mesh.num_vertices
if len(vertices) > 0:
vertices = np.vstack(vertices)
vertex_sources = np.concatenate(vertex_sources)
else:
vertices = np.zeros((0, 3), dtype=float)
vertex_sources = np.array([])
if len(faces) > 0:
faces = np.vstack(faces)
face_sources = np.concatenate(face_sources)
else:
faces = np.zeros((0, 3), dtype=int)
face_sources = np.array([])
if len(voxels) > 0 and len(voxels) == len(input_meshes):
voxels = np.vstack(voxels)
voxel_sources = np.concatenate(voxel_sources)
else:
        # Not all input meshes contain voxels, so the merged mesh will not
        # be a valid volume representation. It is probably best to drop all
        # voxels.
if (len(voxels) > 0):
logger.warning("Not all input meshes represent a volume, "
"so dropping all voxels.")
voxels = np.zeros((0, 4), dtype=int)
voxel_sources = np.array([])
output_mesh = form_mesh(vertices, faces, voxels)
output_mesh.add_attribute("vertex_sources")
output_mesh.set_attribute("vertex_sources", vertex_sources)
if (len(face_sources) > 0):
output_mesh.add_attribute("face_sources")
output_mesh.set_attribute("face_sources", face_sources)
if (len(voxel_sources) > 0):
output_mesh.add_attribute("voxel_sources")
output_mesh.set_attribute("voxel_sources", voxel_sources)
return output_mesh
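# --- Index-offset sketch (illustrative, numpy only) ---
# The core of the merge is shifting face indices by the running vertex count;
# two single-triangle meshes merged by hand:
if __name__ == '__main__':
    v1, f1 = np.zeros((3, 3)), np.array([[0, 1, 2]])
    v2, f2 = np.ones((3, 3)), np.array([[0, 1, 2]])
    merged_vertices = np.vstack([v1, v2])
    merged_faces = np.vstack([f1, f2 + len(v1)])  # second mesh offset by 3
    print(merged_faces)  # [[0 1 2] [3 4 5]]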
|
133803
|
from collections.abc import MutableMapping
import numpy as np
_HIDDEN_ATTRS = frozenset(
[
"REFERENCE_LIST",
"CLASS",
"DIMENSION_LIST",
"NAME",
"_Netcdf4Dimid",
"_Netcdf4Coordinates",
"_nc3_strict",
"_NCProperties",
]
)
class Attributes(MutableMapping):
def __init__(self, h5attrs, check_dtype):
self._h5attrs = h5attrs
self._check_dtype = check_dtype
def __getitem__(self, key):
import h5py
if key in _HIDDEN_ATTRS:
raise KeyError(key)
# see https://github.com/h5netcdf/h5netcdf/issues/94 for details
if isinstance(self._h5attrs[key], h5py.Empty):
string_info = h5py.check_string_dtype(self._h5attrs[key].dtype)
if string_info and string_info.length == 1:
return b""
return self._h5attrs[key]
def __setitem__(self, key, value):
if key in _HIDDEN_ATTRS:
raise AttributeError("cannot write attribute with reserved name %r" % key)
if hasattr(value, "dtype"):
dtype = value.dtype
else:
dtype = np.asarray(value).dtype
self._check_dtype(dtype)
self._h5attrs[key] = value
def __delitem__(self, key):
del self._h5attrs[key]
def __iter__(self):
for key in self._h5attrs:
if key not in _HIDDEN_ATTRS:
yield key
def __len__(self):
hidden_count = sum(1 if attr in self._h5attrs else 0 for attr in _HIDDEN_ATTRS)
return len(self._h5attrs) - hidden_count
def __repr__(self):
return "\n".join(
["%r" % type(self)] + ["%s: %r" % (k, v) for k, v in self.items()]
)
|
133834
|
import numpy as np
from manimlib.constants import *
from manimlib.mobject.svg.tex_mobject import SingleStringTexMobject
from manimlib.mobject.types.vectorized_mobject import VMobject
class DecimalNumber(VMobject):
CONFIG = {
"num_decimal_places": 2,
"include_sign": False,
"group_with_commas": True,
"digit_to_digit_buff": 0.05,
"show_ellipsis": False,
"unit": None, # Aligned to bottom unless it starts with "^"
"include_background_rectangle": False,
"edge_to_fix": LEFT,
}
def __init__(self, number=0, **kwargs):
super().__init__(**kwargs)
self.number = number
self.initial_config = kwargs
if isinstance(number, complex):
formatter = self.get_complex_formatter()
else:
formatter = self.get_formatter()
num_string = formatter.format(number)
rounded_num = np.round(number, self.num_decimal_places)
if num_string.startswith("-") and rounded_num == 0:
if self.include_sign:
num_string = "+" + num_string[1:]
else:
num_string = num_string[1:]
self.add(*[
SingleStringTexMobject(char, **kwargs)
for char in num_string
])
# Add non-numerical bits
if self.show_ellipsis:
self.add(SingleStringTexMobject("\\dots"))
if num_string.startswith("-"):
minus = self.submobjects[0]
minus.next_to(
self.submobjects[1], LEFT,
buff=self.digit_to_digit_buff
)
if self.unit is not None:
self.unit_sign = SingleStringTexMobject(self.unit, color=self.color)
self.add(self.unit_sign)
self.arrange(
buff=self.digit_to_digit_buff,
aligned_edge=DOWN
)
# Handle alignment of parts that should be aligned
# to the bottom
for i, c in enumerate(num_string):
if c == "-" and len(num_string) > i + 1:
self[i].align_to(self[i + 1], UP)
self[i].shift(self[i+1].get_height() * DOWN / 2)
elif c == ",":
self[i].shift(self[i].get_height() * DOWN / 2)
if self.unit and self.unit.startswith("^"):
self.unit_sign.align_to(self, UP)
#
if self.include_background_rectangle:
self.add_background_rectangle()
def get_formatter(self, **kwargs):
"""
        Configuration is based first on instance attributes,
        but overwritten by any keyword argument. Relevant
        keywords:
- include_sign
- group_with_commas
- num_decimal_places
- field_name (e.g. 0 or 0.real)
"""
config = dict([
(attr, getattr(self, attr))
for attr in [
"include_sign",
"group_with_commas",
"num_decimal_places",
]
])
config.update(kwargs)
return "".join([
"{",
config.get("field_name", ""),
":",
"+" if config["include_sign"] else "",
"," if config["group_with_commas"] else "",
".", str(config["num_decimal_places"]), "f",
"}",
])
def get_complex_formatter(self, **kwargs):
return "".join([
self.get_formatter(field_name="0.real"),
self.get_formatter(field_name="0.imag", include_sign=True),
"i"
])
def set_value(self, number, **config):
full_config = dict(self.CONFIG)
full_config.update(self.initial_config)
full_config.update(config)
new_decimal = DecimalNumber(number, **full_config)
# Make sure last digit has constant height
new_decimal.scale(
self[-1].get_height() / new_decimal[-1].get_height()
)
new_decimal.move_to(self, self.edge_to_fix)
new_decimal.match_style(self)
old_family = self.get_family()
self.submobjects = new_decimal.submobjects
for mob in old_family:
# Dumb hack...due to how scene handles families
# of animated mobjects
mob.points[:] = 0
self.number = number
return self
def get_value(self):
return self.number
def increment_value(self, delta_t=1):
self.set_value(self.get_value() + delta_t)
class Integer(DecimalNumber):
CONFIG = {
"num_decimal_places": 0,
}
def get_value(self):
return int(np.round(super().get_value()))
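# --- Formatter sketch (illustrative) ---
# get_formatter assembles a standard format spec; with include_sign and
# group_with_commas it yields "{:+,.2f}". The complex formatter concatenates
# a real part and a signed imaginary part:
if __name__ == '__main__':
    print("{:+,.2f}".format(3141.5926))                         # +3,141.59
    print("{0.real:.2f}{0.imag:+.2f}i".format(complex(1, -2)))  # 1.00-2.00i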
|
133845
|
import FWCore.ParameterSet.Config as cms
def customise(process):
# add ECAL and HCAL specific Geant4 hits objects
process.g4SimHits.Watchers = cms.VPSet(cms.PSet(
instanceLabel = cms.untracked.string('EcalValidInfo'),
type = cms.string('EcalSimHitsValidProducer'),
verbose = cms.untracked.bool(False)
))
    # use the generator output directly, without Hector
process.g4SimHits.Generator.HepMCProductLabel = cms.string('generatorSmeared')
    # user schedule: use only calorimeter digitization and local reconstruction
process.g4SimHits.ECalSD.StoreSecondary = True
process.g4SimHits.CaloTrkProcessing.PutHistory = True
process.simEcalUnsuppressedDigis.apdAddToBarrel = True
return(process)
|
133886
|
import argparse
import json
import time
from pathlib import Path
from sklearn import metrics
from scipy import interpolate
import torch.nn.functional as F
from models import *
from utils.utils import *
from torchvision.transforms import transforms as T
from utils.datasets import LoadImages, JointDataset, collate_fn
def extract_ped_per_frame(
cfg,
input_root,
output_root,
weights,
batch_size=16,
img_size=416,
iou_thres=0.5,
conf_thres=0.3,
nms_thres=0.45,
print_interval=40,
nID=14455,
):
mkdir_if_missing(output_root)
# Initialize model
model = Darknet(cfg, img_size, nID)
# Load weights
if weights.endswith('.pt'): # pytorch format
model.load_state_dict(torch.load(weights, map_location='cpu')['model'], strict=False)
else: # darknet format
load_darknet_weights(model, weights)
model = torch.nn.DataParallel(model)
model.cuda().eval()
vlist = os.listdir(input_root)
vlist = [osp.join(input_root, v, 'img1') for v in vlist]
for vpath in vlist:
vroot = osp.join('/',*vpath.split('/')[:-1])
out_vroot = vroot.replace(input_root, output_root)
mkdir_if_missing(out_vroot)
dataloader = LoadImages(vpath, img_size)
for frame_id, (frame_path, frame, frame_ori) in enumerate(dataloader):
frame_ground_id = frame_path.split('/')[-1].split('.')[0]
            if frame_id % print_interval == 0:
print('Processing frame {} of video {}'.format(frame_id, frame_path))
blob = torch.from_numpy(frame).cuda().unsqueeze(0)
pred = model(blob)
pred = pred[pred[:,:,4] > conf_thres]
if len(pred) > 0:
dets = non_max_suppression(pred.unsqueeze(0), conf_thres, nms_thres)[0].cpu()
scale_coords(img_size, dets[:, :4], frame_ori.shape).round()
frame_dir = osp.join(out_vroot, frame_ground_id)
mkdir_if_missing(frame_dir)
dets = dets[:, :5]
for ped_id, det in enumerate(dets):
box = det[:4].int()
conf = det[4]
ped = frame_ori[box[1]:box[3], box[0]:box[2]]
ped_path = osp.join(frame_dir, ('{:04d}_'+ '{:d}_'*4 + '{:.2f}.jpg').format(ped_id, *box, conf))
cv2.imwrite(ped_path, ped)
if __name__ == '__main__':
parser = argparse.ArgumentParser(prog='test.py')
parser.add_argument('--batch-size', type=int, default=40, help='size of each image batch')
parser.add_argument('--cfg', type=str, default='cfg/yolov3.cfg', help='cfg file path')
parser.add_argument('--weights', type=str, default='weights/mot_64/latest.pt', help='path to weights file')
parser.add_argument('--iou-thres', type=float, default=0.3, help='iou threshold required to qualify as detected')
parser.add_argument('--conf-thres', type=float, default=0.3, help='object confidence threshold')
parser.add_argument('--nms-thres', type=float, default=0.3, help='iou threshold for non-maximum suppression')
parser.add_argument('--img-size', type=int, default=(1088, 608), help='size of each image dimension')
    parser.add_argument('--print-interval', type=int, default=10, help='interval between progress prints')
parser.add_argument('--input-root', type=str, default='/home/wangzd/datasets/youtube/data/0004/frame', help='path to input frames')
parser.add_argument('--output-root', type=str, default='/home/wangzd/datasets/youtube/data/0004/ped_per_frame', help='path to output frames')
opt = parser.parse_args()
print(opt, end='\n\n')
with torch.no_grad():
extract_ped_per_frame(
opt.cfg,
opt.input_root,
opt.output_root,
opt.weights,
opt.batch_size,
opt.img_size,
opt.iou_thres,
opt.conf_thres,
opt.nms_thres,
opt.print_interval,
)
|
133930
|
from functools import wraps
from typing import Callable, List, Optional, Tuple
from sanic.request import Request
from sanic_jwt_extended.exceptions import (
AccessDeniedError,
ConfigurationConflictError,
CSRFError,
FreshTokenRequiredError,
InvalidHeaderError,
NoAuthorizationError,
RevokedTokenError,
WrongTokenError,
)
from sanic_jwt_extended.jwt_manager import JWT
from sanic_jwt_extended.tokens import Token
try:
from hmac import compare_digest
except ImportError: # pragma: no cover
def compare_digest(a, b):
if isinstance(a, str):
a = a.encode("utf-8")
if isinstance(b, str):
b = b.encode("utf-8")
if len(a) != len(b):
return False
r = 0
for x, y in zip(a, b):
r |= x ^ y
return not r
jwt_get_function = Callable[[Request, bool], Tuple[str, Optional[str]]]
def _get_request(args) -> Request:
if isinstance(args[0], Request):
request = args[0]
else:
request = args[1]
return request
def _get_raw_jwt_from_request(request, is_access=True):
functions: List[jwt_get_function] = []
for eligible_location in JWT.config.token_location:
if eligible_location == "header":
functions.append(_get_raw_jwt_from_headers)
if eligible_location == "query":
functions.append(_get_raw_jwt_from_query_params)
if eligible_location == "cookies":
functions.append(_get_raw_jwt_from_cookies)
raw_jwt = None
csrf_value = None
errors = []
for f in functions:
try:
raw_jwt, csrf_value = f(request, is_access)
break
except NoAuthorizationError as e:
errors.append(str(e))
if not raw_jwt:
raise NoAuthorizationError(', '.join(errors))
return raw_jwt, csrf_value
def _get_raw_jwt_from_headers(request, is_access):
header_key = (
JWT.config.jwt_header_key if is_access else JWT.config.refresh_jwt_header_key
)
header_prefix = JWT.config.jwt_header_prefix
token_header = request.headers.get(header_key)
if not token_header:
raise NoAuthorizationError(f'Missing header "{header_key}"')
parts: List[str] = token_header.split()
if parts[0] != header_prefix or len(parts) != 2:
raise InvalidHeaderError(
f"Bad {header_key} header. Expected value '{header_prefix} <JWT>'"
)
encoded_token: str = parts[1]
return encoded_token, None
def _get_raw_jwt_from_query_params(request, _):
encoded_token = request.args.get(JWT.config.jwt_query_param_name)
if not encoded_token:
raise NoAuthorizationError(
f'Missing query parameter "{JWT.config.jwt_query_param_name}"'
)
return encoded_token, None
def _get_raw_jwt_from_cookies(request, is_access):
cookie_key = JWT.config.jwt_cookie if is_access else JWT.config.refresh_jwt_cookie
csrf_header_key = (
JWT.config.jwt_csrf_header if is_access else JWT.config.refresh_jwt_csrf_header
)
encoded_token = request.cookies.get(cookie_key)
csrf_value = None
if not encoded_token:
raise NoAuthorizationError(f'Missing cookie "{cookie_key}"')
if JWT.config.csrf_protect and request.method in JWT.config.csrf_request_methods:
csrf_value = request.headers.get(csrf_header_key)
if not csrf_value:
raise CSRFError("Missing CSRF token")
return encoded_token, csrf_value
def _csrf_check(csrf_from_request, csrf_from_jwt):
if not csrf_from_jwt or not isinstance(csrf_from_jwt, str):
raise CSRFError('Can not find valid CSRF data from token')
if not compare_digest(csrf_from_request, csrf_from_jwt):
raise CSRFError('CSRF double submit tokens do not match')
def jwt_required(
function=None, *, allow=None, deny=None, fresh_required=False,
):
def real(fn):
@wraps(fn)
async def wrapper(*args, **kwargs):
request = _get_request(args)
raw_jwt, csrf_value = _get_raw_jwt_from_request(request)
token_obj = Token(raw_jwt)
if csrf_value:
_csrf_check(csrf_value, token_obj.csrf)
if token_obj.type != "access":
raise WrongTokenError("Only access tokens are allowed")
if fresh_required and not token_obj.fresh:
raise FreshTokenRequiredError("Only fresh access tokens are allowed")
if allow and token_obj.role not in allow:
raise AccessDeniedError("You are not allowed to access here")
if deny and token_obj.role in deny:
raise AccessDeniedError("You are not allowed to access here")
if JWT.config.use_blacklist and await JWT.blacklist.is_blacklisted(
token_obj
):
raise RevokedTokenError("Token has been revoked")
kwargs["token"] = token_obj
return await fn(*args, **kwargs)
return wrapper
if function:
return real(function)
else:
if allow and deny:
raise ConfigurationConflictError(
"Can not use 'deny' and 'allow' option together."
)
return real
def jwt_optional(function):
@wraps(function)
async def wrapper(*args, **kwargs):
request = _get_request(args)
token_obj: Optional[Token] = None
try:
raw_jwt, csrf_value = _get_raw_jwt_from_request(request)
token_obj = Token(raw_jwt)
if csrf_value:
_csrf_check(csrf_value, token_obj.csrf)
if token_obj.type != "access":
raise WrongTokenError("Only access tokens are allowed")
except (NoAuthorizationError, InvalidHeaderError):
pass
kwargs["token"] = token_obj
return await function(*args, **kwargs)
return wrapper
def refresh_jwt_required(function=None, *, allow=None, deny=None):
def real(fn):
@wraps(fn)
async def wrapper(*args, **kwargs):
request = _get_request(args)
raw_jwt, csrf_value = _get_raw_jwt_from_request(request, is_access=False)
token_obj = Token(raw_jwt)
if csrf_value:
_csrf_check(csrf_value, token_obj.csrf)
if token_obj.type != "refresh":
raise WrongTokenError("Only refresh tokens are allowed")
            if allow and token_obj.role not in allow:
                raise AccessDeniedError("You are not allowed to refresh here")
            if deny and token_obj.role in deny:
                raise AccessDeniedError("You are not allowed to refresh here")
if JWT.config.use_blacklist and await JWT.blacklist.is_blacklisted(
token_obj
):
raise RevokedTokenError("Token has been revoked")
kwargs["token"] = token_obj
return await fn(*args, **kwargs)
return wrapper
if function:
return real(function)
else:
if allow and deny:
raise ConfigurationConflictError(
"Can not use 'deny' and 'allow' option together."
)
return real
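# --- Usage sketch (illustrative, not part of the library) ---
# A handler protected by the decorators above, assuming an async framework
# whose positional handler args include the request (see _get_request). The
# `app` object and route-registration syntax are hypothetical.
#
# @app.route("/admin")
# @jwt_required(allow=["admin"], fresh_required=True)
# async def admin_view(request, token):
#     # `token` is the validated Token injected via kwargs["token"] above
#     return {"role": token.role}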
|
133966
|
import string
import random
def uuid(length=8, lower=True):
"""sebbe-approved UUID"""
# risk of collision
# mixed case: 8 characters -> 1 in 54 trillion
# lower case: 8 characters -> 1 in 208 billion
letters = string.ascii_letters
if lower:
letters = letters.lower()
    return ''.join(random.choice(letters) for _ in range(length))
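# --- Example (illustrative): the keyspace sizes behind the collision notes above ---
if __name__ == "__main__":
    print(uuid())                  # e.g. 'qhfkzmrw'
    print(uuid(12, lower=False))   # mixed case, e.g. 'aZkQpLmXcVbN'
    # 52**8 ~= 53.5 trillion mixed-case combinations, 26**8 ~= 209 billion lower-case
    print(f"mixed case: {52 ** 8:,}  lower case: {26 ** 8:,}")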
|
133968
|
from sherlock.codelib.analyzer.factory import ListManagerFactory
class Function(object):
def __init__(self, name, args_type, return_type, code_generator):
self.name = name
self.return_type = return_type
self.args_type = args_type
self.code_generator = code_generator
    def is_args_type_match(self, args_type):
        # Signatures of different lengths can never match; the element-wise
        # comparison below assumes equal lengths.
        if len(args_type) != len(self.args_type):
            return False
        return all(x == y for x, y in zip(self.args_type, args_type))
def __repr__(self):
return 'Func(name=%s, args_type=%s, return_type=%s)' % (self.name, repr(self.args_type), repr(self.return_type))
class Functions(ListManagerFactory):
pass
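# --- Example (illustrative) ---
# f = Function('add', ['int', 'int'], 'int', code_generator=None)
# f.is_args_type_match(['int', 'int'])  # -> True
# f.is_args_type_match(['int'])         # -> False (length mismatch)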
|
133979
|
import os, pickle, json
from collections import deque
import numpy as np
import tensorflow as tf
import torch
import torch.nn.functional as F
from guacamol.distribution_matching_generator import DistributionMatchingGenerator
from rdkit import Chem
from torch.utils.data import DataLoader, Dataset
from tqdm import tqdm
from data.gen_targets import get_symbol_list
from src.data.loader import SizeSampler, graph_collate_fn
from src.utils import set_seed_if, graph_to_mol, get_index_method, filter_top_k, calculate_graph_properties,\
dct_to_cuda_inplace, copy_graph_remove_data
if int(tf.__version__.split('.')[0]) <= 1:
config = tf.ConfigProto()
config.gpu_options.allow_growth = True
sess = tf.Session(config=config)
class MockGenerator(DistributionMatchingGenerator):
def __init__(self, smiles_list, num_samples_to_generate, train_smiles_list=None, remove_non_novel=False):
self.smiles_list = smiles_list
if remove_non_novel is True:
self.smiles_list = [s for s in self.smiles_list if s not in train_smiles_list]
self.smiles_list = self.smiles_list[:num_samples_to_generate]
def generate(self, number_samples):
smiles_to_return = self.smiles_list[:number_samples]
self.smiles_list = self.smiles_list[number_samples:] + self.smiles_list[:number_samples]
return smiles_to_return
class GenDataset(Dataset):
def __init__(self, dataset, number_samples):
self.dataset = dataset
self.number_samples = number_samples
def __getitem__(self, index):
return self.dataset[index]
def __len__(self):
return self.number_samples
class GraphGenerator(DistributionMatchingGenerator):
def __init__(self, train_data, model, generation_algorithm, random_init, num_iters, num_sampling_iters, batch_size,
edges_per_batch=-1, retrieve_train_graphs=False, local_cpu=False, cp_save_dir=None,
set_seed_at_load_iter=False, graph_type='QM9', sample_uniformly=False, mask_comp_to_predict=False,
maintain_minority_proportion=False, no_edge_present_type='learned', mask_independently=False,
one_property_per_loop=False, checkpointing_period=1, save_period=1, evaluation_period=1,
evaluate_finegrained=False, save_finegrained=False, variables_per_gibbs_iteration=1, top_k=-1,
save_init=False, cond_property_values={}):
super().__init__()
self.model = model
self.generation_algorithm = generation_algorithm
self.random_init = random_init
self.sample_uniformly = sample_uniformly
self.num_iters = num_iters
self.num_sampling_iters = num_sampling_iters
self.num_argmax_iters = self.num_iters - self.num_sampling_iters
self.train_data = train_data
self.batch_size = batch_size
self.edges_per_batch = edges_per_batch
self.local_cpu = local_cpu
self.cp_save_dir = cp_save_dir
self.calculate_length_dist()
self.get_special_inds()
self.set_seed_at_load_iter = set_seed_at_load_iter
self.symbol_list = get_symbol_list(graph_type)[:self.train_data.num_node_types]
self.retrieve_train_graphs = retrieve_train_graphs
self.mask_comp_to_predict = mask_comp_to_predict
self.maintain_minority_proportion = maintain_minority_proportion
self.no_edge_present_type = no_edge_present_type
self.mask_independently = mask_independently
self.one_property_per_loop = one_property_per_loop
self.index_method = get_index_method()
self.checkpointing_period = checkpointing_period
self.save_period = save_period
self.evaluation_period = evaluation_period
self.evaluate_finegrained = evaluate_finegrained
self.save_finegrained = save_finegrained
self.variables_per_gibbs_iteration = variables_per_gibbs_iteration
self.top_k = top_k
self.save_init = save_init
self.model_forward = self.model_forward_mgm
if self.one_property_per_loop is True:
self.node_property_ints = {'node_type': 1, 'hydrogens': 2, 'charge': 3, 'is_in_ring': 4, 'is_aromatic': 5,
'chirality': 6}
self.edge_property_ints = {'edge_type': 7}
else:
self.node_property_ints = {'node_type': 1, 'hydrogens': 1, 'charge': 1, 'is_in_ring': 1, 'is_aromatic': 1,
'chirality': 1}
self.edge_property_ints = {'edge_type': 2}
self.cond_property_values = {k: float(v) for k, v in cond_property_values.items()}
def generate(self, number_samples):
load_path, load_iters = get_load_path(self.num_sampling_iters, self.num_argmax_iters, self.cp_save_dir)
all_init_node_properties, all_init_edge_properties, all_node_masks, all_edge_masks = \
self.get_all_init_variables(load_path, number_samples)
if self.set_seed_at_load_iter is True:
set_seed_if(load_iters)
retrieve_train_graphs = self.retrieve_train_graphs
for j in range(load_iters, self.num_iters):
if j > 0:
retrieve_train_graphs = False
if self.generation_algorithm == 'Gibbs':
self.train_data.do_not_corrupt = True
loader = self.get_dataloader(all_init_node_properties, all_node_masks, all_init_edge_properties,
number_samples, retrieve_train_graphs)
use_argmax = (j >= self.num_sampling_iters)
all_init_node_properties, all_init_edge_properties, all_node_masks, \
smiles_list = self.carry_out_iteration(loader, use_argmax)
return smiles_list
def generate_with_evaluation(self, num_samples_to_generate, smiles_dataset_path, output_dir,
num_samples_to_evaluate, evaluate_connected_only=False):
load_path, load_iters = get_load_path(self.num_sampling_iters, self.num_argmax_iters, self.cp_save_dir)
all_init_node_properties, all_init_edge_properties, all_node_masks, all_edge_masks = \
self.get_all_init_variables(load_path, num_samples_to_generate)
if self.save_init is True and self.random_init is True and load_iters == 0:
# Save smiles representations of initialised molecules
smiles_list = []
num_nodes = all_node_masks.sum(-1)
for i in range(len(all_init_node_properties['node_type'])):
mol = graph_to_mol({k: v[i][:int(num_nodes[i])].astype(int) \
for k, v in all_init_node_properties.items()},
{k: v[i][:int(num_nodes[i]), :int(num_nodes[i])].astype(int) \
for k, v in all_init_edge_properties.items()},
min_charge=self.train_data.min_charge, symbol_list=self.symbol_list)
smiles_list.append(Chem.MolToSmiles(mol))
save_smiles_list(smiles_list, os.path.join(output_dir, 'smiles_0_0.txt'))
del smiles_list, mol, num_nodes
if self.set_seed_at_load_iter is True:
set_seed_if(load_iters)
retrieve_train_graphs = self.retrieve_train_graphs
for j in tqdm(range(load_iters, self.num_iters)):
if j > 0:
retrieve_train_graphs = False
if self.generation_algorithm == 'Gibbs':
self.train_data.do_not_corrupt = True
loader = self.get_dataloader(all_init_node_properties, all_node_masks, all_init_edge_properties,
num_samples_to_generate, retrieve_train_graphs)
use_argmax = (j >= self.num_sampling_iters)
all_init_node_properties, all_init_edge_properties, all_node_masks,\
smiles_list = self.carry_out_iteration(loader, use_argmax)
sampling_iters_completed = min(j + 1, self.num_sampling_iters)
argmax_iters_completed = max(0, j + 1 - self.num_sampling_iters)
if (j + 1 - load_iters) % self.checkpointing_period == 0:
self.save_checkpoints(all_init_node_properties, all_init_edge_properties,
sampling_iters_completed, argmax_iters_completed)
if (j + 1 - load_iters) % self.save_period == 0 or (self.save_finegrained is True and (j + 1) <= 10):
smiles_output_path = os.path.join(output_dir, 'smiles_{}_{}.txt'.format(
sampling_iters_completed, argmax_iters_completed))
save_smiles_list(smiles_list, smiles_output_path)
if (j + 1 - load_iters) % self.evaluation_period == 0 or \
(self.evaluate_finegrained is True and (j + 1) <= 10):
json_output_path = os.path.join(output_dir, 'distribution_results_{}_{}.json'.format(
sampling_iters_completed, argmax_iters_completed))
evaluate_uncond_generation(MockGenerator(smiles_list, num_samples_to_generate),
smiles_dataset_path, json_output_path, num_samples_to_evaluate,
evaluate_connected_only)
if self.cond_property_values:
cond_json_output_path = os.path.join(output_dir, 'cond_results_{}_{}.json'.format(
sampling_iters_completed, argmax_iters_completed))
self.evaluate_cond_generation(smiles_list[:num_samples_to_evaluate], cond_json_output_path)
def carry_out_iteration(self, loader, use_argmax):
mols, smiles_list = [], []
all_final_node_properties = {name: [] for name in self.train_data.node_property_names}
all_final_edge_properties = {name: [] for name in self.train_data.edge_property_names}
all_final_node_masks = []
print('Generator length: {}'.format(len(loader)), flush=True)
for batch_init_graph, _, batch_target_type_graph, batch_target_type_transpose_graph, \
graph_properties, binary_graph_properties in tqdm(loader):
if self.local_cpu is False:
batch_init_graph = batch_init_graph.to(torch.device('cuda'))
dct_to_cuda_inplace(graph_properties)
if binary_graph_properties: binary_graph_properties = binary_graph_properties.cuda()
batch_init_graph = self.sample_simultaneously(batch_init_graph,
batch_target_type_graph, batch_target_type_transpose_graph,
graph_properties, binary_graph_properties, use_argmax)
batch_init_graph = batch_init_graph.to(torch.device('cpu'))
self.append_and_convert_graphs(batch_init_graph, all_final_node_properties, all_final_edge_properties,
all_final_node_masks, mols, smiles_list)
return all_final_node_properties, all_final_edge_properties, all_final_node_masks, smiles_list
def get_all_init_variables(self, load_path, number_samples):
if load_path is not None:
with open(load_path, 'rb') as f:
load_info = pickle.load(f)
all_init_node_properties, all_init_edge_properties = load_info
all_node_masks = [(node_type != self.train_data.node_properties['node_type']['empty_index']) \
for node_type in all_init_node_properties['node_type']]
all_edge_masks = [(edge_type != self.train_data.edge_properties['edge_type']['empty_index']) \
for edge_type in all_init_edge_properties['edge_type']]
else:
lengths = self.sample_lengths(number_samples)
all_init_node_properties, all_init_edge_properties, all_node_masks, all_edge_masks = \
self.get_masked_variables(lengths, number_samples, pad=False)
return all_init_node_properties, all_init_edge_properties, all_node_masks, all_edge_masks
def get_dataloader(self, all_init_node_properties, all_node_masks, all_init_edge_properties, number_samples,
retrieve_train_graphs):
gen_dataset = GenDataset(self.train_data, number_samples)
if retrieve_train_graphs is False:
for name, node_property in all_init_node_properties.items():
data = []
for i, single_data_property in enumerate(node_property):
if name == 'charge': single_data_property -= abs(self.train_data.min_charge)
data.append(single_data_property[:int(all_node_masks[i].sum())])
gen_dataset.dataset.node_properties[name]['data'] = data
for name, edge_property in all_init_edge_properties.items():
data = []
for i, single_data_property in enumerate(edge_property):
data.append(single_data_property[:int(all_node_masks[i].sum()), :int(all_node_masks[i].sum())])
gen_dataset.dataset.edge_properties[name]['data'] = data
for name, value in self.cond_property_values.items():
gen_dataset.dataset.graph_properties[name] = np.ones_like(gen_dataset.dataset.graph_properties[name]) \
* value
if self.edges_per_batch > 0:
batch_sampler = SizeSampler(gen_dataset, self.edges_per_batch)
batch_sampler.batches.reverse()
loader = DataLoader(gen_dataset, batch_sampler=batch_sampler, collate_fn=graph_collate_fn)
else:
loader = DataLoader(gen_dataset, batch_size=self.batch_size, collate_fn=graph_collate_fn)
return loader
def sample_simultaneously(self, batch_init_graph, batch_target_type_graph, batch_target_type_transpose_graph,
graph_properties=None, binary_graph_properties=None, use_argmax=False):
batch_preds_graph = copy_graph_remove_data(batch_init_graph)
with torch.no_grad():
_, batch_scores_graph, graph_property_scores = self.model_forward(batch_init_graph, graph_properties,
binary_graph_properties)
# This breaks symmetry for edge data
batch_preds_graph = self.predict_from_scores(batch_scores_graph, batch_preds_graph, use_argmax)
for name, target_type in batch_target_type_graph.ndata.items():
batch_init_graph.ndata[name][target_type.numpy() != 0] = \
batch_preds_graph.ndata[name][target_type.numpy() != 0]
for name, target_type in batch_target_type_graph.edata.items():
batch_init_graph.edata[name][target_type.numpy() != 0] = \
batch_preds_graph.edata[name][target_type.numpy() != 0]
batch_init_graph.edata[name][batch_target_type_transpose_graph.edata[name].numpy() != 0] = \
batch_preds_graph.edata[name][batch_target_type_transpose_graph.edata[name].numpy() != 0]
return batch_init_graph
def model_forward_mgm(self, batch_init_graph, graph_properties=None, binary_graph_properties=None):
return self.model(batch_init_graph, graph_properties, binary_graph_properties)
def predict_from_scores(self, batch_scores_graph, batch_preds_graph, use_argmax=False):
for property_name, scores in batch_scores_graph.ndata.items():
if use_argmax is True:
batch_preds_graph.ndata[property_name] = torch.argmax(F.softmax(scores, -1), dim=-1)
else:
if self.top_k > 0:
scores = filter_top_k(scores, self.top_k)
batch_preds_graph.ndata[property_name] = torch.distributions.Categorical(F.softmax(scores, -1)).sample()
for property_name, scores in batch_scores_graph.edata.items():
if use_argmax is True:
batch_preds_graph.edata[property_name] = torch.argmax(F.softmax(scores, -1), dim=-1)
else:
if self.top_k > 0:
scores = filter_top_k(scores, self.top_k)
batch_preds_graph.edata[property_name] = torch.distributions.Categorical(F.softmax(scores, -1)).sample()
return batch_preds_graph
def append_and_convert_graphs(self, batch_init_graph,
all_final_node_properties, all_final_edge_properties, all_final_node_masks,
mols, smiles_list):
all_final_len = len(all_final_node_properties[list(all_final_node_properties.keys())[0]])
node_start, edge_start = 0, 0
for i, num_nodes in enumerate(batch_init_graph.batch_num_nodes()):
num_edges = batch_init_graph.batch_num_edges()[i]
for name, property in batch_init_graph.ndata.items():
all_final_node_properties[name].append(property[node_start:node_start+num_nodes].numpy())
all_final_node_masks.append(np.ones(num_nodes)) # redundant but needs to remain until node masks removed
# from generation
for name, property in batch_init_graph.edata.items():
single_datapoint_fc_data = np.zeros((num_nodes, num_nodes))
single_datapoint_fc_data[
batch_init_graph.edges()[0][edge_start:edge_start+num_edges].numpy() - node_start,
batch_init_graph.edges()[1][edge_start:edge_start+num_edges].numpy() - node_start] = \
property[edge_start:edge_start+num_edges].numpy()
# force symmetry, which is broken earlier by sampling edge predictions
single_datapoint_fc_data = np.triu(single_datapoint_fc_data) + np.tril(single_datapoint_fc_data.T, -1)
all_final_edge_properties[name].append(single_datapoint_fc_data)
node_start += num_nodes
edge_start += num_edges
mol = graph_to_mol({k: v[all_final_len+i] for k, v in all_final_node_properties.items()},
{k: v[all_final_len+i] for k, v in all_final_edge_properties.items()},
min_charge=self.train_data.min_charge, symbol_list=self.symbol_list)
mols.append(mol)
smiles_list.append(Chem.MolToSmiles(mol))
def save_checkpoints(self, all_final_node_properties, all_final_edge_properties, num_sampling_iters,
num_argmax_iters):
if self.cp_save_dir is not None:
save_path = os.path.join(self.cp_save_dir, 'gen_checkpoint_{}_{}.p'.format(
num_sampling_iters, num_argmax_iters))
with open(save_path, 'wb') as f:
pickle.dump([all_final_node_properties, all_final_edge_properties], f)
def calculate_length_dist(self):
lengths_dict = {}
for node_type in self.train_data.node_properties['node_type']['data']:
length = len(node_type)
if length not in lengths_dict:
lengths_dict[length] = 1
else:
lengths_dict[length] += 1
# Normalise
for key in lengths_dict:
lengths_dict[key] /= len(self.train_data)
self.length_dist = lengths_dict
def get_special_inds(self):
self.max_nodes = self.train_data.max_nodes
self.max_edges = int(self.max_nodes * (self.max_nodes-1)/2)
def sample_lengths(self, number_samples=1):
lengths = np.array(list(self.length_dist.keys()))
probs = np.array(list(self.length_dist.values()))
samples = np.random.choice(lengths, number_samples, p=probs)
return samples
def get_masked_variables(self, lengths, number_samples, pad=True):
if pad is True:
all_init_node_properties, all_init_edge_properties = {}, {}
for name, property_info in self.train_data.node_properties.items():
all_init_node_properties[name] = np.ones((number_samples, self.max_nodes)) * \
property_info['empty_index']
for name, property_info in self.train_data.edge_properties.items():
all_init_edge_properties[name] = np.ones((number_samples, self.max_nodes, self.max_nodes)) * \
property_info['empty_index']
node_mask = np.zeros((number_samples, self.max_nodes))
edge_mask = np.zeros((number_samples, self.max_nodes, self.max_nodes))
else:
all_init_node_properties = {name: [] for name in self.train_data.node_property_names}
all_init_edge_properties = {name: [] for name in self.train_data.edge_property_names}
node_mask, edge_mask = [], []
for sample_num, length in enumerate(lengths):
if pad is False:
for name, property_info in self.train_data.node_properties.items():
all_init_node_properties[name].append(np.ones(length) * property_info['empty_index'])
for name, property_info in self.train_data.edge_properties.items():
all_init_edge_properties[name].append(np.ones((length, length)) * property_info['empty_index'])
node_mask.append(np.zeros(length))
edge_mask.append(np.zeros((length, length)))
if self.random_init:
for name, property_info in self.train_data.node_properties.items():
if self.sample_uniformly is True:
samples = np.random.randint(0, property_info['num_categories'], size=length)
else:
samples = torch.distributions.Categorical(1/self.train_data.node_property_weights[name]).sample(
[length]).numpy()
all_init_node_properties[name][sample_num][:length] = samples
for name, property_info in self.train_data.edge_properties.items():
if self.sample_uniformly is True:
samples = np.random.randint(0, property_info['num_categories'],
size=int(length * (length - 1) / 2))
else:
samples = torch.distributions.Categorical(1/self.train_data.edge_property_weights[name]).sample(
[int(length * (length - 1) / 2)]).numpy()
rand_edges = deque(samples)
for i in range(length):
all_init_edge_properties[name][sample_num][i, i] = 0
for j in range(i, length):
if i != j:
all_init_edge_properties[name][sample_num][i, j] = \
all_init_edge_properties[name][sample_num][j, i] = rand_edges.pop()
else:
for name, init_node_property in all_init_node_properties.items():
init_node_property[sample_num][:length] = self.train_data.node_properties[name]['mask_index']
for name, init_edge_property in all_init_edge_properties.items():
init_edge_property[sample_num][:length, :length] = \
self.train_data.edge_properties[name]['mask_index']
node_mask[sample_num][:length] = 1
edge_mask[sample_num][:length, :length] = 1
return all_init_node_properties, all_init_edge_properties, node_mask, edge_mask
def evaluate_cond_generation(self, smiles_list, json_output_path):
valid_mols = []
for s in smiles_list:
mol = Chem.MolFromSmiles(s)
if mol is not None: valid_mols.append(mol)
graph_properties = calculate_graph_properties(valid_mols, self.cond_property_values.keys())
graph_property_stats = {name: {'mean': np.mean(graph_property), 'median': np.median(graph_property),
'std': np.std(graph_property)}
for name, graph_property in graph_properties.items()}
with open(json_output_path, 'w') as f:
json.dump(graph_property_stats, f)
def get_load_path(num_sampling_iters, num_argmax_iters, cp_save_dir):
    if cp_save_dir is None:  # no checkpoint directory configured
        return None, 0
    all_cp_iters = {}
    for fname in os.listdir(cp_save_dir):
if 'gen_checkpoint' not in fname: continue
split_fname = os.path.splitext(fname)[0].split('_')
cp_sampling_iters, cp_argmax_iters = int(split_fname[2]), int(split_fname[3])
if cp_sampling_iters in all_cp_iters.keys():
all_cp_iters[cp_sampling_iters].append(cp_argmax_iters)
else:
all_cp_iters[cp_sampling_iters] = [cp_argmax_iters]
if len(all_cp_iters) == 0:
return None, 0
cp_max_sampling_iters = max(all_cp_iters.keys())
sampling_iters_to_load = min(cp_max_sampling_iters, num_sampling_iters)
if sampling_iters_to_load == num_sampling_iters and sampling_iters_to_load in all_cp_iters.keys():
argmax_iters_to_load = min(max(all_cp_iters[sampling_iters_to_load]), num_argmax_iters)
else:
argmax_iters_to_load = 0
if sampling_iters_to_load == argmax_iters_to_load == 0:
return None, 0
load_path = os.path.join(cp_save_dir,
'gen_checkpoint_{}_{}.p'.format(sampling_iters_to_load, argmax_iters_to_load))
return load_path, sampling_iters_to_load + argmax_iters_to_load
def get_shuffled_array(arrays, length=None):
"""
:arg
arrays: list of generation_arrays
length: length of an output generation array with padding
:returns
shuffled_arrays: padded matrix of shape (number of generation arrays, length)
"""
if type(arrays[0][0]) == tuple:
shuffled_arrays = np.ones((len(arrays), length), dtype=(int, 2)) * -1
else:
shuffled_arrays = np.ones((len(arrays), length)) * -1
for i, array in enumerate(arrays):
array = np.random.permutation(array)
shuffled_arrays[i, :len(array)] = array
return shuffled_arrays
def save_smiles_list(smiles_list, smiles_output_path):
with open(smiles_output_path, 'w') as f:
for smiles in smiles_list:
f.write(smiles + '\n')
def evaluate_uncond_generation(mock_generator, smiles_dataset_path,
json_output_path, num_samples_to_evaluate, evaluate_connected_only=False):
from guacamol.assess_distribution_learning import _assess_distribution_learning
if evaluate_connected_only is True:
mock_generator.smiles_list = [s for s in mock_generator.smiles_list if '.' not in s]
_assess_distribution_learning(mock_generator, smiles_dataset_path, json_output_file=json_output_path,
benchmark_version='v1', number_samples=num_samples_to_evaluate)
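# --- Sketch (illustrative): the empirical graph-size sampling performed by
# GraphGenerator.calc_length_dist / sample_lengths above, in isolation.
# The counts below are hypothetical, as is the helper's name.
def _sample_graph_sizes_demo(number_samples=5):
    counts = {9: 120, 8: 60, 7: 20}  # node count -> frequency in training data
    total = sum(counts.values())
    lengths = np.array(list(counts.keys()))
    probs = np.array([c / total for c in counts.values()])
    return np.random.choice(lengths, number_samples, p=probs)  # e.g. array([9, 8, 9, 9, 7])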
|
134050
|
import logging
logging.basicConfig(level=logging.DEBUG)
from slack_bolt import App, BoltContext
from slack_bolt.oauth import OAuthFlow
from slack_sdk import WebClient
app = App(oauth_flow=OAuthFlow.sqlite3(database="./slackapp.db"))
@app.use
def dump(context, next, logger):
logger.info(context)
next()
@app.use
def call_apis_with_team_id(context: BoltContext, client: WebClient, next):
# client.users_list()
client.bots_info(bot=context.bot_id)
next()
@app.event("app_mention")
def handle_app_mentions(body, say, logger):
logger.info(body)
say("What's up?")
@app.command("/org-level-command")
def command(ack):
ack("I got it!")
@app.shortcut("org-level-shortcut")
def shortcut(ack):
ack()
@app.event("team_access_granted")
def team_access_granted(event):
pass
@app.event("team_access_revoked")
def team_access_revoked(event):
pass
if __name__ == "__main__":
app.start(3000)
# pip install slack_bolt
# export SLACK_SIGNING_SECRET=***
# export SLACK_BOT_TOKEN=xoxb-***
# export SLACK_CLIENT_ID=111.111
# export SLACK_CLIENT_SECRET=***
# export SLACK_SCOPES=app_mentions:read,channels:history,im:history,chat:write
# python oauth_app.py
|
134108
|
from setuptools import setup, find_packages
# read readme
with open("README.md", "r") as f:
readme = f.read()
setup(
name="bpreg",
version="1.1.0",
packages=find_packages(),
url="https://github.com/MIC-DKFZ/BodyPartRegression",
include_package_data=True,
package_data={"bpreg": ["settings/body-part-metadata.md"]},
test_suite="unittest",
install_requires=[
"pytorch_lightning==1.2.10",
"nibabel==3.2.1",
"scipy==1.7.0",
"albumentations==0.5.2",
"dataclasses",
"pandas==1.2.1",
"torch==1.8.1",
"torchvision==0.9.1",
],
data_files=[("models", ["bpreg/settings/body-part-metadata.md"])],
long_description=readme,
long_description_content_type="text/markdown",
author="Division of Medical Image Computing, German Cancer Research Center",
author_email="<EMAIL>",
maintainer_email="<EMAIL>",
entry_points={
"console_scripts": [
"bpreg_predict = bpreg.scripts.bpreg_inference:main",
]
},
)
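# Note: installing this package (e.g. `pip install .`) exposes the console
# script declared in entry_points above, so `bpreg_predict` becomes available
# on the PATH and dispatches to bpreg.scripts.bpreg_inference:main.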
|
134109
|
number = int(input())
if any(number % int(i) for i in input().split()):
print('not divisible by all')
else:
print('divisible by all')
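# Example (illustrative): with inputs "12" and "2 3 4", every remainder is 0,
# so any(...) is falsy and the program prints 'divisible by all'; with inputs
# "12" and "2 5", 12 % 5 == 2 is truthy and it prints 'not divisible by all'.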
|
134123
|
try:
import unittest
from copy import copy
from numpy.testing import assert_allclose
import numpy as np
from spitfire.chemistry.mechanism import ChemicalMechanismSpec
from spitfire.chemistry.library import Library, Dimension
from spitfire.chemistry.flamelet import FlameletSpec
from spitfire.chemistry.tabulation import build_adiabatic_eq_library, apply_mixing_model, PDFSpec
import cantera
import cantera as ct
import pytabprops
if int(cantera.__version__.replace('.', '')) >= 250:
class Test(unittest.TestCase):
def test(self):
gas = ct.Solution('h2o2.yaml', transport_model='Multi')
mech = ChemicalMechanismSpec.from_solution(gas)
fs = FlameletSpec(mech_spec=mech,
initial_condition='equilibrium',
oxy_stream=mech.stream('TPX', (300, 1.e5, 'O2:1, N2:3.76')),
fuel_stream=mech.stream('TPY', (300, 1.e5, 'H2:1')),
grid_points=16)
eq_lib1 = build_adiabatic_eq_library(fs, verbose=False)
z_dim = Dimension(eq_lib1.mixture_fraction_name, eq_lib1.mixture_fraction_values)
fuel_T_dim = Dimension('fuel_temperature', np.linspace(0.0, 1.0, 4))
air_T_dim = Dimension('air_temperature', np.linspace(0.0, 1.0, 3))
eq_lib2 = Library(z_dim, fuel_T_dim)
eq_lib2T = Library(fuel_T_dim, z_dim)
eq_lib3 = Library(z_dim, fuel_T_dim, air_T_dim)
eq_lib3T1 = Library(fuel_T_dim, z_dim, air_T_dim)
eq_lib3T2 = Library(fuel_T_dim, air_T_dim, z_dim)
for p in eq_lib1.props:
eq_lib2[p] = eq_lib2.get_empty_dataset()
eq_lib2T[p] = eq_lib2T.get_empty_dataset()
eq_lib3[p] = eq_lib3.get_empty_dataset()
eq_lib3T1[p] = eq_lib3T1.get_empty_dataset()
eq_lib3T2[p] = eq_lib3T2.get_empty_dataset()
for i, fuel_T_offset in enumerate(fuel_T_dim.values):
fuel_T = 300 + fuel_T_offset * 500.
fs2 = copy(fs)
fs2.fuel_stream.TP = fuel_T, 1.e5
eq_tmp = build_adiabatic_eq_library(fs2, verbose=False)
for p in eq_lib1.props:
eq_lib2[p][:, i] = eq_tmp[p]
eq_lib2T[p][i, :] = eq_tmp[p]
for j, air_T_offset in enumerate(air_T_dim.values):
air_T = 300 + air_T_offset * 500.
fs3 = copy(fs2)
fs3.oxy_stream.TP = air_T, 1.e5
eq_tmp = build_adiabatic_eq_library(fs3, verbose=False)
for p in eq_lib1.props:
eq_lib3[p][:, i, j] = eq_tmp[p]
eq_lib3T1[p][i, :, j] = eq_tmp[p]
eq_lib3T2[p][i, j, :] = eq_tmp[p]
nonT_props = list(eq_lib1.props)
nonT_props.remove('temperature')
eq_lib1.remove(*nonT_props)
eq_lib2.remove(*nonT_props)
eq_lib2T.remove(*nonT_props)
eq_lib3.remove(*nonT_props)
eq_lib3T1.remove(*nonT_props)
eq_lib3T2.remove(*nonT_props)
z_svv = np.linspace(0., 1., 6)
Tf_svv = np.linspace(0., 1., 5)
eq_lib1_t = apply_mixing_model(eq_lib1, {'mixture_fraction': PDFSpec('ClipGauss', z_svv)}, verbose=False)
eq_lib2_t = apply_mixing_model(eq_lib2, {'mixture_fraction': PDFSpec('ClipGauss', z_svv)}, verbose=False)
eq_lib3_t = apply_mixing_model(eq_lib3, {'mixture_fraction': PDFSpec('ClipGauss', z_svv)}, num_procs=1, verbose=False)
eq_lib2T_t = apply_mixing_model(eq_lib2T, {'mixture_fraction': PDFSpec('ClipGauss', z_svv)}, verbose=False)
eq_lib3T1_t = apply_mixing_model(eq_lib3T1, {'mixture_fraction': PDFSpec('ClipGauss', z_svv)}, num_procs=1, verbose=False)
eq_lib3T2_t = apply_mixing_model(eq_lib3T2, {'mixture_fraction': PDFSpec('ClipGauss', z_svv)}, num_procs=1, verbose=False)
eq_lib2_tt = apply_mixing_model(eq_lib2_t, {'fuel_temperature_mean': PDFSpec('Beta', Tf_svv, variance_name='Tfvar')}, added_suffix='', num_procs=1, verbose=False)
eq_lib3_tt = apply_mixing_model(eq_lib3_t, {'fuel_temperature_mean': PDFSpec('Beta', Tf_svv, variance_name='Tfvar')}, added_suffix='', num_procs=1, verbose=False)
def get_dim_names(lib):
return [d.name for d in lib.dims]
self.assertEqual(['mixture_fraction'], get_dim_names(eq_lib1))
self.assertEqual(['mixture_fraction_mean', 'scaled_scalar_variance_mean'], get_dim_names(eq_lib1_t))
self.assertEqual(['mixture_fraction', 'fuel_temperature'], get_dim_names(eq_lib2))
self.assertEqual(['mixture_fraction_mean', 'fuel_temperature_mean', 'scaled_scalar_variance_mean'],
get_dim_names(eq_lib2_t))
self.assertEqual(
['mixture_fraction_mean', 'fuel_temperature_mean', 'scaled_scalar_variance_mean', 'Tfvar'],
get_dim_names(eq_lib2_tt))
self.assertEqual(['mixture_fraction', 'fuel_temperature', 'air_temperature'],
get_dim_names(eq_lib3))
self.assertEqual(['mixture_fraction_mean', 'fuel_temperature_mean', 'air_temperature_mean',
'scaled_scalar_variance_mean'],
get_dim_names(eq_lib3_t))
self.assertEqual(['mixture_fraction_mean', 'fuel_temperature_mean', 'air_temperature_mean',
'scaled_scalar_variance_mean', 'Tfvar'],
get_dim_names(eq_lib3_tt))
self.assertEqual(['fuel_temperature', 'mixture_fraction'], get_dim_names(eq_lib2T))
self.assertEqual(['fuel_temperature_mean', 'mixture_fraction_mean', 'scaled_scalar_variance_mean'],
get_dim_names(eq_lib2T_t), eq_lib2T_t)
self.assertEqual(['fuel_temperature', 'mixture_fraction', 'air_temperature'],
get_dim_names(eq_lib3T1))
self.assertEqual(['fuel_temperature', 'air_temperature', 'mixture_fraction'],
get_dim_names(eq_lib3T2))
self.assertEqual(['fuel_temperature_mean', 'mixture_fraction_mean', 'air_temperature_mean',
'scaled_scalar_variance_mean'],
get_dim_names(eq_lib3T1_t))
self.assertEqual(['fuel_temperature_mean', 'air_temperature_mean', 'mixture_fraction_mean',
'scaled_scalar_variance_mean'],
get_dim_names(eq_lib3T2_t))
self.assertFalse(np.any(np.isnan(eq_lib1['temperature'])))
self.assertFalse(np.any(np.isnan(eq_lib1_t['temperature'])))
self.assertFalse(np.any(np.isnan(eq_lib2['temperature'])))
self.assertFalse(np.any(np.isnan(eq_lib2T['temperature'])))
self.assertFalse(np.any(np.isnan(eq_lib2_t['temperature'])))
self.assertFalse(np.any(np.isnan(eq_lib2T_t['temperature'])))
self.assertFalse(np.any(np.isnan(eq_lib3['temperature'])))
self.assertFalse(np.any(np.isnan(eq_lib3T1['temperature'])))
self.assertFalse(np.any(np.isnan(eq_lib3T2['temperature'])))
self.assertFalse(np.any(np.isnan(eq_lib3_t['temperature'])))
self.assertFalse(np.any(np.isnan(eq_lib3_tt['temperature'])))
self.assertFalse(np.any(np.isnan(eq_lib3T1_t['temperature'])))
self.assertFalse(np.any(np.isnan(eq_lib3T2_t['temperature'])))
self.assertIsNone(assert_allclose(eq_lib2T['temperature'].T, eq_lib2['temperature']))
self.assertIsNone(assert_allclose(np.swapaxes(eq_lib3T1['temperature'], 0, 1),
eq_lib3['temperature']))
self.assertIsNone(assert_allclose(np.swapaxes(np.swapaxes(eq_lib3T2['temperature'], 1, 2), 0, 1),
eq_lib3['temperature']))
self.assertIsNone(assert_allclose(np.squeeze(eq_lib1_t['temperature'][:, 0]),
eq_lib1['temperature']))
self.assertIsNone(assert_allclose(np.squeeze(eq_lib2_t['temperature'][:, :, 0]),
eq_lib2['temperature']))
self.assertIsNone(assert_allclose(np.squeeze(eq_lib3_t['temperature'][:, :, :, 0]),
eq_lib3['temperature']))
self.assertIsNone(assert_allclose(np.squeeze(eq_lib3_tt['temperature'][:, :, :, 0, 0]),
eq_lib3['temperature']))
if __name__ == '__main__':
unittest.main()
except ImportError:
pass
|
134261
|
import pprint
import click
import fitz # pip install pymupdf
@click.command()
@click.argument("filepath", type=click.Path(exists=True))
def entrypoint(filepath):
pp = pprint.PrettyPrinter(indent=4)
with fitz.open(filepath) as doc:
pp.pprint(doc.metadata)
print(f"Scanned pages: {get_scanned_pages_percentage(filepath) * 100:0.1f}%")
class NoTextPagesException(RuntimeError):
pass
def get_scanned_pages_percentage(filepath: str) -> float:
"""
    Return the fraction (0.0 to 1.0) of non-empty pages that were scanned.
    Raises NoTextPagesException if the document contains no non-empty pages.
"""
total_pages = 0
total_scanned_pages = 0
with fitz.open(filepath) as doc:
for page in doc:
text = page.getText().strip()
if len(text) == 0:
# Ignore "empty" pages
continue
total_pages += 1
pix1 = page.getPixmap(alpha=False) # render page to an image
pix1.writePNG(f"page-{page.number}.png") # store image as a PNG
remove_all_text(doc, page)
pix2 = page.getPixmap(alpha=False)
pix2.writePNG(f"page-{page.number}-no-text.png")
img1 = pix1.getImageData("png")
img2 = pix2.getImageData("png")
if img1 == img2:
print(f"{page.number} was scanned or has no text")
if len(text) > 0:
print(f"\tHas text of length {len(text):,} characters")
total_scanned_pages += 1
else:
print(f"{page.number} was NOT scanned")
if total_pages == 0:
raise NoTextPagesException
return total_scanned_pages / total_pages
def remove_all_text(doc, page):
page.cleanContents() # syntax cleaning of page appearance commands
# xref of the cleaned command source (bytes object)
xref = page.getContents()[0]
cont = doc.xrefStream(xref) # read it
    ba_cont = bytearray(cont)  # a modifiable version
pos = 0
changed = False # switch indicates changes
while pos < len(cont) - 1:
pos = ba_cont.find(b"BT\n", pos) # begin text object
if pos < 0:
break # not (more) found
pos2 = ba_cont.find(b"ET\n", pos) # end text object
if pos2 <= pos:
break # major error in PDF page definition!
ba_cont[pos : pos2 + 2] = b"" # remove text object
changed = True
if changed: # we have indeed removed some text
doc.updateStream(xref, ba_cont) # write back command stream w/o text
if __name__ == "__main__":
entrypoint()
|
134272
|
from flask import Blueprint, session, redirect, url_for
babel_blueprint = Blueprint(
'babel',
__name__,
url_prefix="/babel"
)
@babel_blueprint.route('/<string:locale>')
def index(locale):
session['locale'] = locale
return redirect(url_for('blog.home'))
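# --- Sketch (illustrative): consuming session['locale'], assuming the
# Flask-Babel 2.x API; the blueprint above does not itself depend on it. ---
# from flask import Flask, session
# from flask_babel import Babel
#
# app = Flask(__name__)
# app.register_blueprint(babel_blueprint)
# babel = Babel(app)
#
# @babel.localeselector
# def get_locale():
#     return session.get('locale', 'en')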
|
134337
|
import pytest
from django.db.models import Q
from helper import TestMigrations
class TestWithShackdataBase(TestMigrations):
app = "bookkeeping"
migrate_fixtures = ["tests/fixtures/test_shackspace_transactions.json"]
migrate_from = "0012_auto_20180617_1926"
@pytest.mark.xfail
@pytest.mark.django_db
class TestBookkeepingMigrationsFirst(TestWithShackdataBase):
migrate_to = "0013_new_data_model"
def setUpBeforeMigration(self, apps):
RealTransaction = apps.get_model("bookkeeping", "RealTransaction")
VirtualTransaction = apps.get_model("bookkeeping", "VirtualTransaction")
# For test comparison
self.real_transaction_count = RealTransaction.objects.count()
self.virtual_transaction_w_src_count = VirtualTransaction.objects.filter(
source_account__isnull=False
).count()
self.virtual_transaction_w_dst_count = VirtualTransaction.objects.filter(
destination_account__isnull=False
).count()
self.virtual_transaction_member_fees_count = VirtualTransaction.objects.filter(
Q(
source_account__isnull=True,
destination_account__account_category="member_fees",
real_transaction__isnull=True,
)
| Q(
destination_account__isnull=True,
source_account__account_category="member_fees",
real_transaction__isnull=True,
)
).count()
self.orphan_virtual_transaction_count = VirtualTransaction.objects.filter(
real_transaction=None
).count()
self.reversed_transactions = {
rt: rt.reverses
for rt in RealTransaction.objects.filter(reverses__isnull=False).all()
}
def test_accounts_migrated(self):
from byro.bookkeeping.models import Account
assert Account.objects.filter(tags__name="bank").count() == 1
assert Account.objects.filter(tags__name="fees").count() == 1
assert Account.objects.filter(tags__name="fees_receivable").count() == 1
def test_transactions_migrated(self):
from byro.bookkeeping.models import Booking, Transaction
        # Each RealTransaction leads to one Transaction, as does each VirtualTransaction with no RealTransaction
assert (
Transaction.objects.count()
== self.real_transaction_count + self.orphan_virtual_transaction_count
)
        # Each VirtualTransaction leads to one Booking per direction, as does each RealTransaction.
        # VirtualTransactions referencing 'member_fees' have an additional implicit direction.
assert (
Booking.objects.count()
== self.virtual_transaction_w_src_count
+ self.virtual_transaction_w_dst_count
+ self.real_transaction_count
+ self.virtual_transaction_member_fees_count
)
def test_reverses_migrated(self):
assert len(self.reversed_transactions) > 0
from byro.bookkeeping.models import Transaction
for rt, rt_reverses in self.reversed_transactions.items():
t = Transaction.objects.filter(
Q(memo=rt.purpose) | Q(bookings__memo=rt.purpose)
).first()
t_reverses = Transaction.objects.filter(
Q(memo=rt_reverses.purpose) | Q(bookings__memo=rt_reverses.purpose)
).first()
assert t
assert t_reverses
assert t.reverses == t_reverses
def test_amounts_migrated(self):
from byro.bookkeeping.models import Booking
assert Booking.objects.filter(amount__lt=0).count() == 0
@pytest.mark.xfail
@pytest.mark.django_db
class TestBookkeepingMigrationsFinal(TestWithShackdataBase):
migrate_to = "0014_auto_20180707_1410"
def test_accounts_migrated_fully(self):
from byro.bookkeeping.models import Account, AccountCategory
assert (
Account.objects.exclude(
account_category__in=[
AccountCategory.ASSET,
AccountCategory.LIABILITY,
AccountCategory.INCOME,
AccountCategory.EXPENSE,
AccountCategory.EQUITY,
]
).count()
== 0
)
|
134406
|
from dataclasses import dataclass, field
import xleapp.templating as templating
from xleapp._authors import __authors__, __contributors__
from ..html import Contributor, HtmlPage, Template
@dataclass
class Index(HtmlPage):
"""Main index page for HTML report
Attributes:
authors (list): list of authors
contributors (list): list of contributors
"""
authors: list[Contributor] = field(init=False)
contributors: list[Contributor] = field(init=False)
def __post_init__(self) -> None:
self.authors = templating.get_contributors(__authors__)
self.contributors = templating.get_contributors(__contributors__)
@Template("index")
def html(self) -> str:
"""Generates html for page
Returns:
str: HTML of the index page
"""
return self.template.render(
navigation=self.navigation,
authors=self.authors,
contributors=self.contributors,
)
|
134425
|
import gc
from functools import reduce
from typing import Callable, Iterable, List, Optional, Tuple, TypeVar
import numpy as np
import pandas as pd
from .graph import AttrMap, Graph
from .trace import AddOp, TraceKey
from .utils import filter_not_null
from .utils.fs import IOAction
from .utils.ray import ray_iter
__all__ = [
"calc_iou",
"calc_iou_compact",
"calc_trace_side_overlap",
"calc_weighted_iou",
"calc_class_trace_side_overlap",
"calc_class_trace_side_overlap_norm",
"calc_trace_side_overlap_compact",
"calc_class_trace_side_overlap_compact",
"calc_trace_side_overlap_both_compact",
"calc_density",
"calc_density_compact",
"calc_space",
"calc_skip_ratio",
"calc_trace_size",
"calc_density_compact_per_layer",
"self_similarity_matrix",
"self_similarity_matrix_ray",
]
T = TypeVar("T")
def calc_iou(trace1: AttrMap, trace2: AttrMap, key: str = TraceKey.EDGE) -> float:
def intersect_and_union(node_name: str) -> Optional[Tuple[int, int]]:
node_trace1 = trace1.nodes[node_name]
if key in node_trace1:
node_trace2 = trace2.nodes[node_name]
trace_set1 = TraceKey.to_array(node_trace1[key])
trace_set2 = TraceKey.to_array(node_trace2[key])
intersect = np.intersect1d(trace_set1, trace_set2)
union = np.union1d(trace_set1, trace_set2)
return len(intersect), len(union)
else:
return None
def get_iou(args: Tuple[int, int]) -> float:
intersect_size, union_size = args
return intersect_size / union_size
iou = get_iou(
reduce(
lambda x, y: (x[0] + y[0], x[1] + y[1]),
filter_not_null(
[intersect_and_union(node_name) for node_name in trace1.nodes]
),
)
)
return iou
def calc_iou_frequency(
trace1: AttrMap, trace2: AttrMap, frequency: int, key: str = TraceKey.EDGE
) -> float:
def intersect_and_union(node_name: str) -> Optional[Tuple[int, int]]:
node_trace1 = trace1.nodes[node_name]
if key in node_trace1:
node_trace2 = trace2.nodes[node_name]
trace_set1 = (
node_trace1[key].index[node_trace1[key]["count"] > frequency].values
)
trace_set2 = (
node_trace2[key].index[node_trace2[key]["count"] > frequency].values
)
intersect = np.intersect1d(trace_set1, trace_set2)
union = np.union1d(trace_set1, trace_set2)
return len(intersect), len(union)
else:
return None
def get_iou(args: Tuple[int, int]) -> float:
intersect_size, union_size = args
if union_size == 0:
return 0
else:
return intersect_size / union_size
iou = get_iou(
reduce(
lambda x, y: (x[0] + y[0], x[1] + y[1]),
filter_not_null(
[intersect_and_union(node_name) for node_name in trace1.nodes]
),
)
)
return iou
def calc_iou_frequency_per_layer(
trace1: AttrMap,
trace2: AttrMap,
node_name: str,
frequency: int,
key: str = TraceKey.EDGE,
) -> float:
node_trace1 = trace1.nodes[node_name]
if key in node_trace1:
node_trace2 = trace2.nodes[node_name]
trace_set1 = (
node_trace1[key].index[node_trace1[key]["count"] > frequency].values
)
trace_set2 = (
node_trace2[key].index[node_trace2[key]["count"] > frequency].values
)
intersect = np.intersect1d(trace_set1, trace_set2)
union = np.union1d(trace_set1, trace_set2)
if len(union) != 0:
return len(intersect) / len(union)
else:
return 0
else:
return None
def calc_iou_per_layer(
trace1: AttrMap, trace2: AttrMap, node_name: str, key: str = TraceKey.EDGE
) -> float:
node_trace1 = trace1.nodes[node_name]
if key in node_trace1:
node_trace2 = trace2.nodes[node_name]
trace_set1 = TraceKey.to_array(node_trace1[key])
trace_set2 = TraceKey.to_array(node_trace2[key])
intersect = np.intersect1d(trace_set1, trace_set2)
union = np.union1d(trace_set1, trace_set2)
        if len(union) == 0:
            return 0
        return len(intersect) / len(union)
else:
return None
def calc_class_trace_side_overlap(
class_trace: AttrMap, trace: AttrMap, key: str = TraceKey.EDGE
) -> float:
def intersect(node_name: str) -> Optional[int]:
node_class_trace = class_trace.nodes[node_name]
if key in node_class_trace:
node_trace = trace.nodes[node_name]
class_trace_set = TraceKey.to_array(node_class_trace[key])
trace_set = TraceKey.to_array(node_trace[key])
intersect = np.intersect1d(class_trace_set, trace_set)
return len(intersect)
else:
return None
iou = (
sum(filter_not_null([intersect(node_name) for node_name in class_trace.nodes]))
/ class_trace.attrs[TraceKey.max_of(TraceKey.num_of(key))]
)
return iou
def calc_class_trace_side_overlap_norm(
class_trace: AttrMap, trace: AttrMap, key: str = TraceKey.EDGE
) -> float:
def intersect(node_name: str) -> Optional[int]:
node_class_trace = class_trace.nodes[node_name]
if key in node_class_trace:
node_trace = trace.nodes[node_name]
class_trace_set = TraceKey.to_array(node_class_trace[key])
trace_set = TraceKey.to_array(node_trace[key])
intersect = np.intersect1d(class_trace_set, trace_set)
return len(intersect)
else:
return None
iou = (
sum(filter_not_null([intersect(node_name) for node_name in class_trace.nodes]))
- class_trace.attrs[TraceKey.min_of(TraceKey.num_of(key))]
) / class_trace.attrs[TraceKey.max_of(TraceKey.num_of(key))]
return iou
def calc_trace_side_overlap(
class_trace: AttrMap,
trace: AttrMap,
key: str = TraceKey.EDGE,
node_name: str = None,
) -> float:
def intersect_and_union(node_name: str) -> Optional[Tuple[int, int]]:
node_class_trace = class_trace.nodes[node_name]
if key in node_class_trace:
# if key == TraceKey.EDGE and node_name.startswith("max"):
# return None
node_trace = trace.nodes[node_name]
class_trace_set = TraceKey.to_array(node_class_trace[key])
trace_set = TraceKey.to_array(node_trace[key])
intersect = np.intersect1d(class_trace_set, trace_set)
return len(intersect), len(trace_set)
else:
return None
def get_iou(args: Tuple[int, int]) -> float:
intersect_size, union_size = args
return intersect_size / union_size
if node_name is None:
iou = get_iou(
reduce(
lambda x, y: (x[0] + y[0], x[1] + y[1]),
filter_not_null(
[intersect_and_union(node_name) for node_name in class_trace.nodes]
),
)
)
else:
iou = intersect_and_union(node_name)
if iou is not None:
iou = get_iou(iou)
return iou
def calc_trace_size(
trace: AttrMap, key: str = TraceKey.EDGE, compact: bool = False
) -> Optional[int]:
def trace_size(node_name: str) -> Optional[int]:
node_trace = trace.nodes[node_name]
if key in node_trace:
if compact:
return np.count_nonzero(np.unpackbits(node_trace[key]))
else:
return TraceKey.to_array(node_trace[key]).size
else:
return None
return sum(filter_not_null([trace_size(node_name) for node_name in trace.nodes]))
def calc_trace_size_per_layer(
trace: AttrMap, layer_name: str, key: str = TraceKey.EDGE, compact: bool = False
) -> Optional[int]:
def trace_size(node_name: str) -> Optional[int]:
node_trace = trace.nodes[node_name]
if key in node_trace:
if compact:
return np.count_nonzero(np.unpackbits(node_trace[key]))
else:
return TraceKey.to_array(node_trace[key]).size
else:
return None
return trace_size(layer_name)
def calc_trace_path_num(trace: AttrMap, layer: str) -> int:
return trace.tensors[layer][TraceKey.PATH]["count"].sum()
def calc_trace_side_overlap_compact(
class_trace: AttrMap,
trace: AttrMap,
key: str = TraceKey.EDGE,
node_name: str = None,
) -> float:
def intersect_and_union(node_name: str) -> Optional[Tuple[int, int]]:
node_class_trace = class_trace.nodes[node_name]
if key in node_class_trace:
node_trace = trace.nodes[node_name]
class_trace_set = TraceKey.to_array(
np.argwhere(np.unpackbits(node_class_trace[key]))
)
trace_set = TraceKey.to_array(node_trace[key])
intersect = np.intersect1d(class_trace_set, trace_set)
return len(intersect), len(trace_set)
else:
return None
def get_iou(args: Tuple[int, int]) -> float:
intersect_size, union_size = args
if union_size == 0:
return 0
else:
return intersect_size / union_size
if node_name is None:
iou = get_iou(
reduce(
lambda x, y: (x[0] + y[0], x[1] + y[1]),
filter_not_null(
[intersect_and_union(node_name) for node_name in class_trace.nodes]
),
)
)
else:
iou = intersect_and_union(node_name)
if iou is not None:
iou = get_iou(iou)
return iou
def calc_trace_side_overlap_both_compact(
class_trace: AttrMap,
trace: AttrMap,
key: str = TraceKey.EDGE,
node_name: str = None,
return_size: bool = False,
) -> float:
def intersect_and_union(node_name: str) -> Optional[Tuple[int, int]]:
node_class_trace = class_trace.nodes[node_name]
if key in node_class_trace:
node_trace = trace.nodes[node_name]
class_trace_set = node_class_trace[key]
trace_set = node_trace[key]
intersect = np.bitwise_and(class_trace_set, trace_set)
return (
np.count_nonzero(np.unpackbits(intersect)),
np.count_nonzero(np.unpackbits(trace_set)),
)
else:
return None
def get_iou(args: Tuple[int, int]) -> float:
intersect_size, union_size = args
if union_size == 0:
return 0
else:
return intersect_size / union_size
if node_name is None:
intersect_size, union_size = reduce(
lambda x, y: (x[0] + y[0], x[1] + y[1]),
filter_not_null(
[intersect_and_union(node_name) for node_name in class_trace.nodes]
),
)
iou = get_iou((intersect_size, union_size))
if return_size:
return iou, intersect_size
else:
iou = intersect_and_union(node_name)
if iou is not None:
iou = get_iou(iou)
return iou
def calc_class_trace_side_overlap_compact(
class_trace: AttrMap,
trace: AttrMap,
key: str = TraceKey.EDGE,
node_name: str = None,
) -> float:
def intersect_and_union(node_name: str) -> Optional[Tuple[int, int]]:
node_class_trace = class_trace.nodes[node_name]
if key in node_class_trace:
node_trace = trace.nodes[node_name]
class_trace_set = TraceKey.to_array(
np.argwhere(np.unpackbits(node_class_trace[key]))
)
trace_set = TraceKey.to_array(node_trace[key])
intersect = np.intersect1d(class_trace_set, trace_set)
return len(intersect), len(class_trace_set)
else:
return None
def get_iou(args: Tuple[int, int]) -> float:
intersect_size, union_size = args
if union_size == 0:
return 0
else:
return intersect_size / union_size
if node_name is None:
iou = get_iou(
reduce(
lambda x, y: (x[0] + y[0], x[1] + y[1]),
filter_not_null(
[intersect_and_union(node_name) for node_name in class_trace.nodes]
),
)
)
else:
iou = intersect_and_union(node_name)
if iou is not None:
iou = get_iou(iou)
return iou
def calc_weighted_iou(
class_trace: AttrMap,
trace: AttrMap,
key: str = TraceKey.EDGE,
node_name: str = None,
) -> float:
def intersect_and_union(node_name: str) -> Optional[Tuple[float, float]]:
node_class_trace = class_trace.nodes[node_name]
if key in node_class_trace:
node_trace = trace.nodes[node_name]
frame_class_trace = node_class_trace[key]
intersect_mask = np.isin(
TraceKey.to_array(frame_class_trace),
node_trace[key],
assume_unique=True,
)
return (
frame_class_trace["count"].values[intersect_mask].sum()
/ class_trace.attrs[TraceKey.COUNT],
frame_class_trace["count"].values.sum()
/ class_trace.attrs[TraceKey.COUNT],
)
else:
return None
def get_iou(args: Tuple[float, float]) -> float:
intersect_size, union_size = args
return intersect_size / union_size
if node_name is None:
iou = get_iou(
reduce(
lambda x, y: (x[0] + y[0], x[1] + y[1]),
filter_not_null(
[intersect_and_union(node_name) for node_name in class_trace.nodes]
),
)
)
else:
iou = intersect_and_union(node_name)
if iou is not None:
iou = get_iou(iou)
return iou
def calc_iou_compact(
trace1: AttrMap, trace2: AttrMap, key: str = TraceKey.EDGE
) -> float:
def intersect_and_union(node_name: str) -> Optional[Tuple[int, int]]:
node_trace1 = trace1.nodes[node_name]
if key in node_trace1:
node_trace2 = trace2.nodes[node_name]
trace_set1 = node_trace1[key]
trace_set2 = node_trace2[key]
intersect = np.bitwise_and(trace_set1, trace_set2)
union = np.bitwise_or(trace_set1, trace_set2)
return (
np.count_nonzero(np.unpackbits(intersect)),
np.count_nonzero(np.unpackbits(union)),
)
else:
return None
def get_iou(args: Tuple[int, int]) -> float:
intersect_size, union_size = args
if union_size == 0:
return 0
else:
return intersect_size / union_size
iou = get_iou(
reduce(
lambda x, y: (x[0] + y[0], x[1] + y[1]),
filter_not_null(
[intersect_and_union(node_name) for node_name in trace1.nodes]
),
)
)
return iou
def calc_iou_compact_per_layer(
trace1: AttrMap, trace2: AttrMap, node_name: str, key: str = TraceKey.EDGE
) -> float:
node_trace1 = trace1.nodes[node_name]
if key in node_trace1:
node_trace2 = trace2.nodes[node_name]
trace_set1 = node_trace1[key]
trace_set2 = node_trace2[key]
intersect = np.bitwise_and(trace_set1, trace_set2)
union = np.bitwise_or(trace_set1, trace_set2)
        union_size = np.count_nonzero(np.unpackbits(union))
        if union_size == 0:
            return 0
        return np.count_nonzero(np.unpackbits(intersect)) / union_size
else:
return None
def self_similarity_matrix(
iterable: Iterable[T],
trace_fn: Callable[[T], AttrMap],
similarity_fn: Callable[[AttrMap, AttrMap], float],
) -> np.ndarray:
if not isinstance(iterable, list):
iterable = list(iterable)
size = len(iterable)
matrix = np.eye(size, dtype=float)
for i in range(0, size):
for j in range(i + 1, size):
trace_i = trace_fn(iterable[i])
trace_j = trace_fn(iterable[j])
similarity = similarity_fn(trace_i, trace_j)
matrix[i][j] = similarity
matrix[j][i] = similarity
return matrix
def self_similarity_matrix_ray(
partial_path: str,
iterable: Iterable[T],
trace_fn: Callable[[T], AttrMap],
similarity_fn: Callable[[AttrMap, AttrMap], float],
key: str = TraceKey.EDGE,
) -> np.ndarray:
if not isinstance(iterable, list):
iterable = list(iterable)
size = len(iterable)
def calc_similarity(iter_i, iter_j):
trace_i = trace_fn(iter_i)
trace_j = trace_fn(iter_j)
if trace_i is None or trace_j is None:
return 0.0
else:
similarity = similarity_fn(trace_i, trace_j, key=key)
return similarity
def save_and_load_similarity(i, j):
# tr.print_diff()
iter_i = iterable[i]
iter_j = iterable[j]
action = IOAction(
f"{partial_path}/{iter_i}_{iter_j}.pkl",
init_fn=lambda: calc_similarity(iter_i, iter_j),
cache=True,
)
action.save()
return i, j, action.load()
# tr = tracker.SummaryTracker()
similarity_list = ray_iter(
save_and_load_similarity,
[(i, j) for i in range(0, size) for j in range(i + 1, size)],
out_of_order=True,
chunksize=1,
)
matrix = np.eye(size, dtype=float)
for i, j, similarity in similarity_list:
matrix[i][j] = similarity
matrix[j][i] = similarity
print(f"finish i={i}, j={j}")
return matrix
def inter_class_similarity_matrix_ray(
partial_path: str,
iterable: Iterable[T],
trace_fn: Callable[[T, str], AttrMap],
similarity_fn: Callable[[AttrMap, AttrMap], float],
key: str = TraceKey.EDGE,
) -> np.ndarray:
if not isinstance(iterable, list):
iterable = list(iterable)
size = len(iterable)
def calc_similarity(iter_i, iter_j):
trace_i = trace_fn(iter_i, "left")
trace_j = trace_fn(iter_j, "right")
if trace_i is None or trace_j is None:
return 0.0
else:
similarity = similarity_fn(trace_i, trace_j, key=key)
del trace_i
del trace_j
gc.collect()
return similarity
def save_and_load_similarity(i, j):
# tr.print_diff()
iter_i = iterable[i]
iter_j = iterable[j]
action = IOAction(
f"{partial_path}/{iter_i}_{iter_j}.pkl",
init_fn=lambda: calc_similarity(iter_i, iter_j),
cache=True,
)
action.save()
return i, j, action.load()
# tr = tracker.SummaryTracker()
similarity_list = ray_iter(
save_and_load_similarity,
[(i, j) for i in range(0, size) for j in range(i, size)],
out_of_order=True,
chunksize=1,
)
matrix = np.zeros((size, size), dtype=float)
for i, j, similarity in similarity_list:
matrix[i][j] = similarity
matrix[j][i] = similarity
print(f"finish i={i}, j={j}")
return matrix
def calc_density(trace: AttrMap, key: str) -> float:
density = sum(
node[key].size for name, node in trace.nodes.items() if key in node
) / sum(
np.prod(node[key + "_shape"])
for name, node in trace.nodes.items()
if key in node
)
return density
def calc_density_compact(trace: AttrMap, key: str) -> float:
density = sum(
np.count_nonzero(np.unpackbits(node[key]))
for name, node in trace.nodes.items()
if key in node
) / sum(
np.prod(node[key + "_shape"])
for name, node in trace.nodes.items()
if key in node
)
return density
def calc_density_compact_per_layer(
trace: AttrMap, layers: List[str], key: str
) -> pd.DataFrame:
result_layers = []
densities = []
for layer_name in layers:
node = trace.nodes[layer_name]
if key in node:
result_layers.append(layer_name)
densities.append(
np.count_nonzero(np.unpackbits(node[key]))
/ np.prod(node[key + "_shape"])
)
return pd.DataFrame(dict(density=densities), index=result_layers).rename_axis(
"layer"
)
def calc_metrics_compact_per_layer(trace: AttrMap, layers: List[str]) -> pd.DataFrame:
result_layers = []
metrics = []
for layer_name in layers:
node = trace.nodes[layer_name]
for metric_name in TraceKey.METRICS:
result_layers.append(f"{layer_name}/{metric_name}")
metrics.append(node[metric_name])
return pd.DataFrame(dict(value=metrics), index=result_layers).rename_axis(
"layer_metric"
)
def calc_skip_ratio(graph: Graph, layers: List[str]) -> pd.DataFrame:
result_layers = []
skip_ratios = []
for node_name in layers:
node = graph.node(graph.id(node_name))
if isinstance(node, AddOp):
traced_edges = np.unpackbits(node.attrs[TraceKey.EDGE]).reshape(
node.attrs[TraceKey.EDGE_SHAPE]
)
result_layers.append(node.name)
skip_ratios.append(
np.count_nonzero(traced_edges[1]) / np.count_nonzero(traced_edges)
)
return pd.DataFrame(dict(skip_ratio=skip_ratios), index=result_layers).rename_axis(
"layer"
)
def calc_space(trace: AttrMap, key: str) -> int:
return sum(
np.prod(node[key + "_shape"])
for name, node in trace.nodes.items()
if key in node
)
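# --- Example (illustrative): the bit-packed set representation used by the
# "_compact" functions above. Packing traces with np.packbits turns set
# intersection/union into cheap bitwise AND/OR. ---
if __name__ == "__main__":
    a = np.packbits(np.array([1, 1, 0, 0, 1, 0, 0, 0], dtype=np.uint8))  # {0, 1, 4}
    b = np.packbits(np.array([1, 0, 0, 0, 1, 1, 0, 0], dtype=np.uint8))  # {0, 4, 5}
    intersect = np.count_nonzero(np.unpackbits(np.bitwise_and(a, b)))  # 2
    union = np.count_nonzero(np.unpackbits(np.bitwise_or(a, b)))      # 4
    print(intersect / union)  # 0.5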
|
134462
|
import os
from mindware.components.feature_engineering.transformations.base_transformer import Transformer
from mindware.components.utils.class_loader import find_components, ThirdPartyComponents
"""
Load the buildin classifiers.
"""
generator_directory = os.path.split(__file__)[0]
_generator = find_components(__package__, generator_directory, Transformer)
"""
Load third-party classifiers.
"""
_addons = ThirdPartyComponents(Transformer)
def add_generator(generator):
_addons.add_component(generator)
|
134471
|
import DOM
class UIObject:
def getElement(self):
return self.element
def setElement(self, element):
self.element = element
def setStyleName(self, style):
DOM.setAttribute(self.element, "className", style)
class Widget(UIObject):
def setParent(self, parent):
self.parent = parent
class FocusWidget(Widget):
def __init__(self, element):
self.setElement(element)
class ButtonBase(FocusWidget):
def __init__(self, element):
FocusWidget.__init__(self, element)
def setHTML(self, html):
DOM.setInnerHTML(self.getElement(), html)
class Button(ButtonBase):
def __init__(self, html=None):
ButtonBase.__init__(self, DOM.createButton())
self.setStyleName("gwt-Button")
if html:
self.setHTML(html)
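# A minimal usage sketch (assumes the imported DOM module provides the
# pyjamas/GWT-style browser bindings used above):
#
#   b = Button("<b>Click me</b>")  # creates a <button>, applies the gwt-Button style, sets inner HTML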
|
134486
|
from printer import Printer, PrinterError
from unittest import TestCase
class TestPrinter(TestCase):
def setUp(self):
self.printer = Printer(pages_per_s=2.0, capacity=300)
def test_print_within_capacity(self):
        # printing within capacity should complete without raising PrinterError
        self.printer.print(25)
|
134489
|
from tests import run_main_and_assert
FAST_LOCAL_TEST_ARGS = "--exp-name local_test --datasets mnist" \
" --network LeNet --num-tasks 3 --seed 1 --batch-size 32" \
" --nepochs 3" \
" --num-workers 0" \
" --approach mas"
def test_mas_without_exemplars():
run_main_and_assert(FAST_LOCAL_TEST_ARGS)
def test_mas_with_exemplars():
args_line = FAST_LOCAL_TEST_ARGS
args_line += " --num-exemplars 200"
run_main_and_assert(args_line)
def test_mas_with_warmup():
args_line = FAST_LOCAL_TEST_ARGS
args_line += " --warmup-nepochs 5"
args_line += " --warmup-lr-factor 0.5"
args_line += " --num-exemplars 200"
run_main_and_assert(args_line)
|
134524
|
from __future__ import absolute_import
import sys
import unittest
from testutils import ADMIN_CLIENT, suppress_urllib3_warning
from testutils import harbor_server
from testutils import TEARDOWN
import library.repository
import library.cnab
from library.project import Project
from library.user import User
from library.repository import Repository
from library.artifact import Artifact
from library.scan import Scan
class TestCNAB(unittest.TestCase):
@suppress_urllib3_warning
def setUp(self):
print("Setup")
@unittest.skipIf(TEARDOWN == False, "Test data won't be erased.")
def do_tearDown(self):
"""
Tear down:
1. Delete repository(RA) by user(UA);
2. Delete project(PA);
3. Delete user(UA);
"""
#1. Delete repository(RA) by user(UA);
TestCNAB.repo.delete_repository(TestCNAB.project_name, TestCNAB.cnab_repo_name, **TestCNAB.USER_CLIENT)
#2. Delete project(PA);
TestCNAB.project.delete_project(TestCNAB.project_id, **TestCNAB.USER_CLIENT)
#3. Delete user(UA).
TestCNAB.user.delete_user(TestCNAB.user_id, **ADMIN_CLIENT)
def test_01_PushBundleByCnab(self):
"""
Test case:
Push Bundle By Cnab
Test step and expected result:
1. Create a new user(UA);
2. Create a new project(PA) by user(UA);
3. Push bundle to harbor as repository(RA);
4. Get repository from Harbor successfully;
        5. Verify bundle name;
6. Get artifact by sha256;
7. Verify artifact information.
"""
        TestCNAB.project = Project()
        TestCNAB.user = User()
        TestCNAB.artifact = Artifact()
        TestCNAB.repo = Repository()
        TestCNAB.scan = Scan()
TestCNAB.url = ADMIN_CLIENT["endpoint"]
TestCNAB.user_push_cnab_password = "<PASSWORD>"
TestCNAB.cnab_repo_name = "test_cnab"
TestCNAB.cnab_tag = "test_cnab_tag"
TestCNAB.project_name = None
TestCNAB.artifacts_config_ref_child_list = None
TestCNAB.artifacts_ref_child_list = None
#1. Create a new user(UA);
        TestCNAB.user_id, TestCNAB.user_name = TestCNAB.user.create_user(user_password = TestCNAB.user_push_cnab_password, **ADMIN_CLIENT)
        TestCNAB.USER_CLIENT = dict(endpoint = TestCNAB.url, username = TestCNAB.user_name, password = TestCNAB.user_push_cnab_password, with_scan_overview = True)
#2. Create a new project(PA) by user(UA);
TestCNAB.project_id, TestCNAB.project_name = TestCNAB.project.create_project(metadata = {"public": "false"}, **TestCNAB.USER_CLIENT)
#3. Push bundle to harbor as repository(RA);
target = harbor_server + "/" + TestCNAB.project_name + "/" + TestCNAB.cnab_repo_name + ":" + TestCNAB.cnab_tag
TestCNAB.reference_sha256 = library.cnab.push_cnab_bundle(harbor_server, TestCNAB.user_name, TestCNAB.user_push_cnab_password, "goharbor/harbor-log:v1.10.0", "kong:latest", target)
#4. Get repository from Harbor successfully;
TestCNAB.cnab_bundle_data = TestCNAB.repo.get_repository(TestCNAB.project_name, TestCNAB.cnab_repo_name, **TestCNAB.USER_CLIENT)
print(TestCNAB.cnab_bundle_data)
#4.1 Get refs of CNAB bundle;
TestCNAB.artifacts = TestCNAB.artifact.list_artifacts(TestCNAB.project_name, TestCNAB.cnab_repo_name, **TestCNAB.USER_CLIENT)
print("artifacts:", TestCNAB.artifacts)
TestCNAB.artifacts_ref_child_list = []
TestCNAB.artifacts_config_ref_child_list = []
for ref in TestCNAB.artifacts[0].references:
if ref.annotations["io.cnab.manifest.type"] != 'config':
TestCNAB.artifacts_ref_child_list.append(ref.child_digest)
else:
TestCNAB.artifacts_config_ref_child_list.append(ref.child_digest)
self.assertEqual(len(TestCNAB.artifacts_ref_child_list), 2, msg="Image artifact count should be 2.")
self.assertEqual(len(TestCNAB.artifacts_config_ref_child_list), 1, msg="Bundle count should be 1.")
print(TestCNAB.artifacts_ref_child_list)
#4.2 Cnab bundle can be pulled by ctr successfully;
        # This step might not succeed since ctr doesn't fully support CNAB; it might be uncommented sometime in the future.
        # Please keep these lines commented out!
#library.containerd.ctr_images_pull(TestCNAB.user_name, TestCNAB.user_push_cnab_password, target)
#library.containerd.ctr_images_list(oci_ref = target)
        #5. Verify bundle name;
self.assertEqual(TestCNAB.cnab_bundle_data.name, TestCNAB.project_name + "/" + TestCNAB.cnab_repo_name)
#6. Get artifact by sha256;
artifact = TestCNAB.artifact.get_reference_info(TestCNAB.project_name, TestCNAB.cnab_repo_name, TestCNAB.reference_sha256, **TestCNAB.USER_CLIENT)
#7. Verify artifact information;
self.assertEqual(artifact.type, 'CNAB')
self.assertEqual(artifact.digest, TestCNAB.reference_sha256)
def test_02_ScanCNAB(self):
"""
Test case:
Scan CNAB
Test step and expected result:
1. Scan config artifact, it should be failed with 400 status code;
        2. Scan 1st child artifact, it should be scanned, the other should not be scanned, repository should not be scanned;
        3. Scan 2nd child artifact, it should be scanned, repository should not be scanned;
4. Scan repository, it should be scanned;
Tear down:
"""
#1. Scan config artifact, it should be failed with 400 status code;
TestCNAB.scan.scan_artifact(TestCNAB.project_name, TestCNAB.cnab_repo_name, TestCNAB.artifacts_config_ref_child_list[0], expect_status_code = 400, **TestCNAB.USER_CLIENT)
        #2. Scan 1st child artifact, it should be scanned, the other should not be scanned, repository should not be scanned;
TestCNAB.scan.scan_artifact(TestCNAB.project_name, TestCNAB.cnab_repo_name, TestCNAB.artifacts_ref_child_list[0], **TestCNAB.USER_CLIENT)
TestCNAB.artifact.check_image_scan_result(TestCNAB.project_name, TestCNAB.cnab_repo_name, TestCNAB.artifacts_ref_child_list[0], **TestCNAB.USER_CLIENT)
TestCNAB.artifact.check_image_scan_result(TestCNAB.project_name, TestCNAB.cnab_repo_name, TestCNAB.artifacts_ref_child_list[1], expected_scan_status = "Not Scanned", **TestCNAB.USER_CLIENT)
TestCNAB.artifact.check_image_scan_result(TestCNAB.project_name, TestCNAB.cnab_repo_name, TestCNAB.artifacts_config_ref_child_list[0], expected_scan_status = "No Scan Overview", **TestCNAB.USER_CLIENT)
TestCNAB.artifact.check_image_scan_result(TestCNAB.project_name, TestCNAB.cnab_repo_name, TestCNAB.artifacts[0].digest, expected_scan_status = "Not Scanned", **TestCNAB.USER_CLIENT)
        #3. Scan 2nd child artifact, it should be scanned, repository should not be scanned;
TestCNAB.scan.scan_artifact(TestCNAB.project_name, TestCNAB.cnab_repo_name, TestCNAB.artifacts_ref_child_list[1], **TestCNAB.USER_CLIENT)
TestCNAB.artifact.check_image_scan_result(TestCNAB.project_name, TestCNAB.cnab_repo_name, TestCNAB.artifacts_ref_child_list[1], **TestCNAB.USER_CLIENT)
TestCNAB.artifact.check_image_scan_result(TestCNAB.project_name, TestCNAB.cnab_repo_name, TestCNAB.artifacts_config_ref_child_list[0], expected_scan_status = "No Scan Overview", **TestCNAB.USER_CLIENT)
TestCNAB.artifact.check_image_scan_result(TestCNAB.project_name, TestCNAB.cnab_repo_name, TestCNAB.artifacts[0].digest, expected_scan_status = "Not Scanned", **TestCNAB.USER_CLIENT)
#4. Scan repository, it should be scanned;
TestCNAB.scan.scan_artifact(TestCNAB.project_name, TestCNAB.cnab_repo_name, TestCNAB.artifacts[0].digest, **TestCNAB.USER_CLIENT)
TestCNAB.artifact.check_image_scan_result(TestCNAB.project_name, TestCNAB.cnab_repo_name, TestCNAB.artifacts[0].digest, **TestCNAB.USER_CLIENT)
self.do_tearDown()
if __name__ == '__main__':
suite = unittest.TestSuite(unittest.makeSuite(TestCNAB))
result = unittest.TextTestRunner(sys.stdout, verbosity=2, failfast=True).run(suite)
if not result.wasSuccessful():
raise Exception(r"CNAB test failed: {}".format(result))
|
134539
|
from time import sleep
from pycrunch_trace.client.api import trace
def alternative_ways_to_trace():
sleep(0.25)
print('You can use Trace object to manually start and stop tracing')
print(' Or by applying @trace decorator to the method')
    print('    See examples below')
def example_without_decorators():
from pycrunch_trace.client.api import Trace
tracer = Trace()
tracer.start('recording_name')
code_under_trace()
another_code_to_trace()
tracer.stop()
@trace
def example_with_decorator():
# Recording will be named the same as the method name
pass
@trace('this_is_custom_name')
def example_with_custom_name():
pass
|
134624
|
from __future__ import print_function
from datetime import datetime
import inspect
import os
import socket
import sys
import threading
import uuid
import gridengine
from gridengine import schedulers
# ----------------------------------------------------------------------------
# JOB DISPATCHER
# ----------------------------------------------------------------------------
class JobDispatcher(object):
"""
Server-like node tasked with dispatching and mediating jobs
"""
def __init__(self, scheduler=schedulers.best_available):
"""Initialize a new dispatcher
Keyword Args:
scheduler: A schedulers.Scheduler instance or class. By default, the
system tries to return a GridEngineScheduler, and falls back to a
ProcessScheduler if it is not available
"""
# setup the ZeroMQ communications
import zmq
self.context = zmq.Context()
self.host_name = socket.gethostname()
self.ip = socket.gethostbyname(self.host_name)
self.transport = 'tcp://{ip}'.format(ip=self.ip)
# server/reply protocol (zmq.REP)
self.socket = self.context.socket(zmq.REP)
self.port = self.socket.bind_to_random_port(self.transport)
self.address = '{transport}:{port}'.format(transport=self.transport, port=self.port)
# poller
self.poller = zmq.Poller()
self.poller.register(self.socket, zmq.POLLIN)
# control locks
self._finished = True
self.dispatcher_lock = threading.Lock()
# initialize the scheduler if it's not already an instance
self.scheduler = scheduler if isinstance(scheduler, schedulers.Scheduler) else scheduler()
def __del__(self):
"""make sure the socket is closed on deallocation"""
self.socket.close()
def controller(self):
print('JobDispatcher: starting job dispatcher on transport {0}'.format(self.address))
while not self.finished:
# poll the socket with timeout
if self.poller.poll(timeout=1000):
request = gridengine.serializer.loads(self.socket.recv())
request, jobid, data = [request.get(key, None) for key in ('request', 'jobid', 'data')]
if request == 'fetch_job':
# find the requested job
job = self.job_queue.pop()
# send the job back to the client
self.socket.send(gridengine.serializer.dumps(job, gridengine.serializer.HIGHEST_PROTOCOL))
if request == 'store_data':
# store the results
self.results[jobid] = data
self.socket.send(gridengine.serializer.dumps(True, gridengine.serializer.HIGHEST_PROTOCOL))
def dispatch(self, jobs):
"""Dispatch a set of jobs to run asynchronously
Request the scheduler to schedule the set of jobs to run,
then spin up the JobDispatcher.controller in a separate
thread to control execution of the jobs.
This method will raise a RuntimeError if called more than once
before a call to join().
Raises:
RuntimeError: if called multiple times before a corresponding
call to join()
"""
if not self.finished:
raise RuntimeError('Dispatcher is already running')
        # create a shared job lookup table (0-based indexing via enumerate)
for id, job in enumerate(jobs):
job.id = id
self.job_queue = [job for job in jobs]
self.results = dict.fromkeys(job.id for job in jobs)
# spin up the controller
self.finished = False
self.job_controller = threading.Thread(target=self.controller)
self.job_controller.start()
# store the job start time
self.start_time = datetime.now()
self.end_time = None
self.elapsed_time = None
# spin up the scheduler
self.scheduler.schedule(self.address, self.job_queue)
def join(self, timeout=None):
"""Wait until the jobs terminate
This blocks the calling thread until the jobs terminate - either
normally or through an unhandled exception - or until the optional
timeout occurs.
Raises:
TimeoutError: If the jobs have not finished before the specified timeout
RuntimeError: If a call to join is made before dispatching
"""
if self.finished:
raise RuntimeError('No dispatched jobs to join')
# raises TimeoutError
try:
self.scheduler.join(timeout=timeout)
except schedulers.TimeoutError as e:
# reraise the exception without joining the controller
raise
except (KeyboardInterrupt, Exception) as e:
# shut down the controller then reraise the exception
self.finished = True
self.job_controller.join()
raise
else:
# shut down the controller
self.finished = True
self.job_controller.join()
# get the elapsed time
self.end_time = datetime.now()
self.elapsed_time = self.end_time - self.start_time
# return the results
return [self.results[id] for id in sorted(self.results)]
def get_finished(self):
with self.dispatcher_lock:
return self._finished
def set_finished(self, value):
with self.dispatcher_lock:
self._finished = value
finished = property(get_finished, set_finished)
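# A minimal usage sketch (the job objects are illustrative; dispatch() assigns
# each one an `id`, and they must be serializable by gridengine.serializer):
#
#   dispatcher = JobDispatcher()
#   dispatcher.dispatch(jobs)    # schedule the jobs and start the controller thread
#   results = dispatcher.join()  # block until every job has stored its result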
|
134639
|
from cryptoxlib.exceptions import CryptoXLibException
class BiboxException(CryptoXLibException):
pass
|
134671
|
import gdown
import os
from zipfile import ZipFile
demos = {
"Sawyer_chair_agne_0007_00XX.zip": "1-lVTCH4oPq22cLC4Mmia9AKqzDIIVDO0",
    "Sawyer_table_dockstra_0279_00XX.zip": "1QAchFmYpQGqa6zaZ2QeZH5ET-iuyerU0",
"Sawyer_bench_bjursta_0210_00XX.zip": "12b8_j1mC8-pgotjARF1aTcqH2T7FNHNF",
"Sawyer_table_bjorkudden_0207_00XX.zip": "19DA5M2iPvOYa9KG54uIxOhNF0r2zXClK",
"Sawyer_table_lack_0825_00XX.zip": "1BrgbaE9Wx-Si7VtXpUJSHRrRnyqdLJA7",
"Sawyer_toy_table_00XX.zip": "1Wg6oxkiiOX8DsYVdr7sYNmYnSdaxIskc",
"Sawyer_chair_ingolf_0650_00XX.zip": "1i9A9CVPys7LiUnePRn4OkVgczRjqT4kZ",
"Sawyer_chair_bernhard_0146_00XX.zip": "1nWnHDSQq33INXdOmIAL_28wrd6BKEUr-",
}
# url = 'https://drive.google.com/uc?id=' + unique google drive ID
# compression format = '.zip'
for key, value in demos.items():
url = "https://drive.google.com/uc?id=" + value
outfile = os.path.join("demos", key)
if os.path.exists(outfile):
print("already downloaded", outfile)
else:
gdown.download(url, outfile, quiet=False)
answer = input("Do you want to unzip demos? [y/n] ")
if answer == "y":
for key in demos.keys():
furniture_name = key.rsplit("_", 1)[0]
demo_path = os.path.join("demos", furniture_name)
os.makedirs(demo_path, exist_ok=True)
zip_file = os.path.join("demos", key)
with ZipFile(zip_file, "r") as zf:
zf.extractall(demo_path)
|
134676
|
from socket import gethostbyname
from random import randint
# proxy: https://luminati.io/
def get_luminati_session(username, password):
"""Returns a new sticky Luminati Proxy Session."""
port = 22225
ip = gethostbyname("zproxy.lum-superproxy.io")
session_id = randint(1000, 9999)
return (
f"http://{username}-session-glob_{session_id}:{password}@{ip}:{port}",
session_id,
)
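# A minimal usage sketch (credentials and target URL are illustrative), showing
# how the returned proxy URL would typically be wired into `requests`:
#
#   proxy_url, session_id = get_luminati_session("my_username", "my_password")
#   proxies = {"http": proxy_url, "https": proxy_url}
#   requests.get("http://example.com", proxies=proxies)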
|
134689
|
from typing import Iterable, Any
from numpy.random import RandomState
def shuffled_cycle(iterable: Iterable[Any], rng: RandomState, nb_loops: int = -1) -> Iterable[Any]:
"""
Yield each element of `iterable` one by one, then shuffle the elements
and start yielding from the start. Stop after `nb_loops` loops.
Arguments:
iterable: Iterable containing the elements to yield.
rng: Random generator used to shuffle the elements after each loop.
        nb_loops: Number of additional shuffled passes made after the initial
            in-order pass. If set to -1, loop an infinite number of times.
"""
elements = []
for e in iterable:
elements.append(e)
yield e
cpt = nb_loops
while cpt != 0:
cpt -= 1
rng.shuffle(elements)
for e in elements:
yield e
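# A minimal behavioural sketch (values are illustrative):
#
#   rng = RandomState(0)
#   gen = shuffled_cycle([1, 2, 3], rng, nb_loops=1)
#   [next(gen) for _ in range(6)]  # [1, 2, 3] followed by one shuffled pass
#   next(gen)                      # raises StopIteration: the generator is exhausted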
|
134692
|
import file_helper
import shutil
import os
import re
import yaml
import json
#
# The `specification` folder in the azure-rest-api-specs repo contains the folder hierarchy for the swagger specs
#
# specification
# |-service1 (e.g. `cdn` or `compute`)
# | |-common
# | |-quickstart-templates
# | |-data-plane
# | |-resource-manager (we're only interested in the contents of this folder)
# | |- resource-type1 (e.g. `Microsoft.Compute`)
# | | |- common
# | | | |- *.json (want these)
# | | |- preview
# | | | |- 2016-04-20-preview
# | | | |- *.json
# | | |- stable
# | | | |- 2015-06-15
# | | | |- *.json
# | | | |- 2017-12-01
# | | | |- *.json
# | | | |- examples
# | | | |- 2018-10-01
# | | | |- *.json (want these)
# | | | |- examples
# | |- readme.md (this lists api versions and the files in each version)
# | |- misc files (e.g. readme)
# ...
#
#
# For each top level folder (service name) iterate the resource type folders under resource-manager
# For each resource type find the latest stable release (or the latest preview if no stable is available)
# and then take the json files in that directory (ignoring subfolders such as examples)
#
#
# The output to create is
# swagger-specs
# |-service1 (e.g. `cdn` or `compute`)
# | |-common (want these)
# | |-quickstart-templates
# | |-data-plane
# | |- resource-type1 (e.g. `Microsoft.Compute`)
# | | |- common
# | | | |- *.json (want these)
# | | |- stable (NB - may preview if no stable)
# | | | |- 2018-10-01
# | | | |- *.json (want these)
# | |- api-set.json (based on content in readme.md but easier for subsequent parsing)
# | |-resource-manager (we're only interested in the contents of this folder)
# | |- resource-type1 (e.g. `Microsoft.Compute`)
# | | |- common
# | | | |- *.json (want these)
# | | |- stable (NB - may preview if no stable)
# | | | |- 2018-10-01
# | | | |- *.json (want these)
# | |- api-set.json (based on content in readme.md but easier for subsequent parsing)
# ...
class ApiSet:
def __init__(self, resource_provider_name, base_folder, api_version):
self.resource_provider_name = resource_provider_name
self.base_folder = base_folder
self.api_version = api_version
def get_resource_provider_name(self):
return self.resource_provider_name
def get_base_folder(self):
return self.base_folder
def get_api_version(self):
return self.api_version
class ApiVersion:
def __init__(self, name, input_files, addition_input_file_paths):
self.name = name
self.input_files = input_files
self.addition_input_file_paths = addition_input_file_paths
def get_name(self):
return self.name
def get_input_files(self):
return self.input_files + self.addition_input_file_paths
def to_json(self):
return json.dumps(self.__dict__, ensure_ascii=False, sort_keys=True)
tag_regex = re.compile("openapi-type: [a-z\\-]+\ntag: ([a-z\\-0-9]*)")
tag_from_header_regex = re.compile("### Tag: (package-[0-9]{4}-[0-9]{2}.*)")
def get_api_version_tag(resource_provider_name, readme_contents, overrides):
override = overrides.get(resource_provider_name)
if override != None:
return override
match = tag_regex.search(readme_contents)
if match == None:
return None
tag = match.group(1)
if 'preview' not in tag:
return tag
# If the default is a 'preview', return the stable api version if it exists,
# if not return the tag with 'preview' in it
stable_match = tag_from_header_regex.search(readme_contents)
if stable_match == None:
return tag
return stable_match.group(1)
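# For reference, `tag_regex` above matches the basic-information block used in
# azure-rest-api-specs readmes, e.g. (illustrative contents):
#
#   openapi-type: arm
#   tag: package-2019-06
#
# which would yield the version tag "package-2019-06".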
code_block_end_regex = re.compile("^[\\s]*```[\\s]*$", flags=re.MULTILINE)
def find_api_version(resource_provider_name, readme_contents, version_tag, input_file_additions):
# Regex to match: ```yaml $(tag) == 'the-version-tag`
# Also match: ```yaml $(tag) == 'the-version-tag` || $(tag) == 'some-other-tag'
# But don't match ```yaml $(tag) == 'the-version-tag' && $(another-condition)
start_match = re.search(
"^```[\\s]*yaml [^&^\\n]*\\$\\(tag\\) == '" + version_tag + "'[^&^\\n]*$",
readme_contents,
flags=re.MULTILINE,
)
if start_match == None:
return None
end_match = code_block_end_regex.search(readme_contents, start_match.end())
yaml_contents = readme_contents[start_match.end() : end_match.start()]
yaml_data = yaml.load(yaml_contents, Loader=yaml.BaseLoader)
input_files = []
if yaml_data != None:
input_files = [file.replace("\\", "/") for file in yaml_data["input-file"]]
additional_input_file_paths = get_additional_files_for_version(input_file_additions, resource_provider_name, version_tag)
api_version = ApiVersion(
version_tag,
input_files,
additional_input_file_paths
)
return api_version
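# For reference, `find_api_version` above extracts the `input-file` list from a
# readme code block of this shape (paths are illustrative):
#
#   ```yaml $(tag) == 'package-2019-06'
#   input-file:
#     - Microsoft.Compute/stable/2019-06-01/compute.json
#   ```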
def get_api_version_from_readme(resource_provider_name, readme_path, version_overrides, input_file_additions):
if not os.path.isfile(readme_path):
return None
print("==> Opening: " + readme_path)
with open(readme_path, "r", encoding="utf8") as stream:
contents = stream.read()
version_tag = get_api_version_tag(resource_provider_name, contents, version_overrides)
if version_tag == None:
print("==> no version tag found in readme: " + readme_path)
return None
api_version = find_api_version(resource_provider_name, contents, version_tag, input_file_additions)
return api_version
def copy_api_sets_to_swagger_specs(api_sets, source_folder, target_folder):
for api_set in api_sets:
print("\nCopying " + api_set.get_resource_provider_name())
api_version = api_set.get_api_version()
resource_provider_source = (
source_folder
+ "/" + api_set.get_base_folder()
)
resource_provider_target = (
target_folder
+ "/" + api_set.get_base_folder()
)
# The core of this method is to copy the files defined by the api version
# There are some places where additional files that aren't listed in the definition tend to live
# For now we're handling the separate cases (e.g. `common`)
# We _could_ load the specs and scan for linked files and build out the list that way
# Doing that would remove the need for these additional checks
        # as well as fixing the problem with definitions referenced back in other folders as with cosmos-db etc
# Look for `common` folder under the `resource-manager` folder
file_helper.copy_child_folder_if_exists(
resource_provider_source,
resource_provider_target,
"/common",
)
# Look for `common` folders under the resource type folder
resource_type_folders = set(
[x[0 : x.index("/")] for x in api_version.get_input_files()]
)
for resource_type_folder in resource_type_folders:
file_helper.copy_child_folder_if_exists(
resource_provider_source,
resource_provider_target,
resource_type_folder + "/common",
)
# Look for `entityTypes` or `definitions` folders under api versions
api_version_folders = set(
[x[0 : x.rfind("/")] for x in api_version.get_input_files()]
)
for api_version_folder in api_version_folders:
file_helper.copy_child_folder_if_exists(
resource_provider_source,
resource_provider_target,
api_version_folder + "/entityTypes",
)
file_helper.copy_child_folder_if_exists(
resource_provider_source,
resource_provider_target,
api_version_folder + "/definitions",
)
# Hack: Handle the case where a package version folder has files which aren't in the README docs
file_helper.copy_child_folder_if_exists(
resource_provider_source,
resource_provider_target,
api_version_folder,
ignore='examples'
)
# find 'common.json' or ... 'Common.json'
if os.path.exists(resource_provider_source + "/" + api_version_folder + "/common.json"):
file_helper.copy_file_ensure_paths(resource_provider_source, resource_provider_target, api_version_folder + "/common.json")
elif os.path.exists(resource_provider_source + "/" + api_version_folder + "/Common.json"):
file_helper.copy_file_ensure_paths(resource_provider_source, resource_provider_target, api_version_folder + "/Common.json")
# Copy the files defined in the api version
for file in api_version.get_input_files():
file_helper.copy_file_ensure_paths(resource_provider_source, resource_provider_target, file)
# Write api-set.json file per folder with contents to load for swagger-codegen
api_set_filename = resource_provider_target + "/api-set.json"
print("Writing " + api_set_filename)
with open(api_set_filename, "w") as f:
f.write(api_version.to_json())
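# For reference, the api-set.json written above serialises the ApiVersion fields
# with keys sorted alphabetically (see to_json), e.g. (illustrative values):
#
#   {"addition_input_file_paths": [], "input_files": ["Microsoft.Compute/stable/2019-06-01/compute.json"], "name": "package-2019-06"}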
def get_api_set_for_folder(spec_folder, api_folder, resource_provider_name, version_overrides, input_file_additions):
api_version = get_api_version_from_readme(resource_provider_name, api_folder + "/readme.md", version_overrides, input_file_additions)
if api_version == None:
return None
spec_relative_folder = api_folder[len(spec_folder) + 1 :]
api_set = ApiSet(
resource_provider_name,
spec_relative_folder,
api_version
)
return api_set
def get_additional_files_for_version(input_file_additions, resource_provider_name, api_version):
files_to_add = []
try:
files_to_add = input_file_additions[resource_provider_name][api_version]
except KeyError:
pass
return files_to_add
def get_api_sets(spec_folder, version_overrides, input_file_additions):
rp_folders = sorted([f.path for f in os.scandir(spec_folder) if f.is_dir()])
api_sets = []
for folder in rp_folders:
resource_provider_name = folder.split("/")[-1]
if version_overrides.get(resource_provider_name) == "":
print("Resource provider " + resource_provider_name + " is skipped in config")
continue
got_api_set = False
for api_type_folder in ["resource-manager", "data-plane"]:
qualified_api_type_folder = folder + "/" + api_type_folder
if not os.path.exists(qualified_api_type_folder):
continue
api_set = get_api_set_for_folder(spec_folder, qualified_api_type_folder, resource_provider_name, version_overrides, input_file_additions)
if api_set != None:
api_sets.append(api_set)
got_api_set = True
else:
# didn't find readme.md under (e.g.) search/data-plane/
# look for search/data-plane/*/readme.md
print("\n*************************************************************************************")
print(qualified_api_type_folder)
# sub_folders = [f.path for f in os.scandir(qualified_api_type_folder) if f.is_dir() and os.path.exists(qualified_api_type_folder + "/" + f.path + "/readme.md")]
sub_folders = [f.path for f in os.scandir(qualified_api_type_folder) if f.is_dir()]
for sub_folder in sub_folders:
print(sub_folder)
api_set = get_api_set_for_folder(spec_folder, sub_folder, resource_provider_name, version_overrides, input_file_additions)
if api_set != None:
print("got api_set")
api_sets.append(api_set)
got_api_set = True
if not got_api_set:
print("***No api version found, ignoring: " + folder)
return api_sets
|
134732
|
import toolz
import toolz.curried
from toolz.curried import (take, first, second, sorted, merge_with, reduce,
merge, operator as cop)
from collections import defaultdict
from importlib import import_module
from operator import add
def test_take():
assert list(take(2)([1, 2, 3])) == [1, 2]
def test_first():
assert first is toolz.itertoolz.first
def test_merge():
assert merge(factory=lambda: defaultdict(int))({1: 1}) == {1: 1}
assert merge({1: 1}) == {1: 1}
assert merge({1: 1}, factory=lambda: defaultdict(int)) == {1: 1}
def test_merge_with():
assert merge_with(sum)({1: 1}, {1: 2}) == {1: 3}
def test_merge_with_list():
assert merge_with(sum, [{'a': 1}, {'a': 2}]) == {'a': 3}
def test_sorted():
assert sorted(key=second)([(1, 2), (2, 1)]) == [(2, 1), (1, 2)]
def test_reduce():
assert reduce(add)((1, 2, 3)) == 6
def test_module_name():
assert toolz.curried.__name__ == 'toolz.curried'
def test_curried_operator():
for k, v in vars(cop).items():
if not callable(v):
continue
if not isinstance(v, toolz.curry):
try:
# Make sure it is unary
v(1)
except TypeError:
try:
v('x')
except TypeError:
pass
else:
continue
raise AssertionError(
'toolz.curried.operator.%s is not curried!' % k,
)
# Make sure this isn't totally empty.
assert len(set(vars(cop)) & {'add', 'sub', 'mul'}) == 3
def test_curried_namespace():
exceptions = import_module('toolz.curried.exceptions')
namespace = {}
def should_curry(func):
if not callable(func) or isinstance(func, toolz.curry):
return False
nargs = toolz.functoolz.num_required_args(func)
if nargs is None or nargs > 1:
return True
return nargs == 1 and toolz.functoolz.has_keywords(func)
def curry_namespace(ns):
return {
name: toolz.curry(f) if should_curry(f) else f
for name, f in ns.items() if '__' not in name
}
from_toolz = curry_namespace(vars(toolz))
from_exceptions = curry_namespace(vars(exceptions))
namespace.update(toolz.merge(from_toolz, from_exceptions))
namespace = toolz.valfilter(callable, namespace)
curried_namespace = toolz.valfilter(callable, toolz.curried.__dict__)
if namespace != curried_namespace:
missing = set(namespace) - set(curried_namespace)
if missing:
raise AssertionError('There are missing functions in toolz.curried:\n %s'
% ' \n'.join(sorted(missing)))
extra = set(curried_namespace) - set(namespace)
if extra:
raise AssertionError('There are extra functions in toolz.curried:\n %s'
% ' \n'.join(sorted(extra)))
unequal = toolz.merge_with(list, namespace, curried_namespace)
unequal = toolz.valfilter(lambda x: x[0] != x[1], unequal)
messages = []
for name, (orig_func, auto_func) in sorted(unequal.items()):
if name in from_exceptions:
messages.append('%s should come from toolz.curried.exceptions' % name)
elif should_curry(getattr(toolz, name)):
messages.append('%s should be curried from toolz' % name)
else:
messages.append('%s should come from toolz and NOT be curried' % name)
raise AssertionError('\n'.join(messages))
|
134796
|
from cas.common.assets.models import (
Asset,
AssetBuildContext,
SerialDriver,
PrecompileResult,
)
from typing import List
class CaptionDriver(SerialDriver):
"""
Driver that handles compiling closed captions
"""
def _tool_name(self):
return "captioncompiler"
    def precompile(self, context: AssetBuildContext, asset: Asset) -> PrecompileResult:
asset.outpath = asset.path.with_suffix(".dat")
return PrecompileResult([asset.path], [asset.outpath])
def compile(self, context: AssetBuildContext, asset: Asset) -> bool:
args = [str(self.tool), str(asset.path)]
returncode = self.env.run_tool(args, source=True)
return returncode == 0
_driver = CaptionDriver
|
134827
|
import inspect
import re
import sys
import unicodedata
import yaml
# from reversion import revisions as reversion
import reversion
from django.conf import settings
from django.contrib.auth.models import Group
from django.db import models
from django.db.models import Q
from django.db.models.signals import m2m_changed, post_save
from django.dispatch import receiver
from django.urls import reverse
from guardian.shortcuts import assign_perm, remove_perm
from apis_core.apis_metainfo.models import Collection, TempEntityClass, Uri
from apis_core.apis_vocabularies.models import (
EventType,
InstitutionType,
PlaceType,
ProfessionType,
Title,
WorkType,
)
from apis_core.helper_functions import EntityRelationFieldGenerator
BASE_URI = getattr(settings, "APIS_BASE_URI", "http://apis.info/")
class AbstractEntity(TempEntityClass):
"""
    Abstract superclass which encapsulates common logic between the different entity kinds and provides various methods
    relating to either all or a specific entity kind.
    Most of the class methods are designed to be used in the subclasses, as they consider contexts which depend on
    the subclass entity type. So they are to be understood in that dynamic context.
"""
# Placeholder for list filter classes attached to each entity later
list_filter_class = None
class Meta:
abstract = True
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.__class__.create_relation_methods_from_manytomany_fields()
# Methods dealing with individual data retrievals of instances
####################################################################################################################
def __str__(self):
if self.__class__ == Person:
def valid(name):
return name != "" and name is not None
if valid(self.first_name) and valid(self.name):
return "{}, {}".format(self.name, self.first_name)
elif valid(self.first_name) and not valid(self.name):
return "{}, {}".format("no surename provided", self.first_name)
elif not valid(self.first_name) and valid(self.name):
return self.name
elif not valid(self.first_name) and not valid(self.name):
return "no name provided"
else:
if self.name != "":
return self.name
else:
return "no name provided"
@classmethod
def get_or_create_uri(cls, uri):
uri = str(uri)
try:
if re.match(r"^[0-9]*$", uri):
p = cls.objects.get(pk=uri)
else:
p = cls.objects.get(uri__uri=uri)
return p
        except Exception:
print("Found no object corresponding to given uri.")
return False
@classmethod
def set_list_filter_class(cls, list_filter_class):
cls.list_filter_class = list_filter_class
# Various Methods enabling convenient shortcuts between entities, relations, fields, etc
####################################################################################################################
@classmethod
def create_relation_methods_from_manytomany_fields(cls):
"""
Creates methods on an entity class with which other related entities can be called without the need to consider
potential self-references (the A and B sides therein).
The resulting methods follow the syntax:
<entity>.get_related_<entity>_instances()
e.g. for Person:
person.get_related_work_instances()
or
person.get_related_person_instances()
Note that with these methods it is not necessary to differentiate between A and B entities when self-relations exist.
The result of any such method call is the queryset of the related entities.
(And not a ManyToManyManager as is the case when calling <entity>.<entity>_set where in the case of self-relation
it must be differentiated between A and B entities, e.g. person.personA_set )
        To my understanding it was not possible to change managers in such a way that two (the A and the B) could be combined
        into one manager. Hence these additional shortcut methods.
:return: None
"""
def create_function_from_manytomany_field_to_other_entity(
entity_manytomany_field,
):
"""
creates the individual method from a ManyToMany field by calling the manager's objects.all()
This method creation has to be done here in a separate method, so that it can be called once before assignment
as otherwise the variable 'entity_name' in the loop below changes with each iteration and with that also the
method references (due to python's "late binding").
A method call in between thus forces the content of 'entity_name' to be assigned for good to the
respective class ( = forced early binding).
For more details on this: https://stackoverflow.com/questions/3431676/creating-functions-in-a-loop
:param entity_manytomany_field: the ManyToManyManager to another model
:return: method which will call the managers's objects.all() method
"""
return lambda self: getattr(self, entity_manytomany_field).all().distinct()
def create_function_from_manytomany_field_to_self_entity(
entityA_manytomany_field, entityB_manytomany_field
):
"""
Same method as above, but with two managers instead of one for the case of self-relations.
Both managers' objects.all() methods are called and their queryset results are unionised for the
shortcut method of an entity to its own related entities.
:param entityA_manytomany_field: ManyToManyManager to entity A in a self-relation
            :param entityB_manytomany_field: ManyToManyManager to entity B in a self-relation
:return: method to call both and return the distinct union of them
"""
return lambda self: (
getattr(self, entityA_manytomany_field)
.all()
.union(getattr(self, entityB_manytomany_field).all())
).distinct()
for entity_name in cls.get_all_entity_names():
# Iterate over each entity defined within this models' module
related_entity_function_name = "get_related_" + entity_name + "_instances"
if not hasattr(cls, related_entity_function_name):
if cls.__name__.lower() == entity_name:
# If the related entity is the same as this current one, then set the names of the related functions
# to A and B and also combine them into one function where both A and B are returned.
related_entityA_function_name = (
"get_related_" + entity_name + "A_instances"
)
related_entityB_function_name = (
"get_related_" + entity_name + "B_instances"
)
entityA_manytomany_field = entity_name + "A_set"
entityB_manytomany_field = entity_name + "B_set"
setattr(
cls,
related_entityA_function_name,
create_function_from_manytomany_field_to_other_entity(
entityA_manytomany_field
),
)
setattr(
cls,
related_entityB_function_name,
create_function_from_manytomany_field_to_other_entity(
entityB_manytomany_field
),
)
setattr(
cls,
related_entity_function_name,
create_function_from_manytomany_field_to_self_entity(
entityA_manytomany_field, entityB_manytomany_field
),
)
else:
# If the related entity is a different one, then just build on the usual names
entity_manytomany_field = entity_name + "_set"
setattr(
cls,
related_entity_function_name,
create_function_from_manytomany_field_to_other_entity(
entity_manytomany_field
),
)
# Methods dealing with all entities
####################################################################################################################
_all_entity_classes = None
_all_entity_names = None
@classmethod
def get_all_entity_classes(cls):
"""
:return: list of all python classes of the entities defined within this models' module
"""
if cls._all_entity_classes == None:
entity_classes = []
entity_names = []
for entity_name, entity_class in inspect.getmembers(
sys.modules[__name__], inspect.isclass
):
if (
entity_class.__module__ == "apis_core.apis_entities.models"
and entity_name != "ent_class"
and entity_name != "AbstractEntity"
):
entity_classes.append(entity_class)
entity_names.append(entity_name.lower())
cls._all_entity_classes = entity_classes
cls._all_entity_names = entity_names
return cls._all_entity_classes
@classmethod
def get_entity_class_of_name(cls, entity_name):
"""
:param entity_name: str : The name of an entity
:return: The model class of the entity respective to the given name
"""
for entity_class in cls.get_all_entity_classes():
if entity_class.__name__.lower() == entity_name.lower():
return entity_class
raise Exception("Could not find entity class of name:", entity_name)
@classmethod
def get_all_entity_names(cls):
"""
:return: list of all class names in lower case of the entities defined within this models' module
"""
if cls._all_entity_names == None:
cls.get_all_entity_classes()
return cls._all_entity_names
# Methods dealing with related entities
####################################################################################################################
_related_entity_field_names = None
@classmethod
def get_related_entity_field_names(cls):
"""
:return: a list of names of all ManyToMany field names relating to entities from the respective entity class
E.g. for Person.get_related_entity_field_names() or person_instance.get_related_entity_field_names() ->
['event_set', 'institution_set', 'personB_set', 'personA_set', 'place_set', 'work_set']
Note: this method depends on the 'generate_all_fields' function of the EntityRelationFieldGenerator class
which wires the ManyToMany Fields into the entities and respective relationtypes.
        This method is nevertheless defined here within AbstractEntity for documentation purposes.
"""
if cls._related_entity_field_names == None:
raise Exception("_related_entity_field_names was not initialized yet.")
else:
return cls._related_entity_field_names
@classmethod
def add_related_entity_field_name(cls, entity_field_name):
"""
:param entity_field_name: the name of one of several ManyToMany fields created automatically
:return: None
Note: this method depends on the 'generate_all_fields' function of the EntityRelationFieldGenerator class
which wires the ManyToMany Fields into the entities and respective relationtypes.
        This method is nevertheless defined here within AbstractEntity for documentation purposes.
"""
if cls._related_entity_field_names == None:
cls._related_entity_field_names = []
cls._related_entity_field_names.append(entity_field_name)
def get_related_entity_instances(self):
"""
:return: list of queryset of all entity instances which are somehow related to the calling entity instance
"""
queryset_list = []
for entity_name in self.get_all_entity_names():
queryset = getattr(self, "get_related_" + entity_name + "_instances")()
if len(queryset) > 0:
queryset_list.append(queryset)
return queryset_list
# Methods dealing with related relations
####################################################################################################################
@classmethod
def get_related_relation_classes(cls):
"""
:return: list of python classes of the relations which are related to the respective entity class
E.g. for Place.get_related_relation_classes() or place_instance.get_related_relation_classes() ->
[ InstitutionPlace, PersonPlace, PlaceEvent, PlacePlace, PlaceWork ]
"""
# TODO __sresch__ : check for best practice on local imports vs circularity problems.
from apis_core.apis_relations.models import AbstractRelation
return AbstractRelation.get_relation_classes_of_entity_class(cls)
@classmethod
def get_related_relation_field_names(cls):
"""
:return: list of class names in lower case of the relations which are related to the respective entity class
E.g. for Place.get_related_relation_names() or place_instance.get_related_relation_names() ->
['institutionplace_set', 'personplace_set', 'placeevent_set', 'placeplace_set', 'placework_set']
"""
# TODO __sresch__ : check for best practice on local imports vs circularity problems.
from apis_core.apis_relations.models import AbstractRelation
return AbstractRelation.get_relation_field_names_of_entity_class(cls)
def get_related_relation_instances(self):
"""
:return: list of queryset of all relation instances which are somehow related to the calling entity instance
"""
queryset_list = []
for relation_class in self.get_related_relation_classes():
q_args = Q()
if relation_class.get_related_entity_classA() == self.__class__:
q_args |= Q(**{relation_class.get_related_entity_field_nameA(): self})
if relation_class.get_related_entity_classB() == self.__class__:
q_args |= Q(**{relation_class.get_related_entity_field_nameB(): self})
queryset = relation_class.objects.filter(q_args)
queryset_list.extend(list(queryset))
return queryset_list
# Methods dealing with related relationtypes
####################################################################################################################
_related_relationtype_classes = None
_related_relationtype_field_names = None
_related_relationtype_names = None
@classmethod
def get_related_relationtype_classes(cls):
"""
:return: list of python classes of the relation types which are related to the respective entity class
E.g. for Place.get_related_relation_classes() or place_instance.get_related_relation_classes() ->
[ InstitutionPlaceRelation, PersonPlaceRelation, PlaceEventRelation, PlacePlaceRelation, PlaceWorkRelation ]
"""
if cls._related_relationtype_classes == None:
relationtype_classes = []
relationtype_names = []
# TODO __sresch__ : check for best practice on local imports vs circularity problems.
from apis_core.apis_vocabularies.models import AbstractRelationType
for (
relationtype_class
) in AbstractRelationType.get_all_relationtype_classes():
relationtype_name = relationtype_class.__name__.lower()
if cls.__name__.lower() in relationtype_name:
relationtype_classes.append(relationtype_class)
relationtype_names.append(relationtype_name)
cls._related_relationtype_classes = relationtype_classes
cls._related_relationtype_names = relationtype_names
return cls._related_relationtype_classes
@classmethod
def get_related_relationtype_names(cls):
"""
:return: list of class names in lower case of the relation types which are related to the respective entity class
E.g. for Place.get_related_relation_classes() or place_instance.get_related_relation_classes() ->
[ 'institutionplacerelation', 'personplacerelation', 'placeeventrelation', 'placeplacerelation', 'placeworkrelation' ]
"""
if cls._related_relationtype_names == None:
cls.get_related_relationtype_classes()
return cls._related_relationtype_names
@classmethod
def get_related_relationtype_field_names(cls):
"""
:return: a list of names of all ManyToMany field names relating to relationtypes from the respective entity class
E.g. for PersonPerson.get_related_relationtype_field_names() or person_instance.get_related_relationtype_field_names() ->
['event_relationtype_set', 'institution_relationtype_set', 'personB_relationtype_set', 'personA_relationtype_set', 'place_relationtype_set', 'work_relationtype_set']
Note: this method depends on the 'generate_all_fields' function of the EntityRelationFieldGenerator class
which wires the ManyToMany Fields into the entities and respective relationtypes.
        This method is nevertheless defined here within AbstractEntity for documentation purposes.
"""
if cls._related_relationtype_field_names == None:
raise Exception(
"_related_relationtype_field_names was not initialized yet."
)
else:
return cls._related_relationtype_field_names
@classmethod
def add_related_relationtype_field_name(cls, relationtype_field_name):
"""
:param entity_field_name: the name of one of several ManyToMany fields created automatically
:return: None
Note: this method depends on the 'generate_all_fields' function of the EntityRelationFieldGenerator class
which wires the ManyToMany Fields into the entities and respective relationtypes.
        This method is nevertheless defined here within AbstractEntity for documentation purposes.
"""
if cls._related_relationtype_field_names == None:
cls._related_relationtype_field_names = []
cls._related_relationtype_field_names.append(relationtype_field_name)
def get_related_relationtype_instances(self):
"""
:return: list of queryset of all relationtype instances which are somehow related to the calling entity instance
"""
queryset_list = []
for entity_name in self.get_all_entity_names():
queryset = None
if entity_name != self.__class__.__name__.lower():
queryset = (
getattr(self, entity_name + "_relationtype_set").all().distinct()
)
else:
querysetA = (
getattr(self, entity_name + "A_relationtype_set").all().distinct()
)
querysetB = (
getattr(self, entity_name + "B_relationtype_set").all().distinct()
)
queryset = querysetA.union(querysetB)
if queryset and len(queryset) > 0:
queryset_list.append(queryset)
return queryset_list
@reversion.register(follow=["tempentityclass_ptr"])
class Person(AbstractEntity):
GENDER_CHOICES = (
("female", "female"),
("male", "male"),
("third gender", "third gender"),
)
first_name = models.CharField(
max_length=255,
help_text="The persons´s forename. In case of more then one name...",
blank=True,
null=True,
)
profession = models.ManyToManyField(ProfessionType, blank=True)
title = models.ManyToManyField(Title, blank=True)
gender = models.CharField(
max_length=15, choices=GENDER_CHOICES, blank=True, null=True
)
def save(self, *args, **kwargs):
if self.first_name:
# secure correct unicode encoding
if self.first_name != unicodedata.normalize("NFC", self.first_name):
self.first_name = unicodedata.normalize("NFC", self.first_name)
super(Person, self).save(*args, **kwargs)
return self
@reversion.register(follow=["tempentityclass_ptr"])
class Place(AbstractEntity):
kind = models.ForeignKey(
PlaceType, blank=True, null=True, on_delete=models.SET_NULL
)
lat = models.FloatField(blank=True, null=True, verbose_name="latitude")
lng = models.FloatField(blank=True, null=True, verbose_name="longitude")
def save(self, *args, **kwargs):
if isinstance(self.lat, float) and isinstance(self.lng, float):
self.status = "distinct"
super(Place, self).save(*args, **kwargs)
return self
@reversion.register(follow=["tempentityclass_ptr"])
class Institution(AbstractEntity):
kind = models.ForeignKey(
InstitutionType, blank=True, null=True, on_delete=models.SET_NULL
)
@reversion.register(follow=["tempentityclass_ptr"])
class Event(AbstractEntity):
kind = models.ForeignKey(
EventType, blank=True, null=True, on_delete=models.SET_NULL
)
@reversion.register(follow=["tempentityclass_ptr"])
class Work(AbstractEntity):
kind = models.ForeignKey(WorkType, blank=True, null=True, on_delete=models.SET_NULL)
a_ents = getattr(settings, "APIS_ADDITIONAL_ENTITIES", False)
def prepare_fields_dict(fields_list, vocabs, vocabs_m2m):
res = dict()
for f in fields_list:
res[f["name"]] = getattr(models, f["field_type"])(**f["attributes"])
for v in vocabs:
res[v] = models.ForeignKey(
f"apis_vocabularies.{v}", blank=True, null=True, on_delete=models.SET_NULL
)
for v2 in vocabs_m2m:
res[v2] = models.ManyToManyField(f"apis_vocabularies.{v2}", blank=True)
return res
ents_cls_list = []
if a_ents:
with open(a_ents, "r") as ents_file:
ents = yaml.load(ents_file, Loader=yaml.CLoader)
print(ents)
for ent in ents["entities"]:
attributes = prepare_fields_dict(
ent["fields"], ent.get("vocabs", []), ent.get("vocabs_m2m", [])
)
attributes["__module__"] = __name__
ent_class = type(ent["name"], (AbstractEntity,), attributes)
globals()[ent["name"]] = ent_class
ents_cls_list.append(ent_class)
reversion.register(ent_class, follow=["tempentityclass_ptr"])
@receiver(post_save, dispatch_uid="create_default_uri")
def create_default_uri(sender, instance, **kwargs):
if kwargs["created"] and sender in [Person, Institution, Place, Work, Event] + ents_cls_list:
if BASE_URI.endswith("/"):
base1 = BASE_URI[:-1]
else:
base1 = BASE_URI
uri_c = "{}{}".format(
base1, reverse("GetEntityGenericRoot", kwargs={"pk": instance.pk}),
)
uri2 = Uri(uri=uri_c, domain="apis default", entity=instance)
uri2.save()
lst_entities_complete = [
globals()[x]
for x in globals()
if isinstance(globals()[x], models.base.ModelBase)
and globals()[x].__module__ == "apis_core.apis_entities.models"
and x != "AbstractEntity"
and globals()[x]
]
lst_entities_complete = list(dict.fromkeys(lst_entities_complete))
perm_change_senders = [
getattr(getattr(x, "collection"), "through") for x in lst_entities_complete
] # TODO: Inspect. This list here will contain only duplicates if `lst_entities_complete` contains all entities
@receiver(
m2m_changed, dispatch_uid="create_object_permissions",
)
def create_object_permissions(sender, instance, **kwargs):
if kwargs["action"] == "pre_add" and sender in perm_change_senders:
perms = []
for j in kwargs["model"].objects.filter(pk__in=kwargs["pk_set"]):
perms.extend(j.groups_allowed.all())
for x in perms:
assign_perm("change_" + instance.__class__.__name__.lower(), x, instance)
assign_perm("delete_" + instance.__class__.__name__.lower(), x, instance)
elif kwargs["action"] == "post_remove" and sender in perm_change_senders:
perms = []
perms_keep = []
for j in kwargs["model"].objects.filter(pk__in=kwargs["pk_set"]):
perms.extend(j.groups_allowed.all())
for u in instance.collection.all():
perms_keep.extend(u.groups_allowed.all())
rm_perms = set(perms) - set(perms_keep)
for x in rm_perms:
remove_perm("change_" + instance.__class__.__name__.lower(), x, instance)
remove_perm("delete_" + instance.__class__.__name__.lower(), x, instance)
@receiver(
m2m_changed,
sender=Collection.groups_allowed.through,
dispatch_uid="add_usergroup_collection",
)
def add_usergroup_collection(sender, instance, **kwargs):
if kwargs["action"] == "pre_add":
for x in kwargs["model"].objects.filter(pk__in=kwargs["pk_set"]):
for z in ["change", "delete"]:
for y in [Person, Institution, Place, Event, Work]:
assign_perm(
z + "_" + y.__name__.lower(),
x,
y.objects.filter(collection=instance),
)
if "registration" in getattr(settings, "INSTALLED_APPS", []):
from registration.backends.simple.views import RegistrationView
from registration.signals import user_registered
@receiver(
user_registered,
sender=RegistrationView,
dispatch_uid="add_registered_user_to_group",
)
def add_user_to_group(sender, user, request, **kwargs):
user_group = getattr(settings, "APIS_AUTO_USERGROUP", None)
if user_group is not None:
user.groups.add(Group.objects.get(name=user_group))
# Call the field generation function here, after all relevant entity classes have been defined above
EntityRelationFieldGenerator.generate_all_fields()
|
134830
|
from typing import List
import numpy as np
import segmentation_models_pytorch as smp
from segmentation_models_pytorch.base.modules import Activation
import torch
from torch import nn
from torch.nn import functional as F
from torchvision import datasets
from torchvision.transforms import transforms
from baal import ActiveLearningDataset
pascal_voc_ids = np.array([
[0, 0, 0],
[128, 0, 0],
[0, 128, 0],
[128, 128, 0],
[0, 0, 128],
[128, 0, 128],
[0, 128, 128],
[128, 128, 128],
[64, 0, 0],
[192, 0, 0],
[64, 128, 0],
[192, 128, 0],
[64, 0, 128],
[192, 0, 128],
[64, 128, 128],
[192, 128, 128],
[0, 64, 0],
[128, 64, 0],
[0, 192, 0],
[128, 192, 0],
[0, 64, 128],
])
def active_pascal(
path="/tmp",
*args,
transform=transforms.ToTensor(),
test_transform=transforms.ToTensor(),
**kwargs,
):
"""Get active Pascal-VOC 2102 datasets.
Arguments:
path : str
The root folder for the Pascal dataset
Returns:
ActiveLearningDataset
the active learning dataset, training data
Dataset
the evaluation dataset
"""
return (
ActiveLearningDataset(datasets.VOCSegmentation(
path, image_set='train', transform=transform, download=False, *args, **kwargs
)),
datasets.VOCSegmentation(path, image_set='val', transform=test_transform, download=False,
*args, **kwargs),
)
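# A minimal usage sketch (assumes the VOC archives already exist under `path`,
# since download=False is passed above):
#
#   active_set, val_set = active_pascal(path="/tmp")
#   active_set.label_randomly(100)  # baal: label an initial random pool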
class SegmentationHead(nn.Sequential):
def __init__(self, in_channels, out_channels, kernel_size=3, activation=None, upsampling=1):
dropout = nn.Dropout2d(0.5)
conv2d = nn.Conv2d(in_channels, out_channels, kernel_size=kernel_size,
padding=kernel_size // 2)
upsampling = nn.UpsamplingBilinear2d(
scale_factor=upsampling) if upsampling > 1 else nn.Identity()
activation = Activation(activation)
super().__init__(dropout, conv2d, upsampling, activation)
def add_dropout(model: smp.Unet, decoder_channels: List[int] = (256, 128, 64, 32, 16),
classes=1, activation=None):
seg_head = SegmentationHead(
in_channels=decoder_channels[-1],
out_channels=classes,
activation=activation,
kernel_size=3,
)
model.add_module('segmentation_head', seg_head)
model.initialize()
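# A minimal usage sketch (encoder name and class count are illustrative):
#
#   model = smp.Unet("resnet18", classes=21)
#   add_dropout(model, classes=21)  # swap in the dropout-equipped segmentation head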
class FocalLoss(nn.Module):
"""
References:
Author: clcarwin
Site https://github.com/clcarwin/focal_loss_pytorch/blob/master/focalloss.py
"""
def __init__(self, gamma=0, alpha=None, size_average=True):
super(FocalLoss, self).__init__()
self.gamma = gamma
self.alpha = alpha
if isinstance(alpha, (float, int)): self.alpha = torch.Tensor([alpha, 1 - alpha])
if isinstance(alpha, list): self.alpha = torch.Tensor(alpha)
self.size_average = size_average
def forward(self, input, target):
if input.dim() > 2:
input = input.view(input.size(0), input.size(1), -1) # N,C,H,W => N,C,H*W
input = input.transpose(1, 2) # N,C,H*W => N,H*W,C
input = input.contiguous().view(-1, input.size(2)) # N,H*W,C => N*H*W,C
target = target.view(-1, 1)
logpt = F.log_softmax(input, dim=1)
logpt = logpt.gather(1, target)
logpt = logpt.view(-1)
pt = logpt.data.exp()
if self.alpha is not None:
if self.alpha.type() != input.data.type():
self.alpha = self.alpha.type_as(input.data)
select = (target != 0).type(torch.LongTensor).to(self.alpha.device)
at = self.alpha.gather(0, select.data.view(-1))
logpt = logpt * at
loss = -1 * (1 - pt) ** self.gamma * logpt
if self.size_average:
return loss.mean()
else:
return loss.sum()
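# A minimal usage sketch (shapes are illustrative):
#
#   criterion = FocalLoss(gamma=2, alpha=0.25)
#   logits = torch.randn(4, 21, 8, 8)         # N, C, H, W
#   target = torch.randint(0, 21, (4, 8, 8))  # N, H, W class indices
#   loss = criterion(logits, target)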
|
134862
|
from sacred import Experiment
import os.path as osp
import os
import numpy as np
import yaml
import cv2
import torch
from torch.utils.data import DataLoader
from tracktor.config import get_output_dir, get_tb_dir
from tracktor.solver import Solver
from tracktor.datasets.factory import Datasets
from tracktor.resnet import resnet50
ex = Experiment()
ex.add_config('experiments/cfgs/siamese.yaml')
Solver = ex.capture(Solver, prefix='siamese.solver')
@ex.automain
def my_main(_config, siamese):
# set all seeds
torch.manual_seed(siamese['seed'])
torch.cuda.manual_seed(siamese['seed'])
np.random.seed(siamese['seed'])
torch.backends.cudnn.deterministic = True
print(_config)
output_dir = osp.join(get_output_dir(siamese['module_name']), siamese['name'])
tb_dir = osp.join(get_tb_dir(siamese['module_name']), siamese['name'])
sacred_config = osp.join(output_dir, 'sacred_config.yaml')
if not osp.exists(output_dir):
os.makedirs(output_dir)
with open(sacred_config, 'w') as outfile:
yaml.dump(_config, outfile, default_flow_style=False)
#########################
# Initialize dataloader #
#########################
print("[*] Initializing Dataloader")
db_train = Datasets(siamese['db_train'], siamese['dataloader'])
db_train = DataLoader(db_train, batch_size=1, shuffle=True)
if siamese['db_val']:
db_val = None
#db_val = DataLoader(db_val, batch_size=1, shuffle=True)
else:
db_val = None
##########################
# Initialize the modules #
##########################
print("[*] Building CNN")
network = resnet50(pretrained=True, **siamese['cnn'])
network.train()
network.cuda()
##################
# Begin training #
##################
print("[*] Solving ...")
# build scheduling like in "In Defense of the Triplet Loss for Person Re-Identification"
# from Hermans et al.
lr = siamese['solver']['optim_args']['lr']
iters_per_epoch = len(db_train)
    # we want to keep lr constant until iter 15000 and from there to iter 25000 apply an exponential decay
    l = lambda epoch: 1 if epoch * iters_per_epoch < 15000 else 0.001 ** ((epoch * iters_per_epoch - 15000) / (25000 - 15000))
#else:
# l = None
max_epochs = 25000 // len(db_train.dataset) + 1 if 25000%len(db_train.dataset) else 25000 // len(db_train.dataset)
solver = Solver(output_dir, tb_dir, lr_scheduler_lambda=l)
solver.train(network, db_train, db_val, max_epochs, 100, model_args=siamese['model_args'])
|
134889
|
from pathlib import Path
from typing import Any, Iterable, List, Tuple, Type
from black import FileMode, format_str
from click import secho
from jinja2 import Environment, PackageLoader
from reactant.exceptions import RenderFailed
from reactant.main import PeeweeORM
from reactant.orm.peewee import PeeweeCombustor, PeeweeModel
env = Environment(
loader=PackageLoader("reactant"),
trim_blocks=True,
)
class PeeweeCombustionChamber:
"""This class contains methods for rendering the files. Processes PeeweeORM subclasses."""
def __init__(self, reactants: List[Type[PeeweeORM]]) -> None:
self.reactants = reactants
def get_models(self) -> List[PeeweeModel]:
models = []
for reactant in self.reactants:
            model = PeeweeCombustor.generate_peewee_orm_model(reactant)
            models.append(model)
return models
def render_manager(self) -> None:
        models = self.get_models()
        fields_list = []
        for model in models:
            for field in model.fields:
                fields_list.append(field.type)
        fields_set = set(fields_list)
        models_code, models_name_str = self.render_models(models, fields_set)
        self.write_to_file(models_code, models_name_str)
def render_models(
self, models: List[PeeweeModel], fields_set: Iterable
) -> Tuple[str, str]:
item_name = "models"
try:
template_models = env.get_template("peewee_models.txt.jinja")
output_models = template_models.render(models=models, fields_set=fields_set)
except Exception:
raise RenderFailed(item_name)
else:
return (output_models, item_name)
def write_to_file(self, item: Any, item_name: str) -> None:
        p = Path("reactant_products/peewee")
        p.mkdir(parents=True, exist_ok=True)
        formatted_code = format_str(item, mode=FileMode())
        with open(f"{p}/{item_name}.py", "w") as file:
            file.write(formatted_code)
        self._success_secho(item_name)
def _success_secho(self, item_name: str):
return secho(f"Peewee {item_name}.py finished rendering.", fg="green")
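# Hypothetical usage sketch (User and Post stand in for real PeeweeORM subclasses):
# chamber = PeeweeCombustionChamber([User, Post])
# chamber.render_manager()  # writes formatted models to reactant_products/peewee/models.py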
|
134905
|
from unittest import mock
from django.test import TestCase
from django.test.utils import override_settings
from cid.cursor import CidCursorWrapper
class TestCidCursor(TestCase):
def setUp(self):
self.cursor = mock.Mock()
self.cursor.execute = mock.Mock(return_value=None)
self.cursor.executemany = mock.Mock(return_value=None)
self.cursor_wrapper = CidCursorWrapper(self.cursor)
@mock.patch('cid.cursor.get_cid')
def test_adds_comment(self, get_cid):
get_cid.return_value = 'testing-cursor'
expected = "/* cid: testing-cursor */\nSELECT 1;"
self.assertEqual(
expected,
self.cursor_wrapper.add_comment("SELECT 1;")
)
@override_settings(CID_SQL_COMMENT_TEMPLATE='correlation_id={cid}')
@mock.patch('cid.cursor.get_cid')
    def test_adds_comment_setting_overridden(self, get_cid):
get_cid.return_value = 'testing-cursor'
expected = "/* correlation_id=testing-cursor */\nSELECT 1;"
self.assertEqual(
expected,
self.cursor_wrapper.add_comment("SELECT 1;")
)
@mock.patch('cid.cursor.get_cid')
def test_no_comment_when_cid_is_none(self, get_cid):
get_cid.return_value = None
expected = "SELECT 1;"
self.assertEqual(
expected,
self.cursor_wrapper.add_comment("SELECT 1;")
)
@mock.patch('cid.cursor.CidCursorWrapper.add_comment')
def test_execute_calls_add_comment(self, add_comment):
sql = "SELECT 1;"
self.cursor_wrapper.execute(sql)
add_comment.assert_called_with(sql)
@mock.patch('cid.cursor.CidCursorWrapper.add_comment')
def test_executemany_calls_add_comment(self, add_comment):
sql = "SELECT 1;"
self.cursor_wrapper.executemany(sql, [])
add_comment.assert_called_with(sql)
@mock.patch('cid.cursor.get_cid')
def test_escape_cid(self, get_cid):
get_cid.return_value = '/* a correlation id with funny characters */'
expected = '/* cid: \\/\\* a correlation id with funny characters \\*\\/ */\nSELECT 1;'
self.assertEqual(self.cursor_wrapper.add_comment('SELECT 1;'), expected)
|
134925
|
import cv2
import numpy as np
from cv_viewer.utils import *
import pyzed.sl as sl
#----------------------------------------------------------------------
# 2D VIEW
#----------------------------------------------------------------------
def cvt(pt, scale):
'''
Function that scales point coordinates
'''
out = [pt[0]*scale[0], pt[1]*scale[1]]
return out
def render_2D(left_display, img_scale, objects, is_tracking_on):
    '''
    Parameters
        left_display (np.array): numpy array containing image data
        img_scale (list[float])
        objects (list[sl.ObjectData])
        is_tracking_on (bool): whether object tracking is enabled
    '''
overlay = left_display.copy()
# Render skeleton joints and bones
for obj in objects:
if render_object(obj, is_tracking_on):
if len(obj.keypoint_2d) > 0:
color = generate_color_id_u(obj.id)
# Draw skeleton bones
for part in SKELETON_BONES:
kp_a = cvt(obj.keypoint_2d[part[0].value], img_scale)
kp_b = cvt(obj.keypoint_2d[part[1].value], img_scale)
# Check that the keypoints are inside the image
if(kp_a[0] < left_display.shape[1] and kp_a[1] < left_display.shape[0]
and kp_b[0] < left_display.shape[1] and kp_b[1] < left_display.shape[0]
and kp_a[0] > 0 and kp_a[1] > 0 and kp_b[0] > 0 and kp_b[1] > 0 ):
cv2.line(left_display, (int(kp_a[0]), int(kp_a[1])), (int(kp_b[0]), int(kp_b[1])), color, 1, cv2.LINE_AA)
# Get spine base coordinates to create backbone
left_hip = obj.keypoint_2d[sl.BODY_PARTS.LEFT_HIP.value]
right_hip = obj.keypoint_2d[sl.BODY_PARTS.RIGHT_HIP.value]
spine = (left_hip + right_hip) / 2
kp_spine = cvt(spine, img_scale)
kp_neck = cvt(obj.keypoint_2d[sl.BODY_PARTS.NECK.value], img_scale)
# Check that the keypoints are inside the image
if(kp_spine[0] < left_display.shape[1] and kp_spine[1] < left_display.shape[0]
and kp_neck[0] < left_display.shape[1] and kp_neck[1] < left_display.shape[0]
and kp_spine[0] > 0 and kp_spine[1] > 0 and kp_neck[0] > 0 and kp_neck[1] > 0
and left_hip[0] > 0 and left_hip[1] > 0 and right_hip[0] > 0 and right_hip[1] > 0 ):
cv2.line(left_display, (int(kp_spine[0]), int(kp_spine[1])), (int(kp_neck[0]), int(kp_neck[1])), color, 1, cv2.LINE_AA)
# Skeleton joints
for kp in obj.keypoint_2d:
cv_kp = cvt(kp, img_scale)
if(cv_kp[0] < left_display.shape[1] and cv_kp[1] < left_display.shape[0]):
cv2.circle(left_display, (int(cv_kp[0]), int(cv_kp[1])), 3, color, -1)
if(kp_spine[0] < left_display.shape[1] and kp_spine[1] < left_display.shape[0]
and left_hip[0] > 0 and left_hip[1] > 0 and right_hip[0] > 0 and right_hip[1] > 0 ):
cv2.circle(left_display, (int(kp_spine[0]), int(kp_spine[1])), 3, color, -1)
cv2.addWeighted(left_display, 0.9, overlay, 0.1, 0.0, left_display)
|
134939
|
from acceptability.modules import LMGenerator
if __name__ == '__main__':
trainer = LMGenerator()
trainer.load()
trainer.generate()
|
134942
|
from __future__ import print_function
import sys, os
sys.path.insert(1, os.path.join("..","..",".."))
import h2o
from tests import pyunit_utils
from h2o.estimators.word2vec import H2OWord2vecEstimator
def word2vec_to_frame():
print("Test converting a word2vec model to a Frame")
words = h2o.create_frame(rows=1000,cols=1,string_fraction=1.0,missing_fraction=0.0)
embeddings = h2o.create_frame(rows=1000,cols=100,real_fraction=1.0,missing_fraction=0.0)
word_embeddings = words.cbind(embeddings)
w2v_model = H2OWord2vecEstimator(pre_trained=word_embeddings)
w2v_model.train()
w2v_frame = w2v_model.to_frame()
word_embeddings.names = w2v_frame.names
    assert word_embeddings.as_data_frame().equals(w2v_frame.as_data_frame()), "Source and generated embeddings differ"
if __name__ == "__main__":
pyunit_utils.standalone_test(word2vec_to_frame)
else:
word2vec_to_frame()
|
134969
|
from django.conf import settings
from importlib import import_module
from django.utils.module_loading import import_string
try:
from django.urls import URLPattern as RegexURLPattern
from django.urls import URLResolver as RegexURLResolver
except ImportError:
from django.core.urlresolvers import RegexURLResolver, RegexURLPattern
from addict import Dict
from rest_framework.views import APIView
from .endpoint import Endpoint
from django.contrib.admindocs.views import simplify_regex
class BaseAPIParser:
"""
Class to iherit other parsers from
"""
def __init__(self, patterns=None):
self.patterns = patterns
if not patterns:
try:
root_urlconf = import_string(settings.ROOT_URLCONF)
except ImportError:
# Handle a case when there's no dot in ROOT_URLCONF
root_urlconf = import_module(settings.ROOT_URLCONF)
if hasattr(root_urlconf, 'urls'):
self.patterns = root_urlconf.urls.urlpatterns
else:
self.patterns = root_urlconf.urlpatterns
self.parse()
def parse(self):
raise NotImplementedError("Inherit some parser from this class first")
@staticmethod
def _is_drf_pattern(pattern):
if hasattr(pattern.callback, 'view_class'):
return issubclass(pattern.callback.view_class, APIView)
class TreeAPIParser(BaseAPIParser):
"""
Creates a nice tree of API
"""
def __init__(self, *args, **kwargs):
self.endpoints_tree = Dict()
super().__init__(*args, **kwargs)
def parse(self):
self.parse_tree(self.patterns, self.endpoints_tree)
def parse_tree(self, urlpatterns, parent_node, prefix=''):
for pattern in urlpatterns:
if isinstance(pattern, RegexURLResolver):
try:
regex = pattern._regex if hasattr(pattern, "_regex") else pattern.pattern._regex
                except AttributeError:
regex = ""
child_node_name = simplify_regex(regex).strip('/') if regex else ""
self.parse_tree(
urlpatterns=pattern.url_patterns,
parent_node=parent_node[child_node_name] if child_node_name else parent_node,
prefix='%s/%s' % (prefix, child_node_name)
)
elif isinstance(pattern, RegexURLPattern) and self._is_drf_pattern(pattern):
api_endpoint = Endpoint(pattern, prefix)
parent_node[api_endpoint.name] = api_endpoint
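# Hypothetical usage sketch: with no explicit patterns, the parser walks
# settings.ROOT_URLCONF and collects DRF endpoints into a nested addict.Dict.
# parser = TreeAPIParser()
# print(parser.endpoints_tree)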
|
134988
|
from django.conf.urls import url
from blog.views import IndexView, PostView, CommentView, RepositoryView, RepositoryDetailView, TagListView, \
CategoryListView, AuthorPostListView, CommentDeleteView
urlpatterns = [
url(r'^$', IndexView.as_view()),
url(r'^post/(?P<pk>[0-9]+)$', PostView.as_view()),
url(r'^comment/add/(?P<pk>[0-9]+)$', CommentView.as_view()),
url(r'^comment/delete/(?P<pk>[0-9]+)$', CommentDeleteView.as_view()),
url(r'^repository$', RepositoryView.as_view()),
url(r'^repository/(?P<pk>[0-9]+)$', RepositoryDetailView.as_view()),
url(r'^tag/(?P<slug>[\w\u4e00-\u9fa5]+)$', TagListView.as_view()),
url(r'^category/(?P<slug>[\w\u4e00-\u9fa5]+)$', CategoryListView.as_view()),
url(r'^author/(?P<pk>[0-9]+)$', AuthorPostListView.as_view())
]
|
134993
|
from datetime import timedelta
# 3rd party imports
from django.conf import settings
from django.test import TestCase
from django.test.utils import override_settings
from django.urls import reverse
from django.utils.timezone import now
from libya_site.tests.factories import DEFAULT_USER_PASSWORD, UserFactory
from register.models import Registration
from register.tests.factories import OfficeFactory, RegistrationCenterFactory
from reporting_api import create_test_data, tasks
from reporting_api.reports import empty_report_store
from voting.models import Election
from voting.tests.factories import ElectionFactory
from vr_dashboard.views.views import ELECTION_SESSION_KEY
URI_NAMESPACE = 'vr_dashboard:'
# all URIs which contain no parameters
ALL_URI_NAMES = ('csv', 'daily-csv', 'election-day', 'election-day-center', 'national', 'offices',
'offices-detail', 'regions', 'sms', 'subconstituencies', 'weekly',
'reports', 'center-csv',
'election-day-hq', 'election-day-preliminary')
PUBLIC_URI_NAMES = ('national', 'offices', 'regions')
# simple URIs that also support a CSV rendering
SUPPORT_CSV_FORMAT = ('election-day', 'election-day-center')
NUM_REGISTRATIONS = 100
class TestElectionSelection(TestCase):
def setUp(self):
self.election_1 = ElectionFactory(
polling_start_time=now() - timedelta(days=10),
polling_end_time=now() - timedelta(days=9)
)
self.election_2 = ElectionFactory(
polling_start_time=now() - timedelta(days=4),
polling_end_time=now() - timedelta(days=3)
)
self.staff_user = UserFactory()
self.staff_user.is_staff = True
self.staff_user.save()
def test_election_in_session(self):
        assert self.client.login(username=self.staff_user.username, password=DEFAULT_USER_PASSWORD)
self.client.get(reverse('vr_dashboard:election-day'))
self.assertEqual(self.client.session[ELECTION_SESSION_KEY],
Election.objects.get_most_current_election().id)
self.client.get(reverse('vr_dashboard:election-day') + '?election=%d' % self.election_1.id)
self.assertEqual(self.client.session[ELECTION_SESSION_KEY], self.election_1.id)
self.client.get(reverse('vr_dashboard:election-day-center'))
self.assertEqual(self.client.session[ELECTION_SESSION_KEY], self.election_1.id)
self.client.get(reverse('vr_dashboard:election-day-center')
+ '?election=%d' % self.election_2.id)
self.assertEqual(self.client.session[ELECTION_SESSION_KEY], self.election_2.id)
self.client.get(reverse('vr_dashboard:election-day'))
self.assertEqual(self.client.session[ELECTION_SESSION_KEY], self.election_2.id)
class TestRegistrationData(TestCase):
def setUp(self):
create_test_data.create(num_registrations=NUM_REGISTRATIONS)
self.unused_center = RegistrationCenterFactory()
self.unused_office = OfficeFactory()
tasks.election_day()
tasks.registrations()
self.staff_user = UserFactory()
self.staff_user.is_staff = True
self.staff_user.save()
@override_settings(HIDE_PUBLIC_DASHBOARD=True)
def test_auth_hiding_public(self):
"""
When user not logged in,
        ensure that we get a redirect to the login page for non-public pages,
and to the HNEC site for public pages.
"""
for uri_name in ALL_URI_NAMES:
uri = reverse(URI_NAMESPACE + uri_name)
rsp = self.client.get(uri)
if uri_name in PUBLIC_URI_NAMES:
self.assertRedirects(rsp, settings.PUBLIC_REDIRECT_URL,
fetch_redirect_response=False,
msg_prefix='Path %s not handled properly' % uri)
else:
self.assertRedirects(rsp, reverse(settings.LOGIN_URL) + "?next=" + uri,
msg_prefix='Path %s not handled properly' % uri)
@override_settings(HIDE_PUBLIC_DASHBOARD=False)
def test_auth_not_hiding_public(self):
"""
When user not logged in,
ensure that we get a redirect to the login page for non-public pages,
but not for public pages.
"""
for uri_name in ALL_URI_NAMES:
uri = reverse(URI_NAMESPACE + uri_name)
rsp = self.client.get(uri)
if uri_name in PUBLIC_URI_NAMES:
self.assertEqual(200, rsp.status_code,
'Request to %s failed with status %d' % (uri, rsp.status_code))
else:
self.assertRedirects(rsp, reverse(settings.LOGIN_URL) + "?next=" + uri,
msg_prefix='Path %s not handled properly' % uri)
@override_settings(HIDE_PUBLIC_DASHBOARD=True)
def test_staff_not_hiding_public(self):
"""
When a staff user is logged in, they can view "public" pages
even if HIDE_PUBLIC_DASHBOARD is True.
"""
        assert self.client.login(username=self.staff_user.username, password=DEFAULT_USER_PASSWORD)
for uri_name in PUBLIC_URI_NAMES:
uri = reverse(URI_NAMESPACE + uri_name)
rsp = self.client.get(uri)
self.assertEqual(200, rsp.status_code,
'Request to %s failed with status %d' % (uri, rsp.status_code))
def test_basic_operation(self):
""" For the time being, simply ensure that the VR dashboard pages don't blow up. """
        assert self.client.login(username=self.staff_user.username, password=DEFAULT_USER_PASSWORD)
for uri_name in ALL_URI_NAMES:
uri = reverse(URI_NAMESPACE + uri_name)
rsp = self.client.get(uri)
self.assertEqual(200, rsp.status_code,
'Request to %s failed with status %d' % (uri, rsp.status_code))
if uri_name in SUPPORT_CSV_FORMAT:
rsp = self.client.get(uri + '?format=csv')
self.assertEqual(200, rsp.status_code,
'CSV request to %s failed with status %d' % (uri, rsp.status_code))
# pages without fixed paths
# test election-day-office-n with both default and CSV renderings
# First, we must find an office that actually has registrations
some_valid_office_id = Registration.objects.first().registration_center.office.id
uri = reverse(URI_NAMESPACE + 'election-day-office-n', args=[some_valid_office_id])
rsp = self.client.get(uri)
self.assertEqual(200, rsp.status_code,
'Request to %s failed with status %d' % (uri, rsp.status_code))
rsp = self.client.get(uri + '?format=csv')
self.assertEqual(200, rsp.status_code,
'Request to %s failed with status %d' % (uri, rsp.status_code))
def test_invalid_office_center(self):
        assert self.client.login(username=self.staff_user.username, password=DEFAULT_USER_PASSWORD)
# We should get 404 from truly bogus ids as well as from centers or offices that
# exist but aren't used.
for input_uri_name, invalid_id in [
('vr_dashboard:election-day-center-n', self.unused_center.id),
('vr_dashboard:election-day-center-n', 999999),
('vr_dashboard:election-day-office-n', self.unused_office.id),
('vr_dashboard:election-day-office-n', 999999)
]:
uri = reverse(input_uri_name, args=[invalid_id])
rsp = self.client.get(uri)
self.assertContains(rsp, str(invalid_id), status_code=404)
class TestWithNoRegistrationData(TestCase):
def setUp(self):
create_test_data.create(num_registrations=0, num_registration_dates=0)
tasks.election_day()
tasks.registrations()
self.staff_user = UserFactory()
self.staff_user.is_staff = True
self.staff_user.save()
def test_basic_operation(self):
""" For the time being, simply ensure that the VR dashboard pages (and report generation tasks)
don't blow up when there aren't any registrations. """
        assert self.client.login(username=self.staff_user.username, password=DEFAULT_USER_PASSWORD)
for uri_name in ALL_URI_NAMES:
uri = reverse(URI_NAMESPACE + uri_name)
rsp = self.client.get(uri)
self.assertEqual(200, rsp.status_code,
'Request to %s failed with status %d' % (uri, rsp.status_code))
class TestWithNoGeneratedReports(TestCase):
@classmethod
def setUpTestData(cls): # No database changes
empty_report_store()
def setUp(self):
self.staff_user = UserFactory()
self.staff_user.is_staff = True
self.staff_user.save()
def test_no_report_error(self):
expected_page_flags = {
'election-day': ['election_day_overview_page', 'staff_page'],
'election-day-center': ['election_day_center_page', 'staff_page'],
'election-day-hq': ['election_day_hq_page', 'staff_page'],
'election-day-preliminary': ['election_day_preliminary_votes_page', 'staff_page']
}
expected_status_code = 503
        assert self.client.login(username=self.staff_user.username, password=DEFAULT_USER_PASSWORD)
for uri_name in ALL_URI_NAMES:
uri = reverse(URI_NAMESPACE + uri_name)
rsp = self.client.get(uri)
self.assertEqual(expected_status_code, rsp.status_code,
'Request to %s had status %d instead of %d'
% (uri, rsp.status_code, expected_status_code))
if uri_name in expected_page_flags:
for expected in expected_page_flags[uri_name]:
self.assertIn(
expected, rsp.context,
'Error page for %s doesn\'t set page flag "%s"' % (uri_name, expected)
)
for uri_name in ['election-day-office-n', 'election-day-center-n']:
uri = reverse(URI_NAMESPACE + uri_name, args=[999999])
rsp = self.client.get(uri)
self.assertEqual(expected_status_code, rsp.status_code,
'Request to %s had status %d instead of %d'
% (uri, rsp.status_code, expected_status_code))
class TestRedirects(TestCase):
@override_settings(HIDE_PUBLIC_DASHBOARD=False)
def test_dashboard_root_redirects(self):
""" /data goes to /data/ because it needs a trailing slash (via
Django), and /data/ goes to /data/national/ because that's the
        default dashboard page (via a view).
"""
rsp = self.client.get('/data')
self.assertRedirects(rsp, '/data/', status_code=301,
fetch_redirect_response=False)
rsp = self.client.get('/data/')
self.assertRedirects(rsp, reverse(URI_NAMESPACE + 'national'),
fetch_redirect_response=False)
|
134995
|
import os
import sys
import numpy as np
from joblib import Parallel, delayed
import joblib
import argparse
import importlib
from itertools import product
import collections
from copy import deepcopy
from mcpy.utils import filesafe
from mcpy import plotting
def _get(opts, key, default):
return opts[key] if (key in opts) else default
def _check_valid_config(config):
assert 'dgps' in config, "config dict must contain dgps"
assert 'dgp_opts' in config, "config dict must contain dgp_opts"
assert 'method_opts' in config, "config dict must contain method_opts"
assert 'mc_opts' in config, "config dict must contain mc_opts"
assert 'metrics' in config, "config dict must contain metrics"
assert 'methods' in config, "config dict must contain methods"
assert 'plots' in config, "config dict must contain plots"
assert 'target_dir' in config, "config must contain target_dir"
assert 'reload_results' in config, "config must contain reload_results"
assert 'n_experiments' in config['mc_opts'], "config[mc_opts] must contain n_experiments"
assert 'seed' in config['mc_opts'], "config[mc_opts] must contain seed"
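# For reference, a config module therefore exposes a dict of this shape
# (all names and values below are illustrative placeholders):
# CONFIG = {
#     'dgps': {'my_dgp': dgp_fn}, 'dgp_opts': {'n_samples': 100},
#     'methods': {'my_method': method_fn}, 'method_opts': {},
#     'metrics': {'bias': metric_fn}, 'plots': {},
#     'mc_opts': {'n_experiments': 10, 'seed': 123},
#     'target_dir': 'results', 'reload_results': False,
# }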
class MonteCarlo:
def __init__(self, config):
self.config = config
_check_valid_config(self.config)
config['param_str'] = '_'.join(
['{}_{}'.format(filesafe(k), v) for k, v in self.config['mc_opts'].items()])
config['param_str'] += '_' + '_'.join(
['{}_{}'.format(filesafe(k), v) for k, v in self.config['dgp_opts'].items()])
config['param_str'] += '_' + '_'.join(
['{}_{}'.format(filesafe(k), v) for k, v in self.config['method_opts'].items()])
return
def experiment(self, exp_id):
''' Runs an experiment on a single randomly generated instance and sample and returns
the parameter estimates for each method and the evaluated metrics for each method
'''
np.random.seed(exp_id)
param_estimates = {}
true_params = {}
for dgp_name, dgp_fn in self.config['dgps'].items():
data, true_param = dgp_fn(self.config['dgp_opts'])
true_params[dgp_name] = true_param
param_estimates[dgp_name] = {}
for method_name, method in self.config['methods'].items():
param_estimates[dgp_name][method_name] = method(
data, self.config['method_opts'])
return param_estimates, true_params
def run(self):
''' Runs multiple experiments in parallel on randomly generated instances and samples and returns
the parameter estimates for each method and the evaluated metrics for each method across all
experiments
'''
random_seed = self.config['mc_opts']['seed']
if not os.path.exists(self.config['target_dir']):
os.makedirs(self.config['target_dir'])
results_file = os.path.join(
self.config['target_dir'], 'results_{}.jbl'.format(self.config['param_str']))
if self.config['reload_results'] and os.path.exists(results_file):
results = joblib.load(results_file)
else:
results = Parallel(n_jobs=_get(self.config['mc_opts'], 'n_jobs', -1), verbose=1)(
delayed(self.experiment)(random_seed + exp_id)
for exp_id in range(self.config['mc_opts']['n_experiments']))
joblib.dump(results, results_file)
param_estimates = {}
metric_results = {}
for dgp_name in self.config['dgps'].keys():
param_estimates[dgp_name] = {}
metric_results[dgp_name] = {}
for method_name in self.config['methods'].keys():
param_estimates[dgp_name][method_name] = np.array(
[results[i][0][dgp_name][method_name] for i in range(self.config['mc_opts']['n_experiments'])])
metric_results[dgp_name][method_name] = {}
for metric_name, metric_fn in self.config['metrics'].items():
metric_results[dgp_name][method_name][metric_name] = np.array([metric_fn(results[i][0][dgp_name][method_name], results[i][1][dgp_name])
for i in range(self.config['mc_opts']['n_experiments'])])
for plot_name, plot_fn in self.config['plots'].items():
if isinstance(plot_fn, dict):
plotting.instance_plot(
plot_name, param_estimates, metric_results, self.config, plot_fn)
else:
plot_fn(param_estimates, metric_results, self.config)
return param_estimates, metric_results
class MonteCarloSweep:
def __init__(self, config):
self.config = config
_check_valid_config(self.config)
config['param_str'] = '_'.join(['{}_{}'.format(filesafe(
k), self._stringify_param(v)) for k, v in self.config['mc_opts'].items()])
config['param_str'] += '_' + '_'.join(['{}_{}'.format(filesafe(
k), self._stringify_param(v)) for k, v in self.config['dgp_opts'].items()])
config['param_str'] += '_' + '_'.join(['{}_{}'.format(filesafe(
k), self._stringify_param(v)) for k, v in self.config['method_opts'].items()])
return
def _stringify_param(self, param):
if hasattr(param, "__len__"):
return '{}_to_{}'.format(np.min(param), np.max(param))
else:
return param
def run(self):
dgp_sweep_params = []
dgp_sweep_param_vals = []
for dgp_key, dgp_val in self.config['dgp_opts'].items():
if hasattr(dgp_val, "__len__"):
dgp_sweep_params.append(dgp_key)
dgp_sweep_param_vals.append(dgp_val)
n_sweeps = len(list(product(*dgp_sweep_param_vals)))
if 'cluster_opts' in self.config:
n_nodes = _get(self.config['cluster_opts'], 'n_nodes', 1)
node_id = _get(self.config['cluster_opts'], 'node_id', 0)
else:
n_nodes = 1
node_id = 0
start_sweep, end_sweep = 0, 0
if node_id < n_nodes - 1:
node_splits = np.array_split(np.arange(n_sweeps), n_nodes - 1)
start_sweep, end_sweep = node_splits[node_id][0], node_splits[node_id][-1]
sweep_keys = []
sweep_params = []
sweep_metrics = []
inst_config = deepcopy(self.config)
# This is the node that loads results and plots sweep plots
if (n_nodes > 1) and (node_id == n_nodes - 1):
inst_config['reload_results'] = True
inst_config['plots'] = {}
for it, vec in enumerate(product(*dgp_sweep_param_vals)):
if (node_id == n_nodes - 1) or ((it >= start_sweep) and (it <= end_sweep)):
setting = list(zip(dgp_sweep_params, vec))
for k, v in setting:
inst_config['dgp_opts'][k] = v
params, metrics = MonteCarlo(inst_config).run()
sweep_keys.append(setting)
sweep_params.append(params)
sweep_metrics.append(metrics)
if node_id == n_nodes - 1:
for plot_key, plot_fn in self.config['sweep_plots'].items():
if isinstance(plot_fn, dict):
plotting.sweep_plot(
plot_key, sweep_keys, sweep_params, sweep_metrics, self.config, plot_fn)
else:
plot_fn(plot_key, sweep_keys, sweep_params,
sweep_metrics, self.config)
return sweep_keys, sweep_params, sweep_metrics
def monte_carlo_main():
parser = argparse.ArgumentParser(description='Process some integers.')
parser.add_argument('--config', type=str, help='config file')
args = parser.parse_args(sys.argv[1:])
config = importlib.import_module(args.config)
MonteCarlo(config.CONFIG).run()
if __name__ == "__main__":
monte_carlo_main()
|
135023
|
from .base import AuthenticationBase
class RevokeToken(AuthenticationBase):
"""Revoke Refresh Token endpoint
Args:
domain (str): Your auth0 domain (e.g: username.auth0.com)
"""
def revoke_refresh_token(self, client_id, token, client_secret=None):
"""Revokes a Refresh Token if it has been compromised
Each revocation request invalidates not only the specific token, but all other tokens
based on the same authorization grant. This means that all Refresh Tokens that have
been issued for the same user, application, and audience will be revoked.
Args:
client_id (str): The Client ID for your Application
token (str): The Refresh Token you want to revoke
client_secret (str, optional): The Client Secret for your Application.
Required for confidential applications.
See: https://auth0.com/docs/applications/application-types#confidential-applications
See: https://auth0.com/docs/api/authentication#refresh-token
"""
body = {
'client_id': client_id,
'token': token,
}
if client_secret:
body.update({'client_secret': client_secret})
return self.post('{}://{}/oauth/revoke'.format(self.protocol, self.domain), data=body)
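# Hypothetical usage sketch (domain and credentials are placeholders):
# RevokeToken('username.auth0.com').revoke_refresh_token(
#     client_id='YOUR_CLIENT_ID', token='REFRESH_TOKEN', client_secret='SECRET')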
|
135046
|
import random
from datetime import datetime
random.seed(datetime.now().timestamp())
class SoS(object):
def __init__(self, CSs, environment):
self.CSs = CSs
self.environment = environment
def run(self, tick):
logs = []
random.shuffle(self.CSs)
for CS in self.CSs:
result = CS.act(tick, self.environment)
if result:
logs.append(result)
logs.append(str(self.environment))
return logs
def reset(self):
for CS in self.CSs:
CS.reset()
self.resetEnvironment()
def resetEnvironment(self):
for i in range(len(self.environment)):
self.environment[i] = 0
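# Hypothetical usage sketch: a stub constituent system implementing the
# act/reset interface that SoS assumes; the environment is a plain list.
class _EchoCS(object):
    def act(self, tick, environment):
        environment[0] += 1
        return 'tick %d' % tick
    def reset(self):
        pass

if __name__ == "__main__":
    sos = SoS([_EchoCS()], [0, 0])
    print(sos.run(0))  # e.g. ['tick 0', '[1, 0]']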
|
135051
|
import torch
import torch.nn as nn
import torch.nn.functional as F
import pickle
import numpy as np
class RMTPP(nn.Module):
def __init__(self, cfg, args):
super(RMTPP, self).__init__()
self.cfg = cfg
self.args = args
if self.cfg.EMB_DIM != 0:
self.embedding = nn.Embedding(self.cfg.EVENT_CLASSES, self.cfg.EMB_DIM)
self.dropout = nn.Dropout(self.cfg.EMB_DROPOUT)
self.lstm = nn.LSTM(self.cfg.EMB_DIM + 1, self.cfg.RNN_HIDDEN_DIM, self.cfg.RNN_LAYERS)
else:
self.lstm = nn.LSTM(self.cfg.EVENT_CLASSES + 1, self.cfg.RNN_HIDDEN_DIM, self.cfg.RNN_LAYERS)
self.mlp = nn.Linear(self.cfg.RNN_HIDDEN_DIM, self.cfg.MLP_DIM)
self.event_linear = nn.Linear(self.cfg.MLP_DIM, self.cfg.EVENT_CLASSES)
self.time_linear = nn.Linear(self.cfg.MLP_DIM, 1)
def forward(self, input, length):
time_sequences = torch.tensor(input[:, :, 0:1], dtype=torch.float, device=self.args.device)
event_sequences = torch.tensor(input[:, :, 1:], dtype=torch.long, device=self.args.device)
if self.cfg.EMB_DIM != 0:
event_embedding = self.embedding(event_sequences)
event_embedding_dropout = self.dropout(event_embedding)
time_event_input = torch.cat((time_sequences, event_embedding_dropout), 2)
else:
event_one_hot = torch.zeros(event_sequences.shape[0], event_sequences.shape[1], self.cfg.EVENT_CLASSES,
dtype=torch.float, device=self.args.device).scatter_(2, event_sequences, 1.0)
time_event_input = torch.cat((time_sequences, event_one_hot), 2)
time_event_input_packed = nn.utils.rnn.pack_padded_sequence(time_event_input, length, batch_first=True)
h0 = torch.zeros(self.cfg.RNN_LAYERS, time_event_input.shape[0], self.cfg.RNN_HIDDEN_DIM,
dtype=torch.float, device=self.args.device, requires_grad=True)
c0 = torch.zeros(self.cfg.RNN_LAYERS, time_event_input.shape[0], self.cfg.RNN_HIDDEN_DIM,
dtype=torch.float, device=self.args.device, requires_grad=True)
output_packed, hidden = self.lstm(time_event_input_packed, (h0, c0))
output, _ = nn.utils.rnn.pad_packed_sequence(output_packed, batch_first=True)
output_mlp = torch.sigmoid(self.mlp(output[:, -1, :]))
event_output = self.event_linear(output_mlp)
event_output = F.log_softmax(event_output, dim=1)
time_output = self.time_linear(output_mlp)
return time_output, event_output
class RMTPPLoss(nn.Module):
def __init__(self, cfg, args):
super(RMTPPLoss, self).__init__()
self.cfg = cfg
self.args = args
self.intensity_w = nn.Parameter(torch.tensor(0.1, dtype=torch.float, device=args.device))
self.intensity_b = nn.Parameter(torch.tensor(0.1, dtype=torch.float, device=args.device))
event2index = pickle.load(open(self.cfg.EVENT_INDEX_FILE, 'rb'))
event_stat = np.zeros(len(event2index), dtype=np.float32)
for event in event2index.values():
event_stat[event['index']] = event['cnt']
event_stat = event_stat.sum() / event_stat
event_stat = torch.from_numpy(event_stat)
event_stat = event_stat.to(args.device)
self.event_loss = nn.NLLLoss(weight=event_stat)
def forward(self, output, target):
time_target = torch.tensor(target[:, 0], dtype=torch.float, device=self.args.device)
event_target = torch.tensor(target[:, 1], dtype=torch.long, device=self.args.device)
time_output = output[0].squeeze()
event_output = output[1]
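        # negative log-likelihood of the inter-event time under the RMTPP intensity
        # lambda(t) = exp(h + w*t + b): log f(t) = h + w*t + b + (exp(h + b) - exp(h + w*t + b)) / w,
        # where h is the hidden-state term (time_output), w = intensity_w, b = intensity_b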
time = -1 * torch.mean(time_output + self.intensity_w * time_target + self.intensity_b +
(torch.exp(time_output + self.intensity_b) -
torch.exp(time_output + self.intensity_w * time_target + self.intensity_b)) / self.intensity_w)
event = self.event_loss(event_output, event_target)
return time, event, self.cfg.LOSS_ALPHA * time + event
|
135060
|
import sys
import os
from os import path
import shutil
"""
Look at a "rendered" folder, move the rendered to the output path
Keep the empty folders in place (don't delete since it might still be rendered right)
Also copy the corresponding yaml in there
"""
input_path = sys.argv[1]
output_path = sys.argv[2]
yaml_path = sys.argv[3]
# This overrides the softlink
# os.makedirs(output_path, exist_ok=True)
renders = os.listdir(input_path)
is_rendered = [len(os.listdir(path.join(input_path, r, 'segmentation')))==160 for r in renders]
updated = 0
for i, r in enumerate(renders):
if is_rendered[i]:
if not path.exists(path.join(output_path, r)):
shutil.move(path.join(input_path, r), output_path)
prefix = r[:3]
shutil.copy2(path.join(yaml_path, 'yaml_%s'%prefix, '%s.yaml'%r), path.join(output_path, r))
updated += 1
else:
            print('path exists')
else:
# Nothing for now. Can do something later
pass
print('Number of completed renders: ', len(os.listdir(output_path)))
print('Number of updated renders: ', updated)
|
135154
|
from sklearn import tree
def train(X, y):
clf = tree.DecisionTreeClassifier(max_depth=10, random_state=0)
clf = clf.fit(X, y)
return clf
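# Minimal self-check sketch (synthetic data, not from the original project):
if __name__ == "__main__":
    import numpy as np
    X = np.random.rand(20, 4)
    y = (X[:, 0] > 0.5).astype(int)
    clf = train(X, y)
    print(clf.score(X, y))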
|
135202
|
joystick = runtime.createAndStart("joystick","Joystick")
sleep(4)
a = 0
rb = 0
def buttonA():
global a
a = msg_joystick_button1.data[0]
print a
check()
def buttonRB():
global rb
rb = msg_joystick_button6.data[0]
print rb
check()
def check():
if ((a == 1) and (rb == 1)):
print "Combo!"
elif ((a == 1) and (rb == 0)):
print "Button A pressed"
elif ((a == 0) and (rb == 1)):
print "Button RB pressed"
else:
print "Nothing pressed"
joystick.addListener("button1", python.name, "buttonA")
joystick.addListener("button6", python.name, "buttonRB")
|
135215
|
import os
import openai
from secrets import API_Token
from prompt import en_ru
translate_input = input("What to Translate: ")
openai.api_key = API_Token
response = openai.Completion.create(
engine="davinci",
prompt=en_ru + translate_input + "\nRussian: ",
temperature=0.5,
max_tokens=100,
top_p=1,
frequency_penalty=0,
presence_penalty=0,
stop=["###"]
)
print("Translation: " + response["choices"][0]["text"])
|
135217
|
import sys
import os
import errno
from fontTools.ttLib import TTFont
from os.path import dirname, abspath, join as pjoin
PYVER = sys.version_info[0]
BASEDIR = abspath(pjoin(dirname(__file__), os.pardir, os.pardir))
_enc_kwargs = {}
if PYVER >= 3:
_enc_kwargs = {'encoding': 'utf-8'}
def readTextFile(filename):
with open(filename, 'r', **_enc_kwargs) as f:
return f.read()
def mkdirs(path):
try:
os.makedirs(path)
except OSError as e:
if e.errno != errno.EEXIST:
raise # raises the error again
def loadTTFont(file):
return TTFont(file, recalcBBoxes=False, recalcTimestamp=False)
|
135220
|
import logging
import json
import os
import torch
import pickle
from cnn import CNN
import numpy as np
import gzip
from io import BytesIO, StringIO
OUTPUT_CONTENT_TYPE = 'text/csv'
INPUT_CONTENT_TYPE = 'application/x-npy'
logger = logging.getLogger(__name__)
image_names = []
def model_fn(model_dir):
model_info = {}
with open(os.path.join(model_dir, 'model_info.pth'), 'rb') as f:
model_info = torch.load(f)
print('model_info: {}'.format(model_info))
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
logger.info('Current device: {}'.format(device))
model = CNN(similarity_dims=model_info['simililarity-dims'])
with open(os.path.join(model_dir, 'model.pth'), 'rb') as f:
model.load_state_dict(torch.load(f))
model.eval()
logger.info(model)
return model
def input_fn(request_body, accept=INPUT_CONTENT_TYPE):
logger.info('Deserializing the generated input.')
if accept == INPUT_CONTENT_TYPE:
# logger.info(request_body)
logger.info(len(request_body))
logger.info(pickle.format_version)
logger.info(np.version.version)
request_body = gzip.decompress(request_body)
(names, tensors) = pickle.load(BytesIO(request_body), fix_imports=True)
global image_names
image_names = names
logger.info(tensors.shape)
logger.info(image_names)
return torch.from_numpy(tensors)
raise Exception('Requested unsupported ContentType: ' + accept)
def output_fn(prediction_output, accept=OUTPUT_CONTENT_TYPE):
logger.info('Serializing the generated output for '+accept)
if accept == OUTPUT_CONTENT_TYPE:
stream = StringIO()
for i in range(len(prediction_output)):
stream.write(image_names[i]+','+str(prediction_output[i])+'\n')
return stream.getvalue()
raise Exception('Requested unsupported ContentType in Accept: ' + accept)
def predict_fn(input_data, model):
logger.info('Making prediction.')
DEVICE = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
model = model.to(DEVICE)
input_data = input_data.to(DEVICE)
logger.info(input_data.shape)
img1 = input_data.narrow(0,0,1)
img2 = input_data.narrow(0,1,input_data.shape[0]-1)
print(img1.shape)
print(img2.shape)
logger.info(img1.shape)
logger.info(img2.shape)
distances = model.forward(img1,img2).tolist()
logger.info(distances)
return distances
|
135226
|
from googlemaps.timezone import timezone as _timezone
async def timezone(client, location, timestamp=None, language=None):
return await _timezone(client, location,
timestamp=timestamp,
language=language)
|
135275
|
import os
import pytest
from molecule import config
from molecule.verifier import ansible
@pytest.fixture
def _patched_ansible_verify(mocker):
m = mocker.patch("molecule.provisioner.ansible.Ansible.verify")
m.return_value = "patched-ansible-verify-stdout"
return m
@pytest.fixture
def _verifier_section_data():
return {"verifier": {"name": "ansible", "env": {"FOO": "bar"}}}
# NOTE(retr0h): The use of the `patched_config_validate` fixture, disables
# config.Config._validate from executing. Thus preventing odd side-effects
# throughout patched.assert_called unit tests.
@pytest.fixture
def _instance(_verifier_section_data, patched_config_validate, config_instance):
return ansible.Ansible(config_instance)
def test_config_private_member(_instance):
assert isinstance(_instance._config, config.Config)
def test_default_options_property(_instance):
assert {} == _instance.default_options
def test_default_env_property(_instance):
assert "MOLECULE_FILE" in _instance.default_env
assert "MOLECULE_INVENTORY_FILE" in _instance.default_env
assert "MOLECULE_SCENARIO_DIRECTORY" in _instance.default_env
assert "MOLECULE_INSTANCE_CONFIG" in _instance.default_env
@pytest.mark.parametrize("config_instance", ["_verifier_section_data"], indirect=True)
def test_env_property(_instance):
assert "bar" == _instance.env["FOO"]
def test_name_property(_instance):
assert "ansible" == _instance.name
def test_enabled_property(_instance):
assert _instance.enabled
def test_directory_property(_instance):
parts = _instance.directory.split(os.path.sep)
# Unused by Ansible verifier
assert ["molecule", "default", "tests"] == parts[-3:]
@pytest.mark.parametrize("config_instance", ["_verifier_section_data"], indirect=True)
def test_options_property(_instance):
x = {}
assert x == _instance.options
@pytest.mark.parametrize("config_instance", ["_verifier_section_data"], indirect=True)
def test_options_property_handles_cli_args(_instance):
_instance._config.args = {"debug": True}
x = {}
assert x == _instance.options
def test_execute(patched_logger_info, _patched_ansible_verify, _instance):
_instance.execute()
_patched_ansible_verify.assert_called_once_with(None)
msg = "Running Ansible Verifier"
patched_logger_info.assert_any_call(msg)
msg = "Verifier completed successfully."
patched_logger_info.assert_any_call(msg)
def test_execute_does_not_execute(
patched_ansible_converge, patched_logger_warning, _instance
):
_instance._config.config["verifier"]["enabled"] = False
_instance.execute()
assert not patched_ansible_converge.called
msg = "Skipping, verifier is disabled."
patched_logger_warning.assert_called_once_with(msg)
|
135290
|
import logging
import os
import posixpath
from django.conf import settings
from django.utils import timezone
from celery import shared_task, current_task
from .exporter import get_export_models, get_resource_for_model
logger = logging.getLogger(__name__)
@shared_task
def export(exporter_class, format='xlsx', **kwargs):
"""
Generates the export.
Support for django-tenant-schemas is built in.
"""
tenant = kwargs.pop('tenant', None)
if tenant is not None:
        logger.debug('Setting tenant to %s' % tenant)
from django.db import connection
connection.set_tenant(tenant)
export_root = settings.EXPORTDB_EXPORT_ROOT % tenant.schema_name
else:
export_root = settings.EXPORTDB_EXPORT_ROOT
filename = u'export-{timestamp}.{ext}'.format(
timestamp=timezone.now().strftime('%Y-%m-%d_%H%M%S'),
ext=format
)
models = get_export_models()
resources = [get_resource_for_model(model, **kwargs) for model in models]
exporter = exporter_class(resources)
logger.info('Exporting resources: %s' % resources)
databook = exporter.export(task=current_task)
export_to = os.path.join(export_root, filename)
if not os.path.exists(export_root):
os.makedirs(export_root)
with open(export_to, 'wb') as outfile:
outfile.write(getattr(databook, format))
return filename
def plain_export(exporter_class, format='xlsx', **kwargs):
"""
Generates the export.
Support for django-tenant-schemas is built in.
"""
tenant = kwargs.pop('tenant', None)
if tenant is not None:
        logger.debug('Setting tenant to %s' % tenant)
from django.db import connection
connection.set_tenant(tenant)
export_root = settings.EXPORTDB_EXPORT_ROOT % tenant.schema_name
else:
export_root = settings.EXPORTDB_EXPORT_ROOT
filename = u'export-{timestamp}.{ext}'.format(
timestamp=timezone.now().strftime('%Y-%m-%d_%H%M%S'),
ext=format
)
models = get_export_models()
resources = [get_resource_for_model(model, **kwargs) for model in models]
exporter = exporter_class(resources)
logger.info('Exporting resources: %s' % resources)
databook = exporter.export()
export_to = os.path.join(export_root, filename)
if not os.path.exists(export_root):
os.makedirs(export_root)
with open(export_to, 'wb') as outfile:
outfile.write(getattr(databook, format))
    return posixpath.join(export_root, filename)
|
135300
|
import tensorflow as tf
def attention(inputs):
# Trainable parameters
hidden_size = inputs.shape[2].value
u_omega = tf.get_variable("u_omega", [hidden_size], initializer=tf.keras.initializers.glorot_normal())
with tf.name_scope('v'):
v = tf.tanh(inputs)
# For each of the timestamps its vector of size A from `v` is reduced with `u` vector
vu = tf.tensordot(v, u_omega, axes=1, name='vu') # (B,T) shape
alphas = tf.nn.softmax(vu, name='alphas') # (B,T) shape
# Output of (Bi-)RNN is reduced with attention vector; the result has (B,D) shape
output = tf.reduce_sum(inputs * tf.expand_dims(alphas, -1), 1)
# Final output with tanh
output = tf.tanh(output)
return output, alphas
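# Minimal graph-mode self-check sketch (TF1-style API, synthetic shapes):
if __name__ == "__main__":
    x = tf.random.normal([2, 5, 8])  # (B, T, D)
    with tf.variable_scope("attention_demo"):
        out, alphas = attention(x)
    with tf.Session() as sess:
        sess.run(tf.global_variables_initializer())
        o, a = sess.run([out, alphas])
        print(o.shape, a.shape)  # expected: (2, 8) (2, 5)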
|
135327
|
import unittest
class TestMisc(unittest.TestCase):
def test_pypi_api(self):
from dl_coursera.lib.misc import get_latest_app_version
ver = get_latest_app_version()
self.assertRegex(ver, r'\d+\.\d+\.\d+')
|
135339
|
from __future__ import absolute_import
import argparse
import collections
import gc
import json
import os
from datetime import datetime
import numpy as np
from catalyst.dl import SupervisedRunner, OptimizerCallback, SchedulerCallback
from catalyst.utils import load_checkpoint, unpack_checkpoint
from pytorch_toolbelt.optimization.functional import get_lr_decay_parameters, get_optimizable_parameters
from pytorch_toolbelt.utils import fs, torch_utils
from pytorch_toolbelt.utils.catalyst import (
ShowPolarBatchesCallback,
report_checkpoint,
clean_checkpoint,
HyperParametersCallback,
)
from pytorch_toolbelt.utils.random import set_manual_seed
from pytorch_toolbelt.utils.torch_utils import count_parameters, transfer_weights
from torch import nn
from torch.utils.data import DataLoader
from alaska2 import *
def main():
parser = argparse.ArgumentParser()
parser.add_argument("-acc", "--accumulation-steps", type=int, default=1, help="Number of batches to process")
parser.add_argument("--seed", type=int, default=42, help="Random seed")
parser.add_argument("--obliterate", type=float, default=0, help="Change of obliteration")
parser.add_argument("-nid", "--negative-image-dir", type=str, default=None, help="Change of obliteration")
parser.add_argument("-v", "--verbose", action="store_true")
parser.add_argument("--fast", action="store_true")
parser.add_argument("--cache", action="store_true")
parser.add_argument("-dd", "--data-dir", type=str, default=os.environ.get("KAGGLE_2020_ALASKA2"))
parser.add_argument("-m", "--model", type=str, default="resnet34", help="")
parser.add_argument("-b", "--batch-size", type=int, default=16, help="Batch Size during training, e.g. -b 64")
parser.add_argument(
"-wbs", "--warmup-batch-size", type=int, default=None, help="Batch Size during training, e.g. -b 64"
)
parser.add_argument("-e", "--epochs", type=int, default=100, help="Epoch to run")
parser.add_argument(
"-es", "--early-stopping", type=int, default=None, help="Maximum number of epochs without improvement"
)
parser.add_argument("-fe", "--freeze-encoder", action="store_true", help="Freeze encoder parameters for N epochs")
parser.add_argument("-lr", "--learning-rate", type=float, default=1e-3, help="Initial learning rate")
parser.add_argument(
"-l", "--modification-flag-loss", type=str, default=None, action="append", nargs="+" # [["ce", 1.0]],
)
parser.add_argument(
"--modification-type-loss", type=str, default=None, action="append", nargs="+" # [["ce", 1.0]],
)
parser.add_argument("--embedding-loss", type=str, default=None, action="append", nargs="+") # [["ce", 1.0]],
parser.add_argument("--feature-maps-loss", type=str, default=None, action="append", nargs="+") # [["ce", 1.0]],
parser.add_argument("--mask-loss", type=str, default=None, action="append", nargs="+") # [["ce", 1.0]],
parser.add_argument("--bits-loss", type=str, default=None, action="append", nargs="+") # [["ce", 1.0]],
parser.add_argument("-o", "--optimizer", default="RAdam", help="Name of the optimizer")
parser.add_argument(
"-c", "--checkpoint", type=str, default=None, help="Checkpoint filename to use as initial model weights"
)
parser.add_argument("-w", "--workers", default=8, type=int, help="Num workers")
parser.add_argument("-a", "--augmentations", default="safe", type=str, help="Level of image augmentations")
parser.add_argument("--transfer", default=None, type=str, help="")
parser.add_argument("--fp16", action="store_true")
parser.add_argument("--mixup", action="store_true")
parser.add_argument("--cutmix", action="store_true")
parser.add_argument("--tsa", action="store_true")
parser.add_argument("--fold", default=None, type=int)
parser.add_argument("-s", "--scheduler", default=None, type=str, help="")
parser.add_argument("-x", "--experiment", default=None, type=str, help="")
parser.add_argument("-d", "--dropout", default=None, type=float, help="Dropout before head layer")
parser.add_argument(
"--warmup", default=0, type=int, help="Number of warmup epochs with reduced LR on encoder parameters"
)
    parser.add_argument(
        "--fine-tune", default=0, type=int, help="Number of fine-tuning epochs at the end of training"
    )
parser.add_argument("-wd", "--weight-decay", default=0, type=float, help="L2 weight decay")
parser.add_argument("--show", action="store_true")
parser.add_argument("--balance", action="store_true")
parser.add_argument("--freeze-bn", action="store_true")
args = parser.parse_args()
set_manual_seed(args.seed)
assert (
args.modification_flag_loss or args.modification_type_loss or args.embedding_loss
), "At least one of losses must be set"
modification_flag_loss = args.modification_flag_loss
modification_type_loss = args.modification_type_loss
embedding_loss = args.embedding_loss
feature_maps_loss = args.feature_maps_loss
mask_loss = args.mask_loss
bits_loss = args.bits_loss
freeze_encoder = args.freeze_encoder
data_dir = args.data_dir
cache = args.cache
num_workers = args.workers
num_epochs = args.epochs
learning_rate = args.learning_rate
model_name: str = args.model
optimizer_name = args.optimizer
image_size = (512, 512)
fast = args.fast
augmentations = args.augmentations
fp16 = args.fp16
scheduler_name = args.scheduler
experiment = args.experiment
dropout = args.dropout
verbose = args.verbose
warmup = args.warmup
show = args.show
accumulation_steps = args.accumulation_steps
weight_decay = args.weight_decay
fold = args.fold
balance = args.balance
freeze_bn = args.freeze_bn
train_batch_size = args.batch_size
mixup = args.mixup
cutmix = args.cutmix
tsa = args.tsa
fine_tune = args.fine_tune
obliterate_p = args.obliterate
negative_image_dir = args.negative_image_dir
warmup_batch_size = args.warmup_batch_size or args.batch_size
# Compute batch size for validation
valid_batch_size = train_batch_size
run_train = num_epochs > 0
custom_model_kwargs = {}
if dropout is not None:
custom_model_kwargs["dropout"] = float(dropout)
if embedding_loss is not None:
custom_model_kwargs["need_embedding"] = True
model: nn.Module = get_model(model_name, **custom_model_kwargs).cuda()
required_features = model.required_features
if mask_loss is not None:
required_features.append(INPUT_TRUE_MODIFICATION_MASK)
if args.transfer:
transfer_checkpoint = fs.auto_file(args.transfer)
print("Transferring weights from model checkpoint", transfer_checkpoint)
checkpoint = load_checkpoint(transfer_checkpoint)
pretrained_dict = checkpoint["model_state_dict"]
transfer_weights(model, pretrained_dict)
if args.checkpoint:
checkpoint = load_checkpoint(fs.auto_file(args.checkpoint))
unpack_checkpoint(checkpoint, model=model)
print("Loaded model weights from:", args.checkpoint)
report_checkpoint(checkpoint)
if freeze_bn:
from pytorch_toolbelt.optimization.functional import freeze_model
freeze_model(model, freeze_bn=True)
print("Freezing bn params")
main_metric = "loss"
main_metric_minimize = True
current_time = datetime.now().strftime("%b%d_%H_%M")
checkpoint_prefix = f"{current_time}_{args.model}_fold{fold}"
if fp16:
checkpoint_prefix += "_fp16"
if fast:
checkpoint_prefix += "_fast"
if mixup:
checkpoint_prefix += "_mixup"
if cutmix:
checkpoint_prefix += "_cutmix"
if experiment is not None:
checkpoint_prefix = experiment
log_dir = os.path.join("runs", checkpoint_prefix)
os.makedirs(log_dir, exist_ok=False)
config_fname = os.path.join(log_dir, f"{checkpoint_prefix}.json")
with open(config_fname, "w") as f:
train_session_args = vars(args)
f.write(json.dumps(train_session_args, indent=2))
default_callbacks = []
if show:
default_callbacks += [ShowPolarBatchesCallback(draw_predictions, metric="loss", minimize=True)]
# Pretrain/warmup
if warmup:
train_ds, valid_ds, train_sampler = get_datasets(
data_dir=data_dir,
augmentation=augmentations,
balance=balance,
fast=fast,
fold=fold,
features=required_features,
obliterate_p=0,
)
criterions_dict, loss_callbacks = get_criterions(
modification_flag=modification_flag_loss,
modification_type=modification_type_loss,
embedding_loss=embedding_loss,
mask_loss=mask_loss,
bits_loss=bits_loss,
feature_maps_loss=feature_maps_loss,
num_epochs=warmup,
mixup=mixup,
cutmix=cutmix,
tsa=tsa,
)
callbacks = (
default_callbacks
+ loss_callbacks
+ [
OptimizerCallback(accumulation_steps=accumulation_steps, decouple_weight_decay=False),
HyperParametersCallback(
hparam_dict={
"model": model_name,
"scheduler": scheduler_name,
"optimizer": optimizer_name,
"augmentations": augmentations,
"size": image_size[0],
"weight_decay": weight_decay,
}
),
]
)
loaders = collections.OrderedDict()
loaders["train"] = DataLoader(
train_ds,
batch_size=warmup_batch_size,
num_workers=num_workers,
pin_memory=True,
drop_last=True,
shuffle=train_sampler is None,
sampler=train_sampler,
)
loaders["valid"] = DataLoader(valid_ds, batch_size=warmup_batch_size, num_workers=num_workers, pin_memory=True)
if freeze_encoder:
from pytorch_toolbelt.optimization.functional import freeze_model
freeze_model(model.encoder, freeze_parameters=True, freeze_bn=None)
optimizer = get_optimizer(
"Ranger", get_optimizable_parameters(model), weight_decay=weight_decay, learning_rate=3e-4
)
scheduler = None
print("Train session :", checkpoint_prefix)
print(" FP16 mode :", fp16)
print(" Fast mode :", args.fast)
print(" Epochs :", num_epochs)
print(" Workers :", num_workers)
print(" Data dir :", data_dir)
print(" Log dir :", log_dir)
print(" Cache :", cache)
print("Data ")
print(" Augmentations :", augmentations)
print(" Negative images:", negative_image_dir)
print(" Train size :", len(loaders["train"]), "batches", len(train_ds), "samples")
print(" Valid size :", len(loaders["valid"]), "batches", len(valid_ds), "samples")
print(" Image size :", image_size)
print(" Balance :", balance)
print(" Mixup :", mixup)
print(" CutMix :", cutmix)
print(" TSA :", tsa)
print("Model :", model_name)
print(" Parameters :", count_parameters(model))
print(" Dropout :", dropout, "(Non-default)" if dropout is not None else "")
print("Optimizer :", optimizer_name)
print(" Learning rate :", learning_rate)
print(" Weight decay :", weight_decay)
print(" Scheduler :", scheduler_name)
print(" Batch sizes :", train_batch_size, valid_batch_size)
print("Losses ")
print(" Flag :", modification_flag_loss)
print(" Type :", modification_type_loss)
print(" Embedding :", embedding_loss)
print(" Feature maps :", feature_maps_loss)
print(" Mask :", mask_loss)
print(" Bits :", bits_loss)
runner = SupervisedRunner(input_key=required_features, output_key=None)
runner.train(
fp16=fp16,
model=model,
criterion=criterions_dict,
optimizer=optimizer,
scheduler=scheduler,
callbacks=callbacks,
loaders=loaders,
logdir=os.path.join(log_dir, "warmup"),
num_epochs=warmup,
verbose=verbose,
main_metric=main_metric,
minimize_metric=main_metric_minimize,
checkpoint_data={"cmd_args": vars(args)},
)
del optimizer, loaders, runner, callbacks
best_checkpoint = os.path.join(log_dir, "warmup", "checkpoints", "best.pth")
model_checkpoint = os.path.join(log_dir, f"{checkpoint_prefix}_warmup.pth")
clean_checkpoint(best_checkpoint, model_checkpoint)
# Restore state of best model
# unpack_checkpoint(load_checkpoint(model_checkpoint), model=model)
torch.cuda.empty_cache()
gc.collect()
if run_train:
train_ds, valid_ds, train_sampler = get_datasets(
data_dir=data_dir,
augmentation=augmentations,
balance=balance,
fast=fast,
fold=fold,
features=required_features,
obliterate_p=obliterate_p,
)
if negative_image_dir:
negatives_ds = get_negatives_ds(
negative_image_dir, fold=fold, features=required_features, max_images=16536
)
train_ds = train_ds + negatives_ds
train_sampler = None # TODO: Add proper support of sampler
print("Adding", len(negatives_ds), "negative samples to training set")
criterions_dict, loss_callbacks = get_criterions(
modification_flag=modification_flag_loss,
modification_type=modification_type_loss,
embedding_loss=embedding_loss,
feature_maps_loss=feature_maps_loss,
mask_loss=mask_loss,
bits_loss=bits_loss,
num_epochs=num_epochs,
mixup=mixup,
cutmix=cutmix,
tsa=tsa,
)
callbacks = (
default_callbacks
+ loss_callbacks
+ [
OptimizerCallback(accumulation_steps=accumulation_steps, decouple_weight_decay=False),
HyperParametersCallback(
hparam_dict={
"model": model_name,
"scheduler": scheduler_name,
"optimizer": optimizer_name,
"augmentations": augmentations,
"size": image_size[0],
"weight_decay": weight_decay,
}
),
]
)
loaders = collections.OrderedDict()
loaders["train"] = DataLoader(
train_ds,
batch_size=train_batch_size,
num_workers=num_workers,
pin_memory=True,
drop_last=True,
shuffle=train_sampler is None,
sampler=train_sampler,
)
loaders["valid"] = DataLoader(valid_ds, batch_size=valid_batch_size, num_workers=num_workers, pin_memory=True)
print("Train session :", checkpoint_prefix)
print(" FP16 mode :", fp16)
print(" Fast mode :", args.fast)
print(" Epochs :", num_epochs)
print(" Workers :", num_workers)
print(" Data dir :", data_dir)
print(" Log dir :", log_dir)
print(" Cache :", cache)
print("Data ")
print(" Augmentations :", augmentations)
print(" Obliterate (%) :", obliterate_p)
print(" Negative images:", negative_image_dir)
print(" Train size :", len(loaders["train"]), "batches", len(train_ds), "samples")
print(" Valid size :", len(loaders["valid"]), "batches", len(valid_ds), "samples")
print(" Image size :", image_size)
print(" Balance :", balance)
print(" Mixup :", mixup)
print(" CutMix :", cutmix)
print(" TSA :", tsa)
print("Model :", model_name)
print(" Parameters :", count_parameters(model))
print(" Dropout :", dropout)
print("Optimizer :", optimizer_name)
print(" Learning rate :", learning_rate)
print(" Weight decay :", weight_decay)
print(" Scheduler :", scheduler_name)
print(" Batch sizes :", train_batch_size, valid_batch_size)
print("Losses ")
print(" Flag :", modification_flag_loss)
print(" Type :", modification_type_loss)
print(" Embedding :", embedding_loss)
print(" Feature maps :", feature_maps_loss)
print(" Mask :", mask_loss)
print(" Bits :", bits_loss)
optimizer = get_optimizer(
optimizer_name, get_optimizable_parameters(model), learning_rate=learning_rate, weight_decay=weight_decay
)
scheduler = get_scheduler(
scheduler_name, optimizer, lr=learning_rate, num_epochs=num_epochs, batches_in_epoch=len(loaders["train"])
)
if isinstance(scheduler, CyclicLR):
callbacks += [SchedulerCallback(mode="batch")]
# model training
runner = SupervisedRunner(input_key=required_features, output_key=None)
runner.train(
fp16=fp16,
model=model,
criterion=criterions_dict,
optimizer=optimizer,
scheduler=scheduler,
callbacks=callbacks,
loaders=loaders,
logdir=os.path.join(log_dir, "main"),
num_epochs=num_epochs,
verbose=verbose,
main_metric=main_metric,
minimize_metric=main_metric_minimize,
checkpoint_data={"cmd_args": vars(args)},
)
del optimizer, loaders, runner, callbacks
best_checkpoint = os.path.join(log_dir, "main", "checkpoints", "best.pth")
model_checkpoint = os.path.join(log_dir, f"{checkpoint_prefix}.pth")
        # Restore state of best model (restore call currently disabled)
clean_checkpoint(best_checkpoint, model_checkpoint)
# unpack_checkpoint(load_checkpoint(model_checkpoint), model=model)
torch.cuda.empty_cache()
gc.collect()
if fine_tune:
train_ds, valid_ds, train_sampler = get_datasets(
data_dir=data_dir,
augmentation="light",
balance=balance,
fast=fast,
fold=fold,
features=required_features,
obliterate_p=obliterate_p,
)
criterions_dict, loss_callbacks = get_criterions(
modification_flag=modification_flag_loss,
modification_type=modification_type_loss,
embedding_loss=embedding_loss,
feature_maps_loss=feature_maps_loss,
mask_loss=mask_loss,
bits_loss=bits_loss,
num_epochs=fine_tune,
mixup=False,
cutmix=False,
tsa=False,
)
callbacks = (
default_callbacks
+ loss_callbacks
+ [
OptimizerCallback(accumulation_steps=accumulation_steps, decouple_weight_decay=False),
HyperParametersCallback(
hparam_dict={
"model": model_name,
"scheduler": scheduler_name,
"optimizer": optimizer_name,
"augmentations": augmentations,
"size": image_size[0],
"weight_decay": weight_decay,
}
),
]
)
loaders = collections.OrderedDict()
loaders["train"] = DataLoader(
train_ds,
batch_size=train_batch_size,
num_workers=num_workers,
pin_memory=True,
drop_last=True,
shuffle=train_sampler is None,
sampler=train_sampler,
)
loaders["valid"] = DataLoader(valid_ds, batch_size=valid_batch_size, num_workers=num_workers, pin_memory=True)
print("Train session :", checkpoint_prefix)
print(" FP16 mode :", fp16)
print(" Fast mode :", args.fast)
print(" Epochs :", num_epochs)
print(" Workers :", num_workers)
print(" Data dir :", data_dir)
print(" Log dir :", log_dir)
print(" Cache :", cache)
print("Data ")
print(" Augmentations :", augmentations)
print(" Obliterate (%) :", obliterate_p)
print(" Negative images:", negative_image_dir)
print(" Train size :", len(loaders["train"]), "batches", len(train_ds), "samples")
print(" Valid size :", len(loaders["valid"]), "batches", len(valid_ds), "samples")
print(" Image size :", image_size)
print(" Balance :", balance)
print(" Mixup :", mixup)
print(" CutMix :", cutmix)
print(" TSA :", tsa)
print("Model :", model_name)
print(" Parameters :", count_parameters(model))
print(" Dropout :", dropout)
print("Optimizer :", optimizer_name)
print(" Learning rate :", learning_rate)
print(" Weight decay :", weight_decay)
print(" Scheduler :", scheduler_name)
print(" Batch sizes :", train_batch_size, valid_batch_size)
print("Losses ")
print(" Flag :", modification_flag_loss)
print(" Type :", modification_type_loss)
print(" Embedding :", embedding_loss)
print(" Feature maps :", feature_maps_loss)
print(" Mask :", mask_loss)
print(" Bits :", bits_loss)
optimizer = get_optimizer(
"SGD", get_optimizable_parameters(model), learning_rate=learning_rate, weight_decay=weight_decay
)
scheduler = get_scheduler(
"cos", optimizer, lr=learning_rate, num_epochs=fine_tune, batches_in_epoch=len(loaders["train"])
)
if isinstance(scheduler, CyclicLR):
callbacks += [SchedulerCallback(mode="batch")]
# model training
runner = SupervisedRunner(input_key=required_features, output_key=None)
runner.train(
fp16=fp16,
model=model,
criterion=criterions_dict,
optimizer=optimizer,
scheduler=scheduler,
callbacks=callbacks,
loaders=loaders,
logdir=os.path.join(log_dir, "finetune"),
num_epochs=fine_tune,
verbose=verbose,
main_metric=main_metric,
minimize_metric=main_metric_minimize,
checkpoint_data={"cmd_args": vars(args)},
)
best_checkpoint = os.path.join(log_dir, "finetune", "checkpoints", "best.pth")
model_checkpoint = os.path.join(log_dir, f"{checkpoint_prefix}_finetune.pth")
clean_checkpoint(best_checkpoint, model_checkpoint)
unpack_checkpoint(load_checkpoint(model_checkpoint), model=model)
del optimizer, loaders, runner, callbacks
if __name__ == "__main__":
main()
|
135357
|
from .. import mq
class MQ(mq.MQ):
"""Redis Message Broker
"""
def __init__(self, backend, store):
super().__init__(backend, store)
self._client = store.client()
    async def get_message(self, *queues):
        '''Asynchronously retrieve a :class:`Task` from queues

        :return: a :class:`.Task` or ``None``.
        '''
        assert queues
        args = [self.prefixed(q) for q in queues]
        # BRPOP blocks until a message arrives on one of the queues or the
        # timeout (in whole seconds, at least 1) expires, returning None
        args.append(max(1, int(self.cfg.task_pool_timeout)))
        qt = await self._client.execute('brpop', *args)
        if qt:
            _, message = qt
            return self.decode(message)
async def flush_queues(self, *queues):
'''Clear a list of task queues
'''
pipe = self._client.pipeline()
for queue in queues:
pipe.execute('del', self.prefixed(queue))
await pipe.commit()
async def queue_message(self, queue, message):
'''Asynchronously queue a task
'''
await self._client.lpush(self.prefixed(queue), message)
async def size(self, *queues):
pipe = self._client.pipeline()
for queue in queues:
pipe.execute('llen', self.prefixed(queue))
sizes = await pipe.commit()
return sizes
async def incr(self, name):
concurrent = await self._client.incr(self.prefixed(name))
return concurrent
async def decr(self, name):
concurrent = await self._client.decr(self.prefixed(name))
return concurrent
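# A minimal consumer sketch (illustrative, not part of the original module;
# `broker` is a wired-up MQ instance and `encoded_task` is assumed to be a
# task already serialised the way `self.decode` expects):
async def drain_once(broker: MQ, queue: str, encoded_task: bytes):
    # Push one message, then block (up to task_pool_timeout) to pop it back
    await broker.queue_message(queue, encoded_task)
    return await broker.get_message(queue)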
|
135376
|
import logging
import os
from logging.handlers import RotatingFileHandler
LOGGER_NAME = "connexion_example"
def create_log():
if not os.path.exists("./logs"):
os.makedirs("./logs")
logger = logging.getLogger(LOGGER_NAME)
logger.setLevel(logging.DEBUG)
handler_local = RotatingFileHandler(
f"./logs/{LOGGER_NAME}.log", mode="a", maxBytes=50000, backupCount=10
)
logger.addHandler(handler_local)
return logger
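# Minimal usage sketch (illustrative): create the logger once at startup —
# each call to create_log() attaches another handler — then fetch the same
# logger by name anywhere else in the application.
if __name__ == "__main__":
    log = create_log()
    log.debug("logger initialised")
    logging.getLogger(LOGGER_NAME).info("same logger, fetched by name")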
|
135378
|
import os, sys
import argparse
import numpy as np
import cv2
from skimage import filters
from linefiller.thinning import thinning
from linefiller.trappedball_fill import trapped_ball_fill_multi, flood_fill_multi, mark_fill, build_fill_map, merge_fill, \
show_fill_map, my_merge_fill
def dline_of(x, low_thr=1, high_thr=20, bf_args=[30,40,30]):
    xm = cv2.medianBlur(x, 5)
    # xga = cv2.GaussianBlur(x,(5, 5),cv2.BORDER_DEFAULT)
    # Bilateral filtering suppresses texture noise while preserving edges
    xb = cv2.bilateralFilter(x, bf_args[0], bf_args[1], bf_args[2])
    # xb = cv2.bilateralFilter(xb, 20, 60, 10 )
    xg = cv2.cvtColor(xb, cv2.COLOR_RGB2GRAY)
    # Laplacian edge response, normalised to [0, 255]
    xl = cv2.Laplacian(xb, ddepth=cv2.CV_32F, ksize=5)
    xgg = xl
    xgg = xgg.astype(np.float32) * (255. / (xgg.astype(np.float32).max() * 1.0))
    # Keep only edge pixels that survive hysteresis thresholding
    xh = filters.apply_hysteresis_threshold(xgg, low_thr, high_thr)
    xgg[~xh] = 0
    # xgg[xh == True] = 255
    xgg1 = xgg.copy() * 20
    xgg1 = np.max(xgg1, axis=2)
    # Invert so the output has a white background with dark lines
    return np.clip(255 - xgg1, 0, 255)
def squeeze_label_map(label_map):
ret_label_map = label_map.copy()
labels, counts = np.unique(ret_label_map, return_counts=True)
label_orders = np.argsort(counts)
for ord_id, ord_val in enumerate(label_orders):
mask = (label_map == labels[ord_val])
ret_label_map[mask] = ord_id
return ret_label_map
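# Illustrative worked example (a sketch of the behaviour above): labels are
# remapped to contiguous ids ordered by ascending region size.
# >>> lm = np.array([[5, 5, 5], [2, 9, 9]])
# >>> squeeze_label_map(lm)   # counts: 2 -> 1 px, 9 -> 2 px, 5 -> 3 px
# array([[2, 2, 2],
#        [0, 1, 1]])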
def trapped_ball_processed(binary, in_image=None, do_merge=True):
    # Fill with progressively smaller trapped-ball radii (3, then 2, then 1),
    # accumulating fills and marking filled pixels after each pass
    fills = []
    result = binary
    fill = trapped_ball_fill_multi(result, 3, method='max')
    fills += fill
    result = mark_fill(result, fill)
print('result num 3: ', len(fills))
fill = trapped_ball_fill_multi(result, 2, method=None)
fills += fill
result = mark_fill(result, fill)
print('result num 2: ', len(fills))
fill = trapped_ball_fill_multi(result, 1, method=None)
fills += fill
result = mark_fill(result, fill)
print('result num 1: ', len(fills))
fill = flood_fill_multi(result)
fills += fill
print('flood_fill_multi num 1: ', len(fills))
fillmap = build_fill_map(result, fills)
# print('fillmap num: ', len(np.unique(fillmap)))
if do_merge:
if in_image is None:
fillmap = merge_fill(fillmap, max_iter=10)
else:
fillmap = my_merge_fill(in_image, fillmap)
fillmap = thinning(fillmap)
return fillmap
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument('input_root')
parser.add_argument('output_root')
    parser.add_argument('--start_idx', type=int, default=0,
                        help='index of the first folder to process')
    parser.add_argument('--end_idx', type=int, default=None,
                        help='index of the folder to stop at (exclusive)')
    parser.add_argument('--height', type=int, default=960,
                        help='height of the generated flow, default: 960')
    parser.add_argument('--width', type=int, default=540,
                        help='width of the generated flow, default: 540')
parser.add_argument('--use_gpu', action='store_true')
args = parser.parse_args()
######
folder_root = args.input_root
save_root = args.output_root
use_gpu = args.use_gpu
start_idx = int(args.start_idx)
end_idx = None if args.end_idx is None else int(args.end_idx)
# tar_size = (1280, 720)
    # NOTE: cv2.resize expects dsize as (width, height), so with the defaults
    # this produces a 960x540 image; the two flags are effectively swapped
    tar_size = (args.height, args.width)
# tar_size = (640, 360)
######
print('use gpu: ', use_gpu)
sys.stdout.flush()
if not os.path.exists(save_root):
os.makedirs(save_root)
folderList = sorted(os.listdir(folder_root))
print('folderList length: ', len(folderList))
for f_idx, folder in enumerate(folderList[start_idx:end_idx]):
f_idx += start_idx
input_subfolder = os.path.join(folder_root, folder)
imgFileNames = sorted(os.listdir(input_subfolder))
print('-- [%d] %s'%(f_idx, folder))
print(imgFileNames)
saveFolder = os.path.join(save_root, folder)
labelMap1_savePath = os.path.join(saveFolder, 'labelmap_1.npy')
        labelMap3_savePath = os.path.join(saveFolder, 'labelmap_3.npy')
# if os.path.exists(labelMap1_savePath) and os.path.exists(labelMap2_savePath):
# try:
# binMap1 = np.load(labelMap1_savePath)
        # binMap3 = np.load(labelMap3_savePath)
# except IOError:
# print("labelmap file corrupted")
# else:
# print("already generated")
# continue
sys.stdout.flush()
img1 = cv2.imread(os.path.join(input_subfolder, imgFileNames[0]))
img3 = cv2.imread(os.path.join(input_subfolder, imgFileNames[-1]))
# segmentation
img1_rs = cv2.resize(img1, tar_size)
img3_rs = cv2.resize(img3, tar_size)
if 'Disney' in folder:
boundImg1 = dline_of(img1_rs, 1, 20, [30,40,30]).astype(np.uint8)
boundImg3 = dline_of(img3_rs, 1, 20, [30,40,30]).astype(np.uint8)
else:
boundImg1 = dline_of(img1_rs, 2, 20, [10,10,10]).astype(np.uint8)
boundImg3 = dline_of(img3_rs, 2, 20, [10,10,10]).astype(np.uint8)
ret, binMap1 = cv2.threshold(boundImg1, 220, 255, cv2.THRESH_BINARY)
ret, binMap3 = cv2.threshold(boundImg3, 220, 255, cv2.THRESH_BINARY)
print('- trapped_ball_processed()')
sys.stdout.flush()
fillMap1 = trapped_ball_processed(binMap1, img1_rs)
fillMap3 = trapped_ball_processed(binMap3, img3_rs)
labelMap1 = squeeze_label_map(fillMap1)
labelMap3 = squeeze_label_map(fillMap3)
# save flows
if not os.path.exists(saveFolder):
os.mkdir(saveFolder)
np.save(labelMap1_savePath, labelMap1)
        np.save(labelMap3_savePath, labelMap3)
        print('save to %s, %s'%(labelMap1_savePath, labelMap3_savePath))
sys.stdout.flush()
labelMap1_img = show_fill_map(labelMap1)
labelMap3_img = show_fill_map(labelMap3)
cv2.imwrite(os.path.join(saveFolder, 'labelmap_1.jpg'), labelMap1_img)
cv2.imwrite(os.path.join(saveFolder, 'labelmap_3.jpg'), labelMap3_img)
|
135420
|
import base64
import json
import logging
from typing import (
Union,
)
import uuid
import attr
from botocore.exceptions import (
ClientError,
)
from azul.service import (
AbstractService,
)
from azul.service.step_function_helper import (
StateMachineError,
StepFunctionHelper,
)
from azul.types import (
JSON,
)
logger = logging.getLogger(__name__)
class InvalidTokenError(Exception):
def __init__(self) -> None:
super().__init__('Invalid token given')
@attr.s(frozen=True, auto_attribs=True, kw_only=True)
class Token:
"""
Represents an ongoing manifest generation
"""
execution_id: str
request_index: int
wait_time: int
def encode(self) -> str:
token = attr.asdict(self)
return base64.urlsafe_b64encode(json.dumps(token).encode()).decode()
@classmethod
def decode(cls, token: str) -> 'Token':
try:
return cls(**json.loads(base64.urlsafe_b64decode(token).decode()))
except Exception as e:
raise InvalidTokenError from e
def advance(self, wait_time: int) -> 'Token':
return attr.evolve(self,
wait_time=wait_time,
request_index=self.request_index + 1)
class AsyncManifestService(AbstractService):
"""
Starting and checking the status of manifest generation jobs.
"""
step_function_helper = StepFunctionHelper()
def __init__(self, state_machine_name):
self.state_machine_name = state_machine_name
def start_generation(self, input: JSON) -> Token:
execution_id = str(uuid.uuid4())
self.step_function_helper.start_execution(self.state_machine_name,
execution_id,
execution_input=input)
return Token(execution_id=execution_id,
request_index=0,
wait_time=self._get_next_wait_time(0))
def inspect_generation(self, token) -> Union[Token, JSON]:
try:
execution = self.step_function_helper.describe_execution(state_machine_name=self.state_machine_name,
execution_name=token.execution_id)
except ClientError as e:
if e.response['Error']['Code'] == 'ExecutionDoesNotExist':
raise InvalidTokenError from e
else:
raise
output = execution.get('output', None)
status = execution['status']
if status == 'SUCCEEDED':
# Because describe_execution is eventually consistent, output may
# not yet be present
if output is None:
return token.advance(wait_time=1)
else:
return json.loads(output)
elif status == 'RUNNING':
return token.advance(wait_time=self._get_next_wait_time(token.request_index))
else:
raise StateMachineError(status, output)
def _get_next_wait_time(self, request_index: int) -> int:
wait_times = [1, 1, 4, 6, 10]
try:
return wait_times[request_index]
except IndexError:
return wait_times[-1]
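# A minimal round-trip sketch (illustrative, not part of the service API):
# a Token survives encode()/decode(), and advance() bumps the request index.
def _token_roundtrip_demo() -> None:
    token = Token(execution_id=str(uuid.uuid4()), request_index=0, wait_time=1)
    assert Token.decode(token.encode()) == token
    assert token.advance(wait_time=4) == Token(execution_id=token.execution_id,
                                               request_index=1,
                                               wait_time=4)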
|
135443
|
from ctypes import *
import unittest
import os
import ctypes
import _ctypes_test
class BITS(Structure):
_fields_ = [("A", c_int, 1),
("B", c_int, 2),
("C", c_int, 3),
("D", c_int, 4),
("E", c_int, 5),
("F", c_int, 6),
("G", c_int, 7),
("H", c_int, 8),
("I", c_int, 9),
("M", c_short, 1),
("N", c_short, 2),
("O", c_short, 3),
("P", c_short, 4),
("Q", c_short, 5),
("R", c_short, 6),
("S", c_short, 7)]
func = CDLL(_ctypes_test.__file__).unpack_bitfields
func.argtypes = POINTER(BITS), c_char
##for n in "ABCDEFGHIMNOPQRS":
## print n, hex(getattr(BITS, n).size), getattr(BITS, n).offset
class C_Test(unittest.TestCase):
def test_ints(self):
for i in range(512):
for name in "ABCDEFGHI":
b = BITS()
setattr(b, name, i)
self.failUnlessEqual((name, i, getattr(b, name)), (name, i, func(byref(b), name)))
def test_shorts(self):
for i in range(256):
for name in "MNOPQRS":
b = BITS()
setattr(b, name, i)
self.failUnlessEqual((name, i, getattr(b, name)), (name, i, func(byref(b), name)))
signed_int_types = (c_byte, c_short, c_int, c_long, c_longlong)
unsigned_int_types = (c_ubyte, c_ushort, c_uint, c_ulong, c_ulonglong)
int_types = unsigned_int_types + signed_int_types
class BitFieldTest(unittest.TestCase):
def test_longlong(self):
class X(Structure):
_fields_ = [("a", c_longlong, 1),
("b", c_longlong, 62),
("c", c_longlong, 1)]
self.failUnlessEqual(sizeof(X), sizeof(c_longlong))
x = X()
x.a, x.b, x.c = -1, 7, -1
self.failUnlessEqual((x.a, x.b, x.c), (-1, 7, -1))
def test_ulonglong(self):
class X(Structure):
_fields_ = [("a", c_ulonglong, 1),
("b", c_ulonglong, 62),
("c", c_ulonglong, 1)]
self.failUnlessEqual(sizeof(X), sizeof(c_longlong))
x = X()
self.failUnlessEqual((x.a, x.b, x.c), (0, 0, 0))
x.a, x.b, x.c = 7, 7, 7
self.failUnlessEqual((x.a, x.b, x.c), (1, 7, 1))
def test_signed(self):
for c_typ in signed_int_types:
class X(Structure):
_fields_ = [("dummy", c_typ),
("a", c_typ, 3),
("b", c_typ, 3),
("c", c_typ, 1)]
self.failUnlessEqual(sizeof(X), sizeof(c_typ)*2)
x = X()
self.failUnlessEqual((c_typ, x.a, x.b, x.c), (c_typ, 0, 0, 0))
x.a = -1
self.failUnlessEqual((c_typ, x.a, x.b, x.c), (c_typ, -1, 0, 0))
x.a, x.b = 0, -1
self.failUnlessEqual((c_typ, x.a, x.b, x.c), (c_typ, 0, -1, 0))
def test_unsigned(self):
for c_typ in unsigned_int_types:
class X(Structure):
_fields_ = [("a", c_typ, 3),
("b", c_typ, 3),
("c", c_typ, 1)]
self.failUnlessEqual(sizeof(X), sizeof(c_typ))
x = X()
self.failUnlessEqual((c_typ, x.a, x.b, x.c), (c_typ, 0, 0, 0))
x.a = -1
self.failUnlessEqual((c_typ, x.a, x.b, x.c), (c_typ, 7, 0, 0))
x.a, x.b = 0, -1
self.failUnlessEqual((c_typ, x.a, x.b, x.c), (c_typ, 0, 7, 0))
def fail_fields(self, *fields):
return self.get_except(type(Structure), "X", (),
{"_fields_": fields})
def test_nonint_types(self):
# bit fields are not allowed on non-integer types.
result = self.fail_fields(("a", c_char_p, 1))
self.failUnlessEqual(result, (TypeError, 'bit fields not allowed for type c_char_p'))
result = self.fail_fields(("a", c_void_p, 1))
self.failUnlessEqual(result, (TypeError, 'bit fields not allowed for type c_void_p'))
if c_int != c_long:
result = self.fail_fields(("a", POINTER(c_int), 1))
self.failUnlessEqual(result, (TypeError, 'bit fields not allowed for type LP_c_int'))
result = self.fail_fields(("a", c_char, 1))
self.failUnlessEqual(result, (TypeError, 'bit fields not allowed for type c_char'))
try:
c_wchar
except NameError:
pass
else:
result = self.fail_fields(("a", c_wchar, 1))
self.failUnlessEqual(result, (TypeError, 'bit fields not allowed for type c_wchar'))
class Dummy(Structure):
_fields_ = []
result = self.fail_fields(("a", Dummy, 1))
self.failUnlessEqual(result, (TypeError, 'bit fields not allowed for type Dummy'))
def test_single_bitfield_size(self):
for c_typ in int_types:
result = self.fail_fields(("a", c_typ, -1))
self.failUnlessEqual(result, (ValueError, 'number of bits invalid for bit field'))
result = self.fail_fields(("a", c_typ, 0))
self.failUnlessEqual(result, (ValueError, 'number of bits invalid for bit field'))
class X(Structure):
_fields_ = [("a", c_typ, 1)]
self.failUnlessEqual(sizeof(X), sizeof(c_typ))
class X(Structure):
_fields_ = [("a", c_typ, sizeof(c_typ)*8)]
self.failUnlessEqual(sizeof(X), sizeof(c_typ))
result = self.fail_fields(("a", c_typ, sizeof(c_typ)*8 + 1))
self.failUnlessEqual(result, (ValueError, 'number of bits invalid for bit field'))
def test_multi_bitfields_size(self):
class X(Structure):
_fields_ = [("a", c_short, 1),
("b", c_short, 14),
("c", c_short, 1)]
self.failUnlessEqual(sizeof(X), sizeof(c_short))
class X(Structure):
_fields_ = [("a", c_short, 1),
("a1", c_short),
("b", c_short, 14),
("c", c_short, 1)]
self.failUnlessEqual(sizeof(X), sizeof(c_short)*3)
self.failUnlessEqual(X.a.offset, 0)
self.failUnlessEqual(X.a1.offset, sizeof(c_short))
self.failUnlessEqual(X.b.offset, sizeof(c_short)*2)
self.failUnlessEqual(X.c.offset, sizeof(c_short)*2)
class X(Structure):
_fields_ = [("a", c_short, 3),
("b", c_short, 14),
("c", c_short, 14)]
self.failUnlessEqual(sizeof(X), sizeof(c_short)*3)
self.failUnlessEqual(X.a.offset, sizeof(c_short)*0)
self.failUnlessEqual(X.b.offset, sizeof(c_short)*1)
self.failUnlessEqual(X.c.offset, sizeof(c_short)*2)
def get_except(self, func, *args, **kw):
try:
func(*args, **kw)
        except Exception as detail:
return detail.__class__, str(detail)
def test_mixed_1(self):
class X(Structure):
_fields_ = [("a", c_byte, 4),
("b", c_int, 4)]
if os.name in ("nt", "ce"):
self.failUnlessEqual(sizeof(X), sizeof(c_int)*2)
else:
self.failUnlessEqual(sizeof(X), sizeof(c_int))
def test_mixed_2(self):
class X(Structure):
_fields_ = [("a", c_byte, 4),
("b", c_int, 32)]
self.failUnlessEqual(sizeof(X), sizeof(c_int)*2)
def test_mixed_3(self):
class X(Structure):
_fields_ = [("a", c_byte, 4),
("b", c_ubyte, 4)]
self.failUnlessEqual(sizeof(X), sizeof(c_byte))
def test_mixed_4(self):
class X(Structure):
_fields_ = [("a", c_short, 4),
("b", c_short, 4),
("c", c_int, 24),
("d", c_short, 4),
("e", c_short, 4),
("f", c_int, 24)]
# MSVC does NOT combine c_short and c_int into one field, GCC
# does (unless GCC is run with '-mms-bitfields' which
# produces code compatible with MSVC).
if os.name in ("nt", "ce"):
self.failUnlessEqual(sizeof(X), sizeof(c_int) * 4)
else:
self.failUnlessEqual(sizeof(X), sizeof(c_int) * 2)
def test_anon_bitfields(self):
# anonymous bit-fields gave a strange error message
class X(Structure):
_fields_ = [("a", c_byte, 4),
("b", c_ubyte, 4)]
class Y(Structure):
_anonymous_ = ["_"]
_fields_ = [("_", X)]
if __name__ == "__main__":
unittest.main()
|
135455
|
import setuptools
with open("README.md", "r") as fh:
long_description = fh.read()
import versioneer
setuptools.setup(
name="removestar",
version=versioneer.get_version(),
cmdclass=versioneer.get_cmdclass(),
author="<NAME>",
author_email="<EMAIL>",
description="A tool to automatically replace 'import *' imports with explicit imports in files",
long_description=long_description,
long_description_content_type="text/markdown",
url="https://www.asmeurer.com/removestar/",
packages=setuptools.find_packages(),
classifiers=[
"Programming Language :: Python :: 3",
"License :: OSI Approved :: MIT License",
"Operating System :: OS Independent",
],
entry_points={'console_scripts': [ 'removestar = removestar.__main__:main']},
    python_requires='>=3.6',
install_requires=[
'pyflakes'
],
license='MIT',
)
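# Illustrative CLI usage once installed via the console_scripts entry point
# (the target file name is hypothetical):
#     removestar mymodule.py    # suggests explicit replacements for 'import *'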
|
135471
|
def assert_raises(excClass, callableObj, *args, **kwargs):
"""
Like unittest.TestCase.assertRaises, but returns the exception.
"""
try:
callableObj(*args, **kwargs)
except excClass as e:
return e
else:
        if hasattr(excClass, '__name__'):
            excName = excClass.__name__
        else:
            excName = str(excClass)
raise AssertionError("%s not raised" % excName)
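# Illustrative usage (an assumption: called directly rather than through a
# unittest.TestCase, so the returned exception can be inspected):
if __name__ == "__main__":
    e = assert_raises(ValueError, int, "not a number")
    assert "invalid literal" in str(e)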
|
135472
|
import numpy as np
import pandas.compat as compat
import pandas as pd
class TablePlotter(object):
"""
Layout some DataFrames in vertical/horizontal layout for explanation.
Used in merging.rst
"""
def __init__(self, cell_width=0.37, cell_height=0.25, font_size=7.5):
self.cell_width = cell_width
self.cell_height = cell_height
self.font_size = font_size
def _shape(self, df):
"""
        Calculate table shape considering index levels.
"""
row, col = df.shape
return row + df.columns.nlevels, col + df.index.nlevels
def _get_cells(self, left, right, vertical):
"""
Calculate appropriate figure size based on left and right data.
"""
if vertical:
# calculate required number of cells
vcells = max(sum(self._shape(l)[0] for l in left),
self._shape(right)[0])
hcells = (max(self._shape(l)[1] for l in left) +
self._shape(right)[1])
else:
vcells = max([self._shape(l)[0] for l in left] +
[self._shape(right)[0]])
hcells = sum([self._shape(l)[1] for l in left] +
[self._shape(right)[1]])
return hcells, vcells
def plot(self, left, right, labels=None, vertical=True):
"""
Plot left / right DataFrames in specified layout.
Parameters
----------
left : list of DataFrames before operation is applied
right : DataFrame of operation result
labels : list of str to be drawn as titles of left DataFrames
vertical : bool
If True, use vertical layout. If False, use horizontal layout.
"""
import matplotlib.pyplot as plt
import matplotlib.gridspec as gridspec
if not isinstance(left, list):
left = [left]
left = [self._conv(l) for l in left]
right = self._conv(right)
hcells, vcells = self._get_cells(left, right, vertical)
if vertical:
figsize = self.cell_width * hcells, self.cell_height * vcells
else:
# include margin for titles
figsize = self.cell_width * hcells, self.cell_height * vcells
fig = plt.figure(figsize=figsize)
if vertical:
gs = gridspec.GridSpec(len(left), hcells)
# left
max_left_cols = max(self._shape(l)[1] for l in left)
max_left_rows = max(self._shape(l)[0] for l in left)
for i, (l, label) in enumerate(zip(left, labels)):
ax = fig.add_subplot(gs[i, 0:max_left_cols])
self._make_table(ax, l, title=label,
height=1.0 / max_left_rows)
# right
ax = plt.subplot(gs[:, max_left_cols:])
self._make_table(ax, right, title='Result', height=1.05 / vcells)
fig.subplots_adjust(top=0.9, bottom=0.05, left=0.05, right=0.95)
else:
max_rows = max(self._shape(df)[0] for df in left + [right])
height = 1.0 / np.max(max_rows)
gs = gridspec.GridSpec(1, hcells)
# left
i = 0
for l, label in zip(left, labels):
sp = self._shape(l)
ax = fig.add_subplot(gs[0, i:i + sp[1]])
self._make_table(ax, l, title=label, height=height)
i += sp[1]
# right
ax = plt.subplot(gs[0, i:])
self._make_table(ax, right, title='Result', height=height)
fig.subplots_adjust(top=0.85, bottom=0.05, left=0.05, right=0.95)
return fig
def _conv(self, data):
"""Convert each input to appropriate for table outplot"""
if isinstance(data, pd.Series):
if data.name is None:
data = data.to_frame(name='')
else:
data = data.to_frame()
data = data.fillna('NaN')
return data
def _insert_index(self, data):
# insert is destructive
data = data.copy()
idx_nlevels = data.index.nlevels
if idx_nlevels == 1:
data.insert(0, 'Index', data.index)
else:
for i in range(idx_nlevels):
data.insert(i, 'Index{0}'.format(i),
data.index._get_level_values(i))
col_nlevels = data.columns.nlevels
if col_nlevels > 1:
col = data.columns._get_level_values(0)
values = [data.columns._get_level_values(i).values
for i in range(1, col_nlevels)]
col_df = pd.DataFrame(values)
data.columns = col_df.columns
data = pd.concat([col_df, data])
data.columns = col
return data
def _make_table(self, ax, df, title, height=None):
if df is None:
ax.set_visible(False)
return
import pandas.plotting as plotting
idx_nlevels = df.index.nlevels
col_nlevels = df.columns.nlevels
# must be convert here to get index levels for colorization
df = self._insert_index(df)
tb = plotting.table(ax, df, loc=9)
tb.set_fontsize(self.font_size)
if height is None:
height = 1.0 / (len(df) + 1)
props = tb.properties()
for (r, c), cell in compat.iteritems(props['celld']):
if c == -1:
cell.set_visible(False)
elif r < col_nlevels and c < idx_nlevels:
cell.set_visible(False)
elif r < col_nlevels or c < idx_nlevels:
cell.set_facecolor('#AAAAAA')
cell.set_height(height)
ax.set_title(title, size=self.font_size)
ax.axis('off')
class _WritableDoc(type):
# Remove this when Python2 support is dropped
# __doc__ is not mutable for new-style classes in Python2, which means
# we can't use @Appender to share class docstrings. This can be used
# with `add_metaclass` to make cls.__doc__ mutable.
pass
if __name__ == "__main__":
import matplotlib.pyplot as plt
p = TablePlotter()
df1 = pd.DataFrame({'A': [10, 11, 12],
'B': [20, 21, 22],
'C': [30, 31, 32]})
df2 = pd.DataFrame({'A': [10, 12],
'C': [30, 32]})
p.plot([df1, df2], pd.concat([df1, df2]),
labels=['df1', 'df2'], vertical=True)
plt.show()
df3 = pd.DataFrame({'X': [10, 12],
'Z': [30, 32]})
p.plot([df1, df3], pd.concat([df1, df3], axis=1),
labels=['df1', 'df2'], vertical=False)
plt.show()
idx = pd.MultiIndex.from_tuples([(1, 'A'), (1, 'B'), (1, 'C'),
(2, 'A'), (2, 'B'), (2, 'C')])
col = pd.MultiIndex.from_tuples([(1, 'A'), (1, 'B')])
df3 = pd.DataFrame({'v1': [1, 2, 3, 4, 5, 6],
'v2': [5, 6, 7, 8, 9, 10]},
index=idx)
df3.columns = col
p.plot(df3, df3, labels=['df3'])
plt.show()
|
135492
|
import io
from django.contrib import messages
from django.template.defaultfilters import linebreaksbr
from django.utils.translation import ugettext as _
import ghdiff
from CommcareTranslationChecker import validate_workbook
from CommcareTranslationChecker.exceptions import FatalError
from corehq.apps.app_manager.exceptions import (
FormNotFoundException,
ModuleNotFoundException,
)
from corehq.apps.hqwebapp.tasks import send_html_email_async
from corehq.apps.translations.app_translations.upload_form import (
BulkAppTranslationFormUpdater,
)
from corehq.apps.translations.app_translations.upload_module import (
BulkAppTranslationModuleUpdater,
)
from corehq.apps.translations.app_translations.utils import (
BulkAppTranslationUpdater,
get_bulk_app_sheet_headers,
get_menu_or_form_by_sheet_name,
get_menu_or_form_by_unique_id,
get_unicode_dicts,
is_form_sheet,
is_module_sheet,
is_modules_and_forms_sheet,
is_single_sheet,
is_single_sheet_workbook,
)
from corehq.apps.translations.const import (
MODULES_AND_FORMS_SHEET_NAME,
SINGLE_SHEET_NAME,
)
from corehq.apps.translations.exceptions import BulkAppTranslationsException
from corehq.util.files import read_workbook_content_as_file
from corehq.util.workbook_json.excel import (
WorkbookJSONError,
get_single_worksheet,
)
def validate_bulk_app_translation_upload(app, workbook, email, lang_to_compare, file_obj):
from corehq.apps.translations.validator import UploadedTranslationsValidator
msgs = UploadedTranslationsValidator(app, workbook, lang_to_compare).compare()
checker_messages, result_wb = run_translation_checker(file_obj)
if msgs or checker_messages:
_email_app_translations_discrepancies(msgs, checker_messages, email, app.name, result_wb)
return [(messages.error, _("Issues found. You should receive an email shortly."))]
else:
return [(messages.success, _("No issues found."))]
def run_translation_checker(file_obj):
translation_checker_messages = []
result_wb = None
try:
result_wb, translation_checker_messages = validate_workbook(file_obj)
except FatalError as e:
        translation_checker_messages.append(
            _("Workbook check failed to finish due to the following error: %s") % e)
return translation_checker_messages, result_wb
def _email_app_translations_discrepancies(msgs, checker_messages, email, app_name, result_wb):
"""
:param msgs: messages for app translation discrepancies
:param checker_messages: messages for issues found by translation checker
    :param email: email address to send the discrepancy report to
:param app_name: name of the application
:param result_wb: result wb of translation checker to attach with the email
"""
def form_email_content(msgs, checker_messages):
if msgs:
html_file_content = ghdiff.default_css
for sheet_name, msg in msgs.items():
html_file_content += "<strong>{}</strong>".format(sheet_name) + msg
text_content = _("Hi, PFA file for discrepancies found for app translations.") + "\n"
else:
html_file_content = None
text_content = _("Hi, No discrepancies found for app translations.") + "\n"
if checker_messages:
text_content += _("Issues found with the workbook are as follows :") + "\n"
text_content += '\n'.join([_(msg) for msg in checker_messages])
else:
text_content += _("No issues found with the workbook.")
return html_file_content, text_content
def attachment(title, content, mimetype='text/html'):
return {'title': title, 'file_obj': content, 'mimetype': mimetype}
subject = _("App Translations Discrepancies for {}").format(app_name)
html_file_content, text_content = form_email_content(msgs, checker_messages)
attachments = []
if html_file_content:
attachments.append(attachment("{} Discrepancies.html".format(app_name), io.StringIO(html_file_content)))
if result_wb:
attachments.append(attachment("{} TranslationChecker.xlsx".format(app_name),
io.BytesIO(read_workbook_content_as_file(result_wb)), result_wb.mime_type))
send_html_email_async.delay(subject, email, linebreaksbr(text_content), file_attachments=attachments)
def process_bulk_app_translation_upload(app, workbook, sheet_name_to_unique_id, lang=None):
"""
Process the bulk upload file for the given app.
We return these message tuples instead of calling them now to allow this
function to be used independently of request objects.
:return: Returns a list of message tuples. The first item in each tuple is
a function like django.contrib.messages.error, and the second is a string.
"""
def get_expected_headers(sheet_name):
# This function does its best to return the headers we expect, based
# on the current app, for an uploaded sheet. If the sheet is old, it
# might not include the unique IDs of the modules/forms. In that case
# `sheet_name_to_unique_id` will be empty and we fall back to using the
# name of the sheet and hope that modules/forms have not been moved
# since the sheet was originally downloaded.
#
# If a user created a new sheet, or renamed a sheet, or a form/module
# has been deleted since this sheet was downloaded, then expected
# headers will not be found. We return an empty list, and
# `_check_for_sheet_error()` will handle it.
if sheet_name in sheet_name_to_unique_id:
unique_id = sheet_name_to_unique_id[sheet_name]
if unique_id in expected_headers_by_id:
return expected_headers_by_id[unique_id]
return expected_headers_by_sheet_name.get(sheet_name, [])
msgs = []
single_sheet = is_single_sheet_workbook(workbook)
expected_headers_by_sheet_name = {k: v for k, v in get_bulk_app_sheet_headers(app, single_sheet=single_sheet,
lang=lang)}
expected_headers_by_id = {k: v for k, v in get_bulk_app_sheet_headers(app, single_sheet=single_sheet,
lang=lang, by_id=True)}
processed_sheets = set()
for sheet in workbook.worksheets:
expected_headers = get_expected_headers(sheet.worksheet.title)
try:
_check_for_sheet_error(sheet, expected_headers, processed_sheets)
except BulkAppTranslationsException as e:
msgs.append((messages.error, str(e)))
continue
processed_sheets.add(sheet.worksheet.title)
warnings = _check_for_sheet_warnings(sheet, expected_headers)
for warning in warnings:
msgs.append((messages.warning, warning))
if is_single_sheet(sheet.worksheet.title):
msgs.extend(_process_single_sheet(app, sheet, names_map=sheet_name_to_unique_id, lang=lang))
else:
msgs.extend(_process_rows(app, sheet.worksheet.title, sheet, names_map=sheet_name_to_unique_id))
msgs.append(
(messages.success, _("App Translations Updated!"))
)
return msgs
def get_sheet_name_to_unique_id_map(file_or_filename, lang):
"""
Returns a map of sheet names to unique IDs, so that when modules or
forms have been moved we can use their ID and not their (changed) name.
This function is called before we process the upload so that we can use
the sheet-name-to-unique-ID map to check the sheets before they are
processed.
`file_or_filename` is a file not a workbook because we read uploaded
Excel files using WorkbookJSONReader, and it can only iterate sheet
rows once. This function opens its own Reader to parse the first sheet.
"""
def get_sheet_name():
return MODULES_AND_FORMS_SHEET_NAME if is_multisheet() else SINGLE_SHEET_NAME
def is_multisheet():
return not lang
    def is_modules_and_forms_row(row):
        """
        Returns True for the rows about modules and forms in single-sheet
        uploads. They are the rows that include the unique IDs.
        """
        return not row['case_property'] and not row['list_or_detail'] and not row['label']
sheet_name_to_unique_id = {}
try:
worksheet = get_single_worksheet(file_or_filename, title=get_sheet_name())
except WorkbookJSONError:
# There is something wrong with the file. The problem will happen
# again when we try to process the upload. To preserve current
# behaviour, just return silently.
return sheet_name_to_unique_id
if is_multisheet():
rows = worksheet
else:
rows = (row for row in worksheet if is_modules_and_forms_row(row))
for row in get_unicode_dicts(rows):
sheet_name = row.get('menu_or_form', '')
unique_id = row.get('unique_id')
if unique_id and sheet_name not in sheet_name_to_unique_id:
sheet_name_to_unique_id[sheet_name] = unique_id
return sheet_name_to_unique_id
def _process_single_sheet(app, sheet, names_map, lang=None):
"""
A single-sheet translation file deals with only one language, and
fits all the items to be translated onto the same sheet. All items
share the same columns. If the column is not applicable to the row,
it is left empty.
:param app: The application being translated
:param sheet: The worksheet containing the translations
:param names_map: A map of sheet_name (like "menu1" or "menu1_form1") to
module/form unique_id, used to fetch a module/form
even if it has been moved since the worksheet was created
:param lang: The language that the app is being translated into
:return: A list of error messages or an empty list
"""
msgs = []
module_or_form = None
modules_and_forms_rows = []
rows = []
for row in sheet:
if not row['case_property'] and not row['list_or_detail'] and not row['label']:
modules_and_forms_rows.append(row)
elif module_or_form != row['menu_or_form']:
msgs.extend(_process_rows(app, module_or_form, rows, names_map, lang=lang))
module_or_form = row['menu_or_form']
rows = [row]
else:
rows.append(row)
msgs.extend(_process_rows(app, module_or_form, rows, names_map, lang=lang))
msgs.extend(_process_rows(app, MODULES_AND_FORMS_SHEET_NAME,
modules_and_forms_rows, names_map, lang=lang))
return msgs
def _process_rows(app, sheet_name, rows, names_map, lang=None):
"""
Processes the rows of a worksheet of translations.
This is the complement of get_bulk_app_sheets_by_name() and
get_bulk_app_single_sheet_by_name(), from
corehq/apps/translations/app_translations/download.py, which creates
these worksheets and rows.
:param app: The application being translated
:param sheet_name: The tab name of the sheet being processed.
e.g. "menu1", "menu1_form1", or "Menus_and_forms"
:param rows: The rows in the worksheet
:param names_map: A map of sheet_name to module/form unique_id, used
to fetch a module/form even if it has been moved
since the worksheet was created
:param lang: The language that the app is being translated into
:return: A list of error messages or an empty list
"""
if not sheet_name or not rows:
return []
if is_modules_and_forms_sheet(sheet_name):
updater = BulkAppTranslationModulesAndFormsUpdater(app, names_map, lang=lang)
return updater.update(rows)
if is_module_sheet(sheet_name):
unique_id = names_map.get(sheet_name)
try:
updater = BulkAppTranslationModuleUpdater(app, sheet_name, unique_id, lang=lang)
except ModuleNotFoundException:
return [(
messages.error,
_('Invalid menu in row "%s", skipping row.') % sheet_name
)]
return updater.update(rows)
if is_form_sheet(sheet_name):
unique_id = names_map.get(sheet_name)
try:
updater = BulkAppTranslationFormUpdater(app, sheet_name, unique_id, lang=lang)
except FormNotFoundException:
return [(
messages.error,
_('Invalid form in row "%s", skipping row.') % sheet_name
)]
return updater.update(rows)
return [(
messages.error,
_('Did not recognize "%s", skipping row.') % sheet_name
)]
def _check_for_sheet_error(sheet, expected_headers, processed_sheets=Ellipsis):
if sheet.worksheet.title in processed_sheets:
raise BulkAppTranslationsException(_('Sheet "%s" was repeated. Only the first occurrence has been '
'processed.') % sheet.worksheet.title)
if not expected_headers:
raise BulkAppTranslationsException(_('Skipping sheet "%s", could not recognize title') %
sheet.worksheet.title)
num_required_headers = 0
if is_modules_and_forms_sheet(sheet.worksheet.title):
num_required_headers = 1 # type
elif is_module_sheet(sheet.worksheet.title):
num_required_headers = 2 # case property, list or detail
elif is_form_sheet(sheet.worksheet.title):
num_required_headers = 1 # label
elif is_single_sheet(sheet.worksheet.title):
num_required_headers = 4 # menu or form, case property, list or detail, label
expected_required_headers = tuple(expected_headers[:num_required_headers])
actual_required_headers = tuple(sheet.headers[:num_required_headers])
if expected_required_headers != actual_required_headers:
raise BulkAppTranslationsException(_('Skipping sheet {title}: expected first columns to be '
'{expected}').format(
title=sheet.worksheet.title,
expected=", ".join(expected_required_headers)))
def _check_for_sheet_warnings(sheet, expected_headers):
warnings = []
missing_cols = set(expected_headers) - set(sheet.headers)
extra_cols = set(sheet.headers) - set(expected_headers)
if len(missing_cols) > 0:
warnings.append((_('Sheet "{sheet}" has fewer columns than expected. Sheet will be processed but the '
'following translations will be unchanged: {columns}').format(sheet=sheet.worksheet.title,
columns=", ".join(missing_cols))))
if len(extra_cols) > 0:
warnings.append(_('Sheet "{sheet}" has unrecognized columns. Sheet will be processed but will ignore the '
'following columns: {columns}').format(sheet=sheet.worksheet.title, columns=", ".join(extra_cols)))
return warnings
class BulkAppTranslationModulesAndFormsUpdater(BulkAppTranslationUpdater):
def __init__(self, app, names_map, lang=None):
super(BulkAppTranslationModulesAndFormsUpdater, self).__init__(app, lang)
self.sheet_name_to_unique_id = names_map
def update(self, rows):
"""
This handles updating module/form names and menu media
(the contents of the "Menus and forms" sheet in the multi-tab upload).
"""
self.msgs = []
for row in get_unicode_dicts(rows):
sheet_name = row.get('menu_or_form', '')
# The unique_id column is populated on the "Menus_and_forms" sheet in multi-sheet translation files,
# and in the "name / menu media" row in single-sheet translation files.
unique_id = row.get('unique_id')
if not unique_id and sheet_name in self.sheet_name_to_unique_id:
# If we don't have a value for unique_id, try to fetch it from self.sheet_name_to_unique_id
unique_id = self.sheet_name_to_unique_id[sheet_name]
try:
if unique_id:
document = get_menu_or_form_by_unique_id(self.app, unique_id, sheet_name)
else:
document = get_menu_or_form_by_sheet_name(self.app, sheet_name)
except (ModuleNotFoundException, FormNotFoundException, ValueError) as err:
self.msgs.append((messages.error, str(err)))
continue
self.update_translation_dict('default_', document.name, row)
# Update menu media
for lang in self.langs:
image_header = 'image_%s' % lang
if image_header in row:
document.set_icon(lang, row[image_header])
audio_header = 'audio_%s' % lang
if audio_header in row:
document.set_audio(lang, row[audio_header])
return self.msgs
|
135517
|
import enum
from typing import Dict, Any, Optional
from .types import *
class DeviceException(Exception): # pylint: disable=too-few-public-methods
exc: Dict[int, Any] = {
0x6A86: WrongP1P2Error,
0x6A87: WrongDataLengthError,
0x6D00: InsNotSupportedError,
0x6E00: ClaNotSupportedError,
0xB000: AppNameTooLongError,
0x6985: ActionCancelledError,
0x6F10: MetadatasParsingError
}
def __new__(cls,
error_code: int,
ins: Optional[enum.IntEnum] = None,
message: str = ""
) -> Any:
error_message: str = (f"Error in {ins!r} command"
if ins else "Error in command")
if error_code in DeviceException.exc:
return DeviceException.exc[error_code](hex(error_code),
error_message,
message)
return UnknownDeviceError(hex(error_code), error_message, message)
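# Illustrative usage sketch (the status word and the surrounding transport
# layer are assumptions): DeviceException.__new__ returns an instance of the
# class mapped in `exc`, or UnknownDeviceError for unmapped status words.
def _raise_for_status(status_word: int) -> None:
    # e.g. status_word=0x6A86 raises WrongP1P2Error
    raise DeviceException(error_code=status_word, message="APDU error")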
|