# =============================================================================
# tf_rl_tutorial/models.py
# Repo: 4k4xs4pH1r3/tf_rl_tutorial @ c58d10c60cfd79b2e0661b4a49cccae8d4584c57
# License: Apache-2.0
# =============================================================================
# Copyright 2016 Mandiant, A FireEye Company
# Authors: Brian Jones
# License: Apache 2.0
''' Model classes for "Relational Learning with TensorFlow" tutorial '''
import numpy as np
import tensorflow as tf
from .util import ContrastiveTrainingProvider
def least_squares_objective(output, target, add_bias=True):
''' Creates final model output and loss for least squares objective
Args:
output: Model output
target: Training target placeholder
add_bias: If True, a bias Variable will be added to the output
Returns:
tuple (final output, loss)
'''
y = output
if add_bias:
bias = tf.Variable([0.0])
y = output + bias
loss = tf.reduce_sum(tf.square(y - target))
return y, loss
def logistic_objective(output, target, add_bias=True):
''' Creates final model output and loss for logistic objective
Args:
output: Model output
target: Training target placeholder
add_bias: If True, a bias Variable will be added to the output
Returns:
tuple (final output, loss)
'''
y = output
if add_bias:
bias = tf.Variable([0.0])
y = output + bias
sig_y = tf.clip_by_value(tf.sigmoid(y), 0.001, 0.999) # avoid NaNs
loss = -tf.reduce_sum(target*tf.log(sig_y) + (1-target)*tf.log(1-sig_y))
return sig_y, loss
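
# A hedged NumPy restatement (illustrative only, not used by the models) of
# the clipped cross-entropy above: clamping the sigmoid to [0.001, 0.999]
# keeps both log() terms finite when the raw output saturates.
def _logistic_loss_numpy(raw_output, target):
    s = np.clip(1.0 / (1.0 + np.exp(-np.asarray(raw_output, dtype=float))),
                0.001, 0.999)
    t = np.asarray(target, dtype=float)
    return -(t * np.log(s) + (1 - t) * np.log(1 - s)).sum()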
def ranking_margin_objective(output, margin=1.0):
''' Create final model output and loss for pairwise ranking margin objective
Loss for single pair (f(p), f(n)) = [margin - f(p) + f(n)]+
This only works when given model output on alternating positive/negative
pairs: [pos,neg,pos,neg,...]. TODO: check target placeholder
at runtime to make sure this is the case?
Args:
output: Model output
margin: The margin value for the pairwise hinge loss
Returns:
tuple (final output, loss)
'''
y_pairs = tf.reshape(output, [-1,2]) # fold: 1 x n -> [n/2 x 2]
pos_scores, neg_scores = tf.split(1, 2, y_pairs) # separate pairs
hinge_losses = tf.nn.relu(margin - pos_scores + neg_scores)
total_hinge_loss = tf.reduce_sum(hinge_losses)
return output, total_hinge_loss
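
# Minimal NumPy sketch (illustrative only) of the pairwise hinge above: scores
# must arrive as [pos, neg, pos, neg, ...]; folding into (n/2, 2) rows gives
# (pos, neg) pairs, and each pair contributes max(0, margin - pos + neg).
def _ranking_margin_numpy(scores, margin=1.0):
    pairs = np.asarray(scores, dtype=float).reshape(-1, 2)
    pos, neg = pairs[:, 0], pairs[:, 1]
    return np.maximum(0.0, margin - pos + neg).sum()
# e.g. _ranking_margin_numpy([2.0, 0.5, 0.1, 0.3]) -> 1.2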
def sparse_maxnorm_update(var_matrix, indices, maxnorm=1.0):
    '''Sparse update operation that ensures selected rows in var_matrix
    do not have a Euclidean norm greater than maxnorm. Rows that exceed
    it are scaled back down to that norm.
Args:
var_matrix: 2D mutable tensor (Variable) to operate on
indices: 1D tensor with the row indices to constrain
maxnorm: the maximum Euclidean norm
Returns:
An operation that will update var_matrix when run in a Session
'''
selected_rows = tf.nn.embedding_lookup(var_matrix, indices)
row_norms = tf.sqrt(tf.reduce_sum(tf.square(selected_rows), 1))
scaling = maxnorm / tf.maximum(row_norms, maxnorm)
scaled = selected_rows * tf.expand_dims(scaling, 1)
return tf.scatter_update(var_matrix, indices, scaled)
def dense_maxnorm_update(var_matrix, maxnorm=1.0):
    '''Dense update operation that ensures all rows in var_matrix
    do not have a Euclidean norm greater than maxnorm. Rows that exceed
    it are scaled back down to that norm.
Args:
var_matrix: 2D mutable tensor (Variable) to operate on
maxnorm: the maximum Euclidean norm
Returns:
An operation that will update var_matrix when run in a Session
'''
row_norms = tf.sqrt(tf.reduce_sum(tf.square(var_matrix), 1))
scaling = maxnorm / tf.maximum(row_norms, maxnorm)
scaled = var_matrix * tf.expand_dims(scaling, 1)
return tf.assign(var_matrix, scaled)
def dense_maxnorm(var_matrix, maxnorm=1.0):
'''Similar to dense_maxnorm_update(), except this returns a new Tensor
instead of an operation that modifies var_matrix.
Args:
var_matrix: 2D tensor (Variable)
maxnorm: the maximum Euclidean norm
Returns:
A new tensor where all rows have been scaled as necessary
'''
axis_norms = tf.sqrt(tf.reduce_sum(tf.square(var_matrix), 1))
scaling = maxnorm / tf.maximum(axis_norms, maxnorm)
return var_matrix * tf.expand_dims(scaling, 1)
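
# Illustrative NumPy equivalent of the row-wise maxnorm projection used by the
# three helpers above: rows longer than maxnorm are rescaled onto the L2 ball,
# while shorter rows pass through unchanged because
# maximum(norm, maxnorm) == maxnorm for them.
def _maxnorm_rows_numpy(matrix, maxnorm=1.0):
    m = np.asarray(matrix, dtype=float)
    norms = np.sqrt((m ** 2).sum(axis=1, keepdims=True))
    return m * (maxnorm / np.maximum(norms, maxnorm))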
class BaseModel(object):
''' Base class for embedding-based relational learning models that use
maxnorm regularization. Subclasses must implement _create_model() and
populate self.train_step, and can optionally populate self.post_step for
post-processing.
Note: When model_type is 'ranking_margin', the mini-batch provider returned
by _create_batch_provider() must provide instances in alternating
pos/neg pairs: [pos, neg, pos, neg, ...]. This is satisfied when using
ContrastiveTrainingProvider; be careful if you use a different one.
Args:
embedding_size: Embedding vector length
maxnorm: Maximum Euclidean norm for embedding vectors
batch_pos_cnt: Number of positive examples to use in each mini-batch
max_iter: Maximum number of optimization iterations to perform
model_type: Possible values:
'least_squares': squared loss on 0/1 targets
'logistic': sigmoid link function, crossent loss on 0/1 targets
'ranking_margin': ranking margin on pos/neg pairs
add_bias: If True, a bias Variable will be added to the output for
least_squares and logistic models.
opt: An optimizer object to use. If None, the default optimizer is
tf.train.AdagradOptimizer(1.0)
TODO: add support for other regularizers like L2
'''
def __init__(self, embedding_size, maxnorm=1.0,
batch_pos_cnt=100, max_iter=1000,
model_type='least_squares', add_bias=True,
opt=None):
self.embedding_size = embedding_size
self.maxnorm = maxnorm
self.batch_pos_cnt = batch_pos_cnt
self.max_iter = max_iter
self.model_type = model_type
self.add_bias = add_bias
if opt is None:
opt = tf.train.AdagradOptimizer(1.0)
self.opt = opt
self.sess = None
self.train_step = None
self.post_step = None
self.graph = tf.Graph()
with self.graph.as_default():
self.head_input = tf.placeholder(tf.int32, shape=[None])
self.rel_input = tf.placeholder(tf.int32, shape=[None])
self.tail_input = tf.placeholder(tf.int32, shape=[None])
self.target = tf.placeholder(tf.float32, shape=[None])
def _create_model(self, train_triples):
''' Subclasses must build Graph and set self.train_step '''
raise Exception('subclass must implement')
def _create_batch_provider(self, train_triples):
''' Default implementation '''
return ContrastiveTrainingProvider(train_triples, self.batch_pos_cnt)
def _create_output_and_loss(self, raw_output):
if self.model_type == 'least_squares':
return least_squares_objective(raw_output, self.target, self.add_bias)
elif self.model_type == 'logistic':
return logistic_objective(raw_output, self.target, self.add_bias)
elif self.model_type == 'ranking_margin':
return ranking_margin_objective(raw_output, 1.0)
else:
raise Exception('Unknown model_type')
def _norm_constraint_op(self, var_matrix, row_indices, maxnorm):
'''
Args:
var_matrix: A 2D Tensor holding the vectors to constrain (in rows)
            row_indices: The rows in var_tensor that are being considered for
                constraint application (typically embedding vectors for
                entities observed for a minibatch of training data). These
                will be used for a sparse variable update operation if the
                chosen optimizer only modifies these entries; otherwise
                a dense operation is used and row_indices are ignored.
maxnorm: The maximum Euclidean norm for the rows in var_tensor
Returns:
An operation which will apply the constraints when run in a Session
'''
        # Most TF optimizers leave rows with zero gradient untouched, so a
        # sparse update over just the visited rows suffices. AdamOptimizer
        # updates every row, so a dense update is required for it.
if isinstance(self.opt, tf.train.AdamOptimizer):
return dense_maxnorm_update(var_matrix, maxnorm)
else:
return sparse_maxnorm_update(var_matrix, row_indices, maxnorm)
def embeddings(self):
''' Subclass should override this if it uses different embedding
variables
Returns:
A list of pairs: [(embedding name, embedding 2D Tensor)]
'''
return [('entity', self.entity_embedding_vars),
('rel', self.rel_embedding_vars)]
def create_feed_dict(self, triples, labels=None, training=False):
''' Create a TensorFlow feed dict for relationship triples
Args:
triples: A numpy integer array of relationship triples, where each
row contains [head idx, relationship idx, tail idx]
labels: (optional) A label array for triples
training: (optional) A flag indicating whether the feed dict is
for training or test purposes. Useful for things like
dropout where a dropout_probability variable is set differently
in the two contexts.
'''
feed_dict = {self.head_input: triples[:, 0],
self.rel_input: triples[:, 1],
self.tail_input: triples[:, 2]}
if labels is not None:
feed_dict[self.target] = labels
return feed_dict
def close(self):
''' Closes the TensorFlow Session object '''
        self.sess.close()
def fit(self, train_triples, step_callback=None):
''' Trains the model on relationship triples
Args:
            train_triples: A numpy integer array of relationship triples, where
                each row contains [head idx, relationship idx, tail idx]
step_callback: (optional) A function that will be called before each
optimization step, step_callback(iteration, feed_dict)
'''
if self.sess is not None:
self.sess.close()
self.sess = tf.Session(graph=self.graph)
with self.graph.as_default():
self._create_model(train_triples)
self.sess.run(tf.initialize_all_variables())
batch_provider = self._create_batch_provider(train_triples)
for i in range(self.max_iter):
batch_triples, batch_labels = batch_provider.next_batch()
feed_dict = self.create_feed_dict(batch_triples, batch_labels, training=True)
if step_callback:
keep_going = step_callback(i, feed_dict)
if not keep_going:
break
self.sess.run(self.train_step, feed_dict)
if self.post_step is not None:
self.sess.run(self.post_step, feed_dict)
def predict(self, triples):
''' Runs a trained model on the supplied relationship triples. fit()
must be called before calling this function.
Args:
            triples: A numpy integer array of relationship triples, where each
                row contains [head idx, relationship idx, tail idx]
'''
feed_dict = self.create_feed_dict(triples, training=False)
return self.sess.run(self.output, feed_dict=feed_dict)
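
# Hedged end-to-end sketch of the BaseModel workflow (shapes and values are
# illustrative assumptions, and a run this short will not converge):
def _example_fit_predict():
    triples = np.array([[0, 0, 1], [1, 0, 2], [2, 1, 0]])  # [head, rel, tail]
    model = Bilinear(embedding_size=8, max_iter=10)
    model.fit(triples)
    scores = model.predict(triples)
    model.close()
    return scores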
class Contrastive_CP(BaseModel):
''' Model with a scoring function based on CANDECOMP/PARAFAC tensor
decomposition. Optimization differs, however, in the use of maxnorm
regularization and contrastive negative sampling.
Score for (head i, rel k, tail j) triple is: h_i^T * diag(r_k) * t_j,
where h_i and t_j are embedding vectors for the head and tail entities,
and r_k is an embedding vector for the relationship type.
Args:
embedding_size: Embedding vector length
maxnorm: Maximum Euclidean norm for embedding vectors
batch_pos_cnt: Number of positive examples to use in each mini-batch
max_iter: Maximum number of optimization iterations to perform
model_type: Possible values:
'least_squares': squared loss on 0/1 targets
'logistic': sigmoid link function, crossent loss on 0/1 targets
'ranking_margin': ranking margin on pos/neg pairs
add_bias: If True, a bias Variable will be added to the output for
least_squares and logistic models.
opt: An optimizer object to use. If None, the default optimizer is
tf.train.AdagradOptimizer(1.0)
References:
Kolda, Tamara G., and Brett W. Bader. "Tensor decompositions and
applications." SIAM review 51.3 (2009): 455-500.
'''
def _create_model(self, train_triples):
# Count unique items to determine embedding matrix sizes
head_cnt = len(set(train_triples[:,0]))
rel_cnt = len(set(train_triples[:,1]))
tail_cnt = len(set(train_triples[:,2]))
init_sd = 1.0 / np.sqrt(self.embedding_size)
# Embedding matrices for entities and relationship types
head_init = tf.truncated_normal([head_cnt, self.embedding_size], stddev=init_sd)
rel_init = tf.truncated_normal([rel_cnt, self.embedding_size], stddev=init_sd)
tail_init = tf.truncated_normal([tail_cnt, self.embedding_size], stddev=init_sd)
if self.maxnorm is not None:
# Ensure maxnorm constraints are initially satisfied
head_init = dense_maxnorm(head_init, self.maxnorm)
rel_init = dense_maxnorm(rel_init, self.maxnorm)
tail_init = dense_maxnorm(tail_init, self.maxnorm)
self.head_embedding_vars = tf.Variable(head_init)
self.rel_embedding_vars = tf.Variable(rel_init)
self.tail_embedding_vars = tf.Variable(tail_init)
# Embedding layer for each (head, rel, tail) triple being fed in as input
head_embed = tf.nn.embedding_lookup(self.head_embedding_vars, self.head_input)
rel_embed = tf.nn.embedding_lookup(self.rel_embedding_vars, self.rel_input)
tail_embed = tf.nn.embedding_lookup(self.tail_embedding_vars, self.tail_input)
# Model output
raw_output = tf.reduce_sum(tf.mul(tf.mul(head_embed, rel_embed), tail_embed), 1)
self.output, self.loss = self._create_output_and_loss(raw_output)
# Optimization
self.train_step = self.opt.minimize(self.loss)
if self.maxnorm is not None:
# Post-processing to limit embedding vars to L2 ball
head_constraint = self._norm_constraint_op(self.head_embedding_vars,
tf.unique(self.head_input)[0],
self.maxnorm)
rel_constraint = self._norm_constraint_op(self.rel_embedding_vars,
tf.unique(self.rel_input)[0],
self.maxnorm)
tail_constraint = self._norm_constraint_op(self.tail_embedding_vars,
tf.unique(self.tail_input)[0],
self.maxnorm)
self.post_step = [head_constraint, rel_constraint, tail_constraint]
def _create_batch_provider(self, train):
# CP treats head and tail entities separately
return ContrastiveTrainingProvider(train,
self.batch_pos_cnt,
separate_head_tail=True)
def embeddings(self):
'''
Returns:
A list of pairs: [(embedding name, embedding 2D Tensor)]
'''
return [('head', self.head_embedding_vars),
                ('tail', self.tail_embedding_vars),
('rel', self.rel_embedding_vars)]
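
# The CP score above, h^T * diag(r) * t, is just a three-way elementwise
# product summed over the embedding dimension. A hedged NumPy sanity sketch:
def _cp_score_numpy(h, r, t):
    h, r, t = (np.asarray(v, dtype=float) for v in (h, r, t))
    assert np.allclose(h @ np.diag(r) @ t, (h * r * t).sum())
    return (h * r * t).sum()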
class Bilinear(BaseModel):
''' Model with a scoring function based on the bilinear formulation of
RESCAL. Optimization differs, however, in the use of maxnorm
regularization and contrastive negative sampling.
Score for (head i, rel k, tail j) triple is: e_i^T * R_k * e_j
where e_i and e_j are D-dimensional embedding vectors for the head and tail
entities, and R_k is a (D x D) matrix for the relationship type
acting as a bilinear operator.
Args:
embedding_size: Embedding vector length
maxnorm: Maximum Euclidean norm for embedding vectors
rel_maxnorm_mult: Multiplier for the maxnorm threshold used for
relationship embeddings. Example: If maxnorm=2.0 and
            rel_maxnorm_mult=4.0, then the maxnorm constraint for relationships
            will be 2.0 * 4.0 = 8.0.
batch_pos_cnt: Number of positive examples to use in each mini-batch
max_iter: Maximum number of optimization iterations to perform
model_type: Possible values:
'least_squares': squared loss on 0/1 targets
'logistic': sigmoid link function, crossent loss on 0/1 targets
'ranking_margin': ranking margin on pos/neg pairs
add_bias: If True, a bias Variable will be added to the output for
least_squares and logistic models.
opt: An optimizer object to use. If None, the default optimizer is
tf.train.AdagradOptimizer(1.0)
References:
Nickel, Maximilian, Volker Tresp, and Hans-Peter Kriegel. "A three-way
model for collective learning on multi-relational data." Proceedings of
the 28th international conference on machine learning (ICML-11). 2011.
'''
def __init__(self, embedding_size, maxnorm=1.0, rel_maxnorm_mult=3.0,
batch_pos_cnt=100, max_iter=1000,
model_type='least_squares', add_bias=True, opt=None):
        super(Bilinear, self).__init__(
            embedding_size=embedding_size,
            maxnorm=maxnorm,
            batch_pos_cnt=batch_pos_cnt,
            max_iter=max_iter,
            model_type=model_type,
            add_bias=add_bias,
            opt=opt)
self.rel_maxnorm_mult = rel_maxnorm_mult
def _create_model(self, train_triples):
# Count unique items to determine embedding matrix sizes
entity_cnt = len(set(train_triples[:,0]).union(train_triples[:,2]))
rel_cnt = len(set(train_triples[:,1]))
init_sd = 1.0 / np.sqrt(self.embedding_size)
# Embedding variables for all entities and relationship types
entity_embedding_shape = [entity_cnt, self.embedding_size]
# Relationship embeddings will be stored in flattened format to make
# applying maxnorm constraints easier
rel_embedding_shape = [rel_cnt, self.embedding_size * self.embedding_size]
entity_init = tf.truncated_normal(entity_embedding_shape, stddev=init_sd)
rel_init = tf.truncated_normal(rel_embedding_shape, stddev=init_sd)
if self.maxnorm is not None:
# Ensure maxnorm constraints are initially satisfied
entity_init = dense_maxnorm(entity_init, self.maxnorm)
rel_init = dense_maxnorm(rel_init, self.maxnorm)
self.entity_embedding_vars = tf.Variable(entity_init)
self.rel_embedding_vars = tf.Variable(rel_init)
# Embedding layer for each (head, rel, tail) triple being fed in as input
head_embed = tf.nn.embedding_lookup(self.entity_embedding_vars, self.head_input)
tail_embed = tf.nn.embedding_lookup(self.entity_embedding_vars, self.tail_input)
rel_embed = tf.nn.embedding_lookup(self.rel_embedding_vars, self.rel_input)
# Reshape rel_embed into square D x D matrices
rel_embed_square = tf.reshape(rel_embed, (-1, self.embedding_size, self.embedding_size))
# Reshape head_embed and tail_embed to be suitable for the matrix multiplication
head_embed_row = tf.expand_dims(head_embed, 1) # embeddings as row vectors
tail_embed_col = tf.expand_dims(tail_embed, 2) # embeddings as column vectors
head_rel_mult = tf.batch_matmul(head_embed_row, rel_embed_square)
# Output needs a squeeze into a 1d vector
raw_output = tf.squeeze(tf.batch_matmul(head_rel_mult, tail_embed_col))
self.output, self.loss = self._create_output_and_loss(raw_output)
# Optimization
self.train_step = self.opt.minimize(self.loss)
if self.maxnorm is not None:
# Post-processing to limit embedding vars to L2 ball
rel_maxnorm = self.maxnorm * self.rel_maxnorm_mult
unique_ent_indices = tf.unique(tf.concat(0, [self.head_input, self.tail_input]))[0]
unique_rel_indices = tf.unique(self.rel_input)[0]
entity_constraint = self._norm_constraint_op(self.entity_embedding_vars,
unique_ent_indices,
self.maxnorm)
rel_constraint = self._norm_constraint_op(self.rel_embedding_vars,
unique_rel_indices,
rel_maxnorm)
self.post_step = [entity_constraint, rel_constraint]
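
# Hedged NumPy sketch of the RESCAL-style score above: the flattened
# relationship row is reshaped into a (D, D) operator and applied bilinearly,
# mirroring the batched row-vector x matrix x column-vector product.
def _bilinear_score_numpy(head, rel_flat, tail, embedding_size):
    R = np.asarray(rel_flat, dtype=float).reshape(embedding_size, embedding_size)
    return float(np.asarray(head, dtype=float) @ R @ np.asarray(tail, dtype=float))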
class TransE(BaseModel):
''' TransE: Translational Embeddings Model
    Score for (head i, rel k, tail j) triple is: d(e_i + t_k, e_j),
where e_i and e_j are D-dimensional embedding vectors for the head and
tail entities, t_k is a another D-dimensional vector acting as a
translation, and d() is a dissimilarity function like Euclidean distance.
    Optimization is performed using SGD on a ranking margin loss between
    contrastive training pairs. Entity embeddings are constrained to lie within
    the unit L2 ball; relationship vectors are left unconstrained.
Args:
embedding_size: Embedding vector length
batch_pos_cnt: Number of positive examples to use in each mini-batch
max_iter: Maximum number of optimization iterations to perform
dist: Distance function used in loss:
'euclidean': sqrt(sum((x - y)^2))
'sqeuclidean': squared Euclidean, sum((x - y)^2)
'manhattan': sum of absolute differences, sum(|x - y|)
        margin: Margin parameter for pairwise ranking hinge loss
opt: An optimizer object to use. If None, the default optimizer is
tf.train.AdagradOptimizer(1.0)
References:
Bordes, Antoine, et al. "Translating embeddings for modeling multi-relational
data." Advances in Neural Information Processing Systems. 2013.
'''
def __init__(self, embedding_size, batch_pos_cnt=100,
max_iter=1000, dist='euclidean',
margin=1.0, opt=None):
super(TransE, self).__init__(embedding_size=embedding_size,
maxnorm=1.0,
batch_pos_cnt=batch_pos_cnt,
max_iter=max_iter,
model_type='ranking_margin',
opt=opt)
self.dist = dist
self.margin = margin
self.EPS = 1e-3 # for sqrt gradient when dist='euclidean'
def _create_model(self, train_triples):
# Count unique items to determine embedding matrix sizes
entity_cnt = len(set(train_triples[:,0]).union(train_triples[:,2]))
rel_cnt = len(set(train_triples[:,1]))
init_sd = 1.0 / np.sqrt(self.embedding_size)
# Embedding variables
entity_var_shape = [entity_cnt, self.embedding_size]
rel_var_shape = [rel_cnt, self.embedding_size]
entity_init = tf.truncated_normal(entity_var_shape, stddev=init_sd)
rel_init = tf.truncated_normal(rel_var_shape, stddev=init_sd)
# Ensure maxnorm constraints are initially satisfied
entity_init = dense_maxnorm(entity_init, self.maxnorm)
self.entity_embedding_vars = tf.Variable(entity_init)
self.rel_embedding_vars = tf.Variable(rel_init)
# Embedding layer for each (head, rel, tail) triple being fed in as input
head_embed = tf.nn.embedding_lookup(self.entity_embedding_vars, self.head_input)
tail_embed = tf.nn.embedding_lookup(self.entity_embedding_vars, self.tail_input)
rel_embed = tf.nn.embedding_lookup(self.rel_embedding_vars, self.rel_input)
# Relationship vector acts as a translation in entity embedding space
diff_vec = tail_embed - (head_embed + rel_embed)
# negative dist so higher scores are better (important for pairwise loss)
if self.dist == 'manhattan':
raw_output = -tf.reduce_sum(tf.abs(diff_vec), 1)
elif self.dist == 'euclidean':
# +eps because gradients can misbehave for small values in sqrt
raw_output = -tf.sqrt(tf.reduce_sum(tf.square(diff_vec), 1) + self.EPS)
elif self.dist == 'sqeuclidean':
raw_output = -tf.reduce_sum(tf.square(diff_vec), 1)
else:
raise Exception('Unknown distance type')
# Model output
self.output, self.loss = ranking_margin_objective(raw_output, self.margin)
# Optimization with postprocessing to limit embedding vars to L2 ball
self.train_step = self.opt.minimize(self.loss)
unique_ent_indices = tf.unique(tf.concat(0, [self.head_input, self.tail_input]))[0]
self.post_step = self._norm_constraint_op(self.entity_embedding_vars,
unique_ent_indices,
                                                  self.maxnorm)
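
# Hedged NumPy sketch of the TransE score above: the relationship vector
# translates the head embedding, and the negated dissimilarity to the tail is
# the score, so better translations score higher.
def _transe_score_numpy(head, rel, tail, dist='euclidean', eps=1e-3):
    diff = np.asarray(tail, dtype=float) - (np.asarray(head, dtype=float) +
                                            np.asarray(rel, dtype=float))
    if dist == 'manhattan':
        return -np.abs(diff).sum()
    if dist == 'euclidean':
        return -np.sqrt((diff ** 2).sum() + eps)  # +eps mirrors the gradient guard above
    return -(diff ** 2).sum()  # sqeuclidean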
# =============================================================================
# salvia/wallet/payment.py
# Repo: Salvia-Network/salvia-blockchain @ b0ce4b9f75c2fc354941b45eb468ffcf917ead30
# License: Apache-2.0
# =============================================================================
from dataclasses import dataclass
from typing import List
from salvia.types.blockchain_format.sized_bytes import bytes32
from salvia.types.blockchain_format.program import Program
from salvia.util.ints import uint64
# This class is supposed to correspond to a CREATE_COIN condition
@dataclass(frozen=True)
class Payment:
puzzle_hash: bytes32
amount: uint64
memos: List[bytes]
def as_condition_args(self) -> List:
return [self.puzzle_hash, self.amount, self.memos]
def as_condition(self) -> Program:
return Program.to([51, *self.as_condition_args()])
def name(self) -> bytes32:
return self.as_condition().get_tree_hash()
@classmethod
def from_condition(cls, condition: Program) -> "Payment":
python_condition: List = condition.as_python()
puzzle_hash, amount = python_condition[1:3]
memos: List[bytes] = []
if len(python_condition) > 3:
memos = python_condition[3]
return cls(bytes32(puzzle_hash), uint64(int.from_bytes(amount, "big")), memos)
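
# Hedged round-trip sketch (values are illustrative assumptions): a Payment
# serializes to a CREATE_COIN condition, (51 puzzle_hash amount memos), and
# from_condition() should recover the same fields from that Program.
def _example_payment_roundtrip() -> bytes32:
    p = Payment(bytes32(b"\x00" * 32), uint64(1000), [b"memo"])
    assert Payment.from_condition(p.as_condition()) == p
    return p.name()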
# =============================================================================
# checklink/parse/__init__.py
# Repo: zombie110year/find_dead_link @ 565ec99c0fcbecaa4f7d82006bc9d58d0c05fa06
# License: MIT
# =============================================================================
"""
Text Parsers to find url from content.
Every url item should contain:
- url
- location(`filepath:row:column`)
"""
from abc import abstractmethod
from typing import List
class Link:
def __init__(self, url: str, path: str, row: int, column: int):
"""init link object
:param str url: link's href
:param str path: where found this link, file path
:param int row: where found this link, line number
:param int column: where found this link, chars after line beginning
"""
self.__url = url
self.__path = path
self.__row = row
self.__column = column
@property
def url(self) -> str:
return self.__url
@property
def path(self) -> str:
return self.__path
@property
def row(self) -> int:
return self.__row
@property
def column(self) -> int:
return self.__column
@property
def location(self) -> str:
return f"{self.path}:{self.row}:{self.column}"
@path.setter
def path(self, other: str):
self.__path = other
class Parser:
@abstractmethod
def parse(self, text: str) -> List[Link]:
pass
@abstractmethod
def parse_file(self, path: str) -> List[Link]:
pass
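
# A minimal concrete Parser sketch (an illustration, not part of the package):
# it finds bare http(s) URLs with a regex and records 1-based row/column so
# that Link.location stays meaningful.
import re


class RegexLinkParser(Parser):
    _URL = re.compile(r"https?://[^\s<>\"')\]]+")

    def parse(self, text: str) -> List[Link]:
        links = []
        for row, line in enumerate(text.splitlines(), start=1):
            for m in self._URL.finditer(line):
                links.append(Link(m.group(0), "<string>", row, m.start() + 1))
        return links

    def parse_file(self, path: str) -> List[Link]:
        with open(path, encoding="utf-8") as f:
            links = self.parse(f.read())
        for link in links:
            link.path = path  # Link exposes a path setter for exactly this case
        return links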
# =============================================================================
# components/studio/projects/models.py
# Repo: MuhammadNaumanAbid/stackn @ 484501efda19f8f9c9c088bcf6095060c925d3b1
# License: Apache-2.0
# =============================================================================
import base64
from django.db import models
from django.contrib.auth.models import User
from django.utils.text import slugify
import string
import random
DEFAULT_ENVIRONMENT_ID = 1
class Flavor(models.Model):
name = models.CharField(max_length=512)
slug = models.CharField(max_length=512)
cpu = models.TextField(blank=True, null=True)
mem = models.TextField(blank=True, null=True)
gpu = models.TextField(blank=True, null=True)
updated_at = models.DateTimeField(auto_now=True)
created_at = models.DateTimeField(auto_now_add=True)
def __str__(self):
return str(self.name)
class Environment(models.Model):
name = models.CharField(max_length=512)
slug = models.CharField(max_length=512, blank=True, null=True)
image = models.CharField(max_length=512)
dockerfile = models.TextField(default='FROM jupyter/base-notebook')
startup = models.TextField(null=True, blank=True)
teardown = models.TextField(null=True, blank=True)
updated_at = models.DateTimeField(auto_now=True)
created_at = models.DateTimeField(auto_now_add=True)
def __str__(self):
return str(self.name)
class ProjectManager(models.Manager):
def generate_passkey(self, length=20):
import secrets
import string
alphabet = string.ascii_letters + string.digits
password = ''.join(secrets.choice(alphabet) for _ in range(length))
        # Encode the key with base64 (obfuscation, not encryption)
password = password.encode('ascii')
base64_bytes = base64.b64encode(password)
password = base64_bytes.decode('ascii')
return password
def create_project(self, name, owner, description, repository):
letters = string.ascii_lowercase
slug = name.replace(" ","-").replace("_","-")
from .helpers import urlify
slug = urlify(slug)
slug_extension = ''.join(random.choice(letters) for i in range(3))
slug = '{}-{}'.format(slugify(slug), slug_extension)
key = self.generate_passkey()
secret = self.generate_passkey(40)
project = self.create(name=name, owner=owner, slug=slug, project_key=key, project_secret=secret,
description=description, repository=repository,
repository_imported=False)
return project
class Project(models.Model):
objects = ProjectManager()
name = models.CharField(max_length=512, unique=True)
description = models.TextField(null=True, blank=True)
slug = models.CharField(max_length=512, unique=True)
owner = models.ForeignKey(User, on_delete=models.DO_NOTHING, related_name='owner')
authorized = models.ManyToManyField(User, blank=True)
image = models.CharField(max_length=2048, blank=True, null=True)
project_key = models.CharField(max_length=512)
project_secret = models.CharField(max_length=512)
updated_at = models.DateTimeField(auto_now=True)
created_at = models.DateTimeField(auto_now_add=True)
repository = models.CharField(max_length=512, null=True, blank=True)
repository_imported = models.BooleanField(default=False)
    environment = models.ForeignKey('projects.Environment', on_delete=models.DO_NOTHING, default=DEFAULT_ENVIRONMENT_ID)
    clone_url = models.CharField(max_length=512, null=True, blank=True)

    def __str__(self):
        return "Name: {} Description: {}".format(self.name, self.description)
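
# Hedged usage sketch (user and values are illustrative assumptions; requires
# a configured Django database):
#
#   owner = User.objects.get(username="alice")
#   project = Project.objects.create_project(
#       name="My Project", owner=owner, description="demo",
#       repository="https://github.com/example/repo")
#   # The slug becomes e.g. "my-project-abc", and project_key/project_secret
#   # are random base64-encoded passkeys of length 20 and 40 respectively.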
# =============================================================================
# yuu/ext/abematv.py
# Repo: soltia48/yuu @ 30d2fcf9427cbbea930d01baef337b64ad7fb05b
# License: BSD-3-Clause
# =============================================================================
import hashlib
import hmac
import json
import logging
import os
import re
import struct
import tempfile
import time
import uuid
from base64 import urlsafe_b64encode
from binascii import unhexlify
import m3u8
from Crypto.Cipher import AES
from tqdm import tqdm
def is_channel(url):
url = re.findall('(slot)', url)
if url:
return True
return False
yuu_log = logging.getLogger('yuu.abematv')
class AbemaTVDownloader:
def __init__(self, url, session):
self.key = None
self.iv = None
self.url = url
self.session = session
self.merge = True
if os.name == "nt":
self.yuu_folder = os.path.join(os.getenv('LOCALAPPDATA'), 'yuu_data')
sffx = '\\'
else:
self.yuu_folder = os.path.join(os.getenv('HOME'), '.yuu_data')
sffx = '/'
if not os.path.isdir(self.yuu_folder):
os.mkdir(self.yuu_folder)
self.temporary_folder = tempfile.mkdtemp(dir=self.yuu_folder)
self.temporary_folder = self.temporary_folder + sffx
self._aes = None
    def setup_decryptor(self):
        # Build a fresh cipher from the declared IV. Per the HLS spec each
        # media segment is decrypted independently, so CBC state must not
        # carry over from one segment to the next.
        self._aes = AES.new(self.key, AES.MODE_CBC, IV=unhexlify(self.iv))
def download_chunk(self, files, key, iv):
if iv.startswith('0x'):
self.iv = iv[2:]
else:
self.iv = iv
self.key = key
self.downloaded_files = []
self.setup_decryptor() # Initialize a new decryptor
try:
with tqdm(total=len(files), desc='Downloading', ascii=True, unit='file') as pbar:
for tsf in files:
outputtemp = self.temporary_folder + os.path.basename(tsf)
if outputtemp.find('?tver') != -1:
outputtemp = outputtemp[:outputtemp.find('?tver')]
with open(outputtemp, 'wb') as outf:
                        try:
                            vid = self.session.get(tsf)
                            self.setup_decryptor()  # fresh cipher per segment
                            vid = self._aes.decrypt(vid.content)
                            outf.write(vid)
except Exception as err:
yuu_log.error('Problem occured\nreason: {}'.format(err))
return None
pbar.update()
self.downloaded_files.append(outputtemp)
except KeyboardInterrupt:
yuu_log.warn('User pressed CTRL+C, cleaning up...')
return None
return self.downloaded_files
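
# Hedged standalone sketch of the per-segment HLS AES-128-CBC step performed
# by the downloader above: each media segment is decrypted independently,
# starting from the declared IV, which is why the cipher is re-created for
# every file.
def _decrypt_segment(ciphertext, key, iv_hex):
    aes = AES.new(key, AES.MODE_CBC, IV=unhexlify(iv_hex))
    return aes.decrypt(ciphertext)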
class AbemaTV:
def __init__(self, url, session):
self.session = session
self.type = 'AbemaTV'
self.yuu_logger = logging.getLogger('yuu.abematv.AbemaTV')
self.url = url
self.m3u8_url = None
self.resolution = None
self.resolution_o = None
self.device_id = None
self.is_m3u8 = False
self.est_filesize = None # In MiB
self.resolution_data = {
"1080p": ["4000kb/s", "AAC 192kb/s 2ch"],
"720p": ["2000kb/s", "AAC 160kb/s 2ch"],
"480p": ["900kb/s", "AAC 128kb/s 2ch"],
"360p": ["550kb/s", "AAC 128kb/s 2ch"],
"240p": ["240kb/s", "AAC 64kb/s 1ch"],
"180p": ["120kb/s", "AAC 64kb/s 1ch"]
}
self.bitrate_calculation = {
"1080p": 5175,
"720p": 2373,
"480p": 1367,
"360p": 878,
"240p": 292,
"180p": 179
}
self.authorization_required = False
self.authorized = False # Ignore for now
#self.authorize = True # Ignore for now
self.resumable = True
self._STRTABLE = "123456789ABCDEFGHJKLMNPQRSTUVWXYZabcdefghijkmnopqrstuvwxyz"
self._HKEY = b"3AF0298C219469522A313570E8583005A642E73EDD58E3EA2FB7339D3DF1597E"
self._KEYPARAMS = {
"osName": "android",
"osVersion": "6.0.1",
"osLand": "ja_JP",
"osTimezone": "Asia/Tokyo",
"appId": "tv.abema",
"appVersion": "3.27.1"
}
self._MEDIATOKEN_API = "https://api.abema.io/v1/media/token"
self._LICENSE_API = "https://license.abema.io/abematv-hls"
self._USERAPI = "https://api.abema.io/v1/users"
self._PROGRAMAPI = 'https://api.abema.io/v1/video/programs/'
self._CHANNELAPI = 'https://api.abema.io/v1/media/slots/'
self._SERIESAPI = "https://api.abema.io/v1/video/series/"
# Use Chrome UA
self.session.headers.update({'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/75.0.3770.100 Safari/537.36'})
def __repr__(self):
return '<yuu.AbemaTV: URL={}, Resolution={}, Device ID={}, m3u8 URL={}>'.format(self.url, self.resolution, self.device_id, self.m3u8_url)
def get_downloader(self):
"""
Return a :class: of the Downloader
"""
return AbemaTVDownloader(self.url, self.session)
def resume_prepare(self):
"""
Add support for resuming files, this function will prepare everything to start resuming download.
"""
return None
def authorize(self, username, password):
if not self.device_id:
self.yuu_logger.info('{}: Fetching temporary token'.format(self.type))
res, reas = self.get_token() # Abema needs authorization header before authenticating
if not res:
return res, reas
_ENDPOINT_MAIL = 'https://api.abema.io/v1/auth/user/email'
_ENDPOINT_OTP = 'https://api.abema.io/v1/auth/oneTimePassword'
mail_regex = r'^\w+([\.-]?\w+)*@\w+([\.-]?\w+)*(\.\w{2,3})+$'
if re.search(mail_regex, username):
_ENDPOINT_USE = _ENDPOINT_MAIL
_USERNAME_METHOD = 'email'
else:
_ENDPOINT_USE = _ENDPOINT_OTP
_USERNAME_METHOD = 'userId'
auth_ = {
_USERNAME_METHOD: username,
"password": password
}
res = self.session.post(_ENDPOINT_USE, json=auth_)
if res.status_code > 299:
res_j = res.json()
self.yuu_logger.debug('Abema Response: {}'.format(res_j['message']))
return False, 'Wrong {} and password combination'.format(_USERNAME_METHOD)
res_j = res.json()
self.yuu_logger.debug('Authentication Token: {}'.format(res_j['token']))
self.session.headers.update({'Authorization': 'bearer ' + res_j['token']})
self.authorized = True
return True, 'Authorized'
def get_token(self):
def key_secret(devid):
SECRETKEY = (b"v+Gjs=25Aw5erR!J8ZuvRrCx*rGswhB&qdHd_SYerEWdU&a?3DzN9B"
b"Rbp5KwY4hEmcj5#fykMjJ=AuWz5GSMY-d@H7DMEh3M@9n2G552Us$$"
b"k9cD=3TxwWe86!x#Zyhe")
deviceid = devid.encode("utf-8")
ts_1hour = (int(time.time()) + 60 * 60) // 3600 * 3600
time_struct = time.gmtime(ts_1hour)
ts_1hour_str = str(ts_1hour).encode("utf-8")
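            # The secret is derived by repeatedly folding HMAC-SHA256 digests:
            # month-many self-foldings of the static key, then the device id
            # is mixed in, day%5 more foldings, then the 1-hour timestamp
            # bucket, hour%5 foldings, and finally a url-safe base64 of the
            # last digest with padding stripped.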
h = hmac.new(SECRETKEY, digestmod=hashlib.sha256)
h.update(SECRETKEY)
tmp = h.digest()
for _ in range(time_struct.tm_mon):
h = hmac.new(SECRETKEY, digestmod=hashlib.sha256)
h.update(tmp)
tmp = h.digest()
h = hmac.new(SECRETKEY, digestmod=hashlib.sha256)
h.update(urlsafe_b64encode(tmp).rstrip(b"=") + deviceid)
tmp = h.digest()
for _ in range(time_struct.tm_mday % 5):
h = hmac.new(SECRETKEY, digestmod=hashlib.sha256)
h.update(tmp)
tmp = h.digest()
h = hmac.new(SECRETKEY, digestmod=hashlib.sha256)
h.update(urlsafe_b64encode(tmp).rstrip(b"=") + ts_1hour_str)
tmp = h.digest()
for _ in range(time_struct.tm_hour % 5): # utc hour
h = hmac.new(SECRETKEY, digestmod=hashlib.sha256)
h.update(tmp)
tmp = h.digest()
finalize = urlsafe_b64encode(tmp).rstrip(b"=").decode("utf-8")
self.yuu_logger.debug('Secret Key: {}'.format(finalize))
return finalize
        if self.authorized:  # Skip if already logged in
return True, 'Success'
deviceid = str(uuid.uuid4())
self.yuu_logger.debug('Generated Device UUID: {}'.format(deviceid))
json_data = {"deviceId": deviceid, "applicationKeySecret": key_secret(deviceid)}
self.yuu_logger.debug('Generated applicationKeySecret: {}'.format(json_data['applicationKeySecret']))
self.yuu_logger.debug('Sending json data')
res = self.session.post(self._USERAPI, json=json_data).json()
try:
self.yuu_logger.debug('Data sent, getting token')
token = res['token']
self.yuu_logger.debug('User token: {}'.format(token))
except:
return None, 'Failed to get user token.'
self.device_id = deviceid
self.session.headers.update({'Authorization': 'bearer ' + token})
return 'Success', 'Success'
def parse(self, resolution=None, check_only=False):
"""
Function to parse abema url
"""
res_list = [
'180p', '240p', '360p', '480p', '720p', '1080p', 'best', 'worst'
]
if resolution not in res_list:
if not check_only:
return None, 'Unknown resolution: {}. (Check it with `-R`)'.format(resolution)
if resolution == 'best':
resolution = '1080p'
self.resolution_o = 'best'
if resolution == 'worst':
resolution = '180p'
# https://abema.tv/video/title/26-55 (series/playlists)
# https://api.abema.io/v1/video/series/26-55
# https://api.abema.io/v1/video/series/26-55/programs?seriesVersion=1577436473958778090&seasonId=26-55_s1&offset=0&order=seq&limit=40
series = re.search(r"(?P<series>title)/(?P<video_id>.*[^-_])", self.url)
if series:
video_id = series.group(2)
self.yuu_logger.info('Series url format detected, fetching all links...')
self.yuu_logger.debug('Requesting data to Abema API.')
req = self.session.get(self._SERIESAPI + video_id)
if req.status_code != 200:
self.yuu_logger.log(40, 'Abema Response: ' + req.text)
return None, 'Error occured when communicating with Abema (Response: {})'.format(req.status_code)
self.yuu_logger.debug('Data requested')
self.yuu_logger.debug('Parsing json results...')
m3u8_url_list = []
output_list = []
jsdata = req.json()
to_be_requested = "{api}{vid}/programs?seriesVersion={sv}&seasonId={si}&offset=0&order={od}"
season_data = jsdata['seasons']
if not season_data:
                season_data = [{'id': ''}]  # Assume a film or other single-season title
version = jsdata['version']
prog_order = jsdata['programOrder']
for ns, season in enumerate(season_data, 1):
self.yuu_logger.info('Processing season ' + str(ns))
self.yuu_logger.debug('Requesting data to Abema API.')
req_season = self.session.get(to_be_requested.format(api=self._SERIESAPI, vid=video_id, sv=version, si=season['id'], od=prog_order))
if req_season.status_code != 200:
self.yuu_logger.log(40, 'Abema Response: ' + req_season.text)
return None, 'Error occured when communicating with Abema (Response: {})'.format(req_season.status_code)
self.yuu_logger.debug('Data requested')
self.yuu_logger.debug('Parsing json results...')
season_jsdata = req_season.json()
self.yuu_logger.debug('Processing total of {ep} episode for season {se}'.format(ep=len(season_jsdata['programs']), se=ns))
for nep, episode in enumerate(season_jsdata['programs'], 1):
free_episode = False
if 'label' in episode:
if 'free' in episode['label']:
free_episode = True
elif 'freeEndAt' in episode:
free_episode = True
                    if 'episode' in episode:
                        # Fall back to the enumerated index when the episode
                        # title is missing or empty.
                        episode_name = episode['episode'].get('title') or nep
                    else:
                        episode_name = nep
if not free_episode and not self.authorized:
self.yuu_logger.warn('Skipping episode {} (Not authorized and premium video)'.format(episode_name))
continue
self.yuu_logger.info('Processing episode {}'.format(episode_name))
req_ep = self.session.get(self._PROGRAMAPI + episode['id'])
if req_ep.status_code != 200:
self.yuu_logger.log(40, 'Abema Response: ' + req_ep.text)
return None, 'Error occured when communicating with Abema (Response: {})'.format(req_ep.status_code)
self.yuu_logger.debug('Data requested')
self.yuu_logger.debug('Parsing json API')
ep_json = req_ep.json()
title = ep_json['series']['title']
epnum = ep_json['episode']['title']
hls = ep_json['playback']['hls']
output_name = title + ' - ' + epnum
m3u8_url = '{x}/{r}/playlist.m3u8'.format(x=hls[:hls.rfind('/')], r=resolution[:-1])
self.yuu_logger.debug('M3U8 Link: {}'.format(m3u8_url))
self.yuu_logger.debug('Video title: {}'.format(title))
m3u8_url_list.append(m3u8_url)
output_list.append(output_name)
self.resolution = resolution
self.m3u8_url = m3u8_url_list
if not output_list:
err_msg = "All video are for premium only, please provide login details."
else:
err_msg = "Success"
return output_list, err_msg
if '.m3u8' in self.url[-5:]:
reg = re.compile(r'(program|slot)\/[\w+-]+')
            self.url = re.search(reg, self.url)[0]
self.is_m3u8 = True
ep_link = self.url[self.url.rfind('/')+1:]
self.yuu_logger.debug('Requesting data to Abema API')
if is_channel(self.url):
req = self.session.get(self._CHANNELAPI + ep_link)
if req.status_code != 200:
self.yuu_logger.log(40, 'Abema Response: ' + req.text)
return None, 'Error occured when communicating with Abema (Response: {})'.format(req.status_code)
self.yuu_logger.debug('Data requested')
self.yuu_logger.debug('Parsing json API')
jsdata = req.json()
output_name = jsdata['slot']['title']
if 'playback' in jsdata['slot']:
hls = jsdata['slot']['playback']['hls']
else:
hls = jsdata['slot']['chasePlayback']['hls'] # Compat
m3u8_url = '{x}/{r}/playlist.m3u8'.format(x=hls[:hls.rfind('/')], r=resolution[:-1])
if self.is_m3u8:
m3u8_url = self.url
self.yuu_logger.debug('M3U8 Link: {}'.format(m3u8_url))
self.yuu_logger.debug('Title: {}'.format(output_name))
else:
req = self.session.get(self._PROGRAMAPI + ep_link)
if req.status_code != 200:
self.yuu_logger.log(40, 'Abema Response: ' + req.text)
                return None, 'Error occurred when communicating with Abema (Response: {})'.format(req.status_code)
self.yuu_logger.debug('Data requested')
self.yuu_logger.debug('Parsing json API')
jsdata = req.json()
if jsdata['mediaStatus']:
if 'drm' in jsdata['mediaStatus']:
if jsdata['mediaStatus']['drm']:
return None, 'This video has a different DRM method and cannot be decrypted by yuu for now'
title = jsdata['series']['title']
epnum = jsdata['episode']['title']
hls = jsdata['playback']['hls']
output_name = title + ' - ' + epnum
m3u8_url = '{x}/{r}/playlist.m3u8'.format(x=hls[:hls.rfind('/')], r=resolution[:-1])
if self.is_m3u8:
m3u8_url = self.url
self.yuu_logger.debug('M3U8 Link: {}'.format(m3u8_url))
self.yuu_logger.debug('Video title: {}'.format(title))
self.yuu_logger.debug('Episode number: {}'.format(epnum))
self.resolution = resolution
self.m3u8_url = m3u8_url
return output_name, 'Success'
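    # Hedged usage sketch for the whole flow (assumes a prepared requests.Session
    # and a program/slot URL; the names below are methods of this class):
    #   abema = AbemaTV(url, session)
    #   output_name, msg = abema.parse(resolution='720p')
    #   files, iv, ticket, msg = abema.parse_m3u8(abema.m3u8_url)
    #   key, msg = abema.get_video_key(ticket)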
def parse_m3u8(self, m3u8_url):
self.yuu_logger.debug('Requesting m3u8')
r = self.session.get(m3u8_url)
self.yuu_logger.debug('Data requested')
if 'timeshift forbidden' in r.text:
return None, None, None, 'This video can\'t be downloaded for now.'
if r.status_code == 403:
return None, None, None, 'This video is geo-locked for Japan only.'
self.yuu_logger.debug('Parsing m3u8')
x = m3u8.loads(r.text)
files = x.files[1:]
if not files[0]:
files = files[1:]
if 'tsda' in files[5]:
# Assume DRMed
return None, None, None, 'This video has a different DRM method and cannot be decrypted by yuu for now'
resgex = re.findall(r'(\d*)(?:\/\w+.ts)', files[0])[0]
        keys_data = x.keys[0]
        iv = keys_data.iv
        ticket = keys_data.uri[18:]
parsed_files = []
for f in files:
if f.startswith('/tsvpg') or f.startswith('/tspg'):
f = 'https://ds-vod-abematv.akamaized.net' + f
parsed_files.append(f)
if self.resolution[:-1] != resgex:
if not self.resolution_o:
self.yuu_logger.warn('Changing resolution, from {} to {}p'.format(self.resolution, resgex))
self.resolution = resgex + 'p'
self.yuu_logger.debug('Total files: {}'.format(len(files)))
self.yuu_logger.debug('IV: {}'.format(iv))
self.yuu_logger.debug('Ticket key: {}'.format(ticket))
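        # Size estimate: total segment duration (s) x bitrate (kbit/s) / 1024;
        # the final /6 divisor appears to be an empirical correction rather
        # than the exact /8 bits-to-bytes factor.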
n = 0.0
for seg in x.segments:
n += seg.duration
self.est_filesize = round((round(n) * self.bitrate_calculation[self.resolution]) / 1024 / 6, 2)
return parsed_files, iv[2:], ticket, 'Success'
def get_video_key(self, ticket):
self.yuu_logger.debug('Sending parameter to API')
restoken = self.session.get(self._MEDIATOKEN_API, params=self._KEYPARAMS).json()
mediatoken = restoken['token']
self.yuu_logger.debug('Media token: {}'.format(mediatoken))
self.yuu_logger.debug('Sending ticket and media token to License API')
rgl = self.session.post(self._LICENSE_API, params={"t": mediatoken}, json={"kv": "a", "lt": ticket})
if rgl.status_code == 403:
            return None, 'Access to this video is not allowed.\nIt is probably premium-only or geo-locked.'
gl = rgl.json()
cid = gl['cid']
k = gl['k']
self.yuu_logger.debug('CID: {}'.format(cid))
self.yuu_logger.debug('K: {}'.format(k))
self.yuu_logger.debug('Summing up data with STRTABLE')
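        # 'k' is a base-58 string over _STRTABLE (58 symbols); fold it into a
        # single 128-bit integer, most significant digit first.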
res = sum([self._STRTABLE.find(k[i]) * (58 ** (len(k) - 1 - i)) for i in range(len(k))])
self.yuu_logger.debug('Result: {}'.format(res))
        self.yuu_logger.debug('Interpreting data')
encvk = struct.pack('>QQ', res >> 64, res & 0xffffffffffffffff)
self.yuu_logger.debug('Encoded video key: {}'.format(encvk))
self.yuu_logger.debug('Hashing data')
h = hmac.new(unhexlify(self._HKEY), (cid + self.device_id).encode("utf-8"), digestmod=hashlib.sha256)
enckey = h.digest()
self.yuu_logger.debug('Second Encoded video key: {}'.format(enckey))
self.yuu_logger.debug('Decrypting result')
aes = AES.new(enckey, AES.MODE_ECB)
vkey = aes.decrypt(encvk)
self.yuu_logger.debug('Decrypted, Result: {}'.format(vkey))
return vkey, 'Success getting video key'
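    # Hedged decryption sketch: each .ts segment is AES-128-CBC encrypted with
    # the key returned above and the IV taken from the playlist, roughly:
    #   aes = AES.new(vkey, AES.MODE_CBC, IV=unhexlify(iv))
    #   clear = aes.decrypt(session.get(segment_url).content)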
def resolutions(self, m3u8_uri):
self.yuu_logger.debug('Requesting data to API')
m3u8_ = m3u8_uri[:m3u8_uri.rfind('/')]
base_url = m3u8_[:m3u8_.rfind('/')] + '/'
rr_all = self.session.get(base_url + 'playlist.m3u8')
if 'timeshift forbidden' in rr_all.text:
return None, 'This video can\'t be downloaded for now.'
r_all = m3u8.loads(rr_all.text)
play_res = []
for r_p in r_all.playlists:
temp = []
temp.append(r_p.stream_info.resolution)
temp.append(base_url + r_p.uri)
play_res.append(temp)
resgex = re.compile(r'(\d*)(?:\/\w+.ts)')
ava_reso = []
for resdata in play_res:
reswh, m3u8_uri = resdata
resw, resh = reswh
self.yuu_logger.debug('Validating {}p resolution'.format(resh))
rres = m3u8.loads(self.session.get(m3u8_uri).text)
m3f = rres.files[1:]
if not m3f:
return None, 'This video can\'t be downloaded for now.'
self.yuu_logger.debug('Sample link: ' + m3f[5])
            if 'tsda' in m3f[5]:
# Assume DRMed
return None, 'This video has a different DRM method and cannot be decrypted by yuu for now'
if str(resh) in re.findall(resgex, m3f[5]):
ava_reso.append(
[
'{h}p'.format(h=resh),
'{w}x{h}'.format(w=resw, h=resh)
]
)
if ava_reso:
reso = [r[0] for r in ava_reso]
self.yuu_logger.debug('Resolution list: {}'.format(', '.join(reso)))
return ava_reso, 'Success'
def check_output(self, output=None, output_name=None):
if output:
fn_, ext_ = os.path.splitext(output)
            if ext_ != '.ts':  # os.path.splitext keeps the leading dot
output = fn_ + '.ts'
else:
output = '{x} ({m} {r}).ts'.format(x=output_name, m=self.type, r=self.resolution)
return output
# File: data_steward/cdr_cleaner/cleaning_rules/deid/dateshift.py (lrwb-aou/curation, MIT license)
The basic date shifting rule..
Original Issue: DC-1005
This is an abstract class and cannot be directly instantiated. It must be
extended to be used.
"""
# Python Imports
import logging
from abc import abstractmethod
# Project imports
from cdr_cleaner.cleaning_rules.base_cleaning_rule import BaseCleaningRule
from common import JINJA_ENV
LOGGER = logging.getLogger(__name__)
SHIFT_EXP = JINJA_ENV.from_string("""
{{field_type}}_SUB( CAST({{field}} AS {{field_type}}), INTERVAL (
SELECT
shift
FROM
`{{project}}.{{mapping_dataset_id}}.{{mapping_table_id}}` AS map
WHERE
map.research_id = remodel.person_id) DAY) AS {{field}}
""")
SELECT_STATEMENT = JINJA_ENV.from_string("""
CREATE OR REPLACE TABLE `{{project}}.{{dataset}}.{{table}}` AS (
SELECT
{{fields}}
FROM `{{project}}.{{dataset}}.{{table}}` AS remodel)
""")
class DateShiftRule(BaseCleaningRule):
"""
Date shift fields from 1 - 365 days in the past.
Performs a "day" shift for any field in the provided table names
and schemas. Uses the field type to determine the shift function to
use. Currently works for the DATE, DATETIME, and TIMESTAMP type fields.
"""
def __init__(self,
project_id,
dataset_id,
sandbox_dataset_id,
issue_numbers,
description,
affected_datasets,
affected_tables,
mapping_dataset_id,
mapping_table_id,
depends_on=None):
"""
Initialize the class.
Set the issue numbers, description and affected datasets. As other
tickets may affect this SQL, append them to the list of Jira Issues.
DO NOT REMOVE ORIGINAL JIRA ISSUE NUMBERS!
"""
if depends_on is None:
depends_on = []
desc = (f'Date shift date and timestamp fields by the date shift '
f'calculated in the static mapping table.')
self.mapping_dataset_id = mapping_dataset_id
self.mapping_table_id = mapping_table_id
super().__init__(issue_numbers=issue_numbers,
description=description,
affected_datasets=affected_datasets,
project_id=project_id,
dataset_id=dataset_id,
sandbox_dataset_id=sandbox_dataset_id,
affected_tables=affected_tables,
depends_on=depends_on)
@abstractmethod
def get_tables_and_schemas(self):
"""
Provide dictionary of table names and schemas.
:returns: a dictionary whose key, value patterns are in the
form of {"tablename": "json schema",}.
"""
pass
def get_query_specs(self):
"""
Return a list of dictionary query specifications.
:return: A list of dictionaries. Each dictionary contains a
single query and a specification for how to execute that query.
The specifications are optional but the query is required.
"""
date_shift_queries = []
for table, schema in self.get_tables_and_schemas().items():
LOGGER.info(f"Building Date Shifting query for {self.dataset_id}."
f"{table}")
fields = []
for field in schema:
field_type = field.get('type').lower()
field_name = field.get('name')
if field_type in ['date', 'datetime', 'timestamp']:
shift_string = SHIFT_EXP.render(
project=self.project_id,
mapping_dataset_id=self.mapping_dataset_id,
mapping_table_id=self.mapping_table_id,
field_type=field_type.upper(),
field=field_name,
table=table)
fields.append(shift_string)
else:
fields.append(field_name)
fields_string = ',\n'.join(fields)
query = SELECT_STATEMENT.render(project=self.project_id,
dataset=self.dataset_id,
table=table,
fields=fields_string)
date_shift_queries.append({'query': query})
return date_shift_queries
import logging
# File: conf_selection_and_DFT/PL_dft_library_201027.py (aspuru-guzik-group/kraken, MIT license)
# 201005: rename/restructure .yml files for consistency with xtb-level data
# 201006: in read_conformer() fix error message when log files are missing
import os,re,itertools,time
#import pybel
#from openbabel import pybel
import numpy as np
import pandas as pd
import pathlib as pl
cwd = pl.Path.cwd()
import yaml
from yaml import CLoader as Loader
from yaml import CDumper as Dumper
from rdkit import Chem,Geometry
from rdkit.Chem import rdmolfiles, AllChem, rdMolAlign,rdmolops
from multiprocessing import Pool
import morfeus # Kjell Jorner
from PL_split_logs_201006 import split_log # TG
from PL_conformer_selection_200411 import mirror_mol, delete_element_from_rdkitmol, delete_haloalkane_halides # TG #changed from PL_conformer_selection_201019 5/17/21 by EP
import PL_gaussian_properties_201021 as gp # TG
import vmin4 as vmin # TG/Iris Guo
import P_int_200916 as P_int # Robert Pollice (,TG(,ML))
# import PL_visvol as visvol # Ellyn Peters
# covalent radii, from Pyykko and Atsumi, Chem. Eur. J. 15, 2009, 188-197
# values for metals decreased by 10% according to Robert Paton's Sterimol implementation
rcov = {
"H": 0.32,"He": 0.46,"Li": 1.2,"Be": 0.94,"B": 0.77,"C": 0.75,"N": 0.71,"O": 0.63,"F": 0.64,"Ne": 0.67,"Na": 1.4,"Mg": 1.25,"Al": 1.13,"Si": 1.04,"P": 1.1,"S": 1.02,"Cl": 0.99,"Ar": 0.96,"K": 1.76,"Ca": 1.54,"Sc": 1.33,"Ti": 1.22,"V": 1.21,"Cr": 1.1,"Mn": 1.07,"Fe": 1.04,"Co": 1.0,"Ni": 0.99,"Cu": 1.01,"Zn": 1.09,"Ga": 1.12,"Ge": 1.09,"As": 1.15,"Se": 1.1,"Br": 1.14,"Kr": 1.17,"Rb": 1.89,"Sr": 1.67,"Y": 1.47,"Zr": 1.39,"Nb": 1.32,"Mo": 1.24,"Tc": 1.15,"Ru": 1.13,"Rh": 1.13,"Pd": 1.08,"Ag": 1.15,"Cd": 1.23,"In": 1.28,"Sn": 1.26,"Sb": 1.26,"Te": 1.23,"I": 1.32,"Xe": 1.31,"Cs": 2.09,"Ba": 1.76,"La": 1.62,"Ce": 1.47,"Pr": 1.58,"Nd": 1.57,"Pm": 1.56,"Sm": 1.55,"Eu": 1.51,"Gd": 1.52,"Tb": 1.51,"Dy": 1.5,"Ho": 1.49,"Er": 1.49,"Tm": 1.48,"Yb": 1.53,"Lu": 1.46,"Hf": 1.37,"Ta": 1.31,"W": 1.23,"Re": 1.18,"Os": 1.16,"Ir": 1.11,"Pt": 1.12,"Au": 1.13,"Hg": 1.32,"Tl": 1.3,"Pb": 1.3,"Bi": 1.36,"Po": 1.31,"At": 1.38,"Rn": 1.42,"Fr": 2.01,"Ra": 1.81,"Ac": 1.67,"Th": 1.58,"Pa": 1.52,"U": 1.53,"Np": 1.54,"Pu": 1.55
}
# some constants
R = 0.0019872036 #kcal mol^-1 K^-1
T = 298.15 #K
hartree_kcalmol = 627.50947
periodictable = ["Bq","H","He","Li","Be","B","C","N","O","F","Ne","Na","Mg","Al","Si","P","S","Cl","Ar","K","Ca","Sc","Ti","V","Cr","Mn","Fe","Co","Ni","Cu","Zn","Ga","Ge","As","Se","Br","Kr","Rb","Sr","Y","Zr","Nb","Mo","Tc","Ru","Rh","Pd","Ag","Cd","In","Sn","Sb","Te","I","Xe","Cs","Ba","La","Ce","Pr","Nd","Pm","Sm","Eu","Gd","Tb","Dy","Ho","Er","Tm","Yb","Lu","Hf","Ta","W","Re","Os","Ir","Pt","Au","Hg","Tl","Pb","Bi","Po","At","Rn","Fr","Ra","Ac","Th","Pa","U","Np","Pu","Am","Cm","Bk","Cf","Es","Fm","Md","No","Lr","Rf","Db","Sg","Bh","Hs","Mt","Ds","Rg","Uub","Uut","Uuq","Uup","Uuh","Uus","Uuo","X"]
def get_conmat(elements, coords):
# partially based on code from Robert Paton's Sterimol script, which based this part on Grimme's D3 code
# elements is a list of strings, coords is a numpy array or nested list of shape N_atoms x 3
if type(coords) == list:
coords = np.asarray(coords)
natom = len(elements)
#max_elem = 94
k1 = 16.0
k2 = 4.0/3.0
conmat = np.zeros((natom,natom))
for i in range(0,natom):
if elements[i] not in rcov.keys():
continue
for iat in range(0,natom):
if elements[iat] not in rcov.keys():
continue
if iat != i:
dxyz = coords[iat]-coords[i]
r = np.linalg.norm(dxyz)
rco = rcov[elements[i]]+rcov[elements[iat]]
rco = rco*k2
rr=rco/r
                damp=1.0/(1.0+np.exp(-k1*(rr-1.0)))  # smooth cutoff around the scaled covalent distance
if damp > 0.85: #check if threshold is good enough for general purpose
conmat[i,iat],conmat[iat,i] = 1,1
return(conmat)
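# Hedged usage sketch for get_conmat (illustrative water geometry, Angstrom):
#   conmat = get_conmat(["O", "H", "H"],
#                       [[0.0, 0.0, 0.0], [0.96, 0.0, 0.0], [-0.24, 0.93, 0.0]])
#   conmat[0][1] == 1.0  # O-H contacts are flagged as bonds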
def add_valence(elements,coords,conmat,base_idx,add_element="Pd"):
# Adds a valence to base so that the angle to the previous substituents is maximized and reorders the coordinate output for convenience
# add_element: add any of the following elements:
distpx = {"O":1.5,"Se":2.12,"Pd":2.28,"X":1.8} # typical bond distances to P
if type(coords) == list:
coords = np.asarray(coords)
num_atoms = len(elements)
coord_base = coords[base_idx]
base_element = elements[base_idx]
vec = np.array([0.0,0.0,0.0])
bonded = []
for atom in range(num_atoms):
if conmat[base_idx][atom]:
bonded.append(atom)
vec += coord_base - coords[atom]
coordox = distpx[add_element]*vec/np.linalg.norm(vec) + coord_base
coords_temp = np.vstack((coords,coordox))
if sum(get_conmat(elements+[add_element],coords_temp)[-1]) != 1.0:
print(" Warning: possible collision!")
# sort coordinates so that base is first, add_element is second, and the other atoms bonded to base are next
elements_new = [base_element,add_element]+[elements[a] for a in bonded] + [a for i,a in enumerate(elements) if i not in [base_idx]+bonded]
coords_new = np.vstack((coord_base, coordox, coords[bonded], coords[[i for i,a in enumerate(elements) if i not in [base_idx]+bonded]]))
return(elements_new, coords_new)
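# Hedged usage sketch: add a dummy "Pd" 2.28 A from P along the lone-pair
# direction and reorder atoms so that P is index 0 and Pd is index 1:
#   elements_pd, coords_pd = add_valence(elements, coords, conmat, p_idx)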
def write_xyz(elements,coords,filename):
with open(filename,"w") as f:
f.write(f"{len(elements)}\n\n")
for i,a in enumerate(elements):
f.write(f"{a.title():>3} " + " ".join([f"{coords[i][j]:15f}" for j in range(3)]) + "\n")
def rmsd_matrix(conformers):
molobjects = [rdmolfiles.MolFromMolFile(str(cwd/conformer/f"{conformer}_opt.sdf"),removeHs=False,strictParsing=False) for conformer in conformers]
molobjects = [Chem.RemoveHs(mol) for mol in molobjects] # Remove all H: optional but speeds up RMSD calculation
molobjects = [delete_haloalkane_halides(mol) for mol in molobjects] # Remove halides in perhaloalkyl moieties. Improves RMSD matching and timing
molobjects_inv = [mirror_mol(mol) for mol in molobjects] # create mirror images of each conformer
rmsd_mat = np.zeros((len(conformers),len(conformers)))
for i,j in itertools.product(range(len(conformers)),range(len(conformers))):
if i<j: continue
if i==j:
rmsd_mat[i,j] = 1
else:
rmsd_mat[i,j] = min((rdMolAlign.GetBestRMS(molobjects[i],molobjects[j]),rdMolAlign.GetBestRMS(molobjects[i],molobjects_inv[j])))
rmsd_mat[j,i] = rmsd_mat[i,j]
return(rmsd_mat)
def dict_key_rmsd(candidate_pair):
return float(rmsd_matrix(candidate_pair)[0,1])
# which energies to read from which log-file
energylogs = {
"e_dz":"freq",
"e_tz_gas":"nbo",
"e_tz_gas":"sp",
"e_tz_solv":"solv",
"e_tz_ra":"ra",
"e_tz_rc":"rc",
}
# which properties to read from which log-file
proplogs = {
"freq":["nimag","g","t"],
"sp" :["dipole","homo","qpole","t"],
"ra" :["homo","nbo","t"],
"rc" :["homo","nbo","t"],
"nbo" :["nbo","nborbsP","t"],
"nmr" :["nmr","t"],
"efg" :["efg","nuesp","t"],
"solv":["ecds","t"],
}
# assign names to each descriptor
propoutput = {
"freq_g": ["","g"],
"freq_nimag": ["nimag"],
"sp_dipole": ["dipolemoment",],
"sp_homo": ["fmo_e_homo","fmo_e_lumo","fmo_mu","fmo_eta","fmo_omega"],
"ra_homo":["somo_ra","","","",""],
"rc_homo":["somo_rc","","","",""],
"sp_qpole": ["qpole_amp","qpoletens_xx","qpoletens_yy","qpoletens_zz"],
"nbo_nbo": ["nbo_P"],
"ra_nbo": ["nbo_P_ra","spindens_P_ra"],
"rc_nbo": ["nbo_P_rc","spindens_P_rc"],
"nmr_nmr": ["nmr_P","nmrtens_sxx_P","nmrtens_syy_P","nmrtens_szz_P",],
"efg_efg": ["efg_amp_P","efgtens_xx_P","efgtens_yy_P","efgtens_zz_P"],
"efg_nuesp": ["nuesp_P",],
"solv_ecds": ["E_solv_cds"],
"nbo_dipole": ["dipolemoment",],
"nbo_homo": ["fmo_e_homo","fmo_e_lumo","fmo_mu","fmo_eta","fmo_omega"],
"nbo_qpole": ["qpole_amp","qpoletens_xx","qpoletens_yy","qpoletens_zz"],
}
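# Example: the "homo" values parsed from the sp log are assigned the five FMO
# descriptor names in propoutput["sp_homo"]; empty-string names are dropped
# downstream in gp_properties().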
boltzproperties = ['vmin_vmin','vmin_r','dipolemoment', 'fmo_e_homo', 'fmo_e_lumo', 'fmo_mu', 'fmo_eta', 'fmo_omega', 'somo_ra', 'somo_rc', 'qpole_amp', 'qpoletens_xx', 'qpoletens_yy', 'qpoletens_zz', 'nbo_P', 'nbo_P_ra', 'spindens_P_ra', 'nbo_P_rc', 'spindens_P_rc', 'nmr_P', 'nmrtens_sxx_P', 'nmrtens_syy_P', 'nmrtens_szz_P', 'efg_amp_P', 'efgtens_xx_P', 'efgtens_yy_P', 'efgtens_zz_P', 'nuesp_P', 'E_solv_cds', 'nbo_lp_P_percent_s', 'nbo_lp_P_occ', 'nbo_lp_P_e', 'nbo_bd_e_max', 'nbo_bd_e_avg', 'nbo_bds_e_min', 'nbo_bds_e_avg', 'nbo_bd_occ_min', 'nbo_bd_occ_avg', 'nbo_bds_occ_max', 'nbo_bds_occ_avg', 'E_solv_total', 'E_solv_elstat', 'E_oxidation', 'E_reduction', 'fukui_p', 'fukui_m', 'pyr_P', 'pyr_alpha', 'vbur_vbur', 'vbur_vtot', 'vbur_ratio_vbur_vtot', 'vbur_qvbur_min', 'vbur_qvbur_max', 'vbur_qvtot_min', 'vbur_qvtot_max', 'vbur_max_delta_qvbur', 'vbur_max_delta_qvtot', 'vbur_ovbur_min', 'vbur_ovbur_max', 'vbur_ovtot_min', 'vbur_ovtot_max', 'vbur_near_vbur', 'vbur_far_vbur', 'vbur_near_vtot', 'vbur_far_vtot', 'sterimol_B1', 'sterimol_B5', 'sterimol_L', 'sterimol_burB1', 'sterimol_burB5', 'sterimol_burL',"Pint_P_int","Pint_dP","Pint_P_min","Pint_P_max","volume","surface_area","sphericity"] # "vv_total_visible_volume","vv_proximal_visible_volume","vv_distal_visible_volume","vv_ratio_visible_total","vv_ratio_proxvis_total",
mmproperties = ['dipolemoment', 'qpole_amp', 'qpoletens_xx', 'qpoletens_yy', 'qpoletens_zz', 'pyr_P', 'pyr_alpha', 'vbur_vbur', 'vbur_vtot', 'vbur_qvbur_min', 'vbur_qvbur_max', 'vbur_qvtot_min', 'vbur_qvtot_max', 'vbur_max_delta_qvbur', 'vbur_max_delta_qvtot', 'vbur_ovbur_min', 'vbur_ovbur_max', 'vbur_ovtot_min', 'vbur_ovtot_max', 'vbur_near_vbur', 'vbur_far_vbur', 'vbur_near_vtot', 'vbur_far_vtot', 'sterimol_B1', 'sterimol_B5', 'sterimol_L', 'sterimol_burB1', 'sterimol_burB5', 'sterimol_burL'] # ,"vv_total_visible_volume","vv_proximal_visible_volume","vv_distal_visible_volume","vv_ratio_visible_total","vv_ratio_proxvis_total",
Pintresults = ["Pint_P_int","Pint_dP","Pint_P_min","Pint_P_max","volume","surface_area","sphericity"]
def morfeus_properties(elements,coordinates,confdata):
# Morfeus: Sterimol, Vbur, pyr
morfdict = {}
if "pyr_P" not in confdata.keys() and confdata["p_val"] == 3:
        # Pyramidalization - two equivalent measurements, P and alpha
pyr = morfeus.Pyramidalization(elements=elements,coordinates=coordinates,atom_index=1,excluded_atoms=[2]) # remove Pd
morfdict["pyr_P"] = float(pyr.P)
morfdict["pyr_alpha"] = float(pyr.alpha)
if "vbur_vbur" not in confdata.keys():
#Buried volume - get quadrant volumes and distal volume
# iterate through P-substituents, aligning the quadrants paralell to each once (= xz_plane definition)
# Metal/point of reference should be 2.28 A away from P
# z_axis_atoms: P
# xz_plane_atoms: each of the substituents once
# keep lowest and highest quadrant and octant volume across all three orientations of the coordinate system
# keep highest difference of any neighboring quadrant volume
# keep volume in each of the two hemispheres
qvbur_all = np.array([])
qvdist_all = np.array([])
qvtot_all = np.array([])
max_delta_qvbur_all = []
max_delta_qvtot_all = []
ovbur_all = np.array([])
ovtot_all = np.array([])
        for i in range(3):  # could equally iterate over range(confdata["p_val"])
bv = morfeus.BuriedVolume(elements,coordinates,2,excluded_atoms=[2],z_axis_atoms=[1],xz_plane_atoms=[3+i])
bv.octant_analysis()
bv.compute_distal_volume(method="buried_volume",octants=True)
vbur = bv.buried_volume # these are identical for each iteration
vdist = bv.distal_volume #
vtot = vbur + vdist #
qvbur = np.asarray(list(bv.quadrants["buried_volume"].values()))
qvdist = np.asarray(list(bv.quadrants["distal_volume"].values()))
qvtot = qvbur + qvdist
qvbur_all = np.append(qvbur_all,qvbur)
qvtot_all = np.append(qvtot_all,qvtot)
max_delta_qvbur_all.append(max([abs(qvbur[j]-qvbur[j-1]) for j in range(4)]))
max_delta_qvtot_all.append(max([abs(qvtot[j]-qvtot[j-1]) for j in range(4)]))
ovbur = np.asarray(list(bv.octants["buried_volume"].values()))
ovdist = np.asarray(list(bv.octants["distal_volume"].values()))
ovtot = ovbur + ovdist
ovbur_all = np.append(ovbur_all,ovbur)
ovtot_all = np.append(ovtot_all,ovtot)
near_vbur = ovbur[4:].sum() # these are identical for each iteration
far_vbur = ovbur[:4].sum() #
near_vtot = ovtot[4:].sum() #
far_vtot = ovtot[:4].sum() #
morfdict["vbur_vbur"] = vbur
morfdict["vbur_vtot"] = float(vtot)
morfdict["vbur_ratio_vbur_vtot"] = float(vbur/vtot)
morfdict["vbur_qvbur_min"] = float(min(qvbur_all))
morfdict["vbur_qvbur_max"] = float(max(qvbur_all))
morfdict["vbur_qvtot_min"] = float(min(qvtot_all))
morfdict["vbur_qvtot_max"] = float(max(qvtot_all))
morfdict["vbur_max_delta_qvbur"] = float(max(max_delta_qvbur_all))
morfdict["vbur_max_delta_qvtot"] = float(max(max_delta_qvtot_all))
morfdict["vbur_ovbur_min"] = float(min(ovbur_all))
morfdict["vbur_ovbur_max"] = float(max(ovbur_all))
morfdict["vbur_ovtot_min"] = float(min(ovtot_all))
morfdict["vbur_ovtot_max"] = float(max(ovtot_all))
morfdict["vbur_near_vbur"] = float(near_vbur)
morfdict["vbur_far_vbur"] = float(far_vbur)
morfdict["vbur_near_vtot"] = float(near_vtot)
morfdict["vbur_far_vtot"] = float(far_vtot)
if "sterimol_B1" not in confdata.keys():
# Sterimol
# for Sterimol values matching Rob Paton's implementation:
patonradii = morfeus.helpers.get_radii(elements, radii_type="bondi")
patonradii = np.array(patonradii)
patonradii[patonradii == 1.2] = 1.09
sterimol = morfeus.Sterimol(elements, coordinates, 2, 1, radii=patonradii, n_rot_vectors=3600)
morfdict["sterimol_B1"] = float(sterimol.B_1_value)
morfdict["sterimol_B5"] = float(sterimol.B_5_value)
morfdict["sterimol_L"] = float(sterimol.L_value)
# buried Sterimol
sterimol_bur = morfeus.Sterimol(elements, coordinates, 2, 1,calculate=False,radii=patonradii, n_rot_vectors=3600)
sterimol_bur.bury(sphere_radius=5.5,method="delete",radii_scale=0.5)
# sterimol.bury(sphere_radius=4.5,method="delete",radii_scale=1)
morfdict["sterimol_burB1"] = float(sterimol_bur.B_1_value)
morfdict["sterimol_burB5"] = float(sterimol_bur.B_5_value)
morfdict["sterimol_burL"] = float(sterimol_bur.L_value)
return(morfdict)
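# Hedged usage sketch (coordinates must already contain the dummy Pd from
# add_valence, i.e. P at index 0 and Pd at index 1):
#   morf = morfeus_properties(elements_pd, coords_pd, {"p_val": 3})
#   morf["vbur_vbur"], morf["sterimol_B1"]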
def gp_properties(ligand,conformer,p_idx):
# reads gaussian log files
gpdict = {}
gpdict["properties"] = {}
contents = {
"streams":{},
"filecont":{},
}
# read energies
for e,log in energylogs.items():
contents["streams"][log] = gp.get_outstreams(cwd/conformer/f"{conformer}_{log}.log")
if contents["streams"][log] == "failed or incomplete job":
return({"error":True})
else:
gpdict[e] = gp.get_e_hf(contents["streams"][log])
gpdict["error"] = False
# going through each log file, get the relevant properties
for log in proplogs.keys():
contents["filecont"][log] = gp.get_filecont(cwd/conformer/f"{conformer}_{log}.log")
for prop in proplogs[log]:
gpresults = gp.jobtypes[prop][0](contents[gp.jobtypes[prop][1]][log],p_idx)
if prop == "nborbsP": # NBO orbital analysis returns a dictionary with the proper labels
gpdict["properties"].update(gpresults)
elif prop == "t": # subjob time
gpdict[f"{log}_t"] = gpresults
elif prop in ["e_dz","g","e_tz_gas","e_tz_solv","e_tz_ra","e_tz_rc","nimag"]:
gpdict.update({propoutput[f"{log}_{prop}"][i]: float(gpresults[i]) for i in range(len(gpresults))})
else: # all other functions return a list. This is assigned into a dict with proper names here
gpdict["properties"].update({propoutput[f"{log}_{prop}"][i]: float(gpresults[i]) for i in range(len(gpresults))})
gpdict["g_tz_gas"] = gpdict["g"] - gpdict["e_dz"] + gpdict["e_tz_gas"] # in Hartree
gpdict["g_tz_solv"] = gpdict["g"] - gpdict["e_dz"] + gpdict["e_tz_solv"] # in Hartree
gpdict["properties"]["E_solv_total"] = (gpdict["e_tz_solv"] - gpdict["e_tz_gas"]) * hartree_kcalmol # in kcal/mol
gpdict["properties"]["E_solv_elstat"] = gpdict["properties"]["E_solv_total"] - gpdict["properties"]["E_solv_cds"] # in kcal/mol
gpdict["properties"]["E_oxidation"] = gpdict["e_tz_rc"] - gpdict["e_tz_gas"] # in Hartree
gpdict["properties"]["E_reduction"] = gpdict["e_tz_ra"] - gpdict["e_tz_gas"] # in Hartree
gpdict["properties"]["fukui_p"] = gpdict["properties"]["nbo_P"]-gpdict["properties"]["nbo_P_ra"] # fukui electrophilicity
gpdict["properties"]["fukui_m"] = gpdict["properties"]["nbo_P_rc"]-gpdict["properties"]["nbo_P"] # fukui nucleophilicity
gpdict["t_total"] = sum([gpdict[f"{log}_t"] for log in proplogs.keys()])
if "" in gpdict.keys():
del gpdict[""]
if "" in gpdict["properties"].keys():
del gpdict["properties"][""]
return(gpdict)
def read_conformer(cwd, ligand, conformer): # cwd: pathlib path of the current working directory. ligand: 8-digit ligand ID. conformer: full name of the conformer (including the ligand ID at the beginning)
confdata = {}
errors = []
checklogs = [cwd/conformer/f"{conformer}_{l}.log" for l in proplogs.keys() if not (cwd/conformer/f"{conformer}_{l}.log").exists()]
if len(checklogs) != 0:
#! log this as a conformer-level error
err = f"Missing Gaussian log files, flagged in read_conformer: {','.join([chkl.name for chkl in checklogs])}"
errors.append(err)
print(f"{ligand};{conformer};{err}")
with open(cwd/f"{ligand}_errors.txt","a") as f:
f.write(f"{ligand};{conformer};{err}\n")
confdata["error"] = True
return(confdata,errors)
if "elements_pd" not in confdata.keys():
# mol = next(pybel.readfile("g09",str(cwd/conformer/f"{conformer}_nbo.log")))
#mol = next(pybel.readfile("g09",str(cwd/conformer/f"{conformer}_opt.log")))
#elements = [periodictable[a.atomicnum] for a in mol.atoms]
#coordinates = [list(a.coords) for a in mol.atoms]
#coordinates_a = np.array([a.coords for a in mol.atoms])
def read_gaussian_logfile(fn):
time0=time.time()
read=False
for line in open(fn,"r"):
if read:
if "---" in line and len(elements)>0:
read=False
if read:
if "X" not in line and "---" not in line:
atomnum = int(line.split()[1])
#print(line.replace("\n",""))
#print(atomnum)
el = periodictable[atomnum]
elements.append(el)
coordinates.append([float(line.split()[3]),float(line.split()[4]), float(line.split()[5])])
if "Coordinates (Angstroms)" in line:
coordinates, elements = [], []
read=True
time1=time.time()
print("gaussian log parser done in %.2f seconds"%(time1-time0))
return(coordinates, elements)
coordinates, elements = read_gaussian_logfile(str(cwd/conformer/f"{conformer}_opt.log"))
coordinates_a = np.array(coordinates)
conmat = get_conmat(elements,coordinates_a)
p_idx = [i for i in range(len(elements)) if elements[i] == "P" and sum(conmat[i]) <= 3][0] # this removes quaternary P (phosphonium, phosphate etc) but allows for P with 2 substituents (phosphabenzene, phosphaimine etc). Can we be sure that we never have more than one non-quaternary P(III)?
elements_pd, coordinates_pd = add_valence(elements,coordinates,conmat,p_idx,add_element="Pd") # Add "Pd" at the reference position in the P-lone pair region
if not (cwd/conformer/f"{conformer}_opt_Pd.xyz").exists():
#out = pybel.Outputfile("xyz",str(cwd/conformer/f"{conformer}_opt.xyz"))
#out.write(mol)
#out.close()
write_xyz(elements, coordinates, cwd/conformer/f"{conformer}_opt.xyz")
#out = pybel.Outputfile("sdf",str(cwd/conformer/f"{conformer}_opt.sdf"))
#out.write(mol)
#out.close()
os.system("obabel -ixyz %s -osdf >> %s"%(str(cwd/conformer/f"{conformer}_opt.xyz"), str(cwd/conformer/f"{conformer}_opt.sdf")))
write_xyz(elements_pd,coordinates_pd,cwd/conformer/f"{conformer}_opt_Pd.xyz")
confdata["coords"] = coordinates
confdata["coords_pd"] = coordinates_pd.tolist()
confdata["elements"] = elements
confdata["elements_pd"] = elements_pd
confdata["conmat"] = conmat.tolist()
confdata["p_idx"] = p_idx
confdata["p_val"] = int(sum(conmat[p_idx])) # how many substituents at P
confdata["properties"] = {}
## get properties
# gp_properties: everything that can be read from the Gaussian log files (most electronic properties)
confdata.update(gp_properties(ligand,conformer,confdata["p_idx"]))
if confdata["error"]:
#! log this as a conformer-level error
err = "Error in the Gaussian computations, flagged in read_conformer, please check log files."
errors.append(err)
print(f"{ligand};{conformer};{err}")
with open(cwd/f"{ligand}_errors.txt","a") as f:
f.write(f"{ligand};{conformer};{err}\n")
with open(cwd/conformer/f"{conformer}_data.yml","w") as f:
yaml.dump(confdata,f,Dumper=Dumper)
return(confdata,errors)
if confdata["nimag"] != 0:
#! log this as a conformer-level error
err = f"Number of imaginary frequencies: {confdata['nimag']}."
errors.append(err)
print(f"{ligand};{conformer};{err}")
with open(cwd/f"{ligand}_errors.txt","a") as f:
f.write(f"{ligand};{conformer};{err}\n")
with open(cwd/conformer/f"{conformer}_data.yml","w") as f:
yaml.dump(confdata,f,Dumper=Dumper)
confdata["error"] = True
return(confdata,errors)
# morfeus: properties that use the geometry/steric properties
confdata["properties"].update(morfeus_properties(confdata["elements_pd"],confdata["coords_pd"],confdata))
# # P_int
# if "Pint_P_int" not in confdata.keys():
# confdata.update(P_int.P_int_main(name=conformer,directory=cwd/conformer))
# read results
disp = "d3"
pint_read = P_int.read_dedout(cwd/conformer,conformer,disp)+P_int.read_multiwfnout(cwd/conformer,conformer)+P_int.read_disp(cwd/conformer,conformer,disp)
confdata["properties"].update({Pintresults[i]:float(pint_read[i]) for i in range(7)})
# V_min
try:
if "vmin_vmin" not in confdata.keys():
vminob = vmin.get_vmin(f"{conformer}.fchk",str(cwd/conformer)+"/",True)
confdata["properties"]["vmin_vmin"] = float(vminob.v_min)
confdata["properties"]["vmin_r"] = float(vminob.r_min)
except:
err = f"Vmin FileNotFoundError."
errors.append(err)
print(f"{ligand};{conformer};{err}")
with open(cwd/f"{ligand}_errors.txt","a") as f:
f.write(f"{ligand};{conformer};{err}\n")
confdata["error"] = True
# visvol
# if "vv_total_visible_volume" not in confdata.keys():
# confdata.update(visvol.get_vis_vol(cwd/conformer/f"{conformer}_opt_Pd.xyz",radii_type = 'rcov',prox_cutoff = 3.5,ignore_H = 0,write_results = 1, plot = 0))
with open(cwd/conformer/f"{conformer}_data.yml","w") as f:
yaml.dump(confdata,f,Dumper=Dumper)
return(confdata,errors)
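# Hedged usage sketch (illustrative IDs; the conformer directory must hold the
# Gaussian logs listed in proplogs):
#   confdata, errors = read_conformer(cwd, "00000001", "00000001_noe_00001")
#   if not confdata["error"]:
#       print(confdata["properties"]["nbo_P"])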
def read_ligand(cwd, ligand, conformers, liganddata = {}): # cwd is the ligand-level directory
status = {"ligandlevel": [],}
if len(liganddata.keys()) == 0:
if (cwd/f"{ligand}_data.yml").exists():
with open(cwd/f"{ligand}_data.yml","r") as f:
liganddata = yaml.load(f,Loader=Loader)
if (cwd/f"{ligand}_confdata.yml").exists():
with open(cwd/f"{ligand}_confdata.yml","r") as f:
liganddata["confdata"] = yaml.load(f,Loader=Loader)
else:
liganddata = {
"conformers_all": conformers,
"conformers": conformers.copy(), # Duplicates and computations with errors (including nimag=1) will be removed from this list
"number_of_conformers": len(conformers),
"removed_duplicates": [],
"confdata": {},#{c:{} for c in conformers},
"boltzmann_averaged_data": {},
"min_data": {},
"max_data": {},
"delta_data": {},
"vburminconf_data": {},
}
newconfs = 0
for conformer in conformers:
if conformer in liganddata["removed_duplicates"]:
continue
print(conformer)
if conformer in liganddata["confdata"].keys():
pass
elif (cwd/conformer/f"{conformer}_data.yml").exists():
with open(cwd/conformer/f"{conformer}_data.yml","r") as f:
liganddata["confdata"][conformer] = yaml.load(f,Loader=Loader)
newconfs += 1
else:
print("read conformer data")
liganddata["confdata"][conformer],status[conformer] = read_conformer(cwd, ligand, conformer) # returns the dictionary with the conformer data and a list with errors
newconfs += 1
if newconfs > 0:
# error, NIMAG removal
liganddata["conformers_w_error"] = [conformer for conformer in liganddata["conformers"] if liganddata["confdata"][conformer]["error"]]
liganddata["conformers"] = [c for c in liganddata["conformers"] if c not in liganddata["conformers_w_error"]]
liganddata["number_of_conformers"] = len(liganddata["conformers"])
energies = ["e_dz","g","e_tz_gas","g_tz_gas","e_tz_solv","g_tz_solv"]
liganddata["energies"] = {}
liganddata["relative_energies"] = {}
for e in energies:
liganddata["energies"][e] = {conformer: liganddata["confdata"][conformer][e] for conformer in liganddata["conformers"]}
liganddata[e+"_min"] = min(liganddata["energies"][e].values())
liganddata[e+"_minconf"] = list(liganddata["energies"][e].keys())[np.argmin(list(liganddata["energies"][e].values()))]
liganddata["relative_energies"][e+"_rel"] = {conformer: (liganddata["energies"][e][conformer]-liganddata[e+"_min"])*hartree_kcalmol for conformer in liganddata["conformers"]}
# erel_df = pd.DataFrame(np.array([list(liganddata[e+"_rel"].values()) for e in energies]).T ,columns=energies,index=liganddata["conformers"] )
erel_df = pd.DataFrame([liganddata["relative_energies"][e+"_rel"] for e in energies],index=energies).T
#liganddata["relative_energies_df"] = erel_df
liganddata["relative_energies_dict"] = erel_df.to_dict()
# Find duplicates:
# 1) find pairs of conformers that are within E_rel < 0.1 kcal/mol (relative energies seem to be much more reliable than relative free energies)
# 2) check these pairs to also have RMSD < 0.2 A
# 3) Remove the conformer with higher relative free energy
duplicates_candidates = [(i,j) for i,j in itertools.combinations(liganddata["conformers"],2) if abs(erel_df["e_dz"].loc[i] - erel_df["e_dz"].loc[j]) < 0.1]
try:
            # Raise a NameError here if you want to force the fallback in the except block below
cores = max(os.cpu_count() - 2, 1)
with Pool(cores) as p:
values = p.map(dict_key_rmsd, duplicates_candidates)
liganddata["rmsd_candidates"] = {key: value for key, value in zip(duplicates_candidates, values)}
# The less cool, non-parallel way
#liganddata["rmsd_candidates"] = {candidate_pair: float(rmsd_matrix(candidate_pair)[0,1]) for candidate_pair in duplicates_candidates} # keep all RMSD for potential debugging
liganddata["duplicates"] = [candidate_pair for candidate_pair in liganddata["rmsd_candidates"] if liganddata["rmsd_candidates"][candidate_pair] < 0.2]
except: # RDkit failed to generate Mol objects and thus could not compute RMSD, or some of the internal structures in those mol files are different despite actually being the same. Default to duplicate detection based on dipole moment and chemical shift similarity
#! log this on ligand level for double-checking
err = "Warning: RDKit error at duplicate RMSD testing. Please double check."
status["ligandlevel"].append(err)
print(f"{ligand};ligandlevel;{err}")
with open(cwd/f"{ligand}_errors.txt","a") as f:
f.write(f"{ligand};ligandlevel;{err}\n")
dipole_candidates = set([(i,j) for i,j in duplicates_candidates if abs(liganddata["confdata"][i]["properties"]["dipolemoment"] - liganddata["confdata"][j]["properties"]["dipolemoment"]) < 0.025])
nmr_candidates = set([(i,j) for i,j in duplicates_candidates if abs(liganddata["confdata"][i]["properties"]["nmr_P"] - liganddata["confdata"][j]["properties"]["nmr_P"]) < 0.1])
liganddata["duplicates"] = sorted(dipole_candidates & nmr_candidates)
liganddata["removed_duplicates"] = [erel_df.loc[list(pair)]["g_tz_gas"].idxmax() for pair in liganddata["duplicates"]]
liganddata["conformers"] = [c for c in liganddata["conformers"] if c not in liganddata["removed_duplicates"]]
liganddata["number_of_conformers"] = len(liganddata["conformers"])
# Boltzmann averaging
#boltzfacs = {conformer: np.exp(-liganddata["relative_energies_df"]["g_tz_gas"].loc[conformer]/(R*T)) for conformer in liganddata["conformers"]}
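    # Boltzmann weight of conformer i: w_i = exp(-dG_i/(R*T)) / sum_j exp(-dG_j/(R*T)),
    # with dG_i the relative free energy (kcal/mol), R in kcal/(mol*K), T = 298.15 K.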
boltzfacs = {conformer: np.exp(-erel_df["g_tz_gas"].loc[conformer]/(R*T)) for conformer in liganddata["conformers"]}
Q = sum(boltzfacs.values())
liganddata["boltzmann_weights"] = {conformer: float(boltzfacs[conformer]/Q) for conformer in liganddata["conformers"] } # probability
for prop in boltzproperties:
confsmissingprop = [conf for conf in liganddata["conformers"] if prop not in liganddata["confdata"][conf]["properties"].keys()]
if len(confsmissingprop) == 0:
liganddata["boltzmann_averaged_data"][prop] = sum([liganddata["boltzmann_weights"][conf] * liganddata["confdata"][conf]["properties"][prop] for conf in liganddata["conformers"]])
else: # if a single conformer is missing a property value, set Boltzmann-average to None
#! log this as a ligand-level error with prop and confsmissingprop
err = f"Warning: {len(confsmissingprop)}/{len(liganddata['conformers'])} conformers missing values for property {prop}: {','.join(confsmissingprop)}."
status["ligandlevel"].append(err)
print(f"{ligand};ligandlevel;{err}")
with open(cwd/f"{ligand}_errors.txt","a") as f:
f.write(f"{ligand};ligandlevel;{err}\n")
liganddata["boltzmann_averaged_data"][prop] = None
continue
# "Condensed" properties
liganddata["vburminconf"] = liganddata["conformers"][np.argmin([liganddata["confdata"][conf]["properties"]["vbur_vbur"] for conf in liganddata["conformers"]])]
for prop in mmproperties:
proplist = [liganddata["confdata"][conf]["properties"][prop] for conf in liganddata["conformers"] if prop in liganddata["confdata"][conf]["properties"].keys()]
# if a single conformer is missing a property value, still perform min/max analysis (Boltzmann-average will be None to indicate missing value(s))
# if all confs are missing this prop, set min/max/delta to None
if len(proplist) == 0:
liganddata["min_data"][prop] = None
liganddata["max_data"][prop] = None
liganddata["delta_data"][prop] = None
liganddata["vburminconf_data"][prop] = None
else:
liganddata["min_data"][prop] = min(proplist)
liganddata["max_data"][prop] = max(proplist)
liganddata["delta_data"][prop] = liganddata["max_data"][prop] - liganddata["min_data"][prop]
liganddata["vburminconf_data"][prop] = liganddata["confdata"][liganddata["vburminconf"]]["properties"][prop]
liganddata["time_all"] = sum([liganddata["confdata"][conf]["t_total"] for conf in liganddata["conformers_all"] if "t_total" in liganddata["confdata"][conf].keys()])
with open(cwd/f"{ligand}_data.yml","w") as f:
yaml.dump({k:v for k,v in liganddata.items() if k != "confdata"},f,Dumper=Dumper)
with open(cwd/f"{ligand}_confdata.yml","w") as f:
yaml.dump(liganddata["confdata"],f,Dumper=Dumper)
erel_df.to_csv(cwd/f"{ligand}_relative_energies.csv",sep=";")
return(liganddata,status)
def main_split_logs(cwd, ligand):
if not (cwd/"ERR").exists():
(cwd/"ERR").mkdir()
# if not (cwd/"done").exists():
# (cwd/"done").mkdir()
conformers = [i.name for i in (cwd/ligand).iterdir() if i.is_dir()]
conformers_good = []
for conformer in conformers:
logs = [i.name for i in (cwd/ligand/conformer).rglob("*.log")]
if f"{conformer}.log" in logs and f"{conformer}_opt.log" not in logs:
status = split_log(ligand, conformer)
if status != "Error":
#(cwd/ligand/conformer/f"{conformer}.log").rename(cwd/f"done/{conformer}.log")
conformers_good.append(conformer)
return(conformers_good)
if __name__ == '__main__':
starttime_all = time.time()
ligname = re.compile("[0-9]{8}")
ligands = sorted([i.name for i in cwd.iterdir() if (ligname.match(i.name) and i.is_dir())])
conformers = {ligand: [i.name for i in (cwd/ligand).iterdir() if i.is_dir()] for ligand in ligands}
if not (cwd/"ERR").exists():
(cwd/"ERR").mkdir()
if not (cwd/"done").exists():
(cwd/"done").mkdir()
for ligand in ligands:
for conformer in conformers[ligand]:
logs = [i.name for i in (cwd/ligand/conformer).rglob("*.log")]
if f"{conformer}.log" in logs and f"{conformer}_opt.log" not in logs:
status = split_log(ligand,conformer)
if status != "Error":
(cwd/ligand/conformer/f"{conformer}.log").rename(cwd/f"done/{conformer}.log")
if (cwd/"allligands_data.yml").exists():
with open(cwd/"allligands_data.yml","r") as f:
allliganddata = yaml.load(f,Loader=Loader)
else:
allliganddata = {}
for ligand in ligands:
print(ligand)
print(conformers[ligand])
if ligand in allliganddata.keys():
allliganddata[ligand],status = read_ligand(cwd,ligand,conformers[ligand],allliganddata[ligand])
else:
allliganddata[ligand],status = read_ligand(cwd,ligand,conformers[ligand])
with open(cwd/"allligands_data.yml","w") as f:
yaml.dump(allliganddata,f,Dumper=Dumper)
variants = ["boltz","min","max","delta","vburminconf"]
columns = [i+"_boltz" for i in boltzproperties if i not in mmproperties] + [f"{i}_{j}" for i,j in itertools.product(mmproperties,variants)]# + ["t_total","number_of_conformers"]
df = pd.DataFrame(columns = columns,index = ligands)
    # Each column name is "<property>_<variant>"; the variant selects which
    # ligand-level sub-dictionary read_ligand() stored the value in.
    variant_keys = {"boltz": "boltzmann_averaged_data", "min": "min_data",
                    "max": "max_data", "delta": "delta_data",
                    "vburminconf": "vburminconf_data"}
    for l in ligands:
        for c in columns:
            prop, variant = c.rsplit("_", 1)
            df.loc[l, c] = allliganddata[l][variant_keys[variant]][prop]
df["t_total"] = [allliganddata[l]["t_total"] for l in ligands]
df["number_of_conformers"] = [allliganddata[l]["number_of_conformers"] for l in ligands]
df.to_csv("allligands_data.csv",sep=";")
print(f"All done. Total time: {round((time.time()-starttime_all),2)} sec")
# Copyright 2021 The T5 Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python3
"""Classes for data loading and processing.
Defines Tasks, TaskRegistry, Mixture, and MixtureRegistry
"""
import abc
import collections
import inspect
import json
import os
import re
from typing import Any, Callable, Iterable, Mapping, MutableMapping, Optional, Sequence, Tuple, Type, Union
from absl import logging
import dataclasses
import numpy as np
from packaging import version
from t5.seqio import utils
from t5.seqio.feature_converters import FeatureConverter
from t5.seqio.vocabularies import Vocabulary
import tensorflow.compat.v2 as tf
import tensorflow_datasets as tfds
import typing_extensions
_DEFAULT_FEATURE_KEYS = ["inputs", "targets"]
_VALID_TASK_NAME_REGEX = re.compile(r"^[\w\d\._]+$")
_MAX_EXAMPLES_TO_MEM_CACHE = 10000
SHUFFLE_BUFFER_SIZE = 1000
@dataclasses.dataclass(frozen=True)
class Feature:
"""A container for attributes of output features of data providers."""
vocabulary: Vocabulary
add_eos: bool = True
required: bool = True
dtype: tf.DType = tf.int32
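# A minimal sketch of constructing a `Feature` (editor's illustration; the
# concrete vocabulary class and the model path are assumptions, not defined in
# this module):
#
#   vocab = SentencePieceVocabulary("/path/to/spm.model")  # hypothetical path
#   inputs_feature = Feature(vocabulary=vocab, add_eos=True, dtype=tf.int32)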
@dataclasses.dataclass(frozen=True)
class ShardInfo:
"""A container for specifying sharding info."""
index: int
num_shards: int
class DatasetProviderBase(metaclass=abc.ABCMeta):
"""Abstract base for classes that provide a tf.data.Dataset."""
@abc.abstractproperty
def output_features(self) -> Mapping[str, Feature]:
raise NotImplementedError
@abc.abstractproperty
def splits(self) -> Sequence[str]:
raise NotImplementedError
@abc.abstractmethod
def get_dataset(
self,
sequence_length: int,
split: str,
use_cached: bool = False,
shuffle: bool = True,
seed: Optional[int] = None,
shard_info: Optional[ShardInfo] = None,
num_epochs: int = 1
) -> tf.data.Dataset:
"""Returns the requested tf.data.Dataset."""
raise NotImplementedError
@abc.abstractmethod
def num_input_examples(self, split: str) -> int:
raise NotImplementedError
class DatasetProviderRegistry(object):
"""Base for registry of data providers.
Subclasses must wrap `get` method to override the return type for pytype.
TODO(adarob): Remove the need to override `get`.
"""
# Class variables must be defined in subclasses.
_REGISTRY: MutableMapping[str, DatasetProviderBase]
_PROVIDER_TYPE: Type[DatasetProviderBase]
@classmethod
def add_provider(cls, name: str, provider):
"""Adds a data provider instance to the registry."""
if name in cls._REGISTRY:
raise ValueError("Attempting to register duplicate provider: %s" % name)
if not isinstance(provider, cls._PROVIDER_TYPE):
raise ValueError(
"Attempting to register a class not of an invalid type. "
"Expecting instance of %s, got %s" %
(cls._PROVIDER_TYPE, type(provider).__name__))
cls._REGISTRY[name] = provider
@classmethod
def add(
cls,
name: str,
provider_cls,
*provider_args,
**provider_kwargs
):
"""Instantiates and adds provider to the registry."""
if not issubclass(provider_cls, cls._PROVIDER_TYPE):
raise ValueError(
"Attempting to register a class not of an invalid type. "
"Expecting instance of %s, got %s" %
(cls._PROVIDER_TYPE, provider_cls))
provider = provider_cls(*provider_args, **provider_kwargs)
cls.add_provider(name, provider)
return provider
@classmethod
def remove(cls, name):
"""Remove provider from the registry, if it exists."""
if name in cls._REGISTRY:
del cls._REGISTRY[name]
@classmethod
def get(cls, name):
"""Returns provider from the registry."""
if name not in cls._REGISTRY:
raise ValueError("Provider name not registered: %s" % name)
return cls._REGISTRY[name]
@classmethod
def names(cls):
"""Returns all provider names in registry."""
return cls._REGISTRY.keys()
@classmethod
def reset(cls):
"""Removes all of the registered tasks."""
cls._REGISTRY = {}
@classmethod
def get_dataset(
cls,
name,
sequence_length,
split,
use_cached=False,
shuffle=True,
seed=None,
shard_info=None,
num_epochs=1):
"""Returns the requested tf.data.Dataset."""
return cls.get(name).get_dataset(
sequence_length=sequence_length, split=split, use_cached=use_cached,
shuffle=shuffle, seed=seed, shard_info=shard_info,
num_epochs=num_epochs)
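# Editor's sketch of a concrete registry, supplying the class variables the
# docstring above requires of subclasses (names here are illustrative):
#
#   class MyProviderRegistry(DatasetProviderRegistry):
#     _REGISTRY = {}
#     _PROVIDER_TYPE = DatasetProviderBase
#
#   MyProviderRegistry.add_provider("my_source", my_provider_instance)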
# =============================== DataSources ==================================
class DataSource(DatasetProviderBase):
"""A `DatasetProvider` that provides raw data from an input source.
Inherits all abstract methods and properties of `DatasetProviderBase` except
  those overridden below.
"""
def __init__(
self,
splits: Iterable[str],
num_input_examples: Optional[Mapping[str, int]] = None):
self._splits = tuple(splits)
self._num_input_examples = (
dict(num_input_examples) if num_input_examples is not None else None)
@property
def splits(self) -> Sequence[str]:
return self._splits
@property
def output_features(self) -> Mapping[str, Feature]:
"""Override unused property of `DatasetProviderBase`."""
raise NotImplementedError
@abc.abstractmethod
def list_shards(self, split: str) -> Sequence[str]:
"""Returns string identifiers of input shards."""
raise NotImplementedError
@abc.abstractmethod
def get_dataset(
self,
split: str,
shuffle: bool = True,
seed: Optional[int] = None,
shard_info: Optional[ShardInfo] = None
) -> tf.data.Dataset:
"""Overrides base class to add shard identifier and remove use_cached.
Args:
split: string, the split to return.
shuffle: bool, whether to shuffle the input source.
seed: tf.int64 scalar tf.Tensor (or None) for shuffling input source.
shard_info: optional specification for loading a shard of the split.
"""
raise NotImplementedError
def num_input_examples(self, split: str) -> Optional[int]:
if self._num_input_examples is None:
return None
return self._num_input_examples[split]
def _validate_args(fn, expected_pos_args):
"""Ensure function has exactly expected positional args."""
argspec = inspect.getfullargspec(fn)
expected_pos_args = tuple(expected_pos_args)
actual_args = tuple(argspec.args)
if actual_args[:len(expected_pos_args)] != expected_pos_args:
raise ValueError(
"'%s' must have positional args %s, got: %s" % (
fn.__name__, expected_pos_args, actual_args))
actual_pos_args = tuple(
argspec.args[:-len(argspec.defaults)]
if argspec.defaults else argspec.args)
if actual_pos_args != expected_pos_args[:len(actual_pos_args)]:
raise ValueError(
"'%s' may only have positional args %s, got: %s" % (
fn.__name__, expected_pos_args, actual_pos_args))
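# For example (editor's note): given `def fn(split, shuffle_files, seed=None)`,
# `_validate_args(fn, ["split", "shuffle_files"])` passes, because the expected
# names lead the argument list and the only extra argument has a default.
# `def fn(shuffle_files, split)` would raise a ValueError (wrong order).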
class DatasetFnCallable(typing_extensions.Protocol):
def __call__(self,
split: str,
shuffle_files: bool,
seed: Optional[int] = None) -> tf.data.Dataset:
...
class FunctionDataSource(DataSource):
"""A `DataSource` that uses a function to provide the input data."""
def __init__(
self,
dataset_fn: DatasetFnCallable,
splits: Iterable[str],
num_input_examples: Optional[Mapping[str, int]] = None
):
"""FunctionDataSource constructor.
Args:
dataset_fn: a function with the signature `dataset_fn(split,
        shuffle_files)` (and optionally the variable `seed`) that returns a
`tf.data.Dataset`.
splits: an iterable of applicable string split names.
num_input_examples: dict or None, an optional dictionary mapping split
to its size in number of input examples (before preprocessing). The
`num_input_examples` method will return None if not provided.
"""
_validate_args(dataset_fn, ["split", "shuffle_files"])
self._dataset_fn = dataset_fn
super().__init__(splits=splits, num_input_examples=num_input_examples)
def get_dataset(
self,
split: str,
shuffle: bool = True,
seed: Optional[int] = None,
shard_info: Optional[ShardInfo] = None
) -> tf.data.Dataset:
if shard_info and shard_info.num_shards > 1:
raise ValueError(
"`FunctionDataSource` does not support low-level sharding. Use "
"tf.data.Dataset.shard instead.")
if seed is None:
ds = self._dataset_fn(split=split, shuffle_files=shuffle)
else:
_validate_args(self._dataset_fn, ["split", "shuffle_files", "seed"])
ds = self._dataset_fn(split=split, shuffle_files=shuffle, seed=seed)
return ds
def list_shards(self, split: str) -> Sequence[str]:
return [split]
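# A minimal sketch of wiring up a `FunctionDataSource` (editor's illustration;
# the function name and its toy data are assumptions):
#
#   def my_dataset_fn(split: str, shuffle_files: bool,
#                     seed: Optional[int] = None) -> tf.data.Dataset:
#     del shuffle_files, seed  # single in-memory source; nothing to shuffle
#     return tf.data.Dataset.from_tensor_slices({"text": [b"hello", b"world"]})
#
#   source = FunctionDataSource(dataset_fn=my_dataset_fn, splits=("train",))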
class TfdsDataSource(DataSource):
"""A `DataSource` that uses TensorFlow Datasets to provide the input data."""
def __init__(
self,
tfds_name: str,
tfds_data_dir: Optional[str] = None,
splits: Optional[Union[Iterable[str], Mapping[str, str]]] = None
):
"""TfdsTask constructor.
Args:
tfds_name: string, the name and version number of a TFDS dataset,
optionally with a config.
tfds_data_dir: string, an optional path to a specific TFDS data directory
to use.
splits: an iterable of allowable string split names, a dict mapping
allowable canonical splits (e.g., 'validation') to TFDS splits or slices
        (e.g., 'train[:1%]'), or None. The default, None, uses all available
splits from the TFDS dataset info.
"""
if ":" not in tfds_name:
raise ValueError("TFDS name must contain a version number, got: %s" %
tfds_name)
self._tfds_dataset = utils.LazyTfdsLoader(
tfds_name,
data_dir=tfds_data_dir,
split_map=splits if isinstance(splits, dict) else None)
# If splits are not provided, we pass an empty tuple and use the lazy
# lookup in the `splits` property.
super().__init__(splits=splits or ())
@property
def splits(self):
"""Overrides since we can't call `info.splits` until after init."""
return self._splits or self._tfds_dataset.info.splits
@property
def tfds_dataset(self):
return self._tfds_dataset
def get_dataset(
self,
split: str,
shuffle: bool = True,
seed: Optional[int] = None,
shard_info: Optional[ShardInfo] = None
) -> tf.data.Dataset:
return self.tfds_dataset.load(
split, shuffle_files=shuffle, seed=seed, shard_info=shard_info)
def num_input_examples(self, split: str) -> int:
"""Overrides since we can't call `info.splits` until after init."""
return self.tfds_dataset.size(split)
def list_shards(self, split: str) -> Sequence[str]:
return self.tfds_dataset.files(split)
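# A minimal sketch (editor's illustration; the dataset name/version and the
# split slices below are assumptions, not recommendations):
#
#   source = TfdsDataSource(
#       tfds_name="c4/en:2.2.0",
#       splits={"train": "train[:90%]", "validation": "train[90%:]"})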
class FileDataSource(DataSource):
"""A `DataSource` that reads a file to provide the input dataset."""
def __init__(
self,
read_file_fn: Callable[[tf.data.Dataset], tf.data.Dataset],
split_to_filepattern: Mapping[str, Union[str, Iterable[str]]],
num_input_examples: Optional[Mapping[str, int]] = None,
):
"""FileDataSource constructor.
Args:
read_file_fn: a callable for creating a `tf.data.Dataset` from a
`tf.data.Dataset` of file paths, e.g., `tf.data.TFRecordDataset`.
split_to_filepattern: a mapping from split names to filepatterns to be
expanded with glob.
num_input_examples: dict or None, an optional dictionary mapping split
to its size in number of input examples (before preprocessing). The
`num_input_examples` method will return None if not provided.
"""
self._split_to_filepattern = split_to_filepattern
self._reader = read_file_fn
super().__init__(
splits=split_to_filepattern.keys(),
num_input_examples=num_input_examples)
def get_dataset(
self,
split: str,
shuffle: bool = True,
seed: Optional[int] = None,
shard_info: Optional[ShardInfo] = None
) -> tf.data.Dataset:
files = self.list_shards(split)
if not files:
raise ValueError(
"No file is found for the file pattern: "
f"{self._split_to_filepattern[split]}."
)
    files_ds = tf.data.Dataset.from_tensor_slices(np.array(files, dtype=str))
if shard_info:
if len(files) < shard_info.num_shards:
raise ValueError(
f"Dataset has too few files to shard. {len(files)} files vs "
f"{shard_info.num_shards} shards requested.")
files_ds = files_ds.shard(shard_info.num_shards, shard_info.index)
if shuffle:
files_ds = files_ds.shuffle(buffer_size=16, seed=seed)
return files_ds.interleave(
self._reader,
cycle_length=16,
block_length=16,
num_parallel_calls=tf.data.experimental.AUTOTUNE)
def list_shards(self, split: str) -> Sequence[str]:
return tf.io.gfile.glob(self._split_to_filepattern[split])
class TextLineDataSource(FileDataSource):
"""A `FileDataSource` that reads lines of text from a file as input."""
def __init__(
self,
split_to_filepattern: Mapping[str, Union[str, Iterable[str]]],
skip_header_lines: int = 0,
num_input_examples: Optional[Mapping[str, int]] = None,
):
"""TextLineDataSource constructor.
Args:
split_to_filepattern: a mapping from split names to filepatterns to be
expanded with glob.
skip_header_lines: int, number of header lines to skip in each source
file.
num_input_examples: dict or None, an optional dictionary mapping split to
its size in number of input examples (before preprocessing). The
`num_input_examples` method will return None if not provided.
"""
# Used during caching.
self._skip_header_lines = skip_header_lines
def read_file_fn(filepattern):
return tf.data.TextLineDataset(filepattern).skip(skip_header_lines)
super().__init__(
read_file_fn=read_file_fn,
split_to_filepattern=split_to_filepattern,
num_input_examples=num_input_examples)
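# A minimal sketch (editor's illustration; the file patterns are hypothetical):
#
#   source = TextLineDataSource(
#       split_to_filepattern={"train": "/data/train-*.txt",
#                             "validation": "/data/validation.txt"},
#       skip_header_lines=1)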
class TFExampleDataSource(FileDataSource):
"""A `FileDataSource` that reads files of tf.train.Example protos as input."""
def __init__(
self,
split_to_filepattern: Mapping[str, Union[str, Iterable[str]]],
feature_description: Mapping[str, Union[tf.io.FixedLenFeature,
tf.io.VarLenFeature]],
reader_cls: Type[tf.data.Dataset] = tf.data.TFRecordDataset,
num_input_examples: Optional[Mapping[str, int]] = None,
):
"""TFExampleDataSource constructor.
Args:
split_to_filepattern: dict of string (split name) to either string
(filename or filepattern) or list of strings (filenames or
filepatterns).
feature_description: dict, a mapping of string feature keys to
`tf.io.FixedLenFeature` or `tf.io.VarLenFeature` values.
reader_cls: `tf.data.Dataset`, a dataset class to read the input files.
num_input_examples: dict or None, an optional dictionary mapping split to
its size in number of input examples (before preprocessing). The
`num_input_examples` method will return None if not provided.
"""
def read_file_fn(filepattern):
return reader_cls(filepattern).map(
lambda pb: tf.io.parse_single_example(pb, feature_description),
num_parallel_calls=tf.data.experimental.AUTOTUNE)
super().__init__(
read_file_fn=read_file_fn,
split_to_filepattern=split_to_filepattern,
num_input_examples=num_input_examples)
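# A minimal sketch (editor's illustration; feature names and file patterns are
# hypothetical):
#
#   source = TFExampleDataSource(
#       split_to_filepattern={"train": "/data/train.tfrecord-*"},
#       feature_description={
#           "inputs": tf.io.FixedLenFeature([], tf.string),
#           "targets": tf.io.FixedLenFeature([], tf.string),
#       })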
# ========================== Offline Caching Helpers ===========================
def _rename_plaintext_to_pretokenized(
dataset: tf.data.Dataset) -> tf.data.Dataset:
"""Rename cached _plaintext features to new _pretokenized standard."""
def _rename(inputs):
outputs = {}
for k, v in inputs.items():
if k.endswith("_plaintext"):
k = k[:-len("plaintext")] + "pretokenized"
outputs[k] = v
return outputs
return dataset.map(
_rename, num_parallel_calls=tf.data.experimental.AUTOTUNE)
class _CachedDataSource(FileDataSource):
"""A `FileDataSource` for reading datasets cached offline."""
def __init__(self, cache_dir: str, split: str):
with tf.io.gfile.GFile(utils.get_cached_info_path(cache_dir, split)) as f:
split_info = json.load(f)
features = split_info["features"]
with tf.io.gfile.GFile(utils.get_cached_stats_path(cache_dir, split)) as f:
stats = json.load(f)
version_when_cached = version.Version(
split_info.get("seqio_version", "0.pre"))
version_with_true_dtypes = version.Version("0.0.0")
if version_when_cached < version_with_true_dtypes:
# Assume that all int64 features are really int32.
for name, feat in features.items():
if feat["dtype"] == "int64":
logging.info("Casting cached '%s' to int32.", name)
feat["dtype"] = "int32"
# Use `FixedLenSequenceFeature` for sequences with variable length.
def _feature_config(shape, dtype):
if dtype in ("int32", "bool"):
# int32 and bool are stored as int64 in the tf.train.Example protobuf.
# TODO(adarob): Support other conversions.
dtype = "int64"
if shape and shape[0] is None:
return tf.io.FixedLenSequenceFeature(
shape[1:], dtype, allow_missing=True)
return tf.io.FixedLenFeature(shape, dtype)
feature_description = {
feat: _feature_config(**desc) for feat, desc in features.items()
}
def read_file_fn(filepattern):
ds = tf.data.TFRecordDataset(filepattern)
ds = ds.map(
lambda pb: tf.io.parse_single_example(pb, feature_description),
num_parallel_calls=tf.data.experimental.AUTOTUNE)
# Cast features back to the types from the info JSON since some features
      # must be cast for storage (e.g., int32 is stored as int64).
ds = ds.map(
lambda x: {k: tf.cast(v, features[k]["dtype"]) for k, v in x.items()},
num_parallel_calls=tf.data.experimental.AUTOTUNE)
# Legacy cached datasets may use old "_plaintext" suffix. Rename to
# "_pretokenized".
ds = _rename_plaintext_to_pretokenized(ds)
return ds
split_to_filepattern = {
split: "%s-*-of-*%d" % (
utils.get_cached_tfrecord_prefix(cache_dir, split),
split_info["num_shards"])
}
super().__init__(
read_file_fn=read_file_fn,
split_to_filepattern=split_to_filepattern,
num_input_examples={split: stats["examples"]}
)
class CacheDatasetPlaceholder(object):
"""A placeholder to signal when in the pipeline offline caching will occur."""
def __init__(self, required=False):
"""CacheDatasetPlaceholder constructor.
Args:
required: whether the dataset must be accessed in its cached form, and
on-the-fly preprocessing is disallowed.
"""
self._required = required
@property
def required(self):
return self._required
def __call__(self, dataset):
raise RuntimeError("`CacheDatasetPlaceholder` should never be called.")
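# Editor's sketch of where the placeholder sits in a Task's `preprocessors`
# list (the surrounding function names are illustrative):
#
#   preprocessors = [
#       my_deterministic_preprocessor,  # runs before caching; cached offline
#       CacheDatasetPlaceholder(),
#       my_runtime_preprocessor,        # may depend on `sequence_length`/`seed`
#   ]
#
# `Task.__init__` below enforces this split: steps taking `sequence_length`
# may not precede the placeholder, and seeded steps before it log a warning.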
# ================================ Tasks =======================================
MetricFnCallable = Callable[..., Mapping[str, float]]
class Task(DatasetProviderBase):
"""A class to manage a dataset and its related metrics."""
def __init__(
self,
name: str,
source: DataSource,
output_features: Mapping[str, Feature],
preprocessors: Optional[Sequence[Callable[..., tf.data.Dataset]]] = None,
postprocess_fn: Optional[Callable[..., Any]] = None,
metric_fns: Optional[Sequence[MetricFnCallable]] = None,
shuffle_buffer_size: Optional[int] = SHUFFLE_BUFFER_SIZE):
"""Task constructor.
Args:
name: a unique name for the Task.
source: a `DataSource` that provides a raw `tf.data.Dataset`.
output_features: dict(str, Feature), output features of the Task to be
passed to the model. After preprocessing, examples will be validated to
ensure they include features that match this specification. Note that
additional features may be included (e.g., for evaluation), but they
will not be passed to the model.
preprocessors: list(callable), an optional list of functions that receive
a tf.data.Dataset and return a tf.data.Dataset. These will be executed
        sequentially and the final dataset must include features matching
`output_features`.
postprocess_fn: callable, an optional function that receives decoded model
outputs and converts them to a form that is ready for evaluation using
the metric functions in `metric_fns`.
metric_fns: list(callable), an optional list of metric functions with the
signature `metric_fn(targets, predictions)` to use during evaluation. If
undefined or empty, no evaluation will occur on the task.
shuffle_buffer_size: an optional integer to set the shuffle buffer size.
If None, shuffling will be disallowed.
"""
if not _VALID_TASK_NAME_REGEX.match(name):
raise ValueError(
"Task name '%s' contains invalid characters. Must match regex: %s" % (
name, _VALID_TASK_NAME_REGEX.pattern))
metric_fns = metric_fns or []
self._predict_metric_fns = []
self._score_metric_fns = []
for metric_fn in metric_fns:
pos_args = tuple(
key for key, param in inspect.signature(metric_fn).parameters.items()
if param.default == inspect.Parameter.empty
)
if pos_args == ("targets", "scores"):
self._score_metric_fns.append(metric_fn)
elif pos_args == ("targets", "predictions"):
self._predict_metric_fns.append(metric_fn)
else:
raise ValueError(
"Metric functions must have positional arguments matching either "
"('targets', 'predictions') or ('targets', 'scores'). "
f"Got: {pos_args}")
self._name = name
self._source = source
# Find optional CacheDatasetPlaceholder.
preprocessors = tuple(preprocessors or [])
cache_step_idxs = [
i for i, p in enumerate(preprocessors)
if isinstance(p, CacheDatasetPlaceholder)
]
if len(cache_step_idxs) > 1:
raise ValueError(
"`CacheDatasetPlaceholder` can appear at most once in the "
f"preprocessing pipeline. Found {len(cache_step_idxs)} in '{name}'.")
cache_step_idx = cache_step_idxs[0] if cache_step_idxs else None
if cache_step_idx is not None:
for prep in preprocessors[:cache_step_idx]:
prep_args = inspect.signature(prep).parameters.keys()
if "sequence_length" in prep_args:
raise ValueError(
f"'{prep.__name__}' has a `sequence_length` argument but occurs "
f"before `CacheDatasetPlaceholder` in '{name}'. This is not "
"allowed since the sequence length is specified at run time.")
if "seed" in prep_args or "seeds" in prep_args:
          logging.warning(  # pylint:disable=logging-format-interpolation
f"'{prep.__name__}' has a `seed(s)` argument but occurs before "
f"`CacheDatasetPlaceholder` in '{name}'. This is not recommended "
"since the same samples will be used each epoch when reading "
"from the cache.")
self._cache_step_idx = cache_step_idx
self._preprocessors = preprocessors
self._metric_fns = tuple(metric_fns)
self._postprocess_fn = postprocess_fn
self._cache_dir = None
self._stats = {}
self._shuffle_buffer_size = shuffle_buffer_size
self._output_features = collections.OrderedDict(
sorted(list(output_features.items()))
)
@property
def name(self) -> str:
return self._name
@property
def metric_fns(self) -> Sequence[MetricFnCallable]:
"""List of all metric functions."""
return self._predict_metric_fns + self._score_metric_fns
@property
def score_metric_fns(self) -> Sequence[MetricFnCallable]:
"""List of metric functions that use log likelihood scores."""
return self._score_metric_fns
@property
def predict_metric_fns(self) -> Sequence[MetricFnCallable]:
"""List of metric functions that use model predictions."""
return self._predict_metric_fns
@property
def output_features(self) -> Mapping[str, Feature]:
return self._output_features
@property
def splits(self) -> Sequence[str]:
s = self.source.splits
if not s:
raise ValueError(f"Task {self.name} has no splits")
return s
@property
def source(self) -> DataSource:
return self._source
@property
def preprocessors(self) -> Sequence[Callable[..., tf.data.Dataset]]:
return self._preprocessors
def num_input_examples(self, split: str) -> Optional[int]:
return self.source.num_input_examples(split)
def _preprocess_dataset(
self,
dataset: tf.data.Dataset,
preprocessors: Sequence[Callable[..., tf.data.Dataset]],
sequence_length: Optional[Mapping[str, int]] = None) -> tf.data.Dataset:
"""Sequentially applies preprocessors."""
for prep_fn in preprocessors:
# prep_fn must not rely on variable length keyword args such as **kwargs.
fn_args = set(inspect.signature(prep_fn).parameters.keys())
kwargs = {}
if "sequence_length" in fn_args:
kwargs["sequence_length"] = sequence_length
if "output_features" in fn_args:
kwargs["output_features"] = self.output_features
dataset = prep_fn(dataset, **kwargs)
return dataset
def _validate_preprocessing(
self, dataset: tf.data.Dataset
) -> tf.data.Dataset:
"""Validates preprocessed dataset, raising Exceptions if needed.
Args:
dataset: a tf.data.Dataset to validate.
Returns:
a validated tf.data.Dataset.
"""
actual_specs = dataset.element_spec
for feat, feat_spec in self.output_features.items():
if feat not in actual_specs:
if feat_spec.required:
raise ValueError(
"Task dataset is missing expected output feature after "
f"preprocessing: {feat}")
else:
# It's ok that this feature does not exist.
continue
actual_spec = actual_specs[feat]
if feat_spec.dtype != actual_spec.dtype:
raise ValueError(
f"Task dataset has incorrect type for feature '{feat}' after "
f"preprocessing: Got {actual_spec.dtype.name}, expected "
f"{feat_spec.dtype.name}")
if actual_spec.shape.rank != 1:
raise ValueError(
f"Task dataset has incorrect rank for feature '{feat}' after "
f"preprocessing: Got {actual_spec.shape.rank}, expected 1")
return dataset
def _trim_output_features(
self,
dataset: tf.data.Dataset,
sequence_length: Optional[Mapping[str, int]]
) -> tf.data.Dataset:
"""Trim output features to sequence length."""
def _trim(k: str, v: tf.Tensor) -> tf.Tensor:
if k not in self.output_features or not sequence_length:
return v
return v[:sequence_length[k]]
return dataset.map(
lambda ex: {k: _trim(k, v) for k, v in ex.items()},
num_parallel_calls=tf.data.experimental.AUTOTUNE)
def preprocess_precache(
self,
dataset: tf.data.Dataset,
seed: Optional[int] = None
) -> tf.data.Dataset:
"""Runs preprocessing steps before the optional CacheDatasetPlaceholder."""
if not self.supports_caching:
return dataset
with utils.map_seed_manager(seed):
return self._preprocess_dataset(
dataset,
self._preprocessors[:self._cache_step_idx],
)
def preprocess_postcache(
self,
dataset: tf.data.Dataset,
sequence_length: Optional[Mapping[str, int]],
seed: Optional[int] = None
) -> tf.data.Dataset:
"""Runs preprocessing steps after the optional CacheDatasetPlaceholder.
Args:
dataset: a tf.data.Dataset
sequence_length: dict mapping feature key to int length for that feature.
If None, the features will not be truncated.
seed: an optional random seed for deterministic preprocessing.
Returns:
a tf.data.Dataset
"""
start_idx = 0
if self.supports_caching:
# Skip a sufficient number of seeds to avoid duplicating any from
# pre-cache preprocessing.
seed = None if seed is None else seed + 42 * self._cache_step_idx
start_idx = self._cache_step_idx + 1
with utils.map_seed_manager(seed):
dataset = self._preprocess_dataset(
dataset,
self._preprocessors[start_idx:],
sequence_length=sequence_length,
)
return dataset
@property
def cache_dir(self) -> Optional[str]:
"""Returns the cache directory (or None), initializing if needed."""
if not self._cache_dir:
# See if cached data exists in any of the cache directories.
potential_cache_dirs = [
os.path.join(d, self.name) for d in utils.get_global_cache_dirs()]
for cache_dir in potential_cache_dirs:
try:
if tf.io.gfile.exists(os.path.join(cache_dir, "COMPLETED")):
self._cache_dir = cache_dir
logging.info("'%s' is cached at %s.", self.name, self.cache_dir)
break
except tf.errors.PermissionDeniedError:
logging.warning(
"Permission denied for global cache folder: %s", cache_dir)
if not self._cache_dir:
logging.info(
"'%s' does not exist in any task cache directories (searched %s).",
self.name,
potential_cache_dirs,
)
return self._cache_dir
@property
def supports_caching(self) -> bool:
"""Whether or not this task supports offline caching."""
return self._cache_step_idx is not None
@property
def requires_caching(self) -> bool:
"""Whether or not this task requires offline caching."""
return (self._cache_step_idx is not None and
self.preprocessors[self._cache_step_idx].required)
def assert_cached(self) -> None:
"""Raises an assertion error if cached dataset does not exist."""
assert self.cache_dir, (
f"'{self.name}' does not exist in any of the task cache directories.")
def get_cached_stats(self,
split: str = tfds.Split.TRAIN
) -> Mapping[str, Union[int, float]]:
"""Returns basic statistics for cached dataset."""
self.assert_cached()
if split not in self._stats:
stats_path = utils.get_cached_stats_path(self.cache_dir, split)
if not tf.io.gfile.exists(stats_path):
raise ValueError(
"Stats do not exist for '%s' split: %s" % (self.name, split))
with tf.io.gfile.GFile(stats_path) as f:
self._stats[split] = json.load(f)
return self._stats[split]
def get_dataset(
self,
sequence_length: Optional[Mapping[str, int]],
split: str = tfds.Split.TRAIN,
use_cached: bool = False,
shuffle: bool = True,
shuffle_buffer_size: Optional[int] = None,
seed: Optional[int] = None,
shard_info: Optional[ShardInfo] = None,
num_epochs: Optional[int] = 1
) -> tf.data.Dataset:
"""Returns a tf.data.Dataset from cache or generated on the fly.
Args:
sequence_length: dict mapping feature key to maximum int length for that
feature. If longer after preprocessing, the feature will be truncated.
May be set to None to avoid truncation.
split: string, the split to return.
use_cached: bool, whether to use the cached dataset instead of processing
it on the fly. Defaults to False.
shuffle: bool, whether to shuffle the dataset. Only used when generating
on the fly (use_cached=False).
shuffle_buffer_size: an integer or None to use task-specific buffer size.
seed: tf.int64 scalar tf.Tensor (or None) for shuffling tf.data.
shard_info: optional specification for loading a shard of the split. If
the Task's DataSource contains at least the number of shards in the
specification, it will be passed the shard info to avoid loading the
full source dataset. Otherwise, the full source dataset will be loaded
        and sharded at the level of individual examples.
num_epochs: the number of times to iterate through the dataset, or `None`
to repeat indefinitely. Note that the repeat occurs in the pipeline
after offline caching, but before applying potentially stochastic
post-cache preprocessors and is therefore typically preferred to calling
`repeat()` on the returned dataset. Defaults to `1`.
Returns:
A tf.data.Dataset.
"""
if use_cached and not self.supports_caching:
logging.warning(
"Task '%s' does not support caching. Switching to on-the-fly "
"preprocessing.", self.name)
use_cached = False
elif self.requires_caching and not use_cached:
raise ValueError(
f"Task '{self.name}' requires caching, but was called with "
"`use_cached=False`.")
if shard_info:
# Whether we should shard at source or on the examples from the source.
shard_data_source = (
len(self.source.list_shards(split=split)) >= shard_info.num_shards)
logging.info("Sharding at the %s: %d of %d",
"data source" if shard_data_source else "examples",
shard_info.index, shard_info.num_shards)
else:
# No sharding.
shard_data_source = False
shard_info = ShardInfo(0, 1)
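      # A single-shard ShardInfo makes the `ds.shard` call below a no-op.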
if use_cached:
source = self._get_cached_source(split)
else:
source = self.source
if shard_data_source:
ds = source.get_dataset(
split=split, shuffle=shuffle, seed=seed, shard_info=shard_info)
else:
ds = source.get_dataset(split=split, shuffle=shuffle, seed=seed)
ds = ds.shard(shard_info.num_shards, shard_info.index)
if ((use_cached and
self.get_cached_stats(split)["examples"] < _MAX_EXAMPLES_TO_MEM_CACHE)
or (self.num_input_examples(split) and
self.num_input_examples(split) < _MAX_EXAMPLES_TO_MEM_CACHE)):
logging.info(
"Automatically caching small dataset in memory: '%s:%s'",
self.name, split)
ds = ds.cache()
if not use_cached:
ds = self.preprocess_precache(ds, seed=seed)
ds = ds.prefetch(tf.data.experimental.AUTOTUNE)
# We repeat before calling any (potentially) stochastic post-cache
# preprocessing in order to take new samples each epoch.
ds = ds.repeat(num_epochs)
# Post cache processing.
ds = self.preprocess_postcache(
ds, sequence_length=sequence_length, seed=seed)
ds = self._validate_preprocessing(ds)
ds = self._trim_output_features(ds, sequence_length=sequence_length)
if shuffle:
if self._shuffle_buffer_size is None:
raise ValueError(
f"Shuffling is disallowed for Task '{self.name}' since its "
"`shuffle_buffer_size` was set to `None` on construction.")
shuffle_buffer_size = shuffle_buffer_size or self._shuffle_buffer_size
# Shuffle before mixing since preprocessor can output multiple
# (correlated) examples per input.
ds = ds.shuffle(shuffle_buffer_size, seed=seed)
return ds.prefetch(tf.data.experimental.AUTOTUNE)
def _get_cached_source(self, split) -> _CachedDataSource:
"""Returns a DataSource to read cached files for split."""
self.assert_cached()
return _CachedDataSource(self.cache_dir, split)
def postprocess_fn(self, decoded_model_output: Any,
**postprocess_kwargs) -> Any:
"""Returns the model output after applying the postprocess function."""
if self._postprocess_fn:
return self._postprocess_fn(decoded_model_output, **postprocess_kwargs)
return decoded_model_output
class TaskRegistry(DatasetProviderRegistry):
"""Registry of Tasks."""
_REGISTRY = {}
_PROVIDER_TYPE = Task
@classmethod
def add(
cls,
name: str,
source: DataSource,
output_features: Mapping[str, Feature],
preprocessors: Optional[Sequence[Callable[..., tf.data.Dataset]]] = None,
postprocess_fn: Optional[Callable[..., Any]] = None,
metric_fns: Optional[Sequence[Callable[..., Mapping[str, float]]]] = None,
**kwargs) -> Task:
return super().add(name, Task, name, source, output_features, preprocessors,
postprocess_fn, metric_fns, **kwargs)
@classmethod
def get(cls, name) -> Task:
return super().get(name)
# ================================ Mixtures ====================================
class Mixture(DatasetProviderBase):
"""Class for mixing multiple tasks."""
def __init__(self,
name: str,
tasks: Union[Sequence[str],
Sequence[Tuple[str, Union[int, float,
Callable[[Task],
float]]]]],
               default_rate: Optional[Union[float, Callable[[Task], float]]] = None):
"""Mixture constructor.
A mixture specifies a set of tasks with associated mixing rates.
Mixing happens on preprocessed tokenized examples.
The mixing rates represent relative numbers of examples to use from their
associated tasks. Setting the mixing rates to be equal to the numbers of
examples in the tasks will result in each task going through an epoch in
about the same amount of time - i.e. all examples are sampled equally across
all tasks.
Rates can be expressed either as absolute numbers or as functions that
receive the Task as an argument.
Args:
name: string, a unique name for the Mixture.
tasks: a list where each element is either a string (task name) or a
pair whose first element is the task name and whose second element
is either a float (rate) or a function from Task to float.
default_rate: a float or a function from Task to float. This specifies the
default rate if rates are not provided in the `tasks` argument.
"""
self._task_to_rate = {}
self._tasks = []
self._sub_mixtures = []
self._name = name
for t in tasks:
if isinstance(t, str):
task_name = t
rate = default_rate
if default_rate is None:
          raise ValueError(
              f"A mixing rate is required for task '{task_name}' since no "
              "`default_rate` was provided.")
else:
task_name, rate = t
if task_name in TaskRegistry.names():
self._tasks.append(TaskRegistry.get(task_name))
self._task_to_rate[task_name] = rate
else:
self._sub_mixtures.append(MixtureRegistry.get(task_name)) # pytype:disable=name-error
self._task_to_rate[task_name] = rate
if len(set(tuple(t.output_features) for t in self.tasks)) != 1:
raise ValueError(
"All Tasks in a Mixture must have the same output features."
)
@property
def name(self) -> str:
return self._name
@property
def tasks(self) -> Sequence[Task]:
sub_tasks = (mix.tasks for mix in self._sub_mixtures)
return list(sorted(set(sum(sub_tasks, self._tasks)), key=lambda t: t.name))
@property
def total_rate(self) -> float:
return sum(float(rate(TaskRegistry.get(name)) if callable(rate) else rate)
for name, rate in self._task_to_rate.items())
def get_rate(self, task: Task) -> float:
"""Computes the mixing rate for the given task."""
value = 0.0
for mix in self._sub_mixtures:
if task in mix.tasks:
rate = self._task_to_rate[mix.name]
value += rate * mix.get_rate(task) / mix.total_rate
if task.name in self._task_to_rate:
rate = self._task_to_rate[task.name]
value += float(rate(task) if callable(rate) else rate)
return value
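  # Worked example (illustrative): if this mixture holds task A directly at
  # rate 2.0 plus a sub-mixture M at rate 3.0, and M assigns A a rate of 1.0
  # out of M's total rate of 4.0, then
  #   get_rate(A) = 2.0 + 3.0 * (1.0 / 4.0) = 2.75.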
def num_input_examples(self, split: str) -> int:
return sum(t.num_input_examples(split) for t in self.tasks)
@property
def splits(self) -> Sequence[str]:
splits = set()
for task in self.tasks:
splits.update(task.splits)
return tuple(splits)
@property
def output_features(self) -> Mapping[str, Feature]:
# We require all tasks to have the same output_features in __init__
# so we can just get the output_features for the 0th task
return self.tasks[0].output_features
def _check_compatible_features(self) -> None:
"""Throw Exception if features across tasks have different vocabs or dtypes.
"""
for name, feature in self.tasks[0].output_features.items():
for task in self.tasks[1:]:
if task.output_features[name].vocabulary != feature.vocabulary:
raise ValueError(
"Features across tasks in a mixture must use the same vocabulary."
)
if task.output_features[name].dtype != feature.dtype:
raise ValueError(
"Features across tasks in a mixture must use the same dtype."
)
def get_dataset(
self,
sequence_length: Optional[Mapping[str, int]],
split: str = tfds.Split.TRAIN,
use_cached: bool = False,
shuffle: bool = True,
seed: Optional[int] = None,
shard_info: Optional[ShardInfo] = None,
num_epochs: Optional[int] = None,
copy_pretokenized: bool = False,
compute_stats_empirically: bool = False,
) -> tf.data.Dataset:
"""Returns the dataset of mixed tasks using the object-specified rates.
Args:
sequence_length: dict mapping feature key to maximum int length for that
feature. If longer after preprocessing, the feature will be truncated.
May be set to None to avoid truncation.
split: string, the split to return for all tasks.
use_cached: bool, whether to use the cached dataset instead of processing
it on the fly. Defaults to False.
shuffle: bool, whether to shuffle the dataset. Only used when generating
on the fly (use_cached=False).
seed: tf.int64 scalar tf.Tensor (or None) for shuffling tf.data.
shard_info: optional specification for loading a shard of the split.
num_epochs: the number of times to iterate through the dataset, or `None`
to repeat indefinitely. Note that the repeat occurs in the pipeline
after offline caching, but before applying potentially stochastic
post-cache preprocessors and is therefore typically preferred to calling
`repeat()` on the returned dataset. Defaults to `None`.
copy_pretokenized: bool, whether to pass through copies of pretokenized
        features with a "_pretokenized" suffix added to the key.
      compute_stats_empirically: a boolean; empirical stats iterate over
        sampled examples and therefore do not work on TPU.
"""
self._check_compatible_features()
tasks = []
for task in self.tasks:
if split not in task.splits:
logging.warning(
"Task %s has no '%s' split, skipping.", task.name, split
)
continue
tasks.append(task)
if not tasks:
raise ValueError("No datasets have a '{}' split".format(split))
output_feature_keys = set(self.output_features.keys())
if copy_pretokenized:
output_feature_keys.update(
{f + "_pretokenized" for f in output_feature_keys})
def filter_features(ex):
return {k: v for k, v in ex.items() if k in output_feature_keys}
datasets = [
task.get_dataset( # pylint:disable=g-complex-comprehension
sequence_length,
split=split,
use_cached=use_cached,
shuffle=shuffle,
seed=seed,
shard_info=shard_info,
num_epochs=num_epochs)
.map(filter_features, num_parallel_calls=tf.data.experimental.AUTOTUNE)
for task in tasks]
rates = [self.get_rate(task) for task in tasks]
    # Sample from the datasets according to their computed rates.
if seed is not None:
sample_seed = seed
elif shuffle:
sample_seed = None
else:
sample_seed = 42
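    # A fixed seed in the unshuffled case keeps the task interleaving
    # deterministic across runs even though no user seed was supplied.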
dataset = tf.data.experimental.sample_from_datasets(
datasets, rates, sample_seed)
if (split == "train" and use_cached and
all(t.supports_caching for t in tasks)):
_log_mixing_proportions(tasks, datasets, rates, dataset, sequence_length,
compute_stats_empirically)
return dataset
def _log_padding_fractions(dataset, sequence_length, num_examples=100):
"""Empirically compute the fraction of padding - log the results.
Args:
dataset: a tf.data.Dataset
sequence_length: dict from string to int (packed lengths)
num_examples: an integer
"""
logging.info("computing padding fractions")
keys = sequence_length.keys()
padding_frac = {k: 0 for k in keys}
for ex in tfds.as_numpy(dataset.take(num_examples)):
for k in keys:
padding_frac[k] += 1 - (sequence_length[k] / len(ex[k]))
for k in keys:
logging.info("%s padding fraction = %g", k, padding_frac[k])
def _log_mixing_proportions(
tasks, datasets, rates, mixed_dataset,
sequence_length, compute_stats_empirically):
"""Log information about the mixing proportions.
Called from Mixture.get_dataset.
Args:
tasks: a list of Task
datasets: a list of tf.data.Dataset
rates: a list of floats
mixed_dataset: a tf.data.Dataset
sequence_length: dict from string to int (packed lengths)
    compute_stats_empirically: a boolean; empirical stats iterate over
      sampled examples and therefore do not work on TPU.
"""
def _normalize(l):
denom = sum(l)
if not denom:
return l
return [x / denom for x in l]
# compute some stats about the mixture
examples_fraction = _normalize(rates)
if compute_stats_empirically:
stats_examples = 100
mean_inputs_length = []
mean_targets_length = []
for dataset in datasets:
inputs_sum = 0
targets_sum = 0
for ex in tfds.as_numpy(dataset.take(stats_examples)):
# Some tasks, like LMs, don't have inputs.
if "inputs" in ex:
inputs_sum += ex["inputs"].size
targets_sum += ex["targets"].size
mean_inputs_length.append(inputs_sum / float(stats_examples))
mean_targets_length.append(targets_sum / float(stats_examples))
else:
def _estimated_mean_length(task, key):
if key not in sequence_length:
return 0
if (task.supports_caching and
task._cache_step_idx < len(task._preprocessors) - 1): # pylint:disable=protected-access
# There is processing after caching, so we can't rely on the stats.
return sequence_length[key]
# Some tasks, like LMs, don't have inputs.
if key + "_tokens" in task.get_cached_stats("train"):
return min(sequence_length[key],
(task.get_cached_stats("train")[key + "_tokens"] /
task.get_cached_stats("train")["examples"]))
else:
return 0
mean_inputs_length = [_estimated_mean_length(task, "inputs")
for task in tasks]
mean_targets_length = [_estimated_mean_length(task, "targets")
for task in tasks]
inputs_fraction = _normalize(
[l * r for l, r in zip(mean_inputs_length, rates)])
targets_fraction = _normalize(
[l * r for l, r in zip(mean_targets_length, rates)])
logging.info("%12s %12s %12s %12s %12s %12s %s",
"rate", "ex.frac.", "inp.frac.", "tgt.frac.",
"inp.len.", "tgt.len", "task")
for i in range(len(rates)):
logging.info("%12g %12g %12g %12g %12g %12g %s",
rates[i], examples_fraction[i],
inputs_fraction[i], targets_fraction[i],
mean_inputs_length[i], mean_targets_length[i],
tasks[i].name)
if compute_stats_empirically:
_log_padding_fractions(mixed_dataset, sequence_length)
class MixtureRegistry(DatasetProviderRegistry):
"""Registry of Mixtures."""
_REGISTRY = {}
_PROVIDER_TYPE = Mixture
@classmethod
def add(cls, name, tasks, default_rate=None) -> Mixture:
return super().add(name, Mixture, name, tasks, default_rate)
@classmethod
def get(cls, name) -> Mixture:
return super().get(name)
def get_mixture_or_task(task_or_mixture_name):
"""Return the Task or Mixture from the appropriate registry."""
mixtures = MixtureRegistry.names()
tasks = TaskRegistry.names()
if task_or_mixture_name in mixtures:
if task_or_mixture_name in tasks:
logging.warning("%s is both a Task and a Mixture, returning Mixture",
task_or_mixture_name)
return MixtureRegistry.get(task_or_mixture_name)
if task_or_mixture_name in tasks:
return TaskRegistry.get(task_or_mixture_name)
else:
raise ValueError("No Task or Mixture found with name: %s" %
task_or_mixture_name)
def get_subtasks(task_or_mixture):
"""Returns all the Tasks in a Mixture as a list or the Task itself."""
if isinstance(task_or_mixture, Task):
return [task_or_mixture]
else:
return task_or_mixture.tasks
def get_dataset(
mixture_or_task_name: str,
task_feature_lengths: Mapping[str, int],
feature_converter: FeatureConverter,
dataset_split: str = "train",
use_cached: bool = False,
shuffle: bool = False,
num_epochs: Optional[int] = 1,
shard_info: ShardInfo = None,
verbose: bool = True,
seed: Optional[int] = None
) -> tf.data.Dataset:
"""Get processed dataset with the model features.
In order to use options specific to a feature converter, e.g., packing,
`feature_converter` instance should be instantiated with those options before
  being passed to this function.
Getting sharded datasets is supported. To use this feature, pass in
`shard_info`, with shard_index and num_shards information. Sharding is done
before the feature converter stage. Therefore, if packing is used it will be
done on the sharded dataset.
Args:
mixture_or_task_name: mixture or task name for the Task API.
task_feature_lengths: dict mapping task feature key to its sequence length.
This specifies the sequence length of the dataset from the Task API.
feature_converter: a feature converter object to use to convert the task
features to model features.
Must be a subclass of FeatureConverter.
dataset_split: the split to use.
use_cached: whether to use the cached dataset instead of processing it on
the fly.
shuffle: whether to shuffle the dataset.
num_epochs: the number of times to iterate through the dataset, or `None` to
repeat indefinitely. Note that the repeat occurs in the pipeline after
offline caching, but before applying potentially stochastic post-cache
preprocessors and is therefore typically preferred to calling `repeat()`
on the returned dataset. Defaults to `1`.
shard_info: number of shards and shard index information.
verbose: if true, log the feature shapes.
    seed: a random seed for shuffling tf.data.
Returns:
ds: the processed dataset.
"""
if not isinstance(feature_converter, FeatureConverter):
raise TypeError(
"feature_converter should be an instance of FeatureConverter.")
mixture_or_task = get_mixture_or_task(mixture_or_task_name)
ds = mixture_or_task.get_dataset(
task_feature_lengths,
split=dataset_split,
use_cached=use_cached,
shuffle=shuffle,
seed=seed,
shard_info=shard_info,
num_epochs=num_epochs)
ds = feature_converter(ds, task_feature_lengths=task_feature_lengths)
if verbose:
logging.info(
"The output dataset from seqio.get_dataset has the following features")
for feature_name, tensor_spec in ds.element_spec.items():
logging.info("feature: %s \t shape: %s \t dtype: %s", feature_name,
tensor_spec.shape.as_list(), tensor_spec.dtype.name)
return ds
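
# ---------------------------------------------------------------------------
# Usage sketch (illustrative only): registering a trivial Task backed by a
# `FunctionDataSource` and reading it through `get_dataset`. It is left
# commented out because it needs a concrete `Vocabulary` implementation;
# `my_vocab`, `my_dataset_fn`, and `my_feature_converter` below are
# assumptions, not part of this module.
#
#   TaskRegistry.add(
#       "toy_task",
#       source=FunctionDataSource(my_dataset_fn, splits=["train"]),
#       output_features={
#           "inputs": Feature(vocabulary=my_vocab),
#           "targets": Feature(vocabulary=my_vocab),
#       },
#       preprocessors=[],  # tokenization etc. would normally go here
#   )
#   ds = get_dataset(
#       "toy_task",
#       task_feature_lengths={"inputs": 32, "targets": 32},
#       feature_converter=my_feature_converter,
#       dataset_split="train")
# ---------------------------------------------------------------------------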
f71fdf38821c8803ae681fd71c4c71d7da8b1c90 | 2,037 | py | Python | airflow/migrations/versions/0090_30867afad44a_rename_concurrency_column_in_dag_table_.py | npodewitz/airflow | 511ea702d5f732582d018dad79754b54d5e53f9d | [
"Apache-2.0"
] | 8,092 | 2016-04-27T20:32:29.000Z | 2019-01-05T07:39:33.000Z | airflow/migrations/versions/0090_30867afad44a_rename_concurrency_column_in_dag_table_.py | npodewitz/airflow | 511ea702d5f732582d018dad79754b54d5e53f9d | [
"Apache-2.0"
] | 2,961 | 2016-05-05T07:16:16.000Z | 2019-01-05T08:47:59.000Z | airflow/migrations/versions/0090_30867afad44a_rename_concurrency_column_in_dag_table_.py | npodewitz/airflow | 511ea702d5f732582d018dad79754b54d5e53f9d | [
"Apache-2.0"
] | 3,546 | 2016-05-04T20:33:16.000Z | 2019-01-05T05:14:26.000Z | #
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Rename ``concurrency`` column in ``dag`` table to`` max_active_tasks``
Revision ID: 30867afad44a
Revises: e9304a3141f0
Create Date: 2021-06-04 22:11:19.849981
"""
import sqlalchemy as sa
from alembic import op
# revision identifiers, used by Alembic.
revision = '30867afad44a'
down_revision = 'e9304a3141f0'
branch_labels = None
depends_on = None
airflow_version = '2.2.0'
def upgrade():
"""Apply Rename ``concurrency`` column in ``dag`` table to`` max_active_tasks``"""
conn = op.get_bind()
is_sqlite = bool(conn.dialect.name == "sqlite")
if is_sqlite:
op.execute("PRAGMA foreign_keys=off")
with op.batch_alter_table('dag') as batch_op:
batch_op.alter_column(
'concurrency',
new_column_name='max_active_tasks',
type_=sa.Integer(),
nullable=False,
)
if is_sqlite:
op.execute("PRAGMA foreign_keys=on")
def downgrade():
"""Unapply Rename ``concurrency`` column in ``dag`` table to`` max_active_tasks``"""
with op.batch_alter_table('dag') as batch_op:
batch_op.alter_column(
'max_active_tasks',
new_column_name='concurrency',
type_=sa.Integer(),
nullable=False,
)
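
# Note (illustrative): this revision is normally applied as part of Airflow's
# bundled migrations via `airflow db upgrade`; the equivalent direct Alembic
# invocation would be `alembic upgrade 30867afad44a`.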
f71fdf687002e4d434788ab435b395447a70728b | 228 | py | Python | dstlib/node.py | Algebra7/dstlib | 3f891fb48c8c00caf89255c45a1b41f76331d252 | [
"MIT"
] | 1 | 2021-07-27T08:22:29.000Z | 2021-07-27T08:22:29.000Z | dstlib/node.py | Algebra7/dstlib | 3f891fb48c8c00caf89255c45a1b41f76331d252 | [
"MIT"
] | null | null | null | dstlib/node.py | Algebra7/dstlib | 3f891fb48c8c00caf89255c45a1b41f76331d252 | [
"MIT"
] | null | null | null | class Node:
"""Class for storing linked list node."""
def __init__(self, element, next_pointer, prev_pointer=None):
self._element = element
self._next = next_pointer
        self._prev = prev_pointer
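
# A minimal usage sketch (illustrative, not part of the original module):
# builds a two-node doubly linked chain by hand and walks it forwards,
# using only the attributes defined above.
if __name__ == "__main__":
    tail = Node("B", None)
    head = Node("A", tail)
    tail._prev = head  # complete the back link by hand
    node = head
    while node is not None:
        print(node._element)
        node = node._next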
f71fdf8b98012c19be34962342155bc04ff638eb | 25,034 | py | Python | garnets.py | seanth/garnets | 3ff37bcbf095df14586cccb39a52bcf7b221c8ee | [
"MIT"
] | 1 | 2022-02-25T14:32:34.000Z | 2022-02-25T14:32:34.000Z | garnets.py | seanth/garnets | 3ff37bcbf095df14586cccb39a52bcf7b221c8ee | [
"MIT"
] | null | null | null | garnets.py | seanth/garnets | 3ff37bcbf095df14586cccb39a52bcf7b221c8ee | [
"MIT"
] | 2 | 2020-09-02T17:18:33.000Z | 2022-02-25T14:32:48.000Z | import logging
import random
from math import sqrt, log
from stellar_system import Star
from stellar_system import Planetesimal
from stellar_system import Protoplanet
from stellar_system import Protomoon
from stellar_system import Planet
from stellar_system import Orbit
from accrete import CircumstellarDisk
from constants import ECCENTRICITY_COEFF, PROTOPLANET_MASS
from constants import SUN_MASS_IN_EARTH_MASSES
from constants import EARTH_ALBEDO, GAS_GIANT_ALBEDO, FREEZING_POINT_OF_WATER, KM_PER_AU, EARTH_AVERAGE_KELVIN, EARTH_EXOSPHERE_TEMP
from constants import MOL_NITROGEN, MOL_HYDROGEN, HELIUM
from constants import ASTEROID_MASS_LIMIT
from constants import MILLIBARS_PER_BAR
from enviroment import kothari_radius, gas_life, rms_vel, est_temp, period, day_length, acceleration, gravity, min_molec_weight, orb_zone, volume_radius, volume_density, grnhouse, boiling_point, escape_vel, empirical_density, inclination, iterate_surface_temp, pressure, vol_inventory
from enviroment import PlanetType
from math import exp
from math import inf as INCREDIBLY_LARGE_NUMBER # TODO(woursler): Just use inf
from util import about, random_number, random_eccentricity
from chemtable import gases
logging.getLogger().setLevel(logging.INFO)
def random_star(): # TODO: Add seed?
# Sources
# exoplanets.co/exoplanet-correlations/host-star-mass-distribution.html
# en.wikipedia.org/wiki/Main_sequence#mediaviewer/File:HRDiagram.png
# TODO: Code up generation.
age = random.randrange(1*10**9, 6*10**9)
return Star(age=age, mass_ratio=1)
def generate_stellar_system(star, do_gases=True, do_moons=True):
protoplanets = generate_planetary_masses(
star,
0.0,
star.stellar_dust_limit,
do_moons=do_moons
)
star.planets = [
generate_planet(
p,
star,
do_gases=do_gases,
do_moons=do_moons
) for p in protoplanets
]
return star
# Create protoplanets.
def random_planetesimal(disk):
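    # Seed a protoplanet-mass body at a uniformly random orbit inside the
    # disk; ECCENTRICITY_COEFF skews the eccentricity toward near-circular.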
a = random.uniform(disk.planet_inner_bound, disk.planet_outer_bound)
e = 1.0 - (random.uniform(0.0, 1.0) ** ECCENTRICITY_COEFF)
if e > .99:
e = .99
return Planetesimal(
disk=disk,
orbit=Orbit(
a=a,
e=e,
),
dust_mass=PROTOPLANET_MASS,
gas_mass=0,
)
def generate_planetary_masses(star, inner_dust, outer_dust, do_moons=True):
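    # Inject planetesimals until the disk's dust is exhausted, or until 1000
    # consecutive injections land in bands a neighbor has already swept clean.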
disk = CircumstellarDisk(star)
planets = []
sequential_failures = 0
while disk.dust_left and sequential_failures < 10**3:
canidate = random_planetesimal(disk)
iel = canidate.inner_effect_limit
oel = canidate.outer_effect_limit
if disk.dust_available(iel, oel) > 0:
sequential_failures = 0
logging.info("Injecting planetesimal at " +
str(canidate.orbit.a) + " AU ...")
disk.accrete_dust(canidate)
if canidate.mass > PROTOPLANET_MASS:
coalesce_planetesimals(disk, planets, canidate, do_moons)
logging.info("\tsuccess.\n")
else:
logging.info("\tfailed due to large neighbor.\n")
else:
sequential_failures += 1
return planets
def convert_planetesimal_to_protoplanet(planetesimal):
return Protoplanet(
star=planetesimal.disk.star,
orbit=planetesimal.orbit,
dust_mass=planetesimal.dust_mass,
gas_mass=planetesimal.gas_mass
)
def convert_planetesimal_to_protomoon(planetesimal, planet):
print(" Capturing a protomoon.")
return Protomoon(
protoplanet=planet,
orbit=Orbit(
a=None,
e=None,
),
dust_mass=planetesimal.dust_mass,
gas_mass=planetesimal.gas_mass,
)
def coalesce_planetesimals(disk, planets, canidate, do_moons):
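    # Merge the candidate into any protoplanet whose effective orbital band
    # overlaps its own (as a captured moon or by collision); otherwise
    # promote it to a new protoplanet.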
finished = False
    # First we try to find an existing planet with an overlapping orbit.
for planet in planets:
#print("Out of order", planet, canidate)
diff = planet.orbit.a - canidate.orbit.a
if diff > 0.0:
dist1 = canidate.orbit.apoapsis * (1.0 + canidate.reduced_mass) - canidate.orbit.a
# x aphelion
dist2 = planet.orbit.a - (planet.orbit.periapsis * (1.0 - planet.reduced_mass))
else:
dist1 = canidate.orbit.a - (canidate.orbit.periapsis * (1.0 - canidate.reduced_mass))
# x perihelion
dist2 = (planet.orbit.apoapsis * (1.0 + planet.reduced_mass)) - planet.orbit.a
if abs(diff) <= abs(dist1) or abs(diff) <= abs(dist2):
# Figure out the new orbit.
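            # a = (m1 + m2) / (m1/a1 + m2/a2): the mass-weighted harmonic
            # mean of the two semi-major axes; e is then recovered from an
            # angular-momentum-style sum over both bodies.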
a = (planet.mass + canidate.mass) / \
((planet.mass / planet.orbit.a) + (canidate.mass / canidate.orbit.a))
temp = planet.mass * sqrt(planet.orbit.a) * sqrt(1.0 - (planet.orbit.e ** 2.0))
temp = temp + (canidate.mass * sqrt(canidate.orbit.a) *
sqrt(sqrt(1.0 - (canidate.orbit.e ** 2.0))))
temp = temp / ((planet.mass + canidate.mass) * sqrt(canidate.orbit.a))
temp = 1.0 - (temp ** 2.0)
if temp < 0.0 or temp >= 1.0:
temp = 0.0
e = sqrt(temp)
if do_moons:
if canidate.mass < canidate.critical_mass:
if canidate.mass * SUN_MASS_IN_EARTH_MASSES < 2.5 \
and canidate.mass * SUN_MASS_IN_EARTH_MASSES > .0001 \
and planet.mass_of_moons < planet.mass * .05 \
and planet.mass > canidate.mass:
# TODO: Remove planet.mass > canidate.mass distinction, just switch the canidate and planet!
planet.add_moon(
convert_planetesimal_to_protomoon(canidate, planet))
logging.info("Moon captured at " + str(planet.orbit.a) + " AU. Planet Mass: " + str(planet.mass * SUN_MASS_IN_EARTH_MASSES) +
" earth masses Moon Mass: " + str(canidate.mass * SUN_MASS_IN_EARTH_MASSES) + " earth masses.")
finished = True
break
else:
# TODO: Reasons.
logging.info("Did not capture potential moon at " +
str(planet.orbit.a) + " AU. Collision imminent.")
logging.info(
"Collision between two planetesimals! Computing new orbit and accumulating additional mass.")
# Accrete MORE DUST! TODO: Refactor to this.
disk.accrete_dust(planet)
planet.orbit = Orbit(a=a, e=e)
#####
planet.orbit_a = a
planet.orbit_e = e
planet.dust_mass = planet.dust_mass + canidate.dust_mass # + new_dust
planet.gas_mass = planet.gas_mass + canidate.gas_mass # + new_gas
finished = True
logging.info(
"Conglomerate is now " +
str(planet.mass * SUN_MASS_IN_EARTH_MASSES) +
" earth masses at " + str(planet.orbit.a) + " AU."
)
if not finished:
# TODO: Extra info.
logging.info("New Protoplanet at " + str(canidate.orbit.a) + "AU.")
planets.append(convert_planetesimal_to_protoplanet(canidate))
def calculate_gases(star, planet, planet_id):
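    # Estimate atmospheric composition: keep each gas from the chemtable
    # that stays gaseous at the planet's low temperature and is heavy enough
    # to be retained, then weight it by abundance, thermal escape over the
    # star's age, reactivity, and molecular weight.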
if planet.surf_pressure > 0:
amount = [0 for _ in range(len(gases))]
totamount = 0
pressure = planet.surf_pressure/MILLIBARS_PER_BAR
n = 0
for i in range(len(gases)):
yp = gases[i].boil / \
(373. * ((log((pressure) + 0.001) / -5050.5) + (1.0 / 373.)))
if ((yp >= 0 and yp < planet.low_temp) and (gases[i].weight >= planet.molec_weight)):
vrms = rms_vel(gases[i].weight, planet.exospheric_temp)
pvrms = pow(1 / (1 + vrms / planet.esc_velocity),
star.age / 1e9)
abund = gases[i].abunds # gases[i].abunde
react = 1.0
fract = 1.0
pres2 = 1.0
if gases[i].symbol == "Ar":
react = .15 * star.age/4e9
elif gases[i].symbol == "He":
abund = abund * (0.001 + (planet.gas_mass / planet.mass))
pres2 = (0.75 + pressure)
react = pow(1 / (1 + gases[i].reactivity),
star.age/2e9 * pres2)
elif (gases[i].symbol == "O" or gases[i].symbol == "O2") and star.age > 2e9 and planet.surf_temp > 270 and planet.surf_temp < 400:
pres2 = (0.89 + pressure/4)
react = pow(
1 / (1 + gases[i].reactivity), pow(star.age/2e9, 0.25) * pres2)
elif gases[i].symbol == "CO2" and star.age > 2e9 and planet.surf_temp > 270 and planet.surf_temp < 400:
pres2 = (0.75 + pressure)
react = pow(
1 / (1 + gases[i].reactivity), pow(star.age/2e9, 0.5) * pres2)
react *= 1.5
else:
pres2 = 0.75 + pressure
react = pow(
1 / (1 + gases[i].reactivity), star.age/2e9 * pres2)
fract = (1 - (planet.molec_weight / gases[i].weight))
amount[i] = abund * pvrms * react * fract
'''if ((flag_verbose & 0x4000) and
(strcmp(gases[i].symbol, "O") == 0 or
strcmp(gases[i].symbol, "N") == 0 or
strcmp(gases[i].symbol, "Ar") == 0 or
strcmp(gases[i].symbol, "He") == 0 or
strcmp(gases[i].symbol, "CO2") == 0))
fprintf (stderr, "%-5.2Lf %-3.3s, %-5.2Lf = a %-5.2Lf * p %-5.2Lf * r %-5.2Lf * p2 %-5.2Lf * f %-5.2Lf\t(%.3Lf%%)\n",
planet.mass * SUN_MASS_IN_EARTH_MASSES,
gases[i].symbol,
amount[i],
abund,
pvrms,
react,
pres2,
fract,
100.0 * (planet.gas_mass / planet.mass)
)'''
totamount += amount[i]
if (amount[i] > 0.0):
n += 1
else:
amount[i] = 0.0
if n > 0:
planet.gases = n
planet.atmosphere = []
for i in range(len(gases)):
if amount[i] > 0.0:
planet.atmosphere.append((gases[i], planet.surf_pressure * amount[i] / totamount))
'''if (flag_verbose & 0x2000)
if ((planet.atmosphere[n].num == AN_O) and
inspired_partial_pressure (planet.surf_pressure,
planet.atmosphere[n].surf_pressure)
> gases[i].max_ipp)
fprintf (stderr, "%s\t Poisoned by O2\n",
planet_id)'''
n += 1
# TODO(woursler): sort planet.atmosphere
'''if (flag_verbose & 0x0010):
fprintf (stderr, "\n%s (%5.1Lf AU) gases:\n",
planet_id, planet.orbit.a)
for (i = 0; i < planet.gases; i++)
fprintf (stderr, "%3d: %6.1Lf, %11.7Lf%%\n",
planet.atmosphere[i].num,
planet.atmosphere[i].surf_pressure,
100. * (planet.atmosphere[i].surf_pressure /
planet.surf_pressure)
)'''
def roche_limit(planet, moon):
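    # Fluid-satellite Roche limit: d = 2.44 * R_planet * (rho_p / rho_m)^(1/3),
    # in the same length units as planet.radius.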
return 2.44 * planet.radius * pow((planet.density / moon.density), (1.0 / 3.0))
def hill_sphere(planet, star):
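    # Hill sphere radius in km: a * (m / (3 * M_star))^(1/3), with the
    # semi-major axis converted from AU.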
return planet.orbit.a * KM_PER_AU * pow((planet.mass / (3.0 * star.mass_ratio)), (1.0 / 3.0))
def generate_planet(protoplanet, star, random_tilt=0, planet_id=None, do_gases=True, do_moons=True, is_moon=False):
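    # Flesh a protoplanet out into a full Planet: classify it as a gas giant
    # or a rocky world, model thermal escape of its H2/He envelope, iterate
    # the surface temperature/albedo balance, and optionally place moons.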
planet = Planet(
sun=star,
orbit=protoplanet.orbit,
dust_mass=protoplanet.dust_mass,
gas_mass=protoplanet.gas_mass,
mass=protoplanet.mass,
axial_tilt=inclination(protoplanet.orbit.a) if random_tilt else 0,
atmosphere=None,
surf_temp=0,
high_temp=0,
low_temp=0,
max_temp=0,
min_temp=0,
greenhs_rise=0,
resonant_period=False,
orbit_zone=orb_zone(star.luminosity_ratio, protoplanet.orbit.a),
orb_period=period(protoplanet.orbit.a, protoplanet.mass, star.mass_ratio)
)
planet.exospheric_temp = EARTH_EXOSPHERE_TEMP / \
((planet.orbit.a / star.r_ecosphere) ** 2)
planet.rms_velocity = rms_vel(MOL_NITROGEN, planet.exospheric_temp)
planet.core_radius = kothari_radius(
planet.dust_mass, False, planet.orbit_zone)
# Calculate the radius as a gas giant, to verify it will retain gas.
# Then if mass > Earth, it's at least 5% gas and retains He, it's
# some flavor of gas giant.
planet.density = empirical_density(
planet.mass, planet.orbit.a, star.r_ecosphere, True)
planet.radius = volume_radius(planet.mass, planet.density)
planet.surf_accel = acceleration(planet.mass, planet.radius)
planet.surf_grav = gravity(planet.surf_accel)
planet.molec_weight = min_molec_weight(planet)
if (((planet.mass * SUN_MASS_IN_EARTH_MASSES) > 1.0)
and ((planet.gas_mass / planet.mass) > 0.05)
and (min_molec_weight(planet) <= 4.0)):
if ((planet.gas_mass / planet.mass) < 0.20):
planet.type = PlanetType.SUB_SUB_GAS_GIANT
elif ((planet.mass * SUN_MASS_IN_EARTH_MASSES) < 20.0):
planet.type = PlanetType.SUB_GAS_GIANT
else:
planet.type = PlanetType.GAS_GIANT
else: # If not, it's rocky.
planet.radius = kothari_radius(planet.mass, False, planet.orbit_zone)
planet.density = volume_density(planet.mass, planet.radius)
planet.surf_accel = acceleration(planet.mass, planet.radius)
planet.surf_grav = gravity(planet.surf_accel)
if ((planet.gas_mass / planet.mass) > 0.000001):
h2_mass = planet.gas_mass * 0.85
he_mass = (planet.gas_mass - h2_mass) * 0.999
h2_loss = 0.0
he_loss = 0.0
h2_life = gas_life(MOL_HYDROGEN, planet)
he_life = gas_life(HELIUM, planet)
if (h2_life < star.age):
#math.exp with a value above 709 results in a math range error
#this is a dumb fix. STH 2021-0131
if (star.age / h2_life)>709:
h2_loss = ((1.0 - (1.0 / exp(709.0))) * h2_mass)
else:
h2_loss = ((1.0 - (1.0 / exp(star.age / h2_life))) * h2_mass)
planet.gas_mass -= h2_loss
planet.mass -= h2_loss
planet.surf_accel = acceleration(planet.mass, planet.radius)
planet.surf_grav = gravity(planet.surf_accel)
if (he_life < star.age):
he_loss = ((1.0 - (1.0 / exp(star.age / he_life))) * he_mass)
planet.gas_mass -= he_loss
planet.mass -= he_loss
planet.surf_accel = acceleration(planet.mass, planet.radius)
planet.surf_grav = gravity(planet.surf_accel)
'''if (((h2_loss + he_loss) > .000001) and (flag_verbose & 0x0080)):
fprintf(stderr, "%s\tLosing gas: H2: %5.3Lf EM, He: %5.3Lf EM\n",
planet_id,
h2_loss * SUN_MASS_IN_EARTH_MASSES, he_loss * SUN_MASS_IN_EARTH_MASSES)'''
planet.day = day_length(planet) # Modifies planet.resonant_period
planet.esc_velocity = escape_vel(planet.mass, planet.radius)
if planet.type == PlanetType.GAS_GIANT or planet.type == PlanetType.SUB_GAS_GIANT or planet.type == PlanetType.SUB_SUB_GAS_GIANT:
planet.greenhouse_effect = False
planet.volatile_gas_inventory = INCREDIBLY_LARGE_NUMBER
planet.surf_pressure = INCREDIBLY_LARGE_NUMBER
planet.boil_point = INCREDIBLY_LARGE_NUMBER
planet.surf_temp = INCREDIBLY_LARGE_NUMBER
planet.greenhs_rise = 0
planet.albedo = about(GAS_GIANT_ALBEDO, 0.1)
planet.hydrosphere = 1.0
planet.cloud_cover = 1.0
planet.ice_cover = 0.0
planet.surf_grav = gravity(planet.surf_accel)
planet.molec_weight = min_molec_weight(planet)
planet.surf_grav = INCREDIBLY_LARGE_NUMBER
planet.estimated_temp = est_temp(
star.r_ecosphere, planet.orbit.a, planet.albedo)
planet.estimated_terr_temp = est_temp(
star.r_ecosphere, planet.orbit.a, EARTH_ALBEDO)
temp = planet.estimated_terr_temp
if (temp >= FREEZING_POINT_OF_WATER) and (temp <= EARTH_AVERAGE_KELVIN + 10.) and (star.age > 2.0E9):
pass
'''if (flag_verbose & 0x8000):
fprintf (stderr, "%s\t%s (%4.2LfEM %5.3Lf By)%s with earth-like temperature (%.1Lf C, %.1Lf F, %+.1Lf C Earth).\n",
planet_id,
planet.type == PlanetType.GAS_GIANT ? "Jovian" :
planet.type == PlanetType.SUB_GAS_GIANT ? "Sub-Jovian" :
planet.type == PlanetType.SUB_SUB_GAS_GIANT ? "Gas Dwarf" :
"Big",
planet.mass * SUN_MASS_IN_EARTH_MASSES,
star.age /1.0E9,
planet.first_moon == NULL ? "" : " WITH MOON",
temp - FREEZING_POINT_OF_WATER,
32 + ((temp - FREEZING_POINT_OF_WATER) * 1.8),
temp - EARTH_AVERAGE_KELVIN)'''
else:
planet.estimated_temp = est_temp(
star.r_ecosphere, planet.orbit.a, EARTH_ALBEDO)
planet.estimated_terr_temp = est_temp(
star.r_ecosphere, planet.orbit.a, EARTH_ALBEDO)
planet.surf_grav = gravity(planet.surf_accel)
planet.molec_weight = min_molec_weight(planet)
planet.greenhouse_effect = grnhouse(star.r_ecosphere, planet.orbit.a)
planet.volatile_gas_inventory = vol_inventory(planet.mass,
planet.esc_velocity,
planet.rms_velocity,
star.mass_ratio,
planet.orbit_zone,
planet.greenhouse_effect,
(planet.gas_mass
/ planet.mass) > 0.000001)
planet.surf_pressure = pressure(planet.volatile_gas_inventory,
planet.radius,
planet.surf_grav)
if ((planet.surf_pressure == 0.0)):
planet.boil_point = 0.0
else:
planet.boil_point = boiling_point(planet.surf_pressure)
# Sets:
# planet.surf_temp
# planet.greenhs_rise
# planet.albedo
# planet.hydrosphere
# planet.cloud_cover
# planet.ice_cover
iterate_surface_temp(planet)
if (do_gases and (planet.max_temp >= FREEZING_POINT_OF_WATER) and (planet.min_temp <= planet.boil_point)):
calculate_gases(star, planet, planet_id)
# Next we assign a type to the planet.
if (planet.surf_pressure < 1.0):
if (not is_moon) and ((planet.mass * SUN_MASS_IN_EARTH_MASSES) < ASTEROID_MASS_LIMIT):
planet.type = PlanetType.ASTEROIDS
else:
planet.type = PlanetType.ROCK
elif (planet.surf_pressure > 6000.0) and (planet.molec_weight <= 2.0): # Retains Hydrogen
planet.type = PlanetType.SUB_SUB_GAS_GIANT
planet.gases = 0
planet.atmosphere = None
else:
# Atmospheres:
if (int(planet.day) == int(planet.orb_period * 24.0)) or planet.resonant_period:
planet.type = PlanetType.ONE_FACE
elif (planet.hydrosphere >= 0.95):
planet.type = PlanetType.WATER # >95% water
elif (planet.ice_cover >= 0.95):
planet.type = PlanetType.ICE # >95% ice
elif (planet.hydrosphere > 0.05):
planet.type = PlanetType.TERRESTRIAL # Terrestrial
# else <5% water
elif (planet.max_temp > planet.boil_point):
planet.type = PlanetType.VENUSIAN # Hot = Venusian
elif ((planet.gas_mass / planet.mass) > 0.0001):
# Accreted gas
planet.type = PlanetType.ICE # But no Greenhouse
planet.ice_cover = 1.0 # or liquid water
# Make it an Ice World
elif (planet.surf_pressure <= 250.0): # Thin air = Martian
planet.type = PlanetType.MARTIAN
elif (planet.surf_temp < FREEZING_POINT_OF_WATER):
planet.type = PlanetType.ICE
else:
planet.type = PlanetType.UNKNOWN # TODO(woursler): Consider throwing an error here.
'''if (flag_verbose & 0x0001)
fprintf (stderr, "%12s\tp=%4.2Lf\tm=%4.2Lf\tg=%4.2Lf\tt=%+.1Lf\t%s\t Unknown %s\n",
type_string (planet.type),
planet.surf_pressure,
planet.mass * SUN_MASS_IN_EARTH_MASSES,
planet.surf_grav,
planet.surf_temp - EARTH_AVERAGE_KELVIN,
planet_id,
((int)planet.day == (int)(planet.orb_period * 24.0) or
(planet.resonant_period)) ? "(1-Face)" : ""
)'''
if do_moons and not is_moon:
for protomoon in protoplanet.moons:
if protomoon.mass * SUN_MASS_IN_EARTH_MASSES > .000001:
protomoon.orbit = planet.orbit
# Note: adjusts density, which is used in computing the roche limit.
moon = generate_planet(
protoplanet=protomoon,
star=star,
random_tilt=random_tilt,
do_gases=do_gases,
do_moons=do_moons,
is_moon=True
)
# TODO(woursler): these should be their own subroutines.
roche_limit_r = roche_limit(planet, moon)
hill_sphere_r = hill_sphere(planet, star)
if (roche_limit_r * 3.0) < hill_sphere_r:
moon_a = random_number(
roche_limit_r * 1.5, hill_sphere_r / 2.0) / KM_PER_AU
moon_e = random_eccentricity()
moon.orbit = Orbit(a=moon_a, e=moon_e)
else:
moon.orbit = Orbit(a=0, e=0)
planet.moons.append(moon)
'''
if (flag_verbose & 0x40000):
fprintf (stderr,
" Roche limit: R = %4.2Lg, rM = %4.2Lg, rm = %4.2Lg . %.0Lf km\n"
" Hill Sphere: a = %4.2Lg, m = %4.2Lg, M = %4.2Lg . %.0Lf km\n"
"%s Moon orbit: a = %.0Lf km, e = %.0Lg\n",
planet.radius, planet.density, ptr.density,
roche_limit,
planet.orbit.a * KM_PER_AU, planet.mass * SOLAR_MASS_IN_KILOGRAMS, star.mass_ratio * SOLAR_MASS_IN_KILOGRAMS,
hill_sphere,
moon_id,
ptr.moon_a * KM_PER_AU, ptr.moon_e
)
if (flag_verbose & 0x1000):
fprintf (stderr, " %s: (%7.2LfEM) %d %4.2LgEM\n",
planet_id,
planet.mass * SUN_MASS_IN_EARTH_MASSES,
n,
ptr.mass * SUN_MASS_IN_EARTH_MASSES)'''
return planet
###
# Smoke Test
###
if __name__ == '__main__':
random.seed('earth')
print(generate_stellar_system(random_star()))
| 40.247588 | 284 | 0.533874 | import logging
import random
from math import sqrt, log
from stellar_system import Star
from stellar_system import Planetesimal
from stellar_system import Protoplanet
from stellar_system import Protomoon
from stellar_system import Planet
from stellar_system import Orbit
from accrete import CircumstellarDisk
from constants import ECCENTRICITY_COEFF, PROTOPLANET_MASS
from constants import SUN_MASS_IN_EARTH_MASSES
from constants import EARTH_ALBEDO, GAS_GIANT_ALBEDO, FREEZING_POINT_OF_WATER, KM_PER_AU, EARTH_AVERAGE_KELVIN, EARTH_EXOSPHERE_TEMP
from constants import MOL_NITROGEN, MOL_HYDROGEN, HELIUM
from constants import ASTEROID_MASS_LIMIT
from constants import MILLIBARS_PER_BAR
from enviroment import kothari_radius, gas_life, rms_vel, est_temp, period, day_length, acceleration, gravity, min_molec_weight, orb_zone, volume_radius, volume_density, grnhouse, boiling_point, escape_vel, empirical_density, inclination, iterate_surface_temp, pressure, vol_inventory
from enviroment import PlanetType
from math import exp
from math import inf as INCREDIBLY_LARGE_NUMBER
from util import about, random_number, random_eccentricity
from chemtable import gases
logging.getLogger().setLevel(logging.INFO)
def random_star():
    age = random.randrange(1*10**9, 6*10**9)
return Star(age=age, mass_ratio=1)
def generate_stellar_system(star, do_gases=True, do_moons=True):
protoplanets = generate_planetary_masses(
star,
0.0,
star.stellar_dust_limit,
do_moons=do_moons
)
star.planets = [
generate_planet(
p,
star,
do_gases=do_gases,
do_moons=do_moons
) for p in protoplanets
]
return star
def random_planetesimal(disk):
a = random.uniform(disk.planet_inner_bound, disk.planet_outer_bound)
e = 1.0 - (random.uniform(0.0, 1.0) ** ECCENTRICITY_COEFF)
if e > .99:
e = .99
return Planetesimal(
disk=disk,
orbit=Orbit(
a=a,
e=e,
),
dust_mass=PROTOPLANET_MASS,
gas_mass=0,
)
def generate_planetary_masses(star, inner_dust, outer_dust, do_moons=True):
disk = CircumstellarDisk(star)
planets = []
sequential_failures = 0
while disk.dust_left and sequential_failures < 10**3:
canidate = random_planetesimal(disk)
iel = canidate.inner_effect_limit
oel = canidate.outer_effect_limit
if disk.dust_available(iel, oel) > 0:
sequential_failures = 0
logging.info("Injecting planetesimal at " +
str(canidate.orbit.a) + " AU ...")
disk.accrete_dust(canidate)
if canidate.mass > PROTOPLANET_MASS:
coalesce_planetesimals(disk, planets, canidate, do_moons)
logging.info("\tsuccess.\n")
else:
logging.info("\tfailed due to large neighbor.\n")
else:
sequential_failures += 1
return planets
def convert_planetesimal_to_protoplanet(planetesimal):
return Protoplanet(
star=planetesimal.disk.star,
orbit=planetesimal.orbit,
dust_mass=planetesimal.dust_mass,
gas_mass=planetesimal.gas_mass
)
def convert_planetesimal_to_protomoon(planetesimal, planet):
print(" Capturing a protomoon.")
return Protomoon(
protoplanet=planet,
orbit=Orbit(
a=None,
e=None,
),
dust_mass=planetesimal.dust_mass,
gas_mass=planetesimal.gas_mass,
)
def coalesce_planetesimals(disk, planets, canidate, do_moons):
finished = False
for planet in planets:
diff = planet.orbit.a - canidate.orbit.a
if diff > 0.0:
dist1 = canidate.orbit.apoapsis * (1.0 + canidate.reduced_mass) - canidate.orbit.a
dist2 = planet.orbit.a - (planet.orbit.periapsis * (1.0 - planet.reduced_mass))
else:
dist1 = canidate.orbit.a - (canidate.orbit.periapsis * (1.0 - canidate.reduced_mass))
dist2 = (planet.orbit.apoapsis * (1.0 + planet.reduced_mass)) - planet.orbit.a
if abs(diff) <= abs(dist1) or abs(diff) <= abs(dist2):
a = (planet.mass + canidate.mass) / \
((planet.mass / planet.orbit.a) + (canidate.mass / canidate.orbit.a))
temp = planet.mass * sqrt(planet.orbit.a) * sqrt(1.0 - (planet.orbit.e ** 2.0))
temp = temp + (canidate.mass * sqrt(canidate.orbit.a) *
sqrt(sqrt(1.0 - (canidate.orbit.e ** 2.0))))
temp = temp / ((planet.mass + canidate.mass) * sqrt(canidate.orbit.a))
temp = 1.0 - (temp ** 2.0)
if temp < 0.0 or temp >= 1.0:
temp = 0.0
e = sqrt(temp)
if do_moons:
if canidate.mass < canidate.critical_mass:
if canidate.mass * SUN_MASS_IN_EARTH_MASSES < 2.5 \
and canidate.mass * SUN_MASS_IN_EARTH_MASSES > .0001 \
and planet.mass_of_moons < planet.mass * .05 \
and planet.mass > canidate.mass:
planet.add_moon(
convert_planetesimal_to_protomoon(canidate, planet))
logging.info("Moon captured at " + str(planet.orbit.a) + " AU. Planet Mass: " + str(planet.mass * SUN_MASS_IN_EARTH_MASSES) +
" earth masses Moon Mass: " + str(canidate.mass * SUN_MASS_IN_EARTH_MASSES) + " earth masses.")
finished = True
break
else:
logging.info("Did not capture potential moon at " +
str(planet.orbit.a) + " AU. Collision imminent.")
logging.info(
"Collision between two planetesimals! Computing new orbit and accumulating additional mass.")
disk.accrete_dust(planet)
planet.orbit = Orbit(a=a, e=e)
planet.orbit_a = a
planet.orbit_e = e
planet.dust_mass = planet.dust_mass + canidate.dust_mass
planet.gas_mass = planet.gas_mass + canidate.gas_mass
finished = True
logging.info(
"Conglomerate is now " +
str(planet.mass * SUN_MASS_IN_EARTH_MASSES) +
" earth masses at " + str(planet.orbit.a) + " AU."
)
if not finished:
logging.info("New Protoplanet at " + str(canidate.orbit.a) + "AU.")
planets.append(convert_planetesimal_to_protoplanet(canidate))
def calculate_gases(star, planet, planet_id):
if planet.surf_pressure > 0:
amount = [0 for _ in range(len(gases))]
totamount = 0
pressure = planet.surf_pressure/MILLIBARS_PER_BAR
n = 0
for i in range(len(gases)):
yp = gases[i].boil / \
(373. * ((log((pressure) + 0.001) / -5050.5) + (1.0 / 373.)))
if ((yp >= 0 and yp < planet.low_temp) and (gases[i].weight >= planet.molec_weight)):
vrms = rms_vel(gases[i].weight, planet.exospheric_temp)
pvrms = pow(1 / (1 + vrms / planet.esc_velocity),
star.age / 1e9)
abund = gases[i].abunds
react = 1.0
fract = 1.0
pres2 = 1.0
if gases[i].symbol == "Ar":
react = .15 * star.age/4e9
elif gases[i].symbol == "He":
abund = abund * (0.001 + (planet.gas_mass / planet.mass))
pres2 = (0.75 + pressure)
react = pow(1 / (1 + gases[i].reactivity),
star.age/2e9 * pres2)
elif (gases[i].symbol == "O" or gases[i].symbol == "O2") and star.age > 2e9 and planet.surf_temp > 270 and planet.surf_temp < 400:
pres2 = (0.89 + pressure/4)
react = pow(
1 / (1 + gases[i].reactivity), pow(star.age/2e9, 0.25) * pres2)
elif gases[i].symbol == "CO2" and star.age > 2e9 and planet.surf_temp > 270 and planet.surf_temp < 400:
pres2 = (0.75 + pressure)
react = pow(
1 / (1 + gases[i].reactivity), pow(star.age/2e9, 0.5) * pres2)
react *= 1.5
else:
pres2 = 0.75 + pressure
react = pow(
1 / (1 + gases[i].reactivity), star.age/2e9 * pres2)
fract = (1 - (planet.molec_weight / gases[i].weight))
amount[i] = abund * pvrms * react * fract
totamount += amount[i]
if (amount[i] > 0.0):
n += 1
else:
amount[i] = 0.0
if n > 0:
planet.gases = n
planet.atmosphere = []
for i in range(len(gases)):
if amount[i] > 0.0:
planet.atmosphere.append((gases[i], planet.surf_pressure * amount[i] / totamount))
n += 1
def roche_limit(planet, moon):
return 2.44 * planet.radius * pow((planet.density / moon.density), (1.0 / 3.0))
def hill_sphere(planet, star):
return planet.orbit.a * KM_PER_AU * pow((planet.mass / (3.0 * star.mass_ratio)), (1.0 / 3.0))
def generate_planet(protoplanet, star, random_tilt=0, planet_id=None, do_gases=True, do_moons=True, is_moon=False):
planet = Planet(
sun=star,
orbit=protoplanet.orbit,
dust_mass=protoplanet.dust_mass,
gas_mass=protoplanet.gas_mass,
mass=protoplanet.mass,
axial_tilt=inclination(protoplanet.orbit.a) if random_tilt else 0,
atmosphere=None,
surf_temp=0,
high_temp=0,
low_temp=0,
max_temp=0,
min_temp=0,
greenhs_rise=0,
resonant_period=False,
orbit_zone=orb_zone(star.luminosity_ratio, protoplanet.orbit.a),
orb_period=period(protoplanet.orbit.a, protoplanet.mass, star.mass_ratio)
)
planet.exospheric_temp = EARTH_EXOSPHERE_TEMP / \
((planet.orbit.a / star.r_ecosphere) ** 2)
planet.rms_velocity = rms_vel(MOL_NITROGEN, planet.exospheric_temp)
planet.core_radius = kothari_radius(
planet.dust_mass, False, planet.orbit_zone)
planet.density = empirical_density(
planet.mass, planet.orbit.a, star.r_ecosphere, True)
planet.radius = volume_radius(planet.mass, planet.density)
planet.surf_accel = acceleration(planet.mass, planet.radius)
planet.surf_grav = gravity(planet.surf_accel)
planet.molec_weight = min_molec_weight(planet)
if (((planet.mass * SUN_MASS_IN_EARTH_MASSES) > 1.0)
and ((planet.gas_mass / planet.mass) > 0.05)
and (min_molec_weight(planet) <= 4.0)):
if ((planet.gas_mass / planet.mass) < 0.20):
planet.type = PlanetType.SUB_SUB_GAS_GIANT
elif ((planet.mass * SUN_MASS_IN_EARTH_MASSES) < 20.0):
planet.type = PlanetType.SUB_GAS_GIANT
else:
planet.type = PlanetType.GAS_GIANT
else:
planet.radius = kothari_radius(planet.mass, False, planet.orbit_zone)
planet.density = volume_density(planet.mass, planet.radius)
planet.surf_accel = acceleration(planet.mass, planet.radius)
planet.surf_grav = gravity(planet.surf_accel)
if ((planet.gas_mass / planet.mass) > 0.000001):
h2_mass = planet.gas_mass * 0.85
he_mass = (planet.gas_mass - h2_mass) * 0.999
h2_loss = 0.0
he_loss = 0.0
h2_life = gas_life(MOL_HYDROGEN, planet)
he_life = gas_life(HELIUM, planet)
if (h2_life < star.age):
#math.exp with a value above 709 results in a math range error
#this is a dumb fix. STH 2021-0131
if (star.age / h2_life)>709:
h2_loss = ((1.0 - (1.0 / exp(709.0))) * h2_mass)
else:
h2_loss = ((1.0 - (1.0 / exp(star.age / h2_life))) * h2_mass)
planet.gas_mass -= h2_loss
planet.mass -= h2_loss
planet.surf_accel = acceleration(planet.mass, planet.radius)
planet.surf_grav = gravity(planet.surf_accel)
if (he_life < star.age):
he_loss = ((1.0 - (1.0 / exp(star.age / he_life))) * he_mass)
planet.gas_mass -= he_loss
planet.mass -= he_loss
planet.surf_accel = acceleration(planet.mass, planet.radius)
planet.surf_grav = gravity(planet.surf_accel)
'''if (((h2_loss + he_loss) > .000001) and (flag_verbose & 0x0080)):
fprintf(stderr, "%s\tLosing gas: H2: %5.3Lf EM, He: %5.3Lf EM\n",
planet_id,
h2_loss * SUN_MASS_IN_EARTH_MASSES, he_loss * SUN_MASS_IN_EARTH_MASSES)'''
planet.day = day_length(planet) # Modifies planet.resonant_period
planet.esc_velocity = escape_vel(planet.mass, planet.radius)
if planet.type == PlanetType.GAS_GIANT or planet.type == PlanetType.SUB_GAS_GIANT or planet.type == PlanetType.SUB_SUB_GAS_GIANT:
planet.greenhouse_effect = False
planet.volatile_gas_inventory = INCREDIBLY_LARGE_NUMBER
planet.surf_pressure = INCREDIBLY_LARGE_NUMBER
planet.boil_point = INCREDIBLY_LARGE_NUMBER
planet.surf_temp = INCREDIBLY_LARGE_NUMBER
planet.greenhs_rise = 0
planet.albedo = about(GAS_GIANT_ALBEDO, 0.1)
planet.hydrosphere = 1.0
planet.cloud_cover = 1.0
planet.ice_cover = 0.0
planet.surf_grav = gravity(planet.surf_accel)
planet.molec_weight = min_molec_weight(planet)
planet.surf_grav = INCREDIBLY_LARGE_NUMBER
planet.estimated_temp = est_temp(
star.r_ecosphere, planet.orbit.a, planet.albedo)
planet.estimated_terr_temp = est_temp(
star.r_ecosphere, planet.orbit.a, EARTH_ALBEDO)
temp = planet.estimated_terr_temp
if (temp >= FREEZING_POINT_OF_WATER) and (temp <= EARTH_AVERAGE_KELVIN + 10.) and (star.age > 2.0E9):
pass
else:
planet.estimated_temp = est_temp(
star.r_ecosphere, planet.orbit.a, EARTH_ALBEDO)
planet.estimated_terr_temp = est_temp(
star.r_ecosphere, planet.orbit.a, EARTH_ALBEDO)
planet.surf_grav = gravity(planet.surf_accel)
planet.molec_weight = min_molec_weight(planet)
planet.greenhouse_effect = grnhouse(star.r_ecosphere, planet.orbit.a)
planet.volatile_gas_inventory = vol_inventory(planet.mass,
planet.esc_velocity,
planet.rms_velocity,
star.mass_ratio,
planet.orbit_zone,
planet.greenhouse_effect,
(planet.gas_mass
/ planet.mass) > 0.000001)
planet.surf_pressure = pressure(planet.volatile_gas_inventory,
planet.radius,
planet.surf_grav)
if ((planet.surf_pressure == 0.0)):
planet.boil_point = 0.0
else:
planet.boil_point = boiling_point(planet.surf_pressure)
# Sets:
# planet.surf_temp
# planet.greenhs_rise
# planet.albedo
# planet.hydrosphere
# planet.cloud_cover
# planet.ice_cover
iterate_surface_temp(planet)
if (do_gases and (planet.max_temp >= FREEZING_POINT_OF_WATER) and (planet.min_temp <= planet.boil_point)):
calculate_gases(star, planet, planet_id)
# Next we assign a type to the planet.
if (planet.surf_pressure < 1.0):
if (not is_moon) and ((planet.mass * SUN_MASS_IN_EARTH_MASSES) < ASTEROID_MASS_LIMIT):
planet.type = PlanetType.ASTEROIDS
else:
planet.type = PlanetType.ROCK
elif (planet.surf_pressure > 6000.0) and (planet.molec_weight <= 2.0): # Retains Hydrogen
planet.type = PlanetType.SUB_SUB_GAS_GIANT
planet.gases = 0
planet.atmosphere = None
else:
# Atmospheres:
if (int(planet.day) == int(planet.orb_period * 24.0)) or planet.resonant_period:
planet.type = PlanetType.ONE_FACE
elif (planet.hydrosphere >= 0.95):
planet.type = PlanetType.WATER # >95% water
elif (planet.ice_cover >= 0.95):
planet.type = PlanetType.ICE # >95% ice
elif (planet.hydrosphere > 0.05):
planet.type = PlanetType.TERRESTRIAL # Terrestrial
# else <5% water
elif (planet.max_temp > planet.boil_point):
planet.type = PlanetType.VENUSIAN # Hot = Venusian
elif ((planet.gas_mass / planet.mass) > 0.0001):
# Accreted gas
planet.type = PlanetType.ICE # But no Greenhouse
planet.ice_cover = 1.0 # or liquid water
# Make it an Ice World
elif (planet.surf_pressure <= 250.0): # Thin air = Martian
planet.type = PlanetType.MARTIAN
elif (planet.surf_temp < FREEZING_POINT_OF_WATER):
planet.type = PlanetType.ICE
else:
planet.type = PlanetType.UNKNOWN # TODO(woursler): Consider throwing an error here.
'''if (flag_verbose & 0x0001)
fprintf (stderr, "%12s\tp=%4.2Lf\tm=%4.2Lf\tg=%4.2Lf\tt=%+.1Lf\t%s\t Unknown %s\n",
type_string (planet.type),
planet.surf_pressure,
planet.mass * SUN_MASS_IN_EARTH_MASSES,
planet.surf_grav,
planet.surf_temp - EARTH_AVERAGE_KELVIN,
planet_id,
((int)planet.day == (int)(planet.orb_period * 24.0) or
(planet.resonant_period)) ? "(1-Face)" : ""
)'''
if do_moons and not is_moon:
for protomoon in protoplanet.moons:
if protomoon.mass * SUN_MASS_IN_EARTH_MASSES > .000001:
protomoon.orbit = planet.orbit
# Note: adjusts density, which is used in computing the roche limit.
moon = generate_planet(
protoplanet=protomoon,
star=star,
random_tilt=random_tilt,
do_gases=do_gases,
do_moons=do_moons,
is_moon=True
)
# TODO(woursler): these should be their own subroutines.
roche_limit_r = roche_limit(planet, moon)
hill_sphere_r = hill_sphere(planet, star)
if (roche_limit_r * 3.0) < hill_sphere_r:
moon_a = random_number(
roche_limit_r * 1.5, hill_sphere_r / 2.0) / KM_PER_AU
moon_e = random_eccentricity()
moon.orbit = Orbit(a=moon_a, e=moon_e)
else:
moon.orbit = Orbit(a=0, e=0)
planet.moons.append(moon)
return planet
###
# Smoke Test
###
if __name__ == '__main__':
random.seed('earth')
print(generate_stellar_system(random_star()))
| true | true |
f71fe0dc7aac7d9afad4c1f08c6e9b94fde74e57 | 4,160 | py | Python | vivisect/extensions/example_gui_extension.py | bat-serjo/vivisect | f60934a2c8c51c7acdba52a65756e717a108a440 | [
"ECL-2.0",
"Apache-2.0"
] | null | null | null | vivisect/extensions/example_gui_extension.py | bat-serjo/vivisect | f60934a2c8c51c7acdba52a65756e717a108a440 | [
"ECL-2.0",
"Apache-2.0"
] | null | null | null | vivisect/extensions/example_gui_extension.py | bat-serjo/vivisect | f60934a2c8c51c7acdba52a65756e717a108a440 | [
"ECL-2.0",
"Apache-2.0"
] | null | null | null | try:
from PyQt5.QtWidgets import QToolBar, QLabel, QPushButton, QTextEdit, QWidget, QInputDialog
from PyQt5 import QtCore
except ImportError:
from PyQt4.QtGui import QToolBar, QLabel, QPushButton, QTextEdit, QWidget, QInputDialog
from PyQt4 import QtCore
from vqt.main import idlethread
from vqt.basics import VBox
from vqt.common import ACT
'''
This is an example of a vivisect GUI extension module.
Set the environment variable VIV_EXT_PATH to point at a
directory full of python modules such as this to extend
and implement your own vivisect features.
The extension should be a python module, either in the
form of a .py file or a directory with a __init__.py
file. Either way, the module will be loaded into
memory and the "vivExtension" function called.
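
A minimal extension module (just a sketch) need only define a
"vivExtension" function, for example:

    def vivExtension(vw, vwgui):
        vw.vprint('hello from my extension!')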
'''
class ExampleToolbar(QToolBar):
def __init__(self, vw, vwgui):
self.vw = vw
self.vwgui = vwgui
QToolBar.__init__(self, parent=vwgui)
# Add a label to the toolbar
self.addWidget( QLabel('Example Toolbar:', parent=self) )
# Add an action button to the toolbar
self.addAction('ONE', self.doOne)
def doOne(self):
self.vw.vprint('did one!')
class ExampleWindow(QWidget):
def __init__(self, vw, vwgui):
self.vw = vw
self.vwgui = vwgui
QWidget.__init__(self, parent=vwgui)
# Set the window title
self.setWindowTitle('Example Window!')
# Add a Button and a Text Edit object in a basic VBox layout
button = QPushButton('My Button!', parent=self)
textedit = QTextEdit('WOOT! Some text!', parent=self)
self.setLayout( VBox(button, textedit) )
def vprint(vw, s, *args, **kwargs):
vw.vprint(s % args)
print(s % args)
def ctxMenuHook(vw, va, expr, menu, parent, nav):
'''
Context Menu handler (adds options as we wish)
'''
try:
if va == 0x41414141:
menu.addAction('WAT?', ACT(vw.vprint, "We're at AAAA!"))
menu.addAction('bookmark (B)', ACT(vw.getVivGui().addBookmark, va))
menu.addAction('YEEE HAH', ACT(vw.vprint, "YEE HAH %x %r %r %r %r" % (va, expr, menu, parent, nav)))
menu.addAction('YEEE HAH1', ACT(vprint, vw, "YEE HAH %x %r %r %r %r", va, expr, menu, parent, nav))
except Exception as e:
import traceback
traceback.print_exc()
class Crap:
'''
    This is a helper class for storing vw and vwgui and "doing the thing".
    Currently Vivisect's hotkeys are tied to the individual GUI widgets, so
    vw and vwgui are not available when the "thing" is called.
'''
def __init__(self, vw, vwgui):
self.vw = vw
self.vwgui = vwgui
def thing(self):
vprint(self.vw, "Blah Blah Blah")
def printUserInput(self):
# ok is whether the "OK" button was pressed, utext is the user text
utext, ok = QInputDialog.getText(self.vwgui, 'Enter...', 'User Text')
vprint(self.vw, '%r: %r', ok, utext)
@idlethread
def vivExtension(vw, vwgui):
# Create a toolbar and add it to the GUI
toolbar = ExampleToolbar(vw, vwgui)
vwgui.addToolBar(QtCore.Qt.TopToolBarArea, toolbar)
# Create a new Vivisect Dock Window (based on a QWidget)
window = ExampleWindow(vw, vwgui)
d = vwgui.vqDockWidget(window, floating=True)
d.resize(300,200)
# Add a menu item
vwgui.vqAddMenuField('&Example.&FooBar.&PrintDiscoveredStats', vw.printDiscoveredStats, ())
# hook context menu
vw.addCtxMenuHook('example', ctxMenuHook)
# add HotKeyTargets and HotKeys
tempmod = Crap(vw, vwgui)
vwgui.addHotKey('ctrl+p', 'file:hackme')
vwgui.addHotKeyTarget('file:hackme', tempmod.thing)
# Popups/Dialogs - add a menu entry to ask for input and print the output
vwgui.vqAddMenuField("&Example.&FooBar.&PrintUserInput", tempmod.printUserInput, ())
# get Dock Windows by name
for w, vqDW in vwgui.vqGetDockWidgetsByName('viv'):
vprint(vw, "Window: %r DockWidget: %r (%r)", w, vqDW, w.getEnviNavName())
| 31.755725 | 111 | 0.665625 | try:
from PyQt5.QtWidgets import QToolBar, QLabel, QPushButton, QTextEdit, QWidget, QInputDialog
from PyQt5 import QtCore
except ImportError:
from PyQt4.QtGui import QToolBar, QLabel, QPushButton, QTextEdit, QWidget, QInputDialog
from PyQt4 import QtCore
from vqt.main import idlethread
from vqt.basics import VBox
from vqt.common import ACT
class ExampleToolbar(QToolBar):
def __init__(self, vw, vwgui):
self.vw = vw
self.vwgui = vwgui
QToolBar.__init__(self, parent=vwgui)
self.addWidget( QLabel('Example Toolbar:', parent=self) )
self.addAction('ONE', self.doOne)
def doOne(self):
self.vw.vprint('did one!')
class ExampleWindow(QWidget):
def __init__(self, vw, vwgui):
self.vw = vw
self.vwgui = vwgui
QWidget.__init__(self, parent=vwgui)
self.setWindowTitle('Example Window!')
button = QPushButton('My Button!', parent=self)
textedit = QTextEdit('WOOT! Some text!', parent=self)
self.setLayout( VBox(button, textedit) )
def vprint(vw, s, *args, **kwargs):
vw.vprint(s % args)
print(s % args)
def ctxMenuHook(vw, va, expr, menu, parent, nav):
try:
if va == 0x41414141:
menu.addAction('WAT?', ACT(vw.vprint, "We're at AAAA!"))
menu.addAction('bookmark (B)', ACT(vw.getVivGui().addBookmark, va))
menu.addAction('YEEE HAH', ACT(vw.vprint, "YEE HAH %x %r %r %r %r" % (va, expr, menu, parent, nav)))
menu.addAction('YEEE HAH1', ACT(vprint, vw, "YEE HAH %x %r %r %r %r", va, expr, menu, parent, nav))
except Exception as e:
import traceback
traceback.print_exc()
class Crap:
def __init__(self, vw, vwgui):
self.vw = vw
self.vwgui = vwgui
def thing(self):
vprint(self.vw, "Blah Blah Blah")
def printUserInput(self):
# ok is whether the "OK" button was pressed, utext is the user text
utext, ok = QInputDialog.getText(self.vwgui, 'Enter...', 'User Text')
vprint(self.vw, '%r: %r', ok, utext)
@idlethread
def vivExtension(vw, vwgui):
# Create a toolbar and add it to the GUI
toolbar = ExampleToolbar(vw, vwgui)
vwgui.addToolBar(QtCore.Qt.TopToolBarArea, toolbar)
# Create a new Vivisect Dock Window (based on a QWidget)
window = ExampleWindow(vw, vwgui)
d = vwgui.vqDockWidget(window, floating=True)
d.resize(300,200)
# Add a menu item
vwgui.vqAddMenuField('&Example.&FooBar.&PrintDiscoveredStats', vw.printDiscoveredStats, ())
# hook context menu
vw.addCtxMenuHook('example', ctxMenuHook)
# add HotKeyTargets and HotKeys
tempmod = Crap(vw, vwgui)
vwgui.addHotKey('ctrl+p', 'file:hackme')
vwgui.addHotKeyTarget('file:hackme', tempmod.thing)
# Popups/Dialogs - add a menu entry to ask for input and print the output
vwgui.vqAddMenuField("&Example.&FooBar.&PrintUserInput", tempmod.printUserInput, ())
# get Dock Windows by name
for w, vqDW in vwgui.vqGetDockWidgetsByName('viv'):
vprint(vw, "Window: %r DockWidget: %r (%r)", w, vqDW, w.getEnviNavName())
| true | true |
f71fe1a680e38a876089f6c92424e564085015cd | 728 | py | Python | tests/test_bit.py | robertchase/aiomysql | 80236fca02c70cd693cb02112646ca14f2c7e2be | [
"MIT"
] | null | null | null | tests/test_bit.py | robertchase/aiomysql | 80236fca02c70cd693cb02112646ca14f2c7e2be | [
"MIT"
] | null | null | null | tests/test_bit.py | robertchase/aiomysql | 80236fca02c70cd693cb02112646ca14f2c7e2be | [
"MIT"
] | 1 | 2021-04-30T14:11:42.000Z | 2021-04-30T14:11:42.000Z | """test Bit operations"""
import pytest
from aiomysql.bit import Bit
@pytest.mark.parametrize(
'length, value, expected', (
(10, None, ValueError),
(10, 1, 1),
(10, '123', TypeError),
(10, '0', 0),
(10, '1', 1),
(10, '010', 2),
(10, '1010', 10),
(10, '01000000000', ValueError),
),
)
def test_bit(length, value, expected):
"""test different inputs"""
bit = Bit(length)
if expected in (ValueError, TypeError):
with pytest.raises(expected):
bit(value)
else:
assert bit(value).value == expected
def test_as_binary():
"""verify binary conversion"""
bit = Bit(5)(10)
assert bit.as_binary() == '1010'
| 22.060606 | 43 | 0.54533 | import pytest
from aiomysql.bit import Bit
@pytest.mark.parametrize(
'length, value, expected', (
(10, None, ValueError),
(10, 1, 1),
(10, '123', TypeError),
(10, '0', 0),
(10, '1', 1),
(10, '010', 2),
(10, '1010', 10),
(10, '01000000000', ValueError),
),
)
def test_bit(length, value, expected):
bit = Bit(length)
if expected in (ValueError, TypeError):
with pytest.raises(expected):
bit(value)
else:
assert bit(value).value == expected
def test_as_binary():
bit = Bit(5)(10)
assert bit.as_binary() == '1010'
| true | true |
f71fe27393e1a3a75142186240ffddd9c0d963a4 | 15,812 | py | Python | contrib/experimental/input/osx.py | bitcraft/pyglet | 144257c365ca85528c6a4c5bed8141e683d7a9b6 | [
"BSD-3-Clause"
] | 15 | 2015-01-21T12:29:01.000Z | 2018-12-09T09:17:33.000Z | contrib/experimental/input/osx.py | bitcraft/pyglet | 144257c365ca85528c6a4c5bed8141e683d7a9b6 | [
"BSD-3-Clause"
] | null | null | null | contrib/experimental/input/osx.py | bitcraft/pyglet | 144257c365ca85528c6a4c5bed8141e683d7a9b6 | [
"BSD-3-Clause"
] | 9 | 2015-12-12T09:12:46.000Z | 2021-12-26T13:29:14.000Z | #!/usr/bin/env python
"""
"""
__docformat__ = 'restructuredtext'
__version__ = '$Id: $'
import ctypes
import pyglet
from pyglet.libs.darwin import carbon, _oscheck, create_cfstring
from pyglet.libs.darwin.constants import *
import input
import usage
# non-broken c_void_p
void_p = ctypes.POINTER(ctypes.c_int)
class CFUUIDBytes(ctypes.Structure):
_fields_ = [('byte%d' % i, ctypes.c_uint8) for i in range(16)]
mach_port_t = void_p
io_iterator_t = void_p
kern_return_t = ctypes.c_int
IOReturn = ctypes.c_uint
CFDictionaryRef = void_p
CFMutableDictionaryRef = void_p
CFArrayRef = void_p
CFUUIDRef = ctypes.POINTER(CFUUIDBytes)
AbsoluteTime = ctypes.c_double
HRESULT = ctypes.c_int
REFIID = CFUUIDBytes
IOHIDElementType = ctypes.c_int
kIOHIDElementTypeInput_Misc = 1
kIOHIDElementTypeInput_Button = 2
kIOHIDElementTypeInput_Axis = 3
kIOHIDElementTypeInput_ScanCodes = 4
kIOHIDElementTypeOutput = 129
kIOHIDElementTypeFeature = 257
kIOHIDElementTypeCollection = 513
IOHIDElementCookie = ctypes.c_void_p
MACH_PORT_NULL = 0
kIOHIDDeviceKey = "IOHIDDevice"
kIOServicePlane = "IOService"
kIOHIDProductIDKey = "ProductID"
kCFNumberIntType = 9
kIOHIDOptionsTypeSeizeDevice = 1
kIOReturnExclusiveAccess = 0xe00002c5
carbon.CFUUIDGetConstantUUIDWithBytes.restype = CFUUIDRef
kIOHIDDeviceUserClientTypeID = carbon.CFUUIDGetConstantUUIDWithBytes(None,
0xFA, 0x12,
0xFA, 0x38,
0x6F, 0x1A,
0x11, 0xD4,
0xBA, 0x0C,
0x00, 0x05,
0x02, 0x8F,
0x18, 0xD5)
kIOCFPlugInInterfaceID = carbon.CFUUIDGetConstantUUIDWithBytes(None,
0xC2, 0x44, 0xE8,
0x58, 0x10, 0x9C,
0x11, 0xD4,
0x91, 0xD4, 0x00,
0x50, 0xE4, 0xC6,
0x42, 0x6F)
kIOHIDDeviceInterfaceID = carbon.CFUUIDGetConstantUUIDWithBytes(None,
0x78, 0xBD,
0x42, 0x0C,
0x6F, 0x14,
0x11, 0xD4,
0x94, 0x74,
0x00, 0x05,
0x02, 0x8F,
0x18, 0xD5)
class IOHIDEventStruct(ctypes.Structure):
_fields_ = (
('type', IOHIDElementType),
('elementCookie', IOHIDElementCookie),
('value', ctypes.c_int32),
('timestamp', AbsoluteTime),
('longValueSize', ctypes.c_uint32),
('longValue', ctypes.c_void_p)
)
Self = ctypes.c_void_p
class IUnknown(ctypes.Structure):
_fields_ = (
('_reserved', ctypes.c_void_p),
('QueryInterface',
ctypes.CFUNCTYPE(HRESULT, Self, REFIID, ctypes.c_void_p)),
('AddRef',
ctypes.CFUNCTYPE(ctypes.c_ulong, Self)),
('Release',
ctypes.CFUNCTYPE(ctypes.c_ulong, Self)),
)
# Most of these function prototypes are not filled in yet because I haven't
# bothered.
class IOHIDQueueInterface(ctypes.Structure):
_fields_ = IUnknown._fields_ + (
('createAsyncEventSource', ctypes.c_void_p),
('getAsyncEventSource', ctypes.c_void_p),
('createAsyncPort', ctypes.c_void_p),
('getAsyncPort', ctypes.c_void_p),
('create', ctypes.CFUNCTYPE(IOReturn,
Self, ctypes.c_uint32, ctypes.c_uint32)),
('dispose', ctypes.CFUNCTYPE(IOReturn,
Self)),
('addElement', ctypes.CFUNCTYPE(IOReturn,
Self, IOHIDElementCookie)),
('removeElement', ctypes.c_void_p),
('hasElement', ctypes.c_void_p),
('start', ctypes.CFUNCTYPE(IOReturn,
Self)),
('stop', ctypes.CFUNCTYPE(IOReturn,
Self)),
('getNextEvent', ctypes.CFUNCTYPE(IOReturn,
Self,
ctypes.POINTER(IOHIDEventStruct),
AbsoluteTime,
ctypes.c_uint32)),
('setEventCallout', ctypes.c_void_p),
('getEventCallout', ctypes.c_void_p),
)
class IOHIDDeviceInterface(ctypes.Structure):
_fields_ = IUnknown._fields_ + (
('createAsyncEventSource', ctypes.c_void_p),
('getAsyncEventSource', ctypes.c_void_p),
('createAsyncPort', ctypes.c_void_p),
('getAsyncPort', ctypes.c_void_p),
('open', ctypes.CFUNCTYPE(IOReturn,
Self, ctypes.c_uint32)),
('close', ctypes.CFUNCTYPE(IOReturn,
Self)),
('setRemovalCallback', ctypes.c_void_p),
('getElementValue', ctypes.CFUNCTYPE(IOReturn,
Self, IOHIDElementCookie,
ctypes.POINTER(IOHIDEventStruct))),
('setElementValue', ctypes.c_void_p),
('queryElementValue', ctypes.c_void_p),
('startAllQueues', ctypes.c_void_p),
('stopAllQueues', ctypes.c_void_p),
('allocQueue', ctypes.CFUNCTYPE(
ctypes.POINTER(ctypes.POINTER(IOHIDQueueInterface)),
Self)),
('allocOutputTransaction', ctypes.c_void_p),
# 1.2.1 (10.2.3)
('setReport', ctypes.c_void_p),
('getReport', ctypes.c_void_p),
# 1.2.2 (10.3)
('copyMatchingElements', ctypes.CFUNCTYPE(IOReturn,
Self, CFDictionaryRef,
ctypes.POINTER(CFArrayRef))),
('setInterruptReportHandlerCallback', ctypes.c_void_p),
)
def get_master_port():
master_port = mach_port_t()
_oscheck(
carbon.IOMasterPort(MACH_PORT_NULL, ctypes.byref(master_port))
)
return master_port
def get_matching_dictionary():
carbon.IOServiceMatching.restype = CFMutableDictionaryRef
matching_dictionary = carbon.IOServiceMatching(kIOHIDDeviceKey)
return matching_dictionary
def get_existing_devices(master_port, matching_dictionary):
# Consumes reference to matching_dictionary
iterator = io_iterator_t()
_oscheck(
carbon.IOServiceGetMatchingServices(master_port,
matching_dictionary,
ctypes.byref(iterator))
)
devices = list()
while carbon.IOIteratorIsValid(iterator):
device = carbon.IOIteratorNext(iterator)
if not device:
break
devices.append(Device(device))
carbon.IOObjectRelease(iterator)
return devices
def cfstring_to_string(value_string):
value_length = carbon.CFStringGetLength(value_string)
buffer_length = carbon.CFStringGetMaximumSizeForEncoding(
value_length, kCFStringEncodingUTF8)
buffer = ctypes.c_buffer(buffer_length + 1)
result = carbon.CFStringGetCString(value_string,
buffer,
len(buffer),
kCFStringEncodingUTF8)
if not result:
return
return buffer.value
def cfnumber_to_int(value):
result = ctypes.c_int()
carbon.CFNumberGetValue(value, kCFNumberIntType, ctypes.byref(result))
return result.value
def cfvalue_to_value(value):
if not value:
return None
value_type = carbon.CFGetTypeID(value)
if value_type == carbon.CFStringGetTypeID():
return cfstring_to_string(value)
elif value_type == carbon.CFNumberGetTypeID():
return cfnumber_to_int(value)
else:
return None
def get_property_value(properties, key):
key_string = create_cfstring(key)
value = ctypes.c_void_p()
present = carbon.CFDictionaryGetValueIfPresent(properties,
key_string,
ctypes.byref(value))
carbon.CFRelease(key_string)
if not present:
return None
return value
def get_property(properties, key):
return cfvalue_to_value(get_property_value(properties, key))
def dump_properties(properties):
def func(key, value, context):
print('%s = %s' % (cfstring_to_string(key), cfvalue_to_value(value)))
CFDictionaryApplierFunction = ctypes.CFUNCTYPE(None,
ctypes.c_void_p,
ctypes.c_void_p,
ctypes.c_void_p)
carbon.CFDictionaryApplyFunction(properties,
CFDictionaryApplierFunction(func), None)
class Device:
"""
:IVariables:
`name` : str
`manufacturer` : str
"""
def __init__(self, generic_device):
self._init_properties(generic_device)
self._device = self._get_device_interface(generic_device)
self.elements = self._get_elements()
self._open = False
self._queue = None
self._queue_depth = 8 # Number of events queue can buffer
def _init_properties(self, generic_device):
properties = CFMutableDictionaryRef()
_oscheck(
carbon.IORegistryEntryCreateCFProperties(generic_device,
ctypes.byref(properties),
None, 0)
)
self.name = get_property(properties, "Product")
self.manufacturer = get_property(properties, "Manufacturer")
carbon.CFRelease(properties)
def _get_device_interface(self, generic_device):
plug_in_interface = \
ctypes.POINTER(ctypes.POINTER(IUnknown))()
score = ctypes.c_int32()
_oscheck(
carbon.IOCreatePlugInInterfaceForService(
generic_device,
kIOHIDDeviceUserClientTypeID,
kIOCFPlugInInterfaceID,
ctypes.byref(plug_in_interface),
ctypes.byref(score))
)
carbon.CFUUIDGetUUIDBytes.restype = CFUUIDBytes
hid_device_interface = \
ctypes.POINTER(ctypes.POINTER(IOHIDDeviceInterface))()
_oscheck(
plug_in_interface.contents.contents.QueryInterface(
plug_in_interface,
carbon.CFUUIDGetUUIDBytes(kIOHIDDeviceInterfaceID),
ctypes.byref(hid_device_interface))
)
plug_in_interface.contents.contents.Release(plug_in_interface)
return hid_device_interface
def _get_elements(self):
elements_array = CFArrayRef()
_oscheck(
self._device.contents.contents.copyMatchingElements(self._device,
None,
ctypes.byref(
elements_array))
)
self._element_cookies = dict()
elements = list()
n_elements = carbon.CFArrayGetCount(elements_array)
for i in range(n_elements):
properties = carbon.CFArrayGetValueAtIndex(elements_array, i)
element = DeviceElement(self, properties)
elements.append(element)
self._element_cookies[element._cookie] = element
carbon.CFRelease(elements_array)
return elements
def __repr__(self):
return '%s(name=%r, manufacturer=%r)' % (
            self.__class__.__name__, self.name, self.manufacturer)
def open(self, exclusive=False):
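        # Open the HID device, optionally seizing exclusive access so the
        # system and other clients stop receiving its events; then build and
        # start an event queue covering every element.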
flags = 0
if exclusive:
flags |= kIOHIDOptionsTypeSeizeDevice
result = self._device.contents.contents.open(self._device, flags)
if result == 0:
self._open = True
elif result == kIOReturnExclusiveAccess:
raise input.InputDeviceExclusiveException()
# Create event queue
self._queue = self._device.contents.contents.allocQueue(self._device)
_oscheck(
self._queue.contents.contents.create(self._queue,
0, self._queue_depth)
)
# Add all elements into queue
# TODO: only "interesting/known" elements?
for element in self.elements:
r = self._queue.contents.contents.addElement(self._queue,
element._cookie, 0)
if r != 0:
print('error adding %r' % element)
_oscheck(
self._queue.contents.contents.start(self._queue)
)
# HACK TODO:
pyglet.clock.schedule(self.dispatch_events)
def close(self):
if not self._open:
return
# HACK TODO:
pyglet.clock.unschedule(self.dispatch_events)
_oscheck(
self._queue.contents.contents.stop(self._queue)
)
_oscheck(
self._queue.contents.contents.dispose(self._queue)
)
self._queue.contents.contents.Release(self._queue)
self._queue = None
_oscheck(
self._device.contents.contents.close(self._device)
)
self._open = False
# TODO: TEMP/HACK
def dispatch_events(self, dt=None):
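        # Drain at most one queued HID event per call and push its value onto
        # the matching element; scheduled on the pyglet clock while open.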
if not self._open:
return
event = IOHIDEventStruct()
r = self._queue.contents.contents.getNextEvent(self._queue,
ctypes.byref(event), 0,
0)
if r != 0:
# Undocumented behaviour? returns 3758097127L when no events are
# in queue (is documented to block)
return
try:
element = self._element_cookies[event.elementCookie]
element.value = event.value
except KeyError:
pass
class DeviceElement:
def __init__(self, device, properties):
self.device = device
self._cookie = get_property(properties, 'ElementCookie')
_usage = get_property(properties, 'Usage')
usage_page = get_property(properties, 'UsagePage')
self.name = usage.get_element_usage_name(usage_page, _usage)
self.known = usage.get_element_usage_known(usage_page, _usage)
self.value = None
def get_value(self):
return self.value
"""
def get_value(self):
event = IOHIDEventStruct()
self.device._device.contents.contents.getElementValue(
self.device._device, self._cookie, ctypes.byref(event))
return event.value
"""
def get_devices():
return get_existing_devices(get_master_port(), get_matching_dictionary())
| 34.982301 | 84 | 0.544397 |
__docformat__ = 'restructuredtext'
__version__ = '$Id: $'
import ctypes
import pyglet
from pyglet.libs.darwin import carbon, _oscheck, create_cfstring
from pyglet.libs.darwin.constants import *
import input
import usage
void_p = ctypes.POINTER(ctypes.c_int)
class CFUUIDBytes(ctypes.Structure):
_fields_ = [('byte%d' % i, ctypes.c_uint8) for i in range(16)]
mach_port_t = void_p
io_iterator_t = void_p
kern_return_t = ctypes.c_int
IOReturn = ctypes.c_uint
CFDictionaryRef = void_p
CFMutableDictionaryRef = void_p
CFArrayRef = void_p
CFUUIDRef = ctypes.POINTER(CFUUIDBytes)
AbsoluteTime = ctypes.c_double
HRESULT = ctypes.c_int
REFIID = CFUUIDBytes
IOHIDElementType = ctypes.c_int
kIOHIDElementTypeInput_Misc = 1
kIOHIDElementTypeInput_Button = 2
kIOHIDElementTypeInput_Axis = 3
kIOHIDElementTypeInput_ScanCodes = 4
kIOHIDElementTypeOutput = 129
kIOHIDElementTypeFeature = 257
kIOHIDElementTypeCollection = 513
IOHIDElementCookie = ctypes.c_void_p
MACH_PORT_NULL = 0
kIOHIDDeviceKey = "IOHIDDevice"
kIOServicePlane = "IOService"
kIOHIDProductIDKey = "ProductID"
kCFNumberIntType = 9
kIOHIDOptionsTypeSeizeDevice = 1
kIOReturnExclusiveAccess = 0xe00002c5
carbon.CFUUIDGetConstantUUIDWithBytes.restype = CFUUIDRef
kIOHIDDeviceUserClientTypeID = carbon.CFUUIDGetConstantUUIDWithBytes(None,
0xFA, 0x12,
0xFA, 0x38,
0x6F, 0x1A,
0x11, 0xD4,
0xBA, 0x0C,
0x00, 0x05,
0x02, 0x8F,
0x18, 0xD5)
kIOCFPlugInInterfaceID = carbon.CFUUIDGetConstantUUIDWithBytes(None,
0xC2, 0x44, 0xE8,
0x58, 0x10, 0x9C,
0x11, 0xD4,
0x91, 0xD4, 0x00,
0x50, 0xE4, 0xC6,
0x42, 0x6F)
kIOHIDDeviceInterfaceID = carbon.CFUUIDGetConstantUUIDWithBytes(None,
0x78, 0xBD,
0x42, 0x0C,
0x6F, 0x14,
0x11, 0xD4,
0x94, 0x74,
0x00, 0x05,
0x02, 0x8F,
0x18, 0xD5)
class IOHIDEventStruct(ctypes.Structure):
_fields_ = (
('type', IOHIDElementType),
('elementCookie', IOHIDElementCookie),
('value', ctypes.c_int32),
('timestamp', AbsoluteTime),
('longValueSize', ctypes.c_uint32),
('longValue', ctypes.c_void_p)
)
Self = ctypes.c_void_p
class IUnknown(ctypes.Structure):
_fields_ = (
('_reserved', ctypes.c_void_p),
('QueryInterface',
ctypes.CFUNCTYPE(HRESULT, Self, REFIID, ctypes.c_void_p)),
('AddRef',
ctypes.CFUNCTYPE(ctypes.c_ulong, Self)),
('Release',
ctypes.CFUNCTYPE(ctypes.c_ulong, Self)),
)
# bothered.
class IOHIDQueueInterface(ctypes.Structure):
_fields_ = IUnknown._fields_ + (
('createAsyncEventSource', ctypes.c_void_p),
('getAsyncEventSource', ctypes.c_void_p),
('createAsyncPort', ctypes.c_void_p),
('getAsyncPort', ctypes.c_void_p),
('create', ctypes.CFUNCTYPE(IOReturn,
Self, ctypes.c_uint32, ctypes.c_uint32)),
('dispose', ctypes.CFUNCTYPE(IOReturn,
Self)),
        ('addElement', ctypes.CFUNCTYPE(IOReturn,
                                        Self, IOHIDElementCookie,
                                        ctypes.c_uint32)),
('removeElement', ctypes.c_void_p),
('hasElement', ctypes.c_void_p),
('start', ctypes.CFUNCTYPE(IOReturn,
Self)),
('stop', ctypes.CFUNCTYPE(IOReturn,
Self)),
('getNextEvent', ctypes.CFUNCTYPE(IOReturn,
Self,
ctypes.POINTER(IOHIDEventStruct),
AbsoluteTime,
ctypes.c_uint32)),
('setEventCallout', ctypes.c_void_p),
('getEventCallout', ctypes.c_void_p),
)
class IOHIDDeviceInterface(ctypes.Structure):
_fields_ = IUnknown._fields_ + (
('createAsyncEventSource', ctypes.c_void_p),
('getAsyncEventSource', ctypes.c_void_p),
('createAsyncPort', ctypes.c_void_p),
('getAsyncPort', ctypes.c_void_p),
('open', ctypes.CFUNCTYPE(IOReturn,
Self, ctypes.c_uint32)),
('close', ctypes.CFUNCTYPE(IOReturn,
Self)),
('setRemovalCallback', ctypes.c_void_p),
('getElementValue', ctypes.CFUNCTYPE(IOReturn,
Self, IOHIDElementCookie,
ctypes.POINTER(IOHIDEventStruct))),
('setElementValue', ctypes.c_void_p),
('queryElementValue', ctypes.c_void_p),
('startAllQueues', ctypes.c_void_p),
('stopAllQueues', ctypes.c_void_p),
('allocQueue', ctypes.CFUNCTYPE(
ctypes.POINTER(ctypes.POINTER(IOHIDQueueInterface)),
Self)),
('allocOutputTransaction', ctypes.c_void_p),
# 1.2.1 (10.2.3)
('setReport', ctypes.c_void_p),
('getReport', ctypes.c_void_p),
# 1.2.2 (10.3)
('copyMatchingElements', ctypes.CFUNCTYPE(IOReturn,
Self, CFDictionaryRef,
ctypes.POINTER(CFArrayRef))),
('setInterruptReportHandlerCallback', ctypes.c_void_p),
)
def get_master_port():
master_port = mach_port_t()
_oscheck(
carbon.IOMasterPort(MACH_PORT_NULL, ctypes.byref(master_port))
)
return master_port
def get_matching_dictionary():
carbon.IOServiceMatching.restype = CFMutableDictionaryRef
matching_dictionary = carbon.IOServiceMatching(kIOHIDDeviceKey)
return matching_dictionary
def get_existing_devices(master_port, matching_dictionary):
# Consumes reference to matching_dictionary
iterator = io_iterator_t()
_oscheck(
carbon.IOServiceGetMatchingServices(master_port,
matching_dictionary,
ctypes.byref(iterator))
)
devices = list()
while carbon.IOIteratorIsValid(iterator):
device = carbon.IOIteratorNext(iterator)
if not device:
break
devices.append(Device(device))
carbon.IOObjectRelease(iterator)
return devices
def cfstring_to_string(value_string):
value_length = carbon.CFStringGetLength(value_string)
buffer_length = carbon.CFStringGetMaximumSizeForEncoding(
value_length, kCFStringEncodingUTF8)
buffer = ctypes.c_buffer(buffer_length + 1)
result = carbon.CFStringGetCString(value_string,
buffer,
len(buffer),
kCFStringEncodingUTF8)
if not result:
return
return buffer.value
def cfnumber_to_int(value):
result = ctypes.c_int()
carbon.CFNumberGetValue(value, kCFNumberIntType, ctypes.byref(result))
return result.value
def cfvalue_to_value(value):
if not value:
return None
value_type = carbon.CFGetTypeID(value)
if value_type == carbon.CFStringGetTypeID():
return cfstring_to_string(value)
elif value_type == carbon.CFNumberGetTypeID():
return cfnumber_to_int(value)
else:
return None
def get_property_value(properties, key):
key_string = create_cfstring(key)
value = ctypes.c_void_p()
present = carbon.CFDictionaryGetValueIfPresent(properties,
key_string,
ctypes.byref(value))
carbon.CFRelease(key_string)
if not present:
return None
return value
def get_property(properties, key):
return cfvalue_to_value(get_property_value(properties, key))
def dump_properties(properties):
def func(key, value, context):
print('%s = %s' % (cfstring_to_string(key), cfvalue_to_value(value)))
CFDictionaryApplierFunction = ctypes.CFUNCTYPE(None,
ctypes.c_void_p,
ctypes.c_void_p,
ctypes.c_void_p)
carbon.CFDictionaryApplyFunction(properties,
CFDictionaryApplierFunction(func), None)
class Device:
def __init__(self, generic_device):
self._init_properties(generic_device)
self._device = self._get_device_interface(generic_device)
self.elements = self._get_elements()
self._open = False
self._queue = None
self._queue_depth = 8 # Number of events queue can buffer
def _init_properties(self, generic_device):
properties = CFMutableDictionaryRef()
_oscheck(
carbon.IORegistryEntryCreateCFProperties(generic_device,
ctypes.byref(properties),
None, 0)
)
self.name = get_property(properties, "Product")
self.manufacturer = get_property(properties, "Manufacturer")
carbon.CFRelease(properties)
def _get_device_interface(self, generic_device):
plug_in_interface = \
ctypes.POINTER(ctypes.POINTER(IUnknown))()
score = ctypes.c_int32()
_oscheck(
carbon.IOCreatePlugInInterfaceForService(
generic_device,
kIOHIDDeviceUserClientTypeID,
kIOCFPlugInInterfaceID,
ctypes.byref(plug_in_interface),
ctypes.byref(score))
)
carbon.CFUUIDGetUUIDBytes.restype = CFUUIDBytes
hid_device_interface = \
ctypes.POINTER(ctypes.POINTER(IOHIDDeviceInterface))()
_oscheck(
plug_in_interface.contents.contents.QueryInterface(
plug_in_interface,
carbon.CFUUIDGetUUIDBytes(kIOHIDDeviceInterfaceID),
ctypes.byref(hid_device_interface))
)
plug_in_interface.contents.contents.Release(plug_in_interface)
return hid_device_interface
def _get_elements(self):
elements_array = CFArrayRef()
_oscheck(
self._device.contents.contents.copyMatchingElements(self._device,
None,
ctypes.byref(
elements_array))
)
self._element_cookies = dict()
elements = list()
n_elements = carbon.CFArrayGetCount(elements_array)
for i in range(n_elements):
properties = carbon.CFArrayGetValueAtIndex(elements_array, i)
element = DeviceElement(self, properties)
elements.append(element)
self._element_cookies[element._cookie] = element
carbon.CFRelease(elements_array)
return elements
def __repr__(self):
return '%s(name=%r, manufacturer=%r)' % (
            self.__class__.__name__, self.name, self.manufacturer)
def open(self, exclusive=False):
flags = 0
if exclusive:
flags |= kIOHIDOptionsTypeSeizeDevice
result = self._device.contents.contents.open(self._device, flags)
if result == 0:
self._open = True
elif result == kIOReturnExclusiveAccess:
raise input.InputDeviceExclusiveException()
# Create event queue
self._queue = self._device.contents.contents.allocQueue(self._device)
_oscheck(
self._queue.contents.contents.create(self._queue,
0, self._queue_depth)
)
# Add all elements into queue
# TODO: only "interesting/known" elements?
for element in self.elements:
r = self._queue.contents.contents.addElement(self._queue,
element._cookie, 0)
if r != 0:
print('error adding %r' % element)
_oscheck(
self._queue.contents.contents.start(self._queue)
)
# HACK TODO:
pyglet.clock.schedule(self.dispatch_events)
def close(self):
if not self._open:
return
# HACK TODO:
pyglet.clock.unschedule(self.dispatch_events)
_oscheck(
self._queue.contents.contents.stop(self._queue)
)
_oscheck(
self._queue.contents.contents.dispose(self._queue)
)
self._queue.contents.contents.Release(self._queue)
self._queue = None
_oscheck(
self._device.contents.contents.close(self._device)
)
self._open = False
# TODO: TEMP/HACK
def dispatch_events(self, dt=None):
if not self._open:
return
event = IOHIDEventStruct()
r = self._queue.contents.contents.getNextEvent(self._queue,
ctypes.byref(event), 0,
0)
if r != 0:
# Undocumented behaviour? returns 3758097127L when no events are
# in queue (is documented to block)
return
try:
element = self._element_cookies[event.elementCookie]
element.value = event.value
except KeyError:
pass
class DeviceElement:
def __init__(self, device, properties):
self.device = device
self._cookie = get_property(properties, 'ElementCookie')
_usage = get_property(properties, 'Usage')
usage_page = get_property(properties, 'UsagePage')
self.name = usage.get_element_usage_name(usage_page, _usage)
self.known = usage.get_element_usage_known(usage_page, _usage)
self.value = None
def get_value(self):
return self.value
def get_devices():
return get_existing_devices(get_master_port(), get_matching_dictionary())
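# Usage sketch (not part of the original module): enumerate attached HID
# devices and open one so its events get dispatched.
# for device in get_devices():
#     print(device)
#     device.open()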
| true | true |
f71fe39b002b6987bd56fdc8f822aa4a1ab3f554 | 548 | py | Python | config_parser.py | benkelaci/qrcode_medicinedispenser | 41cee011dc0e9ab5d1ef0738efd5e1ea11c13d0a | ["MIT"] | null | null | null | config_parser.py | benkelaci/qrcode_medicinedispenser | 41cee011dc0e9ab5d1ef0738efd5e1ea11c13d0a | ["MIT"] | null | null | null | config_parser.py | benkelaci/qrcode_medicinedispenser | 41cee011dc0e9ab5d1ef0738efd5e1ea11c13d0a | ["MIT"] | null | null | null |
import json
class Struct(object):
def __init__(self, data):
for name, value in data.items():
setattr(self, name, self._wrap(value))
def _wrap(self, value):
if isinstance(value, (tuple, list, set, frozenset)):
return type(value)([self._wrap(v) for v in value])
else:
return Struct(value) if isinstance(value, dict) else value
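# Example of the wrapping behaviour (hypothetical JSON): {"camera": {"fps": 30}}
# loads as an object where cfg.camera.fps == 30; lists of dicts are wrapped
# element-wise into Struct instances by _wrap().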
config_path = "/home/pi/qrcode_detect/cfg.json"
with open(config_path) as config:
cfg = json.load(config, object_hook=Struct)
 | 27.4 | 71 | 0.616788 |
import json
class Struct(object):
def __init__(self, data):
for name, value in data.items():
setattr(self, name, self._wrap(value))
def _wrap(self, value):
if isinstance(value, (tuple, list, set, frozenset)):
return type(value)([self._wrap(v) for v in value])
else:
return Struct(value) if isinstance(value, dict) else value
config_path = "/home/pi/qrcode_detect/cfg.json"
with open(config_path) as config:
cfg = json.load(config, object_hook=Struct)
| true | true |
f71fe39bd8323e42c4bdd0221f1966de94ab7729 | 904 | py | Python | examples/sawyer/moveit_planning.py | gujralsanyam22/pyrobot | a0448714857b684d8b280f710e9304988524d2e0 | ["MIT"] | 2,150 | 2019-06-12T20:55:41.000Z | 2022-03-21T07:14:51.000Z | examples/sawyer/moveit_planning.py | gujralsanyam22/pyrobot | a0448714857b684d8b280f710e9304988524d2e0 | ["MIT"] | 124 | 2019-06-22T17:12:27.000Z | 2022-02-26T11:43:13.000Z | examples/sawyer/moveit_planning.py | gujralsanyam22/pyrobot | a0448714857b684d8b280f710e9304988524d2e0 | ["MIT"] | 329 | 2019-06-13T03:03:54.000Z | 2022-03-30T07:04:55.000Z |
# Copyright (c) Facebook, Inc. and its affiliates.
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""
Example for commanding robot with position control using moveit planner
"""
import time
from pyrobot import Robot
def main():
target_joints = [
[0.704, -0.455, -0.159, 1.395, -1.240, 1.069, 2.477],
[-0.341, -0.384, -0.018, 1.533, -0.977, -1.492, -1.084],
]
config = dict(moveit_planner_type="ESTkConfigDefault")
bot = Robot(
"sawyer",
use_arm=True,
use_base=False,
use_camera=False,
use_gripper=True,
arm_config=config,
)
bot.arm.go_home()
time.sleep(1)
for joint in target_joints:
bot.arm.set_joint_positions(joint, plan=True)
time.sleep(1)
bot.arm.go_home()
if __name__ == "__main__":
main()
| 21.52381 | 71 | 0.622788 |
import time
from pyrobot import Robot
def main():
target_joints = [
[0.704, -0.455, -0.159, 1.395, -1.240, 1.069, 2.477],
[-0.341, -0.384, -0.018, 1.533, -0.977, -1.492, -1.084],
]
config = dict(moveit_planner_type="ESTkConfigDefault")
bot = Robot(
"sawyer",
use_arm=True,
use_base=False,
use_camera=False,
use_gripper=True,
arm_config=config,
)
bot.arm.go_home()
time.sleep(1)
for joint in target_joints:
bot.arm.set_joint_positions(joint, plan=True)
time.sleep(1)
bot.arm.go_home()
if __name__ == "__main__":
main()
| true | true |
f71fe3a7ec4b148032db722f5c53b2c067b9a249 | 1,863 | py | Python | random_colors.py | electric-blue-green/trinket | 82e1e265934252c0cf3b2fa72f9bc1d60a35ac93 | ["Unlicense"] | 1 | 2021-06-05T03:12:36.000Z | 2021-06-05T03:12:36.000Z | random_colors.py | aejb/trinket | 82e1e265934252c0cf3b2fa72f9bc1d60a35ac93 | ["Unlicense"] | 1 | 2018-02-26T11:22:50.000Z | 2018-02-26T11:22:50.000Z | random_colors.py | electric-blue-green/trinket | 82e1e265934252c0cf3b2fa72f9bc1d60a35ac93 | ["Unlicense"] | null | null | null |
import board
import busio
import time
import random
dotstar = busio.SPI(board.APA102_SCK, board.APA102_MOSI)
#colors = [1, 128, 244] # set colors all to 1
colors = [random.randint(3, 240), random.randint(3, 240), random.randint(3, 240), ] # selects random start color in "safe zone"
steps = [1, 3, 4] # set wavelength
steps = [random.randint(1, 5), random.randint(1, 5), random.randint(1, 5)] # selects random step beteween 1 and 5
print("INIT") ## REPL
def getColor(index, colors, steps):
if colors[index] >= 255 or colors[index] <= 0: # flip the sign of the step at the max/min
steps[index] *= -1
colors[index] += steps[index] # increment the value
if colors[index] > 255: colors[index] = 255 # accounting for stepping over 255
if colors[index] < 0: colors[index] = 0 # accounting for stepping under 0
return (colors[index], colors, steps) # returns colors for index
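# Each channel ramps between 0 and 255 as a triangle wave: the step sign flips
# at the bounds, and the random per-channel step sizes keep the three waves
# out of phase so the blended color keeps drifting.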
def setPixel(red, green, blue): # call setpixel
if not dotstar.try_lock(): # see if clock is locked
return
#print("setting pixel to: %d %d %d" % (red, green, blue)) # debug
dotstar.write(bytearray([0x00, 0x00, 0x00, 0x00, 0xff, blue, green, red, 0xff, 0xff, 0xff, 0xff]))
dotstar.unlock() # pass new color
while True:
r, colors, steps = getColor(0, colors, steps) # gets red
g, colors, steps = getColor(1, colors, steps) # gets green
b, colors, steps = getColor(2, colors, steps) # gets blue
print("STEP = ", steps, "COLOR = ", colors) # REPL debug print
setPixel(r, g, b) # calls setPixel
time.sleep(random.random()) # random wait time between 0 and 1
 | 56.454545 | 131 | 0.574342 |
import board
import busio
import time
import random
dotstar = busio.SPI(board.APA102_SCK, board.APA102_MOSI)
colors = [random.randint(3, 240), random.randint(3, 240), random.randint(3, 240), ]
steps = [1, 3, 4]
steps = [random.randint(1, 5), random.randint(1, 5), random.randint(1, 5)]
print("INIT") etColor(index, colors, steps):
if colors[index] >= 255 or colors[index] <= 0:
steps[index] *= -1
colors[index] += steps[index]
if colors[index] > 255: colors[index] = 255
if colors[index] < 0: colors[index] = 0
return (colors[index], colors, steps)
def setPixel(red, green, blue):
if not dotstar.try_lock():
return
    dotstar.write(bytearray([0x00, 0x00, 0x00, 0x00, 0xff, blue, green, red, 0xff, 0xff, 0xff, 0xff]))
dotstar.unlock()
while True:
r, colors, steps = getColor(0, colors, steps)
g, colors, steps = getColor(1, colors, steps)
b, colors, steps = getColor(2, colors, steps)
print("STEP = ", steps, "COLOR = ", colors)
setPixel(r, g, b)
time.sleep(random.random())
| true | true |
f71fe485cdc1d845da328b9bf8355e5b4665fa3d | 393 | py | Python | project-euler/py/e6.py | aaycee/aaycee.github.io | b609a869c5c9d02f7cbc1798b643ec083475f741 | ["MIT"] | null | null | null | project-euler/py/e6.py | aaycee/aaycee.github.io | b609a869c5c9d02f7cbc1798b643ec083475f741 | ["MIT"] | null | null | null | project-euler/py/e6.py | aaycee/aaycee.github.io | b609a869c5c9d02f7cbc1798b643ec083475f741 | ["MIT"] | null | null | null |
# Akachukwu Obi, 2018
# Project Euler #6
# see .js file for build up
def diffOfSumOfSquares(max):
sumOfNumbers = max * (max + 1) / 2 # sum of n natural numbers is n(n + 1)/2
	sumOfSquares = (max / 6.0) * (2 * max + 1) * (max + 1) # I used 6.0 to avoid getting a math.floor situation in python2.7
return sumOfNumbers * sumOfNumbers - sumOfSquares
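# Sanity check for n = 100: square of the sum is 5050**2 = 25502500, the sum
# of squares is 100*101*201/6 = 338350, and their difference is 25164150.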
print(diffOfSumOfSquares(100)) #25164150.0
 | 35.727273 | 121 | 0.689567 |
def diffOfSumOfSquares(max):
sumOfNumbers = max * (max + 1) / 2
sumOfSquares = (max / 6.0) * (2 * max + 1) * (max + 1)
return sumOfNumbers * sumOfNumbers - sumOfSquares
print(diffOfSumOfSquares(100))
 | true | true |
f71fe4adf7dd0cb5bea1c0b3a038207a017240b9 | 250 | py | Python | experiments_approximate/experiments/create_dico_alphacsc.py | bmalezieux/unrolled_dl | 5854a6991e44db025a99a9f0d38be6b1e669aa83 | ["MIT"] | null | null | null | experiments_approximate/experiments/create_dico_alphacsc.py | bmalezieux/unrolled_dl | 5854a6991e44db025a99a9f0d38be6b1e669aa83 | ["MIT"] | null | null | null | experiments_approximate/experiments/create_dico_alphacsc.py | bmalezieux/unrolled_dl | 5854a6991e44db025a99a9f0d38be6b1e669aa83 | ["MIT"] | null | null | null |
import numpy as np
atoms_to_save = np.array([0, 1, 2, 3, 4, 5, 6, 7, 8, 12, 15, 18])
u_cdl = np.load("u_cdl.npy")
v_cdl = np.load("v_cdl.npy")
np.save("u_cdl_modified.npy", u_cdl[atoms_to_save])
np.save("v_cdl_modified.npy", v_cdl[atoms_to_save])
 | 25 | 65 | 0.68 |
import numpy as np
atoms_to_save = np.array([0, 1, 2, 3, 4, 5, 6, 7, 8, 12, 15, 18])
u_cdl = np.load("u_cdl.npy")
v_cdl = np.load("v_cdl.npy")
np.save("u_cdl_modified.npy", u_cdl[atoms_to_save])
np.save("v_cdl_modified.npy", v_cdl[atoms_to_save])
| true | true |
f71fe97a19d5758ce4f745c12696da1bb54d0fba | 15,439 | py | Python | src/catsys/exe/_res.py | AtomCrafty/catsystem-py | 11bd96708e3959be84e41e30397820ebaa54974d | ["MIT"] | 6 | 2020-10-20T13:26:56.000Z | 2022-02-15T05:26:38.000Z | src/catsys/exe/_res.py | AtomCrafty/catsystem-py | 11bd96708e3959be84e41e30397820ebaa54974d | ["MIT"] | 2 | 2020-10-20T16:15:35.000Z | 2021-07-08T18:15:23.000Z | src/catsys/exe/_res.py | AtomCrafty/catsystem-py | 11bd96708e3959be84e41e30397820ebaa54974d | ["MIT"] | 1 | 2020-10-19T15:20:50.000Z | 2020-10-19T15:20:50.000Z |
#!/usr/bin/python
# -*- coding: utf-8 -*-
"""Windows resource types
Requires packages: pefile
WARNING: This module is deprecated, broken, and will be removed eventually.
"""
__version__ = '0.0.1'
__date__ = '2020-01-01'
__author__ = 'Robert Jordan'
__all__ = ['ResourceName', 'ResourceId']
#######################################################################################
import abc, collections, datetime, pefile
from typing import Any, Dict, Iterable, List, Optional, Set, Text, Tuple, Union
## PREDECLARE TYPES ##
ResName = Union[int,str]
ResNameArg = Union[int,str,pefile.UnicodeStringWrapperPostProcessor,pefile.ResourceDirEntryData]
OptionalResName = Optional[ResName]
OptionalResNameArg = Optional[ResNameArg]
class ResourceName(object):
"""
A single name or id for a resource. This can be the type, name, or language id.
"""
__slots__ = ('value',)
def __new__(cls, resname:Union[int,str,'ResourceName',None]):
if resname is None:
return None
elif cls is ResourceName: # and resname is not None:
if resname.__class__ is ResourceName:
return resname # Return existing instance
return object.__new__(cls)
def __init__(self, resname:Union[int,str,None]=None):
"""Convert argument to a valid resource name for use with ResourceId"""
if isinstance(resname, ResourceName):
resname = resname.value
elif not isinstance(resname, (int,str)):
raise TypeError('ResourceName() expected int, str or ResourceName, not {0}'.format(type(resname).__name__))
self.value:Union[int,str] = resname
def __setattr__(self, key:str, value):
if not hasattr(self, 'value'):
# Very lazy method to check for initialization
object.__setattr__(self, key, value)
else:
# Very lazy method to throw a different error on attribute existance
val = object.__getattribute__(self, key)
raise AttributeError('ResourceName object attribute {0!r} is read-only'.format(key))
@staticmethod
def cast(other:Union[int,str,'ResourceName',None]) -> Union[int,str,None]:
if other is None:
return None
elif isinstance(other, (int,str)):
return other
elif isinstance(other, ResourceName):
return other.value
else:
raise TypeError('ResourceName cast() expected int, str, ResourceName or None, not {0}'.format(type(other).__name__))
@staticmethod
def cast_entry(entry:Union[pefile.ResourceDataEntryData,pefile.UnicodeStringWrapperPostProcessor,None]) -> Union[int,str,None]:
"""Convert pefile module types to resource names."""
if entry is None:
return None
elif isinstance(entry, pefile.ResourceDirEntryData):
return entry.name.decode() if entry.name is not None else entry.id
elif isinstance(entry, pefile.UnicodeStringWrapperPostProcessor):
return entry.decode()
else:
raise TypeError('ResourceName read_name() expected ResourceDirEntryData, UnicodeStringWrapperPostProcessor or None, not {0}'.format(type(entry).__name__))
@staticmethod
def from_entry(entry:Union[pefile.ResourceDataEntryData,pefile.UnicodeStringWrapperPostProcessor,None]) -> 'ResourceName':
"""Convert pefile module types to resource names."""
if entry is None:
return ResourceName(None)
elif isinstance(entry, pefile.ResourceDirEntryData):
return ResourceName(entry.name.decode() if entry.name is not None else entry.id)
elif isinstance(entry, pefile.UnicodeStringWrapperPostProcessor):
return ResourceName(entry.decode())
else:
raise TypeError('ResourceName from_entry() expected ResourceDirEntryData, UnicodeStringWrapperPostProcessor or None, not {0}'.format(type(entry).__name__))
def __bool__(self) -> bool:
#TODO: How to handle bool?
#return self.value is not None
return bool(self.value)
def __int__(self) -> int:
if isinstance(self.value, int):
return self.value
raise TypeError('ResourceName() cannot cast {0} to int'.format(type(self.value).__name__))
def __str__(self) -> str:
return str(self.value)
def __repr__(self) -> str:
return 'ResourceName({0!r})'.format(self.value)
#return repr(self.value)
def __hash__(self) -> int:
return hash(self.value)
# int/str equality
def __eq__(self, other) -> bool:
return self.value == ResourceName._cast(other)
def __ne__(self, other) -> bool:
return self.value != ResourceName._cast(other)
def __lt__(self, other) -> bool:
return self.value < ResourceName._cast(other)
def __gt__(self, other) -> bool:
return self.value > ResourceName._cast(other)
def __le__(self, other) -> bool:
return self.value <= ResourceName._cast(other)
def __ge__(self, other) -> bool:
return self.value >= ResourceName._cast(other)
@property
def id(self) -> Union[int,None]:
"""Returns the resource id when an int, otherwise None."""
return self.value if isinstance(self.value, int) else None
@property
def name(self) -> Union[str,None]:
"""Returns the resource name when a str, otherwise None."""
return self.value if isinstance(self.value, str) else None
@staticmethod
def _cast(other:OptionalResNameArg) -> Union[str,int,None]:
if isinstance(other, ResourceName):
return other.value
elif isinstance(other, pefile.ResourceDirEntryData):
return other.name.decode() if other.name is not None else other.id
elif isinstance(other, pefile.UnicodeStringWrapperPostProcessor):
return other.decode()
return other
ResourceName(None) # Instantiate global
_ResourceIdNamedTuple = collections.namedtuple('_ResourceIdNamedTuple', ('type','name','lang'))
class ResourceId(_ResourceIdNamedTuple):
"""ResourceId(type, name, lang)"""
type:ResourceName
name:ResourceName
lang:ResourceName
def __new__(cls, type:ResourceName=None, name:ResourceName=None, lang:ResourceName=None):
if cls is ResourceId and type is not None and name is None and lang is None:
if type.__class__ is ResourceId:
return type # Return existing instance
if isinstance(type, tuple) and name is None and lang is None:
type, name, lang = type
        return tuple.__new__(cls, (ResourceName(type), ResourceName(name), ResourceName(lang)))
def __repr__(self) -> str:
return 'ResourceId(type={0.value!r}, name={1.value!r}, lang={2.value!r})'.format(*self)
def __str__(self) -> str:
return 'ResourceId(type={0.value!r}, name={1.value!r}, lang={2.value!r})'.format(*self)
del _ResourceIdNamedTuple
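# Usage sketch (hypothetical values): names and integer ids are interchangeable
# thanks to ResourceName's mixed-type comparisons, e.g.
# rid = ResourceId('BITMAP', 101, 1033)
# rid.type == 'BITMAP' and rid.name == 101  # both True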
def read_struct(pe:pefile.PE, struct:pefile.Structure) -> bytes:
return pe.get_string_at_rva(struct.OffsetToData, struct.Size)
class Resource(object):
def __init__(self, resid:ResourceId, resdata:Union[pefile.ResourceDirEntryData,pefile.ResourceDataEntryData], pe:Optional[pefile.PE]=None):
if pe is not None and not isinstance(pe, pefile.PE):
raise TypeError('Resource() argument pe must be pefile.PE or None, not {0}'.format(type(pe).__name__))
if resid is None or not isinstance(resid, (ResourceId, tuple)):
raise TypeError('Resource() argument resid must be ResourceId or tuple, not {0}'.format(type(resid).__name__))
if resdata is None or not isinstance(resdata, (pefile.ResourceDirEntryData,pefile.ResourceDataEntryData)):
raise TypeError('Resource() argument resdata must be pefile.ResourceDirEntryData or pefile.ResourceDataEntryData, not {0}'.format(type(resdata).__name__))
if not isinstance(resdata, pefile.ResourceDataEntryData):
if not hasattr(resdata, 'data'):
raise ValueError('Resource() argument resdata must have a data attribute when pefile.ResourceDirEntryData')
resdata = resdata.data
self._resid:ResourceId = resid if isinstance(resid, ResourceId) else ResourceId(*resid)
self._resdata:pefile.ResourceDataEntryData = resdata
        self._data:bytes = self.read(pe) if pe is not None else None
def read(self, pe:pefile.PE) -> bytes:
if self._data is None:
self._data = read_struct(pe, self._resdata.struct)
return self._data
@property
def data(self) -> bytes:
return self._data
@property
def offset(self) -> int:
return self._resdata.struct.OffsetToData
@property
def size(self) -> int:
return self._resdata.struct.Size
@property
def struct(self):
return self._resdata.struct
# def read(pe:pefile.PE=...) -> bytes:
# if self._data is not None:
# return self._data
# #elif pe is None:
# # raise TypeError('Data has')
# return pe.get_string_at_rva(self.offset, self.size)
def __repr__(self) -> str:
return 'Resource(type={0.value!r}, name={1.value!r}, lang={2.value!r}, size={3!r})'.format(*self._resid, self.size)
def __str__(self) -> str:
return 'Resource(type={0.value!r}, name={1.value!r}, lang={2.value!r}, size={3!r})'.format(*self._resid, self.size)
def _match_entry(entry:pefile.ResourceDirEntryData, search_set:Set[ResourceName]) -> Union[ResourceName,None]:
    entry_match = ResourceName.cast_entry(entry)
    if entry_match in search_set:
        # Consume the match so each searched name is only returned once.
        search_set.remove(entry_match)
        return ResourceName(entry_match)
    return None
def find_resource_match(entry:pefile.ResourceDirEntryData, resname:ResourceName) -> Optional[ResourceName]:
    name: ResourceName = ResourceName.from_entry(entry)
if resname is None:
return ResourceName(name)
elif name == resname:
return resname
return None
def find_resources(pe:pefile.PE, search_id:ResourceId, read_data:bool=False) -> List[Resource]:
if isinstance(pe, pefile.PE):
if not hasattr(pe, 'DIRECTORY_ENTRY_RESOURCE'):
raise TypeError('iter_resource*() argument pe (PE) does not have {0!r}'.format('DIRECTORY_ENTRY_RESOURCE'))
directory = pe.DIRECTORY_ENTRY_RESOURCE
search_id = search_id if isinstance(search_id, ResourceId) else ResourceId(*search_id)
results:List[Resource] = []
for entry_t in directory.entries:
t = find_resource_match(entry_t, search_id.type)
if t is None: continue
for entry_n in entry_t.directory.entries:
n = find_resource_match(entry_n, search_id.name)
if n is None: continue
for entry_l in entry_n.directory.entries:
l = find_resource_match(entry_l, search_id.lang)
if l is not None:
results.append(Resource(ResourceId(t,n,l), entry_l.data, pe=pe if read_data else None))
return results
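# Usage sketch (hypothetical file; 16 is the standard RT_VERSION type id):
# pe = pefile.PE('example.exe')
# for res in find_resources(pe, ResourceId(16, None, None), read_data=True):
#     print(res, len(res.data))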
# def iter_resources(pe:pefile.PE)
# results:List[Resource]=[]
# if isinstance(types, tuple) and not isinstance(types, ResourceId):
# search_types = set(ResourceName(t) for t in types)
# for entry_t in directory.entries:
# entry_type = ResourceName.cast_entry(entry_t)
# if search_id.type is not None and entry_type != search_id.type:
# continue
# entry_type = search_id.type or ResourceName(entry_type)
# for entry_n in entry_t.directory.entries:
# entry_type = ResourceName.cast_entry(entry_n)
# if search_id.type is not None and entry_type != search_id.type:
# continue
# entry_type = search_id.type or ResourceName(entry_type)
# for entry_l in entry_n.directory.entries:
# l = ResourceName.cast_entry(entry_l)
# if search_id.lang is None:
# l = ResourceName(l)
# elif l == search_id.lang:
# l = search_id.lang
# continue
# l = search_id.lang or
# if entry_type in search_types:
# search_types.remove(entry_type)
# def _iter_resource_names(entries):
# search_types = set(ResourceName(t) for t in types)
# for entry in directory.entries:
# entry_type = ResourceName.cast_entry(entry)
# if entry_type in search_types:
# search_types.remove(entry_type)
# def _iter_resource_types(entries)
# def iter_resource_types(pe:pefile.PE) -> Iterable[ResourceName]:
# if 'DIRECTORY_ENTRY_RESOURCE' not in pe:
# raise ValueError('iter_resource*() argument pe does not have {0!r}'.format('DIRECTORY_ENTRY_RESOURCE'))
# entries = pe.DIRECTORY_ENTRY_RESOURCE.entries
# for entry in entries:
# yield ResourceName.from_entry(entry)
# def iter_resource_names(pe:Union[pefile.PE,pefile.ResourceDirData,pefile.ResourceDirEntryData], types:Union[ResourceName,Tuple[ResourceName]]=...) -> Iterable[ResourceName]:
# if isinstance(pe, pefile.PE):
# if 'DIRECTORY_ENTRY_RESOURCE' not in pe:
# raise TypeError('iter_resource*() argument pe (PE) does not have {0!r}'.format('DIRECTORY_ENTRY_RESOURCE'))
# directory = pe.DIRECTORY_ENTRY_RESOURCE
# # elif isinstance(pe, pefile.ResourceDirData):
# # # if not hasattr(pe, 'directory'):
# # # raise TypeError('iter_resource*() argument pe (ResourceDirData) does not have {0!r}'.format('entries'))
# # directory = pe
# # elif isinstance(pe, pefile.ResourceDirEntryData):
# # if not hasattr(pe, 'directory'):
# # raise TypeError('iter_resource*() argument pe (ResourceDirEntryData) does not have {0!r}'.format('directory'))
# # directory = pe.directory
# #entries = pe.DIRECTORY_ENTRY_RESOURCE.entries
# entries = directory.entries
# if isinstance(types, tuple) and not isinstance(types, ResourceId):
# search_types = set(ResourceName(t) for t in types)
# for entry in directory.entries:
# entry_type = ResourceName.cast_entry(entry)
# if entry_type in search_types:
# search_types.remove(entry_type)
# for entry in directory.entries:
# yield ResourceName.from_entry(entry)
# def
# pe2.parse_data_directories(directories=[pefile.DIRECTORY_ENTRY['IMAGE_DIRECTORY_ENTRY_IMPORT']])
# def iter_resource_names(pe:Union[pefile.PE,pefile.ResourceDirEntryData], type:Union[ResourceName,Tuple[ResourceName]]) -> Iterable[ResourceName]:
# if isinstance(pe, pefile.ResourceDirEntryData):
# if not hasattr(pe, 'directory'):
# raise TypeError('iter_resource*() argument pe (as directory) does not have {0!r}'.format('directory'))
# directory =
# elif isinstance(pe, pefile.PE):
# if 'DIRECTORY_ENTRY_RESOURCE' not in pe:
# raise TypeError('iter_resource*() argument pe does not have {0!r}'.format('DIRECTORY_ENTRY_RESOURCE'))
# directory = pe.DIRECTORY_ENTRY_RESOURCE
# if 'DIRECTORY_ENTRY_RESOURCE' not in pe:
# raise ValueError('iter_resource*() argument pe does not have {0!r}'.format('DIRECTORY_ENTRY_RESOURCE'))
# if 'DIRECTORY_ENTRY_RESOURCE' not in pe:
# return
# entries = pe.DIRECTORY_ENTRY_RESOURCE.entries
# for entry in entries:
# yield ResourceName.from_entry(entry)
# def iter_resource_ids(pe:pefile.PE, )
# def iter_resource_entries(directory:pefile.ResourceDirData) -> Iterable[ResourceName]:
# entries = directory.entries
# for entry in entries:
# yield ResourceName.from_entry(entry)
| 43.985755 | 175 | 0.711704 |
"""Windows resource types
Requires packages: pefile
WARNING: This module is deprecated, broken, and will be removed eventually.
"""
__version__ = '0.0.1'
__date__ = '2020-01-01'
__author__ = 'Robert Jordan'
__all__ = ['ResourceName', 'ResourceId']
    def __lt__(self, other) -> bool:
return self.value < ResourceName._cast(other)
def __gt__(self, other) -> bool:
return self.value > ResourceName._cast(other)
def __le__(self, other) -> bool:
return self.value <= ResourceName._cast(other)
def __ge__(self, other) -> bool:
return self.value >= ResourceName._cast(other)
@staticmethod
def from_entry(entry:Union[pefile.ResourceDataEntryData,pefile.UnicodeStringWrapperPostProcessor,None]) -> 'ResourceName':
"""Convert pefile module types to resource names."""
if entry is None:
return ResourceName(None)
elif isinstance(entry, pefile.ResourceDirEntryData):
return ResourceName(entry.name.decode() if entry.name is not None else entry.id)
elif isinstance(entry, pefile.UnicodeStringWrapperPostProcessor):
return ResourceName(entry.decode())
else:
raise TypeError('ResourceName from_entry() expected ResourceDirEntryData, UnicodeStringWrapperPostProcessor or None, not {0}'.format(type(entry).__name__))
@property
def id(self) -> Union[int,None]:
"""Returns the resource id when an int, otherwise None."""
return self.value if isinstance(self.value, int) else None
@property
def name(self) -> Union[str,None]:
"""Returns the resource name when a str, otherwise None."""
return self.value if isinstance(self.value, str) else None
@staticmethod
def _cast(other:OptionalResNameArg) -> Union[str,int,None]:
if isinstance(other, ResourceName):
return other.value
elif isinstance(other, pefile.ResourceDirEntryData):
return other.name.decode() if other.name is not None else other.id
elif isinstance(other, pefile.UnicodeStringWrapperPostProcessor):
return other.decode()
return other
ResourceName(None)
_ResourceIdNamedTuple = collections.namedtuple('_ResourceIdNamedTuple', ('type','name','lang'))
class ResourceId(_ResourceIdNamedTuple):
"""ResourceId(type, name, lang)"""
type:ResourceName
name:ResourceName
lang:ResourceName
def __new__(cls, type:ResourceName=None, name:ResourceName=None, lang:ResourceName=None):
if cls is ResourceId and type is not None and name is None and lang is None:
if type.__class__ is ResourceId:
return type
if isinstance(type, tuple) and name is None and lang is None:
type, name, lang = type
        return tuple.__new__(cls, (ResourceName(type), ResourceName(name), ResourceName(lang)))
def __repr__(self) -> str:
return 'ResourceId(type={0.value!r}, name={1.value!r}, lang={2.value!r})'.format(*self)
def __str__(self) -> str:
return 'ResourceId(type={0.value!r}, name={1.value!r}, lang={2.value!r})'.format(*self)
del _ResourceIdNamedTuple
def read_struct(pe:pefile.PE, struct:pefile.Structure) -> bytes:
return pe.get_string_at_rva(struct.OffsetToData, struct.Size)
class Resource(object):
def __init__(self, resid:ResourceId, resdata:Union[pefile.ResourceDirEntryData,pefile.ResourceDataEntryData], pe:Optional[pefile.PE]=None):
if pe is not None and not isinstance(pe, pefile.PE):
raise TypeError('Resource() argument pe must be pefile.PE or None, not {0}'.format(type(pe).__name__))
if resid is None or not isinstance(resid, (ResourceId, tuple)):
raise TypeError('Resource() argument resid must be ResourceId or tuple, not {0}'.format(type(resid).__name__))
if resdata is None or not isinstance(resdata, (pefile.ResourceDirEntryData,pefile.ResourceDataEntryData)):
raise TypeError('Resource() argument resdata must be pefile.ResourceDirEntryData or pefile.ResourceDataEntryData, not {0}'.format(type(resdata).__name__))
if not isinstance(resdata, pefile.ResourceDataEntryData):
if not hasattr(resdata, 'data'):
raise ValueError('Resource() argument resdata must have a data attribute when pefile.ResourceDirEntryData')
resdata = resdata.data
self._resid:ResourceId = resid if isinstance(resid, ResourceId) else ResourceId(*resid)
self._resdata:pefile.ResourceDataEntryData = resdata
        self._data:bytes = self.read(pe) if pe is not None else None
    def read(self, pe:pefile.PE) -> bytes:
if self._data is None:
self._data = read_struct(pe, self._resdata.struct)
return self._data
@property
def data(self) -> bytes:
return self._data
@property
def offset(self) -> int:
return self._resdata.struct.OffsetToData
@property
def size(self) -> int:
return self._resdata.struct.Size
@property
def struct(self):
return self._resdata.struct
    def __repr__(self) -> str:
        return 'Resource(type={0.value!r}, name={1.value!r}, lang={2.value!r}, size={3!r})'.format(*self._resid, self.size)
def __str__(self) -> str:
return 'Resource(type={0.value!r}, name={1.value!r}, lang={2.value!r}, size={3!r})'.format(*self._resid, self.size)
def _match_entry(entry:pefile.ResourceDirEntryData, search_set:Set[ResourceName]) -> Union[ResourceName,None]:
    entry_match = ResourceName.cast_entry(entry)
    if entry_match in search_set:
        search_set.remove(entry_match)
        return ResourceName(entry_match)
    return None
def find_resource_match(entry:pefile.ResourceDirEntryData, resname:ResourceName) -> Optional[ResourceName]:
    name: ResourceName = ResourceName.from_entry(entry)
if resname is None:
return ResourceName(name)
elif name == resname:
return resname
return None
def find_resources(pe:pefile.PE, search_id:ResourceId, read_data:bool=False) -> List[Resource]:
if isinstance(pe, pefile.PE):
if not hasattr(pe, 'DIRECTORY_ENTRY_RESOURCE'):
raise TypeError('iter_resource*() argument pe (PE) does not have {0!r}'.format('DIRECTORY_ENTRY_RESOURCE'))
directory = pe.DIRECTORY_ENTRY_RESOURCE
search_id = search_id if isinstance(search_id, ResourceId) else ResourceId(*search_id)
results:List[Resource] = []
for entry_t in directory.entries:
t = find_resource_match(entry_t, search_id.type)
if t is None: continue
for entry_n in entry_t.directory.entries:
n = find_resource_match(entry_n, search_id.name)
if n is None: continue
for entry_l in entry_n.directory.entries:
l = find_resource_match(entry_l, search_id.lang)
if l is not None:
results.append(Resource(ResourceId(t,n,l), entry_l.data, pe=pe if read_data else None))
return results
| false | true |
f71fead294ee942753789629c9a72d7384c394a0 | 2,087 | py | Python | main.py | KevHg/reddit-sentiment | 383407105957b8a582a524fa29b9f21d7b2cbd23 | ["MIT"] | 3 | 2020-12-22T09:03:15.000Z | 2021-05-13T18:17:44.000Z | main.py | KevHg/reddit-sentiment | 383407105957b8a582a524fa29b9f21d7b2cbd23 | ["MIT"] | 3 | 2020-11-11T15:33:13.000Z | 2021-12-13T20:18:41.000Z | main.py | KevHg/reddit-sentiment | 383407105957b8a582a524fa29b9f21d7b2cbd23 | ["MIT"] | 1 | 2021-02-18T19:56:09.000Z | 2021-02-18T19:56:09.000Z |
import os
from scrapy.crawler import CrawlerProcess
import pandas as pd
import logging
import nltk
import json_reader
from sentiment_score import clean_text, calculate_sentiment_score
from reddit_scraper.reddit_scraper.spiders.reddit_post_scraper import RedditPostCrawler
if __name__ == '__main__':
# Initial setup: Disable scrapy logs and download NLTK files
logging.getLogger('scrapy').propagate = False
nltk.download('averaged_perceptron_tagger', quiet=True)
nltk.download('wordnet', quiet=True)
# Ask for user query
subreddit = input('Subreddit: ')
term = input('Search term: ')
term = term.replace(' ', '+')
# Start crawler process
print('[LOG] Crawling Reddit, this will take a little time...')
process = CrawlerProcess(settings={
'FEED_FORMAT': 'jl',
'FEED_URI': 'data.jl'
})
process.crawl(RedditPostCrawler,
domain=f'https://old.reddit.com/r/{subreddit}/search?q={term}&restrict_sr=on&sort=relevance&t=all')
process.start()
# Convert data file to class
print('[LOG] Creating DataFrame table...')
reddit_posts = json_reader.convert_json('data.jl')
all_comments = []
all_upvotes = []
for post in reddit_posts:
for comment in post.comments:
all_comments.append(clean_text(comment.text))
# Convert upvote text to float, e.g. '15.3k upvotes' -> 15300
upvote = comment.upvotes.split(' ')[0]
if 'k' in upvote:
upvote = upvote[:-1]
upvote = float(upvote) * 1000
all_upvotes.append(float(upvote))
df = pd.DataFrame({'comment': all_comments, 'upvotes': all_upvotes})
df = df[df.upvotes >= 1]
print('[LOG] Calculating sentiment score, this may take a longer time...')
df = calculate_sentiment_score(df)
# df.to_csv('results.csv')
normalized_result = df.sentiment.mean()
print('[LOG] Completed!\n')
print('Average sentiment:', normalized_result)
print('where +1 is most positive and -1 is most negative')
os.remove('data.jl')
 | 33.66129 | 117 | 0.661236 |
import os
from scrapy.crawler import CrawlerProcess
import pandas as pd
import logging
import nltk
import json_reader
from sentiment_score import clean_text, calculate_sentiment_score
from reddit_scraper.reddit_scraper.spiders.reddit_post_scraper import RedditPostCrawler
if __name__ == '__main__':
logging.getLogger('scrapy').propagate = False
nltk.download('averaged_perceptron_tagger', quiet=True)
nltk.download('wordnet', quiet=True)
subreddit = input('Subreddit: ')
term = input('Search term: ')
term = term.replace(' ', '+')
print('[LOG] Crawling Reddit, this will take a little time...')
process = CrawlerProcess(settings={
'FEED_FORMAT': 'jl',
'FEED_URI': 'data.jl'
})
process.crawl(RedditPostCrawler,
domain=f'https://old.reddit.com/r/{subreddit}/search?q={term}&restrict_sr=on&sort=relevance&t=all')
process.start()
print('[LOG] Creating DataFrame table...')
reddit_posts = json_reader.convert_json('data.jl')
all_comments = []
all_upvotes = []
for post in reddit_posts:
for comment in post.comments:
all_comments.append(clean_text(comment.text))
upvote = comment.upvotes.split(' ')[0]
if 'k' in upvote:
upvote = upvote[:-1]
upvote = float(upvote) * 1000
all_upvotes.append(float(upvote))
df = pd.DataFrame({'comment': all_comments, 'upvotes': all_upvotes})
df = df[df.upvotes >= 1]
print('[LOG] Calculating sentiment score, this may take a longer time...')
df = calculate_sentiment_score(df)
normalized_result = df.sentiment.mean()
print('[LOG] Completed!\n')
print('Average sentiment:', normalized_result)
print('where +1 is most positive and -1 is most negative')
os.remove('data.jl')
| true | true |
f71feb945e7c962481692be8e2384e367a3c4bbd | 3,443 | py | Python | pollbot/models/reference.py | shubham-king/poll | 677e870bea36dffbf27f24e4cdeec892b40f7128 | ["MIT"] | 112 | 2019-06-11T17:52:57.000Z | 2022-03-18T00:05:21.000Z | pollbot/models/reference.py | shubham-king/poll | 677e870bea36dffbf27f24e4cdeec892b40f7128 | ["MIT"] | 91 | 2019-05-28T11:33:40.000Z | 2022-02-27T12:12:07.000Z | pollbot/models/reference.py | shubham-king/poll | 677e870bea36dffbf27f24e4cdeec892b40f7128 | ["MIT"] | 69 | 2019-07-10T16:58:06.000Z | 2022-03-30T22:09:44.000Z |
"""The sqlalchemy model for a Reference."""
from __future__ import annotations
from sqlalchemy import Column, ForeignKey, Index, func
from sqlalchemy.orm import relationship
from sqlalchemy.types import BigInteger, DateTime, Integer, String
from pollbot.db import base
from pollbot.enums import ReferenceType
class Reference(base):
"""The model for a Reference."""
__tablename__ = "reference"
__mapper_args__ = {"confirm_deleted_rows": False}
id = Column(Integer, primary_key=True)
type = Column(String)
bot_inline_message_id = Column(String)
message_id = Column(BigInteger)
# Keep those for now, in case we migrate to mtproto
message_dc_id = Column(BigInteger)
message_access_hash = Column(BigInteger)
user_id = Column(
BigInteger,
ForeignKey("user.id", ondelete="cascade", name="user_fk"),
nullable=True,
index=True,
)
user = relationship("User", foreign_keys="Reference.user_id")
created_at = Column(DateTime, server_default=func.now(), nullable=False)
updated_at = Column(
DateTime, server_default=func.now(), onupdate=func.now(), nullable=False
)
# ManyToOne
poll_id = Column(
Integer,
ForeignKey("poll.id", ondelete="cascade", name="reference_poll"),
nullable=False,
index=True,
)
poll = relationship("Poll", back_populates="references")
def __init__(
self,
poll,
reference_type,
user=None,
message_id=None,
inline_message_id=None,
):
"""Create a new poll."""
self.poll = poll
self.type = reference_type
# There are three types of references
# 1. Messages in private chat:
# - Admin interface
# - Private vote
if (
user is not None
and message_id is not None
and reference_type
in [ReferenceType.admin.name, ReferenceType.private_vote.name]
):
self.user = user
self.message_id = message_id
# 2. Messages shared via inline query
elif (
inline_message_id is not None
and reference_type == ReferenceType.inline.name
):
self.bot_inline_message_id = inline_message_id
else:
raise Exception(
"Tried to create Reference with wrong type or missing parameters"
)
def __repr__(self):
"""Print as string."""
if self.type == ReferenceType.inline.name:
message = f"Reference {self.id}: message_id {self.message_id}"
elif self.type == ReferenceType.admin.name:
message = f"Reference {self.id}: message_id {self.message_id}, admin: {self.user.id}"
else:
message = f"Reference {self.id}: message_id {self.message_id}, user: {self.user.id}"
return message
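# The partial unique indexes below enforce one reference per (poll, user,
# message) for admin and private-vote messages, and one per (poll,
# inline_message_id) for inline shares; postgresql_where scopes each
# constraint to its reference type.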
Index(
"ix_unique_admin_reference",
Reference.poll_id,
Reference.user_id,
Reference.message_id,
unique=True,
postgresql_where=Reference.type == "admin",
)
Index(
"ix_unique_private_vote_reference",
Reference.poll_id,
Reference.user_id,
Reference.message_id,
unique=True,
postgresql_where=Reference.type == "private_vote",
)
Index(
"ix_unique_inline_share",
Reference.poll_id,
Reference.bot_inline_message_id,
unique=True,
postgresql_where=Reference.type == "inline",
)
 | 27.99187 | 97 | 0.637816 |
from __future__ import annotations
from sqlalchemy import Column, ForeignKey, Index, func
from sqlalchemy.orm import relationship
from sqlalchemy.types import BigInteger, DateTime, Integer, String
from pollbot.db import base
from pollbot.enums import ReferenceType
class Reference(base):
__tablename__ = "reference"
__mapper_args__ = {"confirm_deleted_rows": False}
id = Column(Integer, primary_key=True)
type = Column(String)
bot_inline_message_id = Column(String)
message_id = Column(BigInteger)
message_dc_id = Column(BigInteger)
message_access_hash = Column(BigInteger)
user_id = Column(
BigInteger,
ForeignKey("user.id", ondelete="cascade", name="user_fk"),
nullable=True,
index=True,
)
user = relationship("User", foreign_keys="Reference.user_id")
created_at = Column(DateTime, server_default=func.now(), nullable=False)
updated_at = Column(
DateTime, server_default=func.now(), onupdate=func.now(), nullable=False
)
poll_id = Column(
Integer,
ForeignKey("poll.id", ondelete="cascade", name="reference_poll"),
nullable=False,
index=True,
)
poll = relationship("Poll", back_populates="references")
def __init__(
self,
poll,
reference_type,
user=None,
message_id=None,
inline_message_id=None,
):
self.poll = poll
self.type = reference_type
if (
user is not None
and message_id is not None
and reference_type
in [ReferenceType.admin.name, ReferenceType.private_vote.name]
):
self.user = user
self.message_id = message_id
elif (
inline_message_id is not None
and reference_type == ReferenceType.inline.name
):
self.bot_inline_message_id = inline_message_id
else:
raise Exception(
"Tried to create Reference with wrong type or missing parameters"
)
def __repr__(self):
if self.type == ReferenceType.inline.name:
message = f"Reference {self.id}: message_id {self.message_id}"
elif self.type == ReferenceType.admin.name:
message = f"Reference {self.id}: message_id {self.message_id}, admin: {self.user.id}"
else:
message = f"Reference {self.id}: message_id {self.message_id}, user: {self.user.id}"
return message
Index(
"ix_unique_admin_reference",
Reference.poll_id,
Reference.user_id,
Reference.message_id,
unique=True,
postgresql_where=Reference.type == "admin",
)
Index(
"ix_unique_private_vote_reference",
Reference.poll_id,
Reference.user_id,
Reference.message_id,
unique=True,
postgresql_where=Reference.type == "private_vote",
)
Index(
"ix_unique_inline_share",
Reference.poll_id,
Reference.bot_inline_message_id,
unique=True,
postgresql_where=Reference.type == "inline",
)
| true | true |
f71fec1a43d05877719e3969203eaab05fae4883 | 10,145 | py | Python | metadl/core/scoring/scoring.py | mikehuisman/metadl | 61ece0364b08e67412ab87da4a41425b2e88a562 | ["Apache-2.0"] | 26 | 2020-09-23T13:04:52.000Z | 2022-03-03T03:07:49.000Z | metadl/core/scoring/scoring.py | mikehuisman/metadl | 61ece0364b08e67412ab87da4a41425b2e88a562 | ["Apache-2.0"] | 5 | 2020-11-04T13:26:09.000Z | 2021-09-17T07:42:01.000Z | metadl/core/scoring/scoring.py | mikehuisman/metadl | 61ece0364b08e67412ab87da4a41425b2e88a562 | ["Apache-2.0"] | 12 | 2020-11-03T12:01:35.000Z | 2021-12-19T03:58:50.000Z |
""" Runs the scoring procedure for the challenge.
It assumes that there exists a ./model_dir folder containing both the
submission code and the saved learner.
It will create a folder named ./scoring_output (default) in which a txt file
will contain the average score over 600 episodes. You can change the folder
name via the score_dir flag.
Usage example executed from the metadl/ repository :
python -m metadl.core.scoring.scoring --meta_test_dir=<path_dataset.meta_test>
"""
import os
from sys import path
import scipy.stats
import gin
import numpy as np
from absl import app
from absl import flags
from absl import logging
import tensorflow as tf
from metadl.data.dataset import DataGenerator
from metadl.core.ingestion.ingestion import get_gin_path, show_dir
FLAGS = flags.FLAGS
flags.DEFINE_string('meta_test_dir',
'/Users/adrian/GitInria/meta-dataset/records/',
('Directory of the meta-test dataset. This directory '
+ 'should contain records and a json spec file.'))
flags.DEFINE_string('saved_model_dir',
'./model_dir',
('Directory path that contains the participant\'s code '
+ 'along with the serialized learner from meta-fit.'))
flags.DEFINE_string('score_dir',
'./scoring_output',
'Path to the score directory.')
flags.DEFINE_string('evaltype',
'test',
'Data type on which to perform evaluation. [train, val, test]')
tf.random.set_seed(1234)
def NwayKshot_accuracy(predictions, ground_truth, metric):
""" N-way, K-shot accuracy which corresponds to the accuracy in a
multi-classification context with N classes.
Args:
predictions : tensors, sparse tensors corresponding to the predicted
labels.
ground_truth : tensors, sparse tensors corresponding the ground truth
labels.
metric : keras.metrics , the metric we use to evaluate the
classification performance of the meta-learning algorithm. We use
the SparseCategoricalAccuracy in this challenge.
Retruns:
score : Float, the resulting performance using the given metric.
"""
ground_truth = tf.expand_dims(ground_truth, axis = 1)
predictions = tf.expand_dims(predictions, axis = 1)
logging.debug('Predictions shape : {} - Ground truth shape : {}'.format(
predictions.shape, ground_truth.shape))
metric.update_state(ground_truth, predictions)
score = metric.result()
logging.debug('An episode score: {}'.format(score))
metric.reset_states()
return score
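# Note: both tensors are expanded to shape (batch, 1) before being fed to the
# Keras metric, and the metric state is reset after each episode so every
# episode is scored independently.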
def is_one_hot_vector(x, axis=None, keepdims=False):
"""Check if a vector 'x' is one-hot (i.e. one entry is 1 and others 0)."""
norm_1 = np.linalg.norm(x, ord=1, axis=axis, keepdims=keepdims)
norm_inf = np.linalg.norm(x, ord=np.inf, axis=axis, keepdims=keepdims)
return np.logical_and(norm_1 == 1, norm_inf == 1)
def write_score(score, conf_int, file_score, duration=-1):
"""Write score of the k-th task in the given file_score."""
file_score.write('set1_score: {:.6f}\n'.format(float(score)))
file_score.write('conf_int: {:.3f}\n'.format(float(conf_int)))
file_score.write('Duration: {:.6f}\n'.format(float(duration)))
def extract_elapsed_time(saved_model_dir):
""" Extracts elapsed time from the metadata file. It corresponds to the
meta-training time, the duration of the ingestion process.
"""
if not os.path.isdir(saved_model_dir):
raise ValueError('Saved model directory does not exists.')
if os.path.isfile(os.path.join(saved_model_dir, 'metadata')):
with open(os.path.join(saved_model_dir, 'metadata'), 'r') as f :
lines = f.readlines()
for line in lines :
splitted_line = line.split(' ')
for k, word in enumerate(splitted_line):
if 'elapsed' in splitted_line[k]:
elapsed_time = float(splitted_line[k+1])
return elapsed_time
return -1
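# The parser above is deliberately loose: it scans each metadata line for a
# token containing 'elapsed' and reads the next whitespace-separated token as
# a float, so a (hypothetical) line like 'ingestion elapsed 1234.5' works.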
def process_task(task):
"""We are using the meta-dataset code to generate episodes from a dataset.
Generated episodes have a specific format. Each is processed such that the
the support and query sets are ready to be used by the participants. Each
set is returned as a tf.data.Dataset object.
The que_labs are kept hidden.
Returns :
support_dataset : tf.data.Dataset containing the support examples and
labels.
query_dataset : tf.data.Dataset containing the query examples
que_labs : tuple (query_batch_size, 1), the query examples labels
i.e. the ground truth labels.
"""
sup_set = tf.data.Dataset.from_tensor_slices(\
(task[0][1], task[0][0]))
dim = task[0][4].shape[1]
arr = np.arange(dim)
np.random.shuffle(arr) # shuffling arr
query_labs = task[0][4]
query_imgs = task[0][3]
query_labs_s = tf.gather(query_labs, arr, axis=1)
query_imgs_s = tf.gather(query_imgs, arr, axis=1)
que_set = tf.data.Dataset.from_tensor_slices(
(query_labs_s, query_imgs_s)
)
new_ds = tf.data.Dataset.zip((sup_set, que_set))
for ((supp_labs, supp_img), (que_labs, que_img)) \
in new_ds :
logging.debug('Supp labs : {}'.format(supp_labs))
logging.debug('Query labs : {}'.format(que_labs))
support_set = tf.data.Dataset.from_tensor_slices(\
(supp_img, supp_labs))
query_set = tf.data.Dataset.from_tensor_slices(\
(que_img,))
support_set = support_set.batch(5)
query_set = query_set.batch(95)
return support_set, query_set, que_labs
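# As used above, task[0] packs the episode with support images/labels at
# indices 0/1 and query images/labels at indices 3/4; the query columns are
# gathered with one shared permutation so images stay aligned with their
# hidden labels.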
def scoring(argv):
"""
For each task, load and fit the Learner with the support set and evaluate
the submission performance with the query set.
A directory 'scoring_output' is created and contains a txt file that
contains the submission score and duration. Note that the former is the
time elapsed during the ingestion program and hence the meta-fit()
duration.
The metric considered here is the Sparse Categorical Accuracy for a
5 classes image classification problem.
"""
del argv
saved_model_dir = FLAGS.saved_model_dir
meta_test_dir = FLAGS.meta_test_dir
eval_type = FLAGS.evaltype
# Making eval type compatible with DataGenerator specs
if eval_type == 'train' or eval_type == 'val':
data_generator_eval_type = 'train'
elif eval_type == 'test':
data_generator_eval_type = 'test'
# Use CodaLab's path `run/input/ref` in parallel with `run/input/res`
if not os.path.isdir(meta_test_dir):
meta_test_dir = os.path.join(saved_model_dir, os.pardir, 'ref')
# Evaluation type scenario: if meta_test is specified -> act as normal
# scoring on meta_test data
if (eval_type == 'train' or eval_type == 'val') and 'meta_test' in meta_test_dir:
raise ValueError('Cannot perform train/val evaluation on meta-test data!')
#if 'meta_test' not in meta_test_dir:
# if eval_type == 'test':
# meta_test_dir = os.path.join(meta_test_dir, 'meta_test')
# else:
# meta_test_dir = os.path.join(meta_test_dir, 'meta_train')
code_dir = os.path.join(saved_model_dir, 'code_dir')
score_dir = FLAGS.score_dir
path.append(code_dir)
from model import MyLearner
if(os.path.exists(os.path.join(code_dir, 'model.gin'))):
gin.parse_config_file(os.path.join(code_dir, 'model.gin'))
logging.info('Ingestion done! Starting scoring process ... ')
logging.info('Creating the meta-test episode generator ... \n ')
generator = DataGenerator(path_to_records=meta_test_dir,
batch_config=None,
episode_config=[28, 5, 1, 19],
pool= data_generator_eval_type,
mode='episode')
if eval_type == 'test':
meta_test_dataset = generator.meta_test_pipeline
elif eval_type == 'train':
meta_test_dataset = generator.meta_train_pipeline
elif eval_type == 'val':
meta_test_dataset = generator.meta_valid_pipeline
else:
raise ValueError('Wrong eval_type : {}'.format(eval_type))
logging.info('Evaluating performance on episodes ... ')
meta_test_dataset = meta_test_dataset.batch(1)
meta_test_dataset = meta_test_dataset.prefetch(5)
learner = MyLearner()
if (not os.path.isdir(score_dir)):
os.mkdir(score_dir)
score_file = os.path.join(score_dir, 'scores.txt')
results = []
metric = tf.metrics.SparseCategoricalAccuracy()
nbr_episodes = 600
    for k, task in enumerate(meta_test_dataset):
support_set, query_set, ground_truth = process_task(task)
learner.load(saved_model_dir)
predictor = learner.fit(support_set)
predictions = predictor.predict(query_set)
score = NwayKshot_accuracy(predictions, ground_truth, metric)
results.append(score)
logging.debug('Score on {} : {}'.format(k, score))
logging.debug('Results : {}'.format(results[:20]))
if(k > nbr_episodes):
break
def mean_confidence_interval(data, confidence=0.95):
a = 1.0 * np.array(data)
n = len(a)
m, se = np.mean(a), scipy.stats.sem(a)
h = se * scipy.stats.t.ppf((1 + confidence) / 2., n-1)
return m, h
m, conf_int = mean_confidence_interval(results)
with open(score_file, 'w') as f :
write_score(m,
conf_int,
f,
extract_elapsed_time(saved_model_dir))
logging.info(('Scoring done! The average score over {} '
+ 'episodes is : {:.3%}').format(nbr_episodes,
sum(results)/len(results))
)
if __name__ == '__main__':
np.random.seed(seed=1234)
tf.get_logger().setLevel('ERROR')
app.run(scoring)
 | 38.869732 | 99 | 0.648398 |
import os
from sys import path
import scipy.stats
import gin
import numpy as np
from absl import app
from absl import flags
from absl import logging
import tensorflow as tf
from metadl.data.dataset import DataGenerator
from metadl.core.ingestion.ingestion import get_gin_path, show_dir
FLAGS = flags.FLAGS
flags.DEFINE_string('meta_test_dir',
'/Users/adrian/GitInria/meta-dataset/records/',
('Directory of the meta-test dataset. This directory '
+ 'should contain records and a json spec file.'))
flags.DEFINE_string('saved_model_dir',
'./model_dir',
('Directory path that contains the participant\'s code '
+ 'along with the serialized learner from meta-fit.'))
flags.DEFINE_string('score_dir',
'./scoring_output',
'Path to the score directory.')
flags.DEFINE_string('evaltype',
'test',
'Data type on which to perform evaluation. [train, val, test]')
tf.random.set_seed(1234)
def NwayKshot_accuracy(predictions, ground_truth, metric):
ground_truth = tf.expand_dims(ground_truth, axis = 1)
predictions = tf.expand_dims(predictions, axis = 1)
logging.debug('Predictions shape : {} - Ground truth shape : {}'.format(
predictions.shape, ground_truth.shape))
metric.update_state(ground_truth, predictions)
score = metric.result()
logging.debug('An episode score: {}'.format(score))
metric.reset_states()
return score
def is_one_hot_vector(x, axis=None, keepdims=False):
norm_1 = np.linalg.norm(x, ord=1, axis=axis, keepdims=keepdims)
norm_inf = np.linalg.norm(x, ord=np.inf, axis=axis, keepdims=keepdims)
return np.logical_and(norm_1 == 1, norm_inf == 1)
def write_score(score, conf_int, file_score, duration=-1):
file_score.write('set1_score: {:.6f}\n'.format(float(score)))
file_score.write('conf_int: {:.3f}\n'.format(float(conf_int)))
file_score.write('Duration: {:.6f}\n'.format(float(duration)))
def extract_elapsed_time(saved_model_dir):
if not os.path.isdir(saved_model_dir):
raise ValueError('Saved model directory does not exists.')
if os.path.isfile(os.path.join(saved_model_dir, 'metadata')):
with open(os.path.join(saved_model_dir, 'metadata'), 'r') as f :
lines = f.readlines()
for line in lines :
splitted_line = line.split(' ')
for k, word in enumerate(splitted_line):
if 'elapsed' in splitted_line[k]:
elapsed_time = float(splitted_line[k+1])
return elapsed_time
return -1
def process_task(task):
sup_set = tf.data.Dataset.from_tensor_slices(\
(task[0][1], task[0][0]))
dim = task[0][4].shape[1]
arr = np.arange(dim)
np.random.shuffle(arr) # shuffling arr
query_labs = task[0][4]
query_imgs = task[0][3]
query_labs_s = tf.gather(query_labs, arr, axis=1)
query_imgs_s = tf.gather(query_imgs, arr, axis=1)
que_set = tf.data.Dataset.from_tensor_slices(
(query_labs_s, query_imgs_s)
)
new_ds = tf.data.Dataset.zip((sup_set, que_set))
for ((supp_labs, supp_img), (que_labs, que_img)) \
in new_ds :
logging.debug('Supp labs : {}'.format(supp_labs))
logging.debug('Query labs : {}'.format(que_labs))
support_set = tf.data.Dataset.from_tensor_slices(\
(supp_img, supp_labs))
query_set = tf.data.Dataset.from_tensor_slices(\
(que_img,))
support_set = support_set.batch(5)
query_set = query_set.batch(95)
return support_set, query_set, que_labs
def scoring(argv):
del argv
saved_model_dir = FLAGS.saved_model_dir
meta_test_dir = FLAGS.meta_test_dir
eval_type = FLAGS.evaltype
# Making eval type compatible with DataGenerator specs
if eval_type == 'train' or eval_type == 'val':
data_generator_eval_type = 'train'
elif eval_type == 'test':
data_generator_eval_type = 'test'
# Use CodaLab's path `run/input/ref` in parallel with `run/input/res`
if not os.path.isdir(meta_test_dir):
meta_test_dir = os.path.join(saved_model_dir, os.pardir, 'ref')
if (eval_type == 'train' or eval_type == 'val') and 'meta_test' in meta_test_dir:
raise ValueError('Cannot perform train/val evaluation on meta-test data!')
code_dir = os.path.join(saved_model_dir, 'code_dir')
score_dir = FLAGS.score_dir
path.append(code_dir)
from model import MyLearner
if(os.path.exists(os.path.join(code_dir, 'model.gin'))):
gin.parse_config_file(os.path.join(code_dir, 'model.gin'))
logging.info('Ingestion done! Starting scoring process ... ')
logging.info('Creating the meta-test episode generator ... \n ')
generator = DataGenerator(path_to_records=meta_test_dir,
batch_config=None,
episode_config=[28, 5, 1, 19],
pool= data_generator_eval_type,
mode='episode')
if eval_type == 'test':
meta_test_dataset = generator.meta_test_pipeline
elif eval_type == 'train':
meta_test_dataset = generator.meta_train_pipeline
elif eval_type == 'val':
meta_test_dataset = generator.meta_valid_pipeline
else:
raise ValueError('Wrong eval_type : {}'.format(eval_type))
logging.info('Evaluating performance on episodes ... ')
meta_test_dataset = meta_test_dataset.batch(1)
meta_test_dataset = meta_test_dataset.prefetch(5)
learner = MyLearner()
if (not os.path.isdir(score_dir)):
os.mkdir(score_dir)
score_file = os.path.join(score_dir, 'scores.txt')
results = []
metric = tf.metrics.SparseCategoricalAccuracy()
nbr_episodes = 600
for k , task in enumerate(meta_test_dataset) :
support_set, query_set, ground_truth = process_task(task)
learner.load(saved_model_dir)
predictor = learner.fit(support_set)
predictions = predictor.predict(query_set)
score = NwayKshot_accuracy(predictions, ground_truth, metric)
results.append(score)
logging.debug('Score on {} : {}'.format(k, score))
logging.debug('Results : {}'.format(results[:20]))
if(k > nbr_episodes):
break
def mean_confidence_interval(data, confidence=0.95):
a = 1.0 * np.array(data)
n = len(a)
m, se = np.mean(a), scipy.stats.sem(a)
h = se * scipy.stats.t.ppf((1 + confidence) / 2., n-1)
return m, h
m, conf_int = mean_confidence_interval(results)
with open(score_file, 'w') as f :
write_score(m,
conf_int,
f,
extract_elapsed_time(saved_model_dir))
logging.info(('Scoring done! The average score over {} '
+ 'episodes is : {:.3%}').format(nbr_episodes,
sum(results)/len(results))
)
if __name__ == '__main__':
np.random.seed(seed=1234)
tf.get_logger().setLevel('ERROR')
app.run(scoring)
| true | true |
f71fed6c463f4fb9305f4215a3d3f237674e9c98 | 6,399 | py | Python | Graph-based/processor/recognition.py | EnTimeMent/Group-Behavior-Recognition | d6606e9e7bef836a9ccc5b4ada66933a4770171c | [
"MIT"
] | 3 | 2020-12-29T04:07:58.000Z | 2022-01-11T14:47:16.000Z | Graph-based/processor/recognition.py | EnTimeMent/Group-Behavior-Recognition | d6606e9e7bef836a9ccc5b4ada66933a4770171c | [
"MIT"
] | 1 | 2021-01-02T10:28:07.000Z | 2021-01-04T18:01:42.000Z | Graph-based/processor/recognition.py | EnTimeMent/Group-Behavior-Recognition | d6606e9e7bef836a9ccc5b4ada66933a4770171c | [
"MIT"
] | 1 | 2022-01-09T12:55:41.000Z | 2022-01-09T12:55:41.000Z | #!/usr/bin/env python
# pylint: disable=W0201
import sys
import argparse
import yaml
import numpy as np
# torch
import torch
import torch.nn as nn
import torch.optim as optim
# torchlight
import torchlight
from torchlight import str2bool
from torchlight import DictAction
from torchlight import import_class
from .processor import Processor
from sklearn.metrics import confusion_matrix
from sklearn.metrics import accuracy_score
def weights_init(m):
classname = m.__class__.__name__
if classname.find('Conv1d') != -1:
m.weight.data.normal_(0.0, 0.02)
if m.bias is not None:
m.bias.data.fill_(0)
elif classname.find('Conv2d') != -1:
m.weight.data.normal_(0.0, 0.02)
if m.bias is not None:
m.bias.data.fill_(0)
elif classname.find('BatchNorm') != -1:
m.weight.data.normal_(1.0, 0.02)
m.bias.data.fill_(0)
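# Note: this follows the common DCGAN-style initialization (N(0, 0.02) for
# convolution weights, N(1, 0.02) for batch-norm scales). It is applied
# recursively to every submodule via `self.model.apply(weights_init)` in
# `load_model` below.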
class REC_Processor(Processor):
"""
    Processor for Skeleton-based Action Recognition
"""
def load_model(self):
# print("load model")
self.model = self.io.load_model(self.arg.model,
**(self.arg.model_args))
self.model.apply(weights_init)
self.loss = nn.CrossEntropyLoss()
# self.loss = nn.BCEWithLogitsLoss()
def load_optimizer(self):
if self.arg.optimizer == 'SGD':
self.optimizer = optim.SGD(
self.model.parameters(),
lr=self.arg.base_lr,
momentum=0.9,
nesterov=self.arg.nesterov,
weight_decay=self.arg.weight_decay)
elif self.arg.optimizer == 'Adam':
self.optimizer = optim.Adam(
self.model.parameters(),
lr=self.arg.base_lr,
weight_decay=self.arg.weight_decay)
else:
raise ValueError()
def adjust_lr(self):
if self.arg.optimizer == 'SGD' and self.arg.step:
lr = self.arg.base_lr * (
0.1**np.sum(self.meta_info['epoch'] >= np.array(self.arg.step)))
for param_group in self.optimizer.param_groups:
param_group['lr'] = lr
self.lr = lr
else:
self.lr = self.arg.base_lr
def show_topk(self, k):
rank = self.result.argsort()
hit_top_k = [l in rank[i, -k:] for i, l in enumerate(self.label)]
accuracy = sum(hit_top_k) * 1.0 / len(hit_top_k)
self.io.print_log('\tTop{}: {:.2f}%'.format(k, 100 * accuracy))
def train(self):
self.model.train()
self.adjust_lr()
loader = self.data_loader['train']
loss_value = []
result_frag = []
label_frag = []
# print("train")
for data, label in loader:
# get data
data = data.float().to(self.dev)
label = label.long().to(self.dev)
# forward
output = self.model(data)
result_frag.extend(
output.data.cpu().numpy().argmax(axis=1))
label_frag.extend(label.data.cpu().numpy())
# print(output)
loss = self.loss(output, label)
# print(label)
# backward
self.optimizer.zero_grad()
loss.backward()
self.optimizer.step()
# statistics
self.iter_info['loss'] = loss.data.item()
self.iter_info['lr'] = '{:.6f}'.format(self.lr)
loss_value.append(self.iter_info['loss'])
self.show_iter_info()
self.meta_info['iter'] += 1
ac = accuracy_score(label_frag, result_frag)
# print(result_frag)
# print(label_frag)
print("train acc: {}".format(ac))
self.epoch_info['mean_loss'] = np.mean(loss_value)
self.show_epoch_info()
# self.io.print_timer()
def test(self, evaluation=True):
self.model.eval()
loader = self.data_loader['test']
loss_value = []
result_frag = []
label_frag = []
for data, label in loader:
# get data
data = data.float().to(self.dev)
label = label.long().to(self.dev)
# inference
with torch.no_grad():
output = self.model(data)
result_frag.append(output.data.cpu().numpy())
# get loss
if evaluation:
loss = self.loss(output, label)
loss_value.append(loss.item())
label_frag.append(label.data.cpu().numpy())
self.result = np.concatenate(result_frag)
# print(self.result)
if evaluation:
self.label = np.concatenate(label_frag)
self.epoch_info['mean_loss'] = np.mean(loss_value)
self.show_epoch_info()
# show top-k accuracy
for k in self.arg.show_topk:
self.show_topk(k)
top = self.result.argmax(axis=1)
print(top)
print(self.label)
cm = confusion_matrix(self.label, top)
print(cm)
@staticmethod
def get_parser(add_help=False):
# parameter priority: command line > config > default
parent_parser = Processor.get_parser(add_help=False)
parser = argparse.ArgumentParser(
add_help=add_help,
parents=[parent_parser],
description='Spatial Temporal Graph Convolution Network')
# region arguments yapf: disable
# evaluation
parser.add_argument('--show_topk', type=int,
default=[1], nargs='+', help='which Top K accuracy will be shown')
# optim
parser.add_argument('--base_lr', type=float,
default=0.01, help='initial learning rate')
parser.add_argument('--step', type=int, default=[], nargs='+',
help='the epoch where optimizer reduce the learning rate')
parser.add_argument('--optimizer', default='SGD',
help='type of optimizer')
parser.add_argument('--nesterov', type=str2bool,
default=True, help='use nesterov or not')
parser.add_argument('--weight_decay', type=float,
default=0.0001, help='weight decay for optimizer')
# endregion yapf: enable
return parser
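# A minimal, self-contained sketch of the top-k membership check used in
# `show_topk` above (toy scores only; guarded so it never runs on import):
if __name__ == '__main__':
    scores = np.array([[0.1, 0.7, 0.2],
                       [0.5, 0.3, 0.2]])
    labels = [1, 2]
    k = 2
    rank = scores.argsort()  # ascending per row; top-k lives in rank[i, -k:]
    hit_top_k = [l in rank[i, -k:] for i, l in enumerate(labels)]
    print(sum(hit_top_k) * 1.0 / len(hit_top_k))  # 0.5: only sample 0 hits its top-2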
 | 32.482234 | 94 | 0.556493 | true | true |
f71fedf23526603a8b5b482439d51773bdec5bd3 | 3,347 | py | Python | medseer/migrations/0001_initial.py | noureldin-eg/medseer | 8a68cd92a757ab3141081547d322c0c6b2056d66 | [
"Apache-2.0"
] | null | null | null | medseer/migrations/0001_initial.py | noureldin-eg/medseer | 8a68cd92a757ab3141081547d322c0c6b2056d66 | [
"Apache-2.0"
] | null | null | null | medseer/migrations/0001_initial.py | noureldin-eg/medseer | 8a68cd92a757ab3141081547d322c0c6b2056d66 | [
"Apache-2.0"
] | null | null | null | # Generated by Django 3.2.10 on 2022-01-25 22:13
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='Author',
fields=[
('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('forename', models.CharField(max_length=100)),
('surname', models.CharField(max_length=100)),
('email', models.EmailField(blank=True, max_length=254, null=True, unique=True)),
('created_at', models.DateTimeField(auto_now_add=True)),
('modified_at', models.DateTimeField(auto_now=True)),
],
),
migrations.CreateModel(
name='Journal',
fields=[
('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=200, unique=True)),
('rank', models.PositiveSmallIntegerField()),
('created_at', models.DateTimeField(auto_now_add=True)),
('modified_at', models.DateTimeField(auto_now=True)),
],
),
migrations.CreateModel(
name='Organization',
fields=[
('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=200, unique=True)),
('rank', models.PositiveSmallIntegerField()),
('created_at', models.DateTimeField(auto_now_add=True)),
('modified_at', models.DateTimeField(auto_now=True)),
],
),
migrations.CreateModel(
name='Paper',
fields=[
('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('pdf', models.FileField(null=True, unique=True, upload_to='pdfs/%Y/%m/%d/')),
('tei', models.FileField(null=True, unique=True, upload_to='xmls/%Y/%m/%d/')),
('title', models.TextField(blank=True, null=True, unique=True)),
('abstract', models.TextField(blank=True, null=True, unique=True)),
('doi', models.CharField(blank=True, max_length=100, null=True, unique=True)),
('url', models.URLField(blank=True, null=True, unique=True)),
('created_at', models.DateTimeField(auto_now_add=True)),
('modified_at', models.DateTimeField(auto_now=True)),
('authors', models.ManyToManyField(to='medseer.Author')),
('journal', models.ForeignKey(null=True, on_delete=django.db.models.deletion.PROTECT, to='medseer.journal')),
],
),
migrations.AddField(
model_name='author',
name='organization',
field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.PROTECT, to='medseer.organization'),
),
migrations.AddConstraint(
model_name='author',
constraint=models.UniqueConstraint(fields=('forename', 'surname'), name='unique_author_name'),
),
]
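# This initial migration would typically be applied with Django's CLI
# (app label inferred from the module path; command shown for illustration):
#   python manage.py migrate medseer 0001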
 | 46.486111 | 125 | 0.582611 | true | true |
f71fee147634858badedf4ea69e3f4bc26bb7e78 | 2,052 | py | Python | tests/parsers/plist_plugins/ipod.py | jeppetrost/plaso | b48008c6ea79950eeeef3a05b3a859086c8704b6 | [
"Apache-2.0"
] | null | null | null | tests/parsers/plist_plugins/ipod.py | jeppetrost/plaso | b48008c6ea79950eeeef3a05b3a859086c8704b6 | [
"Apache-2.0"
] | null | null | null | tests/parsers/plist_plugins/ipod.py | jeppetrost/plaso | b48008c6ea79950eeeef3a05b3a859086c8704b6 | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Tests for the iPod plist plugin."""
from __future__ import unicode_literals
import unittest
from plaso.formatters import ipod as _ # pylint: disable=unused-import
from plaso.lib import definitions
from plaso.parsers.plist_plugins import ipod
from tests import test_lib as shared_test_lib
from tests.parsers.plist_plugins import test_lib
class TestIPodPlugin(test_lib.PlistPluginTestCase):
"""Tests for the iPod plist plugin."""
@shared_test_lib.skipUnlessHasTestFile(['com.apple.iPod.plist'])
def testProcess(self):
"""Tests the Process function."""
plist_name = 'com.apple.iPod.plist'
plugin = ipod.IPodPlugin()
storage_writer = self._ParsePlistFileWithPlugin(
plugin, [plist_name], plist_name)
self.assertEqual(storage_writer.number_of_warnings, 0)
self.assertEqual(storage_writer.number_of_events, 4)
# The order in which PlistParser generates events is nondeterministic
# hence we sort the events.
events = list(storage_writer.GetSortedEvents())
event = events[0]
self.CheckTimestamp(event.timestamp, '1995-11-22 18:25:07.000000')
self.assertEqual(event.device_id, '0000A11300000000')
event = events[2]
self.CheckTimestamp(event.timestamp, '2013-10-09 19:27:54.000000')
expected_message = (
'Device ID: 4C6F6F6E65000000 '
'Type: iPhone [10016] '
'Connected 1 times '
'Serial nr: 526F676572 '
'IMEI [012345678901234]')
expected_short_message = '{0:s}...'.format(expected_message[:77])
self._TestGetMessageStrings(event, expected_message, expected_short_message)
self.assertEqual(
event.timestamp_desc, definitions.TIME_DESCRIPTION_LAST_CONNECTED)
self.assertEqual(event.device_class, 'iPhone')
self.assertEqual(event.device_id, '4C6F6F6E65000000')
self.assertEqual(event.firmware_version, 256)
self.assertEqual(event.imei, '012345678901234')
self.assertEqual(event.use_count, 1)
if __name__ == '__main__':
unittest.main()
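# Can also be run through the unittest discovery entry point, e.g.:
#   python -m unittest tests.parsers.plist_plugins.ipod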
 | 30.176471 | 80 | 0.729532 | true | true |
f71fee22e27eb7d42dc3efe0a61407b797d283a4 | 63,809 | py | Python | pandas/core/groupby/groupby.py | paritoshmittal09/pandas | 862d2d89b8fe0a93ec8e714315175e2eba1fa6e5 | [
"BSD-3-Clause"
] | null | null | null | pandas/core/groupby/groupby.py | paritoshmittal09/pandas | 862d2d89b8fe0a93ec8e714315175e2eba1fa6e5 | [
"BSD-3-Clause"
] | null | null | null | pandas/core/groupby/groupby.py | paritoshmittal09/pandas | 862d2d89b8fe0a93ec8e714315175e2eba1fa6e5 | [
"BSD-3-Clause"
] | null | null | null | """
Provide the groupby split-apply-combine paradigm. Define the GroupBy
class providing the base-class of operations.
The SeriesGroupBy and DataFrameGroupBy sub-class
(defined in pandas.core.groupby.generic)
expose these user-facing objects to provide specific functionality.
"""
import types
from functools import wraps, partial
import datetime
import collections
import warnings
from contextlib import contextmanager
import numpy as np
from pandas._libs import groupby as libgroupby, Timestamp
from pandas.util._validators import validate_kwargs
from pandas.util._decorators import (
cache_readonly, Substitution, Appender)
from pandas import compat
from pandas.compat import zip, range, callable, set_function_name
from pandas.compat.numpy import function as nv
from pandas.core.dtypes.common import (
is_numeric_dtype,
is_scalar,
ensure_float)
from pandas.core.dtypes.cast import maybe_downcast_to_dtype
from pandas.core.dtypes.missing import isna, notna
from pandas.core.groupby import base
from pandas.core.base import (PandasObject, SelectionMixin, GroupByError,
DataError, SpecificationError)
from pandas.core.index import Index, MultiIndex
from pandas.core.generic import NDFrame
from pandas.core.frame import DataFrame
from pandas.core.series import Series
from pandas.core.sorting import get_group_index_sorter
import pandas.core.common as com
import pandas.core.algorithms as algorithms
from pandas.core.config import option_context
_doc_template = """
See also
--------
pandas.Series.%(name)s
pandas.DataFrame.%(name)s
pandas.Panel.%(name)s
"""
_apply_docs = dict(
template="""
Apply function `func` group-wise and combine the results together.
The function passed to `apply` must take a {input} as its first
argument and return a DataFrame, Series or scalar. `apply` will
then take care of combining the results back together into a single
dataframe or series. `apply` is therefore a highly flexible
grouping method.
While `apply` is a very flexible method, its downside is that
using it can be quite a bit slower than using more specific methods
    like `agg` or `transform`. Pandas offers a wide range of methods that will
be much faster than using `apply` for their specific purposes, so try to
use them before reaching for `apply`.
Parameters
----------
func : callable
A callable that takes a {input} as its first argument, and
returns a dataframe, a series or a scalar. In addition the
callable may take positional and keyword arguments.
args, kwargs : tuple and dict
Optional positional and keyword arguments to pass to `func`.
Returns
-------
applied : Series or DataFrame
Notes
-----
In the current implementation `apply` calls `func` twice on the
first group to decide whether it can take a fast or slow code
path. This can lead to unexpected behavior if `func` has
side-effects, as they will take effect twice for the first
group.
Examples
--------
{examples}
See also
--------
pipe : Apply function to the full GroupBy object instead of to each
group.
aggregate : Apply aggregate function to the GroupBy object.
transform : Apply function column-by-column to the GroupBy object.
Series.apply : Apply a function to a Series.
DataFrame.apply : Apply a function to each row or column of a DataFrame.
""",
dataframe_examples="""
>>> df = pd.DataFrame({'A': 'a a b'.split(),
'B': [1,2,3],
'C': [4,6, 5]})
>>> g = df.groupby('A')
Notice that ``g`` has two groups, ``a`` and ``b``.
Calling `apply` in various ways, we can get different grouping results:
Example 1: below the function passed to `apply` takes a DataFrame as
its argument and returns a DataFrame. `apply` combines the result for
each group together into a new DataFrame:
>>> g[['B', 'C']].apply(lambda x: x / x.sum())
B C
0 0.333333 0.4
1 0.666667 0.6
2 1.000000 1.0
Example 2: The function passed to `apply` takes a DataFrame as
its argument and returns a Series. `apply` combines the result for
each group together into a new DataFrame:
>>> g[['B', 'C']].apply(lambda x: x.max() - x.min())
B C
A
a 1 2
b 0 0
Example 3: The function passed to `apply` takes a DataFrame as
its argument and returns a scalar. `apply` combines the result for
each group together into a Series, including setting the index as
appropriate:
>>> g.apply(lambda x: x.C.max() - x.B.min())
A
a 5
b 2
dtype: int64
""",
series_examples="""
>>> s = pd.Series([0, 1, 2], index='a a b'.split())
>>> g = s.groupby(s.index)
From ``s`` above we can see that ``g`` has two groups, ``a`` and ``b``.
Calling `apply` in various ways, we can get different grouping results:
Example 1: The function passed to `apply` takes a Series as
its argument and returns a Series. `apply` combines the result for
each group together into a new Series:
>>> g.apply(lambda x: x*2 if x.name == 'b' else x/2)
0 0.0
1 0.5
2 4.0
dtype: float64
Example 2: The function passed to `apply` takes a Series as
its argument and returns a scalar. `apply` combines the result for
each group together into a Series, including setting the index as
appropriate:
>>> g.apply(lambda x: x.max() - x.min())
a 1
b 0
dtype: int64
""")
_pipe_template = """\
Apply a function `func` with arguments to this %(klass)s object and return
the function's result.
%(versionadded)s
Use `.pipe` when you want to improve readability by chaining together
functions that expect Series, DataFrames, GroupBy or Resampler objects.
Instead of writing
>>> h(g(f(df.groupby('group')), arg1=a), arg2=b, arg3=c)
You can write
>>> (df.groupby('group')
... .pipe(f)
... .pipe(g, arg1=a)
... .pipe(h, arg2=b, arg3=c))
which is much more readable.
Parameters
----------
func : callable or tuple of (callable, string)
Function to apply to this %(klass)s object or, alternatively,
a `(callable, data_keyword)` tuple where `data_keyword` is a
string indicating the keyword of `callable` that expects the
%(klass)s object.
args : iterable, optional
positional arguments passed into `func`.
kwargs : dict, optional
a dictionary of keyword arguments passed into `func`.
Returns
-------
object : the return type of `func`.
Notes
-----
See more `here
<http://pandas.pydata.org/pandas-docs/stable/groupby.html#piping-function-calls>`_
Examples
--------
%(examples)s
See Also
--------
pandas.Series.pipe : Apply a function with arguments to a series
pandas.DataFrame.pipe: Apply a function with arguments to a dataframe
apply : Apply function to each group instead of to the
full %(klass)s object.
"""
_transform_template = """
Call function producing a like-indexed %(klass)s on each group and
return a %(klass)s having the same indexes as the original object
filled with the transformed values
Parameters
----------
f : function
Function to apply to each group
Notes
-----
Each group is endowed with the attribute 'name' in case you need to know
which group you are working on.
The current implementation imposes three requirements on f:
* f must return a value that either has the same shape as the input
subframe or can be broadcast to the shape of the input subframe.
  For example, if f returns a scalar, it will be broadcast to have the
same shape as the input subframe.
* if this is a DataFrame, f must support application column-by-column
in the subframe. If f also supports application to the entire subframe,
then a fast path is used starting from the second chunk.
* f must not mutate groups. Mutation is not supported and may
produce unexpected results.
Returns
-------
%(klass)s
See also
--------
aggregate, transform
Examples
--------
# Same shape
>>> df = pd.DataFrame({'A' : ['foo', 'bar', 'foo', 'bar',
... 'foo', 'bar'],
... 'B' : ['one', 'one', 'two', 'three',
... 'two', 'two'],
... 'C' : [1, 5, 5, 2, 5, 5],
... 'D' : [2.0, 5., 8., 1., 2., 9.]})
>>> grouped = df.groupby('A')
>>> grouped.transform(lambda x: (x - x.mean()) / x.std())
C D
0 -1.154701 -0.577350
1 0.577350 0.000000
2 0.577350 1.154701
3 -1.154701 -1.000000
4 0.577350 -0.577350
5 0.577350 1.000000
# Broadcastable
>>> grouped.transform(lambda x: x.max() - x.min())
C D
0 4 6.0
1 3 8.0
2 4 6.0
3 3 8.0
4 4 6.0
5 3 8.0
"""
class GroupByPlot(PandasObject):
"""
Class implementing the .plot attribute for groupby objects
"""
def __init__(self, groupby):
self._groupby = groupby
def __call__(self, *args, **kwargs):
def f(self):
return self.plot(*args, **kwargs)
f.__name__ = 'plot'
return self._groupby.apply(f)
def __getattr__(self, name):
def attr(*args, **kwargs):
def f(self):
return getattr(self.plot, name)(*args, **kwargs)
return self._groupby.apply(f)
return attr
@contextmanager
def _group_selection_context(groupby):
"""
set / reset the _group_selection_context
"""
groupby._set_group_selection()
yield groupby
groupby._reset_group_selection()
class _GroupBy(PandasObject, SelectionMixin):
_group_selection = None
_apply_whitelist = frozenset([])
def __init__(self, obj, keys=None, axis=0, level=None,
grouper=None, exclusions=None, selection=None, as_index=True,
sort=True, group_keys=True, squeeze=False,
observed=False, **kwargs):
self._selection = selection
if isinstance(obj, NDFrame):
obj._consolidate_inplace()
self.level = level
if not as_index:
if not isinstance(obj, DataFrame):
raise TypeError('as_index=False only valid with DataFrame')
if axis != 0:
raise ValueError('as_index=False only valid for axis=0')
self.as_index = as_index
self.keys = keys
self.sort = sort
self.group_keys = group_keys
self.squeeze = squeeze
self.observed = observed
self.mutated = kwargs.pop('mutated', False)
if grouper is None:
from pandas.core.groupby.grouper import _get_grouper
grouper, exclusions, obj = _get_grouper(obj, keys,
axis=axis,
level=level,
sort=sort,
observed=observed,
mutated=self.mutated)
self.obj = obj
self.axis = obj._get_axis_number(axis)
self.grouper = grouper
self.exclusions = set(exclusions) if exclusions else set()
# we accept no other args
validate_kwargs('group', kwargs, {})
def __len__(self):
return len(self.groups)
def __unicode__(self):
# TODO: Better unicode/repr for GroupBy object
return object.__repr__(self)
def _assure_grouper(self):
"""
we create the grouper on instantiation
sub-classes may have a different policy
"""
pass
@property
def groups(self):
""" dict {group name -> group labels} """
self._assure_grouper()
return self.grouper.groups
@property
def ngroups(self):
self._assure_grouper()
return self.grouper.ngroups
@property
def indices(self):
""" dict {group name -> group indices} """
self._assure_grouper()
return self.grouper.indices
def _get_indices(self, names):
"""
safe get multiple indices, translate keys for
datelike to underlying repr
"""
def get_converter(s):
# possibly convert to the actual key types
# in the indices, could be a Timestamp or a np.datetime64
if isinstance(s, (Timestamp, datetime.datetime)):
return lambda key: Timestamp(key)
elif isinstance(s, np.datetime64):
return lambda key: Timestamp(key).asm8
else:
return lambda key: key
if len(names) == 0:
return []
if len(self.indices) > 0:
index_sample = next(iter(self.indices))
else:
index_sample = None # Dummy sample
name_sample = names[0]
if isinstance(index_sample, tuple):
if not isinstance(name_sample, tuple):
msg = ("must supply a tuple to get_group with multiple"
" grouping keys")
raise ValueError(msg)
if not len(name_sample) == len(index_sample):
try:
# If the original grouper was a tuple
return [self.indices[name] for name in names]
except KeyError:
# turns out it wasn't a tuple
msg = ("must supply a a same-length tuple to get_group"
" with multiple grouping keys")
raise ValueError(msg)
converters = [get_converter(s) for s in index_sample]
names = [tuple(f(n) for f, n in zip(converters, name))
for name in names]
else:
converter = get_converter(index_sample)
names = [converter(name) for name in names]
return [self.indices.get(name, []) for name in names]
def _get_index(self, name):
""" safe get index, translate keys for datelike to underlying repr """
return self._get_indices([name])[0]
@cache_readonly
def _selected_obj(self):
if self._selection is None or isinstance(self.obj, Series):
if self._group_selection is not None:
return self.obj[self._group_selection]
return self.obj
else:
return self.obj[self._selection]
def _reset_group_selection(self):
"""
Clear group based selection. Used for methods needing to return info on
each group regardless of whether a group selection was previously set.
"""
if self._group_selection is not None:
# GH12839 clear cached selection too when changing group selection
self._group_selection = None
self._reset_cache('_selected_obj')
def _set_group_selection(self):
"""
Create group based selection. Used when selection is not passed
directly but instead via a grouper.
NOTE: this should be paired with a call to _reset_group_selection
"""
grp = self.grouper
if not (self.as_index and
getattr(grp, 'groupings', None) is not None and
self.obj.ndim > 1 and
self._group_selection is None):
return
ax = self.obj._info_axis
groupers = [g.name for g in grp.groupings
if g.level is None and g.in_axis]
if len(groupers):
# GH12839 clear selected obj cache when group selection changes
self._group_selection = ax.difference(Index(groupers)).tolist()
self._reset_cache('_selected_obj')
def _set_result_index_ordered(self, result):
# set the result index on the passed values object and
# return the new object, xref 8046
# the values/counts are repeated according to the group index
# shortcut if we have an already ordered grouper
if not self.grouper.is_monotonic:
index = Index(np.concatenate(
self._get_indices(self.grouper.result_index)))
result.set_axis(index, axis=self.axis, inplace=True)
result = result.sort_index(axis=self.axis)
result.set_axis(self.obj._get_axis(self.axis), axis=self.axis,
inplace=True)
return result
def _dir_additions(self):
return self.obj._dir_additions() | self._apply_whitelist
def __getattr__(self, attr):
if attr in self._internal_names_set:
return object.__getattribute__(self, attr)
if attr in self.obj:
return self[attr]
if hasattr(self.obj, attr):
return self._make_wrapper(attr)
raise AttributeError("%r object has no attribute %r" %
(type(self).__name__, attr))
@Substitution(klass='GroupBy',
versionadded='.. versionadded:: 0.21.0',
examples="""\
>>> df = pd.DataFrame({'A': 'a b a b'.split(), 'B': [1, 2, 3, 4]})
>>> df
A B
0 a 1
1 b 2
2 a 3
3 b 4
To get the difference between each groups maximum and minimum value in one
pass, you can do
>>> df.groupby('A').pipe(lambda x: x.max() - x.min())
B
A
a 2
b 2""")
@Appender(_pipe_template)
def pipe(self, func, *args, **kwargs):
return com._pipe(self, func, *args, **kwargs)
plot = property(GroupByPlot)
def _make_wrapper(self, name):
if name not in self._apply_whitelist:
is_callable = callable(getattr(self._selected_obj, name, None))
kind = ' callable ' if is_callable else ' '
msg = ("Cannot access{0}attribute {1!r} of {2!r} objects, try "
"using the 'apply' method".format(kind, name,
type(self).__name__))
raise AttributeError(msg)
self._set_group_selection()
# need to setup the selection
# as are not passed directly but in the grouper
f = getattr(self._selected_obj, name)
if not isinstance(f, types.MethodType):
return self.apply(lambda self: getattr(self, name))
f = getattr(type(self._selected_obj), name)
def wrapper(*args, **kwargs):
# a little trickery for aggregation functions that need an axis
# argument
kwargs_with_axis = kwargs.copy()
if 'axis' not in kwargs_with_axis or \
kwargs_with_axis['axis'] is None:
kwargs_with_axis['axis'] = self.axis
def curried_with_axis(x):
return f(x, *args, **kwargs_with_axis)
def curried(x):
return f(x, *args, **kwargs)
# preserve the name so we can detect it when calling plot methods,
# to avoid duplicates
curried.__name__ = curried_with_axis.__name__ = name
# special case otherwise extra plots are created when catching the
# exception below
if name in base.plotting_methods:
return self.apply(curried)
try:
return self.apply(curried_with_axis)
except Exception:
try:
return self.apply(curried)
except Exception:
# related to : GH3688
# try item-by-item
# this can be called recursively, so need to raise
# ValueError
# if we don't have this method to indicated to aggregate to
# mark this column as an error
try:
return self._aggregate_item_by_item(name,
*args, **kwargs)
                    except AttributeError:
raise ValueError
return wrapper
def get_group(self, name, obj=None):
"""
Constructs NDFrame from group with provided name
Parameters
----------
name : object
the name of the group to get as a DataFrame
obj : NDFrame, default None
the NDFrame to take the DataFrame out of. If
it is None, the object groupby was called on will
be used
Returns
-------
group : same type as obj
"""
if obj is None:
obj = self._selected_obj
inds = self._get_index(name)
if not len(inds):
raise KeyError(name)
return obj._take(inds, axis=self.axis)
def __iter__(self):
"""
Groupby iterator
Returns
-------
Generator yielding sequence of (name, subsetted object)
for each group
"""
return self.grouper.get_iterator(self.obj, axis=self.axis)
@Appender(_apply_docs['template']
.format(input="dataframe",
examples=_apply_docs['dataframe_examples']))
def apply(self, func, *args, **kwargs):
func = self._is_builtin_func(func)
# this is needed so we don't try and wrap strings. If we could
# resolve functions to their callable functions prior, this
# wouldn't be needed
if args or kwargs:
if callable(func):
@wraps(func)
def f(g):
with np.errstate(all='ignore'):
return func(g, *args, **kwargs)
else:
raise ValueError('func must be a callable if args or '
'kwargs are supplied')
else:
f = func
# ignore SettingWithCopy here in case the user mutates
with option_context('mode.chained_assignment', None):
try:
result = self._python_apply_general(f)
except Exception:
# gh-20949
# try again, with .apply acting as a filtering
# operation, by excluding the grouping column
# This would normally not be triggered
# except if the udf is trying an operation that
# fails on *some* columns, e.g. a numeric operation
# on a string grouper column
with _group_selection_context(self):
return self._python_apply_general(f)
return result
def _python_apply_general(self, f):
keys, values, mutated = self.grouper.apply(f, self._selected_obj,
self.axis)
return self._wrap_applied_output(
keys,
values,
not_indexed_same=mutated or self.mutated)
def _iterate_slices(self):
yield self._selection_name, self._selected_obj
def transform(self, func, *args, **kwargs):
raise com.AbstractMethodError(self)
def _cumcount_array(self, ascending=True):
"""
Parameters
----------
ascending : bool, default True
If False, number in reverse, from length of group - 1 to 0.
Notes
-----
this is currently implementing sort=False
(though the default is sort=True) for groupby in general
"""
ids, _, ngroups = self.grouper.group_info
sorter = get_group_index_sorter(ids, ngroups)
ids, count = ids[sorter], len(ids)
if count == 0:
return np.empty(0, dtype=np.int64)
run = np.r_[True, ids[:-1] != ids[1:]]
rep = np.diff(np.r_[np.nonzero(run)[0], count])
out = (~run).cumsum()
if ascending:
out -= np.repeat(out[run], rep)
else:
out = np.repeat(out[np.r_[run[1:], True]], rep) - out
rev = np.empty(count, dtype=np.intp)
rev[sorter] = np.arange(count, dtype=np.intp)
return out[rev].astype(np.int64, copy=False)
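    # Worked example (hand-computed): for group codes [0, 0, 1, 0, 1] the
    # ascending cumcounts are [0, 1, 0, 2, 1] -- i.e. the occurrence number
    # of each row within its own group.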
def _try_cast(self, result, obj, numeric_only=False):
"""
try to cast the result to our obj original type,
we may have roundtripped thru object in the mean-time
if numeric_only is True, then only try to cast numerics
and not datetimelikes
"""
if obj.ndim > 1:
dtype = obj.values.dtype
else:
dtype = obj.dtype
if not is_scalar(result):
if numeric_only and is_numeric_dtype(dtype) or not numeric_only:
result = maybe_downcast_to_dtype(result, dtype)
return result
def _transform_should_cast(self, func_nm):
"""
        Parameters
        ----------
        func_nm : str
            The name of the aggregation function being performed
        Returns
        -------
bool
Whether transform should attempt to cast the result of aggregation
"""
return (self.size().fillna(0) > 0).any() and (
func_nm not in base.cython_cast_blacklist)
def _cython_transform(self, how, numeric_only=True, **kwargs):
output = collections.OrderedDict()
for name, obj in self._iterate_slices():
is_numeric = is_numeric_dtype(obj.dtype)
if numeric_only and not is_numeric:
continue
try:
result, names = self.grouper.transform(obj.values, how,
**kwargs)
except NotImplementedError:
continue
except AssertionError as e:
raise GroupByError(str(e))
if self._transform_should_cast(how):
output[name] = self._try_cast(result, obj)
else:
output[name] = result
if len(output) == 0:
raise DataError('No numeric types to aggregate')
return self._wrap_transformed_output(output, names)
def _cython_agg_general(self, how, alt=None, numeric_only=True,
min_count=-1):
output = {}
for name, obj in self._iterate_slices():
is_numeric = is_numeric_dtype(obj.dtype)
if numeric_only and not is_numeric:
continue
try:
result, names = self.grouper.aggregate(obj.values, how,
min_count=min_count)
except AssertionError as e:
raise GroupByError(str(e))
output[name] = self._try_cast(result, obj)
if len(output) == 0:
raise DataError('No numeric types to aggregate')
return self._wrap_aggregated_output(output, names)
def _python_agg_general(self, func, *args, **kwargs):
func = self._is_builtin_func(func)
f = lambda x: func(x, *args, **kwargs)
# iterate through "columns" ex exclusions to populate output dict
output = {}
for name, obj in self._iterate_slices():
try:
result, counts = self.grouper.agg_series(obj, f)
output[name] = self._try_cast(result, obj, numeric_only=True)
except TypeError:
continue
if len(output) == 0:
return self._python_apply_general(f)
if self.grouper._filter_empty_groups:
mask = counts.ravel() > 0
for name, result in compat.iteritems(output):
# since we are masking, make sure that we have a float object
values = result
if is_numeric_dtype(values.dtype):
values = ensure_float(values)
output[name] = self._try_cast(values[mask], result)
return self._wrap_aggregated_output(output)
def _wrap_applied_output(self, *args, **kwargs):
raise com.AbstractMethodError(self)
def _concat_objects(self, keys, values, not_indexed_same=False):
from pandas.core.reshape.concat import concat
def reset_identity(values):
# reset the identities of the components
# of the values to prevent aliasing
for v in com._not_none(*values):
ax = v._get_axis(self.axis)
ax._reset_identity()
return values
if not not_indexed_same:
result = concat(values, axis=self.axis)
ax = self._selected_obj._get_axis(self.axis)
if isinstance(result, Series):
result = result.reindex(ax)
else:
# this is a very unfortunate situation
# we have a multi-index that is NOT lexsorted
# and we have a result which is duplicated
# we can't reindex, so we resort to this
# GH 14776
if isinstance(ax, MultiIndex) and not ax.is_unique:
indexer = algorithms.unique1d(
result.index.get_indexer_for(ax.values))
result = result.take(indexer, axis=self.axis)
else:
result = result.reindex(ax, axis=self.axis)
elif self.group_keys:
values = reset_identity(values)
if self.as_index:
# possible MI return case
group_keys = keys
group_levels = self.grouper.levels
group_names = self.grouper.names
result = concat(values, axis=self.axis, keys=group_keys,
levels=group_levels, names=group_names,
sort=False)
else:
# GH5610, returns a MI, with the first level being a
# range index
keys = list(range(len(values)))
result = concat(values, axis=self.axis, keys=keys)
else:
values = reset_identity(values)
result = concat(values, axis=self.axis)
if (isinstance(result, Series) and
getattr(self, '_selection_name', None) is not None):
result.name = self._selection_name
return result
def _apply_filter(self, indices, dropna):
if len(indices) == 0:
indices = np.array([], dtype='int64')
else:
indices = np.sort(np.concatenate(indices))
if dropna:
filtered = self._selected_obj.take(indices, axis=self.axis)
else:
mask = np.empty(len(self._selected_obj.index), dtype=bool)
mask.fill(False)
mask[indices.astype(int)] = True
# mask fails to broadcast when passed to where; broadcast manually.
mask = np.tile(mask, list(self._selected_obj.shape[1:]) + [1]).T
filtered = self._selected_obj.where(mask) # Fill with NaNs.
return filtered
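    # Illustrative behaviour (hand-worked): with a 4-row selection and
    # indices=[0, 2], dropna=True keeps rows 0 and 2 only, while dropna=False
    # preserves the original shape and masks rows 1 and 3 to NaN via `where`.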
class GroupBy(_GroupBy):
"""
Class for grouping and aggregating relational data. See aggregate,
transform, and apply functions on this object.
It's easiest to use obj.groupby(...) to use GroupBy, but you can also do:
::
grouped = groupby(obj, ...)
Parameters
----------
obj : pandas object
axis : int, default 0
level : int, default None
Level of MultiIndex
groupings : list of Grouping objects
Most users should ignore this
exclusions : array-like, optional
List of columns to exclude
name : string
Most users should ignore this
Notes
-----
After grouping, see aggregate, apply, and transform functions. Here are
some other brief notes about usage. When grouping by multiple groups, the
result index will be a MultiIndex (hierarchical) by default.
Iteration produces (key, group) tuples, i.e. chunking the data by group. So
you can write code like:
::
grouped = obj.groupby(keys, axis=axis)
for key, group in grouped:
# do something with the data
Function calls on GroupBy, if not specially implemented, "dispatch" to the
grouped data. So if you group a DataFrame and wish to invoke the std()
method on each group, you can simply do:
::
df.groupby(mapper).std()
rather than
::
df.groupby(mapper).aggregate(np.std)
You can pass arguments to these "wrapped" functions, too.
See the online documentation for full exposition on these topics and much
more
Returns
-------
**Attributes**
groups : dict
{group name -> group labels}
len(grouped) : int
Number of groups
"""
def _bool_agg(self, val_test, skipna):
"""Shared func to call any / all Cython GroupBy implementations"""
def objs_to_bool(vals):
try:
vals = vals.astype(np.bool)
except ValueError: # for objects
vals = np.array([bool(x) for x in vals])
return vals.view(np.uint8)
def result_to_bool(result):
return result.astype(np.bool, copy=False)
return self._get_cythonized_result('group_any_all', self.grouper,
aggregate=True,
cython_dtype=np.uint8,
needs_values=True,
needs_mask=True,
pre_processing=objs_to_bool,
post_processing=result_to_bool,
val_test=val_test, skipna=skipna)
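    # The shared kernel works on uint8 views: values are coerced to bool in
    # pre-processing (element-wise for object arrays), missing entries are
    # handled through the mask, and the aggregate is cast back to bool in
    # post-processing.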
@Substitution(name='groupby')
@Appender(_doc_template)
def any(self, skipna=True):
"""
Returns True if any value in the group is truthful, else False
Parameters
----------
skipna : bool, default True
Flag to ignore nan values during truth testing
"""
return self._bool_agg('any', skipna)
@Substitution(name='groupby')
@Appender(_doc_template)
def all(self, skipna=True):
"""Returns True if all values in the group are truthful, else False
Parameters
----------
skipna : bool, default True
Flag to ignore nan values during truth testing
"""
return self._bool_agg('all', skipna)
@Substitution(name='groupby')
@Appender(_doc_template)
def count(self):
"""Compute count of group, excluding missing values"""
# defined here for API doc
raise NotImplementedError
@Substitution(name='groupby')
@Appender(_doc_template)
def mean(self, *args, **kwargs):
"""
Compute mean of groups, excluding missing values.
Returns
-------
pandas.Series or pandas.DataFrame
Examples
--------
>>> df = pd.DataFrame({'A': [1, 1, 2, 1, 2],
... 'B': [np.nan, 2, 3, 4, 5],
... 'C': [1, 2, 1, 1, 2]}, columns=['A', 'B', 'C'])
Groupby one column and return the mean of the remaining columns in
each group.
>>> df.groupby('A').mean()
>>>
B C
A
1 3.0 1.333333
2 4.0 1.500000
Groupby two columns and return the mean of the remaining column.
>>> df.groupby(['A', 'B']).mean()
>>>
C
A B
1 2.0 2
4.0 1
2 3.0 1
5.0 2
Groupby one column and return the mean of only particular column in
the group.
>>> df.groupby('A')['B'].mean()
>>>
A
1 3.0
2 4.0
Name: B, dtype: float64
"""
nv.validate_groupby_func('mean', args, kwargs, ['numeric_only'])
try:
return self._cython_agg_general('mean', **kwargs)
except GroupByError:
raise
except Exception: # pragma: no cover
with _group_selection_context(self):
f = lambda x: x.mean(axis=self.axis, **kwargs)
return self._python_agg_general(f)
@Substitution(name='groupby')
@Appender(_doc_template)
def median(self, **kwargs):
"""
Compute median of groups, excluding missing values
For multiple groupings, the result index will be a MultiIndex
"""
try:
return self._cython_agg_general('median', **kwargs)
except GroupByError:
raise
except Exception: # pragma: no cover
def f(x):
if isinstance(x, np.ndarray):
x = Series(x)
return x.median(axis=self.axis, **kwargs)
with _group_selection_context(self):
return self._python_agg_general(f)
@Substitution(name='groupby')
@Appender(_doc_template)
def std(self, ddof=1, *args, **kwargs):
"""
Compute standard deviation of groups, excluding missing values
For multiple groupings, the result index will be a MultiIndex
Parameters
----------
ddof : integer, default 1
degrees of freedom
"""
# TODO: implement at Cython level?
nv.validate_groupby_func('std', args, kwargs)
return np.sqrt(self.var(ddof=ddof, **kwargs))
@Substitution(name='groupby')
@Appender(_doc_template)
def var(self, ddof=1, *args, **kwargs):
"""
Compute variance of groups, excluding missing values
For multiple groupings, the result index will be a MultiIndex
Parameters
----------
ddof : integer, default 1
degrees of freedom
"""
nv.validate_groupby_func('var', args, kwargs)
if ddof == 1:
try:
return self._cython_agg_general('var', **kwargs)
except Exception:
f = lambda x: x.var(ddof=ddof, **kwargs)
with _group_selection_context(self):
return self._python_agg_general(f)
else:
f = lambda x: x.var(ddof=ddof, **kwargs)
with _group_selection_context(self):
return self._python_agg_general(f)
@Substitution(name='groupby')
@Appender(_doc_template)
def sem(self, ddof=1):
"""
Compute standard error of the mean of groups, excluding missing values
For multiple groupings, the result index will be a MultiIndex
Parameters
----------
ddof : integer, default 1
degrees of freedom
"""
return self.std(ddof=ddof) / np.sqrt(self.count())
@Substitution(name='groupby')
@Appender(_doc_template)
def size(self):
"""Compute group sizes"""
result = self.grouper.size()
if isinstance(self.obj, Series):
result.name = getattr(self.obj, 'name', None)
return result
@classmethod
def _add_numeric_operations(cls):
""" add numeric operations to the GroupBy generically """
def groupby_function(name, alias, npfunc,
numeric_only=True, _convert=False,
min_count=-1):
_local_template = "Compute %(f)s of group values"
@Substitution(name='groupby', f=name)
@Appender(_doc_template)
@Appender(_local_template)
def f(self, **kwargs):
if 'numeric_only' not in kwargs:
kwargs['numeric_only'] = numeric_only
if 'min_count' not in kwargs:
kwargs['min_count'] = min_count
self._set_group_selection()
try:
return self._cython_agg_general(
alias, alt=npfunc, **kwargs)
except AssertionError as e:
raise SpecificationError(str(e))
except Exception:
result = self.aggregate(
lambda x: npfunc(x, axis=self.axis))
if _convert:
result = result._convert(datetime=True)
return result
set_function_name(f, name, cls)
return f
def first_compat(x, axis=0):
def first(x):
x = np.asarray(x)
x = x[notna(x)]
if len(x) == 0:
return np.nan
return x[0]
if isinstance(x, DataFrame):
return x.apply(first, axis=axis)
else:
return first(x)
def last_compat(x, axis=0):
def last(x):
x = np.asarray(x)
x = x[notna(x)]
if len(x) == 0:
return np.nan
return x[-1]
if isinstance(x, DataFrame):
return x.apply(last, axis=axis)
else:
return last(x)
cls.sum = groupby_function('sum', 'add', np.sum, min_count=0)
cls.prod = groupby_function('prod', 'prod', np.prod, min_count=0)
cls.min = groupby_function('min', 'min', np.min, numeric_only=False)
cls.max = groupby_function('max', 'max', np.max, numeric_only=False)
cls.first = groupby_function('first', 'first', first_compat,
numeric_only=False)
cls.last = groupby_function('last', 'last', last_compat,
numeric_only=False)
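        # e.g. `df.groupby('A').sum()` now resolves to the Cython 'add'
        # kernel via `_cython_agg_general`, falling back to
        # `self.aggregate(np.sum)` when the fast path raises.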
@Substitution(name='groupby')
@Appender(_doc_template)
def ohlc(self):
"""
        Compute open, high, low and close values of a group, excluding missing
        values
For multiple groupings, the result index will be a MultiIndex
"""
return self._apply_to_column_groupbys(
lambda x: x._cython_agg_general('ohlc'))
@Appender(DataFrame.describe.__doc__)
def describe(self, **kwargs):
with _group_selection_context(self):
result = self.apply(lambda x: x.describe(**kwargs))
if self.axis == 1:
return result.T
return result.unstack()
@Substitution(name='groupby')
@Appender(_doc_template)
def resample(self, rule, *args, **kwargs):
"""
Provide resampling when using a TimeGrouper
Return a new grouper with our resampler appended
"""
from pandas.core.resample import get_resampler_for_grouping
return get_resampler_for_grouping(self, rule, *args, **kwargs)
@Substitution(name='groupby')
@Appender(_doc_template)
def rolling(self, *args, **kwargs):
"""
Return a rolling grouper, providing rolling
functionality per group
"""
from pandas.core.window import RollingGroupby
return RollingGroupby(self, *args, **kwargs)
@Substitution(name='groupby')
@Appender(_doc_template)
def expanding(self, *args, **kwargs):
"""
Return an expanding grouper, providing expanding
functionality per group
"""
from pandas.core.window import ExpandingGroupby
return ExpandingGroupby(self, *args, **kwargs)
def _fill(self, direction, limit=None):
"""Shared function for `pad` and `backfill` to call Cython method
Parameters
----------
direction : {'ffill', 'bfill'}
Direction passed to underlying Cython function. `bfill` will cause
values to be filled backwards. `ffill` and any other values will
default to a forward fill
limit : int, default None
Maximum number of consecutive values to fill. If `None`, this
method will convert to -1 prior to passing to Cython
Returns
-------
`Series` or `DataFrame` with filled values
See Also
--------
pad
backfill
"""
# Need int value for Cython
if limit is None:
limit = -1
return self._get_cythonized_result('group_fillna_indexer',
self.grouper, needs_mask=True,
cython_dtype=np.int64,
result_is_index=True,
direction=direction, limit=limit)
@Substitution(name='groupby')
def pad(self, limit=None):
"""
Forward fill the values
Parameters
----------
limit : integer, optional
limit of how many values to fill
See Also
--------
Series.pad
DataFrame.pad
Series.fillna
DataFrame.fillna
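        Examples
        --------
        A minimal illustration (frame and column names are made up):
        >>> df = pd.DataFrame({'key': ['a', 'a', 'b'],
        ...                    'val': [1.0, np.nan, np.nan]})
        >>> df.groupby('key')['val'].ffill()
        0    1.0
        1    1.0
        2    NaN
        Name: val, dtype: float64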
"""
return self._fill('ffill', limit=limit)
ffill = pad
@Substitution(name='groupby')
def backfill(self, limit=None):
"""
Backward fill the values
Parameters
----------
limit : integer, optional
limit of how many values to fill
See Also
--------
Series.backfill
DataFrame.backfill
Series.fillna
DataFrame.fillna
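        Examples
        --------
        A minimal illustration (names are made up):
        >>> df = pd.DataFrame({'key': ['a', 'a', 'b'],
        ...                    'val': [np.nan, 2.0, np.nan]})
        >>> df.groupby('key')['val'].bfill()
        0    2.0
        1    2.0
        2    NaN
        Name: val, dtype: float64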
"""
return self._fill('bfill', limit=limit)
bfill = backfill
@Substitution(name='groupby')
@Appender(_doc_template)
def nth(self, n, dropna=None):
"""
Take the nth row from each group if n is an int, or a subset of rows
if n is a list of ints.
        If ``dropna``, will take the nth non-null row. ``dropna`` is either
        truthy (if a Series) or 'all'/'any' (if a DataFrame); this is
        equivalent to calling ``dropna(how=dropna)`` before the groupby.
Parameters
----------
n : int or list of ints
a single nth value for the row or a list of nth values
dropna : None or str, optional
apply the specified dropna operation before counting which row is
the nth row. Needs to be None, 'any' or 'all'
Examples
--------
>>> df = pd.DataFrame({'A': [1, 1, 2, 1, 2],
... 'B': [np.nan, 2, 3, 4, 5]}, columns=['A', 'B'])
>>> g = df.groupby('A')
>>> g.nth(0)
B
A
1 NaN
2 3.0
>>> g.nth(1)
B
A
1 2.0
2 5.0
>>> g.nth(-1)
B
A
1 4.0
2 5.0
>>> g.nth([0, 1])
B
A
1 NaN
1 2.0
2 3.0
2 5.0
        Specifying ``dropna`` allows counting to ignore ``NaN``
>>> g.nth(0, dropna='any')
B
A
1 2.0
2 3.0
        NaNs denote that a group was exhausted when using ``dropna``
>>> g.nth(3, dropna='any')
B
A
1 NaN
2 NaN
Specifying `as_index=False` in `groupby` keeps the original index.
>>> df.groupby('A', as_index=False).nth(1)
A B
1 1 2.0
4 2 5.0
"""
if isinstance(n, int):
nth_values = [n]
elif isinstance(n, (set, list, tuple)):
nth_values = list(set(n))
if dropna is not None:
raise ValueError(
"dropna option with a list of nth values is not supported")
else:
raise TypeError("n needs to be an int or a list/set/tuple of ints")
nth_values = np.array(nth_values, dtype=np.intp)
self._set_group_selection()
if not dropna:
mask = np.in1d(self._cumcount_array(), nth_values) | \
np.in1d(self._cumcount_array(ascending=False) + 1, -nth_values)
out = self._selected_obj[mask]
if not self.as_index:
return out
ids, _, _ = self.grouper.group_info
out.index = self.grouper.result_index[ids[mask]]
return out.sort_index() if self.sort else out
if dropna not in ['any', 'all']:
if isinstance(self._selected_obj, Series) and dropna is True:
warnings.warn("the dropna={dropna} keyword is deprecated,"
"use dropna='all' instead. "
"For a Series groupby, dropna must be "
"either None, 'any' or 'all'.".format(
dropna=dropna),
FutureWarning,
stacklevel=2)
dropna = 'all'
else:
# Note: when agg-ing picker doesn't raise this,
# just returns NaN
raise ValueError("For a DataFrame groupby, dropna must be "
"either None, 'any' or 'all', "
"(was passed %s)." % (dropna),)
# old behaviour, but with all and any support for DataFrames.
# modified in GH 7559 to have better perf
        max_len = n if n >= 0 else -1 - n
dropped = self.obj.dropna(how=dropna, axis=self.axis)
# get a new grouper for our dropped obj
if self.keys is None and self.level is None:
# we don't have the grouper info available
# (e.g. we have selected out
# a column that is not in the current object)
axis = self.grouper.axis
grouper = axis[axis.isin(dropped.index)]
else:
# create a grouper with the original parameters, but on the dropped
# object
from pandas.core.groupby.grouper import _get_grouper
grouper, _, _ = _get_grouper(dropped, key=self.keys,
axis=self.axis, level=self.level,
sort=self.sort,
mutated=self.mutated)
grb = dropped.groupby(grouper, as_index=self.as_index, sort=self.sort)
sizes, result = grb.size(), grb.nth(n)
mask = (sizes < max_len).values
# set the results which don't meet the criteria
if len(result) and mask.any():
result.loc[mask] = np.nan
# reset/reindex to the original groups
if len(self.obj) == len(dropped) or \
len(result) == len(self.grouper.result_index):
result.index = self.grouper.result_index
else:
result = result.reindex(self.grouper.result_index)
return result
@Substitution(name='groupby')
def ngroup(self, ascending=True):
"""
Number each group from 0 to the number of groups - 1.
This is the enumerative complement of cumcount. Note that the
numbers given to the groups match the order in which the groups
would be seen when iterating over the groupby object, not the
order they are first observed.
.. versionadded:: 0.20.2
Parameters
----------
ascending : bool, default True
If False, number in reverse, from number of group - 1 to 0.
Examples
--------
>>> df = pd.DataFrame({"A": list("aaabba")})
>>> df
A
0 a
1 a
2 a
3 b
4 b
5 a
>>> df.groupby('A').ngroup()
0 0
1 0
2 0
3 1
4 1
5 0
dtype: int64
>>> df.groupby('A').ngroup(ascending=False)
0 1
1 1
2 1
3 0
4 0
5 1
dtype: int64
>>> df.groupby(["A", [1,1,2,3,2,1]]).ngroup()
0 0
1 0
2 1
3 3
4 2
5 0
dtype: int64
See also
--------
.cumcount : Number the rows in each group.
"""
with _group_selection_context(self):
index = self._selected_obj.index
result = Series(self.grouper.group_info[0], index)
if not ascending:
result = self.ngroups - 1 - result
return result
@Substitution(name='groupby')
def cumcount(self, ascending=True):
"""
Number each item in each group from 0 to the length of that group - 1.
Essentially this is equivalent to
>>> self.apply(lambda x: pd.Series(np.arange(len(x)), x.index))
Parameters
----------
ascending : bool, default True
If False, number in reverse, from length of group - 1 to 0.
Examples
--------
>>> df = pd.DataFrame([['a'], ['a'], ['a'], ['b'], ['b'], ['a']],
... columns=['A'])
>>> df
A
0 a
1 a
2 a
3 b
4 b
5 a
>>> df.groupby('A').cumcount()
0 0
1 1
2 2
3 0
4 1
5 3
dtype: int64
>>> df.groupby('A').cumcount(ascending=False)
0 3
1 2
2 1
3 1
4 0
5 0
dtype: int64
See also
--------
.ngroup : Number the groups themselves.
"""
with _group_selection_context(self):
index = self._selected_obj.index
cumcounts = self._cumcount_array(ascending=ascending)
return Series(cumcounts, index)
@Substitution(name='groupby')
@Appender(_doc_template)
def rank(self, method='average', ascending=True, na_option='keep',
pct=False, axis=0):
"""
Provides the rank of values within each group.
Parameters
----------
method : {'average', 'min', 'max', 'first', 'dense'}, default 'average'
* average: average rank of group
* min: lowest rank in group
* max: highest rank in group
* first: ranks assigned in order they appear in the array
* dense: like 'min', but rank always increases by 1 between groups
ascending : boolean, default True
False for ranks by high (1) to low (N)
na_option : {'keep', 'top', 'bottom'}, default 'keep'
* keep: leave NA values where they are
* top: smallest rank if ascending
* bottom: smallest rank if descending
pct : boolean, default False
Compute percentage rank of data within each group
axis : int, default 0
The axis of the object over which to compute the rank.
Returns
        -------
DataFrame with ranking of values within each group
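        Examples
        --------
        An illustrative sketch (column names are made up):
        >>> df = pd.DataFrame({'key': ['a', 'a', 'a', 'b', 'b'],
        ...                    'val': [2, 2, 5, 1, 3]})
        >>> df.groupby('key')['val'].rank(method='min')
        0    1.0
        1    1.0
        2    3.0
        3    1.0
        4    2.0
        Name: val, dtype: float64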
"""
if na_option not in {'keep', 'top', 'bottom'}:
msg = "na_option must be one of 'keep', 'top', or 'bottom'"
raise ValueError(msg)
return self._cython_transform('rank', numeric_only=False,
ties_method=method, ascending=ascending,
na_option=na_option, pct=pct, axis=axis)
@Substitution(name='groupby')
@Appender(_doc_template)
def cumprod(self, axis=0, *args, **kwargs):
"""Cumulative product for each group"""
nv.validate_groupby_func('cumprod', args, kwargs,
['numeric_only', 'skipna'])
if axis != 0:
return self.apply(lambda x: x.cumprod(axis=axis, **kwargs))
return self._cython_transform('cumprod', **kwargs)
@Substitution(name='groupby')
@Appender(_doc_template)
def cumsum(self, axis=0, *args, **kwargs):
"""Cumulative sum for each group"""
nv.validate_groupby_func('cumsum', args, kwargs,
['numeric_only', 'skipna'])
if axis != 0:
return self.apply(lambda x: x.cumsum(axis=axis, **kwargs))
return self._cython_transform('cumsum', **kwargs)
@Substitution(name='groupby')
@Appender(_doc_template)
def cummin(self, axis=0, **kwargs):
"""Cumulative min for each group"""
if axis != 0:
return self.apply(lambda x: np.minimum.accumulate(x, axis))
return self._cython_transform('cummin', numeric_only=False)
@Substitution(name='groupby')
@Appender(_doc_template)
def cummax(self, axis=0, **kwargs):
"""Cumulative max for each group"""
if axis != 0:
return self.apply(lambda x: np.maximum.accumulate(x, axis))
return self._cython_transform('cummax', numeric_only=False)
def _get_cythonized_result(self, how, grouper, aggregate=False,
cython_dtype=None, needs_values=False,
needs_mask=False, needs_ngroups=False,
result_is_index=False,
pre_processing=None, post_processing=None,
**kwargs):
"""Get result for Cythonized functions
Parameters
----------
how : str, Cythonized function name to be called
grouper : Grouper object containing pertinent group info
aggregate : bool, default False
Whether the result should be aggregated to match the number of
groups
cython_dtype : default None
Type of the array that will be modified by the Cython call. If
`None`, the type will be inferred from the values of each slice
needs_values : bool, default False
Whether the values should be a part of the Cython call
signature
needs_mask : bool, default False
Whether boolean mask needs to be part of the Cython call
signature
needs_ngroups : bool, default False
Whether number of groups is part of the Cython call signature
result_is_index : bool, default False
Whether the result of the Cython operation is an index of
values to be retrieved, instead of the actual values themselves
pre_processing : function, default None
Function to be applied to `values` prior to passing to Cython
Raises if `needs_values` is False
post_processing : function, default None
Function to be applied to result of Cython function
**kwargs : dict
Extra arguments to be passed back to Cython funcs
Returns
-------
`Series` or `DataFrame` with filled values
"""
if result_is_index and aggregate:
raise ValueError("'result_is_index' and 'aggregate' cannot both "
"be True!")
if post_processing:
            if not callable(post_processing):
raise ValueError("'post_processing' must be a callable!")
if pre_processing:
if not callable(pre_processing):
raise ValueError("'pre_processing' must be a callable!")
if not needs_values:
raise ValueError("Cannot use 'pre_processing' without "
"specifying 'needs_values'!")
labels, _, ngroups = grouper.group_info
output = collections.OrderedDict()
base_func = getattr(libgroupby, how)
for name, obj in self._iterate_slices():
if aggregate:
result_sz = ngroups
else:
result_sz = len(obj.values)
if not cython_dtype:
cython_dtype = obj.values.dtype
result = np.zeros(result_sz, dtype=cython_dtype)
func = partial(base_func, result, labels)
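            # Build the Cython call incrementally: each optional argument
            # (values, mask, ngroups) is bound onto `func` only when the
            # kernel asked for it.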
if needs_values:
vals = obj.values
if pre_processing:
vals = pre_processing(vals)
func = partial(func, vals)
if needs_mask:
mask = isna(obj.values).view(np.uint8)
func = partial(func, mask)
if needs_ngroups:
func = partial(func, ngroups)
func(**kwargs) # Call func to modify indexer values in place
if result_is_index:
result = algorithms.take_nd(obj.values, result)
if post_processing:
result = post_processing(result)
output[name] = result
if aggregate:
return self._wrap_aggregated_output(output)
else:
return self._wrap_transformed_output(output)
@Substitution(name='groupby')
@Appender(_doc_template)
def shift(self, periods=1, freq=None, axis=0):
"""
Shift each group by periods observations
Parameters
----------
periods : integer, default 1
number of periods to shift
freq : frequency string
axis : axis to shift, default 0
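        Examples
        --------
        A sketch of the per-group behaviour (names are made up):
        >>> df = pd.DataFrame({'key': ['a', 'a', 'b', 'b'],
        ...                    'val': [1, 2, 3, 4]})
        >>> df.groupby('key')['val'].shift(1)
        0    NaN
        1    1.0
        2    NaN
        3    3.0
        Name: val, dtype: float64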
"""
if freq is not None or axis != 0:
return self.apply(lambda x: x.shift(periods, freq, axis))
return self._get_cythonized_result('group_shift_indexer',
self.grouper, cython_dtype=np.int64,
needs_ngroups=True,
result_is_index=True,
periods=periods)
@Substitution(name='groupby')
@Appender(_doc_template)
def pct_change(self, periods=1, fill_method='pad', limit=None, freq=None,
axis=0):
"""Calculate pct_change of each value to previous entry in group"""
if freq is not None or axis != 0:
return self.apply(lambda x: x.pct_change(periods=periods,
fill_method=fill_method,
limit=limit, freq=freq,
axis=axis))
filled = getattr(self, fill_method)(limit=limit).drop(
self.grouper.names, axis=1)
shifted = filled.shift(periods=periods, freq=freq)
return (filled / shifted) - 1
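    # Illustrative sketch: for groups a = [1.0, 2.0] and b = [4.0, 6.0] the
    # result is [NaN, 1.0] and [NaN, 0.5]; the first row of each group has no
    # previous entry, later rows report the fractional change within the group.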
@Substitution(name='groupby')
@Appender(_doc_template)
def head(self, n=5):
"""
Returns first n rows of each group.
Essentially equivalent to ``.apply(lambda x: x.head(n))``,
except ignores as_index flag.
Examples
--------
>>> df = pd.DataFrame([[1, 2], [1, 4], [5, 6]],
        ...                   columns=['A', 'B'])
>>> df.groupby('A', as_index=False).head(1)
A B
0 1 2
2 5 6
>>> df.groupby('A').head(1)
A B
0 1 2
2 5 6
"""
self._reset_group_selection()
mask = self._cumcount_array() < n
return self._selected_obj[mask].dropna(subset=[self.keys])
@Substitution(name='groupby')
@Appender(_doc_template)
def tail(self, n=5):
"""
Returns last n rows of each group
Essentially equivalent to ``.apply(lambda x: x.tail(n))``,
except ignores as_index flag.
Examples
--------
>>> df = pd.DataFrame([['a', 1], ['a', 2], ['b', 1], ['b', 2]],
        ...                   columns=['A', 'B'])
>>> df.groupby('A').tail(1)
A B
1 a 2
3 b 2
>>> df.groupby('A').head(1)
A B
0 a 1
2 b 1
"""
self._reset_group_selection()
mask = self._cumcount_array(ascending=False) < n
return self._selected_obj[mask].dropna(subset=[self.keys])
GroupBy._add_numeric_operations()
@Appender(GroupBy.__doc__)
def groupby(obj, by, **kwds):
if isinstance(obj, Series):
from pandas.core.groupby.generic import SeriesGroupBy
klass = SeriesGroupBy
elif isinstance(obj, DataFrame):
from pandas.core.groupby.generic import DataFrameGroupBy
klass = DataFrameGroupBy
else: # pragma: no cover
raise TypeError('invalid type: %s' % type(obj))
    return klass(obj, by, **kwds)
# --- mundo 3/des085.py | repo: Pedroluis1/python | license: MIT ---
valores = [[], []]
val = 0
for c in range(1, 8):
    val = int(input(f'Enter value #{c}: '))
if val % 2 == 0:
valores[0].append(val)
else:
valores[1].append(val)
valores[0].sort()
valores[1].sort()
print(f'odd values: {valores[1]}')
print(f'even values: {valores[0]}')
# --- pypy/translator/platform/posix.py | repo: woodrow/pyoac | license: MIT | stars: 1 ---
""" Base class for all posixish platforms
"""
from pypy.translator.platform import Platform, log, _run_subprocess
from pypy.tool import autopath
import py, os
class BasePosix(Platform):
exe_ext = ''
def __init__(self, cc=None):
if cc is None:
cc = 'gcc'
self.cc = cc
def _libs(self, libraries):
return ['-l%s' % (lib,) for lib in libraries]
def _libdirs(self, library_dirs):
return ['-L%s' % (ldir,) for ldir in library_dirs]
def _includedirs(self, include_dirs):
return ['-I%s' % (idir,) for idir in include_dirs]
def _linkfiles(self, link_files):
return list(link_files)
def _compile_c_file(self, cc, cfile, compile_args):
oname = cfile.new(ext='o')
args = ['-c'] + compile_args + [str(cfile), '-o', str(oname)]
self._execute_c_compiler(cc, args, oname)
return oname
def _link(self, cc, ofiles, link_args, standalone, exe_name):
args = [str(ofile) for ofile in ofiles] + link_args
args += ['-o', str(exe_name)]
if not standalone:
args = self._args_for_shared(args)
self._execute_c_compiler(cc, args, exe_name)
return exe_name
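    # Taken together, _compile_c_file and _link mirror the usual two-step
    # build, e.g. "gcc -c foo.c -o foo.o" followed by "gcc foo.o -o foo"
    # (file names here are illustrative).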
def _preprocess_dirs(self, include_dirs):
# hook for maemo
return include_dirs
def gen_makefile(self, cfiles, eci, exe_name=None, path=None):
cfiles = [py.path.local(f) for f in cfiles]
cfiles += [py.path.local(f) for f in eci.separate_module_files]
if path is None:
path = cfiles[0].dirpath()
pypypath = py.path.local(autopath.pypydir)
if exe_name is None:
exe_name = cfiles[0].new(ext=self.exe_ext)
m = GnuMakefile(path)
m.exe_name = exe_name
m.eci = eci
def pypyrel(fpath):
rel = py.path.local(fpath).relto(pypypath)
if rel:
return os.path.join('$(PYPYDIR)', rel)
else:
return fpath
rel_cfiles = [m.pathrel(cfile) for cfile in cfiles]
rel_ofiles = [rel_cfile[:-2]+'.o' for rel_cfile in rel_cfiles]
m.cfiles = rel_cfiles
rel_includedirs = [pypyrel(incldir) for incldir in
self._preprocess_dirs(eci.include_dirs)]
m.comment('automatically generated makefile')
definitions = [
('PYPYDIR', autopath.pypydir),
('TARGET', exe_name.basename),
('DEFAULT_TARGET', '$(TARGET)'),
('SOURCES', rel_cfiles),
('OBJECTS', rel_ofiles),
('LIBS', self._libs(eci.libraries)),
('LIBDIRS', self._libdirs(eci.library_dirs)),
('INCLUDEDIRS', self._includedirs(rel_includedirs)),
('CFLAGS', self.cflags + list(eci.compile_extra)),
('LDFLAGS', self.link_flags + list(eci.link_extra)),
('CC', self.cc)
]
for args in definitions:
m.definition(*args)
rules = [
('all', '$(DEFAULT_TARGET)', []),
('$(TARGET)', '$(OBJECTS)', '$(CC) $(LDFLAGS) -o $@ $(OBJECTS) $(LIBDIRS) $(LIBS)'),
('%.o', '%.c', '$(CC) $(CFLAGS) -o $@ -c $< $(INCLUDEDIRS)'),
]
for rule in rules:
m.rule(*rule)
return m
def execute_makefile(self, path_to_makefile):
if isinstance(path_to_makefile, GnuMakefile):
path = path_to_makefile.makefile_dir
else:
path = path_to_makefile
log.execute('make in %s' % (path,))
returncode, stdout, stderr = _run_subprocess('make', ['-C', str(path)])
self._handle_error(returncode, stdout, stderr, path.join('make'))
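# A rough usage sketch of the helper classes below (paths are made up):
#
#   m = GnuMakefile('/tmp/build')
#   m.definition('CC', 'gcc')
#   m.rule('all', 'foo.o', '$(CC) -o foo foo.o')
#   m.write()  # emits /tmp/build/Makefile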
class Definition(object):
def __init__(self, name, value):
self.name = name
self.value = value
def write(self, f):
def write_list(prefix, lst):
for i, fn in enumerate(lst):
print >> f, prefix, fn,
if i < len(lst)-1:
print >> f, '\\'
else:
print >> f
prefix = ' ' * len(prefix)
name, value = self.name, self.value
if isinstance(value, str):
f.write('%s = %s\n' % (name, value))
else:
write_list('%s =' % (name,), value)
if value:
f.write('\n')
class Rule(object):
def __init__(self, target, deps, body):
self.target = target
self.deps = deps
self.body = body
def write(self, f):
target, deps, body = self.target, self.deps, self.body
if isinstance(deps, str):
dep_s = deps
else:
dep_s = ' '.join(deps)
f.write('%s: %s\n' % (target, dep_s))
if isinstance(body, str):
f.write('\t%s\n' % body)
elif body:
f.write('\t%s\n' % '\n\t'.join(body))
f.write('\n')
class Comment(object):
def __init__(self, body):
self.body = body
def write(self, f):
f.write('# %s\n' % (self.body,))
class GnuMakefile(object):
def __init__(self, path=None):
self.defs = {}
self.lines = []
self.makefile_dir = py.path.local(path)
def pathrel(self, fpath):
if fpath.dirpath() == self.makefile_dir:
return fpath.basename
elif fpath.dirpath().dirpath() == self.makefile_dir.dirpath():
return '../' + fpath.relto(self.makefile_dir.dirpath())
else:
return str(fpath)
def definition(self, name, value):
defs = self.defs
defn = Definition(name, value)
if name in defs:
self.lines[defs[name]] = defn
else:
defs[name] = len(self.lines)
self.lines.append(defn)
def rule(self, target, deps, body):
self.lines.append(Rule(target, deps, body))
def comment(self, body):
self.lines.append(Comment(body))
def write(self, out=None):
if out is None:
f = self.makefile_dir.join('Makefile').open('w')
else:
f = out
for line in self.lines:
line.write(f)
f.flush()
if out is None:
            f.close()
f71ff11e0a47dc49e9286ac8954a588df8c45b2e | 64 | py | Python | test/core/metaflow_custom/toplevel/__init__.py | saikonen/metaflow | 48e37bea3ea4e83ddab8227869bbe56b52d9957d | [
"Apache-2.0"
] | 7 | 2020-07-24T17:07:58.000Z | 2021-05-19T21:47:12.000Z | test/core/metaflow_custom/toplevel/__init__.py | saikonen/metaflow | 48e37bea3ea4e83ddab8227869bbe56b52d9957d | [
"Apache-2.0"
] | 55 | 2020-07-20T16:56:27.000Z | 2022-03-28T12:51:15.000Z | test/core/metaflow_custom/toplevel/__init__.py | saikonen/metaflow | 48e37bea3ea4e83ddab8227869bbe56b52d9957d | [
"Apache-2.0"
] | 6 | 2020-10-15T18:38:35.000Z | 2021-06-20T03:05:43.000Z | __mf_customization__ = 'test'
tl_value = 42
__version__ = None | 12.8 | 29 | 0.765625 | __mf_customization__ = 'test'
tl_value = 42
__version__ = None | true | true |
f71ff1359f7c6ddeef12a05d12e6963d96fa007e | 2,569 | py | Python | jspp_imageutils/annotations/convert.py | jspaezp/jspp_imageutils | 6376e274a1b0675622a7979c181b9effc125aa09 | [
"Apache-2.0"
] | null | null | null | jspp_imageutils/annotations/convert.py | jspaezp/jspp_imageutils | 6376e274a1b0675622a7979c181b9effc125aa09 | [
"Apache-2.0"
] | null | null | null | jspp_imageutils/annotations/convert.py | jspaezp/jspp_imageutils | 6376e274a1b0675622a7979c181b9effc125aa09 | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# modified from:
# https://gist.github.com/rotemtam/88d9a4efae243fc77ed4a0f9917c8f6c
import os
import glob
import click
import pandas as pd
import xml.etree.ElementTree as ET
def xml_to_csv(path: str) -> pd.DataFrame:
xml_list = []
for xml_file in glob.glob(path):
tree = ET.parse(xml_file)
root = tree.getroot()
for member in root.findall('object'):
bbx = member.find('bndbox')
xmin = int(bbx.find('xmin').text)
ymin = int(bbx.find('ymin').text)
xmax = int(bbx.find('xmax').text)
ymax = int(bbx.find('ymax').text)
label = member.find('name').text
# The columns are organized as the csv required by keras-retinanet
# https://github.com/fizyr/keras-retinanet#csv-datasets
# path/to/image.jpg,x1,y1,x2,y2,class_name
value = (root.find('filename').text,
# int(root.find('size')[0].text),
# int(root.find('size')[1].text),
xmin, ymin,
xmax, ymax,
label)
xml_list.append(value)
column_name = ['filename',
# 'width',
# 'height',
'xmin',
'ymin',
'xmax',
'ymax',
'class']
xml_df = pd.DataFrame(xml_list, columns=column_name)
return xml_df
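# --- Illustrative example (added): exercises xml_to_csv on a synthetic
# Pascal-VOC style annotation written to a temporary directory. Field names
# mirror the parsing code above; everything else is made up.
def _demo_xml_to_csv():
    import tempfile
    xml = ("<annotation><filename>img.jpg</filename>"
           "<object><name>cell</name><bndbox>"
           "<xmin>1</xmin><ymin>2</ymin><xmax>3</xmax><ymax>4</ymax>"
           "</bndbox></object></annotation>")
    with tempfile.TemporaryDirectory() as tmp:
        with open(os.path.join(tmp, "a.xml"), "w") as fh:
            fh.write(xml)
        df = xml_to_csv(os.path.join(tmp, "*.xml"))
        assert list(df["filename"]) == ["img.jpg"]
        return df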
def xml_to_csv_file(infile: str, outfile: str):
xml_df = xml_to_csv(infile)
print(xml_df)
xml_df.to_csv(outfile, index=None)
@click.group(help='Converts a pascal xml to csv')
def cli():
pass
@cli.command()
@click.option('--dir', type=str,
help='Name of source directory,' +
' will convert all xml files in it')
@click.option('--out_dir', type=str, help='Name of the destination directory')
def directory(dir, out_dir):
files_convert = [x for x in os.listdir(dir) if x.endswith("xml")]
for xml_file in files_convert:
base = os.path.basename(xml_file)
filename = os.path.splitext(base)[0]
out_filename = filename + ".csv"
out_path = os.path.join(out_dir, out_filename)
xml_to_csv_file(os.path.join(dir, xml_file), out_path)
@cli.command()
@click.option('--file', type=str, help='File to be converted to csv')
@click.option('--out', type=str, help='Name of the destination file')
def xml(file, out):
xml_to_csv_file(file, out)
if __name__ == '__main__':
cli()
| 29.528736 | 79 | 0.570261 |
import os
import glob
import click
import pandas as pd
import xml.etree.ElementTree as ET
def xml_to_csv(path: str) -> pd.DataFrame:
xml_list = []
for xml_file in glob.glob(path):
tree = ET.parse(xml_file)
root = tree.getroot()
for member in root.findall('object'):
bbx = member.find('bndbox')
xmin = int(bbx.find('xmin').text)
ymin = int(bbx.find('ymin').text)
xmax = int(bbx.find('xmax').text)
ymax = int(bbx.find('ymax').text)
label = member.find('name').text
value = (root.find('filename').text,
xmin, ymin,
xmax, ymax,
label)
xml_list.append(value)
column_name = ['filename',
'xmin',
'ymin',
'xmax',
'ymax',
'class']
xml_df = pd.DataFrame(xml_list, columns=column_name)
return xml_df
def xml_to_csv_file(infile: str, outfile: str):
xml_df = xml_to_csv(infile)
print(xml_df)
xml_df.to_csv(outfile, index=None)
@click.group(help='Converts a pascal xml to csv')
def cli():
pass
@cli.command()
@click.option('--dir', type=str,
help='Name of source directory,' +
' will convert all xml files in it')
@click.option('--out_dir', type=str, help='Name of the destination directory')
def directory(dir, out_dir):
files_convert = [x for x in os.listdir(dir) if x.endswith("xml")]
for xml_file in files_convert:
base = os.path.basename(xml_file)
filename = os.path.splitext(base)[0]
out_filename = filename + ".csv"
out_path = os.path.join(out_dir, out_filename)
xml_to_csv_file(os.path.join(dir, xml_file), out_path)
@cli.command()
@click.option('--file', type=str, help='File to be converted to csv')
@click.option('--out', type=str, help='Name of the destination file')
def xml(file, out):
xml_to_csv_file(file, out)
if __name__ == '__main__':
cli()
| true | true |
f71ff15ecd2f2844f2c2b918043ff8217bac2c9b | 3,796 | py | Python | Week6/AdvML_Week6_ex2.py | mikkokotola/AdvancedMachineLearning | 574e82d4104ac04f1cb9889beb5be7d122bd0d01 | [
"MIT"
] | 1 | 2020-03-18T08:51:44.000Z | 2020-03-18T08:51:44.000Z | Week6/AdvML_Week6_ex2.py | mikkokotola/AdvancedMachineLearning | 574e82d4104ac04f1cb9889beb5be7d122bd0d01 | [
"MIT"
] | null | null | null | Week6/AdvML_Week6_ex2.py | mikkokotola/AdvancedMachineLearning | 574e82d4104ac04f1cb9889beb5be7d122bd0d01 | [
"MIT"
] | null | null | null | #!/usr/bin/env python
# coding: utf-8
# In[8]:
## Advanced Course in Machine Learning
## Week 6
## Exercise 2 / Random forest
import numpy as np
import scipy
import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt
import matplotlib.animation as animation
from numpy import linalg as LA
from sklearn import decomposition
from sklearn.preprocessing import StandardScaler
from sklearn.metrics import pairwise_distances
from sklearn.manifold import TSNE
import math
import sys
import mnist
from sklearn.tree import DecisionTreeClassifier
from sklearn.model_selection import cross_val_score
sns.set_style("darkgrid")
# In[4]:
x_train, t_train, x_test, t_test = mnist.load()
# In[48]:
x_train = x_train[0:50000,:]
t_train = t_train[0:50000]
# In[49]:
print(x_train.shape)
print(t_train.shape)
print(x_test.shape)
print(t_test.shape)
# In[69]:
startTestIx = 0
endTestIx = 100
# clf.classes_
# clf.feature_importances_
# print(clf.max_features_)
# print(clf.n_classes_)
# print(clf.n_features_)
# print(clf.n_outputs_)
# #clf.tree_
# In[70]:
# Randomly select the samples and features for the tree
def sample(n, k, x_train, t_train):
idx = np.random.randint(x_train.shape[0], size=n)
fidx = np.random.randint(x_train.shape[1], size=k)
x = x_train[idx, :]
x = x[:, fidx]
y = t_train[idx]
return x, y, idx, fidx
#print("Rows: ", idx, ", features ", fidx)
#print(x.shape)
#print(y.shape)
# In[71]:
def trainTree(x_train, t_train):
clf = DecisionTreeClassifier(random_state=0)
clf = clf.fit(x_train, t_train)
return clf
#cross_val_score(clf, x_train, t_train, cv=10)
# In[72]:
def ensureAllClasses(newPred, clf):
for i in range(10):
if i not in clf.classes_:
newPred = np.insert(newPred, i, 0, axis=1)
return newPred
# In[75]:
# Main loop
def main(M, n, k):
pred = np.zeros(shape = (endTestIx - startTestIx, 10), dtype = 'float32')
for m in range(M):
x, y, idx, fidx = sample(n, k, x_train, t_train)
clf = trainTree(x, y)
newPred = clf.predict_proba(x_test[startTestIx:endTestIx,fidx])
newPred = ensureAllClasses(newPred, clf)
pred = np.add(pred, newPred)
pred_classes = np.argmax(pred, axis=1)
correct = pred_classes == t_test[startTestIx:endTestIx]
acc = sum(correct)/len(correct)
#print(pred_classes)
#print (acc)
return acc
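# --- Added sanity check (illustrative): verifies the shapes produced by
# sample(); relies only on the globals loaded above.
_sx, _sy, _, _ = sample(200, 30, x_train, t_train)
assert _sx.shape == (200, 30) and _sy.shape == (200,)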
# In[85]:
Mmax = 100
n = 1000
k = 20
accs = list()
for m in range(1, Mmax):
accs.append(main(m, n, k))
plt.figure(num=None, figsize=(8, 6), dpi=100, facecolor='w', edgecolor='k')
sns.lineplot(range(1,Mmax), accs)
plt.xlabel('Number of trees (M)')
plt.ylabel('Accuracy of predictions (fraction)')
plt.title('Number of trees vs. accuracy, n = {0}, k = {1}'.format(n, k))
plt.show()
# In[80]:
M = 100
n = 1000
kmax = 200
accs = list()
for k in range(1, kmax, 10):
accs.append(main(M, n, k))
plt.figure(num=None, figsize=(8, 6), dpi=100, facecolor='w', edgecolor='k')
sns.lineplot(range(1,kmax,10), accs)
plt.xlabel('Number of features per tree (k)')
plt.ylabel('Accuracy of predictions (%)')
plt.title('Number of features per tree vs. accuracy, M = {0}, n = {1}'.format(M, n))
plt.show()
# In[81]:
M = 100
nmax = 5000
k = 50
accs = list()
for n in range(1, nmax, 100):
accs.append(main(M, n, k))
plt.figure(num=None, figsize=(8, 6), dpi=100, facecolor='w', edgecolor='k')
sns.lineplot(range(1, nmax, 100), accs)
plt.xlabel('Number of samples per tree (n)')
plt.ylabel('Accuracy of predictions (fraction)')
plt.title('Number of samples per tree vs. accuracy, M = {0}, k = {1}'.format(M, k))
plt.show()
# In[84]:
M = 100
n = 1000
k = 50
repeats = 50
accs = list()
for i in range(50):
accs.append(main(M, n, k))
avAcc = sum(accs)/len(accs)
print(avAcc)
| 19.171717 | 84 | 0.663593 |
import numpy as np
import scipy
import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt
import matplotlib.animation as animation
from numpy import linalg as LA
from sklearn import decomposition
from sklearn.preprocessing import StandardScaler
from sklearn.metrics import pairwise_distances
from sklearn.manifold import TSNE
import math
import sys
import mnist
from sklearn.tree import DecisionTreeClassifier
from sklearn.model_selection import cross_val_score
sns.set_style("darkgrid")
x_train, t_train, x_test, t_test = mnist.load()
x_train = x_train[0:50000,:]
t_train = t_train[0:50000]
print(x_train.shape)
print(t_train.shape)
print(x_test.shape)
print(t_test.shape)
startTestIx = 0
endTestIx = 100
def sample(n, k, x_train, t_train):
idx = np.random.randint(x_train.shape[0], size=n)
fidx = np.random.randint(x_train.shape[1], size=k)
x = x_train[idx, :]
x = x[:, fidx]
y = t_train[idx]
return x, y, idx, fidx
def trainTree(x_train, t_train):
clf = DecisionTreeClassifier(random_state=0)
clf = clf.fit(x_train, t_train)
return clf
def ensureAllClasses(newPred, clf):
for i in range(10):
if i not in clf.classes_:
newPred = np.insert(newPred, i, 0, axis=1)
return newPred
def main(M, n, k):
pred = np.zeros(shape = (endTestIx - startTestIx, 10), dtype = 'float32')
for m in range(M):
x, y, idx, fidx = sample(n, k, x_train, t_train)
clf = trainTree(x, y)
newPred = clf.predict_proba(x_test[startTestIx:endTestIx,fidx])
newPred = ensureAllClasses(newPred, clf)
pred = np.add(pred, newPred)
pred_classes = np.argmax(pred, axis=1)
correct = pred_classes == t_test[startTestIx:endTestIx]
acc = sum(correct)/len(correct)
return acc
Mmax = 100
n = 1000
k = 20
accs = list()
for m in range(1, Mmax):
accs.append(main(m, n, k))
plt.figure(num=None, figsize=(8, 6), dpi=100, facecolor='w', edgecolor='k')
sns.lineplot(range(1,Mmax), accs)
plt.xlabel('Number of trees (M)')
plt.ylabel('Accuracy of predictions (fraction)')
plt.title('Number of trees vs. accuracy, n = {0}, k = {1}'.format(n, k))
plt.show()
M = 100
n = 1000
kmax = 200
accs = list()
for k in range(1, kmax, 10):
accs.append(main(M, n, k))
plt.figure(num=None, figsize=(8, 6), dpi=100, facecolor='w', edgecolor='k')
sns.lineplot(range(1,kmax,10), accs)
plt.xlabel('Number of features per tree (k)')
plt.ylabel('Accuracy of predictions (fraction)')
plt.title('Number of features per tree vs. accuracy, M = {0}, n = {1}'.format(M, n))
plt.show()
M = 100
nmax = 5000
k = 50
accs = list()
for n in range(1, nmax, 100):
accs.append(main(M, n, k))
plt.figure(num=None, figsize=(8, 6), dpi=100, facecolor='w', edgecolor='k')
sns.lineplot(range(1, nmax, 100), accs)
plt.xlabel('Number of samples per tree (n)')
plt.ylabel('Accuracy of predictions (fraction)')
plt.title('Number of samples per tree vs. accuracy, M = {0}, k = {1}'.format(M, k))
plt.show()
M = 100
n = 1000
k = 50
repeats = 50
accs = list()
for i in range(50):
accs.append(main(M, n, k))
avAcc = sum(accs)/len(accs)
print(avAcc)
| true | true |
f71ff26a2521e900d339ce88a0f2dc11b89982ac | 8,979 | py | Python | models/layers/mesh_pool.py | yamaguchi1024/MeshCNN | 197530eab2aa4c2419511c1854dcbc662377f340 | [
"MIT"
] | null | null | null | models/layers/mesh_pool.py | yamaguchi1024/MeshCNN | 197530eab2aa4c2419511c1854dcbc662377f340 | [
"MIT"
] | null | null | null | models/layers/mesh_pool.py | yamaguchi1024/MeshCNN | 197530eab2aa4c2419511c1854dcbc662377f340 | [
"MIT"
] | 1 | 2020-10-10T23:31:50.000Z | 2020-10-10T23:31:50.000Z | import torch
import torch.nn as nn
from threading import Thread
from models.layers.mesh_union import MeshUnion
import numpy as np
from heapq import heappop, heapify
class MeshPool(nn.Module):
def __init__(self, target, multi_thread=False):
super(MeshPool, self).__init__()
self.__out_target = target
self.__multi_thread = multi_thread
self.__fe = None
self.__updated_fe = None
self.__meshes = None
self.__merge_edges = [-1, -1]
def __call__(self, fe, meshes):
return self.forward(fe, meshes)
def forward(self, fe, meshes):
self.__updated_fe = [[] for _ in range(len(meshes))]
pool_threads = []
self.__fe = fe
self.__meshes = meshes
# iterate over batch
for mesh_index in range(len(meshes)):
if self.__multi_thread:
pool_threads.append(Thread(target=self.__pool_main, args=(mesh_index,)))
pool_threads[-1].start()
else:
self.__pool_main(mesh_index)
if self.__multi_thread:
for mesh_index in range(len(meshes)):
pool_threads[mesh_index].join()
out_features = torch.cat(self.__updated_fe).view(len(meshes), -1, self.__out_target)
return out_features
def __pool_main(self, mesh_index):
mesh = self.__meshes[mesh_index]
queue = self.__build_queue(self.__fe[mesh_index, :, :mesh.edges_count], mesh.edges_count)
# recycle = []
# last_queue_len = len(queue)
last_count = mesh.edges_count + 1
mask = np.ones(mesh.edges_count, dtype=np.uint8)
edge_groups = MeshUnion(mesh.edges_count, self.__fe.device)
while mesh.edges_count > self.__out_target:
value, edge_id = heappop(queue)
edge_id = int(edge_id)
if mask[edge_id]:
self.__pool_edge(mesh, edge_id, mask, edge_groups)
mesh.clean(mask, edge_groups)
fe = edge_groups.rebuild_features(self.__fe[mesh_index], mask, self.__out_target)
self.__updated_fe[mesh_index] = fe
def __pool_edge(self, mesh, edge_id, mask, edge_groups):
if self.has_boundaries(mesh, edge_id):
return False
elif self.__clean_side(mesh, edge_id, mask, edge_groups, 0)\
and self.__clean_side(mesh, edge_id, mask, edge_groups, 2) \
and self.__is_one_ring_valid(mesh, edge_id):
self.__merge_edges[0] = self.__pool_side(mesh, edge_id, mask, edge_groups, 0)
self.__merge_edges[1] = self.__pool_side(mesh, edge_id, mask, edge_groups, 2)
mesh.merge_vertices(edge_id)
mask[edge_id] = False
MeshPool.__remove_group(mesh, edge_groups, edge_id)
mesh.edges_count -= 1
return True
else:
return False
def __clean_side(self, mesh, edge_id, mask, edge_groups, side):
if mesh.edges_count <= self.__out_target:
return False
invalid_edges = MeshPool.__get_invalids(mesh, edge_id, edge_groups, side)
while len(invalid_edges) != 0 and mesh.edges_count > self.__out_target:
self.__remove_triplete(mesh, mask, edge_groups, invalid_edges)
if mesh.edges_count <= self.__out_target:
return False
if self.has_boundaries(mesh, edge_id):
return False
invalid_edges = self.__get_invalids(mesh, edge_id, edge_groups, side)
return True
@staticmethod
def has_boundaries(mesh, edge_id):
for edge in mesh.gemm_edges[edge_id]:
if edge == -1 or -1 in mesh.gemm_edges[edge]:
return True
return False
@staticmethod
def __is_one_ring_valid(mesh, edge_id):
v_a = set(mesh.edges[mesh.ve[mesh.edges[edge_id, 0]]].reshape(-1))
v_b = set(mesh.edges[mesh.ve[mesh.edges[edge_id, 1]]].reshape(-1))
shared = v_a & v_b - set(mesh.edges[edge_id])
return len(shared) == 2
def __pool_side(self, mesh, edge_id, mask, edge_groups, side):
info = MeshPool.__get_face_info(mesh, edge_id, side)
key_a, key_b, side_a, side_b, _, other_side_b, _, other_keys_b = info
self.__redirect_edges(mesh, key_a, side_a - side_a % 2, other_keys_b[0], mesh.sides[key_b, other_side_b])
self.__redirect_edges(mesh, key_a, side_a - side_a % 2 + 1, other_keys_b[1], mesh.sides[key_b, other_side_b + 1])
MeshPool.__union_groups(mesh, edge_groups, key_b, key_a)
MeshPool.__union_groups(mesh, edge_groups, edge_id, key_a)
mask[key_b] = False
MeshPool.__remove_group(mesh, edge_groups, key_b)
mesh.remove_edge(key_b)
mesh.edges_count -= 1
return key_a
@staticmethod
def __get_invalids(mesh, edge_id, edge_groups, side):
info = MeshPool.__get_face_info(mesh, edge_id, side)
key_a, key_b, side_a, side_b, other_side_a, other_side_b, other_keys_a, other_keys_b = info
shared_items = MeshPool.__get_shared_items(other_keys_a, other_keys_b)
if len(shared_items) == 0:
return []
else:
assert (len(shared_items) == 2)
middle_edge = other_keys_a[shared_items[0]]
update_key_a = other_keys_a[1 - shared_items[0]]
update_key_b = other_keys_b[1 - shared_items[1]]
update_side_a = mesh.sides[key_a, other_side_a + 1 - shared_items[0]]
update_side_b = mesh.sides[key_b, other_side_b + 1 - shared_items[1]]
MeshPool.__redirect_edges(mesh, edge_id, side, update_key_a, update_side_a)
MeshPool.__redirect_edges(mesh, edge_id, side + 1, update_key_b, update_side_b)
MeshPool.__redirect_edges(mesh, update_key_a, MeshPool.__get_other_side(update_side_a), update_key_b, MeshPool.__get_other_side(update_side_b))
MeshPool.__union_groups(mesh, edge_groups, key_a, edge_id)
MeshPool.__union_groups(mesh, edge_groups, key_b, edge_id)
MeshPool.__union_groups(mesh, edge_groups, key_a, update_key_a)
MeshPool.__union_groups(mesh, edge_groups, middle_edge, update_key_a)
MeshPool.__union_groups(mesh, edge_groups, key_b, update_key_b)
MeshPool.__union_groups(mesh, edge_groups, middle_edge, update_key_b)
return [key_a, key_b, middle_edge]
@staticmethod
def __redirect_edges(mesh, edge_a_key, side_a, edge_b_key, side_b):
mesh.gemm_edges[edge_a_key, side_a] = edge_b_key
mesh.gemm_edges[edge_b_key, side_b] = edge_a_key
mesh.sides[edge_a_key, side_a] = side_b
mesh.sides[edge_b_key, side_b] = side_a
@staticmethod
def __get_shared_items(list_a, list_b):
shared_items = []
for i in range(len(list_a)):
for j in range(len(list_b)):
if list_a[i] == list_b[j]:
shared_items.extend([i, j])
return shared_items
@staticmethod
def __get_other_side(side):
return side + 1 - 2 * (side % 2)
@staticmethod
def __get_face_info(mesh, edge_id, side):
key_a = mesh.gemm_edges[edge_id, side]
key_b = mesh.gemm_edges[edge_id, side + 1]
side_a = mesh.sides[edge_id, side]
side_b = mesh.sides[edge_id, side + 1]
other_side_a = (side_a - (side_a % 2) + 2) % 4
other_side_b = (side_b - (side_b % 2) + 2) % 4
other_keys_a = [mesh.gemm_edges[key_a, other_side_a], mesh.gemm_edges[key_a, other_side_a + 1]]
other_keys_b = [mesh.gemm_edges[key_b, other_side_b], mesh.gemm_edges[key_b, other_side_b + 1]]
return key_a, key_b, side_a, side_b, other_side_a, other_side_b, other_keys_a, other_keys_b
@staticmethod
def __remove_triplete(mesh, mask, edge_groups, invalid_edges):
vertex = set(mesh.edges[invalid_edges[0]])
for edge_key in invalid_edges:
vertex &= set(mesh.edges[edge_key])
mask[edge_key] = False
MeshPool.__remove_group(mesh, edge_groups, edge_key)
mesh.edges_count -= 3
vertex = list(vertex)
assert(len(vertex) == 1)
mesh.remove_vertex(vertex[0])
def __build_queue(self, features, edges_count):
# delete edges with smallest norm
squared_magnitude = torch.sum(features * features, 0)
if squared_magnitude.shape[-1] != 1:
squared_magnitude = squared_magnitude.unsqueeze(-1)
edge_ids = torch.arange(edges_count, device=squared_magnitude.device, dtype=torch.float32).unsqueeze(-1)
heap = torch.cat((squared_magnitude, edge_ids), dim=-1).tolist()
heapify(heap)
return heap
@staticmethod
def __union_groups(mesh, edge_groups, source, target):
edge_groups.union(source, target)
mesh.union_groups(source, target)
@staticmethod
def __remove_group(mesh, edge_groups, index):
edge_groups.remove_group(index)
mesh.remove_group(index)
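# --- Illustrative usage (added): how MeshPool is typically wired up. The
# tensor/mesh names below are assumptions, not part of this module:
#   pool = MeshPool(target=600)
#   out = pool(edge_features, meshes)  # edge_features: (batch, channels, edges)
#   # out has shape (batch, channels, 600) after collapsing the weakest edges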
| 44.014706 | 155 | 0.646397 | import torch
import torch.nn as nn
from threading import Thread
from models.layers.mesh_union import MeshUnion
import numpy as np
from heapq import heappop, heapify
class MeshPool(nn.Module):
def __init__(self, target, multi_thread=False):
super(MeshPool, self).__init__()
self.__out_target = target
self.__multi_thread = multi_thread
self.__fe = None
self.__updated_fe = None
self.__meshes = None
self.__merge_edges = [-1, -1]
def __call__(self, fe, meshes):
return self.forward(fe, meshes)
def forward(self, fe, meshes):
self.__updated_fe = [[] for _ in range(len(meshes))]
pool_threads = []
self.__fe = fe
self.__meshes = meshes
for mesh_index in range(len(meshes)):
if self.__multi_thread:
pool_threads.append(Thread(target=self.__pool_main, args=(mesh_index,)))
pool_threads[-1].start()
else:
self.__pool_main(mesh_index)
if self.__multi_thread:
for mesh_index in range(len(meshes)):
pool_threads[mesh_index].join()
out_features = torch.cat(self.__updated_fe).view(len(meshes), -1, self.__out_target)
return out_features
def __pool_main(self, mesh_index):
mesh = self.__meshes[mesh_index]
queue = self.__build_queue(self.__fe[mesh_index, :, :mesh.edges_count], mesh.edges_count)
last_count = mesh.edges_count + 1
mask = np.ones(mesh.edges_count, dtype=np.uint8)
edge_groups = MeshUnion(mesh.edges_count, self.__fe.device)
while mesh.edges_count > self.__out_target:
value, edge_id = heappop(queue)
edge_id = int(edge_id)
if mask[edge_id]:
self.__pool_edge(mesh, edge_id, mask, edge_groups)
mesh.clean(mask, edge_groups)
fe = edge_groups.rebuild_features(self.__fe[mesh_index], mask, self.__out_target)
self.__updated_fe[mesh_index] = fe
def __pool_edge(self, mesh, edge_id, mask, edge_groups):
if self.has_boundaries(mesh, edge_id):
return False
elif self.__clean_side(mesh, edge_id, mask, edge_groups, 0)\
and self.__clean_side(mesh, edge_id, mask, edge_groups, 2) \
and self.__is_one_ring_valid(mesh, edge_id):
self.__merge_edges[0] = self.__pool_side(mesh, edge_id, mask, edge_groups, 0)
self.__merge_edges[1] = self.__pool_side(mesh, edge_id, mask, edge_groups, 2)
mesh.merge_vertices(edge_id)
mask[edge_id] = False
MeshPool.__remove_group(mesh, edge_groups, edge_id)
mesh.edges_count -= 1
return True
else:
return False
def __clean_side(self, mesh, edge_id, mask, edge_groups, side):
if mesh.edges_count <= self.__out_target:
return False
invalid_edges = MeshPool.__get_invalids(mesh, edge_id, edge_groups, side)
while len(invalid_edges) != 0 and mesh.edges_count > self.__out_target:
self.__remove_triplete(mesh, mask, edge_groups, invalid_edges)
if mesh.edges_count <= self.__out_target:
return False
if self.has_boundaries(mesh, edge_id):
return False
invalid_edges = self.__get_invalids(mesh, edge_id, edge_groups, side)
return True
@staticmethod
def has_boundaries(mesh, edge_id):
for edge in mesh.gemm_edges[edge_id]:
if edge == -1 or -1 in mesh.gemm_edges[edge]:
return True
return False
@staticmethod
def __is_one_ring_valid(mesh, edge_id):
v_a = set(mesh.edges[mesh.ve[mesh.edges[edge_id, 0]]].reshape(-1))
v_b = set(mesh.edges[mesh.ve[mesh.edges[edge_id, 1]]].reshape(-1))
shared = v_a & v_b - set(mesh.edges[edge_id])
return len(shared) == 2
def __pool_side(self, mesh, edge_id, mask, edge_groups, side):
info = MeshPool.__get_face_info(mesh, edge_id, side)
key_a, key_b, side_a, side_b, _, other_side_b, _, other_keys_b = info
self.__redirect_edges(mesh, key_a, side_a - side_a % 2, other_keys_b[0], mesh.sides[key_b, other_side_b])
self.__redirect_edges(mesh, key_a, side_a - side_a % 2 + 1, other_keys_b[1], mesh.sides[key_b, other_side_b + 1])
MeshPool.__union_groups(mesh, edge_groups, key_b, key_a)
MeshPool.__union_groups(mesh, edge_groups, edge_id, key_a)
mask[key_b] = False
MeshPool.__remove_group(mesh, edge_groups, key_b)
mesh.remove_edge(key_b)
mesh.edges_count -= 1
return key_a
@staticmethod
def __get_invalids(mesh, edge_id, edge_groups, side):
info = MeshPool.__get_face_info(mesh, edge_id, side)
key_a, key_b, side_a, side_b, other_side_a, other_side_b, other_keys_a, other_keys_b = info
shared_items = MeshPool.__get_shared_items(other_keys_a, other_keys_b)
if len(shared_items) == 0:
return []
else:
assert (len(shared_items) == 2)
middle_edge = other_keys_a[shared_items[0]]
update_key_a = other_keys_a[1 - shared_items[0]]
update_key_b = other_keys_b[1 - shared_items[1]]
update_side_a = mesh.sides[key_a, other_side_a + 1 - shared_items[0]]
update_side_b = mesh.sides[key_b, other_side_b + 1 - shared_items[1]]
MeshPool.__redirect_edges(mesh, edge_id, side, update_key_a, update_side_a)
MeshPool.__redirect_edges(mesh, edge_id, side + 1, update_key_b, update_side_b)
MeshPool.__redirect_edges(mesh, update_key_a, MeshPool.__get_other_side(update_side_a), update_key_b, MeshPool.__get_other_side(update_side_b))
MeshPool.__union_groups(mesh, edge_groups, key_a, edge_id)
MeshPool.__union_groups(mesh, edge_groups, key_b, edge_id)
MeshPool.__union_groups(mesh, edge_groups, key_a, update_key_a)
MeshPool.__union_groups(mesh, edge_groups, middle_edge, update_key_a)
MeshPool.__union_groups(mesh, edge_groups, key_b, update_key_b)
MeshPool.__union_groups(mesh, edge_groups, middle_edge, update_key_b)
return [key_a, key_b, middle_edge]
@staticmethod
def __redirect_edges(mesh, edge_a_key, side_a, edge_b_key, side_b):
mesh.gemm_edges[edge_a_key, side_a] = edge_b_key
mesh.gemm_edges[edge_b_key, side_b] = edge_a_key
mesh.sides[edge_a_key, side_a] = side_b
mesh.sides[edge_b_key, side_b] = side_a
@staticmethod
def __get_shared_items(list_a, list_b):
shared_items = []
for i in range(len(list_a)):
for j in range(len(list_b)):
if list_a[i] == list_b[j]:
shared_items.extend([i, j])
return shared_items
@staticmethod
def __get_other_side(side):
return side + 1 - 2 * (side % 2)
@staticmethod
def __get_face_info(mesh, edge_id, side):
key_a = mesh.gemm_edges[edge_id, side]
key_b = mesh.gemm_edges[edge_id, side + 1]
side_a = mesh.sides[edge_id, side]
side_b = mesh.sides[edge_id, side + 1]
other_side_a = (side_a - (side_a % 2) + 2) % 4
other_side_b = (side_b - (side_b % 2) + 2) % 4
other_keys_a = [mesh.gemm_edges[key_a, other_side_a], mesh.gemm_edges[key_a, other_side_a + 1]]
other_keys_b = [mesh.gemm_edges[key_b, other_side_b], mesh.gemm_edges[key_b, other_side_b + 1]]
return key_a, key_b, side_a, side_b, other_side_a, other_side_b, other_keys_a, other_keys_b
@staticmethod
def __remove_triplete(mesh, mask, edge_groups, invalid_edges):
vertex = set(mesh.edges[invalid_edges[0]])
for edge_key in invalid_edges:
vertex &= set(mesh.edges[edge_key])
mask[edge_key] = False
MeshPool.__remove_group(mesh, edge_groups, edge_key)
mesh.edges_count -= 3
vertex = list(vertex)
assert(len(vertex) == 1)
mesh.remove_vertex(vertex[0])
def __build_queue(self, features, edges_count):
squared_magnitude = torch.sum(features * features, 0)
if squared_magnitude.shape[-1] != 1:
squared_magnitude = squared_magnitude.unsqueeze(-1)
edge_ids = torch.arange(edges_count, device=squared_magnitude.device, dtype=torch.float32).unsqueeze(-1)
heap = torch.cat((squared_magnitude, edge_ids), dim=-1).tolist()
heapify(heap)
return heap
@staticmethod
def __union_groups(mesh, edge_groups, source, target):
edge_groups.union(source, target)
mesh.union_groups(source, target)
@staticmethod
def __remove_group(mesh, edge_groups, index):
edge_groups.remove_group(index)
mesh.remove_group(index)
| true | true |
f71ff2713f88d105f4975f0cee61ae6ef8e14fed | 896 | py | Python | nn_dataflow/nns/mlp_s.py | joeshow79/nn_dataflow | 279440452148ebf327992bd178a37cd5fd5330c5 | [
"BSD-3-Clause"
] | null | null | null | nn_dataflow/nns/mlp_s.py | joeshow79/nn_dataflow | 279440452148ebf327992bd178a37cd5fd5330c5 | [
"BSD-3-Clause"
] | null | null | null | nn_dataflow/nns/mlp_s.py | joeshow79/nn_dataflow | 279440452148ebf327992bd178a37cd5fd5330c5 | [
"BSD-3-Clause"
] | null | null | null | """ $lic$
Copyright (C) 2016-2019 by The Board of Trustees of Stanford University
This program is free software: you can redistribute it and/or modify it under
the terms of the Modified BSD-3 License as published by the Open Source
Initiative.
This program is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A
PARTICULAR PURPOSE. See the BSD-3 License for more details.
You should have received a copy of the Modified BSD-3 License along with this
program. If not, see <https://opensource.org/licenses/BSD-3-Clause>.
"""
from nn_dataflow.core import Network
from nn_dataflow.core import InputLayer, FCLayer
'''
MLP-S
PRIME, 2016
'''
NN = Network('MLP-S')
NN.set_input_layer(InputLayer(784, 1))
NN.add('fc1', FCLayer(784, 500))
NN.add('fc2', FCLayer(500, 250))
NN.add('fc3', FCLayer(250, 10))
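# Added check (illustrative): weight-count arithmetic for the layers above,
# biases ignored: 784*500 + 500*250 + 250*10 = 519,500 parameters.
assert 784 * 500 + 500 * 250 + 250 * 10 == 519500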
| 27.151515 | 79 | 0.756696 |
from nn_dataflow.core import Network
from nn_dataflow.core import InputLayer, FCLayer
NN = Network('MLP-S')
NN.set_input_layer(InputLayer(784, 1))
NN.add('fc1', FCLayer(784, 500))
NN.add('fc2', FCLayer(500, 250))
NN.add('fc3', FCLayer(250, 10))
| true | true |
f71ff2d8ade82949986924ccc275d50947e1700f | 3,753 | py | Python | nexuscasc/config/k8s_config_handler.py | vjda/nexus3-casc-cli | 6d33503c19f75b73b656293141911e083331faf0 | [
"MIT"
] | 7 | 2020-06-30T08:15:53.000Z | 2022-03-22T10:28:09.000Z | nexuscasc/config/k8s_config_handler.py | vjda/nexus3-casc-cli | 6d33503c19f75b73b656293141911e083331faf0 | [
"MIT"
] | 1 | 2021-09-27T03:24:46.000Z | 2021-09-27T03:24:46.000Z | nexuscasc/config/k8s_config_handler.py | vjda/nexus3-casc-cli | 6d33503c19f75b73b656293141911e083331faf0 | [
"MIT"
] | 1 | 2020-12-07T14:55:30.000Z | 2020-12-07T14:55:30.000Z | import base64
import re
from dataclasses import dataclass
from enum import Enum
from typing import Union, List
from kubernetes import client, config
from kubernetes.client import V1ConfigMapList, V1SecretList, CoreV1Api, V1Secret, V1ConfigMap
from nexuscasc.logger import Logger
class ResourceType(Enum):
SECRET, CONFIGMAP = range(2)
@dataclass
class WatchedResource:
name: str
version: str
type: ResourceType
class K8sConfigHandler:
v1: CoreV1Api = None
watch_list: List[WatchedResource] = list()
def __init__(self, local: bool = False):
if local:
config.load_kube_config()
else:
config.load_incluster_config()
self.v1 = client.CoreV1Api()
@staticmethod
def filter_resources(
resources: Union[V1ConfigMapList, V1SecretList],
label_value: str = None
) -> List[Union[V1ConfigMap, V1Secret]]:
matches = list()
for res in resources.items:
if label_value is None:
matches.append(res)
elif len(list(filter(lambda x: res.metadata.labels[x] == label_value, res.metadata.labels.keys()))) > 0:
matches.append(res)
return matches
def find_config_maps(self, namespace: str, label: str, label_value: str = None) -> List[V1ConfigMap]:
config_maps = self.v1.list_namespaced_config_map(namespace=namespace, label_selector=label)
return self.filter_resources(config_maps, label_value)
def find_secrets(self, namespace: str, label: str, label_value: str = None) -> List[V1Secret]:
secrets = self.v1.list_namespaced_secret(namespace=namespace, label_selector=label)
return self.filter_resources(secrets, label_value)
@staticmethod
def extract_yaml_strings_from_resources(resources: List[Union[V1ConfigMap, V1Secret]]) -> List[str]:
yaml_str = list()
for res in resources:
            for k in filter(lambda key: re.search("\\.(yml|yaml)$", key), res.data.keys()):
if type(res) == V1Secret:
Logger.debug(f"Found yaml in key '{k}' for secret '{res.metadata.name}'")
yaml_str.append(base64.b64decode(res.data[k]).decode())
else:
Logger.debug(f"Found yaml in key '{k}' for configmap '{res.metadata.name}'")
yaml_str.append(res.data[k])
return yaml_str
def any_resource_has_changed(self, resources: List[Union[V1ConfigMap, V1Secret]]) -> bool:
has_changed = False
if len(self.watch_list) == 0:
has_changed = True
for res in resources:
self.watch_resource(res)
else:
for res in resources:
r_name = res.metadata.name
r_type = ResourceType.SECRET if type(res) == V1Secret else ResourceType.CONFIGMAP
watched_resource = next(filter(lambda r: r_name == r.name and r_type == r.type, self.watch_list), None)
if watched_resource is None:
self.watch_resource(res)
has_changed = True
break
elif watched_resource.version != res.metadata.resource_version:
watched_resource.version = res.metadata.resource_version
has_changed = True
break
return has_changed
def watch_resource(self, resource: Union[V1ConfigMap, V1Secret]):
self.watch_list.append(
WatchedResource(
name=resource.metadata.name,
version=resource.metadata.resource_version,
type=ResourceType.SECRET if type(resource) == V1Secret else ResourceType.CONFIGMAP
))
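# --- Illustrative usage (added): a minimal reconcile loop. The namespace and
# label values are placeholders, not part of the original module:
#   handler = K8sConfigHandler(local=True)
#   cms = handler.find_config_maps("nexus", label="casc", label_value="enabled")
#   secrets = handler.find_secrets("nexus", label="casc", label_value="enabled")
#   if handler.any_resource_has_changed(cms + secrets):
#       for yml in K8sConfigHandler.extract_yaml_strings_from_resources(cms + secrets):
#           ...  # apply the configuration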
| 38.295918 | 119 | 0.625633 | import base64
import re
from dataclasses import dataclass
from enum import Enum
from typing import Union, List
from kubernetes import client, config
from kubernetes.client import V1ConfigMapList, V1SecretList, CoreV1Api, V1Secret, V1ConfigMap
from nexuscasc.logger import Logger
class ResourceType(Enum):
SECRET, CONFIGMAP = range(2)
@dataclass
class WatchedResource:
name: str
version: str
type: ResourceType
class K8sConfigHandler:
v1: CoreV1Api = None
watch_list: List[WatchedResource] = list()
def __init__(self, local: bool = False):
if local:
config.load_kube_config()
else:
config.load_incluster_config()
self.v1 = client.CoreV1Api()
@staticmethod
def filter_resources(
resources: Union[V1ConfigMapList, V1SecretList],
label_value: str = None
) -> List[Union[V1ConfigMap, V1Secret]]:
matches = list()
for res in resources.items:
if label_value is None:
matches.append(res)
elif len(list(filter(lambda x: res.metadata.labels[x] == label_value, res.metadata.labels.keys()))) > 0:
matches.append(res)
return matches
def find_config_maps(self, namespace: str, label: str, label_value: str = None) -> List[V1ConfigMap]:
config_maps = self.v1.list_namespaced_config_map(namespace=namespace, label_selector=label)
return self.filter_resources(config_maps, label_value)
def find_secrets(self, namespace: str, label: str, label_value: str = None) -> List[V1Secret]:
secrets = self.v1.list_namespaced_secret(namespace=namespace, label_selector=label)
return self.filter_resources(secrets, label_value)
@staticmethod
def extract_yaml_strings_from_resources(resources: List[Union[V1ConfigMap, V1Secret]]) -> List[str]:
yaml_str = list()
for res in resources:
            for k in filter(lambda key: re.search("\\.(yml|yaml)$", key), res.data.keys()):
if type(res) == V1Secret:
Logger.debug(f"Found yaml in key '{k}' for secret '{res.metadata.name}'")
yaml_str.append(base64.b64decode(res.data[k]).decode())
else:
Logger.debug(f"Found yaml in key '{k}' for configmap '{res.metadata.name}'")
yaml_str.append(res.data[k])
return yaml_str
def any_resource_has_changed(self, resources: List[Union[V1ConfigMap, V1Secret]]) -> bool:
has_changed = False
if len(self.watch_list) == 0:
has_changed = True
for res in resources:
self.watch_resource(res)
else:
for res in resources:
r_name = res.metadata.name
r_type = ResourceType.SECRET if type(res) == V1Secret else ResourceType.CONFIGMAP
watched_resource = next(filter(lambda r: r_name == r.name and r_type == r.type, self.watch_list), None)
if watched_resource is None:
self.watch_resource(res)
has_changed = True
break
elif watched_resource.version != res.metadata.resource_version:
watched_resource.version = res.metadata.resource_version
has_changed = True
break
return has_changed
def watch_resource(self, resource: Union[V1ConfigMap, V1Secret]):
self.watch_list.append(
WatchedResource(
name=resource.metadata.name,
version=resource.metadata.resource_version,
type=ResourceType.SECRET if type(resource) == V1Secret else ResourceType.CONFIGMAP
))
| true | true |
f71ff2dfd267b7ca272fcc6a2a50017e19cd8ff1 | 3,423 | py | Python | tests/commands/spot/test_limit_order_cmd.py | mpetrinidev/bnb-cli | d10fc36b6a1e2dd34597d9f31ea143019ba6f4b9 | [
"MIT"
] | 5 | 2021-04-02T20:49:19.000Z | 2021-06-11T06:22:24.000Z | tests/commands/spot/test_limit_order_cmd.py | mpetrinidev/bnb-cli | d10fc36b6a1e2dd34597d9f31ea143019ba6f4b9 | [
"MIT"
] | 2 | 2021-06-13T20:22:09.000Z | 2021-06-14T23:09:14.000Z | tests/commands/spot/test_limit_order_cmd.py | mpetrinidev/bnc-cli | d10fc36b6a1e2dd34597d9f31ea143019ba6f4b9 | [
"MIT"
] | null | null | null | import datetime
import os
from unittest.mock import Mock
from bnc.cli import cli
from bnc.utils.utils import json_to_str
from tests.commands.common import read_json_test_file, get_headers
from tests.commands.common_fixtures import *
def get_json_filename():
return os.path.join(os.path.dirname(os.path.abspath(__file__)), 'res', 'new_order.json')
@pytest.fixture(scope='session')
def data():
return read_json_test_file(get_json_filename())
@pytest.mark.parametrize("params", [
['spot', 'new_order', 'limit', '-sy', 'LTCBTC', '-si', 'BUY', '-tif', 'GTC', '-q', 1, '-p', 0.003621],
['spot', 'new_order', 'limit', '--symbol', 'LTCBTC', '--side', 'BUY', '--time_in_force', 'GTC', '--quantity', 1, '--price', 0.003621],
['spot', 'new_order', 'limit', '-sy', 'LTCBTC', '-si', 'BUY', '-tif', 'GTC', '-q', 1, '-p', 0.003621, '-ncoid', "custom_id"],
['spot', 'new_order', 'limit', '--symbol', 'LTCBTC', '--side', 'BUY', '--time_in_force', 'GTC', '--quantity', 1, '--price', 0.003621,
'--new_client_order_id', "custom_id"],
['spot', 'new_order', 'limit', '-sy', 'LTCBTC', '-si', 'BUY', '-tif', 'GTC', '-q', 1, '-p', 0.003621, '-iq', 0.20],
['spot', 'new_order', 'limit', '--symbol', 'LTCBTC', '--side', 'BUY', '--time_in_force', 'GTC', '--quantity', 1, '--price', 0.003621,
'--iceberg_qty', 0.20]
])
def test_new_order_limit_return_full_resp(runner, params, mock_default_deps, data):
mock_response = Mock(status_code=200, elapsed=datetime.datetime.now(), headers=get_headers())
mock_response.json.return_value = data['limit_full']
mock_default_deps.patch('bnc.builder.requests.post', return_value=mock_response)
result = runner.invoke(cli, params)
assert result.exit_code == 0
assert result.output == json_to_str(data['limit_full']) + '\n'
@pytest.mark.parametrize("params", [
['spot', 'new_order', 'limit', '-sy', 'LTCBTC', '-si', 'BUY', '-tif', 'GTC', '-q', 1, '-p', 0.003621, '-nort', 'ACK'],
['spot', 'new_order', 'limit', '--symbol', 'LTCBTC', '--side', 'BUY', '--time_in_force', 'GTC', '--quantity', 1, '--price', 0.003621,
'--new_order_resp_type', 'ACK']
])
def test_new_order_limit_return_ack_resp(runner, params, mock_default_deps, data):
mock_response = Mock(status_code=200, elapsed=datetime.datetime.now(), headers=get_headers())
mock_response.json.return_value = data['limit_ack']
mock_default_deps.patch('bnc.builder.requests.post', return_value=mock_response)
result = runner.invoke(cli, params)
assert result.exit_code == 0
assert result.output == json_to_str(data['limit_ack']) + '\n'
@pytest.mark.parametrize("params", [
['spot', 'new_order', 'limit', '-sy', 'LTCBTC', '-si', 'BUY', '-tif', 'GTC', '-q', 1, '-p', 0.003621, '-nort', 'RESULT'],
['spot', 'new_order', 'limit', '--symbol', 'LTCBTC', '--side', 'BUY', '--time_in_force', 'GTC', '--quantity', 1, '--price', 0.003621,
'--new_order_resp_type', 'RESULT']
])
def test_new_order_limit_return_result_resp(runner, params, mock_default_deps, data):
mock_response = Mock(status_code=200, elapsed=datetime.datetime.now(), headers=get_headers())
mock_response.json.return_value = data['limit_result']
mock_default_deps.patch('bnc.builder.requests.post', return_value=mock_response)
result = runner.invoke(cli, params)
assert result.exit_code == 0
assert result.output == json_to_str(data['limit_result']) + '\n'
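# --- Note (added): an illustrative way to run this module with pytest:
#   pytest tests/commands/spot/test_limit_order_cmd.py -q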
| 48.211268 | 138 | 0.650891 | import datetime
import os
from unittest.mock import Mock
from bnc.cli import cli
from bnc.utils.utils import json_to_str
from tests.commands.common import read_json_test_file, get_headers
from tests.commands.common_fixtures import *
def get_json_filename():
return os.path.join(os.path.dirname(os.path.abspath(__file__)), 'res', 'new_order.json')
@pytest.fixture(scope='session')
def data():
return read_json_test_file(get_json_filename())
@pytest.mark.parametrize("params", [
['spot', 'new_order', 'limit', '-sy', 'LTCBTC', '-si', 'BUY', '-tif', 'GTC', '-q', 1, '-p', 0.003621],
['spot', 'new_order', 'limit', '--symbol', 'LTCBTC', '--side', 'BUY', '--time_in_force', 'GTC', '--quantity', 1, '--price', 0.003621],
['spot', 'new_order', 'limit', '-sy', 'LTCBTC', '-si', 'BUY', '-tif', 'GTC', '-q', 1, '-p', 0.003621, '-ncoid', "custom_id"],
['spot', 'new_order', 'limit', '--symbol', 'LTCBTC', '--side', 'BUY', '--time_in_force', 'GTC', '--quantity', 1, '--price', 0.003621,
'--new_client_order_id', "custom_id"],
['spot', 'new_order', 'limit', '-sy', 'LTCBTC', '-si', 'BUY', '-tif', 'GTC', '-q', 1, '-p', 0.003621, '-iq', 0.20],
['spot', 'new_order', 'limit', '--symbol', 'LTCBTC', '--side', 'BUY', '--time_in_force', 'GTC', '--quantity', 1, '--price', 0.003621,
'--iceberg_qty', 0.20]
])
def test_new_order_limit_return_full_resp(runner, params, mock_default_deps, data):
mock_response = Mock(status_code=200, elapsed=datetime.datetime.now(), headers=get_headers())
mock_response.json.return_value = data['limit_full']
mock_default_deps.patch('bnc.builder.requests.post', return_value=mock_response)
result = runner.invoke(cli, params)
assert result.exit_code == 0
assert result.output == json_to_str(data['limit_full']) + '\n'
@pytest.mark.parametrize("params", [
['spot', 'new_order', 'limit', '-sy', 'LTCBTC', '-si', 'BUY', '-tif', 'GTC', '-q', 1, '-p', 0.003621, '-nort', 'ACK'],
['spot', 'new_order', 'limit', '--symbol', 'LTCBTC', '--side', 'BUY', '--time_in_force', 'GTC', '--quantity', 1, '--price', 0.003621,
'--new_order_resp_type', 'ACK']
])
def test_new_order_limit_return_ack_resp(runner, params, mock_default_deps, data):
mock_response = Mock(status_code=200, elapsed=datetime.datetime.now(), headers=get_headers())
mock_response.json.return_value = data['limit_ack']
mock_default_deps.patch('bnc.builder.requests.post', return_value=mock_response)
result = runner.invoke(cli, params)
assert result.exit_code == 0
assert result.output == json_to_str(data['limit_ack']) + '\n'
@pytest.mark.parametrize("params", [
['spot', 'new_order', 'limit', '-sy', 'LTCBTC', '-si', 'BUY', '-tif', 'GTC', '-q', 1, '-p', 0.003621, '-nort', 'RESULT'],
['spot', 'new_order', 'limit', '--symbol', 'LTCBTC', '--side', 'BUY', '--time_in_force', 'GTC', '--quantity', 1, '--price', 0.003621,
'--new_order_resp_type', 'RESULT']
])
def test_new_order_limit_return_ack_resp(runner, params, mock_default_deps, data):
mock_response = Mock(status_code=200, elapsed=datetime.datetime.now(), headers=get_headers())
mock_response.json.return_value = data['limit_result']
mock_default_deps.patch('bnc.builder.requests.post', return_value=mock_response)
result = runner.invoke(cli, params)
assert result.exit_code == 0
assert result.output == json_to_str(data['limit_result']) + '\n'
| true | true |
f71ff3f9334377468b2b24b73aa343bc3c717efb | 2,218 | py | Python | src/odontology/core/migrations/0009_chapter_tariff.py | nanomolina/JP | 248a47bced4dac850f85d28968ddf279cd123400 | [
"Apache-2.0"
] | 2 | 2016-06-23T15:35:29.000Z | 2022-01-11T00:55:21.000Z | src/odontology/core/migrations/0009_chapter_tariff.py | nanomolina/JP | 248a47bced4dac850f85d28968ddf279cd123400 | [
"Apache-2.0"
] | 27 | 2016-06-24T12:28:01.000Z | 2022-01-13T00:37:25.000Z | src/odontology/core/migrations/0009_chapter_tariff.py | nanomolina/JP | 248a47bced4dac850f85d28968ddf279cd123400 | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
# Generated by Django 1.9.2 on 2016-09-02 20:22
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('core', '0008_auto_20160801_1937'),
]
operations = [
migrations.CreateModel(
name='Chapter',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=250)),
('number', models.PositiveSmallIntegerField()),
('date', models.DateField(blank=True, null=True)),
('date_created', models.DateField(auto_now_add=True)),
('date_modified', models.DateField(auto_now=True)),
],
),
migrations.CreateModel(
name='Tariff',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('index', models.PositiveSmallIntegerField(blank=True, null=True)),
('sub_index', models.PositiveSmallIntegerField(blank=True, null=True)),
('name', models.CharField(blank=True, max_length=250)),
('variable_cost', models.DecimalField(decimal_places=2, default=0, max_digits=12, verbose_name=b'Costo variable')),
('fixed_cost', models.DecimalField(decimal_places=2, default=0, max_digits=12, verbose_name=b'Costo fijo')),
('workshop_cost', models.DecimalField(decimal_places=2, default=0, max_digits=12, verbose_name=b'Costo taller')),
('total_cost', models.DecimalField(decimal_places=2, default=0, max_digits=12, verbose_name=b'Costo total')),
('fees', models.DecimalField(decimal_places=2, default=0, max_digits=12, verbose_name=b'Honorarios')),
('total_tariff', models.DecimalField(decimal_places=2, default=0, max_digits=12, verbose_name=b'Total arancel')),
('chapter', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='core.Chapter')),
],
),
]
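# Note (added): applied with the standard Django workflow (illustrative):
#   python manage.py migrate core 0009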
| 50.409091 | 131 | 0.625338 |
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('core', '0008_auto_20160801_1937'),
]
operations = [
migrations.CreateModel(
name='Chapter',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=250)),
('number', models.PositiveSmallIntegerField()),
('date', models.DateField(blank=True, null=True)),
('date_created', models.DateField(auto_now_add=True)),
('date_modified', models.DateField(auto_now=True)),
],
),
migrations.CreateModel(
name='Tariff',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('index', models.PositiveSmallIntegerField(blank=True, null=True)),
('sub_index', models.PositiveSmallIntegerField(blank=True, null=True)),
('name', models.CharField(blank=True, max_length=250)),
('variable_cost', models.DecimalField(decimal_places=2, default=0, max_digits=12, verbose_name=b'Costo variable')),
('fixed_cost', models.DecimalField(decimal_places=2, default=0, max_digits=12, verbose_name=b'Costo fijo')),
('workshop_cost', models.DecimalField(decimal_places=2, default=0, max_digits=12, verbose_name=b'Costo taller')),
('total_cost', models.DecimalField(decimal_places=2, default=0, max_digits=12, verbose_name=b'Costo total')),
('fees', models.DecimalField(decimal_places=2, default=0, max_digits=12, verbose_name=b'Honorarios')),
('total_tariff', models.DecimalField(decimal_places=2, default=0, max_digits=12, verbose_name=b'Total arancel')),
('chapter', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='core.Chapter')),
],
),
]
| true | true |
f71ff457dfbaab5f3f5847da75668942d7052f7a | 3,330 | py | Python | dockerdjango/dockerdjango/settings.py | ducanhvna/DockerDjango | ab5dcb801691fa1ef15a38f05e75fe58066c61ae | [
"MIT"
] | null | null | null | dockerdjango/dockerdjango/settings.py | ducanhvna/DockerDjango | ab5dcb801691fa1ef15a38f05e75fe58066c61ae | [
"MIT"
] | 9 | 2019-12-04T23:29:22.000Z | 2022-02-10T12:19:23.000Z | dockerdjango/dockerdjango/settings.py | ducanhvna/DockerDjango | ab5dcb801691fa1ef15a38f05e75fe58066c61ae | [
"MIT"
] | null | null | null | """
Django settings for dockerdjango project.
Generated by 'django-admin startproject' using Django 2.2.2.
For more information on this file, see
https://docs.djangoproject.com/en/2.2/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/2.2/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/2.2/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'gc(_1u30_&as3g*xqy8k@u$aj22*@_#d$ylw@s8cjfjmz%ukq('
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'dockerdjango.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'dockerdjango.wsgi.application'
# Database
# https://docs.djangoproject.com/en/2.2/ref/settings/#databases
# DATABASES = {
# 'default': {
# 'ENGINE': 'django.db.backends.sqlite3',
# 'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
# }
# }
DATABASES = {
'default':{
'ENGINE': 'django.db.backends.mysql',
'NAME': 'my-app-db',
'USER': 'root',
'PASSWORD': 'password',
'HOST': 'db',
'PORT': 3306
}
}
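# --- Note (added): HOST 'db' above assumes a docker-compose service named
# `db`; an illustrative (not canonical) compose snippet:
#   services:
#     db:
#       image: mysql:5.7
#       environment:
#         MYSQL_ROOT_PASSWORD: password
#         MYSQL_DATABASE: my-app-db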
# Password validation
# https://docs.djangoproject.com/en/2.2/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/2.2/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.2/howto/static-files/
STATIC_URL = '/static/'
| 25.419847 | 91 | 0.678378 |
import os
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
SECRET_KEY = 'gc(_1u30_&as3g*xqy8k@u$aj22*@_#d$ylw@s8cjfjmz%ukq('
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'dockerdjango.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'dockerdjango.wsgi.application'
# Database
# https://docs.djangoproject.com/en/2.2/ref/settings/#databases
# DATABASES = {
# 'default': {
# 'ENGINE': 'django.db.backends.sqlite3',
# 'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
# }
# }
DATABASES = {
'default':{
'ENGINE': 'django.db.backends.mysql',
'NAME': 'my-app-db',
'USER': 'root',
'PASSWORD': 'password',
'HOST': 'db',
'PORT': 3306
}
}
# Password validation
# https://docs.djangoproject.com/en/2.2/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/2.2/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.2/howto/static-files/
STATIC_URL = '/static/'
| true | true |
f71ff4f192b3057091ebc889bf78714a2e21a9d6 | 2,455 | py | Python | examples/pybullet/examples/quadruped_setup_playback.py | felipeek/bullet3 | 6a59241074720e9df119f2f86bc01765917feb1e | [
"Zlib"
] | 9,136 | 2015-01-02T00:41:45.000Z | 2022-03-31T15:30:02.000Z | examples/pybullet/examples/quadruped_setup_playback.py | felipeek/bullet3 | 6a59241074720e9df119f2f86bc01765917feb1e | [
"Zlib"
] | 2,424 | 2015-01-05T08:55:58.000Z | 2022-03-30T19:34:55.000Z | examples/pybullet/examples/quadruped_setup_playback.py | felipeek/bullet3 | 6a59241074720e9df119f2f86bc01765917feb1e | [
"Zlib"
] | 2,921 | 2015-01-02T10:19:30.000Z | 2022-03-31T02:48:42.000Z | import pybullet as p
import pybullet_data
p.connect(p.SHARED_MEMORY)
p.setAdditionalSearchPath(pybullet_data.getDataPath())
objects = [
p.loadURDF("plane.urdf", 0.000000, 0.000000, -.300000, 0.000000, 0.000000, 0.000000, 1.000000)
]
objects = [
p.loadURDF("quadruped/minitaur.urdf", [-0.000046, -0.000068, 0.200774],
[-0.000701, 0.000387, -0.000252, 1.000000],
useFixedBase=False)
]
ob = objects[0]
jointPositions = [
0.000000, 1.531256, 0.000000, -2.240112, 1.527979, 0.000000, -2.240646, 1.533105, 0.000000,
-2.238254, 1.530335, 0.000000, -2.238298, 0.000000, -1.528038, 0.000000, 2.242656, -1.525193,
0.000000, 2.244008, -1.530011, 0.000000, 2.240683, -1.528687, 0.000000, 2.240517
]
for ji in range(p.getNumJoints(ob)):
p.resetJointState(ob, ji, jointPositions[ji])
p.setJointMotorControl2(bodyIndex=ob, jointIndex=ji, controlMode=p.VELOCITY_CONTROL, force=0)
cid0 = p.createConstraint(1, 3, 1, 6, p.JOINT_POINT2POINT, [0.000000, 0.000000, 0.000000],
[0.000000, 0.005000, 0.200000], [0.000000, 0.010000, 0.200000],
[0.000000, 0.000000, 0.000000, 1.000000],
[0.000000, 0.000000, 0.000000, 1.000000])
p.changeConstraint(cid0, maxForce=500.000000)
cid1 = p.createConstraint(1, 16, 1, 19, p.JOINT_POINT2POINT, [0.000000, 0.000000, 0.000000],
[0.000000, 0.005000, 0.200000], [0.000000, 0.010000, 0.200000],
[0.000000, 0.000000, 0.000000, 1.000000],
[0.000000, 0.000000, 0.000000, 1.000000])
p.changeConstraint(cid1, maxForce=500.000000)
cid2 = p.createConstraint(1, 9, 1, 12, p.JOINT_POINT2POINT, [0.000000, 0.000000, 0.000000],
[0.000000, 0.005000, 0.200000], [0.000000, 0.010000, 0.200000],
[0.000000, 0.000000, 0.000000, 1.000000],
[0.000000, 0.000000, 0.000000, 1.000000])
p.changeConstraint(cid2, maxForce=500.000000)
cid3 = p.createConstraint(1, 22, 1, 25, p.JOINT_POINT2POINT, [0.000000, 0.000000, 0.000000],
[0.000000, 0.005000, 0.200000], [0.000000, 0.010000, 0.200000],
[0.000000, 0.000000, 0.000000, 1.000000],
[0.000000, 0.000000, 0.000000, 1.000000])
p.changeConstraint(cid3, maxForce=500.000000)
p.setGravity(0.000000, 0.000000, 0.000000)
p.stepSimulation()
p.disconnect()
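# Illustrative variant (not in the original file): the script above advances a
# single simulation step before disconnecting. To actually watch the playback,
# the lone p.stepSimulation() call could be replaced with a timed loop before
# p.disconnect(); the sleep matches pybullet's default 1/240 s timestep.
# import time
# for _ in range(240):
#     p.stepSimulation()
#     time.sleep(1.0 / 240.0)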
f71ff582921b55fc70764b796e2441625d14a39b | 3,212 | py | Python | quati/dataset/corpora/imdb.py | onenoc/quati | ba372b2ad14076294af62cbcbc27e1b3ca8421c1 | ["MIT"] | 2 | 2021-01-30T21:20:36.000Z | 2021-01-30T22:15:07.000Z | quati/dataset/corpora/imdb.py | onenoc/quati | ba372b2ad14076294af62cbcbc27e1b3ca8421c1 | ["MIT"] | null | null | null | quati/dataset/corpora/imdb.py | onenoc/quati | ba372b2ad14076294af62cbcbc27e1b3ca8421c1 | ["MIT"] | 1 | 2021-01-18T23:12:18.000Z | 2021-01-18T23:12:18.000Z |
from itertools import chain
from pathlib import Path
import nltk
import torchtext
from quati.dataset.fields.words import WordsField
from quati.dataset.fields.tags import TagsField
from quati.dataset.corpora.corpus import Corpus
def create_single_file_for_pos_and_neg(corpus_path):
new_file_path = Path(corpus_path, 'data.txt')
# do not create this file again if it is already there
if not new_file_path.exists():
neg_files = sorted(Path(corpus_path, 'neg').glob('*.txt'))
pos_files = sorted(Path(corpus_path, 'pos').glob('*.txt'))
paths = chain(neg_files, pos_files)
new_file = new_file_path.open('w', encoding='utf8')
for file_path in paths:
content = file_path.read_text().strip()
content = content.replace('<br>', ' <br> ')
content = content.replace('<br >', ' <br> ')
content = content.replace('<br />', ' <br> ')
content = content.replace('<br/>', ' <br> ')
label = '1' if 'pos' in str(file_path) else '0'
new_file.write(label + ' ' + content + '\n')
new_file.seek(0)
new_file.close()
return new_file_path
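# Each line written above has the form "<label> <review text>", e.g. the
# (made-up) review:
# 1 A charming little production . <br> The acting is superb ...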
class IMDBCorpus(Corpus):
task = 'doc'
@staticmethod
def create_fields_tuples():
# if you choose tokenizer='spacy', please install the en package:
# python3 -m spacy download en
tokenizer = nltk.WordPunctTokenizer()
# tokenizer = nltk.TreebankWordTokenizer()
fields_tuples = [
('words', WordsField(tokenize=tokenizer.tokenize)),
('target', TagsField())
]
return fields_tuples
def read(self, corpus_path):
"""
First, read the positive and negative examples, which are located in
different folders: `pos/` and `neg/`.
Second, split the `<br>` tags from other tokens.
Third, save a new file called `data.txt` in the root directory, with
the following structure:
label_0 text_0
label_1 text_1
...
label_M text_M
Args:
corpus_path: path to the root directory where `pos/` and `neg/`
are located.
"""
new_file_path = create_single_file_for_pos_and_neg(corpus_path)
self.corpus_path = str(new_file_path)
self.open(self.corpus_path)
if self.lazy is True:
return self
else:
return list(self)
def _read(self, file):
for line in file:
line = line.strip().split()
if line:
label = line[0]
text = ' '.join(line[1:])
yield self.make_torchtext_example(text, label)
def make_torchtext_example(self, text, label=None):
ex = {'words': text, 'target': label}
if 'target' not in self.fields_dict.keys():
del ex['target']
assert ex.keys() == self.fields_dict.keys()
return torchtext.data.Example.fromdict(ex, self.fields_dict)
if __name__ == '__main__':
from quati.dataset.corpora.test_corpus import quick_test
quick_test(
IMDBCorpus,
'../../../data/corpus/imdb/test/',
lazy=True,
)
f71ff58b38aa884ffd4cc5997958ad2274a7a77b | 491 | py | Python | tests/system/test_base.py | aalmazanarbs/memstatsbeat | 554be8f67c385f4e9aeccfdc09b838075c8e1714 | ["Apache-2.0"] | null | null | null | tests/system/test_base.py | aalmazanarbs/memstatsbeat | 554be8f67c385f4e9aeccfdc09b838075c8e1714 | ["Apache-2.0"] | null | null | null | tests/system/test_base.py | aalmazanarbs/memstatsbeat | 554be8f67c385f4e9aeccfdc09b838075c8e1714 | ["Apache-2.0"] | null | null | null |
from memstatsbeat import BaseTest
import os
class Test(BaseTest):
def test_base(self):
"""
Basic test with exiting Memstatsbeat normally
"""
self.render_config_template(
path=os.path.abspath(self.working_dir) + "/log/*"
)
memstatsbeat_proc = self.start_beat()
self.wait_until(lambda: self.log_contains("memstatsbeat is running"))
exit_code = memstatsbeat_proc.kill_and_wait()
assert exit_code == 0
f71ff5e2374591301bbf01a9ea272bd250502167 | 7,448 | py | Python | RestPy/ixnetwork_restpy/testplatform/sessions/ixnetwork/topology/instruction.py | ralfjon/IxNetwork | c0c834fbc465af69c12fd6b7cee4628baba7fff1 | ["MIT"] | null | null | null | RestPy/ixnetwork_restpy/testplatform/sessions/ixnetwork/topology/instruction.py | ralfjon/IxNetwork | c0c834fbc465af69c12fd6b7cee4628baba7fff1 | ["MIT"] | null | null | null | RestPy/ixnetwork_restpy/testplatform/sessions/ixnetwork/topology/instruction.py | ralfjon/IxNetwork | c0c834fbc465af69c12fd6b7cee4628baba7fff1 | ["MIT"] | null | null | null |
# Copyright 1997 - 2018 by IXIA Keysight
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
from ixnetwork_restpy.base import Base
from ixnetwork_restpy.files import Files
class Instruction(Base):
"""The Instruction class encapsulates a user managed instruction node in the ixnetwork hierarchy.
An instance of the class can be obtained by accessing the Instruction property from a parent instance.
The internal properties list will be empty when the property is accessed and is populated from the server using the find method.
The internal properties list can be managed by the user by using the add and remove methods.
"""
_SDM_NAME = 'instruction'
def __init__(self, parent):
super(Instruction, self).__init__(parent)
@property
def Actions(self):
"""An instance of the Actions class.
Returns:
obj(ixnetwork_restpy.testplatform.sessions.ixnetwork.topology.actions.Actions)
Raises:
NotFoundError: The requested resource does not exist on the server
ServerError: The server has encountered an uncategorized error condition
"""
from ixnetwork_restpy.testplatform.sessions.ixnetwork.topology.actions import Actions
return Actions(self)
@property
def Field(self):
"""An instance of the Field class.
Returns:
obj(ixnetwork_restpy.testplatform.sessions.ixnetwork.topology.field.Field)
Raises:
NotFoundError: The requested resource does not exist on the server
ServerError: The server has encountered an uncategorized error condition
"""
from ixnetwork_restpy.testplatform.sessions.ixnetwork.topology.field import Field
return Field(self)
@property
def Count(self):
"""Number of elements inside associated multiplier-scaled container object, e.g. number of devices inside a Device Group
Returns:
number
"""
return self._get_attribute('count')
@property
def Description(self):
"""Description of the field.
Returns:
str
"""
return self._get_attribute('description')
@Description.setter
def Description(self, value):
self._set_attribute('description', value)
@property
def DisplayName(self):
"""Display name used by GUI.
Returns:
str
"""
return self._get_attribute('displayName')
@property
def IsEditable(self):
"""Information on the requirement of the field.
Returns:
bool
"""
return self._get_attribute('isEditable')
@IsEditable.setter
def IsEditable(self, value):
self._set_attribute('isEditable', value)
@property
def IsEnabled(self):
"""Enables disables the field.
Returns:
bool
"""
return self._get_attribute('isEnabled')
@IsEnabled.setter
def IsEnabled(self, value):
self._set_attribute('isEnabled', value)
@property
def IsRequired(self):
"""Information on the requirement of the field.
Returns:
bool
"""
return self._get_attribute('isRequired')
@IsRequired.setter
def IsRequired(self, value):
self._set_attribute('isRequired', value)
@property
def Name(self):
"""Name of packet field
Returns:
str
"""
return self._get_attribute('name')
@Name.setter
def Name(self, value):
self._set_attribute('name', value)
def add(self, Description=None, IsEditable=None, IsEnabled=None, IsRequired=None, Name=None):
"""Adds a new instruction node on the server and retrieves it in this instance.
Args:
Description (str): Description of the field.
IsEditable (bool): Information on the requirement of the field.
IsEnabled (bool): Enables disables the field.
IsRequired (bool): Information on the requirement of the field.
Name (str): Name of packet field
Returns:
self: This instance with all currently retrieved instruction data using find and the newly added instruction data available through an iterator or index
Raises:
ServerError: The server has encountered an uncategorized error condition
"""
return self._create(locals())
def remove(self):
"""Deletes all the instruction data in this instance from server.
Raises:
NotFoundError: The requested resource does not exist on the server
ServerError: The server has encountered an uncategorized error condition
"""
self._delete()
def find(self, Count=None, Description=None, DisplayName=None, IsEditable=None, IsEnabled=None, IsRequired=None, Name=None):
"""Finds and retrieves instruction data from the server.
All named parameters support regex and can be used to selectively retrieve instruction data from the server.
By default the find method takes no parameters and will retrieve all instruction data from the server.
Args:
Count (number): Number of elements inside associated multiplier-scaled container object, e.g. number of devices inside a Device Group
Description (str): Description of the field.
DisplayName (str): Display name used by GUI.
IsEditable (bool): Information on the requirement of the field.
IsEnabled (bool): Enables disables the field.
IsRequired (bool): Information on the requirement of the field.
Name (str): Name of packet field
Returns:
self: This instance with matching instruction data retrieved from the server available through an iterator or index
Raises:
ServerError: The server has encountered an uncategorized error condition
"""
return self._select(locals())
def read(self, href):
"""Retrieves a single instance of instruction data from the server.
Args:
href (str): An href to the instance to be retrieved
Returns:
self: This instance with the instruction data from the server available through an iterator or index
Raises:
NotFoundError: The requested resource does not exist on the server
ServerError: The server has encountered an uncategorized error condition
"""
return self._read(href)
def AddAction(self, Arg2):
"""Executes the addAction operation on the server.
Adds an Action item.
Args:
Arg1 (str(None|/api/v1/sessions/1/ixnetwork/topology)): The method internally sets Arg1 to the current href for this instance
Arg2 (str):
Raises:
NotFoundError: The requested resource does not exist on the server
ServerError: The server has encountered an uncategorized error condition
"""
Arg1 = self.href
return self._execute('AddAction', payload=locals(), response_object=None)
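# Illustrative usage sketch (not part of the generated file). `parent` stands
# in for whichever RestPy topology object exposes the Instruction property;
# the Name values are made up.
# instruction = parent.Instruction.add(Name='match_field', IsRequired=True)
# for inst in parent.Instruction.find(Name='match.*'):
#     print(inst.DisplayName, inst.Count)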
f71ff7f5175b2552338f6b41f9f1520efde5ebe9 | 1,203 | py | Python | pytorch_basic_template/model/model_entry.py | ldylab/deep_learning_with_pytorch | c86a2e24ee94ade1a78b66f10eb69b6e1fdd4463 | ["MIT"] | null | null | null | pytorch_basic_template/model/model_entry.py | ldylab/deep_learning_with_pytorch | c86a2e24ee94ade1a78b66f10eb69b6e1fdd4463 | ["MIT"] | null | null | null | pytorch_basic_template/model/model_entry.py | ldylab/deep_learning_with_pytorch | c86a2e24ee94ade1a78b66f10eb69b6e1fdd4463 | ["MIT"] | null | null | null |
# from model.base.fcn import CustomFcn
# from model.best.fcn import DeepLabv3Fcn
# from model.better.fcn import Resnet101Fcn
# from model.sota.fcn import LightFcn
from model.alexnet.alexnet_model import AlexNet
from model.lenet5.lenet_5_model import LeNet5
from model.vggnet.vggnet16 import VGG16
from model.densenet.densenet_model import DenseNet121
from model.resnet.resnet34_model import resnet34
from model.resnet.resnet101_model import resnet101, resnet50
from model.cotnet.cotnet_model import cotnet50
import torch.nn as nn
def select_model(args):
type2model = {
'alexnet_fcn': AlexNet(args),
'lenet5_fcn': LeNet5(args),
'vggnet16_fcn': VGG16(args),
'densenet121_fcn': DenseNet121(num_classes=args.classes_num, grayscale=False),
'resnet34_fcn': resnet34(num_classes=args.classes_num),
'resnet101_fcn': resnet101(num_classes=args.classes_num),
'resnet50_fcn': resnet50(num_classes=args.classes_num),
'cotnet50_fcn': cotnet50(num_classes=args.classes_num)
}
model = type2model[args.model_type]
return model
def equip_multi_gpu(model, args):
model = nn.DataParallel(model, device_ids=args.gpus)
return model
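# Illustrative usage sketch (not part of the original file). The attribute
# names (model_type, classes_num, gpus) are the ones read above; the values
# here are made up.
# from argparse import Namespace
# args = Namespace(model_type='resnet34_fcn', classes_num=10, gpus=[0, 1])
# model = equip_multi_gpu(select_model(args), args)
#
# Design note: type2model instantiates every network before one is selected.
# A lazier variant would map names to constructors and call only the chosen
# one, e.g. {'resnet34_fcn': lambda: resnet34(num_classes=args.classes_num)}.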
f71ff8d04b827e68cb215f95a82095cadf50e4ca | 1,094 | py | Python | data/p4VQE/R4/benchmark/startPyquil330.py | UCLA-SEAL/QDiff | d968cbc47fe926b7f88b4adf10490f1edd6f8819 | ["BSD-3-Clause"] | null | null | null | data/p4VQE/R4/benchmark/startPyquil330.py | UCLA-SEAL/QDiff | d968cbc47fe926b7f88b4adf10490f1edd6f8819 | ["BSD-3-Clause"] | null | null | null | data/p4VQE/R4/benchmark/startPyquil330.py | UCLA-SEAL/QDiff | d968cbc47fe926b7f88b4adf10490f1edd6f8819 | ["BSD-3-Clause"] | null | null | null |
# qubit number=4
# total number=10
import pyquil
from pyquil.api import local_forest_runtime, QVMConnection
from pyquil import Program, get_qc
from pyquil.gates import *
import numpy as np
conn = QVMConnection()
def make_circuit()-> Program:
prog = Program() # circuit begin
prog += H(1) # number=2
prog += H(2) # number=3
prog += H(3) # number=4
prog += Y(3) # number=5
prog += SWAP(1,0) # number=6
prog += Y(2) # number=8
prog += Y(2) # number=9
# circuit end
return prog
def summarise_results(bitstrings) -> dict:
d = {}
for l in bitstrings:
if d.get(l) is None:
d[l] = 1
else:
d[l] = d[l] + 1
return d
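# Equivalent tally using the standard library (illustrative):
# from collections import Counter
# def summarise_results(bitstrings) -> dict:
#     return dict(Counter(bitstrings))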
if __name__ == '__main__':
prog = make_circuit()
qvm = get_qc('4q-qvm')
results = qvm.run_and_measure(prog,1024)
bitstrings = np.vstack([results[i] for i in qvm.qubits()]).T
bitstrings = [''.join(map(str, l)) for l in bitstrings]
writefile = open("../data/startPyquil330.csv","w")
    print(summarise_results(bitstrings), file=writefile)
writefile.close()
f71ff98e011cc1d66aab1506e6db05f161b7b1cb | 150 | py | Python | src/blog/views.py | master-stm/blog-dhango-ar | dd904e2af9bc6b7f85da6063f2abcaf12d572b47 | ["bzip2-1.0.6"] | null | null | null | src/blog/views.py | master-stm/blog-dhango-ar | dd904e2af9bc6b7f85da6063f2abcaf12d572b47 | ["bzip2-1.0.6"] | null | null | null | src/blog/views.py | master-stm/blog-dhango-ar | dd904e2af9bc6b7f85da6063f2abcaf12d572b47 | ["bzip2-1.0.6"] | null | null | null |
from django.shortcuts import render
# Create your views here.
def home(request):
return render(request, 'blog/index.html', {'title': 'Home'})
f71ffa32ecb22bbfb515cf38fa8b15f86b7fb720 | 5,517 | py | Python | atron_cli/atron.py | atron-cc/atron | 49244fbd5ca3d372f6e3e74cda388ddea3acf00e | ["MIT"] | 4 | 2019-05-11T01:21:15.000Z | 2020-02-08T18:00:39.000Z | atron_cli/atron.py | atron-cc/atron | 49244fbd5ca3d372f6e3e74cda388ddea3acf00e | ["MIT"] | null | null | null | atron_cli/atron.py | atron-cc/atron | 49244fbd5ca3d372f6e3e74cda388ddea3acf00e | ["MIT"] | null | null | null |
import click
import time
import platform
import os
import posixpath
import re
from minifier import minify
from .board import Board, BoardException, DirectoryExistsError
from .board import PyboardError
_board = None
@click.group()
@click.option(
"--port",
"-p",
envvar="ATRON_PORT",
default="",
type=click.STRING,
help="Name of serial port for connected board. Can optionally specify with ATRON_PORT environment variable.",
metavar="PORT",
)
@click.option(
"--baud",
"-b",
envvar="ATRON_BAUD",
default=115200,
type=click.INT,
help="Baud rate for the serial connection (default 115200). Can optionally specify with ATRON_BAUD environment variable.",
metavar="BAUD",
)
@click.version_option()
def cli(port, baud):
global _board
if platform.system() == "Windows":
if port == '':
click.secho('you have to choose a COM port.', bold=True, fg='red')
return
if not re.match("^COM(\d+)$", port):
click.secho('invalid port {}'.format(port), fg='red')
return
else:
if port == '':
port = '/dev/ttyUSB0'
seconds = 1
while True:
try:
_board = Board(port, baud)
break
except BoardException as error:
click.secho(str(error), bold=True, fg='yellow')
click.secho(
                'reconnecting to board after {} seconds. press ctrl+c to cancel'.format(seconds), fg='green')
time.sleep(seconds)
seconds *= 2
@cli.command()
@click.option(
"-h",
"--hard",
"hard",
is_flag=True,
default=False,
help="Perform a hard reboot, including running init.py",
)
def reset(hard):
if not hard:
_board.soft_reset()
return
# TODO: Hard reset is not implemented.
@cli.command()
def raw_command():
click.secho(
'the raw-command is under construction and may have some bugs.', fg='yellow')
click.secho('entering raw-command mode ...', fg='green')
_board.soft_reset()
time.sleep(1)
_board.board.enter_raw_repl()
try:
while True:
            command = input(">>> ")
result = _board.board.exec_raw(command)
if result[0]:
print(result[0])
finally:
_board.board.exit_raw_repl()
_board.soft_reset()
@cli.command()
@click.argument("remote_folder")
def rmdir(remote_folder):
_board.files.rmdir(remote_folder)
@cli.command()
@click.argument(
"local",
default="main.py",
)
@click.argument(
"remote",
default="main.py",
)
def upload(local, remote):
if remote is None:
remote = os.path.basename(os.path.abspath(local))
_board.files.put(remote, minify(local))
@cli.command()
@click.argument(
"local",
default="main.py",
)
@click.argument(
"remote",
required=False,
)
def put(local, remote):
if remote is None:
remote = os.path.basename(os.path.abspath(local))
if os.path.isdir(local):
board_files = _board.files
for parent, child_dirs, child_files in os.walk(local):
remote_parent = posixpath.normpath(
posixpath.join(remote, os.path.relpath(parent, local))
)
try:
board_files.mkdir(remote_parent)
for filename in child_files:
with open(os.path.join(parent, filename), "rb") as infile:
remote_filename = posixpath.join(
remote_parent, filename)
board_files.put(remote_filename, infile.read())
except DirectoryExistsError:
pass
else:
with open(local, "rb") as infile:
_board.files.put(remote, infile.read())
@cli.command()
@click.argument("remote_file")
def rm(remote_file):
_board.files.rm(remote_file)
@cli.command()
@click.argument("local_file")
@click.option(
"--no-output",
"-n",
is_flag=True,
help="Run the code without waiting for it to finish and print output. Use this when running code with main loops that never return.",
)
def run(local_file, no_output):
try:
output = _board.files.run(local_file, not no_output)
if output is not None:
click.secho(output.decode("utf-8"))
except IOError:
click.echo(
"Failed to find or read input file: {0}".format(local_file), err=True
)
@cli.command()
@click.argument("directory", default="/")
@click.option(
"--long_format",
"-l",
is_flag=True,
help="Print long format info including size of files. Note the size of directories is not supported and will show 0 values.",
)
@click.option(
"--recursive",
"-r",
is_flag=True,
help="recursively list all files and (empty) directories.",
)
def ls(directory, long_format, recursive):
try:
files = _board.files.ls(directory, long_format=long_format, recursive=recursive)
except PyboardError as err:
click.secho('PyBoard Exception.', fg='red')
click.secho(str(err), fg='yellow')
return
for f in files:
if not long_format:
click.secho(
f,
fg='green' if os.path.splitext(f)[1].lower() == '.py' else 'white',
)
else:
click.echo(f)
if __name__ == '__main__':
try:
cli()
finally:
if _board is not None:
try:
_board.close()
except:
pass
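# Illustrative shell usage (assumed; `atron` as the installed entry point is
# hypothetical, but the options and commands are the ones defined above):
#   atron --port /dev/ttyUSB0 ls / --long_format
#   ATRON_PORT=/dev/ttyUSB0 atron upload main.py
#   atron -p /dev/ttyUSB0 run main.py --no-output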
f71ffa36ec9cfc1d58a5f56d219d392194ca7a79 | 331 | py | Python | Projetos/surf05.py | anderson-br-ti/python | d65d851f0934267dff9256dfdac09b100efb3b45 | ["MIT"] | null | null | null | Projetos/surf05.py | anderson-br-ti/python | d65d851f0934267dff9256dfdac09b100efb3b45 | ["MIT"] | null | null | null | Projetos/surf05.py | anderson-br-ti/python | d65d851f0934267dff9256dfdac09b100efb3b45 | ["MIT"] | null | null | null |
f = open('surf.txt')
notas = []
nomes = []
for linha in f:
nome, pontos = linha.split()
notas.append(float(pontos))
nomes.append(nome)
f.close()
# Sort score/name pairs together so each name keeps its own score; sorting
# the two lists independently would mismatch names and scores.
ranking = sorted(zip(notas, nomes), reverse=True)
print('%s %4.2f' % (ranking[0][1], ranking[0][0]))
print('%s %4.2f' % (ranking[1][1], ranking[1][0]))
print('%s %4.2f' % (ranking[2][1], ranking[2][0]))
f71ffa96a121e4b599332fda00ecec1c3e395215 | 381 | py | Python | sayhello/__init__.py | IshunChin/cfn-tutorial | 0a282fe3a2affa60c5c46702206128bb19e60869 | ["MIT"] | null | null | null | sayhello/__init__.py | IshunChin/cfn-tutorial | 0a282fe3a2affa60c5c46702206128bb19e60869 | ["MIT"] | null | null | null | sayhello/__init__.py | IshunChin/cfn-tutorial | 0a282fe3a2affa60c5c46702206128bb19e60869 | ["MIT"] | null | null | null |
from flask import Flask
from flask_bootstrap import Bootstrap
from flask_moment import Moment
from flask_sqlalchemy import SQLAlchemy
app = Flask('sayhello')
app.config.from_pyfile('settings.py')
app.jinja_env.trim_blocks = True
app.jinja_env.lstrip_blocks = True
db = SQLAlchemy(app)
bootstrap = Bootstrap(app)
moment = Moment(app)
from sayhello import views, errors, commands
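# Note (not in the original file): the bottom-of-module import is deliberate.
# views, errors and commands import the `app` and `db` objects created above,
# so importing them any earlier would cause a circular import.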
f71ffb327a8627122eb965383d8d9493e4611a68 | 23,768 | py | Python | examples/sentence_similarity/gensen_train.py | gohanlon/nlp | 7b07109a2066eb2152c370ef38600230668a9c8d | ["MIT"] | 4,407 | 2019-10-29T21:35:19.000Z | 2022-03-31T13:56:37.000Z | examples/sentence_similarity/gensen_train.py | shubham9g17/nlp-recipes | a5cd2303187239799ae0b1597a7c16eb99a97108 | ["MIT"] | 134 | 2019-10-30T23:38:59.000Z | 2022-03-01T11:42:53.000Z | examples/sentence_similarity/gensen_train.py | shubham9g17/nlp-recipes | a5cd2303187239799ae0b1597a7c16eb99a97108 | ["MIT"] | 726 | 2019-10-31T15:21:52.000Z | 2022-03-31T10:18:22.000Z |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License.
"""
The GenSen training process follows the steps:
1. Create or load the dataset vocabulary
2. Train on the training dataset for each batch epoch (batch size = 48 updates)
3. Evaluate on the validation dataset every 10 epochs
4. Find the local minimum point on validation loss
5. Save the best model and stop the training process
AzureML provides AI Compute to train the model and track the performance.
This training process is based on GPU only.
"""
import argparse
import json
import logging
import os
import time
import horovod.torch as hvd
import mlflow
import numpy as np
import torch
import torch.backends.cudnn as cudnn
import torch.nn as nn
import torch.nn.functional as f
import torch.optim as optim
from utils_nlp.models.gensen.multi_task_model import MultitaskModel
from utils_nlp.models.gensen.utils import (
BufferedDataIterator,
NLIIterator,
compute_validation_loss,
)
cudnn.benchmark = True
logger = logging.getLogger(__name__)
hvd.init()
if torch.cuda.is_available():
# Horovod: pin GPU to local rank.
torch.cuda.set_device(hvd.local_rank())
def metric_average(value, name):
"""
Sync the validation loss with nodes.
:param value:
:param name:
:return:
"""
tensor = torch.tensor(value)
avg_tensor = hvd.allreduce(tensor, name=name)
return avg_tensor.item()
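# Note (illustrative, not in the original file): hvd.allreduce averages by
# default, so every rank receives the mean of `value` across all workers,
# e.g. two ranks holding 1.0 and 3.0 would each get back 2.0.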
def setup_horovod(model, learning_rate):
""" Setup for Horovod usage.
Args:
model(MultitaskModel): The MultitaskModel object.
learning_rate(float): Learning rate for the model.
Returns: hvd.DistributedOptimizer: Optimizer to use for computing
gradients and applying updates.
"""
# Horovod: scale learning rate by the number of GPUs.
optimizer = optim.Adam(model.parameters(), lr=learning_rate * hvd.size())
# Horovod: broadcast parameters & optimizer state.
hvd.broadcast_parameters(model.state_dict(), root_rank=0)
hvd.broadcast_optimizer_state(optimizer, root_rank=0)
# Horovod: (optional) compression algorithm.
compression = hvd.Compression.fp16
# Horovod: wrap optimizer with DistributedOptimizer.
optimizer = hvd.DistributedOptimizer(
optimizer,
named_parameters=model.named_parameters(),
compression=compression,
)
return optimizer
def setup_logging(config):
logging.basicConfig(
level=logging.INFO,
format="%(asctime)s - %(levelname)s - %(message)s",
filename="log/%s" % (config["data"]["task"]),
filemode="w",
)
console = logging.StreamHandler()
console.setLevel(logging.INFO)
formatter = logging.Formatter("%(asctime)s - %(levelname)s - %(message)s")
console.setFormatter(formatter)
logging.getLogger("").addHandler(console)
def log_config(config):
logging.info("Model Parameters : ")
logging.info("Task : %s " % (config["data"]["task"]))
logging.info(
"Source Word Embedding Dim : %s" % (config["model"]["dim_word_src"])
)
logging.info(
"Target Word Embedding Dim : %s" % (config["model"]["dim_word_trg"])
)
logging.info("Source RNN Hidden Dim : %s" % (config["model"]["dim_src"]))
logging.info("Target RNN Hidden Dim : %s" % (config["model"]["dim_trg"]))
logging.info(
"Source RNN Bidirectional : %s" % (config["model"]["bidirectional"])
)
logging.info("Batch Size : %d " % (config["training"]["batch_size"]))
logging.info("Optimizer : %s " % (config["training"]["optimizer"]))
logging.info("Learning Rate : %f " % (config["training"]["lrate"]))
def evaluate(
config,
train_iterator,
model,
loss_criterion,
monitor_epoch,
min_val_loss,
min_val_loss_epoch,
save_dir,
starting_time,
model_state,
max_epoch,
):
""" Function to validate the model.
Args:
max_epoch(int): Limit training to specified number of epochs.
model_state(dict): Saved model weights.
config(dict): Config object.
train_iterator(BufferedDataIterator): BufferedDataIterator object.
model(MultitaskModel): The MultitaskModel object.
loss_criterion(nn.CrossEntropyLoss): Cross entropy loss.
monitor_epoch(int): Current epoch count.
min_val_loss(float): Minimum validation loss
min_val_loss_epoch(int): Epoch where the minimum validation
loss was seen.
save_dir(str): Directory path to save the model dictionary.
starting_time(time.Time): Starting time of the training.
Returns:
bool: Whether to continue training or not.
"""
break_flag = 0
for task_idx, task in enumerate(train_iterator.tasknames):
if "skipthought" in task:
continue
validation_loss = compute_validation_loss(
config,
model,
train_iterator,
loss_criterion,
task_idx,
lowercase=True,
)
validation_loss = metric_average(validation_loss, "val_loss")
logging.info("%s Validation Loss : %.3f" % (task, validation_loss))
# Horovod: print output only on first rank.
if hvd.rank() == 0:
# log the best val accuracy to AML run
logging.info(
"Best Validation Loss: {}".format(np.float(validation_loss))
)
# If the validation loss is small enough, and it starts to go up.
# Should stop training.
# Small is defined by the number of epochs it lasts.
if validation_loss < min_val_loss:
min_val_loss = validation_loss
min_val_loss_epoch = monitor_epoch
model_state = model.state_dict()
logging.info(
"Monitor epoch: %d Validation Loss: %.3f Min Validation Epoch: "
"%d Loss : %.3f "
% (
monitor_epoch,
validation_loss,
min_val_loss_epoch,
min_val_loss,
)
)
if (monitor_epoch - min_val_loss_epoch) > config["training"][
"stop_patience"
] or (max_epoch is not None and monitor_epoch >= max_epoch):
logging.info("Saving model ...")
# Save the name with validation loss.
torch.save(
model_state,
open(os.path.join(save_dir, "best_model.model"), "wb"),
)
# Let the training end.
break_flag = 1
break
if break_flag == 1:
logging.info("##### Training stopped at ##### %f" % min_val_loss)
logging.info(
"##### Training Time ##### %f seconds"
% (time.time() - starting_time)
)
return True, min_val_loss_epoch, min_val_loss, model_state
else:
return False, min_val_loss_epoch, min_val_loss, model_state
def evaluate_nli(nli_iterator, model, batch_size, n_gpus):
"""
Args:
nli_iterator(NLIIterator): NLIIterator object.
model(MultitaskModel): Multitask model object.
batch_size(int): Batch size.
n_gpus(int): Number of gpus
"""
n_correct = 0.0
n_wrong = 0.0
for j in range(0, len(nli_iterator.dev_lines), batch_size * n_gpus):
minibatch = nli_iterator.get_parallel_minibatch(
j, batch_size * n_gpus, "dev"
)
class_logits = model(
minibatch, -1, return_hidden=False, paired_trg=None
)
class_preds = (
f.softmax(class_logits).data.cpu().numpy().argmax(axis=-1)
)
labels = minibatch["labels"].data.cpu().numpy()
for pred, label in zip(class_preds, labels):
if pred == label:
n_correct += 1.0
else:
n_wrong += 1.0
logging.info("NLI Dev Acc : %.5f" % (n_correct / (n_correct + n_wrong)))
n_correct = 0.0
n_wrong = 0.0
for j in range(0, len(nli_iterator.test_lines), batch_size * n_gpus):
minibatch = nli_iterator.get_parallel_minibatch(
j, batch_size * n_gpus, "test"
)
class_logits = model(
minibatch, -1, return_hidden=False, paired_trg=None
)
class_preds = (
f.softmax(class_logits).data.cpu().numpy().argmax(axis=-1)
)
labels = minibatch["labels"].data.cpu().numpy()
for pred, label in zip(class_preds, labels):
if pred == label:
n_correct += 1.0
else:
n_wrong += 1.0
logging.info("NLI Test Acc : %.5f" % (n_correct / (n_correct + n_wrong)))
logging.info("******************************************************")
def train(config, data_folder, learning_rate=0.0001, max_epoch=None):
""" Train the Gensen model.
Args:
max_epoch(int): Limit training to specified number of epochs.
config(dict): Loaded json file as a python object.
data_folder(str): Path to the folder containing the data.
learning_rate(float): Learning rate for the model.
"""
owd = os.getcwd()
os.chdir(data_folder)
try:
with mlflow.start_run():
save_dir = config["data"]["save_dir"]
if not os.path.exists("./log"):
os.makedirs("./log")
os.makedirs(save_dir, exist_ok=True)
setup_logging(config)
batch_size = config["training"]["batch_size"]
src_vocab_size = config["model"]["n_words_src"]
trg_vocab_size = config["model"]["n_words_trg"]
max_len_src = config["data"]["max_src_length"]
max_len_trg = config["data"]["max_trg_length"]
model_state = {}
train_src = [item["train_src"] for item in config["data"]["paths"]]
train_trg = [item["train_trg"] for item in config["data"]["paths"]]
tasknames = [item["taskname"] for item in config["data"]["paths"]]
# Keep track of indicies to train forward and backward jointly
if (
"skipthought_next" in tasknames
and "skipthought_previous" in tasknames
):
skipthought_idx = tasknames.index("skipthought_next")
skipthought_backward_idx = tasknames.index(
"skipthought_previous"
)
paired_tasks = {
skipthought_idx: skipthought_backward_idx,
skipthought_backward_idx: skipthought_idx,
}
else:
paired_tasks = None
skipthought_idx = None
skipthought_backward_idx = None
train_iterator = BufferedDataIterator(
train_src,
train_trg,
src_vocab_size,
trg_vocab_size,
tasknames,
save_dir,
buffer_size=1e6,
lowercase=True,
seed=(hvd.rank() + 1) * 12345,
)
nli_iterator = NLIIterator(
train=config["data"]["nli_train"],
dev=config["data"]["nli_dev"],
test=config["data"]["nli_test"],
vocab_size=-1,
vocab=os.path.join(save_dir, "src_vocab.pkl"),
seed=(hvd.rank() + 1) * 12345,
)
src_vocab_size = len(train_iterator.src[0]["word2id"])
trg_vocab_size = len(train_iterator.trg[0]["word2id"])
# Logging set up.
logging.info("Finished creating iterator ...")
log_config(config)
logging.info(
"Found %d words in source : "
% (len(train_iterator.src[0]["id2word"]))
)
for idx, taskname in enumerate(tasknames):
logging.info(
"Found %d target words in task %s "
% (len(train_iterator.trg[idx]["id2word"]), taskname)
)
logging.info("Found %d words in src " % src_vocab_size)
logging.info("Found %d words in trg " % trg_vocab_size)
weight_mask = torch.ones(trg_vocab_size).cuda()
weight_mask[train_iterator.trg[0]["word2id"]["<pad>"]] = 0
loss_criterion = nn.CrossEntropyLoss(weight=weight_mask).cuda()
nli_criterion = nn.CrossEntropyLoss().cuda()
model = MultitaskModel(
src_emb_dim=config["model"]["dim_word_src"],
trg_emb_dim=config["model"]["dim_word_trg"],
src_vocab_size=src_vocab_size,
trg_vocab_size=trg_vocab_size,
src_hidden_dim=config["model"]["dim_src"],
trg_hidden_dim=config["model"]["dim_trg"],
bidirectional=config["model"]["bidirectional"],
pad_token_src=train_iterator.src[0]["word2id"]["<pad>"],
pad_token_trg=train_iterator.trg[0]["word2id"]["<pad>"],
nlayers_src=config["model"]["n_layers_src"],
dropout=config["model"]["dropout"],
num_tasks=len(train_iterator.src),
paired_tasks=paired_tasks,
).cuda()
optimizer = setup_horovod(model, learning_rate=learning_rate)
logging.info(model)
n_gpus = config["training"]["n_gpus"]
model = torch.nn.DataParallel(model, device_ids=range(n_gpus))
task_losses = [[] for _ in tasknames]
task_idxs = [0 for _ in tasknames]
nli_losses = []
updates = 0
nli_ctr = 0
nli_epoch = 0
monitor_epoch = 0
nli_mbatch_ctr = 0
mbatch_times = []
min_val_loss = 10000000
min_val_loss_epoch = -1
rng_num_tasks = (
len(tasknames) - 1 if paired_tasks else len(tasknames)
)
logging.info("OS Environ: \n {} \n\n".format(os.environ))
mlflow.log_param("learning_rate", learning_rate)
logging.info("Commencing Training ...")
start = time.time()
while True:
batch_start_time = time.time()
# Train NLI once every 10 minibatches of other tasks
if nli_ctr % 10 == 0:
minibatch = nli_iterator.get_parallel_minibatch(
nli_mbatch_ctr, batch_size * n_gpus
)
optimizer.zero_grad()
class_logits = model(
minibatch, -1, return_hidden=False, paired_trg=None
)
loss = nli_criterion(
class_logits.contiguous().view(
-1, class_logits.size(1)
),
minibatch["labels"].contiguous().view(-1),
)
# nli_losses.append(loss.data[0])
nli_losses.append(loss.item())
loss.backward()
torch.nn.utils.clip_grad_norm(model.parameters(), 1.0)
optimizer.step()
nli_mbatch_ctr += batch_size * n_gpus
if nli_mbatch_ctr >= len(nli_iterator.train_lines):
nli_mbatch_ctr = 0
nli_epoch += 1
else:
# Sample a random task
task_idx = np.random.randint(low=0, high=rng_num_tasks)
# Get a minibatch corresponding to the sampled task
minibatch = train_iterator.get_parallel_minibatch(
task_idx,
task_idxs[task_idx],
batch_size * n_gpus,
max_len_src,
max_len_trg,
)
"""Increment pointer into task and if current buffer is
exhausted, fetch new buffer. """
task_idxs[task_idx] += batch_size * n_gpus
if task_idxs[task_idx] >= train_iterator.buffer_size:
train_iterator.fetch_buffer(task_idx)
task_idxs[task_idx] = 0
if task_idx == skipthought_idx:
minibatch_back = train_iterator.get_parallel_minibatch(
skipthought_backward_idx,
task_idxs[skipthought_backward_idx],
batch_size * n_gpus,
max_len_src,
max_len_trg,
)
task_idxs[skipthought_backward_idx] += (
batch_size * n_gpus
)
if (
task_idxs[skipthought_backward_idx]
>= train_iterator.buffer_size
):
train_iterator.fetch_buffer(
skipthought_backward_idx
)
task_idxs[skipthought_backward_idx] = 0
optimizer.zero_grad()
decoder_logit, decoder_logit_2 = model(
minibatch,
task_idx,
paired_trg=minibatch_back["input_trg"],
)
loss_f = loss_criterion(
decoder_logit.contiguous().view(
-1, decoder_logit.size(2)
),
minibatch["output_trg"].contiguous().view(-1),
)
loss_b = loss_criterion(
decoder_logit_2.contiguous().view(
-1, decoder_logit_2.size(2)
),
minibatch_back["output_trg"].contiguous().view(-1),
)
                    task_losses[task_idx].append(loss_f.item())
                    task_losses[skipthought_backward_idx].append(
                        loss_b.item()
                    )
loss = loss_f + loss_b
else:
optimizer.zero_grad()
decoder_logit = model(minibatch, task_idx)
loss = loss_criterion(
decoder_logit.contiguous().view(
-1, decoder_logit.size(2)
),
minibatch["output_trg"].contiguous().view(-1),
)
task_losses[task_idx].append(loss.item())
loss.backward()
# For distributed optimizer need to sync before gradient
# clipping.
optimizer.synchronize()
torch.nn.utils.clip_grad_norm(model.parameters(), 1.0)
optimizer.step()
end = time.time()
mbatch_times.append(end - batch_start_time)
# Validations
if (
updates % config["management"]["monitor_loss"] == 0
and updates != 0
):
monitor_epoch += 1
for idx, task in enumerate(tasknames):
logging.info(
"Seq2Seq Examples Processed : %d %s Loss : %.5f Num %s "
"minibatches : %d"
% (
updates,
task,
np.mean(task_losses[idx]),
task,
len(task_losses[idx]),
)
)
mlflow.log_metric(
"validation_loss",
np.mean(task_losses[idx]),
step=monitor_epoch,
)
logging.info(
"Round: %d NLI Epoch : %d NLI Examples Processed : %d NLI "
"Loss : %.5f "
% (
nli_ctr,
nli_epoch,
nli_mbatch_ctr,
np.mean(nli_losses),
)
)
mlflow.log_metric(
"nli_loss", np.mean(nli_losses), step=nli_epoch
)
logging.info(
"Average time per minibatch : %.5f"
% (np.mean(mbatch_times))
)
mlflow.log_metric(
"minibatch_avg_duration", np.mean(mbatch_times)
)
task_losses = [[] for _ in tasknames]
mbatch_times = []
nli_losses = []
# For validate and break if done.
logging.info("############################")
logging.info("##### Evaluating model #####")
logging.info("############################")
training_complete, min_val_loss_epoch, min_val_loss, model_state = evaluate(
config=config,
train_iterator=train_iterator,
model=model,
loss_criterion=loss_criterion,
monitor_epoch=monitor_epoch,
min_val_loss=min_val_loss,
min_val_loss_epoch=min_val_loss_epoch,
save_dir=save_dir,
starting_time=start,
model_state=model_state,
max_epoch=max_epoch,
)
if training_complete:
mlflow.log_metric("min_val_loss", float(min_val_loss))
mlflow.log_metric("learning_rate", learning_rate)
break
logging.info("Evaluating on NLI")
evaluate_nli(
nli_iterator=nli_iterator,
model=model,
n_gpus=n_gpus,
batch_size=batch_size,
)
updates += batch_size * n_gpus
nli_ctr += 1
logging.info("Updates: %d" % updates)
finally:
os.chdir(owd)
def read_config(json_file):
"""Read JSON config."""
json_object = json.load(open(json_file, "r", encoding="utf-8"))
return json_object
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("--config", help="path to json config", required=True)
parser.add_argument("--data_folder", type=str, help="data folder")
# Add learning rate to tune model.
parser.add_argument(
"--learning_rate", type=float, default=0.0001, help="learning rate"
)
parser.add_argument(
"--max_epoch",
type=int,
default=None,
help="Limit training to specified number of epochs.",
)
args = parser.parse_args()
data_path = args.data_folder
lr = args.learning_rate
config_file_path = args.config
max_epoch = args.max_epoch
config_obj = read_config(config_file_path)
train(config_obj, data_path, lr, max_epoch)
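# Illustrative launch command (assumed; the flags mirror the argparse options
# above and the config filename is hypothetical):
#   horovodrun -np 4 python gensen_train.py --config gensen_config.json \
#       --data_folder ./data --learning_rate 0.0001 --max_epoch 10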
| 36.849612 | 96 | 0.51847 |
import argparse
import json
import logging
import os
import time
import horovod.torch as hvd
import mlflow
import numpy as np
import torch
import torch.backends.cudnn as cudnn
import torch.nn as nn
import torch.nn.functional as f
import torch.optim as optim
from utils_nlp.models.gensen.multi_task_model import MultitaskModel
from utils_nlp.models.gensen.utils import (
BufferedDataIterator,
NLIIterator,
compute_validation_loss,
)
cudnn.benchmark = True
logger = logging.getLogger(__name__)
hvd.init()
if torch.cuda.is_available():
torch.cuda.set_device(hvd.local_rank())
def metric_average(value, name):
tensor = torch.tensor(value)
avg_tensor = hvd.allreduce(tensor, name=name)
return avg_tensor.item()
def setup_horovod(model, learning_rate):
optimizer = optim.Adam(model.parameters(), lr=learning_rate * hvd.size())
hvd.broadcast_parameters(model.state_dict(), root_rank=0)
hvd.broadcast_optimizer_state(optimizer, root_rank=0)
compression = hvd.Compression.fp16
optimizer = hvd.DistributedOptimizer(
optimizer,
named_parameters=model.named_parameters(),
compression=compression,
)
return optimizer
def setup_logging(config):
logging.basicConfig(
level=logging.INFO,
format="%(asctime)s - %(levelname)s - %(message)s",
filename="log/%s" % (config["data"]["task"]),
filemode="w",
)
console = logging.StreamHandler()
console.setLevel(logging.INFO)
formatter = logging.Formatter("%(asctime)s - %(levelname)s - %(message)s")
console.setFormatter(formatter)
logging.getLogger("").addHandler(console)
def log_config(config):
logging.info("Model Parameters : ")
logging.info("Task : %s " % (config["data"]["task"]))
logging.info(
"Source Word Embedding Dim : %s" % (config["model"]["dim_word_src"])
)
logging.info(
"Target Word Embedding Dim : %s" % (config["model"]["dim_word_trg"])
)
logging.info("Source RNN Hidden Dim : %s" % (config["model"]["dim_src"]))
logging.info("Target RNN Hidden Dim : %s" % (config["model"]["dim_trg"]))
logging.info(
"Source RNN Bidirectional : %s" % (config["model"]["bidirectional"])
)
logging.info("Batch Size : %d " % (config["training"]["batch_size"]))
logging.info("Optimizer : %s " % (config["training"]["optimizer"]))
logging.info("Learning Rate : %f " % (config["training"]["lrate"]))
def evaluate(
config,
train_iterator,
model,
loss_criterion,
monitor_epoch,
min_val_loss,
min_val_loss_epoch,
save_dir,
starting_time,
model_state,
max_epoch,
):
break_flag = 0
for task_idx, task in enumerate(train_iterator.tasknames):
if "skipthought" in task:
continue
validation_loss = compute_validation_loss(
config,
model,
train_iterator,
loss_criterion,
task_idx,
lowercase=True,
)
validation_loss = metric_average(validation_loss, "val_loss")
logging.info("%s Validation Loss : %.3f" % (task, validation_loss))
if hvd.rank() == 0:
logging.info(
"Best Validation Loss: {}".format(np.float(validation_loss))
)
if validation_loss < min_val_loss:
min_val_loss = validation_loss
min_val_loss_epoch = monitor_epoch
model_state = model.state_dict()
logging.info(
"Monitor epoch: %d Validation Loss: %.3f Min Validation Epoch: "
"%d Loss : %.3f "
% (
monitor_epoch,
validation_loss,
min_val_loss_epoch,
min_val_loss,
)
)
if (monitor_epoch - min_val_loss_epoch) > config["training"][
"stop_patience"
] or (max_epoch is not None and monitor_epoch >= max_epoch):
logging.info("Saving model ...")
torch.save(
model_state,
open(os.path.join(save_dir, "best_model.model"), "wb"),
)
break_flag = 1
break
if break_flag == 1:
logging.info("##### Training stopped at ##### %f" % min_val_loss)
logging.info(
"##### Training Time ##### %f seconds"
% (time.time() - starting_time)
)
return True, min_val_loss_epoch, min_val_loss, model_state
else:
return False, min_val_loss_epoch, min_val_loss, model_state
def evaluate_nli(nli_iterator, model, batch_size, n_gpus):
n_correct = 0.0
n_wrong = 0.0
for j in range(0, len(nli_iterator.dev_lines), batch_size * n_gpus):
minibatch = nli_iterator.get_parallel_minibatch(
j, batch_size * n_gpus, "dev"
)
class_logits = model(
minibatch, -1, return_hidden=False, paired_trg=None
)
class_preds = (
f.softmax(class_logits).data.cpu().numpy().argmax(axis=-1)
)
labels = minibatch["labels"].data.cpu().numpy()
for pred, label in zip(class_preds, labels):
if pred == label:
n_correct += 1.0
else:
n_wrong += 1.0
logging.info("NLI Dev Acc : %.5f" % (n_correct / (n_correct + n_wrong)))
n_correct = 0.0
n_wrong = 0.0
for j in range(0, len(nli_iterator.test_lines), batch_size * n_gpus):
minibatch = nli_iterator.get_parallel_minibatch(
j, batch_size * n_gpus, "test"
)
class_logits = model(
minibatch, -1, return_hidden=False, paired_trg=None
)
class_preds = (
            f.softmax(class_logits, dim=-1).data.cpu().numpy().argmax(axis=-1)
)
labels = minibatch["labels"].data.cpu().numpy()
for pred, label in zip(class_preds, labels):
if pred == label:
n_correct += 1.0
else:
n_wrong += 1.0
logging.info("NLI Test Acc : %.5f" % (n_correct / (n_correct + n_wrong)))
logging.info("******************************************************")
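# Within a single minibatch, the per-example loop above is equivalent to a
# vectorized check (illustrative):
#
#   n_correct += float((class_preds == labels).sum())
#   n_wrong += float((class_preds != labels).sum())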
def train(config, data_folder, learning_rate=0.0001, max_epoch=None):
owd = os.getcwd()
os.chdir(data_folder)
try:
with mlflow.start_run():
save_dir = config["data"]["save_dir"]
if not os.path.exists("./log"):
os.makedirs("./log")
os.makedirs(save_dir, exist_ok=True)
setup_logging(config)
batch_size = config["training"]["batch_size"]
src_vocab_size = config["model"]["n_words_src"]
trg_vocab_size = config["model"]["n_words_trg"]
max_len_src = config["data"]["max_src_length"]
max_len_trg = config["data"]["max_trg_length"]
model_state = {}
train_src = [item["train_src"] for item in config["data"]["paths"]]
train_trg = [item["train_trg"] for item in config["data"]["paths"]]
tasknames = [item["taskname"] for item in config["data"]["paths"]]
if (
"skipthought_next" in tasknames
and "skipthought_previous" in tasknames
):
skipthought_idx = tasknames.index("skipthought_next")
skipthought_backward_idx = tasknames.index(
"skipthought_previous"
)
paired_tasks = {
skipthought_idx: skipthought_backward_idx,
skipthought_backward_idx: skipthought_idx,
}
else:
paired_tasks = None
skipthought_idx = None
skipthought_backward_idx = None
train_iterator = BufferedDataIterator(
train_src,
train_trg,
src_vocab_size,
trg_vocab_size,
tasknames,
save_dir,
buffer_size=1e6,
lowercase=True,
seed=(hvd.rank() + 1) * 12345,
)
nli_iterator = NLIIterator(
train=config["data"]["nli_train"],
dev=config["data"]["nli_dev"],
test=config["data"]["nli_test"],
vocab_size=-1,
vocab=os.path.join(save_dir, "src_vocab.pkl"),
seed=(hvd.rank() + 1) * 12345,
)
src_vocab_size = len(train_iterator.src[0]["word2id"])
trg_vocab_size = len(train_iterator.trg[0]["word2id"])
logging.info("Finished creating iterator ...")
log_config(config)
logging.info(
"Found %d words in source : "
% (len(train_iterator.src[0]["id2word"]))
)
for idx, taskname in enumerate(tasknames):
logging.info(
"Found %d target words in task %s "
% (len(train_iterator.trg[idx]["id2word"]), taskname)
)
logging.info("Found %d words in src " % src_vocab_size)
logging.info("Found %d words in trg " % trg_vocab_size)
weight_mask = torch.ones(trg_vocab_size).cuda()
weight_mask[train_iterator.trg[0]["word2id"]["<pad>"]] = 0
loss_criterion = nn.CrossEntropyLoss(weight=weight_mask).cuda()
nli_criterion = nn.CrossEntropyLoss().cuda()
model = MultitaskModel(
src_emb_dim=config["model"]["dim_word_src"],
trg_emb_dim=config["model"]["dim_word_trg"],
src_vocab_size=src_vocab_size,
trg_vocab_size=trg_vocab_size,
src_hidden_dim=config["model"]["dim_src"],
trg_hidden_dim=config["model"]["dim_trg"],
bidirectional=config["model"]["bidirectional"],
pad_token_src=train_iterator.src[0]["word2id"]["<pad>"],
pad_token_trg=train_iterator.trg[0]["word2id"]["<pad>"],
nlayers_src=config["model"]["n_layers_src"],
dropout=config["model"]["dropout"],
num_tasks=len(train_iterator.src),
paired_tasks=paired_tasks,
).cuda()
optimizer = setup_horovod(model, learning_rate=learning_rate)
logging.info(model)
n_gpus = config["training"]["n_gpus"]
model = torch.nn.DataParallel(model, device_ids=range(n_gpus))
task_losses = [[] for _ in tasknames]
task_idxs = [0 for _ in tasknames]
nli_losses = []
updates = 0
nli_ctr = 0
nli_epoch = 0
monitor_epoch = 0
nli_mbatch_ctr = 0
mbatch_times = []
min_val_loss = 10000000
min_val_loss_epoch = -1
rng_num_tasks = (
len(tasknames) - 1 if paired_tasks else len(tasknames)
)
logging.info("OS Environ: \n {} \n\n".format(os.environ))
mlflow.log_param("learning_rate", learning_rate)
logging.info("Commencing Training ...")
start = time.time()
while True:
batch_start_time = time.time()
if nli_ctr % 10 == 0:
minibatch = nli_iterator.get_parallel_minibatch(
nli_mbatch_ctr, batch_size * n_gpus
)
optimizer.zero_grad()
class_logits = model(
minibatch, -1, return_hidden=False, paired_trg=None
)
loss = nli_criterion(
class_logits.contiguous().view(
-1, class_logits.size(1)
),
minibatch["labels"].contiguous().view(-1),
)
nli_losses.append(loss.item())
loss.backward()
                    torch.nn.utils.clip_grad_norm_(model.parameters(), 1.0)
optimizer.step()
nli_mbatch_ctr += batch_size * n_gpus
if nli_mbatch_ctr >= len(nli_iterator.train_lines):
nli_mbatch_ctr = 0
nli_epoch += 1
else:
task_idx = np.random.randint(low=0, high=rng_num_tasks)
minibatch = train_iterator.get_parallel_minibatch(
task_idx,
task_idxs[task_idx],
batch_size * n_gpus,
max_len_src,
max_len_trg,
)
"""Increment pointer into task and if current buffer is
exhausted, fetch new buffer. """
task_idxs[task_idx] += batch_size * n_gpus
if task_idxs[task_idx] >= train_iterator.buffer_size:
train_iterator.fetch_buffer(task_idx)
task_idxs[task_idx] = 0
if task_idx == skipthought_idx:
minibatch_back = train_iterator.get_parallel_minibatch(
skipthought_backward_idx,
task_idxs[skipthought_backward_idx],
batch_size * n_gpus,
max_len_src,
max_len_trg,
)
task_idxs[skipthought_backward_idx] += (
batch_size * n_gpus
)
if (
task_idxs[skipthought_backward_idx]
>= train_iterator.buffer_size
):
train_iterator.fetch_buffer(
skipthought_backward_idx
)
task_idxs[skipthought_backward_idx] = 0
optimizer.zero_grad()
decoder_logit, decoder_logit_2 = model(
minibatch,
task_idx,
paired_trg=minibatch_back["input_trg"],
)
loss_f = loss_criterion(
decoder_logit.contiguous().view(
-1, decoder_logit.size(2)
),
minibatch["output_trg"].contiguous().view(-1),
)
loss_b = loss_criterion(
decoder_logit_2.contiguous().view(
-1, decoder_logit_2.size(2)
),
minibatch_back["output_trg"].contiguous().view(-1),
)
                        task_losses[task_idx].append(loss_f.item())
                        task_losses[skipthought_backward_idx].append(
                            loss_b.item()
                        )
loss = loss_f + loss_b
else:
optimizer.zero_grad()
decoder_logit = model(minibatch, task_idx)
loss = loss_criterion(
decoder_logit.contiguous().view(
-1, decoder_logit.size(2)
),
minibatch["output_trg"].contiguous().view(-1),
)
task_losses[task_idx].append(loss.item())
loss.backward()
optimizer.synchronize()
                torch.nn.utils.clip_grad_norm_(model.parameters(), 1.0)
optimizer.step()
end = time.time()
mbatch_times.append(end - batch_start_time)
if (
updates % config["management"]["monitor_loss"] == 0
and updates != 0
):
monitor_epoch += 1
for idx, task in enumerate(tasknames):
logging.info(
"Seq2Seq Examples Processed : %d %s Loss : %.5f Num %s "
"minibatches : %d"
% (
updates,
task,
np.mean(task_losses[idx]),
task,
len(task_losses[idx]),
)
)
mlflow.log_metric(
"validation_loss",
np.mean(task_losses[idx]),
step=monitor_epoch,
)
logging.info(
"Round: %d NLI Epoch : %d NLI Examples Processed : %d NLI "
"Loss : %.5f "
% (
nli_ctr,
nli_epoch,
nli_mbatch_ctr,
np.mean(nli_losses),
)
)
mlflow.log_metric(
"nli_loss", np.mean(nli_losses), step=nli_epoch
)
logging.info(
"Average time per minibatch : %.5f"
% (np.mean(mbatch_times))
)
mlflow.log_metric(
"minibatch_avg_duration", np.mean(mbatch_times)
)
task_losses = [[] for _ in tasknames]
mbatch_times = []
nli_losses = []
logging.info("############################")
logging.info("##### Evaluating model #####")
logging.info("############################")
training_complete, min_val_loss_epoch, min_val_loss, model_state = evaluate(
config=config,
train_iterator=train_iterator,
model=model,
loss_criterion=loss_criterion,
monitor_epoch=monitor_epoch,
min_val_loss=min_val_loss,
min_val_loss_epoch=min_val_loss_epoch,
save_dir=save_dir,
starting_time=start,
model_state=model_state,
max_epoch=max_epoch,
)
if training_complete:
mlflow.log_metric("min_val_loss", float(min_val_loss))
mlflow.log_metric("learning_rate", learning_rate)
break
logging.info("Evaluating on NLI")
evaluate_nli(
nli_iterator=nli_iterator,
model=model,
n_gpus=n_gpus,
batch_size=batch_size,
)
updates += batch_size * n_gpus
nli_ctr += 1
logging.info("Updates: %d" % updates)
finally:
os.chdir(owd)
def read_config(json_file):
json_object = json.load(open(json_file, "r", encoding="utf-8"))
return json_object
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("--config", help="path to json config", required=True)
parser.add_argument("--data_folder", type=str, help="data folder")
parser.add_argument(
"--learning_rate", type=float, default=0.0001, help="learning rate"
)
parser.add_argument(
"--max_epoch",
type=int,
default=None,
help="Limit training to specified number of epochs.",
)
args = parser.parse_args()
data_path = args.data_folder
lr = args.learning_rate
config_file_path = args.config
max_epoch = args.max_epoch
config_obj = read_config(config_file_path)
train(config_obj, data_path, lr, max_epoch)
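# Example invocation (the script and path names below are illustrative):
#
#   python <this_script>.py --config config.json --data_folder ./data \
#       --learning_rate 0.0001 --max_epoch 10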
| true | true |
f71ffb8011fd1e0f44abc963af4799e254cbef2d | 6,006 | py | Python | rb_missions/scripts/acoustic_docking.py | vanttec/vanttec_usv | 5c7b45a61728404b4c957028eac7bc361f1b2077 | ["MIT"] | 13 | 2020-08-18T18:47:11.000Z | 2022-03-30T08:07:25.000Z | rb_missions/scripts/acoustic_docking.py | vanttec/vanttec_usv | 5c7b45a61728404b4c957028eac7bc361f1b2077 | ["MIT"] | 2 | 2021-05-07T03:56:11.000Z | 2021-08-10T04:18:21.000Z | rb_missions/scripts/acoustic_docking.py | vanttec/vanttec_usv | 5c7b45a61728404b4c957028eac7bc361f1b2077 | ["MIT"] | 5 | 2020-12-21T17:29:29.000Z | 2022-02-15T07:51:07.000Z | #!/usr/bin/env python
# -*- coding: utf-8 -*-
'''
----------------------------------------------------------
@file: acoustic_docking.py
@date: Wed Jun 3, 2020
@author: Alejandro Gonzalez Garcia
@e-mail: alexglzg97@gmail.com
@brief: Motion planning. ROS node to follow an acoustic
signal for autonomous docking.
@version: 1.0
Open source
---------------------------------------------------------
'''
import math
import time
import os
import numpy as np
import rospy
from geometry_msgs.msg import Pose, Pose2D, PoseArray
from std_msgs.msg import Int32, Float32MultiArray, Float64, String
from visualization_msgs.msg import Marker, MarkerArray
class AcousticDocking:
def __init__(self):
self.ned_x = 0
self.ned_y = 0
self.yaw = 0
self.activated = True
self.distance = 0
self.signal_angle = 0
self.x1 = 0
self.y1 = 0
self.x2 = 0
self.y2 = 0
self.x_body_origin = 0
self.y_body_origin = 0
self.correction_distance = 2
# ROS Subscribers
rospy.Subscriber("/vectornav/ins_2d/NED_pose", Pose2D, self.ins_pose_callback)
rospy.Subscriber("/usv_perception/hydrophones/acoustic_signal", Float64, self.signal_callback)
rospy.Subscriber("/usv_perception/lidar_detector/dock", PoseArray, self.dock_callback)
# ROS Publishers
self.path_pub = rospy.Publisher("/mission/waypoints", Float32MultiArray, queue_size=10)
self.status_pub = rospy.Publisher("/mission/status", Int32, queue_size=10)
self.test = rospy.Publisher("/mission/state", Int32, queue_size=10)
def ins_pose_callback(self,pose):
self.ned_x = pose.x
self.ned_y = pose.y
self.yaw = pose.theta
def signal_callback(self, signal):
self.signal_angle = signal.data
def dock_callback(self, dock):
self.x1 = dock.poses[0].position.x
self.y1 = dock.poses[0].position.y
self.x2 = dock.poses[1].position.x
self.y2 = dock.poses[1].position.y
def calculate_distance_to_dock(self):
'''
@name: calculate_distance_to_dock
@brief: Calculates the distance between the USV and the dock.
@param: --
@return: --
'''
xc = min([self.x1,self.x2]) + abs(self.x1 - self.x2)/2
yc = min([self.y1,self.y2]) + abs(self.y1 - self.y2)/2
self.distance = math.pow(xc*xc + yc*yc, 0.5)
def dock(self):
'''
@name: dock
@brief: Calculates the intersection point between the USV and the pinger
location at the dock. Returns two waypoints as desired positions. The first
waypoint is perpendicularly in front of the pinger to straighten the path.
        The second waypoint is the location of the pinger on the dock, for docking.
@param: --
@return: --
'''
if self.y1 < self.y2:
yl = self.y1
xl = self.x1
yr = self.y2
xr = self.x2
else:
yl = self.y2
xl = self.x2
yr = self.y1
xr = self.x1
yd = yl - yr
xd = xl - xr
alpha = math.atan2(yd,xd) + math.pi/2
if (abs(alpha) > (math.pi)):
alpha = (alpha/abs(alpha))*(abs(alpha) - 2*math.pi)
x_beta, y_beta = self.aux_to_body(1,0,self.signal_angle,self.x_body_origin,self.y_body_origin)
common_denominator = (xl - xr)*(self.y_body_origin - y_beta) - (yl - yr)*(self.x_body_origin - x_beta)
x_pinger = ((xl*yr-yl*xr)*(self.x_body_origin-x_beta)-(xl-xr)*(self.x_body_origin*y_beta-self.y_body_origin*x_beta)) / common_denominator
y_pinger = ((xl*yr-yl*xr)*(self.y_body_origin-y_beta)-(yl-yr)*(self.x_body_origin*y_beta-self.y_body_origin*x_beta)) / common_denominator
x_aux, y_aux = self.aux_to_body(-self.correction_distance,0,alpha,x_pinger,y_pinger)
path_array = Float32MultiArray()
path_array.layout.data_offset = 5
path_array.data = [x_aux, y_aux, x_pinger, y_pinger, 2]
self.desired(path_array)
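    # Illustrative check of the intersection above (numbers are made up): with
    # dock endpoints (x1, y1) = (5, -1) and (x2, y2) = (5, 1), and the signal
    # arriving from dead ahead (signal_angle = 0), the two-line intersection
    # gives (x_pinger, y_pinger) = (5, 0); the auxiliary waypoint is then
    # placed correction_distance = 2 m in front of it, at (3, 0).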
def aux_to_body(self, aux_x2, aux_y2, alpha, body_x1, body_y1):
'''
@name: aux_to_body
@brief: Coordinate transformation between auxiliary and body reference frames.
@param: aux_x2: target x coordinate in aux reference frame
aux_y2: target y coordinate in aux reference frame
alpha: angle between aux and body reference frames
body_x1: aux x coordinate in body reference frame
body_y1: aux y coordinate in body reference frame
@return: body_x2: target x coordinate in body reference frame
body_y2: target y coordinate in body reference frame
'''
p = np.array([[aux_x2],[aux_y2]])
J = self.rotation_matrix(alpha)
n = J.dot(p)
body_x2 = n[0] + body_x1
body_y2 = n[1] + body_y1
return (body_x2, body_y2)
def rotation_matrix(self, angle):
'''
@name: rotation_matrix
@brief: Transformation matrix template.
@param: angle: angle of rotation
@return: J: transformation matrix
'''
J = np.array([[math.cos(angle), -1*math.sin(angle)],
[math.sin(angle), math.cos(angle)]])
return (J)
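    # Quick sanity check (illustrative): a 90-degree rotation maps the x unit
    # vector onto y:
    #
    #   J = self.rotation_matrix(math.pi / 2)
    #   J.dot(np.array([[1.0], [0.0]]))   # ~= [[0.0], [1.0]]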
def desired(self, path):
self.path_pub.publish(path)
def main():
rospy.init_node("acoustic_docking", anonymous=False)
rate = rospy.Rate(20)
acousticDocking = AcousticDocking()
last_detection = []
while not rospy.is_shutdown() and acousticDocking.activated:
acousticDocking.calculate_distance_to_dock()
if (acousticDocking.distance >= 5):
acousticDocking.dock()
else:
acousticDocking.status_pub.publish(1)
rate.sleep()
rospy.spin()
if __name__ == "__main__":
try:
main()
except rospy.ROSInterruptException:
pass
| 33.741573 | 145 | 0.604729 |
import math
import time
import os
import numpy as np
import rospy
from geometry_msgs.msg import Pose, Pose2D, PoseArray
from std_msgs.msg import Int32, Float32MultiArray, Float64, String
from visualization_msgs.msg import Marker, MarkerArray
class AcousticDocking:
def __init__(self):
self.ned_x = 0
self.ned_y = 0
self.yaw = 0
self.activated = True
self.distance = 0
self.signal_angle = 0
self.x1 = 0
self.y1 = 0
self.x2 = 0
self.y2 = 0
self.x_body_origin = 0
self.y_body_origin = 0
self.correction_distance = 2
rospy.Subscriber("/vectornav/ins_2d/NED_pose", Pose2D, self.ins_pose_callback)
rospy.Subscriber("/usv_perception/hydrophones/acoustic_signal", Float64, self.signal_callback)
rospy.Subscriber("/usv_perception/lidar_detector/dock", PoseArray, self.dock_callback)
self.path_pub = rospy.Publisher("/mission/waypoints", Float32MultiArray, queue_size=10)
self.status_pub = rospy.Publisher("/mission/status", Int32, queue_size=10)
self.test = rospy.Publisher("/mission/state", Int32, queue_size=10)
def ins_pose_callback(self,pose):
self.ned_x = pose.x
self.ned_y = pose.y
self.yaw = pose.theta
def signal_callback(self, signal):
self.signal_angle = signal.data
def dock_callback(self, dock):
self.x1 = dock.poses[0].position.x
self.y1 = dock.poses[0].position.y
self.x2 = dock.poses[1].position.x
self.y2 = dock.poses[1].position.y
def calculate_distance_to_dock(self):
xc = min([self.x1,self.x2]) + abs(self.x1 - self.x2)/2
yc = min([self.y1,self.y2]) + abs(self.y1 - self.y2)/2
self.distance = math.pow(xc*xc + yc*yc, 0.5)
def dock(self):
if self.y1 < self.y2:
yl = self.y1
xl = self.x1
yr = self.y2
xr = self.x2
else:
yl = self.y2
xl = self.x2
yr = self.y1
xr = self.x1
yd = yl - yr
xd = xl - xr
alpha = math.atan2(yd,xd) + math.pi/2
if (abs(alpha) > (math.pi)):
alpha = (alpha/abs(alpha))*(abs(alpha) - 2*math.pi)
x_beta, y_beta = self.aux_to_body(1,0,self.signal_angle,self.x_body_origin,self.y_body_origin)
common_denominator = (xl - xr)*(self.y_body_origin - y_beta) - (yl - yr)*(self.x_body_origin - x_beta)
x_pinger = ((xl*yr-yl*xr)*(self.x_body_origin-x_beta)-(xl-xr)*(self.x_body_origin*y_beta-self.y_body_origin*x_beta)) / common_denominator
y_pinger = ((xl*yr-yl*xr)*(self.y_body_origin-y_beta)-(yl-yr)*(self.x_body_origin*y_beta-self.y_body_origin*x_beta)) / common_denominator
x_aux, y_aux = self.aux_to_body(-self.correction_distance,0,alpha,x_pinger,y_pinger)
path_array = Float32MultiArray()
path_array.layout.data_offset = 5
path_array.data = [x_aux, y_aux, x_pinger, y_pinger, 2]
self.desired(path_array)
def aux_to_body(self, aux_x2, aux_y2, alpha, body_x1, body_y1):
p = np.array([[aux_x2],[aux_y2]])
J = self.rotation_matrix(alpha)
n = J.dot(p)
body_x2 = n[0] + body_x1
body_y2 = n[1] + body_y1
return (body_x2, body_y2)
def rotation_matrix(self, angle):
J = np.array([[math.cos(angle), -1*math.sin(angle)],
[math.sin(angle), math.cos(angle)]])
return (J)
def desired(self, path):
self.path_pub.publish(path)
def main():
rospy.init_node("acoustic_docking", anonymous=False)
rate = rospy.Rate(20)
acousticDocking = AcousticDocking()
last_detection = []
while not rospy.is_shutdown() and acousticDocking.activated:
acousticDocking.calculate_distance_to_dock()
if (acousticDocking.distance >= 5):
acousticDocking.dock()
else:
acousticDocking.status_pub.publish(1)
rate.sleep()
rospy.spin()
if __name__ == "__main__":
try:
main()
except rospy.ROSInterruptException:
pass
| true | true |
f71ffbe737f15846e14c72d6820690ac7cf93d67 | 4,873 | py | Python | lula/util.py | wiseodd/lula | a52b27c118ed136a62d8d7d1a898067d5ac685fb | ["MIT"] | 15 | 2021-06-07T14:25:35.000Z | 2021-12-26T16:41:01.000Z | lula/util.py | wiseodd/lula | a52b27c118ed136a62d8d7d1a898067d5ac685fb | ["MIT"] | 1 | 2022-03-11T01:03:12.000Z | 2022-03-11T01:03:12.000Z | lula/util.py | wiseodd/lula | a52b27c118ed136a62d8d7d1a898067d5ac685fb | ["MIT"] | 2 | 2021-06-19T05:41:05.000Z | 2022-03-23T11:51:06.000Z | import numpy as np
import torch
from torch import nn
from torch.nn import functional as F
class MaskedLinear(nn.Module):
def __init__(self, base_layer, m_in, m_out):
"""
The standard nn.Linear layer, but with gradient masking to enforce the LULA construction.
"""
super(MaskedLinear, self).__init__()
# Extend the weight matrix
W_base = base_layer.weight.data.clone() # (n_out, n_in)
n_out, n_in = W_base.shape
W = torch.randn(n_out+m_out, n_in+m_in)
W[0:n_out, 0:n_in] = W_base.clone()
W[0:n_out, n_in:] = 0 # Upper-right quadrant
self.weight = nn.Parameter(W)
# Extend the bias vector
if base_layer.bias is not None:
b_base = base_layer.bias.data.clone()
b = torch.randn(n_out+m_out)
b[:n_out] = b_base.clone()
self.bias = nn.Parameter(b)
        else:
            # b_base must still be defined for the bookkeeping below.
            b_base = None
            self.bias = None
# Apply gradient mask to the weight and bias
self.mask_w = torch.zeros(n_out+m_out, n_in+m_in)
self.mask_w[n_out:, :] = 1 # Lower half
self.mask_b = torch.zeros(n_out+m_out)
self.mask_b[n_out:] = 1
self.switch_grad_mask(True)
# For safekeeping
self.W_base, self.b_base = W_base, b_base
self.n_out, self.n_in = n_out, n_in
self.m_out, self.m_in = m_out, m_in
def forward(self, x):
return F.linear(x, self.weight, self.bias)
def switch_grad_mask(self, on=True):
if on:
self.grad_handle_w = self.weight.register_hook(lambda grad: grad.mul_(self.mask_w))
self.grad_handle_b = self.bias.register_hook(lambda grad: grad.mul_(self.mask_b))
else:
self.grad_handle_w.remove()
self.grad_handle_b.remove()
def to_gpu(self):
self.mask_w = self.mask_w.cuda()
self.mask_b = self.mask_b.cuda()
def to_unmasked(self):
lin = nn.Linear(self.n_in+self.m_in, self.n_out+self.m_out)
lin.weight = self.weight
lin.bias = self.bias
return lin
def extra_repr(self):
return 'in_features={}, out_features={}, bias={}'.format(
self.n_in+self.m_in, self.n_out+self.m_out, self.bias is not None
)
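# Hypothetical usage sketch (not part of the original module): extend a
# pretrained layer and verify that the base sub-block is preserved and frozen
# by the gradient mask.
#
#   base = nn.Linear(4, 3)
#   lula = MaskedLinear(base, m_in=2, m_out=2)        # weight becomes (5, 6)
#   assert torch.equal(lula.weight[:3, :4], base.weight)
#   lula(torch.randn(1, 6)).sum().backward()
#   assert lula.weight.grad[:3, :].abs().sum() == 0   # base rows get no grad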
class MaskedConv2d(nn.Module):
def __init__(self, base_layer, m_in, m_out):
"""
The standard nn.Conv2d layer, but with gradient masking to enforce the LULA construction.
"""
super(MaskedConv2d, self).__init__()
self.kernel_size = base_layer.kernel_size
self.stride = base_layer.stride
self.padding = base_layer.padding
self.dilation = base_layer.dilation
self.groups = base_layer.groups
# Extend the weight matrix
W_base = base_layer.weight.data.clone() # (n_out, n_in, k, k)
n_out, n_in, k, _ = W_base.shape # Num of channels
W = torch.randn(n_out+m_out, n_in+m_in, k, k)
W[0:n_out, 0:n_in, :, :] = W_base.clone()
W[0:n_out, n_in:, :, :] = 0 # Upper-right quadrant
self.weight = nn.Parameter(W)
# Extend the bias vector
if base_layer.bias is not None:
b_base = base_layer.bias.data.clone()
b = torch.randn(n_out+m_out)
b[:n_out] = b_base.clone()
self.bias = nn.Parameter(b)
        else:
            # b_base must still be defined for the bookkeeping below.
            b_base = None
            self.bias = None
# Apply gradient mask to the weight and bias
self.mask_w = torch.zeros(n_out+m_out, n_in+m_in, k, k)
self.mask_w[n_out:, :, :, :] = 1 # Lower half
self.mask_b = torch.zeros(n_out+m_out)
self.mask_b[n_out:] = 1
self.switch_grad_mask(True)
# For safekeeping
self.W_base, self.b_base = W_base, b_base
self.n_out, self.n_in = n_out, n_in
self.m_out, self.m_in = m_out, m_in
def forward(self, x):
return F.conv2d(x, self.weight, self.bias, self.stride, self.padding, self.dilation, self.groups)
def switch_grad_mask(self, on=True):
if on:
self.grad_handle_w = self.weight.register_hook(lambda grad: grad.mul_(self.mask_w))
self.grad_handle_b = self.bias.register_hook(lambda grad: grad.mul_(self.mask_b))
else:
self.grad_handle_w.remove()
self.grad_handle_b.remove()
def to_gpu(self):
self.mask_w = self.mask_w.cuda()
self.mask_b = self.mask_b.cuda()
def to_unmasked(self):
conv = nn.Conv2d(self.n_in+self.m_in, self.n_out+self.m_out, self.kernel_size, self.stride, self.padding, self.dilation, self.groups)
conv.weight = self.weight
conv.bias = self.bias
return conv
def extra_repr(self):
return 'in_channels={}, out_channels={}, bias={}'.format(
self.n_in+self.m_in, self.n_out+self.m_out, self.bias is not None
)
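# Analogous sketch for the convolutional variant (shapes are illustrative):
#
#   base = nn.Conv2d(3, 8, kernel_size=3, padding=1)
#   lula = MaskedConv2d(base, m_in=1, m_out=2)        # weight: (10, 4, 3, 3)
#   out = lula(torch.randn(1, 4, 16, 16))             # -> (1, 10, 16, 16)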
| 32.059211 | 141 | 0.60353 | import numpy as np
import torch
from torch import nn
from torch.nn import functional as F
class MaskedLinear(nn.Module):
def __init__(self, base_layer, m_in, m_out):
super(MaskedLinear, self).__init__()
W_base = base_layer.weight.data.clone()
n_out, n_in = W_base.shape
W = torch.randn(n_out+m_out, n_in+m_in)
W[0:n_out, 0:n_in] = W_base.clone()
W[0:n_out, n_in:] = 0
self.weight = nn.Parameter(W)
if base_layer.bias is not None:
b_base = base_layer.bias.data.clone()
b = torch.randn(n_out+m_out)
b[:n_out] = b_base.clone()
self.bias = nn.Parameter(b)
        else:
            b_base = None
            self.bias = None
self.mask_w = torch.zeros(n_out+m_out, n_in+m_in)
self.mask_w[n_out:, :] = 1
self.mask_b = torch.zeros(n_out+m_out)
self.mask_b[n_out:] = 1
self.switch_grad_mask(True)
self.W_base, self.b_base = W_base, b_base
self.n_out, self.n_in = n_out, n_in
self.m_out, self.m_in = m_out, m_in
def forward(self, x):
return F.linear(x, self.weight, self.bias)
def switch_grad_mask(self, on=True):
if on:
self.grad_handle_w = self.weight.register_hook(lambda grad: grad.mul_(self.mask_w))
self.grad_handle_b = self.bias.register_hook(lambda grad: grad.mul_(self.mask_b))
else:
self.grad_handle_w.remove()
self.grad_handle_b.remove()
def to_gpu(self):
self.mask_w = self.mask_w.cuda()
self.mask_b = self.mask_b.cuda()
def to_unmasked(self):
lin = nn.Linear(self.n_in+self.m_in, self.n_out+self.m_out)
lin.weight = self.weight
lin.bias = self.bias
return lin
def extra_repr(self):
return 'in_features={}, out_features={}, bias={}'.format(
self.n_in+self.m_in, self.n_out+self.m_out, self.bias is not None
)
class MaskedConv2d(nn.Module):
def __init__(self, base_layer, m_in, m_out):
super(MaskedConv2d, self).__init__()
self.kernel_size = base_layer.kernel_size
self.stride = base_layer.stride
self.padding = base_layer.padding
self.dilation = base_layer.dilation
self.groups = base_layer.groups
W_base = base_layer.weight.data.clone()
n_out, n_in, k, _ = W_base.shape
W = torch.randn(n_out+m_out, n_in+m_in, k, k)
W[0:n_out, 0:n_in, :, :] = W_base.clone()
W[0:n_out, n_in:, :, :] = 0
self.weight = nn.Parameter(W)
if base_layer.bias is not None:
b_base = base_layer.bias.data.clone()
b = torch.randn(n_out+m_out)
b[:n_out] = b_base.clone()
self.bias = nn.Parameter(b)
        else:
            b_base = None
            self.bias = None
self.mask_w = torch.zeros(n_out+m_out, n_in+m_in, k, k)
self.mask_w[n_out:, :, :, :] = 1
self.mask_b = torch.zeros(n_out+m_out)
self.mask_b[n_out:] = 1
self.switch_grad_mask(True)
self.W_base, self.b_base = W_base, b_base
self.n_out, self.n_in = n_out, n_in
self.m_out, self.m_in = m_out, m_in
def forward(self, x):
return F.conv2d(x, self.weight, self.bias, self.stride, self.padding, self.dilation, self.groups)
def switch_grad_mask(self, on=True):
if on:
self.grad_handle_w = self.weight.register_hook(lambda grad: grad.mul_(self.mask_w))
self.grad_handle_b = self.bias.register_hook(lambda grad: grad.mul_(self.mask_b))
else:
self.grad_handle_w.remove()
self.grad_handle_b.remove()
def to_gpu(self):
self.mask_w = self.mask_w.cuda()
self.mask_b = self.mask_b.cuda()
def to_unmasked(self):
conv = nn.Conv2d(self.n_in+self.m_in, self.n_out+self.m_out, self.kernel_size, self.stride, self.padding, self.dilation, self.groups)
conv.weight = self.weight
conv.bias = self.bias
return conv
def extra_repr(self):
return 'in_channels={}, out_channels={}, bias={}'.format(
self.n_in+self.m_in, self.n_out+self.m_out, self.bias is not None
)
| true | true |
f71ffe7210fe58d0bbb802af2106a7f260b2e296 | 5,184 | py | Python | line6.py | ChrBarth/pypod | 4dccf6e5f5f3584672e2bab5281220a15ee51de5 | ["MIT"] | 4 | 2021-04-26T07:24:27.000Z | 2022-01-17T23:10:47.000Z | line6.py | ChrBarth/pypod | 4dccf6e5f5f3584672e2bab5281220a15ee51de5 | ["MIT"] | null | null | null | line6.py | ChrBarth/pypod | 4dccf6e5f5f3584672e2bab5281220a15ee51de5 | ["MIT"] | null | null | null | #!/usr/bin/env python
# Some useful POD-Variables
# The Program names:
PROGRAMS = [ "1A", "1B", "1C", "1D",
"2A", "2B", "2C", "2D",
"3A", "3B", "3C", "3D",
"4A", "4B", "4C", "4D",
"5A", "5B", "5C", "5D",
"6A", "6B", "6C", "6D",
"7A", "7B", "7C", "7D",
"8A", "8B", "8C", "8D",
"9A", "9B", "9C", "9D" ]
# The Amp Models:
amp_names = [
'Tube Preamp',
'POD Clean Line 6',
'POD Crunch Line 6',
'POD Drive Line 6',
'POD Layer Line 6',
'Small Tweed',
'Tweed Blues',
'Black Panel',
'Modern Class A',
'Brit Class A',
'Brit Blues',
'Brit Classic',
'Brit Hi Gain',
'Rectified ’94',
'Modern Hi Gain',
'Fuzz Box',
'Jazz Clean',
'Boutique #1',
'Boutique #2',
'Brit Class A #2',
'Brit Class A #3',
'Small Tweed #2',
'Black Panel #2',
'Boutique #3',
'California Crunch #1',
'California Crunch #2',
'Rectified #2',
'Modern Hi Gain #2',
'Line 6 Twang',
'Line 6 Crunch #2',
'Line 6 Blues',
'Line 6 Insane' ]
# The Cab names:
cab_names = [
"1x 8 ’60 Fender Tweed Champ",
"1x12 ’52 Fender Tweed Deluxe",
"1x12 ’60 Vox AC15",
"1x12 ’64 Fender Blackface Deluxe",
"1x12 ’98 Line 6 Flextone",
"2x12 ’65 Fender Blackface Twin",
"2x12 ’67 VOX AC30",
"2x12 ’95 Matchless Chieftain",
"2x12 ’98 Pod custom 2x12",
"4x10 ’59 Fender Bassman",
"4x10 ’98 Pod custom 4x10 cab",
"4x12 ’96 Marshall with V30s",
"4x12 ’78 Marshall with 70s",
"4x12 ’97 Marshall off axis",
"4x12 ’98 Pod custom 4x12",
"No Cabinet" ]
# The effect types:
fx_names = [
"Chorus2",
"Flanger1",
"Rotary",
"Flanger2",
"Delay/Chorus1",
"Delay/Tremolo",
"Delay",
"Delay/Comp",
"Chorus1",
"Tremolo",
"Bypass",
"Compressor",
"Delay/Chorus2",
"Delay/Flanger1",
"Delay/Swell",
"Delay/Flanger2" ]
cc_commands = {
"AmpModel (0-32)": 12, # 0-32 (0=Tube Preamp,...)
"Drive": 13, # 0-127
"Bass": 14, # 0-127
"Mid": 15, # 0-127
"Treble": 16, # 0-127
"BrightSwitch (0-63: OFF, 64-127: ON)": 73, # 0-63: OFF, 64-127: ON
"Channel Vol": 17, # 0-127
"Presence": 21, # 0-127
"Noise Gate (0-63: OFF, 64-127: ON)": 22, # 0-63: OFF, 64-127: ON
"GateThreshhold": 23, # 0-127
"GateDecay": 24, # 0-127
"Effect": 19, # 0-15 (0=Bypass,...)
"EffectTweak": 1, # 0-127
"Distortion (0-63: OFF, 64-127: ON)": 25, # 0-63: OFF, 64-127: ON
"DriveBoost (0-63: OFF, 64-127: ON)": 26, # 0-63: OFF, 64-127: ON
"Presence (0-63: OFF, 64-127: ON)": 27, # 0-63: OFF, 64-127: ON
"Delay (0-63: OFF, 64-127: ON)": 28, # 0-63: OFF, 64-127: ON
"DelayTime": 30, # 0-127 = 0-3150ms
"DelayTime2": 62, # 0-127 (Extra precision (???))
"DelayRepeats": 32, # 0-127
"DelayLevel": 34, # 0-127
"Reverb (0-63: OFF, 64-127: ON)": 36, # 0-63: OFF; 64-127: ON
"ReverbType (0-63: Spring, 64-127: Hall)": 37, # 0-63: SPRING, 64-127: HALL
"ReverbDecay": 38, # 0-127
"ReverbTone": 39, # 0-127
"ReverbDiffusion": 40, # 0-127
"ReverbDensity": 41, # 0-127
"ReverbLevel": 18, # 0-127
"CompressionRatio": 42, # 0-21=OFF, 22-44=1.4:1, 45-67=2:1, 68-90=3:1, 91-113=6:1, 114-127=INF
"Wah (0-63: OFF, 64-127: ON)": 43, # 0-63: OFF, 64-127: ON
"WahPedal": 4, # 0-127 (Pedal Position)
"WahBottom": 44, # 0-127 (Bottom frequency)
"WahTop": 45, # 0-127 (Top frequency)
"Volume": 7, # 0-127 (Volume Pedal)
"VolumeMin": 46, # 0-127 ???
"VolumePrePost (0-63: Pre Tube, 64-127: Post Tube)": 47, # 0-63: PRE TUBE, 64-127: POST TUBE
"VolSwell (0-63: OFF, 64-127: ON)": 48, # 0-63: OFF, 64-127: ON
"VolSwellRamp": 49, # 0-127
#"TapTempo": 64, # 64-127 = A TAP (=sending 2 in a second sets to 120bpm?)
"Modulation (0-63: OFF, 64-127: ON)": 50, # 0-63: OFF, 64-127: ON (Chorus/Rotary/Tremolo)
"Speed": 51, # 0-127 (Chorus/Flanger)
"Depth": 52, # 0-127 (Chorus/Flanger)
"Feedback": 53, # 0-63: NEGATIVE: 64-127: POSITIVE
"ChorusPreDelay": 54, # 0-127
"RotarySpeed": 55, # 0-127
"RotaryMaxSpeed": 56, # 0-127
"RotaryMinSpeed": 57, # 0-127
"TremoloSpeed": 58, # 0-127
"TremoloDepth": 59, # 0-127
"CabinetType (0-15)": 71, # 0-15 (0=No Cab, ...)
"AIRAmbienceLevel": 72 # 0-127
}
compression_values = [ [ 0, "Off" ],
[ 22, "1.4:1" ],
[ 45, "2:1" ],
[ 68, "3:1" ],
[ 91, "6:1" ],
[ 114, "INF" ] ]
reverb_types = [ [ 0, "Spring" ], [ 64, "Hall" ] ]
volume_pos = [ [ 0, "Pre-Tube" ], [ 64, "Post-Tube" ] ]
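# Hypothetical usage sketch (assumes the third-party 'mido' package and a
# connected POD; the port name below is illustrative):
#
#   import mido
#   port = mido.open_output("Line 6 POD")
#   port.send(mido.Message("control_change",
#                          control=cc_commands["Drive"], value=100))
#   port.send(mido.Message("program_change", program=PROGRAMS.index("3B")))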
| 33.882353 | 103 | 0.470486 |
PROGRAMS = [ "1A", "1B", "1C", "1D",
"2A", "2B", "2C", "2D",
"3A", "3B", "3C", "3D",
"4A", "4B", "4C", "4D",
"5A", "5B", "5C", "5D",
"6A", "6B", "6C", "6D",
"7A", "7B", "7C", "7D",
"8A", "8B", "8C", "8D",
"9A", "9B", "9C", "9D" ]
amp_names = [
'Tube Preamp',
'POD Clean Line 6',
'POD Crunch Line 6',
'POD Drive Line 6',
'POD Layer Line 6',
'Small Tweed',
'Tweed Blues',
'Black Panel',
'Modern Class A',
'Brit Class A',
'Brit Blues',
'Brit Classic',
'Brit Hi Gain',
'Rectified ’94',
'Modern Hi Gain',
'Fuzz Box',
'Jazz Clean',
'Boutique #1',
'Boutique #2',
'Brit Class A #2',
'Brit Class A #3',
'Small Tweed #2',
'Black Panel #2',
'Boutique #3',
'California Crunch #1',
'California Crunch #2',
'Rectified #2',
'Modern Hi Gain #2',
'Line 6 Twang',
'Line 6 Crunch #2',
'Line 6 Blues',
'Line 6 Insane' ]
cab_names = [
"1x 8 ’60 Fender Tweed Champ",
"1x12 ’52 Fender Tweed Deluxe",
"1x12 ’60 Vox AC15",
"1x12 ’64 Fender Blackface Deluxe",
"1x12 ’98 Line 6 Flextone",
"2x12 ’65 Fender Blackface Twin",
"2x12 ’67 VOX AC30",
"2x12 ’95 Matchless Chieftain",
"2x12 ’98 Pod custom 2x12",
"4x10 ’59 Fender Bassman",
"4x10 ’98 Pod custom 4x10 cab",
"4x12 ’96 Marshall with V30s",
"4x12 ’78 Marshall with 70s",
"4x12 ’97 Marshall off axis",
"4x12 ’98 Pod custom 4x12",
"No Cabinet" ]
fx_names = [
"Chorus2",
"Flanger1",
"Rotary",
"Flanger2",
"Delay/Chorus1",
"Delay/Tremolo",
"Delay",
"Delay/Comp",
"Chorus1",
"Tremolo",
"Bypass",
"Compressor",
"Delay/Chorus2",
"Delay/Flanger1",
"Delay/Swell",
"Delay/Flanger2" ]
cc_commands = {
"AmpModel (0-32)": 12,
"Drive": 13,
"Bass": 14,
"Mid": 15,
"Treble": 16,
"BrightSwitch (0-63: OFF, 64-127: ON)": 73,
"Channel Vol": 17,
"Presence": 21,
"Noise Gate (0-63: OFF, 64-127: ON)": 22,
"GateThreshhold": 23,
"GateDecay": 24,
"Effect": 19,
"EffectTweak": 1,
"Distortion (0-63: OFF, 64-127: ON)": 25,
"DriveBoost (0-63: OFF, 64-127: ON)": 26,
"Presence (0-63: OFF, 64-127: ON)": 27,
"Delay (0-63: OFF, 64-127: ON)": 28,
"DelayTime": 30,
"DelayTime2": 62,
"DelayRepeats": 32,
"DelayLevel": 34,
"Reverb (0-63: OFF, 64-127: ON)": 36,
"ReverbType (0-63: Spring, 64-127: Hall)": 37,
"ReverbDecay": 38,
"ReverbTone": 39,
"ReverbDiffusion": 40,
"ReverbDensity": 41,
"ReverbLevel": 18,
"CompressionRatio": 42,
"Wah (0-63: OFF, 64-127: ON)": 43,
"WahPedal": 4,
"WahBottom": 44,
"WahTop": 45,
"Volume": 7,
"VolumeMin": 46,
"VolumePrePost (0-63: Pre Tube, 64-127: Post Tube)": 47,
"VolSwell (0-63: OFF, 64-127: ON)": 48,
"VolSwellRamp": 49,
"Speed": 51,
"Depth": 52,
"Feedback": 53,
"ChorusPreDelay": 54,
"RotarySpeed": 55,
"RotaryMaxSpeed": 56,
"RotaryMinSpeed": 57,
"TremoloSpeed": 58,
"TremoloDepth": 59,
"CabinetType (0-15)": 71,
"AIRAmbienceLevel": 72
}
compression_values = [ [ 0, "Off" ],
[ 22, "1.4:1" ],
[ 45, "2:1" ],
[ 68, "3:1" ],
[ 91, "6:1" ],
[ 114, "INF" ] ]
reverb_types = [ [ 0, "Spring" ], [ 64, "Hall" ] ]
volume_pos = [ [ 0, "Pre-Tube" ], [ 64, "Post-Tube" ] ]
| true | true |
f71fff32cfd2c5d668696fc8401b13ffda826fad | 118 | py | Python | commands/enlist.py | SirChopwood/Arma-3-Bot | 5aa751beb6a362af1fcefe3c8b1d2572b3ffc76f | ["MIT"] | 1 | 2020-10-30T18:37:39.000Z | 2020-10-30T18:37:39.000Z | commands/enlist.py | SirChopwood/Requisitions-Officer-Bot | 5aa751beb6a362af1fcefe3c8b1d2572b3ffc76f | ["MIT"] | null | null | null | commands/enlist.py | SirChopwood/Requisitions-Officer-Bot | 5aa751beb6a362af1fcefe3c8b1d2572b3ffc76f | ["MIT"] | null | null | null | async def Main(self, message, command, arguments):
await self.run_file("section_slot_assign", message, arguments)
| 39.333333 | 66 | 0.771186 | async def Main(self, message, command, arguments):
await self.run_file("section_slot_assign", message, arguments)
| true | true |
f71fffe42a961cd5e75a8c0d5975af883c4d0f2f | 2,763 | py | Python | _preflight_hook_experiment.py | BapeHiks/pythonista_startup | 060c355e9ecefa069227ae80c061cf532f9148e1 | ["MIT"] | 22 | 2016-04-05T14:56:11.000Z | 2022-02-03T02:52:23.000Z | _preflight_hook_experiment.py | BapeHiks/pythonista_startup | 060c355e9ecefa069227ae80c061cf532f9148e1 | ["MIT"] | 2 | 2016-04-28T08:45:16.000Z | 2017-04-24T21:55:37.000Z | _preflight_hook_experiment.py | BapeHiks/pythonista_startup | 060c355e9ecefa069227ae80c061cf532f9148e1 | ["MIT"] | 3 | 2017-04-23T16:47:33.000Z | 2020-08-05T16:14:49.000Z | """Highly unreliable way to register "preflight hooks", which are run every time you run a script (but not an editor action)."""
from __future__ import absolute_import, division, print_function
def run():
print(u"Installing preflight hooks...")
# There's no official way to add hooks that run before every script run.
# However Pythonista's preflight code imports pythonista_startup once to check what names it contains.
# So we hack __import__ to run all functions in preflight_hooks whenever pythonista_startup is imported by specific bytecodes.
try:
import builtins
except ImportError:
import __builtin__ as builtins
preflight_hooks = []
def _make_new_import():
import sys
_real_import = builtins.__import__
def __import__(name, *args, **kwargs):
if name == "pythonista_startup":
try:
f = sys._getframe(1)
except ValueError:
pass
else:
# These blobs are the bytecodes of the main function of Pythonista's preflight code (from Pythonista 2 and 3 respectively), which is run once before every script run.
if f.f_code.co_code in (
b'y\x0e\x00d\x00\x00d\x01\x00l\x00\x00TWn\x07\x00\x01\x01\x01n\x01\x00Xd\x02\x00S',
##b'y\x1c\x00d\x01\x00d\x00\x00l\x00\x00}\x00\x00t\x01\x00|\x00\x00\x83\x01\x00}\x01\x00Wn\x0e\x00\x01\x01\x01g\x00\x00}\x01\x00Yn\x01\x00Xy\x15\x00t\x02\x00\x83\x00\x00\x01t\x03\x00|\x01\x00\x83\x01\x00\x01Wn\x08\x00\x01\x01\x01Yn\x01\x00Xd\x00\x00S',
b'y\x1c\x00d\x01\x00d\x00\x00l\x00\x00}\x00\x00t\x01\x00|\x00\x00\x83\x01\x00}\x01\x00Wn\x0e\x00\x01\x01\x01g\x00\x00}\x01\x00Yn\x01\x00Xyy\x00d\x01\x00d\x00\x00l\x02\x00}\x02\x00d\x01\x00d\x00\x00l\x03\x00}\x03\x00d\x01\x00d\x00\x00l\x04\x00}\x04\x00d\x01\x00d\x00\x00l\x05\x00}\x05\x00|\x02\x00j\x06\x00d\x00\x00\x83\x01\x00\x01|\x03\x00j\x06\x00d\x00\x00\x83\x01\x00\x01|\x04\x00j\x06\x00d\x00\x00\x83\x01\x00\x01|\x05\x00j\x06\x00d\x00\x00\x83\x01\x00\x01t\x07\x00\x83\x00\x00\x01t\x08\x00|\x01\x00\x83\x01\x00\x01Wn\x08\x00\x01\x01\x01Yn\x01\x00Xd\x00\x00S',
):
for hook in preflight_hooks:
hook()
return _real_import(name, *args, **kwargs)
__import__.patched = True
return __import__
if not getattr(builtins.__import__, "patched", False):
builtins.__import__ = _make_new_import()
del builtins
del _make_new_import
print(u"Done installing preflight hooks.")
if __name__ == "__main__":
run()
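# Note: 'preflight_hooks' is a local list inside run(), captured by the
# patched __import__, so hooks have to be appended from within run() itself.
# A minimal extension sketch (illustrative) placed inside run() would be:
#
#   def my_hook():
#       print(u"about to run a script")
#   preflight_hooks.append(my_hook)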
| 49.339286 | 587 | 0.631198 |
from __future__ import absolute_import, division, print_function
def run():
print(u"Installing preflight hooks...")
# However Pythonista's preflight code imports pythonista_startup once to check what names it contains.
try:
import builtins
except ImportError:
import __builtin__ as builtins
preflight_hooks = []
def _make_new_import():
import sys
_real_import = builtins.__import__
def __import__(name, *args, **kwargs):
if name == "pythonista_startup":
try:
f = sys._getframe(1)
except ValueError:
pass
else:
if f.f_code.co_code in (
b'y\x0e\x00d\x00\x00d\x01\x00l\x00\x00TWn\x07\x00\x01\x01\x01n\x01\x00Xd\x02\x00S',
##b'y\x1c\x00d\x01\x00d\x00\x00l\x00\x00}\x00\x00t\x01\x00|\x00\x00\x83\x01\x00}\x01\x00Wn\x0e\x00\x01\x01\x01g\x00\x00}\x01\x00Yn\x01\x00Xy\x15\x00t\x02\x00\x83\x00\x00\x01t\x03\x00|\x01\x00\x83\x01\x00\x01Wn\x08\x00\x01\x01\x01Yn\x01\x00Xd\x00\x00S',
b'y\x1c\x00d\x01\x00d\x00\x00l\x00\x00}\x00\x00t\x01\x00|\x00\x00\x83\x01\x00}\x01\x00Wn\x0e\x00\x01\x01\x01g\x00\x00}\x01\x00Yn\x01\x00Xyy\x00d\x01\x00d\x00\x00l\x02\x00}\x02\x00d\x01\x00d\x00\x00l\x03\x00}\x03\x00d\x01\x00d\x00\x00l\x04\x00}\x04\x00d\x01\x00d\x00\x00l\x05\x00}\x05\x00|\x02\x00j\x06\x00d\x00\x00\x83\x01\x00\x01|\x03\x00j\x06\x00d\x00\x00\x83\x01\x00\x01|\x04\x00j\x06\x00d\x00\x00\x83\x01\x00\x01|\x05\x00j\x06\x00d\x00\x00\x83\x01\x00\x01t\x07\x00\x83\x00\x00\x01t\x08\x00|\x01\x00\x83\x01\x00\x01Wn\x08\x00\x01\x01\x01Yn\x01\x00Xd\x00\x00S',
):
for hook in preflight_hooks:
hook()
return _real_import(name, *args, **kwargs)
__import__.patched = True
return __import__
if not getattr(builtins.__import__, "patched", False):
builtins.__import__ = _make_new_import()
del builtins
del _make_new_import
print(u"Done installing preflight hooks.")
if __name__ == "__main__":
run()
| true | true |
f7200016b3e4bb76f1473df3974edfb197cc475a | 2,321 | py | Python | tests/hazmat/backends/test_commoncrypto.py | balabit-deps/balabit-os-6-python-cryptography | c31d184a56a18bad89a6444313367be71b5b0877 | ["Apache-2.0", "BSD-3-Clause"] | 1 | 2015-09-25T16:03:32.000Z | 2015-09-25T16:03:32.000Z | tests/hazmat/backends/test_commoncrypto.py | balabit-deps/balabit-os-6-python-cryptography | c31d184a56a18bad89a6444313367be71b5b0877 | ["Apache-2.0", "BSD-3-Clause"] | null | null | null | tests/hazmat/backends/test_commoncrypto.py | balabit-deps/balabit-os-6-python-cryptography | c31d184a56a18bad89a6444313367be71b5b0877 | ["Apache-2.0", "BSD-3-Clause"] | 1 | 2020-07-17T12:26:45.000Z | 2020-07-17T12:26:45.000Z | # This file is dual licensed under the terms of the Apache License, Version
# 2.0, and the BSD License. See the LICENSE file in the root of this repository
# for complete details.
from __future__ import absolute_import, division, print_function
import pytest
from cryptography import utils
from cryptography.exceptions import InternalError, _Reasons
from cryptography.hazmat.backends import _available_backends
from cryptography.hazmat.primitives.ciphers import Cipher, CipherAlgorithm
from cryptography.hazmat.primitives.ciphers.algorithms import AES
from cryptography.hazmat.primitives.ciphers.modes import CBC, GCM
from ...utils import raises_unsupported_algorithm
@utils.register_interface(CipherAlgorithm)
class DummyCipher(object):
name = "dummy-cipher"
block_size = None
key_size = None
@pytest.mark.skipif("commoncrypto" not in
[i.name for i in _available_backends()],
reason="CommonCrypto not available")
class TestCommonCrypto(object):
def test_supports_cipher(self):
from cryptography.hazmat.backends.commoncrypto.backend import backend
assert backend.cipher_supported(None, None) is False
def test_register_duplicate_cipher_adapter(self):
from cryptography.hazmat.backends.commoncrypto.backend import backend
with pytest.raises(ValueError):
backend._register_cipher_adapter(
AES, backend._lib.kCCAlgorithmAES128,
CBC, backend._lib.kCCModeCBC
)
def test_handle_response(self):
from cryptography.hazmat.backends.commoncrypto.backend import backend
with pytest.raises(ValueError):
backend._check_cipher_response(backend._lib.kCCAlignmentError)
with pytest.raises(InternalError):
backend._check_cipher_response(backend._lib.kCCMemoryFailure)
with pytest.raises(InternalError):
backend._check_cipher_response(backend._lib.kCCDecodeError)
def test_nonexistent_aead_cipher(self):
from cryptography.hazmat.backends.commoncrypto.backend import Backend
b = Backend()
cipher = Cipher(
DummyCipher(), GCM(b"fake_iv_here"), backend=b,
)
with raises_unsupported_algorithm(_Reasons.UNSUPPORTED_CIPHER):
cipher.encryptor()
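# These tests are collected but skipped unless the CommonCrypto backend is
# available; a typical invocation on an OS X host would be (illustrative):
#
#   pytest tests/hazmat/backends/test_commoncrypto.py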
| 37.435484 | 79 | 0.734166 |
from __future__ import absolute_import, division, print_function
import pytest
from cryptography import utils
from cryptography.exceptions import InternalError, _Reasons
from cryptography.hazmat.backends import _available_backends
from cryptography.hazmat.primitives.ciphers import Cipher, CipherAlgorithm
from cryptography.hazmat.primitives.ciphers.algorithms import AES
from cryptography.hazmat.primitives.ciphers.modes import CBC, GCM
from ...utils import raises_unsupported_algorithm
@utils.register_interface(CipherAlgorithm)
class DummyCipher(object):
name = "dummy-cipher"
block_size = None
key_size = None
@pytest.mark.skipif("commoncrypto" not in
[i.name for i in _available_backends()],
reason="CommonCrypto not available")
class TestCommonCrypto(object):
def test_supports_cipher(self):
from cryptography.hazmat.backends.commoncrypto.backend import backend
assert backend.cipher_supported(None, None) is False
def test_register_duplicate_cipher_adapter(self):
from cryptography.hazmat.backends.commoncrypto.backend import backend
with pytest.raises(ValueError):
backend._register_cipher_adapter(
AES, backend._lib.kCCAlgorithmAES128,
CBC, backend._lib.kCCModeCBC
)
def test_handle_response(self):
from cryptography.hazmat.backends.commoncrypto.backend import backend
with pytest.raises(ValueError):
backend._check_cipher_response(backend._lib.kCCAlignmentError)
with pytest.raises(InternalError):
backend._check_cipher_response(backend._lib.kCCMemoryFailure)
with pytest.raises(InternalError):
backend._check_cipher_response(backend._lib.kCCDecodeError)
def test_nonexistent_aead_cipher(self):
from cryptography.hazmat.backends.commoncrypto.backend import Backend
b = Backend()
cipher = Cipher(
DummyCipher(), GCM(b"fake_iv_here"), backend=b,
)
with raises_unsupported_algorithm(_Reasons.UNSUPPORTED_CIPHER):
cipher.encryptor()
| true | true |
f720016578b11272f4a943f87114d1bf4673f739 | 15,319 | py | Python | sdk/python/pulumi_azure_nextgen/storagesync/latest/get_registered_server.py | pulumi/pulumi-azure-nextgen | 452736b0a1cf584c2d4c04666e017af6e9b2c15c | ["Apache-2.0"] | 31 | 2020-09-21T09:41:01.000Z | 2021-02-26T13:21:59.000Z | sdk/python/pulumi_azure_nextgen/storagesync/latest/get_registered_server.py | pulumi/pulumi-azure-nextgen | 452736b0a1cf584c2d4c04666e017af6e9b2c15c | ["Apache-2.0"] | 231 | 2020-09-21T09:38:45.000Z | 2021-03-01T11:16:03.000Z | sdk/python/pulumi_azure_nextgen/storagesync/latest/get_registered_server.py | pulumi/pulumi-azure-nextgen | 452736b0a1cf584c2d4c04666e017af6e9b2c15c | ["Apache-2.0"] | 4 | 2020-09-29T14:14:59.000Z | 2021-02-10T20:38:16.000Z | # coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union
from ... import _utilities, _tables
__all__ = [
'GetRegisteredServerResult',
'AwaitableGetRegisteredServerResult',
'get_registered_server',
]
warnings.warn("""The 'latest' version is deprecated. Please migrate to the function in the top-level module: 'azure-nextgen:storagesync:getRegisteredServer'.""", DeprecationWarning)
@pulumi.output_type
class GetRegisteredServerResult:
"""
Registered Server resource.
"""
def __init__(__self__, agent_version=None, cluster_id=None, cluster_name=None, discovery_endpoint_uri=None, friendly_name=None, id=None, last_heart_beat=None, last_operation_name=None, last_workflow_id=None, management_endpoint_uri=None, monitoring_configuration=None, monitoring_endpoint_uri=None, name=None, provisioning_state=None, resource_location=None, server_certificate=None, server_id=None, server_management_error_code=None, server_os_version=None, server_role=None, service_location=None, storage_sync_service_uid=None, type=None):
if agent_version and not isinstance(agent_version, str):
raise TypeError("Expected argument 'agent_version' to be a str")
pulumi.set(__self__, "agent_version", agent_version)
if cluster_id and not isinstance(cluster_id, str):
raise TypeError("Expected argument 'cluster_id' to be a str")
pulumi.set(__self__, "cluster_id", cluster_id)
if cluster_name and not isinstance(cluster_name, str):
raise TypeError("Expected argument 'cluster_name' to be a str")
pulumi.set(__self__, "cluster_name", cluster_name)
if discovery_endpoint_uri and not isinstance(discovery_endpoint_uri, str):
raise TypeError("Expected argument 'discovery_endpoint_uri' to be a str")
pulumi.set(__self__, "discovery_endpoint_uri", discovery_endpoint_uri)
if friendly_name and not isinstance(friendly_name, str):
raise TypeError("Expected argument 'friendly_name' to be a str")
pulumi.set(__self__, "friendly_name", friendly_name)
if id and not isinstance(id, str):
raise TypeError("Expected argument 'id' to be a str")
pulumi.set(__self__, "id", id)
if last_heart_beat and not isinstance(last_heart_beat, str):
raise TypeError("Expected argument 'last_heart_beat' to be a str")
pulumi.set(__self__, "last_heart_beat", last_heart_beat)
if last_operation_name and not isinstance(last_operation_name, str):
raise TypeError("Expected argument 'last_operation_name' to be a str")
pulumi.set(__self__, "last_operation_name", last_operation_name)
if last_workflow_id and not isinstance(last_workflow_id, str):
raise TypeError("Expected argument 'last_workflow_id' to be a str")
pulumi.set(__self__, "last_workflow_id", last_workflow_id)
if management_endpoint_uri and not isinstance(management_endpoint_uri, str):
raise TypeError("Expected argument 'management_endpoint_uri' to be a str")
pulumi.set(__self__, "management_endpoint_uri", management_endpoint_uri)
if monitoring_configuration and not isinstance(monitoring_configuration, str):
raise TypeError("Expected argument 'monitoring_configuration' to be a str")
pulumi.set(__self__, "monitoring_configuration", monitoring_configuration)
if monitoring_endpoint_uri and not isinstance(monitoring_endpoint_uri, str):
raise TypeError("Expected argument 'monitoring_endpoint_uri' to be a str")
pulumi.set(__self__, "monitoring_endpoint_uri", monitoring_endpoint_uri)
if name and not isinstance(name, str):
raise TypeError("Expected argument 'name' to be a str")
pulumi.set(__self__, "name", name)
if provisioning_state and not isinstance(provisioning_state, str):
raise TypeError("Expected argument 'provisioning_state' to be a str")
pulumi.set(__self__, "provisioning_state", provisioning_state)
if resource_location and not isinstance(resource_location, str):
raise TypeError("Expected argument 'resource_location' to be a str")
pulumi.set(__self__, "resource_location", resource_location)
if server_certificate and not isinstance(server_certificate, str):
raise TypeError("Expected argument 'server_certificate' to be a str")
pulumi.set(__self__, "server_certificate", server_certificate)
if server_id and not isinstance(server_id, str):
raise TypeError("Expected argument 'server_id' to be a str")
pulumi.set(__self__, "server_id", server_id)
if server_management_error_code and not isinstance(server_management_error_code, int):
raise TypeError("Expected argument 'server_management_error_code' to be a int")
pulumi.set(__self__, "server_management_error_code", server_management_error_code)
if server_os_version and not isinstance(server_os_version, str):
raise TypeError("Expected argument 'server_os_version' to be a str")
pulumi.set(__self__, "server_os_version", server_os_version)
if server_role and not isinstance(server_role, str):
raise TypeError("Expected argument 'server_role' to be a str")
pulumi.set(__self__, "server_role", server_role)
if service_location and not isinstance(service_location, str):
raise TypeError("Expected argument 'service_location' to be a str")
pulumi.set(__self__, "service_location", service_location)
if storage_sync_service_uid and not isinstance(storage_sync_service_uid, str):
raise TypeError("Expected argument 'storage_sync_service_uid' to be a str")
pulumi.set(__self__, "storage_sync_service_uid", storage_sync_service_uid)
if type and not isinstance(type, str):
raise TypeError("Expected argument 'type' to be a str")
pulumi.set(__self__, "type", type)
@property
@pulumi.getter(name="agentVersion")
def agent_version(self) -> Optional[str]:
"""
Registered Server Agent Version
"""
return pulumi.get(self, "agent_version")
@property
@pulumi.getter(name="clusterId")
def cluster_id(self) -> Optional[str]:
"""
Registered Server clusterId
"""
return pulumi.get(self, "cluster_id")
@property
@pulumi.getter(name="clusterName")
def cluster_name(self) -> Optional[str]:
"""
Registered Server clusterName
"""
return pulumi.get(self, "cluster_name")
@property
@pulumi.getter(name="discoveryEndpointUri")
def discovery_endpoint_uri(self) -> Optional[str]:
"""
Resource discoveryEndpointUri
"""
return pulumi.get(self, "discovery_endpoint_uri")
@property
@pulumi.getter(name="friendlyName")
def friendly_name(self) -> Optional[str]:
"""
Friendly Name
"""
return pulumi.get(self, "friendly_name")
@property
@pulumi.getter
def id(self) -> str:
"""
Fully qualified resource ID for the resource. Ex - /subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/{resourceProviderNamespace}/{resourceType}/{resourceName}
"""
return pulumi.get(self, "id")
@property
@pulumi.getter(name="lastHeartBeat")
def last_heart_beat(self) -> Optional[str]:
"""
Registered Server last heart beat
"""
return pulumi.get(self, "last_heart_beat")
@property
@pulumi.getter(name="lastOperationName")
def last_operation_name(self) -> Optional[str]:
"""
Resource Last Operation Name
"""
return pulumi.get(self, "last_operation_name")
@property
@pulumi.getter(name="lastWorkflowId")
def last_workflow_id(self) -> Optional[str]:
"""
Registered Server lastWorkflowId
"""
return pulumi.get(self, "last_workflow_id")
@property
@pulumi.getter(name="managementEndpointUri")
def management_endpoint_uri(self) -> Optional[str]:
"""
Management Endpoint Uri
"""
return pulumi.get(self, "management_endpoint_uri")
@property
@pulumi.getter(name="monitoringConfiguration")
def monitoring_configuration(self) -> Optional[str]:
"""
Monitoring Configuration
"""
return pulumi.get(self, "monitoring_configuration")
@property
@pulumi.getter(name="monitoringEndpointUri")
def monitoring_endpoint_uri(self) -> Optional[str]:
"""
Telemetry Endpoint Uri
"""
return pulumi.get(self, "monitoring_endpoint_uri")
@property
@pulumi.getter
def name(self) -> str:
"""
The name of the resource
"""
return pulumi.get(self, "name")
@property
@pulumi.getter(name="provisioningState")
def provisioning_state(self) -> Optional[str]:
"""
Registered Server Provisioning State
"""
return pulumi.get(self, "provisioning_state")
@property
@pulumi.getter(name="resourceLocation")
def resource_location(self) -> Optional[str]:
"""
Resource Location
"""
return pulumi.get(self, "resource_location")
@property
@pulumi.getter(name="serverCertificate")
def server_certificate(self) -> Optional[str]:
"""
Registered Server Certificate
"""
return pulumi.get(self, "server_certificate")
@property
@pulumi.getter(name="serverId")
def server_id(self) -> Optional[str]:
"""
Registered Server serverId
"""
return pulumi.get(self, "server_id")
@property
@pulumi.getter(name="serverManagementErrorCode")
def server_management_error_code(self) -> Optional[int]:
"""
Registered Server Management Error Code
"""
return pulumi.get(self, "server_management_error_code")
@property
@pulumi.getter(name="serverOSVersion")
def server_os_version(self) -> Optional[str]:
"""
Registered Server OS Version
"""
return pulumi.get(self, "server_os_version")
@property
@pulumi.getter(name="serverRole")
def server_role(self) -> Optional[str]:
"""
Registered Server serverRole
"""
return pulumi.get(self, "server_role")
@property
@pulumi.getter(name="serviceLocation")
def service_location(self) -> Optional[str]:
"""
Service Location
"""
return pulumi.get(self, "service_location")
@property
@pulumi.getter(name="storageSyncServiceUid")
def storage_sync_service_uid(self) -> Optional[str]:
"""
Registered Server storageSyncServiceUid
"""
return pulumi.get(self, "storage_sync_service_uid")
@property
@pulumi.getter
def type(self) -> str:
"""
The type of the resource. E.g. "Microsoft.Compute/virtualMachines" or "Microsoft.Storage/storageAccounts"
"""
return pulumi.get(self, "type")
class AwaitableGetRegisteredServerResult(GetRegisteredServerResult):
# pylint: disable=using-constant-test
def __await__(self):
if False:
yield self
return GetRegisteredServerResult(
agent_version=self.agent_version,
cluster_id=self.cluster_id,
cluster_name=self.cluster_name,
discovery_endpoint_uri=self.discovery_endpoint_uri,
friendly_name=self.friendly_name,
id=self.id,
last_heart_beat=self.last_heart_beat,
last_operation_name=self.last_operation_name,
last_workflow_id=self.last_workflow_id,
management_endpoint_uri=self.management_endpoint_uri,
monitoring_configuration=self.monitoring_configuration,
monitoring_endpoint_uri=self.monitoring_endpoint_uri,
name=self.name,
provisioning_state=self.provisioning_state,
resource_location=self.resource_location,
server_certificate=self.server_certificate,
server_id=self.server_id,
server_management_error_code=self.server_management_error_code,
server_os_version=self.server_os_version,
server_role=self.server_role,
service_location=self.service_location,
storage_sync_service_uid=self.storage_sync_service_uid,
type=self.type)
def get_registered_server(resource_group_name: Optional[str] = None,
server_id: Optional[str] = None,
storage_sync_service_name: Optional[str] = None,
opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetRegisteredServerResult:
"""
Registered Server resource.
Latest API Version: 2020-03-01.
:param str resource_group_name: The name of the resource group. The name is case insensitive.
:param str server_id: GUID identifying the on-premises server.
:param str storage_sync_service_name: Name of Storage Sync Service resource.
"""
pulumi.log.warn("get_registered_server is deprecated: The 'latest' version is deprecated. Please migrate to the function in the top-level module: 'azure-nextgen:storagesync:getRegisteredServer'.")
__args__ = dict()
__args__['resourceGroupName'] = resource_group_name
__args__['serverId'] = server_id
__args__['storageSyncServiceName'] = storage_sync_service_name
if opts is None:
opts = pulumi.InvokeOptions()
if opts.version is None:
opts.version = _utilities.get_version()
__ret__ = pulumi.runtime.invoke('azure-nextgen:storagesync/latest:getRegisteredServer', __args__, opts=opts, typ=GetRegisteredServerResult).value
return AwaitableGetRegisteredServerResult(
agent_version=__ret__.agent_version,
cluster_id=__ret__.cluster_id,
cluster_name=__ret__.cluster_name,
discovery_endpoint_uri=__ret__.discovery_endpoint_uri,
friendly_name=__ret__.friendly_name,
id=__ret__.id,
last_heart_beat=__ret__.last_heart_beat,
last_operation_name=__ret__.last_operation_name,
last_workflow_id=__ret__.last_workflow_id,
management_endpoint_uri=__ret__.management_endpoint_uri,
monitoring_configuration=__ret__.monitoring_configuration,
monitoring_endpoint_uri=__ret__.monitoring_endpoint_uri,
name=__ret__.name,
provisioning_state=__ret__.provisioning_state,
resource_location=__ret__.resource_location,
server_certificate=__ret__.server_certificate,
server_id=__ret__.server_id,
server_management_error_code=__ret__.server_management_error_code,
server_os_version=__ret__.server_os_version,
server_role=__ret__.server_role,
service_location=__ret__.service_location,
storage_sync_service_uid=__ret__.storage_sync_service_uid,
type=__ret__.type)
# --- test/functional/rpc_psbt.py (repo: GumFruit/cpuchain, license: MIT) ---
#!/usr/bin/env python3
# Copyright (c) 2018-2020 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test the Partially Signed Transaction RPCs.
"""
from decimal import Decimal
from itertools import product
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import (
assert_approx,
assert_equal,
assert_greater_than,
assert_raises_rpc_error,
find_output,
)
import json
import os
MAX_BIP125_RBF_SEQUENCE = 0xfffffffd
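# Per BIP 125, an input signals replaceability when its nSequence is below
# 0xfffffffe; 0xfffffffd is therefore the highest value that still signals RBF.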
# Test suite exercising PSBT creation, signing, combining, finalizing, and analysis.
class PSBTTest(BitcoinTestFramework):
def set_test_params(self):
self.num_nodes = 3
self.extra_args = [
["-walletrbf=1"],
["-walletrbf=0", "-changetype=legacy"],
[]
]
self.supports_cli = False
def skip_test_if_missing_module(self):
self.skip_if_no_wallet()
# TODO: Re-enable this test with segwit v1
def test_utxo_conversion(self):
mining_node = self.nodes[2]
offline_node = self.nodes[0]
online_node = self.nodes[1]
# Disconnect offline node from others
# Topology of test network is linear, so this one call is enough
self.disconnect_nodes(0, 1)
# Create watchonly on online_node
online_node.createwallet(wallet_name='wonline', disable_private_keys=True)
wonline = online_node.get_wallet_rpc('wonline')
w2 = online_node.get_wallet_rpc('')
# Mine a transaction that credits the offline address
offline_addr = offline_node.getnewaddress(address_type="p2sh-segwit")
online_addr = w2.getnewaddress(address_type="p2sh-segwit")
wonline.importaddress(offline_addr, "", False)
mining_node.sendtoaddress(address=offline_addr, amount=1.0)
mining_node.generate(nblocks=1)
self.sync_blocks([mining_node, online_node])
# Construct an unsigned PSBT on the online node (who doesn't know the output is Segwit, so will include a non-witness UTXO)
utxos = wonline.listunspent(addresses=[offline_addr])
raw = wonline.createrawtransaction([{"txid":utxos[0]["txid"], "vout":utxos[0]["vout"]}],[{online_addr:0.9999}])
psbt = wonline.walletprocesspsbt(online_node.converttopsbt(raw))["psbt"]
assert "non_witness_utxo" in mining_node.decodepsbt(psbt)["inputs"][0]
# Have the offline node sign the PSBT (which will update the UTXO to segwit)
signed_psbt = offline_node.walletprocesspsbt(psbt)["psbt"]
assert "witness_utxo" in mining_node.decodepsbt(signed_psbt)["inputs"][0]
# Make sure we can mine the resulting transaction
txid = mining_node.sendrawtransaction(mining_node.finalizepsbt(signed_psbt)["hex"])
mining_node.generate(1)
self.sync_blocks([mining_node, online_node])
assert_equal(online_node.gettxout(txid,0)["confirmations"], 1)
wonline.unloadwallet()
# Reconnect
self.connect_nodes(0, 1)
self.connect_nodes(0, 2)
def assert_change_type(self, psbtx, expected_type):
"""Assert that the given PSBT has a change output with the given type."""
# The decodepsbt RPC is stateless and independent of any settings, we can always just call it on the first node
decoded_psbt = self.nodes[0].decodepsbt(psbtx["psbt"])
changepos = psbtx["changepos"]
assert_equal(decoded_psbt["tx"]["vout"][changepos]["scriptPubKey"]["type"], expected_type)
def run_test(self):
# Create and fund a raw tx for sending 10 CPU
psbtx1 = self.nodes[0].walletcreatefundedpsbt([], {self.nodes[2].getnewaddress():10})['psbt']
# If inputs are specified, do not automatically add more:
utxo1 = self.nodes[0].listunspent()[0]
assert_raises_rpc_error(-4, "Insufficient funds", self.nodes[0].walletcreatefundedpsbt, [{"txid": utxo1['txid'], "vout": utxo1['vout']}], {self.nodes[2].getnewaddress():90})
psbtx1 = self.nodes[0].walletcreatefundedpsbt([{"txid": utxo1['txid'], "vout": utxo1['vout']}], {self.nodes[2].getnewaddress():90}, 0, {"add_inputs": True})['psbt']
assert_equal(len(self.nodes[0].decodepsbt(psbtx1)['tx']['vin']), 2)
# Inputs argument can be null
self.nodes[0].walletcreatefundedpsbt(None, {self.nodes[2].getnewaddress():10})
# Node 1 should not be able to add anything to it but still return the psbtx same as before
psbtx = self.nodes[1].walletprocesspsbt(psbtx1)['psbt']
assert_equal(psbtx1, psbtx)
# Sign the transaction and send
signed_tx = self.nodes[0].walletprocesspsbt(psbtx)['psbt']
final_tx = self.nodes[0].finalizepsbt(signed_tx)['hex']
self.nodes[0].sendrawtransaction(final_tx)
# Manually selected inputs can be locked:
assert_equal(len(self.nodes[0].listlockunspent()), 0)
utxo1 = self.nodes[0].listunspent()[0]
psbtx1 = self.nodes[0].walletcreatefundedpsbt([{"txid": utxo1['txid'], "vout": utxo1['vout']}], {self.nodes[2].getnewaddress():1}, 0,{"lockUnspents": True})["psbt"]
assert_equal(len(self.nodes[0].listlockunspent()), 1)
# Locks are ignored for manually selected inputs
self.nodes[0].walletcreatefundedpsbt([{"txid": utxo1['txid'], "vout": utxo1['vout']}], {self.nodes[2].getnewaddress():1}, 0)
# Create p2sh, p2wpkh, and p2wsh addresses
pubkey0 = self.nodes[0].getaddressinfo(self.nodes[0].getnewaddress())['pubkey']
pubkey1 = self.nodes[1].getaddressinfo(self.nodes[1].getnewaddress())['pubkey']
pubkey2 = self.nodes[2].getaddressinfo(self.nodes[2].getnewaddress())['pubkey']
# Setup watchonly wallets
self.nodes[2].createwallet(wallet_name='wmulti', disable_private_keys=True)
wmulti = self.nodes[2].get_wallet_rpc('wmulti')
# Create all the addresses
p2sh = wmulti.addmultisigaddress(2, [pubkey0, pubkey1, pubkey2], "", "legacy")['address']
p2wsh = wmulti.addmultisigaddress(2, [pubkey0, pubkey1, pubkey2], "", "bech32")['address']
p2sh_p2wsh = wmulti.addmultisigaddress(2, [pubkey0, pubkey1, pubkey2], "", "p2sh-segwit")['address']
if not self.options.descriptors:
wmulti.importaddress(p2sh)
wmulti.importaddress(p2wsh)
wmulti.importaddress(p2sh_p2wsh)
p2wpkh = self.nodes[1].getnewaddress("", "bech32")
p2pkh = self.nodes[1].getnewaddress("", "legacy")
p2sh_p2wpkh = self.nodes[1].getnewaddress("", "p2sh-segwit")
# fund those addresses
rawtx = self.nodes[0].createrawtransaction([], {p2sh:10, p2wsh:10, p2wpkh:10, p2sh_p2wsh:10, p2sh_p2wpkh:10, p2pkh:10})
rawtx = self.nodes[0].fundrawtransaction(rawtx, {"changePosition":3})
signed_tx = self.nodes[0].signrawtransactionwithwallet(rawtx['hex'])['hex']
txid = self.nodes[0].sendrawtransaction(signed_tx)
self.nodes[0].generate(6)
self.sync_all()
# Find the output pos
p2sh_pos = -1
p2wsh_pos = -1
p2wpkh_pos = -1
p2pkh_pos = -1
p2sh_p2wsh_pos = -1
p2sh_p2wpkh_pos = -1
decoded = self.nodes[0].decoderawtransaction(signed_tx)
for out in decoded['vout']:
if out['scriptPubKey']['address'] == p2sh:
p2sh_pos = out['n']
elif out['scriptPubKey']['address'] == p2wsh:
p2wsh_pos = out['n']
elif out['scriptPubKey']['address'] == p2wpkh:
p2wpkh_pos = out['n']
elif out['scriptPubKey']['address'] == p2sh_p2wsh:
p2sh_p2wsh_pos = out['n']
elif out['scriptPubKey']['address'] == p2sh_p2wpkh:
p2sh_p2wpkh_pos = out['n']
elif out['scriptPubKey']['address'] == p2pkh:
p2pkh_pos = out['n']
inputs = [{"txid": txid, "vout": p2wpkh_pos}, {"txid": txid, "vout": p2sh_p2wpkh_pos}, {"txid": txid, "vout": p2pkh_pos}]
outputs = [{self.nodes[1].getnewaddress(): 29.99}]
# spend single key from node 1
created_psbt = self.nodes[1].walletcreatefundedpsbt(inputs, outputs)
walletprocesspsbt_out = self.nodes[1].walletprocesspsbt(created_psbt['psbt'])
# Make sure it has both types of UTXOs
decoded = self.nodes[1].decodepsbt(walletprocesspsbt_out['psbt'])
assert 'non_witness_utxo' in decoded['inputs'][0]
assert 'witness_utxo' in decoded['inputs'][0]
# Check decodepsbt fee calculation (input values shall only be counted once per UTXO)
assert_equal(decoded['fee'], created_psbt['fee'])
assert_equal(walletprocesspsbt_out['complete'], True)
self.nodes[1].sendrawtransaction(self.nodes[1].finalizepsbt(walletprocesspsbt_out['psbt'])['hex'])
self.log.info("Test walletcreatefundedpsbt fee rate of 10000 sat/vB and 0.1 CPU/kvB produces a total fee at or slightly below -maxtxfee (~0.05290000)")
res1 = self.nodes[1].walletcreatefundedpsbt(inputs, outputs, 0, {"fee_rate": 10000, "add_inputs": True})
assert_approx(res1["fee"], 0.055, 0.005)
res2 = self.nodes[1].walletcreatefundedpsbt(inputs, outputs, 0, {"feeRate": "0.1", "add_inputs": True})
assert_approx(res2["fee"], 0.055, 0.005)
self.log.info("Test min fee rate checks with walletcreatefundedpsbt are bypassed, e.g. a fee_rate under 1 sat/vB is allowed")
res3 = self.nodes[1].walletcreatefundedpsbt(inputs, outputs, 0, {"fee_rate": "0.999", "add_inputs": True})
assert_approx(res3["fee"], 0.00000381, 0.0000001)
res4 = self.nodes[1].walletcreatefundedpsbt(inputs, outputs, 0, {"feeRate": 0.00000999, "add_inputs": True})
assert_approx(res4["fee"], 0.00000381, 0.0000001)
self.log.info("Test min fee rate checks with walletcreatefundedpsbt are bypassed and that funding non-standard 'zero-fee' transactions is valid")
for param, zero_value in product(["fee_rate", "feeRate"], [0, 0.000, 0.00000000, "0", "0.000", "0.00000000"]):
assert_equal(0, self.nodes[1].walletcreatefundedpsbt(inputs, outputs, 0, {param: zero_value, "add_inputs": True})["fee"])
self.log.info("Test invalid fee rate settings")
for param, value in {("fee_rate", 100000), ("feeRate", 1)}:
assert_raises_rpc_error(-4, "Fee exceeds maximum configured by user (e.g. -maxtxfee, maxfeerate)",
self.nodes[1].walletcreatefundedpsbt, inputs, outputs, 0, {param: value, "add_inputs": True})
assert_raises_rpc_error(-3, "Amount out of range",
self.nodes[1].walletcreatefundedpsbt, inputs, outputs, 0, {param: -1, "add_inputs": True})
assert_raises_rpc_error(-3, "Amount is not a number or string",
self.nodes[1].walletcreatefundedpsbt, inputs, outputs, 0, {param: {"foo": "bar"}, "add_inputs": True})
# Test fee rate values that don't pass fixed-point parsing checks.
for invalid_value in ["", 0.000000001, 1e-09, 1.111111111, 1111111111111111, "31.999999999999999999999"]:
assert_raises_rpc_error(-3, "Invalid amount",
self.nodes[1].walletcreatefundedpsbt, inputs, outputs, 0, {param: invalid_value, "add_inputs": True})
# Test fee_rate values that cannot be represented in sat/vB.
for invalid_value in [0.0001, 0.00000001, 0.00099999, 31.99999999, "0.0001", "0.00000001", "0.00099999", "31.99999999"]:
assert_raises_rpc_error(-3, "Invalid amount",
self.nodes[1].walletcreatefundedpsbt, inputs, outputs, 0, {"fee_rate": invalid_value, "add_inputs": True})
self.log.info("- raises RPC error if both feeRate and fee_rate are passed")
assert_raises_rpc_error(-8, "Cannot specify both fee_rate (sat/vB) and feeRate (CPU/kvB)",
self.nodes[1].walletcreatefundedpsbt, inputs, outputs, 0, {"fee_rate": 0.1, "feeRate": 0.1, "add_inputs": True})
self.log.info("- raises RPC error if both feeRate and estimate_mode passed")
assert_raises_rpc_error(-8, "Cannot specify both estimate_mode and feeRate",
self.nodes[1].walletcreatefundedpsbt, inputs, outputs, 0, {"estimate_mode": "economical", "feeRate": 0.1, "add_inputs": True})
for param in ["feeRate", "fee_rate"]:
self.log.info("- raises RPC error if both {} and conf_target are passed".format(param))
assert_raises_rpc_error(-8, "Cannot specify both conf_target and {}. Please provide either a confirmation "
"target in blocks for automatic fee estimation, or an explicit fee rate.".format(param),
self.nodes[1].walletcreatefundedpsbt ,inputs, outputs, 0, {param: 1, "conf_target": 1, "add_inputs": True})
self.log.info("- raises RPC error if both fee_rate and estimate_mode are passed")
assert_raises_rpc_error(-8, "Cannot specify both estimate_mode and fee_rate",
self.nodes[1].walletcreatefundedpsbt ,inputs, outputs, 0, {"fee_rate": 1, "estimate_mode": "economical", "add_inputs": True})
self.log.info("- raises RPC error with invalid estimate_mode settings")
for k, v in {"number": 42, "object": {"foo": "bar"}}.items():
assert_raises_rpc_error(-3, "Expected type string for estimate_mode, got {}".format(k),
self.nodes[1].walletcreatefundedpsbt, inputs, outputs, 0, {"estimate_mode": v, "conf_target": 0.1, "add_inputs": True})
for mode in ["", "foo", Decimal("3.141592")]:
assert_raises_rpc_error(-8, 'Invalid estimate_mode parameter, must be one of: "unset", "economical", "conservative"',
self.nodes[1].walletcreatefundedpsbt, inputs, outputs, 0, {"estimate_mode": mode, "conf_target": 0.1, "add_inputs": True})
self.log.info("- raises RPC error with invalid conf_target settings")
for mode in ["unset", "economical", "conservative"]:
self.log.debug("{}".format(mode))
for k, v in {"string": "", "object": {"foo": "bar"}}.items():
assert_raises_rpc_error(-3, "Expected type number for conf_target, got {}".format(k),
self.nodes[1].walletcreatefundedpsbt, inputs, outputs, 0, {"estimate_mode": mode, "conf_target": v, "add_inputs": True})
for n in [-1, 0, 1009]:
assert_raises_rpc_error(-8, "Invalid conf_target, must be between 1 and 1008", # max value of 1008 per src/policy/fees.h
self.nodes[1].walletcreatefundedpsbt, inputs, outputs, 0, {"estimate_mode": mode, "conf_target": n, "add_inputs": True})
self.log.info("Test walletcreatefundedpsbt with too-high fee rate produces total fee well above -maxtxfee and raises RPC error")
# previously this was silently capped at -maxtxfee
for bool_add, outputs_array in {True: outputs, False: [{self.nodes[1].getnewaddress(): 1}]}.items():
msg = "Fee exceeds maximum configured by user (e.g. -maxtxfee, maxfeerate)"
assert_raises_rpc_error(-4, msg, self.nodes[1].walletcreatefundedpsbt, inputs, outputs_array, 0, {"fee_rate": 1000000, "add_inputs": bool_add})
assert_raises_rpc_error(-4, msg, self.nodes[1].walletcreatefundedpsbt, inputs, outputs_array, 0, {"feeRate": 1, "add_inputs": bool_add})
self.log.info("Test various PSBT operations")
# partially sign multisig things with node 1
psbtx = wmulti.walletcreatefundedpsbt(inputs=[{"txid":txid,"vout":p2wsh_pos},{"txid":txid,"vout":p2sh_pos},{"txid":txid,"vout":p2sh_p2wsh_pos}], outputs={self.nodes[1].getnewaddress():29.99}, options={'changeAddress': self.nodes[1].getrawchangeaddress()})['psbt']
walletprocesspsbt_out = self.nodes[1].walletprocesspsbt(psbtx)
psbtx = walletprocesspsbt_out['psbt']
assert_equal(walletprocesspsbt_out['complete'], False)
# Unload wmulti, we don't need it anymore
wmulti.unloadwallet()
# partially sign with node 2. This should be complete and sendable
walletprocesspsbt_out = self.nodes[2].walletprocesspsbt(psbtx)
assert_equal(walletprocesspsbt_out['complete'], True)
self.nodes[2].sendrawtransaction(self.nodes[2].finalizepsbt(walletprocesspsbt_out['psbt'])['hex'])
# check that walletprocesspsbt fails to decode a non-psbt
rawtx = self.nodes[1].createrawtransaction([{"txid":txid,"vout":p2wpkh_pos}], {self.nodes[1].getnewaddress():9.99})
assert_raises_rpc_error(-22, "TX decode failed", self.nodes[1].walletprocesspsbt, rawtx)
# Convert a non-psbt to psbt and make sure we can decode it
rawtx = self.nodes[0].createrawtransaction([], {self.nodes[1].getnewaddress():10})
rawtx = self.nodes[0].fundrawtransaction(rawtx)
new_psbt = self.nodes[0].converttopsbt(rawtx['hex'])
self.nodes[0].decodepsbt(new_psbt)
# Make sure that a non-psbt with signatures cannot be converted
# Error could be either "TX decode failed" (segwit inputs causes parsing to fail) or "Inputs must not have scriptSigs and scriptWitnesses"
# We must set iswitness=True because the serialized transaction has inputs and is therefore a witness transaction
signedtx = self.nodes[0].signrawtransactionwithwallet(rawtx['hex'])
assert_raises_rpc_error(-22, "", self.nodes[0].converttopsbt, hexstring=signedtx['hex'], iswitness=True)
assert_raises_rpc_error(-22, "", self.nodes[0].converttopsbt, hexstring=signedtx['hex'], permitsigdata=False, iswitness=True)
# Unless we allow it to convert and strip signatures
self.nodes[0].converttopsbt(signedtx['hex'], True)
# Explicitly allow converting non-empty txs
new_psbt = self.nodes[0].converttopsbt(rawtx['hex'])
self.nodes[0].decodepsbt(new_psbt)
# Create outputs to nodes 1 and 2
node1_addr = self.nodes[1].getnewaddress()
node2_addr = self.nodes[2].getnewaddress()
txid1 = self.nodes[0].sendtoaddress(node1_addr, 13)
txid2 = self.nodes[0].sendtoaddress(node2_addr, 13)
blockhash = self.nodes[0].generate(6)[0]
self.sync_all()
vout1 = find_output(self.nodes[1], txid1, 13, blockhash=blockhash)
vout2 = find_output(self.nodes[2], txid2, 13, blockhash=blockhash)
# Create a psbt spending outputs from nodes 1 and 2
psbt_orig = self.nodes[0].createpsbt([{"txid":txid1, "vout":vout1}, {"txid":txid2, "vout":vout2}], {self.nodes[0].getnewaddress():25.999})
# Update psbts, should only have data for one input and not the other
psbt1 = self.nodes[1].walletprocesspsbt(psbt_orig, False, "ALL")['psbt']
psbt1_decoded = self.nodes[0].decodepsbt(psbt1)
assert psbt1_decoded['inputs'][0] and not psbt1_decoded['inputs'][1]
# Check that BIP32 path was added
assert "bip32_derivs" in psbt1_decoded['inputs'][0]
psbt2 = self.nodes[2].walletprocesspsbt(psbt_orig, False, "ALL", False)['psbt']
psbt2_decoded = self.nodes[0].decodepsbt(psbt2)
assert not psbt2_decoded['inputs'][0] and psbt2_decoded['inputs'][1]
# Check that BIP32 paths were not added
assert "bip32_derivs" not in psbt2_decoded['inputs'][1]
# Sign PSBTs (workaround issue #18039)
psbt1 = self.nodes[1].walletprocesspsbt(psbt_orig)['psbt']
psbt2 = self.nodes[2].walletprocesspsbt(psbt_orig)['psbt']
# Combine, finalize, and send the psbts
combined = self.nodes[0].combinepsbt([psbt1, psbt2])
finalized = self.nodes[0].finalizepsbt(combined)['hex']
self.nodes[0].sendrawtransaction(finalized)
self.nodes[0].generate(6)
self.sync_all()
        # Test additional args in walletcreatefundedpsbt
# Make sure both pre-included and funded inputs
# have the correct sequence numbers based on
# replaceable arg
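        # (non-replaceable inputs typically carry nSequence 0xfffffffe or
        # 0xffffffff, i.e. strictly greater than MAX_BIP125_RBF_SEQUENCE)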
block_height = self.nodes[0].getblockcount()
unspent = self.nodes[0].listunspent()[0]
psbtx_info = self.nodes[0].walletcreatefundedpsbt([{"txid":unspent["txid"], "vout":unspent["vout"]}], [{self.nodes[2].getnewaddress():unspent["amount"]+1}], block_height+2, {"replaceable": False, "add_inputs": True}, False)
decoded_psbt = self.nodes[0].decodepsbt(psbtx_info["psbt"])
for tx_in, psbt_in in zip(decoded_psbt["tx"]["vin"], decoded_psbt["inputs"]):
assert_greater_than(tx_in["sequence"], MAX_BIP125_RBF_SEQUENCE)
assert "bip32_derivs" not in psbt_in
assert_equal(decoded_psbt["tx"]["locktime"], block_height+2)
# Same construction with only locktime set and RBF explicitly enabled
psbtx_info = self.nodes[0].walletcreatefundedpsbt([{"txid":unspent["txid"], "vout":unspent["vout"]}], [{self.nodes[2].getnewaddress():unspent["amount"]+1}], block_height, {"replaceable": True, "add_inputs": True}, True)
decoded_psbt = self.nodes[0].decodepsbt(psbtx_info["psbt"])
for tx_in, psbt_in in zip(decoded_psbt["tx"]["vin"], decoded_psbt["inputs"]):
assert_equal(tx_in["sequence"], MAX_BIP125_RBF_SEQUENCE)
assert "bip32_derivs" in psbt_in
assert_equal(decoded_psbt["tx"]["locktime"], block_height)
# Same construction without optional arguments
psbtx_info = self.nodes[0].walletcreatefundedpsbt([], [{self.nodes[2].getnewaddress():unspent["amount"]+1}])
decoded_psbt = self.nodes[0].decodepsbt(psbtx_info["psbt"])
for tx_in, psbt_in in zip(decoded_psbt["tx"]["vin"], decoded_psbt["inputs"]):
assert_equal(tx_in["sequence"], MAX_BIP125_RBF_SEQUENCE)
assert "bip32_derivs" in psbt_in
assert_equal(decoded_psbt["tx"]["locktime"], 0)
# Same construction without optional arguments, for a node with -walletrbf=0
unspent1 = self.nodes[1].listunspent()[0]
psbtx_info = self.nodes[1].walletcreatefundedpsbt([{"txid":unspent1["txid"], "vout":unspent1["vout"]}], [{self.nodes[2].getnewaddress():unspent1["amount"]+1}], block_height, {"add_inputs": True})
decoded_psbt = self.nodes[1].decodepsbt(psbtx_info["psbt"])
for tx_in, psbt_in in zip(decoded_psbt["tx"]["vin"], decoded_psbt["inputs"]):
assert_greater_than(tx_in["sequence"], MAX_BIP125_RBF_SEQUENCE)
assert "bip32_derivs" in psbt_in
        # A change address taken from a wallet without P2SH inner-script access
        # should still result in success when attempting BnB coin selection
self.nodes[0].walletcreatefundedpsbt([], [{self.nodes[2].getnewaddress():unspent["amount"]+1}], block_height+2, {"changeAddress":self.nodes[1].getnewaddress()}, False)
# Make sure the wallet's change type is respected by default
small_output = {self.nodes[0].getnewaddress():0.1}
psbtx_native = self.nodes[0].walletcreatefundedpsbt([], [small_output])
self.assert_change_type(psbtx_native, "witness_v0_keyhash")
psbtx_legacy = self.nodes[1].walletcreatefundedpsbt([], [small_output])
self.assert_change_type(psbtx_legacy, "pubkeyhash")
# Make sure the change type of the wallet can also be overwritten
psbtx_np2wkh = self.nodes[1].walletcreatefundedpsbt([], [small_output], 0, {"change_type":"p2sh-segwit"})
self.assert_change_type(psbtx_np2wkh, "scripthash")
# Make sure the change type cannot be specified if a change address is given
invalid_options = {"change_type":"legacy","changeAddress":self.nodes[0].getnewaddress()}
assert_raises_rpc_error(-8, "both change address and address type options", self.nodes[0].walletcreatefundedpsbt, [], [small_output], 0, invalid_options)
# Regression test for 14473 (mishandling of already-signed witness transaction):
psbtx_info = self.nodes[0].walletcreatefundedpsbt([{"txid":unspent["txid"], "vout":unspent["vout"]}], [{self.nodes[2].getnewaddress():unspent["amount"]+1}], 0, {"add_inputs": True})
complete_psbt = self.nodes[0].walletprocesspsbt(psbtx_info["psbt"])
double_processed_psbt = self.nodes[0].walletprocesspsbt(complete_psbt["psbt"])
assert_equal(complete_psbt, double_processed_psbt)
# We don't care about the decode result, but decoding must succeed.
self.nodes[0].decodepsbt(double_processed_psbt["psbt"])
# Make sure unsafe inputs are included if specified
self.nodes[2].createwallet(wallet_name="unsafe")
wunsafe = self.nodes[2].get_wallet_rpc("unsafe")
self.nodes[0].sendtoaddress(wunsafe.getnewaddress(), 2)
self.sync_mempools()
assert_raises_rpc_error(-4, "Insufficient funds", wunsafe.walletcreatefundedpsbt, [], [{self.nodes[0].getnewaddress(): 1}])
wunsafe.walletcreatefundedpsbt([], [{self.nodes[0].getnewaddress(): 1}], 0, {"include_unsafe": True})
# BIP 174 Test Vectors
# Check that unknown values are just passed through
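        # (BIP 174 specifies that key-value pairs an entity does not understand
        # must be passed through unchanged)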
unknown_psbt = "cHNidP8BAD8CAAAAAf//////////////////////////////////////////AAAAAAD/////AQAAAAAAAAAAA2oBAAAAAAAACg8BAgMEBQYHCAkPAQIDBAUGBwgJCgsMDQ4PAAA="
unknown_out = self.nodes[0].walletprocesspsbt(unknown_psbt)['psbt']
assert_equal(unknown_psbt, unknown_out)
# Open the data file
with open(os.path.join(os.path.dirname(os.path.realpath(__file__)), 'data/rpc_psbt.json'), encoding='utf-8') as f:
d = json.load(f)
invalids = d['invalid']
valids = d['valid']
creators = d['creator']
signers = d['signer']
combiners = d['combiner']
finalizers = d['finalizer']
extractors = d['extractor']
# Invalid PSBTs
for invalid in invalids:
assert_raises_rpc_error(-22, "TX decode failed", self.nodes[0].decodepsbt, invalid)
# Valid PSBTs
for valid in valids:
self.nodes[0].decodepsbt(valid)
# Creator Tests
for creator in creators:
created_tx = self.nodes[0].createpsbt(creator['inputs'], creator['outputs'])
assert_equal(created_tx, creator['result'])
# Signer tests
for i, signer in enumerate(signers):
self.nodes[2].createwallet(wallet_name="wallet{}".format(i))
wrpc = self.nodes[2].get_wallet_rpc("wallet{}".format(i))
for key in signer['privkeys']:
wrpc.importprivkey(key)
signed_tx = wrpc.walletprocesspsbt(signer['psbt'])['psbt']
assert_equal(signed_tx, signer['result'])
# Combiner test
for combiner in combiners:
combined = self.nodes[2].combinepsbt(combiner['combine'])
assert_equal(combined, combiner['result'])
# Empty combiner test
assert_raises_rpc_error(-8, "Parameter 'txs' cannot be empty", self.nodes[0].combinepsbt, [])
# Finalizer test
for finalizer in finalizers:
finalized = self.nodes[2].finalizepsbt(finalizer['finalize'], False)['psbt']
assert_equal(finalized, finalizer['result'])
# Extractor test
for extractor in extractors:
extracted = self.nodes[2].finalizepsbt(extractor['extract'], True)['hex']
assert_equal(extracted, extractor['result'])
# Unload extra wallets
for i, signer in enumerate(signers):
self.nodes[2].unloadwallet("wallet{}".format(i))
# TODO: Re-enable this for segwit v1
# self.test_utxo_conversion()
# Test that psbts with p2pkh outputs are created properly
p2pkh = self.nodes[0].getnewaddress(address_type='legacy')
psbt = self.nodes[1].walletcreatefundedpsbt([], [{p2pkh : 1}], 0, {"includeWatching" : True}, True)
self.nodes[0].decodepsbt(psbt['psbt'])
# Test decoding error: invalid base64
assert_raises_rpc_error(-22, "TX decode failed invalid base64", self.nodes[0].decodepsbt, ";definitely not base64;")
# Send to all types of addresses
addr1 = self.nodes[1].getnewaddress("", "bech32")
txid1 = self.nodes[0].sendtoaddress(addr1, 11)
vout1 = find_output(self.nodes[0], txid1, 11)
addr2 = self.nodes[1].getnewaddress("", "legacy")
txid2 = self.nodes[0].sendtoaddress(addr2, 11)
vout2 = find_output(self.nodes[0], txid2, 11)
addr3 = self.nodes[1].getnewaddress("", "p2sh-segwit")
txid3 = self.nodes[0].sendtoaddress(addr3, 11)
vout3 = find_output(self.nodes[0], txid3, 11)
self.sync_all()
def test_psbt_input_keys(psbt_input, keys):
"""Check that the psbt input has only the expected keys."""
assert_equal(set(keys), set(psbt_input.keys()))
# Create a PSBT. None of the inputs are filled initially
psbt = self.nodes[1].createpsbt([{"txid":txid1, "vout":vout1},{"txid":txid2, "vout":vout2},{"txid":txid3, "vout":vout3}], {self.nodes[0].getnewaddress():32.999})
decoded = self.nodes[1].decodepsbt(psbt)
test_psbt_input_keys(decoded['inputs'][0], [])
test_psbt_input_keys(decoded['inputs'][1], [])
test_psbt_input_keys(decoded['inputs'][2], [])
# Update a PSBT with UTXOs from the node
# Bech32 inputs should be filled with witness UTXO. Other inputs should not be filled because they are non-witness
updated = self.nodes[1].utxoupdatepsbt(psbt)
decoded = self.nodes[1].decodepsbt(updated)
test_psbt_input_keys(decoded['inputs'][0], ['witness_utxo'])
test_psbt_input_keys(decoded['inputs'][1], [])
test_psbt_input_keys(decoded['inputs'][2], [])
# Try again, now while providing descriptors, making P2SH-segwit work, and causing bip32_derivs and redeem_script to be filled in
descs = [self.nodes[1].getaddressinfo(addr)['desc'] for addr in [addr1,addr2,addr3]]
updated = self.nodes[1].utxoupdatepsbt(psbt=psbt, descriptors=descs)
decoded = self.nodes[1].decodepsbt(updated)
test_psbt_input_keys(decoded['inputs'][0], ['witness_utxo', 'bip32_derivs'])
test_psbt_input_keys(decoded['inputs'][1], [])
test_psbt_input_keys(decoded['inputs'][2], ['witness_utxo', 'bip32_derivs', 'redeem_script'])
# Two PSBTs with a common input should not be joinable
psbt1 = self.nodes[1].createpsbt([{"txid":txid1, "vout":vout1}], {self.nodes[0].getnewaddress():Decimal('10.999')})
assert_raises_rpc_error(-8, "exists in multiple PSBTs", self.nodes[1].joinpsbts, [psbt1, updated])
# Join two distinct PSBTs
addr4 = self.nodes[1].getnewaddress("", "p2sh-segwit")
txid4 = self.nodes[0].sendtoaddress(addr4, 5)
vout4 = find_output(self.nodes[0], txid4, 5)
self.nodes[0].generate(6)
self.sync_all()
psbt2 = self.nodes[1].createpsbt([{"txid":txid4, "vout":vout4}], {self.nodes[0].getnewaddress():Decimal('4.999')})
psbt2 = self.nodes[1].walletprocesspsbt(psbt2)['psbt']
psbt2_decoded = self.nodes[0].decodepsbt(psbt2)
assert "final_scriptwitness" in psbt2_decoded['inputs'][0] and "final_scriptSig" in psbt2_decoded['inputs'][0]
joined = self.nodes[0].joinpsbts([psbt, psbt2])
joined_decoded = self.nodes[0].decodepsbt(joined)
assert len(joined_decoded['inputs']) == 4 and len(joined_decoded['outputs']) == 2 and "final_scriptwitness" not in joined_decoded['inputs'][3] and "final_scriptSig" not in joined_decoded['inputs'][3]
# Check that joining shuffles the inputs and outputs
# 10 attempts should be enough to get a shuffled join
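        # (assuming a uniform shuffle, even over the 4 inputs alone there are
        # 4! = 24 orderings, so 10 identical joins in a row is vanishingly unlikely)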
shuffled = False
for _ in range(10):
shuffled_joined = self.nodes[0].joinpsbts([psbt, psbt2])
shuffled |= joined != shuffled_joined
if shuffled:
break
assert shuffled
# Newly created PSBT needs UTXOs and updating
addr = self.nodes[1].getnewaddress("", "p2sh-segwit")
txid = self.nodes[0].sendtoaddress(addr, 7)
addrinfo = self.nodes[1].getaddressinfo(addr)
blockhash = self.nodes[0].generate(6)[0]
self.sync_all()
vout = find_output(self.nodes[0], txid, 7, blockhash=blockhash)
psbt = self.nodes[1].createpsbt([{"txid":txid, "vout":vout}], {self.nodes[0].getnewaddress("", "p2sh-segwit"):Decimal('6.999')})
analyzed = self.nodes[0].analyzepsbt(psbt)
assert not analyzed['inputs'][0]['has_utxo'] and not analyzed['inputs'][0]['is_final'] and analyzed['inputs'][0]['next'] == 'updater' and analyzed['next'] == 'updater'
# After update with wallet, only needs signing
updated = self.nodes[1].walletprocesspsbt(psbt, False, 'ALL', True)['psbt']
analyzed = self.nodes[0].analyzepsbt(updated)
assert analyzed['inputs'][0]['has_utxo'] and not analyzed['inputs'][0]['is_final'] and analyzed['inputs'][0]['next'] == 'signer' and analyzed['next'] == 'signer' and analyzed['inputs'][0]['missing']['signatures'][0] == addrinfo['embedded']['witness_program']
# Check fee and size things
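        # estimated_feerate is fee per kvB; assuming the usual 10^8 base units per
        # coin and integer division in satoshis: 100000 sat * 1000 / 134 vB
        # = 746268 sat/kvB = 0.00746268 CPU/kvB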
assert analyzed['fee'] == Decimal('0.001') and analyzed['estimated_vsize'] == 134 and analyzed['estimated_feerate'] == Decimal('0.00746268')
# After signing and finalizing, needs extracting
signed = self.nodes[1].walletprocesspsbt(updated)['psbt']
analyzed = self.nodes[0].analyzepsbt(signed)
assert analyzed['inputs'][0]['has_utxo'] and analyzed['inputs'][0]['is_final'] and analyzed['next'] == 'extractor'
self.log.info("PSBT spending unspendable outputs should have error message and Creator as next")
analysis = self.nodes[0].analyzepsbt('cHNidP8BAJoCAAAAAljoeiG1ba8MI76OcHBFbDNvfLqlyHV5JPVFiHuyq911AAAAAAD/////g40EJ9DsZQpoqka7CwmK6kQiwHGyyng1Kgd5WdB86h0BAAAAAP////8CcKrwCAAAAAAWAEHYXCtx0AYLCcmIauuBXlCZHdoSTQDh9QUAAAAAFv8/wADXYP/7//////8JxOh0LR2HAI8AAAAAAAEBIADC6wsAAAAAF2oUt/X69ELjeX2nTof+fZ10l+OyAokDAQcJAwEHEAABAACAAAEBIADC6wsAAAAAF2oUt/X69ELjeX2nTof+fZ10l+OyAokDAQcJAwEHENkMak8AAAAA')
assert_equal(analysis['next'], 'creator')
assert_equal(analysis['error'], 'PSBT is not valid. Input 0 spends unspendable output')
self.log.info("PSBT with invalid values should have error message and Creator as next")
analysis = self.nodes[0].analyzepsbt('cHNidP8BAHECAAAAAfA00BFgAm6tp86RowwH6BMImQNL5zXUcTT97XoLGz0BAAAAAAD/////AgD5ApUAAAAAFgAUKNw0x8HRctAgmvoevm4u1SbN7XL87QKVAAAAABYAFPck4gF7iL4NL4wtfRAKgQbghiTUAAAAAAABAR8AgIFq49AHABYAFJUDtxf2PHo641HEOBOAIvFMNTr2AAAA')
assert_equal(analysis['next'], 'creator')
assert_equal(analysis['error'], 'PSBT is not valid. Input 0 has invalid value')
self.log.info("PSBT with signed, but not finalized, inputs should have Finalizer as next")
analysis = self.nodes[0].analyzepsbt('cHNidP8BAHECAAAAAZYezcxdnbXoQCmrD79t/LzDgtUo9ERqixk8wgioAobrAAAAAAD9////AlDDAAAAAAAAFgAUy/UxxZuzZswcmFnN/E9DGSiHLUsuGPUFAAAAABYAFLsH5o0R38wXx+X2cCosTMCZnQ4baAAAAAABAR8A4fUFAAAAABYAFOBI2h5thf3+Lflb2LGCsVSZwsltIgIC/i4dtVARCRWtROG0HHoGcaVklzJUcwo5homgGkSNAnJHMEQCIGx7zKcMIGr7cEES9BR4Kdt/pzPTK3fKWcGyCJXb7MVnAiALOBgqlMH4GbC1HDh/HmylmO54fyEy4lKde7/BT/PWxwEBAwQBAAAAIgYC/i4dtVARCRWtROG0HHoGcaVklzJUcwo5homgGkSNAnIYDwVpQ1QAAIABAACAAAAAgAAAAAAAAAAAAAAiAgL+CIiB59NSCssOJRGiMYQK1chahgAaaJpIXE41Cyir+xgPBWlDVAAAgAEAAIAAAACAAQAAAAAAAAAA')
assert_equal(analysis['next'], 'finalizer')
analysis = self.nodes[0].analyzepsbt('cHNidP8BAHECAAAAAfA00BFgAm6tp86RowwH6BMImQNL5zXUcTT97XoLGz0BAAAAAAD/////AgCAgWrj0AcAFgAUKNw0x8HRctAgmvoevm4u1SbN7XL87QKVAAAAABYAFPck4gF7iL4NL4wtfRAKgQbghiTUAAAAAAABAR8A8gUqAQAAABYAFJUDtxf2PHo641HEOBOAIvFMNTr2AAAA')
assert_equal(analysis['next'], 'creator')
assert_equal(analysis['error'], 'PSBT is not valid. Output amount invalid')
analysis = self.nodes[0].analyzepsbt('cHNidP8BAJoCAAAAAkvEW8NnDtdNtDpsmze+Ht2LH35IJcKv00jKAlUs21RrAwAAAAD/////S8Rbw2cO1020OmybN74e3Ysffkglwq/TSMoCVSzbVGsBAAAAAP7///8CwLYClQAAAAAWABSNJKzjaUb3uOxixsvh1GGE3fW7zQD5ApUAAAAAFgAUKNw0x8HRctAgmvoevm4u1SbN7XIAAAAAAAEAnQIAAAACczMa321tVHuN4GKWKRncycI22aX3uXgwSFUKM2orjRsBAAAAAP7///9zMxrfbW1Ue43gYpYpGdzJwjbZpfe5eDBIVQozaiuNGwAAAAAA/v///wIA+QKVAAAAABl2qRT9zXUVA8Ls5iVqynLHe5/vSe1XyYisQM0ClQAAAAAWABRmWQUcjSjghQ8/uH4Bn/zkakwLtAAAAAAAAQEfQM0ClQAAAAAWABRmWQUcjSjghQ8/uH4Bn/zkakwLtAAAAA==')
assert_equal(analysis['next'], 'creator')
assert_equal(analysis['error'], 'PSBT is not valid. Input 0 specifies invalid prevout')
assert_raises_rpc_error(-25, 'Inputs missing or spent', self.nodes[0].walletprocesspsbt, 'cHNidP8BAJoCAAAAAkvEW8NnDtdNtDpsmze+Ht2LH35IJcKv00jKAlUs21RrAwAAAAD/////S8Rbw2cO1020OmybN74e3Ysffkglwq/TSMoCVSzbVGsBAAAAAP7///8CwLYClQAAAAAWABSNJKzjaUb3uOxixsvh1GGE3fW7zQD5ApUAAAAAFgAUKNw0x8HRctAgmvoevm4u1SbN7XIAAAAAAAEAnQIAAAACczMa321tVHuN4GKWKRncycI22aX3uXgwSFUKM2orjRsBAAAAAP7///9zMxrfbW1Ue43gYpYpGdzJwjbZpfe5eDBIVQozaiuNGwAAAAAA/v///wIA+QKVAAAAABl2qRT9zXUVA8Ls5iVqynLHe5/vSe1XyYisQM0ClQAAAAAWABRmWQUcjSjghQ8/uH4Bn/zkakwLtAAAAAAAAQEfQM0ClQAAAAAWABRmWQUcjSjghQ8/uH4Bn/zkakwLtAAAAA==')
if __name__ == '__main__':
PSBTTest().main()
| 61.593698 | 584 | 0.675991 |
from decimal import Decimal
from itertools import product
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import (
assert_approx,
assert_equal,
assert_greater_than,
assert_raises_rpc_error,
find_output,
)
import json
import os
MAX_BIP125_RBF_SEQUENCE = 0xfffffffd
class PSBTTest(BitcoinTestFramework):
def set_test_params(self):
self.num_nodes = 3
self.extra_args = [
["-walletrbf=1"],
["-walletrbf=0", "-changetype=legacy"],
[]
]
self.supports_cli = False
def skip_test_if_missing_module(self):
self.skip_if_no_wallet()
def test_utxo_conversion(self):
mining_node = self.nodes[2]
offline_node = self.nodes[0]
online_node = self.nodes[1]
self.disconnect_nodes(0, 1)
online_node.createwallet(wallet_name='wonline', disable_private_keys=True)
wonline = online_node.get_wallet_rpc('wonline')
w2 = online_node.get_wallet_rpc('')
offline_addr = offline_node.getnewaddress(address_type="p2sh-segwit")
online_addr = w2.getnewaddress(address_type="p2sh-segwit")
wonline.importaddress(offline_addr, "", False)
mining_node.sendtoaddress(address=offline_addr, amount=1.0)
mining_node.generate(nblocks=1)
self.sync_blocks([mining_node, online_node])
utxos = wonline.listunspent(addresses=[offline_addr])
raw = wonline.createrawtransaction([{"txid":utxos[0]["txid"], "vout":utxos[0]["vout"]}],[{online_addr:0.9999}])
psbt = wonline.walletprocesspsbt(online_node.converttopsbt(raw))["psbt"]
assert "non_witness_utxo" in mining_node.decodepsbt(psbt)["inputs"][0]
# Have the offline node sign the PSBT (which will update the UTXO to segwit)
signed_psbt = offline_node.walletprocesspsbt(psbt)["psbt"]
assert "witness_utxo" in mining_node.decodepsbt(signed_psbt)["inputs"][0]
# Make sure we can mine the resulting transaction
txid = mining_node.sendrawtransaction(mining_node.finalizepsbt(signed_psbt)["hex"])
mining_node.generate(1)
self.sync_blocks([mining_node, online_node])
assert_equal(online_node.gettxout(txid,0)["confirmations"], 1)
wonline.unloadwallet()
# Reconnect
self.connect_nodes(0, 1)
self.connect_nodes(0, 2)
def assert_change_type(self, psbtx, expected_type):
# The decodepsbt RPC is stateless and independent of any settings, we can always just call it on the first node
decoded_psbt = self.nodes[0].decodepsbt(psbtx["psbt"])
changepos = psbtx["changepos"]
assert_equal(decoded_psbt["tx"]["vout"][changepos]["scriptPubKey"]["type"], expected_type)
def run_test(self):
# Create and fund a raw tx for sending 10 CPU
psbtx1 = self.nodes[0].walletcreatefundedpsbt([], {self.nodes[2].getnewaddress():10})['psbt']
# If inputs are specified, do not automatically add more:
utxo1 = self.nodes[0].listunspent()[0]
assert_raises_rpc_error(-4, "Insufficient funds", self.nodes[0].walletcreatefundedpsbt, [{"txid": utxo1['txid'], "vout": utxo1['vout']}], {self.nodes[2].getnewaddress():90})
psbtx1 = self.nodes[0].walletcreatefundedpsbt([{"txid": utxo1['txid'], "vout": utxo1['vout']}], {self.nodes[2].getnewaddress():90}, 0, {"add_inputs": True})['psbt']
assert_equal(len(self.nodes[0].decodepsbt(psbtx1)['tx']['vin']), 2)
# Inputs argument can be null
self.nodes[0].walletcreatefundedpsbt(None, {self.nodes[2].getnewaddress():10})
# Node 1 should not be able to add anything to it but still return the psbtx same as before
psbtx = self.nodes[1].walletprocesspsbt(psbtx1)['psbt']
assert_equal(psbtx1, psbtx)
# Sign the transaction and send
signed_tx = self.nodes[0].walletprocesspsbt(psbtx)['psbt']
final_tx = self.nodes[0].finalizepsbt(signed_tx)['hex']
self.nodes[0].sendrawtransaction(final_tx)
# Manually selected inputs can be locked:
assert_equal(len(self.nodes[0].listlockunspent()), 0)
utxo1 = self.nodes[0].listunspent()[0]
psbtx1 = self.nodes[0].walletcreatefundedpsbt([{"txid": utxo1['txid'], "vout": utxo1['vout']}], {self.nodes[2].getnewaddress():1}, 0,{"lockUnspents": True})["psbt"]
assert_equal(len(self.nodes[0].listlockunspent()), 1)
# Locks are ignored for manually selected inputs
self.nodes[0].walletcreatefundedpsbt([{"txid": utxo1['txid'], "vout": utxo1['vout']}], {self.nodes[2].getnewaddress():1}, 0)
# Create p2sh, p2wpkh, and p2wsh addresses
pubkey0 = self.nodes[0].getaddressinfo(self.nodes[0].getnewaddress())['pubkey']
pubkey1 = self.nodes[1].getaddressinfo(self.nodes[1].getnewaddress())['pubkey']
pubkey2 = self.nodes[2].getaddressinfo(self.nodes[2].getnewaddress())['pubkey']
# Setup watchonly wallets
self.nodes[2].createwallet(wallet_name='wmulti', disable_private_keys=True)
wmulti = self.nodes[2].get_wallet_rpc('wmulti')
# Create all the addresses
p2sh = wmulti.addmultisigaddress(2, [pubkey0, pubkey1, pubkey2], "", "legacy")['address']
p2wsh = wmulti.addmultisigaddress(2, [pubkey0, pubkey1, pubkey2], "", "bech32")['address']
p2sh_p2wsh = wmulti.addmultisigaddress(2, [pubkey0, pubkey1, pubkey2], "", "p2sh-segwit")['address']
if not self.options.descriptors:
wmulti.importaddress(p2sh)
wmulti.importaddress(p2wsh)
wmulti.importaddress(p2sh_p2wsh)
p2wpkh = self.nodes[1].getnewaddress("", "bech32")
p2pkh = self.nodes[1].getnewaddress("", "legacy")
p2sh_p2wpkh = self.nodes[1].getnewaddress("", "p2sh-segwit")
# fund those addresses
rawtx = self.nodes[0].createrawtransaction([], {p2sh:10, p2wsh:10, p2wpkh:10, p2sh_p2wsh:10, p2sh_p2wpkh:10, p2pkh:10})
rawtx = self.nodes[0].fundrawtransaction(rawtx, {"changePosition":3})
signed_tx = self.nodes[0].signrawtransactionwithwallet(rawtx['hex'])['hex']
txid = self.nodes[0].sendrawtransaction(signed_tx)
self.nodes[0].generate(6)
self.sync_all()
# Find the output pos
p2sh_pos = -1
p2wsh_pos = -1
p2wpkh_pos = -1
p2pkh_pos = -1
p2sh_p2wsh_pos = -1
p2sh_p2wpkh_pos = -1
decoded = self.nodes[0].decoderawtransaction(signed_tx)
for out in decoded['vout']:
if out['scriptPubKey']['address'] == p2sh:
p2sh_pos = out['n']
elif out['scriptPubKey']['address'] == p2wsh:
p2wsh_pos = out['n']
elif out['scriptPubKey']['address'] == p2wpkh:
p2wpkh_pos = out['n']
elif out['scriptPubKey']['address'] == p2sh_p2wsh:
p2sh_p2wsh_pos = out['n']
elif out['scriptPubKey']['address'] == p2sh_p2wpkh:
p2sh_p2wpkh_pos = out['n']
elif out['scriptPubKey']['address'] == p2pkh:
p2pkh_pos = out['n']
inputs = [{"txid": txid, "vout": p2wpkh_pos}, {"txid": txid, "vout": p2sh_p2wpkh_pos}, {"txid": txid, "vout": p2pkh_pos}]
outputs = [{self.nodes[1].getnewaddress(): 29.99}]
# spend single key from node 1
created_psbt = self.nodes[1].walletcreatefundedpsbt(inputs, outputs)
walletprocesspsbt_out = self.nodes[1].walletprocesspsbt(created_psbt['psbt'])
# Make sure it has both types of UTXOs
decoded = self.nodes[1].decodepsbt(walletprocesspsbt_out['psbt'])
assert 'non_witness_utxo' in decoded['inputs'][0]
assert 'witness_utxo' in decoded['inputs'][0]
# Check decodepsbt fee calculation (input values shall only be counted once per UTXO)
assert_equal(decoded['fee'], created_psbt['fee'])
assert_equal(walletprocesspsbt_out['complete'], True)
self.nodes[1].sendrawtransaction(self.nodes[1].finalizepsbt(walletprocesspsbt_out['psbt'])['hex'])
self.log.info("Test walletcreatefundedpsbt fee rate of 10000 sat/vB and 0.1 CPU/kvB produces a total fee at or slightly below -maxtxfee (~0.05290000)")
res1 = self.nodes[1].walletcreatefundedpsbt(inputs, outputs, 0, {"fee_rate": 10000, "add_inputs": True})
assert_approx(res1["fee"], 0.055, 0.005)
res2 = self.nodes[1].walletcreatefundedpsbt(inputs, outputs, 0, {"feeRate": "0.1", "add_inputs": True})
assert_approx(res2["fee"], 0.055, 0.005)
self.log.info("Test min fee rate checks with walletcreatefundedpsbt are bypassed, e.g. a fee_rate under 1 sat/vB is allowed")
res3 = self.nodes[1].walletcreatefundedpsbt(inputs, outputs, 0, {"fee_rate": "0.999", "add_inputs": True})
assert_approx(res3["fee"], 0.00000381, 0.0000001)
res4 = self.nodes[1].walletcreatefundedpsbt(inputs, outputs, 0, {"feeRate": 0.00000999, "add_inputs": True})
assert_approx(res4["fee"], 0.00000381, 0.0000001)
self.log.info("Test min fee rate checks with walletcreatefundedpsbt are bypassed and that funding non-standard 'zero-fee' transactions is valid")
for param, zero_value in product(["fee_rate", "feeRate"], [0, 0.000, 0.00000000, "0", "0.000", "0.00000000"]):
assert_equal(0, self.nodes[1].walletcreatefundedpsbt(inputs, outputs, 0, {param: zero_value, "add_inputs": True})["fee"])
self.log.info("Test invalid fee rate settings")
for param, value in {("fee_rate", 100000), ("feeRate", 1)}:
assert_raises_rpc_error(-4, "Fee exceeds maximum configured by user (e.g. -maxtxfee, maxfeerate)",
self.nodes[1].walletcreatefundedpsbt, inputs, outputs, 0, {param: value, "add_inputs": True})
assert_raises_rpc_error(-3, "Amount out of range",
self.nodes[1].walletcreatefundedpsbt, inputs, outputs, 0, {param: -1, "add_inputs": True})
assert_raises_rpc_error(-3, "Amount is not a number or string",
self.nodes[1].walletcreatefundedpsbt, inputs, outputs, 0, {param: {"foo": "bar"}, "add_inputs": True})
# Test fee rate values that don't pass fixed-point parsing checks.
for invalid_value in ["", 0.000000001, 1e-09, 1.111111111, 1111111111111111, "31.999999999999999999999"]:
assert_raises_rpc_error(-3, "Invalid amount",
self.nodes[1].walletcreatefundedpsbt, inputs, outputs, 0, {param: invalid_value, "add_inputs": True})
for invalid_value in [0.0001, 0.00000001, 0.00099999, 31.99999999, "0.0001", "0.00000001", "0.00099999", "31.99999999"]:
assert_raises_rpc_error(-3, "Invalid amount",
self.nodes[1].walletcreatefundedpsbt, inputs, outputs, 0, {"fee_rate": invalid_value, "add_inputs": True})
self.log.info("- raises RPC error if both feeRate and fee_rate are passed")
assert_raises_rpc_error(-8, "Cannot specify both fee_rate (sat/vB) and feeRate (CPU/kvB)",
self.nodes[1].walletcreatefundedpsbt, inputs, outputs, 0, {"fee_rate": 0.1, "feeRate": 0.1, "add_inputs": True})
self.log.info("- raises RPC error if both feeRate and estimate_mode passed")
assert_raises_rpc_error(-8, "Cannot specify both estimate_mode and feeRate",
self.nodes[1].walletcreatefundedpsbt, inputs, outputs, 0, {"estimate_mode": "economical", "feeRate": 0.1, "add_inputs": True})
for param in ["feeRate", "fee_rate"]:
self.log.info("- raises RPC error if both {} and conf_target are passed".format(param))
assert_raises_rpc_error(-8, "Cannot specify both conf_target and {}. Please provide either a confirmation "
"target in blocks for automatic fee estimation, or an explicit fee rate.".format(param),
self.nodes[1].walletcreatefundedpsbt ,inputs, outputs, 0, {param: 1, "conf_target": 1, "add_inputs": True})
self.log.info("- raises RPC error if both fee_rate and estimate_mode are passed")
assert_raises_rpc_error(-8, "Cannot specify both estimate_mode and fee_rate",
self.nodes[1].walletcreatefundedpsbt ,inputs, outputs, 0, {"fee_rate": 1, "estimate_mode": "economical", "add_inputs": True})
self.log.info("- raises RPC error with invalid estimate_mode settings")
for k, v in {"number": 42, "object": {"foo": "bar"}}.items():
assert_raises_rpc_error(-3, "Expected type string for estimate_mode, got {}".format(k),
self.nodes[1].walletcreatefundedpsbt, inputs, outputs, 0, {"estimate_mode": v, "conf_target": 0.1, "add_inputs": True})
for mode in ["", "foo", Decimal("3.141592")]:
assert_raises_rpc_error(-8, 'Invalid estimate_mode parameter, must be one of: "unset", "economical", "conservative"',
self.nodes[1].walletcreatefundedpsbt, inputs, outputs, 0, {"estimate_mode": mode, "conf_target": 0.1, "add_inputs": True})
self.log.info("- raises RPC error with invalid conf_target settings")
for mode in ["unset", "economical", "conservative"]:
self.log.debug("{}".format(mode))
for k, v in {"string": "", "object": {"foo": "bar"}}.items():
assert_raises_rpc_error(-3, "Expected type number for conf_target, got {}".format(k),
self.nodes[1].walletcreatefundedpsbt, inputs, outputs, 0, {"estimate_mode": mode, "conf_target": v, "add_inputs": True})
for n in [-1, 0, 1009]:
assert_raises_rpc_error(-8, "Invalid conf_target, must be between 1 and 1008",
self.nodes[1].walletcreatefundedpsbt, inputs, outputs, 0, {"estimate_mode": mode, "conf_target": n, "add_inputs": True})
self.log.info("Test walletcreatefundedpsbt with too-high fee rate produces total fee well above -maxtxfee and raises RPC error")
for bool_add, outputs_array in {True: outputs, False: [{self.nodes[1].getnewaddress(): 1}]}.items():
msg = "Fee exceeds maximum configured by user (e.g. -maxtxfee, maxfeerate)"
assert_raises_rpc_error(-4, msg, self.nodes[1].walletcreatefundedpsbt, inputs, outputs_array, 0, {"fee_rate": 1000000, "add_inputs": bool_add})
assert_raises_rpc_error(-4, msg, self.nodes[1].walletcreatefundedpsbt, inputs, outputs_array, 0, {"feeRate": 1, "add_inputs": bool_add})
self.log.info("Test various PSBT operations")
psbtx = wmulti.walletcreatefundedpsbt(inputs=[{"txid":txid,"vout":p2wsh_pos},{"txid":txid,"vout":p2sh_pos},{"txid":txid,"vout":p2sh_p2wsh_pos}], outputs={self.nodes[1].getnewaddress():29.99}, options={'changeAddress': self.nodes[1].getrawchangeaddress()})['psbt']
walletprocesspsbt_out = self.nodes[1].walletprocesspsbt(psbtx)
psbtx = walletprocesspsbt_out['psbt']
assert_equal(walletprocesspsbt_out['complete'], False)
wmulti.unloadwallet()
        # Sign with node 2; the PSBT should now be complete and sendable
walletprocesspsbt_out = self.nodes[2].walletprocesspsbt(psbtx)
assert_equal(walletprocesspsbt_out['complete'], True)
self.nodes[2].sendrawtransaction(self.nodes[2].finalizepsbt(walletprocesspsbt_out['psbt'])['hex'])
# check that walletprocesspsbt fails to decode a non-psbt
rawtx = self.nodes[1].createrawtransaction([{"txid":txid,"vout":p2wpkh_pos}], {self.nodes[1].getnewaddress():9.99})
assert_raises_rpc_error(-22, "TX decode failed", self.nodes[1].walletprocesspsbt, rawtx)
# Convert a non-psbt to psbt and make sure we can decode it
rawtx = self.nodes[0].createrawtransaction([], {self.nodes[1].getnewaddress():10})
rawtx = self.nodes[0].fundrawtransaction(rawtx)
new_psbt = self.nodes[0].converttopsbt(rawtx['hex'])
self.nodes[0].decodepsbt(new_psbt)
# Make sure that a non-psbt with signatures cannot be converted
# Error could be either "TX decode failed" (segwit inputs causes parsing to fail) or "Inputs must not have scriptSigs and scriptWitnesses"
# We must set iswitness=True because the serialized transaction has inputs and is therefore a witness transaction
signedtx = self.nodes[0].signrawtransactionwithwallet(rawtx['hex'])
assert_raises_rpc_error(-22, "", self.nodes[0].converttopsbt, hexstring=signedtx['hex'], iswitness=True)
assert_raises_rpc_error(-22, "", self.nodes[0].converttopsbt, hexstring=signedtx['hex'], permitsigdata=False, iswitness=True)
# Unless we allow it to convert and strip signatures
self.nodes[0].converttopsbt(signedtx['hex'], True)
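        # (the second positional argument is permitsigdata=True)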
# Explicitly allow converting non-empty txs
new_psbt = self.nodes[0].converttopsbt(rawtx['hex'])
self.nodes[0].decodepsbt(new_psbt)
# Create outputs to nodes 1 and 2
node1_addr = self.nodes[1].getnewaddress()
node2_addr = self.nodes[2].getnewaddress()
txid1 = self.nodes[0].sendtoaddress(node1_addr, 13)
txid2 = self.nodes[0].sendtoaddress(node2_addr, 13)
blockhash = self.nodes[0].generate(6)[0]
self.sync_all()
vout1 = find_output(self.nodes[1], txid1, 13, blockhash=blockhash)
vout2 = find_output(self.nodes[2], txid2, 13, blockhash=blockhash)
# Create a psbt spending outputs from nodes 1 and 2
psbt_orig = self.nodes[0].createpsbt([{"txid":txid1, "vout":vout1}, {"txid":txid2, "vout":vout2}], {self.nodes[0].getnewaddress():25.999})
# Update psbts, should only have data for one input and not the other
psbt1 = self.nodes[1].walletprocesspsbt(psbt_orig, False, "ALL")['psbt']
psbt1_decoded = self.nodes[0].decodepsbt(psbt1)
assert psbt1_decoded['inputs'][0] and not psbt1_decoded['inputs'][1]
# Check that BIP32 path was added
assert "bip32_derivs" in psbt1_decoded['inputs'][0]
psbt2 = self.nodes[2].walletprocesspsbt(psbt_orig, False, "ALL", False)['psbt']
psbt2_decoded = self.nodes[0].decodepsbt(psbt2)
assert not psbt2_decoded['inputs'][0] and psbt2_decoded['inputs'][1]
# Check that BIP32 paths were not added
assert "bip32_derivs" not in psbt2_decoded['inputs'][1]
# Sign PSBTs (workaround issue #18039)
psbt1 = self.nodes[1].walletprocesspsbt(psbt_orig)['psbt']
psbt2 = self.nodes[2].walletprocesspsbt(psbt_orig)['psbt']
# Combine, finalize, and send the psbts
combined = self.nodes[0].combinepsbt([psbt1, psbt2])
finalized = self.nodes[0].finalizepsbt(combined)['hex']
self.nodes[0].sendrawtransaction(finalized)
self.nodes[0].generate(6)
self.sync_all()
# Test additional args in walletcreatepsbt
# Make sure both pre-included and funded inputs
# have the correct sequence numbers based on
# replaceable arg
block_height = self.nodes[0].getblockcount()
unspent = self.nodes[0].listunspent()[0]
psbtx_info = self.nodes[0].walletcreatefundedpsbt([{"txid":unspent["txid"], "vout":unspent["vout"]}], [{self.nodes[2].getnewaddress():unspent["amount"]+1}], block_height+2, {"replaceable": False, "add_inputs": True}, False)
decoded_psbt = self.nodes[0].decodepsbt(psbtx_info["psbt"])
for tx_in, psbt_in in zip(decoded_psbt["tx"]["vin"], decoded_psbt["inputs"]):
assert_greater_than(tx_in["sequence"], MAX_BIP125_RBF_SEQUENCE)
assert "bip32_derivs" not in psbt_in
assert_equal(decoded_psbt["tx"]["locktime"], block_height+2)
# Same construction with only locktime set and RBF explicitly enabled
psbtx_info = self.nodes[0].walletcreatefundedpsbt([{"txid":unspent["txid"], "vout":unspent["vout"]}], [{self.nodes[2].getnewaddress():unspent["amount"]+1}], block_height, {"replaceable": True, "add_inputs": True}, True)
decoded_psbt = self.nodes[0].decodepsbt(psbtx_info["psbt"])
for tx_in, psbt_in in zip(decoded_psbt["tx"]["vin"], decoded_psbt["inputs"]):
assert_equal(tx_in["sequence"], MAX_BIP125_RBF_SEQUENCE)
assert "bip32_derivs" in psbt_in
assert_equal(decoded_psbt["tx"]["locktime"], block_height)
# Same construction without optional arguments
psbtx_info = self.nodes[0].walletcreatefundedpsbt([], [{self.nodes[2].getnewaddress():unspent["amount"]+1}])
decoded_psbt = self.nodes[0].decodepsbt(psbtx_info["psbt"])
for tx_in, psbt_in in zip(decoded_psbt["tx"]["vin"], decoded_psbt["inputs"]):
assert_equal(tx_in["sequence"], MAX_BIP125_RBF_SEQUENCE)
assert "bip32_derivs" in psbt_in
assert_equal(decoded_psbt["tx"]["locktime"], 0)
# Same construction without optional arguments, for a node with -walletrbf=0
unspent1 = self.nodes[1].listunspent()[0]
psbtx_info = self.nodes[1].walletcreatefundedpsbt([{"txid":unspent1["txid"], "vout":unspent1["vout"]}], [{self.nodes[2].getnewaddress():unspent1["amount"]+1}], block_height, {"add_inputs": True})
decoded_psbt = self.nodes[1].decodepsbt(psbtx_info["psbt"])
for tx_in, psbt_in in zip(decoded_psbt["tx"]["vin"], decoded_psbt["inputs"]):
assert_greater_than(tx_in["sequence"], MAX_BIP125_RBF_SEQUENCE)
assert "bip32_derivs" in psbt_in
        # Make sure funding succeeds with BnB coin selection even when the
        # change address belongs to another wallet, so its P2SH inner script
        # is not accessible to this one
self.nodes[0].walletcreatefundedpsbt([], [{self.nodes[2].getnewaddress():unspent["amount"]+1}], block_height+2, {"changeAddress":self.nodes[1].getnewaddress()}, False)
# Make sure the wallet's change type is respected by default
small_output = {self.nodes[0].getnewaddress():0.1}
psbtx_native = self.nodes[0].walletcreatefundedpsbt([], [small_output])
self.assert_change_type(psbtx_native, "witness_v0_keyhash")
psbtx_legacy = self.nodes[1].walletcreatefundedpsbt([], [small_output])
self.assert_change_type(psbtx_legacy, "pubkeyhash")
psbtx_np2wkh = self.nodes[1].walletcreatefundedpsbt([], [small_output], 0, {"change_type":"p2sh-segwit"})
self.assert_change_type(psbtx_np2wkh, "scripthash")
invalid_options = {"change_type":"legacy","changeAddress":self.nodes[0].getnewaddress()}
assert_raises_rpc_error(-8, "both change address and address type options", self.nodes[0].walletcreatefundedpsbt, [], [small_output], 0, invalid_options)
psbtx_info = self.nodes[0].walletcreatefundedpsbt([{"txid":unspent["txid"], "vout":unspent["vout"]}], [{self.nodes[2].getnewaddress():unspent["amount"]+1}], 0, {"add_inputs": True})
complete_psbt = self.nodes[0].walletprocesspsbt(psbtx_info["psbt"])
double_processed_psbt = self.nodes[0].walletprocesspsbt(complete_psbt["psbt"])
assert_equal(complete_psbt, double_processed_psbt)
self.nodes[0].decodepsbt(double_processed_psbt["psbt"])
# Make sure unsafe inputs are included if specified
self.nodes[2].createwallet(wallet_name="unsafe")
wunsafe = self.nodes[2].get_wallet_rpc("unsafe")
self.nodes[0].sendtoaddress(wunsafe.getnewaddress(), 2)
self.sync_mempools()
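        # The coin received above is unconfirmed and comes from another wallet,
        # so it counts as "unsafe" and is skipped unless include_unsafe is set.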
assert_raises_rpc_error(-4, "Insufficient funds", wunsafe.walletcreatefundedpsbt, [], [{self.nodes[0].getnewaddress(): 1}])
wunsafe.walletcreatefundedpsbt([], [{self.nodes[0].getnewaddress(): 1}], 0, {"include_unsafe": True})
# BIP 174 Test Vectors
# Check that unknown values are just passed through
unknown_psbt = "cHNidP8BAD8CAAAAAf//////////////////////////////////////////AAAAAAD/////AQAAAAAAAAAAA2oBAAAAAAAACg8BAgMEBQYHCAkPAQIDBAUGBwgJCgsMDQ4PAAA="
unknown_out = self.nodes[0].walletprocesspsbt(unknown_psbt)['psbt']
assert_equal(unknown_psbt, unknown_out)
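        # The round trip must be byte-for-byte identical: unknown key-value
        # pairs have to survive walletprocesspsbt untouched.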
# Open the data file
with open(os.path.join(os.path.dirname(os.path.realpath(__file__)), 'data/rpc_psbt.json'), encoding='utf-8') as f:
d = json.load(f)
invalids = d['invalid']
valids = d['valid']
creators = d['creator']
signers = d['signer']
combiners = d['combiner']
finalizers = d['finalizer']
extractors = d['extractor']
# Invalid PSBTs
for invalid in invalids:
assert_raises_rpc_error(-22, "TX decode failed", self.nodes[0].decodepsbt, invalid)
# Valid PSBTs
for valid in valids:
self.nodes[0].decodepsbt(valid)
# Creator Tests
for creator in creators:
created_tx = self.nodes[0].createpsbt(creator['inputs'], creator['outputs'])
assert_equal(created_tx, creator['result'])
# Signer tests
for i, signer in enumerate(signers):
self.nodes[2].createwallet(wallet_name="wallet{}".format(i))
wrpc = self.nodes[2].get_wallet_rpc("wallet{}".format(i))
for key in signer['privkeys']:
wrpc.importprivkey(key)
signed_tx = wrpc.walletprocesspsbt(signer['psbt'])['psbt']
assert_equal(signed_tx, signer['result'])
# Combiner test
for combiner in combiners:
combined = self.nodes[2].combinepsbt(combiner['combine'])
assert_equal(combined, combiner['result'])
# Empty combiner test
assert_raises_rpc_error(-8, "Parameter 'txs' cannot be empty", self.nodes[0].combinepsbt, [])
# Finalizer test
for finalizer in finalizers:
finalized = self.nodes[2].finalizepsbt(finalizer['finalize'], False)['psbt']
assert_equal(finalized, finalizer['result'])
# Extractor test
for extractor in extractors:
extracted = self.nodes[2].finalizepsbt(extractor['extract'], True)['hex']
assert_equal(extracted, extractor['result'])
# Unload extra wallets
for i, signer in enumerate(signers):
self.nodes[2].unloadwallet("wallet{}".format(i))
# TODO: Re-enable this for segwit v1
# self.test_utxo_conversion()
# Test that psbts with p2pkh outputs are created properly
p2pkh = self.nodes[0].getnewaddress(address_type='legacy')
psbt = self.nodes[1].walletcreatefundedpsbt([], [{p2pkh : 1}], 0, {"includeWatching" : True}, True)
self.nodes[0].decodepsbt(psbt['psbt'])
# Test decoding error: invalid base64
assert_raises_rpc_error(-22, "TX decode failed invalid base64", self.nodes[0].decodepsbt, ";definitely not base64;")
# Send to all types of addresses
addr1 = self.nodes[1].getnewaddress("", "bech32")
txid1 = self.nodes[0].sendtoaddress(addr1, 11)
vout1 = find_output(self.nodes[0], txid1, 11)
addr2 = self.nodes[1].getnewaddress("", "legacy")
txid2 = self.nodes[0].sendtoaddress(addr2, 11)
vout2 = find_output(self.nodes[0], txid2, 11)
addr3 = self.nodes[1].getnewaddress("", "p2sh-segwit")
txid3 = self.nodes[0].sendtoaddress(addr3, 11)
vout3 = find_output(self.nodes[0], txid3, 11)
self.sync_all()
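        # Helper: decodepsbt must report exactly the given per-input fields
        # (no more, no fewer) at each stage below.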
def test_psbt_input_keys(psbt_input, keys):
assert_equal(set(keys), set(psbt_input.keys()))
# Create a PSBT. None of the inputs are filled initially
psbt = self.nodes[1].createpsbt([{"txid":txid1, "vout":vout1},{"txid":txid2, "vout":vout2},{"txid":txid3, "vout":vout3}], {self.nodes[0].getnewaddress():32.999})
decoded = self.nodes[1].decodepsbt(psbt)
test_psbt_input_keys(decoded['inputs'][0], [])
test_psbt_input_keys(decoded['inputs'][1], [])
test_psbt_input_keys(decoded['inputs'][2], [])
# Update a PSBT with UTXOs from the node
# Bech32 inputs should be filled with witness UTXO. Other inputs should not be filled because they are non-witness
updated = self.nodes[1].utxoupdatepsbt(psbt)
decoded = self.nodes[1].decodepsbt(updated)
test_psbt_input_keys(decoded['inputs'][0], ['witness_utxo'])
test_psbt_input_keys(decoded['inputs'][1], [])
test_psbt_input_keys(decoded['inputs'][2], [])
# Try again, now while providing descriptors, making P2SH-segwit work, and causing bip32_derivs and redeem_script to be filled in
descs = [self.nodes[1].getaddressinfo(addr)['desc'] for addr in [addr1,addr2,addr3]]
updated = self.nodes[1].utxoupdatepsbt(psbt=psbt, descriptors=descs)
decoded = self.nodes[1].decodepsbt(updated)
test_psbt_input_keys(decoded['inputs'][0], ['witness_utxo', 'bip32_derivs'])
test_psbt_input_keys(decoded['inputs'][1], [])
test_psbt_input_keys(decoded['inputs'][2], ['witness_utxo', 'bip32_derivs', 'redeem_script'])
# Two PSBTs with a common input should not be joinable
psbt1 = self.nodes[1].createpsbt([{"txid":txid1, "vout":vout1}], {self.nodes[0].getnewaddress():Decimal('10.999')})
assert_raises_rpc_error(-8, "exists in multiple PSBTs", self.nodes[1].joinpsbts, [psbt1, updated])
# Join two distinct PSBTs
addr4 = self.nodes[1].getnewaddress("", "p2sh-segwit")
txid4 = self.nodes[0].sendtoaddress(addr4, 5)
vout4 = find_output(self.nodes[0], txid4, 5)
self.nodes[0].generate(6)
self.sync_all()
psbt2 = self.nodes[1].createpsbt([{"txid":txid4, "vout":vout4}], {self.nodes[0].getnewaddress():Decimal('4.999')})
psbt2 = self.nodes[1].walletprocesspsbt(psbt2)['psbt']
psbt2_decoded = self.nodes[0].decodepsbt(psbt2)
assert "final_scriptwitness" in psbt2_decoded['inputs'][0] and "final_scriptSig" in psbt2_decoded['inputs'][0]
joined = self.nodes[0].joinpsbts([psbt, psbt2])
joined_decoded = self.nodes[0].decodepsbt(joined)
assert len(joined_decoded['inputs']) == 4 and len(joined_decoded['outputs']) == 2 and "final_scriptwitness" not in joined_decoded['inputs'][3] and "final_scriptSig" not in joined_decoded['inputs'][3]
# Check that joining shuffles the inputs and outputs
# 10 attempts should be enough to get a shuffled join
shuffled = False
for _ in range(10):
shuffled_joined = self.nodes[0].joinpsbts([psbt, psbt2])
shuffled |= joined != shuffled_joined
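            # shuffled latches True as soon as any join differs from the first.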
if shuffled:
break
assert shuffled
# Newly created PSBT needs UTXOs and updating
addr = self.nodes[1].getnewaddress("", "p2sh-segwit")
txid = self.nodes[0].sendtoaddress(addr, 7)
addrinfo = self.nodes[1].getaddressinfo(addr)
blockhash = self.nodes[0].generate(6)[0]
self.sync_all()
vout = find_output(self.nodes[0], txid, 7, blockhash=blockhash)
psbt = self.nodes[1].createpsbt([{"txid":txid, "vout":vout}], {self.nodes[0].getnewaddress("", "p2sh-segwit"):Decimal('6.999')})
analyzed = self.nodes[0].analyzepsbt(psbt)
assert not analyzed['inputs'][0]['has_utxo'] and not analyzed['inputs'][0]['is_final'] and analyzed['inputs'][0]['next'] == 'updater' and analyzed['next'] == 'updater'
# After update with wallet, only needs signing
updated = self.nodes[1].walletprocesspsbt(psbt, False, 'ALL', True)['psbt']
analyzed = self.nodes[0].analyzepsbt(updated)
assert analyzed['inputs'][0]['has_utxo'] and not analyzed['inputs'][0]['is_final'] and analyzed['inputs'][0]['next'] == 'signer' and analyzed['next'] == 'signer' and analyzed['inputs'][0]['missing']['signatures'][0] == addrinfo['embedded']['witness_program']
# Check fee and size things
assert analyzed['fee'] == Decimal('0.001') and analyzed['estimated_vsize'] == 134 and analyzed['estimated_feerate'] == Decimal('0.00746268')
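        # Sanity check of estimated_feerate: 0.001 BTC fee / 134 vbytes
        # = 0.00746268... BTC/kvB, matching the assertion above.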
# After signing and finalizing, needs extracting
signed = self.nodes[1].walletprocesspsbt(updated)['psbt']
analyzed = self.nodes[0].analyzepsbt(signed)
assert analyzed['inputs'][0]['has_utxo'] and analyzed['inputs'][0]['is_final'] and analyzed['next'] == 'extractor'
self.log.info("PSBT spending unspendable outputs should have error message and Creator as next")
analysis = self.nodes[0].analyzepsbt('cHNidP8BAJoCAAAAAljoeiG1ba8MI76OcHBFbDNvfLqlyHV5JPVFiHuyq911AAAAAAD/////g40EJ9DsZQpoqka7CwmK6kQiwHGyyng1Kgd5WdB86h0BAAAAAP////8CcKrwCAAAAAAWAEHYXCtx0AYLCcmIauuBXlCZHdoSTQDh9QUAAAAAFv8/wADXYP/7//////8JxOh0LR2HAI8AAAAAAAEBIADC6wsAAAAAF2oUt/X69ELjeX2nTof+fZ10l+OyAokDAQcJAwEHEAABAACAAAEBIADC6wsAAAAAF2oUt/X69ELjeX2nTof+fZ10l+OyAokDAQcJAwEHENkMak8AAAAA')
assert_equal(analysis['next'], 'creator')
assert_equal(analysis['error'], 'PSBT is not valid. Input 0 spends unspendable output')
self.log.info("PSBT with invalid values should have error message and Creator as next")
analysis = self.nodes[0].analyzepsbt('cHNidP8BAHECAAAAAfA00BFgAm6tp86RowwH6BMImQNL5zXUcTT97XoLGz0BAAAAAAD/////AgD5ApUAAAAAFgAUKNw0x8HRctAgmvoevm4u1SbN7XL87QKVAAAAABYAFPck4gF7iL4NL4wtfRAKgQbghiTUAAAAAAABAR8AgIFq49AHABYAFJUDtxf2PHo641HEOBOAIvFMNTr2AAAA')
assert_equal(analysis['next'], 'creator')
assert_equal(analysis['error'], 'PSBT is not valid. Input 0 has invalid value')
self.log.info("PSBT with signed, but not finalized, inputs should have Finalizer as next")
analysis = self.nodes[0].analyzepsbt('cHNidP8BAHECAAAAAZYezcxdnbXoQCmrD79t/LzDgtUo9ERqixk8wgioAobrAAAAAAD9////AlDDAAAAAAAAFgAUy/UxxZuzZswcmFnN/E9DGSiHLUsuGPUFAAAAABYAFLsH5o0R38wXx+X2cCosTMCZnQ4baAAAAAABAR8A4fUFAAAAABYAFOBI2h5thf3+Lflb2LGCsVSZwsltIgIC/i4dtVARCRWtROG0HHoGcaVklzJUcwo5homgGkSNAnJHMEQCIGx7zKcMIGr7cEES9BR4Kdt/pzPTK3fKWcGyCJXb7MVnAiALOBgqlMH4GbC1HDh/HmylmO54fyEy4lKde7/BT/PWxwEBAwQBAAAAIgYC/i4dtVARCRWtROG0HHoGcaVklzJUcwo5homgGkSNAnIYDwVpQ1QAAIABAACAAAAAgAAAAAAAAAAAAAAiAgL+CIiB59NSCssOJRGiMYQK1chahgAaaJpIXE41Cyir+xgPBWlDVAAAgAEAAIAAAACAAQAAAAAAAAAA')
assert_equal(analysis['next'], 'finalizer')
analysis = self.nodes[0].analyzepsbt('cHNidP8BAHECAAAAAfA00BFgAm6tp86RowwH6BMImQNL5zXUcTT97XoLGz0BAAAAAAD/////AgCAgWrj0AcAFgAUKNw0x8HRctAgmvoevm4u1SbN7XL87QKVAAAAABYAFPck4gF7iL4NL4wtfRAKgQbghiTUAAAAAAABAR8A8gUqAQAAABYAFJUDtxf2PHo641HEOBOAIvFMNTr2AAAA')
assert_equal(analysis['next'], 'creator')
assert_equal(analysis['error'], 'PSBT is not valid. Output amount invalid')
analysis = self.nodes[0].analyzepsbt('cHNidP8BAJoCAAAAAkvEW8NnDtdNtDpsmze+Ht2LH35IJcKv00jKAlUs21RrAwAAAAD/////S8Rbw2cO1020OmybN74e3Ysffkglwq/TSMoCVSzbVGsBAAAAAP7///8CwLYClQAAAAAWABSNJKzjaUb3uOxixsvh1GGE3fW7zQD5ApUAAAAAFgAUKNw0x8HRctAgmvoevm4u1SbN7XIAAAAAAAEAnQIAAAACczMa321tVHuN4GKWKRncycI22aX3uXgwSFUKM2orjRsBAAAAAP7///9zMxrfbW1Ue43gYpYpGdzJwjbZpfe5eDBIVQozaiuNGwAAAAAA/v///wIA+QKVAAAAABl2qRT9zXUVA8Ls5iVqynLHe5/vSe1XyYisQM0ClQAAAAAWABRmWQUcjSjghQ8/uH4Bn/zkakwLtAAAAAAAAQEfQM0ClQAAAAAWABRmWQUcjSjghQ8/uH4Bn/zkakwLtAAAAA==')
assert_equal(analysis['next'], 'creator')
assert_equal(analysis['error'], 'PSBT is not valid. Input 0 specifies invalid prevout')
assert_raises_rpc_error(-25, 'Inputs missing or spent', self.nodes[0].walletprocesspsbt, 'cHNidP8BAJoCAAAAAkvEW8NnDtdNtDpsmze+Ht2LH35IJcKv00jKAlUs21RrAwAAAAD/////S8Rbw2cO1020OmybN74e3Ysffkglwq/TSMoCVSzbVGsBAAAAAP7///8CwLYClQAAAAAWABSNJKzjaUb3uOxixsvh1GGE3fW7zQD5ApUAAAAAFgAUKNw0x8HRctAgmvoevm4u1SbN7XIAAAAAAAEAnQIAAAACczMa321tVHuN4GKWKRncycI22aX3uXgwSFUKM2orjRsBAAAAAP7///9zMxrfbW1Ue43gYpYpGdzJwjbZpfe5eDBIVQozaiuNGwAAAAAA/v///wIA+QKVAAAAABl2qRT9zXUVA8Ls5iVqynLHe5/vSe1XyYisQM0ClQAAAAAWABRmWQUcjSjghQ8/uH4Bn/zkakwLtAAAAAAAAQEfQM0ClQAAAAAWABRmWQUcjSjghQ8/uH4Bn/zkakwLtAAAAA==')
if __name__ == '__main__':
PSBTTest().main()
f72001fbbd35b8a678f898e1db551767c3665048 | 30425 | py | Python | src/waldur_azure/migrations/0003_redesign.py | opennode/nodeconductor-assembly-waldur | cad9966389dc9b52b13d2301940c99cf4b243900 | ["MIT"] | 2 | 2017-01-20T15:26:25.000Z | 2017-08-03T04:38:08.000Z | src/waldur_azure/migrations/0003_redesign.py | opennode/nodeconductor-assembly-waldur | cad9966389dc9b52b13d2301940c99cf4b243900 | ["MIT"] | null | null | null | src/waldur_azure/migrations/0003_redesign.py | opennode/nodeconductor-assembly-waldur | cad9966389dc9b52b13d2301940c99cf4b243900 | ["MIT"] | null | null | null |
# Generated by Django 1.11.18 on 2019-01-28 14:05
import re
import django.core.validators
import django.db.models.deletion
import django.utils.timezone
import django_fsm
import model_utils.fields
from django.db import migrations, models
import waldur_azure.validators
import waldur_core.core.fields
import waldur_core.core.models
import waldur_core.core.shims
import waldur_core.core.validators
import waldur_core.logging.loggers
class Migration(migrations.Migration):
dependencies = [
('structure', '0005_customer_domain'),
('core', '0003_enlarge_username'),
('waldur_azure', '0002_immutable_default_json'),
]
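    # The operations below add the models for the redesigned Azure backend
    # (Location, Network, NetworkInterface, ResourceGroup, Size, SQLDatabase,
    # SQLServer, SubNet), rewire VirtualMachine and Image to reference them,
    # and drop the obsolete InstanceEndpoint model.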
operations = [
migrations.CreateModel(
name='Location',
fields=[
(
'id',
models.AutoField(
auto_created=True,
primary_key=True,
serialize=False,
verbose_name='ID',
),
),
(
'name',
models.CharField(
max_length=150,
validators=[waldur_core.core.validators.validate_name],
verbose_name='name',
),
),
('uuid', waldur_core.core.fields.UUIDField()),
('latitude', models.FloatField(blank=True, null=True)),
('longitude', models.FloatField(blank=True, null=True)),
('backend_id', models.CharField(db_index=True, max_length=255)),
(
'settings',
models.ForeignKey(
on_delete=django.db.models.deletion.CASCADE,
related_name='+',
to='structure.ServiceSettings',
),
),
],
options={
'abstract': False,
},
bases=(waldur_core.core.models.BackendModelMixin, models.Model),
),
migrations.CreateModel(
name='Network',
fields=[
(
'id',
models.AutoField(
auto_created=True,
primary_key=True,
serialize=False,
verbose_name='ID',
),
),
(
'created',
model_utils.fields.AutoCreatedField(
default=django.utils.timezone.now,
editable=False,
verbose_name='created',
),
),
(
'modified',
model_utils.fields.AutoLastModifiedField(
default=django.utils.timezone.now,
editable=False,
verbose_name='modified',
),
),
(
'description',
models.CharField(
blank=True, max_length=500, verbose_name='description'
),
),
('uuid', waldur_core.core.fields.UUIDField()),
('error_message', models.TextField(blank=True)),
(
'runtime_state',
models.CharField(
blank=True, max_length=150, verbose_name='runtime state'
),
),
(
'state',
django_fsm.FSMIntegerField(
choices=[
(5, 'Creation Scheduled'),
(6, 'Creating'),
(1, 'Update Scheduled'),
(2, 'Updating'),
(7, 'Deletion Scheduled'),
(8, 'Deleting'),
(3, 'OK'),
(4, 'Erred'),
],
default=5,
),
),
('backend_id', models.CharField(blank=True, max_length=255)),
(
'name',
models.CharField(
max_length=64,
validators=[
django.core.validators.RegexValidator(
message='The name can contain only letters, numbers, underscore, period and hyphens.',
regex=re.compile('[a-zA-Z][a-zA-Z0-9._-]+$'),
)
],
),
),
('cidr', models.CharField(max_length=32)),
],
options={
'abstract': False,
},
bases=(
waldur_core.core.models.DescendantMixin,
waldur_core.core.models.BackendModelMixin,
waldur_core.logging.loggers.LoggableMixin,
models.Model,
),
),
migrations.CreateModel(
name='NetworkInterface',
fields=[
(
'id',
models.AutoField(
auto_created=True,
primary_key=True,
serialize=False,
verbose_name='ID',
),
),
(
'created',
model_utils.fields.AutoCreatedField(
default=django.utils.timezone.now,
editable=False,
verbose_name='created',
),
),
(
'modified',
model_utils.fields.AutoLastModifiedField(
default=django.utils.timezone.now,
editable=False,
verbose_name='modified',
),
),
(
'description',
models.CharField(
blank=True, max_length=500, verbose_name='description'
),
),
('uuid', waldur_core.core.fields.UUIDField()),
('error_message', models.TextField(blank=True)),
(
'runtime_state',
models.CharField(
blank=True, max_length=150, verbose_name='runtime state'
),
),
(
'state',
django_fsm.FSMIntegerField(
choices=[
(5, 'Creation Scheduled'),
(6, 'Creating'),
(1, 'Update Scheduled'),
(2, 'Updating'),
(7, 'Deletion Scheduled'),
(8, 'Deleting'),
(3, 'OK'),
(4, 'Erred'),
],
default=5,
),
),
('backend_id', models.CharField(blank=True, max_length=255)),
(
'name',
models.CharField(
max_length=80,
validators=[
django.core.validators.RegexValidator(
message='The name can contain only letters, numbers, underscore, period and hyphens.',
regex=re.compile('[a-zA-Z][a-zA-Z0-9._-]+$'),
)
],
),
),
('config_name', models.CharField(max_length=255)),
],
options={
'abstract': False,
},
bases=(
waldur_core.core.models.DescendantMixin,
waldur_core.core.models.BackendModelMixin,
waldur_core.logging.loggers.LoggableMixin,
models.Model,
),
),
migrations.CreateModel(
name='ResourceGroup',
fields=[
(
'id',
models.AutoField(
auto_created=True,
primary_key=True,
serialize=False,
verbose_name='ID',
),
),
(
'created',
model_utils.fields.AutoCreatedField(
default=django.utils.timezone.now,
editable=False,
verbose_name='created',
),
),
(
'modified',
model_utils.fields.AutoLastModifiedField(
default=django.utils.timezone.now,
editable=False,
verbose_name='modified',
),
),
(
'description',
models.CharField(
blank=True, max_length=500, verbose_name='description'
),
),
('uuid', waldur_core.core.fields.UUIDField()),
('error_message', models.TextField(blank=True)),
(
'runtime_state',
models.CharField(
blank=True, max_length=150, verbose_name='runtime state'
),
),
(
'state',
django_fsm.FSMIntegerField(
choices=[
(5, 'Creation Scheduled'),
(6, 'Creating'),
(1, 'Update Scheduled'),
(2, 'Updating'),
(7, 'Deletion Scheduled'),
(8, 'Deleting'),
(3, 'OK'),
(4, 'Erred'),
],
default=5,
),
),
('backend_id', models.CharField(blank=True, max_length=255)),
(
'name',
models.CharField(
max_length=90,
validators=[
django.core.validators.RegexValidator(
message='The name can include alphanumeric, underscore, parentheses, hyphen, period (except at end), and Unicode characters that match the allowed characters.',
regex=re.compile('^[-\\w._()]+$'),
)
],
),
),
(
'location',
models.ForeignKey(
on_delete=django.db.models.deletion.CASCADE,
to='waldur_azure.Location',
),
),
],
options={
'abstract': False,
},
bases=(
waldur_core.core.models.DescendantMixin,
waldur_core.core.models.BackendModelMixin,
waldur_core.logging.loggers.LoggableMixin,
models.Model,
),
),
migrations.CreateModel(
name='Size',
fields=[
(
'id',
models.AutoField(
auto_created=True,
primary_key=True,
serialize=False,
verbose_name='ID',
),
),
(
'name',
models.CharField(
max_length=150,
validators=[waldur_core.core.validators.validate_name],
verbose_name='name',
),
),
('uuid', waldur_core.core.fields.UUIDField()),
('backend_id', models.CharField(db_index=True, max_length=255)),
('max_data_disk_count', models.PositiveIntegerField()),
('memory_in_mb', models.PositiveIntegerField()),
('number_of_cores', models.PositiveIntegerField()),
('os_disk_size_in_mb', models.PositiveIntegerField()),
('resource_disk_size_in_mb', models.PositiveIntegerField()),
(
'settings',
models.ForeignKey(
on_delete=django.db.models.deletion.CASCADE,
related_name='+',
to='structure.ServiceSettings',
),
),
],
options={
'abstract': False,
},
bases=(waldur_core.core.models.BackendModelMixin, models.Model),
),
migrations.CreateModel(
name='SQLDatabase',
fields=[
(
'id',
models.AutoField(
auto_created=True,
primary_key=True,
serialize=False,
verbose_name='ID',
),
),
(
'created',
model_utils.fields.AutoCreatedField(
default=django.utils.timezone.now,
editable=False,
verbose_name='created',
),
),
(
'modified',
model_utils.fields.AutoLastModifiedField(
default=django.utils.timezone.now,
editable=False,
verbose_name='modified',
),
),
(
'description',
models.CharField(
blank=True, max_length=500, verbose_name='description'
),
),
(
'name',
models.CharField(
max_length=150,
validators=[waldur_core.core.validators.validate_name],
verbose_name='name',
),
),
('uuid', waldur_core.core.fields.UUIDField()),
('error_message', models.TextField(blank=True)),
(
'runtime_state',
models.CharField(
blank=True, max_length=150, verbose_name='runtime state'
),
),
(
'state',
django_fsm.FSMIntegerField(
choices=[
(5, 'Creation Scheduled'),
(6, 'Creating'),
(1, 'Update Scheduled'),
(2, 'Updating'),
(7, 'Deletion Scheduled'),
(8, 'Deleting'),
(3, 'OK'),
(4, 'Erred'),
],
default=5,
),
),
('backend_id', models.CharField(blank=True, max_length=255)),
('charset', models.CharField(blank=True, max_length=255)),
('collation', models.CharField(blank=True, max_length=255)),
(
'resource_group',
models.ForeignKey(
on_delete=django.db.models.deletion.CASCADE,
to='waldur_azure.ResourceGroup',
),
),
],
options={
'abstract': False,
},
bases=(
waldur_core.core.models.DescendantMixin,
waldur_core.core.models.BackendModelMixin,
waldur_core.logging.loggers.LoggableMixin,
models.Model,
),
),
migrations.CreateModel(
name='SQLServer',
fields=[
(
'id',
models.AutoField(
auto_created=True,
primary_key=True,
serialize=False,
verbose_name='ID',
),
),
(
'created',
model_utils.fields.AutoCreatedField(
default=django.utils.timezone.now,
editable=False,
verbose_name='created',
),
),
(
'modified',
model_utils.fields.AutoLastModifiedField(
default=django.utils.timezone.now,
editable=False,
verbose_name='modified',
),
),
(
'description',
models.CharField(
blank=True, max_length=500, verbose_name='description'
),
),
(
'name',
models.CharField(
max_length=150,
validators=[
django.core.validators.RegexValidator(
message='The name can only be made up of lowercase letters "a"-"z", the numbers 0-9 and the hyphen. The hyphen may not lead or trail in the name.',
regex=re.compile('[a-z0-9][a-z0-9-]+[a-z0-9]$'),
)
],
verbose_name='name',
),
),
('uuid', waldur_core.core.fields.UUIDField()),
('error_message', models.TextField(blank=True)),
(
'runtime_state',
models.CharField(
blank=True, max_length=150, verbose_name='runtime state'
),
),
(
'state',
django_fsm.FSMIntegerField(
choices=[
(5, 'Creation Scheduled'),
(6, 'Creating'),
(1, 'Update Scheduled'),
(2, 'Updating'),
(7, 'Deletion Scheduled'),
(8, 'Deleting'),
(3, 'OK'),
(4, 'Erred'),
],
default=5,
),
),
('backend_id', models.CharField(blank=True, max_length=255)),
('username', models.CharField(max_length=50)),
('password', models.CharField(max_length=50)),
('storage_mb', models.PositiveIntegerField(null=True)),
(
'resource_group',
models.ForeignKey(
on_delete=django.db.models.deletion.CASCADE,
to='waldur_azure.ResourceGroup',
),
),
],
options={
'abstract': False,
},
bases=(
waldur_core.core.models.DescendantMixin,
waldur_core.core.models.BackendModelMixin,
waldur_core.logging.loggers.LoggableMixin,
models.Model,
),
),
migrations.CreateModel(
name='SubNet',
fields=[
(
'id',
models.AutoField(
auto_created=True,
primary_key=True,
serialize=False,
verbose_name='ID',
),
),
(
'created',
model_utils.fields.AutoCreatedField(
default=django.utils.timezone.now,
editable=False,
verbose_name='created',
),
),
(
'modified',
model_utils.fields.AutoLastModifiedField(
default=django.utils.timezone.now,
editable=False,
verbose_name='modified',
),
),
(
'description',
models.CharField(
blank=True, max_length=500, verbose_name='description'
),
),
('uuid', waldur_core.core.fields.UUIDField()),
('error_message', models.TextField(blank=True)),
(
'runtime_state',
models.CharField(
blank=True, max_length=150, verbose_name='runtime state'
),
),
(
'state',
django_fsm.FSMIntegerField(
choices=[
(5, 'Creation Scheduled'),
(6, 'Creating'),
(1, 'Update Scheduled'),
(2, 'Updating'),
(7, 'Deletion Scheduled'),
(8, 'Deleting'),
(3, 'OK'),
(4, 'Erred'),
],
default=5,
),
),
('backend_id', models.CharField(blank=True, max_length=255)),
(
'name',
models.CharField(
max_length=80,
validators=[
django.core.validators.RegexValidator(
message='The name can contain only letters, numbers, underscore, period and hyphens.',
regex=re.compile('[a-zA-Z][a-zA-Z0-9._-]+$'),
)
],
),
),
('cidr', models.CharField(max_length=32)),
(
'network',
models.ForeignKey(
on_delete=django.db.models.deletion.CASCADE,
to='waldur_azure.Network',
),
),
(
'resource_group',
models.ForeignKey(
on_delete=django.db.models.deletion.CASCADE,
to='waldur_azure.ResourceGroup',
),
),
],
options={
'abstract': False,
},
bases=(
waldur_core.core.models.DescendantMixin,
waldur_core.core.models.BackendModelMixin,
waldur_core.logging.loggers.LoggableMixin,
models.Model,
),
),
migrations.RemoveField(
model_name='instanceendpoint',
name='instance',
),
migrations.RemoveField(
model_name='virtualmachine',
name='private_ips',
),
migrations.RemoveField(
model_name='virtualmachine',
name='public_ips',
),
migrations.RemoveField(
model_name='virtualmachine',
name='user_password',
),
migrations.RemoveField(
model_name='virtualmachine',
name='user_username',
),
migrations.AddField(
model_name='image',
name='offer',
field=models.CharField(default=None, max_length=255),
preserve_default=False,
),
migrations.AddField(
model_name='image',
name='publisher',
field=models.CharField(default=None, max_length=255),
preserve_default=False,
),
migrations.AddField(
model_name='image',
name='settings',
field=models.ForeignKey(
default=None,
on_delete=django.db.models.deletion.CASCADE,
related_name='+',
to='structure.ServiceSettings',
),
preserve_default=False,
),
migrations.AddField(
model_name='image',
name='sku',
field=models.CharField(default=None, max_length=255),
preserve_default=False,
),
migrations.AddField(
model_name='image',
name='version',
field=models.CharField(default=None, max_length=255),
preserve_default=False,
),
migrations.AddField(
model_name='virtualmachine',
name='image',
field=models.ForeignKey(
default=None,
on_delete=django.db.models.deletion.CASCADE,
to='waldur_azure.Image',
),
preserve_default=False,
),
migrations.AddField(
model_name='virtualmachine',
name='password',
field=models.CharField(
default=None,
max_length=72,
validators=[
django.core.validators.MinLengthValidator(6),
django.core.validators.MaxLengthValidator(72),
waldur_azure.validators.validate_password,
],
),
preserve_default=False,
),
migrations.AddField(
model_name='virtualmachine',
name='ssh_key',
field=models.ForeignKey(
blank=True,
null=True,
on_delete=django.db.models.deletion.CASCADE,
to='core.SshPublicKey',
),
),
migrations.AddField(
model_name='virtualmachine',
name='username',
field=models.CharField(
default=None,
max_length=32,
validators=[waldur_azure.validators.VirtualMachineUsernameValidator],
),
preserve_default=False,
),
migrations.AlterField(
model_name='image',
name='backend_id',
field=models.CharField(db_index=True, max_length=255),
),
migrations.AlterField(
model_name='virtualmachine',
name='name',
field=models.CharField(
max_length=15,
validators=[
django.core.validators.RegexValidator(
message='The name can contain only letters, numbers, and hyphens. The name must be shorter than 15 characters and start with a letter and must end with a letter or a number.',
regex=re.compile('[a-zA-Z][a-zA-Z0-9-]{0,13}[a-zA-Z0-9]$'),
)
],
),
),
migrations.AlterUniqueTogether(
name='image',
unique_together=set([('settings', 'backend_id')]),
),
migrations.DeleteModel(
name='InstanceEndpoint',
),
migrations.AddField(
model_name='sqldatabase',
name='server',
field=models.ForeignKey(
on_delete=django.db.models.deletion.CASCADE, to='waldur_azure.SQLServer'
),
),
migrations.AddField(
model_name='networkinterface',
name='resource_group',
field=models.ForeignKey(
on_delete=django.db.models.deletion.CASCADE,
to='waldur_azure.ResourceGroup',
),
),
migrations.AddField(
model_name='networkinterface',
name='subnet',
field=models.ForeignKey(
on_delete=django.db.models.deletion.CASCADE, to='waldur_azure.SubNet'
),
),
migrations.AddField(
model_name='network',
name='resource_group',
field=models.ForeignKey(
on_delete=django.db.models.deletion.CASCADE,
to='waldur_azure.ResourceGroup',
),
),
migrations.AddField(
model_name='virtualmachine',
name='network_interface',
field=models.ForeignKey(
default=None,
on_delete=django.db.models.deletion.CASCADE,
to='waldur_azure.NetworkInterface',
),
preserve_default=False,
),
migrations.AddField(
model_name='virtualmachine',
name='resource_group',
field=models.ForeignKey(
default=None,
on_delete=django.db.models.deletion.CASCADE,
to='waldur_azure.ResourceGroup',
),
preserve_default=False,
),
migrations.AddField(
model_name='virtualmachine',
name='size',
field=models.ForeignKey(
default=None,
on_delete=django.db.models.deletion.CASCADE,
to='waldur_azure.Size',
),
preserve_default=False,
),
migrations.AlterUniqueTogether(
name='size',
unique_together=set([('settings', 'backend_id')]),
),
]
f7200243f3b4d289ac50951e7f5c03cf8e464b4c | 7970 | py | Python | src/pyrad_proc/pyrad/EGG-INFO/scripts/rewrite_monitoring.py | jfigui/pyrad | 7811d593bb09a7f8a621c0e8ae3f32c2b85a0254 | ["BSD-3-Clause"] | 41 | 2016-12-01T08:46:06.000Z | 2021-06-24T21:14:33.000Z | src/pyrad_proc/pyrad/EGG-INFO/scripts/rewrite_monitoring.py | jfigui/pyrad | 7811d593bb09a7f8a621c0e8ae3f32c2b85a0254 | ["BSD-3-Clause"] | 42 | 2017-02-23T14:52:49.000Z | 2021-02-01T10:43:52.000Z | src/pyrad_proc/pyrad/EGG-INFO/scripts/rewrite_monitoring.py | jfigui/pyrad | 7811d593bb09a7f8a621c0e8ae3f32c2b85a0254 | ["BSD-3-Clause"] | 21 | 2016-08-25T15:02:12.000Z | 2021-05-27T04:09:40.000Z |
#!/home/daniel/anaconda3/bin/python
# -*- coding: utf-8 -*-
"""
================================================
rewrite_monitoring
================================================
This program rewrites monitoring time series files into the correct
time order.
"""
# Author: fvj
# License: BSD 3 clause
import datetime
import atexit
import numpy as np
import os
from pyrad.io import read_monitoring_ts, write_monitoring_ts
from pyrad.graph import plot_monitoring_ts
from pyrad.io import generate_field_name_str, get_fieldname_pyart
print(__doc__)
def main():
"""
"""
input_base = (
'/store/msrad/radar/pyrad_products/')
output_base = (
'/store/msrad/radar/pyrad_products/')
rad_vec = ['D']
var_vec = ['PhiDP0', 'RhoHV_rain', 'ZDR_prec', 'ZDR_snow', 'dBZ_bias']
year_vec = [datetime.datetime(2018, 1, 1)]
plot_data = True
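    # rad_vec, var_vec and year_vec above select which time series are
    # rewritten; plot_data toggles regeneration of the monitoring figures.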
print("====== Monitoring rewriting started: %s" %
datetime.datetime.utcnow().strftime("%Y-%m-%d %H:%M:%S"))
atexit.register(_print_end_msg,
"====== Monitoring rewriting finished: ")
for i, rad in enumerate(rad_vec):
print('Processing Radar '+rad)
for j, var in enumerate(var_vec):
if var == 'dBZ':
basedir = 'rad4alp_gc_PH'+rad
dsdir = 'monitoring_clt_Zh'
mon_type = 'GC_MONITORING'
quantiles = [50., 95., 99.]
elif var == 'dBZv':
basedir = 'rad4alp_gc_PH'+rad
dsdir = 'monitoring_clt_Zv'
mon_type = 'GC_MONITORING'
quantiles = [50., 95., 99.]
elif var == 'RhoHV_rain':
basedir = 'rad4alp_dataquality_PL'+rad
dsdir = 'monitoring_RhoHV'
mon_type = 'MONITORING'
quantiles = [65., 80., 95.]
elif var == 'PhiDP0':
basedir = 'rad4alp_dataquality_PL'+rad
dsdir = 'monitoring_PhiDP0'
mon_type = 'MONITORING'
quantiles = [25., 50., 75.]
elif var == 'ZDR_prec':
basedir = 'rad4alp_dataquality_PL'+rad
dsdir = 'monitoring_ZDR'
mon_type = 'MONITORING'
quantiles = [25., 50., 75.]
elif var == 'ZDR_snow':
basedir = 'rad4alp_dataquality_PL'+rad
dsdir = 'monitoring_ZDR_snow'
mon_type = 'MONITORING'
quantiles = [25., 50., 75.]
elif var == 'dBZ_bias':
basedir = 'rad4alp_dataquality_PL'+rad
dsdir = 'monitoring_Zh_bias'
mon_type = 'MONITORING'
quantiles = [25., 50., 75.]
input_path = input_base+basedir+'/'+dsdir+'/VOL_TS/'
output_path = output_base+basedir+'/'+dsdir+'/VOL_TS/'
if not os.path.isdir(output_path):
os.makedirs(output_path)
print('- Processing Variable '+var)
for k, year in enumerate(year_vec):
print('-- Processing Year '+year.strftime('%Y'))
fname_input = (
input_path+year.strftime('%Y')+'_'+rad +
'_ts_'+mon_type+'_'+var+'.csv')
fname_output = (
output_path+year.strftime('%Y')+'_'+rad +
'_ts_'+mon_type+'_'+var+'.csv')
figfname = [
output_path+year.strftime('%Y')+'_'+rad +
'_ts_'+mon_type+'_'+var+'.png']
date, np_t_vec, cquant_vec, lquant_vec, hquant_vec = (
read_monitoring_ts(fname_input, sort_by_date=True))
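                # sort_by_date=True does the actual reordering: the series is
                # read back in chronological order before being rewritten.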
if date is None:
continue
val_vec = np.ma.asarray(
[lquant_vec, cquant_vec, hquant_vec]).T
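                # One row per timestamp; columns are the low/central/high
                # quantiles, in the same order as the quantiles list.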
fname = write_monitoring_ts(
date, np_t_vec, val_vec, quantiles, var,
fname_output, rewrite=True)
print('written file '+fname)
if not plot_data:
continue
titldate = (date[0].strftime('%Y%m%d')+'-' +
date[-1].strftime('%Y%m%d'))
titl = rad+' Monitoring '+titldate
labely = generate_field_name_str(var)
if var == 'dBZ':
if rad == 'A':
ref_value = 49.5
vmin = 44.5
vmax = 54.5
np_min = 100000
elif rad == 'D':
ref_value = 48.5
vmin = 43.5
vmax = 53.5
np_min = 20000
elif rad == 'L':
ref_value = 67.
vmin = 62.
vmax = 72.
np_min = 100000
elif rad == 'P':
ref_value = 69.
vmin = 64.
vmax = 74.
np_min = 100000
elif rad == 'W':
ref_value = 27.5
vmin = 22.5
vmax = 32.5
np_min = 100000
elif var == 'dBZv':
if rad == 'A':
ref_value = 51.5
vmin = 46.5
vmax = 56.5
np_min = 100000
elif rad == 'D':
ref_value = 50.5
vmin = 45.5
vmax = 55.5
np_min = 20000
elif rad == 'L':
ref_value = 69.5
vmin = 64.5
vmax = 74.5
np_min = 100000
elif rad == 'P':
ref_value = 68.5
vmin = 63.5
vmax = 73.5
np_min = 100000
elif rad == 'W':
ref_value = 26.5
vmin = 21.5
vmax = 31.5
np_min = 100000
elif var == 'RhoHV_rain':
ref_value = 0.99
vmin = 0.95
vmax = 1.01
np_min = 5000
elif var == 'PhiDP0':
ref_value = 0.
vmin = -20.
vmax = 20.
np_min = 500000
elif var == 'ZDR_prec':
ref_value = 0.2
vmin = -2.
vmax = 2.
np_min = 5000
elif var == 'ZDR_snow':
ref_value = 0.2
vmin = -2.
vmax = 2.
np_min = 5000
elif var == 'dBZ_bias':
ref_value = 0.
vmin = -30.
vmax = 30.
np_min = 100
fname = plot_monitoring_ts(
date, np_t_vec, cquant_vec, lquant_vec, hquant_vec,
get_fieldname_pyart(var), figfname,
ref_value=ref_value, vmin=vmin, vmax=vmax, np_min=np_min,
labelx='Time UTC', labely=labely, titl=titl)
print('plotted file '+' '.join(fname))
def _print_end_msg(text):
"""
prints end message
Parameters
----------
text : str
the text to be printed
Returns
-------
Nothing
"""
print(text + datetime.datetime.utcnow().strftime("%Y-%m-%d %H:%M:%S"))
# ---------------------------------------------------------
# Start main:
# ---------------------------------------------------------
if __name__ == "__main__":
main()
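

# --- Added illustration (not part of the original script) ---
# A minimal, self-contained sketch of the packing step used in main() above:
# the three quantile series returned by read_monitoring_ts are stacked and
# transposed into the (n_samples, n_quantiles) masked array that
# write_monitoring_ts expects. The sample values here are hypothetical.
def _demo_quantile_packing():
    import numpy as np
    lquant = np.ma.masked_invalid([10.0, 11.0, float('nan')])
    cquant = np.ma.masked_invalid([20.0, 21.0, 22.0])
    hquant = np.ma.masked_invalid([30.0, 31.0, 32.0])
    val_vec = np.ma.asarray([lquant, cquant, hquant]).T
    # one row per timestamp, one column per quantile; the NaN stays masked
    assert val_vec.shape == (3, 3)
    return val_vec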
f72002f8e1ad1752270b6c4051b237ce04dec27e | 13,299 | py | Python | Scripts/simulation/interactions/jog_interaction.py | velocist/TS4CheatsInfo | ["Apache-2.0"]

# uncompyle6 version 3.7.4
# Python bytecode 3.7 (3394)
# Decompiled from: Python 3.7.9 (tags/v3.7.9:13c94747c7, Aug 17 2020, 18:58:18) [MSC v.1900 64 bit (AMD64)]
# Embedded file name: T:\InGame\Gameplay\Scripts\Server\interactions\jog_interaction.py
# Compiled at: 2020-07-22 05:56:20
# Size of source mod 2**32: 16676 bytes
from _math import Vector3
import itertools, random
from balloon.tunable_balloon import TunableBalloon
from element_utils import do_all
from event_testing.results import TestResult
from interactions import TargetType
from interactions.base.super_interaction import SuperInteraction
from interactions.constraints import Circle, ANYWHERE
from interactions.utils.routing import FollowPath, PlanRoute, get_route_element_for_path
from routing.walkstyle.walkstyle_request import WalkStyleRequest
from routing.waypoints.waypoint_generator_variant import TunableWaypointGeneratorVariant
from routing.waypoints.waypoint_stitching import WaypointStitchingVariant
from sims4 import random
from sims4.tuning.tunable import TunableRange, Tunable, OptionalTunable
from sims4.tuning.tunable_base import GroupNames
from sims4.utils import flexmethod
import element_utils, routing, sims4.log
logger = sims4.log.Logger('WaypointInteraction')
class _WaypointGeneratorRallyable:
def __init__(self, waypoint_info):
self._original_generator = waypoint_info
def get_start_constraint(self):
return self._original_generator.get_start_constraint()
def get_waypoint_constraints_gen(self, routing_agent, waypoint_count):
yield from self._original_generator.get_waypoint_constraints_gen(routing_agent, waypoint_count)
class WaypointInteraction(SuperInteraction):
INSTANCE_TUNABLES = {'waypoint_constraint':TunableWaypointGeneratorVariant(tuning_group=GroupNames.ROUTING),
'waypoint_count':TunableRange(description='\n The number of waypoints to select, from spawn points in the zone, to\n visit for a Jog prior to returning to the original location.\n ',
tunable_type=int,
default=2,
minimum=2,
tuning_group=GroupNames.ROUTING),
'waypoint_walk_style':WalkStyleRequest.TunableFactory(description='\n The walkstyle to use when routing between waypoints.\n ',
tuning_group=GroupNames.ROUTING),
'waypoint_stitching':WaypointStitchingVariant(tuning_group=GroupNames.ROUTING),
'waypoint_randomize_orientation':Tunable(description='\n Make Waypoint orientation random. Default is velocity aligned.\n ',
tunable_type=bool,
default=False,
tuning_group=GroupNames.ROUTING),
'waypoint_clear_locomotion_mask':Tunable(description='\n If enabled, override the locomotion queue mask. This mask controls\n which Animation Requests and XEvents get blocked during locomotion.\n By default, the mask blocks everything. If cleared, it blocks\n nothing. It also lowers the animation track used by locomotion to \n 9,999 from the default of 10,000. Use with care, ask your GPE.\n ',
tunable_type=bool,
default=False,
tuning_group=GroupNames.ROUTING),
'waypoint_override_agent_radius':OptionalTunable(description='\n If enabled, use the specified value as the agent radius when\n generating goals for the waypoints. The agent radius is restored\n for the actual route.\n ',
tunable=TunableRange(description='\n The value to use as the agent radius when generating goals. \n ',
tunable_type=float,
minimum=0,
maximum=1.0,
default=0.123),
tuning_group=GroupNames.ROUTING),
     'waypoint_route_fail_balloon':OptionalTunable(description='\n            Tuning for balloon to show when failing to plan a route for this waypoint interaction. \n            ',
tunable=TunableBalloon(locked_args={'balloon_delay':0,
'balloon_delay_random_offset':0,
'balloon_chance':100}),
tuning_group=GroupNames.ROUTING)}
def __init__(self, aop, *args, waypoint_generator=None, **kwargs):
(super().__init__)(aop, *args, **kwargs)
waypoint_info = kwargs.get('waypoint_info')
        if waypoint_info is not None:
            self._waypoint_generator = _WaypointGeneratorRallyable(waypoint_info)
        else:
            # Control flow reconstructed from decompiler output: resolve the
            # target first, then fall back to the tuned constraint when no
            # explicit generator was handed in.
            if aop.target is None and self.target_type is TargetType.ACTOR:
                target = self.sim
            else:
                target = aop.target
            if waypoint_generator is None:
                self._waypoint_generator = self.waypoint_constraint(self.context, target)
            else:
                self._waypoint_generator = waypoint_generator
self._routing_infos = None
self._goal_size = 0.0
self.register_on_finishing_callback(self._clean_up_waypoint_generator)
@classmethod
def _test(cls, target, context, **interaction_parameters):
sim = context.sim
routing_master = sim.routing_master
if routing_master is not None:
if sim.parent is not routing_master:
return TestResult(False, '{} cannot run Waypoint interactions because they are following {}', sim, routing_master)
return (super()._test)(target, context, **interaction_parameters)
def _get_starting_constraint(self, *args, **kwargs):
constraint = ANYWHERE
target = self.target
if self._waypoint_generator.is_for_vehicle and target is not None and target.vehicle_component is not None:
            # Reconstructed from the decompiler's "A or B" idiom: the circle
            # constraint only applies when the vehicle is placed in the world.
            if not target.is_in_inventory():
                constraint = Circle((target.position), (target.vehicle_component.minimum_route_distance), routing_surface=(target.routing_surface))
constraint = constraint.intersect(self._waypoint_generator.get_water_constraint())
else:
constraint = self._waypoint_generator.get_start_constraint()
posture_constraint = self._waypoint_generator.get_posture_constraint()
if posture_constraint is not None:
constraint = constraint.intersect(posture_constraint)
return constraint
@flexmethod
def _constraint_gen(cls, inst, *args, **kwargs):
inst_or_cls = inst if inst is not None else cls
if inst is not None:
constraint = (inst._get_starting_constraint)(*args, **kwargs)
yield constraint
yield from (super(__class__, inst_or_cls)._constraint_gen)(*args, **kwargs)
def cancel(self, *args, **kwargs):
for sim_primitive in list(self.sim.primitives):
if isinstance(sim_primitive, FollowPath):
sim_primitive.detach()
return (super().cancel)(*args, **kwargs)
def _clean_up_waypoint_generator(self, _):
self._waypoint_generator.clean_up()
def _get_goals_for_constraint(self, constraint, routing_agent):
goals = []
handles = constraint.get_connectivity_handles(routing_agent)
for handle in handles:
goals.extend(handle.get_goals(always_reject_invalid_goals=True))
return goals
def _show_route_fail_balloon(self):
balloon_tuning = self.waypoint_route_fail_balloon
        if balloon_tuning is None:
            return
        # Reconstructed guard: only user-directed interactions surface the
        # route-failure balloon.
        if not self.is_user_directed:
            return
balloon_requests = balloon_tuning(self)
if balloon_requests:
chosen_balloon = random.random.choice(balloon_requests)
if chosen_balloon is not None:
chosen_balloon.distribute()
def _run_interaction_gen(self, timeline):
all_sims = self.required_sims()
if not all_sims:
return
self._routing_infos = []
routing_agent = self.sim
for sim in all_sims:
routing_context = sim.routing_context
routing_agent = sim
vehicle = None if not sim.posture.is_vehicle else sim.parent
if vehicle is not None:
if vehicle.vehicle_component is not None:
routing_agent = vehicle
routing_context = vehicle.routing_component.pathplan_context
self._routing_infos.append((routing_agent, routing_context))
waypoints = []
default_agent_radius = None
        # Reconstructed from decompiler output: the radius override is applied
        # before goal generation and restored afterwards; an empty waypoint
        # list aborts the interaction.
        if self.waypoint_override_agent_radius is not None and routing_agent.routing_component is not None:
            default_agent_radius = routing_agent.routing_component._pathplan_context.agent_radius
            routing_agent.routing_component._pathplan_context.agent_radius = self.waypoint_override_agent_radius
        try:
            for constraint in self._waypoint_generator.get_waypoint_constraints_gen(routing_agent, self.waypoint_count):
                goals = self._get_goals_for_constraint(constraint, routing_agent)
                if not goals:
                    continue
                if self.waypoint_randomize_orientation:
                    for goal in goals:
                        goal.orientation = sims4.math.angle_to_yaw_quaternion(random.uniform(0.0, sims4.math.TWO_PI))
                waypoints.append(goals)
        finally:
            if default_agent_radius is not None:
                routing_agent.routing_component._pathplan_context.agent_radius = default_agent_radius
        if not waypoints:
            return False
self._goal_size = max((info[0].routing_component.get_routing_context().agent_goal_radius for info in self._routing_infos))
self._goal_size *= self._goal_size
if self.staging:
for route_waypoints in itertools.cycle(self.waypoint_stitching(waypoints, self._waypoint_generator.loops)):
result = yield from self._do_route_to_constraint_gen(route_waypoints, timeline)
if not result:
return result
else:
for route_waypoints in self.waypoint_stitching(waypoints, self._waypoint_generator.loops):
result = yield from self._do_route_to_constraint_gen(route_waypoints, timeline)
return result
return True
def _do_route_to_constraint_gen(self, waypoints, timeline):
if self.is_finishing:
return False
plan_primitives = []
for i, routing_info in enumerate(self._routing_infos):
routing_agent = routing_info[0]
routing_context = routing_info[1]
route = routing.Route((routing_agent.routing_location), (waypoints[(-1)]), waypoints=(waypoints[:-1]), routing_context=routing_context)
plan_primitive = PlanRoute(route, routing_agent, interaction=self)
result = yield from element_utils.run_child(timeline, plan_primitive)
            if not result:
                self._show_route_fail_balloon()
                return False
            if not (plan_primitive.path.nodes and plan_primitive.path.nodes.plan_success):
                self._show_route_fail_balloon()
                return False
plan_primitive.path.blended_orientation = self.waypoint_randomize_orientation
plan_primitives.append(plan_primitive)
if i == len(self._routing_infos) - 1:
continue
for node in plan_primitive.path.nodes:
position = Vector3(*node.position)
for goal in itertools.chain.from_iterable(waypoints):
if goal.routing_surface_id != node.routing_surface_id:
continue
dist_sq = (Vector3(*goal.position) - position).magnitude_2d_squared()
if dist_sq < self._goal_size:
goal.cost = routing.get_default_obstacle_cost()
route_primitives = []
track_override = None
mask_override = None
if self.waypoint_clear_locomotion_mask:
mask_override = 0
track_override = 9999
for plan_primitive in plan_primitives:
sequence = get_route_element_for_path((plan_primitive.sim), (plan_primitive.path), interaction=self,
force_follow_path=True,
track_override=track_override,
mask_override=mask_override)
walkstyle_request = self.waypoint_walk_style(plan_primitive.sim)
sequence = walkstyle_request(sequence=sequence)
route_primitives.append(sequence)
result = yield from element_utils.run_child(timeline, do_all(*route_primitives))
return result
@classmethod
def get_rallyable_aops_gen(cls, target, context, **kwargs):
key = 'waypoint_info'
if key not in kwargs:
waypoint_generator = cls.waypoint_constraint(context, target)
kwargs[key] = waypoint_generator
yield from (super().get_rallyable_aops_gen)(target, context, rally_constraint=waypoint_generator.get_start_constraint(), **kwargs)
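

# --- Added illustration (not part of the decompiled class) ---
# An engine-free sketch of the goal-penalty step in _do_route_to_constraint_gen:
# any waypoint goal lying within the squared goal radius of a node on an
# already-planned path is made expensive, so later agents avoid stopping on it.
# The dict-based "goal" records and 2D tuples are hypothetical stand-ins for
# the routing types used above.
def penalize_goals_near_path(path_nodes, goals, goal_size_sq, obstacle_cost=10 ** 6):
    for (node_x, node_z) in path_nodes:
        for goal in goals:
            (goal_x, goal_z) = goal['pos']
            if (goal_x - node_x) ** 2 + (goal_z - node_z) ** 2 < goal_size_sq:
                goal['cost'] = obstacle_cost


# Example: a goal sitting on the planned path gets the obstacle cost.
# penalize_goals_near_path([(1.2, 0.9)], [{'pos': (1.0, 1.0), 'cost': 0}], 0.25)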
f72003c0391c7dabf487c7375b1a310ce99ae57b | 2,718 | py | Python | test/test_mct.py | pistoia/qiskit-aqua | ["Apache-2.0"]

# -*- coding: utf-8 -*-
# Copyright 2018 IBM.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# =============================================================================
import unittest
import itertools
import numpy as np
from parameterized import parameterized
from qiskit import QuantumCircuit, QuantumRegister
from qiskit import execute as q_execute
from qiskit.quantum_info import state_fidelity
from qiskit.aqua import get_aer_backend
from test.common import QiskitAquaTestCase
num_controls = [i + 1 for i in range(7)]
modes = ['basic', 'advanced', 'noancilla']
class TestMCT(QiskitAquaTestCase):
@parameterized.expand(
itertools.product(num_controls, modes)
)
def test_mct(self, num_controls, mode):
c = QuantumRegister(num_controls, name='c')
o = QuantumRegister(1, name='o')
subsets = [tuple(range(i)) for i in range(num_controls + 1)]
for subset in subsets:
qc = QuantumCircuit(o, c)
if mode == 'basic':
if num_controls <= 2:
num_ancillae = 0
else:
num_ancillae = num_controls - 2
elif mode == 'noancilla':
num_ancillae = 0
else:
if num_controls <= 4:
num_ancillae = 0
else:
num_ancillae = 1
if num_ancillae > 0:
a = QuantumRegister(num_ancillae, name='a')
qc.add_register(a)
for idx in subset:
qc.x(c[idx])
qc.mct(
[c[i] for i in range(num_controls)],
o[0],
[a[i] for i in range(num_ancillae)],
mode=mode
)
for idx in subset:
qc.x(c[idx])
vec = np.asarray(q_execute(qc, get_aer_backend(
'statevector_simulator')).result().get_statevector(qc, decimals=16))
vec_o = [0, 1] if len(subset) == num_controls else [1, 0]
f = state_fidelity(vec, np.array(vec_o + [0] * (2 ** (num_controls + num_ancillae + 1) - 2)))
self.assertAlmostEqual(f, 1)
if __name__ == '__main__':
unittest.main()
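

# --- Added illustration (not part of the original tests) ---
# The mode-dependent ancilla count exercised by test_mct, factored into one
# helper for clarity. This mirrors the branches above; it is a sketch, not a
# qiskit API.
def num_ancillae_for(mode, num_controls):
    if mode == 'basic':
        return 0 if num_controls <= 2 else num_controls - 2
    if mode == 'noancilla':
        return 0
    # 'advanced' mode: a single ancilla suffices beyond four controls
    return 0 if num_controls <= 4 else 1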
f72005233f11455f1e95662ff8e8514dc68a23af | 3,738 | py | Python | letsencrypt/configuration.py | meehow/letsencrypt | ["Apache-2.0"]

"""Let's Encrypt user-supplied configuration."""
import os
import urlparse
import zope.interface
from acme import challenges
from letsencrypt import constants
from letsencrypt import errors
from letsencrypt import interfaces
class NamespaceConfig(object):
"""Configuration wrapper around :class:`argparse.Namespace`.
For more documentation, including available attributes, please see
:class:`letsencrypt.interfaces.IConfig`. However, note that
the following attributes are dynamically resolved using
:attr:`~letsencrypt.interfaces.IConfig.work_dir` and relative
paths defined in :py:mod:`letsencrypt.constants`:
- `accounts_dir`
- `csr_dir`
- `in_progress_dir`
- `key_dir`
- `renewer_config_file`
- `temp_checkpoint_dir`
:ivar namespace: Namespace typically produced by
:meth:`argparse.ArgumentParser.parse_args`.
:type namespace: :class:`argparse.Namespace`
"""
zope.interface.implements(interfaces.IConfig)
def __init__(self, namespace):
self.namespace = namespace
if self.simple_http_port == self.dvsni_port:
raise errors.Error(
"Trying to run SimpleHTTP and DVSNI "
"on the same port ({0})".format(self.dvsni_port))
def __getattr__(self, name):
return getattr(self.namespace, name)
@property
def server_path(self):
"""File path based on ``server``."""
parsed = urlparse.urlparse(self.namespace.server)
return (parsed.netloc + parsed.path).replace('/', os.path.sep)
@property
def accounts_dir(self): # pylint: disable=missing-docstring
return os.path.join(
self.namespace.config_dir, constants.ACCOUNTS_DIR, self.server_path)
@property
def backup_dir(self): # pylint: disable=missing-docstring
return os.path.join(self.namespace.work_dir, constants.BACKUP_DIR)
@property
def csr_dir(self): # pylint: disable=missing-docstring
return os.path.join(self.namespace.config_dir, constants.CSR_DIR)
@property
def in_progress_dir(self): # pylint: disable=missing-docstring
return os.path.join(self.namespace.work_dir, constants.IN_PROGRESS_DIR)
@property
def key_dir(self): # pylint: disable=missing-docstring
return os.path.join(self.namespace.config_dir, constants.KEY_DIR)
@property
def temp_checkpoint_dir(self): # pylint: disable=missing-docstring
return os.path.join(
self.namespace.work_dir, constants.TEMP_CHECKPOINT_DIR)
@property
def simple_http_port(self): # pylint: disable=missing-docstring
if self.namespace.simple_http_port is not None:
return self.namespace.simple_http_port
else:
return challenges.SimpleHTTPResponse.PORT
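

# --- Added illustration (not part of the original module) ---
# Both wrappers in this module rely on the same __getattr__ delegation trick:
# any attribute not defined on the wrapper falls through to the argparse
# namespace. A standalone sketch with hypothetical names:
class _NamespaceDelegate(object):
    def __init__(self, namespace):
        self.namespace = namespace

    def __getattr__(self, name):
        # only invoked when normal attribute lookup fails on the wrapper
        return getattr(self.namespace, name)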
class RenewerConfiguration(object):
"""Configuration wrapper for renewer."""
def __init__(self, namespace):
self.namespace = namespace
def __getattr__(self, name):
return getattr(self.namespace, name)
@property
def archive_dir(self): # pylint: disable=missing-docstring
return os.path.join(self.namespace.config_dir, constants.ARCHIVE_DIR)
@property
def live_dir(self): # pylint: disable=missing-docstring
return os.path.join(self.namespace.config_dir, constants.LIVE_DIR)
@property
def renewal_configs_dir(self): # pylint: disable=missing-docstring
return os.path.join(
self.namespace.config_dir, constants.RENEWAL_CONFIGS_DIR)
@property
def renewer_config_file(self): # pylint: disable=missing-docstring
return os.path.join(
self.namespace.config_dir, constants.RENEWER_CONFIG_FILENAME)
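

# --- Added illustration (not part of the original module) ---
# How NamespaceConfig.server_path flattens an ACME server URL into a
# directory fragment; the URL below is a hypothetical example.
def _demo_server_path():
    parsed = urlparse.urlparse('https://acme.example.org/directory')
    # on POSIX this yields 'acme.example.org/directory'
    return (parsed.netloc + parsed.path).replace('/', os.path.sep)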
f72005518b34101337fb593f9f38ae1ba0642602 | 8,708 | py | Python | src/modules/display_tickets.py | dat-adi/eisen-tickets | ["MIT"]

#!/usr/bin/python
# -*- coding: utf-8 -*-
# GUI import
import tkinter as tk
# Styling the GUI
from tkinter import ttk
# Database connection
from modules.create_db_components import create_connection
# Deletes the ticket from the database
from modules.removing_tickets import delete_ticket
"""This module is used to display all the tickets present in the
Database."""
# Owned
__author__ = "Datta Adithya"
__credits__ = ["Datta Adithya"]
__license__ = "MIT"
__maintainer__ = "Datta Adithya"
__email__ = "dat.adithya@gmail.com"
# fonts for the project
text_font = ("Helvetica", 12)
# functions to retrieve all of the records from the database
def do_cat(conn):
cur = conn.cursor()
cur.execute('SELECT * FROM tickets WHERE category = "DO"')
conn.commit()
rows = cur.fetchall()
return rows
def dec_cat(conn):
cur = conn.cursor()
cur.execute('SELECT * FROM tickets WHERE category = "DEC"')
conn.commit()
rows = cur.fetchall()
return rows
def dlg_cat(conn):
cur = conn.cursor()
cur.execute('SELECT * FROM tickets WHERE category = "DLG"')
conn.commit()
rows = cur.fetchall()
return rows
def del_cat(conn):
cur = conn.cursor()
cur.execute('SELECT * FROM tickets WHERE category = "DEL"')
conn.commit()
rows = cur.fetchall()
return rows
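

# --- Added illustration (not part of the original module) ---
# The four *_cat helpers above differ only in the category literal; an
# equivalent parameterized version (same table schema assumed) uses a bound
# parameter instead of embedding the value in the SQL string. A plain SELECT
# also needs no commit().
def fetch_by_category(conn, category):
    cur = conn.cursor()
    cur.execute('SELECT * FROM tickets WHERE category = ?', (category,))
    return cur.fetchall()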
# GUI for the project
class windows(tk.Tk):
def __init__(self, conn, *args, **kwargs):
tk.Tk.__init__(self, *args, **kwargs)
self.wm_title("Eisen's Tickets")
self.iconbitmap(self, default="../../assets/logo.ico")
self.conn = conn
container = tk.Frame(self, height=400, width=600)
container.pack(side="top", fill="both", expand=True)
container.grid_rowconfigure(0, weight=1)
container.grid_columnconfigure(0, weight=1)
self.frames = {}
for F in (MainPage, EisenDisplay, DoPage, DecPage, DlgPage, DelPage):
frame = F(container, self, self.conn)
self.frames[F] = frame
frame.grid(row=0, column=0, sticky="nsew")
self.show_frame(MainPage)
def show_frame(self, cont):
frame = self.frames[cont]
frame.tkraise()
def ticket_display(self, ticket):
new_window = tk.Toplevel(self)
ticket_id = ticket[0]
timestamp = ticket[1]
category = ticket[2]
task = ticket[3]
more_info = ticket[4]
fields = ["Ticket ID", "Timestamp", "Category", "Task", "More Info"]
details = [ticket_id, timestamp, category, task, more_info]
r = 0
for field in fields:
tk.Label(new_window, text=field, relief=tk.RIDGE, width=15).grid(
row=r, column=0
)
tk.Label(new_window, text=details[r], relief=tk.SUNKEN, width=100).grid(
row=r, column=1
)
r += 1
tk.Button(
new_window,
relief=tk.RIDGE,
text="Delete Ticket",
background="#FF3333",
command=lambda: delete_ticket(self.conn, ticket_id),
).grid(row=r, column=0, columnspan=2, sticky="ew")
# Pages made for navigation through the different categories
class MainPage(tk.Frame):
def __init__(self, parent, controller, conn):
tk.Frame.__init__(self, parent)
self.conn = conn
label = tk.Label(self, text="Start Page", font=text_font)
label.pack(padx=10, pady=10)
eisen_display_button = ttk.Button(
self,
text="Display Selection",
command=lambda: controller.show_frame(EisenDisplay),
)
eisen_display_button.pack(side="bottom", fill=tk.X)
class EisenDisplay(tk.Frame):
def __init__(self, parent, controller, conn):
tk.Frame.__init__(self, parent)
self.conn = conn
label = tk.Label(self, text="Eisen Display", font=text_font)
label.pack(padx=10, pady=10)
main_button = ttk.Button(
self,
text="Return to main page",
command=lambda: controller.show_frame(MainPage),
)
main_button.pack(side="bottom", fill=tk.X)
del_button = ttk.Button(
self, text="Eisen Delete", command=lambda: controller.show_frame(DelPage)
)
del_button.pack(side="bottom", fill=tk.X)
dlg_button = ttk.Button(
self, text="Eisen Delegate", command=lambda: controller.show_frame(DlgPage)
)
dlg_button.pack(side="bottom", fill=tk.X)
dec_button = ttk.Button(
self, text="Eisen Decide", command=lambda: controller.show_frame(DecPage)
)
dec_button.pack(side="bottom", fill=tk.X)
do_button = ttk.Button(
self, text="Eisen Do", command=lambda: controller.show_frame(DoPage)
)
do_button.pack(side="bottom", fill=tk.X)
class DoPage(tk.Frame):
def __init__(self, parent, controller, conn):
tk.Frame.__init__(self, parent)
label = tk.Label(self, text="Eisen's Do Page", font=text_font)
label.pack(padx=10, pady=10)
do_rows = do_cat(conn)
for element in do_rows:
tk.Button(
self,
text=element[3],
fg="black",
command=lambda ele=element: controller.ticket_display(ele),
).pack(fill=tk.X)
eisen_display_button = ttk.Button(
self,
text="Display Selection",
command=lambda: controller.show_frame(EisenDisplay),
)
eisen_display_button.pack(side="bottom", fill=tk.X)
dec_button = ttk.Button(
self, text="Eisen Decide", command=lambda: controller.show_frame(DecPage)
)
dec_button.pack(side="bottom", fill=tk.X)
class DecPage(tk.Frame):
def __init__(self, parent, controller, conn):
tk.Frame.__init__(self, parent)
label = tk.Label(self, text="Eisen's Decide Page", font=text_font)
label.pack(padx=10, pady=10)
dec_rows = dec_cat(conn)
for element in dec_rows:
tk.Button(
self,
text=element[3],
fg="black",
command=lambda ele=element: controller.ticket_display(ele),
).pack(fill=tk.X)
eisen_display_button = ttk.Button(
self,
text="Display Selection",
command=lambda: controller.show_frame(EisenDisplay),
)
eisen_display_button.pack(side="bottom", fill=tk.X)
dlg_button = ttk.Button(
self, text="Eisen Delegate", command=lambda: controller.show_frame(DlgPage)
)
dlg_button.pack(side="bottom", fill=tk.X)
class DlgPage(tk.Frame):
def __init__(self, parent, controller, conn):
tk.Frame.__init__(self, parent)
label = tk.Label(self, text="Eisen's Delegate Page", font=text_font)
label.pack(padx=10, pady=10)
dlg_rows = dlg_cat(conn)
for element in dlg_rows:
tk.Button(
self,
text=element[3],
fg="black",
command=lambda ele=element: controller.ticket_display(ele),
).pack(fill=tk.X)
eisen_display_button = ttk.Button(
self,
text="Display Selection",
command=lambda: controller.show_frame(EisenDisplay),
)
eisen_display_button.pack(side="bottom", fill=tk.X)
del_button = ttk.Button(
self, text="Eisen Delete", command=lambda: controller.show_frame(DelPage)
)
del_button.pack(side="bottom", fill=tk.X)
class DelPage(tk.Frame):
def __init__(self, parent, controller, conn):
tk.Frame.__init__(self, parent)
label = tk.Label(self, text="Eisen's Delete Page", font=text_font)
label.pack(padx=10, pady=10)
del_rows = del_cat(conn)
for element in del_rows:
tk.Button(
self,
text=element[3],
fg="black",
command=lambda ele=element: controller.ticket_display(ele),
).pack(fill=tk.X)
eisen_display_button = ttk.Button(
self,
text="Display Selection",
command=lambda: controller.show_frame(EisenDisplay),
)
eisen_display_button.pack(side="bottom", fill=tk.X)
do_button = ttk.Button(
self, text="Eisen Do", command=lambda: controller.show_frame(DoPage)
)
do_button.pack(side="bottom", fill=tk.X)
if __name__ == "__main__":
connection = create_connection(r"D:\eisen-tickets\assets\tickets.db")
four_windows = windows(connection)
four_windows.mainloop()
f720055934c6413aad908cb155527edb52b40062 | 1,116 | py | Python | root/scripts/includes/python_logger.py | DragonCrafted87/docker-alpine-base | ["MIT"]

#!/usr/bin/python3
# -*- coding: utf-8 -*-
from logging import DEBUG
from logging import INFO
from logging import Formatter
from logging import StreamHandler
from logging import getLogger
from sys import stderr
from sys import stdout
class LogLevelFilter:
def __init__(self, level):
self.__level = level
def filter(self, log_record):
return log_record.levelno == self.__level
def create_logger(name=None):
# create logger
log = getLogger(name)
log.setLevel(DEBUG)
# create formatter and add it to the handlers
log_format = Formatter("%(asctime)s - %(name)s - %(levelname)s - %(message)s")
    # stdout handler for INFO-level (and higher) records
info_handler = StreamHandler(stdout)
info_handler.setLevel(INFO)
info_handler.setFormatter(log_format)
log.addHandler(info_handler)
    # stderr handler that emits only DEBUG records (enforced by the filter below)
debug_handler = StreamHandler(stderr)
debug_handler.setLevel(DEBUG)
debug_handler.setFormatter(log_format)
debug_handler.addFilter(LogLevelFilter(DEBUG))
log.addHandler(debug_handler)
return log
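

# --- Added usage example (not part of the original module) ---
# INFO-and-above records reach stdout; DEBUG records reach stderr only,
# because of the LogLevelFilter attached above.
if __name__ == '__main__':
    log = create_logger('demo')
    log.info('visible on stdout')
    log.debug('visible on stderr only')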
f720066904260a4d87d72e6f5790ddf77d44d217 | 101 | py | Python | codes_auto/1603.running-sum-of-1d-array.py | smartmark-pro/leetcode_record | ["MIT"]

#
# @lc app=leetcode.cn id=1603 lang=python3
#
# [1603] running-sum-of-1d-array
#
None
# @lc code=end
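
# --- Added sketch (the stub above is empty) ---
# A minimal running-sum (prefix-sum) implementation in the usual LeetCode
# shape; the class and method names follow the site's convention for this
# problem and are otherwise assumptions.
class Solution:
    def runningSum(self, nums):
        total, out = 0, []
        for x in nums:
            total += x
            out.append(total)
        return out

# Example: Solution().runningSum([1, 2, 3, 4]) -> [1, 3, 6, 10]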
f72006ab8586955d57cb0232db8f0b952693aca0 | 1,522 | py | Python | setup.py | Rishk/alpha_vantage | ["MIT"]

from setuptools import setup, find_packages
from codecs import open
from os import path
here = path.abspath(path.dirname(__file__))
try:
with open(path.join(here, 'README.rst'), encoding='utf-8') as f:
long_description = f.read()
except IOError:
long_description = 'Python module to get stock data from the Alpha Vantage Api'
setup(
name='alpha_vantage',
version='2.0.0',
author='Romel J. Torres',
author_email='romel.torres@gmail.com',
license='MIT',
description='Python module to get stock data from the Alpha Vantage Api',
long_description=long_description,
classifiers=[
'Development Status :: 4 - Beta',
'Intended Audience :: Developers',
'Topic :: Office/Business :: Financial :: Investment',
'License :: OSI Approved :: MIT License',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
],
url='https://github.com/RomelTorres/alpha_vantage',
install_requires=[
'requests',
'simplejson'
],
    tests_require=[
'nose',
'requests_mock'
],
    extras_require={
'pandas': ['pandas'],
},
keywords=['stocks', 'market', 'finance', 'alpha_vantage', 'quotes',
'shares'],
packages=find_packages(
exclude=['helpers', 'test_alpha_vantage', 'images']),
package_data={
'alpha_vantage': [],
}
)
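
# --- Added usage note (not part of the original setup.py) ---
# With extras_require declared above, the optional pandas dependency can be
# installed alongside the package, e.g.:
#   pip install alpha_vantage[pandas]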
f720075710121fe0930a9588a836db8847406e4b | 18,959 | py | Python | tools/rnahmm/algbioi/core/analysis16s.py | fplaza/CAMISIM | ["Apache-2.0"]

#!/usr/bin/env python
"""
Copyright (C) 2014 Ivan Gregor
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
Note that we could have written some parts of this code in a nicer way,
but didn't have time. Be careful when reusing the source code.
"""
import os
import sys
import re
import subprocess
from Bio import SeqIO
from algbioi.com import common, csv
from scripts.configparserwrapper import ConfigParserWrapper
class RRNA16S():
"""
    A class that handles the rRNA 16S analysis.
"""
def __init__(self, config, s16Database, workingDir):
assert isinstance(config, ConfigParserWrapper)
self._config = config
self._workingDir = workingDir
# self._refDir = os.path.normpath(s16Database)
# self._refDict = csv.getMappingTuple(os.path.join(self._refDir, 'content.csv'), (0,1), (2,), '\t')
def runHMM(self, inputFastaFile, outLog=None, hmmer=3, moltypes="ssu", kingdoms="arc,bac"):
"""
        Run the hidden Markov model to get regions in the input sequences where the 16S and 23S genes are located.
"""
if hmmer == 2:
self.runHMM_2(inputFastaFile, outLog, moltypes=moltypes, kingdoms=kingdoms)
elif hmmer == 3:
self.runHMM_3(inputFastaFile, outLog, moltypes=moltypes, kingdoms=kingdoms)
def runHMM_2(self, inputFastaFile, outLog=None, moltypes="ssu", kingdoms="arc,bac"):
hmmInstallDir = self._config.get_value("MarkerGeneExtraction", 'rnaHmmInstallDir', is_path=True)
rnammer_executable = self._config.get_value("MarkerGeneExtraction", 'rnammer', is_path=True)
assert isinstance(hmmInstallDir, basestring)
assert isinstance(rnammer_executable, basestring)
out_file_name_prefix = os.path.join(self._workingDir, os.path.basename(inputFastaFile))
hmmer_args = [
"-i '{}'".format(inputFastaFile),
"-o '{}'".format(out_file_name_prefix),
"-r '{}'".format(rnammer_executable),
"-m '{}'".format(moltypes),
"-k '{}'".format(kingdoms),
"-T '{}'".format(self._workingDir)
]
cmd = "{script} {args}".format(
script=os.path.join(hmmInstallDir, 'rna_hmm2.py'),
args=" ".join(hmmer_args),
)
if os.name != 'posix':
print 'Cannot run HMM since your system is not "posix" but', str('"' + os.name + '"'), '\n', cmd
return
if outLog is not None:
stdoutLog = open(outLog, 'a')
else:
stdoutLog = subprocess.PIPE # stdout=subprocess.STDOUT
hmmProc = subprocess.Popen(cmd, shell=True, bufsize=-1, cwd=hmmInstallDir, stdout=stdoutLog)
print 'run cmd:', cmd
hmmProc.wait()
if outLog is not None:
stdoutLog.close()
print 'HMM return code:', hmmProc.returncode
if hmmProc.returncode != 0:
raise Exception("Command returned with non-zero %s status: %s" % (hmmProc.returncode, cmd))
def runHMM_3(self, inputFastaFile, outLog=None, moltypes="ssu", kingdoms="arc,bac"):
hmmInstallDir = self._config.get_value("MarkerGeneExtraction", 'rnaHmmInstallDir', is_path=True)
hmmerBinDir = self._config.get_value("MarkerGeneExtraction", 'hmmerBinDir', is_path=True)
assert isinstance(hmmInstallDir, basestring)
assert isinstance(hmmerBinDir, basestring)
regionsFile = os.path.join(self._workingDir, str(os.path.basename(inputFastaFile) + '.gff'))
hmmer_args = [
"-i '{}'".format(inputFastaFile),
"-o '{}'".format(regionsFile),
"-r '{}'".format(os.path.join(hmmerBinDir, "hmmsearch")),
"-m '{}'".format(moltypes),
"-k '{}'".format(kingdoms)
]
cmd = "{wrapper} {args}".format(
hmmerBinDir=hmmerBinDir,
wrapper=os.path.join(hmmInstallDir, 'rna_hmm3.py'),
args=" ".join(hmmer_args)
)
if os.name != 'posix':
print 'Cannot run HMM since your system is not "posix" but', str('"' + os.name + '"'), '\n', cmd
return
if outLog is not None:
stdoutLog = open(outLog, 'a')
else:
stdoutLog = subprocess.PIPE # stdout=subprocess.STDOUT
hmmProc = subprocess.Popen(cmd, shell=True, bufsize=-1, cwd=hmmInstallDir, stdout=stdoutLog)
print 'run cmd:', cmd
hmmProc.wait()
if outLog is not None:
stdoutLog.close()
print 'HMM return code:', hmmProc.returncode
if hmmProc.returncode != 0:
raise Exception("Command returned with non-zero %s status: %s" % (hmmProc.returncode, cmd))
handle = open(inputFastaFile, "rU")
record_dict = SeqIO.to_dict(SeqIO.parse(handle, "fasta"))
handle.close()
# trunkoutputfilename = inputFastaFile.split( "/" )[-1]
trunkoutputfilename = os.path.join(self._workingDir, os.path.basename(inputFastaFile))
# parse results file line by line
for line in open(regionsFile, "rU"):
if line.startswith("#"):
continue
line = line.split()
ident = line[0]
start = int(line[3])
stop = int(line[4])
strand = line[6]
gene = line[8]
seq = record_dict[ident].seq
if strand == "+":
subseq = seq[start-1:stop]
elif strand == "-":
subseq = seq[start-1:stop].reverse_complement()
else:
sys.stderr.write(" analysis16s: invalid strand symbol")
exit(1)
outfile = open(trunkoutputfilename + "." + gene + ".fna", "a")
print >> outfile, ">%s_%i_%i_%s" % (ident, start, stop, strand)
print >> outfile, subseq
outfile.close()
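        # Hedged worked example (made-up coordinates): a whitespace-separated
        # GFF row such as
        #   125_528  rna_hmm3  rRNA  10  900  0.0  +  .  16S_rRNA
        # appends seq[9:900] of record "125_528" to <prefix>.16S_rRNA.fna
        # under the header ">125_528_10_900_+".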
def classify16S(self, inputFastaFile, outLog=None):
"""
Run mothur to classify the sequences.
"""
try:
self._classify(16, inputFastaFile, outLog)
except Exception:
print('Mothur was not able to classify 16S sequences.')
def classify23S(self, inputFastaFile, outLog=None):
try:
self._classify(23, inputFastaFile, outLog)
except Exception:
print('Mothur was not able to classify 23S sequences.')
def classify5S(self, inputFastaFile, outLog=None):
try:
self._classify(5, inputFastaFile, outLog)
except Exception:
print('Mothur was not able to classify 5S sequences.')
def _classify(self, mode, inputFastaFile, outLog=None):
mothur = os.path.join(os.path.normpath(self._config.get('mothurInstallDir')), 'mothur')
if mode == 16:
extractedRegionsFasta = os.path.join(self._workingDir, str(os.path.basename(inputFastaFile) + '.16S_rRNA.fna'))
taxonomyFile = os.path.join(self._refDir, os.path.normpath(self._refDict[('16S_rRNA','taxonomyDNA')][0]))
templateFile = os.path.join(self._refDir, os.path.normpath(self._refDict[('16S_rRNA','templateDNA')][0]))
#mothurPredFileName = str(extractedRegionsFasta[0:extractedRegionsFasta.rindex('.')] + '.taxonomy')
mothurPredFileName = common.getMothurOutputFilePath(extractedRegionsFasta, taxonomyFile)
predFileName = os.path.join(self._workingDir, str(os.path.basename(inputFastaFile) + '.16P'))
#extractedRegionsFasta = str(inputFastaFile + '.16S_rRNA.fna')
#templateFile = os.path.normpath(self._configRRNA16S.get('mothurClassifyParam16STemplate'))
#taxonomyFile = os.path.normpath(self._configRRNA16S.get('mothurClassifyParam16STaxonomy'))
#mothurPredFileName = str(inputFastaFile + '.16S_rRNA.bacteria+archaea.taxonomy')
#mothurPredFileName = os.path.join(self._workingDir, str(os.path.basename(inputFastaFile) + '.16S_rRNA.bacteria+archaea.taxonomy'))
#mothurPredFileName = os.path.join(self._workingDir, str(os.path.basename(inputFastaFile) + '.16S_rRNA.fasta.taxonomy'))
#predFileName = str(inputFastaFile + '.16P')
elif mode == 23:
extractedRegionsFasta = os.path.join(self._workingDir, str(os.path.basename(inputFastaFile) + '.23S_rRNA.fna'))
taxonomyFile = os.path.join(self._refDir, os.path.normpath(self._refDict[('23S_rRNA','taxonomyDNA')][0]))
templateFile = os.path.join(self._refDir, os.path.normpath(self._refDict[('23S_rRNA','templateDNA')][0]))
#mothurPredFileName = str(extractedRegionsFasta[0:extractedRegionsFasta.rindex('.')] + '.taxonomy')
mothurPredFileName = common.getMothurOutputFilePath(extractedRegionsFasta, taxonomyFile)
predFileName = os.path.join(self._workingDir, str(os.path.basename(inputFastaFile) + '.23P'))
#extractedRegionsFasta = str(inputFastaFile + '.23S_rRNA.fna')
#templateFile = os.path.normpath(self._configRRNA16S.get('mothurClassifyParam23STemplate'))
#taxonomyFile = os.path.normpath(self._configRRNA16S.get('mothurClassifyParam23STaxonomy'))
#mothurPredFileName = str(inputFastaFile + '.23S_rRNA.bacteria+archaea.taxonomy')
#mothurPredFileName = os.path.join(self._workingDir, str(os.path.basename(inputFastaFile) + '.23S_rRNA.bacteria+archaea.taxonomy'))
#mothurPredFileName = os.path.join(self._workingDir, str(os.path.basename(inputFastaFile) + '.23S_rRNA.fasta.taxonomy'))
#predFileName = str(inputFastaFile + '.23P')
elif mode == 5:
#extractedRegionsFasta = str(inputFastaFile + '.5S_rRNA.fna')
extractedRegionsFasta = os.path.join(self._workingDir, str(os.path.basename(inputFastaFile) + '.5S_rRNA.fna'))
taxonomyFile = os.path.join(self._refDir, os.path.normpath(self._refDict[('5S_rRNA','taxonomyDNA')][0]))
templateFile = os.path.join(self._refDir, os.path.normpath(self._refDict[('5S_rRNA','templateDNA')][0]))
mothurPredFileName = common.getMothurOutputFilePath(extractedRegionsFasta, taxonomyFile)
predFileName = os.path.join(self._workingDir, str(os.path.basename(inputFastaFile) + '.5P'))
#templateFile = os.path.normpath(self._configRRNA16S.get('mothurClassifyParam5STemplate'))
#taxonomyFile = os.path.normpath(self._configRRNA16S.get('mothurClassifyParam5STaxonomy'))
#mothurPredFileName = os.path.join(self._workingDir,
# str(os.path.basename(inputFastaFile) + '.5S_rRNA.' + os.path.basename(taxonomyFile) + 'onomy'))#.taxonomy
#predFileName = str(inputFastaFile + '.5P')
else:
raise Exception('Wrong branch')
if not os.path.isfile(mothurPredFileName):
mothurPredFileName = common.getMothurOutputFilePath(extractedRegionsFasta, taxonomyFile, suffix='.bayesian.taxonomy')
param = self._config.get('mothurClassifyParamOther')
cmd = str('time ' + mothur + ' "#classify.seqs(fasta=' + extractedRegionsFasta + ', template=' + templateFile
+ ', taxonomy=' + taxonomyFile + ', ' + param + ')"')
if os.name == 'posix':
if outLog is not None:
stdoutLog = open(outLog, 'w')
else:
stdoutLog = subprocess.STDOUT
mothurProc = subprocess.Popen(cmd, shell=True, bufsize=-1, cwd=self._workingDir, stdout=stdoutLog)
print 'run cmd:', cmd
mothurProc.wait()
if outLog is not None:
stdoutLog.close()
print 'mothur return code:', mothurProc.returncode
if mothurProc.returncode != 0:
raise Exception("Command returned with non-zero %s status: %s" % (mothurProc.returncode, cmd))
else:
print 'Cannot run mothur since your system is not "posix" but', str('"' + os.name + '"'), '\n', cmd
#transform mothur prediction files to the tab separated files
self.mothurPredToTabSepPred(mothurPredFileName, predFileName)
def mothurPredToTabSepPred(self, mothurPredFileName, outPredFileName):
"""
        Transforms the mothur output prediction file (*.taxonomy) to the tab-separated prediction file: seqName tab ncbid tab weight tab tag.
"""
try:
fr = open(os.path.normpath(mothurPredFileName),'r')
except Exception:
sys.stderr.write("Cannot open file:" + mothurPredFileName + '\n')
raise
else:
try:
fw = open(os.path.normpath(outPredFileName), 'w')
lineCount = 0
for line in fr:
line = common.noNewLine(line)
try:
if re.match(r'^[0-9]+_[0-9]+_[0-9]+_[0-9]+.*', line):
name = re.sub(r'([0-9]+_[0-9]+)_[0-9]+_[0-9]+_[\+\-\t ]+.*', r'\1' , line)
tag = re.sub(r'[0-9]+_[0-9]+_([0-9]+_[0-9]+_[\+\-]+)[\t ]+.*', r'\1' , line)
placementList = re.sub(r'[0-9]+_[0-9]+_[0-9]+_[0-9]+_[\+\-\t ]+(.*)', r'\1' , line.replace('unclassified;', '')).rsplit(';')
if len(placementList) < 2:
continue
placement = placementList[-2]
try:
clade = int(re.sub('([0-9]+)\(.*', r'\1' , placement))
except ValueError:
continue
weight = float(re.sub('[0-9]+\(([0-9\.]+)\)', r'\1' , placement))
lineCount += 1
if lineCount == 1:
fw.write(name + '\t' + str(clade) + '\t' + str(weight) + '\t' + str(tag))
else:
fw.write('\n' + name + '\t' + str(clade) + '\t' + str(weight) + '\t' + str(tag))
except Exception:
sys.stderr.write('Cannot parse line: ' + str(lineCount) + 'in file: ' + mothurPredFileName + '\n')
raise
except Exception:
sys.stderr.write("Cannot write to file:" + outPredFileName + '\n')
raise
finally:
fw.close()
fr.close()
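    # Hedged worked example (made-up ids): a mothur taxonomy line such as
    #   "125_528_10_900_+\t2157(100);2172(95.0);unclassified;"
    # is parsed into name "125_528", tag "10_900_+" and the deepest classified
    # placement "2172(95.0)", producing the tab-separated output row
    #   "125_528\t2172\t95.0\t10_900_+"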
#
def setCandidatePlacementFrom16S23S5S(self, sequences, taxonomy, inputFastaFile):
        # set assigned by the 16S -> countList[0]
        # set assigned by the 23S -> countList[1]
        # set assigned by the 5S  -> countList[2]
        # union of all three assignments -> countList[3] (the variable below is
        # named intersectSet, but the `|` operator computes the set union)
#predFileName16S = str(inputFastaFile + '.16P')
predFileName16S = os.path.join(self._workingDir, str(os.path.basename(inputFastaFile) + '.16P'))
#predFileName23S = str(inputFastaFile + '.23P')
predFileName23S = os.path.join(self._workingDir, str(os.path.basename(inputFastaFile) + '.23P'))
#predFileName5S = str(inputFastaFile + '.5P')
predFileName5S = os.path.join(self._workingDir, str(os.path.basename(inputFastaFile) + '.5P'))
try:
seqIdSet16S = self._setCandidatePlacement(sequences, taxonomy, predFileName16S, '16S_rRNA')
except Exception:
seqIdSet16S = set()
print("Can't set candidate placement from 16S classification.")
try:
seqIdSet23S = self._setCandidatePlacement(sequences, taxonomy, predFileName23S, '23S_rRNA')
except Exception:
seqIdSet23S = set()
print("Can't set candidate placement from 23S sequences.")
try:
seqIdSet5S = self._setCandidatePlacement(sequences, taxonomy, predFileName5S, '5S_rRNA')
except Exception:
seqIdSet5S = set()
print("Can't set candidate placement from 5S sequences.")
intersectSet = seqIdSet16S | seqIdSet23S | seqIdSet5S
return [len(seqIdSet16S),len(seqIdSet23S),len(seqIdSet5S),len(intersectSet)]
#
def _setCandidatePlacement(self, sequences, taxonomy, predFileName, source):
assignedIdList = []
try:
f = open(os.path.normpath(predFileName),'r')
except Exception:
print "Cannot open file:", predFileName
raise
else:
for line in f:
line = common.noNewLine(line)
if re.match(r'^[0-9]+_[0-9]+\t[0-9]+\t[0-9\.]+\t[^\t]+$', line):
scaffoldId = int(re.sub(r'^([0-9]+)_[0-9]+\t[0-9]+\t[0-9\.]+\t[^\t]+$',r'\1' ,line))
contigId = int(re.sub(r'^[0-9]+_([0-9]+)\t[0-9]+\t[0-9\.]+\t[^\t]+$',r'\1' ,line))
ncbid = int(re.sub(r'^[0-9]+_[0-9]+\t([0-9]+)\t[0-9\.]+\t[^\t]+$',r'\1' ,line))
weight = float(re.sub(r'^[0-9]+_[0-9]+\t[0-9]+\t([0-9\.]+)\t[^\t]+$',r'\1' ,line))
tag = str(re.sub(r'^[0-9]+_[0-9]+\t[0-9]+\t[0-9\.]+\t([^\t]+)$',r'\1' ,line))
if ncbid != 1:
taxPathDict = taxonomy.getPathToRoot(ncbid)
                        if taxPathDict is not None and len(taxPathDict) >= 1:
sequences.setCandidateTaxonomyPath(contigId, scaffoldId, taxPathDict, weight, source, tag)
assignedIdList.append(contigId)
else:
sys.stderr.write(str('No taxonomic path found for ncbid: ' + str(ncbid)))
finally:
f.close()
return set(assignedIdList)
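    # Hedged example (same made-up row as above): a prediction line
    #   "125_528\t2172\t95.0\t10_900_+"
    # is split into scaffoldId=125, contigId=528, ncbid=2172, weight=95.0 and
    # tag="10_900_+" before the candidate taxonomy path is recorded.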
if __name__ == "__main__":
#print '16S analysis'
#line = '125_528_- 2(100);unclassified;unclassified;unclassified;unclassified;unclassified;unclassified;unclassified;'
line = '125_528_- 2157(100);28890(95.3333);183925(95.3333);2158(95.3333);2159(95.3333);2172(95.3333);unclassified;unclassified;'
try:
name = re.sub('([0-9]+_[0-9]+)_[\+\-\t ]+.*', r'\1' , line)
placement = re.sub('[0-9]+_[0-9]+_[\+\-\t ]+(.*)', r'\1' , line.replace('unclassified;', '')).rsplit(';')[-2]
clade = int(re.sub('([0-9]+)\(.*', r'\1' , placement))
weight = float(re.sub('[0-9]+\(([0-9\.]+)\)', r'\1' , placement))
except Exception:
        sys.stderr.write('Cannot parse line in file x' + '\n')
print name
print placement
print clade
print weight
| 50.156085 | 152 | 0.588586 |
"""
Copyright (C) 2014 Ivan Gregor
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
Note that we could have written some parts of this code in a nicer way,
but didn't have time. Be careful when reusing the source code.
"""
import os
import sys
import re
import subprocess
from Bio import SeqIO
from algbioi.com import common, csv
from scripts.configparserwrapper import ConfigParserWrapper
class RRNA16S():
"""
    A class that handles the rRNA 16S analysis.
"""
def __init__(self, config, s16Database, workingDir):
assert isinstance(config, ConfigParserWrapper)
self._config = config
self._workingDir = workingDir
# self._refDir = os.path.normpath(s16Database)
# self._refDict = csv.getMappingTuple(os.path.join(self._refDir, 'content.csv'), (0,1), (2,), '\t')
def runHMM(self, inputFastaFile, outLog=None, hmmer=3, moltypes="ssu", kingdoms="arc,bac"):
"""
        Run the hidden Markov model to get regions in the input sequences where the 16S and 23S genes are located.
"""
if hmmer == 2:
self.runHMM_2(inputFastaFile, outLog, moltypes=moltypes, kingdoms=kingdoms)
elif hmmer == 3:
self.runHMM_3(inputFastaFile, outLog, moltypes=moltypes, kingdoms=kingdoms)
def runHMM_2(self, inputFastaFile, outLog=None, moltypes="ssu", kingdoms="arc,bac"):
hmmInstallDir = self._config.get_value("MarkerGeneExtraction", 'rnaHmmInstallDir', is_path=True)
rnammer_executable = self._config.get_value("MarkerGeneExtraction", 'rnammer', is_path=True)
assert isinstance(hmmInstallDir, basestring)
assert isinstance(rnammer_executable, basestring)
out_file_name_prefix = os.path.join(self._workingDir, os.path.basename(inputFastaFile))
hmmer_args = [
"-i '{}'".format(inputFastaFile),
"-o '{}'".format(out_file_name_prefix),
"-r '{}'".format(rnammer_executable),
"-m '{}'".format(moltypes),
"-k '{}'".format(kingdoms),
"-T '{}'".format(self._workingDir)
]
cmd = "{script} {args}".format(
script=os.path.join(hmmInstallDir, 'rna_hmm2.py'),
args=" ".join(hmmer_args),
)
if os.name != 'posix':
print 'Cannot run HMM since your system is not "posix" but', str('"' + os.name + '"'), '\n', cmd
return
if outLog is not None:
stdoutLog = open(outLog, 'a')
else:
stdoutLog = subprocess.PIPE # stdout=subprocess.STDOUT
hmmProc = subprocess.Popen(cmd, shell=True, bufsize=-1, cwd=hmmInstallDir, stdout=stdoutLog)
print 'run cmd:', cmd
hmmProc.wait()
if outLog is not None:
stdoutLog.close()
print 'HMM return code:', hmmProc.returncode
if hmmProc.returncode != 0:
raise Exception("Command returned with non-zero %s status: %s" % (hmmProc.returncode, cmd))
def runHMM_3(self, inputFastaFile, outLog=None, moltypes="ssu", kingdoms="arc,bac"):
hmmInstallDir = self._config.get_value("MarkerGeneExtraction", 'rnaHmmInstallDir', is_path=True)
hmmerBinDir = self._config.get_value("MarkerGeneExtraction", 'hmmerBinDir', is_path=True)
assert isinstance(hmmInstallDir, basestring)
assert isinstance(hmmerBinDir, basestring)
regionsFile = os.path.join(self._workingDir, str(os.path.basename(inputFastaFile) + '.gff'))
hmmer_args = [
"-i '{}'".format(inputFastaFile),
"-o '{}'".format(regionsFile),
"-r '{}'".format(os.path.join(hmmerBinDir, "hmmsearch")),
"-m '{}'".format(moltypes),
"-k '{}'".format(kingdoms)
]
cmd = "{wrapper} {args}".format(
hmmerBinDir=hmmerBinDir,
wrapper=os.path.join(hmmInstallDir, 'rna_hmm3.py'),
args=" ".join(hmmer_args)
)
if os.name != 'posix':
print 'Cannot run HMM since your system is not "posix" but', str('"' + os.name + '"'), '\n', cmd
return
if outLog is not None:
stdoutLog = open(outLog, 'a')
else:
stdoutLog = subprocess.PIPE # stdout=subprocess.STDOUT
hmmProc = subprocess.Popen(cmd, shell=True, bufsize=-1, cwd=hmmInstallDir, stdout=stdoutLog)
print 'run cmd:', cmd
hmmProc.wait()
if outLog is not None:
stdoutLog.close()
print 'HMM return code:', hmmProc.returncode
if hmmProc.returncode != 0:
raise Exception("Command returned with non-zero %s status: %s" % (hmmProc.returncode, cmd))
handle = open(inputFastaFile, "rU")
record_dict = SeqIO.to_dict(SeqIO.parse(handle, "fasta"))
handle.close()
# trunkoutputfilename = inputFastaFile.split( "/" )[-1]
trunkoutputfilename = os.path.join(self._workingDir, os.path.basename(inputFastaFile))
# parse results file line by line
for line in open(regionsFile, "rU"):
if line.startswith("#"):
continue
line = line.split()
ident = line[0]
start = int(line[3])
stop = int(line[4])
strand = line[6]
gene = line[8]
seq = record_dict[ident].seq
if strand == "+":
subseq = seq[start-1:stop]
elif strand == "-":
subseq = seq[start-1:stop].reverse_complement()
else:
sys.stderr.write(" analysis16s: invalid strand symbol")
exit(1)
outfile = open(trunkoutputfilename + "." + gene + ".fna", "a")
print >> outfile, ">%s_%i_%i_%s" % (ident, start, stop, strand)
print >> outfile, subseq
outfile.close()
def classify16S(self, inputFastaFile, outLog=None):
"""
Run mothur to classify the sequences.
"""
try:
self._classify(16, inputFastaFile, outLog)
except Exception:
print('Mothur was not able to classify 16S sequences.')
def classify23S(self, inputFastaFile, outLog=None):
try:
self._classify(23, inputFastaFile, outLog)
except Exception:
print('Mothur was not able to classify 23S sequences.')
def classify5S(self, inputFastaFile, outLog=None):
try:
self._classify(5, inputFastaFile, outLog)
except Exception:
print('Mothur was not able to classify 5S sequences.')
def _classify(self, mode, inputFastaFile, outLog=None):
mothur = os.path.join(os.path.normpath(self._config.get('mothurInstallDir')), 'mothur')
if mode == 16:
extractedRegionsFasta = os.path.join(self._workingDir, str(os.path.basename(inputFastaFile) + '.16S_rRNA.fna'))
taxonomyFile = os.path.join(self._refDir, os.path.normpath(self._refDict[('16S_rRNA','taxonomyDNA')][0]))
templateFile = os.path.join(self._refDir, os.path.normpath(self._refDict[('16S_rRNA','templateDNA')][0]))
#mothurPredFileName = str(extractedRegionsFasta[0:extractedRegionsFasta.rindex('.')] + '.taxonomy')
mothurPredFileName = common.getMothurOutputFilePath(extractedRegionsFasta, taxonomyFile)
predFileName = os.path.join(self._workingDir, str(os.path.basename(inputFastaFile) + '.16P'))
#extractedRegionsFasta = str(inputFastaFile + '.16S_rRNA.fna')
#templateFile = os.path.normpath(self._configRRNA16S.get('mothurClassifyParam16STemplate'))
#taxonomyFile = os.path.normpath(self._configRRNA16S.get('mothurClassifyParam16STaxonomy'))
#mothurPredFileName = str(inputFastaFile + '.16S_rRNA.bacteria+archaea.taxonomy')
#mothurPredFileName = os.path.join(self._workingDir, str(os.path.basename(inputFastaFile) + '.16S_rRNA.bacteria+archaea.taxonomy'))
#mothurPredFileName = os.path.join(self._workingDir, str(os.path.basename(inputFastaFile) + '.16S_rRNA.fasta.taxonomy'))
#predFileName = str(inputFastaFile + '.16P')
elif mode == 23:
extractedRegionsFasta = os.path.join(self._workingDir, str(os.path.basename(inputFastaFile) + '.23S_rRNA.fna'))
taxonomyFile = os.path.join(self._refDir, os.path.normpath(self._refDict[('23S_rRNA','taxonomyDNA')][0]))
templateFile = os.path.join(self._refDir, os.path.normpath(self._refDict[('23S_rRNA','templateDNA')][0]))
#mothurPredFileName = str(extractedRegionsFasta[0:extractedRegionsFasta.rindex('.')] + '.taxonomy')
mothurPredFileName = common.getMothurOutputFilePath(extractedRegionsFasta, taxonomyFile)
predFileName = os.path.join(self._workingDir, str(os.path.basename(inputFastaFile) + '.23P'))
#extractedRegionsFasta = str(inputFastaFile + '.23S_rRNA.fna')
#templateFile = os.path.normpath(self._configRRNA16S.get('mothurClassifyParam23STemplate'))
#taxonomyFile = os.path.normpath(self._configRRNA16S.get('mothurClassifyParam23STaxonomy'))
#mothurPredFileName = str(inputFastaFile + '.23S_rRNA.bacteria+archaea.taxonomy')
#mothurPredFileName = os.path.join(self._workingDir, str(os.path.basename(inputFastaFile) + '.23S_rRNA.bacteria+archaea.taxonomy'))
#mothurPredFileName = os.path.join(self._workingDir, str(os.path.basename(inputFastaFile) + '.23S_rRNA.fasta.taxonomy'))
#predFileName = str(inputFastaFile + '.23P')
elif mode == 5:
#extractedRegionsFasta = str(inputFastaFile + '.5S_rRNA.fna')
extractedRegionsFasta = os.path.join(self._workingDir, str(os.path.basename(inputFastaFile) + '.5S_rRNA.fna'))
taxonomyFile = os.path.join(self._refDir, os.path.normpath(self._refDict[('5S_rRNA','taxonomyDNA')][0]))
templateFile = os.path.join(self._refDir, os.path.normpath(self._refDict[('5S_rRNA','templateDNA')][0]))
mothurPredFileName = common.getMothurOutputFilePath(extractedRegionsFasta, taxonomyFile)
predFileName = os.path.join(self._workingDir, str(os.path.basename(inputFastaFile) + '.5P'))
#templateFile = os.path.normpath(self._configRRNA16S.get('mothurClassifyParam5STemplate'))
#taxonomyFile = os.path.normpath(self._configRRNA16S.get('mothurClassifyParam5STaxonomy'))
#mothurPredFileName = os.path.join(self._workingDir,
# str(os.path.basename(inputFastaFile) + '.5S_rRNA.' + os.path.basename(taxonomyFile) + 'onomy'))#.taxonomy
#predFileName = str(inputFastaFile + '.5P')
else:
raise Exception('Wrong branch')
if not os.path.isfile(mothurPredFileName):
mothurPredFileName = common.getMothurOutputFilePath(extractedRegionsFasta, taxonomyFile, suffix='.bayesian.taxonomy')
param = self._config.get('mothurClassifyParamOther')
cmd = str('time ' + mothur + ' "#classify.seqs(fasta=' + extractedRegionsFasta + ', template=' + templateFile
+ ', taxonomy=' + taxonomyFile + ', ' + param + ')"')
if os.name == 'posix':
if outLog is not None:
stdoutLog = open(outLog, 'w')
else:
stdoutLog = subprocess.STDOUT
mothurProc = subprocess.Popen(cmd, shell=True, bufsize=-1, cwd=self._workingDir, stdout=stdoutLog)
print 'run cmd:', cmd
mothurProc.wait()
if outLog is not None:
stdoutLog.close()
print 'mothur return code:', mothurProc.returncode
if mothurProc.returncode != 0:
raise Exception("Command returned with non-zero %s status: %s" % (mothurProc.returncode, cmd))
else:
print 'Cannot run mothur since your system is not "posix" but', str('"' + os.name + '"'), '\n', cmd
#transform mothur prediction files to the tab separated files
self.mothurPredToTabSepPred(mothurPredFileName, predFileName)
def mothurPredToTabSepPred(self, mothurPredFileName, outPredFileName):
"""
        Transforms the mothur output prediction file (*.taxonomy) to the tab-separated prediction file: seqName tab ncbid tab weight tab tag.
"""
try:
fr = open(os.path.normpath(mothurPredFileName),'r')
except Exception:
sys.stderr.write("Cannot open file:" + mothurPredFileName + '\n')
raise
else:
try:
fw = open(os.path.normpath(outPredFileName), 'w')
lineCount = 0
for line in fr:
line = common.noNewLine(line)
try:
if re.match(r'^[0-9]+_[0-9]+_[0-9]+_[0-9]+.*', line):
name = re.sub(r'([0-9]+_[0-9]+)_[0-9]+_[0-9]+_[\+\-\t ]+.*', r'\1' , line)
tag = re.sub(r'[0-9]+_[0-9]+_([0-9]+_[0-9]+_[\+\-]+)[\t ]+.*', r'\1' , line)
placementList = re.sub(r'[0-9]+_[0-9]+_[0-9]+_[0-9]+_[\+\-\t ]+(.*)', r'\1' , line.replace('unclassified;', '')).rsplit(';')
if len(placementList) < 2:
continue
placement = placementList[-2]
try:
clade = int(re.sub('([0-9]+)\(.*', r'\1' , placement))
except ValueError:
continue
weight = float(re.sub('[0-9]+\(([0-9\.]+)\)', r'\1' , placement))
lineCount += 1
if lineCount == 1:
fw.write(name + '\t' + str(clade) + '\t' + str(weight) + '\t' + str(tag))
else:
fw.write('\n' + name + '\t' + str(clade) + '\t' + str(weight) + '\t' + str(tag))
except Exception:
sys.stderr.write('Cannot parse line: ' + str(lineCount) + 'in file: ' + mothurPredFileName + '\n')
raise
except Exception:
sys.stderr.write("Cannot write to file:" + outPredFileName + '\n')
raise
finally:
fw.close()
fr.close()
#
def setCandidatePlacementFrom16S23S5S(self, sequences, taxonomy, inputFastaFile):
#set assigned by the 16S countList[0]
#set assigned by the 23S countList[1]
#intersection = both countList[3]
#predFileName16S = str(inputFastaFile + '.16P')
predFileName16S = os.path.join(self._workingDir, str(os.path.basename(inputFastaFile) + '.16P'))
#predFileName23S = str(inputFastaFile + '.23P')
predFileName23S = os.path.join(self._workingDir, str(os.path.basename(inputFastaFile) + '.23P'))
#predFileName5S = str(inputFastaFile + '.5P')
predFileName5S = os.path.join(self._workingDir, str(os.path.basename(inputFastaFile) + '.5P'))
try:
seqIdSet16S = self._setCandidatePlacement(sequences, taxonomy, predFileName16S, '16S_rRNA')
except Exception:
seqIdSet16S = set()
print("Can't set candidate placement from 16S classification.")
try:
seqIdSet23S = self._setCandidatePlacement(sequences, taxonomy, predFileName23S, '23S_rRNA')
except Exception:
seqIdSet23S = set()
print("Can't set candidate placement from 23S sequences.")
try:
seqIdSet5S = self._setCandidatePlacement(sequences, taxonomy, predFileName5S, '5S_rRNA')
except Exception:
seqIdSet5S = set()
print("Can't set candidate placement from 5S sequences.")
intersectSet = seqIdSet16S | seqIdSet23S | seqIdSet5S
return [len(seqIdSet16S),len(seqIdSet23S),len(seqIdSet5S),len(intersectSet)]
def _setCandidatePlacement(self, sequences, taxonomy, predFileName, source):
assignedIdList = []
try:
f = open(os.path.normpath(predFileName),'r')
except Exception:
print "Cannot open file:", predFileName
raise
else:
for line in f:
line = common.noNewLine(line)
if re.match(r'^[0-9]+_[0-9]+\t[0-9]+\t[0-9\.]+\t[^\t]+$', line):
scaffoldId = int(re.sub(r'^([0-9]+)_[0-9]+\t[0-9]+\t[0-9\.]+\t[^\t]+$',r'\1' ,line))
contigId = int(re.sub(r'^[0-9]+_([0-9]+)\t[0-9]+\t[0-9\.]+\t[^\t]+$',r'\1' ,line))
ncbid = int(re.sub(r'^[0-9]+_[0-9]+\t([0-9]+)\t[0-9\.]+\t[^\t]+$',r'\1' ,line))
weight = float(re.sub(r'^[0-9]+_[0-9]+\t[0-9]+\t([0-9\.]+)\t[^\t]+$',r'\1' ,line))
tag = str(re.sub(r'^[0-9]+_[0-9]+\t[0-9]+\t[0-9\.]+\t([^\t]+)$',r'\1' ,line))
if ncbid != 1:
taxPathDict = taxonomy.getPathToRoot(ncbid)
                        if taxPathDict is not None and len(taxPathDict) >= 1:
sequences.setCandidateTaxonomyPath(contigId, scaffoldId, taxPathDict, weight, source, tag)
assignedIdList.append(contigId)
else:
sys.stderr.write(str('No taxonomic path found for ncbid: ' + str(ncbid)))
finally:
f.close()
return set(assignedIdList)
if __name__ == "__main__":
line = '125_528_- 2157(100);28890(95.3333);183925(95.3333);2158(95.3333);2159(95.3333);2172(95.3333);unclassified;unclassified;'
try:
name = re.sub('([0-9]+_[0-9]+)_[\+\-\t ]+.*', r'\1' , line)
placement = re.sub('[0-9]+_[0-9]+_[\+\-\t ]+(.*)', r'\1' , line.replace('unclassified;', '')).rsplit(';')[-2]
clade = int(re.sub('([0-9]+)\(.*', r'\1' , placement))
weight = float(re.sub('[0-9]+\(([0-9\.]+)\)', r'\1' , placement))
except Exception:
        sys.stderr.write('Cannot parse line in file x' + '\n')
print name
print placement
print clade
print weight
| false | true |
f72007805c2d3ca886d768516835db709c0dc08b | 1,117 | py | Python | python files/area_calcs.py | dhbesson/abc_visualization | b024bf551e0e331e3f7bd9d63dbe1437a3c25aa7 | [
"MIT"
] | null | null | null | python files/area_calcs.py | dhbesson/abc_visualization | b024bf551e0e331e3f7bd9d63dbe1437a3c25aa7 | [
"MIT"
] | null | null | null | python files/area_calcs.py | dhbesson/abc_visualization | b024bf551e0e331e3f7bd9d63dbe1437a3c25aa7 | [
"MIT"
] | null | null | null | import requests, csv, sys, os, time, json, codecs
server = "https://cloudrf.com"
# dir = "calculations/antennas_1W_2m"
# Open CSV file
# csvfile = csv.reader(codecs.open('antennas.csv', 'rU', 'utf-16'))
uid = 'YOUR CLOUDRF UID HERE'
key = 'YOUR CLOUDRF KEY HERE'
def calc_area(dir,csvfile_loc):
n = 0
csvfile = csv.DictReader(open(csvfile_loc))
if not os.path.exists(dir):
os.makedirs(dir)
for row in csvfile:
# Pause script. Important otherwise server will ban you.
time.sleep(1)
start_time = time.time() # Stopwatch start
# print row
r = requests.post(server + "/API/area", data=row)
print(r.text)
# try:
j = json.loads(r.text)
r = requests.get(j['kmz'])
fn = dir + os.sep + str(row['nam']) + ".kmz"
file = open(fn, "wb")
file.write(r.content)
file.close()
print("Saved to %s" % fn)
elapsed = round(time.time() - start_time, 1) # Stopwatch
print("Elapsed: " + str(elapsed) + "s")
        n = n + 1
| 27.243902 | 68 | 0.554163 |
import requests, csv, sys, os, time, json, codecs
server = "https://cloudrf.com"
uid = 'YOUR CLOUDRF UID HERE'
key = 'YOUR CLOUDRF KEY HERE'
def calc_area(dir,csvfile_loc):
n = 0
csvfile = csv.DictReader(open(csvfile_loc))
if not os.path.exists(dir):
os.makedirs(dir)
for row in csvfile:
time.sleep(1)
start_time = time.time()
r = requests.post(server + "/API/area", data=row)
print(r.text)
j = json.loads(r.text)
r = requests.get(j['kmz'])
fn = dir + os.sep + str(row['nam']) + ".kmz"
file = open(fn, "wb")
file.write(r.content)
file.close()
print("Saved to %s" % fn)
elapsed = round(time.time() - start_time, 1)
print("Elapsed: " + str(elapsed) + "s")
        n = n + 1
| true | true
f72008a164fc940a1cd12de39700a25410e41ad5 | 3,485 | py | Python | src/the_tale/the_tale/game/bills/models.py | al-arz/the-tale | 542770257eb6ebd56a5ac44ea1ef93ff4ab19eb5 | [
"BSD-3-Clause"
] | null | null | null | src/the_tale/the_tale/game/bills/models.py | al-arz/the-tale | 542770257eb6ebd56a5ac44ea1ef93ff4ab19eb5 | [
"BSD-3-Clause"
] | null | null | null | src/the_tale/the_tale/game/bills/models.py | al-arz/the-tale | 542770257eb6ebd56a5ac44ea1ef93ff4ab19eb5 | [
"BSD-3-Clause"
] | null | null | null |
import smart_imports
smart_imports.all()
class Bill(django_models.Model):
CAPTION_MIN_LENGTH = 6
CAPTION_MAX_LENGTH = 256
created_at = django_models.DateTimeField(auto_now_add=True, null=False)
    updated_at = django_models.DateTimeField(auto_now_add=True, null=False)  # MUST be set up by hand
voting_end_at = django_models.DateTimeField(null=True, blank=True)
created_at_turn = django_models.IntegerField(null=False)
applyed_at_turn = django_models.IntegerField(null=True, blank=True)
ended_at = django_models.DateTimeField(null=True, blank=True)
owner = django_models.ForeignKey('accounts.Account', null=True, related_name='+', on_delete=django_models.SET_NULL)
caption = django_models.CharField(max_length=CAPTION_MAX_LENGTH)
type = rels_django.RelationIntegerField(relation=relations.BILL_TYPE, db_index=True)
state = rels_django.RelationIntegerField(relation=relations.BILL_STATE, db_index=True)
approved_by_moderator = django_models.BooleanField(default=False, db_index=True)
remove_initiator = django_models.ForeignKey('accounts.Account', null=True, blank=True, related_name='+', on_delete=django_models.SET_NULL)
technical_data = django_models.TextField(null=False, blank=True, default={})
chronicle_on_accepted = django_models.TextField(null=False, blank=True, default='')
    # we should not remove the bill when its forum thread is accidentally removed
forum_thread = django_models.ForeignKey(forum_models.Thread, null=True, blank=True, related_name='+', on_delete=django_models.SET_NULL)
votes_for = django_models.IntegerField(default=0)
votes_against = django_models.IntegerField(default=0)
votes_refrained = django_models.IntegerField(default=0)
    # fields that snapshot config values at the moment the bill is processed (since the config can change in the future)
min_votes_percents_required = django_models.FloatField(default=0.0)
is_declined = django_models.BooleanField(blank=True, default=False)
declined_by = django_models.ForeignKey('bills.Bill', null=True, default=None, related_name='+', blank=True, on_delete=django_models.SET_NULL)
depends_on = django_models.ForeignKey('bills.Bill', null=True, default=None, related_name='+', blank=True, on_delete=django_models.SET_NULL)
def __str__(self):
return '{}-{}'.format(self.id, self.caption)
class Meta:
permissions = (("moderate_bill", "Может администрировать записи в Книге Судеб"), )
class Actor(django_models.Model):
    # ATTENTION: if you want to make a building an actor, remember that after it is recreated
    # (for the same person, after the previous building has been destroyed)
    # it is first fully removed from the database (the previous building) and only then created
created_at = django_models.DateTimeField(auto_now_add=True, null=False)
bill = django_models.ForeignKey(Bill, null=False, on_delete=django_models.CASCADE)
place = django_models.ForeignKey('places.Place', null=True, related_name='+', on_delete=django_models.CASCADE)
class Vote(django_models.Model):
created_at = django_models.DateTimeField(auto_now_add=True, null=False)
owner = django_models.ForeignKey('accounts.Account', null=True, related_name='+', on_delete=django_models.SET_NULL)
bill = django_models.ForeignKey(Bill, null=False, on_delete=django_models.CASCADE)
type = rels_django.RelationIntegerField(relation=relations.VOTE_TYPE, db_index=True)
class Meta:
unique_together = (('owner', 'bill'),)
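# Hedged usage sketch (the vote-type name is an assumption): unique_together
# above enforces one vote per (owner, bill), so voting code would typically
# use something like
#   Vote.objects.update_or_create(owner=account, bill=bill,
#                                 defaults={'type': relations.VOTE_TYPE.FOR})
# instead of a bare create(), to avoid an IntegrityError on a repeated vote.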
| 42.5 | 145 | 0.766428 |
import smart_imports
smart_imports.all()
class Bill(django_models.Model):
CAPTION_MIN_LENGTH = 6
CAPTION_MAX_LENGTH = 256
created_at = django_models.DateTimeField(auto_now_add=True, null=False)
updated_at = django_models.DateTimeField(auto_now_add=True, null=False)
voting_end_at = django_models.DateTimeField(null=True, blank=True)
created_at_turn = django_models.IntegerField(null=False)
applyed_at_turn = django_models.IntegerField(null=True, blank=True)
ended_at = django_models.DateTimeField(null=True, blank=True)
owner = django_models.ForeignKey('accounts.Account', null=True, related_name='+', on_delete=django_models.SET_NULL)
caption = django_models.CharField(max_length=CAPTION_MAX_LENGTH)
type = rels_django.RelationIntegerField(relation=relations.BILL_TYPE, db_index=True)
state = rels_django.RelationIntegerField(relation=relations.BILL_STATE, db_index=True)
approved_by_moderator = django_models.BooleanField(default=False, db_index=True)
remove_initiator = django_models.ForeignKey('accounts.Account', null=True, blank=True, related_name='+', on_delete=django_models.SET_NULL)
technical_data = django_models.TextField(null=False, blank=True, default={})
chronicle_on_accepted = django_models.TextField(null=False, blank=True, default='')
forum_thread = django_models.ForeignKey(forum_models.Thread, null=True, blank=True, related_name='+', on_delete=django_models.SET_NULL)
votes_for = django_models.IntegerField(default=0)
votes_against = django_models.IntegerField(default=0)
votes_refrained = django_models.IntegerField(default=0)
min_votes_percents_required = django_models.FloatField(default=0.0)
is_declined = django_models.BooleanField(blank=True, default=False)
declined_by = django_models.ForeignKey('bills.Bill', null=True, default=None, related_name='+', blank=True, on_delete=django_models.SET_NULL)
depends_on = django_models.ForeignKey('bills.Bill', null=True, default=None, related_name='+', blank=True, on_delete=django_models.SET_NULL)
def __str__(self):
return '{}-{}'.format(self.id, self.caption)
class Meta:
permissions = (("moderate_bill", "Может администрировать записи в Книге Судеб"), )
class Actor(django_models.Model):
created_at = django_models.DateTimeField(auto_now_add=True, null=False)
bill = django_models.ForeignKey(Bill, null=False, on_delete=django_models.CASCADE)
place = django_models.ForeignKey('places.Place', null=True, related_name='+', on_delete=django_models.CASCADE)
class Vote(django_models.Model):
created_at = django_models.DateTimeField(auto_now_add=True, null=False)
owner = django_models.ForeignKey('accounts.Account', null=True, related_name='+', on_delete=django_models.SET_NULL)
bill = django_models.ForeignKey(Bill, null=False, on_delete=django_models.CASCADE)
type = rels_django.RelationIntegerField(relation=relations.VOTE_TYPE, db_index=True)
class Meta:
unique_together = (('owner', 'bill'),)
| true | true |
f72008f42f54ea078b631fc42689eec8279d667b | 486 | py | Python | April 2021/Furthest Building You Can Reach.py | parikshitgupta1/leetcode | eba6c11740dc7597204af127c0f4c2163376294f | [
"MIT"
] | null | null | null | April 2021/Furthest Building You Can Reach.py | parikshitgupta1/leetcode | eba6c11740dc7597204af127c0f4c2163376294f | [
"MIT"
] | null | null | null | April 2021/Furthest Building You Can Reach.py | parikshitgupta1/leetcode | eba6c11740dc7597204af127c0f4c2163376294f | [
"MIT"
] | null | null | null | from heapq import heappush, heappop

class Solution:
    def furthestBuilding(self, H, bricks, ladders):
        # Greedy strategy: keep the `ladders` largest climbs in a min-heap
        # (ladders cover them for free) and pay bricks for every climb that
        # overflows the heap, i.e. the smaller ones.
        jumps_pq = []
        for i in range(len(H) - 1):
            jump_height = H[i + 1] - H[i]
            if jump_height <= 0: continue  # level or downhill: costs nothing
            heappush(jumps_pq, jump_height)
            if len(jumps_pq) > ladders:
                bricks -= heappop(jumps_pq)  # smallest climb falls back to bricks
            if bricks < 0: return i  # out of bricks: stuck at building i
        return len(H) - 1
a = Solution()
print(a.furthestBuilding([4,12,2,7,3,18,20,3,19], 10, 2))
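# A quick trace of the sample call above gives 7: with the 2 ladders reserved
# for the two largest climbs (+15 and +16), the paid climbs 5 + 2 + 8 exceed
# the 10 bricks on the final jump, so the furthest reachable index is 7.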
| 28.588235 | 57 | 0.522634 | true | true
f72008f569b6e13c90d5063ad392e029122f1919 | 228 | py | Python | Itertools/itertools.product.py | AndreasGeiger/hackerrank-python | a436c207e62b32f70a6b4279bb641a3c4d90e112 | [
"MIT"
] | null | null | null | Itertools/itertools.product.py | AndreasGeiger/hackerrank-python | a436c207e62b32f70a6b4279bb641a3c4d90e112 | [
"MIT"
] | null | null | null | Itertools/itertools.product.py | AndreasGeiger/hackerrank-python | a436c207e62b32f70a6b4279bb641a3c4d90e112 | [
"MIT"
] | null | null | null | from itertools import product
listA = list(map(int, input().split()))
listB = list(map(int, input().split()))
productLists = list(product(listA, listB))
for i in range(len(productLists)):
print(productLists[i], end=" ")
| 20.727273 | 42 | 0.684211 | true | true
f7200945eace3e7c67af32832e8436d62e73a7ee | 3,102 | py | Python | nativepython/type_wrappers/range_wrapper.py | szymonlipinski/nativepython | 5f0bcc709b99a43681488f2753eccc2ac37a0334 | [
"Apache-2.0"
] | null | null | null | nativepython/type_wrappers/range_wrapper.py | szymonlipinski/nativepython | 5f0bcc709b99a43681488f2753eccc2ac37a0334 | [
"Apache-2.0"
] | null | null | null | nativepython/type_wrappers/range_wrapper.py | szymonlipinski/nativepython | 5f0bcc709b99a43681488f2753eccc2ac37a0334 | [
"Apache-2.0"
] | null | null | null | # Copyright 2017-2019 Nativepython Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from nativepython.type_wrappers.wrapper import Wrapper
import nativepython.native_ast as native_ast
class RangeWrapper(Wrapper):
is_pod = True
is_empty = False
is_pass_by_ref = False
def __init__(self):
super().__init__((range, "type"))
def getNativeLayoutType(self):
return native_ast.Type.Void()
def convert_call(self, context, expr, args, kwargs):
if len(args) == 1 and not kwargs:
arg = args[0].toInt64()
if not arg:
return None
return context.pushPod(
_RangeInstanceWrapper,
arg.nonref_expr
)
return super().convert_call(context, expr, args, kwargs)
class RangeInstanceWrapper(Wrapper):
is_pod = True
is_empty = False
is_pass_by_ref = False
def __init__(self):
super().__init__((range, "instance"))
def getNativeLayoutType(self):
return native_ast.Int64
def convert_method_call(self, context, expr, methodname, args, kwargs):
if methodname == "__iter__" and not args and not kwargs:
return context.push(
_RangeIteratorWrapper,
lambda instance:
instance.expr.ElementPtrIntegers(0, 0).store(-1) >>
instance.expr.ElementPtrIntegers(0, 1).store(expr.nonref_expr)
)
return super().convert_method_call(context, expr, methodname, args, kwargs)
class RangeIteratorWrapper(Wrapper):
is_pod = True
is_empty = False
is_pass_by_ref = True
def __init__(self):
super().__init__((range, "iterator"))
def getNativeLayoutType(self):
return native_ast.Type.Struct(
element_types=(("count", native_ast.Int64), ("len", native_ast.Int64)),
name="range_storage"
)
def convert_next(self, context, expr):
context.pushEffect(
expr.expr.ElementPtrIntegers(0, 0).store(
expr.expr.ElementPtrIntegers(0, 0).load().add(1)
)
)
canContinue = context.pushPod(
bool,
expr.expr.ElementPtrIntegers(0, 0).load().lt(
expr.expr.ElementPtrIntegers(0, 1).load()
)
)
nextExpr = context.pushReference(int, expr.expr.ElementPtrIntegers(0, 0))
return nextExpr, canContinue
_RangeWrapper = RangeWrapper()
_RangeInstanceWrapper = RangeInstanceWrapper()
_RangeIteratorWrapper = RangeIteratorWrapper()
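# Hedged sketch of the lowering these singletons implement (the function below
# is illustrative, not part of this module): in compiled code such as
#
#     def total(n: int) -> int:
#         s = 0
#         for i in range(n):
#             s += i
#         return s
#
# `range(n)` appears to go through RangeWrapper.convert_call, the loop header
# through RangeInstanceWrapper.convert_method_call("__iter__"), and each
# iteration step through RangeIteratorWrapper.convert_next.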
| 31.333333 | 83 | 0.640554 |
from nativepython.type_wrappers.wrapper import Wrapper
import nativepython.native_ast as native_ast
class RangeWrapper(Wrapper):
is_pod = True
is_empty = False
is_pass_by_ref = False
def __init__(self):
super().__init__((range, "type"))
def getNativeLayoutType(self):
return native_ast.Type.Void()
def convert_call(self, context, expr, args, kwargs):
if len(args) == 1 and not kwargs:
arg = args[0].toInt64()
if not arg:
return None
return context.pushPod(
_RangeInstanceWrapper,
arg.nonref_expr
)
return super().convert_call(context, expr, args, kwargs)
class RangeInstanceWrapper(Wrapper):
is_pod = True
is_empty = False
is_pass_by_ref = False
def __init__(self):
super().__init__((range, "instance"))
def getNativeLayoutType(self):
return native_ast.Int64
def convert_method_call(self, context, expr, methodname, args, kwargs):
if methodname == "__iter__" and not args and not kwargs:
return context.push(
_RangeIteratorWrapper,
lambda instance:
instance.expr.ElementPtrIntegers(0, 0).store(-1) >>
instance.expr.ElementPtrIntegers(0, 1).store(expr.nonref_expr)
)
return super().convert_method_call(context, expr, methodname, args, kwargs)
class RangeIteratorWrapper(Wrapper):
is_pod = True
is_empty = False
is_pass_by_ref = True
def __init__(self):
super().__init__((range, "iterator"))
def getNativeLayoutType(self):
return native_ast.Type.Struct(
element_types=(("count", native_ast.Int64), ("len", native_ast.Int64)),
name="range_storage"
)
def convert_next(self, context, expr):
context.pushEffect(
expr.expr.ElementPtrIntegers(0, 0).store(
expr.expr.ElementPtrIntegers(0, 0).load().add(1)
)
)
canContinue = context.pushPod(
bool,
expr.expr.ElementPtrIntegers(0, 0).load().lt(
expr.expr.ElementPtrIntegers(0, 1).load()
)
)
nextExpr = context.pushReference(int, expr.expr.ElementPtrIntegers(0, 0))
return nextExpr, canContinue
_RangeWrapper = RangeWrapper()
_RangeInstanceWrapper = RangeInstanceWrapper()
_RangeIteratorWrapper = RangeIteratorWrapper()
| true | true |
f72009f2a950749e199cacf800fa7cbce9a95e33 | 1,856 | py | Python | ml-agents-envs/mlagents_envs/registry/base_registry_entry.py | bobcy2015/ml-agents | 5d02292ad889f1884fa98bd92f127f17cbfe0112 | [
"Apache-2.0"
] | 13,653 | 2017-09-19T15:56:02.000Z | 2022-03-31T18:55:07.000Z | ml-agents-envs/mlagents_envs/registry/base_registry_entry.py | bobcy2015/ml-agents | 5d02292ad889f1884fa98bd92f127f17cbfe0112 | [
"Apache-2.0"
] | 3,623 | 2017-09-20T02:50:20.000Z | 2022-03-31T06:37:25.000Z | ml-agents-envs/mlagents_envs/registry/base_registry_entry.py | bobcy2015/ml-agents | 5d02292ad889f1884fa98bd92f127f17cbfe0112 | [
"Apache-2.0"
] | 4,130 | 2017-09-19T17:36:34.000Z | 2022-03-31T12:54:55.000Z | from abc import abstractmethod
from typing import Any, Optional
from mlagents_envs.base_env import BaseEnv
class BaseRegistryEntry:
def __init__(
self,
identifier: str,
expected_reward: Optional[float],
description: Optional[str],
):
"""
BaseRegistryEntry allows launching a Unity Environment with its make method.
:param identifier: The name of the Unity Environment.
:param expected_reward: The cumulative reward that an Agent must receive
for the task to be considered solved.
:param description: A description of the Unity Environment. Contains human
readable information about potential special arguments that the make method can
take as well as information regarding the observation, reward, actions,
behaviors and number of agents in the Environment.
"""
self._identifier = identifier
self._expected_reward = expected_reward
self._description = description
@property
def identifier(self) -> str:
"""
The unique identifier of the entry
"""
return self._identifier
@property
def expected_reward(self) -> Optional[float]:
"""
The cumulative reward that an Agent must receive for the task to be considered
solved.
"""
return self._expected_reward
@property
def description(self) -> Optional[str]:
"""
A description of the Unity Environment the entry can make.
"""
return self._description
@abstractmethod
def make(self, **kwargs: Any) -> BaseEnv:
"""
This method creates a Unity BaseEnv (usually a UnityEnvironment).
"""
raise NotImplementedError(
f"The make() method not implemented for entry {self.identifier}"
)
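# Hedged illustration, not part of the original module: a concrete entry
# subclasses BaseRegistryEntry and implements make(). The class name below and
# the choice of UnityEnvironment's file_name argument are assumptions.
class _ExampleLocalEntry(BaseRegistryEntry):
    def make(self, **kwargs: Any) -> BaseEnv:
        # Launch a local Unity build identified by this entry's identifier.
        from mlagents_envs.environment import UnityEnvironment
        return UnityEnvironment(file_name=self._identifier, **kwargs)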
| 32.561404 | 87 | 0.649246 |
from abc import abstractmethod
from typing import Any, Optional
from mlagents_envs.base_env import BaseEnv
class BaseRegistryEntry:
def __init__(
self,
identifier: str,
expected_reward: Optional[float],
description: Optional[str],
):
self._identifier = identifier
self._expected_reward = expected_reward
self._description = description
@property
def identifier(self) -> str:
return self._identifier
@property
def expected_reward(self) -> Optional[float]:
return self._expected_reward
@property
def description(self) -> Optional[str]:
return self._description
@abstractmethod
def make(self, **kwargs: Any) -> BaseEnv:
raise NotImplementedError(
f"The make() method not implemented for entry {self.identifier}"
)
| true | true |
f7200a2703afba3d344293f747d73f0e8c66c472 | 14,266 | py | Python | sdk/python/pulumi_azure_native/storagesync/v20180701/get_registered_server.py | pulumi-bot/pulumi-azure-native | f7b9490b5211544318e455e5cceafe47b628e12c | [
"Apache-2.0"
] | null | null | null | sdk/python/pulumi_azure_native/storagesync/v20180701/get_registered_server.py | pulumi-bot/pulumi-azure-native | f7b9490b5211544318e455e5cceafe47b628e12c | [
"Apache-2.0"
] | null | null | null | sdk/python/pulumi_azure_native/storagesync/v20180701/get_registered_server.py | pulumi-bot/pulumi-azure-native | f7b9490b5211544318e455e5cceafe47b628e12c | [
"Apache-2.0"
] | null | null | null | # coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union
from ... import _utilities, _tables
__all__ = [
'GetRegisteredServerResult',
'AwaitableGetRegisteredServerResult',
'get_registered_server',
]
@pulumi.output_type
class GetRegisteredServerResult:
"""
Registered Server resource.
"""
def __init__(__self__, agent_version=None, cluster_id=None, cluster_name=None, discovery_endpoint_uri=None, friendly_name=None, id=None, last_heart_beat=None, last_operation_name=None, last_workflow_id=None, management_endpoint_uri=None, monitoring_configuration=None, name=None, provisioning_state=None, resource_location=None, server_certificate=None, server_id=None, server_managementt_error_code=None, server_os_version=None, server_role=None, service_location=None, storage_sync_service_uid=None, type=None):
if agent_version and not isinstance(agent_version, str):
raise TypeError("Expected argument 'agent_version' to be a str")
pulumi.set(__self__, "agent_version", agent_version)
if cluster_id and not isinstance(cluster_id, str):
raise TypeError("Expected argument 'cluster_id' to be a str")
pulumi.set(__self__, "cluster_id", cluster_id)
if cluster_name and not isinstance(cluster_name, str):
raise TypeError("Expected argument 'cluster_name' to be a str")
pulumi.set(__self__, "cluster_name", cluster_name)
if discovery_endpoint_uri and not isinstance(discovery_endpoint_uri, str):
raise TypeError("Expected argument 'discovery_endpoint_uri' to be a str")
pulumi.set(__self__, "discovery_endpoint_uri", discovery_endpoint_uri)
if friendly_name and not isinstance(friendly_name, str):
raise TypeError("Expected argument 'friendly_name' to be a str")
pulumi.set(__self__, "friendly_name", friendly_name)
if id and not isinstance(id, str):
raise TypeError("Expected argument 'id' to be a str")
pulumi.set(__self__, "id", id)
if last_heart_beat and not isinstance(last_heart_beat, str):
raise TypeError("Expected argument 'last_heart_beat' to be a str")
pulumi.set(__self__, "last_heart_beat", last_heart_beat)
if last_operation_name and not isinstance(last_operation_name, str):
raise TypeError("Expected argument 'last_operation_name' to be a str")
pulumi.set(__self__, "last_operation_name", last_operation_name)
if last_workflow_id and not isinstance(last_workflow_id, str):
raise TypeError("Expected argument 'last_workflow_id' to be a str")
pulumi.set(__self__, "last_workflow_id", last_workflow_id)
if management_endpoint_uri and not isinstance(management_endpoint_uri, str):
raise TypeError("Expected argument 'management_endpoint_uri' to be a str")
pulumi.set(__self__, "management_endpoint_uri", management_endpoint_uri)
if monitoring_configuration and not isinstance(monitoring_configuration, str):
raise TypeError("Expected argument 'monitoring_configuration' to be a str")
pulumi.set(__self__, "monitoring_configuration", monitoring_configuration)
if name and not isinstance(name, str):
raise TypeError("Expected argument 'name' to be a str")
pulumi.set(__self__, "name", name)
if provisioning_state and not isinstance(provisioning_state, str):
raise TypeError("Expected argument 'provisioning_state' to be a str")
pulumi.set(__self__, "provisioning_state", provisioning_state)
if resource_location and not isinstance(resource_location, str):
raise TypeError("Expected argument 'resource_location' to be a str")
pulumi.set(__self__, "resource_location", resource_location)
if server_certificate and not isinstance(server_certificate, str):
raise TypeError("Expected argument 'server_certificate' to be a str")
pulumi.set(__self__, "server_certificate", server_certificate)
if server_id and not isinstance(server_id, str):
raise TypeError("Expected argument 'server_id' to be a str")
pulumi.set(__self__, "server_id", server_id)
if server_managementt_error_code and not isinstance(server_managementt_error_code, int):
raise TypeError("Expected argument 'server_managementt_error_code' to be a int")
pulumi.set(__self__, "server_managementt_error_code", server_managementt_error_code)
if server_os_version and not isinstance(server_os_version, str):
raise TypeError("Expected argument 'server_os_version' to be a str")
pulumi.set(__self__, "server_os_version", server_os_version)
if server_role and not isinstance(server_role, str):
raise TypeError("Expected argument 'server_role' to be a str")
pulumi.set(__self__, "server_role", server_role)
if service_location and not isinstance(service_location, str):
raise TypeError("Expected argument 'service_location' to be a str")
pulumi.set(__self__, "service_location", service_location)
if storage_sync_service_uid and not isinstance(storage_sync_service_uid, str):
raise TypeError("Expected argument 'storage_sync_service_uid' to be a str")
pulumi.set(__self__, "storage_sync_service_uid", storage_sync_service_uid)
if type and not isinstance(type, str):
raise TypeError("Expected argument 'type' to be a str")
pulumi.set(__self__, "type", type)
@property
@pulumi.getter(name="agentVersion")
def agent_version(self) -> Optional[str]:
"""
Registered Server Agent Version
"""
return pulumi.get(self, "agent_version")
@property
@pulumi.getter(name="clusterId")
def cluster_id(self) -> Optional[str]:
"""
Registered Server clusterId
"""
return pulumi.get(self, "cluster_id")
@property
@pulumi.getter(name="clusterName")
def cluster_name(self) -> Optional[str]:
"""
Registered Server clusterName
"""
return pulumi.get(self, "cluster_name")
@property
@pulumi.getter(name="discoveryEndpointUri")
def discovery_endpoint_uri(self) -> Optional[str]:
"""
Resource discoveryEndpointUri
"""
return pulumi.get(self, "discovery_endpoint_uri")
@property
@pulumi.getter(name="friendlyName")
def friendly_name(self) -> Optional[str]:
"""
Friendly Name
"""
return pulumi.get(self, "friendly_name")
@property
@pulumi.getter
def id(self) -> str:
"""
Fully qualified resource ID for the resource. Ex - /subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/{resourceProviderNamespace}/{resourceType}/{resourceName}
"""
return pulumi.get(self, "id")
@property
@pulumi.getter(name="lastHeartBeat")
def last_heart_beat(self) -> Optional[str]:
"""
Registered Server last heart beat
"""
return pulumi.get(self, "last_heart_beat")
@property
@pulumi.getter(name="lastOperationName")
def last_operation_name(self) -> Optional[str]:
"""
Resource Last Operation Name
"""
return pulumi.get(self, "last_operation_name")
@property
@pulumi.getter(name="lastWorkflowId")
def last_workflow_id(self) -> Optional[str]:
"""
Registered Server lastWorkflowId
"""
return pulumi.get(self, "last_workflow_id")
@property
@pulumi.getter(name="managementEndpointUri")
def management_endpoint_uri(self) -> Optional[str]:
"""
Management Endpoint Uri
"""
return pulumi.get(self, "management_endpoint_uri")
@property
@pulumi.getter(name="monitoringConfiguration")
def monitoring_configuration(self) -> Optional[str]:
"""
Monitoring Configuration
"""
return pulumi.get(self, "monitoring_configuration")
@property
@pulumi.getter
def name(self) -> str:
"""
The name of the resource
"""
return pulumi.get(self, "name")
@property
@pulumi.getter(name="provisioningState")
def provisioning_state(self) -> Optional[str]:
"""
Registered Server Provisioning State
"""
return pulumi.get(self, "provisioning_state")
@property
@pulumi.getter(name="resourceLocation")
def resource_location(self) -> Optional[str]:
"""
Resource Location
"""
return pulumi.get(self, "resource_location")
@property
@pulumi.getter(name="serverCertificate")
def server_certificate(self) -> Optional[str]:
"""
Registered Server Certificate
"""
return pulumi.get(self, "server_certificate")
@property
@pulumi.getter(name="serverId")
def server_id(self) -> Optional[str]:
"""
Registered Server serverId
"""
return pulumi.get(self, "server_id")
@property
@pulumi.getter(name="serverManagementtErrorCode")
def server_managementt_error_code(self) -> Optional[int]:
"""
Registered Server Management Error Code
"""
return pulumi.get(self, "server_managementt_error_code")
@property
@pulumi.getter(name="serverOSVersion")
def server_os_version(self) -> Optional[str]:
"""
Registered Server OS Version
"""
return pulumi.get(self, "server_os_version")
@property
@pulumi.getter(name="serverRole")
def server_role(self) -> Optional[str]:
"""
Registered Server serverRole
"""
return pulumi.get(self, "server_role")
@property
@pulumi.getter(name="serviceLocation")
def service_location(self) -> Optional[str]:
"""
Service Location
"""
return pulumi.get(self, "service_location")
@property
@pulumi.getter(name="storageSyncServiceUid")
def storage_sync_service_uid(self) -> Optional[str]:
"""
Registered Server storageSyncServiceUid
"""
return pulumi.get(self, "storage_sync_service_uid")
@property
@pulumi.getter
def type(self) -> str:
"""
The type of the resource. E.g. "Microsoft.Compute/virtualMachines" or "Microsoft.Storage/storageAccounts"
"""
return pulumi.get(self, "type")
class AwaitableGetRegisteredServerResult(GetRegisteredServerResult):
# pylint: disable=using-constant-test
def __await__(self):
if False:
yield self
return GetRegisteredServerResult(
agent_version=self.agent_version,
cluster_id=self.cluster_id,
cluster_name=self.cluster_name,
discovery_endpoint_uri=self.discovery_endpoint_uri,
friendly_name=self.friendly_name,
id=self.id,
last_heart_beat=self.last_heart_beat,
last_operation_name=self.last_operation_name,
last_workflow_id=self.last_workflow_id,
management_endpoint_uri=self.management_endpoint_uri,
monitoring_configuration=self.monitoring_configuration,
name=self.name,
provisioning_state=self.provisioning_state,
resource_location=self.resource_location,
server_certificate=self.server_certificate,
server_id=self.server_id,
server_managementt_error_code=self.server_managementt_error_code,
server_os_version=self.server_os_version,
server_role=self.server_role,
service_location=self.service_location,
storage_sync_service_uid=self.storage_sync_service_uid,
type=self.type)
def get_registered_server(resource_group_name: Optional[str] = None,
server_id: Optional[str] = None,
storage_sync_service_name: Optional[str] = None,
opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetRegisteredServerResult:
"""
Registered Server resource.
:param str resource_group_name: The name of the resource group. The name is case insensitive.
:param str server_id: GUID identifying the on-premises server.
:param str storage_sync_service_name: Name of Storage Sync Service resource.
"""
__args__ = dict()
__args__['resourceGroupName'] = resource_group_name
__args__['serverId'] = server_id
__args__['storageSyncServiceName'] = storage_sync_service_name
if opts is None:
opts = pulumi.InvokeOptions()
if opts.version is None:
opts.version = _utilities.get_version()
__ret__ = pulumi.runtime.invoke('azure-native:storagesync/v20180701:getRegisteredServer', __args__, opts=opts, typ=GetRegisteredServerResult).value
return AwaitableGetRegisteredServerResult(
agent_version=__ret__.agent_version,
cluster_id=__ret__.cluster_id,
cluster_name=__ret__.cluster_name,
discovery_endpoint_uri=__ret__.discovery_endpoint_uri,
friendly_name=__ret__.friendly_name,
id=__ret__.id,
last_heart_beat=__ret__.last_heart_beat,
last_operation_name=__ret__.last_operation_name,
last_workflow_id=__ret__.last_workflow_id,
management_endpoint_uri=__ret__.management_endpoint_uri,
monitoring_configuration=__ret__.monitoring_configuration,
name=__ret__.name,
provisioning_state=__ret__.provisioning_state,
resource_location=__ret__.resource_location,
server_certificate=__ret__.server_certificate,
server_id=__ret__.server_id,
server_managementt_error_code=__ret__.server_managementt_error_code,
server_os_version=__ret__.server_os_version,
server_role=__ret__.server_role,
service_location=__ret__.service_location,
storage_sync_service_uid=__ret__.storage_sync_service_uid,
type=__ret__.type)
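# --- Hedged usage sketch (illustrative values; runs only inside a Pulumi program) ---
# result = get_registered_server(
#     resource_group_name="example-rg",
#     server_id="00000000-0000-0000-0000-000000000000",
#     storage_sync_service_name="example-sync-service")
# pulumi.export("serverRole", result.server_role)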
# exams/forms.py (repo: WillOnGit/exam-timetable, license: MIT)
from .models import ExamVenue
from django.forms import ModelForm


class RestrictedResponseForm(ModelForm):
    def __init__(self, *args, **kwargs):
        super(RestrictedResponseForm, self).__init__(*args, **kwargs)
        try:
            # Restrict venue choices to the venues registered for this response's exam.
            self.fields['assigned_venue'].queryset = ExamVenue.objects.filter(exam=self.instance.exam)
        except Exception:
            # No exam attached yet (e.g. an unsaved instance): offer no venues.
            self.fields['assigned_venue'].queryset = ExamVenue.objects.none()
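# --- Hedged usage sketch (illustrative; assumes a hypothetical Response model with
# `exam` and `assigned_venue` fields, which this module does not define) ---
#
# class ResponseForm(RestrictedResponseForm):
#     class Meta:
#         model = Response
#         fields = ['assigned_venue']
#
# form = ResponseForm(instance=some_response)
# # form.fields['assigned_venue'].queryset now holds only that exam's venues.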
# -*- coding: utf8 -*-
# src/Infraestructura/ccutils/databases/configuration.py (repo: lbarriosh/cygnus-cloud, license: Apache-2.0)
'''
========================================================================
CygnusCloud
========================================================================
File: configuration.py
Version: 3.0
Description: Database configurator definitions
Copyright 2012-13 Luis Barrios Hernández, Adrián Fernández Hernández,
Samuel Guayerbas Martín
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
'''
import MySQLdb
import os.path
from ccutils.processes.childProcessManager import ChildProcessManager
class DBConfigurator(object):
"""
This class provides methods to configure databases.
"""
def __init__(self, rootPassword):
'''
Initializes the configurator's state
Args:
rootPassword: root's password
'''
self.__rootPassword = rootPassword
def addUser(self, user, password, databaseName, allPrivileges=True):
'''
Adds a new MySQL user
Args:
user: the new user's name
password: the new user's password
databaseName: the database's name
allPrivileges: if True, the new user will be able to do everything
with the database. If False, the new user will only be able to execute queries
in the database.
Returns:
Nothing
'''
conn = MySQLdb.Connection(host="localhost", user="root", passwd=self.__rootPassword)
cursor = conn.cursor()
if (allPrivileges):
privileges = "ALL"
else :
privileges = "SELECT"
cursor.execute("GRANT " + privileges + " ON " + databaseName + ".* TO '" + user + "'@'" + "localhost" + "' IDENTIFIED BY '" + password + "';")
cursor.close()
conn.close()
def createDatabase(self, databaseName):
'''
Creates a MySQL database
Args:
databaseName: the new database's name
Returns:
Nothing
'''
db = MySQLdb.connect(host='localhost',user="root",passwd=self.__rootPassword)
command = "CREATE DATABASE " + databaseName + ";"
cursor = db.cursor()
cursor.execute(command)
cursor.close()
db.close()
def dropDatabase(self, databaseName):
'''
Deletes a MySQL database
Args:
databaseName: the database to delete's name
Returns:
Nothing
'''
db = MySQLdb.connect(host='localhost',user="root",passwd=self.__rootPassword)
command = "DROP DATABASE " + databaseName + ";"
cursor = db.cursor()
cursor.execute(command)
cursor.close()
db.close()
def runSQLScript(self, database, sqlFilePath, username="root", password=None):
'''
Runs a SQL script
Args:
            database: the MySQL database to use
sqlFilePath: the SQL script path
username: a MySQL user name.
password: the user's password
'''
passwordCommand = ""
if (password != None and len(password) != 0) :
passwordCommand = "-p" + str(password)
filePath = os.path.abspath(sqlFilePath)
command = "mysql -uroot {0} -e \"source {1}\"".format(passwordCommand, filePath)
ChildProcessManager.runCommandInForeground(command, Exception)
@staticmethod
def __isEmpty__(string):
        for c in string :
            if (c != ' ' and c != '\n' and c != '\r' and c != '\t') :
                return False
        return True
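# --- Hedged usage sketch (illustrative credentials and paths; not part of the
# original module) ---
if __name__ == "__main__":
    configurator = DBConfigurator("root-password")
    configurator.createDatabase("cygnus_test")
    configurator.addUser("cygnus", "secret", "cygnus_test", allPrivileges=False)
    configurator.runSQLScript("cygnus_test", "./schema.sql",
                              username="root", password="root-password")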
# -*- coding: utf-8 -*-
# shop/migrations/0024_auto_20200311_0154.py (repo: manson800819/test, license: MIT)
# Generated by Django 1.11.29 on 2020-03-11 01:54
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('shop', '0023_auto_20200311_0137'),
]
operations = [
migrations.AlterField(
model_name='product',
name='type1',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='products_t', to='shop.Type1'),
),
]
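# Hedged note: this AlterField only changes Python-level options on Product.type1
# (the reverse accessor becomes `products_t`); `related_name` and `on_delete` are
# enforced by Django rather than the database, so no schema SQL is expected here
# (an assumption based on standard Django behaviour).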
f7200b4756a033cb419019d6f292992490dafe65 | 28,992 | py | Python | geoopt/manifolds/base.py | grapefroot/geoopt | 8f219a820e24b87ac68136ff66af11b25d5c04c5 | [
"Apache-2.0"
] | 4 | 2020-01-27T15:37:19.000Z | 2020-12-06T02:51:03.000Z | geoopt/manifolds/base.py | grapefroot/geoopt | 8f219a820e24b87ac68136ff66af11b25d5c04c5 | [
"Apache-2.0"
] | null | null | null | geoopt/manifolds/base.py | grapefroot/geoopt | 8f219a820e24b87ac68136ff66af11b25d5c04c5 | [
"Apache-2.0"
] | 1 | 2021-05-07T22:01:41.000Z | 2021-05-07T22:01:41.000Z | import abc
import torch.nn
import itertools
from typing import Optional, Tuple, Union
__all__ = ["Manifold", "ScalingInfo"]
class ScalingInfo(object):
"""
Scaling info for each argument that requires rescaling.
.. code:: python
scaled_value = value * scaling ** power if power != 0 else value
    For results it is not always required to set powers of scaling; in that case it is a no-op.
The convention for this info is the following. The output of a function is either a tuple or a single object.
In any case, outputs are treated as positionals. Function inputs, in contrast, are treated by keywords.
    It is common practice to maintain the function signature when overriding, so this convention
    may be considered sufficient in this particular scenario. The only required info for the formula above is ``power``.
"""
# marks method to be not working with Scaled wrapper
NotCompatible = object()
__slots__ = ["kwargs", "results"]
def __init__(self, *results: float, **kwargs: float):
self.results = results
self.kwargs = kwargs
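# For example (illustrative): ``ScalingInfo(1)`` marks a method whose single result
# rescales as ``result * scaling ** 1``, while ``ScalingInfo(u=-1)`` marks a method
# whose keyword argument ``u`` rescales as ``u * scaling ** -1``.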
class ScalingStorage(dict):
"""
Helper class to make implementation transparent.
This is just a dictionary with additional overriden ``__call__``
for more explicit and elegant API to declare members. A usage example may be found in :class:`Manifold`.
Methods that require rescaling when wrapped into :class:`Scaled` should be defined as follows
1. Regular methods like ``dist``, ``dist2``, ``expmap``, ``retr`` etc. that are already present in the base class
    do not require registration; it has already happened in the base :class:`Manifold` class.
2. New methods (like in :class:`PoincareBall`) should be treated with care.
.. code-block:: python
class PoincareBall(Manifold):
# make a class copy of __scaling__ info. Default methods are already present there
__scaling__ = Manifold.__scaling__.copy()
... # here come regular implementation of the required methods
@__scaling__(ScalingInfo(1)) # rescale output according to rule `out * scaling ** 1`
def dist0(self, x: torch.Tensor, *, dim=-1, keepdim=False):
return math.dist0(x, c=self.c, dim=dim, keepdim=keepdim)
@__scaling__(ScalingInfo(u=-1)) # rescale argument `u` according to the rule `out * scaling ** -1`
def expmap0(self, u: torch.Tensor, *, dim=-1, project=True):
res = math.expmap0(u, c=self.c, dim=dim)
if project:
return math.project(res, c=self.c, dim=dim)
else:
return res
... # other special methods implementation
3. Some methods are not compliant with the above rescaling rules. We should mark them as `NotCompatible`
.. code-block:: python
# continuation of the PoincareBall definition
@__scaling__(ScalingInfo.NotCompatible)
def mobius_fn_apply(
self, fn: callable, x: torch.Tensor, *args, dim=-1, project=True, **kwargs
):
res = math.mobius_fn_apply(fn, x, *args, c=self.c, dim=dim, **kwargs)
if project:
return math.project(res, c=self.c, dim=dim)
else:
return res
"""
def __call__(self, scaling_info: ScalingInfo):
def register(fn):
self[fn.__name__] = scaling_info
return fn
return register
def copy(self):
return self.__class__(self)
class Manifold(torch.nn.Module, metaclass=abc.ABCMeta):
__scaling__ = ScalingStorage() # will be filled along with implementation below
name = None
ndim = None
reversible = None
forward = NotImplemented
def __init__(self, **kwargs):
super().__init__()
@property
def device(self) -> Optional[torch.device]:
"""
Manifold device.
Returns
-------
Optional[torch.device]
"""
p = next(itertools.chain(self.buffers(), self.parameters()), None)
if p is not None:
return p.device
else:
return None
@property
def dtype(self) -> Optional[torch.dtype]:
"""
Manifold dtype.
Returns
-------
Optional[torch.dtype]
"""
p = next(itertools.chain(self.buffers(), self.parameters()), None)
if p is not None:
return p.dtype
else:
return None
def check_point(
self, x: torch.Tensor, *, explain=False
) -> Union[Tuple[bool, Optional[str]], bool]:
"""
Check if point is valid to be used with the manifold.
Parameters
----------
x : torch.Tensor
point on the manifold
explain: bool
            return additional information about the check
Returns
-------
bool
boolean indicating if tensor is valid and reason of failure if False
Notes
-----
        This check is compatible with what the optimizer expects; the last dimensions are treated as manifold dimensions.
"""
ok, reason = self._check_shape(x.shape, "x")
if explain:
return ok, reason
else:
return ok
def assert_check_point(self, x: torch.Tensor):
"""
Check if point is valid to be used with the manifold and raise an error with informative message on failure.
Parameters
----------
x : torch.Tensor
point on the manifold
Notes
-----
        This check is compatible with what the optimizer expects; the last dimensions are treated as manifold dimensions.
"""
ok, reason = self._check_shape(x.shape, "x")
if not ok:
raise ValueError(
"`x` seems to be not valid "
"tensor for {} manifold.\nerror: {}".format(self.name, reason)
)
def check_vector(self, u: torch.Tensor, *, explain=False):
"""
Check if vector is valid to be used with the manifold.
Parameters
----------
u : torch.Tensor
vector on the tangent plane
explain: bool
            return additional information about the check
Returns
-------
bool
boolean indicating if tensor is valid and reason of failure if False
Notes
-----
        This check is compatible with what the optimizer expects; the last dimensions are treated as manifold dimensions.
"""
ok, reason = self._check_shape(u.shape, "u")
if explain:
return ok, reason
else:
return ok
def assert_check_vector(self, u: torch.Tensor):
"""
Check if vector is valid to be used with the manifold and raise an error with informative message on failure.
Parameters
----------
u : torch.Tensor
vector on the tangent plane
Notes
-----
        This check is compatible with what the optimizer expects; the last dimensions are treated as manifold dimensions.
"""
ok, reason = self._check_shape(u.shape, "u")
if not ok:
raise ValueError(
"`u` seems to be not valid "
"tensor for {} manifold.\nerror: {}".format(self.name, reason)
)
def check_point_on_manifold(
self, x: torch.Tensor, *, explain=False, atol=1e-5, rtol=1e-5
) -> Union[Tuple[bool, Optional[str]], bool]:
"""
        Check if point :math:`x` lies on the manifold.
Parameters
----------
x : torch.Tensor
point on the manifold
atol: float
absolute tolerance as in :func:`numpy.allclose`
rtol: float
relative tolerance as in :func:`numpy.allclose`
explain: bool
            return additional information about the check
Returns
-------
bool
boolean indicating if tensor is valid and reason of failure if False
Notes
-----
        This check is compatible with what the optimizer expects; the last dimensions are treated as manifold dimensions.
"""
ok, reason = self._check_shape(x.shape, "x")
if ok:
ok, reason = self._check_point_on_manifold(x, atol=atol, rtol=rtol)
if explain:
return ok, reason
else:
return ok
def assert_check_point_on_manifold(self, x: torch.Tensor, *, atol=1e-5, rtol=1e-5):
"""
        Check if point :math:`x` lies on the manifold and raise an error with an informative message on failure.
Parameters
----------
x : torch.Tensor
point on the manifold
atol: float
absolute tolerance as in :func:`numpy.allclose`
rtol: float
relative tolerance as in :func:`numpy.allclose`
"""
self.assert_check_point(x)
ok, reason = self._check_point_on_manifold(x, atol=atol, rtol=rtol)
if not ok:
raise ValueError(
"`x` seems to be a tensor "
"not lying on {} manifold.\nerror: {}".format(self.name, reason)
)
def check_vector_on_tangent(
self,
x: torch.Tensor,
u: torch.Tensor,
*,
ok_point=False,
explain=False,
atol=1e-5,
rtol=1e-5
) -> Union[Tuple[bool, Optional[str]], bool]:
"""
        Check if :math:`u` lies in the tangent space at :math:`x`.
Parameters
----------
x : torch.Tensor
point on the manifold
u : torch.Tensor
vector on the tangent space to :math:`x`
atol: float
absolute tolerance as in :func:`numpy.allclose`
rtol: float
relative tolerance as in :func:`numpy.allclose`
explain: bool
            return additional information about the check
ok_point: bool
is a check for point required?
Returns
-------
bool
boolean indicating if tensor is valid and reason of failure if False
"""
if not ok_point:
ok, reason = self._check_shape(x.shape, "x")
if ok:
ok, reason = self._check_shape(u.shape, "u")
if ok:
ok, reason = self._check_point_on_manifold(x, atol=atol, rtol=rtol)
else:
ok = True
reason = None
if ok:
ok, reason = self._check_vector_on_tangent(x, u, atol=atol, rtol=rtol)
if explain:
return ok, reason
else:
return ok
def assert_check_vector_on_tangent(
self, x: torch.Tensor, u: torch.Tensor, *, ok_point=False, atol=1e-5, rtol=1e-5
):
"""
        Check if :math:`u` lies in the tangent space at :math:`x` and raise an error on failure.
Parameters
----------
x : torch.Tensor
point on the manifold
u : torch.Tensor
vector on the tangent space to :math:`x`
atol: float
absolute tolerance as in :func:`numpy.allclose`
rtol: float
relative tolerance as in :func:`numpy.allclose`
ok_point: bool
is a check for point required?
"""
if not ok_point:
ok, reason = self._check_shape(x.shape, "x")
if ok:
ok, reason = self._check_shape(u.shape, "u")
if ok:
ok, reason = self._check_point_on_manifold(x, atol=atol, rtol=rtol)
else:
ok = True
reason = None
if ok:
ok, reason = self._check_vector_on_tangent(x, u, atol=atol, rtol=rtol)
if not ok:
raise ValueError(
"`u` seems to be a tensor "
"not lying on tangent space to `x` for {} manifold.\nerror: {}".format(
self.name, reason
)
)
@__scaling__(ScalingInfo(1))
def dist(self, x: torch.Tensor, y: torch.Tensor, *, keepdim=False) -> torch.Tensor:
"""
        Compute the distance between two points on the manifold, i.e. the length of the shortest geodesic path between them.
Parameters
----------
x : torch.Tensor
point on the manifold
y : torch.Tensor
point on the manifold
keepdim : bool
keep the last dim?
Returns
-------
torch.Tensor
distance between two points
"""
raise NotImplementedError
@__scaling__(ScalingInfo(2))
def dist2(self, x: torch.Tensor, y: torch.Tensor, *, keepdim=False) -> torch.Tensor:
"""
        Compute the squared distance between two points on the manifold along the shortest geodesic path.
Parameters
----------
x : torch.Tensor
point on the manifold
y : torch.Tensor
point on the manifold
keepdim : bool
keep the last dim?
Returns
-------
torch.Tensor
squared distance between two points
"""
return self.dist(x, y, keepdim=keepdim) ** 2
@abc.abstractmethod
@__scaling__(ScalingInfo(u=-1))
def retr(self, x: torch.Tensor, u: torch.Tensor) -> torch.Tensor:
"""
Perform a retraction from point :math:`x` with given direction :math:`u`.
Parameters
----------
x : torch.Tensor
point on the manifold
u : torch.Tensor
tangent vector at point :math:`x`
Returns
-------
torch.Tensor
transported point
"""
raise NotImplementedError
@abc.abstractmethod
@__scaling__(ScalingInfo(u=-1))
def expmap(self, x: torch.Tensor, u: torch.Tensor) -> torch.Tensor:
r"""
Perform an exponential map :math:`\operatorname{Exp}_x(u)`.
Parameters
----------
x : torch.Tensor
point on the manifold
u : torch.Tensor
tangent vector at point :math:`x`
Returns
-------
torch.Tensor
transported point
"""
raise NotImplementedError
@__scaling__(ScalingInfo(1))
def logmap(self, x: torch.Tensor, y: torch.Tensor) -> torch.Tensor:
r"""
        Perform a logarithmic map :math:`\operatorname{Log}_{x}(y)`.
Parameters
----------
x : torch.Tensor
point on the manifold
y : torch.Tensor
point on the manifold
Returns
-------
torch.Tensor
tangent vector
"""
raise NotImplementedError
@__scaling__(ScalingInfo(u=-1))
def expmap_transp(
self, x: torch.Tensor, u: torch.Tensor, v: torch.Tensor
) -> Tuple[torch.Tensor, torch.Tensor]:
"""
Perform an exponential map and vector transport from point :math:`x` with given direction :math:`u`.
Parameters
----------
x : torch.Tensor
point on the manifold
u : torch.Tensor
tangent vector at point :math:`x`
v : torch.Tensor
tangent vector at point :math:`x` to be transported
Returns
-------
torch.Tensor
transported point
"""
y = self.expmap(x, u)
v_transp = self.transp(x, y, v)
return y, v_transp
@__scaling__(ScalingInfo(u=-1))
def retr_transp(
self, x: torch.Tensor, u: torch.Tensor, v: torch.Tensor
) -> Tuple[torch.Tensor, torch.Tensor]:
"""
Perform a retraction + vector transport at once.
Parameters
----------
x : torch.Tensor
point on the manifold
u : torch.Tensor
tangent vector at point :math:`x`
v : torch.Tensor
tangent vector at point :math:`x` to be transported
Returns
-------
Tuple[torch.Tensor, torch.Tensor]
transported point and vectors
Notes
-----
        Sometimes this is a far more efficient way to perform retraction + vector transport.
"""
y = self.retr(x, u)
v_transp = self.transp(x, y, v)
return y, v_transp
@__scaling__(ScalingInfo(u=-1))
def transp_follow_retr(
self, x: torch.Tensor, u: torch.Tensor, v: torch.Tensor
) -> torch.Tensor:
r"""
Perform vector transport following :math:`u`: :math:`\mathfrak{T}_{x\to\operatorname{retr}(x, u)}(v)`.
        This operation is sometimes much simpler and can be optimized.
Parameters
----------
x : torch.Tensor
point on the manifold
u : torch.Tensor
tangent vector at point :math:`x`
v : torch.Tensor
tangent vector at point :math:`x` to be transported
Returns
-------
torch.Tensor
transported tensor
"""
y = self.retr(x, u)
return self.transp(x, y, v)
@__scaling__(ScalingInfo(u=-1))
def transp_follow_expmap(
self, x: torch.Tensor, u: torch.Tensor, v: torch.Tensor
) -> torch.Tensor:
r"""
Perform vector transport following :math:`u`: :math:`\mathfrak{T}_{x\to\operatorname{Exp}(x, u)}(v)`.
Here, :math:`\operatorname{Exp}` is the best possible approximation of the true exponential map.
        There are cases when the exact variant is hard or impossible to implement, therefore a
        non-exact fallback implementation is used.
Parameters
----------
x : torch.Tensor
point on the manifold
u : torch.Tensor
tangent vector at point :math:`x`
v : torch.Tensor
tangent vector at point :math:`x` to be transported
Returns
-------
torch.Tensor
transported tensor
"""
y = self.expmap(x, u)
return self.transp(x, y, v)
def transp(self, x: torch.Tensor, y: torch.Tensor, v: torch.Tensor) -> torch.Tensor:
r"""
Perform vector transport :math:`\mathfrak{T}_{x\to y}(v)`.
Parameters
----------
x : torch.Tensor
start point on the manifold
y : torch.Tensor
target point on the manifold
v : torch.Tensor
tangent vector at point :math:`x`
Returns
-------
torch.Tensor
transported tensor
"""
raise NotImplementedError
@abc.abstractmethod
def inner(
self, x: torch.Tensor, u: torch.Tensor, v=None, *, keepdim=False
) -> torch.Tensor:
"""
Inner product for tangent vectors at point :math:`x`.
Parameters
----------
x : torch.Tensor
point on the manifold
u : torch.Tensor
tangent vector at point :math:`x`
v : Optional[torch.Tensor]
tangent vector at point :math:`x`
keepdim : bool
keep the last dim?
Returns
-------
torch.Tensor
inner product (broadcasted)
"""
raise NotImplementedError
def component_inner(
self, x: torch.Tensor, u: torch.Tensor, v: torch.Tensor = None
) -> torch.Tensor:
"""
Inner product for tangent vectors at point :math:`x` according to components of the manifold.
        The result of the function is the same as ``inner`` with ``keepdim=True`` for
        all manifolds except ProductManifold. For that manifold it acts in a different way,
        computing the inner product for each component and then building the output by
        correctly tiling and reshaping the result.
Parameters
----------
x : torch.Tensor
point on the manifold
u : torch.Tensor
tangent vector at point :math:`x`
v : Optional[torch.Tensor]
tangent vector at point :math:`x`
Returns
-------
torch.Tensor
inner product component wise (broadcasted)
Notes
-----
        The purpose of this method is to obtain better adaptive properties in optimization, since ProductManifold
        "hides" the component structure in its public API.
"""
return self.inner(x, u, v, keepdim=True)
def norm(self, x: torch.Tensor, u: torch.Tensor, *, keepdim=False) -> torch.Tensor:
"""
Norm of a tangent vector at point :math:`x`.
Parameters
----------
x : torch.Tensor
point on the manifold
u : torch.Tensor
tangent vector at point :math:`x`
keepdim : bool
keep the last dim?
Returns
-------
torch.Tensor
inner product (broadcasted)
"""
return self.inner(x, u, keepdim=keepdim) ** 0.5
@abc.abstractmethod
def proju(self, x: torch.Tensor, u: torch.Tensor) -> torch.Tensor:
"""
Project vector :math:`u` on a tangent space for :math:`x`, usually is the same as :meth:`egrad2rgrad`.
Parameters
----------
        x : torch.Tensor
            point on the manifold
        u : torch.Tensor
            vector to be projected
Returns
-------
torch.Tensor
projected vector
"""
raise NotImplementedError
@abc.abstractmethod
def egrad2rgrad(self, x: torch.Tensor, u: torch.Tensor) -> torch.Tensor:
"""
Transform gradient computed using autodiff to the correct Riemannian gradient for the point :math:`x`.
Parameters
----------
        x : torch.Tensor
            point on the manifold
        u : torch.Tensor
            gradient to be projected
Returns
-------
torch.Tensor
grad vector in the Riemannian manifold
"""
raise NotImplementedError
@abc.abstractmethod
def projx(self, x: torch.Tensor) -> torch.Tensor:
"""
Project point :math:`x` on the manifold.
Parameters
----------
        x : torch.Tensor
            point to be projected
Returns
-------
torch.Tensor
projected point
"""
raise NotImplementedError
def _check_shape(
self, shape: Tuple[int], name: str
) -> Union[Tuple[bool, Optional[str]], bool]:
"""
Util to check shape.
Exhaustive implementation for checking if
a given point has valid dimension size,
        shape, etc. It should return a boolean and
        the reason for failure if the check does not pass.
Parameters
----------
shape : Tuple[int]
shape of point on the manifold
name : str
name to be present in errors
Returns
-------
bool, str or None
check result and the reason of fail if any
"""
ok = len(shape) >= self.ndim
if not ok:
reason = "'{}' on the {} requires more than {} dim".format(
name, self, self.ndim
)
else:
reason = None
return ok, reason
def _assert_check_shape(self, shape: Tuple[int], name: str):
"""
Util to check shape and raise an error if needed.
Exhaustive implementation for checking if
a given point has valid dimension size,
        shape, etc. It will raise a ValueError if the check does not pass.
Parameters
----------
shape : tuple
shape of point on the manifold
name : str
name to be present in errors
Raises
------
ValueError
"""
ok, reason = self._check_shape(shape, name)
if not ok:
raise ValueError(reason)
@abc.abstractmethod
def _check_point_on_manifold(
self, x: torch.Tensor, *, atol=1e-5, rtol=1e-5
) -> Union[Tuple[bool, Optional[str]], bool]:
"""
Util to check point lies on the manifold.
Exhaustive implementation for checking if
a given point lies on the manifold. It
        should return a boolean and the reason for
        failure if the check does not pass. You can
        assume assert_check_point has already
        been run beforehand.
Parameters
----------
        x : torch.Tensor
            point on the manifold
atol: float
absolute tolerance as in :func:`numpy.allclose`
rtol: float
relative tolerance as in :func:`numpy.allclose`
Returns
-------
bool, str or None
check result and the reason of fail if any
"""
# return True, None
raise NotImplementedError
@abc.abstractmethod
def _check_vector_on_tangent(
self, x: torch.Tensor, u: torch.Tensor, *, atol=1e-5, rtol=1e-5
) -> Union[Tuple[bool, Optional[str]], bool]:
"""
Util to check a vector belongs to the tangent space of a point.
Exhaustive implementation for checking if
a given point lies in the tangent space at x
of the manifold. It should return a boolean
indicating whether the test was passed
        and the reason for failure if it was not.
        You can assume assert_check_point has already
        been run beforehand.
Parameters
----------
        x : torch.Tensor
        u : torch.Tensor
atol : float
absolute tolerance
rtol :
relative tolerance
Returns
-------
bool, str or None
check result and the reason of fail if any
"""
# return True, None
raise NotImplementedError
def extra_repr(self):
return ""
def __repr__(self):
extra = self.extra_repr()
if extra:
return self.name + "({}) manifold".format(extra)
else:
return self.name + " manifold"
def unpack_tensor(self, tensor: torch.Tensor) -> torch.Tensor:
"""
Construct a point on the manifold.
This method should help to work with product and compound manifolds.
Internally all points on the manifold are stored in an intuitive format.
        However, there might be cases when it is simpler or more efficient to store points in
        a different representation that is harder to use directly in practice.
Parameters
----------
tensor : torch.Tensor
Returns
-------
torch.Tensor
"""
return tensor
def pack_point(self, *tensors: torch.Tensor) -> torch.Tensor:
"""
Construct a tensor representation of a manifold point.
        In the case of regular manifolds this will return the same tensor. However, for e.g. a ProductManifold
this function will pack all non-batch dimensions.
Parameters
----------
tensors : Tuple[torch.Tensor]
Returns
-------
torch.Tensor
"""
if len(tensors) != 1:
raise ValueError("1 tensor expected, got {}".format(len(tensors)))
return tensors[0]
def random(self, *size, dtype=None, device=None, **kwargs) -> torch.Tensor:
"""
Random sampling on the manifold.
        The exact implementation depends on the manifold and usually does not follow all
assumptions about uniform measure, etc.
"""
raise NotImplementedError
def origin(
self,
*size: Union[int, Tuple[int]],
dtype=None,
device=None,
seed: Optional[int] = 42
) -> torch.Tensor:
"""
Create some reasonable point on the manifold in a deterministic way.
        For some manifolds there may exist e.g. a zero vector or some analogue.
        When it is possible to define this special point, it is returned with the desired size.
        Otherwise, the returned point is sampled on the manifold in a deterministic way.
Parameters
----------
size : Union[int, Tuple[int]]
the desired shape
device : torch.device
the desired device
dtype : torch.dtype
the desired dtype
seed : Optional[int]
A parameter controlling deterministic randomness for manifolds that do not provide ``.origin``,
but provide ``.random``. (default: 42)
Returns
-------
torch.Tensor
"""
if seed is not None:
# we promise pseudorandom behaviour but do not want to modify global seed
state = torch.random.get_rng_state()
torch.random.manual_seed(seed)
try:
return self.random(*size, dtype=dtype, device=device)
finally:
torch.random.set_rng_state(state)
else:
return self.random(*size, dtype=dtype, device=device)
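# --- Hedged illustration (not part of geoopt): a minimal flat-space subclass ---
# The sketch below shows how the abstract interface above can be satisfied for
# plain Euclidean space, treating the last tensor dimension as the manifold
# dimension. The class name and all defaults are illustrative assumptions.
class _EuclideanSketch(Manifold):
    name = "Euclidean (sketch)"
    ndim = 1
    reversible = True

    def _check_point_on_manifold(self, x, *, atol=1e-5, rtol=1e-5):
        return True, None  # every tensor is a valid point in flat space

    def _check_vector_on_tangent(self, x, u, *, atol=1e-5, rtol=1e-5):
        return True, None  # every tensor is a valid tangent vector

    def retr(self, x, u):
        return x + u  # retraction reduces to addition

    def expmap(self, x, u):
        return x + u  # geodesics are straight lines

    def logmap(self, x, y):
        return y - x

    def transp(self, x, y, v):
        return v  # parallel transport is the identity

    def inner(self, x, u, v=None, *, keepdim=False):
        if v is None:
            v = u
        return (u * v).sum(dim=-1, keepdim=keepdim)

    def proju(self, x, u):
        return u  # the tangent space is the whole ambient space

    def egrad2rgrad(self, x, u):
        return u

    def projx(self, x):
        return x

    def random(self, *size, dtype=None, device=None, **kwargs):
        return torch.randn(*size, dtype=dtype, device=device)

# Usage (illustrative): m = _EuclideanSketch(); x = m.origin(2, 3)
# y, v = m.retr_transp(x, torch.ones(2, 3), torch.ones(2, 3))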
# app/recipe/urls.py (repo: rahulsudhakar10/receipe-api-project, license: MIT)
from django.urls import path, include
from rest_framework.routers import DefaultRouter

from . import views

router = DefaultRouter()
router.register('tags', views.TagViewSet)

app_name = 'recipe'

urlpatterns = [
    path('', include(router.urls))
]
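# With this registration, DefaultRouter exposes list/detail routes that reverse as
# 'recipe:tag-list' and 'recipe:tag-detail' (assuming the basename is derived from
# TagViewSet's queryset model, the DRF default when no basename is given).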
#!/usr/bin/env python3
# selfdrive/controls/lib/longitudinal_planner.py (repo: mogorman/openpilot-1, license: MIT)
import math
import numpy as np
from common.numpy_fast import interp
from common.cached_params import CachedParams
import cereal.messaging as messaging
from common.realtime import DT_MDL
from selfdrive.modeld.constants import T_IDXS
from selfdrive.config import Conversions as CV
from selfdrive.controls.lib.longcontrol import LongCtrlState
from selfdrive.controls.lib.longitudinal_mpc_lib.long_mpc import LongitudinalMpc
from selfdrive.controls.lib.longitudinal_mpc_lib.long_mpc import T_IDXS as T_IDXS_MPC
from selfdrive.controls.lib.drive_helpers import V_CRUISE_MAX, CONTROL_N
from selfdrive.swaglog import cloudlog
LON_MPC_STEP = 0.2 # first step is 0.2s
AWARENESS_DECEL = -0.2 # car smoothly decel at .2m/s^2 when user is distracted
A_CRUISE_MIN = -1.2
A_CRUISE_MAX_VALS = [1.2, 1.2, 0.8, 0.6]
A_CRUISE_MAX_BP = [0., 15., 25., 40.]
# Lookup table for turns
_A_TOTAL_MAX_V = [1.7, 3.2]
_A_TOTAL_MAX_BP = [20., 40.]
def get_max_accel(v_ego):
return interp(v_ego, A_CRUISE_MAX_BP, A_CRUISE_MAX_VALS)
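# Worked example of the lookup above: interp is piecewise-linear over the
# breakpoints, so v_ego = 20 m/s sits halfway between 15 m/s (1.2) and
# 25 m/s (0.8), giving get_max_accel(20.0) == 1.0 m/s^2.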
def limit_accel_in_turns(v_ego, angle_steers, a_target, CP):
"""
  This function returns a limited longitudinal acceleration, capped by the
  lateral acceleration already in use; this should avoid accelerating when
  losing the target in turns.
"""
a_total_max = interp(v_ego, _A_TOTAL_MAX_BP, _A_TOTAL_MAX_V)
a_y = v_ego**2 * angle_steers * CV.DEG_TO_RAD / (CP.steerRatio * CP.wheelbase)
a_x_allowed = math.sqrt(max(a_total_max**2 - a_y**2, 0.))
return [a_target[0], min(a_target[1], a_x_allowed)]
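# --- Hedged illustrative sketch (added for clarity; not part of the original
# module). The CP fields and numeric inputs below are assumptions chosen only
# for demonstration.
def _demo_limit_accel_in_turns():
  from types import SimpleNamespace
  CP = SimpleNamespace(steerRatio=15.0, wheelbase=2.7)
  # At 30 m/s with 10 degrees of steering, the implied lateral acceleration
  # exceeds the total budget, so the upper longitudinal limit clamps to 0.
  return limit_accel_in_turns(30.0, 10.0, [A_CRUISE_MIN, get_max_accel(30.0)], CP)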
class Planner():
def __init__(self, CP, init_v=0.0, init_a=0.0):
self.CP = CP
self.mpc = LongitudinalMpc()
self.fcw = False
self.cachedParams = CachedParams()
self.v_desired = init_v
self.a_desired = init_a
self.alpha = np.exp(-DT_MDL/2.0)
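    # Inferred: alpha is used in update() as a first-order low-pass on
    # v_desired with a ~2 s time constant at the DT_MDL update rate.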
self.v_desired_trajectory = np.zeros(CONTROL_N)
self.a_desired_trajectory = np.zeros(CONTROL_N)
self.j_desired_trajectory = np.zeros(CONTROL_N)
def update(self, sm, CP, lateral_planner):
v_ego = sm['carState'].vEgo
a_ego = sm['carState'].aEgo
v_cruise_kph = sm['controlsState'].vCruise
v_cruise_kph = min(v_cruise_kph, V_CRUISE_MAX)
v_cruise = v_cruise_kph * CV.KPH_TO_MS
long_control_state = sm['controlsState'].longControlState
force_slow_decel = sm['controlsState'].forceDecel
enabled = (long_control_state == LongCtrlState.pid) or (long_control_state == LongCtrlState.stopping)
if not enabled or sm['carState'].gasPressed:
self.v_desired = v_ego
self.a_desired = a_ego
# Prevent divergence, smooth in current v_ego
self.v_desired = self.alpha * self.v_desired + (1 - self.alpha) * v_ego
self.v_desired = max(0.0, self.v_desired)
accel_limits = [A_CRUISE_MIN, get_max_accel(v_ego)]
    if self.cachedParams.get('jvePilot.settings.slowInCurves', 5000) != "1":
accel_limits = limit_accel_in_turns(v_ego, sm['carState'].steeringAngleDeg, accel_limits, self.CP)
if force_slow_decel:
# if required so, force a smooth deceleration
accel_limits[1] = min(accel_limits[1], AWARENESS_DECEL)
accel_limits[0] = min(accel_limits[0], accel_limits[1])
# clip limits, cannot init MPC outside of bounds
accel_limits[0] = min(accel_limits[0], self.a_desired + 0.05)
accel_limits[1] = max(accel_limits[1], self.a_desired - 0.05)
self.mpc.set_accel_limits(accel_limits[0], accel_limits[1])
self.mpc.set_cur_state(self.v_desired, self.a_desired)
self.mpc.update(sm['carState'], sm['radarState'], v_cruise)
self.v_desired_trajectory = np.interp(T_IDXS[:CONTROL_N], T_IDXS_MPC, self.mpc.v_solution)
self.a_desired_trajectory = np.interp(T_IDXS[:CONTROL_N], T_IDXS_MPC, self.mpc.a_solution)
self.j_desired_trajectory = np.interp(T_IDXS[:CONTROL_N], T_IDXS_MPC[:-1], self.mpc.j_solution)
    # TODO: counter is only needed because radar is glitchy; remove once radar is gone
self.fcw = self.mpc.crash_cnt > 5
if self.fcw:
cloudlog.info("FCW triggered")
# Interpolate 0.05 seconds and save as starting point for next iteration
a_prev = self.a_desired
self.a_desired = float(interp(DT_MDL, T_IDXS[:CONTROL_N], self.a_desired_trajectory))
self.v_desired = self.v_desired + DT_MDL * (self.a_desired + a_prev)/2.0
if lateral_planner.lateralPlan and self.cachedParams.get('jvePilot.settings.slowInCurves', 5000) == "1":
curvs = list(lateral_planner.lateralPlan.curvatures)
if len(curvs):
# find the largest curvature in the solution and use that.
curv = abs(curvs[-1])
if curv != 0:
self.v_desired = float(min(self.v_desired, self.limit_speed_in_curv(sm, curv)))
def publish(self, sm, pm):
plan_send = messaging.new_message('longitudinalPlan')
plan_send.valid = sm.all_alive_and_valid(service_list=['carState', 'controlsState'])
longitudinalPlan = plan_send.longitudinalPlan
longitudinalPlan.modelMonoTime = sm.logMonoTime['modelV2']
longitudinalPlan.processingDelay = (plan_send.logMonoTime / 1e9) - sm.logMonoTime['modelV2']
longitudinalPlan.speeds = [float(x) for x in self.v_desired_trajectory]
longitudinalPlan.accels = [float(x) for x in self.a_desired_trajectory]
longitudinalPlan.jerks = [float(x) for x in self.j_desired_trajectory]
longitudinalPlan.hasLead = sm['radarState'].leadOne.status
longitudinalPlan.longitudinalPlanSource = self.mpc.source
longitudinalPlan.fcw = self.fcw
pm.send('longitudinalPlan', plan_send)
def limit_speed_in_curv(self, sm, curv):
v_ego = sm['carState'].vEgo
    a_y_max = 2.975 - v_ego * 0.0375  # ~1.7 @ 75 mph, ~2.6 @ 25 mph
# drop off
drop_off = self.cachedParams.get_float('jvePilot.settings.slowInCurves.speedDropOff', 5000)
if drop_off != 2 and a_y_max > 0:
a_y_max = np.sqrt(a_y_max) ** drop_off
v_curvature = np.sqrt(a_y_max / np.clip(curv, 1e-4, None))
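    # Worked example (assumed inputs, for illustration): v_ego = 20 m/s gives
    # a_y_max = 2.975 - 0.75 = 2.225; with curv = 0.01 1/m and the default
    # drop_off of 2, v_curvature = sqrt(2.225 / 0.01) ~= 14.9 m/s.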
model_speed = np.min(v_curvature)
return model_speed * self.cachedParams.get_float('jvePilot.settings.slowInCurves.speedRatio', 5000) | 40.657718 | 108 | 0.73176 |
import math
import numpy as np
from common.numpy_fast import interp
from common.cached_params import CachedParams
import cereal.messaging as messaging
from common.realtime import DT_MDL
from selfdrive.modeld.constants import T_IDXS
from selfdrive.config import Conversions as CV
from selfdrive.controls.lib.longcontrol import LongCtrlState
from selfdrive.controls.lib.longitudinal_mpc_lib.long_mpc import LongitudinalMpc
from selfdrive.controls.lib.longitudinal_mpc_lib.long_mpc import T_IDXS as T_IDXS_MPC
from selfdrive.controls.lib.drive_helpers import V_CRUISE_MAX, CONTROL_N
from selfdrive.swaglog import cloudlog
LON_MPC_STEP = 0.2
AWARENESS_DECEL = -0.2
A_CRUISE_MIN = -1.2
A_CRUISE_MAX_VALS = [1.2, 1.2, 0.8, 0.6]
A_CRUISE_MAX_BP = [0., 15., 25., 40.]
_A_TOTAL_MAX_V = [1.7, 3.2]
_A_TOTAL_MAX_BP = [20., 40.]
def get_max_accel(v_ego):
return interp(v_ego, A_CRUISE_MAX_BP, A_CRUISE_MAX_VALS)
def limit_accel_in_turns(v_ego, angle_steers, a_target, CP):
a_total_max = interp(v_ego, _A_TOTAL_MAX_BP, _A_TOTAL_MAX_V)
a_y = v_ego**2 * angle_steers * CV.DEG_TO_RAD / (CP.steerRatio * CP.wheelbase)
a_x_allowed = math.sqrt(max(a_total_max**2 - a_y**2, 0.))
return [a_target[0], min(a_target[1], a_x_allowed)]
class Planner():
def __init__(self, CP, init_v=0.0, init_a=0.0):
self.CP = CP
self.mpc = LongitudinalMpc()
self.fcw = False
self.cachedParams = CachedParams()
self.v_desired = init_v
self.a_desired = init_a
self.alpha = np.exp(-DT_MDL/2.0)
self.v_desired_trajectory = np.zeros(CONTROL_N)
self.a_desired_trajectory = np.zeros(CONTROL_N)
self.j_desired_trajectory = np.zeros(CONTROL_N)
def update(self, sm, CP, lateral_planner):
v_ego = sm['carState'].vEgo
a_ego = sm['carState'].aEgo
v_cruise_kph = sm['controlsState'].vCruise
v_cruise_kph = min(v_cruise_kph, V_CRUISE_MAX)
v_cruise = v_cruise_kph * CV.KPH_TO_MS
long_control_state = sm['controlsState'].longControlState
force_slow_decel = sm['controlsState'].forceDecel
enabled = (long_control_state == LongCtrlState.pid) or (long_control_state == LongCtrlState.stopping)
if not enabled or sm['carState'].gasPressed:
self.v_desired = v_ego
self.a_desired = a_ego
self.v_desired = self.alpha * self.v_desired + (1 - self.alpha) * v_ego
self.v_desired = max(0.0, self.v_desired)
accel_limits = [A_CRUISE_MIN, get_max_accel(v_ego)]
    if self.cachedParams.get('jvePilot.settings.slowInCurves', 5000) != "1":
accel_limits = limit_accel_in_turns(v_ego, sm['carState'].steeringAngleDeg, accel_limits, self.CP)
if force_slow_decel:
accel_limits[1] = min(accel_limits[1], AWARENESS_DECEL)
accel_limits[0] = min(accel_limits[0], accel_limits[1])
accel_limits[0] = min(accel_limits[0], self.a_desired + 0.05)
accel_limits[1] = max(accel_limits[1], self.a_desired - 0.05)
self.mpc.set_accel_limits(accel_limits[0], accel_limits[1])
self.mpc.set_cur_state(self.v_desired, self.a_desired)
self.mpc.update(sm['carState'], sm['radarState'], v_cruise)
self.v_desired_trajectory = np.interp(T_IDXS[:CONTROL_N], T_IDXS_MPC, self.mpc.v_solution)
self.a_desired_trajectory = np.interp(T_IDXS[:CONTROL_N], T_IDXS_MPC, self.mpc.a_solution)
self.j_desired_trajectory = np.interp(T_IDXS[:CONTROL_N], T_IDXS_MPC[:-1], self.mpc.j_solution)
self.fcw = self.mpc.crash_cnt > 5
if self.fcw:
cloudlog.info("FCW triggered")
a_prev = self.a_desired
self.a_desired = float(interp(DT_MDL, T_IDXS[:CONTROL_N], self.a_desired_trajectory))
self.v_desired = self.v_desired + DT_MDL * (self.a_desired + a_prev)/2.0
if lateral_planner.lateralPlan and self.cachedParams.get('jvePilot.settings.slowInCurves', 5000) == "1":
curvs = list(lateral_planner.lateralPlan.curvatures)
if len(curvs):
curv = abs(curvs[-1])
if curv != 0:
self.v_desired = float(min(self.v_desired, self.limit_speed_in_curv(sm, curv)))
def publish(self, sm, pm):
plan_send = messaging.new_message('longitudinalPlan')
plan_send.valid = sm.all_alive_and_valid(service_list=['carState', 'controlsState'])
longitudinalPlan = plan_send.longitudinalPlan
longitudinalPlan.modelMonoTime = sm.logMonoTime['modelV2']
longitudinalPlan.processingDelay = (plan_send.logMonoTime / 1e9) - sm.logMonoTime['modelV2']
longitudinalPlan.speeds = [float(x) for x in self.v_desired_trajectory]
longitudinalPlan.accels = [float(x) for x in self.a_desired_trajectory]
longitudinalPlan.jerks = [float(x) for x in self.j_desired_trajectory]
longitudinalPlan.hasLead = sm['radarState'].leadOne.status
longitudinalPlan.longitudinalPlanSource = self.mpc.source
longitudinalPlan.fcw = self.fcw
pm.send('longitudinalPlan', plan_send)
def limit_speed_in_curv(self, sm, curv):
v_ego = sm['carState'].vEgo
a_y_max = 2.975 - v_ego * 0.0375
drop_off = self.cachedParams.get_float('jvePilot.settings.slowInCurves.speedDropOff', 5000)
if drop_off != 2 and a_y_max > 0:
a_y_max = np.sqrt(a_y_max) ** drop_off
v_curvature = np.sqrt(a_y_max / np.clip(curv, 1e-4, None))
model_speed = np.min(v_curvature)
return model_speed * self.cachedParams.get_float('jvePilot.settings.slowInCurves.speedRatio', 5000) | true | true |
f7200bced83e516dd159f591c79e95855c52a38f | 23,009 | py | Python | sdk/network/azure-mgmt-network/azure/mgmt/network/v2020_08_01/aio/operations/_application_gateway_private_endpoint_connections_operations.py | praveenkuttappan/azure-sdk-for-python | 4b79413667b7539750a6c7dde15737013a3d4bd5 | [
"MIT"
] | 2,728 | 2015-01-09T10:19:32.000Z | 2022-03-31T14:50:33.000Z | sdk/network/azure-mgmt-network/azure/mgmt/network/v2020_08_01/aio/operations/_application_gateway_private_endpoint_connections_operations.py | v-xuto/azure-sdk-for-python | 9c6296d22094c5ede410bc83749e8df8694ccacc | [
"MIT"
] | 17,773 | 2015-01-05T15:57:17.000Z | 2022-03-31T23:50:25.000Z | sdk/network/azure-mgmt-network/azure/mgmt/network/v2020_08_01/aio/operations/_application_gateway_private_endpoint_connections_operations.py | v-xuto/azure-sdk-for-python | 9c6296d22094c5ede410bc83749e8df8694ccacc | [
"MIT"
] | 1,916 | 2015-01-19T05:05:41.000Z | 2022-03-31T19:36:44.000Z | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import Any, AsyncIterable, Callable, Dict, Generic, Optional, TypeVar, Union
import warnings
from azure.core.async_paging import AsyncItemPaged, AsyncList
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import AsyncHttpResponse, HttpRequest
from azure.core.polling import AsyncLROPoller, AsyncNoPolling, AsyncPollingMethod
from azure.mgmt.core.exceptions import ARMErrorFormat
from azure.mgmt.core.polling.async_arm_polling import AsyncARMPolling
from ... import models as _models
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]]
class ApplicationGatewayPrivateEndpointConnectionsOperations:
"""ApplicationGatewayPrivateEndpointConnectionsOperations async operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.mgmt.network.v2020_08_01.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = _models
def __init__(self, client, config, serializer, deserializer) -> None:
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
async def _delete_initial(
self,
resource_group_name: str,
application_gateway_name: str,
connection_name: str,
**kwargs: Any
) -> None:
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-08-01"
accept = "application/json"
# Construct URL
url = self._delete_initial.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'applicationGatewayName': self._serialize.url("application_gateway_name", application_gateway_name, 'str'),
'connectionName': self._serialize.url("connection_name", connection_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.delete(url, query_parameters, header_parameters)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 202, 204]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
_delete_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/applicationGateways/{applicationGatewayName}/privateEndpointConnections/{connectionName}'} # type: ignore
async def begin_delete(
self,
resource_group_name: str,
application_gateway_name: str,
connection_name: str,
**kwargs: Any
) -> AsyncLROPoller[None]:
"""Deletes the specified private endpoint connection on application gateway.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param application_gateway_name: The name of the application gateway.
:type application_gateway_name: str
:param connection_name: The name of the application gateway private endpoint connection.
:type connection_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be AsyncARMPolling.
Pass in False for this operation to not poll, or pass in your own initialized polling object for a personal polling strategy.
:paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either None or the result of cls(response)
:rtype: ~azure.core.polling.AsyncLROPoller[None]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, AsyncPollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType[None]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = await self._delete_initial(
resource_group_name=resource_group_name,
application_gateway_name=application_gateway_name,
connection_name=connection_name,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
if cls:
return cls(pipeline_response, None, {})
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'applicationGatewayName': self._serialize.url("application_gateway_name", application_gateway_name, 'str'),
'connectionName': self._serialize.url("connection_name", connection_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
if polling is True: polling_method = AsyncARMPolling(lro_delay, lro_options={'final-state-via': 'location'}, path_format_arguments=path_format_arguments, **kwargs)
elif polling is False: polling_method = AsyncNoPolling()
else: polling_method = polling
if cont_token:
return AsyncLROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_delete.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/applicationGateways/{applicationGatewayName}/privateEndpointConnections/{connectionName}'} # type: ignore
async def _update_initial(
self,
resource_group_name: str,
application_gateway_name: str,
connection_name: str,
parameters: "_models.ApplicationGatewayPrivateEndpointConnection",
**kwargs: Any
) -> Optional["_models.ApplicationGatewayPrivateEndpointConnection"]:
cls = kwargs.pop('cls', None) # type: ClsType[Optional["_models.ApplicationGatewayPrivateEndpointConnection"]]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-08-01"
content_type = kwargs.pop("content_type", "application/json")
accept = "application/json"
# Construct URL
url = self._update_initial.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'applicationGatewayName': self._serialize.url("application_gateway_name", application_gateway_name, 'str'),
'connectionName': self._serialize.url("connection_name", connection_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(parameters, 'ApplicationGatewayPrivateEndpointConnection')
body_content_kwargs['content'] = body_content
request = self._client.put(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 202]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('ApplicationGatewayPrivateEndpointConnection', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
_update_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/applicationGateways/{applicationGatewayName}/privateEndpointConnections/{connectionName}'} # type: ignore
async def begin_update(
self,
resource_group_name: str,
application_gateway_name: str,
connection_name: str,
parameters: "_models.ApplicationGatewayPrivateEndpointConnection",
**kwargs: Any
) -> AsyncLROPoller["_models.ApplicationGatewayPrivateEndpointConnection"]:
"""Updates the specified private endpoint connection on application gateway.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param application_gateway_name: The name of the application gateway.
:type application_gateway_name: str
:param connection_name: The name of the application gateway private endpoint connection.
:type connection_name: str
:param parameters: Parameters supplied to update application gateway private endpoint
connection operation.
:type parameters: ~azure.mgmt.network.v2020_08_01.models.ApplicationGatewayPrivateEndpointConnection
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be AsyncARMPolling.
Pass in False for this operation to not poll, or pass in your own initialized polling object for a personal polling strategy.
:paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either ApplicationGatewayPrivateEndpointConnection or the result of cls(response)
:rtype: ~azure.core.polling.AsyncLROPoller[~azure.mgmt.network.v2020_08_01.models.ApplicationGatewayPrivateEndpointConnection]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, AsyncPollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType["_models.ApplicationGatewayPrivateEndpointConnection"]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = await self._update_initial(
resource_group_name=resource_group_name,
application_gateway_name=application_gateway_name,
connection_name=connection_name,
parameters=parameters,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
deserialized = self._deserialize('ApplicationGatewayPrivateEndpointConnection', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'applicationGatewayName': self._serialize.url("application_gateway_name", application_gateway_name, 'str'),
'connectionName': self._serialize.url("connection_name", connection_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
if polling is True: polling_method = AsyncARMPolling(lro_delay, lro_options={'final-state-via': 'azure-async-operation'}, path_format_arguments=path_format_arguments, **kwargs)
elif polling is False: polling_method = AsyncNoPolling()
else: polling_method = polling
if cont_token:
return AsyncLROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_update.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/applicationGateways/{applicationGatewayName}/privateEndpointConnections/{connectionName}'} # type: ignore
async def get(
self,
resource_group_name: str,
application_gateway_name: str,
connection_name: str,
**kwargs: Any
) -> "_models.ApplicationGatewayPrivateEndpointConnection":
"""Gets the specified private endpoint connection on application gateway.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param application_gateway_name: The name of the application gateway.
:type application_gateway_name: str
:param connection_name: The name of the application gateway private endpoint connection.
:type connection_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: ApplicationGatewayPrivateEndpointConnection, or the result of cls(response)
:rtype: ~azure.mgmt.network.v2020_08_01.models.ApplicationGatewayPrivateEndpointConnection
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.ApplicationGatewayPrivateEndpointConnection"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-08-01"
accept = "application/json"
# Construct URL
url = self.get.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'applicationGatewayName': self._serialize.url("application_gateway_name", application_gateway_name, 'str'),
'connectionName': self._serialize.url("connection_name", connection_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.get(url, query_parameters, header_parameters)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('ApplicationGatewayPrivateEndpointConnection', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/applicationGateways/{applicationGatewayName}/privateEndpointConnections/{connectionName}'} # type: ignore
def list(
self,
resource_group_name: str,
application_gateway_name: str,
**kwargs: Any
) -> AsyncIterable["_models.ApplicationGatewayPrivateEndpointConnectionListResult"]:
"""Lists all private endpoint connections on an application gateway.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param application_gateway_name: The name of the application gateway.
:type application_gateway_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either ApplicationGatewayPrivateEndpointConnectionListResult or the result of cls(response)
:rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.network.v2020_08_01.models.ApplicationGatewayPrivateEndpointConnectionListResult]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.ApplicationGatewayPrivateEndpointConnectionListResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-08-01"
accept = "application/json"
def prepare_request(next_link=None):
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
if not next_link:
# Construct URL
url = self.list.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'applicationGatewayName': self._serialize.url("application_gateway_name", application_gateway_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
request = self._client.get(url, query_parameters, header_parameters)
else:
url = next_link
query_parameters = {} # type: Dict[str, Any]
request = self._client.get(url, query_parameters, header_parameters)
return request
async def extract_data(pipeline_response):
deserialized = self._deserialize('ApplicationGatewayPrivateEndpointConnectionListResult', pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, AsyncList(list_of_elem)
async def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
return AsyncItemPaged(
get_next, extract_data
)
list.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/applicationGateways/{applicationGatewayName}/privateEndpointConnections'} # type: ignore
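# Hedged usage sketch (illustrative only; the client variable and resource
# names below are assumptions, not part of this generated module):
#
#   ops = network_client.application_gateway_private_endpoint_connections
#   poller = await ops.begin_delete(
#       resource_group_name="my-rg",
#       application_gateway_name="my-agw",
#       connection_name="my-connection",
#   )
#   await poller.result()  # resolves once the long-running operation finishes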
| 53.509302 | 241 | 0.690556 |
from typing import Any, AsyncIterable, Callable, Dict, Generic, Optional, TypeVar, Union
import warnings
from azure.core.async_paging import AsyncItemPaged, AsyncList
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import AsyncHttpResponse, HttpRequest
from azure.core.polling import AsyncLROPoller, AsyncNoPolling, AsyncPollingMethod
from azure.mgmt.core.exceptions import ARMErrorFormat
from azure.mgmt.core.polling.async_arm_polling import AsyncARMPolling
from ... import models as _models
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]]
class ApplicationGatewayPrivateEndpointConnectionsOperations:
models = _models
def __init__(self, client, config, serializer, deserializer) -> None:
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
async def _delete_initial(
self,
resource_group_name: str,
application_gateway_name: str,
connection_name: str,
**kwargs: Any
) -> None:
cls = kwargs.pop('cls', None)
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-08-01"
accept = "application/json"
url = self._delete_initial.metadata['url']
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'applicationGatewayName': self._serialize.url("application_gateway_name", application_gateway_name, 'str'),
'connectionName': self._serialize.url("connection_name", connection_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
header_parameters = {}
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.delete(url, query_parameters, header_parameters)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 202, 204]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
_delete_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/applicationGateways/{applicationGatewayName}/privateEndpointConnections/{connectionName}'}
async def begin_delete(
self,
resource_group_name: str,
application_gateway_name: str,
connection_name: str,
**kwargs: Any
) -> AsyncLROPoller[None]:
polling = kwargs.pop('polling', True)
cls = kwargs.pop('cls', None)
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None)
if cont_token is None:
raw_result = await self._delete_initial(
resource_group_name=resource_group_name,
application_gateway_name=application_gateway_name,
connection_name=connection_name,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
if cls:
return cls(pipeline_response, None, {})
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'applicationGatewayName': self._serialize.url("application_gateway_name", application_gateway_name, 'str'),
'connectionName': self._serialize.url("connection_name", connection_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
if polling is True: polling_method = AsyncARMPolling(lro_delay, lro_options={'final-state-via': 'location'}, path_format_arguments=path_format_arguments, **kwargs)
elif polling is False: polling_method = AsyncNoPolling()
else: polling_method = polling
if cont_token:
return AsyncLROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_delete.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/applicationGateways/{applicationGatewayName}/privateEndpointConnections/{connectionName}'}
async def _update_initial(
self,
resource_group_name: str,
application_gateway_name: str,
connection_name: str,
parameters: "_models.ApplicationGatewayPrivateEndpointConnection",
**kwargs: Any
) -> Optional["_models.ApplicationGatewayPrivateEndpointConnection"]:
cls = kwargs.pop('cls', None)
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-08-01"
content_type = kwargs.pop("content_type", "application/json")
accept = "application/json"
url = self._update_initial.metadata['url']
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'applicationGatewayName': self._serialize.url("application_gateway_name", application_gateway_name, 'str'),
'connectionName': self._serialize.url("connection_name", connection_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
header_parameters = {}
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {}
body_content = self._serialize.body(parameters, 'ApplicationGatewayPrivateEndpointConnection')
body_content_kwargs['content'] = body_content
request = self._client.put(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 202]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('ApplicationGatewayPrivateEndpointConnection', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
_update_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/applicationGateways/{applicationGatewayName}/privateEndpointConnections/{connectionName}'}
async def begin_update(
self,
resource_group_name: str,
application_gateway_name: str,
connection_name: str,
parameters: "_models.ApplicationGatewayPrivateEndpointConnection",
**kwargs: Any
) -> AsyncLROPoller["_models.ApplicationGatewayPrivateEndpointConnection"]:
polling = kwargs.pop('polling', True)
cls = kwargs.pop('cls', None)
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None)
if cont_token is None:
raw_result = await self._update_initial(
resource_group_name=resource_group_name,
application_gateway_name=application_gateway_name,
connection_name=connection_name,
parameters=parameters,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
deserialized = self._deserialize('ApplicationGatewayPrivateEndpointConnection', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'applicationGatewayName': self._serialize.url("application_gateway_name", application_gateway_name, 'str'),
'connectionName': self._serialize.url("connection_name", connection_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
if polling is True: polling_method = AsyncARMPolling(lro_delay, lro_options={'final-state-via': 'azure-async-operation'}, path_format_arguments=path_format_arguments, **kwargs)
elif polling is False: polling_method = AsyncNoPolling()
else: polling_method = polling
if cont_token:
return AsyncLROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_update.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/applicationGateways/{applicationGatewayName}/privateEndpointConnections/{connectionName}'}
async def get(
self,
resource_group_name: str,
application_gateway_name: str,
connection_name: str,
**kwargs: Any
) -> "_models.ApplicationGatewayPrivateEndpointConnection":
cls = kwargs.pop('cls', None)
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-08-01"
accept = "application/json"
url = self.get.metadata['url']
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'applicationGatewayName': self._serialize.url("application_gateway_name", application_gateway_name, 'str'),
'connectionName': self._serialize.url("connection_name", connection_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
header_parameters = {}
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.get(url, query_parameters, header_parameters)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('ApplicationGatewayPrivateEndpointConnection', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/applicationGateways/{applicationGatewayName}/privateEndpointConnections/{connectionName}'}
def list(
self,
resource_group_name: str,
application_gateway_name: str,
**kwargs: Any
) -> AsyncIterable["_models.ApplicationGatewayPrivateEndpointConnectionListResult"]:
cls = kwargs.pop('cls', None)
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-08-01"
accept = "application/json"
def prepare_request(next_link=None):
header_parameters = {}
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
if not next_link:
url = self.list.metadata['url']
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'applicationGatewayName': self._serialize.url("application_gateway_name", application_gateway_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
request = self._client.get(url, query_parameters, header_parameters)
else:
url = next_link
query_parameters = {}
request = self._client.get(url, query_parameters, header_parameters)
return request
async def extract_data(pipeline_response):
deserialized = self._deserialize('ApplicationGatewayPrivateEndpointConnectionListResult', pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, AsyncList(list_of_elem)
async def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
return AsyncItemPaged(
get_next, extract_data
)
list.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/applicationGateways/{applicationGatewayName}/privateEndpointConnections'}
| true | true |
f7200c78df71a4b408145caba6e03ce542b4d9df | 3,753 | py | Python | tests/system/action/motion_comment_section/test_sort.py | MJJojo97/openslides-backend | af0d1edb0070e352d46f285a1ba0bbe3702d49ae | [
"MIT"
] | null | null | null | tests/system/action/motion_comment_section/test_sort.py | MJJojo97/openslides-backend | af0d1edb0070e352d46f285a1ba0bbe3702d49ae | [
"MIT"
] | 19 | 2021-11-22T16:25:54.000Z | 2021-11-25T13:38:13.000Z | tests/system/action/motion_comment_section/test_sort.py | MJJojo97/openslides-backend | af0d1edb0070e352d46f285a1ba0bbe3702d49ae | [
"MIT"
] | null | null | null | from openslides_backend.permissions.permissions import Permissions
from tests.system.action.base import BaseActionTestCase
class MotionCommentSectionSortActionTest(BaseActionTestCase):
def setUp(self) -> None:
super().setUp()
self.permission_test_model = {
"motion_comment_section/31": {
"meeting_id": 1,
"name": "name_loisueb",
},
"motion_comment_section/32": {
"meeting_id": 1,
"name": "name_blanumop",
},
}
def test_sort_correct_1(self) -> None:
self.set_models(
{
"meeting/222": {
"name": "name_SNLGsvIV",
"is_active_in_organization_id": 1,
},
"motion_comment_section/31": {
"meeting_id": 222,
"name": "name_loisueb",
},
"motion_comment_section/32": {
"meeting_id": 222,
"name": "name_blanumop",
},
}
)
response = self.request(
"motion_comment_section.sort",
{"meeting_id": 222, "motion_comment_section_ids": [32, 31]},
)
self.assert_status_code(response, 200)
model_31 = self.get_model("motion_comment_section/31")
assert model_31.get("weight") == 2
model_32 = self.get_model("motion_comment_section/32")
assert model_32.get("weight") == 1
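        # Inferred from the assertions above: the sort action assigns weights
        # by position in motion_comment_section_ids, starting at 1.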
def test_sort_missing_model(self) -> None:
self.set_models(
{
"meeting/222": {
"name": "name_SNLGsvIV",
"is_active_in_organization_id": 1,
},
"motion_comment_section/31": {
"meeting_id": 222,
"name": "name_loisueb",
},
}
)
response = self.request(
"motion_comment_section.sort",
{"meeting_id": 222, "motion_comment_section_ids": [32, 31]},
)
self.assert_status_code(response, 400)
assert "Id 32 not in db_instances." in response.json["message"]
def test_sort_another_section_db(self) -> None:
self.set_models(
{
"meeting/222": {
"name": "name_SNLGsvIV",
"is_active_in_organization_id": 1,
},
"motion_comment_section/31": {
"meeting_id": 222,
"name": "name_loisueb",
},
"motion_comment_section/32": {
"meeting_id": 222,
"name": "name_blanumop",
},
"motion_comment_section/33": {
"meeting_id": 222,
"name": "name_polusiem",
},
}
)
response = self.request(
"motion_comment_section.sort",
{"meeting_id": 222, "motion_comment_section_ids": [32, 31]},
)
self.assert_status_code(response, 400)
assert "Additional db_instances found." in response.json["message"]
def test_sort_no_permissions(self) -> None:
self.base_permission_test(
self.permission_test_model,
"motion_comment_section.sort",
{"meeting_id": 1, "motion_comment_section_ids": [32, 31]},
)
def test_sort_permissions(self) -> None:
self.base_permission_test(
self.permission_test_model,
"motion_comment_section.sort",
{"meeting_id": 1, "motion_comment_section_ids": [32, 31]},
Permissions.Motion.CAN_MANAGE,
)
| 34.75 | 75 | 0.501998 | from openslides_backend.permissions.permissions import Permissions
from tests.system.action.base import BaseActionTestCase
class MotionCommentSectionSortActionTest(BaseActionTestCase):
def setUp(self) -> None:
super().setUp()
self.permission_test_model = {
"motion_comment_section/31": {
"meeting_id": 1,
"name": "name_loisueb",
},
"motion_comment_section/32": {
"meeting_id": 1,
"name": "name_blanumop",
},
}
def test_sort_correct_1(self) -> None:
self.set_models(
{
"meeting/222": {
"name": "name_SNLGsvIV",
"is_active_in_organization_id": 1,
},
"motion_comment_section/31": {
"meeting_id": 222,
"name": "name_loisueb",
},
"motion_comment_section/32": {
"meeting_id": 222,
"name": "name_blanumop",
},
}
)
response = self.request(
"motion_comment_section.sort",
{"meeting_id": 222, "motion_comment_section_ids": [32, 31]},
)
self.assert_status_code(response, 200)
model_31 = self.get_model("motion_comment_section/31")
assert model_31.get("weight") == 2
model_32 = self.get_model("motion_comment_section/32")
assert model_32.get("weight") == 1
def test_sort_missing_model(self) -> None:
self.set_models(
{
"meeting/222": {
"name": "name_SNLGsvIV",
"is_active_in_organization_id": 1,
},
"motion_comment_section/31": {
"meeting_id": 222,
"name": "name_loisueb",
},
}
)
response = self.request(
"motion_comment_section.sort",
{"meeting_id": 222, "motion_comment_section_ids": [32, 31]},
)
self.assert_status_code(response, 400)
assert "Id 32 not in db_instances." in response.json["message"]
def test_sort_another_section_db(self) -> None:
self.set_models(
{
"meeting/222": {
"name": "name_SNLGsvIV",
"is_active_in_organization_id": 1,
},
"motion_comment_section/31": {
"meeting_id": 222,
"name": "name_loisueb",
},
"motion_comment_section/32": {
"meeting_id": 222,
"name": "name_blanumop",
},
"motion_comment_section/33": {
"meeting_id": 222,
"name": "name_polusiem",
},
}
)
response = self.request(
"motion_comment_section.sort",
{"meeting_id": 222, "motion_comment_section_ids": [32, 31]},
)
self.assert_status_code(response, 400)
assert "Additional db_instances found." in response.json["message"]
def test_sort_no_permissions(self) -> None:
self.base_permission_test(
self.permission_test_model,
"motion_comment_section.sort",
{"meeting_id": 1, "motion_comment_section_ids": [32, 31]},
)
def test_sort_permissions(self) -> None:
self.base_permission_test(
self.permission_test_model,
"motion_comment_section.sort",
{"meeting_id": 1, "motion_comment_section_ids": [32, 31]},
Permissions.Motion.CAN_MANAGE,
)
| true | true |
f7200c97efca195813bca2dc2b1dffe77ce84bea | 4,104 | py | Python | test/functional/feature_includeconf.py | kaboela/litecoinz | b793b04a717416726a7b1013b21b07fb35dbc4a2 | [
"MIT"
] | 8 | 2020-06-05T16:30:36.000Z | 2021-09-28T08:39:52.000Z | test/functional/feature_includeconf.py | kaboela/litecoinz | b793b04a717416726a7b1013b21b07fb35dbc4a2 | [
"MIT"
] | 8 | 2020-04-04T11:24:26.000Z | 2021-05-09T18:53:53.000Z | test/functional/feature_includeconf.py | kaboela/litecoinz | b793b04a717416726a7b1013b21b07fb35dbc4a2 | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
# Copyright (c) 2018 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Tests the includeconf argument
Verify that:
1. adding includeconf to the configuration file causes the includeconf
file to be loaded in the correct order.
2. includeconf cannot be used as a command line argument.
3. includeconf cannot be used recursively (i.e. includeconf can only
   be used from the base config file).
4. multiple includeconf arguments can be specified in the main config
file.
"""
import os
from test_framework.test_framework import BitcoinTestFramework
class IncludeConfTest(BitcoinTestFramework):
def set_test_params(self):
self.setup_clean_chain = False
self.num_nodes = 1
def setup_chain(self):
super().setup_chain()
# Create additional config files
# - tmpdir/node0/relative.conf
with open(os.path.join(self.options.tmpdir, "node0", "relative.conf"), "w", encoding="utf8") as f:
f.write("uacomment=relative\n")
# - tmpdir/node0/relative2.conf
with open(os.path.join(self.options.tmpdir, "node0", "relative2.conf"), "w", encoding="utf8") as f:
f.write("uacomment=relative2\n")
with open(os.path.join(self.options.tmpdir, "node0", "litecoinz.conf"), "a", encoding='utf8') as f:
f.write("uacomment=main\nincludeconf=relative.conf\n")
def run_test(self):
self.log.info("-includeconf works from config file. subversion should end with 'main; relative)/'")
subversion = self.nodes[0].getnetworkinfo()["subversion"]
assert subversion.endswith("main; relative)/")
self.log.info("-includeconf cannot be used as command-line arg")
self.stop_node(0)
self.nodes[0].assert_start_raises_init_error(extra_args=["-includeconf=relative2.conf"], expected_msg="Error: Error parsing command line arguments: -includeconf cannot be used from commandline; -includeconf=relative2.conf")
self.log.info("-includeconf cannot be used recursively. subversion should end with 'main; relative)/'")
with open(os.path.join(self.options.tmpdir, "node0", "relative.conf"), "a", encoding="utf8") as f:
f.write("includeconf=relative2.conf\n")
self.start_node(0)
subversion = self.nodes[0].getnetworkinfo()["subversion"]
assert subversion.endswith("main; relative)/")
self.stop_node(0, expected_stderr="warning: -includeconf cannot be used from included files; ignoring -includeconf=relative2.conf")
self.log.info("-includeconf cannot contain invalid arg")
# Commented out as long as we ignore invalid arguments in configuration files
#with open(os.path.join(self.options.tmpdir, "node0", "relative.conf"), "w", encoding="utf8") as f:
# f.write("foo=bar\n")
#self.nodes[0].assert_start_raises_init_error(expected_msg="Error: Error reading configuration file: Invalid configuration value foo")
self.log.info("-includeconf cannot be invalid path")
os.remove(os.path.join(self.options.tmpdir, "node0", "relative.conf"))
self.nodes[0].assert_start_raises_init_error(expected_msg="Error: Error reading configuration file: Failed to include configuration file relative.conf")
self.log.info("multiple -includeconf args can be used from the base config file. subversion should end with 'main; relative; relative2)/'")
with open(os.path.join(self.options.tmpdir, "node0", "relative.conf"), "w", encoding="utf8") as f:
# Restore initial file contents
f.write("uacomment=relative\n")
with open(os.path.join(self.options.tmpdir, "node0", "litecoinz.conf"), "a", encoding='utf8') as f:
f.write("includeconf=relative2.conf\n")
self.start_node(0)
subversion = self.nodes[0].getnetworkinfo()["subversion"]
assert subversion.endswith("main; relative; relative2)/")
if __name__ == '__main__':
IncludeConfTest().main()
| 49.445783 | 231 | 0.694932 |
import os
from test_framework.test_framework import BitcoinTestFramework
class IncludeConfTest(BitcoinTestFramework):
def set_test_params(self):
self.setup_clean_chain = False
self.num_nodes = 1
def setup_chain(self):
super().setup_chain()
with open(os.path.join(self.options.tmpdir, "node0", "relative.conf"), "w", encoding="utf8") as f:
f.write("uacomment=relative\n")
with open(os.path.join(self.options.tmpdir, "node0", "relative2.conf"), "w", encoding="utf8") as f:
f.write("uacomment=relative2\n")
with open(os.path.join(self.options.tmpdir, "node0", "litecoinz.conf"), "a", encoding='utf8') as f:
f.write("uacomment=main\nincludeconf=relative.conf\n")
def run_test(self):
self.log.info("-includeconf works from config file. subversion should end with 'main; relative)/'")
subversion = self.nodes[0].getnetworkinfo()["subversion"]
assert subversion.endswith("main; relative)/")
self.log.info("-includeconf cannot be used as command-line arg")
self.stop_node(0)
self.nodes[0].assert_start_raises_init_error(extra_args=["-includeconf=relative2.conf"], expected_msg="Error: Error parsing command line arguments: -includeconf cannot be used from commandline; -includeconf=relative2.conf")
self.log.info("-includeconf cannot be used recursively. subversion should end with 'main; relative)/'")
with open(os.path.join(self.options.tmpdir, "node0", "relative.conf"), "a", encoding="utf8") as f:
f.write("includeconf=relative2.conf\n")
self.start_node(0)
subversion = self.nodes[0].getnetworkinfo()["subversion"]
assert subversion.endswith("main; relative)/")
self.stop_node(0, expected_stderr="warning: -includeconf cannot be used from included files; ignoring -includeconf=relative2.conf")
self.log.info("-includeconf cannot contain invalid arg")
self.log.info("-includeconf cannot be invalid path")
os.remove(os.path.join(self.options.tmpdir, "node0", "relative.conf"))
self.nodes[0].assert_start_raises_init_error(expected_msg="Error: Error reading configuration file: Failed to include configuration file relative.conf")
self.log.info("multiple -includeconf args can be used from the base config file. subversion should end with 'main; relative; relative2)/'")
with open(os.path.join(self.options.tmpdir, "node0", "relative.conf"), "w", encoding="utf8") as f:
f.write("uacomment=relative\n")
with open(os.path.join(self.options.tmpdir, "node0", "litecoinz.conf"), "a", encoding='utf8') as f:
f.write("includeconf=relative2.conf\n")
self.start_node(0)
subversion = self.nodes[0].getnetworkinfo()["subversion"]
assert subversion.endswith("main; relative; relative2)/")
if __name__ == '__main__':
IncludeConfTest().main()
| true | true |
f72010304316b8649123167ea9d4b94f50b6f1f1 | 25,588 | py | Python | plaidcloud/utilities/tests/test_remote_dimension.py | PlaidCloud/public-utilities | 663e94f2657a02a4249177945e0880bb968c3439 | [
"Apache-2.0"
] | null | null | null | plaidcloud/utilities/tests/test_remote_dimension.py | PlaidCloud/public-utilities | 663e94f2657a02a4249177945e0880bb968c3439 | [
"Apache-2.0"
] | 48 | 2020-10-30T10:15:39.000Z | 2022-03-25T17:23:57.000Z | plaidcloud/utilities/tests/test_remote_dimension.py | PlaidCloud/plaid-utilities | 1031cb87580bbe110f56455925e483a0ae177fe1 | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python
# coding=utf-8
from __future__ import absolute_import
from __future__ import print_function
from __future__ import unicode_literals
import filecmp
import os
import unittest
from unittest import TestCase
import numpy as np
import pandas as pd
from pandas.testing import assert_frame_equal
from plaidcloud.utilities.connect import create_connection
from plaidcloud.utilities.remote.dimension import Dimensions
from plaidcloud.utilities.remote.dimension import MAIN
from plaidcloud.utilities.remote.dimension import ROOT
__author__ = 'Dave Parsons'
__copyright__ = 'Copyright 2010-2020, Tartan Solutions, Inc'
__credits__ = ['Dave Parsons']
__license__ = 'Proprietary'
__maintainer__ = 'Dave Parsons'
__email__ = 'dave.parsons@tartansolutions.com'
# Folders for comparison
BASELINE = './dim_baseline/'
FOLDER = './dim_current/'
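# Shared PlaidCloud connection reused by every test below. verify_ssl=False
# is assumed to be acceptable only because this suite targets a local/dev
# endpoint; do not disable certificate verification against production.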
conn = create_connection(verify_ssl=False)
class TestDimension(TestCase):
"""Test Redis Dimension code"""
def assertFileEqual(self, file1, file2, **kwargs):
return self.assertTrue(filecmp.cmp(file1, file2, shallow=False))
def assertFrameEqual(self, df1, df2, **kwargs):
return assert_frame_equal(df1, df2, check_names=True, check_like=True, **kwargs)
    def setUp(self):
        # Both comparison folders must exist before the tests write CSVs
        # into them; creating only BASELINE would leave every
        # to_csv(f'{FOLDER}...') call to fail on a clean checkout.
        for folder in (BASELINE, FOLDER):
            if not os.path.exists(folder):
                os.makedirs(folder)
self.periods = 'periods_rpc_test'
self.dims = Dimensions(conn=conn)
self.dim = self.dims.get_dimension(name=self.periods, replace=False)
return
def test_001_load_hierarchy_main(self):
df_main = pd.DataFrame(
[
[ROOT, 'Year'],
['Year', 'Q1'],
['Year', 'Q2'],
['Year', 'Q3'],
['Year', 'Q4'],
['Q1', 'January'],
['Q1', 'February'],
['Q1', 'March'],
['Q2', 'April'],
['Q2', 'May'],
['Q2', 'June'],
['Q3', 'July'],
['Q3', 'August'],
['Q3', 'September'],
['Q4', 'October'],
['Q4', 'November'],
['Q4', 'December'],
],
columns=['ParentName', 'ChildName']
)
# Clear down the dimension and reload
self.dim.clear()
# main hierarchy
df_results = self.dim.load_hierarchy_from_dataframe(df_main, 'ParentName', 'ChildName')
df_results.to_csv(f'{FOLDER}df_main_load.csv', index=False)
# Create a backup file to allow reloading in tests
data = self.dims.backup(self.periods)
with open(f'{FOLDER}periods.yaml', 'w') as file:
file.write(data)
self.assertFileEqual(f'{FOLDER}df_main_load.csv', f'{BASELINE}df_main_load.csv')
return
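        # The backup YAML written above can rebuild the dimension between
        # runs. A minimal sketch, assuming Dimensions exposes a `restore`
        # counterpart to `backup` (hypothetical name; not exercised by this
        # suite):
        #
        #   with open(f'{FOLDER}periods.yaml') as file:
        #       self.dims.restore(self.periods, file.read())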
def test_002_save_hierarchy_main(self):
# main hierarchy
df = self.dim.save_hierarchy_to_dataframe(MAIN)
df.drop(labels='index', axis=1, inplace=True)
df.to_csv(f'{FOLDER}df_main_hierarchy.csv', index=False)
self.assertFileEqual(f'{FOLDER}df_main_hierarchy.csv', f'{BASELINE}df_main_hierarchy.csv')
return
def test_003_load_hierarchy_halves(self):
df_halves = pd.DataFrame(
[
[ROOT, 'H1', '~', 'halves'],
[ROOT, 'H2', '~', 'halves'],
['H1', 'Q1', '+', 'halves'],
['H1', 'Q2', '+', 'halves'],
['H2', 'Q3', '+', 'halves'],
['H2', 'Q4', '+', 'halves'],
],
columns=['ParentName', 'ChildName', 'ConsolidationType', 'Hierarchy']
)
# halves hierarchy
df_results = self.dim.load_hierarchy_from_dataframe(df_halves, 'ParentName', 'ChildName',
'ConsolidationType', hierarchy='Hierarchy')
df_results.to_csv(f'{FOLDER}df_halves_load.csv', index=False)
self.assertFileEqual(f'{FOLDER}df_halves_load.csv', f'{BASELINE}df_halves_load.csv')
return
def test_004_save_hierarchy_halves(self):
# halves hierarchy
df = self.dim.save_hierarchy_to_dataframe('halves')
df.drop(labels='index', axis=1, inplace=True)
df.to_csv(f'{FOLDER}df_halves_hierarchy.csv', index=False)
self.assertFileEqual(f'{FOLDER}df_halves_hierarchy.csv', f'{BASELINE}df_halves_hierarchy.csv')
return
def test_005_load_hierarchy_financial(self):
df_financial = pd.DataFrame(
[
[ROOT, 'YTD', '+', 'financial'],
[ROOT, 'YTG', '+', 'financial'],
['YTD', 'January', '+', 'financial'],
['YTD', 'February', '+', 'financial'],
['YTD', 'March', '+', 'financial'],
['YTD', 'April', '+', 'financial'],
['YTG', 'May', '-', 'financial'],
['YTG', 'June', '-', 'financial'],
['YTG', 'July', '-', 'financial'],
['YTG', 'August', '-', 'financial'],
['YTG', 'September', '-', 'financial'],
['YTG', 'October', '-', 'financial'],
['YTG', 'November', '-', 'financial'],
['YTG', 'December', '-', 'financial'],
],
columns=['ParentName', 'ChildName', 'ConsolidationType', 'Hierarchy']
)
# financial hierarchy
df_results = self.dim.load_hierarchy_from_dataframe(df_financial, 'ParentName', 'ChildName',
'ConsolidationType', hierarchy='Hierarchy')
df_results.to_csv(f'{FOLDER}df_financial_load.csv', index=False)
self.assertFileEqual(f'{FOLDER}df_financial_load.csv', f'{BASELINE}df_financial_load.csv')
return
def test_006_save_hierarchy_financial(self):
# financial hierarchy
df = self.dim.save_hierarchy_to_dataframe('financial')
df.drop(labels='index', axis=1, inplace=True)
df.to_csv(f'{FOLDER}df_financial_hierarchy.csv', index=False)
self.assertFileEqual(f'{FOLDER}df_financial_hierarchy.csv', f'{BASELINE}df_financial_hierarchy.csv')
return
def test_007_load_hierarchy_errors(self):
        # This dataframe deliberately includes malformed rows (blank, NaN and
        # None names, self-parenting, ROOT as a child, ':' in node names,
        # duplicate rows, and an empty hierarchy), so the per-row results
        # dataframe is what gets verified. Note ':' is sanitized to '-',
        # which is why later tests expect 'Donk-tober'.
df_test = pd.DataFrame(
[
['', '', '+', 'main'],
[' ', ' ', '+', 'main'],
['Q5', '', '+', 'main'],
[np.NaN, np.NaN, '+', 'main'],
[None, None, '+', 'main'],
['None', 'None', '+', 'main'],
['Q5', 'Q5', '+', 'main'],
['Q5', ROOT, '+', 'main'],
['Q5', 'Donk:tober', '+', 'main'],
['Donk:tober', 'Janusday', '+', 'main'],
['Year', 'Q5', '+', 'main'],
['Year', 'Q5', '+', 'main'],
['Q4', 'Badtober', '+', 'halves'],
['Q6', 'Craptober', '+', ''],
],
columns=['ParentName', 'ChildName', 'ConsolidationType', 'Hierarchy']
)
df_results = self.dim.load_hierarchy_from_dataframe(df_test, 'ParentName', 'ChildName',
'ConsolidationType', hierarchy='Hierarchy')
df_results.to_csv(f'{FOLDER}df_complex_load.csv', index=False)
self.assertFileEqual(f'{FOLDER}df_complex_load.csv', f'{BASELINE}df_complex_load.csv')
return
def test_008_load_save_aliases(self):
df_aliases = pd.DataFrame(
[
['Trimestre 1', 'French', 'Q1'],
['Trimestre 2', 'French', 'Q2'],
['Trimestre 3', 'French', 'Q3'],
['Trimestre 4', 'French', 'Q4'],
['Janvier', 'French', 'January'],
['Fevier', 'French', 'February'],
['Mars', 'French', 'March'],
['Avril', 'French', 'April'],
['Mai', 'French', 'May'],
['Juin', 'French', 'June'],
['Julliet', 'French', 'July'],
['Aout', 'French', 'August'],
['Septembre', 'French', 'September'],
['Octobre', 'French', 'October'],
['Novembre', 'French', 'November'],
['Decembre', 'French', 'December'],
['Haneri 1', 'Welsh', 'H1'],
['Haneri 2', 'Welsh', 'H2'],
['Ionawr', 'Welsh', 'January'],
['Chwefror', 'Welsh', 'February'],
['Mawrth', 'Welsh', 'March'],
['Ebrill', 'Welsh', 'April'],
['Mai', 'Welsh', 'May'],
['Mehefin', 'Welsh', 'June'],
['Gorffennaf', 'Welsh', 'July'],
['Awst', 'Welsh', 'August'],
['Medi', 'Welsh', 'September'],
['Hydref', 'Welsh', 'October'],
['Tachwedd', 'Welsh', 'November'],
['Rhagfyr', 'Welsh', 'December'],
['Январь', 'Russian', 'January'],
['Февраль', 'Russian', 'February'],
['Март', 'Russian', 'March'],
['Апрель', 'Russian', 'April'],
['Май', 'Russian', 'May'],
['Июнь', 'Russian', 'June'],
['Июль', 'Russian', 'July'],
['Август', 'Russian', 'August'],
['Сентябрь', 'Russian', 'September'],
['Октябрь', 'Russian', 'October'],
['Ноябрь', 'Russian', 'November'],
['Декабрь', 'Russian', 'December'],
['일월', 'Korean', 'January'],
['이월', 'Korean', 'February'],
['삼월', 'Korean', 'March'],
['사월', 'Korean', 'April'],
['오월', 'Korean', 'May'],
['유월', 'Korean', 'June'],
['칠월', 'Korean', 'July'],
['팔월', 'Korean', 'August'],
['구월', 'Korean', 'September'],
['시월', 'Korean', 'October'],
['십일월', 'Korean', 'November'],
['십이월', 'Korean', 'December'],
['☃️', 'Emoji', 'January'],
['💘', 'Emoji', 'February'],
['☘️', 'Emoji', 'March'],
['☔', 'Emoji', 'April'],
['🌺', 'Emoji', 'May'],
['🌞', 'Emoji', 'June'],
['🍦', 'Emoji', 'July'],
['🏖️', 'Emoji', 'August'],
['🍎', 'Emoji', 'September'],
['🎃', 'Emoji', 'October'],
['🍂', 'Emoji', 'November'],
['🎅', 'Emoji', 'December'],
],
columns=['AliasValue', 'AliasName', 'NodeName']
)
# Aliases
self.dim.load_aliases_from_dataframe(df_aliases, 'NodeName', 'AliasName', 'AliasValue')
df = self.dim.save_aliases_to_dataframe(None)
df.drop(labels='index', axis=1, inplace=True)
df.sort_values(by=['name', 'node', 'value'], axis=0, inplace=True)
df.to_csv(f'{FOLDER}df_aliases.csv', index=False)
self.assertFileEqual(f'{FOLDER}df_aliases.csv', f'{BASELINE}df_aliases.csv')
return
def test_009_load_save_properties(self):
df_properties = pd.DataFrame(
[
['Magenta', 'Colour', ROOT],
['Purple', 'Colour', 'Year'],
['Red', 'Colour', 'Q1'],
['Orange', 'Colour', 'Q2'],
['Green', 'Colour', 'April'],
['Green', 'Colour', 'May'],
['Blue', 'Colour', 'July'],
['Blue', 'Colour', 'August'],
['Blue', 'Colour', 'September'],
['White', 'Colour', 'Q4'],
['Red', 'Colour', 'October'],
['Green', 'Colour', 'November'],
['Red', 'Colour', 'December'],
['Winter', 'Season', 'Q1'],
['Spring', 'Season', 'Q2'],
['Summer', 'Season', 'Q3'],
['Autumn', 'Season', 'Q4'],
],
columns=['PropertyValue', 'PropertyName', 'NodeName']
)
# Properties
self.dim.load_properties_from_dataframe(df_properties, 'NodeName', 'PropertyName', 'PropertyValue')
df = self.dim.save_properties_to_dataframe(None)
df.drop(labels='index', axis=1, inplace=True)
df.sort_values(by=['name', 'node', 'value'], axis=0, inplace=True)
df.to_csv(f'{FOLDER}df_properties.csv', index=False)
self.assertFileEqual(f'{FOLDER}df_properties.csv', f'{BASELINE}df_properties.csv')
return
def test_010_load_save_values(self):
df_values = pd.DataFrame(
[
[-10.0, 'Costs', 'January'],
[-100.0, 'Costs', 'February'],
[-1000.0, 'Costs', 'March'],
[-20.0, 'Costs', 'April'],
[-200.0, 'Costs', 'May'],
[-2000.0, 'Costs', 'June'],
[-30.0, 'Costs', 'July'],
[-300.0, 'Costs', 'August'],
[-3000.0, 'Costs', 'September'],
[-40.0, 'Costs', 'October'],
[-400.0, 'Costs', 'November'],
[-4000.0, 'Costs', 'December'],
[10.0, 'Profit', 'January'],
[100.0, 'Profit', 'February'],
[1000.0, 'Profit', 'March'],
[20.0, 'Profit', 'April'],
[200.0, 'Profit', 'May'],
[2000.0, 'Profit', 'June'],
[30.0, 'Profit', 'July'],
[300.0, 'Profit', 'August'],
[3000.0, 'Profit', 'September'],
[40.0, 'Profit', 'October'],
[400.0, 'Profit', 'November'],
[4000.0, 'Profit', 'December'],
],
columns=['Value', 'ValueName', 'NodeName']
)
# Values
self.dim.load_values_from_dataframe(df_values, 'NodeName', 'ValueName', 'Value')
df = self.dim.save_values_to_dataframe(None)
df.drop(labels='index', axis=1, inplace=True)
df.sort_values(by=['name', 'node', 'value'], axis=0, inplace=True)
df.to_csv(f'{FOLDER}df_values.csv', index=False)
self.assertFileEqual(f'{FOLDER}df_values.csv', f'{BASELINE}df_values.csv')
return
def test_011_get_hierarchy_dataframe(self):
df = self.dim.get_hierarchy_dataframe(hierarchy=MAIN)
df = df.reindex(columns=sorted(df.columns))
df.to_csv(f'{FOLDER}df_get_hierarchy_main.csv', index=False)
self.assertFileEqual(f'{FOLDER}df_get_hierarchy_main.csv', f'{BASELINE}df_get_hierarchy_main.csv')
return
def test_012_get_aliases_dataframe(self):
df = self.dim.get_aliases_dataframe()
df = df.reindex(columns=sorted(df.columns))
df.sort_values(by=list(df.columns), axis=0, inplace=True)
df.to_csv(f'{FOLDER}df_get_aliases.csv', index=False)
self.assertFileEqual(f'{FOLDER}df_get_aliases.csv', f'{BASELINE}df_get_aliases.csv')
return
def test_013_get_attributes_dataframe(self):
df = self.dim.get_attributes_dataframe()
df.drop(labels='index', axis=1, inplace=True)
df = df.reindex(columns=sorted(df.columns))
df.sort_values(by=list(df.columns), axis=0, inplace=True)
df.to_csv(f'{FOLDER}df_get_attributes.csv', index=False)
self.assertFileEqual(f'{FOLDER}df_get_attributes.csv', f'{BASELINE}df_get_attributes.csv')
return
def test_014_get_consolidation_dataframe(self):
df = self.dim.get_consolidation_dataframe('Costs', hierarchy=MAIN)
df.to_csv(f'{FOLDER}df_get_consolidation_costs_main.csv', index=False)
self.assertFileEqual(f'{FOLDER}df_get_consolidation_costs_main.csv', f'{BASELINE}df_get_consolidation_costs_main.csv')
return
def test_015_get_properties_dataframe(self):
df = self.dim.get_properties_dataframe()
df.drop(labels='index', axis=1, inplace=True)
df = df.reindex(columns=sorted(df.columns))
df.sort_values(by=list(df.columns), axis=0, inplace=True)
df.to_csv(f'{FOLDER}df_get_properties.csv', index=False)
self.assertFileEqual(f'{FOLDER}df_get_properties.csv', f'{BASELINE}df_get_properties.csv')
return
def test_016_get_values_dataframe(self):
df = self.dim.get_values_dataframe()
df = df.reindex(columns=sorted(df.columns))
df.sort_values(by=list(df.columns), axis=0, inplace=True)
df.to_csv(f'{FOLDER}df_get_values.csv', index=False)
self.assertFileEqual(f'{FOLDER}df_get_values.csv', f'{BASELINE}df_get_values.csv')
return
def test_017_get_hierarchy_table(self):
df = self.dim.hierarchy_table(hierarchy=MAIN)
df = df.reindex(columns=sorted(df.columns))
df.sort_values(by=list(df.columns), axis=0, inplace=True)
df.to_csv(f'{FOLDER}df_get_hierarchy_table_main.csv', index=False)
self.assertFileEqual(f'{FOLDER}df_get_hierarchy_table_main.csv', f'{BASELINE}df_get_hierarchy_table_main.csv')
return
def test_018_get_all_leaves(self):
expected = ['April',
'August',
'December',
'February',
'January',
'Janusday',
'July',
'June',
'March',
'May',
'November',
'October',
'September']
nodes = sorted(self.dim.get_all_leaves(hierarchy=MAIN))
return self.assertListEqual(expected, nodes)
def test_019_get_all_nodes(self):
expected = ['!!root!!',
'April',
'August',
'December',
'Donk-tober',
'February',
'January',
'Janusday',
'July',
'June',
'March',
'May',
'November',
'October',
'Q1',
'Q2',
'Q3',
'Q4',
'Q5',
'September',
'Year']
nodes = sorted(self.dim.get_all_nodes(hierarchy=MAIN))
return self.assertListEqual(expected, nodes)
def test_020_get_all_parents(self):
expected = ['!!root!!', 'Donk-tober', 'Q1', 'Q2', 'Q3', 'Q4', 'Q5', 'Year']
nodes = sorted(self.dim.get_all_parents(hierarchy=MAIN))
return self.assertListEqual(expected, nodes)
def test_021_get_ancestors(self):
expected = [[0, 'February'], [1, 'Q1'], [2, 'Year'], [3, '!!root!!']]
nodes = self.dim.get_ancestors('February', hierarchy=MAIN)
return self.assertListEqual(expected, nodes)
def test_022_get_ancestor_at_generation(self):
expected = 'Year'
node = self.dim.get_ancestor_at_generation('February', 1, hierarchy=MAIN)
return self.assertEqual(expected, node)
def test_023_get_ancestor_at_level(self):
expected = 'Year'
node = self.dim.get_ancestor_at_level('February', 2, hierarchy=MAIN)
return self.assertEqual(expected, node)
def test_024_get_bottom(self):
expected = 'March'
node = self.dim.get_bottom('Q1', hierarchy=MAIN)
return self.assertEqual(expected, node)
def test_025_get_top(self):
expected = 'January'
node = self.dim.get_top('Q1', hierarchy=MAIN)
return self.assertEqual(expected, node)
def test_026_get_down(self):
expected = 'March'
node = self.dim.get_down('Q1', 'February', hierarchy=MAIN)
return self.assertEqual(expected, node)
def test_027_get_up(self):
expected = 'January'
node = self.dim.get_up('Q1', 'February', hierarchy=MAIN)
return self.assertEqual(expected, node)
def test_028_get_children(self):
expected = ['January', 'February', 'March']
nodes = self.dim.get_children('Q1', hierarchy=MAIN)
return self.assertListEqual(expected, nodes)
def test_029_get_children_count(self):
expected = 3
count = self.dim.get_children_count('Q1', hierarchy=MAIN)
return self.assertEqual(expected, count)
def test_030_get_generation(self):
expected = 2
count = self.dim.get_generation('Q1', hierarchy=MAIN)
return self.assertEqual(expected, count)
def test_031_get_grandparent(self):
expected = 'Year'
node = self.dim.get_grandparent('February', hierarchy=MAIN)
return self.assertEqual(expected, node)
def test_032_get_leaves(self):
expected = [[2, 'January'],
[2, 'February'],
[2, 'March'],
[2, 'April'],
[2, 'May'],
[2, 'June'],
[2, 'July'],
[2, 'August'],
[2, 'September'],
[2, 'October'],
[2, 'November'],
[2, 'December'],
[3, 'Janusday']]
nodes = self.dim.get_leaves('Year', hierarchy=MAIN)
return self.assertEqual(expected, nodes)
def test_033_get_leaves_at_generation(self):
expected = [[2, 'January'],
[2, 'February'],
[2, 'March'],
[2, 'April'],
[2, 'May'],
[2, 'June'],
[2, 'July'],
[2, 'August'],
[2, 'September'],
[2, 'October'],
[2, 'November'],
[2, 'December']]
nodes = self.dim.get_leaves_at_generation('Year', 2, hierarchy=MAIN)
return self.assertEqual(expected, nodes)
def test_034_get_leaves_at_level(self):
expected = [[3, 'January'],
[3, 'February'],
[3, 'March'],
[3, 'April'],
[3, 'May'],
[3, 'June'],
[3, 'July'],
[3, 'August'],
[3, 'September'],
[3, 'October'],
[3, 'November'],
[3, 'December']]
nodes = self.dim.get_leaves_at_level('February', 0, hierarchy=MAIN)
return self.assertEqual(expected, nodes)
def test_035_get_parent(self):
expected = 'Q1'
nodes = self.dim.get_parent('February', hierarchy=MAIN)
return self.assertEqual(expected, nodes)
def test_036_get_parents(self):
expected = [['financial', 'halves', 'main'], ['YTD', 'Q1', 'Q1']]
nodes = self.dim.get_parents('February')
return self.assertEqual(expected, nodes)
def test_037_get_siblings(self):
expected = ['January', 'February', 'March']
nodes = self.dim.get_siblings('February', hierarchy=MAIN)
return self.assertEqual(expected, nodes)
def test_038_get_difference(self):
expected = sorted(['Janusday', 'Year', 'Q5', 'Donk-tober'])
nodes = sorted(self.dim.get_difference(['halves']))
return self.assertEqual(expected, nodes)
def test_039_get_intersection(self):
expected = sorted(['!!root!!', 'April', 'August', 'December', 'February', 'January', 'July', 'June', 'March',
'May', 'November', 'October', 'Q1', 'Q2', 'Q3', 'Q4', 'September'])
nodes = sorted(self.dim.get_intersection(['halves']))
return self.assertEqual(expected, nodes)
def test_040_get_union(self):
expected = sorted(['!!root!!', 'April', 'August', 'December', 'Donk-tober', 'February', 'H1', 'H2', 'January',
'Janusday', 'July', 'June', 'March', 'May', 'November', 'October', 'Q1', 'Q2', 'Q3', 'Q4',
'Q5', 'September', 'Year'])
nodes = sorted(self.dim.get_union(['halves']))
return self.assertEqual(expected, nodes)
def test_041_add_node_to_alt(self):
expected = 'H2'
self.dim.add_node('H2', 'Q5', '+', hierarchy='halves', after='Q4')
node = self.dim.get_parent('Q5', hierarchy='halves')
return self.assertEqual(expected, node)
def test_042_move_node_in_alt(self):
expected = 'H1'
self.dim.move_node('Q5', 'H1', hierarchy='halves', before='Q2')
node = self.dim.get_parent('Q5', hierarchy='halves')
return self.assertEqual(expected, node)
def test_043_rename_node(self):
expected = 'Q5'
self.dim.rename_node('Donk-tober', 'Davetober')
node = self.dim.get_parent('Davetober', hierarchy=MAIN)
return self.assertEqual(expected, node)
def test_044_delete_node(self):
self.dim.delete_node('Year', 'Q5', hierarchy=MAIN)
node = self.dim.node_exists('Q5')
return self.assertFalse(node)
def test_045_default_alias_dataframe(self):
self.dim.set_default_aliases(primary='Welsh', secondary='French')
df = self.dim.get_aliases_dataframe()
df = df.reindex(columns=sorted(df.columns))
df.sort_values(by=list(df.columns), axis=0, inplace=True)
df.to_csv(f'{FOLDER}df_get_default_aliases.csv', index=False)
self.assertFileEqual(f'{FOLDER}df_get_default_aliases.csv', f'{BASELINE}df_get_default_aliases.csv')
        return
def tearDown(self):
self.dim = None
self.dims = None
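# Allow running this module directly; test runners such as pytest ignore
# this guard. It also makes the `unittest` import above meaningful.
if __name__ == '__main__':
    unittest.main()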
| 40.745223 | 126 | 0.523409 |
| true | true |
f7201125ce532819474be57b6c62cb7fcba4cd59 | 33,749 | py | Python | sdks/python/apache_beam/dataframe/pandas_doctests_test.py | psobot/beam | d9da8a4dc818b01a86d2dce2e78c0d78b47038bb | [
"Apache-2.0",
"BSD-3-Clause"
] | 5 | 2019-07-27T11:54:33.000Z | 2021-06-06T11:53:36.000Z | sdks/python/apache_beam/dataframe/pandas_doctests_test.py | psobot/beam | d9da8a4dc818b01a86d2dce2e78c0d78b47038bb | [
"Apache-2.0",
"BSD-3-Clause"
] | 12 | 2019-04-15T15:27:23.000Z | 2019-07-01T18:13:10.000Z | sdks/python/apache_beam/dataframe/pandas_doctests_test.py | psobot/beam | d9da8a4dc818b01a86d2dce2e78c0d78b47038bb | [
"Apache-2.0",
"BSD-3-Clause"
] | 1 | 2021-06-03T19:54:48.000Z | 2021-06-03T19:54:48.000Z | #
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sys
import unittest
import pandas as pd
from apache_beam.dataframe import doctests
from apache_beam.dataframe.pandas_top_level_functions import _is_top_level_function
@unittest.skipIf(sys.platform == 'win32', '[BEAM-10626]')
class DoctestTest(unittest.TestCase):
def test_ndframe_tests(self):
# IO methods are tested in io_test.py
skip_writes = {
f'pandas.core.generic.NDFrame.{name}': ['*']
for name in dir(pd.core.generic.NDFrame) if name.startswith('to_')
}
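    # The comprehension above expands to one wildcard skip per writer, e.g.
    # {'pandas.core.generic.NDFrame.to_csv': ['*'],
    #  'pandas.core.generic.NDFrame.to_json': ['*'], ...},
    # so every to_* doctest is excluded here and covered by io_test.py.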
result = doctests.testmod(
pd.core.generic,
use_beam=False,
report=True,
wont_implement_ok={
'pandas.core.generic.NDFrame.first': ['*'],
'pandas.core.generic.NDFrame.head': ['*'],
'pandas.core.generic.NDFrame.last': ['*'],
'pandas.core.generic.NDFrame.shift': ['*'],
'pandas.core.generic.NDFrame.tail': ['*'],
'pandas.core.generic.NDFrame.take': ['*'],
'pandas.core.generic.NDFrame.values': ['*'],
'pandas.core.generic.NDFrame.tz_localize': [
"s.tz_localize('CET', ambiguous='infer')",
# np.array is not a deferred object. This use-case is possible
# with a deferred Series though, which is tested in
# frames_test.py
"s.tz_localize('CET', ambiguous=np.array([True, True, False]))",
],
'pandas.core.generic.NDFrame.truncate': [
# These inputs rely on tail (wont implement, order
# sensitive) for verification
"df.tail()",
"df.loc['2016-01-05':'2016-01-10', :].tail()",
],
'pandas.core.generic.NDFrame.replace': [
"s.replace([1, 2], method='bfill')",
# Relies on method='pad'
"s.replace('a', None)",
],
'pandas.core.generic.NDFrame.fillna': [
"df.fillna(method='ffill')",
'df.fillna(value=values, limit=1)',
],
'pandas.core.generic.NDFrame.sort_values': ['*'],
'pandas.core.generic.NDFrame.mask': [
'df.where(m, -df) == np.where(m, df, -df)'
],
'pandas.core.generic.NDFrame.where': [
'df.where(m, -df) == np.where(m, df, -df)'
],
'pandas.core.generic.NDFrame.interpolate': ['*'],
},
not_implemented_ok={
'pandas.core.generic.NDFrame.asof': ['*'],
'pandas.core.generic.NDFrame.at_time': ['*'],
'pandas.core.generic.NDFrame.between_time': ['*'],
'pandas.core.generic.NDFrame.describe': ['*'],
'pandas.core.generic.NDFrame.ewm': ['*'],
'pandas.core.generic.NDFrame.expanding': ['*'],
'pandas.core.generic.NDFrame.flags': ['*'],
'pandas.core.generic.NDFrame.pct_change': ['*'],
'pandas.core.generic.NDFrame.rank': ['*'],
'pandas.core.generic.NDFrame.reindex': ['*'],
'pandas.core.generic.NDFrame.reindex_like': ['*'],
'pandas.core.generic.NDFrame.replace': ['*'],
'pandas.core.generic.NDFrame.resample': ['*'],
'pandas.core.generic.NDFrame.rolling': ['*'],
'pandas.core.generic.NDFrame.sample': ['*'],
'pandas.core.generic.NDFrame.set_flags': ['*'],
'pandas.core.generic.NDFrame.squeeze': ['*'],
'pandas.core.generic.NDFrame.transform': ['*'],
'pandas.core.generic.NDFrame.truncate': ['*'],
'pandas.core.generic.NDFrame.xs': ['*'],
# argsort unimplemented
'pandas.core.generic.NDFrame.abs': [
'df.loc[(df.c - 43).abs().argsort()]',
],
},
skip={
# Internal test
'pandas.core.generic.NDFrame._set_axis_name': ['*'],
# Fails to construct test series. asfreq is not implemented anyway.
'pandas.core.generic.NDFrame.asfreq': ['*'],
'pandas.core.generic.NDFrame.astype': ['*'],
'pandas.core.generic.NDFrame.convert_dtypes': ['*'],
'pandas.core.generic.NDFrame.copy': ['*'],
'pandas.core.generic.NDFrame.droplevel': ['*'],
'pandas.core.generic.NDFrame.infer_objects': ['*'],
'pandas.core.generic.NDFrame.rank': [
# Modified dataframe
'df'
],
'pandas.core.generic.NDFrame.rename': [
# Seems to be an upstream bug. The actual error has a different
# message:
# TypeError: Index(...) must be called with a collection of
# some kind, 2 was passed
# pandas doctests only verify the type of exception
'df.rename(2)'
],
# Tests rely on setting index
'pandas.core.generic.NDFrame.rename_axis': ['*'],
# Raises right exception, but testing framework has matching issues.
'pandas.core.generic.NDFrame.replace': [
"df.replace({'a string': 'new value', True: False}) # raises"
],
'pandas.core.generic.NDFrame.squeeze': ['*'],
# NameError
'pandas.core.generic.NDFrame.resample': ['df'],
# Skipped so we don't need to install natsort
'pandas.core.generic.NDFrame.sort_values': [
'from natsort import index_natsorted',
'df.sort_values(\n'
' by="time",\n'
' key=lambda x: np.argsort(index_natsorted(df["time"]))\n'
')'
],
**skip_writes
})
self.assertEqual(result.failed, 0)
def test_dataframe_tests(self):
result = doctests.testmod(
pd.core.frame,
use_beam=False,
report=True,
wont_implement_ok={
'pandas.core.frame.DataFrame.T': ['*'],
'pandas.core.frame.DataFrame.cummax': ['*'],
'pandas.core.frame.DataFrame.cummin': ['*'],
'pandas.core.frame.DataFrame.cumsum': ['*'],
'pandas.core.frame.DataFrame.cumprod': ['*'],
'pandas.core.frame.DataFrame.diff': ['*'],
'pandas.core.frame.DataFrame.fillna': [
"df.fillna(method='ffill')",
'df.fillna(value=values, limit=1)',
],
'pandas.core.frame.DataFrame.items': ['*'],
'pandas.core.frame.DataFrame.itertuples': ['*'],
'pandas.core.frame.DataFrame.iterrows': ['*'],
'pandas.core.frame.DataFrame.iteritems': ['*'],
            # default keep is 'first', which breaks ties by input order;
            # that makes these calls order-sensitive, hence wont-implement
'pandas.core.frame.DataFrame.nlargest': [
"df.nlargest(3, 'population')",
"df.nlargest(3, ['population', 'GDP'])",
"df.nlargest(3, 'population', keep='last')"
],
'pandas.core.frame.DataFrame.nsmallest': [
"df.nsmallest(3, 'population')",
"df.nsmallest(3, ['population', 'GDP'])",
"df.nsmallest(3, 'population', keep='last')",
],
'pandas.core.frame.DataFrame.replace': [
"s.replace([1, 2], method='bfill')",
# Relies on method='pad'
"s.replace('a', None)",
],
'pandas.core.frame.DataFrame.to_records': ['*'],
'pandas.core.frame.DataFrame.to_dict': ['*'],
'pandas.core.frame.DataFrame.to_numpy': ['*'],
'pandas.core.frame.DataFrame.to_string': ['*'],
'pandas.core.frame.DataFrame.transpose': ['*'],
'pandas.core.frame.DataFrame.shape': ['*'],
'pandas.core.frame.DataFrame.shift': [
'df.shift(periods=3, freq="D")',
'df.shift(periods=3, freq="infer")'
],
'pandas.core.frame.DataFrame.unstack': ['*'],
'pandas.core.frame.DataFrame.memory_usage': ['*'],
'pandas.core.frame.DataFrame.info': ['*'],
# Not equal to df.agg('mode', axis='columns', numeric_only=True)
# because there can be multiple columns if a row has more than one
# mode
'pandas.core.frame.DataFrame.mode': [
"df.mode(axis='columns', numeric_only=True)"
],
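            # (e.g. a row with values [1, 1, 2, 2] has two modes, so the
            # row-wise result gains an extra column that a plain
            # agg('mode') could never produce)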
'pandas.core.frame.DataFrame.append': [
'df.append(df2, ignore_index=True)',
"for i in range(5):\n" +
" df = df.append({'A': i}, ignore_index=True)",
],
'pandas.core.frame.DataFrame.sort_index': ['*'],
'pandas.core.frame.DataFrame.sort_values': ['*'],
'pandas.core.frame.DataFrame.melt': [
"df.melt(id_vars=['A'], value_vars=['B'])",
"df.melt(id_vars=['A'], value_vars=['B', 'C'])",
"df.melt(col_level=0, id_vars=['A'], value_vars=['B'])",
"df.melt(id_vars=[('A', 'D')], value_vars=[('B', 'E')])",
"df.melt(id_vars=['A'], value_vars=['B'],\n" +
" var_name='myVarname', value_name='myValname')"
]
},
not_implemented_ok={
'pandas.core.frame.DataFrame.transform': ['*'],
'pandas.core.frame.DataFrame.reindex': ['*'],
'pandas.core.frame.DataFrame.reindex_axis': ['*'],
'pandas.core.frame.DataFrame.round': [
'df.round(decimals)',
],
# We should be able to support pivot and pivot_table for categorical
# columns
'pandas.core.frame.DataFrame.pivot': ['*'],
# We can implement this as a zipping operator, but it won't have the
# same capability. The doctest includes an example that branches on
# a deferred result.
'pandas.core.frame.DataFrame.combine': ['*'],
# Can be implemented as a zipping operator
'pandas.core.frame.DataFrame.combine_first': ['*'],
# Difficult to parallelize but should be possible?
'pandas.core.frame.DataFrame.dot': [
# reindex not supported
's2 = s.reindex([1, 0, 2, 3])',
'df.dot(s2)',
],
# Trivially elementwise for axis=columns. Relies on global indexing
# for axis=rows.
# Difficult to determine proxy, need to inspect function
'pandas.core.frame.DataFrame.apply': ['*'],
# Cross-join not implemented
'pandas.core.frame.DataFrame.merge': [
"df1.merge(df2, how='cross')"
],
# TODO(BEAM-11711)
'pandas.core.frame.DataFrame.set_index': [
"df.set_index([s, s**2])",
],
},
skip={
# Throws NotImplementedError when modifying df
'pandas.core.frame.DataFrame.transform': ['df'],
'pandas.core.frame.DataFrame.axes': [
# Returns deferred index.
'df.axes',
],
'pandas.core.frame.DataFrame.compare': ['*'],
'pandas.core.frame.DataFrame.cov': [
# Relies on setting entries ahead of time.
"df.loc[df.index[:5], 'a'] = np.nan",
"df.loc[df.index[5:10], 'b'] = np.nan",
'df.cov(min_periods=12)',
],
'pandas.core.frame.DataFrame.drop_duplicates': ['*'],
'pandas.core.frame.DataFrame.duplicated': ['*'],
'pandas.core.frame.DataFrame.idxmax': ['*'],
'pandas.core.frame.DataFrame.idxmin': ['*'],
'pandas.core.frame.DataFrame.rename': [
# Returns deferred index.
'df.index',
'df.rename(index=str).index',
],
'pandas.core.frame.DataFrame.set_index': [
# TODO(BEAM-11711): This could pass in the index as
# a DeferredIndex, and we should fail it as order-sensitive.
"df.set_index([pd.Index([1, 2, 3, 4]), 'year'])",
],
'pandas.core.frame.DataFrame.set_axis': ['*'],
'pandas.core.frame.DataFrame.to_markdown': ['*'],
'pandas.core.frame.DataFrame.to_parquet': ['*'],
'pandas.core.frame.DataFrame.value_counts': ['*'],
'pandas.core.frame.DataFrame.to_records': [
'df.index = df.index.rename("I")',
'index_dtypes = f"<S{df.index.str.len().max()}"', # 1.x
'index_dtypes = "<S{}".format(df.index.str.len().max())', #0.x
'df.to_records(index_dtypes=index_dtypes)',
],
# These tests use the static method pd.pivot_table, which doesn't
# actually raise NotImplementedError
'pandas.core.frame.DataFrame.pivot_table': ['*'],
# Expected to raise a ValueError, but we raise NotImplementedError
'pandas.core.frame.DataFrame.pivot': [
"df.pivot(index='foo', columns='bar', values='baz')"
],
'pandas.core.frame.DataFrame.append': [
'df',
# pylint: disable=line-too-long
"pd.concat([pd.DataFrame([i], columns=['A']) for i in range(5)],\n"
" ignore_index=True)"
],
'pandas.core.frame.DataFrame.eval': ['df'],
'pandas.core.frame.DataFrame.melt': [
"df.columns = [list('ABC'), list('DEF')]", "df"
],
'pandas.core.frame.DataFrame.merge': [
# Order-sensitive index, checked in frames_test.py.
"df1.merge(df2, left_on='lkey', right_on='rkey')",
"df1.merge(df2, left_on='lkey', right_on='rkey',\n"
" suffixes=('_left', '_right'))",
"df1.merge(df2, how='left', on='a')",
],
# Raises right exception, but testing framework has matching issues.
'pandas.core.frame.DataFrame.replace': [
"df.replace({'a string': 'new value', True: False}) # raises"
],
'pandas.core.frame.DataFrame.to_sparse': ['type(df)'],
# Skipped because "seen_wont_implement" is reset before getting to
# these calls, so the NameError they raise is not ignored.
'pandas.core.frame.DataFrame.T': [
'df1_transposed.dtypes', 'df2_transposed.dtypes'
],
'pandas.core.frame.DataFrame.transpose': [
'df1_transposed.dtypes', 'df2_transposed.dtypes'
],
# Skipped because the relies on iloc to set a cell to NA. Test is
# replicated in frames_test::DeferredFrameTest::test_applymap.
'pandas.core.frame.DataFrame.applymap': [
'df_copy.iloc[0, 0] = pd.NA',
"df_copy.applymap(lambda x: len(str(x)), na_action='ignore')",
],
# Skipped so we don't need to install natsort
'pandas.core.frame.DataFrame.sort_values': [
'from natsort import index_natsorted',
'df.sort_values(\n'
' by="time",\n'
' key=lambda x: np.argsort(index_natsorted(df["time"]))\n'
')'
],
# Mode that we don't yet support, documentation added in pandas
# 1.2.0 (https://github.com/pandas-dev/pandas/issues/35912)
'pandas.core.frame.DataFrame.aggregate': [
"df.agg(x=('A', max), y=('B', 'min'), z=('C', np.mean))"
],
})
self.assertEqual(result.failed, 0)
def test_series_tests(self):
result = doctests.testmod(
pd.core.series,
use_beam=False,
report=True,
wont_implement_ok={
'pandas.core.series.Series.__array__': ['*'],
'pandas.core.series.Series.array': ['*'],
'pandas.core.series.Series.cummax': ['*'],
'pandas.core.series.Series.cummin': ['*'],
'pandas.core.series.Series.cumsum': ['*'],
'pandas.core.series.Series.cumprod': ['*'],
'pandas.core.series.Series.diff': ['*'],
'pandas.core.series.Series.dot': [
's.dot(arr)', # non-deferred result
],
'pandas.core.series.Series.fillna': [
"df.fillna(method='ffill')",
'df.fillna(value=values, limit=1)',
],
'pandas.core.series.Series.items': ['*'],
'pandas.core.series.Series.iteritems': ['*'],
            # default keep is 'first', which breaks ties by input order;
            # that makes these calls order-sensitive, hence wont-implement
'pandas.core.series.Series.nlargest': [
"s.nlargest()",
"s.nlargest(3)",
"s.nlargest(3, keep='last')",
],
'pandas.core.series.Series.memory_usage': ['*'],
'pandas.core.series.Series.nsmallest': [
"s.nsmallest()",
"s.nsmallest(3)",
"s.nsmallest(3, keep='last')",
],
'pandas.core.series.Series.pop': ['*'],
'pandas.core.series.Series.searchsorted': ['*'],
'pandas.core.series.Series.shift': ['*'],
'pandas.core.series.Series.take': ['*'],
'pandas.core.series.Series.to_dict': ['*'],
'pandas.core.series.Series.unique': ['*'],
'pandas.core.series.Series.unstack': ['*'],
'pandas.core.series.Series.values': ['*'],
'pandas.core.series.Series.view': ['*'],
'pandas.core.series.Series.append': [
's1.append(s2, ignore_index=True)',
],
'pandas.core.series.Series.sort_index': ['*'],
'pandas.core.series.Series.sort_values': ['*'],
'pandas.core.series.Series.argmax': ['*'],
'pandas.core.series.Series.argmin': ['*'],
},
not_implemented_ok={
'pandas.core.series.Series.transform': ['*'],
'pandas.core.series.Series.groupby': [
'ser.groupby(["a", "b", "a", "b"]).mean()',
'ser.groupby(["a", "b", "a", np.nan]).mean()',
'ser.groupby(["a", "b", "a", np.nan], dropna=False).mean()',
# Grouping by a series is not supported
'ser.groupby(ser > 100).mean()',
],
'pandas.core.series.Series.reindex': ['*'],
},
skip={
            # The error message formatting differs from pandas, so the
            # doctest comparison fails
'pandas.core.series.Series.append': [
's1.append(s2, verify_integrity=True)',
],
# Throws NotImplementedError when modifying df
'pandas.core.series.Series.transform': ['df'],
'pandas.core.series.Series.autocorr': ['*'],
'pandas.core.series.Series.combine': ['*'],
'pandas.core.series.Series.combine_first': ['*'],
'pandas.core.series.Series.compare': ['*'],
'pandas.core.series.Series.cov': [
# Differs in LSB on jenkins.
"s1.cov(s2)",
],
'pandas.core.series.Series.drop_duplicates': ['*'],
'pandas.core.series.Series.duplicated': ['*'],
'pandas.core.series.Series.explode': ['*'],
'pandas.core.series.Series.idxmax': ['*'],
'pandas.core.series.Series.idxmin': ['*'],
'pandas.core.series.Series.nonzero': ['*'],
'pandas.core.series.Series.quantile': ['*'],
'pandas.core.series.Series.pop': ['ser'], # testing side effect
'pandas.core.series.Series.repeat': ['*'],
'pandas.core.series.Series.replace': ['*'],
'pandas.core.series.Series.reset_index': ['*'],
'pandas.core.series.Series.searchsorted': [
# This doctest seems to be incorrectly parsed.
"x = pd.Categorical(['apple', 'bread', 'bread',"
],
'pandas.core.series.Series.set_axis': ['*'],
'pandas.core.series.Series.to_csv': ['*'],
'pandas.core.series.Series.to_markdown': ['*'],
'pandas.core.series.Series.update': ['*'],
'pandas.core.series.Series.view': [
# Inspection after modification.
's'
],
})
self.assertEqual(result.failed, 0)
def test_string_tests(self):
PD_VERSION = tuple(int(v) for v in pd.__version__.split('.'))
if PD_VERSION < (1, 2, 0):
module = pd.core.strings
else:
# Definitions were moved to accessor in pandas 1.2.0
module = pd.core.strings.accessor
module_name = module.__name__
result = doctests.testmod(
module,
use_beam=False,
wont_implement_ok={
# These methods can accept deferred series objects, but not lists
f'{module_name}.StringMethods.cat': [
"s.str.cat(['A', 'B', 'C', 'D'], sep=',')",
"s.str.cat(['A', 'B', 'C', 'D'], sep=',', na_rep='-')",
"s.str.cat(['A', 'B', 'C', 'D'], na_rep='-')"
],
f'{module_name}.StringMethods.repeat': [
's.str.repeat(repeats=[1, 2, 3])'
],
f'{module_name}.str_repeat': ['s.str.repeat(repeats=[1, 2, 3])'],
f'{module_name}.StringMethods.get_dummies': ['*'],
f'{module_name}.str_get_dummies': ['*'],
},
skip={
# count() on Series with a NaN produces mismatched type if we
# have a NaN-only partition.
f'{module_name}.StringMethods.count': ["s.str.count('a')"],
f'{module_name}.str_count': ["s.str.count('a')"],
# Produce None instead of NaN, see
# frames_test.py::DeferredFrameTest::test_str_split
f'{module_name}.StringMethods.rsplit': [
's.str.split(expand=True)',
's.str.rsplit("/", n=1, expand=True)',
],
f'{module_name}.StringMethods.split': [
's.str.split(expand=True)',
's.str.rsplit("/", n=1, expand=True)',
],
# Bad test strings in pandas 1.1.x
f'{module_name}.str_replace': [
"pd.Series(['foo', 'fuz', np.nan]).str.replace('f', repr)"
],
f'{module_name}.StringMethods.replace': [
"pd.Series(['foo', 'fuz', np.nan]).str.replace('f', repr)"
],
# output has incorrect formatting in 1.2.x
f'{module_name}.StringMethods.extractall': ['*']
})
self.assertEqual(result.failed, 0)
def test_datetime_tests(self):
# TODO(BEAM-10721)
datetimelike_result = doctests.testmod(
pd.core.arrays.datetimelike,
use_beam=False,
skip={
'pandas.core.arrays.datetimelike.AttributesMixin._unbox_scalar': [
'*'
],
'pandas.core.arrays.datetimelike.TimelikeOps.ceil': ['*'],
'pandas.core.arrays.datetimelike.TimelikeOps.floor': ['*'],
'pandas.core.arrays.datetimelike.TimelikeOps.round': ['*'],
})
datetime_result = doctests.testmod(
pd.core.arrays.datetimes,
use_beam=False,
skip={
'pandas.core.arrays.datetimes.DatetimeArray.day': ['*'],
'pandas.core.arrays.datetimes.DatetimeArray.hour': ['*'],
'pandas.core.arrays.datetimes.DatetimeArray.microsecond': ['*'],
'pandas.core.arrays.datetimes.DatetimeArray.minute': ['*'],
'pandas.core.arrays.datetimes.DatetimeArray.month': ['*'],
'pandas.core.arrays.datetimes.DatetimeArray.nanosecond': ['*'],
'pandas.core.arrays.datetimes.DatetimeArray.second': ['*'],
'pandas.core.arrays.datetimes.DatetimeArray.year': ['*'],
'pandas.core.arrays.datetimes.DatetimeArray.is_leap_year': ['*'],
'pandas.core.arrays.datetimes.DatetimeArray.is_month_end': ['*'],
'pandas.core.arrays.datetimes.DatetimeArray.is_month_start': ['*'],
'pandas.core.arrays.datetimes.DatetimeArray.is_quarter_end': ['*'],
'pandas.core.arrays.datetimes.DatetimeArray.is_quarter_start': [
'*'
],
'pandas.core.arrays.datetimes.DatetimeArray.is_year_end': ['*'],
'pandas.core.arrays.datetimes.DatetimeArray.is_year_start': ['*'],
'pandas.core.arrays.datetimes.DatetimeArray.to_period': ['*'],
'pandas.core.arrays.datetimes.DatetimeArray.tz_localize': ['*'],
})
self.assertEqual(datetimelike_result.failed, 0)
self.assertEqual(datetime_result.failed, 0)
def test_indexing_tests(self):
result = doctests.testmod(
pd.core.indexing,
use_beam=False,
skip={
'pandas.core.indexing._IndexSlice': ['*'],
'pandas.core.indexing.IndexingMixin.at': ['*'],
'pandas.core.indexing.IndexingMixin.iat': ['*'],
'pandas.core.indexing.IndexingMixin.iloc': ['*'],
'pandas.core.indexing.IndexingMixin.loc': ['*'],
'pandas.core.indexing._AtIndexer': ['*'],
'pandas.core.indexing._LocIndexer': ['*'],
'pandas.core.indexing._iAtIndexer': ['*'],
'pandas.core.indexing._iLocIndexer': ['*'],
})
self.assertEqual(result.failed, 0)
def test_groupby_tests(self):
result = doctests.testmod(
pd.core.groupby.groupby,
use_beam=False,
wont_implement_ok={
'pandas.core.groupby.groupby.GroupBy.head': ['*'],
'pandas.core.groupby.groupby.GroupBy.tail': ['*'],
'pandas.core.groupby.groupby.GroupBy.nth': ['*'],
'pandas.core.groupby.groupby.GroupBy.cumcount': ['*'],
},
not_implemented_ok={
'pandas.core.groupby.groupby.GroupBy.describe': ['*'],
'pandas.core.groupby.groupby.GroupBy.ngroup': ['*'],
'pandas.core.groupby.groupby.GroupBy.resample': ['*'],
'pandas.core.groupby.groupby.GroupBy.sample': ['*'],
'pandas.core.groupby.groupby.GroupBy.quantile': ['*'],
'pandas.core.groupby.groupby.BaseGroupBy.pipe': ['*'],
# pipe tests are in a different location in pandas 1.1.x
'pandas.core.groupby.groupby._GroupBy.pipe': ['*'],
'pandas.core.groupby.groupby.GroupBy.nth': [
"df.groupby('A', as_index=False).nth(1)",
],
},
skip={
# Uses iloc to mutate a DataFrame
'pandas.core.groupby.groupby.GroupBy.resample': [
'df.iloc[2, 0] = 5',
'df',
],
# TODO: Raise wont implement for list passed as a grouping column
# Currently raises unhashable type: list
'pandas.core.groupby.groupby.GroupBy.ngroup': [
'df.groupby(["A", [1,1,2,3,2,1]]).ngroup()'
],
})
self.assertEqual(result.failed, 0)
result = doctests.testmod(
pd.core.groupby.generic,
use_beam=False,
wont_implement_ok={
# Returns an array by default, not a Series. WontImplement
# (non-deferred)
'pandas.core.groupby.generic.SeriesGroupBy.unique': ['*'],
# TODO: Is take actually deprecated?
'pandas.core.groupby.generic.DataFrameGroupBy.take': ['*'],
'pandas.core.groupby.generic.SeriesGroupBy.take': ['*'],
'pandas.core.groupby.generic.SeriesGroupBy.nsmallest': [
"s.nsmallest(3, keep='last')",
"s.nsmallest(3)",
"s.nsmallest()",
],
'pandas.core.groupby.generic.SeriesGroupBy.nlargest': [
"s.nlargest(3, keep='last')",
"s.nlargest(3)",
"s.nlargest()",
],
'pandas.core.groupby.generic.DataFrameGroupBy.diff': ['*'],
'pandas.core.groupby.generic.SeriesGroupBy.diff': ['*'],
'pandas.core.groupby.generic.DataFrameGroupBy.hist': ['*'],
'pandas.core.groupby.generic.DataFrameGroupBy.fillna': [
"df.fillna(method='ffill')",
'df.fillna(value=values, limit=1)',
],
'pandas.core.groupby.generic.SeriesGroupBy.fillna': [
"df.fillna(method='ffill')",
'df.fillna(value=values, limit=1)',
],
},
not_implemented_ok={
'pandas.core.groupby.generic.DataFrameGroupBy.transform': ['*'],
'pandas.core.groupby.generic.DataFrameGroupBy.idxmax': ['*'],
'pandas.core.groupby.generic.DataFrameGroupBy.idxmin': ['*'],
'pandas.core.groupby.generic.DataFrameGroupBy.filter': ['*'],
'pandas.core.groupby.generic.DataFrameGroupBy.nunique': ['*'],
'pandas.core.groupby.generic.SeriesGroupBy.transform': ['*'],
'pandas.core.groupby.generic.SeriesGroupBy.idxmax': ['*'],
'pandas.core.groupby.generic.SeriesGroupBy.idxmin': ['*'],
'pandas.core.groupby.generic.SeriesGroupBy.filter': ['*'],
'pandas.core.groupby.generic.SeriesGroupBy.describe': ['*'],
},
skip={
'pandas.core.groupby.generic.SeriesGroupBy.cov': [
# Floating point comparison fails
's1.cov(s2)',
],
'pandas.core.groupby.generic.DataFrameGroupBy.cov': [
# Mutates input DataFrame with loc
# TODO: Replicate in frames_test.py
"df.loc[df.index[:5], 'a'] = np.nan",
"df.loc[df.index[5:10], 'b'] = np.nan",
"df.cov(min_periods=12)",
],
# These examples rely on grouping by a list
'pandas.core.groupby.generic.SeriesGroupBy.aggregate': ['*'],
'pandas.core.groupby.generic.DataFrameGroupBy.aggregate': ['*'],
})
self.assertEqual(result.failed, 0)
def test_top_level(self):
tests = {
name: func.__doc__
for (name, func) in pd.__dict__.items()
if _is_top_level_function(func) and getattr(func, '__doc__', None)
}
# IO methods are tested in io_test.py
skip_reads = {name: ['*'] for name in dir(pd) if name.startswith('read_')}
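    # Illustrative shape of the mapping built above (exact names depend on
    # the installed pandas version):
    #   {'read_csv': ['*'], 'read_excel': ['*'], 'read_json': ['*'], ...}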
result = doctests.teststrings(
tests,
use_beam=False,
report=True,
not_implemented_ok={
'concat': ['pd.concat([s1, s2], ignore_index=True)'],
'crosstab': ['*'],
'cut': ['*'],
'eval': ['*'],
'factorize': ['*'],
'get_dummies': ['*'],
'infer_freq': ['*'],
'lreshape': ['*'],
'melt': ['*'],
'merge': ["df1.merge(df2, how='cross')"],
'merge_asof': ['*'],
'pivot': ['*'],
'pivot_table': ['*'],
'qcut': ['*'],
'reset_option': ['*'],
'set_eng_float_format': ['*'],
'set_option': ['*'],
'to_numeric': ['*'],
'to_timedelta': ['*'],
'unique': ['*'],
'value_counts': ['*'],
'wide_to_long': ['*'],
},
wont_implement_ok={
'to_datetime': ['s.head()'],
'to_pickle': ['*'],
'melt': [
"pd.melt(df, id_vars=['A'], value_vars=['B'])",
"pd.melt(df, id_vars=['A'], value_vars=['B', 'C'])",
"pd.melt(df, col_level=0, id_vars=['A'], value_vars=['B'])",
"pd.melt(df, id_vars=[('A', 'D')], value_vars=[('B', 'E')])",
"pd.melt(df, id_vars=['A'], value_vars=['B'],\n" +
" var_name='myVarname', value_name='myValname')"
],
},
skip={
# error formatting
'concat': ['pd.concat([df5, df6], verify_integrity=True)'],
# doctest DeprecationWarning
'melt': ['df'],
# Order-sensitive re-indexing.
'merge': [
"df1.merge(df2, left_on='lkey', right_on='rkey')",
"df1.merge(df2, left_on='lkey', right_on='rkey',\n"
" suffixes=('_left', '_right'))",
"df1.merge(df2, how='left', on='a')",
],
# Not an actual test.
'option_context': ['*'],
'factorize': ['codes', 'uniques'],
# Bad top-level use of un-imported function.
'merge_ordered': [
'merge_ordered(df1, df2, fill_method="ffill", left_by="group")'
],
# Expected error.
'pivot': ["df.pivot(index='foo', columns='bar', values='baz')"],
# Never written.
'to_pickle': ['os.remove("./dummy.pkl")'],
**skip_reads
})
self.assertEqual(result.failed, 0)
if __name__ == '__main__':
unittest.main()
| 44.406579 | 83 | 0.518919 |
import sys
import unittest
import pandas as pd
from apache_beam.dataframe import doctests
from apache_beam.dataframe.pandas_top_level_functions import _is_top_level_function
@unittest.skipIf(sys.platform == 'win32', '[BEAM-10626]')
class DoctestTest(unittest.TestCase):
def test_ndframe_tests(self):
skip_writes = {
f'pandas.core.generic.NDFrame.{name}': ['*']
for name in dir(pd.core.generic.NDFrame) if name.startswith('to_')
}
result = doctests.testmod(
pd.core.generic,
use_beam=False,
report=True,
wont_implement_ok={
'pandas.core.generic.NDFrame.first': ['*'],
'pandas.core.generic.NDFrame.head': ['*'],
'pandas.core.generic.NDFrame.last': ['*'],
'pandas.core.generic.NDFrame.shift': ['*'],
'pandas.core.generic.NDFrame.tail': ['*'],
'pandas.core.generic.NDFrame.take': ['*'],
'pandas.core.generic.NDFrame.values': ['*'],
'pandas.core.generic.NDFrame.tz_localize': [
"s.tz_localize('CET', ambiguous='infer')",
"s.tz_localize('CET', ambiguous=np.array([True, True, False]))",
],
'pandas.core.generic.NDFrame.truncate': [
"df.tail()",
"df.loc['2016-01-05':'2016-01-10', :].tail()",
],
'pandas.core.generic.NDFrame.replace': [
"s.replace([1, 2], method='bfill')",
"s.replace('a', None)",
],
'pandas.core.generic.NDFrame.fillna': [
"df.fillna(method='ffill')",
'df.fillna(value=values, limit=1)',
],
'pandas.core.generic.NDFrame.sort_values': ['*'],
'pandas.core.generic.NDFrame.mask': [
'df.where(m, -df) == np.where(m, df, -df)'
],
'pandas.core.generic.NDFrame.where': [
'df.where(m, -df) == np.where(m, df, -df)'
],
'pandas.core.generic.NDFrame.interpolate': ['*'],
},
not_implemented_ok={
'pandas.core.generic.NDFrame.asof': ['*'],
'pandas.core.generic.NDFrame.at_time': ['*'],
'pandas.core.generic.NDFrame.between_time': ['*'],
'pandas.core.generic.NDFrame.describe': ['*'],
'pandas.core.generic.NDFrame.ewm': ['*'],
'pandas.core.generic.NDFrame.expanding': ['*'],
'pandas.core.generic.NDFrame.flags': ['*'],
'pandas.core.generic.NDFrame.pct_change': ['*'],
'pandas.core.generic.NDFrame.rank': ['*'],
'pandas.core.generic.NDFrame.reindex': ['*'],
'pandas.core.generic.NDFrame.reindex_like': ['*'],
'pandas.core.generic.NDFrame.replace': ['*'],
'pandas.core.generic.NDFrame.resample': ['*'],
'pandas.core.generic.NDFrame.rolling': ['*'],
'pandas.core.generic.NDFrame.sample': ['*'],
'pandas.core.generic.NDFrame.set_flags': ['*'],
'pandas.core.generic.NDFrame.squeeze': ['*'],
'pandas.core.generic.NDFrame.transform': ['*'],
'pandas.core.generic.NDFrame.truncate': ['*'],
'pandas.core.generic.NDFrame.xs': ['*'],
'pandas.core.generic.NDFrame.abs': [
'df.loc[(df.c - 43).abs().argsort()]',
],
},
skip={
'pandas.core.generic.NDFrame._set_axis_name': ['*'],
'pandas.core.generic.NDFrame.asfreq': ['*'],
'pandas.core.generic.NDFrame.astype': ['*'],
'pandas.core.generic.NDFrame.convert_dtypes': ['*'],
'pandas.core.generic.NDFrame.copy': ['*'],
'pandas.core.generic.NDFrame.droplevel': ['*'],
'pandas.core.generic.NDFrame.infer_objects': ['*'],
'pandas.core.generic.NDFrame.rank': [
'df'
],
'pandas.core.generic.NDFrame.rename': [
'df.rename(2)'
],
'pandas.core.generic.NDFrame.rename_axis': ['*'],
'pandas.core.generic.NDFrame.replace': [
"df.replace({'a string': 'new value', True: False}) # raises"
],
'pandas.core.generic.NDFrame.squeeze': ['*'],
'pandas.core.generic.NDFrame.resample': ['df'],
'pandas.core.generic.NDFrame.sort_values': [
'from natsort import index_natsorted',
'df.sort_values(\n'
' by="time",\n'
' key=lambda x: np.argsort(index_natsorted(df["time"]))\n'
')'
],
**skip_writes
})
self.assertEqual(result.failed, 0)
def test_dataframe_tests(self):
result = doctests.testmod(
pd.core.frame,
use_beam=False,
report=True,
wont_implement_ok={
'pandas.core.frame.DataFrame.T': ['*'],
'pandas.core.frame.DataFrame.cummax': ['*'],
'pandas.core.frame.DataFrame.cummin': ['*'],
'pandas.core.frame.DataFrame.cumsum': ['*'],
'pandas.core.frame.DataFrame.cumprod': ['*'],
'pandas.core.frame.DataFrame.diff': ['*'],
'pandas.core.frame.DataFrame.fillna': [
"df.fillna(method='ffill')",
'df.fillna(value=values, limit=1)',
],
'pandas.core.frame.DataFrame.items': ['*'],
'pandas.core.frame.DataFrame.itertuples': ['*'],
'pandas.core.frame.DataFrame.iterrows': ['*'],
'pandas.core.frame.DataFrame.iteritems': ['*'],
# default keep is 'first'
'pandas.core.frame.DataFrame.nlargest': [
"df.nlargest(3, 'population')",
"df.nlargest(3, ['population', 'GDP'])",
"df.nlargest(3, 'population', keep='last')"
],
'pandas.core.frame.DataFrame.nsmallest': [
"df.nsmallest(3, 'population')",
"df.nsmallest(3, ['population', 'GDP'])",
"df.nsmallest(3, 'population', keep='last')",
],
'pandas.core.frame.DataFrame.replace': [
"s.replace([1, 2], method='bfill')",
# Relies on method='pad'
"s.replace('a', None)",
],
'pandas.core.frame.DataFrame.to_records': ['*'],
'pandas.core.frame.DataFrame.to_dict': ['*'],
'pandas.core.frame.DataFrame.to_numpy': ['*'],
'pandas.core.frame.DataFrame.to_string': ['*'],
'pandas.core.frame.DataFrame.transpose': ['*'],
'pandas.core.frame.DataFrame.shape': ['*'],
'pandas.core.frame.DataFrame.shift': [
'df.shift(periods=3, freq="D")',
'df.shift(periods=3, freq="infer")'
],
'pandas.core.frame.DataFrame.unstack': ['*'],
'pandas.core.frame.DataFrame.memory_usage': ['*'],
'pandas.core.frame.DataFrame.info': ['*'],
# Not equal to df.agg('mode', axis='columns', numeric_only=True)
# because there can be multiple columns if a row has more than one
# mode
'pandas.core.frame.DataFrame.mode': [
"df.mode(axis='columns', numeric_only=True)"
],
'pandas.core.frame.DataFrame.append': [
'df.append(df2, ignore_index=True)',
"for i in range(5):\n" +
" df = df.append({'A': i}, ignore_index=True)",
],
'pandas.core.frame.DataFrame.sort_index': ['*'],
'pandas.core.frame.DataFrame.sort_values': ['*'],
'pandas.core.frame.DataFrame.melt': [
"df.melt(id_vars=['A'], value_vars=['B'])",
"df.melt(id_vars=['A'], value_vars=['B', 'C'])",
"df.melt(col_level=0, id_vars=['A'], value_vars=['B'])",
"df.melt(id_vars=[('A', 'D')], value_vars=[('B', 'E')])",
"df.melt(id_vars=['A'], value_vars=['B'],\n" +
" var_name='myVarname', value_name='myValname')"
]
},
not_implemented_ok={
'pandas.core.frame.DataFrame.transform': ['*'],
'pandas.core.frame.DataFrame.reindex': ['*'],
'pandas.core.frame.DataFrame.reindex_axis': ['*'],
'pandas.core.frame.DataFrame.round': [
'df.round(decimals)',
],
# We should be able to support pivot and pivot_table for categorical
# columns
'pandas.core.frame.DataFrame.pivot': ['*'],
# We can implement this as a zipping operator, but it won't have the
'pandas.core.frame.DataFrame.combine': ['*'],
'pandas.core.frame.DataFrame.combine_first': ['*'],
'pandas.core.frame.DataFrame.dot': [
's2 = s.reindex([1, 0, 2, 3])',
'df.dot(s2)',
],
'pandas.core.frame.DataFrame.apply': ['*'],
'pandas.core.frame.DataFrame.merge': [
"df1.merge(df2, how='cross')"
],
'pandas.core.frame.DataFrame.set_index': [
"df.set_index([s, s**2])",
],
},
skip={
'pandas.core.frame.DataFrame.transform': ['df'],
'pandas.core.frame.DataFrame.axes': [
'df.axes',
],
'pandas.core.frame.DataFrame.compare': ['*'],
'pandas.core.frame.DataFrame.cov': [
"df.loc[df.index[:5], 'a'] = np.nan",
"df.loc[df.index[5:10], 'b'] = np.nan",
'df.cov(min_periods=12)',
],
'pandas.core.frame.DataFrame.drop_duplicates': ['*'],
'pandas.core.frame.DataFrame.duplicated': ['*'],
'pandas.core.frame.DataFrame.idxmax': ['*'],
'pandas.core.frame.DataFrame.idxmin': ['*'],
'pandas.core.frame.DataFrame.rename': [
'df.index',
'df.rename(index=str).index',
],
'pandas.core.frame.DataFrame.set_index': [
"df.set_index([pd.Index([1, 2, 3, 4]), 'year'])",
],
'pandas.core.frame.DataFrame.set_axis': ['*'],
'pandas.core.frame.DataFrame.to_markdown': ['*'],
'pandas.core.frame.DataFrame.to_parquet': ['*'],
'pandas.core.frame.DataFrame.value_counts': ['*'],
'pandas.core.frame.DataFrame.to_records': [
'df.index = df.index.rename("I")',
'index_dtypes = f"<S{df.index.str.len().max()}"',
'index_dtypes = "<S{}".format(df.index.str.len().max())',
'df.to_records(index_dtypes=index_dtypes)',
],
# actually raise NotImplementedError
'pandas.core.frame.DataFrame.pivot_table': ['*'],
# Expected to raise a ValueError, but we raise NotImplementedError
'pandas.core.frame.DataFrame.pivot': [
"df.pivot(index='foo', columns='bar', values='baz')"
],
'pandas.core.frame.DataFrame.append': [
'df',
# pylint: disable=line-too-long
"pd.concat([pd.DataFrame([i], columns=['A']) for i in range(5)],\n"
" ignore_index=True)"
],
'pandas.core.frame.DataFrame.eval': ['df'],
'pandas.core.frame.DataFrame.melt': [
"df.columns = [list('ABC'), list('DEF')]", "df"
],
'pandas.core.frame.DataFrame.merge': [
# Order-sensitive index, checked in frames_test.py.
"df1.merge(df2, left_on='lkey', right_on='rkey')",
"df1.merge(df2, left_on='lkey', right_on='rkey',\n"
" suffixes=('_left', '_right'))",
"df1.merge(df2, how='left', on='a')",
],
# Raises right exception, but testing framework has matching issues.
'pandas.core.frame.DataFrame.replace': [
"df.replace({'a string': 'new value', True: False}) # raises"
],
'pandas.core.frame.DataFrame.to_sparse': ['type(df)'],
# Skipped because "seen_wont_implement" is reset before getting to
# these calls, so the NameError they raise is not ignored.
'pandas.core.frame.DataFrame.T': [
'df1_transposed.dtypes', 'df2_transposed.dtypes'
],
'pandas.core.frame.DataFrame.transpose': [
'df1_transposed.dtypes', 'df2_transposed.dtypes'
],
# Skipped because the relies on iloc to set a cell to NA. Test is
# replicated in frames_test::DeferredFrameTest::test_applymap.
'pandas.core.frame.DataFrame.applymap': [
'df_copy.iloc[0, 0] = pd.NA',
"df_copy.applymap(lambda x: len(str(x)), na_action='ignore')",
],
# Skipped so we don't need to install natsort
'pandas.core.frame.DataFrame.sort_values': [
'from natsort import index_natsorted',
'df.sort_values(\n'
' by="time",\n'
' key=lambda x: np.argsort(index_natsorted(df["time"]))\n'
')'
],
# 1.2.0 (https://github.com/pandas-dev/pandas/issues/35912)
'pandas.core.frame.DataFrame.aggregate': [
"df.agg(x=('A', max), y=('B', 'min'), z=('C', np.mean))"
],
})
self.assertEqual(result.failed, 0)
def test_series_tests(self):
result = doctests.testmod(
pd.core.series,
use_beam=False,
report=True,
wont_implement_ok={
'pandas.core.series.Series.__array__': ['*'],
'pandas.core.series.Series.array': ['*'],
'pandas.core.series.Series.cummax': ['*'],
'pandas.core.series.Series.cummin': ['*'],
'pandas.core.series.Series.cumsum': ['*'],
'pandas.core.series.Series.cumprod': ['*'],
'pandas.core.series.Series.diff': ['*'],
'pandas.core.series.Series.dot': [
's.dot(arr)', # non-deferred result
],
'pandas.core.series.Series.fillna': [
"df.fillna(method='ffill')",
'df.fillna(value=values, limit=1)',
],
'pandas.core.series.Series.items': ['*'],
'pandas.core.series.Series.iteritems': ['*'],
# default keep is 'first'
'pandas.core.series.Series.nlargest': [
"s.nlargest()",
"s.nlargest(3)",
"s.nlargest(3, keep='last')",
],
'pandas.core.series.Series.memory_usage': ['*'],
'pandas.core.series.Series.nsmallest': [
"s.nsmallest()",
"s.nsmallest(3)",
"s.nsmallest(3, keep='last')",
],
'pandas.core.series.Series.pop': ['*'],
'pandas.core.series.Series.searchsorted': ['*'],
'pandas.core.series.Series.shift': ['*'],
'pandas.core.series.Series.take': ['*'],
'pandas.core.series.Series.to_dict': ['*'],
'pandas.core.series.Series.unique': ['*'],
'pandas.core.series.Series.unstack': ['*'],
'pandas.core.series.Series.values': ['*'],
'pandas.core.series.Series.view': ['*'],
'pandas.core.series.Series.append': [
's1.append(s2, ignore_index=True)',
],
'pandas.core.series.Series.sort_index': ['*'],
'pandas.core.series.Series.sort_values': ['*'],
'pandas.core.series.Series.argmax': ['*'],
'pandas.core.series.Series.argmin': ['*'],
},
not_implemented_ok={
'pandas.core.series.Series.transform': ['*'],
'pandas.core.series.Series.groupby': [
'ser.groupby(["a", "b", "a", "b"]).mean()',
'ser.groupby(["a", "b", "a", np.nan]).mean()',
'ser.groupby(["a", "b", "a", np.nan], dropna=False).mean()',
# Grouping by a series is not supported
'ser.groupby(ser > 100).mean()',
],
'pandas.core.series.Series.reindex': ['*'],
},
skip={
# error formatting
'pandas.core.series.Series.append': [
's1.append(s2, verify_integrity=True)',
],
# Throws NotImplementedError when modifying df
'pandas.core.series.Series.transform': ['df'],
'pandas.core.series.Series.autocorr': ['*'],
'pandas.core.series.Series.combine': ['*'],
'pandas.core.series.Series.combine_first': ['*'],
'pandas.core.series.Series.compare': ['*'],
'pandas.core.series.Series.cov': [
# Differs in LSB on jenkins.
"s1.cov(s2)",
],
'pandas.core.series.Series.drop_duplicates': ['*'],
'pandas.core.series.Series.duplicated': ['*'],
'pandas.core.series.Series.explode': ['*'],
'pandas.core.series.Series.idxmax': ['*'],
'pandas.core.series.Series.idxmin': ['*'],
'pandas.core.series.Series.nonzero': ['*'],
'pandas.core.series.Series.quantile': ['*'],
'pandas.core.series.Series.pop': ['ser'], # testing side effect
'pandas.core.series.Series.repeat': ['*'],
'pandas.core.series.Series.replace': ['*'],
'pandas.core.series.Series.reset_index': ['*'],
'pandas.core.series.Series.searchsorted': [
# This doctest seems to be incorrectly parsed.
"x = pd.Categorical(['apple', 'bread', 'bread',"
],
'pandas.core.series.Series.set_axis': ['*'],
'pandas.core.series.Series.to_csv': ['*'],
'pandas.core.series.Series.to_markdown': ['*'],
'pandas.core.series.Series.update': ['*'],
'pandas.core.series.Series.view': [
# Inspection after modification.
's'
],
})
self.assertEqual(result.failed, 0)
def test_string_tests(self):
PD_VERSION = tuple(int(v) for v in pd.__version__.split('.'))
if PD_VERSION < (1, 2, 0):
module = pd.core.strings
else:
# Definitions were moved to accessor in pandas 1.2.0
module = pd.core.strings.accessor
module_name = module.__name__
result = doctests.testmod(
module,
use_beam=False,
wont_implement_ok={
# These methods can accept deferred series objects, but not lists
f'{module_name}.StringMethods.cat': [
"s.str.cat(['A', 'B', 'C', 'D'], sep=',')",
"s.str.cat(['A', 'B', 'C', 'D'], sep=',', na_rep='-')",
"s.str.cat(['A', 'B', 'C', 'D'], na_rep='-')"
],
f'{module_name}.StringMethods.repeat': [
's.str.repeat(repeats=[1, 2, 3])'
],
f'{module_name}.str_repeat': ['s.str.repeat(repeats=[1, 2, 3])'],
f'{module_name}.StringMethods.get_dummies': ['*'],
f'{module_name}.str_get_dummies': ['*'],
},
skip={
# count() on Series with a NaN produces mismatched type if we
# have a NaN-only partition.
f'{module_name}.StringMethods.count': ["s.str.count('a')"],
f'{module_name}.str_count': ["s.str.count('a')"],
# Produce None instead of NaN, see
# frames_test.py::DeferredFrameTest::test_str_split
f'{module_name}.StringMethods.rsplit': [
's.str.split(expand=True)',
's.str.rsplit("/", n=1, expand=True)',
],
f'{module_name}.StringMethods.split': [
's.str.split(expand=True)',
's.str.rsplit("/", n=1, expand=True)',
],
# Bad test strings in pandas 1.1.x
f'{module_name}.str_replace': [
"pd.Series(['foo', 'fuz', np.nan]).str.replace('f', repr)"
],
f'{module_name}.StringMethods.replace': [
"pd.Series(['foo', 'fuz', np.nan]).str.replace('f', repr)"
],
# output has incorrect formatting in 1.2.x
f'{module_name}.StringMethods.extractall': ['*']
})
self.assertEqual(result.failed, 0)
def test_datetime_tests(self):
# TODO(BEAM-10721)
datetimelike_result = doctests.testmod(
pd.core.arrays.datetimelike,
use_beam=False,
skip={
'pandas.core.arrays.datetimelike.AttributesMixin._unbox_scalar': [
'*'
],
'pandas.core.arrays.datetimelike.TimelikeOps.ceil': ['*'],
'pandas.core.arrays.datetimelike.TimelikeOps.floor': ['*'],
'pandas.core.arrays.datetimelike.TimelikeOps.round': ['*'],
})
datetime_result = doctests.testmod(
pd.core.arrays.datetimes,
use_beam=False,
skip={
'pandas.core.arrays.datetimes.DatetimeArray.day': ['*'],
'pandas.core.arrays.datetimes.DatetimeArray.hour': ['*'],
'pandas.core.arrays.datetimes.DatetimeArray.microsecond': ['*'],
'pandas.core.arrays.datetimes.DatetimeArray.minute': ['*'],
'pandas.core.arrays.datetimes.DatetimeArray.month': ['*'],
'pandas.core.arrays.datetimes.DatetimeArray.nanosecond': ['*'],
'pandas.core.arrays.datetimes.DatetimeArray.second': ['*'],
'pandas.core.arrays.datetimes.DatetimeArray.year': ['*'],
'pandas.core.arrays.datetimes.DatetimeArray.is_leap_year': ['*'],
'pandas.core.arrays.datetimes.DatetimeArray.is_month_end': ['*'],
'pandas.core.arrays.datetimes.DatetimeArray.is_month_start': ['*'],
'pandas.core.arrays.datetimes.DatetimeArray.is_quarter_end': ['*'],
'pandas.core.arrays.datetimes.DatetimeArray.is_quarter_start': [
'*'
],
'pandas.core.arrays.datetimes.DatetimeArray.is_year_end': ['*'],
'pandas.core.arrays.datetimes.DatetimeArray.is_year_start': ['*'],
'pandas.core.arrays.datetimes.DatetimeArray.to_period': ['*'],
'pandas.core.arrays.datetimes.DatetimeArray.tz_localize': ['*'],
})
self.assertEqual(datetimelike_result.failed, 0)
self.assertEqual(datetime_result.failed, 0)
def test_indexing_tests(self):
result = doctests.testmod(
pd.core.indexing,
use_beam=False,
skip={
'pandas.core.indexing._IndexSlice': ['*'],
'pandas.core.indexing.IndexingMixin.at': ['*'],
'pandas.core.indexing.IndexingMixin.iat': ['*'],
'pandas.core.indexing.IndexingMixin.iloc': ['*'],
'pandas.core.indexing.IndexingMixin.loc': ['*'],
'pandas.core.indexing._AtIndexer': ['*'],
'pandas.core.indexing._LocIndexer': ['*'],
'pandas.core.indexing._iAtIndexer': ['*'],
'pandas.core.indexing._iLocIndexer': ['*'],
})
self.assertEqual(result.failed, 0)
def test_groupby_tests(self):
result = doctests.testmod(
pd.core.groupby.groupby,
use_beam=False,
wont_implement_ok={
'pandas.core.groupby.groupby.GroupBy.head': ['*'],
'pandas.core.groupby.groupby.GroupBy.tail': ['*'],
'pandas.core.groupby.groupby.GroupBy.nth': ['*'],
'pandas.core.groupby.groupby.GroupBy.cumcount': ['*'],
},
not_implemented_ok={
'pandas.core.groupby.groupby.GroupBy.describe': ['*'],
'pandas.core.groupby.groupby.GroupBy.ngroup': ['*'],
'pandas.core.groupby.groupby.GroupBy.resample': ['*'],
'pandas.core.groupby.groupby.GroupBy.sample': ['*'],
'pandas.core.groupby.groupby.GroupBy.quantile': ['*'],
'pandas.core.groupby.groupby.BaseGroupBy.pipe': ['*'],
# pipe tests are in a different location in pandas 1.1.x
'pandas.core.groupby.groupby._GroupBy.pipe': ['*'],
'pandas.core.groupby.groupby.GroupBy.nth': [
"df.groupby('A', as_index=False).nth(1)",
],
},
skip={
# Uses iloc to mutate a DataFrame
'pandas.core.groupby.groupby.GroupBy.resample': [
'df.iloc[2, 0] = 5',
'df',
],
# TODO: Raise wont implement for list passed as a grouping column
# Currently raises unhashable type: list
'pandas.core.groupby.groupby.GroupBy.ngroup': [
'df.groupby(["A", [1,1,2,3,2,1]]).ngroup()'
],
})
self.assertEqual(result.failed, 0)
result = doctests.testmod(
pd.core.groupby.generic,
use_beam=False,
wont_implement_ok={
# Returns an array by default, not a Series. WontImplement
# (non-deferred)
'pandas.core.groupby.generic.SeriesGroupBy.unique': ['*'],
# TODO: Is take actually deprecated?
'pandas.core.groupby.generic.DataFrameGroupBy.take': ['*'],
'pandas.core.groupby.generic.SeriesGroupBy.take': ['*'],
'pandas.core.groupby.generic.SeriesGroupBy.nsmallest': [
"s.nsmallest(3, keep='last')",
"s.nsmallest(3)",
"s.nsmallest()",
],
'pandas.core.groupby.generic.SeriesGroupBy.nlargest': [
"s.nlargest(3, keep='last')",
"s.nlargest(3)",
"s.nlargest()",
],
'pandas.core.groupby.generic.DataFrameGroupBy.diff': ['*'],
'pandas.core.groupby.generic.SeriesGroupBy.diff': ['*'],
'pandas.core.groupby.generic.DataFrameGroupBy.hist': ['*'],
'pandas.core.groupby.generic.DataFrameGroupBy.fillna': [
"df.fillna(method='ffill')",
'df.fillna(value=values, limit=1)',
],
'pandas.core.groupby.generic.SeriesGroupBy.fillna': [
"df.fillna(method='ffill')",
'df.fillna(value=values, limit=1)',
],
},
not_implemented_ok={
'pandas.core.groupby.generic.DataFrameGroupBy.transform': ['*'],
'pandas.core.groupby.generic.DataFrameGroupBy.idxmax': ['*'],
'pandas.core.groupby.generic.DataFrameGroupBy.idxmin': ['*'],
'pandas.core.groupby.generic.DataFrameGroupBy.filter': ['*'],
'pandas.core.groupby.generic.DataFrameGroupBy.nunique': ['*'],
'pandas.core.groupby.generic.SeriesGroupBy.transform': ['*'],
'pandas.core.groupby.generic.SeriesGroupBy.idxmax': ['*'],
'pandas.core.groupby.generic.SeriesGroupBy.idxmin': ['*'],
'pandas.core.groupby.generic.SeriesGroupBy.filter': ['*'],
'pandas.core.groupby.generic.SeriesGroupBy.describe': ['*'],
},
skip={
'pandas.core.groupby.generic.SeriesGroupBy.cov': [
# Floating point comparison fails
's1.cov(s2)',
],
'pandas.core.groupby.generic.DataFrameGroupBy.cov': [
# Mutates input DataFrame with loc
# TODO: Replicate in frames_test.py
"df.loc[df.index[:5], 'a'] = np.nan",
"df.loc[df.index[5:10], 'b'] = np.nan",
"df.cov(min_periods=12)",
],
# These examples rely on grouping by a list
'pandas.core.groupby.generic.SeriesGroupBy.aggregate': ['*'],
'pandas.core.groupby.generic.DataFrameGroupBy.aggregate': ['*'],
})
self.assertEqual(result.failed, 0)
def test_top_level(self):
tests = {
name: func.__doc__
for (name, func) in pd.__dict__.items()
if _is_top_level_function(func) and getattr(func, '__doc__', None)
}
# IO methods are tested in io_test.py
skip_reads = {name: ['*'] for name in dir(pd) if name.startswith('read_')}
result = doctests.teststrings(
tests,
use_beam=False,
report=True,
not_implemented_ok={
'concat': ['pd.concat([s1, s2], ignore_index=True)'],
'crosstab': ['*'],
'cut': ['*'],
'eval': ['*'],
'factorize': ['*'],
'get_dummies': ['*'],
'infer_freq': ['*'],
'lreshape': ['*'],
'melt': ['*'],
'merge': ["df1.merge(df2, how='cross')"],
'merge_asof': ['*'],
'pivot': ['*'],
'pivot_table': ['*'],
'qcut': ['*'],
'reset_option': ['*'],
'set_eng_float_format': ['*'],
'set_option': ['*'],
'to_numeric': ['*'],
'to_timedelta': ['*'],
'unique': ['*'],
'value_counts': ['*'],
'wide_to_long': ['*'],
},
wont_implement_ok={
'to_datetime': ['s.head()'],
'to_pickle': ['*'],
'melt': [
"pd.melt(df, id_vars=['A'], value_vars=['B'])",
"pd.melt(df, id_vars=['A'], value_vars=['B', 'C'])",
"pd.melt(df, col_level=0, id_vars=['A'], value_vars=['B'])",
"pd.melt(df, id_vars=[('A', 'D')], value_vars=[('B', 'E')])",
"pd.melt(df, id_vars=['A'], value_vars=['B'],\n" +
" var_name='myVarname', value_name='myValname')"
],
},
skip={
# error formatting
'concat': ['pd.concat([df5, df6], verify_integrity=True)'],
# doctest DeprecationWarning
'melt': ['df'],
# Order-sensitive re-indexing.
'merge': [
"df1.merge(df2, left_on='lkey', right_on='rkey')",
"df1.merge(df2, left_on='lkey', right_on='rkey',\n"
" suffixes=('_left', '_right'))",
"df1.merge(df2, how='left', on='a')",
],
# Not an actual test.
'option_context': ['*'],
'factorize': ['codes', 'uniques'],
# Bad top-level use of un-imported function.
'merge_ordered': [
'merge_ordered(df1, df2, fill_method="ffill", left_by="group")'
],
# Expected error.
'pivot': ["df.pivot(index='foo', columns='bar', values='baz')"],
# Never written.
'to_pickle': ['os.remove("./dummy.pkl")'],
**skip_reads
})
self.assertEqual(result.failed, 0)
if __name__ == '__main__':
unittest.main()
| true | true |
f720120c884a1396999a7662659cbb6bb8cb01bb | 811 | py | Python | manage.py | oguzhanunlu/validate_json | 79cda734934195bd59055d7f04288a7b538f9542 | ["Apache-2.0"] | null | null | null | manage.py | oguzhanunlu/validate_json | 79cda734934195bd59055d7f04288a7b538f9542 | ["Apache-2.0"] | null | null | null | manage.py | oguzhanunlu/validate_json | 79cda734934195bd59055d7f04288a7b538f9542 | ["Apache-2.0"] | null | null | null |
#!/usr/bin/env python
import os
import sys
if __name__ == "__main__":
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "validate_json.settings")
try:
from django.core.management import execute_from_command_line
except ImportError:
# The above import may fail for some other reason. Ensure that the
# issue is really that Django is missing to avoid masking other
# exceptions on Python 2.
try:
import django
except ImportError:
raise ImportError(
"Couldn't import Django. Are you sure it's installed and "
"available on your PYTHONPATH environment variable? Did you "
"forget to activate a virtual environment?"
)
raise
execute_from_command_line(sys.argv)
| 35.26087 | 77 | 0.644883 |
import os
import sys
if __name__ == "__main__":
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "validate_json.settings")
try:
from django.core.management import execute_from_command_line
except ImportError:
try:
import django
except ImportError:
raise ImportError(
"Couldn't import Django. Are you sure it's installed and "
"available on your PYTHONPATH environment variable? Did you "
"forget to activate a virtual environment?"
)
raise
execute_from_command_line(sys.argv)
| true | true |
f72012960cef127dbd4634d4f311534488584e40 | 27,649 | py | Python | salt/modules/snapper.py | amaclean199/salt | 8aaac011b4616e3c9e74a1daafb4a2146a5a430f | ["Apache-2.0"] | 12 | 2015-01-21T00:18:25.000Z | 2021-07-11T07:35:26.000Z | salt/modules/snapper.py | amaclean199/salt | 8aaac011b4616e3c9e74a1daafb4a2146a5a430f | ["Apache-2.0"] | 1 | 2015-10-05T22:03:10.000Z | 2015-10-05T22:03:10.000Z | salt/modules/snapper.py | amaclean199/salt | 8aaac011b4616e3c9e74a1daafb4a2146a5a430f | ["Apache-2.0"] | 12 | 2015-01-05T09:50:42.000Z | 2019-08-19T01:43:40.000Z |
# -*- coding: utf-8 -*-
'''
Module to manage filesystem snapshots with snapper
.. versionadded:: 2016.11.0
:codeauthor: Duncan Mac-Vicar P. <dmacvicar@suse.de>
:codeauthor: Pablo Suárez Hernández <psuarezhernandez@suse.de>
:depends: ``dbus`` Python module.
:depends: ``snapper`` http://snapper.io, available in most distros
:maturity: new
:platform: Linux
'''
from __future__ import absolute_import, unicode_literals, print_function
import logging
import os
import time
import difflib
try:
from pwd import getpwuid
HAS_PWD = True
except ImportError:
HAS_PWD = False
from salt.exceptions import CommandExecutionError
import salt.utils.files
# import 3rd party libs
from salt.ext import six
try:
import dbus # pylint: disable=wrong-import-order
HAS_DBUS = True
except ImportError:
HAS_DBUS = False
DBUS_STATUS_MAP = {
1: "created",
2: "deleted",
4: "type changed",
8: "modified",
16: "permission changed",
32: "owner changed",
64: "group changed",
128: "extended attributes changed",
256: "ACL info changed",
}
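# Snapper ORs these flags together per file; e.g. a status of 9 (1 | 8)
# means the file was both "created" and "modified" (value illustrative).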
SNAPPER_DBUS_OBJECT = 'org.opensuse.Snapper'
SNAPPER_DBUS_PATH = '/org/opensuse/Snapper'
SNAPPER_DBUS_INTERFACE = 'org.opensuse.Snapper'
# pylint: disable=invalid-name
log = logging.getLogger(__name__)
bus = None
system_bus_error = None
snapper = None
snapper_error = None
if HAS_DBUS:
try:
bus = dbus.SystemBus()
except dbus.DBusException as exc:
log.warning(exc)
system_bus_error = exc
else:
if SNAPPER_DBUS_OBJECT in bus.list_activatable_names():
try:
snapper = dbus.Interface(bus.get_object(SNAPPER_DBUS_OBJECT,
SNAPPER_DBUS_PATH),
dbus_interface=SNAPPER_DBUS_INTERFACE)
except (dbus.DBusException, ValueError) as exc:
log.warning(exc)
snapper_error = exc
else:
snapper_error = 'snapper is missing'
# pylint: enable=invalid-name
def __virtual__():
error_msg = 'The snapper module cannot be loaded: {0}'
if not HAS_DBUS:
return False, error_msg.format('missing python dbus module')
elif not snapper:
return False, error_msg.format(snapper_error)
elif not bus:
return False, error_msg.format(system_bus_error)
elif not HAS_PWD:
return False, error_msg.format('pwd module not available')
return 'snapper'
def _snapshot_to_data(snapshot):
'''
Returns snapshot data from a D-Bus response.
A snapshot D-Bus response is a dbus.Struct containing the
information related to a snapshot:
[id, type, pre_snapshot, timestamp, user, description,
cleanup_algorithm, userdata]
id: dbus.UInt32
type: dbus.UInt16
pre_snapshot: dbus.UInt32
timestamp: dbus.Int64
user: dbus.UInt32
description: dbus.String
cleanup_algorithm: dbus.String
userdata: dbus.Dictionary
'''
data = {}
data['id'] = snapshot[0]
data['type'] = ['single', 'pre', 'post'][snapshot[1]]
if data['type'] == 'post':
data['pre'] = snapshot[2]
if snapshot[3] != -1:
data['timestamp'] = snapshot[3]
else:
data['timestamp'] = int(time.time())
data['user'] = getpwuid(snapshot[4])[0]
data['description'] = snapshot[5]
data['cleanup'] = snapshot[6]
data['userdata'] = {}
for key, value in snapshot[7].items():
data['userdata'][key] = value
return data
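# Illustrative shape of the returned dict (all values invented):
#   {'id': 42, 'type': 'pre', 'timestamp': 1457006571, 'user': 'root',
#    'description': 'salt job 20160607130930720112', 'cleanup': 'number',
#    'userdata': {'salt_jid': '20160607130930720112'}}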
def _dbus_exception_to_reason(exc, args):
'''
Returns a error message from a snapper DBusException
'''
error = exc.get_dbus_name()
if error == 'error.unknown_config':
return "Unknown configuration '{0}'".format(args['config'])
elif error == 'error.illegal_snapshot':
return 'Invalid snapshot'
else:
return exc.get_dbus_name()
def list_snapshots(config='root'):
'''
List available snapshots
CLI example:
.. code-block:: bash
salt '*' snapper.list_snapshots config=myconfig
'''
try:
snapshots = snapper.ListSnapshots(config)
return [_snapshot_to_data(s) for s in snapshots]
except dbus.DBusException as exc:
raise CommandExecutionError(
'Error encountered while listing snapshots: {0}'
.format(_dbus_exception_to_reason(exc, locals()))
)
def get_snapshot(number=0, config='root'):
'''
Get detailed information about a given snapshot
CLI example:
.. code-block:: bash
salt '*' snapper.get_snapshot 1
'''
try:
snapshot = snapper.GetSnapshot(config, int(number))
return _snapshot_to_data(snapshot)
except dbus.DBusException as exc:
raise CommandExecutionError(
'Error encountered while retrieving snapshot: {0}'
.format(_dbus_exception_to_reason(exc, locals()))
)
def list_configs():
'''
List all available configs
CLI example:
.. code-block:: bash
salt '*' snapper.list_configs
'''
try:
configs = snapper.ListConfigs()
return dict((config[0], config[2]) for config in configs)
except dbus.DBusException as exc:
raise CommandExecutionError(
'Error encountered while listing configurations: {0}'
.format(_dbus_exception_to_reason(exc, locals()))
)
def _config_filter(value):
if isinstance(value, bool):
return 'yes' if value else 'no'
return value
def set_config(name='root', **kwargs):
'''
Set configuration values
CLI example:
.. code-block:: bash
salt '*' snapper.set_config SYNC_ACL=True
Keys are case insensitive as they will be always uppercased to
snapper convention. The above example is equivalent to:
.. code-block:: bash
salt '*' snapper.set_config sync_acl=True
'''
try:
data = dict((k.upper(), _config_filter(v)) for k, v in
kwargs.items() if not k.startswith('__'))
snapper.SetConfig(name, data)
except dbus.DBusException as exc:
raise CommandExecutionError(
'Error encountered while setting configuration {0}: {1}'
.format(name, _dbus_exception_to_reason(exc, locals()))
)
return True
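# Illustrative call: keys are uppercased and booleans mapped to yes/no, so
# set_config('root', sync_acl=True) sends {'SYNC_ACL': 'yes'} to snapper.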
def _get_last_snapshot(config='root'):
'''
Returns the last existing created snapshot
'''
snapshot_list = sorted(list_snapshots(config), key=lambda x: x['id'])
return snapshot_list[-1]
def status_to_string(dbus_status):
'''
Converts a numeric dbus snapper status into a string
CLI Example:
.. code-block:: bash
salt '*' snapper.status_to_string <dbus_status>
'''
status_tuple = (
dbus_status & 0b000000001, dbus_status & 0b000000010, dbus_status & 0b000000100,
dbus_status & 0b000001000, dbus_status & 0b000010000, dbus_status & 0b000100000,
dbus_status & 0b001000000, dbus_status & 0b010000000, dbus_status & 0b100000000
)
return [DBUS_STATUS_MAP[status] for status in status_tuple if status]
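# Doctest-style sketch using the bit values from DBUS_STATUS_MAP:
#   >>> status_to_string(9)  # 9 == 1 | 8
#   ['created', 'modified']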
def get_config(name='root'):
'''
Retrieves all values from a given configuration
CLI example:
.. code-block:: bash
salt '*' snapper.get_config
'''
try:
config = snapper.GetConfig(name)
return config
except dbus.DBusException as exc:
raise CommandExecutionError(
'Error encountered while retrieving configuration: {0}'
.format(_dbus_exception_to_reason(exc, locals()))
)
def create_config(name=None,
subvolume=None,
fstype=None,
template=None,
extra_opts=None):
'''
Creates a new Snapper configuration
name
Name of the new Snapper configuration.
subvolume
Path to the related subvolume.
fstype
Filesystem type of the subvolume.
template
Configuration template to use. (Default: default)
extra_opts
Extra Snapper configuration opts dictionary. It will override the values provided
by the given template (if any).
CLI example:
.. code-block:: bash
salt '*' snapper.create_config name=myconfig subvolume=/foo/bar/ fstype=btrfs
salt '*' snapper.create_config name=myconfig subvolume=/foo/bar/ fstype=btrfs template="default"
salt '*' snapper.create_config name=myconfig subvolume=/foo/bar/ fstype=btrfs extra_opts='{"NUMBER_CLEANUP": False}'
'''
def raise_arg_error(argname):
raise CommandExecutionError(
'You must provide a "{0}" for the new configuration'.format(argname)
)
if not name:
raise_arg_error("name")
if not subvolume:
raise_arg_error("subvolume")
if not fstype:
raise_arg_error("fstype")
if not template:
template = ""
try:
snapper.CreateConfig(name, subvolume, fstype, template)
if extra_opts:
set_config(name, **extra_opts)
return get_config(name)
except dbus.DBusException as exc:
raise CommandExecutionError(
'Error encountered while creating the new configuration: {0}'
.format(_dbus_exception_to_reason(exc, locals()))
)
def create_snapshot(config='root', snapshot_type='single', pre_number=None,
description=None, cleanup_algorithm='number', userdata=None,
**kwargs):
'''
Creates a snapshot
config
Configuration name.
snapshot_type
Specifies the type of the new snapshot. Possible values are
single, pre and post.
pre_number
For post snapshots the number of the pre snapshot must be
provided.
description
Description for the snapshot. If not given, the salt job id will be used.
cleanup_algorithm
Set the cleanup algorithm for the snapshot.
number
Deletes old snapshots when a certain number of snapshots
is reached.
timeline
Deletes old snapshots but keeps a number of hourly,
daily, weekly, monthly and yearly snapshots.
empty-pre-post
Deletes pre/post snapshot pairs with empty diffs.
userdata
Set userdata for the snapshot (key-value pairs).
Returns the number of the created snapshot.
CLI example:
.. code-block:: bash
salt '*' snapper.create_snapshot
'''
if not userdata:
userdata = {}
jid = kwargs.get('__pub_jid')
if description is None and jid is not None:
description = 'salt job {0}'.format(jid)
if jid is not None:
userdata['salt_jid'] = jid
new_nr = None
try:
if snapshot_type == 'single':
new_nr = snapper.CreateSingleSnapshot(config, description,
cleanup_algorithm, userdata)
elif snapshot_type == 'pre':
new_nr = snapper.CreatePreSnapshot(config, description,
cleanup_algorithm, userdata)
elif snapshot_type == 'post':
if pre_number is None:
raise CommandExecutionError(
"pre snapshot number 'pre_number' needs to be"
"specified for snapshots of the 'post' type")
new_nr = snapper.CreatePostSnapshot(config, pre_number, description,
cleanup_algorithm, userdata)
else:
raise CommandExecutionError(
"Invalid snapshot type '{0}'", format(snapshot_type))
except dbus.DBusException as exc:
raise CommandExecutionError(
'Error encountered while listing changed files: {0}'
.format(_dbus_exception_to_reason(exc, locals()))
)
return new_nr
def delete_snapshot(snapshots_ids=None, config="root"):
'''
Deletes a snapshot
config
Configuration name. (Default: root)
snapshots_ids
List of the snapshots IDs to be deleted.
CLI example:
.. code-block:: bash
salt '*' snapper.delete_snapshot 54
salt '*' snapper.delete_snapshot config=root 54
salt '*' snapper.delete_snapshot config=root snapshots_ids=[54,55,56]
'''
if not snapshots_ids:
raise CommandExecutionError('Error: No snapshot ID has been provided')
try:
current_snapshots_ids = [x['id'] for x in list_snapshots(config)]
if not isinstance(snapshots_ids, list):
snapshots_ids = [snapshots_ids]
if not set(snapshots_ids).issubset(set(current_snapshots_ids)):
raise CommandExecutionError(
"Error: Snapshots '{0}' not found".format(", ".join(
[six.text_type(x) for x in set(snapshots_ids).difference(
set(current_snapshots_ids))]))
)
snapper.DeleteSnapshots(config, snapshots_ids)
return {config: {"ids": snapshots_ids, "status": "deleted"}}
except dbus.DBusException as exc:
raise CommandExecutionError(_dbus_exception_to_reason(exc, locals()))
def modify_snapshot(snapshot_id=None,
description=None,
userdata=None,
cleanup=None,
config="root"):
'''
Modify attributes of an existing snapshot.
config
Configuration name. (Default: root)
snapshot_id
ID of the snapshot to be modified.
cleanup
Change the cleanup method of the snapshot. (str)
description
Change the description of the snapshot. (str)
userdata
Change the userdata dictionary of the snapshot. (dict)
CLI example:
.. code-block:: bash
salt '*' snapper.modify_snapshot 54 description="my snapshot description"
salt '*' snapper.modify_snapshot 54 description="my snapshot description"
salt '*' snapper.modify_snapshot 54 userdata='{"foo": "bar"}'
salt '*' snapper.modify_snapshot snapshot_id=54 cleanup="number"
'''
if not snapshot_id:
raise CommandExecutionError('Error: No snapshot ID has been provided')
snapshot = get_snapshot(config=config, number=snapshot_id)
try:
# Updating only the explicitly provided attributes by the user
updated_opts = {
'description': description if description is not None else snapshot['description'],
'cleanup': cleanup if cleanup is not None else snapshot['cleanup'],
'userdata': userdata if userdata is not None else snapshot['userdata'],
}
snapper.SetSnapshot(config,
snapshot_id,
updated_opts['description'],
updated_opts['cleanup'],
updated_opts['userdata'])
return get_snapshot(config=config, number=snapshot_id)
except dbus.DBusException as exc:
raise CommandExecutionError(_dbus_exception_to_reason(exc, locals()))
def _get_num_interval(config, num_pre, num_post):
'''
Returns numerical interval based on optionals num_pre, num_post values
'''
post = int(num_post) if num_post else 0
pre = int(num_pre) if num_pre is not None else _get_last_snapshot(config)['id']
return pre, post
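# Example: _get_num_interval('root', None, None) yields
# (<id of the last snapshot>, 0), i.e. "last snapshot vs. current state".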
def _is_text_file(filename):
'''
Checks if a file is a text file
'''
type_of_file = os.popen('file -bi {0}'.format(filename), 'r').read()
return type_of_file.startswith('text')
def run(function, *args, **kwargs):
'''
Runs a function from an execution module creating pre and post snapshots
and associating the salt job id with those snapshots for easy undo and
cleanup.
function
Salt function to call.
config
Configuration name. (default: "root")
description
A description for the snapshots. (default: None)
userdata
Data to include in the snapshot metadata. (default: None)
cleanup_algorithm
Snapper cleanup algorithm. (default: "number")
`*args`
args for the function to call. (default: None)
`**kwargs`
kwargs for the function to call (default: None)
The example below appends text to /etc/motd using the file.append
module, and creates two snapshots, pre and post, with the associated
metadata. The jid will be available as salt_jid in the userdata of the
snapshots.
You can inspect the changes immediately afterwards with snapper.diff_jid.
CLI Example:
.. code-block:: bash
salt '*' snapper.run file.append args='["/etc/motd", "some text"]'
'''
config = kwargs.pop("config", "root")
description = kwargs.pop("description", "snapper.run[{0}]".format(function))
cleanup_algorithm = kwargs.pop("cleanup_algorithm", "number")
userdata = kwargs.pop("userdata", {})
func_kwargs = dict((k, v) for k, v in kwargs.items() if not k.startswith('__'))
kwargs = dict((k, v) for k, v in kwargs.items() if k.startswith('__'))
pre_nr = __salt__['snapper.create_snapshot'](
config=config,
snapshot_type='pre',
description=description,
cleanup_algorithm=cleanup_algorithm,
userdata=userdata,
**kwargs)
if function not in __salt__:
raise CommandExecutionError(
'function "{0}" does not exist'.format(function)
)
try:
ret = __salt__[function](*args, **func_kwargs)
except CommandExecutionError as exc:
ret = "\n".join([six.text_type(exc), __salt__[function].__doc__])
__salt__['snapper.create_snapshot'](
config=config,
snapshot_type='post',
pre_number=pre_nr,
description=description,
cleanup_algorithm=cleanup_algorithm,
userdata=userdata,
**kwargs)
return ret
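# A minimal sketch of the flow implemented above (function name, args and
# config are illustrative):
#   pre_nr = create_snapshot(config='root', snapshot_type='pre',
#                            description="snapper.run[file.append]")
#   ret = __salt__['file.append']('/etc/motd', 'some text')
#   create_snapshot(config='root', snapshot_type='post', pre_number=pre_nr,
#                   description="snapper.run[file.append]")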
def status(config='root', num_pre=None, num_post=None):
'''
Returns a comparison between two snapshots
config
Configuration name.
num_pre
first snapshot ID to compare. Default is last snapshot
num_post
last snapshot ID to compare. Default is 0 (current state)
CLI example:
.. code-block:: bash
salt '*' snapper.status
salt '*' snapper.status num_pre=19 num_post=20
'''
try:
pre, post = _get_num_interval(config, num_pre, num_post)
snapper.CreateComparison(config, int(pre), int(post))
files = snapper.GetFiles(config, int(pre), int(post))
status_ret = {}
SUBVOLUME = list_configs()[config]['SUBVOLUME']
for file in files:
# If SUBVOLUME is included in the filepath we strip it,
# to keep the final path from starting with a double '/'
_filepath = file[0][len(SUBVOLUME):] if file[0].startswith(SUBVOLUME) else file[0]
status_ret[os.path.normpath(SUBVOLUME + _filepath)] = {'status': status_to_string(file[1])}
return status_ret
except dbus.DBusException as exc:
raise CommandExecutionError(
'Error encountered while listing changed files: {0}'
.format(_dbus_exception_to_reason(exc, locals()))
)
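# Illustrative return value (paths and statuses invented):
#   {'/etc/motd': {'status': ['modified']},
#    '/etc/new-file.conf': {'status': ['created']}}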
def changed_files(config='root', num_pre=None, num_post=None):
'''
Returns the files changed between two snapshots
config
Configuration name.
num_pre
first snapshot ID to compare. Default is last snapshot
num_post
last snapshot ID to compare. Default is 0 (current state)
CLI example:
.. code-block:: bash
salt '*' snapper.changed_files
salt '*' snapper.changed_files num_pre=19 num_post=20
'''
return status(config, num_pre, num_post).keys()
def undo(config='root', files=None, num_pre=None, num_post=None):
'''
Undo all file changes that happened between num_pre and num_post, leaving
the files in the state of num_pre.
.. warning::
If any of the files changed after num_post, those changes will be overwritten.
The snapshots are used to determine the file list, but the current
version of the files will be overwritten by the versions in num_pre.
To undo changes between num_pre and the current version of the
files, use num_post=0.
CLI Example:
.. code-block:: bash
salt '*' snapper.undo
'''
pre, post = _get_num_interval(config, num_pre, num_post)
changes = status(config, pre, post)
changed = set(changes.keys())
requested = set(files or changed)
if not requested.issubset(changed):
raise CommandExecutionError(
'Given file list contains files that are not present '
'in the changed filelist: {0}'.format(requested - changed))
cmdret = __salt__['cmd.run']('snapper -c {0} undochange {1}..{2} {3}'.format(
config, pre, post, ' '.join(requested)))
try:
components = cmdret.split(' ')
ret = {}
for comp in components:
key, val = comp.split(':')
ret[key] = val
return ret
except ValueError:
raise CommandExecutionError(
'Error while processing Snapper response: {0}'.format(cmdret))
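# Illustrative parse of snapper's output: a response such as
# 'create:0 modify:1 delete:0' becomes
# {'create': '0', 'modify': '1', 'delete': '0'}.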
def _get_jid_snapshots(jid, config='root'):
'''
Returns pre/post snapshots made by a given Salt jid
Looks for 'salt_jid' entries into snapshots userdata which are created
when 'snapper.run' is executed.
'''
jid_snapshots = [x for x in list_snapshots(config) if x['userdata'].get("salt_jid") == jid]
pre_snapshot = [x for x in jid_snapshots if x['type'] == "pre"]
post_snapshot = [x for x in jid_snapshots if x['type'] == "post"]
if not pre_snapshot or not post_snapshot:
raise CommandExecutionError("Jid '{0}' snapshots not found".format(jid))
return (
pre_snapshot[0]['id'],
post_snapshot[0]['id']
)
def undo_jid(jid, config='root'):
'''
Undo the changes applied by a salt job
jid
The job id to lookup
config
Configuration name.
CLI Example:
.. code-block:: bash
salt '*' snapper.undo_jid jid=20160607130930720112
'''
pre_snapshot, post_snapshot = _get_jid_snapshots(jid, config=config)
return undo(config, num_pre=pre_snapshot, num_post=post_snapshot)
def diff(config='root', filename=None, num_pre=None, num_post=None):
'''
Returns the differences between two snapshots
config
Configuration name.
filename
if not provided, differences are shown for all "text" files
changed between the snapshots
num_pre
first snapshot ID to compare. Default is last snapshot
num_post
last snapshot ID to compare. Default is 0 (current state)
CLI Example:
.. code-block:: bash
salt '*' snapper.diff
salt '*' snapper.diff filename=/var/log/snapper.log num_pre=19 num_post=20
'''
try:
pre, post = _get_num_interval(config, num_pre, num_post)
files = changed_files(config, pre, post)
if filename:
files = [filename] if filename in files else []
SUBVOLUME = list_configs()[config]['SUBVOLUME']
pre_mount = snapper.MountSnapshot(config, pre, False) if pre else SUBVOLUME
post_mount = snapper.MountSnapshot(config, post, False) if post else SUBVOLUME
files_diff = dict()
for filepath in [filepath for filepath in files if not os.path.isdir(filepath)]:
_filepath = filepath
if filepath.startswith(SUBVOLUME):
_filepath = filepath[len(SUBVOLUME):]
# Just in case, removing possible double '/' from the final file paths
pre_file = os.path.normpath(pre_mount + "/" + _filepath).replace("//", "/")
post_file = os.path.normpath(post_mount + "/" + _filepath).replace("//", "/")
if os.path.isfile(pre_file):
pre_file_exists = True
with salt.utils.files.fopen(pre_file) as rfh:
pre_file_content = [salt.utils.stringutils.to_unicode(_l)
for _l in rfh.readlines()]
else:
pre_file_content = []
pre_file_exists = False
if os.path.isfile(post_file):
post_file_exists = True
with salt.utils.files.fopen(post_file) as rfh:
post_file_content = [salt.utils.stringutils.to_unicode(_l)
for _l in rfh.readlines()]
else:
post_file_content = []
post_file_exists = False
if _is_text_file(pre_file) or _is_text_file(post_file):
files_diff[filepath] = {
'comment': "text file changed",
'diff': ''.join(difflib.unified_diff(pre_file_content,
post_file_content,
fromfile=pre_file,
tofile=post_file))}
if pre_file_exists and not post_file_exists:
files_diff[filepath]['comment'] = "text file deleted"
if not pre_file_exists and post_file_exists:
files_diff[filepath]['comment'] = "text file created"
elif not _is_text_file(pre_file) and not _is_text_file(post_file):
# This is a binary file
files_diff[filepath] = {'comment': "binary file changed"}
if pre_file_exists:
files_diff[filepath]['old_sha256_digest'] = __salt__['hashutil.sha256_digest'](''.join(pre_file_content))
if post_file_exists:
files_diff[filepath]['new_sha256_digest'] = __salt__['hashutil.sha256_digest'](''.join(post_file_content))
if post_file_exists and not pre_file_exists:
files_diff[filepath]['comment'] = "binary file created"
if pre_file_exists and not post_file_exists:
files_diff[filepath]['comment'] = "binary file deleted"
if pre:
snapper.UmountSnapshot(config, pre, False)
if post:
snapper.UmountSnapshot(config, post, False)
return files_diff
except dbus.DBusException as exc:
raise CommandExecutionError(
'Error encountered while showing differences between snapshots: {0}'
.format(_dbus_exception_to_reason(exc, locals()))
)
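# Illustrative return value (paths, diffs and digests invented):
#   {'/etc/motd': {'comment': 'text file changed', 'diff': '--- ...'},
#    '/root/blob.bin': {'comment': 'binary file changed',
#                       'old_sha256_digest': '...', 'new_sha256_digest': '...'}}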
def diff_jid(jid, config='root'):
'''
Returns the changes applied by a `jid`
jid
The job id to lookup
config
Configuration name.
CLI Example:
.. code-block:: bash
salt '*' snapper.diff_jid jid=20160607130930720112
'''
pre_snapshot, post_snapshot = _get_jid_snapshots(jid, config=config)
return diff(config, num_pre=pre_snapshot, num_post=post_snapshot)
def create_baseline(tag="baseline", config='root'):
'''
Creates a snapshot marked as baseline
tag
Tag name for the baseline
config
Configuration name.
CLI Example:
.. code-block:: bash
salt '*' snapper.create_baseline
salt '*' snapper.create_baseline my_custom_baseline
'''
return __salt__['snapper.create_snapshot'](config=config,
snapshot_type='single',
description="baseline snapshot",
cleanup_algorithm="number",
userdata={"baseline_tag": tag})
# File: cli/tests/test_managers/test_run.py from hackerwins/polyaxon (Apache-2.0)
#!/usr/bin/python
#
# Copyright 2019 Polyaxon, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# coding: utf-8
from __future__ import absolute_import, division, print_function
from unittest import TestCase
import pytest
from polyaxon_sdk import V1Run
from polyaxon.managers.run import RunManager
@pytest.mark.managers_mark
class TestRunManager(TestCase):
def test_default_props(self):
assert RunManager.IS_GLOBAL is False
assert RunManager.IS_POLYAXON_DIR is True
assert RunManager.CONFIG_FILE_NAME == ".polyaxonrun"
assert RunManager.CONFIG == V1Run
# File: src/neo4j_loader/load_csv_data.py from cebriggs7135/ontology-api (MIT)
'''
Created on Oct 20, 2020
@author: chb69
'''
import sys
import os
import types
import mysql.connector
from mysql.connector import errorcode
import csv
import argparse
"""
This list includes the prefixes for several informatics resources found in the PheKnowLator mapping data.
This might be useful in the future:
bto: is BRENDA Tissue ontology (human brain related)
fbbt: Flybase
caro: Common Anatomy Reference Ontology
xao: Frog
zfa: Zebrafish
ma: mouse anatomy
wbbt: Wormbase
fao: fungal anatomy ontology
https://ncit.nci.nih.gov/ncitbrowser/conceptreport.jsp?dictionary=nci_thesaurus&code=<code> resolves to NCI terms; we might be able to map these
vsao: Vertebrate Skeletal Anatomy Ontology
kupo: Kidney and Urinary Pathway Ontology
mp: mouse phenotype
emapa: Mouse Developmental Anatomy Ontology
caloha:ts- an ontology of human anatomy and human cell types
pmid: PubMed
mat: Minimal Anatomy Terminology
miaa: ???
efo: ??? (found Experimental Factor Ontology but that doesn't look right)
ehdaa: Human Developmental Anatomy Ontology
vhog: Vertebrate Homologous Ontology Group
pba: ???
bams: BAMS Neuroanatomical Ontology
mba: ???
ev: eVOC (Expressed Sequence Annotation for Humans)
dhba: ???
http://www.snomedbrowser.com/codes/details/<code> resolves to SNOMED terms. We might be able to import these
nlxanat: a NIF ontology http://uri.neuinfo.org/nif/nifstd/nlx_anat
aao: Amphibian Gross Anatomy Ontology
tao: Teleost Anatomy Ontology
tgma: Mosquito Gross Anatomy Ontology
hao: Hymenoptera Anatomy Ontology
"""
config = {}
def load_config(root_path, filename):
'''This method was heavily borrowed from the flask config.py file's from_pyfile method.
It reads a file containing python constants and loads it into a dictionary.
:param root_path: the path leading to the config file
:param filename: the filename of the config relative to the
root path.
'''
filename = os.path.join(root_path, filename)
d = types.ModuleType("config")
d.__file__ = filename
return_dict = {}
try:
with open(filename, mode="rb") as config_file:
exec(compile(config_file.read(), filename, "exec"), d.__dict__)
for config_key in d.__dict__:
                if not str(config_key).startswith('__'):
return_dict[config_key] = d.__dict__[config_key]
except OSError as e:
e.strerror = f"Unable to load configuration file ({e.strerror})"
raise
return return_dict
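# Example usage of load_config (a sketch; the directory and file name are
# assumptions, not taken from this repository):
#   config = load_config('/opt/ontology-api/src/neo4j_loader', 'app.cfg')
#   print(config['MYSQL_DATABASE_NAME'])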
def create_database(config):
''' Create the initial database. This method uses the SQL script found in
the TABLE_CREATE_SQL_FILEPATH of the config file to build the database.
:param dict config: The configuration settings
'''
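    # A minimal sketch of the config keys this method reads; the values shown
    # are illustrative assumptions:
    #   MYSQL_HOSTNAME = 'localhost'
    #   MYSQL_USERNAME = 'ontology'
    #   MYSQL_PASSWORD = 'secret'
    #   TABLE_CREATE_SQL_FILEPATH = '/opt/ontology-api/sql/table_create.sql'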
connection = None
try:
connection = mysql.connector.connect(
host=config['MYSQL_HOSTNAME'],
user=config['MYSQL_USERNAME'],
password=config['MYSQL_PASSWORD'])
cursor = connection.cursor()
with open(config['TABLE_CREATE_SQL_FILEPATH'], encoding="utf-8") as f:
commands = f.read().split(';')
for command in commands:
if str(command).strip() != "":
print('Executing: ' + command)
cursor.execute(command)
print ("Done creating database tables.")
except mysql.connector.Error as err:
if err.errno == errorcode.ER_ACCESS_DENIED_ERROR:
print("Something is wrong with your user name or password")
elif err.errno == errorcode.ER_BAD_DB_ERROR:
print("Database does not exist")
else:
print(err)
finally:
if connection != None:
connection.close()
def create_indices(config):
''' Create the indices in the mysql database to improve performance in the
transform step. There is a set of default indices that need to be created.
These are found in the config parameter INDEX_CREATE_SQL_FILEPATH. After these
are created, a series of custom indices need to be added to the various tables
created from the other config parameters.
:param dict config: The configuration settings
'''
connection = None
try:
connection = mysql.connector.connect(
host=config['MYSQL_HOSTNAME'],
user=config['MYSQL_USERNAME'],
password=config['MYSQL_PASSWORD'],
database=config['MYSQL_DATABASE_NAME'])
cursor = connection.cursor()
"""
with open(config['INDEX_CREATE_SQL_FILEPATH'], encoding="utf-8") as f:
# this code creates the "default" indices
commands = f.read().split(';')
for command in commands:
if str(command).strip() != "":
print('Executing: ' + command)
cursor.execute(command)
"""
# the code below creates the indices for the tables created from entries in the
# app.cfg file
for table_info in config['NODE_METADATA_FILE_TABLE_INFO']:
sql = "ALTER TABLE {table_name} ADD INDEX {table_name}_ontology_uri_idx (ontology_uri(500))".format(table_name=table_info['table_name'])
cursor.execute(sql)
sql = "ALTER TABLE {table_name} ADD INDEX {table_name}_node_label_idx (node_label(500))".format(table_name=table_info['table_name'])
cursor.execute(sql)
sql = "ALTER TABLE {table_name} ADD INDEX {table_name}_codeid_idx (codeid(500))".format(table_name=table_info['table_name'])
cursor.execute(sql)
sql = "ALTER TABLE {table_name} ADD INDEX {table_name}_sab_idx (sab(50))".format(table_name=table_info['table_name'])
cursor.execute(sql)
for table_info in config['EDGE_LIST_FILE_TABLE_INFO']:
sql = "ALTER TABLE {table_name} ADD INDEX {table_name}_subject_idx (subject(50))".format(table_name=table_info['table_name'])
cursor.execute(sql)
sql = "ALTER TABLE {table_name} ADD INDEX {table_name}_predicate_idx (predicate(100))".format(table_name=table_info['table_name'])
cursor.execute(sql)
sql = "ALTER TABLE {table_name} ADD INDEX {table_name}_object_idx (object(100))".format(table_name=table_info['table_name'])
cursor.execute(sql)
sql = "ALTER TABLE {table_name} ADD INDEX {table_name}_sab_idx (sab(50))".format(table_name=table_info['table_name'])
cursor.execute(sql)
for table_info in config['DBXREF_FILE_TABLE_INFO']:
sql = "ALTER TABLE {table_name} ADD INDEX {table_name}_ontology_uri_idx (ontology_uri(50))".format(table_name=table_info['table_name'])
cursor.execute(sql)
sql = "ALTER TABLE {table_name} ADD FULLTEXT INDEX {table_name}_dbxrefs_idx (dbxrefs(700))".format(table_name=table_info['table_name'])
cursor.execute(sql)
for table_info in config['RELATIONS_FILE_TABLE_INFO']:
sql = "ALTER TABLE {table_name} ADD INDEX {table_name}_relation_id_idx (relation_id(100))".format(table_name=table_info['table_name'])
cursor.execute(sql)
sql = "ALTER TABLE {table_name} ADD INDEX {table_name}_relation_label_idx (relation_label(50))".format(table_name=table_info['table_name'])
cursor.execute(sql)
sql = "ALTER TABLE {table_name} ADD INDEX {table_name}_inverse_relation_label_idx (inverse_relation_label(50))".format(table_name=table_info['table_name'])
cursor.execute(sql)
for table_info in config['SYNONYM_LIST_FILE_TABLE_INFO']:
sql = "ALTER TABLE {table_name} ADD INDEX {table_name}_ontology_uri_idx (ontology_uri(500))".format(table_name=table_info['table_name'])
cursor.execute(sql)
sql = "ALTER TABLE {table_name} ADD INDEX {table_name}_synonym_idx (synonym(500))".format(table_name=table_info['table_name'])
cursor.execute(sql)
sql = "ALTER TABLE {table_name} ADD INDEX {table_name}_sab_idx (sab(50))".format(table_name=table_info['table_name'])
cursor.execute(sql)
print ("Done creating database indices.")
except mysql.connector.Error as err:
if err.errno == errorcode.ER_ACCESS_DENIED_ERROR:
print("Something is wrong with your user name or password")
elif err.errno == errorcode.ER_BAD_DB_ERROR:
print("Database does not exist")
else:
print(err)
finally:
if connection != None:
connection.close()
def load_edge_list(config):
'''
Load all of the edge_list CSV files into a series of mysql tables.
param dict config: the configuration data for this application
'''
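    # Each entry in EDGE_LIST_FILE_TABLE_INFO is a dict with the keys read
    # below; a sketch with assumed file and table names:
    #   EDGE_LIST_FILE_TABLE_INFO = [
    #       {'table_name': 'edge_list_uberon', 'file_name': 'uberon_edge_list.csv', 'sab': 'UBERON'},
    #   ]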
edge_list_list = config['EDGE_LIST_FILE_TABLE_INFO']
connection = None
sql = ''
record_count = 0
try:
connection = mysql.connector.connect(
host=config['MYSQL_HOSTNAME'],
user=config['MYSQL_USERNAME'],
password=config['MYSQL_PASSWORD'],
database=config['MYSQL_DATABASE_NAME'],
charset='utf8mb4',collation='utf8mb4_bin')
cursor = connection.cursor(dictionary=True)
for table_data in edge_list_list:
# walk through the list of edge_list files found in the config file.
# for each entry, read the corresponding file and load it into the referenced
# mysql table.
table_name = table_data['table_name']
file_name = table_data['file_name']
sab = table_data['sab']
drop_table_sql = "DROP TABLE IF EXISTS {table_name}".format(table_name=table_name)
cursor.execute(drop_table_sql)
table_create_sql = """CREATE TABLE {table_name} (
id INT NOT NULL AUTO_INCREMENT,
subject VARCHAR(2048) NOT NULL,
predicate VARCHAR(2048) NOT NULL,
object VARCHAR(2048) NOT NULL,
sab VARCHAR(50),
PRIMARY KEY(id)
)""".format(table_name=table_name)
# this is the generic SQL to create the edge_list tables
cursor.execute(table_create_sql)
connection.commit()
print("Created table: " + table_name)
file_path = os.path.join(config['ONTOLOGY_SOURCE_DIR'], file_name)
load_file(config, file_path, table_name)
sql = "UPDATE {table_name} SET sab = '{sab}'".format(table_name=table_name,sab=sab)
# add the SAB for all records in table
cursor.execute(sql)
connection.commit()
except mysql.connector.Error as err:
print("Error in SQL: " + sql )
if err.errno == errorcode.ER_ACCESS_DENIED_ERROR:
print("Something is wrong with your user name or password")
elif err.errno == errorcode.ER_BAD_DB_ERROR:
print("Database does not exist")
else:
print(err)
connection.rollback()
finally:
if connection != None:
connection.close()
def load_synonym_list(config):
'''
Load all of the synonym CSV files into a series of mysql tables.
param dict config: the configuration data for this application
'''
if 'SYNONYM_LIST_FILE_TABLE_INFO' not in config:
#don't run this code if the synonym file is missing
return
synonym_list = config['SYNONYM_LIST_FILE_TABLE_INFO']
connection = None
sql = ''
record_count = 0
try:
connection = mysql.connector.connect(
host=config['MYSQL_HOSTNAME'],
user=config['MYSQL_USERNAME'],
password=config['MYSQL_PASSWORD'],
database=config['MYSQL_DATABASE_NAME'],
charset='utf8mb4',collation='utf8mb4_bin')
cursor = connection.cursor(dictionary=True)
for table_data in synonym_list:
# walk through the list of synonym files found in the config file.
# for each entry, read the corresponding file and load it into the referenced
# mysql table.
table_name = table_data['table_name']
file_name = table_data['file_name']
sab = table_data['sab']
drop_table_sql = "DROP TABLE IF EXISTS {table_name}".format(table_name=table_name)
cursor.execute(drop_table_sql)
table_create_sql = """CREATE TABLE {table_name} (
id INT NOT NULL AUTO_INCREMENT,
ontology_uri VARCHAR(2048) NOT NULL,
codeid VARCHAR(2048) NOT NULL,
synonym VARCHAR(2048) NOT NULL,
sab VARCHAR(50),
PRIMARY KEY(id)
)""".format(table_name=table_name)
# this is the generic SQL to create a synonym table
cursor.execute(table_create_sql)
connection.commit()
print("Created table: " + table_name)
file_path = os.path.join(config['ONTOLOGY_SOURCE_DIR'], file_name)
load_file(config, file_path, table_name)
sql = "UPDATE {table_name} SET sab = '{sab}'".format(table_name=table_name,sab=sab)
# add the SAB for all records in table
cursor.execute(sql)
connection.commit()
except mysql.connector.Error as err:
print("Error in SQL: " + sql )
if err.errno == errorcode.ER_ACCESS_DENIED_ERROR:
print("Something is wrong with your user name or password")
elif err.errno == errorcode.ER_BAD_DB_ERROR:
print("Database does not exist")
else:
print(err)
connection.rollback()
finally:
if connection != None:
connection.close()
def load_relations(config):
'''
Load all of the relations CSV files into a series of mysql tables.
param dict config: the configuration data for this application
'''
node_metadata_list = config['RELATIONS_FILE_TABLE_INFO']
connection = None
sql = ''
record_count = 0
try:
connection = mysql.connector.connect(
host=config['MYSQL_HOSTNAME'],
user=config['MYSQL_USERNAME'],
password=config['MYSQL_PASSWORD'],
database=config['MYSQL_DATABASE_NAME'],
charset='utf8mb4',collation='utf8mb4_bin')
cursor = connection.cursor(dictionary=True)
for table_data in node_metadata_list:
# walk through the list of relations files found in the config file.
# for each entry, read the corresponding file and load it into the referenced
# mysql table.
table_name = table_data['table_name']
file_name = table_data['file_name']
sab = table_data['sab']
drop_table_sql = "DROP TABLE IF EXISTS {table_name}".format(table_name=table_name)
cursor.execute(drop_table_sql)
table_create_sql = """CREATE TABLE {table_name} (
id INT NOT NULL AUTO_INCREMENT,
relation_id VARCHAR(2048) NOT NULL,
relation_label VARCHAR(2048) NOT NULL,
inverse_relation_label VARCHAR(2048),
PRIMARY KEY(id)
)""".format(table_name=table_name)
# this is the generic create relations SQL statement
cursor.execute(table_create_sql)
connection.commit()
print("Created table: " + table_name)
file_path = os.path.join(config['ONTOLOGY_SOURCE_DIR'], file_name)
load_file(config, file_path, table_name)
except mysql.connector.Error as err:
print("Error in SQL: " + sql )
if err.errno == errorcode.ER_ACCESS_DENIED_ERROR:
print("Something is wrong with your user name or password")
elif err.errno == errorcode.ER_BAD_DB_ERROR:
print("Database does not exist")
else:
print(err)
connection.rollback()
finally:
if connection != None:
connection.close()
def create_missing_codeids(config):
node_metadata_list = config['NODE_METADATA_FILE_TABLE_INFO']
connection = None
sql = ''
record_count = 0
try:
connection = mysql.connector.connect(
host=config['MYSQL_HOSTNAME'],
user=config['MYSQL_USERNAME'],
password=config['MYSQL_PASSWORD'],
database=config['MYSQL_DATABASE_NAME'],
charset='utf8mb4',collation='utf8mb4_bin')
cursor = connection.cursor(dictionary=True)
for table_data in node_metadata_list:
table_name = table_data['table_name']
sql = """UPDATE {table_name}
SET codeid = REPLACE(REPLACE(ontology_uri, 'http://purl.obolibrary.org/obo/',''), '_', ' ')
WHERE codeid IS NULL""".format(table_name=table_name)
# add a codeid for all records in table
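            # Worked example of the REPLACE above (illustrative URI):
            #   'http://purl.obolibrary.org/obo/UBERON_0001062' -> 'UBERON 0001062'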
cursor.execute(sql)
connection.commit()
except mysql.connector.Error as err:
print("Error in SQL: " + sql )
if err.errno == errorcode.ER_ACCESS_DENIED_ERROR:
print("Something is wrong with your user name or password")
elif err.errno == errorcode.ER_BAD_DB_ERROR:
print("Database does not exist")
else:
print(err)
connection.rollback()
finally:
if connection != None:
connection.close()
def fix_dbxrefs(config):
connection = None
sql = ''
record_count = 0
try:
connection = mysql.connector.connect(
host=config['MYSQL_HOSTNAME'],
user=config['MYSQL_USERNAME'],
password=config['MYSQL_PASSWORD'],
database=config['MYSQL_DATABASE_NAME'],
charset='utf8mb4',collation='utf8mb4_bin')
cursor = connection.cursor(dictionary=True)
table_name = 'dbxrefs'
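        # Worked example of the normalizations below (illustrative xref values):
        #   'ncit:c4872' -> 'NCIT:C4872' -> 'NCI:C4872' -> 'NCI C4872'
        #   'http://www.snomedbrowser.com/codes/details/254837009'
        #       -> 'SNOMEDCT_US:254837009' -> 'SNOMEDCT_US 254837009'
        #   'MeSH:D001943' -> 'MESH:D001943' -> 'MSH:D001943' -> 'MSH D001943'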
sql = """UPDATE {table_name}
SET xref = UPPER(xref)""".format(table_name=table_name)
# uppercase all dbxrefs data in table
cursor.execute(sql)
connection.commit()
sql = """UPDATE {table_name}
SET xref = REPLACE(xref, 'NCIT:', 'NCI:') WHERE xref LIKE 'NCIT:%'""".format(table_name=table_name)
# convert all the NCI codes
cursor.execute(sql)
connection.commit()
sql = """UPDATE {table_name}
SET xref = REPLACE(xref, 'HTTP://WWW.SNOMEDBROWSER.COM/CODES/DETAILS/', 'SNOMEDCT_US:') WHERE xref LIKE 'HTTP://WWW.SNOMEDBROWSER.COM/CODES/DETAILS/%'""".format(table_name=table_name)
# convert all the SNOMED codes
cursor.execute(sql)
connection.commit()
sql = """UPDATE {table_name}
SET xref = REPLACE(xref, 'MESH:', 'MSH:') WHERE xref LIKE 'MESH:%'
AND instr(xref, 'MESH:D') > 0
AND instr(xref, 'MESH:D24') = 0""".format(table_name=table_name)
# convert all the MeSH codes
cursor.execute(sql)
connection.commit()
sql = """UPDATE {table_name}
SET xref = REPLACE(xref, ':', ' ')""".format(table_name=table_name)
# replace all remaining colons with spaces dbxrefs data in table
cursor.execute(sql)
connection.commit()
except mysql.connector.Error as err:
print("Error in SQL: " + sql )
if err.errno == errorcode.ER_ACCESS_DENIED_ERROR:
print("Something is wrong with your user name or password")
elif err.errno == errorcode.ER_BAD_DB_ERROR:
print("Database does not exist")
else:
print(err)
connection.rollback()
finally:
if connection != None:
connection.close()
def load_node_metadata(config):
'''
Load all of the node_metadata CSV files into a series of mysql tables.
param dict config: the configuration data for this application
'''
node_metadata_list = config['NODE_METADATA_FILE_TABLE_INFO']
connection = None
sql = ''
record_count = 0
try:
connection = mysql.connector.connect(
host=config['MYSQL_HOSTNAME'],
user=config['MYSQL_USERNAME'],
password=config['MYSQL_PASSWORD'],
database=config['MYSQL_DATABASE_NAME'],
charset='utf8mb4',collation='utf8mb4_bin')
cursor = connection.cursor(dictionary=True)
for table_data in node_metadata_list:
# walk through the list of node_metadata files found in the config file.
# for each entry, read the corresponding file and load it into the referenced
# mysql table.
table_name = table_data['table_name']
file_name = table_data['file_name']
sab = table_data['sab']
drop_table_sql = "DROP TABLE IF EXISTS {table_name}".format(table_name=table_name)
cursor.execute(drop_table_sql)
table_create_sql = """CREATE TABLE {table_name} (
id INT NOT NULL AUTO_INCREMENT,
ontology_uri VARCHAR(2048) NOT NULL,
codeid VARCHAR(2048),
node_label VARCHAR(2048) NOT NULL,
node_definition VARCHAR(2048) NOT NULL,
sab VARCHAR(50),
PRIMARY KEY(id)
)""".format(table_name=table_name)
# this SQL creates the generic node_metadata table
cursor.execute(table_create_sql)
connection.commit()
print("Created table: " + table_name)
file_path = os.path.join(config['ONTOLOGY_SOURCE_DIR'], file_name)
load_file(config, file_path, table_name)
sql = "UPDATE {table_name} SET sab = '{sab}'".format(table_name=table_name,sab=sab)
# add the SAB for all records in table
cursor.execute(sql)
connection.commit()
except mysql.connector.Error as err:
print("Error in SQL: " + sql )
if err.errno == errorcode.ER_ACCESS_DENIED_ERROR:
print("Something is wrong with your user name or password")
elif err.errno == errorcode.ER_BAD_DB_ERROR:
print("Database does not exist")
else:
print(err)
connection.rollback()
finally:
if connection != None:
connection.close()
def load_dbxref(config):
'''
Load all of the dbxref CSV files into a series of mysql tables.
param dict config: the configuration data for this application
'''
dbxref_list = config['DBXREF_FILE_TABLE_INFO']
connection = None
sql = ''
record_count = 0
try:
connection = mysql.connector.connect(
host=config['MYSQL_HOSTNAME'],
user=config['MYSQL_USERNAME'],
password=config['MYSQL_PASSWORD'],
database=config['MYSQL_DATABASE_NAME'],
charset='utf8mb4',collation='utf8mb4_bin')
cursor = connection.cursor(dictionary=True)
for table_data in dbxref_list:
# walk through the list of dbxref files found in the config file.
# for each entry, read the corresponding file and load it into the referenced
# mysql table.
table_name = table_data['table_name']
file_name = table_data['file_name']
sab = table_data['sab']
drop_table_sql = "DROP TABLE IF EXISTS {table_name}".format(table_name=table_name)
cursor.execute(drop_table_sql)
table_create_sql = """CREATE TABLE {table_name} (
id INT NOT NULL AUTO_INCREMENT,
ontology_uri VARCHAR(2048) NOT NULL,
dbxrefs VARCHAR(5120) NOT NULL,
sab VARCHAR(50),
PRIMARY KEY(id)
)""".format(table_name=table_name)
# this is the SQL to create a generic dbxref table
cursor.execute(table_create_sql)
connection.commit()
print("Created table: " + table_name)
file_path = os.path.join(config['ONTOLOGY_SOURCE_DIR'], file_name)
load_file(config, file_path, table_name)
except mysql.connector.Error as err:
print("Error in SQL: " + sql )
if err.errno == errorcode.ER_ACCESS_DENIED_ERROR:
print("Something is wrong with your user name or password")
elif err.errno == errorcode.ER_BAD_DB_ERROR:
print("Database does not exist")
else:
print(err)
connection.rollback()
finally:
if connection != None:
connection.close()
def load_umls_codes(config):
file_path = os.path.join(config['UMLS_SOURCE_DIR'],'CODEs.csv')
table_name = 'umls_codes'
load_file(config, file_path, table_name)
def load_umls_defs(config):
file_path = os.path.join(config['UMLS_SOURCE_DIR'],'DEFs.csv')
table_name = 'umls_defs'
load_file(config, file_path, table_name)
def load_umls_suis(config):
file_path = os.path.join(config['UMLS_SOURCE_DIR'],'SUIs.csv')
table_name = 'umls_suis'
load_file(config, file_path, table_name)
def load_umls_cuis(config):
file_path = os.path.join(config['UMLS_SOURCE_DIR'],'CUIs.csv')
table_name = 'umls_cuis'
load_file(config, file_path, table_name)
def load_umls_tuis(config):
file_path = os.path.join(config['UMLS_SOURCE_DIR'],'TUIs.csv')
table_name = 'umls_tuis'
load_file(config, file_path, table_name)
def load_umls_code_suis(config):
file_path = os.path.join(config['UMLS_SOURCE_DIR'],'CODE-SUIs.csv')
table_name = 'umls_code_suis'
load_file(config, file_path, table_name)
def load_umls_cui_codes(config):
file_path = os.path.join(config['UMLS_SOURCE_DIR'],'CUI-CODEs.csv')
table_name = 'umls_cui_codes'
load_file(config, file_path, table_name)
def load_umls_cui_cuis(config):
file_path = os.path.join(config['UMLS_SOURCE_DIR'],'CUI-CUIs.csv')
table_name = 'umls_cui_cuis'
load_file(config, file_path, table_name)
def load_umls_cui_suis(config):
file_path = os.path.join(config['UMLS_SOURCE_DIR'],'CUI-SUIs.csv')
table_name = 'umls_cui_suis'
load_file(config, file_path, table_name)
def load_umls_cui_tuis(config):
file_path = os.path.join(config['UMLS_SOURCE_DIR'],'CUI-TUIs.csv')
table_name = 'umls_cui_tuis'
load_file(config, file_path, table_name)
def load_umls_def_rel(config):
file_path = os.path.join(config['UMLS_SOURCE_DIR'],'DEFrel.csv')
table_name = 'umls_def_rel'
load_file(config, file_path, table_name)
def load_umls_tui_rel(config):
file_path = os.path.join(config['UMLS_SOURCE_DIR'],'TUIrel.csv')
table_name = 'umls_tui_rel'
load_file(config, file_path, table_name)
def build_xref_table(config):
'''
Build the dbxrefs table by reading the ontology_dbxref table. The ontology_dbxref table contains a column dbxrefs.
This method takes dbxrefs, a pipe-delimited list of xrefs, and splits it into separate entries (ex: xref1|xref2|xref3).
Each individual xref becomes a new row in the dbxrefs table.
:param dict config: The configuration settings
'''
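    # For example, a single source row such as
    #   ('http://purl.obolibrary.org/obo/UBERON_0002107', 'UMLS:C0023884|MSH:D008099')
    # (illustrative values) produces two dbxrefs rows, one per pipe-separated xref.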
dbxref_list = config['DBXREF_FILE_TABLE_INFO']
connection = None
sql = ''
try:
connection = mysql.connector.connect(
host=config['MYSQL_HOSTNAME'],
user=config['MYSQL_USERNAME'],
password=config['MYSQL_PASSWORD'],
database=config['MYSQL_DATABASE_NAME'],
charset='utf8mb4',collation='utf8mb4_bin')
cursor = connection.cursor(dictionary=True)
drop_table_sql = "DROP TABLE IF EXISTS dbxrefs"
cursor.execute(drop_table_sql)
create_table_sql = """CREATE TABLE dbxrefs (
id INT NOT NULL AUTO_INCREMENT,
ontology_uri VARCHAR(2048) NOT NULL,
xref VARCHAR(2048) NOT NULL,
PRIMARY KEY(id)
);"""
cursor.execute(create_table_sql)
for table_data in dbxref_list:
table_name = table_data['table_name']
sab = table_data['sab']
cursor.execute("SELECT ontology_uri, dbxrefs FROM {table_name}".format(table_name=table_name))
print("Loading {sab} data into table {table_name}".format(table_name="dbxrefs", sab=sab), end='', flush=True)
result = cursor.fetchall()
record_count = 0
for row in result:
ontology_uri = row['ontology_uri']
all_xrefs = row['dbxrefs']
xref_list = all_xrefs.split('|')
# For each row in the ontology_dbxref table, split the dbxrefs column into a list
for ref in xref_list:
# for each xref in the list, insert a new row into the dbxrefs table
ref = ref.replace("'","''")
sql = "INSERT INTO dbxrefs (ontology_uri, xref) VALUES ('{ontology_uri}','{ref}')".format(ontology_uri=ontology_uri, ref=ref)
cursor.execute(sql)
record_count = record_count + 1
#commit every 10,000 records
if record_count % 10000 == 0:
print('.', end='', flush=True)
connection.commit()
print('') # do this to disable the 'end' flag in prior print statements
connection.commit()
print ("Done loading the {table_name} table.".format(table_name="dbxrefs"))
except mysql.connector.Error as err:
print("Error in SQL: " + sql )
if err.errno == errorcode.ER_ACCESS_DENIED_ERROR:
print("Something is wrong with your user name or password")
elif err.errno == errorcode.ER_BAD_DB_ERROR:
print("Database does not exist")
else:
print(err)
connection.rollback()
finally:
if connection != None:
connection.close()
def load_file(config, file_path, table_name):
'''
Load a CSV or tab-delimited file into a mysql table.
param dict config: the configuration data for this application
param str file_path: the full path to the CSV or tab-delimited file that will be loaded
param str table_name: the name of the table in the database that will contain the data from file_path
'''
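    # Example call (a sketch; the path is an assumption):
    #   load_file(config, '/data/umls/CODEs.csv', 'umls_codes')
    # The file's column headers must line up with the target table's column
    # names, because they are used to build the INSERT statement below.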
connection = None
sql = ''
try:
connection = mysql.connector.connect(
host=config['MYSQL_HOSTNAME'],
user=config['MYSQL_USERNAME'],
password=config['MYSQL_PASSWORD'],
database=config['MYSQL_DATABASE_NAME'],
charset='utf8mb4',collation='utf8mb4_bin')
cursor = connection.cursor()
record_count = 0
with open(file_path) as csvfile:
myCSVReader = None
if file_path.endswith('.txt'):
# this code determines whether we are loading a CSV or tab-delimited file
myCSVReader = csv.DictReader(csvfile, delimiter='\t')
else:
myCSVReader = csv.DictReader(csvfile)
field_names = myCSVReader.fieldnames
# the following statements remove some extra columns from the UMLS exported files
if 'name_lc' in field_names:
field_names.remove('name_lc')
if 'REL' in field_names:
field_names.remove('REL')
if 'RELA' in field_names:
field_names.remove('RELA')
if (file_path.endswith('CUI-SUIs.csv') or
file_path.endswith('CUI-TUIs.csv') or
file_path.endswith('DEFrel.csv') or
file_path.endswith('TUIrel.csv')):
# add a field for type if the UMLS file contains relationship data
field_names.append('type')
            field_list_str = ', '.join(field_names)
# the next two lines "cleanup" the column names from the file into a SQL compliant column name
field_list_str = field_list_str.replace(':ID', '')
field_list_str = field_list_str.replace(':', '')
value_list_str = ''
for field in field_names:
# Build a list of column names for the insert SQL statement
value_list_str += '%({field})s, '.format(field=field)
value_list_str = value_list_str[:-2]
sql = """INSERT INTO {table_name}({field_list})
                VALUES ({value_list})""".format(table_name=table_name, field_list=field_list_str, value_list=value_list_str)
print("Loading data from {file_name} into table {table_name}".format(file_name=file_path, table_name=table_name), end='', flush=True)
for row in myCSVReader:
# for some of the files, specify the 'type' column
if file_path.endswith('CUI-SUIs.csv'):
row['type'] = 'PREF_TERM'
if file_path.endswith('CUI-TUIs.csv'):
row['type'] = 'STY'
if file_path.endswith('DEFrel.csv'):
row['type'] = 'DEF'
if file_path.endswith('TUIrel.csv'):
row['type'] = 'ISA_STY'
# use row directly when csv headers match column names.
# remove data from a row if the column header is None
                if None in row.keys():
                    row.pop(None)
cursor.execute(sql, row)
record_count = record_count + 1
#commit every 200,000 records
if record_count % 200000 == 0:
print('.', end='', flush=True)
connection.commit()
print('') # do this to disable the 'end' flag in prior print statements
connection.commit()
print ("Done loading the {table_name} table.".format(table_name=table_name))
except mysql.connector.Error as err:
print("Error in SQL: " + sql )
if err.errno == errorcode.ER_ACCESS_DENIED_ERROR:
print("Something is wrong with your user name or password")
elif err.errno == errorcode.ER_BAD_DB_ERROR:
print("Database does not exist")
else:
print(err)
connection.rollback()
finally:
if connection != None:
connection.close()
def extract_non_umls(config):
load_node_metadata(config)
load_relations(config)
load_dbxref(config)
load_edge_list(config)
load_synonym_list(config)
# This code is temporary. It should be moved to a pre-processing step
create_missing_codeids(config)
# END This code is temporary. It should be moved to a pre-processing step
def extract(config):
'''
The extract method loads the CSV and tab-delimited files into mysql tables mirroring their file structure.
param dict config: The configuration data for this application
'''
create_database(config)
load_node_metadata(config)
load_relations(config)
load_dbxref(config)
load_edge_list(config)
load_synonym_list(config)
load_umls_codes(config)
load_umls_defs(config)
load_umls_suis(config)
load_umls_cuis(config)
load_umls_tuis(config)
load_umls_cui_codes(config)
load_umls_code_suis(config)
load_umls_cui_cuis(config)
load_umls_cui_suis(config)
load_umls_cui_tuis(config)
load_umls_def_rel(config)
load_umls_tui_rel(config)
# This code is temporary. It should be moved to a pre-processing step
create_missing_codeids(config)
# END This code is temporary. It should be moved to a pre-processing step
create_indices(config)
print("Done with extract process")
def build_ambiguous_codes_table(config):
'''
Construct a table called temp_ambiguous_codes (ontology_uri, codeid). This table contains a subset of
the codes that map to more than one CUI. These codes are "ambiguous" because we cannot use them in our automated
processing. Our code cannot decide which of the CUIs should be assigned the preferred term from the data we are loading.
Also, these connections tend to conflate items (ex: left hand, right hand, and hand are all the same).
We will use this table to "filter out" some of the data during the ETL process.
param dict config: The configuration data for this application.
'''
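    # For example (illustrative), if the code 'FMA 24890' maps to two different
    # CUIs, the pair (ontology_uri, 'FMA 24890') lands in this table and is
    # excluded when the primary ontology_uri-to-CUI map is built later.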
connection = None
sql = ''
try:
connection = mysql.connector.connect(
host=config['MYSQL_HOSTNAME'],
user=config['MYSQL_USERNAME'],
password=config['MYSQL_PASSWORD'],
database=config['MYSQL_DATABASE_NAME'],
charset='utf8mb4',collation='utf8mb4_bin')
cursor = connection.cursor(dictionary=True)
drop_table_sql = "DROP TABLE IF EXISTS temp_ambiguous_codes"
cursor.execute(drop_table_sql)
create_table_sql = """CREATE TABLE temp_ambiguous_codes (
id INT NOT NULL AUTO_INCREMENT,
ontology_uri VARCHAR(2048) NOT NULL,
codeid VARCHAR(2048),
PRIMARY KEY(id)
);"""
cursor.execute(create_table_sql)
print("Created table temp_ambiguous_codes")
sql = """INSERT INTO temp_ambiguous_codes (ontology_uri, codeid)
SELECT DISTINCT ontology_uri, xref as codeid
FROM dbxrefs, umls_cui_codes as rel
WHERE xref = rel.end_id
GROUP BY ontology_uri, xref
HAVING COUNT(DISTINCT rel.start_id) > 1"""
"""This query builds the temp_ambiguous_codes table. It inserts codes with
more than 1 CUI into the temp_ambiguous_codes table.
"""
cursor.execute(sql)
connection.commit()
print("Loaded codes into table temp_ambiguous_codes")
except mysql.connector.Error as err:
print("Error in SQL: " + sql )
if err.errno == errorcode.ER_ACCESS_DENIED_ERROR:
print("Something is wrong with your user name or password")
elif err.errno == errorcode.ER_BAD_DB_ERROR:
print("Database does not exist")
else:
print(err)
connection.rollback()
finally:
if connection != None:
connection.close()
def temp_build_ccf_code_cui_table(config):
connection = None
sql = ''
try:
connection = mysql.connector.connect(
host=config['MYSQL_HOSTNAME'],
user=config['MYSQL_USERNAME'],
password=config['MYSQL_PASSWORD'],
database=config['MYSQL_DATABASE_NAME'],
charset='utf8mb4',collation='utf8mb4_bin')
cursor = connection.cursor(dictionary=True)
drop_table_sql = "DROP TABLE IF EXISTS temp_ccf_cui_codes"
cursor.execute(drop_table_sql)
create_table_sql = """CREATE TABLE temp_ccf_cui_codes (
id INT NOT NULL AUTO_INCREMENT,
codeid VARCHAR(2048) NOT NULL,
cui VARCHAR(2048),
PRIMARY KEY(id)
);"""
cursor.execute(create_table_sql)
print("Created table temp_ccf_cui_codes")
sql = "ALTER TABLE temp_ccf_cui_codes ADD INDEX temp_ccf_cui_codes_codeid(codeid(50))"
cursor.execute(sql)
sql = "ALTER TABLE temp_ccf_cui_codes ADD INDEX temp_ccf_cui_codes_cui_idx(cui(50))"
cursor.execute(sql)
cursor = connection.cursor()
record_count = 0
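        # NOTE: this path is hard-coded (the function is marked as temporary);
        # the other loaders take their paths from the config instead.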
file_path = '/home/chb69/umls_data/ccf/CCF-CUI.csv'
with open(file_path) as csvfile:
myCSVReader = None
if file_path.endswith('.txt'):
# this code determines whether we are loading a CSV or tab-delimited file
myCSVReader = csv.DictReader(csvfile, delimiter='\t')
else:
myCSVReader = csv.DictReader(csvfile)
field_names = myCSVReader.fieldnames
#a.name,b.CodeID,c.CUI,d.name
print("Loading data from {file_name} into table {table_name}".format(file_name=file_path, table_name='temp_ccf_cui_codes'), end='', flush=True)
for row in myCSVReader:
sql = "INSERT INTO temp_ccf_cui_codes (codeid, cui) VALUES ('{codeid}','{cui}')".format(codeid=row['b.CodeID'],cui=row['c.CUI'])
cursor.execute(sql)
connection.commit()
print ("Done loading the {table_name} table.".format(table_name="temp_ccf_cui_codes"))
except mysql.connector.Error as err:
print("Error in SQL: " + sql )
if err.errno == errorcode.ER_ACCESS_DENIED_ERROR:
print("Something is wrong with your user name or password")
elif err.errno == errorcode.ER_BAD_DB_ERROR:
print("Database does not exist")
else:
print(err)
connection.rollback()
finally:
if connection != None:
connection.close()
def build_ontology_uri_to_umls_map_table(config):
'''
Construct a table called ontology_uri_map (ontology_uri, cui, codeid, type, sab). This table is a mapping
    between the dbxref data and the UMLS data. The table is built from dbxrefs and cui_codes (UMLS)
tables. The ontology_uri is the primary key within the dbxref data. The cui and codeid are the main keys
within the UMLS data. Each record in ontology_uri_map allows one to move between both systems.
param dict config: The configuration data for this application.
'''
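    # Sketch of the two kinds of rows created below (illustrative values):
    #   direct UMLS xref:  ontology_uri, cui='C0023884', codeid=NULL
    #   code-based xref:   ontology_uri, codeid='FMA 7195', cui taken from
    #                      umls_cui_codes, type='PT', sab='FMA'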
connection = None
sql = ''
try:
connection = mysql.connector.connect(
host=config['MYSQL_HOSTNAME'],
user=config['MYSQL_USERNAME'],
password=config['MYSQL_PASSWORD'],
database=config['MYSQL_DATABASE_NAME'],
charset='utf8mb4',collation='utf8mb4_bin')
cursor = connection.cursor(dictionary=True)
drop_table_sql = "DROP TABLE IF EXISTS ontology_uri_map"
cursor.execute(drop_table_sql)
create_table_sql = """CREATE TABLE ontology_uri_map (
id INT NOT NULL AUTO_INCREMENT,
ontology_uri VARCHAR(2048) NOT NULL,
cui VARCHAR(2048),
codeid VARCHAR(2048),
type VARCHAR(50),
mapping_type VARCHAR(50),
sab VARCHAR(50),
PRIMARY KEY(id)
);"""
cursor.execute(create_table_sql)
print("Created table ontology_uri_map")
sql = """INSERT INTO ontology_uri_map (ontology_uri, cui)
SELECT DISTINCT ontology_uri, substr(xref,6) as CUI FROM dbxrefs
WHERE xref LIKE 'UMLS%'"""
# This query loads all the ontology_uri's that map directly to a UMLS CUI according to the dbxrefs table
# these records will have their codeid column set to NULL
cursor.execute(sql)
connection.commit()
print("Loaded UMLS map into table ontology_uri_map")
sql = """INSERT INTO ontology_uri_map (ontology_uri, codeid, cui, type, sab)
SELECT DISTINCT ontology_uri, xref as codeid, rel.start_id as cui, 'PT' as type, substring_index(xref,' ', 1) as sab
FROM dbxrefs, umls_cui_codes as rel
WHERE xref = rel.end_id
AND (ontology_uri, xref) NOT IN (SELECT ontology_uri,codeid FROM temp_ambiguous_codes)"""
# This query loads all the ontology_uri's that map to a code according to the dbxrefs table
cursor.execute(sql)
connection.commit()
print("Loaded map into table ontology_uri_map")
# add indices after loading to speed up the load
sql = "ALTER TABLE ontology_uri_map ADD INDEX ontology_uri_map_ontology_uri_idx(ontology_uri(50))"
cursor.execute(sql)
sql = "ALTER TABLE ontology_uri_map ADD INDEX ontology_uri_map_cui_idx(cui(50))"
cursor.execute(sql)
sql = "ALTER TABLE ontology_uri_map ADD INDEX ontology_uri_map_codeid_idx(codeid(50))"
cursor.execute(sql)
sql = "ALTER TABLE ontology_uri_map ADD INDEX ontology_uri_map_type_idx(type(50))"
cursor.execute(sql)
sql = "ALTER TABLE ontology_uri_map ADD INDEX ontology_uri_map_mapping_type_idx(mapping_type(50))"
cursor.execute(sql)
sql = "ALTER TABLE ontology_uri_map ADD INDEX ontology_uri_map_sab_idx(sab(50))"
cursor.execute(sql)
print("Built indices for table ontology_uri_map")
sql = """UPDATE ontology_uri_map SET mapping_type = 'PRIMARY' where codeid is null AND ontology_uri IN (
SELECT ontology_uri from (SELECT ontology_uri FROM ontology_uri_map
where codeid is null
group by ontology_uri
having count(distinct cui) = 1) as table_one)"""
        # This query marks mapping_type = 'PRIMARY' for every ontology_uri that maps to exactly one CUI
cursor.execute(sql)
connection.commit()
print("Loaded PRIMARY CUI map data into table ontology_uri_map")
except mysql.connector.Error as err:
print("Error in SQL: " + sql )
if err.errno == errorcode.ER_ACCESS_DENIED_ERROR:
print("Something is wrong with your user name or password")
elif err.errno == errorcode.ER_BAD_DB_ERROR:
print("Database does not exist")
else:
print(err)
connection.rollback()
finally:
if connection != None:
connection.close()
def build_relations_table(config):
'''
Create a new table called relations. This table will contains a superset of all relations
loaded so far. After this table is loaded, UPDATE it to add the inverse relations (if necessary).
param dict config: the configuration data for this application
'''
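    # For example (illustrative), a relations row with relation_label
    # 'develops from' and a NULL inverse_relation_label gets its inverse filled
    # in as 'inverse develops from' by the UPDATE at the end of this method.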
relations_table_info = config['RELATIONS_FILE_TABLE_INFO']
connection = None
sql = ''
try:
connection = mysql.connector.connect(
host=config['MYSQL_HOSTNAME'],
user=config['MYSQL_USERNAME'],
password=config['MYSQL_PASSWORD'],
database=config['MYSQL_DATABASE_NAME'],
charset='utf8mb4',collation='utf8mb4_bin')
cursor = connection.cursor(dictionary=True)
drop_table_sql = "DROP TABLE IF EXISTS relations"
cursor.execute(drop_table_sql)
create_table_sql = """CREATE TABLE relations (
id INT NOT NULL AUTO_INCREMENT,
relation_id VARCHAR(2048) NOT NULL,
relation_label VARCHAR(2048) NOT NULL,
inverse_relation_label VARCHAR(2048),
sab VARCHAR(50),
PRIMARY KEY(id));"""
# step 1: create the new relations table
cursor.execute(create_table_sql)
print("Created table relations")
for table_info in relations_table_info:
# step 2: for each entry in the RELATIONS_FILE_TABLE_INFO config entry,
# insert the data from the table referenced by RELATIONS_FILE_TABLE_INFO into the relations table
table_name = table_info['table_name']
sab = table_info['sab']
sql = """INSERT INTO relations (relation_id, relation_label, inverse_relation_label, sab)
SELECT relation_id, relation_label, inverse_relation_label, '{sab}' FROM {table_name}""".format(table_name=table_name, sab=sab)
cursor.execute(sql)
connection.commit()
print("Loaded {sab} relations data into table relations".format(sab=sab))
sql = """UPDATE relations r1
LEFT JOIN relations r2
ON r1.relation_id = r2.relation_id
SET r1.inverse_relation_label = CONCAT('inverse ', r2.relation_label)
WHERE r2.inverse_relation_label IS NULL"""
"""After the 'normal' or 'forward' relations are loaded, find any records in the relations table that
have inverse_relation_label set to NULL. For each record missing an inverse_relation_label, create an
inverse_relation_label equal to 'inverse ' + relation_label
"""
cursor.execute(sql)
connection.commit()
print("Added inverse relations for {sab} data into table relations".format(sab=sab))
except mysql.connector.Error as err:
print("Error in SQL: " + sql )
if err.errno == errorcode.ER_ACCESS_DENIED_ERROR:
print("Something is wrong with your user name or password")
elif err.errno == errorcode.ER_BAD_DB_ERROR:
print("Database does not exist")
else:
print(err)
connection.rollback()
finally:
if connection != None:
connection.close()
def insert_new_cui_cui_relations(config):
'''
Extract all relationships between two UMLS CUIs found in the PheKnowLator data. This method only
inserts data into the cui_cuis table. It does not create new CUIs. It adds both the "regular" relations
plus their inverse relations.
param dict config: The configuration data for this application.
'''
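    # Sketch of the outcome (illustrative values): an edge_list row whose
    # predicate resolves to relation_label 'part of' becomes a umls_cui_cuis
    # row (start_id=<subject CUI>, type='part_of', end_id=<object CUI>), plus
    # the inverse row with the inverse relation label when one exists.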
connection = None
sql = ''
try:
connection = mysql.connector.connect(
host=config['MYSQL_HOSTNAME'],
user=config['MYSQL_USERNAME'],
password=config['MYSQL_PASSWORD'],
database=config['MYSQL_DATABASE_NAME'],
charset='utf8mb4',collation='utf8mb4_bin')
cursor = connection.cursor(dictionary=True)
edge_list_file_info = config['EDGE_LIST_FILE_TABLE_INFO']
for edge_list_info in edge_list_file_info:
# walk through all the existing edge_list tables and load the data into the
# umls_cui_cuis table
sab = edge_list_info['sab']
table_name = edge_list_info['table_name']
sql = """DELETE FROM umls_cui_cuis WHERE sab = '{sab}'""".format(sab=sab)
cursor.execute(sql)
connection.commit()
print('')
print("Deleted {sab} map from table umls_cui_cuis".format(sab=sab))
sql = """INSERT INTO umls_cui_cuis (start_id, type, end_id, sab)
            SELECT DISTINCT subject_table.cui as start_id, lower(replace(rel.relation_label,' ','_')) as type, object_table.cui as end_id, '{sab}' as sab
FROM {table_name} el, relations rel, ontology_uri_map subject_table, ontology_uri_map object_table
WHERE rel.relation_id = el.predicate
AND subject_table.ontology_uri = el.subject
AND subject_table.mapping_type = 'PRIMARY'
AND object_table.ontology_uri = el.object
AND object_table.mapping_type = 'PRIMARY'
AND subject_table.cui != object_table.cui
AND el.sab = '{sab}'""".format(table_name=table_name,sab=sab)
"""
This query needs some explanation. Basically, the edge_list table is the central table in the query. We use the edge_list
table structure (subject, predicate, object) to find records where the edge_list contains relationships between
the subject CUI and the object CUI. This record will become a new relationship between 2 CUIs. Lastly, we map from the
edge_list relation_id to the "English" relation_label. We replace the spaces in the relation_label with underscores ('_').
This becomes the label for the relationship in the CUI to CUI relationship.
"""
cursor.execute(sql)
connection.commit()
print("Loaded {sab} map into table umls_cui_cuis".format(sab=sab))
sql = """INSERT INTO umls_cui_cuis (start_id, type, end_id, sab)
            SELECT DISTINCT object_table.cui as start_id, lower(replace(rel.inverse_relation_label,' ','_')) as type, subject_table.cui as end_id, '{sab}' as sab
FROM {table_name} el, relations rel, ontology_uri_map subject_table, ontology_uri_map object_table
WHERE rel.relation_id = el.predicate
AND subject_table.ontology_uri = el.subject
AND subject_table.mapping_type = 'PRIMARY'
AND object_table.ontology_uri = el.object
AND object_table.mapping_type = 'PRIMARY'
AND subject_table.cui != object_table.cui
AND rel.inverse_relation_label IS NOT NULL
AND el.sab = '{sab}'""".format(table_name=table_name,sab=sab)
"""
This query is basically the same as the initial query above, but there are two important differences:
- the relationship used is the inverse_relation_label from the pkl_relations table.
- the subject and object are swapped since we are creating the inverse relationship
"""
cursor.execute(sql)
connection.commit()
print("Loaded {sab} inverse relation map into table umls_cui_cuis".format(sab=sab))
except mysql.connector.Error as err:
print("Error in SQL: " + sql )
if err.errno == errorcode.ER_ACCESS_DENIED_ERROR:
print("Something is wrong with your user name or password")
elif err.errno == errorcode.ER_BAD_DB_ERROR:
print("Database does not exist")
else:
print(err)
connection.rollback()
finally:
        if connection is not None:
connection.close()
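# The two INSERT ... SELECT statements above build the relationship type in SQL by
# lower-casing the relation label and replacing spaces with underscores. The helper
# below is an illustrative, pure-Python mirror of that expression (it is not called
# by the pipeline; it only makes lower(replace(label,' ','_')) easy to verify in
# isolation).
def normalize_relation_label(label):
    '''
    Mirror of the SQL expression lower(replace(label,' ','_')).
    e.g. normalize_relation_label('has part') returns 'has_part'
    param str label: A relation label such as 'has part'.
    '''
    return label.lower().replace(' ', '_')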
def insert_new_terms(config):
    '''
    This method creates new labels (Term nodes) in the graph for each node_metadata table.
    Adding a Term node affects several tables: suis, code_suis, cui_suis, and new_sui_map. The new_sui_map
    table does not represent data in the graph; it merely tracks minted SUIs between application runs to
    avoid changing a SUI and losing its connection to the UMLS codes.
    param dict config: The configuration data for this application.
    '''
connection = None
sql = ''
try:
connection = mysql.connector.connect(
host=config['MYSQL_HOSTNAME'],
user=config['MYSQL_USERNAME'],
password=config['MYSQL_PASSWORD'],
database=config['MYSQL_DATABASE_NAME'],
charset='utf8mb4',collation='utf8mb4_bin')
cursor = connection.cursor(dictionary=True)
truncate_table_sql = "TRUNCATE suis_updated"
cursor.execute(truncate_table_sql)
connection.commit()
truncate_table_sql = "TRUNCATE code_suis_updated"
cursor.execute(truncate_table_sql)
connection.commit()
truncate_table_sql = "TRUNCATE new_sui_map"
cursor.execute(truncate_table_sql)
connection.commit()
truncate_table_sql = """TRUNCATE cui_suis_updated"""
cursor.execute(truncate_table_sql)
connection.commit()
print ("Copying cui_suis INTO cui_suis_updated")
sql = """INSERT INTO cui_suis_updated SELECT * FROM umls_cui_suis"""
cursor.execute(sql)
connection.commit()
sql = "DROP INDEX suis_updated_sui_idx ON suis_updated"
cursor.execute(sql)
connection.commit()
sql = "DROP INDEX suis_updated_name_idx ON suis_updated"
cursor.execute(sql)
connection.commit()
print ("Copying suis INTO suis_updated")
sql = """INSERT INTO suis_updated SELECT * FROM umls_suis"""
cursor.execute(sql)
connection.commit()
sql = "ALTER TABLE suis_updated ADD INDEX suis_updated_sui_idx (sui(100))"
cursor.execute(sql)
connection.commit()
sql = "ALTER TABLE suis_updated ADD INDEX suis_updated_name_idx (name(500))"
cursor.execute(sql)
connection.commit()
sql = "DROP INDEX code_suis_updated_start_id_idx ON code_suis_updated"
cursor.execute(sql)
connection.commit()
sql = "DROP INDEX code_suis_updated_end_id_idx ON code_suis_updated"
cursor.execute(sql)
connection.commit()
sql = "DROP INDEX code_suis_updated_type_idx ON code_suis_updated"
cursor.execute(sql)
connection.commit()
sql = "DROP INDEX code_suis_updated_cui_idx ON code_suis_updated"
cursor.execute(sql)
connection.commit()
print ("Copying code_suis INTO code_suis_updated")
sql = """INSERT INTO code_suis_updated SELECT * FROM umls_code_suis"""
cursor.execute(sql)
connection.commit()
sql = "ALTER TABLE code_suis_updated ADD INDEX code_suis_updated_start_id_idx (start_id(100))"
cursor.execute(sql)
connection.commit()
sql = "ALTER TABLE code_suis_updated ADD INDEX code_suis_updated_end_id_idx (end_id(100))"
cursor.execute(sql)
connection.commit()
sql = "ALTER TABLE code_suis_updated ADD INDEX code_suis_updated_type_idx (type(100))"
cursor.execute(sql)
connection.commit()
sql = "ALTER TABLE code_suis_updated ADD INDEX code_suis_updated_cui_idx (cui(100))"
cursor.execute(sql)
connection.commit()
node_metadata_info = config['NODE_METADATA_FILE_TABLE_INFO']
record_count = 1 # start SUI numbering at one
for table_info in node_metadata_info:
# for each entry in the NODE_METADATA_FILE_TABLE_INFO config entry, query the node_metadata
# table and find all missing terms. Then add the missing terms to the appropriate database tables
table_name = table_info['table_name']
sab = table_info['sab']
dict_new_suis = {}
""" keep an in-memory list of the new SUIs generated
The SQL includes a list of existing SUIs when it is initially executed.
During execution, new SUIs are created but they are missing from the ones
retrieved by the SQL (i.e. a "dirty read"). Therefore, the new SUIs are not found and will
create duplicate SUIs with the same labels. This in-memory list provides
lookup services to avoid recreating the labels."""
sql = """SELECT oum.ontology_uri as ontology_uri, oum.cui AS cui, IFNULL(oum.codeid,nm.codeid) AS codeid, nm.node_label AS label, '{sab}' as sab, su.sui AS sui, 'PT' AS term_type
FROM {table_name} nm
INNER JOIN ontology_uri_map oum
ON nm.ontology_uri = oum.ontology_uri
AND oum.mapping_type = 'PRIMARY'
LEFT OUTER JOIN suis_updated su
ON nm.node_label = su.name
WHERE oum.codeid is null OR oum.codeid NOT IN (select start_id FROM code_suis_updated)""".format(table_name=table_name,sab=sab)
"""This query joins the ontology_uri_map data to the label from the node_metadata table. The query only returns
records where the codeid is NULL or the codeid is missing from the code_suis_updated table. These represent
records that need a new SUI minted."""
cursor.execute(sql)
result = cursor.fetchall()
print("")
print ("Loading tables suis_updated, code_suis_updated, and new_sui_map for SAB: {sab}".format(sab=sab), end='', flush=True)
for row in result:
ontology_uri = row['ontology_uri']
cui = row['cui']
codeid = row['codeid']
code_list = str(codeid).split(' ')
code = code_list[1]
label = row['label']
term_type = row['term_type']
sui = row['sui']
                if sui is None:
if label in dict_new_suis.keys():
# if the label already exists, then use the existing SUI
sui = dict_new_suis[label]
else:
# if the label does not exist, then mint a new SUI
sui = 'HS' + str(record_count).zfill(6)
# mint a new SUI prefixed with 'HS'
sql = """INSERT INTO suis_updated (sui, name) VALUES ('{sui}',"{name}")""".format(sui=sui,name=label)
cursor.execute(sql)
sql = """INSERT INTO new_sui_map (codeid, sui, name) VALUES ('{codeid}','{sui}',"{name}")""".format(codeid=codeid,sui=sui,name=label)
cursor.execute(sql)
dict_new_suis[label] = sui
# add the new SUI to the in memory list
sql = """INSERT INTO code_suis_updated (start_id, end_id, type, cui) VALUES ('{codeid}','{sui}','{term_type}','{cui}')""".format(codeid=codeid,sui=sui,cui=cui,term_type=term_type)
cursor.execute(sql)
if 'HC' in cui and term_type == 'PT':
#insert a new HCUI into the cui_suis_updated table since it does not exist in the table yet.
sql = """INSERT INTO cui_suis_updated (start_id, end_id, type) VALUES ('{cui}','{sui}','PREF_TERM')""".format(cui=cui,sui=sui)
cursor.execute(sql)
record_count = record_count + 1
#commit every 10,000 records
if record_count % 10000 == 0:
print('.', end='', flush=True)
connection.commit()
connection.commit()
print('')
insert_new_synonyms(config, record_count)
        # After the table loop completes, add all the synonym terms. This happens outside of the loop
        # because there is not necessarily a 1:1 relationship between the node_metadata entries and the
        # synonym files. There is still a dependency: insert_new_synonyms must continue the SUI numbering,
        # so it receives record_count. It runs after the commit so that every term it looks up has already
        # been committed to the database.
except mysql.connector.Error as err:
print("Error in SQL: " + sql )
if err.errno == errorcode.ER_ACCESS_DENIED_ERROR:
print("Something is wrong with your user name or password")
elif err.errno == errorcode.ER_BAD_DB_ERROR:
print("Database does not exist")
else:
print(err)
connection.rollback()
finally:
        if connection is not None:
connection.close()
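# Illustrative only: insert_new_terms (above) mints SUIs as 'HS' plus a zero-padded,
# six-digit counter. This assumed helper is not used by the pipeline; it just
# documents the scheme in one place. Note that zfill never truncates, so counters
# past 999999 simply widen the identifier.
def mint_sui(record_count):
    '''
    e.g. mint_sui(1) returns 'HS000001' and mint_sui(12345) returns 'HS012345'
    param int record_count: The next SUI number to mint.
    '''
    return 'HS' + str(record_count).zfill(6)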
def insert_new_synonyms(config, record_count):
    '''
    This method creates new labels (Term nodes) in the graph for each synonym table.
    It is basically identical to the insert_new_terms method. The only differences are a) the config
    entry used (SYNONYM_LIST_FILE_TABLE_INFO rather than NODE_METADATA_FILE_TABLE_INFO) and b) the SQL
    used to find the synonyms.
    Adding a Term node affects several tables: suis, code_suis, cui_suis, and new_sui_map. The new_sui_map
    table does not represent data in the graph; it merely tracks minted SUIs between application runs to
    avoid changing a SUI and losing its connection to the UMLS codes.
    param dict config: The configuration data for this application.
    param int record_count: The next SUI number to mint, continuing the numbering from insert_new_terms.
    '''
if 'SYNONYM_LIST_FILE_TABLE_INFO' not in config:
#skip this method if there are no synonym files defined
return
synonym_list = config['SYNONYM_LIST_FILE_TABLE_INFO']
connection = None
sql = ''
try:
connection = mysql.connector.connect(
host=config['MYSQL_HOSTNAME'],
user=config['MYSQL_USERNAME'],
password=config['MYSQL_PASSWORD'],
database=config['MYSQL_DATABASE_NAME'],
charset='utf8mb4',collation='utf8mb4_bin')
cursor = connection.cursor(dictionary=True)
for table_info in synonym_list:
sab = table_info['sab']
table_name = table_info['table_name']
dict_new_suis = {}
""" keep an in-memory list of the new SUIs generated
The SQL includes a list of existing SUIs when it is initially executed.
During execution, new SUIs are created but they are missing from the ones
retrieved by the SQL (i.e. a "dirty read"). Therefore, the new SUIs are not found and will
create duplicate SUIs with the same labels. This in-memory list provides
lookup services to avoid recreating the labels."""
sql = """SELECT DISTINCT oum.ontology_uri as ontology_uri, oum.cui AS cui,nm.codeid AS codeid, nm.synonym AS label, '{sab}' as sab, su.sui AS sui, 'SY' AS term_type
FROM {table_name} nm
INNER JOIN ontology_uri_map oum
ON nm.ontology_uri = oum.ontology_uri
LEFT OUTER JOIN suis_updated su
ON nm.synonym = su.name""".format(table_name=table_name,sab=sab)
"""This query joins the ontology_uri_map data to the label from the node_metadata table. The query only returns
records where the codeid is NULL or the codeid is missing from the code_suis_updated table. These represent
records that need a new SUI minted."""
cursor.execute(sql)
result = cursor.fetchall()
print ("Loading tables suis_updated, code_suis_updated, and new_sui_map for SAB: {sab}".format(sab=sab), end='', flush=True)
for row in result:
ontology_uri = row['ontology_uri']
cui = row['cui']
codeid = row['codeid']
code_list = str(codeid).split(' ')
code = code_list[1]
label = row['label']
term_type = row['term_type']
sui = row['sui']
                if sui is None:
if label in dict_new_suis.keys():
# if the label already exists, then use the existing SUI
sui = dict_new_suis[label]
else:
# if the label does not exist, then mint a new SUI
sui = 'HS' + str(record_count).zfill(6)
# mint a new SUI prefixed with 'HS'
sql = """INSERT INTO suis_updated (sui, name) VALUES ('{sui}',"{name}")""".format(sui=sui,name=label)
cursor.execute(sql)
sql = """INSERT INTO new_sui_map (codeid, sui, name) VALUES ('{codeid}','{sui}',"{name}")""".format(codeid=codeid,sui=sui,name=label)
cursor.execute(sql)
dict_new_suis[label] = sui
# add the new SUI to the in memory list
sql = """INSERT INTO code_suis_updated (start_id, end_id, type, cui) VALUES ('{codeid}','{sui}','{term_type}','{cui}')""".format(codeid=codeid,sui=sui,cui=cui,term_type=term_type)
cursor.execute(sql)
record_count = record_count + 1
#commit every 10,000 records
if record_count % 10000 == 0:
print('.', end='', flush=True)
connection.commit()
connection.commit()
except mysql.connector.Error as err:
print("Error in SQL: " + sql )
if err.errno == errorcode.ER_ACCESS_DENIED_ERROR:
print("Something is wrong with your user name or password")
elif err.errno == errorcode.ER_BAD_DB_ERROR:
print("Database does not exist")
else:
print(err)
connection.rollback()
finally:
        if connection is not None:
connection.close()
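# Both insert_new_terms and insert_new_synonyms guard against duplicate SUIs with the
# same dict-based "get or mint" pattern. The sketch below (an assumed helper, not part
# of the pipeline) isolates that pattern: look the label up in the in-memory map
# first and only mint a new SUI on a miss.
def get_or_mint_sui(label, dict_new_suis, record_count):
    '''
    Return a tuple (sui, next_record_count, newly_minted) for the given label.
    '''
    if label in dict_new_suis:
        return dict_new_suis[label], record_count, False
    sui = 'HS' + str(record_count).zfill(6)
    dict_new_suis[label] = sui
    return sui, record_count + 1, True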
def insert_new_cuis(config):
    '''
    Find every entry in the node_metadata tables that is missing from the ontology_uri_map table. Such an
    entry was not mapped to any existing UMLS code, so a new CUI must be minted for it.
    param dict config: The configuration data for this application.
    '''
connection = None
sql = ''
try:
connection = mysql.connector.connect(
host=config['MYSQL_HOSTNAME'],
user=config['MYSQL_USERNAME'],
password=config['MYSQL_PASSWORD'],
database=config['MYSQL_DATABASE_NAME'],
charset='utf8mb4',collation='utf8mb4_bin')
cursor = connection.cursor(dictionary=True)
truncate_table_sql = "TRUNCATE cuis_updated"
cursor.execute(truncate_table_sql)
connection.commit()
print ("Truncating cui_codes_updated")
sql = """TRUNCATE cui_codes_updated"""
cursor.execute(sql)
connection.commit()
print ("Copying cuis INTO cuis_updated")
sql = """INSERT INTO cuis_updated SELECT * FROM umls_cuis"""
cursor.execute(sql)
connection.commit()
print ("Deleting HuBMAP CUIs")
sql = """DELETE FROM ontology_uri_map WHERE cui LIKE 'HC%'"""
cursor.execute(sql)
connection.commit()
print ("Copying cuis INTO cui_codes_updated")
sql = """INSERT INTO cui_codes_updated SELECT * FROM umls_cui_codes"""
cursor.execute(sql)
connection.commit()
node_metadata_info = config['NODE_METADATA_FILE_TABLE_INFO']
record_count = 1 # start HCUI numbering at one
print ("Creating new HCUI's and codes")
for table_info in node_metadata_info:
sab = table_info['sab']
table_name = table_info['table_name']
print ("Deleting {sab} codes from umls_codes".format(sab=sab))
sql = """DELETE FROM umls_codes WHERE sab = '{sab}'""".format(sab=sab)
# remove old records for the sab
cursor.execute(sql)
connection.commit()
print("Loading node metadata for {sab}".format(sab=sab))
sql = """SELECT ontology_uri AS ontology_uri, codeid AS codeid, sab AS sab FROM {table_name} nm
WHERE nm.ontology_uri NOT IN (SELECT ontology_uri FROM ontology_uri_map WHERE mapping_type = 'PRIMARY')""".format(table_name=table_name)
"""Find all the records in the current node_metadata table that were not mapped to an UMLS terms."""
cursor.execute(sql)
result = cursor.fetchall()
for row in result:
ontology_uri = row['ontology_uri']
cui = 'HC' + str(record_count).zfill(6)
# mint a new CUI using the HC prefix
record_count = record_count + 1
current_sab = sab
codeid = row['codeid']
code_list = str(codeid).split(' ')
code = code_list[1]
sql = """INSERT INTO ontology_uri_map (ontology_uri,codeid,cui,sab,mapping_type) VALUES ('{ontology_uri}','{codeid}','{cui}','{sab}','PRIMARY')""".format(codeid=codeid,cui=cui,ontology_uri=ontology_uri,sab=current_sab)
# add the new HCUI to the ontology_uri_map
cursor.execute(sql)
sql = """INSERT INTO cuis_updated (cui) VALUES ('{cui}')""".format(cui=cui)
# add the new HCUI to the cuis_updated table
cursor.execute(sql)
connection.commit()
sql = """INSERT INTO umls_codes (codeid, sab,code) VALUES ('{codeid}','{sab}','{code}')""".format(codeid=codeid,sab=current_sab,code=code)
# add the new Code information to umls_codes
cursor.execute(sql)
connection.commit()
sql = """INSERT INTO cui_codes_updated (start_id, end_id) VALUES ('{cui}','{codeid}')""".format(cui=cui,codeid=codeid)
# connect the new HCUI to its new Code
cursor.execute(sql)
connection.commit()
except mysql.connector.Error as err:
print("Error in SQL: " + sql )
if err.errno == errorcode.ER_ACCESS_DENIED_ERROR:
print("Something is wrong with your user name or password")
elif err.errno == errorcode.ER_BAD_DB_ERROR:
print("Database does not exist")
else:
print(err)
connection.rollback()
finally:
        if connection is not None:
connection.close()
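# insert_new_cuis above (and the methods that follow it) recover the bare code by
# splitting the codeid on a space, e.g. 'UBERON 0000061' -> '0000061'. The sketch
# below makes that assumption explicit and raises a clear error instead of an
# IndexError when a codeid does not follow the 'SAB CODE' layout produced by
# create_missing_codeids. Illustrative only; not called by the pipeline.
def split_codeid(codeid):
    '''
    e.g. split_codeid('UBERON 0000061') returns ('UBERON', '0000061')
    '''
    parts = str(codeid).split(' ')
    if len(parts) < 2:
        raise ValueError("codeid '{codeid}' is not in 'SAB CODE' form".format(codeid=codeid))
    return parts[0], parts[1]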
def insert_new_codes(config):
    '''
    Create the new codes in the graph. This code creates new codes and connects them to the appropriate
    CUIs.
    Note: by the time this code executes, the insert_new_cuis method should have already inserted the new
    CUIs, so this method does not need to insert them. This also means this method depends on
    insert_new_cuis having run first.
    param dict config: The configuration data for this application.
    '''
connection = None
sql = ''
try:
connection = mysql.connector.connect(
host=config['MYSQL_HOSTNAME'],
user=config['MYSQL_USERNAME'],
password=config['MYSQL_PASSWORD'],
database=config['MYSQL_DATABASE_NAME'],
charset='utf8mb4',collation='utf8mb4_bin')
cursor = connection.cursor(dictionary=True)
node_metadata_info = config['NODE_METADATA_FILE_TABLE_INFO']
for table_info in node_metadata_info:
table_name = table_info['table_name']
current_sab = table_info['sab']
sql = """SELECT nm.ontology_uri as ontology_uri, nm.codeid as codeid, oum.cui as cui, nm.sab as sab
FROM {table_name} nm, ontology_uri_map oum
WHERE oum.ontology_uri = nm.ontology_uri
and oum.codeid IS NOT NULL
and nm.codeid not in (select codeid from umls_codes)""".format(table_name=table_name)
# this SQL finds all the codes in the current node_metadata missing from the umls_codes table
# these are the codes we need to add
cursor.execute(sql)
result = cursor.fetchall()
print ("Creating new codes for sab: {sab}".format(sab=current_sab))
for row in result:
cui = row['cui']
codeid = row['codeid']
code_list = str(codeid).split(' ')
code = code_list[1]
sql = """INSERT INTO umls_codes (codeid, sab,code) VALUES ('{codeid}','{sab}','{code}')""".format(codeid=codeid,sab=current_sab,code=code)
cursor.execute(sql)
connection.commit()
sql = """INSERT INTO cui_codes_updated (start_id, end_id) VALUES ('{cui}','{codeid}')""".format(cui=cui,codeid=codeid)
cursor.execute(sql)
connection.commit()
except mysql.connector.Error as err:
print("Error in SQL: " + sql )
if err.errno == errorcode.ER_ACCESS_DENIED_ERROR:
print("Something is wrong with your user name or password")
elif err.errno == errorcode.ER_BAD_DB_ERROR:
print("Database does not exist")
else:
print(err)
connection.rollback()
finally:
        if connection is not None:
connection.close()
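# Illustrative pre-flight check (an assumed helper, not wired into transform):
# insert_new_codes depends on insert_new_cuis having populated ontology_uri_map with
# PRIMARY rows, so a cheap guard like this makes the ordering requirement fail fast
# instead of silently inserting nothing. It assumes a dictionary=True cursor, as
# used throughout this module.
def assert_cuis_inserted(cursor):
    cursor.execute("SELECT COUNT(*) AS n FROM ontology_uri_map WHERE mapping_type = 'PRIMARY'")
    if cursor.fetchone()['n'] == 0:
        raise RuntimeError('ontology_uri_map has no PRIMARY rows; run insert_new_cuis first')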
def insert_new_defs(config):
    '''
    Add the definitions from the PheKnowLator data for the UBERON and CL nodes.
    param dict config: The configuration data for this application.
    '''
connection = None
sql = ''
try:
connection = mysql.connector.connect(
host=config['MYSQL_HOSTNAME'],
user=config['MYSQL_USERNAME'],
password=config['MYSQL_PASSWORD'],
database=config['MYSQL_DATABASE_NAME'],
charset='utf8mb4',collation='utf8mb4_bin')
cursor = connection.cursor(dictionary=True)
truncate_table_sql = "TRUNCATE defs_updated"
cursor.execute(truncate_table_sql)
connection.commit()
truncate_table_sql = "TRUNCATE def_rel_updated"
cursor.execute(truncate_table_sql)
connection.commit()
truncate_table_sql = "TRUNCATE new_def_map"
cursor.execute(truncate_table_sql)
connection.commit()
print("")
print ("Copying defs INTO defs_updated")
sql = """INSERT INTO defs_updated SELECT * FROM umls_defs"""
cursor.execute(sql)
connection.commit()
print ("Copying def_rel INTO def_rel_updated")
sql = """INSERT INTO def_rel_updated SELECT * FROM umls_def_rel"""
cursor.execute(sql)
connection.commit()
node_metadata_info = config['NODE_METADATA_FILE_TABLE_INFO']
        record_count = 1 # start ATUI numbering at one
for table_info in node_metadata_info:
table_name = table_info['table_name']
sab = table_info['sab']
sql = """SELECT oum.cui, nm.node_definition, '{sab}' as sab
FROM {table_name} nm, ontology_uri_map oum
WHERE nm.ontology_uri = oum.ontology_uri
AND oum.mapping_type = 'PRIMARY'
AND node_definition <> 'None'
AND node_definition <> '.'""".format(table_name=table_name,sab=sab)
cursor.execute(sql)
result = cursor.fetchall()
print("")
print ("Loading tables defs_updated, def_rels_updated, and new_def_map", end='', flush=True)
for row in result:
cui = row['cui']
node_definition = row['node_definition']
sab = row['sab']
atui = 'HAT' + str(record_count).zfill(6)
record_count = record_count + 1
if '"' in node_definition:
node_definition = node_definition.replace('"','\\"')
sql = """INSERT INTO defs_updated (atui, sab, def) VALUES ('{atui}','{sab}',"{node_definition}")""".format(atui=atui,sab=sab,node_definition=node_definition)
cursor.execute(sql)
sql = """INSERT INTO def_rel_updated (start_id, end_id, type, sab) VALUES ('{cui}','{atui}','DEF','{sab}')""".format(atui=atui,sab=sab,cui=cui)
cursor.execute(sql)
sql = """INSERT INTO new_def_map (cui, atui, node_definition, sab) VALUES ('{cui}','{atui}',"{node_definition}", '{sab}')""".format(atui=atui,sab=sab,cui=cui,node_definition=node_definition)
cursor.execute(sql)
#commit every 10,000 records
if record_count % 10000 == 0:
print('.', end='', flush=True)
connection.commit()
connection.commit()
except mysql.connector.Error as err:
print("Error in SQL: " + sql )
if err.errno == errorcode.ER_ACCESS_DENIED_ERROR:
print("Something is wrong with your user name or password")
elif err.errno == errorcode.ER_BAD_DB_ERROR:
print("Database does not exist")
else:
print(err)
connection.rollback()
finally:
        if connection is not None:
connection.close()
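# The INSERT statements above interpolate definition text into the SQL string and
# escape embedded double quotes by hand. Below is a minimal sketch of the
# parameterized alternative (illustrative only, not used by the pipeline):
# mysql.connector substitutes %s placeholders itself, so no manual escaping is
# needed and embedded quotes cannot break the statement.
def insert_def_parameterized(cursor, atui, sab, node_definition):
    sql = "INSERT INTO defs_updated (atui, sab, def) VALUES (%s, %s, %s)"
    cursor.execute(sql, (atui, sab, node_definition))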
def transform(config):
'''
This coordinates the transform methods.
param dict config: The configuration data for this application.
'''
build_xref_table(config)
# This code is temporary. It should be moved to a pre-processing step
fix_dbxrefs(config)
# END This code is temporary. It should be moved to a pre-processing step
build_ambiguous_codes_table(config)
build_ontology_uri_to_umls_map_table(config)
build_relations_table(config)
insert_new_cuis(config)
insert_new_codes(config)
insert_new_terms(config)
insert_new_defs(config)
insert_new_cui_cui_relations(config)
    print('') # print a newline to finish the progress dots emitted with end='' above
print("Done with transform process")
def load(config):
'''
This method initiates the .CSV file export process.
param dict config: The configuration data for this application.
'''
export_files(config)
    print('') # print a newline to finish the progress dots emitted with end='' above
print("Done with load process")
def export_files(config):
    '''
    This method walks through a subset of the mysql tables and generates a set of .CSV files. These
    .CSV files adhere to the Neo4j 'CSV file header format' found here:
    https://neo4j.com/docs/operations-manual/current/tools/import/file-header-format/
    This method matches each mysql table with a file_name and manages any column header adjustments
    that need to be made.
    param dict config: The configuration data for this application.
    '''
connection = None
sql = ''
try:
connection = mysql.connector.connect(
host=config['MYSQL_HOSTNAME'],
user=config['MYSQL_USERNAME'],
password=config['MYSQL_PASSWORD'],
database=config['MYSQL_DATABASE_NAME'],
charset='utf8mb4',collation='utf8mb4_bin')
cursor = connection.cursor(dictionary=True)
export_table_info = [{'table_name': 'umls_codes', 'file_name':'CODEs.csv','sql_columns':['codeid','sab','code'],'file_columns':['CodeID:ID','SAB','CODE']},
{'table_name': 'umls_tui_rel', 'file_name':'TUIrel.csv','sql_columns':['start_id','end_id'],'file_columns':[':START_ID',':END_ID']},
{'table_name': 'umls_cui_tuis', 'file_name':'CUI-TUIs.csv','sql_columns':['start_id','end_id'],'file_columns':[':START_ID',':END_ID']},
{'table_name': 'umls_cui_cuis', 'file_name':'CUI-CUIs.csv','sql_columns':['start_id','end_id','type','sab'],'file_columns':[':START_ID',':END_ID',':TYPE','SAB']},
{'table_name': 'cui_codes_updated', 'file_name':'CUI-CODEs.csv','sql_columns':['start_id','end_id'],'file_columns':[':START_ID',':END_ID']},
{'table_name': 'code_suis_updated', 'file_name':'CODE-SUIs.csv','sql_columns':['start_id','end_id','type','cui'],'file_columns':[':START_ID',':END_ID',':TYPE','CUI']},
{'table_name': 'cui_suis_updated', 'file_name':'CUI-SUIs.csv','sql_columns':['start_id','end_id'],'file_columns':[':START_ID',':END_ID']},
{'table_name': 'cuis_updated', 'file_name':'CUIs.csv','sql_columns':['cui'],'file_columns':['CUI:ID']},
{'table_name': 'suis_updated', 'file_name':'SUIs.csv','sql_columns':['sui','name'],'file_columns':['SUI:ID','name']},
{'table_name': 'umls_tuis', 'file_name':'TUIs.csv','sql_columns':['tui','name','stn','def'],'file_columns':['TUI:ID','name','STN','DEF']},
{'table_name': 'defs_updated', 'file_name':'DEFs.csv','sql_columns':['atui','sab','def'],'file_columns':['ATUI:ID','SAB','DEF']},
{'table_name': 'def_rel_updated', 'file_name':'DEFrel.csv','sql_columns':['start_id','end_id'],'file_columns':[':START_ID',':END_ID']}]
        '''
        This loop walks through the subset of mysql tables found in the export_table_info variable. Each
        entry in export_table_info contains:
        table_name: the mysql table name to export
        file_name: the name of the .CSV file to be generated
        sql_columns: a list of the columns to be included in the SELECT statement
        file_columns: a list of the column headers to use when writing the data to the .CSV files
        The sql_columns and file_columns map 1:1. For example, in the umls_codes/CODEs.csv entry:
        the codeid SQL column becomes -> CodeID:ID in the .CSV file
        the sab SQL column becomes -> SAB in the .CSV file
        the code SQL column becomes -> CODE in the .CSV file
        '''
for export_info in export_table_info:
# walk through all the entries in the export_table_info list
table_name = export_info['table_name']
file_name = export_info['file_name']
sql_columns = export_info['sql_columns']
file_columns = export_info['file_columns']
file_path = os.path.join(config['OUTPUT_DIR'],file_name)
# set the output file path
sql = """SELECT DISTINCT {col_list} FROM {table_name}""".format(table_name=table_name,col_list=",".join(sql_columns))
# build the SELECT statement from the sql_columns variable. Also, apply a SQL 'DISTINCT' keyword to avoid duplicates
cursor.execute(sql)
result = cursor.fetchall()
print("")
print ("Writing data from {table_name} to file {file_path}".format(table_name=table_name,file_path=file_path), end='', flush=True)
            f = open(file_path, 'w', newline='') # newline='' avoids blank lines in the csv output on Windows
record_count = 0
writer = csv.writer(f,quoting=csv.QUOTE_ALL)
writer.writerow(file_columns)
# write the file_columns as the headers for the .CSV file
data_rows = []
for result_row in result:
data_list = []
for field in sql_columns:
data_list.append(result_row[field])
data_rows.append(data_list)
record_count = record_count + 1
#write every 100,000 records
if record_count % 100000 == 0:
print('.', end='', flush=True)
writer.writerows(data_rows)
# clear data_rows
data_rows = []
writer.writerows(data_rows)
f.close()
except mysql.connector.Error as err:
print("Error in SQL: " + sql )
if err.errno == errorcode.ER_ACCESS_DENIED_ERROR:
print("Something is wrong with your user name or password")
elif err.errno == errorcode.ER_BAD_DB_ERROR:
print("Database does not exist")
else:
print(err)
connection.rollback()
finally:
        if connection is not None:
connection.close()
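# Illustrative only: a small sanity check (an assumed helper, not called by load)
# that re-reads an exported .CSV and confirms its header row matches the
# file_columns it was written with, e.g.
# verify_export('CODEs.csv', ['CodeID:ID', 'SAB', 'CODE'])
def verify_export(file_path, expected_columns):
    import csv
    with open(file_path, newline='') as csvfile:
        header = next(csv.reader(csvfile), None)
    return header == expected_columns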
# utility function
def isascii(s):
"""Check if the characters in string s are in ASCII, U+0-U+7F."""
return len(s) == len(s.encode())
if __name__ == '__main__':
    parser = argparse.ArgumentParser()
    # nargs='*' makes the commands optional; with no arguments every phase runs
    parser.add_argument('commands', type=str, nargs='*',
                        default=['extract', 'extract_non_umls', 'transform', 'load'])
    args = parser.parse_args()
    command_list = args.commands
file_path = os.path.join(os.path.abspath(os.path.dirname(__file__)))
file_name = 'app.cfg'
config = load_config(file_path, file_name)
if 'extract_non_umls' in command_list:
extract_non_umls(config)
if 'extract' in command_list:
extract(config)
if 'transform' in command_list:
transform(config)
if 'load' in command_list:
load(config)
print("Done")
import sys
import os
import types
import mysql.connector
from mysql.connector import errorcode
import csv
import argparse
config = {}
def load_config(root_path, filename):
filename = os.path.join(root_path, filename)
d = types.ModuleType("config")
d.__file__ = filename
return_dict = {}
try:
with open(filename, mode="rb") as config_file:
exec(compile(config_file.read(), filename, "exec"), d.__dict__)
for config_key in d.__dict__:
if str(config_key).startswith('__') == False:
return_dict[config_key] = d.__dict__[config_key]
except OSError as e:
e.strerror = f"Unable to load configuration file ({e.strerror})"
raise
return return_dict
def create_database(config):
connection = None
try:
connection = mysql.connector.connect(
host=config['MYSQL_HOSTNAME'],
user=config['MYSQL_USERNAME'],
password=config['MYSQL_PASSWORD'])
cursor = connection.cursor()
with open(config['TABLE_CREATE_SQL_FILEPATH'], encoding="utf-8") as f:
commands = f.read().split(';')
for command in commands:
if str(command).strip() != "":
print('Executing: ' + command)
cursor.execute(command)
print ("Done creating database tables.")
except mysql.connector.Error as err:
if err.errno == errorcode.ER_ACCESS_DENIED_ERROR:
print("Something is wrong with your user name or password")
elif err.errno == errorcode.ER_BAD_DB_ERROR:
print("Database does not exist")
else:
print(err)
finally:
if connection != None:
connection.close()
def create_indices(config):
connection = None
try:
connection = mysql.connector.connect(
host=config['MYSQL_HOSTNAME'],
user=config['MYSQL_USERNAME'],
password=config['MYSQL_PASSWORD'],
database=config['MYSQL_DATABASE_NAME'])
cursor = connection.cursor()
for table_info in config['NODE_METADATA_FILE_TABLE_INFO']:
sql = "ALTER TABLE {table_name} ADD INDEX {table_name}_ontology_uri_idx (ontology_uri(500))".format(table_name=table_info['table_name'])
cursor.execute(sql)
sql = "ALTER TABLE {table_name} ADD INDEX {table_name}_node_label_idx (node_label(500))".format(table_name=table_info['table_name'])
cursor.execute(sql)
sql = "ALTER TABLE {table_name} ADD INDEX {table_name}_codeid_idx (codeid(500))".format(table_name=table_info['table_name'])
cursor.execute(sql)
sql = "ALTER TABLE {table_name} ADD INDEX {table_name}_sab_idx (sab(50))".format(table_name=table_info['table_name'])
cursor.execute(sql)
for table_info in config['EDGE_LIST_FILE_TABLE_INFO']:
sql = "ALTER TABLE {table_name} ADD INDEX {table_name}_subject_idx (subject(50))".format(table_name=table_info['table_name'])
cursor.execute(sql)
sql = "ALTER TABLE {table_name} ADD INDEX {table_name}_predicate_idx (predicate(100))".format(table_name=table_info['table_name'])
cursor.execute(sql)
sql = "ALTER TABLE {table_name} ADD INDEX {table_name}_object_idx (object(100))".format(table_name=table_info['table_name'])
cursor.execute(sql)
sql = "ALTER TABLE {table_name} ADD INDEX {table_name}_sab_idx (sab(50))".format(table_name=table_info['table_name'])
cursor.execute(sql)
for table_info in config['DBXREF_FILE_TABLE_INFO']:
sql = "ALTER TABLE {table_name} ADD INDEX {table_name}_ontology_uri_idx (ontology_uri(50))".format(table_name=table_info['table_name'])
cursor.execute(sql)
sql = "ALTER TABLE {table_name} ADD FULLTEXT INDEX {table_name}_dbxrefs_idx (dbxrefs(700))".format(table_name=table_info['table_name'])
cursor.execute(sql)
for table_info in config['RELATIONS_FILE_TABLE_INFO']:
sql = "ALTER TABLE {table_name} ADD INDEX {table_name}_relation_id_idx (relation_id(100))".format(table_name=table_info['table_name'])
cursor.execute(sql)
sql = "ALTER TABLE {table_name} ADD INDEX {table_name}_relation_label_idx (relation_label(50))".format(table_name=table_info['table_name'])
cursor.execute(sql)
sql = "ALTER TABLE {table_name} ADD INDEX {table_name}_inverse_relation_label_idx (inverse_relation_label(50))".format(table_name=table_info['table_name'])
cursor.execute(sql)
for table_info in config['SYNONYM_LIST_FILE_TABLE_INFO']:
sql = "ALTER TABLE {table_name} ADD INDEX {table_name}_ontology_uri_idx (ontology_uri(500))".format(table_name=table_info['table_name'])
cursor.execute(sql)
sql = "ALTER TABLE {table_name} ADD INDEX {table_name}_synonym_idx (synonym(500))".format(table_name=table_info['table_name'])
cursor.execute(sql)
sql = "ALTER TABLE {table_name} ADD INDEX {table_name}_sab_idx (sab(50))".format(table_name=table_info['table_name'])
cursor.execute(sql)
print ("Done creating database indices.")
except mysql.connector.Error as err:
if err.errno == errorcode.ER_ACCESS_DENIED_ERROR:
print("Something is wrong with your user name or password")
elif err.errno == errorcode.ER_BAD_DB_ERROR:
print("Database does not exist")
else:
print(err)
finally:
if connection != None:
connection.close()
def load_edge_list(config):
edge_list_list = config['EDGE_LIST_FILE_TABLE_INFO']
connection = None
sql = ''
record_count = 0
try:
connection = mysql.connector.connect(
host=config['MYSQL_HOSTNAME'],
user=config['MYSQL_USERNAME'],
password=config['MYSQL_PASSWORD'],
database=config['MYSQL_DATABASE_NAME'],
charset='utf8mb4',collation='utf8mb4_bin')
cursor = connection.cursor(dictionary=True)
for table_data in edge_list_list:
table_name = table_data['table_name']
file_name = table_data['file_name']
sab = table_data['sab']
drop_table_sql = "DROP TABLE IF EXISTS {table_name}".format(table_name=table_name)
cursor.execute(drop_table_sql)
table_create_sql = """CREATE TABLE {table_name} (
id INT NOT NULL AUTO_INCREMENT,
subject VARCHAR(2048) NOT NULL,
predicate VARCHAR(2048) NOT NULL,
object VARCHAR(2048) NOT NULL,
sab VARCHAR(50),
PRIMARY KEY(id)
)""".format(table_name=table_name)
cursor.execute(table_create_sql)
connection.commit()
print("Created table: " + table_name)
file_path = os.path.join(config['ONTOLOGY_SOURCE_DIR'], file_name)
load_file(config, file_path, table_name)
sql = "UPDATE {table_name} SET sab = '{sab}'".format(table_name=table_name,sab=sab)
cursor.execute(sql)
connection.commit()
except mysql.connector.Error as err:
print("Error in SQL: " + sql )
if err.errno == errorcode.ER_ACCESS_DENIED_ERROR:
print("Something is wrong with your user name or password")
elif err.errno == errorcode.ER_BAD_DB_ERROR:
print("Database does not exist")
else:
print(err)
connection.rollback()
finally:
if connection != None:
connection.close()
def load_synonym_list(config):
if 'SYNONYM_LIST_FILE_TABLE_INFO' not in config:
return
synonym_list = config['SYNONYM_LIST_FILE_TABLE_INFO']
connection = None
sql = ''
record_count = 0
try:
connection = mysql.connector.connect(
host=config['MYSQL_HOSTNAME'],
user=config['MYSQL_USERNAME'],
password=config['MYSQL_PASSWORD'],
database=config['MYSQL_DATABASE_NAME'],
charset='utf8mb4',collation='utf8mb4_bin')
cursor = connection.cursor(dictionary=True)
for table_data in synonym_list:
# walk through the list of synonym files found in the config file.
# for each entry, read the corresponding file and load it into the referenced
# mysql table.
table_name = table_data['table_name']
file_name = table_data['file_name']
sab = table_data['sab']
drop_table_sql = "DROP TABLE IF EXISTS {table_name}".format(table_name=table_name)
cursor.execute(drop_table_sql)
table_create_sql = """CREATE TABLE {table_name} (
id INT NOT NULL AUTO_INCREMENT,
ontology_uri VARCHAR(2048) NOT NULL,
codeid VARCHAR(2048) NOT NULL,
synonym VARCHAR(2048) NOT NULL,
sab VARCHAR(50),
PRIMARY KEY(id)
)""".format(table_name=table_name)
# this is the generic SQL to create a synonym table
cursor.execute(table_create_sql)
connection.commit()
print("Created table: " + table_name)
file_path = os.path.join(config['ONTOLOGY_SOURCE_DIR'], file_name)
load_file(config, file_path, table_name)
sql = "UPDATE {table_name} SET sab = '{sab}'".format(table_name=table_name,sab=sab)
# add the SAB for all records in table
cursor.execute(sql)
connection.commit()
except mysql.connector.Error as err:
print("Error in SQL: " + sql )
if err.errno == errorcode.ER_ACCESS_DENIED_ERROR:
print("Something is wrong with your user name or password")
elif err.errno == errorcode.ER_BAD_DB_ERROR:
print("Database does not exist")
else:
print(err)
connection.rollback()
finally:
if connection != None:
connection.close()
def load_relations(config):
node_metadata_list = config['RELATIONS_FILE_TABLE_INFO']
connection = None
sql = ''
record_count = 0
try:
connection = mysql.connector.connect(
host=config['MYSQL_HOSTNAME'],
user=config['MYSQL_USERNAME'],
password=config['MYSQL_PASSWORD'],
database=config['MYSQL_DATABASE_NAME'],
charset='utf8mb4',collation='utf8mb4_bin')
cursor = connection.cursor(dictionary=True)
for table_data in node_metadata_list:
# walk through the list of relations files found in the config file.
# for each entry, read the corresponding file and load it into the referenced
# mysql table.
table_name = table_data['table_name']
file_name = table_data['file_name']
sab = table_data['sab']
drop_table_sql = "DROP TABLE IF EXISTS {table_name}".format(table_name=table_name)
cursor.execute(drop_table_sql)
table_create_sql = """CREATE TABLE {table_name} (
id INT NOT NULL AUTO_INCREMENT,
relation_id VARCHAR(2048) NOT NULL,
relation_label VARCHAR(2048) NOT NULL,
inverse_relation_label VARCHAR(2048),
PRIMARY KEY(id)
)""".format(table_name=table_name)
# this is the generic create relations SQL statement
cursor.execute(table_create_sql)
connection.commit()
print("Created table: " + table_name)
file_path = os.path.join(config['ONTOLOGY_SOURCE_DIR'], file_name)
load_file(config, file_path, table_name)
except mysql.connector.Error as err:
print("Error in SQL: " + sql )
if err.errno == errorcode.ER_ACCESS_DENIED_ERROR:
print("Something is wrong with your user name or password")
elif err.errno == errorcode.ER_BAD_DB_ERROR:
print("Database does not exist")
else:
print(err)
connection.rollback()
finally:
if connection != None:
connection.close()
def create_missing_codeids(config):
node_metadata_list = config['NODE_METADATA_FILE_TABLE_INFO']
connection = None
sql = ''
record_count = 0
try:
connection = mysql.connector.connect(
host=config['MYSQL_HOSTNAME'],
user=config['MYSQL_USERNAME'],
password=config['MYSQL_PASSWORD'],
database=config['MYSQL_DATABASE_NAME'],
charset='utf8mb4',collation='utf8mb4_bin')
cursor = connection.cursor(dictionary=True)
for table_data in node_metadata_list:
table_name = table_data['table_name']
sql = """UPDATE {table_name}
SET codeid = REPLACE(REPLACE(ontology_uri, 'http://purl.obolibrary.org/obo/',''), '_', ' ')
WHERE codeid IS NULL""".format(table_name=table_name)
# add a codeid for all records in table
cursor.execute(sql)
connection.commit()
except mysql.connector.Error as err:
print("Error in SQL: " + sql )
if err.errno == errorcode.ER_ACCESS_DENIED_ERROR:
print("Something is wrong with your user name or password")
elif err.errno == errorcode.ER_BAD_DB_ERROR:
print("Database does not exist")
else:
print(err)
connection.rollback()
finally:
if connection != None:
connection.close()
def fix_dbxrefs(config):
connection = None
sql = ''
record_count = 0
try:
connection = mysql.connector.connect(
host=config['MYSQL_HOSTNAME'],
user=config['MYSQL_USERNAME'],
password=config['MYSQL_PASSWORD'],
database=config['MYSQL_DATABASE_NAME'],
charset='utf8mb4',collation='utf8mb4_bin')
cursor = connection.cursor(dictionary=True)
table_name = 'dbxrefs'
sql = """UPDATE {table_name}
SET xref = UPPER(xref)""".format(table_name=table_name)
# uppercase all dbxrefs data in table
cursor.execute(sql)
connection.commit()
sql = """UPDATE {table_name}
SET xref = REPLACE(xref, 'NCIT:', 'NCI:') WHERE xref LIKE 'NCIT:%'""".format(table_name=table_name)
# convert all the NCI codes
cursor.execute(sql)
connection.commit()
sql = """UPDATE {table_name}
SET xref = REPLACE(xref, 'HTTP://WWW.SNOMEDBROWSER.COM/CODES/DETAILS/', 'SNOMEDCT_US:') WHERE xref LIKE 'HTTP://WWW.SNOMEDBROWSER.COM/CODES/DETAILS/%'""".format(table_name=table_name)
# convert all the SNOMED codes
cursor.execute(sql)
connection.commit()
sql = """UPDATE {table_name}
SET xref = REPLACE(xref, 'MESH:', 'MSH:') WHERE xref LIKE 'MESH:%'
AND instr(xref, 'MESH:D') > 0
AND instr(xref, 'MESH:D24') = 0""".format(table_name=table_name)
# convert all the MeSH codes
cursor.execute(sql)
connection.commit()
sql = """UPDATE {table_name}
SET xref = REPLACE(xref, ':', ' ')""".format(table_name=table_name)
# replace all remaining colons with spaces dbxrefs data in table
cursor.execute(sql)
connection.commit()
except mysql.connector.Error as err:
print("Error in SQL: " + sql )
if err.errno == errorcode.ER_ACCESS_DENIED_ERROR:
print("Something is wrong with your user name or password")
elif err.errno == errorcode.ER_BAD_DB_ERROR:
print("Database does not exist")
else:
print(err)
connection.rollback()
finally:
if connection != None:
connection.close()
def load_node_metadata(config):
node_metadata_list = config['NODE_METADATA_FILE_TABLE_INFO']
connection = None
sql = ''
record_count = 0
try:
connection = mysql.connector.connect(
host=config['MYSQL_HOSTNAME'],
user=config['MYSQL_USERNAME'],
password=config['MYSQL_PASSWORD'],
database=config['MYSQL_DATABASE_NAME'],
charset='utf8mb4',collation='utf8mb4_bin')
cursor = connection.cursor(dictionary=True)
for table_data in node_metadata_list:
# walk through the list of node_metadata files found in the config file.
# for each entry, read the corresponding file and load it into the referenced
# mysql table.
table_name = table_data['table_name']
file_name = table_data['file_name']
sab = table_data['sab']
drop_table_sql = "DROP TABLE IF EXISTS {table_name}".format(table_name=table_name)
cursor.execute(drop_table_sql)
table_create_sql = """CREATE TABLE {table_name} (
id INT NOT NULL AUTO_INCREMENT,
ontology_uri VARCHAR(2048) NOT NULL,
codeid VARCHAR(2048),
node_label VARCHAR(2048) NOT NULL,
node_definition VARCHAR(2048) NOT NULL,
sab VARCHAR(50),
PRIMARY KEY(id)
)""".format(table_name=table_name)
# this SQL creates the generic node_metadata table
cursor.execute(table_create_sql)
connection.commit()
print("Created table: " + table_name)
file_path = os.path.join(config['ONTOLOGY_SOURCE_DIR'], file_name)
load_file(config, file_path, table_name)
sql = "UPDATE {table_name} SET sab = '{sab}'".format(table_name=table_name,sab=sab)
# add the SAB for all records in table
cursor.execute(sql)
connection.commit()
except mysql.connector.Error as err:
print("Error in SQL: " + sql )
if err.errno == errorcode.ER_ACCESS_DENIED_ERROR:
print("Something is wrong with your user name or password")
elif err.errno == errorcode.ER_BAD_DB_ERROR:
print("Database does not exist")
else:
print(err)
connection.rollback()
finally:
if connection != None:
connection.close()
def load_dbxref(config):
dbxref_list = config['DBXREF_FILE_TABLE_INFO']
connection = None
sql = ''
record_count = 0
try:
connection = mysql.connector.connect(
host=config['MYSQL_HOSTNAME'],
user=config['MYSQL_USERNAME'],
password=config['MYSQL_PASSWORD'],
database=config['MYSQL_DATABASE_NAME'],
charset='utf8mb4',collation='utf8mb4_bin')
cursor = connection.cursor(dictionary=True)
for table_data in dbxref_list:
# walk through the list of dbxref files found in the config file.
# for each entry, read the corresponding file and load it into the referenced
# mysql table.
table_name = table_data['table_name']
file_name = table_data['file_name']
sab = table_data['sab']
drop_table_sql = "DROP TABLE IF EXISTS {table_name}".format(table_name=table_name)
cursor.execute(drop_table_sql)
table_create_sql = """CREATE TABLE {table_name} (
id INT NOT NULL AUTO_INCREMENT,
ontology_uri VARCHAR(2048) NOT NULL,
dbxrefs VARCHAR(5120) NOT NULL,
sab VARCHAR(50),
PRIMARY KEY(id)
)""".format(table_name=table_name)
# this is the SQL to create a generic dbxref table
cursor.execute(table_create_sql)
connection.commit()
print("Created table: " + table_name)
file_path = os.path.join(config['ONTOLOGY_SOURCE_DIR'], file_name)
load_file(config, file_path, table_name)
except mysql.connector.Error as err:
print("Error in SQL: " + sql )
if err.errno == errorcode.ER_ACCESS_DENIED_ERROR:
print("Something is wrong with your user name or password")
elif err.errno == errorcode.ER_BAD_DB_ERROR:
print("Database does not exist")
else:
print(err)
connection.rollback()
finally:
if connection != None:
connection.close()
def load_umls_codes(config):
file_path = os.path.join(config['UMLS_SOURCE_DIR'],'CODEs.csv')
table_name = 'umls_codes'
load_file(config, file_path, table_name)
def load_umls_defs(config):
file_path = os.path.join(config['UMLS_SOURCE_DIR'],'DEFs.csv')
table_name = 'umls_defs'
load_file(config, file_path, table_name)
def load_umls_suis(config):
file_path = os.path.join(config['UMLS_SOURCE_DIR'],'SUIs.csv')
table_name = 'umls_suis'
load_file(config, file_path, table_name)
def load_umls_cuis(config):
file_path = os.path.join(config['UMLS_SOURCE_DIR'],'CUIs.csv')
table_name = 'umls_cuis'
load_file(config, file_path, table_name)
def load_umls_tuis(config):
file_path = os.path.join(config['UMLS_SOURCE_DIR'],'TUIs.csv')
table_name = 'umls_tuis'
load_file(config, file_path, table_name)
def load_umls_code_suis(config):
file_path = os.path.join(config['UMLS_SOURCE_DIR'],'CODE-SUIs.csv')
table_name = 'umls_code_suis'
load_file(config, file_path, table_name)
def load_umls_cui_codes(config):
file_path = os.path.join(config['UMLS_SOURCE_DIR'],'CUI-CODEs.csv')
table_name = 'umls_cui_codes'
load_file(config, file_path, table_name)
def load_umls_cui_cuis(config):
file_path = os.path.join(config['UMLS_SOURCE_DIR'],'CUI-CUIs.csv')
table_name = 'umls_cui_cuis'
load_file(config, file_path, table_name)
def load_umls_cui_suis(config):
file_path = os.path.join(config['UMLS_SOURCE_DIR'],'CUI-SUIs.csv')
table_name = 'umls_cui_suis'
load_file(config, file_path, table_name)
def load_umls_cui_tuis(config):
file_path = os.path.join(config['UMLS_SOURCE_DIR'],'CUI-TUIs.csv')
table_name = 'umls_cui_tuis'
load_file(config, file_path, table_name)
def load_umls_def_rel(config):
file_path = os.path.join(config['UMLS_SOURCE_DIR'],'DEFrel.csv')
table_name = 'umls_def_rel'
load_file(config, file_path, table_name)
def load_umls_tui_rel(config):
file_path = os.path.join(config['UMLS_SOURCE_DIR'],'TUIrel.csv')
table_name = 'umls_tui_rel'
load_file(config, file_path, table_name)
def build_xref_table(config):
dbxref_list = config['DBXREF_FILE_TABLE_INFO']
connection = None
sql = ''
try:
connection = mysql.connector.connect(
host=config['MYSQL_HOSTNAME'],
user=config['MYSQL_USERNAME'],
password=config['MYSQL_PASSWORD'],
database=config['MYSQL_DATABASE_NAME'],
charset='utf8mb4',collation='utf8mb4_bin')
cursor = connection.cursor(dictionary=True)
drop_table_sql = "DROP TABLE IF EXISTS dbxrefs"
cursor.execute(drop_table_sql)
create_table_sql = """CREATE TABLE dbxrefs (
id INT NOT NULL AUTO_INCREMENT,
ontology_uri VARCHAR(2048) NOT NULL,
xref VARCHAR(2048) NOT NULL,
PRIMARY KEY(id)
);"""
cursor.execute(create_table_sql)
for table_data in dbxref_list:
table_name = table_data['table_name']
sab = table_data['sab']
cursor.execute("SELECT ontology_uri, dbxrefs FROM {table_name}".format(table_name=table_name))
print("Loading {sab} data into table {table_name}".format(table_name="dbxrefs", sab=sab), end='', flush=True)
result = cursor.fetchall()
record_count = 0
for row in result:
ontology_uri = row['ontology_uri']
all_xrefs = row['dbxrefs']
xref_list = all_xrefs.split('|')
# For each row in the ontology_dbxref table, split the dbxrefs column into a list
for ref in xref_list:
# for each xref in the list, insert a new row into the dbxrefs table
ref = ref.replace("'","''")
sql = "INSERT INTO dbxrefs (ontology_uri, xref) VALUES ('{ontology_uri}','{ref}')".format(ontology_uri=ontology_uri, ref=ref)
cursor.execute(sql)
record_count = record_count + 1
if record_count % 10000 == 0:
print('.', end='', flush=True)
connection.commit()
print('')
connection.commit()
print ("Done loading the {table_name} table.".format(table_name="dbxrefs"))
except mysql.connector.Error as err:
print("Error in SQL: " + sql )
if err.errno == errorcode.ER_ACCESS_DENIED_ERROR:
print("Something is wrong with your user name or password")
elif err.errno == errorcode.ER_BAD_DB_ERROR:
print("Database does not exist")
else:
print(err)
connection.rollback()
finally:
if connection != None:
connection.close()
def load_file(config, file_path, table_name):
connection = None
sql = ''
try:
connection = mysql.connector.connect(
host=config['MYSQL_HOSTNAME'],
user=config['MYSQL_USERNAME'],
password=config['MYSQL_PASSWORD'],
database=config['MYSQL_DATABASE_NAME'],
charset='utf8mb4',collation='utf8mb4_bin')
cursor = connection.cursor()
record_count = 0
with open(file_path) as csvfile:
myCSVReader = None
if file_path.endswith('.txt'):
myCSVReader = csv.DictReader(csvfile, delimiter='\t')
else:
myCSVReader = csv.DictReader(csvfile)
field_names = myCSVReader.fieldnames
if 'name_lc' in field_names:
field_names.remove('name_lc')
if 'REL' in field_names:
field_names.remove('REL')
if 'RELA' in field_names:
field_names.remove('RELA')
if (file_path.endswith('CUI-SUIs.csv') or
file_path.endswith('CUI-TUIs.csv') or
file_path.endswith('DEFrel.csv') or
file_path.endswith('TUIrel.csv')):
field_names.append('type')
field_list_str = '%s' % ', '.join(map(str, field_names))
field_list_str = field_list_str.replace(':ID', '')
field_list_str = field_list_str.replace(':', '')
value_list_str = ''
for field in field_names:
value_list_str += '%({field})s, '.format(field=field)
value_list_str = value_list_str[:-2]
sql = """INSERT INTO {table_name}({field_list})
VALUE ({value_list})""".format(table_name=table_name, field_list=field_list_str, value_list=value_list_str)
print("Loading data from {file_name} into table {table_name}".format(file_name=file_path, table_name=table_name), end='', flush=True)
for row in myCSVReader:
if file_path.endswith('CUI-SUIs.csv'):
row['type'] = 'PREF_TERM'
if file_path.endswith('CUI-TUIs.csv'):
row['type'] = 'STY'
if file_path.endswith('DEFrel.csv'):
row['type'] = 'DEF'
if file_path.endswith('TUIrel.csv'):
row['type'] = 'ISA_STY'
if table_name == 'suis':
if None in row.keys():
row.pop(None)
if None in row.keys():
row.pop(None)
cursor.execute(sql, row)
record_count = record_count + 1
if record_count % 200000 == 0:
print('.', end='', flush=True)
connection.commit()
print('')
connection.commit()
print ("Done loading the {table_name} table.".format(table_name=table_name))
except mysql.connector.Error as err:
print("Error in SQL: " + sql )
if err.errno == errorcode.ER_ACCESS_DENIED_ERROR:
print("Something is wrong with your user name or password")
elif err.errno == errorcode.ER_BAD_DB_ERROR:
print("Database does not exist")
else:
print(err)
connection.rollback()
finally:
if connection != None:
connection.close()
def extract_non_umls(config):
load_node_metadata(config)
load_relations(config)
load_dbxref(config)
load_edge_list(config)
load_synonym_list(config)
create_missing_codeids(config)
def extract(config):
create_database(config)
load_node_metadata(config)
load_relations(config)
load_dbxref(config)
load_edge_list(config)
load_synonym_list(config)
load_umls_codes(config)
load_umls_defs(config)
load_umls_suis(config)
load_umls_cuis(config)
load_umls_tuis(config)
load_umls_cui_codes(config)
load_umls_code_suis(config)
load_umls_cui_cuis(config)
load_umls_cui_suis(config)
load_umls_cui_tuis(config)
load_umls_def_rel(config)
load_umls_tui_rel(config)
create_missing_codeids(config)
create_indices(config)
print("Done with extract process")
def build_ambiguous_codes_table(config):
connection = None
sql = ''
try:
connection = mysql.connector.connect(
host=config['MYSQL_HOSTNAME'],
user=config['MYSQL_USERNAME'],
password=config['MYSQL_PASSWORD'],
database=config['MYSQL_DATABASE_NAME'],
charset='utf8mb4',collation='utf8mb4_bin')
cursor = connection.cursor(dictionary=True)
drop_table_sql = "DROP TABLE IF EXISTS temp_ambiguous_codes"
cursor.execute(drop_table_sql)
create_table_sql = """CREATE TABLE temp_ambiguous_codes (
id INT NOT NULL AUTO_INCREMENT,
ontology_uri VARCHAR(2048) NOT NULL,
codeid VARCHAR(2048),
PRIMARY KEY(id)
);"""
cursor.execute(create_table_sql)
print("Created table temp_ambiguous_codes")
sql = """INSERT INTO temp_ambiguous_codes (ontology_uri, codeid)
SELECT DISTINCT ontology_uri, xref as codeid
FROM dbxrefs, umls_cui_codes as rel
WHERE xref = rel.end_id
GROUP BY ontology_uri, xref
HAVING COUNT(DISTINCT rel.start_id) > 1"""
cursor.execute(sql)
connection.commit()
print("Loaded codes into table temp_ambiguous_codes")
except mysql.connector.Error as err:
print("Error in SQL: " + sql )
if err.errno == errorcode.ER_ACCESS_DENIED_ERROR:
print("Something is wrong with your user name or password")
elif err.errno == errorcode.ER_BAD_DB_ERROR:
print("Database does not exist")
else:
print(err)
connection.rollback()
finally:
if connection != None:
connection.close()
def temp_build_ccf_code_cui_table(config):
connection = None
sql = ''
try:
connection = mysql.connector.connect(
host=config['MYSQL_HOSTNAME'],
user=config['MYSQL_USERNAME'],
password=config['MYSQL_PASSWORD'],
database=config['MYSQL_DATABASE_NAME'],
charset='utf8mb4',collation='utf8mb4_bin')
cursor = connection.cursor(dictionary=True)
drop_table_sql = "DROP TABLE IF EXISTS temp_ccf_cui_codes"
cursor.execute(drop_table_sql)
create_table_sql = """CREATE TABLE temp_ccf_cui_codes (
id INT NOT NULL AUTO_INCREMENT,
codeid VARCHAR(2048) NOT NULL,
cui VARCHAR(2048),
PRIMARY KEY(id)
);"""
cursor.execute(create_table_sql)
print("Created table temp_ccf_cui_codes")
sql = "ALTER TABLE temp_ccf_cui_codes ADD INDEX temp_ccf_cui_codes_codeid(codeid(50))"
cursor.execute(sql)
sql = "ALTER TABLE temp_ccf_cui_codes ADD INDEX temp_ccf_cui_codes_cui_idx(cui(50))"
cursor.execute(sql)
cursor = connection.cursor()
record_count = 0
file_path = '/home/chb69/umls_data/ccf/CCF-CUI.csv'
with open(file_path) as csvfile:
myCSVReader = None
if file_path.endswith('.txt'):
myCSVReader = csv.DictReader(csvfile, delimiter='\t')
else:
myCSVReader = csv.DictReader(csvfile)
field_names = myCSVReader.fieldnames
print("Loading data from {file_name} into table {table_name}".format(file_name=file_path, table_name='temp_ccf_cui_codes'), end='', flush=True)
for row in myCSVReader:
sql = "INSERT INTO temp_ccf_cui_codes (codeid, cui) VALUES ('{codeid}','{cui}')".format(codeid=row['b.CodeID'],cui=row['c.CUI'])
cursor.execute(sql)
connection.commit()
print ("Done loading the {table_name} table.".format(table_name="temp_ccf_cui_codes"))
except mysql.connector.Error as err:
print("Error in SQL: " + sql )
if err.errno == errorcode.ER_ACCESS_DENIED_ERROR:
print("Something is wrong with your user name or password")
elif err.errno == errorcode.ER_BAD_DB_ERROR:
print("Database does not exist")
else:
print(err)
connection.rollback()
finally:
if connection != None:
connection.close()
def build_ontology_uri_to_umls_map_table(config):
connection = None
sql = ''
try:
connection = mysql.connector.connect(
host=config['MYSQL_HOSTNAME'],
user=config['MYSQL_USERNAME'],
password=config['MYSQL_PASSWORD'],
database=config['MYSQL_DATABASE_NAME'],
charset='utf8mb4',collation='utf8mb4_bin')
cursor = connection.cursor(dictionary=True)
drop_table_sql = "DROP TABLE IF EXISTS ontology_uri_map"
cursor.execute(drop_table_sql)
create_table_sql = """CREATE TABLE ontology_uri_map (
id INT NOT NULL AUTO_INCREMENT,
ontology_uri VARCHAR(2048) NOT NULL,
cui VARCHAR(2048),
codeid VARCHAR(2048),
type VARCHAR(50),
mapping_type VARCHAR(50),
sab VARCHAR(50),
PRIMARY KEY(id)
);"""
cursor.execute(create_table_sql)
print("Created table ontology_uri_map")
sql = """INSERT INTO ontology_uri_map (ontology_uri, cui)
SELECT DISTINCT ontology_uri, substr(xref,6) as CUI FROM dbxrefs
WHERE xref LIKE 'UMLS%'"""
# these records will have their codeid column set to NULL
cursor.execute(sql)
connection.commit()
print("Loaded UMLS map into table ontology_uri_map")
sql = """INSERT INTO ontology_uri_map (ontology_uri, codeid, cui, type, sab)
SELECT DISTINCT ontology_uri, xref as codeid, rel.start_id as cui, 'PT' as type, substring_index(xref,' ', 1) as sab
FROM dbxrefs, umls_cui_codes as rel
WHERE xref = rel.end_id
AND (ontology_uri, xref) NOT IN (SELECT ontology_uri,codeid FROM temp_ambiguous_codes)"""
# This query loads all the ontology_uri's that map to a code according to the dbxrefs table
cursor.execute(sql)
connection.commit()
print("Loaded map into table ontology_uri_map")
sql = "ALTER TABLE ontology_uri_map ADD INDEX ontology_uri_map_ontology_uri_idx(ontology_uri(50))"
cursor.execute(sql)
sql = "ALTER TABLE ontology_uri_map ADD INDEX ontology_uri_map_cui_idx(cui(50))"
cursor.execute(sql)
sql = "ALTER TABLE ontology_uri_map ADD INDEX ontology_uri_map_codeid_idx(codeid(50))"
cursor.execute(sql)
sql = "ALTER TABLE ontology_uri_map ADD INDEX ontology_uri_map_type_idx(type(50))"
cursor.execute(sql)
sql = "ALTER TABLE ontology_uri_map ADD INDEX ontology_uri_map_mapping_type_idx(mapping_type(50))"
cursor.execute(sql)
sql = "ALTER TABLE ontology_uri_map ADD INDEX ontology_uri_map_sab_idx(sab(50))"
cursor.execute(sql)
print("Built indices for table ontology_uri_map")
sql = """UPDATE ontology_uri_map SET mapping_type = 'PRIMARY' where codeid is null AND ontology_uri IN (
SELECT ontology_uri from (SELECT ontology_uri FROM ontology_uri_map
where codeid is null
group by ontology_uri
having count(distinct cui) = 1) as table_one)"""
cursor.execute(sql)
connection.commit()
print("Loaded PRIMARY CUI map data into table ontology_uri_map")
except mysql.connector.Error as err:
print("Error in SQL: " + sql )
if err.errno == errorcode.ER_ACCESS_DENIED_ERROR:
print("Something is wrong with your user name or password")
elif err.errno == errorcode.ER_BAD_DB_ERROR:
print("Database does not exist")
else:
print(err)
connection.rollback()
finally:
if connection != None:
connection.close()
def build_relations_table(config):
relations_table_info = config['RELATIONS_FILE_TABLE_INFO']
connection = None
sql = ''
try:
connection = mysql.connector.connect(
host=config['MYSQL_HOSTNAME'],
user=config['MYSQL_USERNAME'],
password=config['MYSQL_PASSWORD'],
database=config['MYSQL_DATABASE_NAME'],
charset='utf8mb4',collation='utf8mb4_bin')
cursor = connection.cursor(dictionary=True)
drop_table_sql = "DROP TABLE IF EXISTS relations"
cursor.execute(drop_table_sql)
create_table_sql = """CREATE TABLE relations (
id INT NOT NULL AUTO_INCREMENT,
relation_id VARCHAR(2048) NOT NULL,
relation_label VARCHAR(2048) NOT NULL,
inverse_relation_label VARCHAR(2048),
sab VARCHAR(50),
PRIMARY KEY(id));"""
cursor.execute(create_table_sql)
print("Created table relations")
for table_info in relations_table_info:
table_name = table_info['table_name']
sab = table_info['sab']
sql = """INSERT INTO relations (relation_id, relation_label, inverse_relation_label, sab)
SELECT relation_id, relation_label, inverse_relation_label, '{sab}' FROM {table_name}""".format(table_name=table_name, sab=sab)
cursor.execute(sql)
connection.commit()
print("Loaded {sab} relations data into table relations".format(sab=sab))
sql = """UPDATE relations r1
LEFT JOIN relations r2
ON r1.relation_id = r2.relation_id
SET r1.inverse_relation_label = CONCAT('inverse ', r2.relation_label)
WHERE r2.inverse_relation_label IS NULL"""
cursor.execute(sql)
connection.commit()
print("Added inverse relations for {sab} data into table relations".format(sab=sab))
except mysql.connector.Error as err:
print("Error in SQL: " + sql )
if err.errno == errorcode.ER_ACCESS_DENIED_ERROR:
print("Something is wrong with your user name or password")
elif err.errno == errorcode.ER_BAD_DB_ERROR:
print("Database does not exist")
else:
print(err)
        if connection is not None:
            connection.rollback()
    finally:
        if connection is not None:
            connection.close()
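# Rebuilds the CUI-to-CUI edges contributed by each configured edge list file:
# existing rows for the SAB are deleted, then forward (subject -> object) and
# inverse (object -> subject) edges are inserted, using only URIs with a
# PRIMARY mapping and skipping self-loops where both sides share a CUI.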
def insert_new_cui_cui_relations(config):
connection = None
sql = ''
try:
connection = mysql.connector.connect(
host=config['MYSQL_HOSTNAME'],
user=config['MYSQL_USERNAME'],
password=config['MYSQL_PASSWORD'],
database=config['MYSQL_DATABASE_NAME'],
charset='utf8mb4',collation='utf8mb4_bin')
cursor = connection.cursor(dictionary=True)
edge_list_file_info = config['EDGE_LIST_FILE_TABLE_INFO']
for edge_list_info in edge_list_file_info:
sab = edge_list_info['sab']
table_name = edge_list_info['table_name']
sql = """DELETE FROM umls_cui_cuis WHERE sab = '{sab}'""".format(sab=sab)
cursor.execute(sql)
connection.commit()
print('')
print("Deleted {sab} map from table umls_cui_cuis".format(sab=sab))
sql = """INSERT INTO umls_cui_cuis (start_id, type, end_id, sab)
                SELECT DISTINCT subject_table.cui as start_id, lower(replace(rel.relation_label,' ','_')) as type, object_table.cui as end_id, '{sab}' as sab
FROM {table_name} el, relations rel, ontology_uri_map subject_table, ontology_uri_map object_table
WHERE rel.relation_id = el.predicate
AND subject_table.ontology_uri = el.subject
AND subject_table.mapping_type = 'PRIMARY'
AND object_table.ontology_uri = el.object
AND object_table.mapping_type = 'PRIMARY'
AND subject_table.cui != object_table.cui
AND el.sab = '{sab}'""".format(table_name=table_name,sab=sab)
cursor.execute(sql)
connection.commit()
print("Loaded {sab} map into table umls_cui_cuis".format(sab=sab))
sql = """INSERT INTO umls_cui_cuis (start_id, type, end_id, sab)
                SELECT DISTINCT object_table.cui as start_id, lower(replace(rel.inverse_relation_label,' ','_')) as type, subject_table.cui as end_id, '{sab}' as sab
FROM {table_name} el, relations rel, ontology_uri_map subject_table, ontology_uri_map object_table
WHERE rel.relation_id = el.predicate
AND subject_table.ontology_uri = el.subject
AND subject_table.mapping_type = 'PRIMARY'
AND object_table.ontology_uri = el.object
AND object_table.mapping_type = 'PRIMARY'
AND subject_table.cui != object_table.cui
AND rel.inverse_relation_label IS NOT NULL
AND el.sab = '{sab}'""".format(table_name=table_name,sab=sab)
cursor.execute(sql)
connection.commit()
print("Loaded {sab} inverse relation map into table umls_cui_cuis".format(sab=sab))
except mysql.connector.Error as err:
print("Error in SQL: " + sql )
if err.errno == errorcode.ER_ACCESS_DENIED_ERROR:
print("Something is wrong with your user name or password")
elif err.errno == errorcode.ER_BAD_DB_ERROR:
print("Database does not exist")
else:
print(err)
        if connection is not None:
            connection.rollback()
    finally:
        if connection is not None:
            connection.close()
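# Refreshes the *_updated term tables from the UMLS base tables, then inserts
# terms for codes that are not yet linked to a SUI. New SUIs are minted with an
# 'HS' prefix and deduplicated per label via dict_new_suis; new 'HC' concepts
# additionally get a PREF_TERM edge in cui_suis_updated.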
def insert_new_terms(config):
connection = None
sql = ''
try:
connection = mysql.connector.connect(
host=config['MYSQL_HOSTNAME'],
user=config['MYSQL_USERNAME'],
password=config['MYSQL_PASSWORD'],
database=config['MYSQL_DATABASE_NAME'],
charset='utf8mb4',collation='utf8mb4_bin')
cursor = connection.cursor(dictionary=True)
truncate_table_sql = "TRUNCATE suis_updated"
cursor.execute(truncate_table_sql)
connection.commit()
truncate_table_sql = "TRUNCATE code_suis_updated"
cursor.execute(truncate_table_sql)
connection.commit()
truncate_table_sql = "TRUNCATE new_sui_map"
cursor.execute(truncate_table_sql)
connection.commit()
truncate_table_sql = """TRUNCATE cui_suis_updated"""
cursor.execute(truncate_table_sql)
connection.commit()
print ("Copying cui_suis INTO cui_suis_updated")
sql = """INSERT INTO cui_suis_updated SELECT * FROM umls_cui_suis"""
cursor.execute(sql)
connection.commit()
sql = "DROP INDEX suis_updated_sui_idx ON suis_updated"
cursor.execute(sql)
connection.commit()
sql = "DROP INDEX suis_updated_name_idx ON suis_updated"
cursor.execute(sql)
connection.commit()
print ("Copying suis INTO suis_updated")
sql = """INSERT INTO suis_updated SELECT * FROM umls_suis"""
cursor.execute(sql)
connection.commit()
sql = "ALTER TABLE suis_updated ADD INDEX suis_updated_sui_idx (sui(100))"
cursor.execute(sql)
connection.commit()
sql = "ALTER TABLE suis_updated ADD INDEX suis_updated_name_idx (name(500))"
cursor.execute(sql)
connection.commit()
sql = "DROP INDEX code_suis_updated_start_id_idx ON code_suis_updated"
cursor.execute(sql)
connection.commit()
sql = "DROP INDEX code_suis_updated_end_id_idx ON code_suis_updated"
cursor.execute(sql)
connection.commit()
sql = "DROP INDEX code_suis_updated_type_idx ON code_suis_updated"
cursor.execute(sql)
connection.commit()
sql = "DROP INDEX code_suis_updated_cui_idx ON code_suis_updated"
cursor.execute(sql)
connection.commit()
print ("Copying code_suis INTO code_suis_updated")
sql = """INSERT INTO code_suis_updated SELECT * FROM umls_code_suis"""
cursor.execute(sql)
connection.commit()
sql = "ALTER TABLE code_suis_updated ADD INDEX code_suis_updated_start_id_idx (start_id(100))"
cursor.execute(sql)
connection.commit()
sql = "ALTER TABLE code_suis_updated ADD INDEX code_suis_updated_end_id_idx (end_id(100))"
cursor.execute(sql)
connection.commit()
sql = "ALTER TABLE code_suis_updated ADD INDEX code_suis_updated_type_idx (type(100))"
cursor.execute(sql)
connection.commit()
sql = "ALTER TABLE code_suis_updated ADD INDEX code_suis_updated_cui_idx (cui(100))"
cursor.execute(sql)
connection.commit()
node_metadata_info = config['NODE_METADATA_FILE_TABLE_INFO']
record_count = 1
for table_info in node_metadata_info:
table_name = table_info['table_name']
sab = table_info['sab']
dict_new_suis = {}
sql = """SELECT oum.ontology_uri as ontology_uri, oum.cui AS cui, IFNULL(oum.codeid,nm.codeid) AS codeid, nm.node_label AS label, '{sab}' as sab, su.sui AS sui, 'PT' AS term_type
FROM {table_name} nm
INNER JOIN ontology_uri_map oum
ON nm.ontology_uri = oum.ontology_uri
AND oum.mapping_type = 'PRIMARY'
LEFT OUTER JOIN suis_updated su
ON nm.node_label = su.name
WHERE oum.codeid is null OR oum.codeid NOT IN (select start_id FROM code_suis_updated)""".format(table_name=table_name,sab=sab)
cursor.execute(sql)
result = cursor.fetchall()
print("")
print ("Loading tables suis_updated, code_suis_updated, and new_sui_map for SAB: {sab}".format(sab=sab), end='', flush=True)
for row in result:
ontology_uri = row['ontology_uri']
cui = row['cui']
codeid = row['codeid']
code_list = str(codeid).split(' ')
code = code_list[1]
label = row['label']
term_type = row['term_type']
sui = row['sui']
                if sui is None:
                    if label in dict_new_suis:
sui = dict_new_suis[label]
else:
sui = 'HS' + str(record_count).zfill(6)
sql = """INSERT INTO suis_updated (sui, name) VALUES ('{sui}',"{name}")""".format(sui=sui,name=label)
cursor.execute(sql)
sql = """INSERT INTO new_sui_map (codeid, sui, name) VALUES ('{codeid}','{sui}',"{name}")""".format(codeid=codeid,sui=sui,name=label)
cursor.execute(sql)
dict_new_suis[label] = sui
sql = """INSERT INTO code_suis_updated (start_id, end_id, type, cui) VALUES ('{codeid}','{sui}','{term_type}','{cui}')""".format(codeid=codeid,sui=sui,cui=cui,term_type=term_type)
cursor.execute(sql)
if 'HC' in cui and term_type == 'PT':
sql = """INSERT INTO cui_suis_updated (start_id, end_id, type) VALUES ('{cui}','{sui}','PREF_TERM')""".format(cui=cui,sui=sui)
cursor.execute(sql)
record_count = record_count + 1
if record_count % 10000 == 0:
print('.', end='', flush=True)
connection.commit()
connection.commit()
print('')
insert_new_synonyms(config, record_count)
except mysql.connector.Error as err:
print("Error in SQL: " + sql )
if err.errno == errorcode.ER_ACCESS_DENIED_ERROR:
print("Something is wrong with your user name or password")
elif err.errno == errorcode.ER_BAD_DB_ERROR:
print("Database does not exist")
else:
print(err)
        if connection is not None:
            connection.rollback()
    finally:
        if connection is not None:
            connection.close()
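# Inserts synonym terms (term type 'SY') from the configured synonym tables,
# continuing the 'HS' SUI numbering started by insert_new_terms. The step is
# skipped when SYNONYM_LIST_FILE_TABLE_INFO is absent from the config.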
def insert_new_synonyms(config, record_count):
if 'SYNONYM_LIST_FILE_TABLE_INFO' not in config:
return
synonym_list = config['SYNONYM_LIST_FILE_TABLE_INFO']
connection = None
sql = ''
try:
connection = mysql.connector.connect(
host=config['MYSQL_HOSTNAME'],
user=config['MYSQL_USERNAME'],
password=config['MYSQL_PASSWORD'],
database=config['MYSQL_DATABASE_NAME'],
charset='utf8mb4',collation='utf8mb4_bin')
cursor = connection.cursor(dictionary=True)
for table_info in synonym_list:
sab = table_info['sab']
table_name = table_info['table_name']
dict_new_suis = {}
sql = """SELECT DISTINCT oum.ontology_uri as ontology_uri, oum.cui AS cui,nm.codeid AS codeid, nm.synonym AS label, '{sab}' as sab, su.sui AS sui, 'SY' AS term_type
FROM {table_name} nm
INNER JOIN ontology_uri_map oum
ON nm.ontology_uri = oum.ontology_uri
LEFT OUTER JOIN suis_updated su
ON nm.synonym = su.name""".format(table_name=table_name,sab=sab)
cursor.execute(sql)
result = cursor.fetchall()
print ("Loading tables suis_updated, code_suis_updated, and new_sui_map for SAB: {sab}".format(sab=sab), end='', flush=True)
for row in result:
ontology_uri = row['ontology_uri']
cui = row['cui']
codeid = row['codeid']
code_list = str(codeid).split(' ')
code = code_list[1]
label = row['label']
term_type = row['term_type']
sui = row['sui']
                if sui is None:
                    if label in dict_new_suis:
sui = dict_new_suis[label]
else:
sui = 'HS' + str(record_count).zfill(6)
sql = """INSERT INTO suis_updated (sui, name) VALUES ('{sui}',"{name}")""".format(sui=sui,name=label)
cursor.execute(sql)
sql = """INSERT INTO new_sui_map (codeid, sui, name) VALUES ('{codeid}','{sui}',"{name}")""".format(codeid=codeid,sui=sui,name=label)
cursor.execute(sql)
dict_new_suis[label] = sui
sql = """INSERT INTO code_suis_updated (start_id, end_id, type, cui) VALUES ('{codeid}','{sui}','{term_type}','{cui}')""".format(codeid=codeid,sui=sui,cui=cui,term_type=term_type)
cursor.execute(sql)
record_count = record_count + 1
if record_count % 10000 == 0:
print('.', end='', flush=True)
connection.commit()
connection.commit()
except mysql.connector.Error as err:
print("Error in SQL: " + sql )
if err.errno == errorcode.ER_ACCESS_DENIED_ERROR:
print("Something is wrong with your user name or password")
elif err.errno == errorcode.ER_BAD_DB_ERROR:
print("Database does not exist")
else:
print(err)
        if connection is not None:
            connection.rollback()
    finally:
        if connection is not None:
            connection.close()
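# Mints a new 'HC'-prefixed CUI for every node without a PRIMARY mapping to an
# existing UMLS concept, registering it in ontology_uri_map, cuis_updated,
# umls_codes and cui_codes_updated.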
def insert_new_cuis(config):
connection = None
sql = ''
try:
connection = mysql.connector.connect(
host=config['MYSQL_HOSTNAME'],
user=config['MYSQL_USERNAME'],
password=config['MYSQL_PASSWORD'],
database=config['MYSQL_DATABASE_NAME'],
charset='utf8mb4',collation='utf8mb4_bin')
cursor = connection.cursor(dictionary=True)
truncate_table_sql = "TRUNCATE cuis_updated"
cursor.execute(truncate_table_sql)
connection.commit()
print ("Truncating cui_codes_updated")
sql = """TRUNCATE cui_codes_updated"""
cursor.execute(sql)
connection.commit()
print ("Copying cuis INTO cuis_updated")
sql = """INSERT INTO cuis_updated SELECT * FROM umls_cuis"""
cursor.execute(sql)
connection.commit()
print ("Deleting HuBMAP CUIs")
sql = """DELETE FROM ontology_uri_map WHERE cui LIKE 'HC%'"""
cursor.execute(sql)
connection.commit()
print ("Copying cuis INTO cui_codes_updated")
sql = """INSERT INTO cui_codes_updated SELECT * FROM umls_cui_codes"""
cursor.execute(sql)
connection.commit()
node_metadata_info = config['NODE_METADATA_FILE_TABLE_INFO']
record_count = 1
print ("Creating new HCUI's and codes")
for table_info in node_metadata_info:
sab = table_info['sab']
table_name = table_info['table_name']
print ("Deleting {sab} codes from umls_codes".format(sab=sab))
sql = """DELETE FROM umls_codes WHERE sab = '{sab}'""".format(sab=sab)
# remove old records for the sab
cursor.execute(sql)
connection.commit()
print("Loading node metadata for {sab}".format(sab=sab))
sql = """SELECT ontology_uri AS ontology_uri, codeid AS codeid, sab AS sab FROM {table_name} nm
WHERE nm.ontology_uri NOT IN (SELECT ontology_uri FROM ontology_uri_map WHERE mapping_type = 'PRIMARY')""".format(table_name=table_name)
cursor.execute(sql)
result = cursor.fetchall()
for row in result:
ontology_uri = row['ontology_uri']
cui = 'HC' + str(record_count).zfill(6)
# mint a new CUI using the HC prefix
record_count = record_count + 1
current_sab = sab
codeid = row['codeid']
code_list = str(codeid).split(' ')
code = code_list[1]
sql = """INSERT INTO ontology_uri_map (ontology_uri,codeid,cui,sab,mapping_type) VALUES ('{ontology_uri}','{codeid}','{cui}','{sab}','PRIMARY')""".format(codeid=codeid,cui=cui,ontology_uri=ontology_uri,sab=current_sab)
# add the new HCUI to the ontology_uri_map
cursor.execute(sql)
sql = """INSERT INTO cuis_updated (cui) VALUES ('{cui}')""".format(cui=cui)
# add the new HCUI to the cuis_updated table
cursor.execute(sql)
connection.commit()
sql = """INSERT INTO umls_codes (codeid, sab,code) VALUES ('{codeid}','{sab}','{code}')""".format(codeid=codeid,sab=current_sab,code=code)
# add the new Code information to umls_codes
cursor.execute(sql)
connection.commit()
sql = """INSERT INTO cui_codes_updated (start_id, end_id) VALUES ('{cui}','{codeid}')""".format(cui=cui,codeid=codeid)
# connect the new HCUI to its new Code
cursor.execute(sql)
connection.commit()
except mysql.connector.Error as err:
print("Error in SQL: " + sql )
if err.errno == errorcode.ER_ACCESS_DENIED_ERROR:
print("Something is wrong with your user name or password")
elif err.errno == errorcode.ER_BAD_DB_ERROR:
print("Database does not exist")
else:
print(err)
        if connection is not None:
            connection.rollback()
    finally:
        if connection is not None:
            connection.close()
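# Adds codes that appear in the node metadata but are missing from umls_codes,
# linking each new code to its mapped CUI in cui_codes_updated.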
def insert_new_codes(config):
connection = None
sql = ''
try:
connection = mysql.connector.connect(
host=config['MYSQL_HOSTNAME'],
user=config['MYSQL_USERNAME'],
password=config['MYSQL_PASSWORD'],
database=config['MYSQL_DATABASE_NAME'],
charset='utf8mb4',collation='utf8mb4_bin')
cursor = connection.cursor(dictionary=True)
node_metadata_info = config['NODE_METADATA_FILE_TABLE_INFO']
for table_info in node_metadata_info:
table_name = table_info['table_name']
current_sab = table_info['sab']
sql = """SELECT nm.ontology_uri as ontology_uri, nm.codeid as codeid, oum.cui as cui, nm.sab as sab
FROM {table_name} nm, ontology_uri_map oum
WHERE oum.ontology_uri = nm.ontology_uri
and oum.codeid IS NOT NULL
and nm.codeid not in (select codeid from umls_codes)""".format(table_name=table_name)
# this SQL finds all the codes in the current node_metadata missing from the umls_codes table
# these are the codes we need to add
cursor.execute(sql)
result = cursor.fetchall()
print ("Creating new codes for sab: {sab}".format(sab=current_sab))
for row in result:
cui = row['cui']
codeid = row['codeid']
code_list = str(codeid).split(' ')
code = code_list[1]
sql = """INSERT INTO umls_codes (codeid, sab,code) VALUES ('{codeid}','{sab}','{code}')""".format(codeid=codeid,sab=current_sab,code=code)
cursor.execute(sql)
connection.commit()
sql = """INSERT INTO cui_codes_updated (start_id, end_id) VALUES ('{cui}','{codeid}')""".format(cui=cui,codeid=codeid)
cursor.execute(sql)
connection.commit()
except mysql.connector.Error as err:
print("Error in SQL: " + sql )
if err.errno == errorcode.ER_ACCESS_DENIED_ERROR:
print("Something is wrong with your user name or password")
elif err.errno == errorcode.ER_BAD_DB_ERROR:
print("Database does not exist")
else:
print(err)
        if connection is not None:
            connection.rollback()
    finally:
        if connection is not None:
            connection.close()
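# Loads node definitions into defs_updated/def_rel_updated, minting 'HAT'
# attribute ids (ATUIs) and skipping placeholder definitions ('None' or '.').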
def insert_new_defs(config):
connection = None
sql = ''
try:
connection = mysql.connector.connect(
host=config['MYSQL_HOSTNAME'],
user=config['MYSQL_USERNAME'],
password=config['MYSQL_PASSWORD'],
database=config['MYSQL_DATABASE_NAME'],
charset='utf8mb4',collation='utf8mb4_bin')
cursor = connection.cursor(dictionary=True)
truncate_table_sql = "TRUNCATE defs_updated"
cursor.execute(truncate_table_sql)
connection.commit()
truncate_table_sql = "TRUNCATE def_rel_updated"
cursor.execute(truncate_table_sql)
connection.commit()
truncate_table_sql = "TRUNCATE new_def_map"
cursor.execute(truncate_table_sql)
connection.commit()
print("")
print ("Copying defs INTO defs_updated")
sql = """INSERT INTO defs_updated SELECT * FROM umls_defs"""
cursor.execute(sql)
connection.commit()
print ("Copying def_rel INTO def_rel_updated")
sql = """INSERT INTO def_rel_updated SELECT * FROM umls_def_rel"""
cursor.execute(sql)
connection.commit()
node_metadata_info = config['NODE_METADATA_FILE_TABLE_INFO']
        record_count = 1  # start ATUI numbering at one
for table_info in node_metadata_info:
table_name = table_info['table_name']
sab = table_info['sab']
sql = """SELECT oum.cui, nm.node_definition, '{sab}' as sab
FROM {table_name} nm, ontology_uri_map oum
WHERE nm.ontology_uri = oum.ontology_uri
AND oum.mapping_type = 'PRIMARY'
AND node_definition <> 'None'
AND node_definition <> '.'""".format(table_name=table_name,sab=sab)
cursor.execute(sql)
result = cursor.fetchall()
print("")
print ("Loading tables defs_updated, def_rels_updated, and new_def_map", end='', flush=True)
for row in result:
cui = row['cui']
node_definition = row['node_definition']
sab = row['sab']
atui = 'HAT' + str(record_count).zfill(6)
record_count = record_count + 1
if '"' in node_definition:
node_definition = node_definition.replace('"','\\"')
sql = """INSERT INTO defs_updated (atui, sab, def) VALUES ('{atui}','{sab}',"{node_definition}")""".format(atui=atui,sab=sab,node_definition=node_definition)
cursor.execute(sql)
sql = """INSERT INTO def_rel_updated (start_id, end_id, type, sab) VALUES ('{cui}','{atui}','DEF','{sab}')""".format(atui=atui,sab=sab,cui=cui)
cursor.execute(sql)
sql = """INSERT INTO new_def_map (cui, atui, node_definition, sab) VALUES ('{cui}','{atui}',"{node_definition}", '{sab}')""".format(atui=atui,sab=sab,cui=cui,node_definition=node_definition)
cursor.execute(sql)
#commit every 10,000 records
if record_count % 10000 == 0:
print('.', end='', flush=True)
connection.commit()
connection.commit()
except mysql.connector.Error as err:
print("Error in SQL: " + sql )
if err.errno == errorcode.ER_ACCESS_DENIED_ERROR:
print("Something is wrong with your user name or password")
elif err.errno == errorcode.ER_BAD_DB_ERROR:
print("Database does not exist")
else:
print(err)
        if connection is not None:
            connection.rollback()
    finally:
        if connection is not None:
            connection.close()
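# Runs the full transform pipeline. Order matters here: the URI-to-UMLS map
# and relations tables must exist before the new CUIs, codes, terms,
# definitions and CUI-CUI edges can be derived from them.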
def transform(config):
build_xref_table(config)
# This code is temporary. It should be moved to a pre-processing step
fix_dbxrefs(config)
# END This code is temporary. It should be moved to a pre-processing step
build_ambiguous_codes_table(config)
build_ontology_uri_to_umls_map_table(config)
build_relations_table(config)
insert_new_cuis(config)
insert_new_codes(config)
insert_new_terms(config)
insert_new_defs(config)
insert_new_cui_cui_relations(config)
print('') # do this to disable the 'end' flag in prior print statements
print("Done with transform process")
def load(config):
export_files(config)
print('') # do this to disable the 'end' flag in prior print statements
print("Done with load process")
def export_files(config):
connection = None
sql = ''
try:
connection = mysql.connector.connect(
host=config['MYSQL_HOSTNAME'],
user=config['MYSQL_USERNAME'],
password=config['MYSQL_PASSWORD'],
database=config['MYSQL_DATABASE_NAME'],
charset='utf8mb4',collation='utf8mb4_bin')
cursor = connection.cursor(dictionary=True)
export_table_info = [{'table_name': 'umls_codes', 'file_name':'CODEs.csv','sql_columns':['codeid','sab','code'],'file_columns':['CodeID:ID','SAB','CODE']},
{'table_name': 'umls_tui_rel', 'file_name':'TUIrel.csv','sql_columns':['start_id','end_id'],'file_columns':[':START_ID',':END_ID']},
{'table_name': 'umls_cui_tuis', 'file_name':'CUI-TUIs.csv','sql_columns':['start_id','end_id'],'file_columns':[':START_ID',':END_ID']},
{'table_name': 'umls_cui_cuis', 'file_name':'CUI-CUIs.csv','sql_columns':['start_id','end_id','type','sab'],'file_columns':[':START_ID',':END_ID',':TYPE','SAB']},
{'table_name': 'cui_codes_updated', 'file_name':'CUI-CODEs.csv','sql_columns':['start_id','end_id'],'file_columns':[':START_ID',':END_ID']},
{'table_name': 'code_suis_updated', 'file_name':'CODE-SUIs.csv','sql_columns':['start_id','end_id','type','cui'],'file_columns':[':START_ID',':END_ID',':TYPE','CUI']},
{'table_name': 'cui_suis_updated', 'file_name':'CUI-SUIs.csv','sql_columns':['start_id','end_id'],'file_columns':[':START_ID',':END_ID']},
{'table_name': 'cuis_updated', 'file_name':'CUIs.csv','sql_columns':['cui'],'file_columns':['CUI:ID']},
{'table_name': 'suis_updated', 'file_name':'SUIs.csv','sql_columns':['sui','name'],'file_columns':['SUI:ID','name']},
{'table_name': 'umls_tuis', 'file_name':'TUIs.csv','sql_columns':['tui','name','stn','def'],'file_columns':['TUI:ID','name','STN','DEF']},
{'table_name': 'defs_updated', 'file_name':'DEFs.csv','sql_columns':['atui','sab','def'],'file_columns':['ATUI:ID','SAB','DEF']},
{'table_name': 'def_rel_updated', 'file_name':'DEFrel.csv','sql_columns':['start_id','end_id'],'file_columns':[':START_ID',':END_ID']}]
for export_info in export_table_info:
# walk through all the entries in the export_table_info list
table_name = export_info['table_name']
file_name = export_info['file_name']
sql_columns = export_info['sql_columns']
file_columns = export_info['file_columns']
file_path = os.path.join(config['OUTPUT_DIR'],file_name)
# set the output file path
sql = """SELECT DISTINCT {col_list} FROM {table_name}""".format(table_name=table_name,col_list=",".join(sql_columns))
# build the SELECT statement from the sql_columns variable. Also, apply a SQL 'DISTINCT' keyword to avoid duplicates
cursor.execute(sql)
result = cursor.fetchall()
print("")
print ("Writing data from {table_name} to file {file_path}".format(table_name=table_name,file_path=file_path), end='', flush=True)
f = open(file_path, 'w')
record_count = 0
writer = csv.writer(f,quoting=csv.QUOTE_ALL)
writer.writerow(file_columns)
# write the file_columns as the headers for the .CSV file
data_rows = []
for result_row in result:
data_list = []
for field in sql_columns:
data_list.append(result_row[field])
data_rows.append(data_list)
record_count = record_count + 1
#write every 100,000 records
if record_count % 100000 == 0:
print('.', end='', flush=True)
writer.writerows(data_rows)
# clear data_rows
data_rows = []
writer.writerows(data_rows)
f.close()
except mysql.connector.Error as err:
print("Error in SQL: " + sql )
if err.errno == errorcode.ER_ACCESS_DENIED_ERROR:
print("Something is wrong with your user name or password")
elif err.errno == errorcode.ER_BAD_DB_ERROR:
print("Database does not exist")
else:
print(err)
        if connection is not None:
            connection.rollback()
    finally:
        if connection is not None:
            connection.close()
# utility function
def isascii(s):
return len(s) == len(s.encode())
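# e.g. isascii('plain text') -> True, isascii('naïve') -> False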
if __name__ == '__main__':
parser = argparse.ArgumentParser()
    parser.add_argument('commands', type=str, nargs='+', default='extract transform load')
command_list = []
try:
args = parser.parse_args()
command_list = args.commands
    except SystemExit:  # argparse exits when no commands are given; fall back to the full pipeline
command_list = ['extract','extract_non_umls','transform','load']
file_path = os.path.join(os.path.abspath(os.path.dirname(__file__)))
#file_path = '/home/chb69/git/ontology-api/src/neo4j_loader'
file_name = 'app.cfg'
config = load_config(file_path, file_name)
#extract_non_umls(config)
#transform(config)
#load(config)
if 'extract_non_umls' in command_list:
extract_non_umls(config)
if 'extract' in command_list:
extract(config)
if 'transform' in command_list:
transform(config)
if 'load' in command_list:
load(config)
print("Done")
| true | true |
f7201460d7fc455a3f1d476f67b45706ae5482ed | 4,736 | py | Python | baselines/Termination_DEOC/run_atari_miniworld.py | anandkamat05/TDEOC | 11749457c3a7550e11ba1acc4784e8545f8087aa | [
"MIT"
] | 5 | 2020-11-10T21:38:04.000Z | 2021-08-11T01:34:50.000Z | baselines/Termination_DEOC/run_atari_miniworld.py | LARS12llt/TDEOC | 11749457c3a7550e11ba1acc4784e8545f8087aa | [
"MIT"
] | 8 | 2020-09-26T01:31:02.000Z | 2022-02-10T02:19:53.000Z | baselines/Termination_DEOC/run_atari_miniworld.py | LARS12llt/TDEOC | 11749457c3a7550e11ba1acc4784e8545f8087aa | [
"MIT"
] | 1 | 2020-11-18T03:20:26.000Z | 2020-11-18T03:20:26.000Z |
#!/usr/bin/env python
from baselines.common import set_global_seeds, tf_util as U
from baselines import bench
import os.path as osp
import gym, logging
from mpi4py import MPI
import pdb
from gym_extensions.continuous import mujoco
import gym_miniworld
from baselines import logger
import sys
def train(env_id, num_timesteps, seed, num_options, app, saves, wsaves, epoch, dc, render=False, caption='', deoc=False, tradeoff=0.1, term_mult=1.0, lr_mult=1.0, tdeoc=False):
from baselines.Termination_DEOC import cnn_policy, pposgd_simple
# U.make_session(num_cpu=1).__enter__()
# set_global_seeds(seed)
rank = MPI.COMM_WORLD.Get_rank()
sess = U.single_threaded_session()
sess.__enter__()
if rank == 0:
logger.configure()
else:
logger.configure(format_strs=[])
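    # Derive a distinct seed per MPI worker (base seed + 10000 * rank) so that
    # parallel workers sample different trajectories while staying reproducible.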
workerseed = seed + 10000 * MPI.COMM_WORLD.Get_rank() if seed is not None else None
set_global_seeds(workerseed)
env = gym.make(env_id)
env.seed(workerseed)
def policy_fn(name, ob_space, ac_space):
return cnn_policy.CnnPolicy(name=name, ob_space=ob_space, ac_space=ac_space,
hid_size=64, num_hid_layers=2, num_options=num_options, dc=dc)
env = bench.Monitor(env, logger.get_dir() and
osp.join(logger.get_dir(), str(rank)))
    if num_options == 1:
        optimsize = 64
    elif num_options > 1 and num_options < 5:
        optimsize = 32
    else:
        print("Only up to 4 options or primitive actions are currently supported.")
        sys.exit()
# ATARI HYPERPARAMETERS
# pposgd_simple.learn(env, policy_fn,
# max_timesteps=num_timesteps*1.1,
# timesteps_per_batch=256,
# clip_param=0.2, entcoeff=0.001,
# optim_epochs=4, optim_stepsize=1e-3, optim_batchsize=optimsize,
# gamma=0.99, lam=0.95, schedule='linear', num_options=num_options,
# app=app, saves=saves, wsaves=wsaves, epoch=epoch, seed=seed,dc=dc, render=render, caption=caption,
# deoc=deoc, tradeoff=tradeoff, term_mult=term_mult, lr_mult=lr_mult, tdeoc=tdeoc
# )
# MINIWORLD HYPERPARAMETERS
pposgd_simple.learn(env, policy_fn,
max_timesteps=num_timesteps,
timesteps_per_batch=2048,
clip_param=0.2, entcoeff=0.01,
optim_epochs=4, optim_stepsize=3e-4, optim_batchsize=optimsize,
gamma=0.99, lam=0.95, schedule='linear', num_options=num_options,
app=app, saves=saves, wsaves=wsaves, epoch=epoch, seed=seed,dc=dc, render=render, caption=caption,
deoc=deoc, tradeoff=tradeoff, term_mult=term_mult, lr_mult=lr_mult, tdeoc=tdeoc
)
env.close()
def main():
import argparse
parser = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument('--env', help='environment ID', default='MiniWorld-OneRoom-v0')
parser.add_argument('--seed', help='RNG seed', type=int, default=16)
parser.add_argument('--opt', help='number of options', type=int, default=2)
parser.add_argument('--app', help='Append to folder name', type=str, default='')
parser.add_argument('--saves', dest='saves', action='store_true', default=False)
parser.add_argument('--wsaves', dest='wsaves', action='store_true', default=False)
parser.add_argument('--epoch', help='Epoch', type=int, default=-1)
parser.add_argument('--dc', type=float, default=0.)
parser.add_argument('--render', dest='render', action='store_true', default=False)
parser.add_argument('--caption', help='Caption for run', default='')
parser.add_argument('--deoc', help='Augment reward with diversity', action='store_true', default=False)
parser.add_argument('--tradeoff', type=float, default=0.0)
parser.add_argument('--term_mult', type=float, default=1.0)
parser.add_argument('--lr_mult', type=float, default=1.0)
parser.add_argument('--tdeoc', help='Use diversity in termination objective', action='store_true', default=False)
args = parser.parse_args()
if args.tdeoc and not args.deoc:
print("Setting deoc arg to True...")
args.deoc = True
train(args.env, num_timesteps=2e6, seed=args.seed, num_options=args.opt, app=args.app, saves=args.saves,
wsaves=args.wsaves, epoch=args.epoch,dc=args.dc,
render=args.render, caption=args.caption, deoc=args.deoc, tradeoff=args.tradeoff, term_mult=args.term_mult, lr_mult=args.lr_mult, tdeoc=args.tdeoc)
if __name__ == '__main__':
    main()
| 47.36 | 174 | 0.655617 |
from baselines.common import set_global_seeds, tf_util as U
from baselines import bench
import os.path as osp
import gym, logging
from mpi4py import MPI
import pdb
from gym_extensions.continuous import mujoco
import gym_miniworld
from baselines import logger
import sys
def train(env_id, num_timesteps, seed, num_options, app, saves, wsaves, epoch, dc, render=False, caption='', deoc=False, tradeoff=0.1, term_mult=1.0, lr_mult=1.0, tdeoc=False):
from baselines.Termination_DEOC import cnn_policy, pposgd_simple
rank = MPI.COMM_WORLD.Get_rank()
sess = U.single_threaded_session()
sess.__enter__()
if rank == 0:
logger.configure()
else:
logger.configure(format_strs=[])
workerseed = seed + 10000 * MPI.COMM_WORLD.Get_rank() if seed is not None else None
set_global_seeds(workerseed)
env = gym.make(env_id)
env.seed(workerseed)
def policy_fn(name, ob_space, ac_space):
return cnn_policy.CnnPolicy(name=name, ob_space=ob_space, ac_space=ac_space,
hid_size=64, num_hid_layers=2, num_options=num_options, dc=dc)
env = bench.Monitor(env, logger.get_dir() and
osp.join(logger.get_dir(), str(rank)))
    if num_options == 1:
        optimsize = 64
    elif num_options > 1 and num_options < 5:
        optimsize = 32
    else:
        print("Only up to 4 options or primitive actions are currently supported.")
        sys.exit()
pposgd_simple.learn(env, policy_fn,
max_timesteps=num_timesteps,
timesteps_per_batch=2048,
clip_param=0.2, entcoeff=0.01,
optim_epochs=4, optim_stepsize=3e-4, optim_batchsize=optimsize,
gamma=0.99, lam=0.95, schedule='linear', num_options=num_options,
app=app, saves=saves, wsaves=wsaves, epoch=epoch, seed=seed,dc=dc, render=render, caption=caption,
deoc=deoc, tradeoff=tradeoff, term_mult=term_mult, lr_mult=lr_mult, tdeoc=tdeoc
)
env.close()
def main():
import argparse
parser = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument('--env', help='environment ID', default='MiniWorld-OneRoom-v0')
parser.add_argument('--seed', help='RNG seed', type=int, default=16)
parser.add_argument('--opt', help='number of options', type=int, default=2)
parser.add_argument('--app', help='Append to folder name', type=str, default='')
parser.add_argument('--saves', dest='saves', action='store_true', default=False)
parser.add_argument('--wsaves', dest='wsaves', action='store_true', default=False)
parser.add_argument('--epoch', help='Epoch', type=int, default=-1)
parser.add_argument('--dc', type=float, default=0.)
parser.add_argument('--render', dest='render', action='store_true', default=False)
parser.add_argument('--caption', help='Caption for run', default='')
parser.add_argument('--deoc', help='Augment reward with diversity', action='store_true', default=False)
parser.add_argument('--tradeoff', type=float, default=0.0)
parser.add_argument('--term_mult', type=float, default=1.0)
parser.add_argument('--lr_mult', type=float, default=1.0)
parser.add_argument('--tdeoc', help='Use diversity in termination objective', action='store_true', default=False)
args = parser.parse_args()
if args.tdeoc and not args.deoc:
print("Setting deoc arg to True...")
args.deoc = True
train(args.env, num_timesteps=2e6, seed=args.seed, num_options=args.opt, app=args.app, saves=args.saves,
wsaves=args.wsaves, epoch=args.epoch,dc=args.dc,
render=args.render, caption=args.caption, deoc=args.deoc, tradeoff=args.tradeoff, term_mult=args.term_mult, lr_mult=args.lr_mult, tdeoc=args.tdeoc)
if __name__ == '__main__':
    main()
| true | true |
f72014b925bc545ca989597e22d048c3a184e38f | 1,430 | py | Python | Transposition/transpositionFileCipher.py | a1exlism/HackingSecretCiphersWithPy | d7ec59d9eb5c5ae55c68ce911a3973ae0c526698 | [
"MIT"
] | null | null | null | Transposition/transpositionFileCipher.py | a1exlism/HackingSecretCiphersWithPy | d7ec59d9eb5c5ae55c68ce911a3973ae0c526698 | [
"MIT"
] | null | null | null | Transposition/transpositionFileCipher.py | a1exlism/HackingSecretCiphersWithPy | d7ec59d9eb5c5ae55c68ce911a3973ae0c526698 | [
"MIT"
] | null | null | null |
import os, sys, time, Transposition.transpositionEncrypt as ENC, \
Transposition.transpositionDecrypt as DEC
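# Encrypts or decrypts frankenstein.txt with the transposition cipher from the
# Transposition package, timing the run and writing the result to
# frankenstein.<mode>.txt.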
def main():
f_key = 10
# f_mode = 'encrypt'
f_mode = 'decrypt'
if f_mode == 'decrypt':
input_filename = 'frankenstein.encrypt.txt'
else:
input_filename = 'frankenstein.txt'
output_filename = f'frankenstein.{f_mode}.txt'
if not os.path.exists(input_filename):
        print(f'File {input_filename} does not exist. Quitting...')
sys.exit()
if os.path.exists(output_filename):
        print(
            f'File {output_filename} already exists and will be overwritten. (C)ontinue or (Q)uit?')
response = input('> ')
if not response.lower().startswith('c'):
sys.exit()
# read file
file_obj = open(input_filename)
content = file_obj.read()
file_obj.close()
print(f'{f_mode.title()}ing...')
start_time = time.time()
if f_mode == 'encrypt':
transformed = ENC.encrypt_msg(f_key, content)
else:
transformed = DEC.decrypt_msg(f_key, content)
total_time = round(time.time() - start_time, 2)
    print(f'{f_mode.title()}ion took {total_time} seconds.')
# write to file
output_file_obj = open(output_filename, 'w')
output_file_obj.write(transformed)
output_file_obj.close()
    print(f'Done: {f_mode}ed {len(content)} characters to {output_filename}.')
if __name__ == '__main__':
main()
| 28.039216 | 88 | 0.636364 |
import os, sys, time, Transposition.transpositionEncrypt as ENC, \
Transposition.transpositionDecrypt as DEC
def main():
f_key = 10
f_mode = 'decrypt'
if f_mode == 'decrypt':
input_filename = 'frankenstein.encrypt.txt'
else:
input_filename = 'frankenstein.txt'
output_filename = f'frankenstein.{f_mode}.txt'
if not os.path.exists(input_filename):
        print(f'File {input_filename} does not exist. Quitting...')
sys.exit()
if os.path.exists(output_filename):
        print(
            f'File {output_filename} already exists and will be overwritten. (C)ontinue or (Q)uit?')
response = input('> ')
if not response.lower().startswith('c'):
sys.exit()
file_obj = open(input_filename)
content = file_obj.read()
file_obj.close()
print(f'{f_mode.title()}ing...')
start_time = time.time()
if f_mode == 'encrypt':
transformed = ENC.encrypt_msg(f_key, content)
else:
transformed = DEC.decrypt_msg(f_key, content)
total_time = round(time.time() - start_time, 2)
    print(f'{f_mode.title()}ion took {total_time} seconds.')
output_file_obj = open(output_filename, 'w')
output_file_obj.write(transformed)
output_file_obj.close()
print(f'{output_filename} with {len(content)} {f_mode}ed done.')
if __name__ == '__main__':
main()
| true | true |
f720154bd7ce3ed32ed597d87d874fe71e148ab1 | 7,664 | py | Python | test/jit/test_misc.py | metacpp/pytorch | 1e7a4d6bbe1fac4fb94f6b62f24c6e242db1e952 | [
"Intel"
] | 1 | 2022-03-02T00:28:04.000Z | 2022-03-02T00:28:04.000Z | test/jit/test_misc.py | metacpp/pytorch | 1e7a4d6bbe1fac4fb94f6b62f24c6e242db1e952 | [
"Intel"
] | 1 | 2022-03-01T06:10:50.000Z | 2022-03-01T06:10:50.000Z | test/jit/test_misc.py | metacpp/pytorch | 1e7a4d6bbe1fac4fb94f6b62f24c6e242db1e952 | [
"Intel"
] | null | null | null |
# Owner(s): ["oncall: jit"]
from typing import Any, Dict, List, Optional, Tuple
from torch.testing._internal.jit_utils import JitTestCase, make_global
from torch.testing import FileCheck
from torch import jit
from jit.test_module_interface import TestModuleInterface # noqa: F401
import os
import sys
import torch
import torch.testing._internal.jit_utils
import torch.nn as nn
# Make the helper files in test/ importable
pytorch_test_dir = os.path.dirname(os.path.dirname(os.path.realpath(__file__)))
sys.path.append(pytorch_test_dir)
if __name__ == '__main__':
raise RuntimeError("This test file is not meant to be run directly, use:\n\n"
"\tpython test/test_jit.py TESTNAME\n\n"
"instead.")
class TestMisc(JitTestCase):
def test_joined_str(self):
def func(x):
hello, test = "Hello", "test"
print(f"{hello + ' ' + test}, I'm a {test}")
print("format blank")
hi = 'hi'
print(f"stuff before {hi}")
print(f"{hi} stuff after")
return x + 1
x = torch.arange(4., requires_grad=True)
# TODO: Add support for f-strings in string parser frontend
# self.checkScript(func, [x], optimize=True, capture_output=True)
with self.capture_stdout() as captured:
out = func(x)
scripted = torch.jit.script(func)
with self.capture_stdout() as captured_script:
            out_script = scripted(x)
self.assertEqual(out, out_script)
self.assertEqual(captured, captured_script)
def test_kwarg_support(self):
with self.assertRaisesRegex(torch.jit.frontend.NotSupportedError, "variable number of arguments"):
class M(torch.nn.Module):
def forward(self, *, n_tokens: int, device_name: str = 2):
pass
torch.jit.script(M())
class M(torch.nn.Module):
def forward(self, *, n_tokens: int, device_name: str):
return n_tokens, device_name
sm = torch.jit.script(M())
with self.assertRaisesRegex(RuntimeError, "missing value for argument 'n_tokens'"):
sm()
with self.assertRaisesRegex(RuntimeError, "positional arg"):
sm(3, 'hello')
self.assertEqual(sm(n_tokens=3, device_name='hello'), (3, 'hello'))
def test_tuple_subscripted_assign(self):
with self.assertRaisesRegex(RuntimeError, "subscripted assignment"):
@torch.jit.script
def foo(a: Tuple[int, int]) -> None:
a[0] = a[1]
with self.assertRaisesRegex(RuntimeError, "augmented assignment"):
@torch.jit.script
def bar(a: Tuple[int, int]) -> None:
a[0] += a[1]
def test_subexpression_List_Future(self):
@torch.jit.script
def fn(x: List[torch.jit.Future[int]]) -> torch.jit.Future[int]:
return x[0]
FileCheck().check('Future[int]').check('Future[int]').run(fn.graph)
def test_subexpression_Future_annotate(self):
@torch.jit.script
def fn() -> torch.jit.Future[int]:
x: List[torch.jit.Future[int]] = []
return x[0]
FileCheck().check("Future[int][]").run(fn.graph)
def test_future_isinstance(self):
@torch.jit.script
def fn(x: Any) -> torch.jit.Future[int]:
assert isinstance(x, jit.Future[int])
return x
FileCheck().check("Future[int]").run(fn.graph)
def test_str_refine_any(self):
def forward(x: Any) -> str:
if isinstance(x, str):
return x
return "foo"
forward = torch.jit.script(forward)
self.assertEqual(forward(1), "foo")
self.assertEqual(forward("bar"), "bar")
def test_subexpression_Tuple_int_int_Future(self):
@torch.jit.script
def fn(x: Tuple[int, int, torch.jit.Future[int]]) -> Tuple[int, torch.jit.Future[int]]:
return x[0], x[2]
FileCheck().check('(int, int, Future[int])').check('(int, Future[int])').run(fn.graph)
def test_subexpression_Dict_int_Future(self):
@torch.jit.script
def fn(x: Dict[int, torch.jit.Future[int]], y: int) -> torch.jit.Future[int]:
return x[y]
FileCheck().check('Dict(int, Future(int))').check('Future[int]').run(fn.graph)
def test_subexpression_Optional(self):
@torch.jit.script
def fn(x: Optional[Dict[int, torch.jit.Future[int]]]) -> Optional[torch.jit.Future[int]]:
if x is not None:
return x[0]
else:
return None
FileCheck().check('Dict(int, Future(int))?').run(fn.graph)
def test_if_returning_any(self):
"""
Check that an if statement can return different
types early from each branch when the return
type of the function is Any.
"""
def if_function(inp: torch.Tensor) -> Any:
if inp.shape[0] == 1:
return inp * inp
else:
return "str"
self.checkScript(if_function, (torch.randn(5),))
def test_export_opnames_interface(self):
@torch.jit.interface
class OneTwoModule(nn.Module):
def one(self, x: torch.Tensor, y: torch.Tensor) -> torch.Tensor:
pass
def two(self, x: torch.Tensor) -> torch.Tensor:
pass
def forward(self, x: torch.Tensor) -> torch.Tensor:
pass
class FooMod(nn.Module):
def one(self, x: torch.Tensor, y: torch.Tensor) -> torch.Tensor:
return x + y
def two(self, x: torch.Tensor) -> torch.Tensor:
return 2 * x
def forward(self, x: torch.Tensor) -> torch.Tensor:
return self.one(self.two(x), x)
class BarMod(nn.Module):
def one(self, x: torch.Tensor, y: torch.Tensor) -> torch.Tensor:
return x * y
def two(self, x: torch.Tensor) -> torch.Tensor:
return 2 / x
def forward(self, x: torch.Tensor) -> torch.Tensor:
return self.two(self.one(x, x))
make_global(OneTwoModule)
class M(nn.Module):
sub : OneTwoModule
def __init__(self):
super(M, self).__init__()
self.sub = BarMod()
def forward(self, x: torch.Tensor) -> torch.Tensor:
return self.sub.forward(x)
def use_module_interface(mod_list: List[OneTwoModule], x: torch.Tensor):
return mod_list[0].forward(x) + mod_list[1].forward(x)
torch._C._enable_mobile_interface_call_export()
scripted_M_mod = torch.jit.script(M())
self.assertTrue(set(['aten::mul.Scalar', 'aten::mul.Tensor', 'aten::reciprocal']).issubset(
set(torch.jit.export_opnames(scripted_M_mod))))
scripted_M_mod.sub = torch.jit.script(FooMod())
self.assertTrue(set(['aten::add.Tensor', 'aten::mul.Scalar']).issubset(
set(torch.jit.export_opnames(scripted_M_mod))))
def test_broadcasting_list(self):
"""
Test BroadcastingList and torch.nn._size_N_t alias
"""
from torch._jit_internal import BroadcastingList2
from torch.nn.common_types import _size_2_t
def sum_i(x: _size_2_t) -> int:
return x[0] + x[1]
def sum_f(x: BroadcastingList2[float]) -> float:
return x[0] + x[1]
self.assertTrue(torch.jit.script(sum_i)(4) == 8)
self.assertTrue(torch.jit.script(sum_f)(4.5) == 9.)
| 33.762115 | 106 | 0.584551 |
from typing import Any, Dict, List, Optional, Tuple
from torch.testing._internal.jit_utils import JitTestCase, make_global
from torch.testing import FileCheck
from torch import jit
from jit.test_module_interface import TestModuleInterface
import os
import sys
import torch
import torch.testing._internal.jit_utils
import torch.nn as nn
pytorch_test_dir = os.path.dirname(os.path.dirname(os.path.realpath(__file__)))
sys.path.append(pytorch_test_dir)
if __name__ == '__main__':
raise RuntimeError("This test file is not meant to be run directly, use:\n\n"
"\tpython test/test_jit.py TESTNAME\n\n"
"instead.")
class TestMisc(JitTestCase):
def test_joined_str(self):
def func(x):
hello, test = "Hello", "test"
print(f"{hello + ' ' + test}, I'm a {test}")
print("format blank")
hi = 'hi'
print(f"stuff before {hi}")
print(f"{hi} stuff after")
return x + 1
x = torch.arange(4., requires_grad=True)
# TODO: Add support for f-strings in string parser frontend
# self.checkScript(func, [x], optimize=True, capture_output=True)
with self.capture_stdout() as captured:
out = func(x)
scripted = torch.jit.script(func)
with self.capture_stdout() as captured_script:
            out_script = scripted(x)
self.assertEqual(out, out_script)
self.assertEqual(captured, captured_script)
def test_kwarg_support(self):
with self.assertRaisesRegex(torch.jit.frontend.NotSupportedError, "variable number of arguments"):
class M(torch.nn.Module):
def forward(self, *, n_tokens: int, device_name: str = 2):
pass
torch.jit.script(M())
class M(torch.nn.Module):
def forward(self, *, n_tokens: int, device_name: str):
return n_tokens, device_name
sm = torch.jit.script(M())
with self.assertRaisesRegex(RuntimeError, "missing value for argument 'n_tokens'"):
sm()
with self.assertRaisesRegex(RuntimeError, "positional arg"):
sm(3, 'hello')
self.assertEqual(sm(n_tokens=3, device_name='hello'), (3, 'hello'))
def test_tuple_subscripted_assign(self):
with self.assertRaisesRegex(RuntimeError, "subscripted assignment"):
@torch.jit.script
def foo(a: Tuple[int, int]) -> None:
a[0] = a[1]
with self.assertRaisesRegex(RuntimeError, "augmented assignment"):
@torch.jit.script
def bar(a: Tuple[int, int]) -> None:
a[0] += a[1]
def test_subexpression_List_Future(self):
@torch.jit.script
def fn(x: List[torch.jit.Future[int]]) -> torch.jit.Future[int]:
return x[0]
FileCheck().check('Future[int]').check('Future[int]').run(fn.graph)
def test_subexpression_Future_annotate(self):
@torch.jit.script
def fn() -> torch.jit.Future[int]:
x: List[torch.jit.Future[int]] = []
return x[0]
FileCheck().check("Future[int][]").run(fn.graph)
def test_future_isinstance(self):
@torch.jit.script
def fn(x: Any) -> torch.jit.Future[int]:
assert isinstance(x, jit.Future[int])
return x
FileCheck().check("Future[int]").run(fn.graph)
def test_str_refine_any(self):
def forward(x: Any) -> str:
if isinstance(x, str):
return x
return "foo"
forward = torch.jit.script(forward)
self.assertEqual(forward(1), "foo")
self.assertEqual(forward("bar"), "bar")
def test_subexpression_Tuple_int_int_Future(self):
@torch.jit.script
def fn(x: Tuple[int, int, torch.jit.Future[int]]) -> Tuple[int, torch.jit.Future[int]]:
return x[0], x[2]
FileCheck().check('(int, int, Future[int])').check('(int, Future[int])').run(fn.graph)
def test_subexpression_Dict_int_Future(self):
@torch.jit.script
def fn(x: Dict[int, torch.jit.Future[int]], y: int) -> torch.jit.Future[int]:
return x[y]
FileCheck().check('Dict(int, Future(int))').check('Future[int]').run(fn.graph)
def test_subexpression_Optional(self):
@torch.jit.script
def fn(x: Optional[Dict[int, torch.jit.Future[int]]]) -> Optional[torch.jit.Future[int]]:
if x is not None:
return x[0]
else:
return None
FileCheck().check('Dict(int, Future(int))?').run(fn.graph)
def test_if_returning_any(self):
def if_function(inp: torch.Tensor) -> Any:
if inp.shape[0] == 1:
return inp * inp
else:
return "str"
self.checkScript(if_function, (torch.randn(5),))
def test_export_opnames_interface(self):
@torch.jit.interface
class OneTwoModule(nn.Module):
def one(self, x: torch.Tensor, y: torch.Tensor) -> torch.Tensor:
pass
def two(self, x: torch.Tensor) -> torch.Tensor:
pass
def forward(self, x: torch.Tensor) -> torch.Tensor:
pass
class FooMod(nn.Module):
def one(self, x: torch.Tensor, y: torch.Tensor) -> torch.Tensor:
return x + y
def two(self, x: torch.Tensor) -> torch.Tensor:
return 2 * x
def forward(self, x: torch.Tensor) -> torch.Tensor:
return self.one(self.two(x), x)
class BarMod(nn.Module):
def one(self, x: torch.Tensor, y: torch.Tensor) -> torch.Tensor:
return x * y
def two(self, x: torch.Tensor) -> torch.Tensor:
return 2 / x
def forward(self, x: torch.Tensor) -> torch.Tensor:
return self.two(self.one(x, x))
make_global(OneTwoModule)
class M(nn.Module):
sub : OneTwoModule
def __init__(self):
super(M, self).__init__()
self.sub = BarMod()
def forward(self, x: torch.Tensor) -> torch.Tensor:
return self.sub.forward(x)
def use_module_interface(mod_list: List[OneTwoModule], x: torch.Tensor):
return mod_list[0].forward(x) + mod_list[1].forward(x)
torch._C._enable_mobile_interface_call_export()
scripted_M_mod = torch.jit.script(M())
self.assertTrue(set(['aten::mul.Scalar', 'aten::mul.Tensor', 'aten::reciprocal']).issubset(
set(torch.jit.export_opnames(scripted_M_mod))))
scripted_M_mod.sub = torch.jit.script(FooMod())
self.assertTrue(set(['aten::add.Tensor', 'aten::mul.Scalar']).issubset(
set(torch.jit.export_opnames(scripted_M_mod))))
def test_broadcasting_list(self):
from torch._jit_internal import BroadcastingList2
from torch.nn.common_types import _size_2_t
def sum_i(x: _size_2_t) -> int:
return x[0] + x[1]
def sum_f(x: BroadcastingList2[float]) -> float:
return x[0] + x[1]
self.assertTrue(torch.jit.script(sum_i)(4) == 8)
self.assertTrue(torch.jit.script(sum_f)(4.5) == 9.)
| true | true |
f72015bb9160c0942bb83fe1f3d4aa6377a9797d | 5,563 | py | Python | natlas-server/app/elastic/client.py | m4rcu5/natlas | d1057c5349a5443cecffb3db9a6428f7271b07ad | [
"Apache-2.0"
] | null | null | null | natlas-server/app/elastic/client.py | m4rcu5/natlas | d1057c5349a5443cecffb3db9a6428f7271b07ad | [
"Apache-2.0"
] | null | null | null | natlas-server/app/elastic/client.py | m4rcu5/natlas | d1057c5349a5443cecffb3db9a6428f7271b07ad | [
"Apache-2.0"
] | null | null | null |
import json
from config import Config
import elasticsearch
import time
from datetime import datetime
import logging
from opencensus.trace import execution_context
from opencensus.trace import span as span_module
import semver
class ElasticClient:
es = None
lastReconnectAttempt = None
mapping = {}
natlasIndices = ["nmap", "nmap_history"]
status = False
# Quiets the elasticsearch logger because otherwise connection errors print tracebacks to the WARNING level, even when the exception is handled.
logger = logging.getLogger('elasticsearch')
logger.setLevel('ERROR')
def __init__(self, elasticURL):
		# Elastic is initialized outside an application context, so we have to instantiate Config ourselves to get BASEDIR
with open(Config().BASEDIR + '/defaults/elastic/mapping.json') as mapfile:
self.mapping = json.loads(mapfile.read())
try:
self.es = elasticsearch.Elasticsearch(elasticURL, timeout=5, max_retries=1)
self.status = self._ping()
if self.status:
self.esversion = semver.VersionInfo.parse(self.es.info()['version']['number'])
self.logger.info("Elastic Version: " + str(self.esversion))
self._initialize_indices()
self.logger.info("Initialized Elasticsearch indices")
except Exception:
self.status = False
raise
finally:
# Set the lastReconnectAttempt to the timestamp after initialization
self.lastReconnectAttempt = datetime.utcnow()
return
def _initialize_indices(self):
''' Check each required index and make sure it exists, if it doesn't then create it '''
for index in self.natlasIndices:
if not self.es.indices.exists(index):
self.es.indices.create(index)
# Avoid a race condition
time.sleep(2)
for index in self.natlasIndices:
if self.esversion.match(">=7.0.0"):
self.es.indices.put_mapping(index=index, doc_type='_doc', body=self.mapping, include_type_name=True)
else:
self.es.indices.put_mapping(index=index, doc_type='_doc', body=self.mapping)
def _ping(self):
''' Returns True if the cluster is up, False otherwise'''
with self._new_trace_span(operation='ping'):
return self.es.ping()
def _attempt_reconnect(self):
''' Attempt to reconnect if we haven't tried to reconnect too recently '''
now = datetime.utcnow()
delta = now - self.lastReconnectAttempt
		if delta.total_seconds() < 30:
return self.status
else:
self.status = self._ping()
return self.status
def _check_status(self):
''' If we're in a known bad state, try to reconnect '''
if not self.status and not self._attempt_reconnect():
raise elasticsearch.ConnectionError
return self.status
def get_collection(self, **kwargs):
''' Execute a search and return a collection of results '''
results = self.execute_search(**kwargs)
if not results:
return 0, []
docsources = self.collate_source(results['hits']['hits'])
return results['hits']['total'], docsources
def get_single_host(self, **kwargs):
''' Execute a search and return a single result '''
results = self.execute_search(**kwargs)
if not results or results['hits']['total'] == 0:
return 0, None
return results['hits']['total'], results['hits']['hits'][0]['_source']
def collate_source(self, documents):
return map(lambda doc: doc['_source'], documents)
# Mid-level query executor abstraction.
def execute_search(self, **kwargs):
''' Execute an arbitrary search.'''
with self._new_trace_span(operation='search', **kwargs) as span:
results = self._execute_raw_query(self.es.search, doc_type='_doc', rest_total_hits_as_int=True, **kwargs)
span.add_attribute('es.hits.total', results['hits']['total'])
self._attach_shard_span_attrs(span, results)
return results
def execute_count(self, **kwargs):
''' Executes an arbitrary count.'''
results = None
with self._new_trace_span(operation='count', **kwargs) as span:
results = self._execute_raw_query(self.es.count, doc_type='_doc', **kwargs)
self._attach_shard_span_attrs(span, results)
if not results:
return 0
return results
def execute_delete_by_query(self, **kwargs):
''' Executes an arbitrary delete_by_query.'''
with self._new_trace_span(operation='delete_by', **kwargs) as span:
results = self._execute_raw_query(self.es.delete_by_query, doc_type='_doc', **kwargs)
self._attach_shard_span_attrs(span, results)
return results
def execute_index(self, **kwargs):
''' Executes an arbitrary index. '''
with self._new_trace_span(operation='index', **kwargs):
results = self._execute_raw_query(self.es.index, doc_type='_doc', **kwargs)
return results
# Inner-most query executor. All queries route through here.
def _execute_raw_query(self, func, **kwargs):
''' Wraps the es client to make sure that ConnectionErrors are handled uniformly '''
self._check_status()
try:
return func(**kwargs)
except elasticsearch.ConnectionError:
self.status = False
raise elasticsearch.ConnectionError
# Tracing methods
def _new_trace_span(self, operation, **kwargs):
tracer = execution_context.get_opencensus_tracer()
span_name = "elasticsearch"
if 'index' in kwargs:
span_name += '.' + operation
span = tracer.span(name=span_name)
span.span_kind = span_module.SpanKind.CLIENT
if 'index' in kwargs:
span.add_attribute('es.index', kwargs['index'])
if 'body' in kwargs:
span.add_attribute('es.query', kwargs['body'])
return span
def _attach_shard_span_attrs(self, span, results):
span.add_attribute('es.shards.total', results['_shards']['total'])
span.add_attribute('es.shards.successful', results['_shards']['successful'])
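# Example usage (hypothetical host and query, for illustration only):
#   client = ElasticClient("http://localhost:9200")
#   total, hosts = client.get_collection(index="nmap",
#                                        body={"query": {"match_all": {}}})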
| 35.660256 | 145 | 0.732339 |
import json
from config import Config
import elasticsearch
import time
from datetime import datetime
import logging
from opencensus.trace import execution_context
from opencensus.trace import span as span_module
import semver
class ElasticClient:
es = None
lastReconnectAttempt = None
mapping = {}
natlasIndices = ["nmap", "nmap_history"]
status = False
logger = logging.getLogger('elasticsearch')
logger.setLevel('ERROR')
def __init__(self, elasticURL):
with open(Config().BASEDIR + '/defaults/elastic/mapping.json') as mapfile:
self.mapping = json.loads(mapfile.read())
try:
self.es = elasticsearch.Elasticsearch(elasticURL, timeout=5, max_retries=1)
self.status = self._ping()
if self.status:
self.esversion = semver.VersionInfo.parse(self.es.info()['version']['number'])
self.logger.info("Elastic Version: " + str(self.esversion))
self._initialize_indices()
self.logger.info("Initialized Elasticsearch indices")
except Exception:
self.status = False
raise
finally:
self.lastReconnectAttempt = datetime.utcnow()
return
def _initialize_indices(self):
for index in self.natlasIndices:
if not self.es.indices.exists(index):
self.es.indices.create(index)
time.sleep(2)
for index in self.natlasIndices:
if self.esversion.match(">=7.0.0"):
self.es.indices.put_mapping(index=index, doc_type='_doc', body=self.mapping, include_type_name=True)
else:
self.es.indices.put_mapping(index=index, doc_type='_doc', body=self.mapping)
def _ping(self):
with self._new_trace_span(operation='ping'):
return self.es.ping()
def _attempt_reconnect(self):
now = datetime.utcnow()
delta = now - self.lastReconnectAttempt
		if delta.total_seconds() < 30:
return self.status
else:
self.status = self._ping()
return self.status
def _check_status(self):
if not self.status and not self._attempt_reconnect():
raise elasticsearch.ConnectionError
return self.status
def get_collection(self, **kwargs):
results = self.execute_search(**kwargs)
if not results:
return 0, []
docsources = self.collate_source(results['hits']['hits'])
return results['hits']['total'], docsources
def get_single_host(self, **kwargs):
results = self.execute_search(**kwargs)
if not results or results['hits']['total'] == 0:
return 0, None
return results['hits']['total'], results['hits']['hits'][0]['_source']
def collate_source(self, documents):
return map(lambda doc: doc['_source'], documents)
def execute_search(self, **kwargs):
with self._new_trace_span(operation='search', **kwargs) as span:
results = self._execute_raw_query(self.es.search, doc_type='_doc', rest_total_hits_as_int=True, **kwargs)
span.add_attribute('es.hits.total', results['hits']['total'])
self._attach_shard_span_attrs(span, results)
return results
def execute_count(self, **kwargs):
results = None
with self._new_trace_span(operation='count', **kwargs) as span:
results = self._execute_raw_query(self.es.count, doc_type='_doc', **kwargs)
self._attach_shard_span_attrs(span, results)
if not results:
return 0
return results
def execute_delete_by_query(self, **kwargs):
with self._new_trace_span(operation='delete_by', **kwargs) as span:
results = self._execute_raw_query(self.es.delete_by_query, doc_type='_doc', **kwargs)
self._attach_shard_span_attrs(span, results)
return results
def execute_index(self, **kwargs):
with self._new_trace_span(operation='index', **kwargs):
results = self._execute_raw_query(self.es.index, doc_type='_doc', **kwargs)
return results
def _execute_raw_query(self, func, **kwargs):
self._check_status()
try:
return func(**kwargs)
except elasticsearch.ConnectionError:
self.status = False
raise elasticsearch.ConnectionError
def _new_trace_span(self, operation, **kwargs):
tracer = execution_context.get_opencensus_tracer()
span_name = "elasticsearch"
if 'index' in kwargs:
span_name += '.' + operation
span = tracer.span(name=span_name)
span.span_kind = span_module.SpanKind.CLIENT
if 'index' in kwargs:
span.add_attribute('es.index', kwargs['index'])
if 'body' in kwargs:
span.add_attribute('es.query', kwargs['body'])
return span
def _attach_shard_span_attrs(self, span, results):
span.add_attribute('es.shards.total', results['_shards']['total'])
span.add_attribute('es.shards.successful', results['_shards']['successful'])
| true | true |
f7201983075ff6e117cd811c5af6d092bb3c77bd | 79711 | py | Python | src/pretix/control/views/orders.py | joelbcastillo/pretix | 1005437c69d5fed2a0ea2525b41481b0952fe6f1 | ["ECL-2.0", "Apache-2.0"] | null | null | null | src/pretix/control/views/orders.py | joelbcastillo/pretix | 1005437c69d5fed2a0ea2525b41481b0952fe6f1 | ["ECL-2.0", "Apache-2.0"] | 1 | 2020-06-12T00:09:40.000Z | 2020-06-12T00:09:40.000Z | src/pretix/control/views/orders.py | joelbcastillo/pretix | 1005437c69d5fed2a0ea2525b41481b0952fe6f1 | ["ECL-2.0", "Apache-2.0"] | null | null | null |
import json
import logging
import mimetypes
import os
import re
from datetime import datetime, time, timedelta
from decimal import Decimal, DecimalException
from urllib.parse import urlencode
import vat_moss.errors  # exception classes caught in OrderCheckVATID
import vat_moss.id
from django.conf import settings
from django.contrib import messages
from django.core.files import File
from django.db import transaction
from django.db.models import (
Count, IntegerField, OuterRef, Prefetch, ProtectedError, Q, Subquery, Sum,
)
from django.forms import formset_factory
from django.http import (
FileResponse, Http404, HttpResponseNotAllowed, HttpResponseRedirect,
JsonResponse,
)
from django.shortcuts import get_object_or_404, redirect, render
from django.urls import reverse
from django.utils import formats
from django.utils.functional import cached_property
from django.utils.http import is_safe_url
from django.utils.timezone import make_aware, now
from django.utils.translation import ugettext_lazy as _
from django.views.generic import (
DetailView, FormView, ListView, TemplateView, View,
)
from i18nfield.strings import LazyI18nString
from pretix.base.channels import get_all_sales_channels
from pretix.base.email import get_email_context
from pretix.base.i18n import language
from pretix.base.models import (
CachedCombinedTicket, CachedFile, CachedTicket, Invoice, InvoiceAddress,
Item, ItemVariation, LogEntry, Order, QuestionAnswer, Quota,
generate_position_secret, generate_secret,
)
from pretix.base.models.orders import (
OrderFee, OrderPayment, OrderPosition, OrderRefund,
)
from pretix.base.models.tax import EU_COUNTRIES, cc_to_vat_prefix
from pretix.base.payment import PaymentException
from pretix.base.services import tickets
from pretix.base.services.export import export
from pretix.base.services.invoices import (
generate_cancellation, generate_invoice, invoice_pdf, invoice_pdf_task,
invoice_qualified, regenerate_invoice,
)
from pretix.base.services.locking import LockTimeoutException
from pretix.base.services.mail import SendMailException, render_mail
from pretix.base.services.orders import (
OrderChangeManager, OrderError, approve_order, cancel_order, deny_order,
extend_order, mark_order_expired, mark_order_refunded,
notify_user_changed_order,
)
from pretix.base.services.stats import order_overview
from pretix.base.services.tickets import generate
from pretix.base.signals import (
order_modified, register_data_exporters, register_ticket_outputs,
)
from pretix.base.templatetags.money import money_filter
from pretix.base.templatetags.rich_text import markdown_compile_email
from pretix.base.views.mixins import OrderQuestionsViewMixin
from pretix.base.views.tasks import AsyncAction
from pretix.control.forms.filter import (
EventOrderFilterForm, OverviewFilterForm, RefundFilterForm,
)
from pretix.control.forms.orders import (
CancelForm, CommentForm, ConfirmPaymentForm, ExporterForm, ExtendForm,
MarkPaidForm, OrderContactForm, OrderFeeChangeForm, OrderLocaleForm,
OrderMailForm, OrderPositionAddForm, OrderPositionAddFormset,
OrderPositionChangeForm, OrderRefundForm, OtherOperationsForm,
)
from pretix.control.permissions import EventPermissionRequiredMixin
from pretix.control.views import PaginationMixin
from pretix.helpers.safedownload import check_token
from pretix.presale.signals import question_form_fields
logger = logging.getLogger(__name__)
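# Paginated order list for a single event. Expensive annotations (position
# counts, over-/underpayment flags) are computed in a second query that is
# restricted to the orders on the current page.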
class OrderList(EventPermissionRequiredMixin, PaginationMixin, ListView):
model = Order
context_object_name = 'orders'
template_name = 'pretixcontrol/orders/index.html'
permission = 'can_view_orders'
def get_queryset(self):
qs = Order.objects.filter(
event=self.request.event
).select_related('invoice_address')
if self.filter_form.is_valid():
qs = self.filter_form.filter_qs(qs)
return qs
def get_context_data(self, **kwargs):
ctx = super().get_context_data(**kwargs)
ctx['filter_form'] = self.filter_form
        # Only compute these annotations for the current page (query optimization)
s = OrderPosition.objects.filter(
order=OuterRef('pk')
).order_by().values('order').annotate(k=Count('id')).values('k')
annotated = {
o['pk']: o
for o in
Order.annotate_overpayments(Order.objects).filter(
pk__in=[o.pk for o in ctx['orders']]
).annotate(
pcnt=Subquery(s, output_field=IntegerField())
).values(
'pk', 'pcnt', 'is_overpaid', 'is_underpaid', 'is_pending_with_full_payment', 'has_external_refund',
'has_pending_refund'
)
}
for o in ctx['orders']:
if o.pk not in annotated:
continue
o.pcnt = annotated.get(o.pk)['pcnt']
o.is_overpaid = annotated.get(o.pk)['is_overpaid']
o.is_underpaid = annotated.get(o.pk)['is_underpaid']
o.is_pending_with_full_payment = annotated.get(o.pk)['is_pending_with_full_payment']
o.has_external_refund = annotated.get(o.pk)['has_external_refund']
o.has_pending_refund = annotated.get(o.pk)['has_pending_refund']
if ctx['page_obj'].paginator.count < 1000:
# Performance safeguard: Only count positions if the data set is small
ctx['sums'] = self.get_queryset().annotate(
pcnt=Subquery(s, output_field=IntegerField())
).aggregate(
s=Sum('total'), pc=Sum('pcnt'), c=Count('id')
)
else:
ctx['sums'] = self.get_queryset().aggregate(s=Sum('total'), c=Count('id'))
return ctx
@cached_property
def filter_form(self):
return EventOrderFilterForm(data=self.request.GET, event=self.request.event)
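# Shared base for the single-order views below: resolves the order from the
# URL code, exposes redirect helpers and the "can an invoice be generated?"
# context flag.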
class OrderView(EventPermissionRequiredMixin, DetailView):
context_object_name = 'order'
model = Order
def get_object(self, queryset=None):
try:
return Order.objects.get(
event=self.request.event,
code=self.kwargs['code'].upper()
)
except Order.DoesNotExist:
raise Http404()
def _redirect_back(self):
return redirect('control:event.order',
event=self.request.event.slug,
organizer=self.request.event.organizer.slug,
code=self.order.code)
@cached_property
def order(self):
return self.get_object()
def get_context_data(self, **kwargs):
ctx = super().get_context_data(**kwargs)
ctx['can_generate_invoice'] = invoice_qualified(self.order) and (
self.request.event.settings.invoice_generate in ('admin', 'user', 'paid', 'True')
) and (
not self.order.invoices.exists()
or (
self.order.status in (Order.STATUS_PAID, Order.STATUS_PENDING)
and self.order.invoices.filter(is_cancellation=True).count() >= self.order.invoices.filter(is_cancellation=False).count()
)
)
return ctx
def get_order_url(self):
return reverse('control:event.order', kwargs={
'event': self.request.event.slug,
'organizer': self.request.event.organizer.slug,
'code': self.order.code
})
class OrderDetail(OrderView):
template_name = 'pretixcontrol/order/index.html'
permission = 'can_view_orders'
def get_context_data(self, **kwargs):
ctx = super().get_context_data(**kwargs)
ctx['items'] = self.get_items()
ctx['event'] = self.request.event
ctx['payments'] = self.order.payments.order_by('-created')
ctx['refunds'] = self.order.refunds.select_related('payment').order_by('-created')
for p in ctx['payments']:
if p.payment_provider:
p.html_info = (p.payment_provider.payment_control_render(self.request, p) or "").strip()
for r in ctx['refunds']:
if r.payment_provider:
r.html_info = (r.payment_provider.refund_control_render(self.request, r) or "").strip()
ctx['invoices'] = list(self.order.invoices.all().select_related('event'))
ctx['comment_form'] = CommentForm(initial={
'comment': self.order.comment,
'checkin_attention': self.order.checkin_attention
})
ctx['display_locale'] = dict(settings.LANGUAGES)[self.object.locale or self.request.event.settings.locale]
ctx['overpaid'] = self.order.pending_sum * -1
ctx['sales_channel'] = get_all_sales_channels().get(self.order.sales_channel)
ctx['download_buttons'] = self.download_buttons
return ctx
@cached_property
def download_buttons(self):
buttons = []
responses = register_ticket_outputs.send(self.request.event)
for receiver, response in responses:
provider = response(self.request.event)
buttons.append({
'text': provider.download_button_text or 'Ticket',
'icon': provider.download_button_icon or 'fa-download',
'identifier': provider.identifier,
'multi': provider.multi_download_enabled,
'javascript_required': provider.javascript_required
})
return buttons
def get_items(self):
queryset = self.object.all_positions
cartpos = queryset.order_by(
'item', 'variation'
).select_related(
'item', 'variation', 'addon_to', 'tax_rule'
).prefetch_related(
'item__questions', 'issued_gift_cards',
Prefetch('answers', queryset=QuestionAnswer.objects.prefetch_related('options').select_related('question')),
'checkins', 'checkins__list'
).order_by('positionid')
positions = []
for p in cartpos:
responses = question_form_fields.send(sender=self.request.event, position=p)
p.additional_fields = []
data = p.meta_info_data
for r, response in sorted(responses, key=lambda r: str(r[0])):
if response:
for key, value in response.items():
p.additional_fields.append({
'answer': data.get('question_form_data', {}).get(key),
'question': value.label
})
p.has_questions = (
p.additional_fields or
(p.item.admission and self.request.event.settings.attendee_names_asked) or
(p.item.admission and self.request.event.settings.attendee_emails_asked) or
p.item.questions.all()
)
p.cache_answers()
positions.append(p)
positions.sort(key=lambda p: p.sort_key)
return {
'positions': positions,
'raw': cartpos,
'total': self.object.total,
'fees': self.object.all_fees.all(),
'net_total': self.object.net_total,
'tax_total': self.object.tax_total,
}
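# Asynchronous ticket download. POST queues generation through the selected
# ticket output provider, GET polls for the result and finally serves (or
# redirects to) the cached ticket file.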
class OrderDownload(AsyncAction, OrderView):
task = generate
permission = 'can_view_orders'
def get_success_url(self, value):
return self.get_self_url()
def get_error_url(self):
return self.get_order_url()
def get_self_url(self):
return reverse('control:event.order.download.ticket', kwargs=self.kwargs)
@cached_property
def output(self):
responses = register_ticket_outputs.send(self.request.event)
for receiver, response in responses:
provider = response(self.request.event)
if provider.identifier == self.kwargs.get('output'):
return provider
@cached_property
def order_position(self):
try:
return self.order.positions.get(pk=self.kwargs.get('position'))
except OrderPosition.DoesNotExist:
return None
def get(self, request, *args, **kwargs):
if 'async_id' in request.GET and settings.HAS_CELERY:
return self.get_result(request)
ct = self.get_last_ct()
if ct:
return self.success(ct)
return self.http_method_not_allowed(request)
def post(self, request, *args, **kwargs):
if not self.output:
return self.error(_('You requested an invalid ticket output type.'))
if not self.order_position:
raise Http404(_('Unknown order code or not authorized to access this order.'))
if 'position' in kwargs and not self.order_position.generate_ticket:
return self.error(_('Ticket download is not enabled for this product.'))
ct = self.get_last_ct()
if ct:
return self.success(ct)
return self.do('orderposition' if 'position' in kwargs else 'order',
self.order_position.pk if 'position' in kwargs else self.order.pk,
self.output.identifier)
def get_success_message(self, value):
return ""
def success(self, value):
if "ajax" in self.request.POST or "ajax" in self.request.GET:
return JsonResponse({
'ready': True,
'success': True,
'redirect': self.get_success_url(value),
'message': str(self.get_success_message(value))
})
if isinstance(value, CachedTicket):
if value.type == 'text/uri-list':
resp = HttpResponseRedirect(value.file.file.read())
return resp
else:
resp = FileResponse(value.file.file, content_type=value.type)
resp['Content-Disposition'] = 'attachment; filename="{}-{}-{}-{}{}"'.format(
self.request.event.slug.upper(), self.order.code, self.order_position.positionid,
self.output.identifier, value.extension
)
return resp
elif isinstance(value, CachedCombinedTicket):
resp = FileResponse(value.file.file, content_type=value.type)
resp['Content-Disposition'] = 'attachment; filename="{}-{}-{}{}"'.format(
self.request.event.slug.upper(), self.order.code, self.output.identifier, value.extension
)
return resp
else:
return redirect(self.get_self_url())
def get_last_ct(self):
if 'position' in self.kwargs:
ct = CachedTicket.objects.filter(
order_position=self.order_position, provider=self.output.identifier, file__isnull=False
).last()
else:
ct = CachedCombinedTicket.objects.filter(
order=self.order, provider=self.output.identifier, file__isnull=False
).last()
if not ct or not ct.file:
return None
return ct
class OrderComment(OrderView):
permission = 'can_change_orders'
def post(self, *args, **kwargs):
form = CommentForm(self.request.POST)
if form.is_valid():
if form.cleaned_data.get('comment') != self.order.comment:
self.order.comment = form.cleaned_data.get('comment')
self.order.log_action('pretix.event.order.comment', user=self.request.user, data={
'new_comment': form.cleaned_data.get('comment')
})
if form.cleaned_data.get('checkin_attention') != self.order.checkin_attention:
self.order.checkin_attention = form.cleaned_data.get('checkin_attention')
self.order.log_action('pretix.event.order.checkin_attention', user=self.request.user, data={
'new_value': form.cleaned_data.get('checkin_attention')
})
self.order.save(update_fields=['checkin_attention', 'comment'])
messages.success(self.request, _('The comment has been updated.'))
else:
messages.error(self.request, _('Could not update the comment.'))
return redirect(self.get_order_url())
def get(self, *args, **kwargs):
return HttpResponseNotAllowed(['POST'])
class OrderApprove(OrderView):
permission = 'can_change_orders'
def post(self, *args, **kwargs):
if self.order.require_approval:
try:
approve_order(self.order, user=self.request.user)
except OrderError as e:
messages.error(self.request, str(e))
else:
messages.success(self.request, _('The order has been approved.'))
return redirect(self.get_order_url())
def get(self, *args, **kwargs):
return render(self.request, 'pretixcontrol/order/approve.html', {
'order': self.order,
})
class OrderDelete(OrderView):
permission = 'can_change_orders'
def post(self, *args, **kwargs):
if self.order.testmode:
try:
with transaction.atomic():
self.order.gracefully_delete(user=self.request.user)
messages.success(self.request, _('The order has been deleted.'))
return redirect(reverse('control:event.orders', kwargs={
'event': self.request.event.slug,
'organizer': self.request.organizer.slug,
}))
except ProtectedError:
messages.error(self.request, _('The order could not be deleted as some constraints (e.g. data created '
'by plug-ins) do not allow it.'))
return self.get(self.request, *self.args, **self.kwargs)
return redirect(self.get_order_url())
def get(self, *args, **kwargs):
if not self.order.testmode:
messages.error(self.request, _('Only orders created in test mode can be deleted.'))
return redirect(self.get_order_url())
return render(self.request, 'pretixcontrol/order/delete.html', {
'order': self.order,
})
class OrderDeny(OrderView):
permission = 'can_change_orders'
def post(self, *args, **kwargs):
if self.order.require_approval:
try:
deny_order(self.order, user=self.request.user,
comment=self.request.POST.get('comment'),
send_mail=self.request.POST.get('send_email') == 'on')
except OrderError as e:
messages.error(self.request, str(e))
else:
messages.success(self.request, _('The order has been denied and is therefore now canceled.'))
return redirect(self.get_order_url())
def get(self, *args, **kwargs):
return render(self.request, 'pretixcontrol/order/deny.html', {
'order': self.order,
})
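# Manual payment/refund state transitions. Provider calls and their audit
# log entries are wrapped in atomic blocks so they succeed or fail together.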
class OrderPaymentCancel(OrderView):
permission = 'can_change_orders'
@cached_property
def payment(self):
return get_object_or_404(self.order.payments, pk=self.kwargs['payment'])
def post(self, *args, **kwargs):
if self.payment.state in (OrderPayment.PAYMENT_STATE_CREATED, OrderPayment.PAYMENT_STATE_PENDING):
try:
with transaction.atomic():
self.payment.payment_provider.cancel_payment(self.payment)
self.order.log_action('pretix.event.order.payment.canceled', {
'local_id': self.payment.local_id,
'provider': self.payment.provider,
}, user=self.request.user if self.request.user.is_authenticated else None)
except PaymentException as e:
self.order.log_action(
'pretix.event.order.payment.canceled.failed',
{
'local_id': self.payment.local_id,
'provider': self.payment.provider,
'error': str(e)
},
user=self.request.user if self.request.user.is_authenticated else None,
)
messages.error(self.request, str(e))
else:
messages.success(self.request, _('This payment has been canceled.'))
else:
messages.error(self.request, _('This payment can not be canceled at the moment.'))
return redirect(self.get_order_url())
def get(self, *args, **kwargs):
return render(self.request, 'pretixcontrol/order/pay_cancel.html', {
'order': self.order,
})
class OrderRefundCancel(OrderView):
permission = 'can_change_orders'
@cached_property
def refund(self):
return get_object_or_404(self.order.refunds, pk=self.kwargs['refund'])
def post(self, *args, **kwargs):
if self.refund.state in (OrderRefund.REFUND_STATE_CREATED, OrderRefund.REFUND_STATE_TRANSIT,
OrderRefund.REFUND_STATE_EXTERNAL):
with transaction.atomic():
self.refund.state = OrderRefund.REFUND_STATE_CANCELED
self.refund.save()
self.order.log_action('pretix.event.order.refund.canceled', {
'local_id': self.refund.local_id,
'provider': self.refund.provider,
}, user=self.request.user)
messages.success(self.request, _('The refund has been canceled.'))
else:
messages.error(self.request, _('This refund can not be canceled at the moment.'))
if "next" in self.request.GET and is_safe_url(self.request.GET.get("next"), allowed_hosts=None):
return redirect(self.request.GET.get("next"))
return redirect(self.get_order_url())
def get(self, *args, **kwargs):
return render(self.request, 'pretixcontrol/order/refund_cancel.html', {
'order': self.order,
})
class OrderRefundProcess(OrderView):
permission = 'can_change_orders'
@cached_property
def refund(self):
return get_object_or_404(self.order.refunds, pk=self.kwargs['refund'])
def post(self, *args, **kwargs):
if self.refund.state == OrderRefund.REFUND_STATE_EXTERNAL:
self.refund.done(user=self.request.user)
if self.request.POST.get("action") == "r" and self.order.status != Order.STATUS_CANCELED:
mark_order_refunded(self.order, user=self.request.user)
elif not (self.order.status == Order.STATUS_PAID and self.order.pending_sum <= 0):
self.order.status = Order.STATUS_PENDING
self.order.set_expires(
now(),
self.order.event.subevents.filter(
id__in=self.order.positions.values_list('subevent_id', flat=True))
)
self.order.save(update_fields=['status', 'expires'])
messages.success(self.request, _('The refund has been processed.'))
else:
messages.error(self.request, _('This refund can not be processed at the moment.'))
if "next" in self.request.GET and is_safe_url(self.request.GET.get("next"), allowed_hosts=None):
return redirect(self.request.GET.get("next"))
return redirect(self.get_order_url())
def get(self, *args, **kwargs):
return render(self.request, 'pretixcontrol/order/refund_process.html', {
'order': self.order,
'refund': self.refund,
'pending_sum': self.order.pending_sum + self.refund.amount,
'propose_cancel': self.order.pending_sum + self.refund.amount >= self.order.total
})
class OrderRefundDone(OrderView):
permission = 'can_change_orders'
@cached_property
def refund(self):
return get_object_or_404(self.order.refunds, pk=self.kwargs['refund'])
def post(self, *args, **kwargs):
if self.refund.state in (OrderRefund.REFUND_STATE_CREATED, OrderRefund.REFUND_STATE_TRANSIT):
self.refund.done(user=self.request.user)
messages.success(self.request, _('The refund has been marked as done.'))
else:
messages.error(self.request, _('This refund can not be processed at the moment.'))
if "next" in self.request.GET and is_safe_url(self.request.GET.get("next"), allowed_hosts=None):
return redirect(self.request.GET.get("next"))
return redirect(self.get_order_url())
def get(self, *args, **kwargs):
return render(self.request, 'pretixcontrol/order/refund_done.html', {
'order': self.order,
})
class OrderPaymentConfirm(OrderView):
permission = 'can_change_orders'
@cached_property
def payment(self):
return get_object_or_404(self.order.payments, pk=self.kwargs['payment'])
@cached_property
def mark_paid_form(self):
return ConfirmPaymentForm(
instance=self.order,
data=self.request.POST if self.request.method == "POST" else None,
)
def post(self, *args, **kwargs):
if self.payment.state in (OrderPayment.PAYMENT_STATE_CREATED, OrderPayment.PAYMENT_STATE_PENDING):
if not self.mark_paid_form.is_valid():
return render(self.request, 'pretixcontrol/order/pay_complete.html', {
'form': self.mark_paid_form,
'order': self.order,
})
try:
self.payment.confirm(user=self.request.user,
count_waitinglist=False,
force=self.mark_paid_form.cleaned_data.get('force', False))
except Quota.QuotaExceededException as e:
messages.error(self.request, str(e))
except PaymentException as e:
messages.error(self.request, str(e))
except SendMailException:
messages.warning(self.request,
_('The payment has been marked as complete, but we were unable to send a '
'confirmation mail.'))
else:
messages.success(self.request, _('The payment has been marked as complete.'))
else:
messages.error(self.request, _('This payment can not be confirmed at the moment.'))
return redirect(self.get_order_url())
def get(self, *args, **kwargs):
return render(self.request, 'pretixcontrol/order/pay_complete.html', {
'form': self.mark_paid_form,
'order': self.order,
})
class OrderRefundView(OrderView):
permission = 'can_change_orders'
@cached_property
def start_form(self):
return OrderRefundForm(
order=self.order,
data=self.request.POST if self.request.method == "POST" else (
self.request.GET if "start-action" in self.request.GET else None
),
prefix='start',
initial={
'partial_amount': self.order.payment_refund_sum,
'action': (
'mark_pending' if self.order.status == Order.STATUS_PAID
else 'do_nothing'
)
}
)
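    # Second step of the refund flow: propose how the requested amount could
    # be split across confirmed payments, then validate the admin's selection
    # of manual, gift card, offsetting and provider refunds. Refunds are only
    # created if the selected amounts add up exactly to the requested total.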
def choose_form(self):
payments = list(self.order.payments.filter(state=OrderPayment.PAYMENT_STATE_CONFIRMED))
if self.start_form.cleaned_data.get('mode') == 'full':
full_refund = self.order.payment_refund_sum
else:
full_refund = self.start_form.cleaned_data.get('partial_amount')
proposals = self.order.propose_auto_refunds(full_refund, payments=payments)
to_refund = full_refund - sum(proposals.values())
for p in payments:
p.propose_refund = proposals.get(p, 0)
if 'perform' in self.request.POST:
refund_selected = Decimal('0.00')
refunds = []
is_valid = True
manual_value = self.request.POST.get('refund-manual', '0') or '0'
manual_value = formats.sanitize_separators(manual_value)
try:
manual_value = Decimal(manual_value)
except (DecimalException, TypeError):
messages.error(self.request, _('You entered an invalid number.'))
is_valid = False
else:
refund_selected += manual_value
if manual_value:
refunds.append(OrderRefund(
order=self.order,
payment=None,
source=OrderRefund.REFUND_SOURCE_ADMIN,
state=(
OrderRefund.REFUND_STATE_DONE
if self.request.POST.get('manual_state') == 'done'
else OrderRefund.REFUND_STATE_CREATED
),
amount=manual_value,
provider='manual'
))
giftcard_value = self.request.POST.get('refund-new-giftcard', '0') or '0'
giftcard_value = formats.sanitize_separators(giftcard_value)
try:
giftcard_value = Decimal(giftcard_value)
except (DecimalException, TypeError):
messages.error(self.request, _('You entered an invalid number.'))
is_valid = False
else:
if giftcard_value:
refund_selected += giftcard_value
giftcard = self.request.organizer.issued_gift_cards.create(
currency=self.request.event.currency,
testmode=self.order.testmode
)
giftcard.log_action('pretix.giftcards.created', user=self.request.user, data={})
refunds.append(OrderRefund(
order=self.order,
payment=None,
source=OrderRefund.REFUND_SOURCE_ADMIN,
state=OrderRefund.REFUND_STATE_CREATED,
execution_date=now(),
amount=giftcard_value,
provider='giftcard',
info=json.dumps({
'gift_card': giftcard.pk
})
))
offsetting_value = self.request.POST.get('refund-offsetting', '0') or '0'
offsetting_value = formats.sanitize_separators(offsetting_value)
try:
offsetting_value = Decimal(offsetting_value)
except (DecimalException, TypeError):
messages.error(self.request, _('You entered an invalid number.'))
is_valid = False
else:
if offsetting_value:
refund_selected += offsetting_value
try:
order = Order.objects.get(code=self.request.POST.get('order-offsetting'),
event__organizer=self.request.organizer)
except Order.DoesNotExist:
messages.error(self.request, _('You entered an order that could not be found.'))
is_valid = False
else:
refunds.append(OrderRefund(
order=self.order,
payment=None,
source=OrderRefund.REFUND_SOURCE_ADMIN,
state=OrderRefund.REFUND_STATE_DONE,
execution_date=now(),
amount=offsetting_value,
provider='offsetting',
info=json.dumps({
'orders': [order.code]
})
))
for p in payments:
value = self.request.POST.get('refund-{}'.format(p.pk), '0') or '0'
value = formats.sanitize_separators(value)
try:
value = Decimal(value)
except (DecimalException, TypeError):
messages.error(self.request, _('You entered an invalid number.'))
is_valid = False
else:
if value == 0:
continue
elif value > p.available_amount:
messages.error(self.request, _('You can not refund more than the amount of a '
'payment that is not yet refunded.'))
is_valid = False
break
elif value != p.amount and not p.partial_refund_possible:
messages.error(self.request, _('You selected a partial refund for a payment method that '
'only supports full refunds.'))
is_valid = False
break
elif (p.partial_refund_possible or p.full_refund_possible) and value > 0:
refund_selected += value
refunds.append(OrderRefund(
order=self.order,
payment=p,
source=OrderRefund.REFUND_SOURCE_ADMIN,
state=OrderRefund.REFUND_STATE_CREATED,
amount=value,
provider=p.provider
))
any_success = False
if refund_selected == full_refund and is_valid:
for r in refunds:
r.save()
self.order.log_action('pretix.event.order.refund.created', {
'local_id': r.local_id,
'provider': r.provider,
}, user=self.request.user)
if r.payment or r.provider == "offsetting" or r.provider == "giftcard":
try:
r.payment_provider.execute_refund(r)
except PaymentException as e:
r.state = OrderRefund.REFUND_STATE_FAILED
r.save()
messages.error(self.request, _('One of the refunds failed to be processed. You should '
'retry to refund in a different way. The error message '
'was: {}').format(str(e)))
else:
any_success = True
if r.state == OrderRefund.REFUND_STATE_DONE:
messages.success(self.request, _('A refund of {} has been processed.').format(
money_filter(r.amount, self.request.event.currency)
))
elif r.state == OrderRefund.REFUND_STATE_CREATED:
messages.info(self.request, _('A refund of {} has been saved, but not yet '
'fully executed. You can mark it as complete '
'below.').format(
money_filter(r.amount, self.request.event.currency)
))
else:
any_success = True
if any_success:
if self.start_form.cleaned_data.get('action') == 'mark_refunded':
mark_order_refunded(self.order, user=self.request.user)
elif self.start_form.cleaned_data.get('action') == 'mark_pending':
if not (self.order.status == Order.STATUS_PAID and self.order.pending_sum <= 0):
self.order.status = Order.STATUS_PENDING
self.order.set_expires(
now(),
self.order.event.subevents.filter(
id__in=self.order.positions.values_list('subevent_id', flat=True))
)
self.order.save(update_fields=['status', 'expires'])
if giftcard_value and self.order.email:
messages.success(self.request, _('A new gift card was created. You can now send the user their '
'gift card code.'))
return redirect(reverse('control:event.order.sendmail', kwargs={
'event': self.request.event.slug,
'organizer': self.request.event.organizer.slug,
'code': self.order.code
}) + '?' + urlencode({
'subject': _('Your gift card code'),
'message': _('Hello,\n\nwe have refunded you {amount} for your order.\n\nYou can use the gift '
'card code {giftcard} to pay for future ticket purchases in our shop.\n\n'
'Your {event} team').format(
event="{event}",
amount=money_filter(giftcard_value, self.request.event.currency),
giftcard=giftcard.secret,
)
}))
return redirect(self.get_order_url())
else:
messages.error(self.request, _('The refunds you selected do not match the selected total refund '
'amount.'))
return render(self.request, 'pretixcontrol/order/refund_choose.html', {
'payments': payments,
'remainder': to_refund,
'order': self.order,
'partial_amount': (
self.request.POST.get('start-partial_amount') if self.request.method == 'POST'
else self.request.GET.get('start-partial_amount')
),
'start_form': self.start_form
})
def post(self, *args, **kwargs):
if self.start_form.is_valid():
return self.choose_form()
return self.get(*args, **kwargs)
def get(self, *args, **kwargs):
if self.start_form.is_valid():
return self.choose_form()
return render(self.request, 'pretixcontrol/order/refund_start.html', {
'form': self.start_form,
'order': self.order,
})
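# Status transitions from the order detail page (mark paid, cancel, expire).
# Marking as paid reuses a pending manual payment of the same amount if one
# exists; otherwise all pending payments are canceled and a fresh manual
# payment is created and confirmed.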
class OrderTransition(OrderView):
permission = 'can_change_orders'
@cached_property
def mark_paid_form(self):
return MarkPaidForm(
instance=self.order,
data=self.request.POST if self.request.method == "POST" else None,
)
@cached_property
def mark_canceled_form(self):
return CancelForm(
instance=self.order,
data=self.request.POST if self.request.method == "POST" else None,
)
def post(self, *args, **kwargs):
to = self.request.POST.get('status', '')
if self.order.status in (Order.STATUS_PENDING, Order.STATUS_EXPIRED) and to == 'p' and self.mark_paid_form.is_valid():
ps = self.mark_paid_form.cleaned_data['amount']
try:
p = self.order.payments.get(
state__in=(OrderPayment.PAYMENT_STATE_PENDING, OrderPayment.PAYMENT_STATE_CREATED),
provider='manual',
amount=ps
)
except OrderPayment.DoesNotExist:
for p in self.order.payments.filter(state__in=(OrderPayment.PAYMENT_STATE_PENDING,
OrderPayment.PAYMENT_STATE_CREATED)):
try:
with transaction.atomic():
p.payment_provider.cancel_payment(p)
self.order.log_action('pretix.event.order.payment.canceled', {
'local_id': p.local_id,
'provider': p.provider,
}, user=self.request.user if self.request.user.is_authenticated else None)
except PaymentException as e:
self.order.log_action(
'pretix.event.order.payment.canceled.failed',
{
'local_id': p.local_id,
'provider': p.provider,
'error': str(e)
},
user=self.request.user if self.request.user.is_authenticated else None,
)
p = self.order.payments.create(
state=OrderPayment.PAYMENT_STATE_CREATED,
provider='manual',
amount=ps,
fee=None
)
payment_date = None
if self.mark_paid_form.cleaned_data['payment_date'] != now().date():
payment_date = make_aware(datetime.combine(
self.mark_paid_form.cleaned_data['payment_date'],
time(hour=0, minute=0, second=0)
), self.order.event.timezone)
try:
p.confirm(user=self.request.user, count_waitinglist=False, payment_date=payment_date,
force=self.mark_paid_form.cleaned_data.get('force', False))
except Quota.QuotaExceededException as e:
p.state = OrderPayment.PAYMENT_STATE_FAILED
p.save()
self.order.log_action('pretix.event.order.payment.failed', {
'local_id': p.local_id,
'provider': p.provider,
'message': str(e)
})
messages.error(self.request, str(e))
except PaymentException as e:
p.state = OrderPayment.PAYMENT_STATE_FAILED
p.save()
self.order.log_action('pretix.event.order.payment.failed', {
'local_id': p.local_id,
'provider': p.provider,
'message': str(e)
})
messages.error(self.request, str(e))
except SendMailException:
messages.warning(self.request, _('The order has been marked as paid, but we were unable to send a '
'confirmation mail.'))
else:
messages.success(self.request, _('The payment has been created successfully.'))
elif self.order.cancel_allowed() and to == 'c' and self.mark_canceled_form.is_valid():
try:
cancel_order(self.order, user=self.request.user,
send_mail=self.mark_canceled_form.cleaned_data['send_email'],
cancellation_fee=self.mark_canceled_form.cleaned_data.get('cancellation_fee'))
except OrderError as e:
messages.error(self.request, str(e))
else:
self.order.refresh_from_db()
if self.order.pending_sum < 0:
messages.success(self.request, _('The order has been canceled. You can now select how you want to '
'transfer the money back to the user.'))
return redirect(reverse('control:event.order.refunds.start', kwargs={
'event': self.request.event.slug,
'organizer': self.request.event.organizer.slug,
'code': self.order.code
}) + '?start-action=do_nothing&start-mode=partial&start-partial_amount={}'.format(
self.order.pending_sum * -1
))
messages.success(self.request, _('The order has been canceled.'))
elif self.order.status == Order.STATUS_PENDING and to == 'e':
mark_order_expired(self.order, user=self.request.user)
messages.success(self.request, _('The order has been marked as expired.'))
return redirect(self.get_order_url())
def get(self, *args, **kwargs):
to = self.request.GET.get('status', '')
if self.order.status in (Order.STATUS_PENDING, Order.STATUS_EXPIRED) and to == 'p':
return render(self.request, 'pretixcontrol/order/pay.html', {
'form': self.mark_paid_form,
'order': self.order,
})
elif self.order.cancel_allowed() and to == 'c':
return render(self.request, 'pretixcontrol/order/cancel.html', {
'form': self.mark_canceled_form,
'order': self.order,
})
else:
return HttpResponseNotAllowed(['POST'])
class OrderInvoiceCreate(OrderView):
permission = 'can_change_orders'
def post(self, *args, **kwargs):
has_inv = self.order.invoices.exists() and not (
self.order.status in (Order.STATUS_PAID, Order.STATUS_PENDING)
and self.order.invoices.filter(is_cancellation=True).count() >= self.order.invoices.filter(is_cancellation=False).count()
)
if self.request.event.settings.get('invoice_generate') not in ('admin', 'user', 'paid', 'True') or not invoice_qualified(self.order):
messages.error(self.request, _('You cannot generate an invoice for this order.'))
elif has_inv:
messages.error(self.request, _('An invoice for this order already exists.'))
else:
inv = generate_invoice(self.order)
self.order.log_action('pretix.event.order.invoice.generated', user=self.request.user, data={
'invoice': inv.pk
})
messages.success(self.request, _('The invoice has been generated.'))
return redirect(self.get_order_url())
def get(self, *args, **kwargs):
return HttpResponseNotAllowed(['POST'])
class OrderCheckVATID(OrderView):
permission = 'can_change_orders'
def post(self, *args, **kwargs):
try:
ia = self.order.invoice_address
except InvoiceAddress.DoesNotExist:
messages.error(self.request, _('No VAT ID specified.'))
return redirect(self.get_order_url())
else:
if not ia.vat_id:
messages.error(self.request, _('No VAT ID specified.'))
return redirect(self.get_order_url())
if not ia.country:
messages.error(self.request, _('No country specified.'))
return redirect(self.get_order_url())
if str(ia.country) not in EU_COUNTRIES:
messages.error(self.request, _('VAT ID could not be checked since a non-EU country has been '
'specified.'))
return redirect(self.get_order_url())
if ia.vat_id[:2] != cc_to_vat_prefix(str(ia.country)):
messages.error(self.request, _('Your VAT ID does not match the selected country.'))
return redirect(self.get_order_url())
try:
result = vat_moss.id.validate(ia.vat_id)
if result:
country_code, normalized_id, company_name = result
ia.vat_id_validated = True
ia.vat_id = normalized_id
ia.save()
except vat_moss.errors.InvalidError:
messages.error(self.request, _('This VAT ID is not valid.'))
except vat_moss.errors.WebServiceUnavailableError:
logger.exception('VAT ID checking failed for country {}'.format(ia.country))
messages.error(self.request, _('The VAT ID could not be checked, as the VAT checking service of '
'the country is currently not available.'))
else:
messages.success(self.request, _('This VAT ID is valid.'))
return redirect(self.get_order_url())
def get(self, *args, **kwargs): # NOQA
return HttpResponseNotAllowed(['POST'])
class OrderInvoiceRegenerate(OrderView):
permission = 'can_change_orders'
def post(self, *args, **kwargs):
try:
inv = self.order.invoices.get(pk=kwargs.get('id'))
except Invoice.DoesNotExist:
messages.error(self.request, _('Unknown invoice.'))
else:
if inv.canceled:
messages.error(self.request, _('The invoice has already been canceled.'))
elif inv.shredded:
messages.error(self.request, _('The invoice has been cleaned of personal data.'))
else:
inv = regenerate_invoice(inv)
self.order.log_action('pretix.event.order.invoice.regenerated', user=self.request.user, data={
'invoice': inv.pk
})
messages.success(self.request, _('The invoice has been regenerated.'))
return redirect(self.get_order_url())
def get(self, *args, **kwargs): # NOQA
return HttpResponseNotAllowed(['POST'])
class OrderInvoiceReissue(OrderView):
permission = 'can_change_orders'
def post(self, *args, **kwargs):
try:
inv = self.order.invoices.get(pk=kwargs.get('id'))
except Invoice.DoesNotExist:
messages.error(self.request, _('Unknown invoice.'))
else:
if inv.canceled:
messages.error(self.request, _('The invoice has already been canceled.'))
elif inv.shredded:
messages.error(self.request, _('The invoice has been cleaned of personal data.'))
else:
c = generate_cancellation(inv)
if self.order.status != Order.STATUS_CANCELED:
inv = generate_invoice(self.order)
else:
inv = c
self.order.log_action('pretix.event.order.invoice.reissued', user=self.request.user, data={
'invoice': inv.pk
})
messages.success(self.request, _('The invoice has been reissued.'))
return redirect(self.get_order_url())
def get(self, *args, **kwargs): # NOQA
return HttpResponseNotAllowed(['POST'])
class OrderResendLink(OrderView):
permission = 'can_change_orders'
def post(self, *args, **kwargs):
try:
if 'position' in kwargs:
p = get_object_or_404(self.order.positions, pk=kwargs['position'])
p.resend_link(user=self.request.user)
else:
self.order.resend_link(user=self.request.user)
except SendMailException:
messages.error(self.request, _('There was an error sending the mail. Please try again later.'))
return redirect(self.get_order_url())
messages.success(self.request, _('The email has been queued to be sent.'))
return redirect(self.get_order_url())
def get(self, *args, **kwargs):
return HttpResponseNotAllowed(['POST'])
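# Serves invoice PDFs, triggering (re-)rendering when the cached file is
# missing, e.g. because generation was deferred to a background worker.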
class InvoiceDownload(EventPermissionRequiredMixin, View):
permission = 'can_view_orders'
def get_order_url(self):
return reverse('control:event.order', kwargs={
'event': self.request.event.slug,
'organizer': self.request.event.organizer.slug,
'code': self.invoice.order.code
})
def get(self, request, *args, **kwargs):
try:
self.invoice = Invoice.objects.get(
event=self.request.event,
id=self.kwargs['invoice']
)
except Invoice.DoesNotExist:
raise Http404(_('This invoice has not been found'))
if not self.invoice.file:
invoice_pdf(self.invoice.pk)
self.invoice = Invoice.objects.get(pk=self.invoice.pk)
if self.invoice.shredded:
messages.error(request, _('The invoice file is no longer stored on the server.'))
return redirect(self.get_order_url())
if not self.invoice.file:
            # This happens when celery is installed and the file is still being generated in the background
messages.warning(request, _('The invoice file has not yet been generated, we will generate it for you '
'now. Please try again in a few seconds.'))
return redirect(self.get_order_url())
try:
resp = FileResponse(self.invoice.file.file, content_type='application/pdf')
except FileNotFoundError:
invoice_pdf_task.apply(args=(self.invoice.pk,))
return self.get(request, *args, **kwargs)
resp['Content-Disposition'] = 'inline; filename="{}.pdf"'.format(self.invoice.number)
        resp._csp_ignore = True  # Some browsers' built-in PDF readers do not work with CSP
return resp
class OrderExtend(OrderView):
permission = 'can_change_orders'
def post(self, *args, **kwargs):
if self.form.is_valid():
try:
extend_order(
self.order,
new_date=self.form.cleaned_data.get('expires'),
force=self.form.cleaned_data.get('quota_ignore', False),
user=self.request.user
)
messages.success(self.request, _('The payment term has been changed.'))
except OrderError as e:
messages.error(self.request, str(e))
return self._redirect_here()
except LockTimeoutException:
messages.error(self.request, _('We were not able to process the request completely as the '
'server was too busy.'))
return self._redirect_back()
else:
return self.get(*args, **kwargs)
def dispatch(self, request, *args, **kwargs):
if self.order.status not in (Order.STATUS_PENDING, Order.STATUS_EXPIRED):
messages.error(self.request, _('This action is only allowed for pending orders.'))
return self._redirect_back()
        return super().dispatch(request, *args, **kwargs)
def _redirect_here(self):
return redirect('control:event.order.extend',
event=self.request.event.slug,
organizer=self.request.event.organizer.slug,
code=self.order.code)
def get(self, *args, **kwargs):
return render(self.request, 'pretixcontrol/order/extend.html', {
'order': self.order,
'form': self.form,
})
@cached_property
def form(self):
return ExtendForm(instance=self.order,
data=self.request.POST if self.request.method == "POST" else None)
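# Order change view. All requested operations are collected into one
# OrderChangeManager and committed together, conceptually:
#
#     ocm = OrderChangeManager(order, user=user, notify=True)
#     ocm.change_price(position, new_price)
#     ocm.commit()  # validates quotas, may reissue the invoice
#
# (illustrative sketch only; see post() below for the real flow)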
class OrderChange(OrderView):
permission = 'can_change_orders'
template_name = 'pretixcontrol/order/change.html'
def dispatch(self, request, *args, **kwargs):
if self.order.status not in (Order.STATUS_PENDING, Order.STATUS_PAID):
messages.error(self.request, _('This action is only allowed for pending or paid orders.'))
return self._redirect_back()
return super().dispatch(request, *args, **kwargs)
@cached_property
def other_form(self):
return OtherOperationsForm(prefix='other', order=self.order,
data=self.request.POST if self.request.method == "POST" else None)
@cached_property
def add_formset(self):
ff = formset_factory(
OrderPositionAddForm, formset=OrderPositionAddFormset,
can_order=False, can_delete=True, extra=0
)
return ff(
prefix='add',
order=self.order,
data=self.request.POST if self.request.method == "POST" else None
)
@cached_property
def fees(self):
fees = list(self.order.fees.all())
for f in fees:
f.form = OrderFeeChangeForm(prefix='of-{}'.format(f.pk), instance=f,
data=self.request.POST if self.request.method == "POST" else None)
try:
ia = self.order.invoice_address
except InvoiceAddress.DoesNotExist:
ia = None
f.apply_tax = self.request.event.settings.tax_rate_default and self.request.event.settings.tax_rate_default.tax_applicable(invoice_address=ia)
return fees
@cached_property
def positions(self):
positions = list(self.order.positions.all())
for p in positions:
p.form = OrderPositionChangeForm(prefix='op-{}'.format(p.pk), instance=p,
initial={'seat': p.seat.seat_guid if p.seat else None},
data=self.request.POST if self.request.method == "POST" else None)
try:
ia = self.order.invoice_address
except InvoiceAddress.DoesNotExist:
ia = None
p.apply_tax = p.item.tax_rule and p.item.tax_rule.tax_applicable(invoice_address=ia)
return positions
def get_context_data(self, **kwargs):
ctx = super().get_context_data(**kwargs)
ctx['positions'] = self.positions
ctx['fees'] = self.fees
ctx['add_formset'] = self.add_formset
ctx['other_form'] = self.other_form
return ctx
def _process_other(self, ocm):
if not self.other_form.is_valid():
return False
else:
if self.other_form.cleaned_data['recalculate_taxes']:
ocm.recalculate_taxes()
return True
def _process_add(self, ocm):
if not self.add_formset.is_valid():
return False
else:
for f in self.add_formset.forms:
if f in self.add_formset.deleted_forms or not f.has_changed():
continue
if '-' in f.cleaned_data['itemvar']:
itemid, varid = f.cleaned_data['itemvar'].split('-')
else:
itemid, varid = f.cleaned_data['itemvar'], None
item = Item.objects.get(pk=itemid, event=self.request.event)
if varid:
variation = ItemVariation.objects.get(pk=varid, item=item)
else:
variation = None
try:
ocm.add_position(item, variation,
f.cleaned_data['price'],
f.cleaned_data.get('addon_to'),
f.cleaned_data.get('subevent'),
f.cleaned_data.get('seat'))
except OrderError as e:
f.custom_error = str(e)
return False
return True
def _process_fees(self, ocm):
for f in self.fees:
if not f.form.is_valid():
return False
try:
if f.form.cleaned_data['operation_cancel']:
ocm.cancel_fee(f)
continue
if f.form.cleaned_data['value'] != f.value:
ocm.change_fee(f, f.form.cleaned_data['value'])
except OrderError as e:
f.custom_error = str(e)
return False
return True
def _process_change(self, ocm):
for p in self.positions:
if not p.form.is_valid():
return False
try:
if p.form.cleaned_data['operation_cancel']:
ocm.cancel(p)
continue
if p.form.cleaned_data['itemvar']:
if '-' in p.form.cleaned_data['itemvar']:
itemid, varid = p.form.cleaned_data['itemvar'].split('-')
else:
itemid, varid = p.form.cleaned_data['itemvar'], None
item = Item.objects.get(pk=itemid, event=self.request.event)
if varid:
variation = ItemVariation.objects.get(pk=varid, item=item)
else:
variation = None
if item != p.item or variation != p.variation:
ocm.change_item(p, item, variation)
if self.request.event.has_subevents and p.form.cleaned_data['subevent'] and p.form.cleaned_data['subevent'] != p.subevent:
ocm.change_subevent(p, p.form.cleaned_data['subevent'])
if p.seat and p.form.cleaned_data['seat'] and p.form.cleaned_data['seat'] != p.seat.seat_guid:
ocm.change_seat(p, p.form.cleaned_data['seat'])
if p.form.cleaned_data['price'] != p.price:
ocm.change_price(p, p.form.cleaned_data['price'])
if p.form.cleaned_data['operation_split']:
ocm.split(p)
if p.form.cleaned_data['operation_secret']:
ocm.regenerate_secret(p)
except OrderError as e:
p.custom_error = str(e)
return False
return True
def post(self, *args, **kwargs):
notify = self.other_form.cleaned_data['notify'] if self.other_form.is_valid() else True
ocm = OrderChangeManager(
self.order,
user=self.request.user,
notify=notify,
reissue_invoice=self.other_form.cleaned_data['reissue_invoice'] if self.other_form.is_valid() else True
)
form_valid = self._process_add(ocm) and self._process_fees(ocm) and self._process_change(ocm) and self._process_other(ocm)
if not form_valid:
messages.error(self.request, _('An error occurred. Please see the details below.'))
else:
try:
ocm.commit(check_quotas=not self.other_form.cleaned_data['ignore_quotas'])
except OrderError as e:
messages.error(self.request, str(e))
else:
if notify:
messages.success(self.request, _('The order has been changed and the user has been notified.'))
else:
messages.success(self.request, _('The order has been changed.'))
return self._redirect_back()
return self.get(*args, **kwargs)
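# Admin-side editing of attendee data and the invoice address; unlike the
# customer-facing form, every question is optional here (all_optional).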
class OrderModifyInformation(OrderQuestionsViewMixin, OrderView):
permission = 'can_change_orders'
template_name = 'pretixcontrol/order/change_questions.html'
only_user_visible = False
all_optional = True
def get_context_data(self, **kwargs):
ctx = super().get_context_data(**kwargs)
ctx['other_form'] = self.other_form
return ctx
@cached_property
def other_form(self):
return OtherOperationsForm(prefix='other', order=self.order, initial={'notify': False},
data=self.request.POST if self.request.method == "POST" else None)
def post(self, request, *args, **kwargs):
failed = not self.save() or not self.invoice_form.is_valid() or not self.other_form.is_valid()
notify = self.other_form.cleaned_data['notify'] if self.other_form.is_valid() else True
if failed:
messages.error(self.request,
_("We had difficulties processing your input. Please review the errors below."))
return self.get(request, *args, **kwargs)
if notify:
notify_user_changed_order(self.order)
if hasattr(self.invoice_form, 'save'):
self.invoice_form.save()
self.order.log_action('pretix.event.order.modified', {
'invoice_data': self.invoice_form.cleaned_data,
'data': [{
k: (f.cleaned_data.get(k).name if isinstance(f.cleaned_data.get(k), File) else f.cleaned_data.get(k))
for k in f.changed_data
} for f in self.forms]
}, user=request.user)
if self.invoice_form.has_changed():
            messages.success(self.request, _('The invoice address has been updated. If you want to generate '
                                             'a new invoice, you need to do this manually.'))
tickets.invalidate_cache.apply_async(kwargs={'event': self.request.event.pk, 'order': self.order.pk})
order_modified.send(sender=self.request.event, order=self.order)
return redirect(self.get_order_url())
class OrderContactChange(OrderView):
permission = 'can_change_orders'
template_name = 'pretixcontrol/order/change_contact.html'
def get_context_data(self, **kwargs):
ctx = super().get_context_data()
ctx['form'] = self.form
return ctx
@cached_property
def form(self):
return OrderContactForm(
instance=self.order,
data=self.request.POST if self.request.method == "POST" else None
)
def post(self, *args, **kwargs):
old_email = self.order.email
changed = False
if self.form.is_valid():
new_email = self.form.cleaned_data['email']
if new_email != old_email:
changed = True
self.order.log_action(
'pretix.event.order.contact.changed',
data={
'old_email': old_email,
'new_email': self.form.cleaned_data['email'],
},
user=self.request.user,
)
if self.form.cleaned_data['regenerate_secrets']:
changed = True
self.order.secret = generate_secret()
for op in self.order.all_positions.all():
op.secret = generate_position_secret()
op.save()
tickets.invalidate_cache.apply_async(kwargs={'event': self.request.event.pk, 'order': self.order.pk})
self.order.log_action('pretix.event.order.secret.changed', user=self.request.user)
self.form.save()
if changed:
messages.success(self.request, _('The order has been changed.'))
else:
messages.success(self.request, _('Nothing about the order had to be changed.'))
return redirect(self.get_order_url())
return self.get(*args, **kwargs)
class OrderLocaleChange(OrderView):
permission = 'can_change_orders'
template_name = 'pretixcontrol/order/change_locale.html'
def get_context_data(self, **kwargs):
ctx = super().get_context_data()
ctx['form'] = self.form
return ctx
@cached_property
def form(self):
return OrderLocaleForm(
instance=self.order,
data=self.request.POST if self.request.method == "POST" else None
)
def post(self, *args, **kwargs):
old_locale = self.order.locale
if self.form.is_valid():
self.order.log_action(
'pretix.event.order.locale.changed',
data={
'old_locale': old_locale,
'new_locale': self.form.cleaned_data['locale'],
},
user=self.request.user,
)
self.form.save()
tickets.invalidate_cache.apply_async(kwargs={'event': self.request.event.pk, 'order': self.order.pk})
messages.success(self.request, _('The order has been changed.'))
return redirect(self.get_order_url())
return self.get(*args, **kwargs)
class OrderViewMixin:
def get_object(self, queryset=None):
try:
return Order.objects.get(
event=self.request.event,
code=self.kwargs['code'].upper()
)
except Order.DoesNotExist:
raise Http404()
@cached_property
def order(self):
return self.get_object()
def get_context_data(self, **kwargs):
ctx = super().get_context_data(**kwargs)
ctx['order'] = self.order
return ctx
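# Compose a custom e-mail to the order contact, with an inline markdown
# preview rendered from the same placeholder context as regular order mails.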
class OrderSendMail(EventPermissionRequiredMixin, OrderViewMixin, FormView):
template_name = 'pretixcontrol/order/sendmail.html'
permission = 'can_change_orders'
form_class = OrderMailForm
def get_form_kwargs(self):
kwargs = super().get_form_kwargs()
kwargs['order'] = Order.objects.get(
event=self.request.event,
code=self.kwargs['code'].upper()
)
kwargs['initial'] = {}
if self.request.GET.get('subject'):
kwargs['initial']['subject'] = self.request.GET.get('subject')
if self.request.GET.get('message'):
kwargs['initial']['message'] = self.request.GET.get('message')
return kwargs
def form_invalid(self, form):
messages.error(self.request, _('We could not send the email. See below for details.'))
return super().form_invalid(form)
def form_valid(self, form):
order = Order.objects.get(
event=self.request.event,
code=self.kwargs['code'].upper()
)
self.preview_output = {}
with language(order.locale):
email_context = get_email_context(event=order.event, order=order)
email_template = LazyI18nString(form.cleaned_data['message'])
email_content = render_mail(email_template, email_context)
if self.request.POST.get('action') == 'preview':
self.preview_output = {
'subject': _('Subject: {subject}').format(subject=form.cleaned_data['subject']),
'html': markdown_compile_email(email_content)
}
return self.get(self.request, *self.args, **self.kwargs)
else:
try:
order.send_mail(
form.cleaned_data['subject'], email_template,
email_context, 'pretix.event.order.email.custom_sent',
self.request.user, auto_email=False
)
messages.success(self.request,
                                 _('Your message has been queued and will be sent to {}.').format(order.email))
except SendMailException:
messages.error(
self.request,
                    _('Failed to send mail to the following user: {}').format(order.email)
)
return super(OrderSendMail, self).form_valid(form)
def get_success_url(self):
return reverse('control:event.order', kwargs={
'event': self.request.event.slug,
'organizer': self.request.event.organizer.slug,
'code': self.kwargs['code']
})
def get_context_data(self, *args, **kwargs):
ctx = super().get_context_data(*args, **kwargs)
ctx['preview_output'] = getattr(self, 'preview_output', None)
return ctx
class OrderEmailHistory(EventPermissionRequiredMixin, OrderViewMixin, ListView):
template_name = 'pretixcontrol/order/mail_history.html'
permission = 'can_view_orders'
model = LogEntry
context_object_name = 'logs'
paginate_by = 10
def get_queryset(self):
order = get_object_or_404(
Order,
event=self.request.event,
code=self.kwargs['code'].upper()
)
qs = order.all_logentries()
qs = qs.filter(
action_type__contains="order.email"
)
return qs
class AnswerDownload(EventPermissionRequiredMixin, OrderViewMixin, ListView):
permission = 'can_view_orders'
def get(self, request, *args, **kwargs):
answid = kwargs.get('answer')
token = request.GET.get('token', '')
answer = get_object_or_404(QuestionAnswer, orderposition__order=self.order, id=answid)
if not answer.file:
raise Http404()
if not check_token(request, answer, token):
raise Http404(_("This link is no longer valid. Please go back, refresh the page, and try again."))
ftype, ignored = mimetypes.guess_type(answer.file.name)
resp = FileResponse(answer.file, content_type=ftype or 'application/binary')
resp['Content-Disposition'] = 'attachment; filename="{}-{}-{}-{}"'.format(
self.request.event.slug.upper(), self.order.code,
answer.orderposition.positionid,
os.path.basename(answer.file.name).split('.', 1)[1]
)
return resp
class OverView(EventPermissionRequiredMixin, TemplateView):
template_name = 'pretixcontrol/orders/overview.html'
permission = 'can_view_orders'
@cached_property
def filter_form(self):
return OverviewFilterForm(data=self.request.GET, event=self.request.event)
def get_context_data(self, **kwargs):
ctx = super().get_context_data()
if self.filter_form.is_valid():
ctx['items_by_category'], ctx['total'] = order_overview(
self.request.event,
subevent=self.filter_form.cleaned_data.get('subevent'),
date_filter=self.filter_form.cleaned_data['date_axis'],
date_from=self.filter_form.cleaned_data['date_from'],
date_until=self.filter_form.cleaned_data['date_until'],
fees=True
)
else:
ctx['items_by_category'], ctx['total'] = order_overview(
self.request.event,
fees=True
)
ctx['subevent_warning'] = (
self.request.event.has_subevents and
self.filter_form.is_valid() and
self.filter_form.cleaned_data.get('subevent') and
OrderFee.objects.filter(order__event=self.request.event).exclude(value=0).exists()
)
ctx['filter_form'] = self.filter_form
return ctx
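# "Go to order" search box: accepts a bare order code, a full order URL, an
# "EVENTSLUG-CODE" string or an invoice number and redirects to the match.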
class OrderGo(EventPermissionRequiredMixin, View):
permission = 'can_view_orders'
def get_order(self, code):
try:
return Order.objects.get(code=code, event=self.request.event)
except Order.DoesNotExist:
return Order.objects.get(code=Order.normalize_code(code), event=self.request.event)
def get(self, request, *args, **kwargs):
code = request.GET.get("code", "").upper().strip()
if '://' in code:
m = re.match('.*/ORDER/([A-Z0-9]{' + str(settings.ENTROPY['order_code']) + '})/.*', code)
if m:
code = m.group(1)
try:
if code.startswith(request.event.slug.upper()):
code = code[len(request.event.slug):]
if code.startswith('-'):
code = code[1:]
order = self.get_order(code)
return redirect('control:event.order', event=request.event.slug, organizer=request.event.organizer.slug,
code=order.code)
except Order.DoesNotExist:
try:
i = self.request.event.invoices.get(Q(invoice_no=code) | Q(full_invoice_no=code))
return redirect('control:event.order', event=request.event.slug, organizer=request.event.organizer.slug,
code=i.order.code)
except Invoice.DoesNotExist:
pass
messages.error(request, _('There is no order with the given order code.'))
return redirect('control:event.orders', event=request.event.slug, organizer=request.event.organizer.slug)
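# Collects all data exporters registered for this event and binds a
# configuration form to each of them.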
class ExportMixin:
@cached_property
def exporters(self):
exporters = []
responses = register_data_exporters.send(self.request.event)
for ex in sorted([response(self.request.event) for r, response in responses], key=lambda ex: str(ex.verbose_name)):
if self.request.GET.get("identifier") and ex.identifier != self.request.GET.get("identifier"):
continue
# Use form parse cycle to generate useful defaults
test_form = ExporterForm(data=self.request.GET, prefix=ex.identifier)
test_form.fields = ex.export_form_fields
test_form.is_valid()
initial = {
k: v for k, v in test_form.cleaned_data.items() if ex.identifier + "-" + k in self.request.GET
}
ex.form = ExporterForm(
data=(self.request.POST if self.request.method == 'POST' else None),
prefix=ex.identifier,
initial=initial
)
ex.form.fields = ex.export_form_fields
exporters.append(ex)
return exporters
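# Starts the selected export as an asynchronous task and redirects to the
# generated file once the task has finished.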
class ExportDoView(EventPermissionRequiredMixin, ExportMixin, AsyncAction, View):
permission = 'can_view_orders'
known_errortypes = ['ExportError']
task = export
def get_success_message(self, value):
return None
def get_success_url(self, value):
return reverse('cachedfile.download', kwargs={'id': str(value)})
def get_error_url(self):
return reverse('control:event.orders.export', kwargs={
'event': self.request.event.slug,
'organizer': self.request.event.organizer.slug
})
@cached_property
def exporter(self):
for ex in self.exporters:
if ex.identifier == self.request.POST.get("exporter"):
return ex
def post(self, request, *args, **kwargs):
if not self.exporter:
messages.error(self.request, _('The selected exporter was not found.'))
            return redirect('control:event.orders.export',
                            event=self.request.event.slug,
                            organizer=self.request.event.organizer.slug)
if not self.exporter.form.is_valid():
messages.error(self.request, _('There was a problem processing your input. See below for error details.'))
return self.get(request, *args, **kwargs)
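        # Create a placeholder CachedFile for the export task to fill in;
        # the download expires after three days.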
cf = CachedFile()
cf.date = now()
cf.expires = now() + timedelta(days=3)
cf.save()
return self.do(self.request.event.id, str(cf.id), self.exporter.identifier, self.exporter.form.cleaned_data)
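# Renders the export settings page, listing every registered exporter with
# its configuration form.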
class ExportView(EventPermissionRequiredMixin, ExportMixin, TemplateView):
permission = 'can_view_orders'
template_name = 'pretixcontrol/orders/export.html'
def get_context_data(self, **kwargs):
ctx = super().get_context_data(**kwargs)
ctx['exporters'] = self.exporters
return ctx
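# Paginated, filterable list of the event's refunds; the filter defaults to
# showing open refunds.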
class RefundList(EventPermissionRequiredMixin, PaginationMixin, ListView):
model = OrderRefund
context_object_name = 'refunds'
template_name = 'pretixcontrol/orders/refunds.html'
permission = 'can_view_orders'
def get_queryset(self):
qs = OrderRefund.objects.filter(
order__event=self.request.event
).select_related('order')
if self.filter_form.is_valid():
qs = self.filter_form.filter_qs(qs)
return qs.distinct()
def get_context_data(self, **kwargs):
ctx = super().get_context_data(**kwargs)
ctx['filter_form'] = self.filter_form
return ctx
@cached_property
def filter_form(self):
return RefundFilterForm(data=self.request.GET, event=self.request.event,
initial={'status': 'open'})
import logging
import mimetypes
import os
import re
from datetime import datetime, time, timedelta
from decimal import Decimal, DecimalException
from urllib.parse import urlencode
import vat_moss.id
from django.conf import settings
from django.contrib import messages
from django.core.files import File
from django.db import transaction
from django.db.models import (
Count, IntegerField, OuterRef, Prefetch, ProtectedError, Q, Subquery, Sum,
)
from django.forms import formset_factory
from django.http import (
FileResponse, Http404, HttpResponseNotAllowed, HttpResponseRedirect,
JsonResponse,
)
from django.shortcuts import get_object_or_404, redirect, render
from django.urls import reverse
from django.utils import formats
from django.utils.functional import cached_property
from django.utils.http import is_safe_url
from django.utils.timezone import make_aware, now
from django.utils.translation import ugettext_lazy as _
from django.views.generic import (
DetailView, FormView, ListView, TemplateView, View,
)
from i18nfield.strings import LazyI18nString
from pretix.base.channels import get_all_sales_channels
from pretix.base.email import get_email_context
from pretix.base.i18n import language
from pretix.base.models import (
CachedCombinedTicket, CachedFile, CachedTicket, Invoice, InvoiceAddress,
Item, ItemVariation, LogEntry, Order, QuestionAnswer, Quota,
generate_position_secret, generate_secret,
)
from pretix.base.models.orders import (
OrderFee, OrderPayment, OrderPosition, OrderRefund,
)
from pretix.base.models.tax import EU_COUNTRIES, cc_to_vat_prefix
from pretix.base.payment import PaymentException
from pretix.base.services import tickets
from pretix.base.services.export import export
from pretix.base.services.invoices import (
generate_cancellation, generate_invoice, invoice_pdf, invoice_pdf_task,
invoice_qualified, regenerate_invoice,
)
from pretix.base.services.locking import LockTimeoutException
from pretix.base.services.mail import SendMailException, render_mail
from pretix.base.services.orders import (
OrderChangeManager, OrderError, approve_order, cancel_order, deny_order,
extend_order, mark_order_expired, mark_order_refunded,
notify_user_changed_order,
)
from pretix.base.services.stats import order_overview
from pretix.base.services.tickets import generate
from pretix.base.signals import (
order_modified, register_data_exporters, register_ticket_outputs,
)
from pretix.base.templatetags.money import money_filter
from pretix.base.templatetags.rich_text import markdown_compile_email
from pretix.base.views.mixins import OrderQuestionsViewMixin
from pretix.base.views.tasks import AsyncAction
from pretix.control.forms.filter import (
EventOrderFilterForm, OverviewFilterForm, RefundFilterForm,
)
from pretix.control.forms.orders import (
CancelForm, CommentForm, ConfirmPaymentForm, ExporterForm, ExtendForm,
MarkPaidForm, OrderContactForm, OrderFeeChangeForm, OrderLocaleForm,
OrderMailForm, OrderPositionAddForm, OrderPositionAddFormset,
OrderPositionChangeForm, OrderRefundForm, OtherOperationsForm,
)
from pretix.control.permissions import EventPermissionRequiredMixin
from pretix.control.views import PaginationMixin
from pretix.helpers.safedownload import check_token
from pretix.presale.signals import question_form_fields
logger = logging.getLogger(__name__)
class OrderList(EventPermissionRequiredMixin, PaginationMixin, ListView):
model = Order
context_object_name = 'orders'
template_name = 'pretixcontrol/orders/index.html'
permission = 'can_view_orders'
def get_queryset(self):
qs = Order.objects.filter(
event=self.request.event
).select_related('invoice_address')
if self.filter_form.is_valid():
qs = self.filter_form.filter_qs(qs)
return qs
def get_context_data(self, **kwargs):
ctx = super().get_context_data(**kwargs)
ctx['filter_form'] = self.filter_form
s = OrderPosition.objects.filter(
order=OuterRef('pk')
).order_by().values('order').annotate(k=Count('id')).values('k')
annotated = {
o['pk']: o
for o in
Order.annotate_overpayments(Order.objects).filter(
pk__in=[o.pk for o in ctx['orders']]
).annotate(
pcnt=Subquery(s, output_field=IntegerField())
).values(
'pk', 'pcnt', 'is_overpaid', 'is_underpaid', 'is_pending_with_full_payment', 'has_external_refund',
'has_pending_refund'
)
}
for o in ctx['orders']:
if o.pk not in annotated:
continue
o.pcnt = annotated.get(o.pk)['pcnt']
o.is_overpaid = annotated.get(o.pk)['is_overpaid']
o.is_underpaid = annotated.get(o.pk)['is_underpaid']
o.is_pending_with_full_payment = annotated.get(o.pk)['is_pending_with_full_payment']
o.has_external_refund = annotated.get(o.pk)['has_external_refund']
o.has_pending_refund = annotated.get(o.pk)['has_pending_refund']
if ctx['page_obj'].paginator.count < 1000:
ctx['sums'] = self.get_queryset().annotate(
pcnt=Subquery(s, output_field=IntegerField())
).aggregate(
s=Sum('total'), pc=Sum('pcnt'), c=Count('id')
)
else:
ctx['sums'] = self.get_queryset().aggregate(s=Sum('total'), c=Count('id'))
return ctx
@cached_property
def filter_form(self):
return EventOrderFilterForm(data=self.request.GET, event=self.request.event)
class OrderView(EventPermissionRequiredMixin, DetailView):
context_object_name = 'order'
model = Order
def get_object(self, queryset=None):
try:
return Order.objects.get(
event=self.request.event,
code=self.kwargs['code'].upper()
)
except Order.DoesNotExist:
raise Http404()
def _redirect_back(self):
return redirect('control:event.order',
event=self.request.event.slug,
organizer=self.request.event.organizer.slug,
code=self.order.code)
@cached_property
def order(self):
return self.get_object()
def get_context_data(self, **kwargs):
ctx = super().get_context_data(**kwargs)
ctx['can_generate_invoice'] = invoice_qualified(self.order) and (
self.request.event.settings.invoice_generate in ('admin', 'user', 'paid', 'True')
) and (
not self.order.invoices.exists()
or (
self.order.status in (Order.STATUS_PAID, Order.STATUS_PENDING)
and self.order.invoices.filter(is_cancellation=True).count() >= self.order.invoices.filter(is_cancellation=False).count()
)
)
return ctx
def get_order_url(self):
return reverse('control:event.order', kwargs={
'event': self.request.event.slug,
'organizer': self.request.event.organizer.slug,
'code': self.order.code
})
class OrderDetail(OrderView):
template_name = 'pretixcontrol/order/index.html'
permission = 'can_view_orders'
def get_context_data(self, **kwargs):
ctx = super().get_context_data(**kwargs)
ctx['items'] = self.get_items()
ctx['event'] = self.request.event
ctx['payments'] = self.order.payments.order_by('-created')
ctx['refunds'] = self.order.refunds.select_related('payment').order_by('-created')
for p in ctx['payments']:
if p.payment_provider:
p.html_info = (p.payment_provider.payment_control_render(self.request, p) or "").strip()
for r in ctx['refunds']:
if r.payment_provider:
r.html_info = (r.payment_provider.refund_control_render(self.request, r) or "").strip()
ctx['invoices'] = list(self.order.invoices.all().select_related('event'))
ctx['comment_form'] = CommentForm(initial={
'comment': self.order.comment,
'checkin_attention': self.order.checkin_attention
})
ctx['display_locale'] = dict(settings.LANGUAGES)[self.object.locale or self.request.event.settings.locale]
ctx['overpaid'] = self.order.pending_sum * -1
ctx['sales_channel'] = get_all_sales_channels().get(self.order.sales_channel)
ctx['download_buttons'] = self.download_buttons
return ctx
@cached_property
def download_buttons(self):
buttons = []
responses = register_ticket_outputs.send(self.request.event)
for receiver, response in responses:
provider = response(self.request.event)
buttons.append({
'text': provider.download_button_text or 'Ticket',
'icon': provider.download_button_icon or 'fa-download',
'identifier': provider.identifier,
'multi': provider.multi_download_enabled,
'javascript_required': provider.javascript_required
})
return buttons
def get_items(self):
queryset = self.object.all_positions
cartpos = queryset.order_by(
'item', 'variation'
).select_related(
'item', 'variation', 'addon_to', 'tax_rule'
).prefetch_related(
'item__questions', 'issued_gift_cards',
Prefetch('answers', queryset=QuestionAnswer.objects.prefetch_related('options').select_related('question')),
'checkins', 'checkins__list'
).order_by('positionid')
positions = []
for p in cartpos:
responses = question_form_fields.send(sender=self.request.event, position=p)
p.additional_fields = []
data = p.meta_info_data
for r, response in sorted(responses, key=lambda r: str(r[0])):
if response:
for key, value in response.items():
p.additional_fields.append({
'answer': data.get('question_form_data', {}).get(key),
'question': value.label
})
p.has_questions = (
p.additional_fields or
(p.item.admission and self.request.event.settings.attendee_names_asked) or
(p.item.admission and self.request.event.settings.attendee_emails_asked) or
p.item.questions.all()
)
p.cache_answers()
positions.append(p)
positions.sort(key=lambda p: p.sort_key)
return {
'positions': positions,
'raw': cartpos,
'total': self.object.total,
'fees': self.object.all_fees.all(),
'net_total': self.object.net_total,
'tax_total': self.object.tax_total,
}
class OrderDownload(AsyncAction, OrderView):
task = generate
permission = 'can_view_orders'
def get_success_url(self, value):
return self.get_self_url()
def get_error_url(self):
return self.get_order_url()
def get_self_url(self):
return reverse('control:event.order.download.ticket', kwargs=self.kwargs)
@cached_property
def output(self):
responses = register_ticket_outputs.send(self.request.event)
for receiver, response in responses:
provider = response(self.request.event)
if provider.identifier == self.kwargs.get('output'):
return provider
@cached_property
def order_position(self):
try:
return self.order.positions.get(pk=self.kwargs.get('position'))
except OrderPosition.DoesNotExist:
return None
def get(self, request, *args, **kwargs):
if 'async_id' in request.GET and settings.HAS_CELERY:
return self.get_result(request)
ct = self.get_last_ct()
if ct:
return self.success(ct)
return self.http_method_not_allowed(request)
def post(self, request, *args, **kwargs):
if not self.output:
return self.error(_('You requested an invalid ticket output type.'))
if not self.order_position:
raise Http404(_('Unknown order code or not authorized to access this order.'))
if 'position' in kwargs and not self.order_position.generate_ticket:
return self.error(_('Ticket download is not enabled for this product.'))
ct = self.get_last_ct()
if ct:
return self.success(ct)
return self.do('orderposition' if 'position' in kwargs else 'order',
self.order_position.pk if 'position' in kwargs else self.order.pk,
self.output.identifier)
def get_success_message(self, value):
return ""
def success(self, value):
if "ajax" in self.request.POST or "ajax" in self.request.GET:
return JsonResponse({
'ready': True,
'success': True,
'redirect': self.get_success_url(value),
'message': str(self.get_success_message(value))
})
if isinstance(value, CachedTicket):
if value.type == 'text/uri-list':
resp = HttpResponseRedirect(value.file.file.read())
return resp
else:
resp = FileResponse(value.file.file, content_type=value.type)
resp['Content-Disposition'] = 'attachment; filename="{}-{}-{}-{}{}"'.format(
self.request.event.slug.upper(), self.order.code, self.order_position.positionid,
self.output.identifier, value.extension
)
return resp
elif isinstance(value, CachedCombinedTicket):
resp = FileResponse(value.file.file, content_type=value.type)
resp['Content-Disposition'] = 'attachment; filename="{}-{}-{}{}"'.format(
self.request.event.slug.upper(), self.order.code, self.output.identifier, value.extension
)
return resp
else:
return redirect(self.get_self_url())
def get_last_ct(self):
if 'position' in self.kwargs:
ct = CachedTicket.objects.filter(
order_position=self.order_position, provider=self.output.identifier, file__isnull=False
).last()
else:
ct = CachedCombinedTicket.objects.filter(
order=self.order, provider=self.output.identifier, file__isnull=False
).last()
if not ct or not ct.file:
return None
return ct
class OrderComment(OrderView):
permission = 'can_change_orders'
def post(self, *args, **kwargs):
form = CommentForm(self.request.POST)
if form.is_valid():
if form.cleaned_data.get('comment') != self.order.comment:
self.order.comment = form.cleaned_data.get('comment')
self.order.log_action('pretix.event.order.comment', user=self.request.user, data={
'new_comment': form.cleaned_data.get('comment')
})
if form.cleaned_data.get('checkin_attention') != self.order.checkin_attention:
self.order.checkin_attention = form.cleaned_data.get('checkin_attention')
self.order.log_action('pretix.event.order.checkin_attention', user=self.request.user, data={
'new_value': form.cleaned_data.get('checkin_attention')
})
self.order.save(update_fields=['checkin_attention', 'comment'])
messages.success(self.request, _('The comment has been updated.'))
else:
messages.error(self.request, _('Could not update the comment.'))
return redirect(self.get_order_url())
def get(self, *args, **kwargs):
return HttpResponseNotAllowed(['POST'])
class OrderApprove(OrderView):
permission = 'can_change_orders'
def post(self, *args, **kwargs):
if self.order.require_approval:
try:
approve_order(self.order, user=self.request.user)
except OrderError as e:
messages.error(self.request, str(e))
else:
messages.success(self.request, _('The order has been approved.'))
return redirect(self.get_order_url())
def get(self, *args, **kwargs):
return render(self.request, 'pretixcontrol/order/approve.html', {
'order': self.order,
})
class OrderDelete(OrderView):
permission = 'can_change_orders'
def post(self, *args, **kwargs):
if self.order.testmode:
try:
with transaction.atomic():
self.order.gracefully_delete(user=self.request.user)
messages.success(self.request, _('The order has been deleted.'))
return redirect(reverse('control:event.orders', kwargs={
'event': self.request.event.slug,
'organizer': self.request.organizer.slug,
}))
except ProtectedError:
messages.error(self.request, _('The order could not be deleted as some constraints (e.g. data created '
'by plug-ins) do not allow it.'))
return self.get(self.request, *self.args, **self.kwargs)
return redirect(self.get_order_url())
def get(self, *args, **kwargs):
if not self.order.testmode:
messages.error(self.request, _('Only orders created in test mode can be deleted.'))
return redirect(self.get_order_url())
return render(self.request, 'pretixcontrol/order/delete.html', {
'order': self.order,
})
class OrderDeny(OrderView):
permission = 'can_change_orders'
def post(self, *args, **kwargs):
if self.order.require_approval:
try:
deny_order(self.order, user=self.request.user,
comment=self.request.POST.get('comment'),
send_mail=self.request.POST.get('send_email') == 'on')
except OrderError as e:
messages.error(self.request, str(e))
else:
messages.success(self.request, _('The order has been denied and is therefore now canceled.'))
return redirect(self.get_order_url())
def get(self, *args, **kwargs):
return render(self.request, 'pretixcontrol/order/deny.html', {
'order': self.order,
})
class OrderPaymentCancel(OrderView):
permission = 'can_change_orders'
@cached_property
def payment(self):
return get_object_or_404(self.order.payments, pk=self.kwargs['payment'])
def post(self, *args, **kwargs):
if self.payment.state in (OrderPayment.PAYMENT_STATE_CREATED, OrderPayment.PAYMENT_STATE_PENDING):
try:
with transaction.atomic():
self.payment.payment_provider.cancel_payment(self.payment)
self.order.log_action('pretix.event.order.payment.canceled', {
'local_id': self.payment.local_id,
'provider': self.payment.provider,
}, user=self.request.user if self.request.user.is_authenticated else None)
except PaymentException as e:
self.order.log_action(
'pretix.event.order.payment.canceled.failed',
{
'local_id': self.payment.local_id,
'provider': self.payment.provider,
'error': str(e)
},
user=self.request.user if self.request.user.is_authenticated else None,
)
messages.error(self.request, str(e))
else:
messages.success(self.request, _('This payment has been canceled.'))
else:
messages.error(self.request, _('This payment can not be canceled at the moment.'))
return redirect(self.get_order_url())
def get(self, *args, **kwargs):
return render(self.request, 'pretixcontrol/order/pay_cancel.html', {
'order': self.order,
})
class OrderRefundCancel(OrderView):
permission = 'can_change_orders'
@cached_property
def refund(self):
return get_object_or_404(self.order.refunds, pk=self.kwargs['refund'])
def post(self, *args, **kwargs):
if self.refund.state in (OrderRefund.REFUND_STATE_CREATED, OrderRefund.REFUND_STATE_TRANSIT,
OrderRefund.REFUND_STATE_EXTERNAL):
with transaction.atomic():
self.refund.state = OrderRefund.REFUND_STATE_CANCELED
self.refund.save()
self.order.log_action('pretix.event.order.refund.canceled', {
'local_id': self.refund.local_id,
'provider': self.refund.provider,
}, user=self.request.user)
messages.success(self.request, _('The refund has been canceled.'))
else:
messages.error(self.request, _('This refund can not be canceled at the moment.'))
if "next" in self.request.GET and is_safe_url(self.request.GET.get("next"), allowed_hosts=None):
return redirect(self.request.GET.get("next"))
return redirect(self.get_order_url())
def get(self, *args, **kwargs):
return render(self.request, 'pretixcontrol/order/refund_cancel.html', {
'order': self.order,
})
class OrderRefundProcess(OrderView):
permission = 'can_change_orders'
@cached_property
def refund(self):
return get_object_or_404(self.order.refunds, pk=self.kwargs['refund'])
def post(self, *args, **kwargs):
if self.refund.state == OrderRefund.REFUND_STATE_EXTERNAL:
self.refund.done(user=self.request.user)
if self.request.POST.get("action") == "r" and self.order.status != Order.STATUS_CANCELED:
mark_order_refunded(self.order, user=self.request.user)
elif not (self.order.status == Order.STATUS_PAID and self.order.pending_sum <= 0):
self.order.status = Order.STATUS_PENDING
self.order.set_expires(
now(),
self.order.event.subevents.filter(
id__in=self.order.positions.values_list('subevent_id', flat=True))
)
self.order.save(update_fields=['status', 'expires'])
messages.success(self.request, _('The refund has been processed.'))
else:
messages.error(self.request, _('This refund can not be processed at the moment.'))
if "next" in self.request.GET and is_safe_url(self.request.GET.get("next"), allowed_hosts=None):
return redirect(self.request.GET.get("next"))
return redirect(self.get_order_url())
def get(self, *args, **kwargs):
return render(self.request, 'pretixcontrol/order/refund_process.html', {
'order': self.order,
'refund': self.refund,
'pending_sum': self.order.pending_sum + self.refund.amount,
'propose_cancel': self.order.pending_sum + self.refund.amount >= self.order.total
})
class OrderRefundDone(OrderView):
permission = 'can_change_orders'
@cached_property
def refund(self):
return get_object_or_404(self.order.refunds, pk=self.kwargs['refund'])
def post(self, *args, **kwargs):
if self.refund.state in (OrderRefund.REFUND_STATE_CREATED, OrderRefund.REFUND_STATE_TRANSIT):
self.refund.done(user=self.request.user)
messages.success(self.request, _('The refund has been marked as done.'))
else:
messages.error(self.request, _('This refund can not be processed at the moment.'))
if "next" in self.request.GET and is_safe_url(self.request.GET.get("next"), allowed_hosts=None):
return redirect(self.request.GET.get("next"))
return redirect(self.get_order_url())
def get(self, *args, **kwargs):
return render(self.request, 'pretixcontrol/order/refund_done.html', {
'order': self.order,
})
class OrderPaymentConfirm(OrderView):
permission = 'can_change_orders'
@cached_property
def payment(self):
return get_object_or_404(self.order.payments, pk=self.kwargs['payment'])
@cached_property
def mark_paid_form(self):
return ConfirmPaymentForm(
instance=self.order,
data=self.request.POST if self.request.method == "POST" else None,
)
def post(self, *args, **kwargs):
if self.payment.state in (OrderPayment.PAYMENT_STATE_CREATED, OrderPayment.PAYMENT_STATE_PENDING):
if not self.mark_paid_form.is_valid():
return render(self.request, 'pretixcontrol/order/pay_complete.html', {
'form': self.mark_paid_form,
'order': self.order,
})
try:
self.payment.confirm(user=self.request.user,
count_waitinglist=False,
force=self.mark_paid_form.cleaned_data.get('force', False))
except Quota.QuotaExceededException as e:
messages.error(self.request, str(e))
except PaymentException as e:
messages.error(self.request, str(e))
except SendMailException:
messages.warning(self.request,
_('The payment has been marked as complete, but we were unable to send a '
'confirmation mail.'))
else:
messages.success(self.request, _('The payment has been marked as complete.'))
else:
messages.error(self.request, _('This payment can not be confirmed at the moment.'))
return redirect(self.get_order_url())
def get(self, *args, **kwargs):
return render(self.request, 'pretixcontrol/order/pay_complete.html', {
'form': self.mark_paid_form,
'order': self.order,
})
class OrderRefundView(OrderView):
permission = 'can_change_orders'
@cached_property
def start_form(self):
return OrderRefundForm(
order=self.order,
data=self.request.POST if self.request.method == "POST" else (
self.request.GET if "start-action" in self.request.GET else None
),
prefix='start',
initial={
'partial_amount': self.order.payment_refund_sum,
'action': (
'mark_pending' if self.order.status == Order.STATUS_PAID
else 'do_nothing'
)
}
)
def choose_form(self):
payments = list(self.order.payments.filter(state=OrderPayment.PAYMENT_STATE_CONFIRMED))
if self.start_form.cleaned_data.get('mode') == 'full':
full_refund = self.order.payment_refund_sum
else:
full_refund = self.start_form.cleaned_data.get('partial_amount')
proposals = self.order.propose_auto_refunds(full_refund, payments=payments)
to_refund = full_refund - sum(proposals.values())
for p in payments:
p.propose_refund = proposals.get(p, 0)
if 'perform' in self.request.POST:
refund_selected = Decimal('0.00')
refunds = []
is_valid = True
manual_value = self.request.POST.get('refund-manual', '0') or '0'
manual_value = formats.sanitize_separators(manual_value)
try:
manual_value = Decimal(manual_value)
except (DecimalException, TypeError):
messages.error(self.request, _('You entered an invalid number.'))
is_valid = False
else:
refund_selected += manual_value
if manual_value:
refunds.append(OrderRefund(
order=self.order,
payment=None,
source=OrderRefund.REFUND_SOURCE_ADMIN,
state=(
OrderRefund.REFUND_STATE_DONE
if self.request.POST.get('manual_state') == 'done'
else OrderRefund.REFUND_STATE_CREATED
),
amount=manual_value,
provider='manual'
))
giftcard_value = self.request.POST.get('refund-new-giftcard', '0') or '0'
giftcard_value = formats.sanitize_separators(giftcard_value)
try:
giftcard_value = Decimal(giftcard_value)
except (DecimalException, TypeError):
messages.error(self.request, _('You entered an invalid number.'))
is_valid = False
else:
if giftcard_value:
refund_selected += giftcard_value
giftcard = self.request.organizer.issued_gift_cards.create(
currency=self.request.event.currency,
testmode=self.order.testmode
)
giftcard.log_action('pretix.giftcards.created', user=self.request.user, data={})
refunds.append(OrderRefund(
order=self.order,
payment=None,
source=OrderRefund.REFUND_SOURCE_ADMIN,
state=OrderRefund.REFUND_STATE_CREATED,
execution_date=now(),
amount=giftcard_value,
provider='giftcard',
info=json.dumps({
'gift_card': giftcard.pk
})
))
offsetting_value = self.request.POST.get('refund-offsetting', '0') or '0'
offsetting_value = formats.sanitize_separators(offsetting_value)
try:
offsetting_value = Decimal(offsetting_value)
except (DecimalException, TypeError):
messages.error(self.request, _('You entered an invalid number.'))
is_valid = False
else:
if offsetting_value:
refund_selected += offsetting_value
try:
order = Order.objects.get(code=self.request.POST.get('order-offsetting'),
event__organizer=self.request.organizer)
except Order.DoesNotExist:
messages.error(self.request, _('You entered an order that could not be found.'))
is_valid = False
else:
refunds.append(OrderRefund(
order=self.order,
payment=None,
source=OrderRefund.REFUND_SOURCE_ADMIN,
state=OrderRefund.REFUND_STATE_DONE,
execution_date=now(),
amount=offsetting_value,
provider='offsetting',
info=json.dumps({
'orders': [order.code]
})
))
for p in payments:
value = self.request.POST.get('refund-{}'.format(p.pk), '0') or '0'
value = formats.sanitize_separators(value)
try:
value = Decimal(value)
except (DecimalException, TypeError):
messages.error(self.request, _('You entered an invalid number.'))
is_valid = False
else:
if value == 0:
continue
elif value > p.available_amount:
messages.error(self.request, _('You can not refund more than the amount of a '
'payment that is not yet refunded.'))
is_valid = False
break
elif value != p.amount and not p.partial_refund_possible:
messages.error(self.request, _('You selected a partial refund for a payment method that '
'only supports full refunds.'))
is_valid = False
break
elif (p.partial_refund_possible or p.full_refund_possible) and value > 0:
refund_selected += value
refunds.append(OrderRefund(
order=self.order,
payment=p,
source=OrderRefund.REFUND_SOURCE_ADMIN,
state=OrderRefund.REFUND_STATE_CREATED,
amount=value,
provider=p.provider
))
any_success = False
if refund_selected == full_refund and is_valid:
for r in refunds:
r.save()
self.order.log_action('pretix.event.order.refund.created', {
'local_id': r.local_id,
'provider': r.provider,
}, user=self.request.user)
if r.payment or r.provider == "offsetting" or r.provider == "giftcard":
try:
r.payment_provider.execute_refund(r)
except PaymentException as e:
r.state = OrderRefund.REFUND_STATE_FAILED
r.save()
messages.error(self.request, _('One of the refunds failed to be processed. You should '
'retry to refund in a different way. The error message '
'was: {}').format(str(e)))
else:
any_success = True
if r.state == OrderRefund.REFUND_STATE_DONE:
messages.success(self.request, _('A refund of {} has been processed.').format(
money_filter(r.amount, self.request.event.currency)
))
elif r.state == OrderRefund.REFUND_STATE_CREATED:
messages.info(self.request, _('A refund of {} has been saved, but not yet '
'fully executed. You can mark it as complete '
'below.').format(
money_filter(r.amount, self.request.event.currency)
))
else:
any_success = True
if any_success:
if self.start_form.cleaned_data.get('action') == 'mark_refunded':
mark_order_refunded(self.order, user=self.request.user)
elif self.start_form.cleaned_data.get('action') == 'mark_pending':
if not (self.order.status == Order.STATUS_PAID and self.order.pending_sum <= 0):
self.order.status = Order.STATUS_PENDING
self.order.set_expires(
now(),
self.order.event.subevents.filter(
id__in=self.order.positions.values_list('subevent_id', flat=True))
)
self.order.save(update_fields=['status', 'expires'])
if giftcard_value and self.order.email:
messages.success(self.request, _('A new gift card was created. You can now send the user their '
'gift card code.'))
return redirect(reverse('control:event.order.sendmail', kwargs={
'event': self.request.event.slug,
'organizer': self.request.event.organizer.slug,
'code': self.order.code
}) + '?' + urlencode({
'subject': _('Your gift card code'),
'message': _('Hello,\n\nwe have refunded you {amount} for your order.\n\nYou can use the gift '
'card code {giftcard} to pay for future ticket purchases in our shop.\n\n'
'Your {event} team').format(
event="{event}",
amount=money_filter(giftcard_value, self.request.event.currency),
giftcard=giftcard.secret,
)
}))
return redirect(self.get_order_url())
else:
messages.error(self.request, _('The refunds you selected do not match the selected total refund '
'amount.'))
return render(self.request, 'pretixcontrol/order/refund_choose.html', {
'payments': payments,
'remainder': to_refund,
'order': self.order,
'partial_amount': (
self.request.POST.get('start-partial_amount') if self.request.method == 'POST'
else self.request.GET.get('start-partial_amount')
),
'start_form': self.start_form
})
def post(self, *args, **kwargs):
if self.start_form.is_valid():
return self.choose_form()
return self.get(*args, **kwargs)
def get(self, *args, **kwargs):
if self.start_form.is_valid():
return self.choose_form()
return render(self.request, 'pretixcontrol/order/refund_start.html', {
'form': self.start_form,
'order': self.order,
})
class OrderTransition(OrderView):
permission = 'can_change_orders'
@cached_property
def mark_paid_form(self):
return MarkPaidForm(
instance=self.order,
data=self.request.POST if self.request.method == "POST" else None,
)
@cached_property
def mark_canceled_form(self):
return CancelForm(
instance=self.order,
data=self.request.POST if self.request.method == "POST" else None,
)
def post(self, *args, **kwargs):
to = self.request.POST.get('status', '')
if self.order.status in (Order.STATUS_PENDING, Order.STATUS_EXPIRED) and to == 'p' and self.mark_paid_form.is_valid():
ps = self.mark_paid_form.cleaned_data['amount']
try:
p = self.order.payments.get(
state__in=(OrderPayment.PAYMENT_STATE_PENDING, OrderPayment.PAYMENT_STATE_CREATED),
provider='manual',
amount=ps
)
except OrderPayment.DoesNotExist:
for p in self.order.payments.filter(state__in=(OrderPayment.PAYMENT_STATE_PENDING,
OrderPayment.PAYMENT_STATE_CREATED)):
try:
with transaction.atomic():
p.payment_provider.cancel_payment(p)
self.order.log_action('pretix.event.order.payment.canceled', {
'local_id': p.local_id,
'provider': p.provider,
}, user=self.request.user if self.request.user.is_authenticated else None)
except PaymentException as e:
self.order.log_action(
'pretix.event.order.payment.canceled.failed',
{
'local_id': p.local_id,
'provider': p.provider,
'error': str(e)
},
user=self.request.user if self.request.user.is_authenticated else None,
)
p = self.order.payments.create(
state=OrderPayment.PAYMENT_STATE_CREATED,
provider='manual',
amount=ps,
fee=None
)
payment_date = None
if self.mark_paid_form.cleaned_data['payment_date'] != now().date():
payment_date = make_aware(datetime.combine(
self.mark_paid_form.cleaned_data['payment_date'],
time(hour=0, minute=0, second=0)
), self.order.event.timezone)
try:
p.confirm(user=self.request.user, count_waitinglist=False, payment_date=payment_date,
force=self.mark_paid_form.cleaned_data.get('force', False))
except Quota.QuotaExceededException as e:
p.state = OrderPayment.PAYMENT_STATE_FAILED
p.save()
self.order.log_action('pretix.event.order.payment.failed', {
'local_id': p.local_id,
'provider': p.provider,
'message': str(e)
})
messages.error(self.request, str(e))
except PaymentException as e:
p.state = OrderPayment.PAYMENT_STATE_FAILED
p.save()
self.order.log_action('pretix.event.order.payment.failed', {
'local_id': p.local_id,
'provider': p.provider,
'message': str(e)
})
messages.error(self.request, str(e))
except SendMailException:
messages.warning(self.request, _('The order has been marked as paid, but we were unable to send a '
'confirmation mail.'))
else:
messages.success(self.request, _('The payment has been created successfully.'))
elif self.order.cancel_allowed() and to == 'c' and self.mark_canceled_form.is_valid():
try:
cancel_order(self.order, user=self.request.user,
send_mail=self.mark_canceled_form.cleaned_data['send_email'],
cancellation_fee=self.mark_canceled_form.cleaned_data.get('cancellation_fee'))
except OrderError as e:
messages.error(self.request, str(e))
else:
self.order.refresh_from_db()
if self.order.pending_sum < 0:
messages.success(self.request, _('The order has been canceled. You can now select how you want to '
'transfer the money back to the user.'))
return redirect(reverse('control:event.order.refunds.start', kwargs={
'event': self.request.event.slug,
'organizer': self.request.event.organizer.slug,
'code': self.order.code
}) + '?start-action=do_nothing&start-mode=partial&start-partial_amount={}'.format(
self.order.pending_sum * -1
))
messages.success(self.request, _('The order has been canceled.'))
elif self.order.status == Order.STATUS_PENDING and to == 'e':
mark_order_expired(self.order, user=self.request.user)
messages.success(self.request, _('The order has been marked as expired.'))
return redirect(self.get_order_url())
def get(self, *args, **kwargs):
to = self.request.GET.get('status', '')
if self.order.status in (Order.STATUS_PENDING, Order.STATUS_EXPIRED) and to == 'p':
return render(self.request, 'pretixcontrol/order/pay.html', {
'form': self.mark_paid_form,
'order': self.order,
})
elif self.order.cancel_allowed() and to == 'c':
return render(self.request, 'pretixcontrol/order/cancel.html', {
'form': self.mark_canceled_form,
'order': self.order,
})
else:
return HttpResponseNotAllowed(['POST'])
class OrderInvoiceCreate(OrderView):
permission = 'can_change_orders'
def post(self, *args, **kwargs):
has_inv = self.order.invoices.exists() and not (
self.order.status in (Order.STATUS_PAID, Order.STATUS_PENDING)
and self.order.invoices.filter(is_cancellation=True).count() >= self.order.invoices.filter(is_cancellation=False).count()
)
if self.request.event.settings.get('invoice_generate') not in ('admin', 'user', 'paid', 'True') or not invoice_qualified(self.order):
messages.error(self.request, _('You cannot generate an invoice for this order.'))
elif has_inv:
messages.error(self.request, _('An invoice for this order already exists.'))
else:
inv = generate_invoice(self.order)
self.order.log_action('pretix.event.order.invoice.generated', user=self.request.user, data={
'invoice': inv.pk
})
messages.success(self.request, _('The invoice has been generated.'))
return redirect(self.get_order_url())
def get(self, *args, **kwargs):
return HttpResponseNotAllowed(['POST'])
class OrderCheckVATID(OrderView):
permission = 'can_change_orders'
def post(self, *args, **kwargs):
try:
ia = self.order.invoice_address
except InvoiceAddress.DoesNotExist:
messages.error(self.request, _('No VAT ID specified.'))
return redirect(self.get_order_url())
else:
if not ia.vat_id:
messages.error(self.request, _('No VAT ID specified.'))
return redirect(self.get_order_url())
if not ia.country:
messages.error(self.request, _('No country specified.'))
return redirect(self.get_order_url())
if str(ia.country) not in EU_COUNTRIES:
messages.error(self.request, _('VAT ID could not be checked since a non-EU country has been '
'specified.'))
return redirect(self.get_order_url())
if ia.vat_id[:2] != cc_to_vat_prefix(str(ia.country)):
messages.error(self.request, _('Your VAT ID does not match the selected country.'))
return redirect(self.get_order_url())
try:
result = vat_moss.id.validate(ia.vat_id)
if result:
country_code, normalized_id, company_name = result
ia.vat_id_validated = True
ia.vat_id = normalized_id
ia.save()
except vat_moss.errors.InvalidError:
messages.error(self.request, _('This VAT ID is not valid.'))
except vat_moss.errors.WebServiceUnavailableError:
logger.exception('VAT ID checking failed for country {}'.format(ia.country))
messages.error(self.request, _('The VAT ID could not be checked, as the VAT checking service of '
'the country is currently not available.'))
else:
messages.success(self.request, _('This VAT ID is valid.'))
return redirect(self.get_order_url())
def get(self, *args, **kwargs):
return HttpResponseNotAllowed(['POST'])
class OrderInvoiceRegenerate(OrderView):
permission = 'can_change_orders'
def post(self, *args, **kwargs):
try:
inv = self.order.invoices.get(pk=kwargs.get('id'))
except Invoice.DoesNotExist:
messages.error(self.request, _('Unknown invoice.'))
else:
if inv.canceled:
messages.error(self.request, _('The invoice has already been canceled.'))
elif inv.shredded:
messages.error(self.request, _('The invoice has been cleaned of personal data.'))
else:
inv = regenerate_invoice(inv)
self.order.log_action('pretix.event.order.invoice.regenerated', user=self.request.user, data={
'invoice': inv.pk
})
messages.success(self.request, _('The invoice has been regenerated.'))
return redirect(self.get_order_url())
def get(self, *args, **kwargs):
return HttpResponseNotAllowed(['POST'])
class OrderInvoiceReissue(OrderView):
permission = 'can_change_orders'
def post(self, *args, **kwargs):
try:
inv = self.order.invoices.get(pk=kwargs.get('id'))
except Invoice.DoesNotExist:
messages.error(self.request, _('Unknown invoice.'))
else:
if inv.canceled:
messages.error(self.request, _('The invoice has already been canceled.'))
elif inv.shredded:
messages.error(self.request, _('The invoice has been cleaned of personal data.'))
else:
c = generate_cancellation(inv)
if self.order.status != Order.STATUS_CANCELED:
inv = generate_invoice(self.order)
else:
inv = c
self.order.log_action('pretix.event.order.invoice.reissued', user=self.request.user, data={
'invoice': inv.pk
})
messages.success(self.request, _('The invoice has been reissued.'))
return redirect(self.get_order_url())
def get(self, *args, **kwargs):
return HttpResponseNotAllowed(['POST'])
class OrderResendLink(OrderView):
permission = 'can_change_orders'
def post(self, *args, **kwargs):
try:
if 'position' in kwargs:
p = get_object_or_404(self.order.positions, pk=kwargs['position'])
p.resend_link(user=self.request.user)
else:
self.order.resend_link(user=self.request.user)
except SendMailException:
messages.error(self.request, _('There was an error sending the mail. Please try again later.'))
return redirect(self.get_order_url())
messages.success(self.request, _('The email has been queued to be sent.'))
return redirect(self.get_order_url())
def get(self, *args, **kwargs):
return HttpResponseNotAllowed(['POST'])
class InvoiceDownload(EventPermissionRequiredMixin, View):
permission = 'can_view_orders'
def get_order_url(self):
return reverse('control:event.order', kwargs={
'event': self.request.event.slug,
'organizer': self.request.event.organizer.slug,
'code': self.invoice.order.code
})
def get(self, request, *args, **kwargs):
try:
self.invoice = Invoice.objects.get(
event=self.request.event,
id=self.kwargs['invoice']
)
except Invoice.DoesNotExist:
raise Http404(_('This invoice has not been found'))
if not self.invoice.file:
invoice_pdf(self.invoice.pk)
self.invoice = Invoice.objects.get(pk=self.invoice.pk)
if self.invoice.shredded:
messages.error(request, _('The invoice file is no longer stored on the server.'))
return redirect(self.get_order_url())
if not self.invoice.file:
messages.warning(request, _('The invoice file has not yet been generated, we will generate it for you '
'now. Please try again in a few seconds.'))
return redirect(self.get_order_url())
try:
resp = FileResponse(self.invoice.file.file, content_type='application/pdf')
except FileNotFoundError:
invoice_pdf_task.apply(args=(self.invoice.pk,))
return self.get(request, *args, **kwargs)
resp['Content-Disposition'] = 'inline; filename="{}.pdf"'.format(self.invoice.number)
resp._csp_ignore = True
return resp
class OrderExtend(OrderView):
permission = 'can_change_orders'
def post(self, *args, **kwargs):
if self.form.is_valid():
try:
extend_order(
self.order,
new_date=self.form.cleaned_data.get('expires'),
force=self.form.cleaned_data.get('quota_ignore', False),
user=self.request.user
)
messages.success(self.request, _('The payment term has been changed.'))
except OrderError as e:
messages.error(self.request, str(e))
return self._redirect_here()
except LockTimeoutException:
messages.error(self.request, _('We were not able to process the request completely as the '
'server was too busy.'))
return self._redirect_back()
else:
return self.get(*args, **kwargs)
def dispatch(self, request, *args, **kwargs):
if self.order.status not in (Order.STATUS_PENDING, Order.STATUS_EXPIRED):
messages.error(self.request, _('This action is only allowed for pending orders.'))
return self._redirect_back()
return super().dispatch(request, *kwargs, **kwargs)
def _redirect_here(self):
return redirect('control:event.order.extend',
event=self.request.event.slug,
organizer=self.request.event.organizer.slug,
code=self.order.code)
def get(self, *args, **kwargs):
return render(self.request, 'pretixcontrol/order/extend.html', {
'order': self.order,
'form': self.form,
})
@cached_property
def form(self):
return ExtendForm(instance=self.order,
data=self.request.POST if self.request.method == "POST" else None)
class OrderChange(OrderView):
permission = 'can_change_orders'
template_name = 'pretixcontrol/order/change.html'
def dispatch(self, request, *args, **kwargs):
if self.order.status not in (Order.STATUS_PENDING, Order.STATUS_PAID):
messages.error(self.request, _('This action is only allowed for pending or paid orders.'))
return self._redirect_back()
return super().dispatch(request, *args, **kwargs)
@cached_property
def other_form(self):
return OtherOperationsForm(prefix='other', order=self.order,
data=self.request.POST if self.request.method == "POST" else None)
@cached_property
def add_formset(self):
ff = formset_factory(
OrderPositionAddForm, formset=OrderPositionAddFormset,
can_order=False, can_delete=True, extra=0
)
return ff(
prefix='add',
order=self.order,
data=self.request.POST if self.request.method == "POST" else None
)
@cached_property
def fees(self):
fees = list(self.order.fees.all())
for f in fees:
f.form = OrderFeeChangeForm(prefix='of-{}'.format(f.pk), instance=f,
data=self.request.POST if self.request.method == "POST" else None)
try:
ia = self.order.invoice_address
except InvoiceAddress.DoesNotExist:
ia = None
f.apply_tax = self.request.event.settings.tax_rate_default and self.request.event.settings.tax_rate_default.tax_applicable(invoice_address=ia)
return fees
@cached_property
def positions(self):
positions = list(self.order.positions.all())
for p in positions:
p.form = OrderPositionChangeForm(prefix='op-{}'.format(p.pk), instance=p,
initial={'seat': p.seat.seat_guid if p.seat else None},
data=self.request.POST if self.request.method == "POST" else None)
try:
ia = self.order.invoice_address
except InvoiceAddress.DoesNotExist:
ia = None
p.apply_tax = p.item.tax_rule and p.item.tax_rule.tax_applicable(invoice_address=ia)
return positions
def get_context_data(self, **kwargs):
ctx = super().get_context_data(**kwargs)
ctx['positions'] = self.positions
ctx['fees'] = self.fees
ctx['add_formset'] = self.add_formset
ctx['other_form'] = self.other_form
return ctx
def _process_other(self, ocm):
if not self.other_form.is_valid():
return False
else:
if self.other_form.cleaned_data['recalculate_taxes']:
ocm.recalculate_taxes()
return True
def _process_add(self, ocm):
if not self.add_formset.is_valid():
return False
else:
for f in self.add_formset.forms:
if f in self.add_formset.deleted_forms or not f.has_changed():
continue
if '-' in f.cleaned_data['itemvar']:
itemid, varid = f.cleaned_data['itemvar'].split('-')
else:
itemid, varid = f.cleaned_data['itemvar'], None
item = Item.objects.get(pk=itemid, event=self.request.event)
if varid:
variation = ItemVariation.objects.get(pk=varid, item=item)
else:
variation = None
try:
ocm.add_position(item, variation,
f.cleaned_data['price'],
f.cleaned_data.get('addon_to'),
f.cleaned_data.get('subevent'),
f.cleaned_data.get('seat'))
except OrderError as e:
f.custom_error = str(e)
return False
return True
def _process_fees(self, ocm):
for f in self.fees:
if not f.form.is_valid():
return False
try:
if f.form.cleaned_data['operation_cancel']:
ocm.cancel_fee(f)
continue
if f.form.cleaned_data['value'] != f.value:
ocm.change_fee(f, f.form.cleaned_data['value'])
except OrderError as e:
f.custom_error = str(e)
return False
return True
def _process_change(self, ocm):
for p in self.positions:
if not p.form.is_valid():
return False
try:
if p.form.cleaned_data['operation_cancel']:
ocm.cancel(p)
continue
if p.form.cleaned_data['itemvar']:
if '-' in p.form.cleaned_data['itemvar']:
itemid, varid = p.form.cleaned_data['itemvar'].split('-')
else:
itemid, varid = p.form.cleaned_data['itemvar'], None
item = Item.objects.get(pk=itemid, event=self.request.event)
if varid:
variation = ItemVariation.objects.get(pk=varid, item=item)
else:
variation = None
if item != p.item or variation != p.variation:
ocm.change_item(p, item, variation)
if self.request.event.has_subevents and p.form.cleaned_data['subevent'] and p.form.cleaned_data['subevent'] != p.subevent:
ocm.change_subevent(p, p.form.cleaned_data['subevent'])
if p.seat and p.form.cleaned_data['seat'] and p.form.cleaned_data['seat'] != p.seat.seat_guid:
ocm.change_seat(p, p.form.cleaned_data['seat'])
if p.form.cleaned_data['price'] != p.price:
ocm.change_price(p, p.form.cleaned_data['price'])
if p.form.cleaned_data['operation_split']:
ocm.split(p)
if p.form.cleaned_data['operation_secret']:
ocm.regenerate_secret(p)
except OrderError as e:
p.custom_error = str(e)
return False
return True
def post(self, *args, **kwargs):
notify = self.other_form.cleaned_data['notify'] if self.other_form.is_valid() else True
ocm = OrderChangeManager(
self.order,
user=self.request.user,
notify=notify,
reissue_invoice=self.other_form.cleaned_data['reissue_invoice'] if self.other_form.is_valid() else True
)
form_valid = self._process_add(ocm) and self._process_fees(ocm) and self._process_change(ocm) and self._process_other(ocm)
if not form_valid:
messages.error(self.request, _('An error occurred. Please see the details below.'))
else:
try:
ocm.commit(check_quotas=not self.other_form.cleaned_data['ignore_quotas'])
except OrderError as e:
messages.error(self.request, str(e))
else:
if notify:
messages.success(self.request, _('The order has been changed and the user has been notified.'))
else:
messages.success(self.request, _('The order has been changed.'))
return self._redirect_back()
return self.get(*args, **kwargs)
class OrderModifyInformation(OrderQuestionsViewMixin, OrderView):
permission = 'can_change_orders'
template_name = 'pretixcontrol/order/change_questions.html'
only_user_visible = False
all_optional = True
def get_context_data(self, **kwargs):
ctx = super().get_context_data(**kwargs)
ctx['other_form'] = self.other_form
return ctx
@cached_property
def other_form(self):
return OtherOperationsForm(prefix='other', order=self.order, initial={'notify': False},
data=self.request.POST if self.request.method == "POST" else None)
def post(self, request, *args, **kwargs):
failed = not self.save() or not self.invoice_form.is_valid() or not self.other_form.is_valid()
notify = self.other_form.cleaned_data['notify'] if self.other_form.is_valid() else True
if failed:
messages.error(self.request,
_("We had difficulties processing your input. Please review the errors below."))
return self.get(request, *args, **kwargs)
if notify:
notify_user_changed_order(self.order)
if hasattr(self.invoice_form, 'save'):
self.invoice_form.save()
self.order.log_action('pretix.event.order.modified', {
'invoice_data': self.invoice_form.cleaned_data,
'data': [{
k: (f.cleaned_data.get(k).name if isinstance(f.cleaned_data.get(k), File) else f.cleaned_data.get(k))
for k in f.changed_data
} for f in self.forms]
}, user=request.user)
if self.invoice_form.has_changed():
success_message = ('The invoice address has been updated. If you want to generate a new invoice, '
'you need to do this manually.')
messages.success(self.request, _(success_message))
tickets.invalidate_cache.apply_async(kwargs={'event': self.request.event.pk, 'order': self.order.pk})
order_modified.send(sender=self.request.event, order=self.order)
return redirect(self.get_order_url())
class OrderContactChange(OrderView):
permission = 'can_change_orders'
template_name = 'pretixcontrol/order/change_contact.html'
def get_context_data(self, **kwargs):
ctx = super().get_context_data()
ctx['form'] = self.form
return ctx
@cached_property
def form(self):
return OrderContactForm(
instance=self.order,
data=self.request.POST if self.request.method == "POST" else None
)
def post(self, *args, **kwargs):
old_email = self.order.email
changed = False
if self.form.is_valid():
new_email = self.form.cleaned_data['email']
if new_email != old_email:
changed = True
self.order.log_action(
'pretix.event.order.contact.changed',
data={
'old_email': old_email,
'new_email': self.form.cleaned_data['email'],
},
user=self.request.user,
)
if self.form.cleaned_data['regenerate_secrets']:
changed = True
self.order.secret = generate_secret()
for op in self.order.all_positions.all():
op.secret = generate_position_secret()
op.save()
tickets.invalidate_cache.apply_async(kwargs={'event': self.request.event.pk, 'order': self.order.pk})
self.order.log_action('pretix.event.order.secret.changed', user=self.request.user)
self.form.save()
if changed:
messages.success(self.request, _('The order has been changed.'))
else:
messages.success(self.request, _('Nothing about the order had to be changed.'))
return redirect(self.get_order_url())
return self.get(*args, **kwargs)
class OrderLocaleChange(OrderView):
permission = 'can_change_orders'
template_name = 'pretixcontrol/order/change_locale.html'
def get_context_data(self, **kwargs):
ctx = super().get_context_data()
ctx['form'] = self.form
return ctx
@cached_property
def form(self):
return OrderLocaleForm(
instance=self.order,
data=self.request.POST if self.request.method == "POST" else None
)
def post(self, *args, **kwargs):
old_locale = self.order.locale
if self.form.is_valid():
self.order.log_action(
'pretix.event.order.locale.changed',
data={
'old_locale': old_locale,
'new_locale': self.form.cleaned_data['locale'],
},
user=self.request.user,
)
self.form.save()
tickets.invalidate_cache.apply_async(kwargs={'event': self.request.event.pk, 'order': self.order.pk})
messages.success(self.request, _('The order has been changed.'))
return redirect(self.get_order_url())
return self.get(*args, **kwargs)
class OrderViewMixin:
def get_object(self, queryset=None):
try:
return Order.objects.get(
event=self.request.event,
code=self.kwargs['code'].upper()
)
except Order.DoesNotExist:
raise Http404()
@cached_property
def order(self):
return self.get_object()
def get_context_data(self, **kwargs):
ctx = super().get_context_data(**kwargs)
ctx['order'] = self.order
return ctx
class OrderSendMail(EventPermissionRequiredMixin, OrderViewMixin, FormView):
template_name = 'pretixcontrol/order/sendmail.html'
permission = 'can_change_orders'
form_class = OrderMailForm
def get_form_kwargs(self):
kwargs = super().get_form_kwargs()
kwargs['order'] = Order.objects.get(
event=self.request.event,
code=self.kwargs['code'].upper()
)
kwargs['initial'] = {}
if self.request.GET.get('subject'):
kwargs['initial']['subject'] = self.request.GET.get('subject')
if self.request.GET.get('message'):
kwargs['initial']['message'] = self.request.GET.get('message')
return kwargs
def form_invalid(self, form):
messages.error(self.request, _('We could not send the email. See below for details.'))
return super().form_invalid(form)
def form_valid(self, form):
order = Order.objects.get(
event=self.request.event,
code=self.kwargs['code'].upper()
)
self.preview_output = {}
with language(order.locale):
email_context = get_email_context(event=order.event, order=order)
email_template = LazyI18nString(form.cleaned_data['message'])
email_content = render_mail(email_template, email_context)
if self.request.POST.get('action') == 'preview':
self.preview_output = {
'subject': _('Subject: {subject}').format(subject=form.cleaned_data['subject']),
'html': markdown_compile_email(email_content)
}
return self.get(self.request, *self.args, **self.kwargs)
else:
try:
order.send_mail(
form.cleaned_data['subject'], email_template,
email_context, 'pretix.event.order.email.custom_sent',
self.request.user, auto_email=False
)
messages.success(self.request,
_('Your message has been queued and will be sent to {}.'.format(order.email)))
except SendMailException:
messages.error(
self.request,
_('Failed to send mail to the following user: {}'.format(order.email))
)
return super(OrderSendMail, self).form_valid(form)
def get_success_url(self):
return reverse('control:event.order', kwargs={
'event': self.request.event.slug,
'organizer': self.request.event.organizer.slug,
'code': self.kwargs['code']
})
def get_context_data(self, *args, **kwargs):
ctx = super().get_context_data(*args, **kwargs)
ctx['preview_output'] = getattr(self, 'preview_output', None)
return ctx
class OrderEmailHistory(EventPermissionRequiredMixin, OrderViewMixin, ListView):
template_name = 'pretixcontrol/order/mail_history.html'
permission = 'can_view_orders'
model = LogEntry
context_object_name = 'logs'
paginate_by = 10
def get_queryset(self):
order = get_object_or_404(
Order,
event=self.request.event,
code=self.kwargs['code'].upper()
)
qs = order.all_logentries()
qs = qs.filter(
action_type__contains="order.email"
)
return qs
class AnswerDownload(EventPermissionRequiredMixin, OrderViewMixin, ListView):
permission = 'can_view_orders'
def get(self, request, *args, **kwargs):
answid = kwargs.get('answer')
token = request.GET.get('token', '')
answer = get_object_or_404(QuestionAnswer, orderposition__order=self.order, id=answid)
if not answer.file:
raise Http404()
if not check_token(request, answer, token):
raise Http404(_("This link is no longer valid. Please go back, refresh the page, and try again."))
ftype, ignored = mimetypes.guess_type(answer.file.name)
resp = FileResponse(answer.file, content_type=ftype or 'application/binary')
resp['Content-Disposition'] = 'attachment; filename="{}-{}-{}-{}"'.format(
self.request.event.slug.upper(), self.order.code,
answer.orderposition.positionid,
os.path.basename(answer.file.name).split('.', 1)[1]
)
return resp
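# Illustrative walk-through (hypothetical values): for event slug "democon",
# order code "ABC12", position 1 and a stored answer file whose basename is
# "r4nd0mt0ken.passport-photo.jpg", AnswerDownload.get() above produces
#   Content-Disposition: attachment; filename="DEMOCON-ABC12-1-passport-photo.jpg"
# because split('.', 1)[1] keeps everything after the first dot of the
# stored basename.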
class OverView(EventPermissionRequiredMixin, TemplateView):
template_name = 'pretixcontrol/orders/overview.html'
permission = 'can_view_orders'
@cached_property
def filter_form(self):
return OverviewFilterForm(data=self.request.GET, event=self.request.event)
def get_context_data(self, **kwargs):
ctx = super().get_context_data()
if self.filter_form.is_valid():
ctx['items_by_category'], ctx['total'] = order_overview(
self.request.event,
subevent=self.filter_form.cleaned_data.get('subevent'),
date_filter=self.filter_form.cleaned_data['date_axis'],
date_from=self.filter_form.cleaned_data['date_from'],
date_until=self.filter_form.cleaned_data['date_until'],
fees=True
)
else:
ctx['items_by_category'], ctx['total'] = order_overview(
self.request.event,
fees=True
)
ctx['subevent_warning'] = (
self.request.event.has_subevents and
self.filter_form.is_valid() and
self.filter_form.cleaned_data.get('subevent') and
OrderFee.objects.filter(order__event=self.request.event).exclude(value=0).exists()
)
ctx['filter_form'] = self.filter_form
return ctx
class OrderGo(EventPermissionRequiredMixin, View):
permission = 'can_view_orders'
def get_order(self, code):
try:
return Order.objects.get(code=code, event=self.request.event)
except Order.DoesNotExist:
return Order.objects.get(code=Order.normalize_code(code), event=self.request.event)
def get(self, request, *args, **kwargs):
code = request.GET.get("code", "").upper().strip()
if '://' in code:
m = re.match('.*/ORDER/([A-Z0-9]{' + str(settings.ENTROPY['order_code']) + '})/.*', code)
if m:
code = m.group(1)
try:
if code.startswith(request.event.slug.upper()):
code = code[len(request.event.slug):]
if code.startswith('-'):
code = code[1:]
order = self.get_order(code)
return redirect('control:event.order', event=request.event.slug, organizer=request.event.organizer.slug,
code=order.code)
except Order.DoesNotExist:
try:
i = self.request.event.invoices.get(Q(invoice_no=code) | Q(full_invoice_no=code))
return redirect('control:event.order', event=request.event.slug, organizer=request.event.organizer.slug,
code=i.order.code)
except Invoice.DoesNotExist:
pass
messages.error(request, _('There is no order with the given order code.'))
return redirect('control:event.orders', event=request.event.slug, organizer=request.event.organizer.slug)
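# Illustrative normalization walk-through (hypothetical values, assuming an
# order-code entropy of 5): for an event with slug "democon", both the raw
# input "democon-n9zrv" and a pasted URL such as
# "https://shop.example.org/democon/order/N9ZRV/secret/" reduce to the bare
# code "N9ZRV" before the lookup above; Order.normalize_code() is then tried
# as a fallback for codes typed with ambiguous characters.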
class ExportMixin:
@cached_property
def exporters(self):
exporters = []
responses = register_data_exporters.send(self.request.event)
for ex in sorted([response(self.request.event) for r, response in responses], key=lambda ex: str(ex.verbose_name)):
if self.request.GET.get("identifier") and ex.identifier != self.request.GET.get("identifier"):
continue
# Use form parse cycle to generate useful defaults
test_form = ExporterForm(data=self.request.GET, prefix=ex.identifier)
test_form.fields = ex.export_form_fields
test_form.is_valid()
initial = {
k: v for k, v in test_form.cleaned_data.items() if ex.identifier + "-" + k in self.request.GET
}
ex.form = ExporterForm(
data=(self.request.POST if self.request.method == 'POST' else None),
prefix=ex.identifier,
initial=initial
)
ex.form.fields = ex.export_form_fields
exporters.append(ex)
return exporters
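# Sketch of the double form pass above (hypothetical exporter identifier): a
# request like ?identifier=csvexport&csvexport-encoding=utf-8 is first parsed
# by a throwaway ExporterForm purely to coerce "utf-8" into cleaned_data, and
# only keys actually present in the query string are carried over as initial
# values for the form that is finally rendered (or bound to POST data).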
class ExportDoView(EventPermissionRequiredMixin, ExportMixin, AsyncAction, View):
permission = 'can_view_orders'
known_errortypes = ['ExportError']
task = export
def get_success_message(self, value):
return None
def get_success_url(self, value):
return reverse('cachedfile.download', kwargs={'id': str(value)})
def get_error_url(self):
return reverse('control:event.orders.export', kwargs={
'event': self.request.event.slug,
'organizer': self.request.event.organizer.slug
})
@cached_property
def exporter(self):
for ex in self.exporters:
if ex.identifier == self.request.POST.get("exporter"):
return ex
def post(self, request, *args, **kwargs):
if not self.exporter:
messages.error(self.request, _('The selected exporter was not found.'))
return redirect('control:event.orders.export', kwargs={
'event': self.request.event.slug,
'organizer': self.request.event.organizer.slug
})
if not self.exporter.form.is_valid():
messages.error(self.request, _('There was a problem processing your input. See below for error details.'))
return self.get(request, *args, **kwargs)
cf = CachedFile()
cf.date = now()
cf.expires = now() + timedelta(days=3)
cf.save()
return self.do(self.request.event.id, str(cf.id), self.exporter.identifier, self.exporter.form.cleaned_data)
class ExportView(EventPermissionRequiredMixin, ExportMixin, TemplateView):
permission = 'can_view_orders'
template_name = 'pretixcontrol/orders/export.html'
def get_context_data(self, **kwargs):
ctx = super().get_context_data(**kwargs)
ctx['exporters'] = self.exporters
return ctx
class RefundList(EventPermissionRequiredMixin, PaginationMixin, ListView):
model = OrderRefund
context_object_name = 'refunds'
template_name = 'pretixcontrol/orders/refunds.html'
permission = 'can_view_orders'
def get_queryset(self):
qs = OrderRefund.objects.filter(
order__event=self.request.event
).select_related('order')
if self.filter_form.is_valid():
qs = self.filter_form.filter_qs(qs)
return qs.distinct()
def get_context_data(self, **kwargs):
ctx = super().get_context_data(**kwargs)
ctx['filter_form'] = self.filter_form
return ctx
@cached_property
def filter_form(self):
return RefundFilterForm(data=self.request.GET, event=self.request.event,
initial={'status': 'open'})
# ---- vspk/cli/cli.py (mohaimenhasan/vspk-python, BSD-3-Clause) ----
#!/usr/bin/env python
#
# Copyright (c) 2015, Alcatel-Lucent Inc, 2017 Nokia
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the copyright holder nor the names of its contributors
# may be used to endorse or promote products derived from this software without
# specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import argparse
import sys
sys.path.append("../")
class _HelpAction(argparse._HelpAction):
def __call__(self, parser, namespace, values, option_string=None):
parser.print_help()
subparsers_actions = [
action for action in parser._actions
if isinstance(action, argparse._SubParsersAction)]
for subparsers_action in subparsers_actions:
for choice, subparser in subparsers_action.choices.items():
print("\n{}:\n{}".format(choice.upper(), "-" * (len(choice) + 1)))
print(subparser.format_help())
parser.exit()
def main(argv=sys.argv):
default_parser = argparse.ArgumentParser(description="CLI for VSD", add_help=False)
default_parser.add_argument("-v", "--verbose", help="Activate verbose mode", action="store_true")
default_parser.add_argument("--username", help="Username to get an api key or set 'VSD_USERNAME' in your variable environment")
default_parser.add_argument("--password", help="Password to get an api key or set 'VSD_PASSWORD' in your variable environment")
default_parser.add_argument("--api", help="URL of the API endpoint or set 'VSD_API_URL' in your variable environment")
default_parser.add_argument("--version", help="Version of the API or set 'VSD_API_VERSION' in your variable environment")
default_parser.add_argument("--enterprise", help="Name of the enterprise to connect or set 'VSD_ENTERPRISE' in your variable environment")
default_parser.add_argument("--json", help="Add this option get a JSON output or set VSD_JSON_OUTPUT=True", action="store_true")
parser = argparse.ArgumentParser(description="CLI for VSD Software Development Kit", add_help=False)
parser.add_argument("-h", "--help", action=_HelpAction, help="help for help if you need some help")
subparsers = parser.add_subparsers(dest="command",
title="All available commands")
# List Command
list_parser = subparsers.add_parser("list", description="List all objects", parents=[default_parser])
list_parser.add_argument("list", help="Name of the object (See command 'objects' to list all objects name)")
list_parser.add_argument("--in", dest="parent_infos", nargs=2, help="Specify the PARENT_NAME and PARENT_UUID")
list_parser.add_argument("-f", "--filter", dest="filter", help="Specify a filter predicate")
list_parser.add_argument("-x", "--fields", dest="fields", help="Specify output fields", nargs="+", type=str)
list_parser.add_argument("-q", "--query", dest="query_parameters", nargs="*", help="List of Key=Value that will be sent as query parameters", required=False)
list_parser.add_argument("-p", "--page", dest="page", help="The page number that needs to be retreived. This value is ignored unless you also configure the page size parameter. Default value is 0", type=int, default=0)
list_parser.add_argument("-s", "--page-size", dest="page_size", help="The size of a single page that needs to be retreived. If this is configured, the list command will only return a maximum of this amount of results", type=int)
# Count Command
count_parser = subparsers.add_parser("count", description="Count all objects", parents=[default_parser])
count_parser.add_argument("count", help="Name of the object (See command 'objects' to list all objects name)")
count_parser.add_argument("--in", dest="parent_infos", nargs=2, help="Specify the parent name and its uuid")
count_parser.add_argument("-f", "--filter", dest="filter", help="Specify a filter predicate")
count_parser.add_argument("-x", "--fields", dest="fields", help="Specify output fields", nargs="+", type=str)
count_parser.add_argument("-q", "--query", dest="query_parameters", nargs="*", help="List of Key=Value that will be sent as query parameters", required=False)
# Show Command
show_parser = subparsers.add_parser("show", description="Show a specific object", parents=[default_parser])
show_parser.add_argument("show", help="Name of the object to show (See command 'objects' to list all objects name)")
show_parser.add_argument("-i", "--id", dest="id", help="Identifier of the object to show", required=True)
show_parser.add_argument("-x", "--fields", dest="fields", help="Specify output fields", nargs="+", type=str)
# Create Command
create_parser = subparsers.add_parser("create", description="Create a new object", parents=[default_parser])
create_parser.add_argument("create", help="Name of the object to create (See command 'objects' to list all objects name)")
create_parser.add_argument("--in", dest="parent_infos", nargs=2, help="Specify the parent name and its uuid")
create_parser.add_argument("-p", "--params", dest="params", nargs="*", help="List of Key=Value parameters", required=True)
# Update Command
update_parser = subparsers.add_parser("update", description="Update an existing object", parents=[default_parser])
update_parser.add_argument("update", help="Name of the object to update (See command 'objects' to list all objects name)")
update_parser.add_argument("-i", "--id", dest="id", help="Identifier of the object to show", required=True)
update_parser.add_argument("-p", "--params", dest="params", nargs="*", help="List of Key=Value parameters", required=True)
# Delete Command
delete_parser = subparsers.add_parser("delete", description="Delete an existing object", parents=[default_parser])
delete_parser.add_argument("delete", help="Name of the object to update (See command 'objects' to list all objects name)")
delete_parser.add_argument("-i", "--id", dest="id", help="Identifier of the object to show", required=True)
# Assign Command
assign_parser = subparsers.add_parser('assign', description="Assign a set of new objects according to their identifier", parents=[default_parser])
assign_parser.add_argument('assign', help='Name of the object to assign (See command `objects` to list all objects name)')
assign_parser.add_argument('--ids', dest='ids', nargs='*', help='Identifier of the object to assign', required=True)
assign_parser.add_argument('--to', dest='parent_infos', nargs=2, help="Specify the resource name and its uuid", required=True)
# Unassign Command
unassign_parser = subparsers.add_parser('unassign', description="Unassign a set of new objects according to their identifier", parents=[default_parser])
unassign_parser.add_argument('unassign', help='Name of the object to unassign (See command `objects` to list all objects name)')
unassign_parser.add_argument('--ids', dest='ids', nargs='*', help='Identifier of the object to unassign', required=True)
unassign_parser.add_argument('--from', dest='parent_infos', nargs=2, help="Specify the resource name and its uuid", required=True)
# Reassign Command
reassign_parser = subparsers.add_parser('reassign', description="Reassign all objects according to their identifier", parents=[default_parser])
reassign_parser.add_argument('reassign', help='Name of the object to reassign (See command `objects` to list all objects name)')
reassign_parser.add_argument('--ids', dest='ids', nargs='*', help='Identifier of the object to reassign. If --ids is not specified, it will remove all assigned objects')
reassign_parser.add_argument('--to', dest='parent_infos', nargs=2, help="Specify the resource name and its uuid", required=True)
# Resources Command
objects_parser = subparsers.add_parser("objects", description="Explore all objects", parents=[default_parser])
objects_parser.add_argument("-f", "--filter", dest="filter", help="Filter by name (ex: -f nsg)")
objects_parser.add_argument("-p", "--parent", dest="parent", help="Filter by parent (ex -p enterprise)")
objects_parser.add_argument("-c", "--child", dest="child", help="Filter by children (ex: -c domain)")
args = parser.parse_args()
from commands import CLICommand
CLICommand.execute(args)
if __name__ == "__main__":
    main()
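# Example invocations (illustrative; object names, UUIDs and credentials are
# placeholders -- run the `objects` command for the real object names):
#
#   python cli.py list enterprise --api https://vsd.example.com:8443 \
#       --version 5.0 --username csproot --password csproot --enterprise csp
#   python cli.py show domain -i 256a57c1-0000-0000-0000-000000000000 --json
#   python cli.py create subnet --in domain 256a57c1-0000-0000-0000-000000000000 \
#       -p name=web address=10.0.0.0 netmask=255.255.255.0
#
# The connection settings can also come from the VSD_USERNAME, VSD_PASSWORD,
# VSD_API_URL, VSD_API_VERSION and VSD_ENTERPRISE environment variables.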
# ---- deepmath/deephol/public/proof_assistant.py (LaudateCorpus1/deepmath, Apache-2.0) ----
"""A python client interface for ProofAssistantService."""
from __future__ import absolute_import
from __future__ import division
# Import Type Annotations
from __future__ import print_function
import grpc
import tensorflow as tf
from deepmath.proof_assistant import proof_assistant_pb2
from deepmath.proof_assistant import proof_assistant_pb2_grpc
tf.flags.DEFINE_string(
'proof_assistant_server_address', 'localhost:2000',
'address (including port) of the proof assistant server')
FLAGS = tf.flags.FLAGS
GIGABYTE = 1024 * 1024 * 1024
GRPC_MAX_MESSAGE_LENGTH = GIGABYTE
class ProofAssistant(object):
"""Class for intefacing a proof assistant."""
def __init__(self):
self.channel = grpc.insecure_channel(
FLAGS.proof_assistant_server_address,
options=[('grpc.max_send_message_length', GRPC_MAX_MESSAGE_LENGTH),
('grpc.max_receive_message_length', GRPC_MAX_MESSAGE_LENGTH)])
self.stub = proof_assistant_pb2_grpc.ProofAssistantServiceStub(self.channel)
def ApplyTactic(self, request: proof_assistant_pb2.ApplyTacticRequest
) -> proof_assistant_pb2.ApplyTacticResponse:
return self.stub.ApplyTactic(request)
def VerifyProof(self, request: proof_assistant_pb2.VerifyProofRequest
) -> proof_assistant_pb2.VerifyProofResponse:
return self.stub.VerifyProof(request)
def RegisterTheorem(self, request: proof_assistant_pb2.RegisterTheoremRequest
) -> proof_assistant_pb2.RegisterTheoremResponse:
return self.stub.RegisterTheorem(request)
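def _example_usage():
  """Minimal usage sketch (not part of the original module). Assumes a proof
  assistant server is reachable at --proof_assistant_server_address; the
  request is left empty here -- see proof_assistant.proto for the actual
  message fields."""
  assistant = ProofAssistant()
  request = proof_assistant_pb2.ApplyTacticRequest()
  return assistant.ApplyTactic(request)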
# ---- src/data_utils/image_dataset.py (lindsey98/dml_cross_entropy, BSD-3-Clause) ----
from PIL import Image
from torch.utils.data import Dataset
from src.data_utils.utils import load_data
class ImageDataset(Dataset):
def __init__(self, samples: list, transform, preload: bool = False, num_workers=None):
self.transform = transform
self.samples = samples
self.targets = [label for _, label in self.samples]
self.preloaded = False
if preload:
image_paths = [image_path for image_path, _ in self.samples]
self.images = load_data(image_paths, num_workers=num_workers)
self.preloaded = True
print(self.__class__.__name__ + ' loaded with {} images'.format(len(self.images.keys())))
def __len__(self):
return len(self.samples)
def __getitem__(self, index):
image_path, label = self.samples[index]
if self.preloaded:
image = self.images[image_path].convert('RGB')
else:
image = Image.open(image_path).convert('RGB')
if self.transform is not None:
image = self.transform(image)
return image, label, index
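def _example_usage():
    """Minimal usage sketch (not part of the original module); the image paths
    and labels are placeholders, and torchvision is assumed to be available
    for the transform."""
    from torchvision import transforms
    samples = [('images/cat.jpg', 0), ('images/dog.jpg', 1)]
    transform = transforms.Compose([
        transforms.Resize((224, 224)),
        transforms.ToTensor(),
    ])
    dataset = ImageDataset(samples, transform, preload=False)
    image, label, index = dataset[0]  # (3, 224, 224) tensor, 0, 0
    return image, label, index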
# ---- Lib/site-packages/git/objects/commit.py (nemarugommula/ecommerce) ----
# commit.py
# Copyright (C) 2008, 2009 Michael Trier (mtrier@gmail.com) and contributors
#
# This module is part of GitPython and is released under
# the BSD License: http://www.opensource.org/licenses/bsd-license.php
from gitdb import IStream
from git.util import (
hex_to_bin,
Actor,
Iterable,
Stats,
finalize_process
)
from git.diff import Diffable
from .tree import Tree
from . import base
from .util import (
Traversable,
Serializable,
parse_date,
altz_to_utctz_str,
parse_actor_and_date,
from_timestamp,
)
from git.compat import text_type
from time import (
time,
daylight,
altzone,
timezone,
localtime
)
import os
from io import BytesIO
import logging
log = logging.getLogger('git.objects.commit')
log.addHandler(logging.NullHandler())
__all__ = ('Commit', )
class Commit(base.Object, Iterable, Diffable, Traversable, Serializable):
"""Wraps a git Commit object.
This class will act lazily on some of its attributes and will query the
value on demand only if it involves calling the git binary."""
# ENVIRONMENT VARIABLES
# read when creating new commits
env_author_date = "GIT_AUTHOR_DATE"
env_committer_date = "GIT_COMMITTER_DATE"
# CONFIGURATION KEYS
conf_encoding = 'i18n.commitencoding'
# INVARIANTS
default_encoding = "UTF-8"
# object configuration
type = "commit"
__slots__ = ("tree",
"author", "authored_date", "author_tz_offset",
"committer", "committed_date", "committer_tz_offset",
"message", "parents", "encoding", "gpgsig")
_id_attribute_ = "hexsha"
def __init__(self, repo, binsha, tree=None, author=None, authored_date=None, author_tz_offset=None,
committer=None, committed_date=None, committer_tz_offset=None,
message=None, parents=None, encoding=None, gpgsig=None):
"""Instantiate a new Commit. All keyword arguments taking None as default will
be implicitly set on first query.
:param binsha: 20 byte sha1
:param parents: tuple( Commit, ... )
is a tuple of commit ids or actual Commits
:param tree: Tree
Tree object
:param author: Actor
is the author Actor object
:param authored_date: int_seconds_since_epoch
is the authored DateTime - use time.gmtime() to convert it into a
different format
:param author_tz_offset: int_seconds_west_of_utc
is the timezone that the authored_date is in
:param committer: Actor
is the committer string
:param committed_date: int_seconds_since_epoch
is the committed DateTime - use time.gmtime() to convert it into a
different format
:param committer_tz_offset: int_seconds_west_of_utc
is the timezone that the committed_date is in
:param message: string
is the commit message
:param encoding: string
encoding of the message, defaults to UTF-8
:param parents:
List or tuple of Commit objects which are our parent(s) in the commit
dependency graph
:return: git.Commit
:note:
Timezone information is in the same format and in the same sign
as what time.altzone returns. The sign is inverted compared to git's
UTC timezone."""
super(Commit, self).__init__(repo, binsha)
if tree is not None:
assert isinstance(tree, Tree), "Tree needs to be a Tree instance, was %s" % type(tree)
if tree is not None:
self.tree = tree
if author is not None:
self.author = author
if authored_date is not None:
self.authored_date = authored_date
if author_tz_offset is not None:
self.author_tz_offset = author_tz_offset
if committer is not None:
self.committer = committer
if committed_date is not None:
self.committed_date = committed_date
if committer_tz_offset is not None:
self.committer_tz_offset = committer_tz_offset
if message is not None:
self.message = message
if parents is not None:
self.parents = parents
if encoding is not None:
self.encoding = encoding
if gpgsig is not None:
self.gpgsig = gpgsig
@classmethod
def _get_intermediate_items(cls, commit):
return commit.parents
def _set_cache_(self, attr):
if attr in Commit.__slots__:
            # read the data in a chunk, it's faster - then provide a file wrapper
_binsha, _typename, self.size, stream = self.repo.odb.stream(self.binsha)
self._deserialize(BytesIO(stream.read()))
else:
super(Commit, self)._set_cache_(attr)
# END handle attrs
@property
def authored_datetime(self):
return from_timestamp(self.authored_date, self.author_tz_offset)
@property
def committed_datetime(self):
return from_timestamp(self.committed_date, self.committer_tz_offset)
@property
def summary(self):
""":return: First line of the commit message"""
return self.message.split('\n', 1)[0]
def count(self, paths='', **kwargs):
"""Count the number of commits reachable from this commit
:param paths:
is an optional path or a list of paths restricting the return value
to commits actually containing the paths
:param kwargs:
Additional options to be passed to git-rev-list. They must not alter
the output style of the command, or parsing will yield incorrect results
:return: int defining the number of reachable commits"""
# yes, it makes a difference whether empty paths are given or not in our case
# as the empty paths version will ignore merge commits for some reason.
if paths:
return len(self.repo.git.rev_list(self.hexsha, '--', paths, **kwargs).splitlines())
return len(self.repo.git.rev_list(self.hexsha, **kwargs).splitlines())
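    # Illustrative call (hypothetical repository): repo.head.commit.count()
    # gives the number of commits reachable from HEAD, while
    # repo.head.commit.count(paths='setup.py') restricts the count to commits
    # that touch that file.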
@property
def name_rev(self):
"""
:return:
String describing the commits hex sha based on the closest Reference.
Mostly useful for UI purposes"""
return self.repo.git.name_rev(self)
@classmethod
def iter_items(cls, repo, rev, paths='', **kwargs):
"""Find all commits matching the given criteria.
:param repo: is the Repo
:param rev: revision specifier, see git-rev-parse for viable options
:param paths:
is an optional path or list of paths, if set only Commits that include the path
or paths will be considered
:param kwargs:
optional keyword arguments to git rev-list where
``max_count`` is the maximum number of commits to fetch
``skip`` is the number of commits to skip
``since`` all commits since i.e. '1970-01-01'
:return: iterator yielding Commit items"""
if 'pretty' in kwargs:
raise ValueError("--pretty cannot be used as parsing expects single sha's only")
# END handle pretty
# use -- in any case, to prevent possibility of ambiguous arguments
# see https://github.com/gitpython-developers/GitPython/issues/264
args = ['--']
if paths:
args.extend((paths, ))
# END if paths
proc = repo.git.rev_list(rev, args, as_process=True, **kwargs)
return cls._iter_from_process_or_stream(repo, proc)
def iter_parents(self, paths='', **kwargs):
"""Iterate _all_ parents of this commit.
:param paths:
Optional path or list of paths limiting the Commits to those that
contain at least one of the paths
:param kwargs: All arguments allowed by git-rev-list
:return: Iterator yielding Commit objects which are parents of self """
# skip ourselves
skip = kwargs.get("skip", 1)
if skip == 0: # skip ourselves
skip = 1
kwargs['skip'] = skip
return self.iter_items(self.repo, self, paths, **kwargs)
@property
def stats(self):
"""Create a git stat from changes between this commit and its first parent
or from all changes done if this is the very first commit.
:return: git.Stats"""
if not self.parents:
text = self.repo.git.diff_tree(self.hexsha, '--', numstat=True, root=True)
text2 = ""
for line in text.splitlines()[1:]:
(insertions, deletions, filename) = line.split("\t")
text2 += "%s\t%s\t%s\n" % (insertions, deletions, filename)
text = text2
else:
text = self.repo.git.diff(self.parents[0].hexsha, self.hexsha, '--', numstat=True)
return Stats._list_from_string(self.repo, text)
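    # Sketch of the result shape (hypothetical values): commit.stats.files
    # maps each touched path to {'insertions': ..., 'deletions': ...,
    # 'lines': ...}, and commit.stats.total aggregates the same keys plus
    # 'files' across the whole commit.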
@classmethod
def _iter_from_process_or_stream(cls, repo, proc_or_stream):
"""Parse out commit information into a list of Commit objects
We expect one-line per commit, and parse the actual commit information directly
        from our lightning-fast object database
:param proc: git-rev-list process instance - one sha per line
:return: iterator returning Commit objects"""
stream = proc_or_stream
if not hasattr(stream, 'readline'):
stream = proc_or_stream.stdout
readline = stream.readline
while True:
line = readline()
if not line:
break
hexsha = line.strip()
if len(hexsha) > 40:
# split additional information, as returned by bisect for instance
hexsha, _ = line.split(None, 1)
# END handle extra info
assert len(hexsha) == 40, "Invalid line: %s" % hexsha
yield Commit(repo, hex_to_bin(hexsha))
# END for each line in stream
# TODO: Review this - it seems process handling got a bit out of control
# due to many developers trying to fix the open file handles issue
if hasattr(proc_or_stream, 'wait'):
finalize_process(proc_or_stream)
@classmethod
def create_from_tree(cls, repo, tree, message, parent_commits=None, head=False, author=None, committer=None,
author_date=None, commit_date=None):
"""Commit the given tree, creating a commit object.
:param repo: Repo object the commit should be part of
:param tree: Tree object or hex or bin sha
the tree of the new commit
:param message: Commit message. It may be an empty string if no message is provided.
It will be converted to a string in any case.
:param parent_commits:
Optional Commit objects to use as parents for the new commit.
If empty list, the commit will have no parents at all and become
a root commit.
If None , the current head commit will be the parent of the
new commit object
:param head:
If True, the HEAD will be advanced to the new commit automatically.
Else the HEAD will remain pointing on the previous commit. This could
lead to undesired results when diffing files.
:param author: The name of the author, optional. If unset, the repository
configuration is used to obtain this value.
:param committer: The name of the committer, optional. If unset, the
repository configuration is used to obtain this value.
:param author_date: The timestamp for the author field
:param commit_date: The timestamp for the committer field
:return: Commit object representing the new commit
:note:
Additional information about the committer and Author are taken from the
environment or from the git configuration, see git-commit-tree for
more information"""
if parent_commits is None:
try:
parent_commits = [repo.head.commit]
except ValueError:
# empty repositories have no head commit
parent_commits = []
# END handle parent commits
else:
for p in parent_commits:
if not isinstance(p, cls):
raise ValueError("Parent commit '%r' must be of type %s" % (p, cls))
# end check parent commit types
# END if parent commits are unset
# retrieve all additional information, create a commit object, and
# serialize it
# Generally:
# * Environment variables override configuration values
# * Sensible defaults are set according to the git documentation
        # COMMITTER AND AUTHOR INFO
cr = repo.config_reader()
env = os.environ
committer = committer or Actor.committer(cr)
author = author or Actor.author(cr)
# PARSE THE DATES
unix_time = int(time())
is_dst = daylight and localtime().tm_isdst > 0
offset = altzone if is_dst else timezone
author_date_str = env.get(cls.env_author_date, '')
if author_date:
author_time, author_offset = parse_date(author_date)
elif author_date_str:
author_time, author_offset = parse_date(author_date_str)
else:
author_time, author_offset = unix_time, offset
# END set author time
committer_date_str = env.get(cls.env_committer_date, '')
if commit_date:
committer_time, committer_offset = parse_date(commit_date)
elif committer_date_str:
committer_time, committer_offset = parse_date(committer_date_str)
else:
committer_time, committer_offset = unix_time, offset
# END set committer time
# assume utf8 encoding
enc_section, enc_option = cls.conf_encoding.split('.')
conf_encoding = cr.get_value(enc_section, enc_option, cls.default_encoding)
# if the tree is no object, make sure we create one - otherwise
# the created commit object is invalid
if isinstance(tree, str):
tree = repo.tree(tree)
# END tree conversion
# CREATE NEW COMMIT
new_commit = cls(repo, cls.NULL_BIN_SHA, tree,
author, author_time, author_offset,
committer, committer_time, committer_offset,
message, parent_commits, conf_encoding)
stream = BytesIO()
new_commit._serialize(stream)
streamlen = stream.tell()
stream.seek(0)
istream = repo.odb.store(IStream(cls.type, streamlen, stream))
new_commit.binsha = istream.binsha
if head:
# need late import here, importing git at the very beginning throws
# as well ...
import git.refs
try:
repo.head.set_commit(new_commit, logmsg=message)
except ValueError:
# head is not yet set to the ref our HEAD points to
# Happens on first commit
master = git.refs.Head.create(repo, repo.head.ref, new_commit, logmsg="commit (initial): %s" % message)
repo.head.set_reference(master, logmsg='commit: Switching to %s' % master)
# END handle empty repositories
# END advance head handling
return new_commit
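    # Minimal usage sketch (assumes a non-bare repository with staged
    # changes):
    #
    #   tree = repo.index.write_tree()
    #   commit = Commit.create_from_tree(repo, tree, "my message", head=True)
    #
    # In practice repo.index.commit(...) wraps this same code path and is
    # usually the more convenient entry point.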
#{ Serializable Implementation
def _serialize(self, stream):
write = stream.write
write(("tree %s\n" % self.tree).encode('ascii'))
for p in self.parents:
write(("parent %s\n" % p).encode('ascii'))
a = self.author
aname = a.name
c = self.committer
fmt = "%s %s <%s> %s %s\n"
write((fmt % ("author", aname, a.email,
self.authored_date,
altz_to_utctz_str(self.author_tz_offset))).encode(self.encoding))
# encode committer
aname = c.name
write((fmt % ("committer", aname, c.email,
self.committed_date,
altz_to_utctz_str(self.committer_tz_offset))).encode(self.encoding))
if self.encoding != self.default_encoding:
write(("encoding %s\n" % self.encoding).encode('ascii'))
try:
if self.__getattribute__('gpgsig') is not None:
write(b"gpgsig")
for sigline in self.gpgsig.rstrip("\n").split("\n"):
write((" " + sigline + "\n").encode('ascii'))
except AttributeError:
pass
write(b"\n")
# write plain bytes, be sure its encoded according to our encoding
if isinstance(self.message, text_type):
write(self.message.encode(self.encoding))
else:
write(self.message)
# END handle encoding
return self
def _deserialize(self, stream):
""":param from_rev_list: if true, the stream format is coming from the rev-list command
Otherwise it is assumed to be a plain data stream from our object"""
readline = stream.readline
self.tree = Tree(self.repo, hex_to_bin(readline().split()[1]), Tree.tree_id << 12, '')
self.parents = []
next_line = None
while True:
parent_line = readline()
if not parent_line.startswith(b'parent'):
next_line = parent_line
break
# END abort reading parents
self.parents.append(type(self)(self.repo, hex_to_bin(parent_line.split()[-1].decode('ascii'))))
# END for each parent line
self.parents = tuple(self.parents)
# we don't know actual author encoding before we have parsed it, so keep the lines around
author_line = next_line
committer_line = readline()
# we might run into one or more mergetag blocks, skip those for now
next_line = readline()
while next_line.startswith(b'mergetag '):
next_line = readline()
while next_line.startswith(b' '):
next_line = readline()
# end skip mergetags
# now we can have the encoding line, or an empty line followed by the optional
# message.
self.encoding = self.default_encoding
self.gpgsig = None
# read headers
enc = next_line
buf = enc.strip()
while buf:
            if buf[0:9] == b"encoding ":
                # the header literal is 9 bytes long, and bytes.find() needs a
                # bytes needle on Python 3
                self.encoding = buf[buf.find(b' ') + 1:].decode(
                    self.encoding, 'ignore')
elif buf[0:7] == b"gpgsig ":
sig = buf[buf.find(b' ') + 1:] + b"\n"
is_next_header = False
while True:
sigbuf = readline()
if not sigbuf:
break
if sigbuf[0:1] != b" ":
buf = sigbuf.strip()
is_next_header = True
break
sig += sigbuf[1:]
# end read all signature
self.gpgsig = sig.rstrip(b"\n").decode(self.encoding, 'ignore')
if is_next_header:
continue
buf = readline().strip()
# decode the authors name
try:
self.author, self.authored_date, self.author_tz_offset = \
parse_actor_and_date(author_line.decode(self.encoding, 'replace'))
except UnicodeDecodeError:
log.error("Failed to decode author line '%s' using encoding %s", author_line, self.encoding,
exc_info=True)
try:
self.committer, self.committed_date, self.committer_tz_offset = \
parse_actor_and_date(committer_line.decode(self.encoding, 'replace'))
except UnicodeDecodeError:
log.error("Failed to decode committer line '%s' using encoding %s", committer_line, self.encoding,
exc_info=True)
# END handle author's encoding
# a stream from our data simply gives us the plain message
# The end of our message stream is marked with a newline that we strip
self.message = stream.read()
try:
self.message = self.message.decode(self.encoding, 'replace')
except UnicodeDecodeError:
log.error("Failed to decode message '%s' using encoding %s", self.message, self.encoding, exc_info=True)
# END exception handling
return self
#} END serializable implementation
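def _example_iter_commits():
    """Usage sketch (not part of GitPython): print short summaries for the
    three most recent commits of a hypothetical repository at '.'."""
    from git import Repo  # local import to avoid a cycle at module load time
    repo = Repo('.')
    for commit in repo.iter_commits('HEAD', max_count=3):
        print(commit.hexsha[:8], commit.summary)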
# ---- run_validation.py (ASinanSaglam/atomizer_analysis, MIT) ----
# %matplotlib notebook
import os, re, sys, urllib, requests, base64, IPython, io, pickle, glob
sys.path.append("/home/monoid/Development/fresh_atomizer_checks/atomizer/SBMLparser/test/manual")
import itertools as itt
import numpy as np
import subprocess as sb
import pandas as pd
import matplotlib.pyplot as plt
import matplotlib.image as mpimg
import roadrunner, h5py
from bs4 import BeautifulSoup as BS
from IPython.display import Image, display
from matplotlib import rcParams
import analyzerTools as AT
def run_test(analyzer, test_no, t_end=1000, atomize=False, db=None, meta=None):
    '''Run a single curated-model test through the analyzer, recording the outcome in meta.'''
    if analyzer.run_single_test(test_no, t_end=t_end, atomize=atomize, meta=meta):
if meta:
meta[test_no]["success"] = True
print("run successful {}".format(test_no))
#if db is not None:
# # Save results into a DataFrame
# res = analyzer.all_results[test_no]
# sbml, bngl, rmsd, valid_per, keys = res[0],res[1],res[2],res[3],res[4]
# for key in keys:
# # couldn't get the curation keys
# if len(key) == 2:
# skey, bkey = key
# # got curation keys
# elif len(key) == 3:
# skey, bkey, ckey = key
# else:
# print("couldn't find keys")
# IPython.embed()
# sys.exit()
# # setting up the database
# db.at["{:010d}".format(test_no), "{}_sbml".format(skey)] = res[0][skey]
# db.at["{:010d}".format(test_no), "{}_bngl".format(bkey)] = res[1][bkey]
# analyzer.plot_results(test_no, legend=True, save_fig=True)
# if(analyzer.run_old_test(test_no, t_end=100, atomize=atomize)):
# print("run successful {}".format(test_no))
# analyzer.plot_old_results(test_no, legend=False, save_fig=True)
else:
if meta:
meta[test_no]["success"] = False
print("run failed {}".format(test_no))
def uniquefy_names(keys):
    '''Make key tuples unique by appending a running count to repeated names in each position.'''
unique_keys = []
if len(keys[0]) == 3:
bkeys_d = {}
skeys_d = {}
ckeys_d = {}
for key in keys:
bkey, skey, ckey = key
if bkey in bkeys_d.keys():
bkey_new = bkey + "_{}".format(bkeys_d[bkey])
bkeys_d[bkey] += 1
bkey = bkey_new
else:
bkeys_d[bkey] = 1
if skey in skeys_d.keys():
skey_new = skey + "_{}".format(skeys_d[skey])
skeys_d[skey] += 1
skey = skey_new
else:
skeys_d[skey] = 1
if ckey in ckeys_d.keys():
ckey_new = ckey + "_{}".format(ckeys_d[ckey])
ckeys_d[ckey] += 1
ckey = ckey_new
else:
ckeys_d[ckey] = 1
unique_keys.append( (bkey,skey,ckey) )
else:
bkeys_d = {}
skeys_d = {}
for key in keys:
bkey, skey = key
if bkey in bkeys_d.keys():
bkey_new = bkey + "_{}".format(bkeys_d[bkey])
bkeys_d[bkey] += 1
bkey = bkey_new
else:
bkeys_d[bkey] = 1
if skey in skeys_d.keys():
skey_new = skey + "_{}".format(skeys_d[skey])
skeys_d[skey] += 1
skey = skey_new
else:
skeys_d[skey] = 1
unique_keys.append( (bkey,skey) )
return unique_keys
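# Hedged example of uniquefy_names with illustrative 2-tuples (values made up):
#   >>> uniquefy_names([("A", "x"), ("A", "x"), ("B", "x")])
#   [('A', 'x'), ('A_1', 'x_1'), ('B', 'x_2')]
# Repeated names pick up a running suffix so structured-array field names stay unique.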
def update_results(results, h5file):
    '''Store each model's SBML and BNGL result frames as structured arrays in the HDF5 file.'''
for key in results:
if "{:010d}".format(key) in h5file:
continue
# create a model group
res_grp = h5file.create_group("{:010d}".format(key))
# pull dataframes
sres, bres, _, _, keys_used = results[key]
# names
if len(keys_used) == 0:
continue
if len(keys_used[0]) == 2:
names_to_use = [keys_used[i][1] for i in range(len(keys_used))]
skeyd = dict([(keys_used[i][1],keys_used[i][0]) for i in range(len(keys_used))])
bkeyd = dict([(keys_used[i][1],keys_used[i][1]) for i in range(len(keys_used))])
skn = list(map(lambda x: skeyd[x], names_to_use))
bkn = list(map(lambda x: bkeyd[x], names_to_use))
else:
names_to_use = [keys_used[i][2] for i in range(len(keys_used))]
skeyd = dict([(keys_used[i][2],keys_used[i][0]) for i in range(len(keys_used))])
bkeyd = dict([(keys_used[i][2],keys_used[i][1]) for i in range(len(keys_used))])
skn = list(map(lambda x: skeyd[x], names_to_use))
bkn = list(map(lambda x: bkeyd[x], names_to_use))
# make structured arrays
sdtype = np.dtype({"names":names_to_use,
"formats": ["<f8" for i in range(len(names_to_use))]})
bdtype = np.dtype({"names":names_to_use,
"formats": ["<f8" for i in range(len(names_to_use))]})
# if len(names_to_use) != sres[skn].shape[1]:
# # we have multiple datasets per name, drop one
# for iname,name in enumerate(names_to_use):
# if len(sres[name].shape) > 1:
# #
stupl = list(map(tuple, sres[skn].values))
btupl = list(map(tuple, bres[bkn].values))
sarr = np.array(stupl, dtype=sdtype)
barr = np.array(btupl, dtype=bdtype)
# add the data in, if it exists
if sarr.shape[0] != 0:
sg = res_grp.create_dataset("sbml_data", data=sarr)
if barr.shape[0] != 0:
bg = res_grp.create_dataset("bngl_data", data=barr)
print("updated results")
return True
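# Hedged sketch of reading back what update_results wrote: group names are the
# zero-padded model numbers, each holding "sbml_data"/"bngl_data" structured arrays.
#   with h5py.File("results.h5", "r") as f:
#       for model in f:
#           grp = f[model]
#           if "sbml_data" in grp:
#               sarr = grp["sbml_data"][...]   # numpy structured array
#               print(model, sarr.dtype.names, sarr.shape)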
def save_meta(meta, fname="meta_data.pickle"):
    '''Pickle the metadata dict, merging into any existing pickle on disk.'''
if os.path.isfile(fname):
with open(fname, "rb") as f:
m = pickle.load(f)
for key in meta:
m[key] = meta[key]
with open(fname, "wb") as f:
pickle.dump(m, f)
else:
with open(fname, "wb") as f:
pickle.dump(meta, f)
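# Hedged sketch: loading the merged metadata back to list failed runs.
#   with open("meta_data.pickle", "rb") as f:
#       m = pickle.load(f)
#   failed = sorted(k for k, v in m.items() if v.get("success") is False)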
# All the paths we need
# The BNG2.pl file for bionetgen runs
bng_path = "/home/monoid/apps/BioNetGen-2.5.0/BNG2.pl"
# This is the python file that can be called from the command line
sbml_translator_path = "/home/monoid/Development/fresh_atomizer_checks/atomizer/SBMLparser/sbmlTranslator.py"
# if you give this the ATOMIZER ANALYZER 5000 will import atomizer and run internally
# translator_package_path = "/home/monoid/Development/fresh_atomizer_checks/atomizer/SBMLparser"
translator_package_path = None
# This is necessary for atomizer, has default naming conventions and a lot more
# this path will be sym linked to everywhere you want to run translator under
config_path = "/home/monoid/Development/fresh_atomizer_checks/atomizer/SBMLparser/config"
# the path to the folder that contains 5 zero padded folders for each test
tests_path = "/home/monoid/Development/fresh_atomizer_checks/atomizer/SBMLparser/test/curated"
# Now we also add the COPASI path
copasi_path = "/home/monoid/apps/copasi/4.27/bin/CopasiSE"
# change directory to where we want to run the tests
os.chdir("/home/monoid/Development/fresh_atomizer_checks/atomizer/SBMLparser/test/analyzerTools")
# The analyzer setup
ba = AT.BiomodelAnalyzer(bng_path, sbml_translator_path, config_path, tests_path,
translator_import=translator_package_path, copasi_path=copasi_path)
# Let's re-run everything
tests = list(range(908,915))
known_issues = set([24,25,34,154,155,196,201,589,613,668,669,696,468, # Not implemented
643,644,645, # Complex "i" is used in function/parameter
63,245,248,305,556,575,578,542, # rule named used as parameter
342,429,457,547,570,627,637,638, # compartment used as parameter
527,562,592,593,596,723,250, # Actually broken, even in Copasi
304,324,330,331,341,343,345,349,367,371,374,377,381,533,548,
549,551,618,642,670,671,680,682,684,118,252,673,531,532,555,
561, # no reactions
306,307,308,309,310,311,388,390,391,393,409,
428,505,512,528,557,566,567,719,641,71,90,173,
253, # assignment rules used in reactions
610, # function defs for v16/v17
558,568,674,722,412,445,302,208,268,51,55,162,180,179,579,
691,465,466,238,312,538,603,604,605,215, # Uses time
635,636, # Uses not only time but also encoded strings for parameters
119, # single reaction, not really suitable for translation
47,483,484,486,487, # initial states should result in no reactions,
164,165,167,326,375,400,554,577,664,672,693,698,
234,237,286,450, # Uses piecewise definitions
396,398,507,522,705,
499,474, # SBML modeller is careless and uses species that should be params
607, # Function not defined properly/links to another function
319,206,39,145,353,385,392,463,608,470,472, # non-integer stoichiometry
161,182,239, # true multi-compartment model
271 # multi-compartment and the modeller has issues
])
# Need to figure out, mostly CVODE
list_of_fails = set([246,336,378,383,384,387,438,9,107,123,183,192,269,
279,292,328,617,678,606, # new ones
616, # Legitimate bug, if species name is very simple AND rate constant
                     # only dependent on the species concentration AND we end up generating
                     # an observable with the same name as species name, then BNGL thinks
                     # we are giving obs name as the rate constant, leading to a bug
255, # Circular dependency in funcs?
401,402,403, # if func messes with func ordering
559, # can't load copasi result
64, # Due to website addition? also in too long set
232, # BNG takes too long?
172,176,177 # doesn't end up translating, takes a long time?
])
#too_long = set([64,574,426,70,217,247,503,469,471,473,506,451,595, # WAAAY TOO LONG - debug
# 332,334, # ATOMIZER BREAKS THESE
# 217,247,293,426,469 # too long when atomized
# ])
too_long = set([64 ,172,176,177,212,217,235,247,293,385,
426,451,457,463,469,470,471,472,473,474,
496,497,503,505,506,574,595,835,
863, # transl too long
232,608, # BNG takes too long
63,70, # long but completes?
269 # due to long CVODE error
])
################# NEW CHECKS ##############
# A complete new set of checks to see the latest state of the tool as we are
# writing the manuscript.
new_checks = set([64,217,235,496, # too long
497,498, # skey ratio index out of range?
63, # fairly long but does complete
119,465,468, # no data?
247,269,469,470,471,472,473,474,
503,505,506,595,606,608,835,863 # long, didn't check if completes
])
################# RUN FAILS ###############
run_fails = set([9,24,25,34,51,55,107,
123,154,155,162,164,165,167,172,176,177,179,180,183,192,
201,208,215,232,234,237,238,245,246,248,250,255,268,279,286,292,
302,305,312,326,328,332,334,336,353,375,383,384,385,387,396,398,
400,401,402,403,412,426,429,438,445,450,451,457,463,466,483,484,
486,487,499,507,522,527,531,532,538,542,547,554,555,556,558,559,
561,562,574,575,577,578,579,589,592,593,599,600,602,607,610,617,
627,635,636,637,638,643,644,645,664,668,669,672,673,674,675,678,
687,688,692,693,696,698,705,722,723,730,731,748,749,757,759,760,
763,764,766,775,801,802,808,815,824,826,833,837,840,841,849,851,
858,859,876,879,880 # run_failed
])
################# EVENTS #################
w_event = set([1,7,56,77,81,87,88,95,96,97,101,104,109, # models with events
111,117,120,121,122,124,125,126,127,128,129,130,131, # models with events
132,133,134,135,136,137,139,140,141,142,144,148,149, # models with events
152,153,158,186,187,188,189,193,194,195,196,227,235, # models with events
241,244,256,265,281,285,287,297,301,316,317,318,327, # models with events
337,338,339,340,342,344,404,408,422,436,437,439,479, # models with events
480,488,493,494,496,497,534,535,536,537,540,541,563, # models with events
570,571,597,598,601,612,613,620,621,628,632,634,650, # models with events
659,681,695,699,702,706,711,718,727,734,735,736,786, # models with events
789,791,794,806,814,816,817,818,820,822,825,829,834, # models with events
856,860,862,864,901]) # models with events
################# END CHECKS ##############
all_issues = known_issues.union(w_event)
all_issues = all_issues.union(list_of_fails)
# Load in database
# dbname = "validation.h5"
# if os.path.isfile(dbname):
# db = pd.read_hdf(dbname,key="validation")
# else:
# db = pd.DataFrame()
# run tests
# try:
if os.path.isfile("results.h5"):
os.remove("results.h5")
# results_file = h5py.File("results.h5","a")
results_file = h5py.File("results.h5","w")
else:
results_file = h5py.File("results.h5","w")
meta_data = {}
for test_no in tests:
#if test_no in all_issues:
# continue
# if test_no in w_event or test_no in new_checks or test_no in run_fails:
# if test_no in new_checks or test_no in run_fails:
# continue
if test_no in too_long:
meta_data[test_no] = {"too_long":True}
continue
if (os.path.isfile("/home/monoid/Development/fresh_atomizer_checks/atomizer/SBMLparser/test/curated/BIOMD{0:010d}.xml".format(test_no))):
#run_test(ba, test_no, t_end=100, atomize=False, db=db)
meta_data[test_no] = {"file":True, "too_long":False}
run_test(ba, test_no, t_end=100, atomize=True, meta=meta_data)
update_results(ba.all_results,results_file)
else:
meta_data[test_no] = {"file":False}
print("number {} doesn't exist".format(test_no))
save_meta(meta_data)
# with open("validation.pickle", 'wb') as f:
# pickle.dump(ba.all_results, f)
#except:
# with open("validation.pickle", 'wb') as f:
# pickle.dump(ba.all_results, f)
# db.to_hdf(dbname,"validation")
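# Hedged post-run sketch: pairing the HDF5 results with the pickled metadata,
# assuming both were written by the loop above.
#   with open("meta_data.pickle", "rb") as f:
#       m = pickle.load(f)
#   with h5py.File("results.h5", "r") as f:
#       done = set(int(k) for k in f.keys())
#   print("stored:", sorted(done), "missing:", sorted(set(m) - done))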
f7201f03fb11fc26e5295dea810629ac3fa330da | 5,030 | py | Python | cedar/forms.py | stewardshiptools/stewardshiptools | ee5d27e7b0d5d4947f34ad02bdf63a06ad0a5c3e | ["MIT"] | null | null | null | cedar/forms.py | stewardshiptools/stewardshiptools | ee5d27e7b0d5d4947f34ad02bdf63a06ad0a5c3e | ["MIT"] | 11 | 2020-03-24T15:29:46.000Z | 2022-03-11T23:14:48.000Z | cedar/forms.py | stewardshiptools/stewardshiptools | ee5d27e7b0d5d4947f34ad02bdf63a06ad0a5c3e | ["MIT"] | null | null | null |
from os.path import join
from email.mime.image import MIMEImage
from django.conf import settings
from django.forms import ModelForm, ValidationError, ChoiceField
from django.forms.models import BaseInlineFormSet
from django.forms.models import inlineformset_factory
from django.contrib.auth.forms import ReadOnlyPasswordHashWidget, ReadOnlyPasswordHashField, PasswordResetForm
from django.contrib.auth.tokens import default_token_generator
from django.core.mail import EmailMultiAlternatives
from django.template import loader
from crm.models import Person
from crm.forms import PersonSettingsForm
from django.contrib.auth.models import User
from security.forms import SecurityLevelModelFormMixin
from security.models import SecurityLevel
class UserAdminForm(SecurityLevelModelFormMixin, ModelForm):
"""
Override the user admin form so that we can force firstname, lastname
to be required --- needed for pushing changes over to crm.Person.
"""
password = ReadOnlyPasswordHashField(label=("Password"),
help_text=("Raw passwords are not stored, so there is no way to see "
"this user's password, but you can change the password "
"using <a href=\"password/\">this form</a>."))
# Need to minimally declare security_level here so that the user admin can see it.
# The mixin will take care of details.
security_level = ChoiceField()
def __init__(self, *args, **kwargs):
super(UserAdminForm, self).__init__(*args, **kwargs)
self.fields['first_name'].required = True
self.fields['last_name'].required = True
self.fields['email'].required = True
# self.fields['password'].widget = ReadOnlyPasswordHashWidget()
def get_security_level_default(self):
level_range = [x[0] for x in SecurityLevel.level_choices]
return max(level_range) # Default users to the lowest security level.
class Meta:
model = User
fields = '__all__'
class UserSettingsForm(ModelForm):
"""
This is the form used by the user menu to update user
profile settings. hides user password (for now).
"""
def __init__(self, *args, **kwargs):
super(UserSettingsForm, self).__init__(*args, **kwargs)
self.fields['first_name'].required = True
self.fields['last_name'].required = True
self.fields['email'].required = True
class Meta:
model = User
fields = ('first_name',
'last_name',
'email',)
class CedarPasswordResetForm(PasswordResetForm):
def send_mail(self, subject_template_name, email_template_name,
context, from_email, to_email, html_email_template_name=None):
"""
Sends a django.core.mail.EmailMultiAlternatives to `to_email`.
"""
subject = loader.render_to_string(subject_template_name, context)
# Email subject *must not* contain newlines
subject = ''.join(subject.splitlines())
body = loader.render_to_string(email_template_name, context)
email_message = EmailMultiAlternatives(subject, body, from_email, [to_email])
if html_email_template_name is not None:
html_email = loader.render_to_string(html_email_template_name, context)
email_message.attach_alternative(html_email, 'text/html')
email_message.mixed_subtype = 'related'
with open(join(settings.STATIC_ROOT, 'css/cedarbox_icon_gry.png'), 'rb') as fp:
logo_img = MIMEImage(fp.read())
logo_img.add_header('Content-ID', '<{}>'.format('cedarbox_icon_gry.png'))
email_message.attach(logo_img)
email_message.send()
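        # Hedged note: for the inline logo attached above to render, the HTML
        # template must reference the attachment by Content-ID, e.g. this
        # assumed template fragment:
        #   <img src="cid:cedarbox_icon_gry.png" alt="CedarBox">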
def save(self, domain_override=None,
subject_template_name='registration/password_reset_subject.txt',
email_template_name='registration/password_reset_email.html',
use_https=False, token_generator=default_token_generator,
from_email=None, request=None, html_email_template_name=None):
# If domain_override hasn't been provided. Let's override it ourself using the request.
if domain_override is None and request is not None:
domain_override = request.META['HTTP_HOST']
return super(CedarPasswordResetForm, self).save(
domain_override=domain_override,
subject_template_name=subject_template_name,
email_template_name=email_template_name,
use_https=use_https, token_generator=token_generator,
from_email=from_email, request=request, html_email_template_name=html_email_template_name
)
UserSettingsFormset = inlineformset_factory(User,
Person,
form=PersonSettingsForm,
extra=1
)
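# Hedged usage sketch (assumed view code, not part of this module): binding the
# settings form and inline formset to the logged-in user together.
#   def settings_view(request):
#       form = UserSettingsForm(request.POST or None, instance=request.user)
#       formset = UserSettingsFormset(request.POST or None, instance=request.user)
#       if request.method == 'POST' and form.is_valid() and formset.is_valid():
#           form.save()
#           formset.save()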