CombinedText stringlengths 4 3.42M |
|---|
import torch
import numpy as np
from baseline.utils import lookup_sentence, get_version
from baseline.utils import crf_mask as crf_m
from torch.autograd import Variable
import torch.autograd
import torch.nn as nn
import torch.nn.functional
import math
import copy
PYT_MAJOR_VERSION = get_version(torch)
def sequence_mask(lengths):
    """Build a [B, T] boolean mask from a [B] tensor of sequence lengths.

    Entry (b, t) is True when t < lengths[b], i.e. a valid (non-pad) step.
    """
    lens = lengths.cpu()
    max_len = torch.max(lens)
    # Positions 0..T-1 as a single row, broadcast against a column of lengths.
    positions = torch.arange(0, max_len.item()).type_as(lens).view(1, -1)
    limits = lens.view(-1, 1)
    # [1, T] < [B, 1] broadcasts to [B, T].
    return positions < limits
def classify_bt(model, batch_time):
    """Run a classifier over a batch and pair each probability with its label.

    :param model: a classifier whose output is log-probabilities and which
        exposes a `labels` list mapping class index -> label string
    :param batch_time: numpy array or tensor of batched inputs
    :return: list (one per batch row) of (label, probability) tuples
    """
    tensor = torch.from_numpy(batch_time) if type(batch_time) == np.ndarray else batch_time
    # exp() turns the model's log-probs into probs; data detaches from autograd.
    probs = model(torch.autograd.Variable(tensor, requires_grad=False).cuda()).exp().data
    # NOTE(review): this divides by the sum over the WHOLE batch, not per row;
    # for batch sizes > 1 each row will not sum to 1 -- confirm intended.
    probs.div_(torch.sum(probs))
    results = []
    batchsz = probs.size(0)
    for b in range(batchsz):
        outcomes = [(model.labels[id_i], prob_i) for id_i, prob_i in enumerate(probs[b])]
        results.append(outcomes)
    return results
def predict_seq_bt(model, x, xch, lengths):
    """Run a sequence model on (word, char, length) inputs.

    Accepts numpy arrays or tensors for each input; word and char tensors are
    moved to the GPU before the forward pass.
    NOTE(review): `lengths` is passed through without `.cuda()` -- presumably
    the model expects CPU lengths; confirm against the model implementation.
    """
    x_t = torch.from_numpy(x) if type(x) == np.ndarray else x
    xch_t = torch.from_numpy(xch) if type(xch) == np.ndarray else xch
    len_v = torch.from_numpy(lengths) if type(lengths) == np.ndarray else lengths
    x_v = torch.autograd.Variable(x_t, requires_grad=False).cuda()
    xch_v = torch.autograd.Variable(xch_t, requires_grad=False).cuda()
    #len_v = torch.autograd.Variable(len_t, requires_grad=False)
    results = model((x_v, xch_v, len_v))
    #print(results)
    #if type(x) == np.ndarray:
    # # results = results.cpu().numpy()
    # # Fix this to not be greedy
    # results = np.argmax(results, -1)
    return results
def to_scalar(var):
    """Return the first element of `var` as a plain Python number."""
    flat = var.view(-1)
    return flat.data.tolist()[0]
def argmax(vec):
    """Return the argmax of a 1xN row vector as a Python int."""
    _, best = torch.max(vec, 1)
    return to_scalar(best)
def log_sum_exp(vec):
    """Numerically stable log(sum(exp(vec))) for a 1xN row vector.

    Shifts by the max before exponentiating to avoid overflow.
    """
    max_idx = argmax(vec)
    max_score = vec[0, max_idx]
    shifted = vec - max_score.view(1, -1).expand(1, vec.size()[1])
    return max_score + torch.log(torch.sum(torch.exp(shifted)))
class SequenceCriterion(nn.Module):
    """Sum-reduced sequence loss that ignores padding targets (index 0)."""

    def __init__(self, LossFn=nn.NLLLoss):
        super(SequenceCriterion, self).__init__()
        # Padding (target 0) contributes nothing; sum instead of averaging.
        self.crit = LossFn(ignore_index=0, size_average=False)

    def forward(self, inputs, targets):
        # Flatten [B, T, C] -> [B*T, C] and [B, T] -> [B*T] before scoring.
        flat = targets.nelement()
        return self.crit(inputs.view(flat, -1), targets.view(flat))
class StackedLSTMCell(nn.Module):
    """A vertical stack of LSTMCells for one time step.

    Dropout is applied between layers only -- the topmost layer's output is
    returned un-dropped. Returns (top output, (stacked h, stacked c)).
    """

    def __init__(self, num_layers, input_size, rnn_size, dropout):
        super(StackedLSTMCell, self).__init__()
        self.dropout = nn.Dropout(dropout)
        self.num_layers = num_layers
        self.layers = nn.ModuleList()
        sz = input_size
        for _ in range(num_layers):
            self.layers.append(nn.LSTMCell(input_size=sz, hidden_size=rnn_size, bias=False))
            sz = rnn_size

    def forward(self, input, hidden):
        h_0, c_0 = hidden
        new_hs = []
        new_cs = []
        for depth, cell in enumerate(self.layers):
            h_d, c_d = cell(input, (h_0[depth], c_0[depth]))
            # Stash the pre-dropout states for the next time step.
            new_hs.append(h_d)
            new_cs.append(c_d)
            input = h_d
            # Inter-layer dropout only; leave the top output untouched.
            if depth != self.num_layers - 1:
                input = self.dropout(input)
        return input, (torch.stack(new_hs), torch.stack(new_cs))
class StackedGRUCell(nn.Module):
    """A vertical stack of GRUCells for one time step.

    Fix: dropout was previously gated on `i != self.num_layers`, which is
    always true, so dropout was also applied to the last layer's output --
    inconsistent with StackedLSTMCell. It now applies between layers only.
    """

    def __init__(self, num_layers, input_size, rnn_size, dropout):
        super(StackedGRUCell, self).__init__()
        self.dropout = nn.Dropout(dropout)
        self.num_layers = num_layers
        self.layers = nn.ModuleList()
        for i in range(num_layers):
            self.layers.append(nn.GRUCell(input_size=input_size, hidden_size=rnn_size))
            input_size = rnn_size

    def forward(self, input, hidden):
        """Run one step through the stack.

        :param input: [B, input_size] step input
        :param hidden: [num_layers, B, rnn_size] previous hidden states
        :return: (top layer output [B, rnn_size], stacked hiddens)
        """
        h_0 = hidden
        hs = []
        for i, layer in enumerate(self.layers):
            h_i = layer(input, (h_0[i]))
            input = h_i
            # Inter-layer dropout only, matching StackedLSTMCell.
            if i != self.num_layers - 1:
                input = self.dropout(input)
            hs += [h_i]
        hs = torch.stack(hs)
        return input, hs
def pytorch_rnn_cell(insz, hsz, rnntype, nlayers, dropout):
    """Factory for a stacked step cell: 'gru' -> GRU stack, else LSTM stack."""
    cell_cls = StackedGRUCell if rnntype == 'gru' else StackedLSTMCell
    return cell_cls(nlayers, insz, hsz, dropout)
def pytorch_embedding(x2vec, finetune=True):
    """Build an nn.Embedding from a pretrained word-vector model.

    :param x2vec: object exposing `vsz` (vocab size), `dsz` (dim), `weights`
    :param finetune: whether the lookup table remains trainable
    """
    pretrained = nn.Parameter(torch.FloatTensor(x2vec.weights),
                              requires_grad=finetune)
    lut = nn.Embedding(x2vec.vsz + 1, x2vec.dsz, padding_idx=0)
    # Swap the randomly-initialized table for the pretrained vectors.
    del lut.weight
    lut.weight = pretrained
    return lut
def pytorch_activation(name="relu"):
    """Map an activation name to a torch.nn module instance.

    Unknown names fall back to ReLU.
    """
    table = {
        "tanh": nn.Tanh,
        "hardtanh": nn.Hardtanh,
        "prelu": nn.PReLU,
        "sigmoid": nn.Sigmoid,
        "log_sigmoid": nn.LogSigmoid,
    }
    return table.get(name, nn.ReLU)()
def pytorch_conv1d(in_channels, out_channels, fsz, unif=0, padding=0, initializer=None):
    """Create a Conv1d with a choice of weight initialization.

    :param unif: if > 0, initialize weights uniformly in [-unif, unif]
    :param initializer: 'ortho', 'he'/'kaiming', or None (xavier/glorot)
    :return: the initialized nn.Conv1d
    """
    c = nn.Conv1d(in_channels, out_channels, fsz, padding=padding)
    if unif > 0:
        c.weight.data.uniform_(-unif, unif)
    elif initializer == "ortho":
        # Use the in-place init API; the non-underscore forms are deprecated
        # (the rest of this file already uses xavier_uniform_).
        nn.init.orthogonal_(c.weight)
    elif initializer == "he" or initializer == "kaiming":
        nn.init.kaiming_uniform_(c.weight)
    else:
        nn.init.xavier_uniform_(c.weight)
    return c
def pytorch_linear(in_sz, out_sz, unif=0, initializer=None):
    """Create a Linear layer with a choice of weight init and zeroed bias.

    :param unif: if > 0, initialize weights uniformly in [-unif, unif]
    :param initializer: 'ortho', 'he'/'kaiming', or None (xavier/glorot)
    :return: the initialized nn.Linear
    """
    l = nn.Linear(in_sz, out_sz)
    if unif > 0:
        l.weight.data.uniform_(-unif, unif)
    elif initializer == "ortho":
        # Use the in-place init API; the non-underscore forms are deprecated
        # (the rest of this file already uses xavier_uniform_).
        nn.init.orthogonal_(l.weight)
    elif initializer == "he" or initializer == "kaiming":
        nn.init.kaiming_uniform_(l.weight)
    else:
        nn.init.xavier_uniform_(l.weight)
    l.bias.data.zero_()
    return l
def pytorch_clone_module(module_, N):
    """Return a ModuleList of N independent deep copies of module_."""
    clones = [copy.deepcopy(module_) for _ in range(N)]
    return nn.ModuleList(clones)
def _cat_dir(h):
return torch.cat([h[0:h.size(0):2], h[1:h.size(0):2]], dim=-1)
class BiRNNWrapper(nn.Module):
    """Wrap a bidirectional RNN so hidden states come back direction-concatenated."""

    def __init__(self, rnn, nlayers):
        super(BiRNNWrapper, self).__init__()
        self.rnn = rnn
        self.nlayers = nlayers

    def forward(self, seq):
        output, hidden = self.rnn(seq)
        # LSTM hidden is an (h, c) tuple; GRU hidden is a single tensor.
        if isinstance(hidden, tuple):
            return output, tuple(_cat_dir(h) for h in hidden)
        return output, _cat_dir(hidden)
def pytorch_rnn(insz, hsz, rnntype, nlayers, dropout):
    """Factory for a full-sequence RNN: 'gru', 'blstm' (wrapped), or LSTM."""
    # torch warns if dropout is requested on a single-layer RNN.
    if nlayers == 1:
        dropout = 0.0
    if rnntype == 'gru':
        return torch.nn.GRU(insz, hsz, nlayers, dropout=dropout)
    if rnntype == 'blstm':
        base = torch.nn.LSTM(insz, hsz, nlayers, dropout=dropout, bidirectional=True)
        return BiRNNWrapper(base, nlayers)
    return torch.nn.LSTM(insz, hsz, nlayers, dropout=dropout)
class ParallelConv(nn.Module):
    """Parallel 1D convolutions over time with max-over-time pooling.

    Output feature size is the sum of the per-filter output counts and is
    exposed as `self.outsz`.
    """

    def __init__(self, insz, outsz, filtsz, activation_type, pdrop):
        super(ParallelConv, self).__init__()
        # A single int means the same number of filters for every filter size.
        outsz_filts = len(filtsz) * [outsz] if type(outsz) == int else outsz
        self.outsz = sum(outsz_filts)
        convs = []
        for i, fsz in enumerate(filtsz):
            block = nn.Sequential(
                nn.Conv1d(insz, outsz_filts[i], fsz, padding=fsz // 2),
                pytorch_activation(activation_type)
            )
            convs.append(block)
        # Register via ModuleList so the parameters are tracked.
        self.convs = nn.ModuleList(convs)
        self.conv_drop = nn.Dropout(pdrop)

    def forward(self, input_bct):
        # Input is [B, C, T]; pool each conv's output over the time dimension.
        pooled = [conv(input_bct).max(2)[0] for conv in self.convs]
        return self.conv_drop(torch.cat(pooled, 1))
class Highway(nn.Module):
    """Highway layer: a gated mix of a ReLU projection and the identity.

    The transform-gate bias is initialized to -2 so the layer initially
    passes its input through mostly unchanged.
    """

    def __init__(self, input_size):
        super(Highway, self).__init__()
        self.proj = nn.Linear(input_size, input_size)
        self.transform = nn.Linear(input_size, input_size)
        self.transform.bias.data.fill_(-2.0)

    def forward(self, input):
        proj_result = nn.functional.relu(self.proj(input))
        # torch.sigmoid replaces the deprecated nn.functional.sigmoid.
        proj_gate = torch.sigmoid(self.transform(input))
        gated = (proj_gate * proj_result) + ((1 - proj_gate) * input)
        return gated
class LayerNorm(nn.Module):
    """
    Applies Layer Normalization over the last dimension of the input:

    .. math::
        y = \\gamma \\frac{x - \\mathrm{E}[x]}{\\sqrt{\\mathrm{Var}[x]} + \\epsilon} + \\beta

    Fix: the bias term `b` (beta) was previously added into the DENOMINATOR
    (`(std + eps) + b`) rather than to the normalized output, and eps was
    applied twice. This now matches the reference implementations:
    http://nlp.seas.harvard.edu/2018/04/03/attention.html
    https://github.com/pytorch/pytorch/pull/2019
    """

    def __init__(self, num_features, eps=1e-6):
        super(LayerNorm, self).__init__()
        self.a = nn.Parameter(torch.ones(num_features))   # gain (gamma)
        self.b = nn.Parameter(torch.zeros(num_features))  # bias (beta)
        self.eps = eps

    def forward(self, x):
        mean = x.mean(-1, keepdim=True)
        # Unbiased standard deviation over the feature dimension.
        std = (x - mean).pow(2).sum(-1, keepdim=True).div(x.size(-1) - 1).sqrt()
        return self.a * (x - mean) / (std + self.eps) + self.b
def pytorch_lstm(insz, hsz, rnntype, nlayers, dropout, unif=0, batch_first=False, initializer=None):
    """Create a (possibly bidirectional) LSTM and report its output size.

    :param rnntype: bidirectional iff the name starts with 'b' (e.g. 'blstm')
    :param unif: if > 0, init all weights uniformly in [-unif, unif]
    :param initializer: 'ortho', 'he'/'kaiming', or None (xavier/glorot)
    :return: (lstm module, output feature size = ndir * hsz)
    """
    if nlayers == 1:
        # torch warns when dropout is given for a single-layer RNN.
        dropout = 0.0
    ndir = 2 if rnntype.startswith('b') else 1
    rnn = torch.nn.LSTM(insz, hsz, nlayers, dropout=dropout, bidirectional=True if ndir > 1 else False, batch_first=batch_first)#, bias=False)
    if unif > 0:
        for weight in rnn.parameters():
            weight.data.uniform_(-unif, unif)
    elif initializer == "ortho":
        # In-place init API; the non-underscore forms are deprecated
        # (consistent with the xavier_uniform_ branch below).
        nn.init.orthogonal_(rnn.weight_hh_l0)
        nn.init.orthogonal_(rnn.weight_ih_l0)
    elif initializer == "he" or initializer == "kaiming":
        nn.init.kaiming_uniform_(rnn.weight_hh_l0)
        nn.init.kaiming_uniform_(rnn.weight_ih_l0)
    else:
        nn.init.xavier_uniform_(rnn.weight_hh_l0)
        nn.init.xavier_uniform_(rnn.weight_ih_l0)
    return rnn, ndir*hsz
def pytorch_prepare_optimizer(model, **kwargs):
    """Build an optimizer (and optional LR scheduler) from keyword config.

    Recognized kwargs: `optim` ('sgd' default, 'adadelta', 'adam', 'rmsprop',
    'asgd'), `eta`/`lr` (learning rate), `mom` (SGD momentum), `decay_rate`
    and `decay_type` ('invtime' enables a StepLR scheduler).
    :return: (optimizer, scheduler-or-None)
    """
    mom = kwargs.get('mom', 0.9)
    optim = kwargs.get('optim', 'sgd')
    eta = kwargs.get('eta', kwargs.get('lr', 0.01))
    decay_rate = float(kwargs.get('decay_rate', 0.0))
    decay_type = kwargs.get('decay_type', None)
    factories = {
        'adadelta': torch.optim.Adadelta,
        'adam': torch.optim.Adam,
        'rmsprop': torch.optim.RMSprop,
        'asgd': torch.optim.ASGD,
    }
    if optim in factories:
        optimizer = factories[optim](model.parameters(), lr=eta)
    else:
        # Default: plain SGD with momentum.
        optimizer = torch.optim.SGD(model.parameters(), lr=eta, momentum=mom)
    scheduler = None
    if decay_rate > 0.0 and decay_type == 'invtime':
        scheduler = torch.optim.lr_scheduler.StepLR(optimizer, 1, gamma=decay_rate)
    return optimizer, scheduler
def append2seq(seq, modules):
    """Append modules to a Sequential, naming each '<repr>-<index>'.

    Dots in the repr are replaced with 'dot' since module names cannot
    contain '.'.
    """
    for idx, m in enumerate(modules):
        name = '%s-%d' % (str(m).replace('.', 'dot'), idx)
        seq.add_module(name, m)
def tensor_max(tensor):
    """Return the maximum element of the tensor (backend-abstraction helper)."""
    return torch.max(tensor)
def tensor_shape(tensor):
    """Return the tensor's shape (backend-abstraction helper)."""
    return tensor.shape
def tensor_reverse_2nd(tensor):
    """Reverse a tensor along its second dimension (dim=1)."""
    rev = torch.LongTensor(list(range(tensor.size(1) - 1, -1, -1)))
    return tensor.index_select(1, rev)
def long_0_tensor_alloc(dims, dtype=None):
    """Allocate a zero-filled LongTensor of the given dims (dtype accepted but unused)."""
    out = long_tensor_alloc(dims)
    out.zero_()
    return out
def long_tensor_alloc(dims, dtype=None):
    """Allocate an uninitialized LongTensor; dims may be an int or a sequence.

    The dtype parameter is accepted for API parity but unused.
    """
    scalar_like = type(dims) == int or len(dims) == 1
    if scalar_like:
        return torch.LongTensor(dims)
    return torch.LongTensor(*dims)
def prepare_src(model, tokens, mxlen=100):
    """Convert tokens to a column Variable of vocab ids, truncated at mxlen.

    Unknown non-empty tokens are printed and mapped to id 0.
    """
    src_vocab = model.get_src_vocab()
    n = min(len(tokens), mxlen)
    x = torch.LongTensor(n).zero_()
    for j in range(n):
        word = tokens[j]
        if word in src_vocab:
            x[j] = src_vocab[word]
        else:
            # Report out-of-vocabulary words (empty strings stay silent).
            if word != '':
                print(word)
            x[j] = 0
    return torch.autograd.Variable(x.view(-1, 1))
#def beam_decode_tokens(model, src_tokens, K, idx2word, mxlen=50):
# src = prepare_src(model, src_tokens, mxlen)
# paths, scores = beam_decode(model, src, K)
# path_str = []
# for j, path in enumerate(paths):
# path_str.append([idx2word[i] for i in path])
# return path_str, scores
#return beam_decode(model, src, K)
def show_examples_pytorch(model, es, rlut1, rlut2, embed2, mxlen, sample, prob_clip, max_examples, reverse):
    """Print source, reference, and model-predicted sentences for a random batch.

    :param es: an indexable example set yielding dicts with 'src', 'dst', 'src_len'
    :param rlut1, rlut2: reverse lookup tables (id -> word) for source/target
    :param max_examples: cap on rows shown; <= 0 means the whole batch
    NOTE(review): `embed2`, `mxlen`, `sample`, and `prob_clip` are unused here
    -- presumably kept for signature parity with other backends; confirm.
    """
    si = np.random.randint(0, len(es))
    batch_dict = es[si]
    src_array = batch_dict['src']
    tgt_array = batch_dict['dst']
    src_len = batch_dict['src_len']
    if max_examples > 0:
        max_examples = min(max_examples, src_array.size(0))
        src_array = src_array[0:max_examples]
        tgt_array = tgt_array[0:max_examples]
        src_len = src_len[0:max_examples]
    # TODO: fix this, check for GPU first
    src_array = src_array.cuda()
    for src_len_i, src_i, tgt_i in zip(src_len, src_array, tgt_array):
        print('========================================================================')
        # Re-wrap the scalar length as a 1-element tensor of the batch's type.
        src_len_i = torch.ones(1).fill_(src_len_i).type_as(src_len)
        sent = lookup_sentence(rlut1, src_i.cpu().numpy(), reverse=reverse)
        print('[OP] %s' % sent)
        sent = lookup_sentence(rlut2, tgt_i.cpu().numpy())
        print('[Actual] %s' % sent)
        src_dict = {'src': torch.autograd.Variable(src_i.view(1, -1), requires_grad=False),
                    'src_len': torch.autograd.Variable(src_len_i, requires_grad=False)}
        # presumably [0][0] selects the top hypothesis for this single example; confirm.
        dst_i = model.run(src_dict)[0][0]
        dst_i = [idx.item() for idx in dst_i]
        sent = lookup_sentence(rlut2, dst_i)
        print('Guess: %s' % sent)
        print('------------------------------------------------------------------------')
# Some of this code is borrowed from here:
# https://github.com/rguthrie3/DeepLearningForNLPInPytorch
def argmax(vec):
    # return the argmax as a python int
    # NOTE(review): this redefines (shadows) the earlier `argmax` in this
    # file; `idx.data[0]` relies on legacy 0-dim indexing -- `.item()` is
    # the modern equivalent.
    _, idx = torch.max(vec, 1)
    return idx.data[0]
# Compute log sum exp in a numerically stable way for the forward algorithm
def log_sum_exp(vec):
    # Numerically stable log(sum(exp(vec))) for a 1xN row vector: shift by
    # the max before exponentiating to avoid overflow.
    # NOTE(review): redefines (shadows) the earlier `log_sum_exp` above.
    max_score = vec[0, argmax(vec)]
    max_score_broadcast = max_score.view(1, -1).expand(1, vec.size()[1])
    return max_score + torch.log(torch.sum(torch.exp(vec - max_score_broadcast)))
def vec_log_sum_exp(vec, dim):
    """Vectorized, numerically stable log-sum-exp along `dim`.

    :param vec: input tensor
    :param dim: dimension to reduce over (kept with size 1 in the result)
    :return: tensor of log-sum-exp values with `dim` retained
    """
    maxes, _ = torch.max(vec, dim, keepdim=True)
    # Shift by the per-slice max so exp() cannot overflow.
    shifted = vec - maxes.expand_as(vec)
    return maxes + torch.log(torch.sum(torch.exp(shifted), dim, keepdim=True))
def crf_mask(vocab, span_type, s_idx, e_idx, pad_idx=None):
    """Create a CRF mask.
    Returns a Tensor with valid transitions as a 0 and invalid as a 1 for easy use with `masked_fill`
    """
    # Delegate to the numpy implementation, then invert via `== 0` --
    # presumably crf_m marks valid transitions with 1; confirm in baseline.utils.
    np_mask = crf_m(vocab, span_type, s_idx, e_idx, pad_idx=pad_idx)
    return torch.from_numpy(np_mask) == 0
class CRF(nn.Module):
    """First-order linear-chain CRF layer with batched loss, scoring, and
    Viterbi decode. All internal computation is time-first ([T, B, N]).
    """
    def __init__(self, n_tags, idxs=None, batch_first=True, vocab=None, span_type=None, pad_idx=None):
        """Initialize the object.
        :param n_tags: int The number of tags in your output (emission size)
        :param idxs: Tuple(int. int) The index of the start and stop symbol
            in emissions.
        :param vocab: The label vocab of the form vocab[string]: int
        :param span_type: The tagging span_type used. `IOB`, `IOB2`, or `IOBES`
        :param pad_idx: The index of the pad symbol in the vocab
        Note:
            if idxs is none then the CRF adds these symbols to the emission
            vectors and n_tags is assumed to be the number of output tags.
            if idxs is not none then the first element is assumed to be the
            start index and the second idx is assumed to be the end index. In
            this case n_tags is assumed to include the start and end symbols.
            if vocab is not None then a transition mask will be created that
            limits illegal transitions.
        """
        super(CRF, self).__init__()
        if idxs is None:
            # Reserve two extra tag slots for synthetic <GO>/<EOS> symbols.
            self.start_idx = n_tags
            self.end_idx = n_tags + 1
            self.n_tags = n_tags + 2
            self.add_ends = True
        else:
            self.start_idx, self.end_idx = idxs
            self.n_tags = n_tags
            self.add_ends = False
        self.span_type = None
        if vocab is not None:
            assert span_type is not None, "To mask transitions you need to provide a tagging span_type, choices are `IOB`, `BIO` (or `IOB2`), and `IOBES`"
            # If there weren't start and end idx provided we need to add them.
            if idxs is None:
                vocab = vocab.copy()
                vocab['<GO>'] = self.start_idx
                vocab['<EOS>'] = self.end_idx
            self.span_type = span_type
            # Buffer (not Parameter): moves with the module but is not trained.
            self.register_buffer('mask', crf_mask(vocab, span_type, self.start_idx, self.end_idx, pad_idx).unsqueeze(0))
        else:
            self.mask = None
        # Learned transition scores [1, N, N]; indexed as trans[to, from]
        # (see trans[tags[i + 1], tags[i]] in score_sentence).
        self.transitions_p = nn.Parameter(torch.Tensor(1, self.n_tags, self.n_tags).zero_())
        self.batch_first = batch_first

    def extra_repr(self):
        # Extra info shown in the module's repr().
        str_ = "n_tags=%d, batch_first=%s" % (self.n_tags, self.batch_first)
        if self.mask is not None:
            str_ += ", masked=True, span_type=%s" % self.span_type
        return str_

    @staticmethod
    def _prep_input(input_):
        # Append two emission columns of -1e4 for the synthetic <GO>/<EOS>
        # slots so those tags are effectively impossible to emit.
        ends = torch.Tensor(input_.size()[0], input_.size()[1], 2).fill_(-1e4).to(input_.device)
        return torch.cat([input_, ends], dim=2)

    @property
    def transitions(self):
        # Apply the invalid-transition mask on the fly so the underlying
        # parameter itself stays unmasked for gradient updates.
        if self.mask is not None:
            return self.transitions_p.masked_fill(self.mask, -1e4)
        return self.transitions_p

    def neg_log_loss(self, unary, tags, lengths):
        """Neg Log Loss with a Batched CRF.
        :param unary: torch.FloatTensor: [T, B, N] or [B, T, N]
        :param tags: torch.LongTensor: [T, B] or [B, T]
        :param lengths: torch.LongTensor: [B]
        :return: torch.FloatTensor: [B]
        """
        # Convert from [B, T, N] -> [T, B, N]
        if self.batch_first:
            unary = unary.transpose(0, 1)
            tags = tags.transpose(0, 1)
        if self.add_ends:
            unary = CRF._prep_input(unary)
        _, batch_size, _ = unary.size()
        min_lengths = torch.min(lengths)
        # NLL = log-partition (forward score) minus the gold-path score.
        fwd_score = self.forward(unary, lengths, batch_size, min_lengths)
        gold_score = self.score_sentence(unary, tags, lengths, batch_size, min_lengths)
        return fwd_score - gold_score

    def score_sentence(self, unary, tags, lengths, batch_size, min_length):
        """Score a batch of sentences.
        :param unary: torch.FloatTensor: [T, B, N]
        :param tags: torch.LongTensor: [T, B]
        :param lengths: torch.LongTensor: [B]
        :param batch_size: int: B
        :param min_length: torch.LongTensor: []
        :return: torch.FloatTensor: [B]
        """
        trans = self.transitions.squeeze(0)  # [N, N]
        batch_range = torch.arange(batch_size, dtype=torch.int64)  # [B]
        # Prepend <GO> so the first real tag's transition is scored too.
        start = torch.full((1, batch_size), self.start_idx, dtype=tags.dtype, device=tags.device)  # [1, B]
        tags = torch.cat([start, tags], 0)  # [T, B]
        scores = torch.zeros(batch_size, requires_grad=True).to(unary.device)  # [B]
        for i, unary_t in enumerate(unary):
            # Transition into tags[i + 1] plus that tag's emission score.
            new_scores = (
                trans[tags[i + 1], tags[i]] +
                unary_t[batch_range, tags[i + 1]]
            )
            if i >= min_length:
                # If we are farther along `T` than your length don't add to your score
                mask = (i >= lengths)
                scores = scores + new_scores.masked_fill(mask, 0)
            else:
                scores = scores + new_scores
        # Add stop tag
        scores = scores + trans[self.end_idx, tags[lengths, batch_range]]
        return scores

    def forward(self, unary, lengths, batch_size, min_length):
        """For CRF forward on a batch.
        :param unary: torch.FloatTensor: [T, B, N]
        :param lengths: torch.LongTensor: [B]
        :param batch_size: int: B
        :param min_length: torch.LongTensor: []
        :return: torch.FloatTensor: [B]
        """
        # alphas: [B, 1, N] -- everything impossible except starting at <GO>.
        alphas = torch.Tensor(batch_size, 1, self.n_tags).fill_(-1e4).to(unary.device)
        alphas[:, 0, self.start_idx] = 0.
        alphas.requires_grad = True
        trans = self.transitions  # [1, N, N]
        for i, unary_t in enumerate(unary):
            # unary_t: [B, N]
            unary_t = unary_t.unsqueeze(2)  # [B, N, 1]
            # Broadcast alphas along the rows of trans
            # Broadcast trans along the batch of alphas
            # [B, 1, N] + [1, N, N] -> [B, N, N]
            # Broadcast unary_t along the cols of result
            # [B, N, N] + [B, N, 1] -> [B, N, N]
            scores = alphas + trans + unary_t
            new_alphas = vec_log_sum_exp(scores, 2).transpose(1, 2)
            # If we haven't reached your length zero out old alpha and take new one.
            # If we are past your length, zero out new_alpha and keep old one.
            if i >= min_length:
                mask = (i < lengths).view(-1, 1, 1)
                alphas = alphas.masked_fill(mask, 0) + new_alphas.masked_fill(mask == 0, 0)
            else:
                alphas = new_alphas
        # Fold in the transition to <EOS> and reduce to the log-partition.
        terminal_vars = alphas + trans[:, self.end_idx]
        alphas = vec_log_sum_exp(terminal_vars, 2)
        return alphas.squeeze()

    def decode(self, unary, lengths):
        """Do Viterbi decode on a batch.
        :param unary: torch.FloatTensor: [T, B, N] or [B, T, N]
        :param lengths: torch.LongTensor: [B]
        :return: List[torch.LongTensor]: [B] the paths
        :return: torch.FloatTensor: [B] the path score
        """
        if self.batch_first:
            unary = unary.transpose(0, 1)
        if self.add_ends:
            unary = CRF._prep_input(unary)
        seq_len, batch_size, _ = unary.size()
        min_length = torch.min(lengths)
        batch_range = torch.arange(batch_size, dtype=torch.int64)
        backpointers = []
        # alphas: [B, 1, N]
        alphas = torch.Tensor(batch_size, 1, self.n_tags).fill_(-1e4).to(unary.device)
        alphas[:, 0, self.start_idx] = 0
        alphas.requires_grad = True
        trans = self.transitions  # [1, N, N]
        for i, unary_t in enumerate(unary):
            # Broadcast alphas along the rows of trans and trans along the batch of alphas
            next_tag_var = alphas + trans  # [B, 1, N] + [1, N, N] -> [B, N, N]
            viterbi, best_tag_ids = torch.max(next_tag_var, 2)  # [B, N]
            backpointers.append(best_tag_ids.data)
            new_alphas = viterbi + unary_t  # [B, N] + [B, N]
            new_alphas.unsqueeze_(1)  # Prep for next round
            # If we haven't reached your length zero out old alpha and take new one.
            # If we are past your length, zero out new_alpha and keep old one.
            if i >= min_length:
                mask = (i < lengths).view(-1, 1, 1)
                alphas = alphas.masked_fill(mask, 0) + new_alphas.masked_fill(mask == 0, 0)
            else:
                alphas = new_alphas
        # Add end tag
        terminal_var = alphas.squeeze(1) + trans[:, self.end_idx]
        _, best_tag_id = torch.max(terminal_var, 1)
        path_score = terminal_var[batch_range, best_tag_id]  # Select best_tag from each batch
        best_path = [best_tag_id]
        # Flip lengths
        rev_len = seq_len - lengths - 1
        for i, backpointer_t in enumerate(reversed(backpointers)):
            # Get new best tag candidate
            new_best_tag_id = backpointer_t[batch_range, best_tag_id]
            # We are going backwards now, if you passed your flipped length then you aren't in your real results yet
            mask = (i > rev_len)
            best_tag_id = best_tag_id.masked_fill(mask, 0) + new_best_tag_id.masked_fill(mask == 0, 0)
            best_path.append(best_tag_id)
        # Drop the extra initial step introduced by the start transition.
        _ = best_path.pop()
        best_path.reverse()
        best_path = torch.stack(best_path)
        # Return list of paths
        paths = []
        best_path = best_path.transpose(0, 1)
        for path, length in zip(best_path, lengths):
            paths.append(path[:length])
        return paths, path_score.squeeze(0)
Added a class to wrap LSTM encoding in PyTorch.
import torch
import numpy as np
from baseline.utils import lookup_sentence, get_version
from baseline.utils import crf_mask as crf_m
from torch.autograd import Variable
import torch.autograd
import torch.nn as nn
import torch.nn.functional
import math
import copy
PYT_MAJOR_VERSION = get_version(torch)
def sequence_mask(lengths):
lens = lengths.cpu()
max_len = torch.max(lens)
# 1 x T
row = torch.arange(0, max_len.item()).type_as(lens).view(1, -1)
# B x 1
col = lens.view(-1, 1)
# Broadcast to B x T, compares increasing number to max
mask = row < col
return mask
def classify_bt(model, batch_time):
tensor = torch.from_numpy(batch_time) if type(batch_time) == np.ndarray else batch_time
probs = model(torch.autograd.Variable(tensor, requires_grad=False).cuda()).exp().data
probs.div_(torch.sum(probs))
results = []
batchsz = probs.size(0)
for b in range(batchsz):
outcomes = [(model.labels[id_i], prob_i) for id_i, prob_i in enumerate(probs[b])]
results.append(outcomes)
return results
def predict_seq_bt(model, x, xch, lengths):
x_t = torch.from_numpy(x) if type(x) == np.ndarray else x
xch_t = torch.from_numpy(xch) if type(xch) == np.ndarray else xch
len_v = torch.from_numpy(lengths) if type(lengths) == np.ndarray else lengths
x_v = torch.autograd.Variable(x_t, requires_grad=False).cuda()
xch_v = torch.autograd.Variable(xch_t, requires_grad=False).cuda()
#len_v = torch.autograd.Variable(len_t, requires_grad=False)
results = model((x_v, xch_v, len_v))
#print(results)
#if type(x) == np.ndarray:
# # results = results.cpu().numpy()
# # Fix this to not be greedy
# results = np.argmax(results, -1)
return results
def to_scalar(var):
# returns a python float
return var.view(-1).data.tolist()[0]
def argmax(vec):
# return the argmax as a python int
_, idx = torch.max(vec, 1)
return to_scalar(idx)
def log_sum_exp(vec):
max_score = vec[0, argmax(vec)]
max_score_broadcast = max_score.view(1, -1).expand(1, vec.size()[1])
return max_score + torch.log(torch.sum(torch.exp(vec - max_score_broadcast)))
class SequenceCriterion(nn.Module):
def __init__(self, LossFn=nn.NLLLoss):
super(SequenceCriterion, self).__init__()
self.crit = LossFn(ignore_index=0, size_average=False)
def forward(self, inputs, targets):
# This is BxT, which is what we want!
total_sz = targets.nelement()
loss = self.crit(inputs.view(total_sz, -1), targets.view(total_sz))
return loss
class StackedLSTMCell(nn.Module):
def __init__(self, num_layers, input_size, rnn_size, dropout):
super(StackedLSTMCell, self).__init__()
self.dropout = nn.Dropout(dropout)
self.num_layers = num_layers
self.layers = nn.ModuleList()
for i in range(num_layers):
self.layers.append(nn.LSTMCell(input_size=input_size, hidden_size=rnn_size, bias=False))
input_size = rnn_size
def forward(self, input, hidden):
h_0, c_0 = hidden
hs, cs = [], []
for i, layer in enumerate(self.layers):
h_i, c_i = layer(input, (h_0[i], c_0[i]))
input = h_i
if i != self.num_layers - 1:
input = self.dropout(input)
hs += [h_i]
cs += [c_i]
hs = torch.stack(hs)
cs = torch.stack(cs)
return input, (hs, cs)
class StackedGRUCell(nn.Module):
def __init__(self, num_layers, input_size, rnn_size, dropout):
super(StackedGRUCell, self).__init__()
self.dropout = nn.Dropout(dropout)
self.num_layers = num_layers
self.layers = nn.ModuleList()
for i in range(num_layers):
self.layers.append(nn.GRUCell(input_size=input_size, hidden_size=rnn_size))
input_size = rnn_size
def forward(self, input, hidden):
h_0 = hidden
hs = []
for i, layer in enumerate(self.layers):
h_i = layer(input, (h_0[i]))
input = h_i
if i != self.num_layers:
input = self.dropout(input)
hs += [h_i]
hs = torch.stack(hs)
return input, hs
def pytorch_rnn_cell(insz, hsz, rnntype, nlayers, dropout):
if rnntype == 'gru':
rnn = StackedGRUCell(nlayers, insz, hsz, dropout)
else:
rnn = StackedLSTMCell(nlayers, insz, hsz, dropout)
return rnn
def pytorch_embedding(x2vec, finetune=True):
dsz = x2vec.dsz
lut = nn.Embedding(x2vec.vsz + 1, dsz, padding_idx=0)
del lut.weight
lut.weight = nn.Parameter(torch.FloatTensor(x2vec.weights),
requires_grad=finetune)
return lut
def pytorch_activation(name="relu"):
if name == "tanh":
return nn.Tanh()
if name == "hardtanh":
return nn.Hardtanh()
if name == "prelu":
return nn.PReLU()
if name == "sigmoid":
return nn.Sigmoid()
if name == "log_sigmoid":
return nn.LogSigmoid()
return nn.ReLU()
def pytorch_conv1d(in_channels, out_channels, fsz, unif=0, padding=0, initializer=None):
c = nn.Conv1d(in_channels, out_channels, fsz, padding=padding)
if unif > 0:
c.weight.data.uniform_(-unif, unif)
elif initializer == "ortho":
nn.init.orthogonal(c.weight)
elif initializer == "he" or initializer == "kaiming":
nn.init.kaiming_uniform(c.weight)
else:
nn.init.xavier_uniform_(c.weight)
return c
def pytorch_linear(in_sz, out_sz, unif=0, initializer=None):
l = nn.Linear(in_sz, out_sz)
if unif > 0:
l.weight.data.uniform_(-unif, unif)
elif initializer == "ortho":
nn.init.orthogonal(l.weight)
elif initializer == "he" or initializer == "kaiming":
nn.init.kaiming_uniform(l.weight)
else:
nn.init.xavier_uniform_(l.weight)
l.bias.data.zero_()
return l
def pytorch_clone_module(module_, N):
return nn.ModuleList([copy.deepcopy(module_) for _ in range(N)])
def _cat_dir(h):
return torch.cat([h[0:h.size(0):2], h[1:h.size(0):2]], dim=-1)
class BiRNNWrapper(nn.Module):
def __init__(self, rnn, nlayers):
super(BiRNNWrapper, self).__init__()
self.rnn = rnn
self.nlayers = nlayers
def forward(self, seq):
output, hidden = self.rnn(seq)
if isinstance(hidden, tuple):
hidden = tuple(_cat_dir(h) for h in hidden)
else:
hidden = _cat_dir(hidden)
return output, hidden
def pytorch_rnn(insz, hsz, rnntype, nlayers, dropout):
if nlayers == 1:
dropout = 0.0
if rnntype == 'gru':
rnn = torch.nn.GRU(insz, hsz, nlayers, dropout=dropout)
elif rnntype == 'blstm':
rnn = torch.nn.LSTM(insz, hsz, nlayers, dropout=dropout, bidirectional=True)
rnn = BiRNNWrapper(rnn, nlayers)
else:
rnn = torch.nn.LSTM(insz, hsz, nlayers, dropout=dropout)
return rnn
class ParallelConv(nn.Module):
def __init__(self, insz, outsz, filtsz, activation_type, pdrop):
super(ParallelConv, self).__init__()
convs = []
outsz_filts = outsz
if type(outsz) == int:
outsz_filts = len(filtsz) * [outsz]
self.outsz = sum(outsz_filts)
for i, fsz in enumerate(filtsz):
pad = fsz//2
conv = nn.Sequential(
nn.Conv1d(insz, outsz_filts[i], fsz, padding=pad),
pytorch_activation(activation_type)
)
convs.append(conv)
# Add the module so its managed correctly
self.convs = nn.ModuleList(convs)
self.conv_drop = nn.Dropout(pdrop)
def forward(self, input_bct):
mots = []
for conv in self.convs:
# In Conv1d, data BxCxT, max over time
conv_out = conv(input_bct)
mot, _ = conv_out.max(2)
mots.append(mot)
mots = torch.cat(mots, 1)
return self.conv_drop(mots)
class Highway(nn.Module):
def __init__(self,
input_size):
super(Highway, self).__init__()
self.proj = nn.Linear(input_size, input_size)
self.transform = nn.Linear(input_size, input_size)
self.transform.bias.data.fill_(-2.0)
def forward(self, input):
proj_result = nn.functional.relu(self.proj(input))
proj_gate = nn.functional.sigmoid(self.transform(input))
gated = (proj_gate * proj_result) + ((1 - proj_gate) * input)
return gated
class LayerNorm(nn.Module):
"""
Applies Layer Normalization over a mini-batch of inputs as described in
the paper `Layer Normalization`_ .
.. math::
y = \frac{x - \mathrm{E}[x]}{ \sqrt{\mathrm{Var}[x]} + \epsilon} * \gamma + \beta
This is provided in pytorch's master, and can be replaced in the near future.
For the time, being, this code is adapted from:
http://nlp.seas.harvard.edu/2018/04/03/attention.html
https://github.com/pytorch/pytorch/pull/2019
"""
def __init__(self, num_features, eps=1e-6):
super(LayerNorm, self).__init__()
self.a = nn.Parameter(torch.ones(num_features))
self.b = nn.Parameter(torch.zeros(num_features))
self.eps = eps
def forward(self, x):
mean = x.mean(-1, keepdim=True)
std = ((x - mean).pow(2).sum(-1, keepdim=True).div(x.size(-1) - 1) + self.eps).sqrt()
d = (std + self.eps) + self.b
return self.a * (x - mean) / d
def pytorch_lstm(insz, hsz, rnntype, nlayers, dropout, unif=0, batch_first=False, initializer=None):
if nlayers == 1:
dropout = 0.0
ndir = 2 if rnntype.startswith('b') else 1
#print('ndir: %d, rnntype: %s, nlayers: %d, dropout: %.2f, unif: %.2f' % (ndir, rnntype, nlayers, dropout, unif))
rnn = torch.nn.LSTM(insz, hsz, nlayers, dropout=dropout, bidirectional=True if ndir > 1 else False, batch_first=batch_first)#, bias=False)
if unif > 0:
for weight in rnn.parameters():
weight.data.uniform_(-unif, unif)
elif initializer == "ortho":
nn.init.orthogonal(rnn.weight_hh_l0)
nn.init.orthogonal(rnn.weight_ih_l0)
elif initializer == "he" or initializer == "kaiming":
nn.init.kaiming_uniform(rnn.weight_hh_l0)
nn.init.kaiming_uniform(rnn.weight_ih_l0)
else:
nn.init.xavier_uniform_(rnn.weight_hh_l0)
nn.init.xavier_uniform_(rnn.weight_ih_l0)
return rnn, ndir*hsz
class LSTMEncoder(nn.Module):
    """Encode a time-first ([T, B, C]) sequence with a (bi)LSTM, packing by
    true lengths so padded steps do not contribute to the computation.

    NOTE(review): with `residual=True` the input feature size must equal the
    LSTM output size (`self.outsz`, which is 2*hsz for a bidirectional
    rnntype) or the addition will fail -- confirm callers.
    """
    def __init__(self, insz, hsz, rnntype, nlayers, dropout, residual=False, unif=0, initializer=None):
        super(LSTMEncoder, self).__init__()
        self.residual = residual
        # pytorch_lstm returns (module, output feature size); batch_first is fixed to False.
        self.rnn, self.outsz = pytorch_lstm(insz, hsz, rnntype, nlayers, dropout, unif, False, initializer)
    def forward(self, tbc, lengths):
        # Pack with the true lengths so the LSTM skips pad positions.
        packed = torch.nn.utils.rnn.pack_padded_sequence(tbc, lengths.tolist())
        output, hidden = self.rnn(packed)
        output, _ = torch.nn.utils.rnn.pad_packed_sequence(output)
        return output + tbc if self.residual else output
def pytorch_prepare_optimizer(model, **kwargs):
mom = kwargs.get('mom', 0.9)
optim = kwargs.get('optim', 'sgd')
eta = kwargs.get('eta', kwargs.get('lr', 0.01))
decay_rate = float(kwargs.get('decay_rate', 0.0))
decay_type = kwargs.get('decay_type', None)
if optim == 'adadelta':
optimizer = torch.optim.Adadelta(model.parameters(), lr=eta)
elif optim == 'adam':
optimizer = torch.optim.Adam(model.parameters(), lr=eta)
elif optim == 'rmsprop':
optimizer = torch.optim.RMSprop(model.parameters(), lr=eta)
elif optim == 'asgd':
optimizer = torch.optim.ASGD(model.parameters(), lr=eta)
else:
optimizer = torch.optim.SGD(model.parameters(), lr=eta, momentum=mom)
scheduler = None
if decay_rate > 0.0 and decay_type is not None:
if decay_type == 'invtime':
scheduler = torch.optim.lr_scheduler.StepLR(optimizer, 1, gamma=decay_rate)
return optimizer, scheduler
def append2seq(seq, modules):
for i, module in enumerate(modules):
seq.add_module('%s-%d' % (str(module).replace('.', 'dot'), i), module)
def tensor_max(tensor):
return tensor.max()
def tensor_shape(tensor):
return tensor.size()
def tensor_reverse_2nd(tensor):
idx = torch.LongTensor([i for i in range(tensor.size(1)-1, -1, -1)])
return tensor.index_select(1, idx)
def long_0_tensor_alloc(dims, dtype=None):
lt = long_tensor_alloc(dims)
lt.zero_()
return lt
def long_tensor_alloc(dims, dtype=None):
if type(dims) == int or len(dims) == 1:
return torch.LongTensor(dims)
return torch.LongTensor(*dims)
def prepare_src(model, tokens, mxlen=100):
    """Convert a token list into a `[T, 1]` LongTensor Variable of vocab ids.

    Tokens beyond `mxlen` are dropped. Out-of-vocabulary tokens map to id 0;
    non-empty OOV tokens are printed as a diagnostic.

    :param model: Object exposing `get_src_vocab()` -> dict[str, int]
    :param tokens: Sequence of token strings
    :param mxlen: Maximum number of tokens to keep
    :return: `torch.autograd.Variable` of shape [T, 1]
    """
    src_vocab = model.get_src_vocab()
    n = min(len(tokens), mxlen)
    x = torch.LongTensor(n).zero_()
    for j in range(n):
        word = tokens[j]
        if word in src_vocab:
            x[j] = src_vocab[word]
        else:
            # Unknown token: report it (unless it is the empty string) and
            # fall back to id 0.
            if word != '':
                print(word)
            x[j] = 0
    return torch.autograd.Variable(x.view(-1, 1))
#def beam_decode_tokens(model, src_tokens, K, idx2word, mxlen=50):
# src = prepare_src(model, src_tokens, mxlen)
# paths, scores = beam_decode(model, src, K)
# path_str = []
# for j, path in enumerate(paths):
# path_str.append([idx2word[i] for i in path])
# return path_str, scores
#return beam_decode(model, src, K)
def show_examples_pytorch(model, es, rlut1, rlut2, embed2, mxlen, sample, prob_clip, max_examples, reverse):
    """Print source/target/predicted sentences for one randomly chosen batch.

    Picks a random batch from `es`, optionally truncates it to `max_examples`
    rows, and for each row prints the source sentence, the gold target, and
    the model's decoded guess.

    :param model: Seq2seq model exposing `run(dict) -> decoded batches`
    :param es: Indexable dataset of batch dicts with 'src', 'dst', 'src_len'
    :param rlut1: Reverse lookup table (id -> word) for the source side
    :param rlut2: Reverse lookup table (id -> word) for the target side
    :param embed2: Unused here — kept for interface parity with other backends
    :param mxlen: Unused here — kept for interface parity
    :param sample: Unused here — kept for interface parity
    :param prob_clip: Unused here — kept for interface parity
    :param max_examples: If > 0, cap the number of rows shown
    :param reverse: Passed to `lookup_sentence` for the source side
    """
    si = np.random.randint(0, len(es))
    batch_dict = es[si]
    src_array = batch_dict['src']
    tgt_array = batch_dict['dst']
    src_len = batch_dict['src_len']
    if max_examples > 0:
        max_examples = min(max_examples, src_array.size(0))
        src_array = src_array[0:max_examples]
        tgt_array = tgt_array[0:max_examples]
        src_len = src_len[0:max_examples]
    # TODO: fix this, check for GPU first
    src_array = src_array.cuda()
    for src_len_i, src_i, tgt_i in zip(src_len, src_array, tgt_array):
        print('========================================================================')
        # Re-wrap the scalar length as a 1-element tensor of the same type as src_len.
        src_len_i = torch.ones(1).fill_(src_len_i).type_as(src_len)
        sent = lookup_sentence(rlut1, src_i.cpu().numpy(), reverse=reverse)
        print('[OP] %s' % sent)
        sent = lookup_sentence(rlut2, tgt_i.cpu().numpy())
        print('[Actual] %s' % sent)
        # The model consumes a batch of size 1: [1, T] source plus its length.
        src_dict = {'src': torch.autograd.Variable(src_i.view(1, -1), requires_grad=False),
                    'src_len': torch.autograd.Variable(src_len_i, requires_grad=False)}
        # run() returns nested batches; [0][0] selects the first hypothesis of the first row.
        dst_i = model.run(src_dict)[0][0]
        dst_i = [idx.item() for idx in dst_i]
        sent = lookup_sentence(rlut2, dst_i)
        print('Guess: %s' % sent)
        print('------------------------------------------------------------------------')
# Some of this code is borrowed from here:
# https://github.com/rguthrie3/DeepLearningForNLPInPytorch
def argmax(vec):
    """Return the argmax of a `[1, N]` tensor as a Python int.

    The previous `idx.data[0]` returns a 0-dim tensor on modern PyTorch,
    contradicting the documented contract; `.item()` always yields an int.
    """
    _, idx = torch.max(vec, 1)
    return idx.item()
# Compute log sum exp in a numerically stable way for the forward algorithm
def log_sum_exp(vec):
    """Compute log(sum(exp(vec))) for a `[1, N]` tensor, numerically stably.

    Subtracting the max before exponentiating avoids overflow. The previous
    version routed through the non-vectorized `argmax` helper just to find the
    max; `vec.max()` does the same directly (for the [1, N] inputs this is
    called with).

    :param vec: A [1, N] score tensor
    :return: 0-dim tensor holding the log-sum-exp
    """
    max_score = vec.max()
    return max_score + torch.log(torch.sum(torch.exp(vec - max_score)))
def vec_log_sum_exp(vec, dim):
    """Vectorized, numerically stable log-sum-exp along `dim`.

    :param vec: Input tensor
    :param dim: Dimension to reduce over
    :return: Tensor with `dim` kept as size 1 (keepdim semantics)
    """
    max_scores, _ = torch.max(vec, dim, keepdim=True)
    # Shift by the per-slice max so exp() cannot overflow.
    shifted = vec - max_scores.expand_as(vec)
    return max_scores + shifted.exp().sum(dim, keepdim=True).log()
def crf_mask(vocab, span_type, s_idx, e_idx, pad_idx=None):
    """Create a CRF transition mask.

    Returns a Tensor with valid transitions as a 0 and invalid as a 1 for
    easy use with `masked_fill`.

    :param vocab: Label vocab of the form vocab[string]: int
    :param span_type: Tagging scheme (`IOB`, `IOB2`, or `IOBES`)
    :param s_idx: Index of the start symbol
    :param e_idx: Index of the end symbol
    :param pad_idx: Optional index of the pad symbol
    """
    numpy_mask = crf_m(vocab, span_type, s_idx, e_idx, pad_idx=pad_idx)
    return torch.from_numpy(numpy_mask) == 0
class CRF(nn.Module):
    """Linear-chain CRF layer with batched forward, scoring, and Viterbi decode.

    Works on emission ("unary") scores of shape [T, B, N] (or [B, T, N] when
    `batch_first`). The value -1e4 is used throughout as an effective -inf for
    impossible states/transitions.
    """

    def __init__(self, n_tags, idxs=None, batch_first=True, vocab=None, span_type=None, pad_idx=None):
        """Initialize the object.
        :param n_tags: int The number of tags in your output (emission size)
        :param idxs: Tuple(int. int) The index of the start and stop symbol
            in emissions.
        :param vocab: The label vocab of the form vocab[string]: int
        :param span_type: The tagging span_type used. `IOB`, `IOB2`, or `IOBES`
        :param pad_idx: The index of the pad symbol in the vocab
        Note:
            if idxs is none then the CRF adds these symbols to the emission
            vectors and n_tags is assumed to be the number of output tags.
            if idxs is not none then the first element is assumed to be the
            start index and the second idx is assumed to be the end index. In
            this case n_tags is assumed to include the start and end symbols.
            if vocab is not None then a transition mask will be created that
            limits illegal transitions.
        """
        super(CRF, self).__init__()
        if idxs is None:
            # No start/end provided: reserve two extra tag slots for them.
            self.start_idx = n_tags
            self.end_idx = n_tags + 1
            self.n_tags = n_tags + 2
            self.add_ends = True
        else:
            self.start_idx, self.end_idx = idxs
            self.n_tags = n_tags
            self.add_ends = False
        self.span_type = None
        if vocab is not None:
            assert span_type is not None, "To mask transitions you need to provide a tagging span_type, choices are `IOB`, `BIO` (or `IOB2`), and `IOBES`"
            # If there weren't start and end idx provided we need to add them.
            if idxs is None:
                vocab = vocab.copy()
                vocab['<GO>'] = self.start_idx
                vocab['<EOS>'] = self.end_idx
            self.span_type = span_type
            # Buffer (not Parameter): moves with the module across devices but is not trained.
            self.register_buffer('mask', crf_mask(vocab, span_type, self.start_idx, self.end_idx, pad_idx).unsqueeze(0))
        else:
            self.mask = None
        # Learned transition scores, shaped [1, N, N] for broadcasting over the batch.
        self.transitions_p = nn.Parameter(torch.Tensor(1, self.n_tags, self.n_tags).zero_())
        self.batch_first = batch_first

    def extra_repr(self):
        """Summary string shown inside the module's repr()."""
        str_ = "n_tags=%d, batch_first=%s" % (self.n_tags, self.batch_first)
        if self.mask is not None:
            str_ += ", masked=True, span_type=%s" % self.span_type
        return str_

    @staticmethod
    def _prep_input(input_):
        """Append two -1e4 (effectively -inf) emission columns for the start/end tags."""
        ends = torch.Tensor(input_.size()[0], input_.size()[1], 2).fill_(-1e4).to(input_.device)
        return torch.cat([input_, ends], dim=2)

    @property
    def transitions(self):
        """Transition scores with illegal transitions (if masked) forced to -1e4."""
        if self.mask is not None:
            return self.transitions_p.masked_fill(self.mask, -1e4)
        return self.transitions_p

    def neg_log_loss(self, unary, tags, lengths):
        """Neg Log Loss with a Batched CRF.
        :param unary: torch.FloatTensor: [T, B, N] or [B, T, N]
        :param tags: torch.LongTensor: [T, B] or [B, T]
        :param lengths: torch.LongTensor: [B]
        :return: torch.FloatTensor: [B]
        """
        # Convert from [B, T, N] -> [T, B, N]
        if self.batch_first:
            unary = unary.transpose(0, 1)
            tags = tags.transpose(0, 1)
        if self.add_ends:
            unary = CRF._prep_input(unary)
        _, batch_size, _ = unary.size()
        min_lengths = torch.min(lengths)
        # Loss = log partition (forward) minus score of the gold path.
        fwd_score = self.forward(unary, lengths, batch_size, min_lengths)
        gold_score = self.score_sentence(unary, tags, lengths, batch_size, min_lengths)
        return fwd_score - gold_score

    def score_sentence(self, unary, tags, lengths, batch_size, min_length):
        """Score a batch of sentences.
        :param unary: torch.FloatTensor: [T, B, N]
        :param tags: torch.LongTensor: [T, B]
        :param lengths: torch.LongTensor: [B]
        :param batch_size: int: B
        :param min_length: torch.LongTensor: []
        :return: torch.FloatTensor: [B]
        """
        trans = self.transitions.squeeze(0)  # [N, N]
        batch_range = torch.arange(batch_size, dtype=torch.int64)  # [B]
        # Prepend the start tag so tags[i] / tags[i + 1] give (prev, curr) pairs.
        start = torch.full((1, batch_size), self.start_idx, dtype=tags.dtype, device=tags.device)  # [1, B]
        tags = torch.cat([start, tags], 0)  # [T, B]
        scores = torch.zeros(batch_size, requires_grad=True).to(unary.device)  # [B]
        for i, unary_t in enumerate(unary):
            # Per-step score = transition(prev -> curr) + emission(curr).
            new_scores = (
                trans[tags[i + 1], tags[i]] +
                unary_t[batch_range, tags[i + 1]]
            )
            if i >= min_length:
                # If we are farther along `T` than your length don't add to your score
                mask = (i >= lengths)
                scores = scores + new_scores.masked_fill(mask, 0)
            else:
                scores = scores + new_scores
        # Add stop tag: transition from each sequence's last real tag to the end tag.
        scores = scores + trans[self.end_idx, tags[lengths, batch_range]]
        return scores

    def forward(self, unary, lengths, batch_size, min_length):
        """For CRF forward on a batch.
        :param unary: torch.FloatTensor: [T, B, N]
        :param lengths: torch.LongTensor: [B]
        :param batch_size: int: B
        :param min_length: torch.LongTensor: []
        :return: torch.FloatTensor: [B]
        """
        # alphas: [B, 1, N]
        alphas = torch.Tensor(batch_size, 1, self.n_tags).fill_(-1e4).to(unary.device)
        alphas[:, 0, self.start_idx] = 0.
        alphas.requires_grad = True
        trans = self.transitions  # [1, N, N]
        for i, unary_t in enumerate(unary):
            # unary_t: [B, N]
            unary_t = unary_t.unsqueeze(2)  # [B, N, 1]
            # Broadcast alphas along the rows of trans
            # Broadcast trans along the batch of alphas
            # [B, 1, N] + [1, N, N] -> [B, N, N]
            # Broadcast unary_t along the cols of result
            # [B, N, N] + [B, N, 1] -> [B, N, N]
            scores = alphas + trans + unary_t
            new_alphas = vec_log_sum_exp(scores, 2).transpose(1, 2)
            # If we haven't reached your length zero out old alpha and take new one.
            # If we are past your length, zero out new_alpha and keep old one.
            if i >= min_length:
                mask = (i < lengths).view(-1, 1, 1)
                alphas = alphas.masked_fill(mask, 0) + new_alphas.masked_fill(mask == 0, 0)
            else:
                alphas = new_alphas
        # Fold in the transition to the end tag and reduce to the log partition.
        terminal_vars = alphas + trans[:, self.end_idx]
        alphas = vec_log_sum_exp(terminal_vars, 2)
        return alphas.squeeze()

    def decode(self, unary, lengths):
        """Do Viterbi decode on a batch.
        :param unary: torch.FloatTensor: [T, B, N] or [B, T, N]
        :param lengths: torch.LongTensor: [B]
        :return: List[torch.LongTensor]: [B] the paths
        :return: torch.FloatTensor: [B] the path score
        """
        if self.batch_first:
            unary = unary.transpose(0, 1)
        if self.add_ends:
            unary = CRF._prep_input(unary)
        seq_len, batch_size, _ = unary.size()
        min_length = torch.min(lengths)
        batch_range = torch.arange(batch_size, dtype=torch.int64)
        backpointers = []
        # alphas: [B, 1, N]
        alphas = torch.Tensor(batch_size, 1, self.n_tags).fill_(-1e4).to(unary.device)
        alphas[:, 0, self.start_idx] = 0
        alphas.requires_grad = True
        trans = self.transitions  # [1, N, N]
        for i, unary_t in enumerate(unary):
            # Broadcast alphas along the rows of trans and trans along the batch of alphas
            next_tag_var = alphas + trans  # [B, 1, N] + [1, N, N] -> [B, N, N]
            viterbi, best_tag_ids = torch.max(next_tag_var, 2)  # [B, N]
            backpointers.append(best_tag_ids.data)
            new_alphas = viterbi + unary_t  # [B, N] + [B, N]
            new_alphas.unsqueeze_(1)  # Prep for next round
            # If we haven't reached your length zero out old alpha and take new one.
            # If we are past your length, zero out new_alpha and keep old one.
            if i >= min_length:
                mask = (i < lengths).view(-1, 1, 1)
                alphas = alphas.masked_fill(mask, 0) + new_alphas.masked_fill(mask == 0, 0)
            else:
                alphas = new_alphas
        # Add end tag
        terminal_var = alphas.squeeze(1) + trans[:, self.end_idx]
        _, best_tag_id = torch.max(terminal_var, 1)
        path_score = terminal_var[batch_range, best_tag_id]  # Select best_tag from each batch
        best_path = [best_tag_id]
        # Flip lengths
        rev_len = seq_len - lengths - 1
        for i, backpointer_t in enumerate(reversed(backpointers)):
            # Get new best tag candidate
            new_best_tag_id = backpointer_t[batch_range, best_tag_id]
            # We are going backwards now, if you passed your flipped length then you aren't in your real results yet
            mask = (i > rev_len)
            best_tag_id = best_tag_id.masked_fill(mask, 0) + new_best_tag_id.masked_fill(mask == 0, 0)
            best_path.append(best_tag_id)
        # The last entry is the start tag bookkeeping slot — drop it.
        _ = best_path.pop()
        best_path.reverse()
        best_path = torch.stack(best_path)
        # Return list of paths
        paths = []
        best_path = best_path.transpose(0, 1)
        for path, length in zip(best_path, lengths):
            paths.append(path[:length])
        return paths, path_score.squeeze(0)
|
from __future__ import annotations
import json
import time
from typing import Any, Callable, NamedTuple
from collections import namedtuple
from enum import Enum
from pyinstrument import processors
from pyinstrument.frame import BaseFrame
from pyinstrument.renderers.base import ProcessorList, Renderer
from pyinstrument.session import Session
# pyright: strict
# json.encoder.encode_basestring produces a quoted, escaped JSON string.
encode_str: Callable[[str], str] = json.encoder.encode_basestring  # type: ignore


def encode_bool(a_bool: bool):
    """Serialize a Python bool as a JSON boolean literal."""
    if a_bool:
        return "true"
    return "false"
class SpeedscopeFrame(NamedTuple):
    """
    Named tuple to store data needed for speedscope's concept of a
    frame, hereafter referred to as a "speedscope frame", as opposed to
    a "pyinstrument frame". This type must be hashable in order to use
    it as a dictionary key; a dictionary will be used to track unique
    speedscope frames.
    """

    # Display name of the frame (function or method name).
    name: str
    # Source file path the frame belongs to.
    file: str
    # Line number within `file`.
    line: int
class SpeedscopeFrameEncoder(json.JSONEncoder):
    """
    Encoder used by json.dumps method on SpeedscopeFrame objects to
    serialize SpeedscopeFrame objects in JSON format.
    """

    def default(self, o: Any) -> Any:
        # Serialize the named tuple as a plain JSON object; everything else
        # defers to the base class, which raises TypeError for unknown types.
        if isinstance(o, SpeedscopeFrame):
            return {"name": o.name, "file": o.file, "line": o.line}
        return json.JSONEncoder.default(self, o)
class SpeedscopeEventType(Enum):
    """Enum representing the only two types of speedscope frame events"""

    # Plain assignments (no `: str` annotations): annotated Enum members are
    # rejected by the Enum machinery on newer Python versions.
    OPEN = "O"
    CLOSE = "C"
class SpeedscopeEvent(NamedTuple):
    """
    Named tuple to store speedscope's concept of an "event", which
    corresponds to opening or closing stack frames as functions or
    methods are entered or exited.
    """

    # Whether the event opens or closes a frame.
    type: SpeedscopeEventType
    # Timestamp of the event in profile time units.
    at: float
    # Index of the frame in speedscope's "shared" frames array.
    frame: int
class SpeedscopeEventEncoder(json.JSONEncoder):
    """
    Encoder used by json.dumps method on SpeedscopeEvent objects to
    serialize SpeedscopeEvent objects in JSON format.
    """

    def default(self, o: Any) -> Any:
        # Events become JSON objects; the nested SpeedscopeEventType is
        # flattened to its one-letter value ("O"/"C").
        if isinstance(o, SpeedscopeEvent):
            return {"type": o.type, "at": o.at, "frame": o.frame}
        if isinstance(o, SpeedscopeEventType):
            return o.value
        return json.JSONEncoder.default(self, o)
# Dictionaries in Python 3.7+ track insertion order, and
# dict.popitem() returns (key, value) pair in reverse insertion order
# (LIFO)
class SpeedscopeRenderer(Renderer):
    """
    Outputs a tree of JSON conforming to the speedscope schema documented at
    wiki: https://github.com/jlfwong/speedscope/wiki/Importing-from-custom-sources
    schema: https://www.speedscope.app/file-format-schema.json
    spec: https://github.com/jlfwong/speedscope/blob/main/src/lib/file-format-spec.ts
    example: https://github.com/jlfwong/speedscope/blob/main/sample/profiles/speedscope/0.0.1/simple.speedscope.json
    """

    def __init__(self, **kwargs: Any):
        super().__init__(**kwargs)
        # Member holding a running total of wall clock time needed to
        # compute the times at which events occur
        self._event_time: float = 0.0
        # Map of speedscope frames to speedscope frame indices, needed
        # to construct evented speedscope profiles; exploits insertion-order
        # property of Python 3.7+ dictionaries. This
        # dictionary is used to build up the "shared" JSON array in
        # speedscope's schema.
        self._frame_to_index: dict[SpeedscopeFrame, int] = {}

    def render_frame(self, frame: BaseFrame | None):
        """Renders frame as string by representing it JSON array-formatted
        string containing the speedscope open frame event, opened and
        close frame events of all children, and close event, in order,
        except for the outer enclosing square brackets. This
        information is used to build up the "events" array in
        speedscope-formatted JSON.
        This method has two notable side effects:
        * it populates the self._frame_to_index dictionary that matches
          speedscope frames with their positions in the "shared" array of
          speedscope output; this dictionary will be used to write this
          "shared" array in the render method
        * it accumulates a running total of time elapsed by
          accumulating the self_time spent in each pyinstrument frame;
          this running total is used by speedscope events to construct
          a flame chart
        This method avoids using the json module because it uses twice
        as many stack frames, which will crash by exceeding the stack
        limit on deep-but-valid call stacks. List comprehensions are
        avoided for similar reasons.
        """
        # if frame is None, recursion bottoms out; no event frames
        # need to be added
        if frame is None:
            return ""
        sframe = SpeedscopeFrame(frame.function, frame.file_path, frame.line_no)
        # First time we see this frame identity, assign it the next index
        # in the "shared" frames array.
        if sframe not in self._frame_to_index:
            self._frame_to_index[sframe] = len(self._frame_to_index)
        sframe_index = self._frame_to_index[sframe]
        open_event = SpeedscopeEvent(
            SpeedscopeEventType.OPEN,
            self._event_time,
            sframe_index
        )
        event_array: list[str] = [json.dumps(open_event, cls=SpeedscopeEventEncoder)]
        for child in frame.children:
            child_events = self.render_frame(child)
            if child_events:
                event_array.append(child_events)
        # If number of frames approaches 1e16 * desired accuracy
        # level, consider using Neumaier-Kahan summation; improves
        # worst-case relative accuracy of sum from O(num_summands *
        # eps) to (2 * eps + O(num_summands * eps * eps)), where eps
        # is IEEE-754 double precision unit roundoff, approximately
        # 1e-16. Average case relative accuracy expressions replace
        # num_summands with sqrt(num_summands). However, Kahan
        # summation quadruples operation count of sum, and Neumaier
        # variant also adds a branch & swap for each summand. Pairwise
        # summation isn't an option here because a running total is
        # needed.
        self._event_time += frame.self_time
        close_event = SpeedscopeEvent(
            SpeedscopeEventType.CLOSE,
            self._event_time,
            sframe_index
        )
        event_array.append(json.dumps(close_event, cls=SpeedscopeEventEncoder))
        # Omit enclosing square brackets here; these brackets are applied in
        # the render method
        return "%s" % ",".join(event_array)

    def render(self, session: Session):
        """Assemble the complete speedscope JSON document for `session`.

        Builds the file-level fields, a single "evented" profile whose
        "events" array comes from render_frame, and the "shared" frames
        array collected as a side effect of render_frame.
        """
        frame = self.preprocess(session.root_frame())
        property_decls: list[str] = []
        # Fields for file
        schema_url: str = "https://www.speedscope.app/file-format-schema.json"
        property_decls.append('"$schema": %s' % encode_str(schema_url))
        id_: str = time.strftime("%Y-%m-%dT%H-%M-%S", time.localtime(session.start_time))
        name: str = "CPU profile for {} at {}".format(session.program, id_)
        property_decls.append('"name": %s' % encode_str(name))
        property_decls.append('"activeProfileIndex": null')
        # TODO(goxberry@gmail.com): figure out how to get version from
        # pyinstrument and add it here as something like
        # pyinstrument@4.0.4 ; can't use from pyinstrument import
        # __version__
        exporter: str = "pyinstrument"
        property_decls.append('"exporter": %s' % encode_str(exporter))
        # Fields for profile
        profile_decls: list[str] = []
        profile_type: str = "evented"
        profile_decls.append('"type": %s' % encode_str(profile_type))
        profile_name: str = session.program
        profile_decls.append('"name": %s' % encode_str(profile_name))
        unit: str = "seconds"
        profile_decls.append('"unit": %s' % encode_str(unit))
        start_value: float = 0.0
        profile_decls.append('"startValue": %f' % start_value)
        end_value: float = session.duration
        profile_decls.append('"endValue": %f' % end_value)
        # use render_frame to build up dictionary of frames for 'shared' field
        # via the self._frame_to_index field; have it output the string
        # representation of the events array
        profile_decls.append('"events": [%s]' % self.render_frame(frame))
        profile_string = "{%s}" % ",".join(profile_decls)
        property_decls.append('"profiles": [%s]' % profile_string)
        # exploits Python 3.7+ dictionary property of iterating over
        # keys in insertion order
        shared_decls: list[str] = []
        for sframe in iter(self._frame_to_index):
            shared_decls.append(json.dumps(sframe, cls=SpeedscopeFrameEncoder))
        property_decls.append('"shared": {"frames": [%s]}' % ",".join(shared_decls))
        return "{%s}\n" % ",".join(property_decls)

    def default_processors(self) -> ProcessorList:
        """Default pyinstrument processor pipeline applied before rendering."""
        return [
            processors.remove_importlib,
            processors.merge_consecutive_self_time,
            processors.aggregate_repeated_calls,
            processors.group_library_frames_processor,
            processors.remove_unnecessary_self_time_nodes,
            processors.remove_irrelevant_nodes,
        ]
Speedscope-related JSON encoders: add type hints
This commit adds type hints to the Speedscope-related JSON encoders.
from __future__ import annotations
import json
import time
from typing import Any, Callable, NamedTuple
from collections import namedtuple
from enum import Enum
from pyinstrument import processors
from pyinstrument.frame import BaseFrame
from pyinstrument.renderers.base import ProcessorList, Renderer
from pyinstrument.session import Session
# pyright: strict
# json.encoder.encode_basestring produces a quoted, escaped JSON string.
encode_str: Callable[[str], str] = json.encoder.encode_basestring  # type: ignore


def encode_bool(a_bool: bool):
    """Serialize a Python bool as a JSON boolean literal."""
    if a_bool:
        return "true"
    return "false"
class SpeedscopeFrame(NamedTuple):
    """
    Named tuple to store data needed for speedscope's concept of a
    frame, hereafter referred to as a "speedscope frame", as opposed to
    a "pyinstrument frame". This type must be hashable in order to use
    it as a dictionary key; a dictionary will be used to track unique
    speedscope frames.
    """

    # Display name of the frame (function or method name).
    name: str
    # Source file path the frame belongs to.
    file: str
    # Line number within `file`.
    line: int
class SpeedscopeFrameEncoder(json.JSONEncoder):
    """
    Encoder used by json.dumps method on SpeedscopeFrame objects to
    serialize SpeedscopeFrame objects in JSON format.
    """

    def default(self, o: Any) -> Any:
        # Serialize the named tuple as a plain JSON object; everything else
        # defers to the base class, which raises TypeError for unknown types.
        if isinstance(o, SpeedscopeFrame):
            return {"name": o.name, "file": o.file, "line": o.line}
        return json.JSONEncoder.default(self, o)
class SpeedscopeEventType(Enum):
    """Enum representing the only two types of speedscope frame events"""

    # Plain assignments (no `: str` annotations): annotated Enum members are
    # rejected by the Enum machinery on newer Python versions.
    OPEN = "O"
    CLOSE = "C"
class SpeedscopeEvent(NamedTuple):
    """
    Named tuple to store speedscope's concept of an "event", which
    corresponds to opening or closing stack frames as functions or
    methods are entered or exited.
    """

    # Whether the event opens or closes a frame.
    type: SpeedscopeEventType
    # Timestamp of the event in profile time units.
    at: float
    # Index of the frame in speedscope's "shared" frames array.
    frame: int
class SpeedscopeEventEncoder(json.JSONEncoder):
    """
    Encoder used by json.dumps method on SpeedscopeEvent objects to
    serialize SpeedscopeEvent objects in JSON format.
    """

    def default(self, o: Any) -> Any:
        # Flatten the event type enum to its one-letter value ("O"/"C").
        if isinstance(o, SpeedscopeEventType):
            return o.value
        # Events become plain JSON objects.
        if isinstance(o, SpeedscopeEvent):
            return dict(type=o.type, at=o.at, frame=o.frame)
        return json.JSONEncoder.default(self, o)
# Dictionaries in Python 3.7+ track insertion order, and
# dict.popitem() returns (key, value) pair in reverse insertion order
# (LIFO)
class SpeedscopeRenderer(Renderer):
    """
    Outputs a tree of JSON conforming to the speedscope schema documented at
    wiki: https://github.com/jlfwong/speedscope/wiki/Importing-from-custom-sources
    schema: https://www.speedscope.app/file-format-schema.json
    spec: https://github.com/jlfwong/speedscope/blob/main/src/lib/file-format-spec.ts
    example: https://github.com/jlfwong/speedscope/blob/main/sample/profiles/speedscope/0.0.1/simple.speedscope.json
    """

    def __init__(self, **kwargs: Any):
        super().__init__(**kwargs)
        # Member holding a running total of wall clock time needed to
        # compute the times at which events occur
        self._event_time: float = 0.0
        # Map of speedscope frames to speedscope frame indices, needed
        # to construct evented speedscope profiles; exploits insertion-order
        # property of Python 3.7+ dictionaries. This
        # dictionary is used to build up the "shared" JSON array in
        # speedscope's schema.
        self._frame_to_index: dict[SpeedscopeFrame, int] = {}

    def render_frame(self, frame: BaseFrame | None):
        """Renders frame as string by representing it JSON array-formatted
        string containing the speedscope open frame event, opened and
        close frame events of all children, and close event, in order,
        except for the outer enclosing square brackets. This
        information is used to build up the "events" array in
        speedscope-formatted JSON.
        This method has two notable side effects:
        * it populates the self._frame_to_index dictionary that matches
          speedscope frames with their positions in the "shared" array of
          speedscope output; this dictionary will be used to write this
          "shared" array in the render method
        * it accumulates a running total of time elapsed by
          accumulating the self_time spent in each pyinstrument frame;
          this running total is used by speedscope events to construct
          a flame chart
        This method avoids using the json module because it uses twice
        as many stack frames, which will crash by exceeding the stack
        limit on deep-but-valid call stacks. List comprehensions are
        avoided for similar reasons.
        """
        # if frame is None, recursion bottoms out; no event frames
        # need to be added
        if frame is None:
            return ""
        sframe = SpeedscopeFrame(frame.function, frame.file_path, frame.line_no)
        # First time we see this frame identity, assign it the next index
        # in the "shared" frames array.
        if sframe not in self._frame_to_index:
            self._frame_to_index[sframe] = len(self._frame_to_index)
        sframe_index = self._frame_to_index[sframe]
        open_event = SpeedscopeEvent(
            SpeedscopeEventType.OPEN,
            self._event_time,
            sframe_index
        )
        event_array: list[str] = [json.dumps(open_event, cls=SpeedscopeEventEncoder)]
        for child in frame.children:
            child_events = self.render_frame(child)
            if child_events:
                event_array.append(child_events)
        # If number of frames approaches 1e16 * desired accuracy
        # level, consider using Neumaier-Kahan summation; improves
        # worst-case relative accuracy of sum from O(num_summands *
        # eps) to (2 * eps + O(num_summands * eps * eps)), where eps
        # is IEEE-754 double precision unit roundoff, approximately
        # 1e-16. Average case relative accuracy expressions replace
        # num_summands with sqrt(num_summands). However, Kahan
        # summation quadruples operation count of sum, and Neumaier
        # variant also adds a branch & swap for each summand. Pairwise
        # summation isn't an option here because a running total is
        # needed.
        self._event_time += frame.self_time
        close_event = SpeedscopeEvent(
            SpeedscopeEventType.CLOSE,
            self._event_time,
            sframe_index
        )
        event_array.append(json.dumps(close_event, cls=SpeedscopeEventEncoder))
        # Omit enclosing square brackets here; these brackets are applied in
        # the render method
        return "%s" % ",".join(event_array)

    def render(self, session: Session):
        """Assemble the complete speedscope JSON document for `session`.

        Builds the file-level fields, a single "evented" profile whose
        "events" array comes from render_frame, and the "shared" frames
        array collected as a side effect of render_frame.
        """
        frame = self.preprocess(session.root_frame())
        property_decls: list[str] = []
        # Fields for file
        schema_url: str = "https://www.speedscope.app/file-format-schema.json"
        property_decls.append('"$schema": %s' % encode_str(schema_url))
        id_: str = time.strftime("%Y-%m-%dT%H-%M-%S", time.localtime(session.start_time))
        name: str = "CPU profile for {} at {}".format(session.program, id_)
        property_decls.append('"name": %s' % encode_str(name))
        property_decls.append('"activeProfileIndex": null')
        # TODO(goxberry@gmail.com): figure out how to get version from
        # pyinstrument and add it here as something like
        # pyinstrument@4.0.4 ; can't use from pyinstrument import
        # __version__
        exporter: str = "pyinstrument"
        property_decls.append('"exporter": %s' % encode_str(exporter))
        # Fields for profile
        profile_decls: list[str] = []
        profile_type: str = "evented"
        profile_decls.append('"type": %s' % encode_str(profile_type))
        profile_name: str = session.program
        profile_decls.append('"name": %s' % encode_str(profile_name))
        unit: str = "seconds"
        profile_decls.append('"unit": %s' % encode_str(unit))
        start_value: float = 0.0
        profile_decls.append('"startValue": %f' % start_value)
        end_value: float = session.duration
        profile_decls.append('"endValue": %f' % end_value)
        # use render_frame to build up dictionary of frames for 'shared' field
        # via the self._frame_to_index field; have it output the string
        # representation of the events array
        profile_decls.append('"events": [%s]' % self.render_frame(frame))
        profile_string = "{%s}" % ",".join(profile_decls)
        property_decls.append('"profiles": [%s]' % profile_string)
        # exploits Python 3.7+ dictionary property of iterating over
        # keys in insertion order
        shared_decls: list[str] = []
        for sframe in iter(self._frame_to_index):
            shared_decls.append(json.dumps(sframe, cls=SpeedscopeFrameEncoder))
        property_decls.append('"shared": {"frames": [%s]}' % ",".join(shared_decls))
        return "{%s}\n" % ",".join(property_decls)

    def default_processors(self) -> ProcessorList:
        """Default pyinstrument processor pipeline applied before rendering."""
        return [
            processors.remove_importlib,
            processors.merge_consecutive_self_time,
            processors.aggregate_repeated_calls,
            processors.group_library_frames_processor,
            processors.remove_unnecessary_self_time_nodes,
            processors.remove_irrelevant_nodes,
        ]
|
re-enable cap
|
# Copyright 2019 The Magenta Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Test for audio_label_data_utils."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from magenta.models.onsets_frames_transcription import audio_label_data_utils
from magenta.music import audio_io
from magenta.music import constants
from magenta.music import testing_lib
from magenta.music.protobuf import music_pb2
import numpy as np
import tensorflow.compat.v1 as tf
SAMPLE_RATE = 16000
class SplitAudioTest(tf.test.TestCase):
  """Tests for splitting paired audio and NoteSequence labels into examples."""

  def _CreateSyntheticSequence(self):
    """Build a 10s NoteSequence with two pitch-50 notes (velocities 20, 80)."""
    seq = music_pb2.NoteSequence(total_time=10)
    testing_lib.add_track_to_sequence(seq, 0, [(50, 20, 0, 5)])
    testing_lib.add_track_to_sequence(seq, 0, [(50, 80, 5, 10)])
    return seq

  def _CreateSyntheticExample(self):
    """Pair the synthetic sequence with 2 seconds of silent audio."""
    sequence = self._CreateSyntheticSequence()
    wav_samples = np.zeros(2 * SAMPLE_RATE, np.float32)
    wav_data = audio_io.samples_to_wav_data(wav_samples, SAMPLE_RATE)
    return wav_data, sequence

  def testSplitAudioLabelData(self):
    wav_data, sequence = self._CreateSyntheticExample()
    records = audio_label_data_utils.process_record(
        wav_data, sequence, 'test', sample_rate=SAMPLE_RATE)

    for record in records:
      audio = record.features.feature['audio'].bytes_list.value[0]
      velocity_range = music_pb2.VelocityRange.FromString(
          record.features.feature['velocity_range'].bytes_list.value[0])
      note_sequence = music_pb2.NoteSequence.FromString(
          record.features.feature['sequence'].bytes_list.value[0])

      # Round-tripped audio should still be silence. np.array_equal is a
      # clearer boolean assertion than assertEqual(np.all(...) == ..., True)
      # and also verifies the shapes match.
      self.assertTrue(
          np.array_equal(
              audio_io.wav_data_to_samples(audio, sample_rate=SAMPLE_RATE),
              np.zeros(2 * SAMPLE_RATE)))

      self.assertEqual(velocity_range.min, 20)
      self.assertEqual(velocity_range.max, 80)

      self.assertEqual(note_sequence.notes[0].velocity, 20)
      self.assertEqual(note_sequence.notes[0].end_time, 5.)

      self.assertEqual(note_sequence.notes[1].velocity, 80)
      self.assertEqual(note_sequence.notes[1].end_time, 10.)

  def testSplitMidi(self):
    sequence = music_pb2.NoteSequence()
    sequence.notes.add(pitch=60, start_time=1.0, end_time=2.9)
    sequence.notes.add(pitch=60, start_time=8.0, end_time=11.0)
    sequence.notes.add(pitch=60, start_time=14.0, end_time=17.0)
    sequence.notes.add(pitch=60, start_time=20.0, end_time=23.0)
    sequence.total_time = 25.

    sample_rate = 160
    samples = np.zeros(sample_rate * int(sequence.total_time))
    splits = audio_label_data_utils.find_split_points(
        sequence, samples, sample_rate, 0, 3)

    # With silent audio, splits fall on the fixed 3-second grid.
    self.assertEqual(splits, [0., 3., 6., 9., 12., 15., 18., 21., 24., 25.])

    # Add an audio event near 8.5s; subsequent split points should shift to it.
    samples[int(8.5 * sample_rate)] = 1
    samples[int(8.5 * sample_rate) + 1] = -1
    splits = audio_label_data_utils.find_split_points(
        sequence, samples, sample_rate, 0, 3)
    self.assertEqual(splits, [
        0.0, 3.0, 6.0, 8.50625, 11.50625, 14.50625, 17.50625, 20.50625,
        23.50625, 25.
    ])
class MixSequencesTest(tf.test.TestCase):
  def testMixSequences(self):
    """mix_sequences overlays two tracks, repeating the shorter one to fit."""
    sample_rate = 10

    # Track 1: two notes over 2 seconds.
    sequence1 = music_pb2.NoteSequence()
    sequence1.notes.add(pitch=60, start_time=0.5, end_time=1.0, velocity=90)
    sequence1.notes.add(pitch=62, start_time=1.0, end_time=2.0, velocity=90)
    sequence1.total_time = 2.0
    samples1 = np.linspace(0, 1, sample_rate * sequence1.total_time)

    # Track 2: one note over 1 second — half the length of track 1.
    sequence2 = music_pb2.NoteSequence()
    sequence2.notes.add(pitch=64, start_time=0.5, end_time=1.0, velocity=90)
    sequence2.total_time = 1.0
    samples2 = np.linspace(0, 1, sample_rate * sequence2.total_time)

    mixed_samples, mixed_sequence = audio_label_data_utils.mix_sequences(
        [samples1, samples2], sample_rate, [sequence1, sequence2])

    # The mixed labels contain track 1's notes plus track 2's note both at
    # its original time and repeated one second later.
    expected_sequence = music_pb2.NoteSequence()
    expected_sequence.ticks_per_quarter = constants.STANDARD_PPQ
    expected_sequence.notes.add(
        pitch=60, start_time=0.5, end_time=1.0, velocity=90)
    expected_sequence.notes.add(
        pitch=62, start_time=1.0, end_time=2.0, velocity=90)
    expected_sequence.notes.add(
        pitch=64, start_time=0.5, end_time=1.0, velocity=90)
    expected_sequence.notes.add(
        pitch=64, start_time=1.5, end_time=2.0, velocity=90)
    expected_sequence.total_time = 2.0

    self.assertProtoEquals(expected_sequence, mixed_sequence)

    # Audio is an equal-weight mix: track 2 looped twice, each at half gain.
    expected_samples = np.concatenate([samples2, samples2]) * .5 + samples1 * .5
    np.testing.assert_array_equal(expected_samples, mixed_samples)
def testMixSequencesLongerNoteSequence(self):
sample_rate = 10
sequence1 = music_pb2.NoteSequence()
sequence1.notes.add(pitch=60, start_time=0.5, end_time=1.0, velocity=90)
sequence1.notes.add(pitch=62, start_time=1.0, end_time=2.0, velocity=90)
sequence1.total_time = 2.0
# samples1 will be .1 seconds shorter than sequence1
samples1 = np.linspace(0, 1, sample_rate * (sequence1.total_time - .1))
sequence2 = music_pb2.NoteSequence()
sequence2.notes.add(pitch=64, start_time=0.5, end_time=1.0, velocity=90)
sequence2.total_time = 1.0
samples2 = np.linspace(0, 1, sample_rate * sequence2.total_time)
mixed_samples, mixed_sequence = audio_label_data_utils.mix_sequences(
[samples1, samples2], sample_rate, [sequence1, sequence2])
expected_sequence = music_pb2.NoteSequence()
expected_sequence.ticks_per_quarter = constants.STANDARD_PPQ
expected_sequence.notes.add(
pitch=60, start_time=0.5, end_time=1.0, velocity=90)
expected_sequence.notes.add(
pitch=62, start_time=1.0, end_time=2.0, velocity=90)
expected_sequence.notes.add(
pitch=64, start_time=0.5, end_time=1.0, velocity=90)
expected_sequence.notes.add(
pitch=64, start_time=1.5, end_time=2.0, velocity=90)
expected_sequence.total_time = 2.0
self.assertProtoEquals(expected_sequence, mixed_sequence)
# We expect samples1 to have 2 samples of padding and samples2 to be
# repeated 1 time fully and once with a single sample.
expected_samples = (
np.concatenate([samples2, samples2, [samples2[0]]]) * .5 +
np.concatenate([samples1, [0, 0]]) * .5)
np.testing.assert_array_equal(expected_samples, mixed_samples)
def testMixSequencesWithSustain(self):
sample_rate = 10
sequence1 = music_pb2.NoteSequence()
sequence1.notes.add(pitch=60, start_time=0.5, end_time=0.6, velocity=90)
sequence1.notes.add(pitch=62, start_time=1.0, end_time=2.0, velocity=90)
sequence1.total_time = 2.0
testing_lib.add_control_changes_to_sequence(
sequence1, 0, [(0.0, 64, 127), (1.0, 64, 0)])
samples1 = np.linspace(0, 1, sample_rate * sequence1.total_time)
sequence2 = music_pb2.NoteSequence()
sequence2.notes.add(pitch=64, start_time=0.5, end_time=0.6, velocity=90)
sequence2.total_time = 1.0
testing_lib.add_control_changes_to_sequence(
sequence2, 0, [(0.0, 64, 127), (0.9, 64, 0)])
samples2 = np.linspace(0, 1, sample_rate * sequence2.total_time)
mixed_samples, mixed_sequence = audio_label_data_utils.mix_sequences(
[samples1, samples2], sample_rate, [sequence1, sequence2])
expected_sequence = music_pb2.NoteSequence()
expected_sequence.ticks_per_quarter = constants.STANDARD_PPQ
expected_sequence.notes.add(
pitch=60, start_time=0.5, end_time=1.0, velocity=90)
expected_sequence.notes.add(
pitch=62, start_time=1.0, end_time=2.0, velocity=90)
expected_sequence.notes.add(
pitch=64, start_time=0.5, end_time=0.9, velocity=90)
expected_sequence.notes.add(
pitch=64, start_time=1.5, end_time=1.9, velocity=90)
expected_sequence.total_time = 2.0
self.assertProtoEquals(expected_sequence, mixed_sequence)
expected_samples = np.concatenate([samples2, samples2]) * .5 + samples1 * .5
np.testing.assert_array_equal(expected_samples, mixed_samples)
def testMixSequencesTotalTime(self):
sample_rate = 10
sequence1 = music_pb2.NoteSequence()
sequence1.notes.add(pitch=60, start_time=0.5, end_time=1.0, velocity=90)
sequence1.notes.add(pitch=62, start_time=1.0, end_time=1.5, velocity=90)
sequence1.total_time = 1.5
samples1 = np.linspace(0, 1, sample_rate * 2)
sequence2 = music_pb2.NoteSequence()
sequence2.notes.add(pitch=64, start_time=0.5, end_time=0.9, velocity=90)
sequence2.total_time = 0.9
samples2 = np.linspace(0, 1, sample_rate * 1)
mixed_samples, mixed_sequence = audio_label_data_utils.mix_sequences(
[samples1, samples2], sample_rate, [sequence1, sequence2])
expected_sequence = music_pb2.NoteSequence()
expected_sequence.ticks_per_quarter = constants.STANDARD_PPQ
expected_sequence.notes.add(
pitch=60, start_time=0.5, end_time=1.0, velocity=90)
expected_sequence.notes.add(
pitch=62, start_time=1.0, end_time=1.5, velocity=90)
expected_sequence.notes.add(
pitch=64, start_time=0.5, end_time=0.9, velocity=90)
expected_sequence.notes.add(
pitch=64, start_time=1.5, end_time=1.9, velocity=90)
# Expected time is 1.9 because the sequences are repeated according to the
# length of their associated audio. So sequence1 is not repeated at all
# (audio is 2 seconds) and sequence2 is repeated once after shifting all the
# notes by the audio length of 1 second. The final total_time is left as is
# after the last repeat, so it ends up being 1 + .9 seconds.
expected_sequence.total_time = 1.9
self.assertProtoEquals(expected_sequence, mixed_sequence)
expected_samples = np.concatenate([samples2, samples2]) * .5 + samples1 * .5
np.testing.assert_array_equal(expected_samples, mixed_samples)
if __name__ == '__main__':
  tf.test.main()  # Discover and run the TestCase classes in this module.
Ensure correct data type for linspace calls to fix numpy errors.
PiperOrigin-RevId: 288723342
# Copyright 2019 The Magenta Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Test for audio_label_data_utils."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from magenta.models.onsets_frames_transcription import audio_label_data_utils
from magenta.music import audio_io
from magenta.music import constants
from magenta.music import testing_lib
from magenta.music.protobuf import music_pb2
import numpy as np
import tensorflow.compat.v1 as tf
SAMPLE_RATE = 16000
class SplitAudioTest(tf.test.TestCase):
  """Tests for splitting audio/label data into example-sized chunks."""

  def _CreateSyntheticSequence(self):
    # 10-second sequence: a soft note (velocity 20) for the first half and a
    # loud note (velocity 80) for the second half, both on the same pitch.
    # NOTE(review): tuple layout assumed to be
    # (pitch, velocity, start, end) per testing_lib — confirm against helper.
    seq = music_pb2.NoteSequence(total_time=10)
    testing_lib.add_track_to_sequence(seq, 0, [(50, 20, 0, 5)])
    testing_lib.add_track_to_sequence(seq, 0, [(50, 80, 5, 10)])
    return seq

  def _CreateSyntheticExample(self):
    # Pair the synthetic sequence with 2 seconds of silent audio.
    sequence = self._CreateSyntheticSequence()
    wav_samples = np.zeros(2 * SAMPLE_RATE, np.float32)
    wav_data = audio_io.samples_to_wav_data(wav_samples, SAMPLE_RATE)
    return wav_data, sequence

  def testSplitAudioLabelData(self):
    """process_record output preserves audio, velocity range, and notes."""
    wav_data, sequence = self._CreateSyntheticExample()
    records = audio_label_data_utils.process_record(
        wav_data, sequence, 'test', sample_rate=SAMPLE_RATE)

    for record in records:
      audio = record.features.feature['audio'].bytes_list.value[0]
      velocity_range = music_pb2.VelocityRange.FromString(
          record.features.feature['velocity_range'].bytes_list.value[0])
      note_sequence = music_pb2.NoteSequence.FromString(
          record.features.feature['sequence'].bytes_list.value[0])

      # Round-tripped audio should still be all zeros.
      self.assertEqual(
          np.all(
              audio_io.wav_data_to_samples(audio, sample_rate=SAMPLE_RATE) ==
              np.zeros(2 * SAMPLE_RATE)), True)
      self.assertEqual(velocity_range.min, 20)
      self.assertEqual(velocity_range.max, 80)
      self.assertEqual(note_sequence.notes[0].velocity, 20)
      self.assertEqual(note_sequence.notes[0].end_time, 5.)
      self.assertEqual(note_sequence.notes[1].velocity, 80)
      self.assertEqual(note_sequence.notes[1].end_time, 10.)

  def testSplitMidi(self):
    """find_split_points prefers quiet regions near the requested spacing."""
    sequence = music_pb2.NoteSequence()
    sequence.notes.add(pitch=60, start_time=1.0, end_time=2.9)
    sequence.notes.add(pitch=60, start_time=8.0, end_time=11.0)
    sequence.notes.add(pitch=60, start_time=14.0, end_time=17.0)
    sequence.notes.add(pitch=60, start_time=20.0, end_time=23.0)
    sequence.total_time = 25.

    sample_rate = 160
    samples = np.zeros(sample_rate * int(sequence.total_time))

    # With silent audio, splits fall on the regular 3-second grid.
    splits = audio_label_data_utils.find_split_points(
        sequence, samples, sample_rate, 0, 3)
    self.assertEqual(splits, [0., 3., 6., 9., 12., 15., 18., 21., 24., 25.])

    # A transient at 8.5s shifts subsequent split points past it.
    samples[int(8.5 * sample_rate)] = 1
    samples[int(8.5 * sample_rate) + 1] = -1
    splits = audio_label_data_utils.find_split_points(
        sequence, samples, sample_rate, 0, 3)
    self.assertEqual(splits, [
        0.0, 3.0, 6.0, 8.50625, 11.50625, 14.50625, 17.50625, 20.50625,
        23.50625, 25.
    ])
class MixSequencesTest(tf.test.TestCase):
  """Tests for audio_label_data_utils.mix_sequences."""

  def testMixSequences(self):
    """Two sequences of different lengths mix; the shorter one repeats."""
    sample_rate = 10

    sequence1 = music_pb2.NoteSequence()
    sequence1.notes.add(pitch=60, start_time=0.5, end_time=1.0, velocity=90)
    sequence1.notes.add(pitch=62, start_time=1.0, end_time=2.0, velocity=90)
    sequence1.total_time = 2.0
    # int() because np.linspace requires an integer sample count.
    samples1 = np.linspace(0, 1, int(sample_rate * sequence1.total_time))

    sequence2 = music_pb2.NoteSequence()
    sequence2.notes.add(pitch=64, start_time=0.5, end_time=1.0, velocity=90)
    sequence2.total_time = 1.0
    samples2 = np.linspace(0, 1, int(sample_rate * sequence2.total_time))

    mixed_samples, mixed_sequence = audio_label_data_utils.mix_sequences(
        [samples1, samples2], sample_rate, [sequence1, sequence2])

    expected_sequence = music_pb2.NoteSequence()
    expected_sequence.ticks_per_quarter = constants.STANDARD_PPQ
    expected_sequence.notes.add(
        pitch=60, start_time=0.5, end_time=1.0, velocity=90)
    expected_sequence.notes.add(
        pitch=62, start_time=1.0, end_time=2.0, velocity=90)
    expected_sequence.notes.add(
        pitch=64, start_time=0.5, end_time=1.0, velocity=90)
    # sequence2 is repeated to cover the duration of the longer audio.
    expected_sequence.notes.add(
        pitch=64, start_time=1.5, end_time=2.0, velocity=90)
    expected_sequence.total_time = 2.0

    self.assertProtoEquals(expected_sequence, mixed_sequence)

    expected_samples = np.concatenate([samples2, samples2]) * .5 + samples1 * .5
    np.testing.assert_array_equal(expected_samples, mixed_samples)

  def testMixSequencesLongerNoteSequence(self):
    """Audio shorter than its NoteSequence gets zero-padded before mixing."""
    sample_rate = 10

    sequence1 = music_pb2.NoteSequence()
    sequence1.notes.add(pitch=60, start_time=0.5, end_time=1.0, velocity=90)
    sequence1.notes.add(pitch=62, start_time=1.0, end_time=2.0, velocity=90)
    sequence1.total_time = 2.0
    # samples1 will be .1 seconds shorter than sequence1
    samples1 = np.linspace(0, 1, int(sample_rate * (sequence1.total_time - .1)))

    sequence2 = music_pb2.NoteSequence()
    sequence2.notes.add(pitch=64, start_time=0.5, end_time=1.0, velocity=90)
    sequence2.total_time = 1.0
    samples2 = np.linspace(0, 1, int(sample_rate * sequence2.total_time))

    mixed_samples, mixed_sequence = audio_label_data_utils.mix_sequences(
        [samples1, samples2], sample_rate, [sequence1, sequence2])

    expected_sequence = music_pb2.NoteSequence()
    expected_sequence.ticks_per_quarter = constants.STANDARD_PPQ
    expected_sequence.notes.add(
        pitch=60, start_time=0.5, end_time=1.0, velocity=90)
    expected_sequence.notes.add(
        pitch=62, start_time=1.0, end_time=2.0, velocity=90)
    expected_sequence.notes.add(
        pitch=64, start_time=0.5, end_time=1.0, velocity=90)
    expected_sequence.notes.add(
        pitch=64, start_time=1.5, end_time=2.0, velocity=90)
    expected_sequence.total_time = 2.0

    self.assertProtoEquals(expected_sequence, mixed_sequence)

    # We expect samples1 to have 2 samples of padding and samples2 to be
    # repeated 1 time fully and once with a single sample.
    expected_samples = (
        np.concatenate([samples2, samples2, [samples2[0]]]) * .5 +
        np.concatenate([samples1, [0, 0]]) * .5)
    np.testing.assert_array_equal(expected_samples, mixed_samples)

  def testMixSequencesWithSustain(self):
    """Sustain control changes extend note durations before mixing."""
    sample_rate = 10

    sequence1 = music_pb2.NoteSequence()
    sequence1.notes.add(pitch=60, start_time=0.5, end_time=0.6, velocity=90)
    sequence1.notes.add(pitch=62, start_time=1.0, end_time=2.0, velocity=90)
    sequence1.total_time = 2.0
    # CC 64 is the sustain pedal; value 127 = down, 0 = up.
    testing_lib.add_control_changes_to_sequence(
        sequence1, 0, [(0.0, 64, 127), (1.0, 64, 0)])
    samples1 = np.linspace(0, 1, int(sample_rate * sequence1.total_time))

    sequence2 = music_pb2.NoteSequence()
    sequence2.notes.add(pitch=64, start_time=0.5, end_time=0.6, velocity=90)
    sequence2.total_time = 1.0
    testing_lib.add_control_changes_to_sequence(
        sequence2, 0, [(0.0, 64, 127), (0.9, 64, 0)])
    samples2 = np.linspace(0, 1, int(sample_rate * sequence2.total_time))

    mixed_samples, mixed_sequence = audio_label_data_utils.mix_sequences(
        [samples1, samples2], sample_rate, [sequence1, sequence2])

    expected_sequence = music_pb2.NoteSequence()
    expected_sequence.ticks_per_quarter = constants.STANDARD_PPQ
    # Notes end where the pedal is released, not at their raw end_time.
    expected_sequence.notes.add(
        pitch=60, start_time=0.5, end_time=1.0, velocity=90)
    expected_sequence.notes.add(
        pitch=62, start_time=1.0, end_time=2.0, velocity=90)
    expected_sequence.notes.add(
        pitch=64, start_time=0.5, end_time=0.9, velocity=90)
    expected_sequence.notes.add(
        pitch=64, start_time=1.5, end_time=1.9, velocity=90)
    expected_sequence.total_time = 2.0

    self.assertProtoEquals(expected_sequence, mixed_sequence)

    expected_samples = np.concatenate([samples2, samples2]) * .5 + samples1 * .5
    np.testing.assert_array_equal(expected_samples, mixed_samples)

  def testMixSequencesTotalTime(self):
    """Repeats are driven by audio length, not NoteSequence total_time."""
    sample_rate = 10

    sequence1 = music_pb2.NoteSequence()
    sequence1.notes.add(pitch=60, start_time=0.5, end_time=1.0, velocity=90)
    sequence1.notes.add(pitch=62, start_time=1.0, end_time=1.5, velocity=90)
    sequence1.total_time = 1.5
    samples1 = np.linspace(0, 1, int(sample_rate * 2))

    sequence2 = music_pb2.NoteSequence()
    sequence2.notes.add(pitch=64, start_time=0.5, end_time=0.9, velocity=90)
    sequence2.total_time = 0.9
    samples2 = np.linspace(0, 1, int(sample_rate * 1))

    mixed_samples, mixed_sequence = audio_label_data_utils.mix_sequences(
        [samples1, samples2], sample_rate, [sequence1, sequence2])

    expected_sequence = music_pb2.NoteSequence()
    expected_sequence.ticks_per_quarter = constants.STANDARD_PPQ
    expected_sequence.notes.add(
        pitch=60, start_time=0.5, end_time=1.0, velocity=90)
    expected_sequence.notes.add(
        pitch=62, start_time=1.0, end_time=1.5, velocity=90)
    expected_sequence.notes.add(
        pitch=64, start_time=0.5, end_time=0.9, velocity=90)
    expected_sequence.notes.add(
        pitch=64, start_time=1.5, end_time=1.9, velocity=90)
    # Expected time is 1.9 because the sequences are repeated according to the
    # length of their associated audio. So sequence1 is not repeated at all
    # (audio is 2 seconds) and sequence2 is repeated once after shifting all the
    # notes by the audio length of 1 second. The final total_time is left as is
    # after the last repeat, so it ends up being 1 + .9 seconds.
    expected_sequence.total_time = 1.9

    self.assertProtoEquals(expected_sequence, mixed_sequence)

    expected_samples = np.concatenate([samples2, samples2]) * .5 + samples1 * .5
    np.testing.assert_array_equal(expected_samples, mixed_samples)
if __name__ == '__main__':
  tf.test.main()  # Discover and run the TestCase classes in this module.
|
"""
Serilizers for the accounts application API
"""
from django.contrib.auth.models import User
from rest_framework import serializers
from muckrock.accounts.models import Profile, Statistics
from muckrock.jurisdiction.models import Jurisdiction
# pylint: disable=too-few-public-methods
class ProfileSerializer(serializers.ModelSerializer):
    """Serializer for Profile model"""
    # Jurisdiction FK rendered with a plain text input in the browsable API
    # instead of a (potentially very large) select widget.
    location = serializers.PrimaryKeyRelatedField(
        queryset=Jurisdiction.objects.all(),
        style={'base_template': 'input.html'})

    class Meta:
        model = Profile
        # The owning user is implied by the parent UserSerializer nesting.
        exclude = ('user',)
class UserSerializer(serializers.ModelSerializer):
    """Serializer for User model"""
    # Nest the user's profile using the ProfileSerializer defined above.
    profile = ProfileSerializer()

    class Meta:
        model = User
        fields = ('username', 'first_name', 'last_name', 'email', 'is_staff', 'is_superuser',
                  'last_login', 'date_joined', 'groups', 'profile')
class StatisticsSerializer(serializers.ModelSerializer):
    """Serializer for Statistics model"""

    def __init__(self, *args, **kwargs):
        """Strip staff-only fields for non-staff (or missing) requesters."""
        # pylint: disable=super-on-old-class
        super(StatisticsSerializer, self).__init__(*args, **kwargs)
        # If there is no request in context we cannot verify staff status,
        # so the restricted fields are removed in that case as well.
        if 'request' not in self.context or not self.context['request'].user.is_staff:
            staff_only = (
                'pro_users',
                'pro_user_names',
                'total_page_views',
                'daily_requests_pro',
                'daily_requests_basic',
                'daily_requests_beta',
                'daily_requests_proxy',
                'daily_requests_admin',
                'daily_requests_org',
                'daily_articles',
                'total_tasks',
                'total_unresolved_tasks',
                'total_generic_tasks',
                'total_unresolved_generic_tasks',
                'total_orphan_tasks',
                'total_unresolved_orphan_tasks',
                'total_snailmail_tasks',
                'total_unresolved_snailmail_tasks',
                'total_rejected_tasks',
                'total_unresolved_rejected_tasks',
                'total_staleagency_tasks',
                'total_unresolved_staleagency_tasks',
                'total_flagged_tasks',
                'total_unresolved_flagged_tasks',
                'total_newagency_tasks',
                'total_unresolved_newagency_tasks',
                'total_response_tasks',
                'total_unresolved_response_tasks',
                'total_faxfail_tasks',
                'total_unresolved_faxfail_tasks',
                'total_payment_tasks',
                'total_unresolved_payment_tasks',
                'total_crowdfundpayment_tasks',
                'total_unresolved_crowdfundpayment_tasks',
                'daily_robot_response_tasks',
                'admin_notes',
                'total_active_org_members',
                'total_active_orgs',
                'sent_communications_email',
                'sent_communications_fax',
                'sent_communications_mail',
                'total_users_filed',
                'flag_processing_days',
                'unresolved_snailmail_appeals',
                'total_crowdfunds',
                'total_crowdfunds_pro',
                'total_crowdfunds_basic',
                'total_crowdfunds_beta',
                'total_crowdfunds_proxy',
                'total_crowdfunds_admin',
                'open_crowdfunds',
                'open_crowdfunds_pro',
                'open_crowdfunds_basic',
                'open_crowdfunds_beta',
                'open_crowdfunds_proxy',
                'open_crowdfunds_admin',
                'closed_crowdfunds_0',
                'closed_crowdfunds_0_25',
                'closed_crowdfunds_25_50',
                'closed_crowdfunds_50_75',
                'closed_crowdfunds_75_100',
                'closed_crowdfunds_100_125',
                'closed_crowdfunds_125_150',
                'closed_crowdfunds_150_175',
                'closed_crowdfunds_175_200',
                'closed_crowdfunds_200',
                'total_crowdfund_payments',
                'total_crowdfund_payments_loggedin',
                'total_crowdfund_payments_loggedout',
                'public_projects',
                'private_projects',
                'unapproved_projects',
                'crowdfund_projects',
                'project_users',
                'project_users_pro',
                'project_users_basic',
                'project_users_beta',
                'project_users_proxy',
                'project_users_admin',
                'total_exemptions',
                'total_invoked_exemptions',
                'total_example_appeals',
            )
            # NOTE(review): pop() raises KeyError if a staff_only name ever
            # drops out of Meta.fields — keep the two lists in sync.
            for field in staff_only:
                self.fields.pop(field)

    class Meta:
        model = Statistics
        fields = (
            'date',
            'total_requests',
            'total_requests_success',
            'total_requests_denied',
            'total_requests_draft',
            'total_requests_submitted',
            'total_requests_awaiting_ack',
            'total_requests_awaiting_response',
            'total_requests_awaiting_appeal',
            'total_requests_fix_required',
            'total_requests_payment_required',
            'total_requests_no_docs',
            'total_requests_partial',
            'total_requests_abandoned',
            'total_requests_lawsuit',
            'total_pages',
            'total_users',
            'total_agencies',
            'total_fees',
            'pro_users',
            'pro_user_names',
            'total_page_views',
            'daily_requests_pro',
            'daily_requests_basic',
            'daily_requests_beta',
            'daily_requests_proxy',
            'daily_requests_admin',
            'daily_requests_org',
            'daily_articles',
            'total_tasks',
            'total_unresolved_tasks',
            'total_generic_tasks',
            'total_unresolved_generic_tasks',
            'total_orphan_tasks',
            'total_unresolved_orphan_tasks',
            'total_snailmail_tasks',
            'total_unresolved_snailmail_tasks',
            'total_rejected_tasks',
            'total_unresolved_rejected_tasks',
            'total_staleagency_tasks',
            'total_unresolved_staleagency_tasks',
            'total_flagged_tasks',
            'total_unresolved_flagged_tasks',
            'total_newagency_tasks',
            'total_unresolved_newagency_tasks',
            'total_response_tasks',
            'total_unresolved_response_tasks',
            'total_faxfail_tasks',
            'total_unresolved_faxfail_tasks',
            'total_payment_tasks',
            'total_unresolved_payment_tasks',
            'total_crowdfundpayment_tasks',
            'total_unresolved_crowdfundpayment_tasks',
            'daily_robot_response_tasks',
            'public_notes',
            'admin_notes',
            'total_active_org_members',
            'total_active_orgs',
            'sent_communications_email',
            'sent_communications_fax',
            'sent_communications_mail',
            'total_users_filed',
            'flag_processing_days',
            'unresolved_snailmail_appeals',
            'total_crowdfunds',
            'total_crowdfunds_pro',
            'total_crowdfunds_basic',
            'total_crowdfunds_beta',
            'total_crowdfunds_proxy',
            'total_crowdfunds_admin',
            'open_crowdfunds',
            'open_crowdfunds_pro',
            'open_crowdfunds_basic',
            'open_crowdfunds_beta',
            'open_crowdfunds_proxy',
            'open_crowdfunds_admin',
            'closed_crowdfunds_0',
            'closed_crowdfunds_0_25',
            'closed_crowdfunds_25_50',
            'closed_crowdfunds_50_75',
            'closed_crowdfunds_75_100',
            'closed_crowdfunds_100_125',
            'closed_crowdfunds_125_150',
            'closed_crowdfunds_150_175',
            'closed_crowdfunds_175_200',
            'closed_crowdfunds_200',
            'total_crowdfund_payments',
            'total_crowdfund_payments_loggedin',
            'total_crowdfund_payments_loggedout',
            'public_projects',
            'private_projects',
            'unapproved_projects',
            'crowdfund_projects',
            'project_users',
            'project_users_pro',
            'project_users_basic',
            'project_users_beta',
            'project_users_proxy',
            'project_users_admin',
            'total_exemptions',
            'total_invoked_exemptions',
            'total_example_appeals',
        )
Add more stats to the API; closes #1241
"""
Serilizers for the accounts application API
"""
from django.contrib.auth.models import User
from rest_framework import serializers
from muckrock.accounts.models import Profile, Statistics
from muckrock.jurisdiction.models import Jurisdiction
# pylint: disable=too-few-public-methods
class ProfileSerializer(serializers.ModelSerializer):
    """Serializer for Profile model"""
    # Jurisdiction FK rendered with a plain text input in the browsable API
    # instead of a (potentially very large) select widget.
    location = serializers.PrimaryKeyRelatedField(
        queryset=Jurisdiction.objects.all(),
        style={'base_template': 'input.html'})

    class Meta:
        model = Profile
        # The owning user is implied by the parent UserSerializer nesting.
        exclude = ('user',)
class UserSerializer(serializers.ModelSerializer):
    """Serializer for User model"""
    # Nest the user's profile using the ProfileSerializer defined above.
    profile = ProfileSerializer()

    class Meta:
        model = User
        fields = ('username', 'first_name', 'last_name', 'email', 'is_staff', 'is_superuser',
                  'last_login', 'date_joined', 'groups', 'profile')
class StatisticsSerializer(serializers.ModelSerializer):
    """Serializer for Statistics model"""

    def __init__(self, *args, **kwargs):
        """Strip staff-only fields for non-staff (or missing) requesters."""
        # pylint: disable=super-on-old-class
        super(StatisticsSerializer, self).__init__(*args, **kwargs)
        # If there is no request in context we cannot verify staff status,
        # so the restricted fields are removed in that case as well.
        if 'request' not in self.context or not self.context['request'].user.is_staff:
            staff_only = (
                'pro_users',
                'pro_user_names',
                'total_page_views',
                'daily_requests_pro',
                'daily_requests_basic',
                'daily_requests_beta',
                'daily_requests_proxy',
                'daily_requests_admin',
                'daily_requests_org',
                'daily_articles',
                'total_tasks',
                'total_unresolved_tasks',
                'total_generic_tasks',
                'total_unresolved_generic_tasks',
                'total_orphan_tasks',
                'total_unresolved_orphan_tasks',
                'total_snailmail_tasks',
                'total_unresolved_snailmail_tasks',
                'total_rejected_tasks',
                'total_unresolved_rejected_tasks',
                'total_staleagency_tasks',
                'total_unresolved_staleagency_tasks',
                'total_flagged_tasks',
                'total_unresolved_flagged_tasks',
                'total_newagency_tasks',
                'total_unresolved_newagency_tasks',
                'total_response_tasks',
                'total_unresolved_response_tasks',
                'total_faxfail_tasks',
                'total_unresolved_faxfail_tasks',
                'total_payment_tasks',
                'total_unresolved_payment_tasks',
                'total_crowdfundpayment_tasks',
                'total_unresolved_crowdfundpayment_tasks',
                'daily_robot_response_tasks',
                'admin_notes',
                'total_active_org_members',
                'total_active_orgs',
                'sent_communications_email',
                'sent_communications_fax',
                'sent_communications_mail',
                'total_users_filed',
                'flag_processing_days',
                'unresolved_snailmail_appeals',
                'total_crowdfunds',
                'total_crowdfunds_pro',
                'total_crowdfunds_basic',
                'total_crowdfunds_beta',
                'total_crowdfunds_proxy',
                'total_crowdfunds_admin',
                'open_crowdfunds',
                'open_crowdfunds_pro',
                'open_crowdfunds_basic',
                'open_crowdfunds_beta',
                'open_crowdfunds_proxy',
                'open_crowdfunds_admin',
                'closed_crowdfunds_0',
                'closed_crowdfunds_0_25',
                'closed_crowdfunds_25_50',
                'closed_crowdfunds_50_75',
                'closed_crowdfunds_75_100',
                'closed_crowdfunds_100_125',
                'closed_crowdfunds_125_150',
                'closed_crowdfunds_150_175',
                'closed_crowdfunds_175_200',
                'closed_crowdfunds_200',
                'total_crowdfund_payments',
                'total_crowdfund_payments_loggedin',
                'total_crowdfund_payments_loggedout',
                'public_projects',
                'private_projects',
                'unapproved_projects',
                'crowdfund_projects',
                'project_users',
                'project_users_pro',
                'project_users_basic',
                'project_users_beta',
                'project_users_proxy',
                'project_users_admin',
                'total_exemptions',
                'total_invoked_exemptions',
                'total_example_appeals',
                'requests_processing_days',
            )
            # NOTE(review): pop() raises KeyError if a staff_only name ever
            # drops out of Meta.fields — keep the two lists in sync.
            for field in staff_only:
                self.fields.pop(field)

    class Meta:
        model = Statistics
        fields = (
            'date',
            'total_requests',
            'total_requests_success',
            'total_requests_denied',
            'total_requests_draft',
            'total_requests_submitted',
            'total_requests_awaiting_ack',
            'total_requests_awaiting_response',
            'total_requests_awaiting_appeal',
            'total_requests_fix_required',
            'total_requests_payment_required',
            'total_requests_no_docs',
            'total_requests_partial',
            'total_requests_abandoned',
            'total_requests_lawsuit',
            'requests_processing_days',
            'total_pages',
            'total_users',
            'total_agencies',
            'total_fees',
            'pro_users',
            'pro_user_names',
            'total_page_views',
            'daily_requests_pro',
            'daily_requests_basic',
            'daily_requests_beta',
            'daily_requests_proxy',
            'daily_requests_admin',
            'daily_requests_org',
            'daily_articles',
            'total_tasks',
            'total_unresolved_tasks',
            'total_generic_tasks',
            'total_unresolved_generic_tasks',
            'total_orphan_tasks',
            'total_unresolved_orphan_tasks',
            'total_snailmail_tasks',
            'total_unresolved_snailmail_tasks',
            'total_rejected_tasks',
            'total_unresolved_rejected_tasks',
            'total_staleagency_tasks',
            'total_unresolved_staleagency_tasks',
            'total_flagged_tasks',
            'total_unresolved_flagged_tasks',
            'total_newagency_tasks',
            'total_unresolved_newagency_tasks',
            'total_response_tasks',
            'total_unresolved_response_tasks',
            'total_faxfail_tasks',
            'total_unresolved_faxfail_tasks',
            'total_payment_tasks',
            'total_unresolved_payment_tasks',
            'total_crowdfundpayment_tasks',
            'total_unresolved_crowdfundpayment_tasks',
            'daily_robot_response_tasks',
            'public_notes',
            'admin_notes',
            'total_active_org_members',
            'total_active_orgs',
            'sent_communications_email',
            'sent_communications_fax',
            'sent_communications_mail',
            'total_users_filed',
            'flag_processing_days',
            'unresolved_snailmail_appeals',
            'total_crowdfunds',
            'total_crowdfunds_pro',
            'total_crowdfunds_basic',
            'total_crowdfunds_beta',
            'total_crowdfunds_proxy',
            'total_crowdfunds_admin',
            'open_crowdfunds',
            'open_crowdfunds_pro',
            'open_crowdfunds_basic',
            'open_crowdfunds_beta',
            'open_crowdfunds_proxy',
            'open_crowdfunds_admin',
            'closed_crowdfunds_0',
            'closed_crowdfunds_0_25',
            'closed_crowdfunds_25_50',
            'closed_crowdfunds_50_75',
            'closed_crowdfunds_75_100',
            'closed_crowdfunds_100_125',
            'closed_crowdfunds_125_150',
            'closed_crowdfunds_150_175',
            'closed_crowdfunds_175_200',
            'closed_crowdfunds_200',
            'total_crowdfund_payments',
            'total_crowdfund_payments_loggedin',
            'total_crowdfund_payments_loggedout',
            'public_projects',
            'private_projects',
            'unapproved_projects',
            'crowdfund_projects',
            'project_users',
            'project_users_pro',
            'project_users_basic',
            'project_users_beta',
            'project_users_proxy',
            'project_users_admin',
            'total_exemptions',
            'total_invoked_exemptions',
            'total_example_appeals',
            'machine_requests',
            'machine_requests_success',
            'machine_requests_denied',
            'machine_requests_draft',
            'machine_requests_submitted',
            'machine_requests_awaiting_ack',
            'machine_requests_awaiting_response',
            'machine_requests_awaiting_appeal',
            'machine_requests_fix_required',
            'machine_requests_payment_required',
            'machine_requests_no_docs',
            'machine_requests_partial',
            'machine_requests_abandoned',
            'machine_requests_lawsuit',
        )
|
'''
Created on Feb 6, 2015
@author: cmccully
'''
from __future__ import absolute_import, division, print_function
from future.utils import iteritems
import om10
import numpy as np
import re
import json
import os
import pandas as pd
import copy
import gzip
import shutil
from lsst.utils import getPackageDir
from lsst.sims.utils import SpecMap, defaultSpecMap
from lsst.sims.catUtils.baseCatalogModels import GalaxyTileCompoundObj
from lsst.sims.catUtils.matchSED import matchBase
from lsst.sims.photUtils import Bandpass, BandpassDict, Sed
from lsst.sims.utils import radiansFromArcsec
from lsst.sims.catUtils.supernovae import SNObject
__all__ = ['sprinklerCompound', 'sprinkler']
class sprinklerCompound(GalaxyTileCompoundObj):
    """Compound galaxy catalog object that runs the Twinkles sprinkler.

    Converts the ra/dec columns to radians (as the base class does) and then
    passes the query results through the sprinkler to inject lensed systems.
    Configuration attributes below are expected to be set by the caller
    before the catalog is queried.
    """
    objid = 'sprinklerCompound'
    objectTypeId = 66
    cached_sprinkling = False
    agn_cache_file = None
    sne_cache_file = None
    defs_file = None
    sed_path = None

    def _final_pass(self, results):
        """Post-process query results: radian conversion + sprinkling.

        Parameters
        ----------
        results: numpy structured array of catalog rows.

        Returns
        -------
        The results array with lens systems added by the sprinkler.
        """
        # From the original GalaxyTileCompoundObj final pass method
        for name in results.dtype.fields:
            if 'raJ2000' in name or 'decJ2000' in name:
                results[name] = np.radians(results[name])

        # the stored procedure on fatboy that queries the galaxies
        # constructs galtileid by taking
        #
        # tileid*10^8 + galid
        #
        # this causes galtileid to be so large that the uniqueIDs in the
        # Twinkles InstanceCatalogs are too large for PhoSim to handle.
        # Since Twinkles is only focused on one tile on the sky, we will remove
        # the factor of 10^8, making the uniqueIDs a more manageable size
        # results['galtileid'] = results['galtileid']#%100000000

        # Use Sprinkler now.
        # NOTE: self.mjd and self.specFileMap are presumably provided by the
        # base catalog machinery — confirm against GalaxyTileCompoundObj.
        sp = sprinkler(results, self.mjd, self.specFileMap, self.sed_path,
                       density_param=1.0,
                       cached_sprinkling=self.cached_sprinkling,
                       agn_cache_file=self.agn_cache_file,
                       sne_cache_file=self.sne_cache_file,
                       defs_file=self.defs_file)
        # BUG FIX: sprinkler.sprinkle is declared as sprinkle(self,
        # input_catalog) and the catalog is not stored on the instance, so
        # calling sp.sprinkle() with no arguments raised TypeError. Pass the
        # results array explicitly.
        results = sp.sprinkle(results)

        return results
class sprinkler():
    def __init__(self, catsim_cat, visit_mjd, specFileMap, sed_path,
                 om10_cat='twinkles_lenses_v2.fits',
                 sne_cat = 'dc2_sne_cat.csv', density_param=1., cached_sprinkling=False,
                 agn_cache_file=None, sne_cache_file=None, defs_file=None,
                 write_sn_sed=True):
        """
        Parameters
        ----------
        catsim_cat: catsim catalog
            The results array from an instance catalog.
        visit_mjd: float
            The mjd of the visit
        specFileMap:
            This will tell the instance catalog where to write the files
        om10_cat: optional, defaults to 'twinkles_lenses_v2.fits'
            fits file with OM10 catalog
        sne_cat: optional, defaults to 'dc2_sne_cat.csv'
        density_param: `np.float`, optional, defaults to 1.0
            the fraction of eligible agn objects that become lensed and should
            be between 0.0 and 1.0.
        cached_sprinkling: boolean
            If true then pick from a preselected list of galtileids
        agn_cache_file: str
        sne_cache_file: str
        defs_file: str
        write_sn_sed: boolean
            Controls whether or not to actually write supernova
            SEDs to disk (default=True)

        Returns
        -------
        updated_catalog:
            A new results array with lens systems added.
        """
        twinklesDir = getPackageDir('Twinkles')
        om10_cat = os.path.join(twinklesDir, 'data', om10_cat)
        self.write_sn_sed = write_sn_sed
        # Only the column names are kept; the catalog itself is passed to
        # sprinkle() by the caller.
        self.catalog_column_names = catsim_cat.dtype.names
        # ****** THIS ASSUMES THAT THE ENVIRONMENT VARIABLE OM10_DIR IS SET *******
        lensdb = om10.DB(catalog=om10_cat, vb=False)
        self.lenscat = lensdb.lenses.copy()
        self.density_param = density_param
        self.bandpassDict = BandpassDict.loadTotalBandpassesFromFiles(bandpassNames=['i'])
        self.sne_catalog = pd.read_csv(os.path.join(twinklesDir, 'data', sne_cat))
        #self.sne_catalog = self.sne_catalog.iloc[:101] ### Remove this after testing
        self.used_systems = []
        self._visit_mjd = visit_mjd
        self.sn_obj = SNObject(0., 0.)
        # Directory where lensed-SN SEDs will be written.
        self.write_dir = specFileMap.subdir_map['(^specFileGLSN)']
        self.sed_path = sed_path

        self.cached_sprinkling = cached_sprinkling
        if self.cached_sprinkling is True:
            # Both cache files are required together when using the cache.
            if ((agn_cache_file is None) | (sne_cache_file is None)):
                raise AttributeError('Must specify cache files if using cached_sprinkling.')
            #agn_cache_file = os.path.join(twinklesDir, 'data', 'test_agn_galtile_cache.csv')
            self.agn_cache = pd.read_csv(agn_cache_file)
            #sne_cache_file = os.path.join(twinklesDir, 'data', 'test_sne_galtile_cache.csv')
            self.sne_cache = pd.read_csv(sne_cache_file)
        else:
            self.agn_cache = None
            self.sne_cache = None

        if defs_file is None:
            self.defs_file = os.path.join(twinklesDir, 'data', 'catsim_defs.csv')
        else:
            self.defs_file = defs_file

        self.sedDir = getPackageDir('sims_sed_library')
        self.imSimBand = Bandpass()
        self.imSimBand.imsimBandpass()
        #self.LRG_name = 'Burst.25E09.1Z.spec'
        #self.LRG = Sed()
        #self.LRG.readSED_flambda(str(galDir + self.LRG_name))
        #return

        # Calculate imsimband magnitudes of source galaxies for matching
        agn_fname = str(getPackageDir('sims_sed_library') + '/agnSED/agn.spec.gz')
        src_iband = self.lenscat['MAGI_IN']
        src_z = self.lenscat['ZSRC']
        self.src_mag_norm = []
        for src, s_z in zip(src_iband, src_z):
            agn_sed = Sed()
            agn_sed.readSED_flambda(agn_fname)
            agn_sed.redshiftSED(s_z, dimming=True)
            self.src_mag_norm.append(matchBase().calcMagNorm([src],
                                                             agn_sed,
                                                             self.bandpassDict))
        #self.src_mag_norm = matchBase().calcMagNorm(src_iband,
        #                                            [agn_sed]*len(src_iband),
        #
        #                                            self.bandpassDict)

        # Only store SN truth params if the input catalog has a column for
        # them.
        has_sn_truth_params = False
        for name in self.catalog_column_names:
            if 'sn_truth_params' in name:
                has_sn_truth_params = True
                break

        # Parse the defs CSV into a mapping of catalog column name ->
        # destination column(s). Rows with three or more entries map to a
        # tuple of destinations.
        self.defs_dict = {}
        self.logging_is_sprinkled = False
        self.store_sn_truth_params = False
        with open(self.defs_file, 'r') as f:
            for line in f:
                line_defs = line.strip().split(',')
                if len(line_defs) > 1:
                    if 'is_sprinkled' in line_defs[1]:
                        self.logging_is_sprinkled = True
                    if 'sn_truth_params' in line_defs[1] and has_sn_truth_params:
                        self.store_sn_truth_params = True
                    if len(line_defs) == 2:
                        self.defs_dict[line_defs[0]] = line_defs[1]
                    else:
                        self.defs_dict[line_defs[0]] = tuple((ll for ll in line_defs[1:]))
@property
def visit_mjd(self):
    """float: MJD of the visit currently being sprinkled."""
    return self._visit_mjd

@visit_mjd.setter
def visit_mjd(self, new_mjd):
    """Point the sprinkler at a different visit MJD."""
    self._visit_mjd = new_mjd
def sprinkle(self, input_catalog):
    """Replace eligible galaxies with strong-lens systems and append their images.

    Galaxies whose AGN magNorm is finite are candidates for lensed-AGN
    systems drawn from the OM10 catalog; galaxies whose AGN magNorm is
    NaN are candidates for lensed-SNe systems drawn from the SNe
    catalog.  For each sprinkled system the original catalog row is
    rewritten to describe the foreground lens galaxy, and one new row
    per lensed image is appended to the catalog.

    Parameters
    ----------
    input_catalog : numpy structured array
        Instance-catalog rows, keyed by the column names held in
        ``self.defs_dict``.

    Returns
    -------
    updated_catalog : numpy structured array
        Copy of ``input_catalog`` with lens-galaxy rows modified in
        place and lensed-image rows appended at the end.
    """
    updated_catalog = input_catalog.copy()
    # defs_dict['galtileid'] may map to several columns; use the first
    # one for lookups.
    if isinstance(self.defs_dict['galtileid'], tuple):
        galid_dex = self.defs_dict['galtileid'][0]
    else:
        galid_dex = self.defs_dict['galtileid']
    agn_magnorm_dex = self.defs_dict['galaxyAgn_magNorm']
    agn_magnorm_array = np.array([row[agn_magnorm_dex] for row in input_catalog])
    # NaN AGN magNorm marks galaxies without an AGN component: those
    # become SNe hosts; the rest are AGN hosts.
    nan_magnorm = np.isnan(agn_magnorm_array)
    if self.cached_sprinkling:
        # Restrict to the preselected galtileids from the cache files.
        if not hasattr(self, '_unq_agn_gid'):
            self._unq_agn_gid = np.unique(self.agn_cache['galtileid'].values)
            self._unq_sne_gid = np.unique(self.sne_cache['galtileid'].values)
        galtileid_array = np.array([row[galid_dex] for row in input_catalog])
        valid_agn = np.where(np.logical_and(np.logical_not(nan_magnorm),
                                            np.in1d(galtileid_array,
                                                    self._unq_agn_gid,
                                                    assume_unique=True)))[0]
        valid_sne = np.where(np.logical_and(nan_magnorm,
                                            np.in1d(galtileid_array,
                                                    self._unq_sne_gid,
                                                    assume_unique=True)))[0]
    else:
        valid_agn = np.where(np.logical_not(nan_magnorm))[0]
        valid_sne = np.where(nan_magnorm)[0]
    new_rows = []
    # ---- lensed AGN systems ----
    for rowNum in valid_agn:
        row = input_catalog[rowNum]
        galtileid = row[galid_dex]
        if not self.cached_sprinkling:
            candidates = self.find_lens_candidates(row[self.defs_dict['galaxyAgn_redshift']],
                                                   row[self.defs_dict['galaxyAgn_magNorm']])
            # NOTE(review): '2^32' is XOR (== 34), not 2**32, so the
            # seed is effectively galtileid % 33.  Left untouched to
            # preserve reproducibility of existing runs -- confirm
            # before changing.
            rng = np.random.RandomState(galtileid % (2^32 -1))
            pick_value = rng.uniform()
            if len(candidates) == 0 or pick_value>self.density_param:
                # If there aren't any lensed sources at this redshift from
                # OM10 move on the next object
                continue
            # Randomly choose one the lens systems
            # (can decide with or without replacement)
            # Sort first to make sure the same choice is made every time
            candidates = candidates[np.argsort(candidates['twinklesId'])]
            newlens = rng.choice(candidates)
        else:
            twinkles_sys_cache = self.agn_cache.query('galtileid == %i' % galtileid)['twinkles_system'].values[0]
            newlens = self.lenscat[np.where(self.lenscat['twinklesId'] == twinkles_sys_cache)[0]][0]
        # Build a template row for the lensed images: a copy of the host
        # row with its disk/bulge components zeroed out and redshifts
        # set to the source redshift.
        default_lensrow = None
        # BUG FIX: the number-of-images column in the OM10 catalog is
        # 'NIMG'; the previous code read 'IMG'.
        if newlens['NIMG'] > 0:
            default_lensrow = row.copy()
            default_lensrow[self.defs_dict['galaxyDisk_majorAxis']] = 0.0
            default_lensrow[self.defs_dict['galaxyDisk_minorAxis']] = 0.0
            default_lensrow[self.defs_dict['galaxyDisk_positionAngle']] = 0.0
            default_lensrow[self.defs_dict['galaxyDisk_internalAv']] = 0.0
            default_lensrow[self.defs_dict['galaxyDisk_magNorm']] = 999. #np.nan To be fixed post run1.1
            default_lensrow[self.defs_dict['galaxyDisk_sedFilename']] = None
            default_lensrow[self.defs_dict['galaxyBulge_majorAxis']] = 0.0
            default_lensrow[self.defs_dict['galaxyBulge_minorAxis']] = 0.0
            default_lensrow[self.defs_dict['galaxyBulge_positionAngle']] = 0.0
            default_lensrow[self.defs_dict['galaxyBulge_internalAv']] = 0.0
            default_lensrow[self.defs_dict['galaxyBulge_magNorm']] = 999. #np.nan To be fixed post run1.1
            default_lensrow[self.defs_dict['galaxyBulge_sedFilename']] = None
            default_lensrow[self.defs_dict['galaxyBulge_redshift']] = newlens['ZSRC']
            default_lensrow[self.defs_dict['galaxyDisk_redshift']] = newlens['ZSRC']
            default_lensrow[self.defs_dict['galaxyAgn_redshift']] = newlens['ZSRC']
        for i in range(newlens['NIMG']):
            lensrow = default_lensrow.copy()
            # XIMG and YIMG are in arcseconds
            # raPhSim and decPhoSim are in radians
            # Shift all parts of the lensed object, not just its agn part
            delta_dec = np.radians(newlens['YIMG'][i] / 3600.0)
            delta_ra = np.radians(newlens['XIMG'][i] / 3600.0)
            for lensPart in ['galaxyBulge', 'galaxyDisk', 'galaxyAgn']:
                lens_ra = lensrow[self.defs_dict[str(lensPart+'_raJ2000')]]
                lens_dec = lensrow[self.defs_dict[str(lensPart+'_decJ2000')]]
                lensrow[self.defs_dict[str(lensPart + '_raJ2000')]] = lens_ra + delta_ra/np.cos(lens_dec)
                lensrow[self.defs_dict[str(lensPart + '_decJ2000')]] = lens_dec + delta_dec
            # Brighten/dim the image by its lensing magnification.
            mag_adjust = 2.5*np.log10(np.abs(newlens['MAG'][i]))
            lensrow[self.defs_dict['galaxyAgn_magNorm']] -= mag_adjust
            # Inject the per-image time delay into the AGN variability
            # parameter string.
            varString = json.loads(lensrow[self.defs_dict['galaxyAgn_varParamStr']])
            varString[self.defs_dict['pars']]['t0Delay'] = newlens['DELAY'][i]
            varString[self.defs_dict['varMethodName']] = 'applyAgnTimeDelay'
            lensrow[self.defs_dict['galaxyAgn_varParamStr']] = json.dumps(varString)
            if self.logging_is_sprinkled:
                lensrow[self.defs_dict['galaxyAgn_is_sprinkled']] = 1
                lensrow[self.defs_dict['galaxyBulge_is_sprinkled']] = 1
                lensrow[self.defs_dict['galaxyDisk_is_sprinkled']] = 1
            #To get back twinklesID in lens catalog from phosim catalog id number
            #just use np.right_shift(phosimID-28, 10). Take the floor of the last
            #3 numbers to get twinklesID in the twinkles lens catalog and the remainder is
            #the image number minus 1.
            if not isinstance(self.defs_dict['galtileid'], tuple):
                lensrow[self.defs_dict['galtileid']] = ((lensrow[self.defs_dict['galtileid']]+int(1.5e10))*10000 +
                                                        newlens['twinklesId']*4 + i)
            else:
                for col_name in self.defs_dict['galtileid']:
                    lensrow[col_name] = ((lensrow[col_name]+int(1.5e10))*10000 +
                                         newlens['twinklesId']*4 + i)
            new_rows.append(lensrow)
        #Now manipulate original entry to be the lens galaxy with desired properties
        #Start by deleting Disk and AGN properties
        if not np.isnan(row[self.defs_dict['galaxyDisk_magNorm']]):
            row[self.defs_dict['galaxyDisk_majorAxis']] = 0.0
            row[self.defs_dict['galaxyDisk_minorAxis']] = 0.0
            row[self.defs_dict['galaxyDisk_positionAngle']] = 0.0
            row[self.defs_dict['galaxyDisk_internalAv']] = 0.0
            row[self.defs_dict['galaxyDisk_magNorm']] = 999. #np.nan To be fixed post run1.1
            row[self.defs_dict['galaxyDisk_sedFilename']] = None
        row[self.defs_dict['galaxyAgn_magNorm']] = None #np.nan To be fixed post run1.1
        # NOTE(review): redundant with the assignment in the block above
        # when the disk magNorm was finite; kept as-is.
        row[self.defs_dict['galaxyDisk_magNorm']] = 999. # To be fixed in run1.1
        row[self.defs_dict['galaxyAgn_sedFilename']] = None
        #Now insert desired Bulge properties
        row[self.defs_dict['galaxyBulge_sedFilename']] = newlens['lens_sed']
        row[self.defs_dict['galaxyBulge_redshift']] = newlens['ZLENS']
        row[self.defs_dict['galaxyDisk_redshift']] = newlens['ZLENS']
        row[self.defs_dict['galaxyAgn_redshift']] = newlens['ZLENS']
        row_lens_sed = Sed()
        row_lens_sed.readSED_flambda(os.path.join(self.sedDir,
                                                  newlens['lens_sed']))
        row_lens_sed.redshiftSED(newlens['ZLENS'], dimming=True)
        row[self.defs_dict['galaxyBulge_magNorm']] = matchBase().calcMagNorm([newlens['APMAG_I']], row_lens_sed,
                                                                             self.bandpassDict) #Changed from i band to imsimband
        row[self.defs_dict['galaxyBulge_majorAxis']] = radiansFromArcsec(newlens['REFF'] / np.sqrt(1 - newlens['ELLIP']))
        row[self.defs_dict['galaxyBulge_minorAxis']] = radiansFromArcsec(newlens['REFF'] * np.sqrt(1 - newlens['ELLIP']))
        #Convert orientation angle to west of north from east of north by *-1.0 and convert to radians
        row[self.defs_dict['galaxyBulge_positionAngle']] = newlens['PHIE']*(-1.0)*np.pi/180.0
        if self.logging_is_sprinkled:
            row[self.defs_dict['galaxyAgn_is_sprinkled']] = 1
            row[self.defs_dict['galaxyBulge_is_sprinkled']] = 1
            row[self.defs_dict['galaxyDisk_is_sprinkled']] = 1
        #Replace original entry with new entry
        updated_catalog[rowNum] = row
    # ---- lensed SNe systems ----
    for rowNum in valid_sne:
        row = input_catalog[rowNum]
        galtileid = row[galid_dex]
        if self.cached_sprinkling is True:
            if galtileid in self.sne_cache['galtileid'].values:
                use_system = self.sne_cache.query('galtileid == %i' % galtileid)['twinkles_system'].values
                use_df = self.sne_catalog.query('twinkles_sysno == %i' % use_system)
                self.used_systems.append(use_system)
            else:
                continue
        else:
            lens_sne_candidates = self.find_sne_lens_candidates(row[self.defs_dict['galaxyDisk_redshift']])
            candidate_sysno = np.unique(lens_sne_candidates['twinkles_sysno'])
            num_candidates = len(candidate_sysno)
            if num_candidates == 0:
                continue
            # Each SNe system may be used at most once per catalog.
            used_already = np.array([sys_num in self.used_systems for sys_num in candidate_sysno])
            unused_sysno = candidate_sysno[~used_already]
            if len(unused_sysno) == 0:
                continue
            # NOTE(review): same '2^32' XOR issue as in the AGN loop.
            rng2 = np.random.RandomState(galtileid % (2^32 -1))
            use_system = rng2.choice(unused_sysno)
            use_df = self.sne_catalog.query('twinkles_sysno == %i' % use_system)
        # Template row for the lensed SNe images (see AGN loop above).
        default_lensrow = row.copy()
        default_lensrow[self.defs_dict['galaxyDisk_majorAxis']] = 0.0
        default_lensrow[self.defs_dict['galaxyDisk_minorAxis']] = 0.0
        default_lensrow[self.defs_dict['galaxyDisk_positionAngle']] = 0.0
        default_lensrow[self.defs_dict['galaxyDisk_internalAv']] = 0.0
        default_lensrow[self.defs_dict['galaxyDisk_magNorm']] = 999. #np.nan To be fixed post run1.1
        default_lensrow[self.defs_dict['galaxyDisk_sedFilename']] = None
        default_lensrow[self.defs_dict['galaxyBulge_majorAxis']] = 0.0
        default_lensrow[self.defs_dict['galaxyBulge_minorAxis']] = 0.0
        default_lensrow[self.defs_dict['galaxyBulge_positionAngle']] = 0.0
        default_lensrow[self.defs_dict['galaxyBulge_internalAv']] = 0.0
        default_lensrow[self.defs_dict['galaxyBulge_magNorm']] = 999. #np.nan To be fixed post run1.1
        default_lensrow[self.defs_dict['galaxyBulge_sedFilename']] = None
        varString = 'None'
        default_lensrow[self.defs_dict['galaxyAgn_varParamStr']] = varString
        for i in range(len(use_df)):
            lensrow = default_lensrow.copy()
            delta_ra = np.radians(use_df['x'].iloc[i] / 3600.0)
            delta_dec = np.radians(use_df['y'].iloc[i] / 3600.0)
            for lensPart in ['galaxyBulge', 'galaxyDisk', 'galaxyAgn']:
                lens_ra = lensrow[self.defs_dict[str(lensPart+'_raJ2000')]]
                lens_dec = lensrow[self.defs_dict[str(lensPart+'_decJ2000')]]
                lensrow[self.defs_dict[str(lensPart + '_raJ2000')]] = lens_ra + delta_ra/np.cos(lens_dec)
                lensrow[self.defs_dict[str(lensPart + '_decJ2000')]] = lens_dec + delta_dec
            z_s = use_df['zs'].iloc[i]
            lensrow[self.defs_dict['galaxyBulge_redshift']] = z_s
            lensrow[self.defs_dict['galaxyDisk_redshift']] = z_s
            lensrow[self.defs_dict['galaxyAgn_redshift']] = z_s
            #To get back twinklesID in lens catalog from phosim catalog id number
            #just use np.right_shift(phosimID-28, 10). Take the floor of the last
            #3 numbers to get twinklesID in the twinkles lens catalog and the remainder is
            #the image number minus 1.
            if not isinstance(self.defs_dict['galtileid'], tuple):
                lensrow[self.defs_dict['galtileid']] = ((lensrow[self.defs_dict['galtileid']]+int(1.5e10))*10000 +
                                                        use_system*4 + i)
            else:
                for col_name in self.defs_dict['galtileid']:
                    lensrow[col_name] = ((lensrow[col_name]+int(1.5e10))*10000 +
                                         use_system*4 + i)
            (add_to_cat, sn_magnorm,
             sn_fname, sn_param_dict) = self.create_sn_sed(use_df.iloc[i],
                                                           lensrow[self.defs_dict['galaxyAgn_raJ2000']],
                                                           lensrow[self.defs_dict['galaxyAgn_decJ2000']],
                                                           self.visit_mjd,
                                                           write_sn_sed=self.write_sn_sed)
            if self.store_sn_truth_params:
                add_to_cat = True
                lensrow[self.defs_dict['galaxyAgn_sn_truth_params']] = json.dumps(sn_param_dict)
                lensrow[self.defs_dict['galaxyAgn_sn_t0']] = sn_param_dict['t0']
            if not add_to_cat:
                # SN is not visible at this MJD and no truth params are
                # being stored: drop the image.
                continue
            lensrow[self.defs_dict['galaxyAgn_sedFilename']] = sn_fname
            lensrow[self.defs_dict['galaxyAgn_magNorm']] = sn_magnorm #This will need to be adjusted to proper band
            mag_adjust = 2.5*np.log10(np.abs(use_df['mu'].iloc[i]))
            lensrow[self.defs_dict['galaxyAgn_magNorm']] -= mag_adjust
            if self.logging_is_sprinkled:
                lensrow[self.defs_dict['galaxyAgn_is_sprinkled']] = 1
                lensrow[self.defs_dict['galaxyBulge_is_sprinkled']] = 1
                lensrow[self.defs_dict['galaxyDisk_is_sprinkled']] = 1
            if add_to_cat is True:
                new_rows.append(lensrow)
            else:
                continue
        #Now manipulate original entry to be the lens galaxy with desired properties
        #Start by deleting Disk and AGN properties
        if not np.isnan(row[self.defs_dict['galaxyDisk_magNorm']]):
            row[self.defs_dict['galaxyDisk_majorAxis']] = 0.0
            row[self.defs_dict['galaxyDisk_minorAxis']] = 0.0
            row[self.defs_dict['galaxyDisk_positionAngle']] = 0.0
            row[self.defs_dict['galaxyDisk_internalAv']] = 0.0
            row[self.defs_dict['galaxyDisk_magNorm']] = 999. #np.nan To be fixed post run1.1
            row[self.defs_dict['galaxyDisk_sedFilename']] = None
        row[self.defs_dict['galaxyAgn_magNorm']] = None #np.nan To be fixed post run1.1
        row[self.defs_dict['galaxyDisk_magNorm']] = 999. #To be fixed post run1.1
        row[self.defs_dict['galaxyAgn_sedFilename']] = None
        #Now insert desired Bulge properties
        row[self.defs_dict['galaxyBulge_sedFilename']] = use_df['lensgal_sed'].iloc[0]
        row[self.defs_dict['galaxyBulge_redshift']] = use_df['zl'].iloc[0]
        row[self.defs_dict['galaxyDisk_redshift']] = use_df['zl'].iloc[0]
        row[self.defs_dict['galaxyAgn_redshift']] = use_df['zl'].iloc[0]
        row[self.defs_dict['galaxyBulge_magNorm']] = use_df['lensgal_magnorm'].iloc[0]
        row[self.defs_dict['galaxyBulge_majorAxis']] = radiansFromArcsec(use_df['lensgal_reff'].iloc[0] / np.sqrt(1 - use_df['e'].iloc[0]))
        row[self.defs_dict['galaxyBulge_minorAxis']] = radiansFromArcsec(use_df['lensgal_reff'].iloc[0] * np.sqrt(1 - use_df['e'].iloc[0]))
        #Convert orientation angle to west of north from east of north by *-1.0 and convert to radians
        row[self.defs_dict['galaxyBulge_positionAngle']] = use_df['theta_e'].iloc[0]*(-1.0)*np.pi/180.0
        if self.logging_is_sprinkled:
            row[self.defs_dict['galaxyAgn_is_sprinkled']] = 1
            row[self.defs_dict['galaxyBulge_is_sprinkled']] = 1
            row[self.defs_dict['galaxyDisk_is_sprinkled']] = 1
        #Replace original entry with new entry
        updated_catalog[rowNum] = row
    if len(new_rows)>0:
        updated_catalog = np.append(updated_catalog, new_rows)
    return updated_catalog
def find_lens_candidates(self, galz, gal_mag):
    """Select OM10 systems compatible with a given source galaxy.

    A system matches when its source redshift lies within 0.1 dex of
    ``galz`` and its source magNorm lies within 0.25 mag of ``gal_mag``.
    Returns the matching subset of ``self.lenscat``.
    """
    z_close = np.abs(np.log10(self.lenscat['ZSRC']) - np.log10(galz)) <= 0.1
    mag_close = np.abs(self.src_mag_norm - gal_mag) <= .25
    matches = np.where(z_close & mag_close)[0]
    return self.lenscat[matches]
def find_sne_lens_candidates(self, galz):
    """Select SNe systems whose source redshift is within 0.1 dex of ``galz``."""
    dex_sep = np.abs(np.log10(self.sne_catalog['zs']) - np.log10(galz))
    keep = np.where(dex_sep <= 0.1)
    return self.sne_catalog.iloc[keep]
def create_sn_sed(self, system_df, sn_ra, sn_dec, sed_mjd, write_sn_sed=True):
    """Build (and optionally write) the SED of one lensed-SN image at a visit.

    Parameters
    ----------
    system_df : pandas Series
        One image row of the SNe system catalog (supplies zs, c, x0,
        x1, t_start, twinkles_sysno, imno).
    sn_ra, sn_dec : float
        Position of the lensed image.
    sed_mjd : float
        MJD at which the SED is evaluated.
    write_sn_sed : boolean
        When True and the SN is visible, write a gzipped SED file
        under ``self.sed_path``.

    Returns
    -------
    (add_to_cat, sn_magnorm, sn_name, sn_state) : tuple
        ``add_to_cat`` is True when the SN has positive flux at 500 nm;
        ``sn_magnorm`` is the imsim-band magNorm (NaN when invisible);
        ``sn_name`` is the written SED filename or None; ``sn_state``
        is the SNObject state dict.
    """
    state = copy.deepcopy(self.sn_obj.SNstate)
    state['_ra'] = sn_ra
    state['_dec'] = sn_dec
    # Map the catalog columns onto the SNObject parameter names.
    for state_key, df_key in (('z', 'zs'), ('c', 'c'), ('x0', 'x0'),
                              ('x1', 'x1'), ('t0', 't_start')):
        state[state_key] = system_df[df_key]
    current_sn_obj = self.sn_obj.fromSNState(state)
    current_sn_obj.mwEBVfromMaps()
    # Evaluate the SED on a fixed wavelength grid (nm).
    wavelengths = np.arange(30., 1800., 0.1)
    sn_sed_obj = current_sn_obj.SNObjectSED(time=sed_mjd, wavelen=wavelengths)
    # Use the flux at ~500 nm as the visibility criterion.
    flux_500 = sn_sed_obj.flambda[np.where(sn_sed_obj.wavelen >= 499.99)][0]
    if flux_500 <= 0.:
        # SN not visible at this MJD.
        return False, np.nan, None, current_sn_obj.SNstate
    sn_magnorm = current_sn_obj.catsimBandMag(self.imSimBand, sed_mjd)
    sn_name = None
    if write_sn_sed:
        sn_name = 'specFileGLSN_%i_%i_%.4f.txt' % (system_df['twinkles_sysno'],
                                                   system_df['imno'], sed_mjd)
        sed_filename = '%s/%s' % (self.sed_path, sn_name)
        # Write the plain-text SED, gzip it, and remove the original.
        sn_sed_obj.writeSED(sed_filename)
        with open(sed_filename, 'rb') as f_in, gzip.open(str(sed_filename + '.gz'), 'wb') as f_out:
            shutil.copyfileobj(f_in, f_out)
        os.remove(sed_filename)
    return True, sn_magnorm, sn_name, current_sn_obj.SNstate
def update_catsim(self):
    # Placeholder -- not implemented.  Intended steps per the original
    # design notes:
    # Remove the catsim object
    # Add lensed images to the catsim given source brightness and magnifications
    # Add lens galaxy to catsim
    return
def catsim_to_phosim(self):
    # Placeholder -- not implemented.
    # Pass this catsim to phosim to make images
    return
# fix typo  (stray commit-message line from the source concatenation; not Python -- commented out)
'''
Created on Feb 6, 2015
@author: cmccully
'''
from __future__ import absolute_import, division, print_function
from future.utils import iteritems
import om10
import numpy as np
import re
import json
import os
import pandas as pd
import copy
import gzip
import shutil
from lsst.utils import getPackageDir
from lsst.sims.utils import SpecMap, defaultSpecMap
from lsst.sims.catUtils.baseCatalogModels import GalaxyTileCompoundObj
from lsst.sims.catUtils.matchSED import matchBase
from lsst.sims.photUtils import Bandpass, BandpassDict, Sed
from lsst.sims.utils import radiansFromArcsec
from lsst.sims.catUtils.supernovae import SNObject
__all__ = ['sprinklerCompound', 'sprinkler']
class sprinklerCompound(GalaxyTileCompoundObj):
    """Compound galaxy DBObject that applies the Twinkles sprinkler.

    In ``_final_pass`` the queried galaxy rows are converted to radians
    and then handed to the ``sprinkler`` class defined in this module,
    which swaps eligible galaxies for strongly-lensed AGN/SNe systems.
    """
    objid = 'sprinklerCompound'
    objectTypeId = 66
    # Sprinkler configuration; expected to be assigned by the caller
    # before catalog generation.
    cached_sprinkling = False
    agn_cache_file = None
    sne_cache_file = None
    defs_file = None
    sed_path = None

    def _final_pass(self, results):
        """Convert angle columns to radians and sprinkle the results."""
        # From the original GalaxyTileCompoundObj final pass method
        for name in results.dtype.fields:
            if 'raJ2000' in name or 'decJ2000' in name:
                results[name] = np.radians(results[name])
        # the stored procedure on fatboy that queries the galaxies
        # constructs galtileid by taking
        #
        # tileid*10^8 + galid
        #
        # this causes galtileid to be so large that the uniqueIDs in the
        # Twinkles InstanceCatalogs are too large for PhoSim to handle.
        # Since Twinkles is only focused on one tile on the sky, we will remove
        # the factor of 10^8, making the uniqueIDs a more manageable size
        # results['galtileid'] = results['galtileid']#%100000000
        # Use Sprinkler now
        sp = sprinkler(results, self.mjd, self.specFileMap, self.sed_path,
                       density_param=1.0,
                       cached_sprinkling=self.cached_sprinkling,
                       agn_cache_file=self.agn_cache_file,
                       sne_cache_file=self.sne_cache_file,
                       defs_file=self.defs_file)
        # BUG FIX: sprinkler.sprinkle(self, input_catalog) requires the
        # catalog rows as an argument; the previous zero-argument call
        # would raise TypeError.
        results = sp.sprinkle(results)
        return results
class sprinkler():
def __init__(self, catsim_cat, visit_mjd, specFileMap, sed_path,
             om10_cat='twinkles_lenses_v2.fits',
             sne_cat = 'dc2_sne_cat.csv', density_param=1., cached_sprinkling=False,
             agn_cache_file=None, sne_cache_file=None, defs_file=None,
             write_sn_sed=True):
    """Load the lens catalogs and column definitions used for sprinkling.

    Parameters
    ----------
    catsim_cat: catsim catalog
        The results array from an instance catalog.
    visit_mjd: float
        The mjd of the visit
    specFileMap:
        This will tell the instance catalog where to write the files
    sed_path: str
        Directory where supernova SED files will be written.
    om10_cat: optional, defaults to 'twinkles_lenses_v2.fits'
        fits file with OM10 catalog
    sne_cat: optional, defaults to 'dc2_sne_cat.csv'
        csv file with the lensed-SNe system catalog
    density_param: `np.float`, optional, defaults to 1.0
        the fraction of eligible agn objects that become lensed and should
        be between 0.0 and 1.0.
    cached_sprinkling: boolean
        If true then pick from a preselected list of galtileids
    agn_cache_file: str
        csv of galtileid/twinkles_system pairs for cached AGN sprinkling;
        required when ``cached_sprinkling`` is True.
    sne_cache_file: str
        csv of galtileid/twinkles_system pairs for cached SNe sprinkling;
        required when ``cached_sprinkling`` is True.
    defs_file: str
        csv mapping sprinkler column names to instance-catalog columns;
        defaults to data/catsim_defs.csv in the Twinkles package.
    write_sn_sed: boolean
        Controls whether or not to actually write supernova
        SEDs to disk (default=True)
    """
    twinklesDir = getPackageDir('Twinkles')
    om10_cat = os.path.join(twinklesDir, 'data', om10_cat)
    self.write_sn_sed = write_sn_sed
    self.catalog_column_names = catsim_cat.dtype.names
    # ****** THIS ASSUMES THAT THE ENVIRONMENT VARIABLE OM10_DIR IS SET *******
    lensdb = om10.DB(catalog=om10_cat, vb=False)
    self.lenscat = lensdb.lenses.copy()
    self.density_param = density_param
    self.bandpassDict = BandpassDict.loadTotalBandpassesFromFiles(bandpassNames=['i'])
    self.sne_catalog = pd.read_csv(os.path.join(twinklesDir, 'data', sne_cat))
    #self.sne_catalog = self.sne_catalog.iloc[:101] ### Remove this after testing
    self.used_systems = []
    self._visit_mjd = visit_mjd
    self.sn_obj = SNObject(0., 0.)
    self.write_dir = specFileMap.subdir_map['(^specFileGLSN)']
    self.sed_path = sed_path
    self.cached_sprinkling = cached_sprinkling
    if self.cached_sprinkling is True:
        # Cached sprinkling requires both cache files.
        if ((agn_cache_file is None) | (sne_cache_file is None)):
            raise AttributeError('Must specify cache files if using cached_sprinkling.')
        #agn_cache_file = os.path.join(twinklesDir, 'data', 'test_agn_galtile_cache.csv')
        self.agn_cache = pd.read_csv(agn_cache_file)
        #sne_cache_file = os.path.join(twinklesDir, 'data', 'test_sne_galtile_cache.csv')
        self.sne_cache = pd.read_csv(sne_cache_file)
    else:
        self.agn_cache = None
        self.sne_cache = None
    if defs_file is None:
        self.defs_file = os.path.join(twinklesDir, 'data', 'catsim_defs.csv')
    else:
        self.defs_file = defs_file
    self.sedDir = getPackageDir('sims_sed_library')
    self.imSimBand = Bandpass()
    self.imSimBand.imsimBandpass()
    # Calculate imsimband magnitudes of source galaxies for matching
    agn_fname = str(getPackageDir('sims_sed_library') + '/agnSED/agn.spec.gz')
    src_iband = self.lenscat['MAGI_IN']
    src_z = self.lenscat['ZSRC']
    self.src_mag_norm = []
    for src, s_z in zip(src_iband, src_z):
        # A fresh Sed per system: redshiftSED mutates the object.
        agn_sed = Sed()
        agn_sed.readSED_flambda(agn_fname)
        agn_sed.redshiftSED(s_z, dimming=True)
        self.src_mag_norm.append(matchBase().calcMagNorm([src],
                                                         agn_sed,
                                                         self.bandpassDict))
    # Detect whether the instance catalog carries SN truth-parameter
    # columns; if so (and the defs file maps them) they are filled in
    # during sprinkling.
    has_sn_truth_params = False
    for name in self.catalog_column_names:
        if 'sn_truth_params' in name:
            has_sn_truth_params = True
            break
    # Parse the defs csv: each line maps a sprinkler name to one or more
    # instance-catalog column names (a tuple when more than one).
    self.defs_dict = {}
    self.logging_is_sprinkled = False
    self.store_sn_truth_params = False
    with open(self.defs_file, 'r') as f:
        for line in f:
            line_defs = line.strip().split(',')
            if len(line_defs) > 1:
                if 'is_sprinkled' in line_defs[1]:
                    self.logging_is_sprinkled = True
                if 'sn_truth_params' in line_defs[1] and has_sn_truth_params:
                    self.store_sn_truth_params = True
                if len(line_defs) == 2:
                    self.defs_dict[line_defs[0]] = line_defs[1]
                else:
                    self.defs_dict[line_defs[0]] = tuple((ll for ll in line_defs[1:]))
@property
def visit_mjd(self):
    """float: the MJD of the visit this sprinkler instance targets."""
    return self._visit_mjd

@visit_mjd.setter
def visit_mjd(self, mjd_value):
    """Retarget the sprinkler to another visit MJD."""
    self._visit_mjd = mjd_value
def sprinkle(self, input_catalog):
# Define a list that we can write out to a text file
lenslines = []
# For each galaxy in the catsim catalog
updated_catalog = input_catalog.copy()
if isinstance(self.defs_dict['galtileid'], tuple):
galid_dex = self.defs_dict['galtileid'][0]
else:
galid_dex = self.defs_dict['galtileid']
agn_magnorm_dex = self.defs_dict['galaxyAgn_magNorm']
agn_magnorm_array = np.array([row[agn_magnorm_dex] for row in input_catalog])
nan_magnorm = np.isnan(agn_magnorm_array)
if self.cached_sprinkling:
if not hasattr(self, '_unq_agn_gid'):
self._unq_agn_gid = np.unique(self.agn_cache['galtileid'].values)
self._unq_sne_gid = np.unique(self.sne_cache['galtileid'].values)
galtileid_array = np.array([row[galid_dex] for row in input_catalog])
valid_agn = np.where(np.logical_and(np.logical_not(nan_magnorm),
np.in1d(galtileid_array,
self._unq_agn_gid,
assume_unique=True)))[0]
valid_sne = np.where(np.logical_and(nan_magnorm,
np.in1d(galtileid_array,
self._unq_sne_gid,
assume_unique=True)))[0]
else:
valid_agn = np.where(np.logical_not(nan_magnorm))[0]
valid_sne = np.where(nan_magnorm)[0]
new_rows = []
# print("Running sprinkler. Catalog Length: ", len(input_catalog))
for rowNum in valid_agn:
row = input_catalog[rowNum]
galtileid = row[galid_dex]
if not self.cached_sprinkling:
candidates = self.find_lens_candidates(row[self.defs_dict['galaxyAgn_redshift']],
row[self.defs_dict['galaxyAgn_magNorm']])
rng = np.random.RandomState(galtileid % (2^32 -1))
pick_value = rng.uniform()
if len(candidates) == 0 or pick_value>self.density_param:
# If there aren't any lensed sources at this redshift from
# OM10 move on the next object
continue
# Randomly choose one the lens systems
# (can decide with or without replacement)
# Sort first to make sure the same choice is made every time
candidates = candidates[np.argsort(candidates['twinklesId'])]
newlens = rng.choice(candidates)
else:
twinkles_sys_cache = self.agn_cache.query('galtileid == %i' % galtileid)['twinkles_system'].values[0]
newlens = self.lenscat[np.where(self.lenscat['twinklesId'] == twinkles_sys_cache)[0]][0]
#varString = json.loads(row[self.defs_dict['galaxyAgn_varParamStr']])
# varString[self.defs_dict['pars']]['t0_mjd'] = 59300.0
#row[self.defs_dict['galaxyAgn_varParamStr']] = json.dumps(varString)
# Append the lens galaxy
# For each image, append the lens images
default_lensrow = None
if newlens['NIMG'] > 0:
default_lensrow = row.copy()
default_lensrow[self.defs_dict['galaxyDisk_majorAxis']] = 0.0
default_lensrow[self.defs_dict['galaxyDisk_minorAxis']] = 0.0
default_lensrow[self.defs_dict['galaxyDisk_positionAngle']] = 0.0
default_lensrow[self.defs_dict['galaxyDisk_internalAv']] = 0.0
default_lensrow[self.defs_dict['galaxyDisk_magNorm']] = 999. #np.nan To be fixed post run1.1
default_lensrow[self.defs_dict['galaxyDisk_sedFilename']] = None
default_lensrow[self.defs_dict['galaxyBulge_majorAxis']] = 0.0
default_lensrow[self.defs_dict['galaxyBulge_minorAxis']] = 0.0
default_lensrow[self.defs_dict['galaxyBulge_positionAngle']] = 0.0
default_lensrow[self.defs_dict['galaxyBulge_internalAv']] = 0.0
default_lensrow[self.defs_dict['galaxyBulge_magNorm']] = 999. #np.nan To be fixed post run1.1
default_lensrow[self.defs_dict['galaxyBulge_sedFilename']] = None
default_lensrow[self.defs_dict['galaxyBulge_redshift']] = newlens['ZSRC']
default_lensrow[self.defs_dict['galaxyDisk_redshift']] = newlens['ZSRC']
default_lensrow[self.defs_dict['galaxyAgn_redshift']] = newlens['ZSRC']
for i in range(newlens['NIMG']):
lensrow = default_lensrow.copy()
# XIMG and YIMG are in arcseconds
# raPhSim and decPhoSim are in radians
# Shift all parts of the lensed object,
# not just its agn part
delta_dec = np.radians(newlens['YIMG'][i] / 3600.0)
delta_ra = np.radians(newlens['XIMG'][i] / 3600.0)
for lensPart in ['galaxyBulge', 'galaxyDisk', 'galaxyAgn']:
lens_ra = lensrow[self.defs_dict[str(lensPart+'_raJ2000')]]
lens_dec = lensrow[self.defs_dict[str(lensPart+'_decJ2000')]]
lensrow[self.defs_dict[str(lensPart + '_raJ2000')]] = lens_ra + delta_ra/np.cos(lens_dec)
lensrow[self.defs_dict[str(lensPart + '_decJ2000')]] = lens_dec + delta_dec
mag_adjust = 2.5*np.log10(np.abs(newlens['MAG'][i]))
lensrow[self.defs_dict['galaxyAgn_magNorm']] -= mag_adjust
varString = json.loads(lensrow[self.defs_dict['galaxyAgn_varParamStr']])
varString[self.defs_dict['pars']]['t0Delay'] = newlens['DELAY'][i]
varString[self.defs_dict['varMethodName']] = 'applyAgnTimeDelay'
lensrow[self.defs_dict['galaxyAgn_varParamStr']] = json.dumps(varString)
if self.logging_is_sprinkled:
lensrow[self.defs_dict['galaxyAgn_is_sprinkled']] = 1
lensrow[self.defs_dict['galaxyBulge_is_sprinkled']] = 1
lensrow[self.defs_dict['galaxyDisk_is_sprinkled']] = 1
#To get back twinklesID in lens catalog from phosim catalog id number
#just use np.right_shift(phosimID-28, 10). Take the floor of the last
#3 numbers to get twinklesID in the twinkles lens catalog and the remainder is
#the image number minus 1.
if not isinstance(self.defs_dict['galtileid'], tuple):
lensrow[self.defs_dict['galtileid']] = ((lensrow[self.defs_dict['galtileid']]+int(1.5e10))*10000 +
newlens['twinklesId']*4 + i)
else:
for col_name in self.defs_dict['galtileid']:
lensrow[col_name] = ((lensrow[col_name]+int(1.5e10))*10000 +
newlens['twinklesId']*4 + i)
new_rows.append(lensrow)
#Now manipulate original entry to be the lens galaxy with desired properties
#Start by deleting Disk and AGN properties
if not np.isnan(row[self.defs_dict['galaxyDisk_magNorm']]):
row[self.defs_dict['galaxyDisk_majorAxis']] = 0.0
row[self.defs_dict['galaxyDisk_minorAxis']] = 0.0
row[self.defs_dict['galaxyDisk_positionAngle']] = 0.0
row[self.defs_dict['galaxyDisk_internalAv']] = 0.0
row[self.defs_dict['galaxyDisk_magNorm']] = 999. #np.nan To be fixed post run1.1
row[self.defs_dict['galaxyDisk_sedFilename']] = None
row[self.defs_dict['galaxyAgn_magNorm']] = None #np.nan To be fixed post run1.1
row[self.defs_dict['galaxyDisk_magNorm']] = 999. # To be fixed in run1.1
row[self.defs_dict['galaxyAgn_sedFilename']] = None
#Now insert desired Bulge properties
row[self.defs_dict['galaxyBulge_sedFilename']] = newlens['lens_sed']
row[self.defs_dict['galaxyBulge_redshift']] = newlens['ZLENS']
row[self.defs_dict['galaxyDisk_redshift']] = newlens['ZLENS']
row[self.defs_dict['galaxyAgn_redshift']] = newlens['ZLENS']
row_lens_sed = Sed()
row_lens_sed.readSED_flambda(os.path.join(self.sedDir,
newlens['lens_sed']))
row_lens_sed.redshiftSED(newlens['ZLENS'], dimming=True)
row[self.defs_dict['galaxyBulge_magNorm']] = matchBase().calcMagNorm([newlens['APMAG_I']], row_lens_sed,
self.bandpassDict) #Changed from i band to imsimband
row[self.defs_dict['galaxyBulge_majorAxis']] = radiansFromArcsec(newlens['REFF'] / np.sqrt(1 - newlens['ELLIP']))
row[self.defs_dict['galaxyBulge_minorAxis']] = radiansFromArcsec(newlens['REFF'] * np.sqrt(1 - newlens['ELLIP']))
#Convert orientation angle to west of north from east of north by *-1.0 and convert to radians
row[self.defs_dict['galaxyBulge_positionAngle']] = newlens['PHIE']*(-1.0)*np.pi/180.0
if self.logging_is_sprinkled:
row[self.defs_dict['galaxyAgn_is_sprinkled']] = 1
row[self.defs_dict['galaxyBulge_is_sprinkled']] = 1
row[self.defs_dict['galaxyDisk_is_sprinkled']] = 1
#Replace original entry with new entry
updated_catalog[rowNum] = row
for rowNum in valid_sne:
row = input_catalog[rowNum]
galtileid = row[galid_dex]
if self.cached_sprinkling is True:
if galtileid in self.sne_cache['galtileid'].values:
use_system = self.sne_cache.query('galtileid == %i' % galtileid)['twinkles_system'].values
use_df = self.sne_catalog.query('twinkles_sysno == %i' % use_system)
self.used_systems.append(use_system)
else:
continue
else:
lens_sne_candidates = self.find_sne_lens_candidates(row[self.defs_dict['galaxyDisk_redshift']])
candidate_sysno = np.unique(lens_sne_candidates['twinkles_sysno'])
num_candidates = len(candidate_sysno)
if num_candidates == 0:
continue
used_already = np.array([sys_num in self.used_systems for sys_num in candidate_sysno])
unused_sysno = candidate_sysno[~used_already]
if len(unused_sysno) == 0:
continue
rng2 = np.random.RandomState(galtileid % (2^32 -1))
use_system = rng2.choice(unused_sysno)
use_df = self.sne_catalog.query('twinkles_sysno == %i' % use_system)
default_lensrow = row.copy()
default_lensrow[self.defs_dict['galaxyDisk_majorAxis']] = 0.0
default_lensrow[self.defs_dict['galaxyDisk_minorAxis']] = 0.0
default_lensrow[self.defs_dict['galaxyDisk_positionAngle']] = 0.0
default_lensrow[self.defs_dict['galaxyDisk_internalAv']] = 0.0
default_lensrow[self.defs_dict['galaxyDisk_magNorm']] = 999. #np.nan To be fixed post run1.1
default_lensrow[self.defs_dict['galaxyDisk_sedFilename']] = None
default_lensrow[self.defs_dict['galaxyBulge_majorAxis']] = 0.0
default_lensrow[self.defs_dict['galaxyBulge_minorAxis']] = 0.0
default_lensrow[self.defs_dict['galaxyBulge_positionAngle']] = 0.0
default_lensrow[self.defs_dict['galaxyBulge_internalAv']] = 0.0
default_lensrow[self.defs_dict['galaxyBulge_magNorm']] = 999. #np.nan To be fixed post run1.1
default_lensrow[self.defs_dict['galaxyBulge_sedFilename']] = None
varString = 'None'
default_lensrow[self.defs_dict['galaxyAgn_varParamStr']] = varString
for i in range(len(use_df)):
lensrow = default_lensrow.copy()
delta_ra = np.radians(use_df['x'].iloc[i] / 3600.0)
delta_dec = np.radians(use_df['y'].iloc[i] / 3600.0)
for lensPart in ['galaxyBulge', 'galaxyDisk', 'galaxyAgn']:
lens_ra = lensrow[self.defs_dict[str(lensPart+'_raJ2000')]]
lens_dec = lensrow[self.defs_dict[str(lensPart+'_decJ2000')]]
lensrow[self.defs_dict[str(lensPart + '_raJ2000')]] = lens_ra + delta_ra/np.cos(lens_dec)
lensrow[self.defs_dict[str(lensPart + '_decJ2000')]] = lens_dec + delta_dec
# varString = json.loads(lensrow[self.defs_dict['galaxyAgn_varParamStr']])
z_s = use_df['zs'].iloc[i]
lensrow[self.defs_dict['galaxyBulge_redshift']] = z_s
lensrow[self.defs_dict['galaxyDisk_redshift']] = z_s
lensrow[self.defs_dict['galaxyAgn_redshift']] = z_s
#To get back twinklesID in lens catalog from phosim catalog id number
#just use np.right_shift(phosimID-28, 10). Take the floor of the last
#3 numbers to get twinklesID in the twinkles lens catalog and the remainder is
#the image number minus 1.
if not isinstance(self.defs_dict['galtileid'], tuple):
lensrow[self.defs_dict['galtileid']] = ((lensrow[self.defs_dict['galtileid']]+int(1.5e10))*10000 +
use_system*4 + i)
else:
for col_name in self.defs_dict['galtileid']:
lensrow[col_name] = ((lensrow[col_name]+int(1.5e10))*10000 +
use_system*4 + i)
(add_to_cat, sn_magnorm,
sn_fname, sn_param_dict) = self.create_sn_sed(use_df.iloc[i],
lensrow[self.defs_dict['galaxyAgn_raJ2000']],
lensrow[self.defs_dict['galaxyAgn_decJ2000']],
self.visit_mjd,
write_sn_sed=self.write_sn_sed)
if self.store_sn_truth_params:
add_to_cat = True
lensrow[self.defs_dict['galaxyAgn_sn_truth_params']] = json.dumps(sn_param_dict)
lensrow[self.defs_dict['galaxyAgn_sn_t0']] = sn_param_dict['t0']
if not add_to_cat:
continue
lensrow[self.defs_dict['galaxyAgn_sedFilename']] = sn_fname
lensrow[self.defs_dict['galaxyAgn_magNorm']] = sn_magnorm #This will need to be adjusted to proper band
mag_adjust = 2.5*np.log10(np.abs(use_df['mu'].iloc[i]))
lensrow[self.defs_dict['galaxyAgn_magNorm']] -= mag_adjust
if self.logging_is_sprinkled:
lensrow[self.defs_dict['galaxyAgn_is_sprinkled']] = 1
lensrow[self.defs_dict['galaxyBulge_is_sprinkled']] = 1
lensrow[self.defs_dict['galaxyDisk_is_sprinkled']] = 1
if add_to_cat is True:
new_rows.append(lensrow)
else:
continue
#Now manipulate original entry to be the lens galaxy with desired properties
#Start by deleting Disk and AGN properties
if not np.isnan(row[self.defs_dict['galaxyDisk_magNorm']]):
row[self.defs_dict['galaxyDisk_majorAxis']] = 0.0
row[self.defs_dict['galaxyDisk_minorAxis']] = 0.0
row[self.defs_dict['galaxyDisk_positionAngle']] = 0.0
row[self.defs_dict['galaxyDisk_internalAv']] = 0.0
row[self.defs_dict['galaxyDisk_magNorm']] = 999. #np.nan To be fixed post run1.1
row[self.defs_dict['galaxyDisk_sedFilename']] = None
row[self.defs_dict['galaxyAgn_magNorm']] = None #np.nan To be fixed post run1.1
row[self.defs_dict['galaxyDisk_magNorm']] = 999. #To be fixed post run1.1
row[self.defs_dict['galaxyAgn_sedFilename']] = None
#Now insert desired Bulge properties
row[self.defs_dict['galaxyBulge_sedFilename']] = use_df['lensgal_sed'].iloc[0]
row[self.defs_dict['galaxyBulge_redshift']] = use_df['zl'].iloc[0]
row[self.defs_dict['galaxyDisk_redshift']] = use_df['zl'].iloc[0]
row[self.defs_dict['galaxyAgn_redshift']] = use_df['zl'].iloc[0]
row[self.defs_dict['galaxyBulge_magNorm']] = use_df['lensgal_magnorm'].iloc[0]
# row[self.defs_dict['galaxyBulge_magNorm']] = matchBase().calcMagNorm([newlens['APMAG_I']], self.LRG, self.bandpassDict) #Changed from i band to imsimband
row[self.defs_dict['galaxyBulge_majorAxis']] = radiansFromArcsec(use_df['lensgal_reff'].iloc[0] / np.sqrt(1 - use_df['e'].iloc[0]))
row[self.defs_dict['galaxyBulge_minorAxis']] = radiansFromArcsec(use_df['lensgal_reff'].iloc[0] * np.sqrt(1 - use_df['e'].iloc[0]))
#Convert orientation angle to west of north from east of north by *-1.0 and convert to radians
row[self.defs_dict['galaxyBulge_positionAngle']] = use_df['theta_e'].iloc[0]*(-1.0)*np.pi/180.0
if self.logging_is_sprinkled:
row[self.defs_dict['galaxyAgn_is_sprinkled']] = 1
row[self.defs_dict['galaxyBulge_is_sprinkled']] = 1
row[self.defs_dict['galaxyDisk_is_sprinkled']] = 1
#Replace original entry with new entry
updated_catalog[rowNum] = row
if len(new_rows)>0:
updated_catalog = np.append(updated_catalog, new_rows)
return updated_catalog
def find_lens_candidates(self, galz, gal_mag):
# search the OM10 catalog for all sources +- 0.1 dex in redshift
# and within .25 mags of the CATSIM source
w = np.where((np.abs(np.log10(self.lenscat['ZSRC']) - np.log10(galz)) <= 0.1) &
(np.abs(self.src_mag_norm - gal_mag) <= .25))[0]
lens_candidates = self.lenscat[w]
return lens_candidates
def find_sne_lens_candidates(self, galz):
w = np.where((np.abs(np.log10(self.sne_catalog['zs']) - np.log10(galz)) <= 0.1))
lens_candidates = self.sne_catalog.iloc[w]
return lens_candidates
    def create_sn_sed(self, system_df, sn_ra, sn_dec, sed_mjd, write_sn_sed=True):
        """Evaluate (and optionally write to disk) the SED of one lensed-SN image.

        Parameters
        ----------
        system_df : mapping (e.g. pandas Series)
            One image of a sprinkled SN system; must provide the SALT2
            parameters 'zs', 'c', 'x0', 'x1', 't_start', plus
            'twinkles_sysno' and 'imno' used to name the output file.
        sn_ra, sn_dec : float
            Position of the SN image, stored into the SN state dict.
        sed_mjd : float
            MJD at which the SED is evaluated.
        write_sn_sed : bool, optional
            When True, write the SED as a gzipped text file under
            ``self.sed_path``.

        Returns
        -------
        tuple
            (add_to_cat, sn_magnorm, sn_name, sn_state). add_to_cat is
            False (with sn_magnorm = NaN and sn_name = None) when the SN
            has no flux in the first bin at/above ~500 on the wavelength
            grid, i.e. it is effectively invisible at this epoch.
        """
        # Clone the template SN state and override it with this system's
        # SALT2 parameters and image position.
        sn_param_dict = copy.deepcopy(self.sn_obj.SNstate)
        sn_param_dict['_ra'] = sn_ra
        sn_param_dict['_dec'] = sn_dec
        sn_param_dict['z'] = system_df['zs']
        sn_param_dict['c'] = system_df['c']
        sn_param_dict['x0'] = system_df['x0']
        sn_param_dict['x1'] = system_df['x1']
        sn_param_dict['t0'] = system_df['t_start']
        #sn_param_dict['t0'] = 62746.27 #+1500. ### For testing only
        current_sn_obj = self.sn_obj.fromSNState(sn_param_dict)
        # Apply Milky Way dust extinction from the dust maps.
        current_sn_obj.mwEBVfromMaps()
        # Wavelength grid bounds/step -- presumably nm; TODO confirm units
        # against the SNObjectSED API.
        wavelen_max = 1800.
        wavelen_min = 30.
        wavelen_step = 0.1
        sn_sed_obj = current_sn_obj.SNObjectSED(time=sed_mjd,
                                                wavelen=np.arange(wavelen_min, wavelen_max,
                                                                  wavelen_step))
        # Flux in the first grid bin at/above ~500; zero or negative means
        # the SN contributes nothing worth cataloging at this epoch.
        flux_500 = sn_sed_obj.flambda[np.where(sn_sed_obj.wavelen >= 499.99)][0]
        if flux_500 > 0.:
            add_to_cat = True
            sn_magnorm = current_sn_obj.catsimBandMag(self.imSimBand, sed_mjd)
            sn_name = None
            if write_sn_sed:
                sn_name = 'specFileGLSN_%i_%i_%.4f.txt' % (system_df['twinkles_sysno'],
                                                           system_df['imno'], sed_mjd)
                sed_filename = '%s/%s' % (self.sed_path, sn_name)
                sn_sed_obj.writeSED(sed_filename)
                # Gzip the written SED, then remove the uncompressed copy.
                with open(sed_filename, 'rb') as f_in, gzip.open(str(sed_filename + '.gz'), 'wb') as f_out:
                    shutil.copyfileobj(f_in, f_out)
                os.remove(sed_filename)
        else:
            add_to_cat = False
            sn_magnorm = np.nan
            sn_name = None
        return add_to_cat, sn_magnorm, sn_name, current_sn_obj.SNstate
def update_catsim(self):
# Remove the catsim object
# Add lensed images to the catsim given source brightness and magnifications
# Add lens galaxy to catsim
return
def catsim_to_phosim(self):
# Pass this catsim to phosim to make images
return
|
"""
Run GradeFast.
Licensed under the MIT License. For more, see the LICENSE file.
Author: Jake Hartz <jake@hartz.io>
"""
import threading
import time
import webbrowser
from typing import Sequence
from iochannels import Channel, Msg
from pyprovide import Injector
from gradefast.gradebook.gradebook import GradeBook
from gradefast.grader.grader import Grader
from gradefast.loggingwrapper import get_logger
from gradefast.models import Path, Settings
from gradefast.persister import Persister
_logger = get_logger("run")
class LazyUserError(Exception):
    """Raised when the user declines to provide any submissions to grade."""
    pass
def _try_run_gradebook(gradebook: GradeBook) -> None:
    """Run the GradeBook server, logging any crash instead of propagating it.

    Used as a daemon-thread target, where an uncaught exception would be
    lost silently; logging is the only useful failure channel here.
    """
    logger = get_logger("run: gradebook")
    try:
        gradebook.run(debug=True)
    except Exception:
        # Narrowed from a bare "except:": SystemExit/KeyboardInterrupt
        # should not be reported as gradebook server errors.
        logger.exception("Exception when running gradebook server")
def run_gradefast(injector: Injector, submission_paths: Sequence[Path]) -> None:
    """Entry point: run GradeFast, guaranteeing resource cleanup.

    Parameters
    ----------
    injector : Injector
        Dependency injector used to construct the Channel, Persister, etc.
    submission_paths : Sequence[Path]
        Folders to scan for submissions.
    """
    # Initialize the Channel used to communicate via the CLI (if we haven't already)
    channel = injector.get_instance(Channel)
    # Initialize the Persister (i.e. save file wrapper)
    persister = injector.get_instance(Persister)
    # Wrap the rest in a try-finally to ensure the channel and persister get cleaned up properly
    try:
        _logger.debug("Running GradeFast")
        _run_gradefast_internal(injector, submission_paths)
    except Exception:
        # Narrowed from a bare "except:" so that SystemExit and
        # KeyboardInterrupt still propagate (after "finally" cleanup).
        _logger.exception("Error running GradeFast")
    finally:
        _logger.debug("Closing resources")
        persister.close()
        channel.close()
def _run_gradefast_internal(injector: Injector, submission_paths: Sequence[Path]) -> None:
    """Start the gradebook server (if enabled) and run the grading session.

    Blocks until grading finishes and the user confirms server shutdown.
    Resource cleanup is the caller's (run_gradefast's) responsibility.
    """
    settings = injector.get_instance(Settings)
    channel = injector.get_instance(Channel)
    if settings.gradebook_enabled:
        # Create and start the GradeBook WSGI server in a new thread
        gradebook = injector.get_instance(GradeBook)
        threading.Thread(
            name="GradeBookTh",
            target=_try_run_gradebook,
            args=(gradebook,),
            daemon=True
        ).start()
        # Sleep for a bit to give the web server some time to catch up
        time.sleep(0.4)
    # Wrap the rest in a `try` so that exceptions in the Grader don't kill everything
    # (i.e. the web server will still be running)
    try:
        # Start the grader before showing the gradebook URL so "AuthRequestedEvent"s don't fall
        # away into the void
        grader = injector.get_instance(Grader)
        if settings.gradebook_enabled:
            # Give the user the grade book URL
            gradebook_url = "http://{host}:{port}/gradefast/gradebook".format(host=settings.host,
                                                                             port=settings.port)
            channel.print()
            channel.print_bordered("Grade Book URL: {}", gradebook_url)
            channel.print()
            if channel.prompt("Open in browser?", ["y", "N"], "n") == "y":
                webbrowser.open_new(gradebook_url)
                # Sleep for a tad to allow for the auth prompt to come up
                time.sleep(0.8)
            channel.print()
        # Finally... let's start grading!
        for path in submission_paths:
            grader.add_submissions(path)
        if not grader.prompt_for_submissions():
            raise LazyUserError("User is too lazy to find any submissions")
        grader.run_commands()
        # Well, the user thinks they're done
        channel.print()
        channel.print_bordered("Grading complete!", type=Msg.PartType.STATUS)
        channel.print()
    except (InterruptedError, KeyboardInterrupt):
        # Ctrl-C during grading: report and fall through to shutdown.
        channel.print()
        channel.print()
        channel.print_bordered("INTERRUPTED", type=Msg.PartType.ERROR)
        channel.print()
    except:
        # NOTE(review): bare except keeps the web server alive on any grader
        # error, but it also swallows SystemExit -- consider `except Exception`.
        _logger.exception("ERROR RUNNING GRADER")
    finally:
        channel.print()
        channel.print("Download the gradebook and any other data you need.")
        channel.print("Once you exit the server, the gradebook is lost.")
        channel.print()
        try:
            channel.input("Press Enter to exit server...")
        except (InterruptedError, KeyboardInterrupt):
            # Pretend that they pressed "Enter"
            channel.print()
Grade book isn't lost on shutdown anymore since we have save files
"""
Run GradeFast.
Licensed under the MIT License. For more, see the LICENSE file.
Author: Jake Hartz <jake@hartz.io>
"""
import threading
import time
import webbrowser
from typing import Sequence
from iochannels import Channel, Msg
from pyprovide import Injector
from gradefast.gradebook.gradebook import GradeBook
from gradefast.grader.grader import Grader
from gradefast.loggingwrapper import get_logger
from gradefast.models import Path, Settings
from gradefast.persister import Persister
_logger = get_logger("run")
class LazyUserError(Exception):
    """Raised when no submissions are found and the user won't supply any."""
    pass
def _try_run_gradebook(gradebook: GradeBook) -> None:
    """Run the GradeBook server; log (never propagate) any error.

    This is a daemon-thread target, so an uncaught exception would die
    silently -- logging it is the only way the failure becomes visible.
    """
    logger = get_logger("run: gradebook")
    try:
        gradebook.run(debug=True)
    except Exception:
        # Narrowed from a bare "except:"; do not swallow
        # SystemExit/KeyboardInterrupt.
        logger.exception("Exception when running gradebook server")
def run_gradefast(injector: Injector, submission_paths: Sequence[Path]) -> None:
    """Run GradeFast end to end, always cleaning up the channel/persister.

    Parameters
    ----------
    injector : Injector
        Dependency injector for the Channel, Persister, and friends.
    submission_paths : Sequence[Path]
        Folders to scan for submissions.
    """
    # Initialize the Channel used to communicate via the CLI (if we haven't already)
    channel = injector.get_instance(Channel)
    # Initialize the Persister (i.e. save file wrapper)
    persister = injector.get_instance(Persister)
    # Wrap the rest in a try-finally to ensure the channel and persister get cleaned up properly
    try:
        _logger.debug("Running GradeFast")
        _run_gradefast_internal(injector, submission_paths)
    except Exception:
        # Narrowed from a bare "except:" so SystemExit/KeyboardInterrupt
        # still propagate after the "finally" cleanup runs.
        _logger.exception("Error running GradeFast")
    finally:
        _logger.debug("Closing resources")
        persister.close()
        channel.close()
def _run_gradefast_internal(injector: Injector, submission_paths: Sequence[Path]) -> None:
    """Drive one grading session: gradebook server, grading loop, shutdown prompt.

    Blocks until the user confirms exit; cleanup is handled by the caller.
    """
    settings = injector.get_instance(Settings)
    channel = injector.get_instance(Channel)
    if settings.gradebook_enabled:
        # Create and start the GradeBook WSGI server in a new thread
        gradebook = injector.get_instance(GradeBook)
        threading.Thread(
            name="GradeBookTh",
            target=_try_run_gradebook,
            args=(gradebook,),
            daemon=True
        ).start()
        # Sleep for a bit to give the web server some time to catch up
        time.sleep(0.4)
    # Wrap the rest in a `try` so that exceptions in the Grader don't kill everything
    # (i.e. the web server will still be running)
    try:
        # Start the grader before showing the gradebook URL so "AuthRequestedEvent"s don't fall
        # away into the void
        grader = injector.get_instance(Grader)
        if settings.gradebook_enabled:
            # Give the user the grade book URL
            gradebook_url = "http://{host}:{port}/gradefast/gradebook".format(host=settings.host,
                                                                             port=settings.port)
            channel.print()
            channel.print_bordered("Grade Book URL: {}", gradebook_url)
            channel.print()
            if channel.prompt("Open in browser?", ["y", "N"], "n") == "y":
                webbrowser.open_new(gradebook_url)
                # Sleep for a tad to allow for the auth prompt to come up
                time.sleep(0.8)
            channel.print()
        # Finally... let's start grading!
        for path in submission_paths:
            grader.add_submissions(path)
        if not grader.prompt_for_submissions():
            raise LazyUserError("User is too lazy to find any submissions")
        grader.run_commands()
        # Well, the user thinks they're done
        channel.print()
        channel.print_bordered("Grading complete!", type=Msg.PartType.STATUS)
        channel.print()
    except (InterruptedError, KeyboardInterrupt):
        # Ctrl-C during grading: report it and continue to shutdown.
        channel.print()
        channel.print()
        channel.print_bordered("INTERRUPTED", type=Msg.PartType.ERROR)
        channel.print()
    except:
        # NOTE(review): bare except keeps the web server alive after any
        # grader error, but also swallows SystemExit -- consider narrowing.
        _logger.exception("ERROR RUNNING GRADER")
    finally:
        channel.print()
        channel.print("Download the grades and any other data you need before exiting the server.")
        channel.print()
        try:
            channel.input("Press Enter to exit server...")
        except (InterruptedError, KeyboardInterrupt):
            # Pretend that they pressed "Enter"
            channel.print()
|
"""Extract reference documentation from the NumPy source tree.
"""
import inspect
from nose.plugins.skip import SkipTest
import re
import sys
import types
class Reader(object):
    """A line-based string reader.
    """
    def __init__(self, data):
        """
        Parameters
        ----------
        data : str
           String with lines separated by '\n'.
        """
        if isinstance(data, list):
            self._str = data
        else:
            self._str = data.split('\n')  # store string as list of lines
        self.reset()

    def __getitem__(self, n):
        # Integer or slice access to the underlying list of lines.
        return self._str[n]

    def reset(self):
        self._l = 0  # current line nr

    def read(self):
        # Return the current line and advance the cursor; '' past EOF.
        if not self.eof():
            out = self[self._l]
            self._l += 1
            return out
        else:
            return ''

    def seek_next_non_empty_line(self):
        # Advance the cursor past blank lines so it points at the next
        # non-blank line (or EOF).
        for l in self[self._l:]:
            if l.strip():
                break
            else:
                self._l += 1

    def eof(self):
        return self._l >= len(self._str)

    def read_to_condition(self, condition_func):
        # Consume and return lines up to (but not including) the first
        # line for which condition_func(line) is true; [] if no line
        # was consumed before the list ended.
        start = self._l
        for line in self[start:]:
            if condition_func(line):
                return self[start:self._l]
            self._l += 1
            if self.eof():
                return self[start:self._l+1]
        return []

    def read_to_next_empty_line(self):
        # Skip leading blanks, then read up to the next blank line.
        self.seek_next_non_empty_line()
        def is_empty(line):
            return not line.strip()
        return self.read_to_condition(is_empty)

    def read_to_next_unindented_line(self):
        # An unindented, non-blank line terminates the block.
        def is_unindented(line):
            return (line.strip() and (len(line.lstrip()) == len(line)))
        return self.read_to_condition(is_unindented)

    def peek(self, n=0):
        # Look ahead (or behind, for negative n) without moving the cursor.
        if self._l + n < len(self._str):
            return self[self._l + n]
        else:
            return ''

    def is_empty(self):
        # True when the whole buffer contains only whitespace.
        return not ''.join(self._str).strip()

    def __iter__(self):
        for line in self._str:
            yield line
class NumpyDocString(object):
def __init__(self, docstring):
docstring = docstring.split('\n')
# De-indent paragraph
try:
indent = min(len(s) - len(s.lstrip()) for s in docstring
if s.strip())
except ValueError:
indent = 0
for n,line in enumerate(docstring):
docstring[n] = docstring[n][indent:]
self._doc = Reader(docstring)
self._parsed_data = {
'Signature': '',
'Summary': '',
'Extended Summary': [],
'Parameters': [],
'Other Parameters': [],
'Returns': [],
'Raises': [],
'Warns': [],
'See Also': [],
'Notes': [],
'References': '',
'Examples': '',
'index': {},
'Attributes': [],
'Methods': [],
}
self.section_order = []
self._parse()
def __getitem__(self,key):
return self._parsed_data[key]
def __setitem__(self,key,val):
if not self._parsed_data.has_key(key):
raise ValueError("Unknown section %s" % key)
else:
self._parsed_data[key] = val
def _is_at_section(self):
self._doc.seek_next_non_empty_line()
if self._doc.eof():
return False
l1 = self._doc.peek().strip() # e.g. Parameters
if l1.startswith('.. index::'):
return True
l2 = self._doc.peek(1).strip() # ----------
return (len(l1) == len(l2) and l2 == '-'*len(l1))
def _strip(self,doc):
i = 0
j = 0
for i,line in enumerate(doc):
if line.strip(): break
for j,line in enumerate(doc[::-1]):
if line.strip(): break
return doc[i:len(doc)-j]
def _read_to_next_section(self):
section = self._doc.read_to_next_empty_line()
while not self._is_at_section() and not self._doc.eof():
if not self._doc.peek(-1).strip(): # previous line was empty
section += ['']
section += self._doc.read_to_next_empty_line()
return section
def _read_sections(self):
while not self._doc.eof():
data = self._read_to_next_section()
name = data[0].strip()
if name.startswith('..'): # index section
yield name, data[1:]
elif len(data) < 2:
yield StopIteration
else:
yield name, self._strip(data[2:])
def _parse_param_list(self,content):
r = Reader(content)
params = []
while not r.eof():
header = r.read().strip()
if ' : ' in header:
arg_name, arg_type = header.split(' : ')[:2]
else:
arg_name, arg_type = header, ''
desc = r.read_to_next_unindented_line()
for n,line in enumerate(desc):
desc[n] = line.strip()
desc = desc #'\n'.join(desc)
params.append((arg_name,arg_type,desc))
return params
def _parse_see_also(self, content):
"""
func_name : Descriptive text
continued text
another_func_name : Descriptive text
func_name1, func_name2, func_name3
"""
functions = []
current_func = None
rest = []
for line in content:
if not line.strip(): continue
if ':' in line:
if current_func:
functions.append((current_func, rest))
r = line.split(':', 1)
current_func = r[0].strip()
r[1] = r[1].strip()
if r[1]:
rest = [r[1]]
else:
rest = []
elif not line.startswith(' '):
if current_func:
functions.append((current_func, rest))
current_func = None
rest = []
if ',' in line:
for func in line.split(','):
func = func.strip()
if func:
functions.append((func, []))
elif line.strip():
current_func = line.strip()
elif current_func is not None:
rest.append(line.strip())
if current_func:
functions.append((current_func, rest))
return functions
def _parse_index(self, section, content):
"""
.. index: default
:refguide: something, else, and more
"""
def strip_each_in(lst):
return [s.strip() for s in lst]
out = {}
section = section.split('::')
if len(section) > 1:
out['default'] = strip_each_in(section[1].split(','))[0]
for line in content:
line = line.split(':')
if len(line) > 2:
out[line[1]] = strip_each_in(line[2].split(','))
return out
def _parse_summary(self):
"""Grab signature (if given) and summary"""
summary = self._doc.read_to_next_empty_line()
summary_str = "\n".join([s.strip() for s in summary])
if re.compile('^([\w. ]+=)?[\w\.]+\(.*\)$').match(summary_str):
self['Signature'] = summary_str
if not self._is_at_section():
self['Summary'] = self._doc.read_to_next_empty_line()
elif re.compile('^[\w]+\n[-]+').match(summary_str):
self['Summary'] = ''
self._doc.reset()
else:
self['Summary'] = summary
if not self._is_at_section():
self['Extended Summary'] = self._read_to_next_section()
def _parse(self):
self._doc.reset()
self._parse_summary()
for (section, content) in self._read_sections():
if not section.startswith('..'):
section = ' '.join([s.capitalize()
for s in section.split(' ')])
if section in ('Parameters', 'Other Parameters', 'Returns',
'Raises', 'Warns', 'Attributes', 'Methods'):
self[section] = self._parse_param_list(content)
self.section_order.append(section)
elif section.startswith('.. index::'):
self['index'] = self._parse_index(section, content)
self.section_order.append('index')
elif section.lower() == 'see also':
self['See Also'] = self._parse_see_also(content)
self.section_order.append('See Also')
else:
self[section] = content
self.section_order.append(section)
# string conversion routines
def _str_header(self, name, symbol='-'):
return [name, len(name)*symbol]
def _str_indent(self, doc, indent=4):
out = []
for line in doc:
out += [' '*indent + line]
return out
def _str_signature(self):
if not self['Signature']:
return []
return ["*%s*" % self['Signature'].replace('*','\*')] + ['']
def _str_summary(self):
return self['Summary'] + ['']
def _str_extended_summary(self):
return self['Extended Summary'] + ['']
def _str_param_list(self, name):
out = []
if self[name]:
out += self._str_header(name)
for param,param_type,desc in self[name]:
out += ['%s : %s' % (param, param_type)]
out += self._str_indent(desc)
out += ['']
return out
def _str_section(self, name):
out = []
if self[name]:
out += self._str_header(name)
out += self[name]
out += ['']
return out
def _str_see_also(self):
if not self['See Also']: return []
out = []
out += self._str_header("See Also")
last_had_desc = True
for func, desc in self['See Also']:
if desc or last_had_desc:
out += ['']
out += ["`%s`_" % func]
else:
out[-1] += ", `%s`_" % func
if desc:
out += self._str_indent(desc)
last_had_desc = True
else:
last_had_desc = False
out += ['']
return out
def _str_index(self):
idx = self['index']
out = []
out += ['.. index:: %s' % idx.get('default','')]
for section, references in idx.iteritems():
if section == 'default':
continue
out += [' :%s: %s' % (section, ', '.join(references))]
return out
def __str__(self):
out = []
out += self._str_signature()
out += self._str_summary()
out += self._str_extended_summary()
for param_list in ('Parameters', 'Other Parameters',
'Returns', 'Raises', 'Warns'):
out += self._str_param_list(param_list)
out += self._str_see_also()
for s in ('Notes','References','Examples'):
out += self._str_section(s)
out += self._str_index()
return '\n'.join(out)
# --
def get_errors(self, check_order=True):
errors = []
self._doc.reset()
for j, line in enumerate(self._doc):
if len(line) > 75:
errors.append("Line %d exceeds 75 chars"
": \"%s\"..." % (j+1, line[:30]))
if check_order:
canonical_order = ['Signature', 'Summary', 'Extended Summary',
'Attributes', 'Methods', 'Parameters',
'Other Parameters','Returns', 'Raises', 'Warns',
'See Also', 'Notes', 'References', 'Examples',
'index']
canonical_order_copy = list(canonical_order)
for s in self.section_order:
while canonical_order_copy and s != canonical_order_copy[0]:
canonical_order_copy.pop(0)
if not canonical_order_copy:
errors.append(
"Sections in wrong order (starting at %s). The"
" right order is %s" % (s, canonical_order))
return errors
def indent(str, indent=4):
    """Indent every line of *str* by *indent* spaces.

    A None input yields a single line of spaces. (The parameter names,
    including the builtin-shadowing ``str``, are kept for backward
    compatibility with keyword callers.)
    """
    pad = ' ' * indent
    if str is None:
        return pad
    return '\n'.join(pad + line for line in str.split('\n'))
class NumpyFunctionDocString(NumpyDocString):
def __init__(self, docstring, function):
super(NumpyFunctionDocString, self).__init__(docstring)
args, varargs, keywords, defaults = inspect.getargspec(function)
if (args and args != ['self']) or varargs or keywords or defaults:
self.has_parameters = True
else:
self.has_parameters = False
def _parse(self):
self._parsed_data = {
'Signature': '',
'Summary': '',
'Extended Summary': [],
'Parameters': [],
'Other Parameters': [],
'Returns': [],
'Raises': [],
'Warns': [],
'See Also': [],
'Notes': [],
'References': '',
'Examples': '',
'index': {},
}
return NumpyDocString._parse(self)
def get_errors(self):
errors = NumpyDocString.get_errors(self)
if not self['Signature']:
#errors.append("No function signature") #this check is currently
#too restrictive. Disabling
#it for now
pass
if not self['Summary']:
errors.append("No function summary line")
if len(" ".join(self['Summary'])) > 3*80:
errors.append("Brief function summary is longer than 3 lines")
if not self['Parameters'] and self.has_parameters:
errors.append("No Parameters section")
return errors
class NumpyClassDocString(NumpyDocString):
def _parse(self):
self._parsed_data = {
'Signature': '',
'Summary': '',
'Extended Summary': [],
'Parameters': [],
'Other Parameters': [],
'Raises': [],
'Warns': [],
'See Also': [],
'Notes': [],
'References': '',
'Examples': '',
'index': {},
'Attributes': [],
'Methods': [],
}
return NumpyDocString._parse(self)
def __str__(self):
out = []
out += self._str_signature()
out += self._str_summary()
out += self._str_extended_summary()
for param_list in ('Attributes', 'Methods', 'Parameters', 'Raises',
'Warns'):
out += self._str_param_list(param_list)
out += self._str_see_also()
for s in ('Notes','References','Examples'):
out += self._str_section(s)
out += self._str_index()
return '\n'.join(out)
def get_errors(self):
errors = NumpyDocString.get_errors(self)
return errors
class NumpyModuleDocString(NumpyDocString):
    """
    Module doc strings: no parsing is done.
    """
    def _parse(self):
        # Module docstrings are free-form; skip section parsing entirely.
        self.out = []

    def __str__(self):
        # Reproduce the original docstring verbatim.
        return "\n".join(self._doc._str)

    def get_errors(self):
        # Line-length checks still apply, but section ordering does not.
        errors = NumpyDocString.get_errors(self, check_order=False)
        return errors
def header(text, style='-'):
    """Return *text* underlined with *style* characters, ending in a newline."""
    underline = style * len(text)
    return '{0}\n{1}\n'.format(text, underline)
class SphinxDocString(NumpyDocString):
# string conversion routines
def _str_header(self, name, symbol='`'):
return ['**' + name + '**'] + [symbol*(len(name)+4)]
def _str_indent(self, doc, indent=4):
out = []
for line in doc:
out += [' '*indent + line]
return out
def _str_signature(self):
return ['``%s``' % self['Signature'].replace('*','\*')] + ['']
def _str_summary(self):
return self['Summary'] + ['']
def _str_extended_summary(self):
return self['Extended Summary'] + ['']
def _str_param_list(self, name):
out = []
if self[name]:
out += self._str_header(name)
out += ['']
for param,param_type,desc in self[name]:
out += self._str_indent(['**%s** : %s' % (param, param_type)])
out += ['']
out += self._str_indent(desc,8)
out += ['']
return out
def _str_section(self, name):
out = []
if self[name]:
out += self._str_header(name)
out += ['']
content = self._str_indent(self[name])
out += content
out += ['']
return out
def _str_index(self):
idx = self['index']
out = []
out += ['.. index:: %s' % idx.get('default','')]
for section, references in idx.iteritems():
if section == 'default':
continue
out += [' :%s: %s' % (section, ', '.join(references))]
return out
def __str__(self, indent=0):
out = []
out += self._str_summary()
out += self._str_extended_summary()
for param_list in ('Parameters','Returns','Raises','Warns'):
out += self._str_param_list(param_list)
for s in ('Notes','References','Examples'):
out += self._str_section(s)
# out += self._str_index()
out = self._str_indent(out,indent)
return '\n'.join(out)
class FunctionDoc(object):
def __init__(self,func):
self._f = func
def __str__(self):
out = ''
doclines = inspect.getdoc(self._f) or ''
try:
doc = SphinxDocString(doclines)
except Exception, e:
print '*'*78
print "ERROR: '%s' while parsing `%s`" % (e, self._f)
print '*'*78
#print "Docstring follows:"
#print doclines
#print '='*78
return out
if doc['Signature']:
out += '%s\n' % header('**%s**' %
doc['Signature'].replace('*','\*'), '-')
else:
try:
# try to read signature
argspec = inspect.getargspec(self._f)
argspec = inspect.formatargspec(*argspec)
argspec = argspec.replace('*','\*')
out += header('%s%s' % (self._f.__name__, argspec), '-')
except TypeError, e:
out += '%s\n' % header('**%s()**' % self._f.__name__, '-')
out += str(doc)
return out
class ClassDoc(object):
def __init__(self,cls,modulename=''):
if not inspect.isclass(cls):
raise ValueError("Initialise using an object")
self._cls = cls
if modulename and not modulename.endswith('.'):
modulename += '.'
self._mod = modulename
self._name = cls.__name__
@property
def methods(self):
return [name for name,func in inspect.getmembers(self._cls)
if not name.startswith('_') and callable(func)]
def __str__(self):
out = ''
def replace_header(match):
return '"'*(match.end() - match.start())
for m in self.methods:
print "Parsing `%s`" % m
out += str(FunctionDoc(getattr(self._cls,m))) + '\n\n'
out += '.. index::\n single: %s; %s\n\n' % (self._name, m)
return out
def handle_function(val, name):
    """Collect docstring errors for function *val*; each error is a
    (name, message) pair."""
    docstring = inspect.getdoc(val)
    if docstring is None:
        return [(name, '**missing** function-level docstring')]
    return [(name, err)
            for err in NumpyFunctionDocString(docstring, val).get_errors()]
def handle_module(val, name):
    """Collect docstring errors for a module; *val* is the docstring
    itself (not the module object)."""
    if val is None:
        return [(name, '**missing** module-level docstring')]
    return [(name, err) for err in NumpyModuleDocString(val).get_errors()]
def handle_method(method, method_name, class_name):
    """Collect docstring errors for one method; each error is a
    (class_name, method_name, message) triple."""
    docstring = inspect.getdoc(method)
    if docstring is None:
        return [(class_name, method_name,
                 '**missing** method-level docstring')]
    return [(class_name, method_name, err)
            for err in NumpyFunctionDocString(docstring, method).get_errors()]
def handle_class(val, class_name):
    """Collect docstring errors for class *val* and all public methods."""
    docstring = inspect.getdoc(val)
    if docstring is None:
        cls_errors = [(class_name, '**missing** class-level docstring')]
    else:
        cls_errors = [(e,)
                      for e in NumpyClassDocString(docstring).get_errors()]
    # Parse the docstring of every public callable attribute as a method.
    # (.iteritems() is kept: this module targets Python 2 throughout.)
    methods = dict((n, f) for n, f in inspect.getmembers(val)
                   if not n.startswith('_') and callable(f))
    for m_name, method in methods.iteritems():
        cls_errors.extend(handle_method(method, m_name, class_name))
    return cls_errors
def docstring_errors(filename, global_dict=None):
"""
Run a Python file, parse the docstrings of all the classes
and functions it declares, and return them.
Parameters
----------
filename : str
Filename of the module to run.
global_dict : dict, optional
Globals dictionary to pass along to `execfile()`.
Returns
-------
all_errors : list
Each entry of the list is a tuple, of length 2 or 3, with
format either
(func_or_class_name, docstring_error_description)
or
(class_name, method_name, docstring_error_description)
"""
if global_dict is None:
global_dict = {}
if '__file__' not in global_dict:
global_dict['__file__'] = filename
if '__doc__' not in global_dict:
global_dict['__doc__'] = None
try:
execfile(filename, global_dict)
except SystemExit:
pass
except SkipTest:
raise AssertionError("Couldn't verify format of " + filename +
"due to SkipTest")
all_errors = []
for key, val in global_dict.iteritems():
if not key.startswith('_'):
module_name = ""
if hasattr(inspect.getmodule(val), '__name__'):
module_name = inspect.getmodule(val).__name__
if (inspect.isfunction(val) or inspect.isclass(val)) and\
(inspect.getmodule(val) is None
or module_name == '__builtin__'):
# Functions
if type(val) == types.FunctionType:
all_errors.extend(handle_function(val, key))
# New-style classes
elif type(val) == types.TypeType:
all_errors.extend(handle_class(val, key))
# Old-style classes
elif type(val) == types.ClassType:
all_errors.extend(handle_class(val, key))
elif key == '__doc__':
all_errors.extend(handle_module(val, key))
if all_errors:
all_errors.insert(0, ("%s:"%filename,))
return all_errors
if __name__ == "__main__":
all_errors = docstring_errors(sys.argv[1])
if len(all_errors) > 0:
print "*" * 30, "docstring errors", "*" * 30
for line in all_errors:
print ':'.join(line)
sys.exit(int(len(all_errors) > 0))
Fixed bug where type objects were incorrectly treated like functions.
"""Extract reference documentation from the NumPy source tree.
"""
import inspect
from nose.plugins.skip import SkipTest
import re
import sys
import types
class Reader(object):
"""A line-based string reader.
"""
def __init__(self, data):
"""
Parameters
----------
data : str
String with lines separated by '\n'.
"""
if isinstance(data,list):
self._str = data
else:
self._str = data.split('\n') # store string as list of lines
self.reset()
def __getitem__(self, n):
return self._str[n]
def reset(self):
self._l = 0 # current line nr
def read(self):
if not self.eof():
out = self[self._l]
self._l += 1
return out
else:
return ''
def seek_next_non_empty_line(self):
for l in self[self._l:]:
if l.strip():
break
else:
self._l += 1
def eof(self):
return self._l >= len(self._str)
def read_to_condition(self, condition_func):
start = self._l
for line in self[start:]:
if condition_func(line):
return self[start:self._l]
self._l += 1
if self.eof():
return self[start:self._l+1]
return []
def read_to_next_empty_line(self):
self.seek_next_non_empty_line()
def is_empty(line):
return not line.strip()
return self.read_to_condition(is_empty)
def read_to_next_unindented_line(self):
def is_unindented(line):
return (line.strip() and (len(line.lstrip()) == len(line)))
return self.read_to_condition(is_unindented)
def peek(self,n=0):
if self._l + n < len(self._str):
return self[self._l + n]
else:
return ''
def is_empty(self):
return not ''.join(self._str).strip()
def __iter__(self):
for line in self._str:
yield line
class NumpyDocString(object):
    """Parse a numpydoc-formatted docstring into named sections.

    Parsed content is exposed via ``doc[section_name]``; the order in
    which sections appeared is recorded in ``self.section_order`` so
    that :meth:`get_errors` can validate it.
    """
    def __init__(self, docstring):
        docstring = docstring.split('\n')

        # De-indent paragraph: strip the common leading indentation so
        # section headers and their underlines line up at column 0.
        try:
            indent = min(len(s) - len(s.lstrip()) for s in docstring
                         if s.strip())
        except ValueError:  # docstring consists solely of blank lines
            indent = 0

        for n,line in enumerate(docstring):
            docstring[n] = docstring[n][indent:]

        self._doc = Reader(docstring)
        self._parsed_data = {
            'Signature': '',
            'Summary': '',
            'Extended Summary': [],
            'Parameters': [],
            'Other Parameters': [],
            'Returns': [],
            'Raises': [],
            'Warns': [],
            'See Also': [],
            'Notes': [],
            'References': '',
            'Examples': '',
            'index': {},
            'Attributes': [],
            'Methods': [],
        }
        self.section_order = []

        self._parse()

    def __getitem__(self,key):
        return self._parsed_data[key]

    def __setitem__(self,key,val):
        # ``dict.has_key`` is deprecated; ``in`` is the idiomatic test
        # and behaves identically on Python 2.
        if key not in self._parsed_data:
            raise ValueError("Unknown section %s" % key)
        else:
            self._parsed_data[key] = val

    def _is_at_section(self):
        """True if the reader is positioned at a section header."""
        self._doc.seek_next_non_empty_line()

        if self._doc.eof():
            return False

        l1 = self._doc.peek().strip()  # e.g. Parameters

        if l1.startswith('.. index::'):
            return True

        l2 = self._doc.peek(1).strip()  # ----------
        # A header is a title line followed by a dash underline of the
        # same length.
        return (len(l1) == len(l2) and l2 == '-'*len(l1))

    def _strip(self,doc):
        """Trim leading and trailing blank lines from a list of lines."""
        i = 0
        j = 0
        for i,line in enumerate(doc):
            if line.strip(): break

        for j,line in enumerate(doc[::-1]):
            if line.strip(): break

        return doc[i:len(doc)-j]

    def _read_to_next_section(self):
        """Read lines until the next section header (or EOF)."""
        section = self._doc.read_to_next_empty_line()

        while not self._is_at_section() and not self._doc.eof():
            if not self._doc.peek(-1).strip():    # previous line was empty
                section += ['']

            section += self._doc.read_to_next_empty_line()

        return section

    def _read_sections(self):
        """Yield ``(section_name, content_lines)`` pairs."""
        while not self._doc.eof():
            data = self._read_to_next_section()
            name = data[0].strip()

            if name.startswith('..'): # index section
                yield name, data[1:]
            elif len(data) < 2:
                # Too short to be a header plus underline: stop cleanly.
                # (Previously ``yield StopIteration``, which made callers
                # crash when unpacking the yielded value into a 2-tuple.)
                return
            else:
                yield name, self._strip(data[2:])

    def _parse_param_list(self,content):
        """Parse ``name : type`` entries with indented descriptions.

        Returns a list of ``(arg_name, arg_type, description_lines)``.
        """
        r = Reader(content)
        params = []
        while not r.eof():
            header = r.read().strip()
            if ' : ' in header:
                arg_name, arg_type = header.split(' : ')[:2]
            else:
                # No type given: the whole header line is the name.
                arg_name, arg_type = header, ''

            desc = r.read_to_next_unindented_line()
            for n,line in enumerate(desc):
                desc[n] = line.strip()

            params.append((arg_name,arg_type,desc))

        return params

    def _parse_see_also(self, content):
        """Parse a See Also section.  Accepted shapes::

            func_name : Descriptive text
                continued text
            another_func_name : Descriptive text
            func_name1, func_name2, func_name3

        Returns a list of ``(function_name, description_lines)``.
        """
        functions = []
        current_func = None
        rest = []
        for line in content:
            if not line.strip(): continue
            if ':' in line:
                # "name : description" starts a new entry.
                if current_func:
                    functions.append((current_func, rest))
                r = line.split(':', 1)
                current_func = r[0].strip()
                r[1] = r[1].strip()
                if r[1]:
                    rest = [r[1]]
                else:
                    rest = []
            elif not line.startswith(' '):
                # Unindented line without ':': either a comma-separated
                # name list or a single bare name.
                if current_func:
                    functions.append((current_func, rest))
                    current_func = None
                    rest = []
                if ',' in line:
                    for func in line.split(','):
                        func = func.strip()
                        if func:
                            functions.append((func, []))
                elif line.strip():
                    current_func = line.strip()
            elif current_func is not None:
                # Indented continuation of the current description.
                rest.append(line.strip())
        if current_func:
            functions.append((current_func, rest))
        return functions

    def _parse_index(self, section, content):
        """Parse an index directive, e.g.::

            .. index:: default
               :refguide: something, else, and more
        """
        def strip_each_in(lst):
            return [s.strip() for s in lst]

        out = {}
        section = section.split('::')
        if len(section) > 1:
            out['default'] = strip_each_in(section[1].split(','))[0]

        for line in content:
            line = line.split(':')
            if len(line) > 2:
                out[line[1]] = strip_each_in(line[2].split(','))
        return out

    def _parse_summary(self):
        """Grab signature (if given) and summary"""
        summary = self._doc.read_to_next_empty_line()
        summary_str = "\n".join([s.strip() for s in summary])
        # A first paragraph shaped like "name(...)" is a signature.
        if re.compile('^([\w. ]+=)?[\w\.]+\(.*\)$').match(summary_str):
            self['Signature'] = summary_str
            if not self._is_at_section():
                self['Summary'] = self._doc.read_to_next_empty_line()
        elif re.compile('^[\w]+\n[-]+').match(summary_str):
            # The "summary" was actually a section header: rewind.
            self['Summary'] = ''
            self._doc.reset()
        else:
            self['Summary'] = summary

        if not self._is_at_section():
            self['Extended Summary'] = self._read_to_next_section()

    def _parse(self):
        """Populate ``self._parsed_data`` from the raw docstring."""
        self._doc.reset()
        self._parse_summary()
        for (section, content) in self._read_sections():
            if not section.startswith('..'):
                section = ' '.join([s.capitalize()
                                    for s in section.split(' ')])
            if section in ('Parameters', 'Other Parameters', 'Returns',
                           'Raises', 'Warns', 'Attributes', 'Methods'):
                self[section] = self._parse_param_list(content)
                self.section_order.append(section)
            elif section.startswith('.. index::'):
                self['index'] = self._parse_index(section, content)
                self.section_order.append('index')
            elif section.lower() == 'see also':
                self['See Also'] = self._parse_see_also(content)
                self.section_order.append('See Also')
            else:
                self[section] = content
                self.section_order.append(section)

    # string conversion routines

    def _str_header(self, name, symbol='-'):
        return [name, len(name)*symbol]

    def _str_indent(self, doc, indent=4):
        out = []
        for line in doc:
            out += [' '*indent + line]
        return out

    def _str_signature(self):
        if not self['Signature']:
            return []
        return ["*%s*" % self['Signature'].replace('*','\*')] + ['']

    def _str_summary(self):
        return self['Summary'] + ['']

    def _str_extended_summary(self):
        return self['Extended Summary'] + ['']

    def _str_param_list(self, name):
        out = []
        if self[name]:
            out += self._str_header(name)
            for param,param_type,desc in self[name]:
                out += ['%s : %s' % (param, param_type)]
                out += self._str_indent(desc)
            out += ['']
        return out

    def _str_section(self, name):
        out = []
        if self[name]:
            out += self._str_header(name)
            out += self[name]
            out += ['']
        return out

    def _str_see_also(self):
        if not self['See Also']: return []
        out = []
        out += self._str_header("See Also")
        last_had_desc = True
        for func, desc in self['See Also']:
            if desc or last_had_desc:
                out += ['']
                out += ["`%s`_" % func]
            else:
                # Run names without descriptions together on one line.
                out[-1] += ", `%s`_" % func
            if desc:
                out += self._str_indent(desc)
                last_had_desc = True
            else:
                last_had_desc = False
        out += ['']
        return out

    def _str_index(self):
        idx = self['index']
        out = []
        out += ['.. index:: %s' % idx.get('default','')]
        for section, references in idx.iteritems():
            if section == 'default':
                continue
            out += ['   :%s: %s' % (section, ', '.join(references))]
        return out

    def __str__(self):
        out = []
        out += self._str_signature()
        out += self._str_summary()
        out += self._str_extended_summary()
        for param_list in ('Parameters', 'Other Parameters',
                           'Returns', 'Raises', 'Warns'):
            out += self._str_param_list(param_list)
        out += self._str_see_also()
        for s in ('Notes','References','Examples'):
            out += self._str_section(s)
        out += self._str_index()
        return '\n'.join(out)

    # --

    def get_errors(self, check_order=True):
        """Return a list of style problems found in the docstring."""
        errors = []
        self._doc.reset()
        for j, line in enumerate(self._doc):
            if len(line) > 75:
                errors.append("Line %d exceeds 75 chars"
                              ": \"%s\"..." % (j+1, line[:30]))

        if check_order:
            canonical_order = ['Signature', 'Summary', 'Extended Summary',
                               'Attributes', 'Methods', 'Parameters',
                               'Other Parameters','Returns', 'Raises', 'Warns',
                               'See Also', 'Notes', 'References', 'Examples',
                               'index']

            canonical_order_copy = list(canonical_order)

            # Each seen section must appear no earlier than its slot in
            # the canonical order; pop until we find it or run out.
            for s in self.section_order:
                while canonical_order_copy and s != canonical_order_copy[0]:
                    canonical_order_copy.pop(0)
                if not canonical_order_copy:
                    errors.append(
                        "Sections in wrong order (starting at %s). The"
                        " right order is %s" % (s, canonical_order))
        return errors
def indent(str,indent=4):
    """Prefix every line of *str* with *indent* spaces.

    When *str* is None, return just the indentation string.
    """
    pad = ' ' * indent
    if str is None:
        return pad
    return '\n'.join(pad + line for line in str.split('\n'))
class NumpyFunctionDocString(NumpyDocString):
    """Parsed docstring of a function, with function-specific checks."""

    def __init__(self, docstring, function):
        super(NumpyFunctionDocString, self).__init__(docstring)
        args, varargs, keywords, defaults = inspect.getargspec(function)
        # A lone ``self`` argument does not require a Parameters section.
        self.has_parameters = bool((args and args != ['self']) or varargs or
                                   keywords or defaults)

    def _parse(self):
        # Functions have no Attributes/Methods sections, so start from a
        # reduced data store before delegating to the generic parser.
        self._parsed_data = {
            'Signature': '',
            'Summary': '',
            'Extended Summary': [],
            'Parameters': [],
            'Other Parameters': [],
            'Returns': [],
            'Raises': [],
            'Warns': [],
            'See Also': [],
            'Notes': [],
            'References': '',
            'Examples': '',
            'index': {},
        }
        return NumpyDocString._parse(self)

    def get_errors(self):
        """Return docstring problems found for this function."""
        errors = NumpyDocString.get_errors(self)

        # A "No function signature" check used to live here but proved
        # too restrictive, so it is disabled for now.

        if not self['Summary']:
            errors.append("No function summary line")
        if len(" ".join(self['Summary'])) > 3*80:
            errors.append("Brief function summary is longer than 3 lines")
        if not self['Parameters'] and self.has_parameters:
            errors.append("No Parameters section")
        return errors
class NumpyClassDocString(NumpyDocString):
    """Parsed docstring of a class (no Returns section)."""

    def _parse(self):
        # Classes have no Returns section; reset the data store before
        # delegating to the generic parser.
        self._parsed_data = {
            'Signature': '',
            'Summary': '',
            'Extended Summary': [],
            'Parameters': [],
            'Other Parameters': [],
            'Raises': [],
            'Warns': [],
            'See Also': [],
            'Notes': [],
            'References': '',
            'Examples': '',
            'index': {},
            'Attributes': [],
            'Methods': [],
        }
        return NumpyDocString._parse(self)

    def __str__(self):
        parts = []
        parts.extend(self._str_signature())
        parts.extend(self._str_summary())
        parts.extend(self._str_extended_summary())
        for section in ('Attributes', 'Methods', 'Parameters', 'Raises',
                        'Warns'):
            parts.extend(self._str_param_list(section))
        parts.extend(self._str_see_also())
        for section in ('Notes', 'References', 'Examples'):
            parts.extend(self._str_section(section))
        parts.extend(self._str_index())
        return '\n'.join(parts)

    def get_errors(self):
        """Return docstring problems found for this class."""
        return NumpyDocString.get_errors(self)
class NumpyModuleDocString(NumpyDocString):
    """Module doc strings: no parsing is done."""

    def _parse(self):
        # Modules keep their docstring verbatim; nothing to parse.
        self.out = []

    def __str__(self):
        # Reproduce the original (de-indented) docstring unchanged.
        return "\n".join(self._doc._str)

    def get_errors(self):
        # Section order is meaningless when nothing was parsed.
        return NumpyDocString.get_errors(self, check_order=False)
def header(text, style='-'):
    """Return *text* underlined with *style* characters, newline-terminated."""
    return '%s\n%s\n' % (text, style * len(text))
class SphinxDocString(NumpyDocString):
    """Render a parsed docstring as Sphinx-flavoured reST."""

    # string conversion routines
    def _str_header(self, name, symbol='`'):
        # Bold title; underline length accounts for the asterisks.
        return ['**' + name + '**', symbol * (len(name) + 4)]

    def _str_indent(self, doc, indent=4):
        pad = ' ' * indent
        return [pad + line for line in doc]

    def _str_signature(self):
        return ['``%s``' % self['Signature'].replace('*', '\*'), '']

    def _str_summary(self):
        return self['Summary'] + ['']

    def _str_extended_summary(self):
        return self['Extended Summary'] + ['']

    def _str_param_list(self, name):
        if not self[name]:
            return []
        out = self._str_header(name) + ['']
        for param, param_type, desc in self[name]:
            out += self._str_indent(['**%s** : %s' % (param, param_type)])
            out += ['']
            out += self._str_indent(desc, 8)
            out += ['']
        return out

    def _str_section(self, name):
        if not self[name]:
            return []
        return (self._str_header(name) + [''] +
                self._str_indent(self[name]) + [''])

    def _str_index(self):
        idx = self['index']
        out = ['.. index:: %s' % idx.get('default', '')]
        for section, references in idx.iteritems():
            if section != 'default':
                out.append('   :%s: %s' % (section, ', '.join(references)))
        return out

    def __str__(self, indent=0):
        out = []
        out += self._str_summary()
        out += self._str_extended_summary()
        for param_list in ('Parameters', 'Returns', 'Raises', 'Warns'):
            out += self._str_param_list(param_list)
        for s in ('Notes', 'References', 'Examples'):
            out += self._str_section(s)
        # out += self._str_index()
        return '\n'.join(self._str_indent(out, indent))
class FunctionDoc(object):
    # Render a single function's docstring as Sphinx reST.
    # NOTE: Python 2 only syntax below (``except ..., e`` and print
    # statements).
    def __init__(self,func):
        self._f = func
    def __str__(self):
        # Build a signature header followed by the parsed docstring body.
        out = ''
        doclines = inspect.getdoc(self._f) or ''
        try:
            doc = SphinxDocString(doclines)
        except Exception, e:
            # Parsing failed: report the error and return an empty body.
            print '*'*78
            print "ERROR: '%s' while parsing `%s`" % (e, self._f)
            print '*'*78
            #print "Docstring follows:"
            #print doclines
            #print '='*78
            return out
        if doc['Signature']:
            # The docstring supplied its own signature; use it.
            out += '%s\n' % header('**%s**' %
                                   doc['Signature'].replace('*','\*'), '-')
        else:
            try:
                # try to read signature
                argspec = inspect.getargspec(self._f)
                argspec = inspect.formatargspec(*argspec)
                argspec = argspec.replace('*','\*')
                out += header('%s%s' % (self._f.__name__, argspec), '-')
            except TypeError, e:
                # Builtins and other non-Python callables have no argspec.
                out += '%s\n' % header('**%s()**' % self._f.__name__, '-')
        out += str(doc)
        return out
class ClassDoc(object):
    # Render a class's public methods as Sphinx reST (Python 2 print
    # statements below).
    def __init__(self,cls,modulename=''):
        if not inspect.isclass(cls):
            raise ValueError("Initialise using an object")
        self._cls = cls
        # Normalise the module prefix so names render as "module.Class".
        if modulename and not modulename.endswith('.'):
            modulename += '.'
        self._mod = modulename
        self._name = cls.__name__
    @property
    def methods(self):
        # Public (non-underscore) callables defined on the class.
        return [name for name,func in inspect.getmembers(self._cls)
                if not name.startswith('_') and callable(func)]
    def __str__(self):
        out = ''
        # NOTE(review): replace_header is never used in this method --
        # looks like leftover code; consider removing.
        def replace_header(match):
            return '"'*(match.end() - match.start())
        for m in self.methods:
            print "Parsing `%s`" % m
            out += str(FunctionDoc(getattr(self._cls,m))) + '\n\n'
            out += '.. index::\n   single: %s; %s\n\n' % (self._name, m)
        return out
def handle_function(val, name):
    """Check one function's docstring; return (name, error) tuples."""
    docstring = inspect.getdoc(val)
    if docstring is None:
        return [(name, '**missing** function-level docstring')]
    parsed = NumpyFunctionDocString(docstring, val)
    return [(name, err) for err in parsed.get_errors()]
def handle_module(val, name):
    """Check a module docstring; return (name, error) tuples.

    *val* is the docstring itself (or None when it is missing).
    """
    if val is None:
        return [(name, '**missing** module-level docstring')]
    return [(name, err) for err in NumpyModuleDocString(val).get_errors()]
def handle_method(method, method_name, class_name):
    """Check one method's docstring; return (class, method, error) tuples."""
    docstring = inspect.getdoc(method)
    if docstring is None:
        return [(class_name, method_name,
                 '**missing** method-level docstring')]
    parsed = NumpyFunctionDocString(docstring, method)
    return [(class_name, method_name, err) for err in parsed.get_errors()]
def handle_class(val, class_name):
    """Check a class docstring and those of its public methods."""
    docstring = inspect.getdoc(val)
    if docstring is None:
        cls_errors = [(class_name, '**missing** class-level docstring')]
    else:
        cls_errors = [(e,) for e in
                      NumpyClassDocString(docstring).get_errors()]

    # Collect public methods (excluding type objects) and validate their
    # docstrings too.
    methods = dict((name, func) for name, func in inspect.getmembers(val)
                   if not name.startswith('_') and callable(func)
                   and type(func) is not type)
    for m_name, method in methods.iteritems():
        cls_errors.extend(handle_method(method, m_name, class_name))
    return cls_errors
def docstring_errors(filename, global_dict=None):
    """
    Run a Python file, parse the docstrings of all the classes
    and functions it declares, and return them.

    Parameters
    ----------
    filename : str
        Filename of the module to run.
    global_dict : dict, optional
        Globals dictionary to pass along to `execfile()`.

    Returns
    -------
    all_errors : list
        Each entry of the list is a tuple, of length 2 or 3, with
        format either

        (func_or_class_name, docstring_error_description)

        or

        (class_name, method_name, docstring_error_description)
    """
    if global_dict is None:
        global_dict = {}
    if '__file__' not in global_dict:
        global_dict['__file__'] = filename
    if '__doc__' not in global_dict:
        global_dict['__doc__'] = None
    try:
        # Python 2 only: execute the module so that its top-level names
        # land in global_dict.
        execfile(filename, global_dict)
    except SystemExit:
        # A module calling sys.exit() on import is not an error here.
        pass
    except SkipTest:
        raise AssertionError("Couldn't verify format of " + filename +
                             "due to SkipTest")
    all_errors = []
    for key, val in global_dict.iteritems():
        if not key.startswith('_'):
            module_name = ""
            if hasattr(inspect.getmodule(val), '__name__'):
                module_name = inspect.getmodule(val).__name__
            # Only check objects defined by the executed file itself
            # (execfile'd definitions have no module, or '__builtin__').
            if (inspect.isfunction(val) or inspect.isclass(val)) and\
               (inspect.getmodule(val) is None
                   or module_name == '__builtin__'):
                # Functions
                if type(val) == types.FunctionType:
                    all_errors.extend(handle_function(val, key))
                # New-style classes
                elif type(val) == types.TypeType:
                    all_errors.extend(handle_class(val, key))
                # Old-style classes
                elif type(val) == types.ClassType:
                    all_errors.extend(handle_class(val, key))
        elif key == '__doc__':
            all_errors.extend(handle_module(val, key))
    if all_errors:
        # Prefix the report with the filename being checked.
        all_errors.insert(0, ("%s:"%filename,))
    return all_errors
if __name__ == "__main__":
all_errors = docstring_errors(sys.argv[1])
if len(all_errors) > 0:
print "*" * 30, "docstring errors", "*" * 30
for line in all_errors:
print ':'.join(line)
sys.exit(int(len(all_errors) > 0))
|
"""django-cms plugins for the ``multilingual_orgs`` app."""
from django.utils.translation import ugettext_lazy as _
from cms.plugin_base import CMSPluginBase
from cms.plugin_pool import plugin_pool
from .models import OrganizationPluginModel
class OrganizationPlugin(CMSPluginBase):
    """django-cms plugin that renders one organization."""
    model = OrganizationPluginModel
    name = _("Organization Plugin")
    render_template = "multilingual_orgs/organization_plugin.html"

    def render(self, context, instance, placeholder):
        # Expose the plugin instance, its organization and the configured
        # display type to the template.
        extra_context = {
            'plugin': instance,
            'organization': instance.organization,
            'display_type': instance.display_type,
        }
        context.update(extra_context)
        return context


plugin_pool.register_plugin(OrganizationPlugin)
codereview fixes for 23ec229f437bdeeb5a16d978e43f31447d30016d
"""django-cms plugins for the ``multilingual_orgs`` app."""
from django.utils.translation import ugettext_lazy as _
from cms.plugin_base import CMSPluginBase
from cms.plugin_pool import plugin_pool
from .models import OrganizationPluginModel
class OrganizationPlugin(CMSPluginBase):
    """django-cms plugin exposing an organization to its template."""
    model = OrganizationPluginModel
    name = _('Organization Plugin')
    render_template = 'multilingual_orgs/organization_plugin.html'

    def render(self, context, instance, placeholder):
        # Hand the plugin instance, its organization and the configured
        # display type over to the render template.
        context.update({
            'display_type': instance.display_type,
            'organization': instance.organization,
            'plugin': instance,
        })
        return context


plugin_pool.register_plugin(OrganizationPlugin)
|
'''
Created on Feb 6, 2015
@author: cmccully
'''
from __future__ import absolute_import, division, print_function
from future.utils import iteritems
import om10
import numpy as np
import re
import json
import os
import pandas as pd
import copy
import gzip
import shutil
from lsst.utils import getPackageDir
from lsst.sims.utils import SpecMap, defaultSpecMap
from lsst.sims.catUtils.baseCatalogModels import GalaxyTileCompoundObj
from lsst.sims.catUtils.matchSED import matchBase
from lsst.sims.photUtils import Bandpass, BandpassDict, Sed
from lsst.sims.utils import radiansFromArcsec
from lsst.sims.catUtils.supernovae import SNObject
__all__ = ['sprinklerCompound', 'sprinkler']
class sprinklerCompound(GalaxyTileCompoundObj):
    """GalaxyTileCompoundObj whose final pass runs the Twinkles sprinkler."""
    objid = 'sprinklerCompound'
    objectTypeId = 66
    cached_sprinkling = False
    agn_cache_file = None
    sne_cache_file = None
    defs_file = None
    sed_path = None

    def _final_pass(self, results):
        # From the original GalaxyTileCompoundObj final pass method:
        # convert every RA/Dec column from degrees to radians.
        for column in results.dtype.fields:
            if 'raJ2000' in column or 'decJ2000' in column:
                results[column] = np.radians(results[column])

        # The fatboy stored procedure builds galtileid = tileid*10^8 + galid,
        # which makes the Twinkles uniqueIDs too large for PhoSim.  Since
        # Twinkles covers a single tile, that factor could be stripped:
        # results['galtileid'] = results['galtileid']#%100000000

        # Hand the catalog to the sprinkler, which inserts lensed systems.
        lens_adder = sprinkler(results, self.mjd, self.specFileMap,
                               self.sed_path,
                               density_param=1.0,
                               cached_sprinkling=self.cached_sprinkling,
                               agn_cache_file=self.agn_cache_file,
                               sne_cache_file=self.sne_cache_file,
                               defs_file=self.defs_file)
        return lens_adder.sprinkle()
class sprinkler():
    def __init__(self, catsim_cat, visit_mjd, specFileMap, sed_path,
                 om10_cat='twinkles_lenses_v2.fits',
                 sne_cat = 'dc2_sne_cat.csv', density_param=1., cached_sprinkling=False,
                 agn_cache_file=None, sne_cache_file=None, defs_file=None,
                 write_sn_sed=True):
        """Prepare the sprinkler: load lens/SNe catalogs and column defs.

        Parameters
        ----------
        catsim_cat: catsim catalog
            The results array from an instance catalog.
        visit_mjd: float
            The mjd of the visit
        specFileMap:
            This will tell the instance catalog where to write the files
        om10_cat: optional, defaults to 'twinkles_lenses_v2.fits'
            fits file with OM10 catalog
        sne_cat: optional, defaults to 'dc2_sne_cat.csv'
            csv file with the supernova catalog
        density_param: `np.float`, optional, defaults to 1.0
            the fraction of eligible agn objects that become lensed and
            should be between 0.0 and 1.0.
        cached_sprinkling: boolean
            If true then pick from a preselected list of galtileids
        agn_cache_file: str
            csv cache of AGN galtileids; required when
            ``cached_sprinkling`` is True.
        sne_cache_file: str
            csv cache of SNe galtileids; required when
            ``cached_sprinkling`` is True.
        defs_file: str
            csv file mapping generic column names to catalog columns;
            defaults to ``data/catsim_defs.csv`` in the Twinkles package.
        write_sn_sed: boolean
            Controls whether or not to actually write supernova
            SEDs to disk (default=True)

        Returns
        -------
        updated_catalog:
            A new results array with lens systems added (produced later
            by :meth:`sprinkle`).
        """
        twinklesDir = getPackageDir('Twinkles')
        om10_cat = os.path.join(twinklesDir, 'data', om10_cat)
        self.write_sn_sed = write_sn_sed
        self.catalog = catsim_cat
        self.catalog_column_names = catsim_cat.dtype.names
        # ****** THIS ASSUMES THAT THE ENVIRONMENT VARIABLE OM10_DIR IS SET *******
        lensdb = om10.DB(catalog=om10_cat, vb=False)
        # Copy so later mutations do not touch the om10 DB's own table.
        self.lenscat = lensdb.lenses.copy()
        self.density_param = density_param
        self.bandpassDict = BandpassDict.loadTotalBandpassesFromFiles(bandpassNames=['i'])
        self.sne_catalog = pd.read_csv(os.path.join(twinklesDir, 'data', sne_cat))
        #self.sne_catalog = self.sne_catalog.iloc[:101] ### Remove this after testing
        self.used_systems = []
        self.visit_mjd = visit_mjd
        self.sn_obj = SNObject(0., 0.)
        self.write_dir = specFileMap.subdir_map['(^specFileGLSN)']
        self.sed_path = sed_path
        self.cached_sprinkling = cached_sprinkling
        if self.cached_sprinkling is True:
            # Both cache files must be supplied together.
            if ((agn_cache_file is None) | (sne_cache_file is None)):
                raise AttributeError('Must specify cache files if using cached_sprinkling.')
            #agn_cache_file = os.path.join(twinklesDir, 'data', 'test_agn_galtile_cache.csv')
            self.agn_cache = pd.read_csv(agn_cache_file)
            #sne_cache_file = os.path.join(twinklesDir, 'data', 'test_sne_galtile_cache.csv')
            self.sne_cache = pd.read_csv(sne_cache_file)
        else:
            self.agn_cache = None
            self.sne_cache = None
        if defs_file is None:
            self.defs_file = os.path.join(twinklesDir, 'data', 'catsim_defs.csv')
        else:
            self.defs_file = defs_file
        specFileStart = 'Burst'
        # Locate the sims_sed_library subdirectory holding galaxy ('Burst')
        # SEDs.  NOTE(review): galSpecDir is unbound if no SpecMap entry
        # matches -- presumably one always does; confirm upstream.
        for key, val in sorted(iteritems(SpecMap.subdir_map)):
            if re.match(key, specFileStart):
                galSpecDir = str(val)
        self.galDir = str(getPackageDir('sims_sed_library') + '/' + galSpecDir + '/')
        self.imSimBand = Bandpass()
        self.imSimBand.imsimBandpass()
        #self.LRG_name = 'Burst.25E09.1Z.spec'
        #self.LRG = Sed()
        #self.LRG.readSED_flambda(str(galDir + self.LRG_name))
        #return
        #Calculate imsimband magnitudes of source galaxies for matching
        agn_fname = str(getPackageDir('sims_sed_library') + '/agnSED/agn.spec.gz')
        src_iband = self.lenscat['MAGI_IN']
        src_z = self.lenscat['ZSRC']
        self.src_mag_norm = []
        for src, s_z in zip(src_iband, src_z):
            agn_sed = Sed()
            agn_sed.readSED_flambda(agn_fname)
            # Redshift the template AGN SED to each source's redshift
            # before matching its i-band magnitude.
            agn_sed.redshiftSED(s_z, dimming=True)
            self.src_mag_norm.append(matchBase().calcMagNorm([src],
                                                             agn_sed,
                                                             self.bandpassDict))
        #self.src_mag_norm = matchBase().calcMagNorm(src_iband,
        #                                           [agn_sed]*len(src_iband),
        #
        #                                           self.bandpassDict)
        # Detect whether the catalog carries SN truth-parameter columns.
        has_sn_truth_params = False
        for name in self.catalog_column_names:
            if 'sn_truth_params' in name:
                has_sn_truth_params = True
                break
        self.defs_dict = {}
        self.logging_is_sprinkled = False
        self.store_sn_truth_params = False
        # Parse the defs file: each line maps a generic column name to one
        # (or, for multi-column entries, several) catalog column names.
        with open(self.defs_file, 'r') as f:
            for line in f:
                line_defs = line.strip().split(',')
                if len(line_defs) > 1:
                    if 'is_sprinkled' in line_defs[1]:
                        self.logging_is_sprinkled = True
                    if 'sn_truth_params' in line_defs[1] and has_sn_truth_params:
                        self.store_sn_truth_params = True
                    if len(line_defs) == 2:
                        self.defs_dict[line_defs[0]] = line_defs[1]
                    else:
                        self.defs_dict[line_defs[0]] = tuple((ll for ll in line_defs[1:]))
def sprinkle(self):
# Define a list that we can write out to a text file
lenslines = []
# For each galaxy in the catsim catalog
updated_catalog = self.catalog.copy()
# print("Running sprinkler. Catalog Length: ", len(self.catalog))
for rowNum, row in enumerate(self.catalog):
if isinstance(self.defs_dict['galtileid'], tuple):
galtileid = row[self.defs_dict['galtileid'][0]]
else:
galtileid = row[self.defs_dict['galtileid']]
# if rowNum == 100 or rowNum % 100000==0:
# print("Gone through ", rowNum, " lines of catalog.")
if not np.isnan(row[self.defs_dict['galaxyAgn_magNorm']]):
candidates = self.find_lens_candidates(row[self.defs_dict['galaxyAgn_redshift']],
row[self.defs_dict['galaxyAgn_magNorm']])
#varString = json.loads(row[self.defs_dict['galaxyAgn_varParamStr']])
# varString[self.defs_dict['pars']]['t0_mjd'] = 59300.0
#row[self.defs_dict['galaxyAgn_varParamStr']] = json.dumps(varString)
np.random.seed(galtileid % (2^32 -1))
pick_value = np.random.uniform()
# If there aren't any lensed sources at this redshift from OM10 move on the next object
if (((len(candidates) > 0) and (pick_value <= self.density_param) and (self.cached_sprinkling is False)) |
((self.cached_sprinkling is True) and (galtileid in self.agn_cache['galtileid'].values))):
# Randomly choose one the lens systems
# (can decide with or without replacement)
# Sort first to make sure the same choice is made every time
if self.cached_sprinkling is True:
twinkles_sys_cache = self.agn_cache.query('galtileid == %i' % galtileid)['twinkles_system'].values[0]
newlens = self.lenscat[np.where(self.lenscat['twinklesId'] == twinkles_sys_cache)[0]][0]
else:
candidates = candidates[np.argsort(candidates['twinklesId'])]
newlens = np.random.choice(candidates)
# Append the lens galaxy
# For each image, append the lens images
for i in range(newlens['NIMG']):
lensrow = row.copy()
# XIMG and YIMG are in arcseconds
# raPhSim and decPhoSim are in radians
#Shift all parts of the lensed object, not just its agn part
for lensPart in ['galaxyBulge', 'galaxyDisk', 'galaxyAgn']:
lens_ra = lensrow[self.defs_dict[str(lensPart+'_raJ2000')]]
lens_dec = lensrow[self.defs_dict[str(lensPart+'_decJ2000')]]
delta_ra = np.radians(newlens['XIMG'][i] / 3600.0) / np.cos(lens_dec)
delta_dec = np.radians(newlens['YIMG'][i] / 3600.0)
lensrow[self.defs_dict[str(lensPart + '_raJ2000')]] = lens_ra + delta_ra
lensrow[self.defs_dict[str(lensPart + '_decJ2000')]] = lens_dec + delta_dec
mag_adjust = 2.5*np.log10(np.abs(newlens['MAG'][i]))
lensrow[self.defs_dict['galaxyAgn_magNorm']] -= mag_adjust
varString = json.loads(lensrow[self.defs_dict['galaxyAgn_varParamStr']])
varString[self.defs_dict['pars']]['t0Delay'] = newlens['DELAY'][i]
varString[self.defs_dict['varMethodName']] = 'applyAgnTimeDelay'
lensrow[self.defs_dict['galaxyAgn_varParamStr']] = json.dumps(varString)
lensrow[self.defs_dict['galaxyDisk_majorAxis']] = 0.0
lensrow[self.defs_dict['galaxyDisk_minorAxis']] = 0.0
lensrow[self.defs_dict['galaxyDisk_positionAngle']] = 0.0
lensrow[self.defs_dict['galaxyDisk_internalAv']] = 0.0
lensrow[self.defs_dict['galaxyDisk_magNorm']] = 999. #np.nan To be fixed post run1.1
lensrow[self.defs_dict['galaxyDisk_sedFilename']] = None
lensrow[self.defs_dict['galaxyBulge_majorAxis']] = 0.0
lensrow[self.defs_dict['galaxyBulge_minorAxis']] = 0.0
lensrow[self.defs_dict['galaxyBulge_positionAngle']] = 0.0
lensrow[self.defs_dict['galaxyBulge_internalAv']] = 0.0
lensrow[self.defs_dict['galaxyBulge_magNorm']] = 999. #np.nan To be fixed post run1.1
lensrow[self.defs_dict['galaxyBulge_sedFilename']] = None
lensrow[self.defs_dict['galaxyBulge_redshift']] = newlens['ZSRC']
lensrow[self.defs_dict['galaxyDisk_redshift']] = newlens['ZSRC']
lensrow[self.defs_dict['galaxyAgn_redshift']] = newlens['ZSRC']
if self.logging_is_sprinkled:
lensrow[self.defs_dict['galaxyAgn_is_sprinkled']] = 1
lensrow[self.defs_dict['galaxyBulge_is_sprinkled']] = 1
lensrow[self.defs_dict['galaxyDisk_is_sprinkled']] = 1
#To get back twinklesID in lens catalog from phosim catalog id number
#just use np.right_shift(phosimID-28, 10). Take the floor of the last
#3 numbers to get twinklesID in the twinkles lens catalog and the remainder is
#the image number minus 1.
if not isinstance(self.defs_dict['galtileid'], tuple):
lensrow[self.defs_dict['galtileid']] = ((lensrow[self.defs_dict['galtileid']]+int(1.5e10))*10000 +
newlens['twinklesId']*4 + i)
else:
for col_name in self.defs_dict['galtileid']:
lensrow[col_name] = ((lensrow[col_name]+int(1.5e10))*10000 +
newlens['twinklesId']*4 + i)
updated_catalog = np.append(updated_catalog, lensrow)
#Now manipulate original entry to be the lens galaxy with desired properties
#Start by deleting Disk and AGN properties
if not np.isnan(row[self.defs_dict['galaxyDisk_magNorm']]):
row[self.defs_dict['galaxyDisk_majorAxis']] = 0.0
row[self.defs_dict['galaxyDisk_minorAxis']] = 0.0
row[self.defs_dict['galaxyDisk_positionAngle']] = 0.0
row[self.defs_dict['galaxyDisk_internalAv']] = 0.0
row[self.defs_dict['galaxyDisk_magNorm']] = 999. #np.nan To be fixed post run1.1
row[self.defs_dict['galaxyDisk_sedFilename']] = None
row[self.defs_dict['galaxyAgn_magNorm']] = None #np.nan To be fixed post run1.1
row[self.defs_dict['galaxyDisk_magNorm']] = 999. # To be fixed in run1.1
row[self.defs_dict['galaxyAgn_sedFilename']] = None
#Now insert desired Bulge properties
row[self.defs_dict['galaxyBulge_sedFilename']] = newlens['lens_sed']
row[self.defs_dict['galaxyBulge_redshift']] = newlens['ZLENS']
row[self.defs_dict['galaxyDisk_redshift']] = newlens['ZLENS']
row[self.defs_dict['galaxyAgn_redshift']] = newlens['ZLENS']
row_lens_sed = Sed()
row_lens_sed.readSED_flambda(str(self.galDir + newlens['lens_sed']))
row_lens_sed.redshiftSED(newlens['ZLENS'], dimming=True)
row[self.defs_dict['galaxyBulge_magNorm']] = matchBase().calcMagNorm([newlens['APMAG_I']], row_lens_sed,
self.bandpassDict) #Changed from i band to imsimband
row[self.defs_dict['galaxyBulge_majorAxis']] = radiansFromArcsec(newlens['REFF'] / np.sqrt(1 - newlens['ELLIP']))
row[self.defs_dict['galaxyBulge_minorAxis']] = radiansFromArcsec(newlens['REFF'] * np.sqrt(1 - newlens['ELLIP']))
#Convert orientation angle to west of north from east of north by *-1.0 and convert to radians
row[self.defs_dict['galaxyBulge_positionAngle']] = newlens['PHIE']*(-1.0)*np.pi/180.0
if self.logging_is_sprinkled:
row[self.defs_dict['galaxyAgn_is_sprinkled']] = 1
row[self.defs_dict['galaxyBulge_is_sprinkled']] = 1
row[self.defs_dict['galaxyDisk_is_sprinkled']] = 1
#Replace original entry with new entry
updated_catalog[rowNum] = row
else:
if self.cached_sprinkling is True:
if galtileid in self.sne_cache['galtileid'].values:
use_system = self.sne_cache.query('galtileid == %i' % galtileid)['twinkles_system'].values
use_df = self.sne_catalog.query('twinkles_sysno == %i' % use_system)
self.used_systems.append(use_system)
else:
continue
else:
lens_sne_candidates = self.find_sne_lens_candidates(row[self.defs_dict['galaxyDisk_redshift']])
candidate_sysno = np.unique(lens_sne_candidates['twinkles_sysno'])
num_candidates = len(candidate_sysno)
if num_candidates == 0:
continue
used_already = np.array([sys_num in self.used_systems for sys_num in candidate_sysno])
unused_sysno = candidate_sysno[~used_already]
if len(unused_sysno) == 0:
continue
np.random.seed(galtileid % (2^32 -1))
use_system = np.random.choice(unused_sysno)
use_df = self.sne_catalog.query('twinkles_sysno == %i' % use_system)
for i in range(len(use_df)):
lensrow = row.copy()
for lensPart in ['galaxyBulge', 'galaxyDisk', 'galaxyAgn']:
lens_ra = lensrow[self.defs_dict[str(lensPart+'_raJ2000')]]
lens_dec = lensrow[self.defs_dict[str(lensPart+'_decJ2000')]]
delta_ra = np.radians(use_df['x'].iloc[i] / 3600.0) / np.cos(lens_dec)
delta_dec = np.radians(use_df['y'].iloc[i] / 3600.0)
lensrow[self.defs_dict[str(lensPart + '_raJ2000')]] = lens_ra + delta_ra
lensrow[self.defs_dict[str(lensPart + '_decJ2000')]] = lens_dec + delta_dec
# varString = json.loads(lensrow[self.defs_dict['galaxyAgn_varParamStr']])
varString = 'None'
lensrow[self.defs_dict['galaxyAgn_varParamStr']] = varString
lensrow[self.defs_dict['galaxyDisk_majorAxis']] = 0.0
lensrow[self.defs_dict['galaxyDisk_minorAxis']] = 0.0
lensrow[self.defs_dict['galaxyDisk_positionAngle']] = 0.0
lensrow[self.defs_dict['galaxyDisk_internalAv']] = 0.0
lensrow[self.defs_dict['galaxyDisk_magNorm']] = 999. #np.nan To be fixed post run1.1
lensrow[self.defs_dict['galaxyDisk_sedFilename']] = None
lensrow[self.defs_dict['galaxyBulge_majorAxis']] = 0.0
lensrow[self.defs_dict['galaxyBulge_minorAxis']] = 0.0
lensrow[self.defs_dict['galaxyBulge_positionAngle']] = 0.0
lensrow[self.defs_dict['galaxyBulge_internalAv']] = 0.0
lensrow[self.defs_dict['galaxyBulge_magNorm']] = 999. #np.nan To be fixed post run1.1
lensrow[self.defs_dict['galaxyBulge_sedFilename']] = None
z_s = use_df['zs'].iloc[i]
lensrow[self.defs_dict['galaxyBulge_redshift']] = z_s
lensrow[self.defs_dict['galaxyDisk_redshift']] = z_s
lensrow[self.defs_dict['galaxyAgn_redshift']] = z_s
#To get back twinklesID in lens catalog from phosim catalog id number
#just use np.right_shift(phosimID-28, 10). Take the floor of the last
#3 numbers to get twinklesID in the twinkles lens catalog and the remainder is
#the image number minus 1.
if not isinstance(self.defs_dict['galtileid'], tuple):
lensrow[self.defs_dict['galtileid']] = ((lensrow[self.defs_dict['galtileid']]+int(1.5e10))*10000 +
use_system*4 + i)
else:
for col_name in self.defs_dict['galtileid']:
lensrow[col_name] = ((lensrow[col_name]+int(1.5e10))*10000 +
use_system*4 + i)
(add_to_cat, sn_magnorm,
sn_fname, sn_param_dict) = self.create_sn_sed(use_df.iloc[i],
lensrow[self.defs_dict['galaxyAgn_raJ2000']],
lensrow[self.defs_dict['galaxyAgn_decJ2000']],
self.visit_mjd,
write_sn_sed=self.write_sn_sed)
lensrow[self.defs_dict['galaxyAgn_sedFilename']] = sn_fname
lensrow[self.defs_dict['galaxyAgn_magNorm']] = sn_magnorm #This will need to be adjusted to proper band
mag_adjust = 2.5*np.log10(np.abs(use_df['mu'].iloc[i]))
lensrow[self.defs_dict['galaxyAgn_magNorm']] -= mag_adjust
if self.store_sn_truth_params:
add_to_cat = True
lensrow[self.defs_dict['galaxyAgn_sn_truth_params']] = json.dumps(sn_param_dict)
lensrow[self.defs_dict['galaxyAgn_sn_t0']] = sn_param_dict['t0']
if self.logging_is_sprinkled:
lensrow[self.defs_dict['galaxyAgn_is_sprinkled']] = 1
lensrow[self.defs_dict['galaxyBulge_is_sprinkled']] = 1
lensrow[self.defs_dict['galaxyDisk_is_sprinkled']] = 1
if add_to_cat is True:
updated_catalog = np.append(updated_catalog, lensrow)
else:
continue
#Now manipulate original entry to be the lens galaxy with desired properties
#Start by deleting Disk and AGN properties
if not np.isnan(row[self.defs_dict['galaxyDisk_magNorm']]):
row[self.defs_dict['galaxyDisk_majorAxis']] = 0.0
row[self.defs_dict['galaxyDisk_minorAxis']] = 0.0
row[self.defs_dict['galaxyDisk_positionAngle']] = 0.0
row[self.defs_dict['galaxyDisk_internalAv']] = 0.0
row[self.defs_dict['galaxyDisk_magNorm']] = 999. #np.nan To be fixed post run1.1
row[self.defs_dict['galaxyDisk_sedFilename']] = None
row[self.defs_dict['galaxyAgn_magNorm']] = None #np.nan To be fixed post run1.1
row[self.defs_dict['galaxyDisk_magNorm']] = 999. #To be fixed post run1.1
row[self.defs_dict['galaxyAgn_sedFilename']] = None
#Now insert desired Bulge properties
row[self.defs_dict['galaxyBulge_sedFilename']] = use_df['lensgal_sed'].iloc[0]
row[self.defs_dict['galaxyBulge_redshift']] = use_df['zl'].iloc[0]
row[self.defs_dict['galaxyDisk_redshift']] = use_df['zl'].iloc[0]
row[self.defs_dict['galaxyAgn_redshift']] = use_df['zl'].iloc[0]
row[self.defs_dict['galaxyBulge_magNorm']] = use_df['lensgal_magnorm'].iloc[0]
# row[self.defs_dict['galaxyBulge_magNorm']] = matchBase().calcMagNorm([newlens['APMAG_I']], self.LRG, self.bandpassDict) #Changed from i band to imsimband
row[self.defs_dict['galaxyBulge_majorAxis']] = radiansFromArcsec(use_df['lensgal_reff'].iloc[0] / np.sqrt(1 - use_df['e'].iloc[0]))
row[self.defs_dict['galaxyBulge_minorAxis']] = radiansFromArcsec(use_df['lensgal_reff'].iloc[0] * np.sqrt(1 - use_df['e'].iloc[0]))
#Convert orientation angle to west of north from east of north by *-1.0 and convert to radians
row[self.defs_dict['galaxyBulge_positionAngle']] = use_df['theta_e'].iloc[0]*(-1.0)*np.pi/180.0
if self.logging_is_sprinkled:
row[self.defs_dict['galaxyAgn_is_sprinkled']] = 1
row[self.defs_dict['galaxyBulge_is_sprinkled']] = 1
row[self.defs_dict['galaxyDisk_is_sprinkled']] = 1
#Replace original entry with new entry
updated_catalog[rowNum] = row
return updated_catalog
def find_lens_candidates(self, galz, gal_mag):
    """Select OM10 systems compatible with a CATSIM source.

    A system qualifies when its source redshift is within 0.1 dex of
    ``galz`` and its precomputed source magNorm is within 0.25 mag of
    ``gal_mag``.
    """
    z_match = np.abs(np.log10(self.lenscat['ZSRC']) - np.log10(galz)) <= 0.1
    mag_match = np.abs(self.src_mag_norm - gal_mag) <= .25
    keep = np.where(z_match & mag_match)[0]
    return self.lenscat[keep]
def find_sne_lens_candidates(self, galz):
    """Return lensed-SNe systems within 0.1 dex in redshift of ``galz``."""
    z_offset = np.abs(np.log10(self.sne_catalog['zs']) - np.log10(galz))
    matches = np.where(z_offset <= 0.1)
    return self.sne_catalog.iloc[matches]
def create_sn_sed(self, system_df, sn_ra, sn_dec, sed_mjd, write_sn_sed=True):
    """Build the SED for one lensed-SN image at the visit epoch.

    Parameters
    ----------
    system_df:
        One row (pandas Series) of the lensed-SNe catalog; columns read
        here: 'zs', 'c', 'x0', 'x1', 't_start', 'twinkles_sysno', 'imno'.
    sn_ra, sn_dec:
        Position of the shifted SN image (same units the SNObject state
        stores for '_ra'/'_dec' — presumably radians; TODO confirm).
    sed_mjd: float
        MJD at which the SED is evaluated.
    write_sn_sed: bool
        If True, write the SED to a gzipped text file under self.sed_path.

    Returns
    -------
    (add_to_cat, sn_magnorm, sn_name, SNstate):
        whether the image has any flux at this epoch, its imsim-band
        magNorm, the SED file basename (or None), and the SN state dict.
    """
    # Clone the template SN object's state, then overwrite the
    # system-specific parameters for this image.
    sn_param_dict = copy.deepcopy(self.sn_obj.SNstate)
    sn_param_dict['_ra'] = sn_ra
    sn_param_dict['_dec'] = sn_dec
    sn_param_dict['z'] = system_df['zs']
    sn_param_dict['c'] = system_df['c']
    sn_param_dict['x0'] = system_df['x0']
    sn_param_dict['x1'] = system_df['x1']
    sn_param_dict['t0'] = system_df['t_start']
    #sn_param_dict['t0'] = 62746.27 #+1500. ### For testing only
    current_sn_obj = self.sn_obj.fromSNState(sn_param_dict)
    # Apply Milky Way extinction from the dust maps.
    current_sn_obj.mwEBVfromMaps()
    # Wavelength grid over which the SED is sampled.
    wavelen_max = 1800.
    wavelen_min = 30.
    wavelen_step = 0.1
    sn_sed_obj = current_sn_obj.SNObjectSED(time=sed_mjd,
                                            wavelen=np.arange(wavelen_min, wavelen_max,
                                                              wavelen_step))
    # Flux at the first grid point at/above ~500 serves as a simple
    # "is the SN visible at this epoch" probe.
    flux_500 = sn_sed_obj.flambda[np.where(sn_sed_obj.wavelen >= 499.99)][0]
    if flux_500 > 0.:
        add_to_cat = True
        sn_magnorm = current_sn_obj.catsimBandMag(self.imSimBand, sed_mjd)
        sn_name = None
        if write_sn_sed:
            sn_name = 'specFileGLSN_%i_%i_%.4f.txt' % (system_df['twinkles_sysno'],
                                                       system_df['imno'], sed_mjd)
            sed_filename = '%s/%s' % (self.sed_path, sn_name)
            sn_sed_obj.writeSED(sed_filename)
            # gzip the written SED and drop the uncompressed original.
            with open(sed_filename, 'rb') as f_in, gzip.open(str(sed_filename + '.gz'), 'wb') as f_out:
                shutil.copyfileobj(f_in, f_out)
            os.remove(sed_filename)
    else:
        # SN is effectively dark at this epoch: signal the caller to skip it.
        add_to_cat = False
        sn_magnorm = np.nan
        sn_name = None
    return add_to_cat, sn_magnorm, sn_name, current_sn_obj.SNstate
def update_catsim(self):
    """Placeholder for a planned pipeline step.

    Intended to remove the original catsim object, add lensed images
    scaled by source brightness and magnification, and add the lens
    galaxy. Currently a no-op.
    """
    return
def catsim_to_phosim(self):
    """Placeholder: hand the catsim catalog to phosim for image
    generation. Currently a no-op."""
    return
Use sedDir (the sims_sed_library path), not galDir, when resolving SED files.
'''
Created on Feb 6, 2015
@author: cmccully
'''
from __future__ import absolute_import, division, print_function
from future.utils import iteritems
import om10
import numpy as np
import re
import json
import os
import pandas as pd
import copy
import gzip
import shutil
from lsst.utils import getPackageDir
from lsst.sims.utils import SpecMap, defaultSpecMap
from lsst.sims.catUtils.baseCatalogModels import GalaxyTileCompoundObj
from lsst.sims.catUtils.matchSED import matchBase
from lsst.sims.photUtils import Bandpass, BandpassDict, Sed
from lsst.sims.utils import radiansFromArcsec
from lsst.sims.catUtils.supernovae import SNObject
__all__ = ['sprinklerCompound', 'sprinkler']
class sprinklerCompound(GalaxyTileCompoundObj):
    """Compound CatSim database object that runs the sprinkler over the
    queried galaxy tile before results are handed to the catalog."""
    objid = 'sprinklerCompound'
    objectTypeId = 66
    # Configuration expected to be set by the caller before queries run.
    cached_sprinkling = False
    agn_cache_file = None
    sne_cache_file = None
    defs_file = None
    sed_path = None

    def _final_pass(self, results):
        """Post-process query results: convert angles to radians, then
        sprinkle lensed systems into the catalog.

        NOTE(review): self.mjd and self.specFileMap are read here but not
        set in this class — presumably provided by the parent CatSim
        machinery; confirm before refactoring.
        """
        #From the original GalaxyTileCompoundObj final pass method
        for name in results.dtype.fields:
            if 'raJ2000' in name or 'decJ2000' in name:
                results[name] = np.radians(results[name])
        # the stored procedure on fatboy that queries the galaxies
        # constructs galtileid by taking
        #
        # tileid*10^8 + galid
        #
        # this causes galtileid to be so large that the uniqueIDs in the
        # Twinkles InstanceCatalogs are too large for PhoSim to handle.
        # Since Twinkles is only focused on one tile on the sky, we will remove
        # the factor of 10^8, making the uniqueIDs a more manageable size
        # results['galtileid'] = results['galtileid']#%100000000
        #Use Sprinkler now
        sp = sprinkler(results, self.mjd, self.specFileMap, self.sed_path,
                       density_param=1.0,
                       cached_sprinkling=self.cached_sprinkling,
                       agn_cache_file=self.agn_cache_file,
                       sne_cache_file=self.sne_cache_file,
                       defs_file=self.defs_file)
        results = sp.sprinkle()
        return results
class sprinkler():
def __init__(self, catsim_cat, visit_mjd, specFileMap, sed_path,
             om10_cat='twinkles_lenses_v2.fits',
             sne_cat = 'dc2_sne_cat.csv', density_param=1., cached_sprinkling=False,
             agn_cache_file=None, sne_cache_file=None, defs_file=None,
             write_sn_sed=True):
    """
    Parameters
    ----------
    catsim_cat: catsim catalog
        The results array from an instance catalog.
    visit_mjd: float
        The mjd of the visit
    specFileMap:
        This will tell the instance catalog where to write the files
    sed_path: str
        Directory where supernova SED files are written.
    om10_cat: optional, defaults to 'twinkles_lenses_v2.fits'
        fits file with OM10 catalog
    sne_cat: optional, defaults to 'dc2_sne_cat.csv'
        csv file with the lensed-SNe system catalog
    density_param: `np.float`, optional, defaults to 1.0
        the fraction of eligible agn objects that become lensed and should
        be between 0.0 and 1.0.
    cached_sprinkling: boolean
        If true then pick from a preselected list of galtileids
    agn_cache_file: str
        csv mapping galtileid -> twinkles_system for AGN; required when
        cached_sprinkling is True.
    sne_cache_file: str
        csv mapping galtileid -> twinkles_system for SNe; required when
        cached_sprinkling is True.
    defs_file: str
        csv mapping internal column names to catalog column names;
        defaults to data/catsim_defs.csv in the Twinkles package.
    write_sn_sed: boolean
        Controls whether or not to actually write supernova
        SEDs to disk (default=True)
    """
    twinklesDir = getPackageDir('Twinkles')
    om10_cat = os.path.join(twinklesDir, 'data', om10_cat)
    self.write_sn_sed = write_sn_sed
    self.catalog = catsim_cat
    self.catalog_column_names = catsim_cat.dtype.names
    # ****** THIS ASSUMES THAT THE ENVIRONMENT VARIABLE OM10_DIR IS SET *******
    lensdb = om10.DB(catalog=om10_cat, vb=False)
    self.lenscat = lensdb.lenses.copy()
    self.density_param = density_param
    self.bandpassDict = BandpassDict.loadTotalBandpassesFromFiles(bandpassNames=['i'])
    self.sne_catalog = pd.read_csv(os.path.join(twinklesDir, 'data', sne_cat))
    #self.sne_catalog = self.sne_catalog.iloc[:101] ### Remove this after testing
    self.used_systems = []
    self.visit_mjd = visit_mjd
    # Template SN object; used only as a source of default SNstate values.
    self.sn_obj = SNObject(0., 0.)
    self.write_dir = specFileMap.subdir_map['(^specFileGLSN)']
    self.sed_path = sed_path
    self.cached_sprinkling = cached_sprinkling
    if self.cached_sprinkling is True:
        if ((agn_cache_file is None) | (sne_cache_file is None)):
            raise AttributeError('Must specify cache files if using cached_sprinkling.')
        #agn_cache_file = os.path.join(twinklesDir, 'data', 'test_agn_galtile_cache.csv')
        self.agn_cache = pd.read_csv(agn_cache_file)
        #sne_cache_file = os.path.join(twinklesDir, 'data', 'test_sne_galtile_cache.csv')
        self.sne_cache = pd.read_csv(sne_cache_file)
    else:
        self.agn_cache = None
        self.sne_cache = None
    if defs_file is None:
        self.defs_file = os.path.join(twinklesDir, 'data', 'catsim_defs.csv')
    else:
        self.defs_file = defs_file
    self.sedDir = getPackageDir('sims_sed_library')
    self.imSimBand = Bandpass()
    self.imSimBand.imsimBandpass()
    #self.LRG_name = 'Burst.25E09.1Z.spec'
    #self.LRG = Sed()
    #self.LRG.readSED_flambda(str(galDir + self.LRG_name))
    #return
    # Calculate imsimband magnitudes of source galaxies for matching.
    agn_fname = str(getPackageDir('sims_sed_library') + '/agnSED/agn.spec.gz')
    src_iband = self.lenscat['MAGI_IN']
    src_z = self.lenscat['ZSRC']
    self.src_mag_norm = []
    for src, s_z in zip(src_iband, src_z):
        agn_sed = Sed()
        agn_sed.readSED_flambda(agn_fname)
        agn_sed.redshiftSED(s_z, dimming=True)
        self.src_mag_norm.append(matchBase().calcMagNorm([src],
                                                         agn_sed,
                                                         self.bandpassDict))
    #self.src_mag_norm = matchBase().calcMagNorm(src_iband,
    #                                            [agn_sed]*len(src_iband),
    #
    #                                            self.bandpassDict)
    # Does any catalog column carry SN truth-parameter payloads?
    has_sn_truth_params = False
    for name in self.catalog_column_names:
        if 'sn_truth_params' in name:
            has_sn_truth_params = True
            break
    self.defs_dict = {}
    self.logging_is_sprinkled = False
    self.store_sn_truth_params = False
    # Parse the defs file: each line is "internal_name,catalog_name[,...]";
    # multi-valued rows become tuples.
    with open(self.defs_file, 'r') as f:
        for line in f:
            line_defs = line.strip().split(',')
            if len(line_defs) > 1:
                if 'is_sprinkled' in line_defs[1]:
                    self.logging_is_sprinkled = True
                if 'sn_truth_params' in line_defs[1] and has_sn_truth_params:
                    self.store_sn_truth_params = True
                if len(line_defs) == 2:
                    self.defs_dict[line_defs[0]] = line_defs[1]
                else:
                    self.defs_dict[line_defs[0]] = tuple((ll for ll in line_defs[1:]))
def sprinkle(self):
    """Walk the input catalog and sprinkle in strongly lensed systems.

    Rows with a valid AGN magNorm may be replaced by a lensed-quasar
    system from the OM10 catalog; rows without one may instead host a
    lensed SNIa system from ``self.sne_catalog``.  In both cases the
    lensed images are appended as new rows and the original row is
    converted into the lens galaxy.

    Fixes relative to the previous revision:
    * ``np.random.seed(galtileid % (2^32 - 1))`` used ``^`` (XOR), so the
      modulus was 33 and only 33 distinct seeds were possible; the
      intended ``2**32 - 1`` keeps seeds in numpy's valid 32-bit range
      while remaining unique per galtileid.
    * The non-cached SNe path now records the chosen system in
      ``self.used_systems`` so the "already used" filter actually works.
    * Removed the unused local ``lenslines``.

    Returns
    -------
    updated_catalog: numpy structured array
        Copy of ``self.catalog`` with lensed-image rows appended and
        sprinkled rows replaced by their lens galaxies.
    """
    updated_catalog = self.catalog.copy()
    for rowNum, row in enumerate(self.catalog):
        # galtileid may live in a single column or be mirrored in several.
        if isinstance(self.defs_dict['galtileid'], tuple):
            galtileid = row[self.defs_dict['galtileid'][0]]
        else:
            galtileid = row[self.defs_dict['galtileid']]
        if not np.isnan(row[self.defs_dict['galaxyAgn_magNorm']]):
            # ---- AGN branch: try to sprinkle a lensed quasar ----
            candidates = self.find_lens_candidates(row[self.defs_dict['galaxyAgn_redshift']],
                                                   row[self.defs_dict['galaxyAgn_magNorm']])
            # Deterministic per-object seed (BUG FIX: was '2^32 - 1' == 33).
            np.random.seed(galtileid % (2**32 - 1))
            pick_value = np.random.uniform()
            # If there aren't any lensed sources at this redshift from OM10 move on the next object
            if (((len(candidates) > 0) and (pick_value <= self.density_param) and (self.cached_sprinkling is False)) |
                    ((self.cached_sprinkling is True) and (galtileid in self.agn_cache['galtileid'].values))):
                # Randomly choose one of the lens systems
                # (can decide with or without replacement)
                # Sort first to make sure the same choice is made every time
                if self.cached_sprinkling is True:
                    twinkles_sys_cache = self.agn_cache.query('galtileid == %i' % galtileid)['twinkles_system'].values[0]
                    newlens = self.lenscat[np.where(self.lenscat['twinklesId'] == twinkles_sys_cache)[0]][0]
                else:
                    candidates = candidates[np.argsort(candidates['twinklesId'])]
                    newlens = np.random.choice(candidates)
                # Append one catalog row per lensed image.
                for i in range(newlens['NIMG']):
                    lensrow = row.copy()
                    # XIMG and YIMG are in arcseconds;
                    # raPhoSim and decPhoSim are in radians.
                    # Shift all parts of the lensed object, not just its AGN part.
                    for lensPart in ['galaxyBulge', 'galaxyDisk', 'galaxyAgn']:
                        lens_ra = lensrow[self.defs_dict[str(lensPart+'_raJ2000')]]
                        lens_dec = lensrow[self.defs_dict[str(lensPart+'_decJ2000')]]
                        delta_ra = np.radians(newlens['XIMG'][i] / 3600.0) / np.cos(lens_dec)
                        delta_dec = np.radians(newlens['YIMG'][i] / 3600.0)
                        lensrow[self.defs_dict[str(lensPart + '_raJ2000')]] = lens_ra + delta_ra
                        lensrow[self.defs_dict[str(lensPart + '_decJ2000')]] = lens_dec + delta_dec
                    # Brighten the image by its lensing magnification.
                    mag_adjust = 2.5*np.log10(np.abs(newlens['MAG'][i]))
                    lensrow[self.defs_dict['galaxyAgn_magNorm']] -= mag_adjust
                    # Record the image's time delay in the AGN variability model.
                    varString = json.loads(lensrow[self.defs_dict['galaxyAgn_varParamStr']])
                    varString[self.defs_dict['pars']]['t0Delay'] = newlens['DELAY'][i]
                    varString[self.defs_dict['varMethodName']] = 'applyAgnTimeDelay'
                    lensrow[self.defs_dict['galaxyAgn_varParamStr']] = json.dumps(varString)
                    # Null out the host's disk and bulge components.
                    lensrow[self.defs_dict['galaxyDisk_majorAxis']] = 0.0
                    lensrow[self.defs_dict['galaxyDisk_minorAxis']] = 0.0
                    lensrow[self.defs_dict['galaxyDisk_positionAngle']] = 0.0
                    lensrow[self.defs_dict['galaxyDisk_internalAv']] = 0.0
                    lensrow[self.defs_dict['galaxyDisk_magNorm']] = 999.  # np.nan To be fixed post run1.1
                    lensrow[self.defs_dict['galaxyDisk_sedFilename']] = None
                    lensrow[self.defs_dict['galaxyBulge_majorAxis']] = 0.0
                    lensrow[self.defs_dict['galaxyBulge_minorAxis']] = 0.0
                    lensrow[self.defs_dict['galaxyBulge_positionAngle']] = 0.0
                    lensrow[self.defs_dict['galaxyBulge_internalAv']] = 0.0
                    lensrow[self.defs_dict['galaxyBulge_magNorm']] = 999.  # np.nan To be fixed post run1.1
                    lensrow[self.defs_dict['galaxyBulge_sedFilename']] = None
                    lensrow[self.defs_dict['galaxyBulge_redshift']] = newlens['ZSRC']
                    lensrow[self.defs_dict['galaxyDisk_redshift']] = newlens['ZSRC']
                    lensrow[self.defs_dict['galaxyAgn_redshift']] = newlens['ZSRC']
                    if self.logging_is_sprinkled:
                        lensrow[self.defs_dict['galaxyAgn_is_sprinkled']] = 1
                        lensrow[self.defs_dict['galaxyBulge_is_sprinkled']] = 1
                        lensrow[self.defs_dict['galaxyDisk_is_sprinkled']] = 1
                    # To get back twinklesID in lens catalog from phosim catalog id number
                    # just use np.right_shift(phosimID-28, 10). Take the floor of the last
                    # 3 numbers to get twinklesID in the twinkles lens catalog and the
                    # remainder is the image number minus 1.
                    if not isinstance(self.defs_dict['galtileid'], tuple):
                        lensrow[self.defs_dict['galtileid']] = ((lensrow[self.defs_dict['galtileid']]+int(1.5e10))*10000 +
                                                                newlens['twinklesId']*4 + i)
                    else:
                        for col_name in self.defs_dict['galtileid']:
                            lensrow[col_name] = ((lensrow[col_name]+int(1.5e10))*10000 +
                                                 newlens['twinklesId']*4 + i)
                    updated_catalog = np.append(updated_catalog, lensrow)
                # Now manipulate the original entry to be the lens galaxy with
                # desired properties. Start by deleting Disk and AGN properties.
                if not np.isnan(row[self.defs_dict['galaxyDisk_magNorm']]):
                    row[self.defs_dict['galaxyDisk_majorAxis']] = 0.0
                    row[self.defs_dict['galaxyDisk_minorAxis']] = 0.0
                    row[self.defs_dict['galaxyDisk_positionAngle']] = 0.0
                    row[self.defs_dict['galaxyDisk_internalAv']] = 0.0
                    row[self.defs_dict['galaxyDisk_sedFilename']] = None
                row[self.defs_dict['galaxyAgn_magNorm']] = None  # np.nan To be fixed post run1.1
                row[self.defs_dict['galaxyDisk_magNorm']] = 999.  # To be fixed in run1.1
                row[self.defs_dict['galaxyAgn_sedFilename']] = None
                # Now insert desired Bulge properties from the lens system.
                row[self.defs_dict['galaxyBulge_sedFilename']] = newlens['lens_sed']
                row[self.defs_dict['galaxyBulge_redshift']] = newlens['ZLENS']
                row[self.defs_dict['galaxyDisk_redshift']] = newlens['ZLENS']
                row[self.defs_dict['galaxyAgn_redshift']] = newlens['ZLENS']
                row_lens_sed = Sed()
                row_lens_sed.readSED_flambda(os.path.join(self.sedDir,
                                                          newlens['lens_sed']))
                row_lens_sed.redshiftSED(newlens['ZLENS'], dimming=True)
                row[self.defs_dict['galaxyBulge_magNorm']] = matchBase().calcMagNorm([newlens['APMAG_I']], row_lens_sed,
                                                                                     self.bandpassDict)  # Changed from i band to imsimband
                row[self.defs_dict['galaxyBulge_majorAxis']] = radiansFromArcsec(newlens['REFF'] / np.sqrt(1 - newlens['ELLIP']))
                row[self.defs_dict['galaxyBulge_minorAxis']] = radiansFromArcsec(newlens['REFF'] * np.sqrt(1 - newlens['ELLIP']))
                # Convert orientation angle to west of north from east of north
                # by *-1.0 and convert to radians.
                row[self.defs_dict['galaxyBulge_positionAngle']] = newlens['PHIE']*(-1.0)*np.pi/180.0
                if self.logging_is_sprinkled:
                    row[self.defs_dict['galaxyAgn_is_sprinkled']] = 1
                    row[self.defs_dict['galaxyBulge_is_sprinkled']] = 1
                    row[self.defs_dict['galaxyDisk_is_sprinkled']] = 1
                # Replace original entry with new entry.
                updated_catalog[rowNum] = row
        else:
            # ---- SNe branch: no AGN; maybe host a lensed SNIa system ----
            if self.cached_sprinkling is True:
                if galtileid in self.sne_cache['galtileid'].values:
                    use_system = self.sne_cache.query('galtileid == %i' % galtileid)['twinkles_system'].values
                    use_df = self.sne_catalog.query('twinkles_sysno == %i' % use_system)
                    self.used_systems.append(use_system)
                else:
                    continue
            else:
                lens_sne_candidates = self.find_sne_lens_candidates(row[self.defs_dict['galaxyDisk_redshift']])
                candidate_sysno = np.unique(lens_sne_candidates['twinkles_sysno'])
                num_candidates = len(candidate_sysno)
                if num_candidates == 0:
                    continue
                # Filter out systems already placed elsewhere.
                used_already = np.array([sys_num in self.used_systems for sys_num in candidate_sysno])
                unused_sysno = candidate_sysno[~used_already]
                if len(unused_sysno) == 0:
                    continue
                # Deterministic per-object seed (BUG FIX: was '2^32 - 1' == 33).
                np.random.seed(galtileid % (2**32 - 1))
                use_system = np.random.choice(unused_sysno)
                use_df = self.sne_catalog.query('twinkles_sysno == %i' % use_system)
                # BUG FIX: record the choice; previously nothing was ever
                # appended in this path, so the 'used_already' filter above
                # could never exclude a system.
                self.used_systems.append(use_system)
            # One appended catalog row per lensed SN image.
            for i in range(len(use_df)):
                lensrow = row.copy()
                for lensPart in ['galaxyBulge', 'galaxyDisk', 'galaxyAgn']:
                    lens_ra = lensrow[self.defs_dict[str(lensPart+'_raJ2000')]]
                    lens_dec = lensrow[self.defs_dict[str(lensPart+'_decJ2000')]]
                    delta_ra = np.radians(use_df['x'].iloc[i] / 3600.0) / np.cos(lens_dec)
                    delta_dec = np.radians(use_df['y'].iloc[i] / 3600.0)
                    lensrow[self.defs_dict[str(lensPart + '_raJ2000')]] = lens_ra + delta_ra
                    lensrow[self.defs_dict[str(lensPart + '_decJ2000')]] = lens_dec + delta_dec
                # varString = json.loads(lensrow[self.defs_dict['galaxyAgn_varParamStr']])
                varString = 'None'
                lensrow[self.defs_dict['galaxyAgn_varParamStr']] = varString
                lensrow[self.defs_dict['galaxyDisk_majorAxis']] = 0.0
                lensrow[self.defs_dict['galaxyDisk_minorAxis']] = 0.0
                lensrow[self.defs_dict['galaxyDisk_positionAngle']] = 0.0
                lensrow[self.defs_dict['galaxyDisk_internalAv']] = 0.0
                lensrow[self.defs_dict['galaxyDisk_magNorm']] = 999.  # np.nan To be fixed post run1.1
                lensrow[self.defs_dict['galaxyDisk_sedFilename']] = None
                lensrow[self.defs_dict['galaxyBulge_majorAxis']] = 0.0
                lensrow[self.defs_dict['galaxyBulge_minorAxis']] = 0.0
                lensrow[self.defs_dict['galaxyBulge_positionAngle']] = 0.0
                lensrow[self.defs_dict['galaxyBulge_internalAv']] = 0.0
                lensrow[self.defs_dict['galaxyBulge_magNorm']] = 999.  # np.nan To be fixed post run1.1
                lensrow[self.defs_dict['galaxyBulge_sedFilename']] = None
                z_s = use_df['zs'].iloc[i]
                lensrow[self.defs_dict['galaxyBulge_redshift']] = z_s
                lensrow[self.defs_dict['galaxyDisk_redshift']] = z_s
                lensrow[self.defs_dict['galaxyAgn_redshift']] = z_s
                # To get back twinklesID in lens catalog from phosim catalog id number
                # just use np.right_shift(phosimID-28, 10). Take the floor of the last
                # 3 numbers to get twinklesID in the twinkles lens catalog and the
                # remainder is the image number minus 1.
                if not isinstance(self.defs_dict['galtileid'], tuple):
                    lensrow[self.defs_dict['galtileid']] = ((lensrow[self.defs_dict['galtileid']]+int(1.5e10))*10000 +
                                                            use_system*4 + i)
                else:
                    for col_name in self.defs_dict['galtileid']:
                        lensrow[col_name] = ((lensrow[col_name]+int(1.5e10))*10000 +
                                             use_system*4 + i)
                (add_to_cat, sn_magnorm,
                 sn_fname, sn_param_dict) = self.create_sn_sed(use_df.iloc[i],
                                                               lensrow[self.defs_dict['galaxyAgn_raJ2000']],
                                                               lensrow[self.defs_dict['galaxyAgn_decJ2000']],
                                                               self.visit_mjd,
                                                               write_sn_sed=self.write_sn_sed)
                lensrow[self.defs_dict['galaxyAgn_sedFilename']] = sn_fname
                lensrow[self.defs_dict['galaxyAgn_magNorm']] = sn_magnorm  # This will need to be adjusted to proper band
                # Brighten the image by its lensing magnification.
                mag_adjust = 2.5*np.log10(np.abs(use_df['mu'].iloc[i]))
                lensrow[self.defs_dict['galaxyAgn_magNorm']] -= mag_adjust
                if self.store_sn_truth_params:
                    # Truth tables want every image, even dark ones.
                    add_to_cat = True
                    lensrow[self.defs_dict['galaxyAgn_sn_truth_params']] = json.dumps(sn_param_dict)
                    lensrow[self.defs_dict['galaxyAgn_sn_t0']] = sn_param_dict['t0']
                if self.logging_is_sprinkled:
                    lensrow[self.defs_dict['galaxyAgn_is_sprinkled']] = 1
                    lensrow[self.defs_dict['galaxyBulge_is_sprinkled']] = 1
                    lensrow[self.defs_dict['galaxyDisk_is_sprinkled']] = 1
                if add_to_cat is True:
                    updated_catalog = np.append(updated_catalog, lensrow)
                else:
                    continue
            # Now manipulate the original entry to be the lens galaxy with
            # desired properties. Start by deleting Disk and AGN properties.
            if not np.isnan(row[self.defs_dict['galaxyDisk_magNorm']]):
                row[self.defs_dict['galaxyDisk_majorAxis']] = 0.0
                row[self.defs_dict['galaxyDisk_minorAxis']] = 0.0
                row[self.defs_dict['galaxyDisk_positionAngle']] = 0.0
                row[self.defs_dict['galaxyDisk_internalAv']] = 0.0
                row[self.defs_dict['galaxyDisk_sedFilename']] = None
            row[self.defs_dict['galaxyAgn_magNorm']] = None  # np.nan To be fixed post run1.1
            row[self.defs_dict['galaxyDisk_magNorm']] = 999.  # To be fixed post run1.1
            row[self.defs_dict['galaxyAgn_sedFilename']] = None
            # Now insert desired Bulge properties from the lens system.
            row[self.defs_dict['galaxyBulge_sedFilename']] = use_df['lensgal_sed'].iloc[0]
            row[self.defs_dict['galaxyBulge_redshift']] = use_df['zl'].iloc[0]
            row[self.defs_dict['galaxyDisk_redshift']] = use_df['zl'].iloc[0]
            row[self.defs_dict['galaxyAgn_redshift']] = use_df['zl'].iloc[0]
            row[self.defs_dict['galaxyBulge_magNorm']] = use_df['lensgal_magnorm'].iloc[0]
            # row[self.defs_dict['galaxyBulge_magNorm']] = matchBase().calcMagNorm([newlens['APMAG_I']], self.LRG, self.bandpassDict) #Changed from i band to imsimband
            row[self.defs_dict['galaxyBulge_majorAxis']] = radiansFromArcsec(use_df['lensgal_reff'].iloc[0] / np.sqrt(1 - use_df['e'].iloc[0]))
            row[self.defs_dict['galaxyBulge_minorAxis']] = radiansFromArcsec(use_df['lensgal_reff'].iloc[0] * np.sqrt(1 - use_df['e'].iloc[0]))
            # Convert orientation angle to west of north from east of north
            # by *-1.0 and convert to radians.
            row[self.defs_dict['galaxyBulge_positionAngle']] = use_df['theta_e'].iloc[0]*(-1.0)*np.pi/180.0
            if self.logging_is_sprinkled:
                row[self.defs_dict['galaxyAgn_is_sprinkled']] = 1
                row[self.defs_dict['galaxyBulge_is_sprinkled']] = 1
                row[self.defs_dict['galaxyDisk_is_sprinkled']] = 1
            # Replace original entry with new entry.
            updated_catalog[rowNum] = row
    return updated_catalog
def find_lens_candidates(self, galz, gal_mag):
    """Search the OM10 catalog for systems matching a CATSIM source.

    A system matches when its source redshift lies within 0.1 dex of
    ``galz`` and its precomputed source magNorm lies within 0.25 mag of
    ``gal_mag``.
    """
    dex_offset = np.log10(self.lenscat['ZSRC']) - np.log10(galz)
    mag_offset = self.src_mag_norm - gal_mag
    selection = np.where((np.abs(dex_offset) <= 0.1) &
                         (np.abs(mag_offset) <= .25))[0]
    return self.lenscat[selection]
def find_sne_lens_candidates(self, galz):
    """Return lensed-SNe catalog rows within 0.1 dex in redshift of ``galz``."""
    close_in_z = np.abs(np.log10(self.sne_catalog['zs']) - np.log10(galz)) <= 0.1
    return self.sne_catalog.iloc[np.where(close_in_z)]
def create_sn_sed(self, system_df, sn_ra, sn_dec, sed_mjd, write_sn_sed=True):
    """Build the SED for one lensed-SN image at the visit epoch.

    Parameters
    ----------
    system_df:
        One row (pandas Series) of the lensed-SNe catalog; columns read
        here: 'zs', 'c', 'x0', 'x1', 't_start', 'twinkles_sysno', 'imno'.
    sn_ra, sn_dec:
        Position of the shifted SN image (same units the SNObject state
        stores for '_ra'/'_dec' — presumably radians; TODO confirm).
    sed_mjd: float
        MJD at which the SED is evaluated.
    write_sn_sed: bool
        If True, write the SED to a gzipped text file under self.sed_path.

    Returns
    -------
    (add_to_cat, sn_magnorm, sn_name, SNstate):
        whether the image has any flux at this epoch, its imsim-band
        magNorm, the SED file basename (or None), and the SN state dict.
    """
    # Clone the template SN object's state, then overwrite the
    # system-specific parameters for this image.
    sn_param_dict = copy.deepcopy(self.sn_obj.SNstate)
    sn_param_dict['_ra'] = sn_ra
    sn_param_dict['_dec'] = sn_dec
    sn_param_dict['z'] = system_df['zs']
    sn_param_dict['c'] = system_df['c']
    sn_param_dict['x0'] = system_df['x0']
    sn_param_dict['x1'] = system_df['x1']
    sn_param_dict['t0'] = system_df['t_start']
    #sn_param_dict['t0'] = 62746.27 #+1500. ### For testing only
    current_sn_obj = self.sn_obj.fromSNState(sn_param_dict)
    # Apply Milky Way extinction from the dust maps.
    current_sn_obj.mwEBVfromMaps()
    # Wavelength grid over which the SED is sampled.
    wavelen_max = 1800.
    wavelen_min = 30.
    wavelen_step = 0.1
    sn_sed_obj = current_sn_obj.SNObjectSED(time=sed_mjd,
                                            wavelen=np.arange(wavelen_min, wavelen_max,
                                                              wavelen_step))
    # Flux at the first grid point at/above ~500 serves as a simple
    # "is the SN visible at this epoch" probe.
    flux_500 = sn_sed_obj.flambda[np.where(sn_sed_obj.wavelen >= 499.99)][0]
    if flux_500 > 0.:
        add_to_cat = True
        sn_magnorm = current_sn_obj.catsimBandMag(self.imSimBand, sed_mjd)
        sn_name = None
        if write_sn_sed:
            sn_name = 'specFileGLSN_%i_%i_%.4f.txt' % (system_df['twinkles_sysno'],
                                                       system_df['imno'], sed_mjd)
            sed_filename = '%s/%s' % (self.sed_path, sn_name)
            sn_sed_obj.writeSED(sed_filename)
            # gzip the written SED and drop the uncompressed original.
            with open(sed_filename, 'rb') as f_in, gzip.open(str(sed_filename + '.gz'), 'wb') as f_out:
                shutil.copyfileobj(f_in, f_out)
            os.remove(sed_filename)
    else:
        # SN is effectively dark at this epoch: signal the caller to skip it.
        add_to_cat = False
        sn_magnorm = np.nan
        sn_name = None
    return add_to_cat, sn_magnorm, sn_name, current_sn_obj.SNstate
def update_catsim(self):
    # Planned step: remove the original catsim object, add the lensed
    # images (scaled by source brightness and magnification), and add
    # the lens galaxy. Not yet implemented.
    return
def catsim_to_phosim(self):
    # Planned step: hand this catsim catalog to phosim to make images.
    # Not yet implemented.
    return
|
# -*- coding: utf-8 -*-
from __future__ import print_function, division, absolute_import, unicode_literals
from grako.model import Node
from grako.rendering import render, Renderer, RenderingFormatter
class DelegatingRenderingFormatter(RenderingFormatter):
    """A RenderingFormatter that first offers each item to a delegate
    (typically a CodeGenerator) and falls back to the base formatter
    only when the delegate declines by returning None.
    """

    def __init__(self, delegate):
        # The delegate must expose render(item, join=..., **fields).
        assert hasattr(delegate, 'render')
        super(DelegatingRenderingFormatter, self).__init__()
        self.delegate = delegate

    #override
    def render(self, item, join='', **fields):
        result = self.delegate.render(item, join=join, **fields)
        if result is None:
            # BUG FIX: the original used super(DelegatingRenderingFormatter)
            # without 'self', yielding an unbound super object whose
            # attribute lookup fails at runtime; bind it to self so the
            # base-class render actually runs on fallback.
            result = super(DelegatingRenderingFormatter, self).render(
                item, join=join, **fields)
        return result

    def convert_field(self, value, conversion):
        # Nodes go through the delegate-aware render path.
        if isinstance(value, Node):
            return self.render(value)
        else:
            # NOTE(review): this starts the MRO lookup *above*
            # RenderingFormatter, deliberately(?) skipping its own
            # convert_field — confirm intent before changing.
            return super(RenderingFormatter, self).convert_field(value, conversion)
class ModelRenderer(Renderer):
    """Renders a single grako model node with this renderer's template,
    exposing the node's public attributes as template fields."""

    def __init__(self, codegen, node, template=None):
        super(ModelRenderer, self).__init__(template=template)
        self._codegen = codegen
        self._node = node
        # Share the code generator's delegating formatter so nested
        # nodes are rendered through the generator as well.
        self.formatter = codegen.formatter
        # FIXME: What if the node is not an AST or object?
        # if isinstance(node, Node):
        #     for name, value in vars(node).items():
        #         if not name.startswith('_'):
        #             setattr(self, name, value)
        #else:
        #    self.value = node
        self.__postinit__()

    def __postinit__(self):
        # Subclass hook, invoked at the end of __init__.
        pass

    @property
    def node(self):
        # The model node this renderer was created for.
        return self._node

    def get_renderer(self, item):
        # Delegate renderer lookup to the owning code generator.
        return self._codegen.get_renderer(item)

    def render(self, template=None, **fields):
        # FIXME: Not needed if Node copies AST entries to attributes
        # Expose the node's public attributes as template fields; note
        # that node attributes override any same-named caller fields.
        fields.update({k: v for k, v in vars(self.node).items() if not k.startswith('_')})
        return super(ModelRenderer, self).render(template=template, **fields)
class CodeGenerator(object):
    """
    A **CodeGenerator** is an abstract class that finds a
    ``ModelRenderer`` class with the same name as each model's node and
    uses it to render the node.
    """

    def __init__(self):
        # Formatter that routes Node-valued fields back through self.render.
        self.formatter = DelegatingRenderingFormatter(self)

    def _find_renderer_class(self, item):
        """
        This method is used to find a ``ModelRenderer`` for the given
        item. It must be overridden in concrete classes.
        """
        pass

    def get_renderer(self, item):
        """Instantiate the ModelRenderer registered for *item*, or
        return None when no renderer class is found."""
        rendererClass = self._find_renderer_class(item)
        if rendererClass is None:
            return None
        try:
            assert issubclass(rendererClass, ModelRenderer)
            return rendererClass(self, item)
        except Exception as e:
            # Re-raise the same exception type, tagging it with the
            # renderer class name to ease debugging.
            raise type(e)(str(e), rendererClass.__name__)

    def render(self, item, join='', **fields):
        renderer = self.get_renderer(item)
        if renderer is None:
            # Fall back to grako's generic render for plain values.
            return render(item, join=join, **fields)
        return renderer.render(**fields)
Added a NullModelRenderer class.
# -*- coding: utf-8 -*-
from __future__ import print_function, division, absolute_import, unicode_literals
from grako.model import Node
from grako.rendering import render, Renderer, RenderingFormatter
class DelegatingRenderingFormatter(RenderingFormatter):
    """A RenderingFormatter that first offers each item to a delegate
    (typically a CodeGenerator) and falls back to the base formatter
    only when the delegate declines by returning None.
    """

    def __init__(self, delegate):
        # The delegate must expose render(item, join=..., **fields).
        assert hasattr(delegate, 'render')
        super(DelegatingRenderingFormatter, self).__init__()
        self.delegate = delegate

    #override
    def render(self, item, join='', **fields):
        result = self.delegate.render(item, join=join, **fields)
        if result is None:
            # BUG FIX: the original used super(DelegatingRenderingFormatter)
            # without 'self', yielding an unbound super object whose
            # attribute lookup fails at runtime; bind it to self so the
            # base-class render actually runs on fallback.
            result = super(DelegatingRenderingFormatter, self).render(
                item, join=join, **fields)
        return result

    def convert_field(self, value, conversion):
        # Nodes go through the delegate-aware render path.
        if isinstance(value, Node):
            return self.render(value)
        else:
            # NOTE(review): this starts the MRO lookup *above*
            # RenderingFormatter, deliberately(?) skipping its own
            # convert_field — confirm intent before changing.
            return super(RenderingFormatter, self).convert_field(value, conversion)
class ModelRenderer(Renderer):
    """Renders a single grako model node with this renderer's template,
    exposing the node's public attributes as template fields."""

    def __init__(self, codegen, node, template=None):
        super(ModelRenderer, self).__init__(template=template)
        self._codegen = codegen
        self._node = node
        # Share the code generator's delegating formatter so nested
        # nodes are rendered through the generator as well.
        self.formatter = codegen.formatter
        # FIXME: What if the node is not an AST or object?
        # if isinstance(node, Node):
        #     for name, value in vars(node).items():
        #         if not name.startswith('_'):
        #             setattr(self, name, value)
        #else:
        #    self.value = node
        self.__postinit__()

    def __postinit__(self):
        # Subclass hook, invoked at the end of __init__.
        pass

    @property
    def node(self):
        # The model node this renderer was created for.
        return self._node

    def get_renderer(self, item):
        # Delegate renderer lookup to the owning code generator.
        return self._codegen.get_renderer(item)

    def render(self, template=None, **fields):
        # FIXME: Not needed if Node copies AST entries to attributes
        # Expose the node's public attributes as template fields; note
        # that node attributes override any same-named caller fields.
        fields.update({k: v for k, v in vars(self.node).items() if not k.startswith('_')})
        return super(ModelRenderer, self).render(template=template, **fields)
class NullModelRenderer(ModelRenderer):
    """A `ModelRenderer` that generates nothing.

    Register it for node types that should be silently skipped; the
    empty template should make rendering produce no text.
    """
    # Empty template: nothing is emitted for the node.
    template = ''
class CodeGenerator(object):
    """
    A **CodeGenerator** is an abstract class that finds a
    ``ModelRenderer`` class with the same name as each model's node and
    uses it to render the node.
    """

    def __init__(self):
        # Formatter that routes Node-valued fields back through self.render.
        self.formatter = DelegatingRenderingFormatter(self)

    def _find_renderer_class(self, item):
        """
        This method is used to find a ``ModelRenderer`` for the given
        item. It must be overridden in concrete classes.
        """
        pass

    def get_renderer(self, item):
        """Instantiate the ModelRenderer registered for *item*, or
        return None when no renderer class is found."""
        rendererClass = self._find_renderer_class(item)
        if rendererClass is None:
            return None
        try:
            assert issubclass(rendererClass, ModelRenderer)
            return rendererClass(self, item)
        except Exception as e:
            # Re-raise the same exception type, tagging it with the
            # renderer class name to ease debugging.
            raise type(e)(str(e), rendererClass.__name__)

    def render(self, item, join='', **fields):
        renderer = self.get_renderer(item)
        if renderer is None:
            # Fall back to grako's generic render for plain values.
            return render(item, join=join, **fields)
        return renderer.render(**fields)
|
#!/usr/bin/env python
# pep8.py - Check Python source code formatting, according to PEP 8
# Copyright (C) 2006-2009 Johann C. Rocholl <johann@rocholl.net>
# Copyright (C) 2009-2013 Florent Xicluna <florent.xicluna@gmail.com>
#
# Permission is hereby granted, free of charge, to any person
# obtaining a copy of this software and associated documentation files
# (the "Software"), to deal in the Software without restriction,
# including without limitation the rights to use, copy, modify, merge,
# publish, distribute, sublicense, and/or sell copies of the Software,
# and to permit persons to whom the Software is furnished to do so,
# subject to the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
# BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
r"""
Check Python source code formatting, according to PEP 8:
http://www.python.org/dev/peps/pep-0008/
For usage and a list of options, try this:
$ python pep8.py -h
This program and its regression test suite live here:
http://github.com/jcrocholl/pep8
Groups of errors and warnings:
E errors
W warnings
100 indentation
200 whitespace
300 blank lines
400 imports
500 line length
600 deprecation
700 statements
900 syntax error
"""
__version__ = '1.5.1'
import os
import sys
import re
import time
import inspect
import keyword
import tokenize
from optparse import OptionParser
from fnmatch import fnmatch
try:
from configparser import RawConfigParser
from io import TextIOWrapper
except ImportError:
from ConfigParser import RawConfigParser
# Comma-separated patterns of paths excluded from checks by default.
DEFAULT_EXCLUDE = '.svn,CVS,.bzr,.hg,.git,__pycache__'
# Checks ignored unless explicitly selected.
DEFAULT_IGNORE = 'E123,E226,E24'
# Per-user configuration file location (platform dependent).
if sys.platform == 'win32':
    DEFAULT_CONFIG = os.path.expanduser(r'~\.pep8')
else:
    DEFAULT_CONFIG = os.path.join(os.getenv('XDG_CONFIG_HOME') or
                                  os.path.expanduser('~/.config'), 'pep8')
# Files searched for per-project configuration.
PROJECT_CONFIG = ('setup.cfg', 'tox.ini', '.pep8')
TESTSUITE_PATH = os.path.join(os.path.dirname(__file__), 'testsuite')
MAX_LINE_LENGTH = 79
REPORT_FORMAT = {
    'default': '%(path)s:%(row)d:%(col)d: %(code)s %(text)s',
    'pylint': '%(path)s:%(row)d: [%(code)s] %(text)s',
}

# compile() flag to request an AST instead of executing the code.
PyCF_ONLY_AST = 1024
SINGLETONS = frozenset(['False', 'None', 'True'])
# Keywords minus the singletons, plus 'print' (a keyword on Python 2).
KEYWORDS = frozenset(keyword.kwlist + ['print']) - SINGLETONS
UNARY_OPERATORS = frozenset(['>>', '**', '*', '+', '-'])
ARITHMETIC_OP = frozenset(['**', '*', '/', '//', '+', '-'])
WS_OPTIONAL_OPERATORS = ARITHMETIC_OP.union(['^', '&', '|', '<<', '>>', '%'])
WS_NEEDED_OPERATORS = frozenset([
    '**=', '*=', '/=', '//=', '+=', '-=', '!=', '<>', '<', '>',
    '%=', '^=', '&=', '|=', '==', '<=', '>=', '<<=', '>>=', '='])
WHITESPACE = frozenset(' \t')
SKIP_TOKENS = frozenset([tokenize.COMMENT, tokenize.NL, tokenize.NEWLINE,
                         tokenize.INDENT, tokenize.DEDENT])
BENCHMARK_KEYS = ['directories', 'files', 'logical lines', 'physical lines']

# Regular expressions shared by the check functions below.
INDENT_REGEX = re.compile(r'([ \t]*)')
RAISE_COMMA_REGEX = re.compile(r'raise\s+\w+\s*,')
RERAISE_COMMA_REGEX = re.compile(r'raise\s+\w+\s*,.*,\s*\w+\s*$')
ERRORCODE_REGEX = re.compile(r'\b[A-Z]\d{3}\b')
DOCSTRING_REGEX = re.compile(r'u?r?["\']')
EXTRANEOUS_WHITESPACE_REGEX = re.compile(r'[[({] | []}),;:]')
# BUG FIX: the pattern must require *two* spaces (or a tab) after the
# comma/semicolon/colon; with a single space it would flag every normal
# ', ' as E241 "multiple spaces".  pep8 1.5.1 uses r'[,;:]\s*(?:  |\t)'.
WHITESPACE_AFTER_COMMA_REGEX = re.compile(r'[,;:]\s*(?:  |\t)')
COMPARE_SINGLETON_REGEX = re.compile(r'([=!]=)\s*(None|False|True)')
COMPARE_NEGATIVE_REGEX = re.compile(r'\b(not)\s+[^[({ ]+\s+(in|is)\s')
COMPARE_TYPE_REGEX = re.compile(r'(?:[=!]=|is(?:\s+not)?)\s*type(?:s.\w+Type'
                                r'|\s*\(\s*([^)]*[^ )])\s*\))')
KEYWORD_REGEX = re.compile(r'(\s*)\b(?:%s)\b(\s*)' % r'|'.join(KEYWORDS))
OPERATOR_REGEX = re.compile(r'(?:[^,\s])(\s*)(?:[-+*/|!<=>%&^]+)(\s*)')
LAMBDA_REGEX = re.compile(r'\blambda\b')
HUNK_REGEX = re.compile(r'^@@ -\d+(?:,\d+)? \+(\d+)(?:,(\d+))? @@.*$')

# Work around Python < 2.6 behaviour, which does not generate NL after
# a comment which is on a line by itself.
COMMENT_WITH_NL = tokenize.generate_tokens(['#\n'].pop).send(None)[1] == '#\n'
##############################################################################
# Plugins (check functions) for physical lines
##############################################################################
def tabs_or_spaces(physical_line, indent_char):
    r"""
    Never mix tabs and spaces.
    The most popular way of indenting Python is with spaces only. The
    second-most popular way is with tabs only. Code indented with a mixture
    of tabs and spaces should be converted to using spaces exclusively. When
    invoking the Python command line interpreter with the -t option, it issues
    warnings about code that illegally mixes tabs and spaces. When using -tt
    these warnings become errors. These options are highly recommended!
    Okay: if a == 0:\n a = 1\n b = 1
    E101: if a == 0:\n a = 1\n\tb = 1
    """
    indent = INDENT_REGEX.match(physical_line).group(1)
    # Report the first indent character that differs from the file's
    # dominant indent character.
    mismatch = next((offset for offset, char in enumerate(indent)
                     if char != indent_char), None)
    if mismatch is not None:
        return mismatch, "E101 indentation contains mixed spaces and tabs"
def tabs_obsolete(physical_line):
    r"""
    For new projects, spaces-only are strongly recommended over tabs. Most
    editors have features that make this easy to do.
    Okay: if True:\n return
    W191: if True:\n\treturn
    """
    indent = INDENT_REGEX.match(physical_line).group(1)
    tab_pos = indent.find('\t')
    if tab_pos != -1:
        return tab_pos, "W191 indentation contains tabs"
def trailing_whitespace(physical_line):
    r"""
    JCR: Trailing whitespace is superfluous.
    FBM: Except when it occurs as part of a blank line (i.e. the line is
    nothing but whitespace). According to Python docs[1] a line with only
    whitespace is considered a blank line, and is to be ignored. However,
    matching a blank line to its indentation level avoids mistakenly
    terminating a multi-line statement (e.g. class declaration) when
    pasting code into the standard Python interpreter.
    [1] http://docs.python.org/reference/lexical_analysis.html#blank-lines
    The warning returned varies on whether the line itself is blank, for easier
    filtering for those who want to indent their blank lines.
    Okay: spam(1)\n#
    W291: spam(1) \n#
    W293: class Foo(object):\n \n bang = 12
    """
    # Strip line terminators first so they don't count as trailing
    # whitespace: newline, carriage return, form feed (^L).
    for terminator in ('\n', '\r', '\x0c'):
        physical_line = physical_line.rstrip(terminator)
    stripped = physical_line.rstrip(' \t\v')
    if physical_line == stripped:
        return None
    if stripped:
        return len(stripped), "W291 trailing whitespace"
    return 0, "W293 blank line contains whitespace"
def trailing_blank_lines(physical_line, lines, line_number):
    r"""
    JCR: Trailing blank lines are superfluous.
    Okay: spam(1)
    W391: spam(1)\n
    """
    is_last_line = (line_number == len(lines))
    if is_last_line and not physical_line.rstrip():
        return 0, "W391 blank line at end of file"
def missing_newline(physical_line):
    """
    JCR: The last line should have a newline.
    Reports warning W292.
    """
    # A line ending in a newline changes under rstrip(); an unchanged
    # line therefore has no terminator.
    stripped = physical_line.rstrip()
    if stripped == physical_line:
        return len(physical_line), "W292 no newline at end of file"
def maximum_line_length(physical_line, max_line_length, multiline):
    """
    Limit all lines to a maximum of 79 characters.
    There are still many devices around that are limited to 80 character
    lines; plus, limiting windows to 80 characters makes it possible to have
    several windows side-by-side. The default wrapping on such devices looks
    ugly. Therefore, please limit all lines to a maximum of 79 characters.
    For flowing long blocks of text (docstrings or comments), limiting the
    length to 72 characters is recommended.
    Reports error E501.
    """
    line = physical_line.rstrip()
    length = len(line)
    if length <= max_line_length or noqa(line):
        return
    # Special case for long URLs in multi-line docstrings or comments,
    # but still report the error when the 72 first chars are whitespaces.
    chunks = line.split()
    if ((len(chunks) == 1 and multiline) or
            (len(chunks) == 2 and chunks[0] == '#')) and \
            len(line) - len(chunks[-1]) < max_line_length - 7:
        return
    if hasattr(line, 'decode'):   # Python 2
        # The line could contain multi-byte characters
        try:
            length = len(line.decode('utf-8'))
        except UnicodeError:
            pass
    if length > max_line_length:
        return (max_line_length, "E501 line too long "
                "(%d > %d characters)" % (length, max_line_length))
##############################################################################
# Plugins (check functions) for logical lines
##############################################################################
def blank_lines(logical_line, blank_lines, indent_level, line_number,
                previous_logical, previous_indent_level):
    r"""
    Separate top-level function and class definitions with two blank lines.
    Method definitions inside a class are separated by a single blank line.
    Extra blank lines may be used (sparingly) to separate groups of related
    functions. Blank lines may be omitted between a bunch of related
    one-liners (e.g. a set of dummy implementations).
    Use blank lines in functions, sparingly, to indicate logical sections.
    Okay: def a():\n pass\n\n\ndef b():\n pass
    Okay: def a():\n pass\n\n\n# Foo\n# Bar\n\ndef b():\n pass
    E301: class Foo:\n b = 0\n def bar():\n pass
    E302: def a():\n pass\n\ndef b(n):\n pass
    E303: def a():\n pass\n\n\n\ndef b(n):\n pass
    E303: def a():\n\n\n\n pass
    E304: @decorator\n\ndef a():\n pass
    """
    if line_number < 3 and not previous_logical:
        return  # Don't expect blank lines before the first line
    if previous_logical.startswith('@'):
        # A decorator must be immediately followed by what it decorates.
        if blank_lines:
            yield 0, "E304 blank lines found after function decorator"
        return
    if blank_lines > 2 or (indent_level and blank_lines == 2):
        yield 0, "E303 too many blank lines (%d)" % blank_lines
        return
    if not logical_line.startswith(('def ', 'class ', '@')):
        return
    if indent_level:
        if not (blank_lines or previous_indent_level < indent_level or
                DOCSTRING_REGEX.match(previous_logical)):
            yield 0, "E301 expected 1 blank line, found 0"
    elif blank_lines != 2:
        yield 0, "E302 expected 2 blank lines, found %d" % blank_lines
def extraneous_whitespace(logical_line):
    """
    Avoid extraneous whitespace in the following situations:
    - Immediately inside parentheses, brackets or braces.
    - Immediately before a comma, semicolon, or colon.
    Okay: spam(ham[1], {eggs: 2})
    E201: spam( ham[1], {eggs: 2})
    E201: spam(ham[ 1], {eggs: 2})
    E201: spam(ham[1], { eggs: 2})
    E202: spam(ham[1], {eggs: 2} )
    E202: spam(ham[1 ], {eggs: 2})
    E202: spam(ham[1], {eggs: 2 })
    E203: if x == 4: print x, y; x, y = y , x
    E203: if x == 4: print x, y ; x, y = y, x
    E203: if x == 4 : print x, y; x, y = y, x
    """
    line = logical_line
    # Each match is two characters: either an opening bracket followed
    # by a space, or a space followed by a closer/comma/semicolon/colon.
    for match in EXTRANEOUS_WHITESPACE_REGEX.finditer(line):
        text = match.group()
        char = text.strip()
        found = match.start()
        if text.endswith(' '):
            # assert char in '([{'
            yield found + 1, "E201 whitespace after '%s'" % char
        elif line[found - 1] != ',':
            code = 'E202' if char in '}])' else 'E203'  # if char in ',;:'
            yield found, "%s whitespace before '%s'" % (code, char)
def whitespace_around_keywords(logical_line):
    r"""
    Avoid extraneous whitespace around keywords.
    Okay: True and False
    E271: True and False
    E272: True and False
    E273: True and\tFalse
    E274: True\tand False
    """
    for match in KEYWORD_REGEX.finditer(logical_line):
        # Check the whitespace before the keyword, then the whitespace
        # after it; tabs take precedence over multiple spaces.
        checks = (
            (match.group(1), match.start(1),
             "E274 tab before keyword",
             "E272 multiple spaces before keyword"),
            (match.group(2), match.start(2),
             "E273 tab after keyword",
             "E271 multiple spaces after keyword"),
        )
        for whitespace, position, tab_msg, spaces_msg in checks:
            if '\t' in whitespace:
                yield position, tab_msg
            elif len(whitespace) > 1:
                yield position, spaces_msg
def missing_whitespace(logical_line):
    """
    JCR: Each comma, semicolon or colon should be followed by whitespace.
    Okay: [a, b]
    Okay: (3,)
    Okay: a[1:4]
    Okay: a[:4]
    Okay: a[1:]
    Okay: a[1:4:2]
    E231: ['a','b']
    E231: foo(bar,baz)
    E231: [{'a':'b'}]
    """
    line = logical_line
    for index, char in enumerate(line[:-1]):
        if char not in ',;:' or line[index + 1] in WHITESPACE:
            continue
        before = line[:index]
        if char == ':' and before.count('[') > before.count(']') and \
                before.rfind('{') < before.rfind('['):
            continue  # Slice syntax, no space required
        if char == ',' and line[index + 1] == ')':
            continue  # Allow tuple with only one element: (3,)
        yield index, "E231 missing whitespace after '%s'" % char
def indentation(logical_line, previous_logical, indent_char,
                indent_level, previous_indent_level):
    r"""
    Use 4 spaces per indentation level.
    For really old code that you don't want to mess up, you can continue to
    use 8-space tabs.
    Okay: a = 1
    Okay: if a == 0:\n a = 1
    E111: a = 1
    Okay: for item in items:\n pass
    E112: for item in items:\npass
    Okay: a = 1\nb = 2
    E113: a = 1\n b = 2
    """
    if indent_char == ' ' and indent_level % 4 != 0:
        yield 0, "E111 indentation is not a multiple of four"
    # A logical line ending in ':' opens a block and demands an indent.
    expected_indent = previous_logical.endswith(':')
    indented_more = indent_level > previous_indent_level
    if expected_indent and not indented_more:
        yield 0, "E112 expected an indented block"
    if indented_more and not expected_indent:
        yield 0, "E113 unexpected indentation"
def continued_indentation(logical_line, tokens, indent_level, hang_closing,
                          indent_char, noqa, verbose):
    r"""
    Continuation lines should align wrapped elements either vertically using
    Python's implicit line joining inside parentheses, brackets and braces, or
    using a hanging indent.
    When using a hanging indent the following considerations should be applied:
    - there should be no arguments on the first line, and
    - further indentation should be used to clearly distinguish itself as a
    continuation line.
    Okay: a = (\n)
    E123: a = (\n )
    Okay: a = (\n 42)
    E121: a = (\n 42)
    E122: a = (\n42)
    E123: a = (\n 42\n )
    E124: a = (24,\n 42\n)
    E125: if (\n b):\n pass
    E126: a = (\n 42)
    E127: a = (24,\n 42)
    E128: a = (24,\n 42)
    E129: if (a or\n b):\n pass
    E131: a = (\n 42\n 24)
    """
    first_row = tokens[0][2][0]
    nrows = 1 + tokens[-1][2][0] - first_row
    if noqa or nrows == 1:
        return

    # indent_next tells us whether the next block is indented; assuming
    # that it is indented by 4 spaces, then we should not allow 4-space
    # indents on the final continuation line; in turn, some other
    # indents are allowed to have an extra 4 spaces.
    indent_next = logical_line.endswith(':')

    row = depth = 0
    # A hanging indent of 4 is always accepted; with tab indentation an
    # 8-column hang is tolerated too.
    valid_hangs = (4,) if indent_char != '\t' else (4, 8)
    # remember how many brackets were opened on each line
    parens = [0] * nrows
    # relative indents of physical lines
    rel_indent = [0] * nrows
    # for each depth, collect a list of opening rows
    open_rows = [[0]]
    # for each depth, memorize the hanging indentation
    hangs = [None]
    # visual indents: maps a column to True (confirmed visual indent),
    # str (string/comment alignment), or a token text (tentative match).
    indent_chances = {}
    last_indent = tokens[0][2]
    visual_indent = None
    # for each depth, memorize the visual indent column
    indent = [last_indent[1]]
    if verbose >= 3:
        print(">>> " + tokens[0][4].rstrip())

    for token_type, text, start, end, line in tokens:

        # NOTE: last_token_multiline is assigned at the bottom of this
        # loop; on the first token `row < start[0] - first_row` is False
        # (same row), so it is never read before being set.
        newline = row < start[0] - first_row
        if newline:
            row = start[0] - first_row
            newline = (not last_token_multiline and
                       token_type not in (tokenize.NL, tokenize.NEWLINE))

        if newline:
            # this is the beginning of a continuation line.
            last_indent = start
            if verbose >= 3:
                print("... " + line.rstrip())

            # record the initial indent.
            rel_indent[row] = expand_indent(line) - indent_level

            # identify closing bracket
            close_bracket = (token_type == tokenize.OP and text in ']})')

            # is the indent relative to an opening bracket line?
            for open_row in reversed(open_rows[depth]):
                hang = rel_indent[row] - rel_indent[open_row]
                hanging_indent = hang in valid_hangs
                if hanging_indent:
                    break
            if hangs[depth]:
                hanging_indent = (hang == hangs[depth])
            # is there any chance of visual indent?
            visual_indent = (not close_bracket and hang > 0 and
                             indent_chances.get(start[1]))

            if close_bracket and indent[depth]:
                # closing bracket for visual indent
                if start[1] != indent[depth]:
                    yield (start, "E124 closing bracket does not match "
                           "visual indentation")
            elif close_bracket and not hang:
                # closing bracket matches indentation of opening bracket's line
                if hang_closing:
                    yield start, "E133 closing bracket is missing indentation"
            elif indent[depth] and start[1] < indent[depth]:
                if visual_indent is not True:
                    # visual indent is broken
                    yield (start, "E128 continuation line "
                           "under-indented for visual indent")
            elif hanging_indent or (indent_next and rel_indent[row] == 8):
                # hanging indent is verified
                if close_bracket and not hang_closing:
                    yield (start, "E123 closing bracket does not match "
                           "indentation of opening bracket's line")
                hangs[depth] = hang
            elif visual_indent is True:
                # visual indent is verified
                indent[depth] = start[1]
            elif visual_indent in (text, str):
                # ignore token lined up with matching one from a previous line
                pass
            else:
                # indent is broken
                if hang <= 0:
                    error = "E122", "missing indentation or outdented"
                elif indent[depth]:
                    error = "E127", "over-indented for visual indent"
                elif not close_bracket and hangs[depth]:
                    error = "E131", "unaligned for hanging indent"
                else:
                    hangs[depth] = hang
                    if hang > 4:
                        error = "E126", "over-indented for hanging indent"
                    else:
                        error = "E121", "under-indented for hanging indent"
                yield start, "%s continuation line %s" % error

        # look for visual indenting
        if (parens[row] and token_type not in (tokenize.NL, tokenize.COMMENT)
                and not indent[depth]):
            indent[depth] = start[1]
            indent_chances[start[1]] = True
            if verbose >= 4:
                print("bracket depth %s indent to %s" % (depth, start[1]))
        # deal with implicit string concatenation
        elif (token_type in (tokenize.STRING, tokenize.COMMENT) or
              text in ('u', 'ur', 'b', 'br')):
            indent_chances[start[1]] = str
        # special case for the "if" statement because len("if (") == 4
        elif not indent_chances and not row and not depth and text == 'if':
            indent_chances[end[1] + 1] = True
        elif text == ':' and line[end[1]:].isspace():
            open_rows[depth].append(row)

        # keep track of bracket depth
        if token_type == tokenize.OP:
            if text in '([{':
                depth += 1
                indent.append(0)
                hangs.append(None)
                if len(open_rows) == depth:
                    open_rows.append([])
                open_rows[depth].append(row)
                parens[row] += 1
                if verbose >= 4:
                    print("bracket depth %s seen, col %s, visual min = %s" %
                          (depth, start[1], indent[depth]))
            elif text in ')]}' and depth > 0:
                # parent indents should not be more than this one
                prev_indent = indent.pop() or last_indent[1]
                hangs.pop()
                for d in range(depth):
                    if indent[d] > prev_indent:
                        indent[d] = 0
                for ind in list(indent_chances):
                    if ind >= prev_indent:
                        del indent_chances[ind]
                del open_rows[depth + 1:]
                depth -= 1
                if depth:
                    indent_chances[indent[depth]] = True
                for idx in range(row, -1, -1):
                    if parens[idx]:
                        parens[idx] -= 1
                        break
            assert len(indent) == depth + 1
            if start[1] not in indent_chances:
                # allow to line up tokens
                indent_chances[start[1]] = text

        last_token_multiline = (start[0] != end[0])

    if indent_next and expand_indent(line) == indent_level + 4:
        if visual_indent:
            code = "E129 visually indented line"
        else:
            code = "E125 continuation line"
        yield (last_indent, "%s with same indent as next logical line" % code)
def whitespace_before_parameters(logical_line, tokens):
    """
    Avoid extraneous whitespace in the following situations:
    - Immediately before the open parenthesis that starts the argument
    list of a function call.
    - Immediately before the open parenthesis that starts an indexing or
    slicing.
    Okay: spam(1)
    E211: spam (1)
    Okay: dict['key'] = list[index]
    E211: dict ['key'] = list[index]
    E211: dict['key'] = list [index]
    """
    prev_type, prev_text, __, prev_end, __ = tokens[0]
    for index, token in enumerate(tokens[1:], 1):
        token_type, text, start, end, __ = token
        if (token_type == tokenize.OP and
                text in '([' and
                start != prev_end and
                (prev_type == tokenize.NAME or prev_text in '}])') and
                # Syntax "class A (B):" is allowed, but avoid it
                (index < 2 or tokens[index - 2][1] != 'class') and
                # Allow "return (a.foo for a in range(5))"
                not keyword.iskeyword(prev_text)):
            yield prev_end, "E211 whitespace before '%s'" % text
        prev_type = token_type
        prev_text = text
        prev_end = end
def whitespace_around_operator(logical_line):
    r"""
    Avoid extraneous whitespace in the following situations:
    - More than one space around an assignment (or other) operator to
    align it with another.
    Okay: a = 12 + 3
    E221: a = 4 + 5
    E222: a = 4 + 5
    E223: a = 4\t+ 5
    E224: a = 4 +\t5
    """
    for match in OPERATOR_REGEX.finditer(logical_line):
        # Check the whitespace before the operator, then after it;
        # tabs take precedence over multiple spaces.
        checks = (
            (match.group(1), match.start(1),
             "E223 tab before operator",
             "E221 multiple spaces before operator"),
            (match.group(2), match.start(2),
             "E224 tab after operator",
             "E222 multiple spaces after operator"),
        )
        for whitespace, position, tab_msg, spaces_msg in checks:
            if '\t' in whitespace:
                yield position, tab_msg
            elif len(whitespace) > 1:
                yield position, spaces_msg
def missing_whitespace_around_operator(logical_line, tokens):
    r"""
    - Always surround these binary operators with a single space on
    either side: assignment (=), augmented assignment (+=, -= etc.),
    comparisons (==, <, >, !=, <>, <=, >=, in, not in, is, is not),
    Booleans (and, or, not).
    - Use spaces around arithmetic operators.
    Okay: i = i + 1
    Okay: submitted += 1
    Okay: x = x * 2 - 1
    Okay: hypot2 = x * x + y * y
    Okay: c = (a + b) * (a - b)
    Okay: foo(bar, key='word', *args, **kwargs)
    Okay: alpha[:-i]
    E225: i=i+1
    E225: submitted +=1
    E225: x = x /2 - 1
    E225: z = x **y
    E226: c = (a+b) * (a-b)
    E226: hypot2 = x*x + y*y
    E227: c = a|b
    E228: msg = fmt%(errno, errmsg)
    """
    parens = 0
    # need_space is a tri-state flag:
    #   False      -> no operator pending
    #   True       -> a space-mandatory operator was just seen
    #   (pos, had) -> a space-optional operator was seen at `pos`;
    #                 `had` records whether a space preceded it
    need_space = False
    prev_type = tokenize.OP
    prev_text = prev_end = None
    for token_type, text, start, end, line in tokens:
        if token_type in (tokenize.NL, tokenize.NEWLINE, tokenize.ERRORTOKEN):
            # ERRORTOKEN is triggered by backticks in Python 3
            continue
        if text in ('(', 'lambda'):
            parens += 1
        elif text == ')':
            parens -= 1
        if need_space:
            if start != prev_end:
                # Found a (probably) needed space
                if need_space is not True and not need_space[1]:
                    # space after but not before an optional operator
                    yield (need_space[0],
                           "E225 missing whitespace around operator")
                need_space = False
            elif text == '>' and prev_text in ('<', '-'):
                # Tolerate the "<>" operator, even if running Python 3
                # Deal with Python 3's annotated return value "->"
                pass
            else:
                if need_space is True or need_space[1]:
                    # A needed trailing space was not found
                    yield prev_end, "E225 missing whitespace around operator"
                else:
                    # optional-space operator with no space on either
                    # side: report the weaker E226/E227/E228 codes
                    code, optype = 'E226', 'arithmetic'
                    if prev_text == '%':
                        code, optype = 'E228', 'modulo'
                    elif prev_text not in ARITHMETIC_OP:
                        code, optype = 'E227', 'bitwise or shift'
                    yield (need_space[0], "%s missing whitespace "
                           "around %s operator" % (code, optype))
                need_space = False
        elif token_type == tokenize.OP and prev_end is not None:
            if text == '=' and parens:
                # Allow keyword args or defaults: foo(bar=None).
                pass
            elif text in WS_NEEDED_OPERATORS:
                need_space = True
            elif text in UNARY_OPERATORS:
                # Check if the operator is being used as a binary operator
                # Allow unary operators: -123, -x, +1.
                # Allow argument unpacking: foo(*args, **kwargs).
                if prev_type == tokenize.OP:
                    binary_usage = (prev_text in '}])')
                elif prev_type == tokenize.NAME:
                    binary_usage = (prev_text not in KEYWORDS)
                else:
                    binary_usage = (prev_type not in SKIP_TOKENS)
                if binary_usage:
                    need_space = None
            elif text in WS_OPTIONAL_OPERATORS:
                need_space = None
            if need_space is None:
                # Surrounding space is optional, but ensure that
                # trailing space matches opening space
                need_space = (prev_end, start != prev_end)
            elif need_space and start == prev_end:
                # A needed opening space was not found
                yield prev_end, "E225 missing whitespace around operator"
                need_space = False
        prev_type = token_type
        prev_text = text
        prev_end = end
def whitespace_around_comma(logical_line):
    r"""
    Avoid extraneous whitespace in the following situations:
    - More than one space around an assignment (or other) operator to
    align it with another.
    Note: these checks are disabled by default
    Okay: a = (1, 2)
    E241: a = (1, 2)
    E242: a = (1,\t2)
    """
    for m in WHITESPACE_AFTER_COMMA_REGEX.finditer(logical_line):
        matched = m.group()
        position = m.start() + 1
        code = 'E242 tab' if '\t' in matched else 'E241 multiple spaces'
        yield position, "%s after '%s'" % (code, matched[0])
def whitespace_around_named_parameter_equals(logical_line, tokens):
    """
    Don't use spaces around the '=' sign when used to indicate a
    keyword argument or a default parameter value.
    Okay: def complex(real, imag=0.0):
    Okay: return magic(r=real, i=imag)
    Okay: boolean(a == b)
    Okay: boolean(a != b)
    Okay: boolean(a <= b)
    Okay: boolean(a >= b)
    E251: def complex(real, imag = 0.0):
    E251: return magic(r = real, i = imag)
    """
    message = "E251 unexpected spaces around keyword / parameter equals"
    paren_depth = 0
    check_next = False
    prev_end = None
    for token_type, text, start, end, line in tokens:
        if check_next:
            # The token after a keyword '=': flag a space following it.
            check_next = False
            if start != prev_end:
                yield (prev_end, message)
        elif token_type == tokenize.OP:
            if text == '(':
                paren_depth += 1
            elif text == ')':
                paren_depth -= 1
            elif paren_depth and text == '=':
                # '=' inside parentheses marks a keyword/default value.
                check_next = True
                if start != prev_end:
                    yield (prev_end, message)
        prev_end = end
def whitespace_before_comment(logical_line, tokens):
    """
    Separate inline comments by at least two spaces.
    An inline comment is a comment on the same line as a statement. Inline
    comments should be separated by at least two spaces from the statement.
    They should start with a # and a single space.
    Each line of a block comment starts with a # and a single space
    (unless it is indented text inside the comment).
    Okay: x = x + 1 # Increment x
    Okay: x = x + 1 # Increment x
    Okay: # Block comment
    E261: x = x + 1 # Increment x
    E262: x = x + 1 #Increment x
    E262: x = x + 1 # Increment x
    E265: #Block comment
    """
    prev_end = (0, 0)
    for token_type, text, start, end, line in tokens:
        if token_type != tokenize.COMMENT:
            # Remember where the last code token ended (NL is ignored so
            # prev_end survives blank lines).
            if token_type != tokenize.NL:
                prev_end = end
            continue
        is_inline = bool(line[:start[1]].strip())
        if (is_inline and prev_end[0] == start[0] and
                start[1] < prev_end[1] + 2):
            yield (prev_end,
                   "E261 at least two spaces before inline comment")
        symbol, _, comment = text.partition(' ')
        bad_prefix = symbol not in ('#', '#:')
        if is_inline:
            if bad_prefix or comment[:1].isspace():
                yield start, "E262 inline comment should start with '# '"
        elif bad_prefix and text.rstrip('#') and (
                start[0] > 1 or symbol[1] != '!'):
            yield start, "E265 block comment should start with '# '"
def imports_on_separate_lines(logical_line):
    r"""
    Imports should usually be on separate lines.
    Okay: import os\nimport sys
    E401: import sys, os
    Okay: from subprocess import Popen, PIPE
    Okay: from myclass import MyClass
    Okay: from foo.bar.yourclass import YourClass
    Okay: import myclass
    Okay: import foo.bar.yourclass
    """
    if not logical_line.startswith('import '):
        return
    comma = logical_line.find(',')
    # A ';' before the comma means a new statement started, so the comma
    # belongs to something other than the import list.
    if comma >= 0 and ';' not in logical_line[:comma]:
        yield comma, "E401 multiple imports on one line"
def compound_statements(logical_line):
    r"""
    Compound statements (multiple statements on the same line) are
    generally discouraged.
    While sometimes it's okay to put an if/for/while with a small body
    on the same line, never do this for multi-clause statements. Also
    avoid folding such long lines!
    Okay: if foo == 'blah':\n do_blah_thing()
    Okay: do_one()
    Okay: do_two()
    Okay: do_three()
    E701: if foo == 'blah': do_blah_thing()
    E701: for x in lst: total += x
    E701: while t < 10: t = delay()
    E701: if foo == 'blah': do_blah_thing()
    E701: else: do_non_blah_thing()
    E701: try: something()
    E701: finally: cleanup()
    E701: if foo == 'blah': one(); two(); three()
    E702: do_one(); do_two(); do_three()
    E703: do_four(); # useless semicolon
    """
    line = logical_line
    last_char = len(line) - 1
    found = line.find(':')
    while -1 < found < last_char:
        before = line[:found]
        # Colons inside dicts, slices, annotations and lambdas are fine.
        inside_brackets = (before.count('{') > before.count('}') or
                           before.count('[') > before.count(']') or
                           before.count('(') > before.count(')'))
        if not inside_brackets and not LAMBDA_REGEX.search(before):
            yield found, "E701 multiple statements on one line (colon)"
        found = line.find(':', found + 1)
    found = line.find(';')
    while found > -1:
        if found < last_char:
            yield found, "E702 multiple statements on one line (semicolon)"
        else:
            yield found, "E703 statement ends with a semicolon"
        found = line.find(';', found + 1)
def explicit_line_join(logical_line, tokens):
    r"""
    Avoid explicit line join between brackets.
    The preferred way of wrapping long lines is by using Python's implied line
    continuation inside parentheses, brackets and braces. Long lines can be
    broken over multiple lines by wrapping expressions in parentheses. These
    should be used in preference to using a backslash for line continuation.
    E502: aaa = [123, \\n 123]
    E502: aaa = ("bbb " \\n "ccc")
    Okay: aaa = [123,\n 123]
    Okay: aaa = ("bbb "\n "ccc")
    Okay: aaa = "bbb " \\n "ccc"
    """
    prev_start = prev_end = depth = 0
    backslash = None  # (row, col) of a trailing backslash, if any
    for token_type, text, start, end, line in tokens:
        if start[0] != prev_start and depth and backslash:
            yield backslash, "E502 the backslash is redundant between brackets"
        if end[0] == prev_end:
            prev_start = start[0]
        else:
            if line.rstrip('\r\n').endswith('\\'):
                backslash = (end[0], len(line.splitlines()[-1]) - 1)
            else:
                backslash = None
            prev_start = prev_end = end[0]
        if token_type == tokenize.OP:
            if text in '([{':
                depth += 1
            elif text in ')]}':
                depth -= 1
def comparison_to_singleton(logical_line, noqa):
    """
    Comparisons to singletons like None should always be done
    with "is" or "is not", never the equality operators.
    Okay: if arg is not None:
    E711: if arg != None:
    E712: if arg == True:
    Also, beware of writing if x when you really mean if x is not None --
    e.g. when testing whether a variable or argument that defaults to None was
    set to some other value. The other value might have a type (such as a
    container) that could be false in a boolean context!
    """
    if noqa:
        return
    match = COMPARE_SINGLETON_REGEX.search(logical_line)
    if not match:
        return
    same = (match.group(1) == '==')
    singleton = match.group(2)
    msg = "'if cond is %s:'" % (('' if same else 'not ') + singleton)
    if singleton == 'None':
        code = 'E711'
    else:
        code = 'E712'
        nonzero = ((singleton == 'True' and same) or
                   (singleton == 'False' and not same))
        msg += " or 'if %scond:'" % ('' if nonzero else 'not ')
    yield match.start(1), ("%s comparison to %s should be %s" %
                           (code, singleton, msg))
def comparison_negative(logical_line):
    r"""
    Negative comparison, either identity or membership, should be
    done using "not in" and "is not".
    Okay: if x not in y:\n pass
    Okay: assert (X in Y or X is Z)
    Okay: if not (X in Y):\n pass
    Okay: zz = x is not y
    E713: Z = not X in Y
    E713: if not X.B in Y:\n pass
    E714: if not X is Y:\n pass
    E714: Z = not X.B is Y
    """
    match = COMPARE_NEGATIVE_REGEX.search(logical_line)
    if not match:
        return
    pos = match.start(1)
    if match.group(2) == 'in':
        yield pos, "E713 test for membership should be 'not in'"
    else:
        yield pos, "E714 test for object identity should be 'is not'"
def comparison_type(logical_line):
    """
    Object type comparisons should always use isinstance() instead of
    comparing types directly.
    Okay: if isinstance(obj, int):
    E721: if type(obj) is type(1):
    When checking if an object is a string, keep in mind that it might be a
    unicode string too! In Python 2.3, str and unicode have a common base
    class, basestring, so you can do:
    Okay: if isinstance(obj, basestring):
    Okay: if type(a1) is type(b1):
    """
    match = COMPARE_TYPE_REGEX.search(logical_line)
    if not match:
        return
    inst = match.group(1)
    if inst and isidentifier(inst) and inst not in SINGLETONS:
        return  # Allow comparison for types which are not obvious
    yield match.start(), "E721 do not compare types, use 'isinstance()'"
def python_3000_has_key(logical_line, noqa):
    r"""
    The {}.has_key() method is removed in the Python 3.
    Use the 'in' operation instead.
    Okay: if "alph" in d:\n print d["alph"]
    W601: assert d.has_key('alph')
    """
    if noqa:
        return
    pos = logical_line.find('.has_key(')
    if pos > -1:
        yield pos, "W601 .has_key() is deprecated, use 'in'"
def python_3000_raise_comma(logical_line):
    """
    When raising an exception, use "raise ValueError('message')"
    instead of the older form "raise ValueError, 'message'".
    The paren-using form is preferred because when the exception arguments
    are long or include string formatting, you don't need to use line
    continuation characters thanks to the containing parentheses.  The older
    form is removed in Python 3.
    Okay: raise DummyError("Message")
    W602: raise DummyError, "Message"
    """
    m = RAISE_COMMA_REGEX.match(logical_line)
    if not m:
        return
    # The three-argument re-raise form is a distinct construct; skip it.
    if RERAISE_COMMA_REGEX.match(logical_line):
        return
    yield m.end() - 1, "W602 deprecated form of raising exception"
def python_3000_not_equal(logical_line):
    """
    != can also be written <>, but this is an obsolete usage kept for
    backwards compatibility only. New code should always use !=.
    The older syntax is removed in Python 3.
    Okay: if a != 'no':
    W603: if a <> 'no':
    """
    index = logical_line.find('<>')
    if index >= 0:
        yield index, "W603 '<>' is deprecated, use '!='"
def python_3000_backticks(logical_line):
    """
    Backticks are removed in Python 3.
    Use repr() instead.
    Okay: val = repr(1 + 2)
    W604: val = `1 + 2`
    """
    index = logical_line.find('`')
    if index >= 0:
        yield index, "W604 backticks are deprecated, use 'repr()'"
##############################################################################
# Helper functions
##############################################################################
# On Python 2, str and bytes are the same type, so '' == ''.encode() is
# True there and False on Python 3.  Each branch binds version-specific
# implementations of readlines(), isidentifier and stdin_get_value.
if '' == ''.encode():
    # Python 2: implicit encoding.
    def readlines(filename):
        # Read all source lines with the platform's implicit encoding.
        f = open(filename)
        try:
            return f.readlines()
        finally:
            f.close()
    isidentifier = re.compile(r'[a-zA-Z_]\w*').match
    stdin_get_value = sys.stdin.read
else:
    # Python 3
    def readlines(filename):
        # Honour the source encoding declaration (PEP 263) when decoding.
        f = open(filename, 'rb')
        try:
            coding, lines = tokenize.detect_encoding(f.readline)
            f = TextIOWrapper(f, coding, line_buffering=True)
            return [l.decode(coding) for l in lines] + f.readlines()
        except (LookupError, SyntaxError, UnicodeError):
            f.close()
            # Fall back if files are improperly declared
            f = open(filename, encoding='latin-1')
            return f.readlines()
        finally:
            f.close()
    isidentifier = str.isidentifier
    def stdin_get_value():
        # Decode stdin leniently; a style checker should not crash on
        # undecodable bytes.
        return TextIOWrapper(sys.stdin.buffer, errors='ignore').read()
readlines.__doc__ = "    Read the source code."
# Matcher for the magic '# noqa' / '# nopep8' comment (case-insensitive).
noqa = re.compile(r'# no(?:qa|pep8)\b', re.I).search
def expand_indent(line):
    r"""
    Return the amount of indentation.
    Tabs are expanded to the next multiple of 8.
    >>> expand_indent('    ')
    4
    >>> expand_indent('\t')
    8
    >>> expand_indent('    \t')
    8
    >>> expand_indent('        \t')
    16
    """
    if '\t' not in line:
        # Fast path: spaces only, so the width is just the stripped length.
        return len(line) - len(line.lstrip())
    width = 0
    for char in line:
        if char == ' ':
            width += 1
        elif char == '\t':
            # A tab jumps to the next multiple of 8.
            width = (width // 8 + 1) * 8
        else:
            break
    return width
def mute_string(text):
    """
    Replace contents with 'xxx' to prevent syntax matching.
    >>> mute_string('"abc"')
    '"xxx"'
    >>> mute_string("'''abc'''")
    "'''xxx'''"
    >>> mute_string("r'abc'")
    "r'xxx'"
    """
    # Skip past any string modifiers (e.g. u or r) up to the first quote.
    quote = text[-1]
    start = text.index(quote) + 1
    end = len(text) - 1
    # Widen the quoted region for triple-quoted strings.
    if text.endswith(('"""', "'''")):
        start += 2
        end -= 2
    return text[:start] + 'x' * (end - start) + text[end:]
def parse_udiff(diff, patterns=None, parent='.'):
    """Return a dictionary of matching lines."""
    # For each file of the diff, map the filename to the set of row
    # numbers touched by the hunks.
    changes = {}
    path = nrows = None
    for line in diff.splitlines():
        if nrows:
            # Still consuming a hunk: removed lines belong to the old file
            # and do not advance the new file's row counter.
            if not line.startswith('-'):
                nrows -= 1
            continue
        if line.startswith('@@ '):
            hunk = HUNK_REGEX.match(line)
            row, nrows = [int(g or '1') for g in hunk.groups()]
            changes[path].update(range(row, row + nrows))
        elif line.startswith('+++'):
            path = line[4:].split('\t', 1)[0]
            if path.startswith('b/'):
                path = path[2:]
            changes[path] = set()
    return dict((os.path.join(parent, path), rows)
                for (path, rows) in changes.items()
                if rows and filename_match(path, patterns))
def normalize_paths(value, parent=os.curdir):
    """Parse a comma-separated list of paths.
    Return a list of absolute paths.
    """
    # Values that are empty or already parsed pass through untouched.
    if not value or isinstance(value, list):
        return value
    def _norm(path):
        # Only entries that look like paths are anchored to the parent dir.
        if '/' in path:
            path = os.path.abspath(os.path.join(parent, path))
        return path.rstrip('/')
    return [_norm(path) for path in value.split(',')]
def filename_match(filename, patterns, default=True):
    """
    Check if patterns contains a pattern that matches filename.
    If patterns is unspecified, this always returns True.
    """
    if not patterns:
        return default
    for pattern in patterns:
        if fnmatch(filename, pattern):
            return True
    return False
##############################################################################
# Framework to run all checks
##############################################################################
# Registry of check plugins, keyed by the name of their first argument:
# 'physical_line' and 'logical_line' map check -> (codes, argument names),
# 'tree' holds AST-based checker classes.  Populated by register_check().
_checks = {'physical_line': {}, 'logical_line': {}, 'tree': {}}
def register_check(check, codes=None):
    """
    Register a new check object.
    """
    def _record(kind, codes, args):
        # Merge codes into an existing registration, or create a new one.
        registry = _checks[kind]
        if check in registry:
            registry[check][0].extend(codes or [])
        else:
            registry[check] = (codes or [''], args)
    if inspect.isfunction(check):
        args = inspect.getargspec(check)[0]
        if args and args[0] in ('physical_line', 'logical_line'):
            if codes is None:
                # Derive the error codes from the check's docstring.
                codes = ERRORCODE_REGEX.findall(check.__doc__ or '')
            _record(args[0], codes, args)
    elif inspect.isclass(check):
        if inspect.getargspec(check.__init__)[0][:2] == ['self', 'tree']:
            _record('tree', codes, None)
def init_checks_registry():
    """
    Register all globally visible functions where the first argument name
    is 'physical_line' or 'logical_line'.
    """
    mod = inspect.getmodule(register_check)
    for (name, function) in inspect.getmembers(mod, inspect.isfunction):
        register_check(function)
# Populate the registry as soon as this module is imported.
init_checks_registry()
class Checker(object):
    """
    Load a Python source file, tokenize it, check coding style.
    """
    def __init__(self, filename=None, lines=None,
                 options=None, report=None, **kwargs):
        if options is None:
            options = StyleGuide(kwargs).options
        else:
            assert not kwargs
        self._io_error = None
        self._physical_checks = options.physical_checks
        self._logical_checks = options.logical_checks
        self._ast_checks = options.ast_checks
        self.max_line_length = options.max_line_length
        self.multiline = False  # in a multiline string?
        self.hang_closing = options.hang_closing
        self.verbose = options.verbose
        self.filename = filename
        # Three sources for the input: explicit lines, stdin, or a file.
        if filename is None:
            self.filename = 'stdin'
            self.lines = lines or []
        elif filename == '-':
            self.filename = 'stdin'
            self.lines = stdin_get_value().splitlines(True)
        elif lines is None:
            try:
                self.lines = readlines(filename)
            except IOError:
                # Defer reporting: E902 is emitted from generate_tokens().
                (exc_type, exc) = sys.exc_info()[:2]
                self._io_error = '%s: %s' % (exc_type.__name__, exc)
                self.lines = []
        else:
            self.lines = lines
        if self.lines:
            ord0 = ord(self.lines[0][0])
            if ord0 in (0xef, 0xfeff):  # Strip the UTF-8 BOM
                if ord0 == 0xfeff:
                    self.lines[0] = self.lines[0][1:]
                elif self.lines[0][:3] == '\xef\xbb\xbf':
                    self.lines[0] = self.lines[0][3:]
        self.report = report or options.report
        self.report_error = self.report.error
    def report_invalid_syntax(self):
        # A SyntaxError carries (filename, lineno, offset, text) in args[1];
        # fall back to position 1:0 when that information is missing.
        (exc_type, exc) = sys.exc_info()[:2]
        if len(exc.args) > 1:
            offset = exc.args[1]
            if len(offset) > 2:
                offset = offset[1:3]
        else:
            offset = (1, 0)
        self.report_error(offset[0], offset[1] or 0,
                          'E901 %s: %s' % (exc_type.__name__, exc.args[0]),
                          self.report_invalid_syntax)
    report_invalid_syntax.__doc__ = "    Check if the syntax is valid."
    def readline(self):
        """
        Get the next line from the input buffer.
        """
        self.line_number += 1
        if self.line_number > len(self.lines):
            return ''
        line = self.lines[self.line_number - 1]
        # Remember the first indentation character seen (space or tab).
        if self.indent_char is None and line[:1] in WHITESPACE:
            self.indent_char = line[0]
        return line
    def run_check(self, check, argument_names):
        """
        Run a check plugin.
        """
        # Each declared argument name is resolved as a checker attribute.
        arguments = []
        for name in argument_names:
            arguments.append(getattr(self, name))
        return check(*arguments)
    def check_physical(self, line):
        """
        Run all physical checks on a raw input line.
        """
        self.physical_line = line
        for name, check, argument_names in self._physical_checks:
            result = self.run_check(check, argument_names)
            if result is not None:
                (offset, text) = result
                self.report_error(self.line_number, offset, text, check)
                if text[:4] == 'E101':
                    # E101 means mixed indentation: resync the indent char.
                    self.indent_char = line[0]
    def build_tokens_line(self):
        """
        Build a logical line from tokens.
        """
        # self.mapping records (offset in logical line, token) pairs so
        # check_logical() can translate offsets back to file positions.
        self.mapping = []
        logical = []
        comments = []
        length = 0
        previous = None
        for token in self.tokens:
            (token_type, text) = token[0:2]
            if token_type == tokenize.COMMENT:
                comments.append(text)
                continue
            if token_type in SKIP_TOKENS:
                continue
            if token_type == tokenize.STRING:
                # Hide string contents so checks never match inside them.
                text = mute_string(text)
            if previous:
                (end_row, end) = previous[3]
                (start_row, start) = token[2]
                if end_row != start_row:    # different row
                    prev_text = self.lines[end_row - 1][end - 1]
                    if prev_text == ',' or (prev_text not in '{[('
                                            and text not in '}])'):
                        logical.append(' ')
                        length += 1
                elif end != start:  # different column
                    fill = self.lines[end_row - 1][end:start]
                    logical.append(fill)
                    length += len(fill)
            self.mapping.append((length, token))
            logical.append(text)
            length += len(text)
            previous = token
        self.logical_line = ''.join(logical)
        self.noqa = comments and noqa(''.join(comments))
        # With Python 2, if the line ends with '\r\r\n' the assertion fails
        # assert self.logical_line.strip() == self.logical_line
    def check_logical(self):
        """
        Build a line from tokens and run all logical checks on it.
        """
        self.build_tokens_line()
        self.report.increment_logical_line()
        token0 = self.mapping[0][1] if self.mapping else self.tokens[0]
        first_line = self.lines[token0[2][0] - 1]
        indent = first_line[:token0[2][1]]
        self.indent_level = expand_indent(indent)
        if self.verbose >= 2:
            print(self.logical_line[:80].rstrip())
        for name, check, argument_names in self._logical_checks:
            if self.verbose >= 4:
                print('   ' + name)
            for result in self.run_check(check, argument_names) or ():
                (offset, text) = result
                if isinstance(offset, tuple):
                    # The check reported an absolute (row, col) position.
                    (orig_number, orig_offset) = offset
                else:
                    # Translate the logical-line offset back to a physical
                    # position using the token mapping.
                    orig_number = token0[2][0]
                    orig_offset = token0[2][1] + offset
                    for token_offset, token in self.mapping:
                        if offset >= token_offset:
                            orig_number = token[2][0]
                            orig_offset = (token[2][1] + offset - token_offset)
                self.report_error(orig_number, orig_offset, text, check)
        if self.logical_line:
            self.previous_indent_level = self.indent_level
            self.previous_logical = self.logical_line
        self.tokens = []
    def check_ast(self):
        # Run the AST-based (tree) checks over the whole file at once.
        try:
            tree = compile(''.join(self.lines), '', 'exec', PyCF_ONLY_AST)
        except (SyntaxError, TypeError):
            return self.report_invalid_syntax()
        for name, cls, __ in self._ast_checks:
            checker = cls(tree, self.filename)
            for lineno, offset, text, check in checker.run():
                # Honour '# noqa' on the offending physical line.
                if not self.lines or not noqa(self.lines[lineno - 1]):
                    self.report_error(lineno, offset, text, check)
    def generate_tokens(self):
        # Tokenize the input, running physical checks as lines complete.
        if self._io_error:
            self.report_error(1, 0, 'E902 %s' % self._io_error, readlines)
        tokengen = tokenize.generate_tokens(self.readline)
        try:
            for token in tokengen:
                self.maybe_check_physical(token)
                yield token
        except (SyntaxError, tokenize.TokenError):
            self.report_invalid_syntax()
    def maybe_check_physical(self, token):
        """
        If appropriate (based on token), check current physical line(s).
        """
        # Called after every token, but act only on end of line.
        if token[0] in (tokenize.NEWLINE, tokenize.NL):
            # Obviously, a newline token ends a single physical line.
            self.check_physical(token[4])
        elif token[0] == tokenize.STRING and '\n' in token[1]:
            # Less obviously, a string that contains newlines is a
            # multiline string, either triple-quoted or with internal
            # newlines backslash-escaped. Check every physical line in the
            # string *except* for the last one: its newline is outside of
            # the multiline string, so we consider it a regular physical
            # line, and will check it like any other physical line.
            #
            # Subtleties:
            # - we don't *completely* ignore the last line; if it contains
            #   the magical "# noqa" comment, we disable all physical
            #   checks for the entire multiline string
            # - have to wind self.line_number back because initially it
            #   points to the last line of the string, and we want
            #   check_physical() to give accurate feedback
            if noqa(token[4]):
                return
            self.multiline = True
            self.line_number = token[2][0]
            for line in token[1].split('\n')[:-1]:
                self.check_physical(line + '\n')
                self.line_number += 1
            self.multiline = False
    def check_all(self, expected=None, line_offset=0):
        """
        Run all checks on the input file.
        """
        self.report.init_file(self.filename, self.lines, expected, line_offset)
        if self._ast_checks:
            self.check_ast()
        self.line_number = 0
        self.indent_char = None
        self.indent_level = 0
        self.previous_indent_level = 0
        self.previous_logical = ''
        self.tokens = []
        self.blank_lines = blank_lines_before_comment = 0
        parens = 0
        for token in self.generate_tokens():
            self.tokens.append(token)
            token_type, text = token[0:2]
            if self.verbose >= 3:
                if token[2][0] == token[3][0]:
                    pos = '[%s:%s]' % (token[2][1] or '', token[3][1])
                else:
                    pos = 'l.%s' % token[3][0]
                print('l.%s\t%s\t%s\t%r' %
                      (token[2][0], pos, tokenize.tok_name[token[0]], text))
            if token_type == tokenize.OP:
                # Track bracket nesting: a logical line only ends at depth 0.
                if text in '([{':
                    parens += 1
                elif text in '}])':
                    parens -= 1
            elif not parens:
                if token_type == tokenize.NEWLINE:
                    if self.blank_lines < blank_lines_before_comment:
                        self.blank_lines = blank_lines_before_comment
                    self.check_logical()
                    self.blank_lines = blank_lines_before_comment = 0
                elif token_type == tokenize.NL:
                    if len(self.tokens) == 1:
                        # The physical line contains only this token.
                        self.blank_lines += 1
                        del self.tokens[0]
                    else:
                        self.check_logical()
                elif token_type == tokenize.COMMENT and len(self.tokens) == 1:
                    if blank_lines_before_comment < self.blank_lines:
                        blank_lines_before_comment = self.blank_lines
                    self.blank_lines = 0
                    if COMMENT_WITH_NL:
                        # The comment also ends a physical line
                        text = text.rstrip('\r\n')
                        self.tokens = [(token_type, text) + token[2:]]
                        self.check_logical()
        return self.report.get_file_results()
class BaseReport(object):
    """Collect the results of the checks."""
    # Subclasses flip this to print each offending filename once.
    print_filename = False
    def __init__(self, options):
        self._benchmark_keys = options.benchmark_keys
        self._ignore_code = options.ignore_code
        # Results
        self.elapsed = 0
        self.total_errors = 0
        # Counts per error code, plus the benchmark keys (files, lines...).
        self.counters = dict.fromkeys(self._benchmark_keys, 0)
        self.messages = {}
    def start(self):
        """Start the timer."""
        self._start_time = time.time()
    def stop(self):
        """Stop the timer."""
        self.elapsed = time.time() - self._start_time
    def init_file(self, filename, lines, expected, line_offset):
        """Signal a new file."""
        self.filename = filename
        self.lines = lines
        self.expected = expected or ()
        self.line_offset = line_offset
        self.file_errors = 0
        self.counters['files'] += 1
        self.counters['physical lines'] += len(lines)
    def increment_logical_line(self):
        """Signal a new logical line."""
        self.counters['logical lines'] += 1
    def error(self, line_number, offset, text, check):
        """Report an error, according to options."""
        code = text[:4]
        if self._ignore_code(code):
            return
        if code in self.counters:
            self.counters[code] += 1
        else:
            self.counters[code] = 1
            self.messages[code] = text[5:]
        # Don't care about expected errors or warnings
        if code in self.expected:
            return
        if self.print_filename and not self.file_errors:
            print(self.filename)
        self.file_errors += 1
        self.total_errors += 1
        # Returning the code tells subclasses the error was not filtered.
        return code
    def get_file_results(self):
        """Return the count of errors and warnings for this file."""
        return self.file_errors
    def get_count(self, prefix=''):
        """Return the total count of errors and warnings."""
        return sum([self.counters[key]
                    for key in self.messages if key.startswith(prefix)])
    def get_statistics(self, prefix=''):
        """
        Get statistics for message codes that start with the prefix.
        prefix='' matches all errors and warnings
        prefix='E' matches all errors
        prefix='W' matches all warnings
        prefix='E4' matches all errors that have to do with imports
        """
        return ['%-7s %s %s' % (self.counters[key], key, self.messages[key])
                for key in sorted(self.messages) if key.startswith(prefix)]
    def print_statistics(self, prefix=''):
        """Print overall statistics (number of errors and warnings)."""
        for line in self.get_statistics(prefix):
            print(line)
    def print_benchmark(self):
        """Print benchmark numbers."""
        print('%-7.2f %s' % (self.elapsed, 'seconds elapsed'))
        if self.elapsed:
            for key in self._benchmark_keys:
                print('%-7d %s per second (%d total)' %
                      (self.counters[key] / self.elapsed, key,
                       self.counters[key]))
class FileReport(BaseReport):
    """Collect the results of the checks and print only the filenames."""
    # BaseReport.error() prints self.filename when this is set.
    print_filename = True
class StandardReport(BaseReport):
    """Collect and print the results of the checks."""
    def __init__(self, options):
        super(StandardReport, self).__init__(options)
        # Either a named format ('default', 'pylint') or a custom template.
        self._fmt = REPORT_FORMAT.get(options.format.lower(),
                                      options.format)
        self._repeat = options.repeat
        self._show_source = options.show_source
        self._show_pep8 = options.show_pep8
    def init_file(self, filename, lines, expected, line_offset):
        """Signal a new file."""
        # Errors are buffered and printed sorted in get_file_results().
        self._deferred_print = []
        return super(StandardReport, self).init_file(
            filename, lines, expected, line_offset)
    def error(self, line_number, offset, text, check):
        """Report an error, according to options."""
        code = super(StandardReport, self).error(line_number, offset,
                                                 text, check)
        if code and (self.counters[code] == 1 or self._repeat):
            self._deferred_print.append(
                (line_number, offset, code, text[5:], check.__doc__))
        return code
    def get_file_results(self):
        """Print the result and return the overall count for this file."""
        self._deferred_print.sort()
        for line_number, offset, code, text, doc in self._deferred_print:
            print(self._fmt % {
                'path': self.filename,
                'row': self.line_offset + line_number, 'col': offset + 1,
                'code': code, 'text': text,
            })
            if self._show_source:
                if line_number > len(self.lines):
                    line = ''
                else:
                    line = self.lines[line_number - 1]
                print(line.rstrip())
                # Caret pointing at the offending column.
                print(' ' * offset + '^')
            if self._show_pep8 and doc:
                print(doc.lstrip('\n').rstrip())
        return self.file_errors
class DiffReport(StandardReport):
    """Collect and print the results for the changed lines only."""
    def __init__(self, options):
        super(DiffReport, self).__init__(options)
        # Maps filename -> set of row numbers present in the diff.
        self._selected = options.selected_lines
    def error(self, line_number, offset, text, check):
        # Only forward errors that fall on lines touched by the diff.
        if line_number in self._selected[self.filename]:
            return super(DiffReport, self).error(
                line_number, offset, text, check)
class StyleGuide(object):
    """Initialize a PEP-8 instance with few options."""
    def __init__(self, *args, **kwargs):
        # build options from the command line
        self.checker_class = kwargs.pop('checker_class', Checker)
        parse_argv = kwargs.pop('parse_argv', False)
        config_file = kwargs.pop('config_file', None)
        parser = kwargs.pop('parser', None)
        # build options from dict
        options_dict = dict(*args, **kwargs)
        arglist = None if parse_argv else options_dict.get('paths', None)
        options, self.paths = process_options(
            arglist, parse_argv, config_file, parser)
        if options_dict:
            # Keyword arguments override the parsed/configured options.
            options.__dict__.update(options_dict)
            if 'paths' in options_dict:
                self.paths = options_dict['paths']
        self.runner = self.input_file
        self.options = options
        if not options.reporter:
            options.reporter = BaseReport if options.quiet else StandardReport
        options.select = tuple(options.select or ())
        if not (options.select or options.ignore or
                options.testsuite or options.doctest) and DEFAULT_IGNORE:
            # The default choice: ignore controversial checks
            options.ignore = tuple(DEFAULT_IGNORE.split(','))
        else:
            # Ignore all checks which are not explicitly selected
            options.ignore = ('',) if options.select else tuple(options.ignore)
        options.benchmark_keys = BENCHMARK_KEYS[:]
        options.ignore_code = self.ignore_code
        options.physical_checks = self.get_checks('physical_line')
        options.logical_checks = self.get_checks('logical_line')
        options.ast_checks = self.get_checks('tree')
        self.init_report()
    def init_report(self, reporter=None):
        """Initialize the report instance."""
        self.options.report = (reporter or self.options.reporter)(self.options)
        return self.options.report
    def check_files(self, paths=None):
        """Run all checks on the paths."""
        if paths is None:
            paths = self.paths
        report = self.options.report
        runner = self.runner
        report.start()
        try:
            for path in paths:
                if os.path.isdir(path):
                    self.input_dir(path)
                elif not self.excluded(path):
                    runner(path)
        except KeyboardInterrupt:
            print('... stopped')
        report.stop()
        return report
    def input_file(self, filename, lines=None, expected=None, line_offset=0):
        """Run all checks on a Python source file."""
        if self.options.verbose:
            print('checking %s' % filename)
        fchecker = self.checker_class(
            filename, lines=lines, options=self.options)
        return fchecker.check_all(expected=expected, line_offset=line_offset)
    def input_dir(self, dirname):
        """Check all files in this directory and all subdirectories."""
        dirname = dirname.rstrip('/')
        if self.excluded(dirname):
            return 0
        counters = self.options.report.counters
        verbose = self.options.verbose
        filepatterns = self.options.filename
        runner = self.runner
        for root, dirs, files in os.walk(dirname):
            if verbose:
                print('directory ' + root)
            counters['directories'] += 1
            # Prune excluded subdirectories in place so os.walk skips them.
            for subdir in sorted(dirs):
                if self.excluded(subdir, root):
                    dirs.remove(subdir)
            for filename in sorted(files):
                # contain a pattern that matches?
                if ((filename_match(filename, filepatterns) and
                     not self.excluded(filename, root))):
                    runner(os.path.join(root, filename))
    def excluded(self, filename, parent=None):
        """
        Check if options.exclude contains a pattern that matches filename.
        """
        if not self.options.exclude:
            return False
        basename = os.path.basename(filename)
        if filename_match(basename, self.options.exclude):
            return True
        if parent:
            filename = os.path.join(parent, filename)
        filename = os.path.abspath(filename)
        return filename_match(filename, self.options.exclude)
    def ignore_code(self, code):
        """
        Check if the error code should be ignored.
        If 'options.select' contains a prefix of the error code,
        return False.  Else, if 'options.ignore' contains a prefix of
        the error code, return True.
        """
        if len(code) < 4 and any(s.startswith(code)
                                 for s in self.options.select):
            return False
        return (code.startswith(self.options.ignore) and
                not code.startswith(self.options.select))
    def get_checks(self, argument_name):
        """
        Find all globally visible functions where the first argument name
        starts with argument_name and which contain selected tests.
        """
        checks = []
        for check, attrs in _checks[argument_name].items():
            (codes, args) = attrs
            # Keep the check if at least one of its codes is selected.
            if any(not (code and self.ignore_code(code)) for code in codes):
                checks.append((check.__name__, check, args))
        return sorted(checks)
def get_parser(prog='pep8', version=__version__):
    """Build the command-line option parser for the style checker."""
    parser = OptionParser(prog=prog, version=version,
                          usage="%prog [options] input ...")
    # Options which may also be set from a configuration file.
    parser.config_options = [
        'exclude', 'filename', 'select', 'ignore', 'max-line-length',
        'hang-closing', 'count', 'format', 'quiet', 'show-pep8',
        'show-source', 'statistics', 'verbose']
    parser.add_option('-v', '--verbose', default=0, action='count',
                      help="print status messages, or debug with -vv")
    parser.add_option('-q', '--quiet', default=0, action='count',
                      help="report only file names, or nothing with -qq")
    parser.add_option('-r', '--repeat', default=True, action='store_true',
                      help="(obsolete) show all occurrences of the same error")
    parser.add_option('--first', action='store_false', dest='repeat',
                      help="show first occurrence of each error")
    parser.add_option('--exclude', metavar='patterns', default=DEFAULT_EXCLUDE,
                      help="exclude files or directories which match these "
                           "comma separated patterns (default: %default)")
    parser.add_option('--filename', metavar='patterns', default='*.py',
                      help="when parsing directories, only check filenames "
                           "matching these comma separated patterns "
                           "(default: %default)")
    parser.add_option('--select', metavar='errors', default='',
                      help="select errors and warnings (e.g. E,W6)")
    parser.add_option('--ignore', metavar='errors', default='',
                      help="skip errors and warnings (e.g. E4,W)")
    parser.add_option('--show-source', action='store_true',
                      help="show source code for each error")
    parser.add_option('--show-pep8', action='store_true',
                      help="show text of PEP 8 for each error "
                           "(implies --first)")
    parser.add_option('--statistics', action='store_true',
                      help="count errors and warnings")
    parser.add_option('--count', action='store_true',
                      help="print total number of errors and warnings "
                           "to standard error and set exit code to 1 if "
                           "total is not null")
    parser.add_option('--max-line-length', type='int', metavar='n',
                      default=MAX_LINE_LENGTH,
                      help="set maximum allowed line length "
                           "(default: %default)")
    parser.add_option('--hang-closing', action='store_true',
                      help="hang closing bracket instead of matching "
                           "indentation of opening bracket's line")
    parser.add_option('--format', metavar='format', default='default',
                      help="set the error format [default|pylint|<custom>]")
    parser.add_option('--diff', action='store_true',
                      help="report only lines changed according to the "
                           "unified diff received on STDIN")
    group = parser.add_option_group("Testing Options")
    # The regression-test options exist only when a testsuite/ dir ships
    # next to this module.
    if os.path.exists(TESTSUITE_PATH):
        group.add_option('--testsuite', metavar='dir',
                         help="run regression tests from dir")
        group.add_option('--doctest', action='store_true',
                         help="run doctest on myself")
    group.add_option('--benchmark', action='store_true',
                     help="measure processing speed")
    return parser
def read_config(options, args, arglist, parser):
    """Read both user configuration and local configuration."""
    config = RawConfigParser()
    user_conf = options.config
    if user_conf and os.path.isfile(user_conf):
        if options.verbose:
            print('user configuration: %s' % user_conf)
        config.read(user_conf)
    # Walk up from the common prefix of the checked paths looking for a
    # project configuration file (setup.cfg, tox.ini or .pep8); the local
    # config overrides the user config because it is read afterwards.
    local_dir = os.curdir
    parent = tail = args and os.path.abspath(os.path.commonprefix(args))
    while tail:
        if config.read([os.path.join(parent, fn) for fn in PROJECT_CONFIG]):
            local_dir = parent
            if options.verbose:
                print('local configuration: in %s' % parent)
            break
        (parent, tail) = os.path.split(parent)
    pep8_section = parser.prog
    if config.has_section(pep8_section):
        # Map each option's destination name to its type for coercion.
        option_list = dict([(o.dest, o.type or o.action)
                            for o in parser.option_list])
        # First, read the default values
        (new_options, __) = parser.parse_args([])
        # Second, parse the configuration
        for opt in config.options(pep8_section):
            if options.verbose > 1:
                print("  %s = %s" % (opt, config.get(pep8_section, opt)))
            if opt.replace('_', '-') not in parser.config_options:
                print("Unknown option: '%s'\n  not in [%s]" %
                      (opt, ' '.join(parser.config_options)))
                sys.exit(1)
            normalized_opt = opt.replace('-', '_')
            opt_type = option_list[normalized_opt]
            if opt_type in ('int', 'count'):
                value = config.getint(pep8_section, opt)
            elif opt_type == 'string':
                value = config.get(pep8_section, opt)
                if normalized_opt == 'exclude':
                    # Exclude paths are relative to the local config dir.
                    value = normalize_paths(value, local_dir)
            else:
                assert opt_type in ('store_true', 'store_false')
                value = config.getboolean(pep8_section, opt)
            setattr(new_options, normalized_opt, value)
        # Third, overwrite with the command-line options
        (options, __) = parser.parse_args(arglist, values=new_options)
    options.doctest = options.testsuite = False
    return options
def process_options(arglist=None, parse_argv=False, config_file=None,
                    parser=None):
    """Process options passed either via arglist or via command line args."""
    if not parser:
        parser = get_parser()
    if not parser.has_option('--config'):
        if config_file is True:
            config_file = DEFAULT_CONFIG
        group = parser.add_option_group("Configuration", description=(
            "The project options are read from the [%s] section of the "
            "tox.ini file or the setup.cfg file located in any parent folder "
            "of the path(s) being processed.  Allowed options are: %s." %
            (parser.prog, ', '.join(parser.config_options))))
        group.add_option('--config', metavar='path', default=config_file,
                         help="user config file location (default: %default)")
    # Don't read the command line if the module is used as a library.
    if not arglist and not parse_argv:
        arglist = []
    # If parse_argv is True and arglist is None, arguments are
    # parsed from the command line (sys.argv)
    (options, args) = parser.parse_args(arglist)
    options.reporter = None
    if options.ensure_value('testsuite', False):
        args.append(options.testsuite)
    elif not options.ensure_value('doctest', False):
        if parse_argv and not args:
            # No explicit input: check '.' when a project config or a diff
            # makes that meaningful, otherwise it is a usage error.
            if options.diff or any(os.path.exists(name)
                                   for name in PROJECT_CONFIG):
                args = ['.']
            else:
                parser.error('input not specified')
        options = read_config(options, args, arglist, parser)
    options.reporter = parse_argv and options.quiet == 1 and FileReport
    # Normalize the comma-separated string options into lists.
    options.filename = options.filename and options.filename.split(',')
    options.exclude = normalize_paths(options.exclude)
    options.select = options.select and options.select.split(',')
    options.ignore = options.ignore and options.ignore.split(',')
    if options.diff:
        options.reporter = DiffReport
        stdin = stdin_get_value()
        options.selected_lines = parse_udiff(stdin, options.filename, args[0])
        args = sorted(options.selected_lines)
    return options, args
def _main():
    """Parse options and run checks on Python source.

    Exits with status 1 (via sys.exit) when errors were found and --count
    was requested.
    """
    pep8style = StyleGuide(parse_argv=True, config_file=True)
    options = pep8style.options
    # BUG FIX: the bundled regression/doctest suite was removed, so the
    # doctest/testsuite branch used to leave 'report' unassigned, and the
    # later 'report.total_errors' raised NameError.  Initialize it to None
    # and bail out early when no checks were actually run.
    report = None
    if options.doctest or options.testsuite:
        #disabled test suite
        #from testsuite.support import run_tests
        #report = run_tests(pep8style)
        pass
    else:
        report = pep8style.check_files()
    if report is None:
        # Nothing was checked (test-suite support is disabled).
        return
    if options.statistics:
        report.print_statistics()
    if options.benchmark:
        report.print_benchmark()
    if options.testsuite and not options.quiet:
        #disabled test suite
        #report.print_results()
        pass
    if report.total_errors:
        if options.count:
            sys.stderr.write(str(report.total_errors) + '\n')
        sys.exit(1)
# Allow the module to be executed directly as a script.
if __name__ == '__main__':
    _main()
# Fixed issues detected by pyflakes.
#!/usr/bin/env python
# pep8.py - Check Python source code formatting, according to PEP 8
# Copyright (C) 2006-2009 Johann C. Rocholl <johann@rocholl.net>
# Copyright (C) 2009-2013 Florent Xicluna <florent.xicluna@gmail.com>
#
# Permission is hereby granted, free of charge, to any person
# obtaining a copy of this software and associated documentation files
# (the "Software"), to deal in the Software without restriction,
# including without limitation the rights to use, copy, modify, merge,
# publish, distribute, sublicense, and/or sell copies of the Software,
# and to permit persons to whom the Software is furnished to do so,
# subject to the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
# BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
r"""
Check Python source code formatting, according to PEP 8:
http://www.python.org/dev/peps/pep-0008/
For usage and a list of options, try this:
$ python pep8.py -h
This program and its regression test suite live here:
http://github.com/jcrocholl/pep8
Groups of errors and warnings:
E errors
W warnings
100 indentation
200 whitespace
300 blank lines
400 imports
500 line length
600 deprecation
700 statements
900 syntax error
"""
# Package version; exposed via optparse's --version and get_parser().
__version__ = '1.5.1'
import os
import sys
import re
import time
import inspect
import keyword
import tokenize
from optparse import OptionParser
from fnmatch import fnmatch
try:
from configparser import RawConfigParser
from io import TextIOWrapper
except ImportError:
from ConfigParser import RawConfigParser
# Directory names skipped by default during recursive discovery.
DEFAULT_EXCLUDE = '.svn,CVS,.bzr,.hg,.git,__pycache__'
# Error codes ignored unless explicitly selected on the command line.
DEFAULT_IGNORE = 'E123,E226,E24'
if sys.platform == 'win32':
    # Windows: the per-user configuration lives in the home directory.
    DEFAULT_CONFIG = os.path.expanduser(r'~\.pep8')
else:
    # POSIX: honour $XDG_CONFIG_HOME, falling back to ~/.config.
    DEFAULT_CONFIG = os.path.join(os.getenv('XDG_CONFIG_HOME') or
                                  os.path.expanduser('~/.config'), 'pep8')
# Configuration files looked up at the project root.
PROJECT_CONFIG = ('setup.cfg', 'tox.ini', '.pep8')
# Location of the regression test suite shipped alongside this module.
TESTSUITE_PATH = os.path.join(os.path.dirname(__file__), 'testsuite')
MAX_LINE_LENGTH = 79
# Report line templates, selected with the --format option.
REPORT_FORMAT = {
    'default': '%(path)s:%(row)d:%(col)d: %(code)s %(text)s',
    'pylint': '%(path)s:%(row)d: [%(code)s] %(text)s',
}
# compile() flag requesting an AST instead of executable code.
PyCF_ONLY_AST = 1024
SINGLETONS = frozenset(['False', 'None', 'True'])
# Keywords checked for surrounding whitespace; the singletons are excluded
# because they behave like values, not operators.
KEYWORDS = frozenset(keyword.kwlist + ['print']) - SINGLETONS
# Operators that can also appear in unary position (e.g. -x, *args, **kw).
UNARY_OPERATORS = frozenset(['>>', '**', '*', '+', '-'])
ARITHMETIC_OP = frozenset(['**', '*', '/', '//', '+', '-'])
# Operators around which whitespace is optional (E226/E227/E228).
WS_OPTIONAL_OPERATORS = ARITHMETIC_OP.union(['^', '&', '|', '<<', '>>', '%'])
# Operators that must always be surrounded by whitespace (E225).
WS_NEEDED_OPERATORS = frozenset([
    '**=', '*=', '/=', '//=', '+=', '-=', '!=', '<>', '<', '>',
    '%=', '^=', '&=', '|=', '==', '<=', '>=', '<<=', '>>=', '='])
WHITESPACE = frozenset(' \t')
# Token types that never participate in whitespace/indentation checks.
SKIP_TOKENS = frozenset([tokenize.COMMENT, tokenize.NL, tokenize.NEWLINE,
                         tokenize.INDENT, tokenize.DEDENT])
BENCHMARK_KEYS = ['directories', 'files', 'logical lines', 'physical lines']
# Regular expressions used by the individual checks below.
INDENT_REGEX = re.compile(r'([ \t]*)')
RAISE_COMMA_REGEX = re.compile(r'raise\s+\w+\s*,')
RERAISE_COMMA_REGEX = re.compile(r'raise\s+\w+\s*,.*,\s*\w+\s*$')
ERRORCODE_REGEX = re.compile(r'\b[A-Z]\d{3}\b')
DOCSTRING_REGEX = re.compile(r'u?r?["\']')
EXTRANEOUS_WHITESPACE_REGEX = re.compile(r'[[({] | []}),;:]')
WHITESPACE_AFTER_COMMA_REGEX = re.compile(r'[,;:]\s*(?:  |\t)')
COMPARE_SINGLETON_REGEX = re.compile(r'([=!]=)\s*(None|False|True)')
COMPARE_NEGATIVE_REGEX = re.compile(r'\b(not)\s+[^[({ ]+\s+(in|is)\s')
COMPARE_TYPE_REGEX = re.compile(r'(?:[=!]=|is(?:\s+not)?)\s*type(?:s.\w+Type'
                                r'|\s*\(\s*([^)]*[^ )])\s*\))')
KEYWORD_REGEX = re.compile(r'(\s*)\b(?:%s)\b(\s*)' % r'|'.join(KEYWORDS))
OPERATOR_REGEX = re.compile(r'(?:[^,\s])(\s*)(?:[-+*/|!<=>%&^]+)(\s*)')
LAMBDA_REGEX = re.compile(r'\blambda\b')
HUNK_REGEX = re.compile(r'^@@ -\d+(?:,\d+)? \+(\d+)(?:,(\d+))? @@.*$')
# Work around Python < 2.6 behaviour, which does not generate NL after
# a comment which is on a line by itself.
COMMENT_WITH_NL = tokenize.generate_tokens(['#\n'].pop).send(None)[1] == '#\n'
##############################################################################
# Plugins (check functions) for physical lines
##############################################################################
def tabs_or_spaces(physical_line, indent_char):
    r"""
    Never mix tabs and spaces.
    The most popular way of indenting Python is with spaces only. The
    second-most popular way is with tabs only. Code indented with a mixture
    of tabs and spaces should be converted to using spaces exclusively. When
    invoking the Python command line interpreter with the -t option, it issues
    warnings about code that illegally mixes tabs and spaces. When using -tt
    these warnings become errors. These options are highly recommended!
    Okay: if a == 0:\n a = 1\n b = 1
    E101: if a == 0:\n a = 1\n\tb = 1
    """
    # Compare each leading whitespace character against the file's dominant
    # indent character; the first mismatch is reported.
    leading = INDENT_REGEX.match(physical_line).group(1)
    for position, character in enumerate(leading):
        if character != indent_char:
            return position, "E101 indentation contains mixed spaces and tabs"
def tabs_obsolete(physical_line):
    r"""
    For new projects, spaces-only are strongly recommended over tabs. Most
    editors have features that make this easy to do.
    Okay: if True:\n return
    W191: if True:\n\treturn
    """
    # Only the leading indentation is inspected; tabs elsewhere are fine.
    leading = INDENT_REGEX.match(physical_line).group(1)
    tab_offset = leading.find('\t')
    if tab_offset != -1:
        return tab_offset, "W191 indentation contains tabs"
def trailing_whitespace(physical_line):
    r"""
    JCR: Trailing whitespace is superfluous.
    FBM: Except when it occurs as part of a blank line (i.e. the line is
    nothing but whitespace). According to Python docs[1] a line with only
    whitespace is considered a blank line, and is to be ignored. However,
    matching a blank line to its indentation level avoids mistakenly
    terminating a multi-line statement (e.g. class declaration) when
    pasting code into the standard Python interpreter.
    [1] http://docs.python.org/reference/lexical_analysis.html#blank-lines
    The warning returned varies on whether the line itself is blank, for easier
    filtering for those who want to indent their blank lines.
    Okay: spam(1)\n#
    W291: spam(1) \n#
    W293: class Foo(object):\n \n bang = 12
    """
    # Drop line terminators first: newline (chr 10), carriage return
    # (chr 13) and form feed (chr 12) do not count as trailing whitespace.
    for terminator in ('\n', '\r', '\x0c'):
        physical_line = physical_line.rstrip(terminator)
    stripped = physical_line.rstrip(' \t\v')
    if stripped == physical_line:
        return
    if stripped:
        return len(stripped), "W291 trailing whitespace"
    return 0, "W293 blank line contains whitespace"
def trailing_blank_lines(physical_line, lines, line_number):
    r"""
    JCR: Trailing blank lines are superfluous.
    Okay: spam(1)
    W391: spam(1)\n
    """
    # Only the very last physical line of the file can trigger W391.
    is_last_line = (line_number == len(lines))
    if is_last_line and not physical_line.rstrip():
        return 0, "W391 blank line at end of file"
def missing_newline(physical_line):
    """
    JCR: The last line should have a newline.
    Reports warning W292.
    """
    # A line with no trailing whitespace at all must be the final line of a
    # file that lacks the terminating newline.
    if physical_line == physical_line.rstrip():
        return len(physical_line), "W292 no newline at end of file"
def maximum_line_length(physical_line, max_line_length, multiline):
    """
    Limit all lines to a maximum of 79 characters.
    There are still many devices around that are limited to 80 character
    lines; plus, limiting windows to 80 characters makes it possible to have
    several windows side-by-side. The default wrapping on such devices looks
    ugly. Therefore, please limit all lines to a maximum of 79 characters.
    For flowing long blocks of text (docstrings or comments), limiting the
    length to 72 characters is recommended.
    Reports error E501.
    """
    text = physical_line.rstrip()
    length = len(text)
    if length <= max_line_length or noqa(text):
        return
    # Special case for long URLs in multi-line docstrings or comments,
    # but still report the error when the 72 first chars are whitespaces.
    words = text.split()
    single_long_token = ((len(words) == 1 and multiline) or
                         (len(words) == 2 and words[0] == '#'))
    if single_long_token and \
            len(text) - len(words[-1]) < max_line_length - 7:
        return
    if hasattr(text, 'decode'):   # Python 2
        # The line could contain multi-byte characters
        try:
            length = len(text.decode('utf-8'))
        except UnicodeError:
            pass
    if length > max_line_length:
        return (max_line_length, "E501 line too long "
                "(%d > %d characters)" % (length, max_line_length))
##############################################################################
# Plugins (check functions) for logical lines
##############################################################################
def blank_lines(logical_line, blank_lines, indent_level, line_number,
                previous_logical, previous_indent_level):
    r"""
    Separate top-level function and class definitions with two blank lines.
    Method definitions inside a class are separated by a single blank line.
    Extra blank lines may be used (sparingly) to separate groups of related
    functions. Blank lines may be omitted between a bunch of related
    one-liners (e.g. a set of dummy implementations).
    Use blank lines in functions, sparingly, to indicate logical sections.
    Okay: def a():\n pass\n\n\ndef b():\n pass
    Okay: def a():\n pass\n\n\n# Foo\n# Bar\n\ndef b():\n pass
    E301: class Foo:\n b = 0\n def bar():\n pass
    E302: def a():\n pass\n\ndef b(n):\n pass
    E303: def a():\n pass\n\n\n\ndef b(n):\n pass
    E303: def a():\n\n\n\n pass
    E304: @decorator\n\ndef a():\n pass
    """
    if line_number < 3 and not previous_logical:
        return  # Don't expect blank lines before the first line
    if previous_logical.startswith('@'):
        # A decorator must be glued to the definition that follows it.
        if blank_lines:
            yield 0, "E304 blank lines found after function decorator"
    elif blank_lines > 2 or (indent_level and blank_lines == 2):
        yield 0, "E303 too many blank lines (%d)" % blank_lines
    elif logical_line.startswith(('def ', 'class ', '@')):
        if not indent_level:
            # Top-level definitions want exactly two blank lines above.
            if blank_lines != 2:
                yield 0, "E302 expected 2 blank lines, found %d" % blank_lines
        elif not (blank_lines or previous_indent_level < indent_level or
                  DOCSTRING_REGEX.match(previous_logical)):
            yield 0, "E301 expected 1 blank line, found 0"
def extraneous_whitespace(logical_line):
    """
    Avoid extraneous whitespace in the following situations:
    - Immediately inside parentheses, brackets or braces.
    - Immediately before a comma, semicolon, or colon.
    Okay: spam(ham[1], {eggs: 2})
    E201: spam( ham[1], {eggs: 2})
    E201: spam(ham[ 1], {eggs: 2})
    E201: spam(ham[1], { eggs: 2})
    E202: spam(ham[1], {eggs: 2} )
    E202: spam(ham[1 ], {eggs: 2})
    E202: spam(ham[1], {eggs: 2 })
    E203: if x == 4: print x, y; x, y = y , x
    E203: if x == 4: print x, y ; x, y = y, x
    E203: if x == 4 : print x, y; x, y = y, x
    """
    for match in EXTRANEOUS_WHITESPACE_REGEX.finditer(logical_line):
        snippet = match.group()
        symbol = snippet.strip()
        position = match.start()
        if snippet == symbol + ' ':
            # An opening bracket followed by a space.
            yield position + 1, "E201 whitespace after '%s'" % symbol
        elif logical_line[position - 1] != ',':
            # A space before a closer or separator (a preceding comma is
            # tolerated, e.g. "(3, )" is handled by another check).
            code = ('E202' if symbol in '}])' else 'E203')
            yield position, "%s whitespace before '%s'" % (code, symbol)
def whitespace_around_keywords(logical_line):
    r"""
    Avoid extraneous whitespace around keywords.
    Okay: True and False
    E271: True and  False
    E272: True  and False
    E273: True and\tFalse
    E274: True\tand False
    """
    for m in KEYWORD_REGEX.finditer(logical_line):
        before, after = m.groups()
        # Inspect the gap on each side of the keyword with its own codes.
        sides = (
            (1, before, "E274 tab before keyword",
             "E272 multiple spaces before keyword"),
            (2, after, "E273 tab after keyword",
             "E271 multiple spaces after keyword"),
        )
        for group, gap, tab_message, spaces_message in sides:
            if '\t' in gap:
                yield m.start(group), tab_message
            elif len(gap) > 1:
                yield m.start(group), spaces_message
def missing_whitespace(logical_line):
    """
    JCR: Each comma, semicolon or colon should be followed by whitespace.
    Okay: [a, b]
    Okay: (3,)
    Okay: a[1:4]
    Okay: a[:4]
    Okay: a[1:]
    Okay: a[1:4:2]
    E231: ['a','b']
    E231: foo(bar,baz)
    E231: [{'a':'b'}]
    """
    line = logical_line
    # The final character needs no trailing whitespace, hence line[:-1].
    for index, char in enumerate(line[:-1]):
        if char not in ',;:':
            continue
        next_char = line[index + 1]
        if next_char in WHITESPACE:
            continue
        before = line[:index]
        if char == ':' and before.count('[') > before.count(']') and \
                before.rfind('{') < before.rfind('['):
            continue  # Slice syntax, no space required
        if char == ',' and next_char == ')':
            continue  # Allow tuple with only one element: (3,)
        yield index, "E231 missing whitespace after '%s'" % char
def indentation(logical_line, previous_logical, indent_char,
                indent_level, previous_indent_level):
    r"""
    Use 4 spaces per indentation level.
    For really old code that you don't want to mess up, you can continue to
    use 8-space tabs.
    Okay: a = 1
    Okay: if a == 0:\n a = 1
    E111: a = 1
    Okay: for item in items:\n pass
    E112: for item in items:\npass
    Okay: a = 1\nb = 2
    E113: a = 1\n b = 2
    """
    if indent_char == ' ' and indent_level % 4:
        yield 0, "E111 indentation is not a multiple of four"
    # A logical line ending in ':' announces an indented block.
    expecting_indent = previous_logical.endswith(':')
    indented_more = indent_level > previous_indent_level
    if expecting_indent and not indented_more:
        yield 0, "E112 expected an indented block"
    elif indented_more and not expecting_indent:
        yield 0, "E113 unexpected indentation"
def continued_indentation(logical_line, tokens, indent_level, hang_closing,
                          indent_char, noqa, verbose):
    r"""
    Continuation lines should align wrapped elements either vertically using
    Python's implicit line joining inside parentheses, brackets and braces, or
    using a hanging indent.
    When using a hanging indent the following considerations should be applied:
    - there should be no arguments on the first line, and
    - further indentation should be used to clearly distinguish itself as a
    continuation line.
    Okay: a = (\n)
    E123: a = (\n )
    Okay: a = (\n 42)
    E121: a = (\n 42)
    E122: a = (\n42)
    E123: a = (\n 42\n )
    E124: a = (24,\n 42\n)
    E125: if (\n b):\n pass
    E126: a = (\n 42)
    E127: a = (24,\n 42)
    E128: a = (24,\n 42)
    E129: if (a or\n b):\n pass
    E131: a = (\n 42\n 24)
    """
    first_row = tokens[0][2][0]
    nrows = 1 + tokens[-1][2][0] - first_row
    # Single-row logical lines cannot have continuation problems.
    if noqa or nrows == 1:
        return
    # indent_next tells us whether the next block is indented; assuming
    # that it is indented by 4 spaces, then we should not allow 4-space
    # indents on the final continuation line; in turn, some other
    # indents are allowed to have an extra 4 spaces.
    indent_next = logical_line.endswith(':')
    row = depth = 0
    valid_hangs = (4,) if indent_char != '\t' else (4, 8)
    # remember how many brackets were opened on each line
    parens = [0] * nrows
    # relative indents of physical lines
    rel_indent = [0] * nrows
    # for each depth, collect a list of opening rows
    open_rows = [[0]]
    # for each depth, memorize the hanging indentation
    hangs = [None]
    # visual indents
    indent_chances = {}
    last_indent = tokens[0][2]
    visual_indent = None
    # for each depth, memorize the visual indent column
    indent = [last_indent[1]]
    if verbose >= 3:
        print(">>> " + tokens[0][4].rstrip())
    last_token_multiline = None
    # Walk the tokens, tracking bracket depth and, per depth, both the
    # hanging indent and the columns where a visual indent is plausible.
    for token_type, text, start, end, line in tokens:
        newline = row < start[0] - first_row
        if newline:
            row = start[0] - first_row
            newline = (not last_token_multiline and
                       token_type not in (tokenize.NL, tokenize.NEWLINE))
        if newline:
            # this is the beginning of a continuation line.
            last_indent = start
            if verbose >= 3:
                print("... " + line.rstrip())
            # record the initial indent.
            rel_indent[row] = expand_indent(line) - indent_level
            # identify closing bracket
            close_bracket = (token_type == tokenize.OP and text in ']})')
            # is the indent relative to an opening bracket line?
            for open_row in reversed(open_rows[depth]):
                hang = rel_indent[row] - rel_indent[open_row]
                hanging_indent = hang in valid_hangs
                if hanging_indent:
                    break
            if hangs[depth]:
                hanging_indent = (hang == hangs[depth])
            # is there any chance of visual indent?
            visual_indent = (not close_bracket and hang > 0 and
                             indent_chances.get(start[1]))
            if close_bracket and indent[depth]:
                # closing bracket for visual indent
                if start[1] != indent[depth]:
                    yield (start, "E124 closing bracket does not match "
                           "visual indentation")
            elif close_bracket and not hang:
                # closing bracket matches indentation of opening bracket's line
                if hang_closing:
                    yield start, "E133 closing bracket is missing indentation"
            elif indent[depth] and start[1] < indent[depth]:
                if visual_indent is not True:
                    # visual indent is broken
                    yield (start, "E128 continuation line "
                           "under-indented for visual indent")
            elif hanging_indent or (indent_next and rel_indent[row] == 8):
                # hanging indent is verified
                if close_bracket and not hang_closing:
                    yield (start, "E123 closing bracket does not match "
                           "indentation of opening bracket's line")
                hangs[depth] = hang
            elif visual_indent is True:
                # visual indent is verified
                indent[depth] = start[1]
            elif visual_indent in (text, str):
                # ignore token lined up with matching one from a previous line
                pass
            else:
                # indent is broken
                if hang <= 0:
                    error = "E122", "missing indentation or outdented"
                elif indent[depth]:
                    error = "E127", "over-indented for visual indent"
                elif not close_bracket and hangs[depth]:
                    error = "E131", "unaligned for hanging indent"
                else:
                    hangs[depth] = hang
                    if hang > 4:
                        error = "E126", "over-indented for hanging indent"
                    else:
                        error = "E121", "under-indented for hanging indent"
                yield start, "%s continuation line %s" % error
        # look for visual indenting
        if (parens[row] and token_type not in (tokenize.NL, tokenize.COMMENT)
                and not indent[depth]):
            indent[depth] = start[1]
            indent_chances[start[1]] = True
            if verbose >= 4:
                print("bracket depth %s indent to %s" % (depth, start[1]))
        # deal with implicit string concatenation
        elif (token_type in (tokenize.STRING, tokenize.COMMENT) or
              text in ('u', 'ur', 'b', 'br')):
            indent_chances[start[1]] = str
        # special case for the "if" statement because len("if (") == 4
        elif not indent_chances and not row and not depth and text == 'if':
            indent_chances[end[1] + 1] = True
        elif text == ':' and line[end[1]:].isspace():
            open_rows[depth].append(row)
        # keep track of bracket depth
        if token_type == tokenize.OP:
            if text in '([{':
                depth += 1
                indent.append(0)
                hangs.append(None)
                if len(open_rows) == depth:
                    open_rows.append([])
                open_rows[depth].append(row)
                parens[row] += 1
                if verbose >= 4:
                    print("bracket depth %s seen, col %s, visual min = %s" %
                          (depth, start[1], indent[depth]))
            elif text in ')]}' and depth > 0:
                # parent indents should not be more than this one
                prev_indent = indent.pop() or last_indent[1]
                hangs.pop()
                for d in range(depth):
                    if indent[d] > prev_indent:
                        indent[d] = 0
                for ind in list(indent_chances):
                    if ind >= prev_indent:
                        del indent_chances[ind]
                del open_rows[depth + 1:]
                depth -= 1
                if depth:
                    indent_chances[indent[depth]] = True
                for idx in range(row, -1, -1):
                    if parens[idx]:
                        parens[idx] -= 1
                        break
            assert len(indent) == depth + 1
            if start[1] not in indent_chances:
                # allow to line up tokens
                indent_chances[start[1]] = text
        last_token_multiline = (start[0] != end[0])
    # E125/E129: the statement's last continuation line must not share its
    # indent with the block that follows the trailing ':'.
    if indent_next and expand_indent(line) == indent_level + 4:
        if visual_indent:
            code = "E129 visually indented line"
        else:
            code = "E125 continuation line"
        yield (last_indent, "%s with same indent as next logical line" % code)
def whitespace_before_parameters(logical_line, tokens):
    """
    Avoid extraneous whitespace in the following situations:
    - Immediately before the open parenthesis that starts the argument
    list of a function call.
    - Immediately before the open parenthesis that starts an indexing or
    slicing.
    Okay: spam(1)
    E211: spam (1)
    Okay: dict['key'] = list[index]
    E211: dict ['key'] = list[index]
    E211: dict['key'] = list [index]
    """
    prev_type, prev_text, __, prev_end, __ = tokens[0]
    for index in range(1, len(tokens)):
        token_type, text, start, end, __ = tokens[index]
        opener = (token_type == tokenize.OP and text in '([')
        follows_value = (prev_type == tokenize.NAME or prev_text in '}])')
        if (opener and follows_value and start != prev_end and
                # Syntax "class A (B):" is allowed, but avoid it
                (index < 2 or tokens[index - 2][1] != 'class') and
                # Allow "return (a.foo for a in range(5))"
                not keyword.iskeyword(prev_text)):
            yield prev_end, "E211 whitespace before '%s'" % text
        prev_type, prev_text, prev_end = token_type, text, end
def whitespace_around_operator(logical_line):
    r"""
    Avoid extraneous whitespace in the following situations:
    - More than one space around an assignment (or other) operator to
    align it with another.
    Okay: a = 12 + 3
    E221: a = 4  + 5
    E222: a = 4 +  5
    E223: a = 4\t+ 5
    E224: a = 4 +\t5
    """
    for m in OPERATOR_REGEX.finditer(logical_line):
        before, after = m.groups()
        # Each side of the operator has its own pair of error codes.
        sides = (
            (1, before, "E223 tab before operator",
             "E221 multiple spaces before operator"),
            (2, after, "E224 tab after operator",
             "E222 multiple spaces after operator"),
        )
        for group, gap, tab_message, spaces_message in sides:
            if '\t' in gap:
                yield m.start(group), tab_message
            elif len(gap) > 1:
                yield m.start(group), spaces_message
def missing_whitespace_around_operator(logical_line, tokens):
    r"""
    - Always surround these binary operators with a single space on
    either side: assignment (=), augmented assignment (+=, -= etc.),
    comparisons (==, <, >, !=, <>, <=, >=, in, not in, is, is not),
    Booleans (and, or, not).
    - Use spaces around arithmetic operators.
    Okay: i = i + 1
    Okay: submitted += 1
    Okay: x = x * 2 - 1
    Okay: hypot2 = x * x + y * y
    Okay: c = (a + b) * (a - b)
    Okay: foo(bar, key='word', *args, **kwargs)
    Okay: alpha[:-i]
    E225: i=i+1
    E225: submitted +=1
    E225: x = x /2 - 1
    E225: z = x **y
    E226: c = (a+b) * (a-b)
    E226: hypot2 = x*x + y*y
    E227: c = a|b
    E228: msg = fmt%(errno, errmsg)
    """
    parens = 0
    # need_space is a tri-state flag describing the last operator seen:
    #   False       -> nothing pending
    #   True        -> mandatory-space operator; a missing side is E225
    #   (pos, bool) -> optional-space operator at `pos`; the bool records
    #                  whether a space preceded it (the trailing side must
    #                  match, otherwise E225/E226/E227/E228 is emitted)
    need_space = False
    prev_type = tokenize.OP
    prev_text = prev_end = None
    for token_type, text, start, end, line in tokens:
        if token_type in (tokenize.NL, tokenize.NEWLINE, tokenize.ERRORTOKEN):
            # ERRORTOKEN is triggered by backticks in Python 3
            continue
        # 'lambda' opens a pseudo-paren so that '=' in its defaults is
        # treated like a keyword/default '=' inside a call.
        if text in ('(', 'lambda'):
            parens += 1
        elif text == ')':
            parens -= 1
        if need_space:
            if start != prev_end:
                # Found a (probably) needed space
                if need_space is not True and not need_space[1]:
                    yield (need_space[0],
                           "E225 missing whitespace around operator")
                need_space = False
            elif text == '>' and prev_text in ('<', '-'):
                # Tolerate the "<>" operator, even if running Python 3
                # Deal with Python 3's annotated return value "->"
                pass
            else:
                if need_space is True or need_space[1]:
                    # A needed trailing space was not found
                    yield prev_end, "E225 missing whitespace around operator"
                else:
                    # Optional-space operator with no space on either side:
                    # downgrade to the operator-family-specific code.
                    code, optype = 'E226', 'arithmetic'
                    if prev_text == '%':
                        code, optype = 'E228', 'modulo'
                    elif prev_text not in ARITHMETIC_OP:
                        code, optype = 'E227', 'bitwise or shift'
                    yield (need_space[0], "%s missing whitespace "
                           "around %s operator" % (code, optype))
                need_space = False
        elif token_type == tokenize.OP and prev_end is not None:
            if text == '=' and parens:
                # Allow keyword args or defaults: foo(bar=None).
                pass
            elif text in WS_NEEDED_OPERATORS:
                need_space = True
            elif text in UNARY_OPERATORS:
                # Check if the operator is being used as a binary operator
                # Allow unary operators: -123, -x, +1.
                # Allow argument unpacking: foo(*args, **kwargs).
                if prev_type == tokenize.OP:
                    binary_usage = (prev_text in '}])')
                elif prev_type == tokenize.NAME:
                    binary_usage = (prev_text not in KEYWORDS)
                else:
                    binary_usage = (prev_type not in SKIP_TOKENS)
                if binary_usage:
                    need_space = None
            elif text in WS_OPTIONAL_OPERATORS:
                need_space = None
            if need_space is None:
                # Surrounding space is optional, but ensure that
                # trailing space matches opening space
                need_space = (prev_end, start != prev_end)
            elif need_space and start == prev_end:
                # A needed opening space was not found
                yield prev_end, "E225 missing whitespace around operator"
                need_space = False
        prev_type = token_type
        prev_text = text
        prev_end = end
def whitespace_around_comma(logical_line):
    r"""
    Avoid extraneous whitespace in the following situations:
    - More than one space around an assignment (or other) operator to
    align it with another.
    Note: these checks are disabled by default
    Okay: a = (1, 2)
    E241: a = (1,  2)
    E242: a = (1,\t2)
    """
    for m in WHITESPACE_AFTER_COMMA_REGEX.finditer(logical_line):
        found = m.start() + 1
        punctuation = m.group()[0]
        # A tab gets its own code; runs of spaces use E241.
        what = 'E242 tab' if '\t' in m.group() else 'E241 multiple spaces'
        yield found, "%s after '%s'" % (what, punctuation)
def whitespace_around_named_parameter_equals(logical_line, tokens):
    """
    Don't use spaces around the '=' sign when used to indicate a
    keyword argument or a default parameter value.
    Okay: def complex(real, imag=0.0):
    Okay: return magic(r=real, i=imag)
    Okay: boolean(a == b)
    Okay: boolean(a != b)
    Okay: boolean(a <= b)
    Okay: boolean(a >= b)
    E251: def complex(real, imag = 0.0):
    E251: return magic(r = real, i = imag)
    """
    parens = 0
    no_space = False
    prev_end = None
    message = "E251 unexpected spaces around keyword / parameter equals"
    for token_type, text, start, end, line in tokens:
        # Keep the bracket depth up to date for *every* token, including the
        # one that immediately follows a flagged '='.  The previous version
        # skipped that token, corrupting the depth for input such as
        # "f(a=(1), b = 2)" and missing the E251 on the second argument.
        if token_type == tokenize.OP:
            if text == '(':
                parens += 1
            elif text == ')':
                parens -= 1
        if no_space:
            # The token right after a keyword '=' must be adjacent to it.
            no_space = False
            if start != prev_end:
                yield (prev_end, message)
        elif token_type == tokenize.OP and parens and text == '=':
            # A '=' inside parentheses is a keyword/default '='; neither
            # side may carry whitespace.
            no_space = True
            if start != prev_end:
                yield (prev_end, message)
        prev_end = end
def whitespace_before_comment(logical_line, tokens):
    """
    Separate inline comments by at least two spaces.
    An inline comment is a comment on the same line as a statement. Inline
    comments should be separated by at least two spaces from the statement.
    They should start with a # and a single space.
    Each line of a block comment starts with a # and a single space
    (unless it is indented text inside the comment).
    Okay: x = x + 1 # Increment x
    Okay: x = x + 1 # Increment x
    Okay: # Block comment
    E261: x = x + 1 # Increment x
    E262: x = x + 1 #Increment x
    E262: x = x + 1 # Increment x
    E265: #Block comment
    """
    prev_end = (0, 0)
    for token_type, text, start, end, line in tokens:
        if token_type == tokenize.COMMENT:
            # Anything before the '#' on the physical line makes it inline.
            inline_comment = line[:start[1]].strip()
            if inline_comment and (prev_end[0] == start[0] and
                                   start[1] < prev_end[1] + 2):
                yield (prev_end,
                       "E261 at least two spaces before inline comment")
            symbol, sp, comment = text.partition(' ')
            bad_prefix = symbol not in ('#', '#:')
            if inline_comment:
                if bad_prefix or comment[:1].isspace():
                    yield start, "E262 inline comment should start with '# '"
            elif bad_prefix and text.rstrip('#') and (
                    start[0] > 1 or symbol[1] != '!'):
                # A shebang on the very first line is tolerated.
                yield start, "E265 block comment should start with '# '"
        elif token_type != tokenize.NL:
            prev_end = end
def imports_on_separate_lines(logical_line):
    r"""
    Imports should usually be on separate lines.
    Okay: import os\nimport sys
    E401: import sys, os
    Okay: from subprocess import Popen, PIPE
    Okay: from myclas import MyClass
    Okay: from foo.bar.yourclass import YourClass
    Okay: import myclass
    Okay: import foo.bar.yourclass
    """
    line = logical_line
    if not line.startswith('import '):
        return  # "from x import a, b" is explicitly allowed
    comma = line.find(',')
    if comma > -1 and ';' not in line[:comma]:
        yield comma, "E401 multiple imports on one line"
def compound_statements(logical_line):
    r"""
    Compound statements (multiple statements on the same line) are
    generally discouraged.
    While sometimes it's okay to put an if/for/while with a small body
    on the same line, never do this for multi-clause statements. Also
    avoid folding such long lines!
    Okay: if foo == 'blah':\n do_blah_thing()
    Okay: do_one()
    Okay: do_two()
    Okay: do_three()
    E701: if foo == 'blah': do_blah_thing()
    E701: for x in lst: total += x
    E701: while t < 10: t = delay()
    E701: if foo == 'blah': do_blah_thing()
    E701: else: do_non_blah_thing()
    E701: try: something()
    E701: finally: cleanup()
    E701: if foo == 'blah': one(); two(); three()
    E702: do_one(); do_two(); do_three()
    E703: do_four(); # useless semicolon
    """
    line = logical_line
    last_char = len(line) - 1
    # E701: a ':' not ending the line and not inside brackets or a lambda.
    found = line.find(':')
    while -1 < found < last_char:
        before = line[:found]
        unbracketed = (before.count('{') <= before.count('}') and  # dict
                       before.count('[') <= before.count(']') and  # slice
                       before.count('(') <= before.count(')'))  # annotation
        if unbracketed and not LAMBDA_REGEX.search(before):  # lambda x: x
            yield found, "E701 multiple statements on one line (colon)"
        found = line.find(':', found + 1)
    # E702/E703: semicolons separating (or uselessly ending) statements.
    found = line.find(';')
    while found > -1:
        if found < last_char:
            yield found, "E702 multiple statements on one line (semicolon)"
        else:
            yield found, "E703 statement ends with a semicolon"
        found = line.find(';', found + 1)
def explicit_line_join(logical_line, tokens):
    r"""
    Avoid explicit line join between brackets.
    The preferred way of wrapping long lines is by using Python's implied line
    continuation inside parentheses, brackets and braces. Long lines can be
    broken over multiple lines by wrapping expressions in parentheses. These
    should be used in preference to using a backslash for line continuation.
    E502: aaa = [123, \\n 123]
    E502: aaa = ("bbb " \\n "ccc")
    Okay: aaa = [123,\n 123]
    Okay: aaa = ("bbb "\n "ccc")
    Okay: aaa = "bbb " \\n "ccc"
    """
    prev_start = prev_end = parens = 0
    backslash = None
    for token_type, text, start, end, line in tokens:
        # A recorded backslash is redundant only if the next token starts a
        # new row while we are still inside brackets.
        if start[0] != prev_start and parens and backslash:
            yield backslash, "E502 the backslash is redundant between brackets"
        if end[0] != prev_end:
            stripped = line.rstrip('\r\n')
            if stripped.endswith('\\'):
                # Remember where the trailing backslash sits on this row.
                backslash = (end[0], len(line.splitlines()[-1]) - 1)
            else:
                backslash = None
            prev_start = prev_end = end[0]
        else:
            prev_start = start[0]
        if token_type == tokenize.OP:
            if text in '([{':
                parens += 1
            elif text in ')]}':
                parens -= 1
def comparison_to_singleton(logical_line, noqa):
    """
    Comparisons to singletons like None should always be done
    with "is" or "is not", never the equality operators.
    Okay: if arg is not None:
    E711: if arg != None:
    E712: if arg == True:
    Also, beware of writing if x when you really mean if x is not None --
    e.g. when testing whether a variable or argument that defaults to None was
    set to some other value. The other value might have a type (such as a
    container) that could be false in a boolean context!
    """
    if noqa:
        return
    match = COMPARE_SINGLETON_REGEX.search(logical_line)
    if not match:
        return
    same = match.group(1) == '=='
    singleton = match.group(2)
    msg = "'if cond is %s:'" % (('' if same else 'not ') + singleton)
    if singleton in ('None',):
        code = 'E711'
    else:
        code = 'E712'
        # "== True" and "!= False" both mean the condition is truthy.
        nonzero = ((singleton == 'True' and same) or
                   (singleton == 'False' and not same))
        msg += " or 'if %scond:'" % ('' if nonzero else 'not ')
    yield match.start(1), ("%s comparison to %s should be %s" %
                           (code, singleton, msg))
def comparison_negative(logical_line):
    r"""
    Negative comparison, either identity or membership, should be
    done using "not in" and "is not".
    Okay: if x not in y:\n pass
    Okay: assert (X in Y or X is Z)
    Okay: if not (X in Y):\n pass
    Okay: zz = x is not y
    E713: Z = not X in Y
    E713: if not X.B in Y:\n pass
    E714: if not X is Y:\n pass
    E714: Z = not X.B is Y
    """
    match = COMPARE_NEGATIVE_REGEX.search(logical_line)
    if match:
        # group(2) tells us which comparison follows the 'not'.
        if match.group(2) == 'in':
            message = "E713 test for membership should be 'not in'"
        else:
            message = "E714 test for object identity should be 'is not'"
        yield match.start(1), message
def comparison_type(logical_line):
    """
    Object type comparisons should always use isinstance() instead of
    comparing types directly.
    Okay: if isinstance(obj, int):
    E721: if type(obj) is type(1):
    When checking if an object is a string, keep in mind that it might be a
    unicode string too! In Python 2.3, str and unicode have a common base
    class, basestring, so you can do:
    Okay: if isinstance(obj, basestring):
    Okay: if type(a1) is type(b1):
    """
    match = COMPARE_TYPE_REGEX.search(logical_line)
    if not match:
        return
    inst = match.group(1)
    # A plain identifier (that is not a singleton) may name a type that
    # isinstance() cannot easily express; leave those comparisons alone.
    if inst and isidentifier(inst) and inst not in SINGLETONS:
        return
    yield match.start(), "E721 do not compare types, use 'isinstance()'"
def python_3000_has_key(logical_line, noqa):
    r"""
    The {}.has_key() method is removed in the Python 3.
    Use the 'in' operation instead.
    Okay: if "alph" in d:\n print d["alph"]
    W601: assert d.has_key('alph')
    """
    if noqa:
        return
    offset = logical_line.find('.has_key(')
    if offset != -1:
        yield offset, "W601 .has_key() is deprecated, use 'in'"
def python_3000_raise_comma(logical_line):
    """
    When raising an exception, use "raise ValueError('message')"
    instead of the older form "raise ValueError, 'message'".
    The paren-using form is preferred because when the exception arguments
    are long or include string formatting, you don't need to use line
    continuation characters thanks to the containing parentheses. The older
    form is removed in Python 3.
    Okay: raise DummyError("Message")
    W602: raise DummyError, "Message"
    """
    match = RAISE_COMMA_REGEX.match(logical_line)
    if not match:
        return
    if RERAISE_COMMA_REGEX.match(logical_line):
        # The three-argument re-raise form is deliberately left alone.
        return
    yield match.end() - 1, "W602 deprecated form of raising exception"
def python_3000_not_equal(logical_line):
    """
    != can also be written <>, but this is an obsolete usage kept for
    backwards compatibility only. New code should always use !=.
    The older syntax is removed in Python 3.
    Okay: if a != 'no':
    W603: if a <> 'no':
    """
    offset = logical_line.find('<>')
    if offset != -1:
        yield offset, "W603 '<>' is deprecated, use '!='"
def python_3000_backticks(logical_line):
    """
    Backticks are removed in Python 3.
    Use repr() instead.
    Okay: val = repr(1 + 2)
    W604: val = `1 + 2`
    """
    offset = logical_line.find('`')
    if offset != -1:
        yield offset, "W604 backticks are deprecated, use 'repr()'"
##############################################################################
# Helper functions
##############################################################################
if '' == ''.encode():
    # Python 2: str and bytes are the same type, so ''.encode() compares
    # equal to '' and the implicit-encoding helpers below are selected.
    def readlines(filename):
        # Text-mode read with the platform default (implicit) encoding.
        f = open(filename)
        try:
            return f.readlines()
        finally:
            f.close()
    # Python 2 str has no isidentifier(); approximate it with a regex.
    isidentifier = re.compile(r'[a-zA-Z_]\w*').match
    stdin_get_value = sys.stdin.read
else:
    # Python 3
    def readlines(filename):
        # Open in binary first so the declared source encoding can be
        # detected (PEP 263) before decoding the file.
        f = open(filename, 'rb')
        try:
            coding, lines = tokenize.detect_encoding(f.readline)
            # detect_encoding already consumed up to two lines; decode
            # those and read the rest through a text wrapper.
            f = TextIOWrapper(f, coding, line_buffering=True)
            return [l.decode(coding) for l in lines] + f.readlines()
        except (LookupError, SyntaxError, UnicodeError):
            f.close()
            # Fall back if files are improperly declared
            f = open(filename, encoding='latin-1')
            return f.readlines()
        finally:
            f.close()
    isidentifier = str.isidentifier
    def stdin_get_value():
        # Read stdin as text, ignoring undecodable bytes.
        return TextIOWrapper(sys.stdin.buffer, errors='ignore').read()
readlines.__doc__ = " Read the source code."
# Matches '# noqa' / '# nopep8' markers (case-insensitive) on a line.
noqa = re.compile(r'# no(?:qa|pep8)\b', re.I).search
def expand_indent(line):
r"""
Return the amount of indentation.
Tabs are expanded to the next multiple of 8.
>>> expand_indent(' ')
4
>>> expand_indent('\t')
8
>>> expand_indent(' \t')
8
>>> expand_indent(' \t')
8
>>> expand_indent(' \t')
16
"""
if '\t' not in line:
return len(line) - len(line.lstrip())
result = 0
for char in line:
if char == '\t':
result = result // 8 * 8 + 8
elif char == ' ':
result += 1
else:
break
return result
def mute_string(text):
"""
Replace contents with 'xxx' to prevent syntax matching.
>>> mute_string('"abc"')
'"xxx"'
>>> mute_string("'''abc'''")
"'''xxx'''"
>>> mute_string("r'abc'")
"r'xxx'"
"""
# String modifiers (e.g. u or r)
start = text.index(text[-1]) + 1
end = len(text) - 1
# Triple quotes
if text[-3:] in ('"""', "'''"):
start += 2
end -= 2
return text[:start] + 'x' * (end - start) + text[end:]
def parse_udiff(diff, patterns=None, parent='.'):
"""Return a dictionary of matching lines."""
# For each file of the diff, the entry key is the filename,
# and the value is a set of row numbers to consider.
rv = {}
path = nrows = None
for line in diff.splitlines():
if nrows:
if line[:1] != '-':
nrows -= 1
continue
if line[:3] == '@@ ':
hunk_match = HUNK_REGEX.match(line)
row, nrows = [int(g or '1') for g in hunk_match.groups()]
rv[path].update(range(row, row + nrows))
elif line[:3] == '+++':
path = line[4:].split('\t', 1)[0]
if path[:2] == 'b/':
path = path[2:]
rv[path] = set()
return dict([(os.path.join(parent, p), rows)
for (p, rows) in rv.items()
if rows and filename_match(p, patterns)])
def normalize_paths(value, parent=os.curdir):
"""Parse a comma-separated list of paths.
Return a list of absolute paths.
"""
if not value or isinstance(value, list):
return value
paths = []
for path in value.split(','):
if '/' in path:
path = os.path.abspath(os.path.join(parent, path))
paths.append(path.rstrip('/'))
return paths
def filename_match(filename, patterns, default=True):
"""
Check if patterns contains a pattern that matches filename.
If patterns is unspecified, this always returns True.
"""
if not patterns:
return default
return any(fnmatch(filename, pattern) for pattern in patterns)
##############################################################################
# Framework to run all checks
##############################################################################
_checks = {'physical_line': {}, 'logical_line': {}, 'tree': {}}
def register_check(check, codes=None):
"""
Register a new check object.
"""
def _add_check(check, kind, codes, args):
if check in _checks[kind]:
_checks[kind][check][0].extend(codes or [])
else:
_checks[kind][check] = (codes or [''], args)
if inspect.isfunction(check):
args = inspect.getargspec(check)[0]
if args and args[0] in ('physical_line', 'logical_line'):
if codes is None:
codes = ERRORCODE_REGEX.findall(check.__doc__ or '')
_add_check(check, args[0], codes, args)
elif inspect.isclass(check):
if inspect.getargspec(check.__init__)[0][:2] == ['self', 'tree']:
_add_check(check, 'tree', codes, None)
def init_checks_registry():
"""
Register all globally visible functions where the first argument name
is 'physical_line' or 'logical_line'.
"""
mod = inspect.getmodule(register_check)
for (name, function) in inspect.getmembers(mod, inspect.isfunction):
register_check(function)
init_checks_registry()
class Checker(object):
"""
Load a Python source file, tokenize it, check coding style.
"""
def __init__(self, filename=None, lines=None,
options=None, report=None, **kwargs):
if options is None:
options = StyleGuide(kwargs).options
else:
assert not kwargs
self._io_error = None
self._physical_checks = options.physical_checks
self._logical_checks = options.logical_checks
self._ast_checks = options.ast_checks
self.max_line_length = options.max_line_length
self.multiline = False # in a multiline string?
self.hang_closing = options.hang_closing
self.verbose = options.verbose
self.filename = filename
if filename is None:
self.filename = 'stdin'
self.lines = lines or []
elif filename == '-':
self.filename = 'stdin'
self.lines = stdin_get_value().splitlines(True)
elif lines is None:
try:
self.lines = readlines(filename)
except IOError:
(exc_type, exc) = sys.exc_info()[:2]
self._io_error = '%s: %s' % (exc_type.__name__, exc)
self.lines = []
else:
self.lines = lines
if self.lines:
ord0 = ord(self.lines[0][0])
if ord0 in (0xef, 0xfeff): # Strip the UTF-8 BOM
if ord0 == 0xfeff:
self.lines[0] = self.lines[0][1:]
elif self.lines[0][:3] == '\xef\xbb\xbf':
self.lines[0] = self.lines[0][3:]
self.report = report or options.report
self.report_error = self.report.error
def report_invalid_syntax(self):
(exc_type, exc) = sys.exc_info()[:2]
if len(exc.args) > 1:
offset = exc.args[1]
if len(offset) > 2:
offset = offset[1:3]
else:
offset = (1, 0)
self.report_error(offset[0], offset[1] or 0,
'E901 %s: %s' % (exc_type.__name__, exc.args[0]),
self.report_invalid_syntax)
report_invalid_syntax.__doc__ = " Check if the syntax is valid."
def readline(self):
"""
Get the next line from the input buffer.
"""
self.line_number += 1
if self.line_number > len(self.lines):
return ''
line = self.lines[self.line_number - 1]
if self.indent_char is None and line[:1] in WHITESPACE:
self.indent_char = line[0]
return line
def run_check(self, check, argument_names):
"""
Run a check plugin.
"""
arguments = []
for name in argument_names:
arguments.append(getattr(self, name))
return check(*arguments)
def check_physical(self, line):
"""
Run all physical checks on a raw input line.
"""
self.physical_line = line
for name, check, argument_names in self._physical_checks:
result = self.run_check(check, argument_names)
if result is not None:
(offset, text) = result
self.report_error(self.line_number, offset, text, check)
if text[:4] == 'E101':
self.indent_char = line[0]
def build_tokens_line(self):
"""
Build a logical line from tokens.
"""
self.mapping = []
logical = []
comments = []
length = 0
previous = None
for token in self.tokens:
(token_type, text) = token[0:2]
if token_type == tokenize.COMMENT:
comments.append(text)
continue
if token_type in SKIP_TOKENS:
continue
if token_type == tokenize.STRING:
text = mute_string(text)
if previous:
(end_row, end) = previous[3]
(start_row, start) = token[2]
if end_row != start_row: # different row
prev_text = self.lines[end_row - 1][end - 1]
if prev_text == ',' or (prev_text not in '{[('
and text not in '}])'):
logical.append(' ')
length += 1
elif end != start: # different column
fill = self.lines[end_row - 1][end:start]
logical.append(fill)
length += len(fill)
self.mapping.append((length, token))
logical.append(text)
length += len(text)
previous = token
self.logical_line = ''.join(logical)
self.noqa = comments and noqa(''.join(comments))
# With Python 2, if the line ends with '\r\r\n' the assertion fails
# assert self.logical_line.strip() == self.logical_line
def check_logical(self):
"""
Build a line from tokens and run all logical checks on it.
"""
self.build_tokens_line()
self.report.increment_logical_line()
token0 = self.mapping[0][1] if self.mapping else self.tokens[0]
first_line = self.lines[token0[2][0] - 1]
indent = first_line[:token0[2][1]]
self.indent_level = expand_indent(indent)
if self.verbose >= 2:
print(self.logical_line[:80].rstrip())
for name, check, argument_names in self._logical_checks:
if self.verbose >= 4:
print(' ' + name)
for result in self.run_check(check, argument_names) or ():
(offset, text) = result
if isinstance(offset, tuple):
(orig_number, orig_offset) = offset
else:
orig_number = token0[2][0]
orig_offset = token0[2][1] + offset
for token_offset, token in self.mapping:
if offset >= token_offset:
orig_number = token[2][0]
orig_offset = (token[2][1] + offset - token_offset)
self.report_error(orig_number, orig_offset, text, check)
if self.logical_line:
self.previous_indent_level = self.indent_level
self.previous_logical = self.logical_line
self.tokens = []
def check_ast(self):
try:
tree = compile(''.join(self.lines), '', 'exec', PyCF_ONLY_AST)
except (SyntaxError, TypeError):
return self.report_invalid_syntax()
for name, cls, __ in self._ast_checks:
checker = cls(tree, self.filename)
for lineno, offset, text, check in checker.run():
if not self.lines or not noqa(self.lines[lineno - 1]):
self.report_error(lineno, offset, text, check)
def generate_tokens(self):
if self._io_error:
self.report_error(1, 0, 'E902 %s' % self._io_error, readlines)
tokengen = tokenize.generate_tokens(self.readline)
try:
for token in tokengen:
self.maybe_check_physical(token)
yield token
except (SyntaxError, tokenize.TokenError):
self.report_invalid_syntax()
def maybe_check_physical(self, token):
"""
If appropriate (based on token), check current physical line(s).
"""
# Called after every token, but act only on end of line.
if token[0] in (tokenize.NEWLINE, tokenize.NL):
# Obviously, a newline token ends a single physical line.
self.check_physical(token[4])
elif token[0] == tokenize.STRING and '\n' in token[1]:
# Less obviously, a string that contains newlines is a
# multiline string, either triple-quoted or with internal
# newlines backslash-escaped. Check every physical line in the
# string *except* for the last one: its newline is outside of
# the multiline string, so we consider it a regular physical
# line, and will check it like any other physical line.
#
# Subtleties:
# - we don't *completely* ignore the last line; if it contains
# the magical "# noqa" comment, we disable all physical
# checks for the entire multiline string
# - have to wind self.line_number back because initially it
# points to the last line of the string, and we want
# check_physical() to give accurate feedback
if noqa(token[4]):
return
self.multiline = True
self.line_number = token[2][0]
for line in token[1].split('\n')[:-1]:
self.check_physical(line + '\n')
self.line_number += 1
self.multiline = False
def check_all(self, expected=None, line_offset=0):
"""
Run all checks on the input file.
"""
self.report.init_file(self.filename, self.lines, expected, line_offset)
if self._ast_checks:
self.check_ast()
self.line_number = 0
self.indent_char = None
self.indent_level = 0
self.previous_indent_level = 0
self.previous_logical = ''
self.tokens = []
self.blank_lines = blank_lines_before_comment = 0
parens = 0
for token in self.generate_tokens():
self.tokens.append(token)
token_type, text = token[0:2]
if self.verbose >= 3:
if token[2][0] == token[3][0]:
pos = '[%s:%s]' % (token[2][1] or '', token[3][1])
else:
pos = 'l.%s' % token[3][0]
print('l.%s\t%s\t%s\t%r' %
(token[2][0], pos, tokenize.tok_name[token[0]], text))
if token_type == tokenize.OP:
if text in '([{':
parens += 1
elif text in '}])':
parens -= 1
elif not parens:
if token_type == tokenize.NEWLINE:
if self.blank_lines < blank_lines_before_comment:
self.blank_lines = blank_lines_before_comment
self.check_logical()
self.blank_lines = blank_lines_before_comment = 0
elif token_type == tokenize.NL:
if len(self.tokens) == 1:
# The physical line contains only this token.
self.blank_lines += 1
del self.tokens[0]
else:
self.check_logical()
elif token_type == tokenize.COMMENT and len(self.tokens) == 1:
if blank_lines_before_comment < self.blank_lines:
blank_lines_before_comment = self.blank_lines
self.blank_lines = 0
if COMMENT_WITH_NL:
# The comment also ends a physical line
text = text.rstrip('\r\n')
self.tokens = [(token_type, text) + token[2:]]
self.check_logical()
return self.report.get_file_results()
class BaseReport(object):
"""Collect the results of the checks."""
print_filename = False
def __init__(self, options):
self._benchmark_keys = options.benchmark_keys
self._ignore_code = options.ignore_code
# Results
self.elapsed = 0
self.total_errors = 0
self.counters = dict.fromkeys(self._benchmark_keys, 0)
self.messages = {}
def start(self):
"""Start the timer."""
self._start_time = time.time()
def stop(self):
"""Stop the timer."""
self.elapsed = time.time() - self._start_time
def init_file(self, filename, lines, expected, line_offset):
"""Signal a new file."""
self.filename = filename
self.lines = lines
self.expected = expected or ()
self.line_offset = line_offset
self.file_errors = 0
self.counters['files'] += 1
self.counters['physical lines'] += len(lines)
def increment_logical_line(self):
"""Signal a new logical line."""
self.counters['logical lines'] += 1
def error(self, line_number, offset, text, check):
"""Report an error, according to options."""
code = text[:4]
if self._ignore_code(code):
return
if code in self.counters:
self.counters[code] += 1
else:
self.counters[code] = 1
self.messages[code] = text[5:]
# Don't care about expected errors or warnings
if code in self.expected:
return
if self.print_filename and not self.file_errors:
print(self.filename)
self.file_errors += 1
self.total_errors += 1
return code
def get_file_results(self):
"""Return the count of errors and warnings for this file."""
return self.file_errors
def get_count(self, prefix=''):
"""Return the total count of errors and warnings."""
return sum([self.counters[key]
for key in self.messages if key.startswith(prefix)])
def get_statistics(self, prefix=''):
"""
Get statistics for message codes that start with the prefix.
prefix='' matches all errors and warnings
prefix='E' matches all errors
prefix='W' matches all warnings
prefix='E4' matches all errors that have to do with imports
"""
return ['%-7s %s %s' % (self.counters[key], key, self.messages[key])
for key in sorted(self.messages) if key.startswith(prefix)]
def print_statistics(self, prefix=''):
"""Print overall statistics (number of errors and warnings)."""
for line in self.get_statistics(prefix):
print(line)
def print_benchmark(self):
"""Print benchmark numbers."""
print('%-7.2f %s' % (self.elapsed, 'seconds elapsed'))
if self.elapsed:
for key in self._benchmark_keys:
print('%-7d %s per second (%d total)' %
(self.counters[key] / self.elapsed, key,
self.counters[key]))
class FileReport(BaseReport):
"""Collect the results of the checks and print only the filenames."""
print_filename = True
class StandardReport(BaseReport):
"""Collect and print the results of the checks."""
def __init__(self, options):
super(StandardReport, self).__init__(options)
self._fmt = REPORT_FORMAT.get(options.format.lower(),
options.format)
self._repeat = options.repeat
self._show_source = options.show_source
self._show_pep8 = options.show_pep8
def init_file(self, filename, lines, expected, line_offset):
"""Signal a new file."""
self._deferred_print = []
return super(StandardReport, self).init_file(
filename, lines, expected, line_offset)
def error(self, line_number, offset, text, check):
"""Report an error, according to options."""
code = super(StandardReport, self).error(line_number, offset,
text, check)
if code and (self.counters[code] == 1 or self._repeat):
self._deferred_print.append(
(line_number, offset, code, text[5:], check.__doc__))
return code
def get_file_results(self):
"""Print the result and return the overall count for this file."""
self._deferred_print.sort()
for line_number, offset, code, text, doc in self._deferred_print:
print(self._fmt % {
'path': self.filename,
'row': self.line_offset + line_number, 'col': offset + 1,
'code': code, 'text': text,
})
if self._show_source:
if line_number > len(self.lines):
line = ''
else:
line = self.lines[line_number - 1]
print(line.rstrip())
print(' ' * offset + '^')
if self._show_pep8 and doc:
print(doc.lstrip('\n').rstrip())
return self.file_errors
class DiffReport(StandardReport):
"""Collect and print the results for the changed lines only."""
def __init__(self, options):
super(DiffReport, self).__init__(options)
self._selected = options.selected_lines
def error(self, line_number, offset, text, check):
if line_number not in self._selected[self.filename]:
return
return super(DiffReport, self).error(line_number, offset, text, check)
class StyleGuide(object):
"""Initialize a PEP-8 instance with few options."""
def __init__(self, *args, **kwargs):
# build options from the command line
self.checker_class = kwargs.pop('checker_class', Checker)
parse_argv = kwargs.pop('parse_argv', False)
config_file = kwargs.pop('config_file', None)
parser = kwargs.pop('parser', None)
# build options from dict
options_dict = dict(*args, **kwargs)
arglist = None if parse_argv else options_dict.get('paths', None)
options, self.paths = process_options(
arglist, parse_argv, config_file, parser)
if options_dict:
options.__dict__.update(options_dict)
if 'paths' in options_dict:
self.paths = options_dict['paths']
self.runner = self.input_file
self.options = options
if not options.reporter:
options.reporter = BaseReport if options.quiet else StandardReport
options.select = tuple(options.select or ())
if not (options.select or options.ignore or
options.testsuite or options.doctest) and DEFAULT_IGNORE:
# The default choice: ignore controversial checks
options.ignore = tuple(DEFAULT_IGNORE.split(','))
else:
# Ignore all checks which are not explicitly selected
options.ignore = ('',) if options.select else tuple(options.ignore)
options.benchmark_keys = BENCHMARK_KEYS[:]
options.ignore_code = self.ignore_code
options.physical_checks = self.get_checks('physical_line')
options.logical_checks = self.get_checks('logical_line')
options.ast_checks = self.get_checks('tree')
self.init_report()
def init_report(self, reporter=None):
"""Initialize the report instance."""
self.options.report = (reporter or self.options.reporter)(self.options)
return self.options.report
def check_files(self, paths=None):
"""Run all checks on the paths."""
if paths is None:
paths = self.paths
report = self.options.report
runner = self.runner
report.start()
try:
for path in paths:
if os.path.isdir(path):
self.input_dir(path)
elif not self.excluded(path):
runner(path)
except KeyboardInterrupt:
print('... stopped')
report.stop()
return report
def input_file(self, filename, lines=None, expected=None, line_offset=0):
"""Run all checks on a Python source file."""
if self.options.verbose:
print('checking %s' % filename)
fchecker = self.checker_class(
filename, lines=lines, options=self.options)
return fchecker.check_all(expected=expected, line_offset=line_offset)
def input_dir(self, dirname):
"""Check all files in this directory and all subdirectories."""
dirname = dirname.rstrip('/')
if self.excluded(dirname):
return 0
counters = self.options.report.counters
verbose = self.options.verbose
filepatterns = self.options.filename
runner = self.runner
for root, dirs, files in os.walk(dirname):
if verbose:
print('directory ' + root)
counters['directories'] += 1
for subdir in sorted(dirs):
if self.excluded(subdir, root):
dirs.remove(subdir)
for filename in sorted(files):
# contain a pattern that matches?
if ((filename_match(filename, filepatterns) and
not self.excluded(filename, root))):
runner(os.path.join(root, filename))
def excluded(self, filename, parent=None):
"""
Check if options.exclude contains a pattern that matches filename.
"""
if not self.options.exclude:
return False
basename = os.path.basename(filename)
if filename_match(basename, self.options.exclude):
return True
if parent:
filename = os.path.join(parent, filename)
filename = os.path.abspath(filename)
return filename_match(filename, self.options.exclude)
def ignore_code(self, code):
"""
Check if the error code should be ignored.
If 'options.select' contains a prefix of the error code,
return False. Else, if 'options.ignore' contains a prefix of
the error code, return True.
"""
if len(code) < 4 and any(s.startswith(code)
for s in self.options.select):
return False
return (code.startswith(self.options.ignore) and
not code.startswith(self.options.select))
def get_checks(self, argument_name):
"""
Find all globally visible functions where the first argument name
starts with argument_name and which contain selected tests.
"""
checks = []
for check, attrs in _checks[argument_name].items():
(codes, args) = attrs
if any(not (code and self.ignore_code(code)) for code in codes):
checks.append((check.__name__, check, args))
return sorted(checks)
def get_parser(prog='pep8', version=__version__):
parser = OptionParser(prog=prog, version=version,
usage="%prog [options] input ...")
parser.config_options = [
'exclude', 'filename', 'select', 'ignore', 'max-line-length',
'hang-closing', 'count', 'format', 'quiet', 'show-pep8',
'show-source', 'statistics', 'verbose']
parser.add_option('-v', '--verbose', default=0, action='count',
help="print status messages, or debug with -vv")
parser.add_option('-q', '--quiet', default=0, action='count',
help="report only file names, or nothing with -qq")
parser.add_option('-r', '--repeat', default=True, action='store_true',
help="(obsolete) show all occurrences of the same error")
parser.add_option('--first', action='store_false', dest='repeat',
help="show first occurrence of each error")
parser.add_option('--exclude', metavar='patterns', default=DEFAULT_EXCLUDE,
help="exclude files or directories which match these "
"comma separated patterns (default: %default)")
parser.add_option('--filename', metavar='patterns', default='*.py',
help="when parsing directories, only check filenames "
"matching these comma separated patterns "
"(default: %default)")
parser.add_option('--select', metavar='errors', default='',
help="select errors and warnings (e.g. E,W6)")
parser.add_option('--ignore', metavar='errors', default='',
help="skip errors and warnings (e.g. E4,W)")
parser.add_option('--show-source', action='store_true',
help="show source code for each error")
parser.add_option('--show-pep8', action='store_true',
help="show text of PEP 8 for each error "
"(implies --first)")
parser.add_option('--statistics', action='store_true',
help="count errors and warnings")
parser.add_option('--count', action='store_true',
help="print total number of errors and warnings "
"to standard error and set exit code to 1 if "
"total is not null")
parser.add_option('--max-line-length', type='int', metavar='n',
default=MAX_LINE_LENGTH,
help="set maximum allowed line length "
"(default: %default)")
parser.add_option('--hang-closing', action='store_true',
help="hang closing bracket instead of matching "
"indentation of opening bracket's line")
parser.add_option('--format', metavar='format', default='default',
help="set the error format [default|pylint|<custom>]")
parser.add_option('--diff', action='store_true',
help="report only lines changed according to the "
"unified diff received on STDIN")
group = parser.add_option_group("Testing Options")
if os.path.exists(TESTSUITE_PATH):
group.add_option('--testsuite', metavar='dir',
help="run regression tests from dir")
group.add_option('--doctest', action='store_true',
help="run doctest on myself")
group.add_option('--benchmark', action='store_true',
help="measure processing speed")
return parser
def read_config(options, args, arglist, parser):
"""Read both user configuration and local configuration."""
config = RawConfigParser()
user_conf = options.config
if user_conf and os.path.isfile(user_conf):
if options.verbose:
print('user configuration: %s' % user_conf)
config.read(user_conf)
local_dir = os.curdir
parent = tail = args and os.path.abspath(os.path.commonprefix(args))
while tail:
if config.read([os.path.join(parent, fn) for fn in PROJECT_CONFIG]):
local_dir = parent
if options.verbose:
print('local configuration: in %s' % parent)
break
(parent, tail) = os.path.split(parent)
pep8_section = parser.prog
if config.has_section(pep8_section):
option_list = dict([(o.dest, o.type or o.action)
for o in parser.option_list])
# First, read the default values
(new_options, __) = parser.parse_args([])
# Second, parse the configuration
for opt in config.options(pep8_section):
if options.verbose > 1:
print(" %s = %s" % (opt, config.get(pep8_section, opt)))
if opt.replace('_', '-') not in parser.config_options:
print("Unknown option: '%s'\n not in [%s]" %
(opt, ' '.join(parser.config_options)))
sys.exit(1)
normalized_opt = opt.replace('-', '_')
opt_type = option_list[normalized_opt]
if opt_type in ('int', 'count'):
value = config.getint(pep8_section, opt)
elif opt_type == 'string':
value = config.get(pep8_section, opt)
if normalized_opt == 'exclude':
value = normalize_paths(value, local_dir)
else:
assert opt_type in ('store_true', 'store_false')
value = config.getboolean(pep8_section, opt)
setattr(new_options, normalized_opt, value)
# Third, overwrite with the command-line options
(options, __) = parser.parse_args(arglist, values=new_options)
options.doctest = options.testsuite = False
return options
def process_options(arglist=None, parse_argv=False, config_file=None,
parser=None):
"""Process options passed either via arglist or via command line args."""
if not parser:
parser = get_parser()
if not parser.has_option('--config'):
if config_file is True:
config_file = DEFAULT_CONFIG
group = parser.add_option_group("Configuration", description=(
"The project options are read from the [%s] section of the "
"tox.ini file or the setup.cfg file located in any parent folder "
"of the path(s) being processed. Allowed options are: %s." %
(parser.prog, ', '.join(parser.config_options))))
group.add_option('--config', metavar='path', default=config_file,
help="user config file location (default: %default)")
# Don't read the command line if the module is used as a library.
if not arglist and not parse_argv:
arglist = []
# If parse_argv is True and arglist is None, arguments are
# parsed from the command line (sys.argv)
(options, args) = parser.parse_args(arglist)
options.reporter = None
if options.ensure_value('testsuite', False):
args.append(options.testsuite)
elif not options.ensure_value('doctest', False):
if parse_argv and not args:
if options.diff or any(os.path.exists(name)
for name in PROJECT_CONFIG):
args = ['.']
else:
parser.error('input not specified')
options = read_config(options, args, arglist, parser)
options.reporter = parse_argv and options.quiet == 1 and FileReport
options.filename = options.filename and options.filename.split(',')
options.exclude = normalize_paths(options.exclude)
options.select = options.select and options.select.split(',')
options.ignore = options.ignore and options.ignore.split(',')
if options.diff:
options.reporter = DiffReport
stdin = stdin_get_value()
options.selected_lines = parse_udiff(stdin, options.filename, args[0])
args = sorted(options.selected_lines)
return options, args
def _main():
"""Parse options and run checks on Python source."""
pep8style = StyleGuide(parse_argv=True, config_file=True)
options = pep8style.options
if options.doctest or options.testsuite:
#disabled test suite
#from testsuite.support import run_tests
#report = run_tests(pep8style)
pass
else:
report = pep8style.check_files()
if options.statistics:
report.print_statistics()
if options.benchmark:
report.print_benchmark()
if options.testsuite and not options.quiet:
#disabled test suite
#report.print_results()
pass
if report.total_errors:
if options.count:
sys.stderr.write(str(report.total_errors) + '\n')
sys.exit(1)
# Script entry point: parse sys.argv and exit with the appropriate status.
if __name__ == '__main__':
    _main()
|
#!/usr/bin/env python3
# -*- coding:UTF-8 -*-
# Copyright (c) 2019 Nicolas Iooss
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
"""Resolve a list DNS domains with a caching directory
Usage example to resolve domains using Google's DNS-over-HTTPS API, a list in
domains.txt, a cache in directory dns/, writing results in results.out.txt:
./resolve_domains.py -gMOs -o dns_resolutions.out.txt -d cache_dns domains.txt
@author: Nicolas Iooss
@license: MIT
"""
import argparse
import binascii
import ipaddress
import itertools
import json
from pathlib import Path
import random
import re
import ssl
import struct
import time
import urllib.parse
import urllib.request
try:
import dns.resolver
HAVE_DNSPYTHON = True
except ImportError:
HAVE_DNSPYTHON = False
else:
import dns
import dns.flags
import dns.rcode
import dns.rdataclass
import dns.rdatatype
# Types of DNS records that are resolved
DNS_TYPES = ('A', 'AAAA', 'MX', 'NS', 'PTR', 'TXT', 'ANY')
DNS_SRV_TYPES = ('NS', 'SRV', 'TXT', 'ANY')
# Identifiers of record data types
DNS_RDATA_TYPES = {
'NONE': 0,
'A': 1,
'NS': 2,
'MD': 3,
'MF': 4,
'CNAME': 5,
'SOA': 6,
'MB': 7,
'MG': 8,
'MR': 9,
'NULL': 10,
'WKS': 11,
'PTR': 12,
'HINFO': 13,
'MINFO': 14,
'MX': 15,
'TXT': 16,
'RP': 17,
'AFSDB': 18,
'X25': 19,
'ISDN': 20,
'RT': 21,
'NSAP': 22,
'NSAP_PTR': 23,
'SIG': 24,
'KEY': 25,
'PX': 26,
'GPOS': 27,
'AAAA': 28,
'LOC': 29,
'NXT': 30,
'SRV': 33,
'NAPTR': 35,
'KX': 36,
'CERT': 37,
'A6': 38,
'DNAME': 39,
'OPT': 41,
'APL': 42,
'DS': 43,
'SSHFP': 44,
'IPSECKEY': 45,
'RRSIG': 46,
'NSEC': 47,
'DNSKEY': 48,
'DHCID': 49,
'NSEC3': 50,
'NSEC3PARAM': 51,
'TLSA': 52,
'HIP': 55,
'CDS': 59,
'CDNSKEY': 60,
'CSYNC': 62,
'SPF': 99,
'UNSPEC': 103,
'EUI48': 108,
'EUI64': 109,
'TKEY': 249,
'TSIG': 250,
'IXFR': 251,
'AXFR': 252,
'MAILB': 253,
'MAILA': 254,
'ANY': 255,
'URI': 256,
'CAA': 257,
'AVC': 258,
'TA': 32768,
'DLV': 32769,
'RESERVED-65534': 65534,
}
DNS_TYPE_ITOA = dict((v, k) for k, v in DNS_RDATA_TYPES.items())
DNS_RESPONSE_CODES = {
0: 'NOERROR', # DNS Query completed successfully
1: 'FORMERR', # DNS Query Format Error
2: 'SERVFAIL', # Server failed to complete the DNS request
3: 'NXDOMAIN', # Domain name does not exist
4: 'NOTIMP', # Function not implemented
5: 'REFUSED', # The server refused to answer for the query
}
# Well-known prefixes seen on domain names
WELLKNOWN_PREFIXES = (
'_amazonses',
'_dmarc', # DMARC, Domain-based Message Authentication, Reporting & Conformance
'_domainkey', # DKIM, DomainKeys Identified Email
'_ipp._tcp', # IPP, Internet Printing Protocol
'_kerberos._tcp',
'_kerberos._tcp.dc._msdcs',
'_ldap._tcp', # LDAP, Lightweight Directory Access Protocol
'_ldap._tcp.dc._msdcs',
'_ldap._tcp.gc._msdcs',
'_ldap._tcp.pdc._msdcs',
'_ldaps._tcp',
'_msdcs', # Microsoft Domain Controller Server
'_mta-sts', # MTA-STS, SMTP Mail Transfer Agent Strict Transport Security
'_psl', # PSL, Public Suffix List
'_sip._tcp',
'_sip._udp',
'_sips._tcp',
'a',
'about',
'account',
'adm',
'admin',
'agent',
'alpha',
'answers',
'api',
'app',
'app1',
'archive',
'assets',
'auth',
'autodiscover',
'b',
'back',
'backup',
'bck',
'beta',
'bit',
'bits',
'blog',
'bot',
'business',
'c',
'cache',
'calendar',
'cdn',
'center',
'chat',
'cloud',
'club',
'code',
'collect',
'collectd',
'com',
'commute',
'connect',
'console',
'corp',
'cpanel',
'crl',
'cvs',
'data',
'database',
'db',
'dc1',
'dc2',
'demo',
'dev',
'developer',
'dmz',
'dns',
'dns1',
'dns2',
'doc',
'docs',
'domains',
'en',
'esxi',
'eu',
'euro',
'exchange',
'ext',
'external',
'extra',
'extranet',
'faq',
'filer',
'files',
'forum',
'fr',
'free',
'ftp',
'fw',
'gc._msdcs',
'geo',
'git',
'gitlab',
'google._domainkey',
'gp',
'grafana',
'graph',
'group',
'guide',
'gw',
'help',
'helpdesk',
'hg',
'icinga',
'icingaweb',
'identity',
'idp',
'imap',
'ins',
'inside',
'int',
'intra',
'intranet',
'irc',
'jenkins',
'jira',
'job',
'jobs',
'join',
'k1._domainkey', # Mailchimp DKIM key
'ldap',
'ldaps',
'learn',
'list',
'lists',
'local',
'log',
'login',
'lyncdiscover',
'm',
'mail',
'mail1',
'mail2',
'master',
'matrix',
'mattermost',
'mdm',
'media',
'mf1',
'mfa',
'mobile',
'mobility',
'msoid',
'mssql',
'mx1',
'mx2',
'my',
'mysql',
'nagios',
'name',
'nas',
'net',
'new',
'news',
'ng',
'ns1',
'ns2',
'ntp',
'oauth',
'old',
'open',
'openpgpkey',
'opensource',
'org',
'outlook',
'pass',
'pdns',
'phone',
'phpmyadmin',
'pki',
'pop',
'pop3',
'pop3s',
'portal',
'prd',
'preprod',
'prod',
'product',
'products',
'proxmox',
'proxy',
'public',
'publish',
'qat',
'qual',
'qualification',
'queue',
'rabbitmq',
'random',
'redis',
'redmine',
'register',
'registration',
'registry',
'release',
'releases',
'repo',
'research',
'rest',
'rsa',
'rss',
'sap',
'search',
'secure',
'share',
'sharing',
'shop',
'sign-in',
'signin',
'sip',
'smtp',
'smtp-in',
'smtp-out',
'smtp._domainkey',
'smtp1',
'smtp1._domainkey',
'smtp2',
'smtps',
'sonar',
'spf',
'splunk',
'spot',
'sql',
'ssl',
'sso',
'staff',
'stage',
'staging',
'stat',
'static',
'stats',
'sts',
'subversion',
'support',
'svn',
'sync',
'test',
'test1',
'tls',
'token',
'tool',
'tools',
'torrent',
'tracker',
'tv',
'uat',
'uk',
'us',
'v2',
'vault',
'voip',
'vpn',
'web',
'webapp',
'webchat',
'webmail',
'wifi',
'wiki',
'wildcard',
'wireless',
'www',
'www1',
'www2',
'www3',
'xyz',
'zammad',
'zero',
'zeromq',
'zimbra',
)
def get_comment_for_domain(domain):
    """Describe a domain name to produce a comment.

    domain is expected to be fully-qualified with a trailing dot
    (e.g. 'www.example.com.'), as found in DNS answers.
    Return a short provider description string, or None when unknown.
    """
    if domain.endswith((
            '.akamaiedge.net.',
            '.akamaized.net.',  # fixed: trailing dot was missing, so it never matched
            '.edgekey.net.',
            '.static.akamaitechnologies.com.')):
        return 'Akamai CDN'
    if domain.endswith('.amazonaws.com.'):
        return 'Amazon AWS'
    if domain.endswith('.cdn.cloudflare.net.'):
        return 'Cloudflare CDN'
    if domain.endswith('.mail.gandi.net.') or domain == 'webmail.gandi.net.':
        return 'Gandi mail hosting'
    if domain == 'webredir.vip.gandi.net.':
        return 'Gandi web forwarding hosting'
    if domain == 'dkim.mcsv.net.':
        return 'Mailchimp mail sender'
    if domain.endswith('.azurewebsites.net.'):
        return 'Microsoft Azure hosting'
    if domain.endswith('.lync.com.'):
        return 'Microsoft Lync'
    if domain == 'clientconfig.microsoftonline-p.net.':
        # https://docs.microsoft.com/en-gb/office365/enterprise/external-domain-name-system-records
        return 'Microsoft Office 365 tenant'
    if domain.endswith(('.office.com.', '.office365.com.')):
        return 'Microsoft Office 365'
    if domain.endswith('.outlook.com.'):
        return 'Microsoft Outlook mail'
    if domain in ('redirect.ovh.net.', 'ssl0.ovh.net.'):
        return 'OVH mail provider'
    if domain.endswith('.hosting.ovh.net.'):
        return 'OVH shared web hosting'
    if domain.endswith('.rev.sfr.net.'):
        return 'SFR provider'
    return None
def get_comment_for_record(domain, rtype, data):
    """Describe a DNS record to produce a comment.

    domain -- the record name (dot-terminated)
    rtype -- the record type as text ('A', 'MX', 'PTR', 'rPTR', ...)
    data -- the record data as text
    Return a short description string, or None when unknown.
    """
    if rtype == 'PTR':
        # produce the same comment as for the reverse-PTR record
        return get_comment_for_domain(data)
    if rtype == 'CNAME':
        # Try describing the alias target
        return get_comment_for_domain(domain) or get_comment_for_domain(data)
    if rtype == 'MX':
        data = data.lower()
        if data.endswith(('.google.com.', '.googlemail.com.')):
            return 'Google mail server'
        # NB. a duplicated '.outlook.com.' test has been removed here
        if data.endswith('.outlook.com.'):
            return 'Microsoft Outlook mail server'
        if data.endswith('.pphosted.com.'):
            return 'Proofpoint mail server'
        # Try matching the name of MX servers ("priority host" format)
        matches = re.match(r'^[0-9]+\s+(\S+)$', data)
        if matches:
            return get_comment_for_domain(matches.group(1))
    if rtype == 'NS':
        if data.endswith('.gandi.net.'):
            return 'Gandi DNS server'
        if data.endswith('.ovh.net.'):
            return 'OVH DNS server'
    # Fall back to describing the record name itself (A, AAAA, TXT, rPTR...)
    return get_comment_for_domain(domain)
def dns_sortkey(name):
    """Get the sort key of a domain name"""
    # Compare labels from the TLD down so related domains group together.
    # The original string is kept as a tie-breaker: the uppercase spelling of
    # a domain sorts before the lowercase one, while 'a.tld' still comes
    # before 'subdomain.a.tld' (nothing extra is appended to the label list).
    labels = name.lower().split('.')
    labels.reverse()
    return (labels, name)
class Resolver:
    def __init__(self, cache_directory, time_sleep=1, use_google=False, use_cloudflare=False, no_ssl=False):
        """DNS resolver backed by an on-disk cache of JSON responses.

        cache_directory -- pathlib.Path of the directory holding *.json cache files
        time_sleep -- seconds to sleep after each live query (0 disables)
        use_google -- query the https://dns.google.com/ JSON API
        use_cloudflare -- query the https://cloudflare-dns.com/ JSON API
        no_ssl -- disable HTTPS certificate verification for the JSON APIs
        """
        self.cache_directory = cache_directory
        self.time_sleep = time_sleep
        # The two DNS-over-HTTPS backends are mutually exclusive
        assert not (use_google and use_cloudflare)
        self.use_google = use_google
        self.use_cloudflare = use_cloudflare
        self.no_ssl = no_ssl
        # Set of (domain, rtype) pairs already asked, and set of
        # (name, type_text, data) answers; both filled by load_cache()
        self.dns_questions = None
        self.dns_records = None
        self.is_cache_dirty = True
        # Only print the "refusing ANY with dnspython" warning once
        self.has_show_dnspython_any_warning = False
        # Populate dns_questions/dns_records from the cache directory now
        self.load_cache(if_dirty=False)
    def load_cache(self, if_dirty=True):
        """Load cached DNS results from the cache directory

        Rebuilds self.dns_questions (set of (domain, rtype) pairs) and
        self.dns_records (set of (name, type_text, data) triples) from every
        *.json file, one JSON document per line.  PTR answers additionally
        produce synthetic 'rPTR' records mapping hostname -> IP address.
        """
        if if_dirty and not self.is_cache_dirty:
            # Do not reload the cache if it has not been modified
            return
        self.dns_questions = set()
        self.dns_records = set()
        for filepath in self.cache_directory.glob('*.json'):
            with filepath.open(mode='r') as fjson:
                for line in fjson:
                    json_data = json.loads(line)
                    # Add the question to the list of asked ones
                    # (questions are recorded even for failed lookups, so that
                    # resolve_in_cache does not re-ask them)
                    for question in json_data['Question']:
                        self.dns_questions.add(
                            (question['name'].lower().strip('.'), DNS_TYPE_ITOA[question['type']])
                        )
                    # Ignore failed responses
                    rcode_name = DNS_RESPONSE_CODES.get(json_data['Status'])
                    if rcode_name in ('SERVFAIL', 'NXDOMAIN', 'NOTIMP', 'REFUSED'):
                        continue
                    if rcode_name != 'NOERROR':
                        raise ValueError("Invalid status {} ({}) in {}".format(
                            json_data['Status'], rcode_name, repr(filepath)))
                    # Ignore empty responses
                    if 'Answer' not in json_data:
                        continue
                    for answer in json_data['Answer']:
                        asc_type = DNS_TYPE_ITOA[answer['type']]
                        self.dns_records.add((answer['name'], asc_type, answer['data']))
                        # Add fake reverse-PTR entry
                        if asc_type == 'PTR':
                            matches = re.match(
                                r'^([0-9]+)\.([0-9]+)\.([0-9]+)\.([0-9]+)\.in-addr\.arpa\.$',
                                answer['name'], re.I)
                            if matches:
                                # IPv4 PTR record: reverse the 4 labels to get the address
                                ip_addr = '.'.join(matches.groups()[::-1])
                                self.dns_records.add((answer['data'], 'rPTR', ip_addr))
                                continue
                            matches = re.match(r'^(([0-9a-f]+\.){32})ip6\.arpa\.$', answer['name'])
                            if matches:
                                # IPv6 PTR record: the 32 nibbles are stored in
                                # reverse order; rebuild and compress the address
                                packed_addr = binascii.unhexlify(matches.group(1).replace('.', '')[::-1])
                                ip_addr_expanded = ':'.join(
                                    '{:04x}'.format(x) for x in struct.unpack('>8H', packed_addr))
                                ip_addr = ipaddress.IPv6Address(ip_addr_expanded).compressed
                                self.dns_records.add((answer['data'], 'rPTR', ip_addr))
                                continue
                            print("Warning: invalid PTR record name {}".format(repr(answer['name'])))
        self.is_cache_dirty = False
    def merge_cache_files(self):
        """Merge all cache files into one

        Concatenates the deduplicated, sorted JSON lines of every *.json
        cache file into all.json (written atomically through all.json.new),
        then removes the individual files.
        """
        # Load all the JSON records, and deduplicate them
        all_files = set()
        all_lines = set()
        for filepath in self.cache_directory.glob('*.json'):
            all_files.add(filepath)
            with filepath.open(mode='r') as fjson:
                for line in fjson:
                    all_lines.add(line.strip() + '\n')
        all_lines = sorted(all_lines)
        merged_file = self.cache_directory / 'all.json'
        new_merged_file = self.cache_directory / 'all.json.new'
        try:
            with new_merged_file.open(mode='w') as fout:
                fout.write(''.join(all_lines))
        except MemoryError:
            # This can occur with too many domains.
            # In such a situation, do not join the lines
            with new_merged_file.open(mode='w') as fout:
                for line in all_lines:
                    fout.write(line)
        new_merged_file.rename(merged_file)
        # Drop the now-merged per-query files (but never the merged file itself)
        for filepath in all_files:
            if filepath != merged_file:
                filepath.unlink()
def resolve_in_cache(self, domain, rtype):
"""Resolve a domain name, writing the result in a cache file"""
domain = domain.strip('.')
# NB. use dns_questions instead of dns_records in order to perform
# specific queries (A, AAAA, TXT, etc.) even after an ANY query.
if (domain, rtype) in self.dns_questions:
return
cache_file = self.cache_directory / '{}_{}.json'.format(domain, rtype)
if cache_file.exists():
print("Warning: cache file exists for {} <{}> but was not loaded".format(domain, rtype))
return
if self.use_google:
response = self.query_google(domain, rtype)
elif self.use_cloudflare:
response = self.query_cloudflare(domain, rtype)
else:
response = self.query_dns(domain, rtype)
if not response:
return
# Write the cache file
response = response.strip(b'\n')
with cache_file.open(mode='wb') as fout:
fout.write(response)
fout.write(b'\n')
self.is_cache_dirty = True
# Sleep after the DNS query
if self.time_sleep:
# Inform the user that we are sleeping with a small sign
print('-', end='\r')
time.sleep(self.time_sleep)
@staticmethod
def get_ptr_name_for_ip(ip_addr, version=None):
"""Get the PTR domain name matching an IP address"""
if hasattr(ip_addr, 'reverse_pointer'):
# Python 3.5 introduced a property to compute the PTR name
return ip_addr.reverse_pointer
if isinstance(ip_addr, ipaddress.IPv4Address):
return '{0[3]}.{0[2]}.{0[1]}.{0[0]}.in-addr.arpa.'.format(struct.unpack('BBBB', ip_addr.packed))
if isinstance(ip_addr, ipaddress.IPv6Address):
addr_hex = binascii.hexlify(ip_addr.packed).decode('ascii')
return '{}.ip6.arpa.'.format('.'.join(addr_hex[::-1]))
# Here, ip_addr has to be a string.
if version is None:
# Guess the version from the IP address
version = 6 if ':' in ip_addr else 4
if version == 4:
return '{0[3]}.{0[2]}.{0[1]}.{0[0]}.in-addr.arpa.'.format(ip_addr.split('.'))
if version == 6:
addr_hex = binascii.hexlify(ipaddress.IPv6Address(ip_addr).packed).decode('ascii')
return '{}.ip6.arpa.'.format('.'.join(addr_hex[::-1]))
raise ValueError("Unknown IP version {}".format(repr(version)))
def resolve_ip(self, ip_addr, version=None):
"""Resolve an IP address by querying a PTR record"""
domain = self.get_ptr_name_for_ip(ip_addr, version)
return self.resolve_in_cache(domain, 'PTR')
    def query_dns(self, domain, rdtype_text):
        """Perform a live DNS query with dnspython.

        Returns the answer encoded as JSON bytes with the same
        Status/Question/Answer structure that load_cache() parses
        (mirroring the DNS-over-HTTPS JSON APIs), or None for ANY queries.
        """
        if not HAVE_DNSPYTHON:
            raise RuntimeError("Using DNS requires dnspython. Either install it or use -g to use Google DNS API")
        # dnspython does not like DNS metaqueries such as ANY requests
        if rdtype_text == 'ANY':
            if not self.has_show_dnspython_any_warning:
                print("Warning: refusing to query DNS for type ANY (dnspython does not like it)")
                self.has_show_dnspython_any_warning = True
            return None
        print("Querying DNS for {} <{}>...".format(domain, rdtype_text))
        resolver = dns.resolver.Resolver()
        # Request DNSSEC data (DO flag) with a 4096-byte EDNS0 payload
        resolver.use_edns(0, dns.flags.DO, 4096)
        dot_domain = domain + '.'
        rdtype = dns.rdatatype.from_text(rdtype_text)
        rdclass = dns.rdataclass.IN
        # Skeleton of the JSON document; Status 0 is NOERROR
        result = {
            'Status': 0,
            'Question': [
                {
                    'name': dot_domain,
                    'type': rdtype,
                },
            ],
            'Answer': [],
        }
        try:
            answers = resolver.query(dot_domain, rdtype, rdclass, True)
        except dns.resolver.NoAnswer:
            pass # Empty answer is successful
        except dns.resolver.NXDOMAIN:
            assert dns.rcode.NXDOMAIN == 3
            result['Status'] = 3
        else:
            # Record the DNS header flags of the response, both raw and decoded
            result['Flags'] = {
                'raw': answers.response.flags,
                'QR': bool(answers.response.flags & dns.flags.QR), # Query Response (0x8000)
                'AA': bool(answers.response.flags & dns.flags.AA), # Authoritative Answer (0x0400)
                'TC': bool(answers.response.flags & dns.flags.TC), # Truncated Response (0x0200)
                'RD': bool(answers.response.flags & dns.flags.RD), # Recursion Desired (0x0100)
                'RA': bool(answers.response.flags & dns.flags.RA), # Recursion Available (0x0080)
                'AD': bool(answers.response.flags & dns.flags.AD), # Authentic Data (0x0020)
                'CD': bool(answers.response.flags & dns.flags.CD), # Checking Disabled (0x0010)
            }
            result['Answer'] = [
                {
                    'name': answers.name.to_text(omit_final_dot=False),
                    'type': answer.rdtype,
                    'TTL': answers.ttl,
                    'data': answer.to_text(),
                }
                for answer in answers
            ]
        return json.dumps(result).encode('ascii')
    def query_google(self, domain, rtype):
        """Perform a DNS query using https://dns.google.com/ API

        Returns the raw JSON response body as bytes.
        Raises ValueError on an unexpected HTTP status or an empty body.
        """
        print("Querying dns.google.com for {} <{}>...".format(domain, rtype))
        params = {
            'name': domain,
            'type': rtype,
        }
        url = 'https://dns.google.com/resolve?' + urllib.parse.urlencode(params)
        ctx = ssl.create_default_context()
        if self.no_ssl:
            # Disable HTTPS certificate verification, for example when recording
            # the requests using a HTTPS proxy such as BurpSuite.
            ctx.check_hostname = False
            ctx.verify_mode = ssl.CERT_NONE # noqa
        opener = urllib.request.build_opener(urllib.request.HTTPSHandler(context=ctx))
        req = urllib.request.Request(
            url,
            headers={
                'Accept': 'application/json, text/plain, */*',
                'Connection': 'close',
            })
        with opener.open(req) as resp:
            if resp.status not in (200, 204):
                raise ValueError("Request to {} returned HTTP status {}".format(url, resp.status))
            # Read exactly Content-Length bytes when the server provides it
            content_length = resp.getheader('Content-Length')
            if content_length:
                data = resp.read(int(content_length))
            else:
                data = resp.read()
            if not data:
                raise ValueError("No data in response to {}".format(url))
            return data
    def query_cloudflare(self, domain, rtype):
        """Perform a DNS query using https://cloudflare-dns.com/ API

        Returns the raw JSON response body as bytes.
        Raises ValueError on an unexpected HTTP status or an empty body.
        """
        print("Querying cloudflare-dns.com for {} <{}>...".format(domain, rtype))
        params = {
            'name': domain,
            'type': rtype,
        }
        url = 'https://cloudflare-dns.com/dns-query?' + urllib.parse.urlencode(params)
        ctx = ssl.create_default_context()
        if self.no_ssl:
            # Disable HTTPS certificate verification, for example when recording
            # the requests using a HTTPS proxy such as BurpSuite.
            ctx.check_hostname = False
            ctx.verify_mode = ssl.CERT_NONE # noqa
        opener = urllib.request.build_opener(urllib.request.HTTPSHandler(context=ctx))
        req = urllib.request.Request(
            url,
            headers={
                # Cloudflare requires this Accept header to answer in JSON
                'Accept': 'application/dns-json',
                'Connection': 'close',
            })
        with opener.open(req) as resp:
            if resp.status not in (200, 204):
                raise ValueError("Request to {} returned HTTP status {}".format(url, resp.status))
            # Read exactly Content-Length bytes when the server provides it
            content_length = resp.getheader('Content-Length')
            if content_length:
                data = resp.read(int(content_length))
            else:
                data = resp.read()
            if not data:
                raise ValueError("No data in response to {}".format(url))
            return data
    def dump_records(self, hide_dnssec=False):
        """Enumerate the DNS records

        Yields one formatted text line per record, sorted by domain name,
        with wildcard domains collapsed into '*.suffix' entries and known
        providers annotated as end-of-line comments.
        """
        # data -> set of description strings, filled by add_comment()
        comments_for_data = {}
        def add_comment(key, comment):
            if key not in comments_for_data:
                comments_for_data[key] = set()
            comments_for_data[key].add(comment)
        # Find out wildcard domains using resolutions for "b.domain",
        # "random.domain" and "xyz.domain"
        wildcard_detectors = ('b.', 'random.', 'xyz.')
        wildcard_witness = {}
        # Describe known providers
        for domain, rtype, data in self.dns_records:
            if hide_dnssec and rtype in ('DNSKEY', 'NSEC3PARAM', 'NSEC3', 'RRSIG'):
                continue
            comment = get_comment_for_record(domain, rtype, data)
            if comment:
                add_comment(data, comment)
            if domain.startswith(wildcard_detectors):
                # Group witness data as suffix -> rtype -> domain -> set(data)
                wild_suffix = domain.split('.', 1)[1]
                if wild_suffix not in wildcard_witness:
                    wildcard_witness[wild_suffix] = {}
                if rtype not in wildcard_witness[wild_suffix]:
                    wildcard_witness[wild_suffix][rtype] = {}
                if domain not in wildcard_witness[wild_suffix][rtype]:
                    wildcard_witness[wild_suffix][rtype][domain] = set()
                wildcard_witness[wild_suffix][rtype][domain].add(data)
        # Compute wildcard records: a (suffix, rtype) is a wildcard when all
        # three detector domains resolved to exactly the same data set
        all_records = self.dns_records.copy()
        wildcard_records_by_data = {}
        for wild_suffix, suffix_types_witnesses in wildcard_witness.items():
            for rtype, witnesses in suffix_types_witnesses.items():
                if len(witnesses) != len(wildcard_detectors):
                    continue
                wild_several_data = None
                try:
                    for several_data in witnesses.values():
                        if wild_several_data is None:
                            wild_several_data = several_data
                        if wild_several_data != several_data:
                            raise ValueError
                except ValueError:
                    # Not a wildcard
                    # NOTE(review): this 'break' also skips the remaining
                    # record types of the same suffix; 'continue' may have
                    # been intended — confirm before changing.
                    break
                assert wild_several_data is not None
                # Add a wildcard record and filter-out existing ones
                for data in wild_several_data:
                    all_records.add(('*.' + wild_suffix, rtype, data))
                    # Identify wildcard records by their data
                    if (rtype, data) not in wildcard_records_by_data:
                        wildcard_records_by_data[(rtype, data)] = set()
                    wildcard_records_by_data[(rtype, data)].add(wild_suffix)
        # Filter-out wildcard records and compute the maximum length of a domain name
        max_domain_len = 0
        deleted_records = set()
        for domain, rtype, data in all_records:
            is_deleted = False
            for possible_wild_suffix in wildcard_records_by_data.get((rtype, data), []):
                if domain != '*.' + possible_wild_suffix and domain.endswith('.' + possible_wild_suffix):
                    deleted_records.add((domain, rtype, data))
                    is_deleted = True
                    # NOTE(review): this 'continue' only affects the inner
                    # loop; the result is unchanged since deleted_records is
                    # a set, but 'break' would avoid redundant iterations.
                    continue
            if is_deleted:
                continue
            if rtype == 'PTR':
                # Ignore long PTR records in max_domain_len computation
                continue
            if max_domain_len < len(domain):
                max_domain_len = len(domain)
        for rec in deleted_records:
            all_records.remove(rec)
        # Sort by domain name, and place rPTR entries right after A and AAAA ones.
        items = sorted(
            all_records,
            key=lambda x: (dns_sortkey(x[0]), x[1].replace('rPTR', 'ArPTR'), x[2]))
        for domain, rtype, data in items:
            if hide_dnssec and rtype in ('DNSKEY', 'NSEC3PARAM', 'NSEC3', 'RRSIG'):
                continue
            # Right-align the type column by padding the domain column
            padding = ' ' * (max_domain_len - len(domain)) if len(domain) < max_domain_len else ''
            line = '{}{} {:6} {}'.format(padding, domain, rtype, data)
            comments = comments_for_data.get(data)
            if comments:
                line += ' # ' + ', '.join(sorted(comments))
            yield line
def main(argv=None):
    """Entry point: parse arguments, resolve the domains and dump the records"""
    parser = argparse.ArgumentParser(description="Resolve DNS records")
    parser.add_argument('file', metavar="DOMAINS_FILE", type=Path,
                        help="file containing a list of domains to resolve")
    parser.add_argument('-d', '--directory', type=Path,
                        help="directory where DNS results are cached")
    parser.add_argument('-D', '--hide-dnssec', action='store_true',
                        help="hide entries related to DNSSEC")
    parser.add_argument('-F', '--filter-exist', action='store_true',
                        help="filter-out non-existing domains from the input file")
    parser.add_argument('-g', '--use-google', action='store_true',
                        help="use https://dns.google.com/ API")
    parser.add_argument('-C', '--use-cloudflare', action='store_true',
                        help="use https://cloudflare-dns.com/ API")
    parser.add_argument('-o', '--output', type=Path,
                        help="file where the DNS entries are written")
    parser.add_argument('-O', '--stdout', action='store_true',
                        help="print the results, when a file is also written")
    parser.add_argument('-i', '--ipaddr', metavar="IP_NETWORK",
                        nargs='*', type=ipaddress.ip_network,
                        help="resolve reverse (PTR) records for the IP addresses")
    parser.add_argument('-M', '--merge-cache', action='store_true',
                        help="merge cache files together")
    parser.add_argument('-p', '--prefixes', action='store_true',
                        help="add some well-known prefixes to the domains")
    parser.add_argument('-s', '--sort', action='store_true',
                        help="sort the domains of the input file")
    parser.add_argument('-S', '--no-ssl', action='store_true',
                        help="disable security of HTTPS queries")
    parser.add_argument('-t', '--time-sleep', type=int, default=1,
                        help="number of seconds to sleep between DNS queries")
    args = parser.parse_args(argv)
    if args.directory is None:
        parser.error("please provide a cache directory with option -d/--directory")
    if args.use_google and args.use_cloudflare:
        parser.error("options to use a DNS-JSON provider are mutually exclusive")
    # Load the list of domains
    with args.file.open(mode='r') as fdomains:
        raw_domains = [l.rstrip('\n') for l in fdomains.readlines()]
    # Normalize: strip spaces and trailing dots, lowercase, drop empty lines
    domains = [l.strip().rstrip('.').lower() for l in raw_domains]
    domains_set = set(domains)
    if '' in domains_set:
        domains_set.remove('')
    if args.sort:
        sorted_domains = sorted(domains_set, key=dns_sortkey)
        if sorted_domains != raw_domains:
            # Write the sorted list back
            with args.file.open(mode='w') as fout:
                fout.write(''.join((d + '\n') for d in sorted_domains))
    # Create the cache directory, if it does not exist
    args.directory.mkdir(exist_ok=True)
    resolver = Resolver(
        cache_directory=args.directory,
        time_sleep=args.time_sleep,
        use_google=args.use_google,
        use_cloudflare=args.use_cloudflare,
        no_ssl=args.no_ssl,
    )
    # Fill the cache
    domains = list(domains_set)
    random.SystemRandom().shuffle(domains) # Do not be predictable
    for domain in domains:
        # Treat SRV records in a special way, to restrict the requested record type
        resolving_types = DNS_SRV_TYPES if '._tcp.' in domain or '._udp.' in domain else DNS_TYPES
        for rtype in resolving_types:
            # Do not resolve PTR for normal domains
            if rtype != 'PTR':
                resolver.resolve_in_cache(domain, rtype)
    # Resolve with well-known prefixes
    if args.prefixes:
        domains_with_prefixes = list(
            '{}.{}'.format(p, d)
            for p, d in itertools.product(WELLKNOWN_PREFIXES, domains))
        random.SystemRandom().shuffle(domains_with_prefixes) # Do not be predictable
        for domain in domains_with_prefixes:
            resolving_types = DNS_SRV_TYPES if '._tcp.' in domain or '._udp.' in domain else DNS_TYPES
            for rtype in resolving_types:
                if rtype != 'PTR':
                    resolver.resolve_in_cache(domain, rtype)
    # Load the cache
    resolver.load_cache(if_dirty=True)
    # Resolve PTR records given on the command line
    if args.ipaddr:
        for ip_net in args.ipaddr:
            resolver.resolve_ip(ip_net.network_address)
            if ip_net.num_addresses >= 2:
                for ip_addr in ip_net.hosts():
                    resolver.resolve_ip(ip_addr)
                resolver.resolve_ip(ip_net.broadcast_address)
        resolver.load_cache(if_dirty=True)
    # Get all the A records, in order to get PTR
    all_ipv4_addresses = set(x[2] for x in resolver.dns_records if x[1] == 'A')
    for ip_addr in all_ipv4_addresses:
        resolver.resolve_ip(ip_addr, version=4)
    # Get all the AAAA records, in order to get PTR
    all_ipv6_addresses = set(x[2] for x in resolver.dns_records if x[1] == 'AAAA')
    for ip_addr in all_ipv6_addresses:
        resolver.resolve_ip(ip_addr, version=6)
    # Reload the cache, if needed
    resolver.load_cache(if_dirty=True)
    # Filter-out non-existing domains from the input file
    if args.filter_exist:
        found_domains = set(x[0].rstrip('.') for x in resolver.dns_records)
        sorted_domains = sorted(set(domains).intersection(found_domains), key=dns_sortkey)
        if sorted_domains != domains:
            # Write the sorted list back
            with args.file.open(mode='w') as fout:
                fout.write(''.join((d + '\n') for d in sorted_domains))
    # Produce the output
    if args.output:
        with args.output.open(mode='w') as fout:
            for line in resolver.dump_records(hide_dnssec=args.hide_dnssec):
                fout.write(line + '\n')
    if args.stdout or not args.output:
        for line in resolver.dump_records(hide_dnssec=args.hide_dnssec):
            print(line)
    # Merge all cache files together
    if args.merge_cache:
        resolver.merge_cache_files()
if __name__ == '__main__':
main()
python/network/resolve_domains: several improvements
* limit the number of queries
* add domain prefixes from https://docs.microsoft.com/en-us/skypeforbusiness/plan-your-deployment/network-requirements/dns
#!/usr/bin/env python3
# -*- coding:UTF-8 -*-
# Copyright (c) 2019 Nicolas Iooss
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
"""Resolve a list DNS domains with a caching directory
Usage example to resolve domains using Google's DNS-over-HTTPS API, a list in
domains.txt, a cache in directory dns/, writing results in results.out.txt:
./resolve_domains.py -gMOs -o dns_resolutions.out.txt -d cache_dns domains.txt
@author: Nicolas Iooss
@license: MIT
"""
import argparse
import binascii
import ipaddress
import itertools
import json
from pathlib import Path
import random
import re
import ssl
import struct
import sys
import time
import urllib.parse
import urllib.request
try:
import dns.resolver
HAVE_DNSPYTHON = True
except ImportError:
HAVE_DNSPYTHON = False
else:
import dns
import dns.flags
import dns.rcode
import dns.rdataclass
import dns.rdatatype
# Types of DNS records that are resolved
DNS_TYPES = ('A', 'AAAA', 'MX', 'NS', 'PTR', 'TXT', 'ANY')
DNS_SRV_TYPES = ('NS', 'SRV', 'TXT', 'ANY')
# Identifiers of record data types
DNS_RDATA_TYPES = {
'NONE': 0,
'A': 1,
'NS': 2,
'MD': 3,
'MF': 4,
'CNAME': 5,
'SOA': 6,
'MB': 7,
'MG': 8,
'MR': 9,
'NULL': 10,
'WKS': 11,
'PTR': 12,
'HINFO': 13,
'MINFO': 14,
'MX': 15,
'TXT': 16,
'RP': 17,
'AFSDB': 18,
'X25': 19,
'ISDN': 20,
'RT': 21,
'NSAP': 22,
'NSAP_PTR': 23,
'SIG': 24,
'KEY': 25,
'PX': 26,
'GPOS': 27,
'AAAA': 28,
'LOC': 29,
'NXT': 30,
'SRV': 33,
'NAPTR': 35,
'KX': 36,
'CERT': 37,
'A6': 38,
'DNAME': 39,
'OPT': 41,
'APL': 42,
'DS': 43,
'SSHFP': 44,
'IPSECKEY': 45,
'RRSIG': 46,
'NSEC': 47,
'DNSKEY': 48,
'DHCID': 49,
'NSEC3': 50,
'NSEC3PARAM': 51,
'TLSA': 52,
'HIP': 55,
'CDS': 59,
'CDNSKEY': 60,
'CSYNC': 62,
'SPF': 99,
'UNSPEC': 103,
'EUI48': 108,
'EUI64': 109,
'TKEY': 249,
'TSIG': 250,
'IXFR': 251,
'AXFR': 252,
'MAILB': 253,
'MAILA': 254,
'ANY': 255,
'URI': 256,
'CAA': 257,
'AVC': 258,
'TA': 32768,
'DLV': 32769,
'RESERVED-65534': 65534,
}
DNS_TYPE_ITOA = dict((v, k) for k, v in DNS_RDATA_TYPES.items())
DNS_RESPONSE_CODES = {
0: 'NOERROR', # DNS Query completed successfully
1: 'FORMERR', # DNS Query Format Error
2: 'SERVFAIL', # Server failed to complete the DNS request
3: 'NXDOMAIN', # Domain name does not exist
4: 'NOTIMP', # Function not implemented
5: 'REFUSED', # The server refused to answer for the query
}
# Well-known prefixes seen on domain names
WELLKNOWN_PREFIXES = (
'_amazonses',
'_dmarc', # DMARC, Domain-based Message Authentication, Reporting & Conformance
'_domainkey', # DKIM, DomainKeys Identified Email
'_ipp._tcp', # IPP, Internet Printing Protocol
'_kerberos._tcp',
'_kerberos._tcp.dc._msdcs',
'_ldap._tcp', # LDAP, Lightweight Directory Access Protocol
'_ldap._tcp.dc._msdcs',
'_ldap._tcp.gc._msdcs',
'_ldap._tcp.pdc._msdcs',
'_ldaps._tcp',
'_msdcs', # Microsoft Domain Controller Server
'_mta-sts', # MTA-STS, SMTP Mail Transfer Agent Strict Transport Security
'_psl', # PSL, Public Suffix List
'_sip._tcp',
'_sip._tls',
'_sip._udp',
'_sips._tcp',
'a',
'about',
'access1',
'account',
'adm',
'admin',
'agent',
'alpha',
'answers',
'api',
'app',
'app1',
'archive',
'assets',
'auth',
'autodiscover',
'av',
'b',
'back',
'backup',
'bck',
'beta',
'bit',
'bits',
'blog',
'bot',
'business',
'c',
'cache',
'calendar',
'cdn',
'center',
'chat',
'cloud',
'club',
'code',
'collect',
'collectd',
'com',
'commute',
'connect',
'console',
'corp',
'cpanel',
'crl',
'cvs',
'data',
'database',
'db',
'dc1',
'dc2',
'demo',
'dev',
'developer',
'dial-in',
'dmz',
'dns',
'dns1',
'dns2',
'doc',
'docs',
'domains',
'en',
'esxi',
'eu',
'euro',
'exchange',
'ext',
'external',
'extra',
'extranet',
'faq',
'filer',
'files',
'forum',
'fr',
'free',
'ftp',
'fw',
'gc._msdcs',
'geo',
'git',
'gitlab',
'google._domainkey',
'gp',
'grafana',
'graph',
'group',
'guide',
'gw',
'help',
'helpdesk',
'hg',
'icinga',
'icingaweb',
'identity',
'idp',
'imap',
'ins',
'inside',
'int',
'intra',
'intranet',
'irc',
'jenkins',
'jira',
'job',
'jobs',
'join',
'k1._domainkey', # Mailchimp DKIM key
'ldap',
'ldaps',
'learn',
'list',
'lists',
'local',
'log',
'login',
'lyncdiscover',
'm',
'mail',
'mail1',
'mail2',
'master',
'matrix',
'mattermost',
'mdm',
'media',
'meet',
'mf1',
'mfa',
'mobile',
'mobility',
'msoid',
'mssql',
'mx1',
'mx2',
'my',
'mysql',
'nagios',
'name',
'nas',
'net',
'new',
'news',
'ng',
'ns1',
'ns2',
'ntp',
'oauth',
'old',
'open',
'openpgpkey',
'opensource',
'org',
'outlook',
'owa',
'pass',
'pdns',
'phone',
'phpmyadmin',
'pki',
'pop',
'pop3',
'pop3s',
'portal',
'prd',
'preprod',
'prod',
'product',
'products',
'proxmox',
'proxy',
'public',
'publish',
'qat',
'qual',
'qualification',
'queue',
'rabbitmq',
'random',
'redis',
'redmine',
'register',
'registration',
'registry',
'release',
'releases',
'repo',
'research',
'rest',
'rsa',
'rss',
'sap',
'search',
'secure',
'share',
'sharing',
'shop',
'sign-in',
'signin',
'sip',
'smtp',
'smtp-in',
'smtp-out',
'smtp._domainkey',
'smtp1',
'smtp1._domainkey',
'smtp2',
'smtps',
'sonar',
'spf',
'splunk',
'spot',
'sql',
'ssl',
'sso',
'staff',
'stage',
'staging',
'stat',
'static',
'stats',
'sts',
'subversion',
'support',
'svn',
'sync',
'test',
'test1',
'tls',
'token',
'tool',
'tools',
'torrent',
'tracker',
'tv',
'uat',
'uk',
'us',
'v2',
'vault',
'voip',
'vpn',
'web',
'web-ext',
'webapp',
'webchat',
'webcon1',
'webmail',
'wifi',
'wiki',
'wildcard',
'wireless',
'www',
'www1',
'www2',
'www3',
'xyz',
'zammad',
'zero',
'zeromq',
'zimbra',
)
def get_comment_for_domain(domain):
    """Describe a domain name to produce a comment.

    domain is expected to be fully-qualified with a trailing dot
    (e.g. 'www.example.com.'), as found in DNS answers.
    Return a short provider description string, or None when unknown.
    """
    if domain.endswith((
            '.akamaiedge.net.',
            '.akamaized.net.',  # fixed: trailing dot was missing, so it never matched
            '.edgekey.net.',
            '.static.akamaitechnologies.com.')):
        return 'Akamai CDN'
    if domain.endswith('.amazonaws.com.'):
        return 'Amazon AWS'
    if domain.endswith('.cdn.cloudflare.net.'):
        return 'Cloudflare CDN'
    if domain.endswith('.mail.gandi.net.') or domain == 'webmail.gandi.net.':
        return 'Gandi mail hosting'
    if domain == 'webredir.vip.gandi.net.':
        return 'Gandi web forwarding hosting'
    if domain == 'dkim.mcsv.net.':
        return 'Mailchimp mail sender'
    if domain.endswith('.azurewebsites.net.'):
        return 'Microsoft Azure hosting'
    if domain.endswith('.lync.com.'):
        return 'Microsoft Lync'
    if domain == 'clientconfig.microsoftonline-p.net.':
        # https://docs.microsoft.com/en-gb/office365/enterprise/external-domain-name-system-records
        return 'Microsoft Office 365 tenant'
    if domain.endswith(('.office.com.', '.office365.com.')):
        return 'Microsoft Office 365'
    if domain.endswith('.outlook.com.'):
        return 'Microsoft Outlook mail'
    if domain in ('redirect.ovh.net.', 'ssl0.ovh.net.'):
        return 'OVH mail provider'
    if domain.endswith('.hosting.ovh.net.'):
        return 'OVH shared web hosting'
    if domain.endswith('.rev.sfr.net.'):
        return 'SFR provider'
    return None
def get_comment_for_record(domain, rtype, data):
    """Describe a DNS record to produce a comment.

    domain -- the record name (dot-terminated)
    rtype -- the record type as text ('A', 'MX', 'PTR', 'rPTR', ...)
    data -- the record data as text
    Return a short description string, or None when unknown.
    """
    if rtype == 'PTR':
        # produce the same comment as for the reverse-PTR record
        return get_comment_for_domain(data)
    if rtype == 'CNAME':
        # Try describing the alias target
        return get_comment_for_domain(domain) or get_comment_for_domain(data)
    if rtype == 'MX':
        data = data.lower()
        if data.endswith(('.google.com.', '.googlemail.com.')):
            return 'Google mail server'
        # NB. a duplicated '.outlook.com.' test has been removed here
        if data.endswith('.outlook.com.'):
            return 'Microsoft Outlook mail server'
        if data.endswith('.pphosted.com.'):
            return 'Proofpoint mail server'
        # Try matching the name of MX servers ("priority host" format)
        matches = re.match(r'^[0-9]+\s+(\S+)$', data)
        if matches:
            return get_comment_for_domain(matches.group(1))
    if rtype == 'NS':
        if data.endswith('.gandi.net.'):
            return 'Gandi DNS server'
        if data.endswith('.ovh.net.'):
            return 'OVH DNS server'
    # Fall back to describing the record name itself (A, AAAA, TXT, rPTR...)
    return get_comment_for_domain(domain)
def dns_sortkey(name):
    """Get the sort key of a domain name"""
    # Compare labels from the TLD down so related domains group together.
    # The original string is kept as a tie-breaker: the uppercase spelling of
    # a domain sorts before the lowercase one, while 'a.tld' still comes
    # before 'subdomain.a.tld' (nothing extra is appended to the label list).
    labels = name.lower().split('.')
    labels.reverse()
    return (labels, name)
class Resolver:
    def __init__(self, cache_directory, time_sleep=1, use_google=False, use_cloudflare=False, no_ssl=False):
        """DNS resolver backed by an on-disk cache of JSON responses.

        cache_directory -- pathlib.Path of the directory holding *.json cache files
        time_sleep -- seconds to sleep after each live query (0 disables)
        use_google -- query the https://dns.google.com/ JSON API
        use_cloudflare -- query the https://cloudflare-dns.com/ JSON API
        no_ssl -- disable HTTPS certificate verification for the JSON APIs
        """
        self.cache_directory = cache_directory
        self.time_sleep = time_sleep
        # The two DNS-over-HTTPS backends are mutually exclusive
        assert not (use_google and use_cloudflare)
        self.use_google = use_google
        self.use_cloudflare = use_cloudflare
        self.no_ssl = no_ssl
        # Set of (domain, rtype) pairs already asked, and set of
        # (name, type_text, data) answers; both filled by load_cache()
        self.dns_questions = None
        self.dns_records = None
        self.is_cache_dirty = True
        # Only print the "refusing ANY with dnspython" warning once
        self.has_show_dnspython_any_warning = False
        # Counter of live DNS queries (presumably used to limit the number of
        # queries per run, per the changelog; usage is outside this view — confirm)
        self.query_count = 0
        # Populate dns_questions/dns_records from the cache directory now
        self.load_cache(if_dirty=False)
    def load_cache(self, if_dirty=True):
        """Load cached DNS results from the cache directory

        Rebuilds self.dns_questions (set of (domain, rtype) pairs) and
        self.dns_records (set of (name, type_text, data) triples) from every
        *.json file, one JSON document per line.  PTR answers additionally
        produce synthetic 'rPTR' records mapping hostname -> IP address.
        """
        if if_dirty and not self.is_cache_dirty:
            # Do not reload the cache if it has not been modified
            return
        self.dns_questions = set()
        self.dns_records = set()
        for filepath in self.cache_directory.glob('*.json'):
            with filepath.open(mode='r') as fjson:
                for line in fjson:
                    json_data = json.loads(line)
                    # Add the question to the list of asked ones
                    # (questions are recorded even for failed lookups, so that
                    # resolve_in_cache does not re-ask them)
                    for question in json_data['Question']:
                        self.dns_questions.add(
                            (question['name'].lower().strip('.'), DNS_TYPE_ITOA[question['type']])
                        )
                    # Ignore failed responses
                    rcode_name = DNS_RESPONSE_CODES.get(json_data['Status'])
                    if rcode_name in ('SERVFAIL', 'NXDOMAIN', 'NOTIMP', 'REFUSED'):
                        continue
                    if rcode_name != 'NOERROR':
                        raise ValueError("Invalid status {} ({}) in {}".format(
                            json_data['Status'], rcode_name, repr(filepath)))
                    # Ignore empty responses
                    if 'Answer' not in json_data:
                        continue
                    for answer in json_data['Answer']:
                        asc_type = DNS_TYPE_ITOA[answer['type']]
                        self.dns_records.add((answer['name'], asc_type, answer['data']))
                        # Add fake reverse-PTR entry
                        if asc_type == 'PTR':
                            matches = re.match(
                                r'^([0-9]+)\.([0-9]+)\.([0-9]+)\.([0-9]+)\.in-addr\.arpa\.$',
                                answer['name'], re.I)
                            if matches:
                                # IPv4 PTR record: reverse the 4 labels to get the address
                                ip_addr = '.'.join(matches.groups()[::-1])
                                self.dns_records.add((answer['data'], 'rPTR', ip_addr))
                                continue
                            matches = re.match(r'^(([0-9a-f]+\.){32})ip6\.arpa\.$', answer['name'])
                            if matches:
                                # IPv6 PTR record: the 32 nibbles are stored in
                                # reverse order; rebuild and compress the address
                                packed_addr = binascii.unhexlify(matches.group(1).replace('.', '')[::-1])
                                ip_addr_expanded = ':'.join(
                                    '{:04x}'.format(x) for x in struct.unpack('>8H', packed_addr))
                                ip_addr = ipaddress.IPv6Address(ip_addr_expanded).compressed
                                self.dns_records.add((answer['data'], 'rPTR', ip_addr))
                                continue
                            print("Warning: invalid PTR record name {}".format(repr(answer['name'])))
        self.is_cache_dirty = False
def merge_cache_files(self):
    """Merge every per-query cache file into a single 'all.json'.

    Lines are deduplicated and sorted, written to a staging file first,
    then renamed over 'all.json'; every other cache file is removed.
    """
    source_files = set()
    unique_lines = set()
    for json_path in self.cache_directory.glob('*.json'):
        source_files.add(json_path)
        with json_path.open(mode='r') as handle:
            unique_lines.update(entry.strip() + '\n' for entry in handle)
    sorted_lines = sorted(unique_lines)
    target = self.cache_directory / 'all.json'
    staging = self.cache_directory / 'all.json.new'
    try:
        with staging.open(mode='w') as out_stream:
            out_stream.write(''.join(sorted_lines))
    except MemoryError:
        # Joining everything at once can exhaust memory with a huge number
        # of domains; fall back to writing one line at a time.
        with staging.open(mode='w') as out_stream:
            for entry in sorted_lines:
                out_stream.write(entry)
    staging.rename(target)
    for json_path in source_files:
        if json_path != target:
            json_path.unlink()
def resolve_in_cache(self, domain, rtype):
    """Resolve a domain name, writing the result in a cache file.

    Args:
        domain: domain name to resolve (trailing dots are stripped).
        rtype: textual record type such as 'A', 'AAAA' or 'TXT'.

    Does nothing when the question was already asked (present in
    self.dns_questions) or when a cache file already exists on disk.
    On success, writes the raw JSON response to '<domain>_<rtype>.json'
    in the cache directory and marks the in-memory cache as dirty.
    """
    domain = domain.strip('.')
    # NB. use dns_questions instead of dns_records in order to perform
    # specific queries (A, AAAA, TXT, etc.) even after an ANY query.
    if (domain, rtype) in self.dns_questions:
        return
    cache_file = self.cache_directory / '{}_{}.json'.format(domain, rtype)
    cache_file_temp = self.cache_directory / '{}_{}.json.temp'.format(domain, rtype)
    if cache_file.exists():
        # A file on disk that is not in dns_questions means load_cache was
        # not run after it appeared; warn instead of re-querying.
        print("Warning: cache file exists for {} <{}> but was not loaded".format(domain, rtype))
        return
    # Use the resolution backend selected at construction time
    if self.use_google:
        response = self.query_google(domain, rtype)
    elif self.use_cloudflare:
        response = self.query_cloudflare(domain, rtype)
    else:
        response = self.query_dns(domain, rtype)
    if not response:
        return
    # Write the cache file (temp file first, then atomic replace)
    response = response.strip(b'\n')
    with cache_file_temp.open(mode='wb') as fout:
        fout.write(response)
        fout.write(b'\n')
    cache_file_temp.replace(cache_file)
    self.is_cache_dirty = True
    # Sleep after the DNS query
    if self.time_sleep:
        # Inform the user that we are sleeping with a small sign
        print('-', end='\r')
        time.sleep(self.time_sleep)
@staticmethod
def get_ptr_name_for_ip(ip_addr, version=None):
"""Get the PTR domain name matching an IP address"""
if hasattr(ip_addr, 'reverse_pointer'):
# Python 3.5 introduced a property to compute the PTR name
return ip_addr.reverse_pointer
if isinstance(ip_addr, ipaddress.IPv4Address):
return '{0[3]}.{0[2]}.{0[1]}.{0[0]}.in-addr.arpa.'.format(struct.unpack('BBBB', ip_addr.packed))
if isinstance(ip_addr, ipaddress.IPv6Address):
addr_hex = binascii.hexlify(ip_addr.packed).decode('ascii')
return '{}.ip6.arpa.'.format('.'.join(addr_hex[::-1]))
# Here, ip_addr has to be a string.
if version is None:
# Guess the version from the IP address
version = 6 if ':' in ip_addr else 4
if version == 4:
return '{0[3]}.{0[2]}.{0[1]}.{0[0]}.in-addr.arpa.'.format(ip_addr.split('.'))
if version == 6:
addr_hex = binascii.hexlify(ipaddress.IPv6Address(ip_addr).packed).decode('ascii')
return '{}.ip6.arpa.'.format('.'.join(addr_hex[::-1]))
raise ValueError("Unknown IP version {}".format(repr(version)))
def resolve_ip(self, ip_addr, version=None):
    """Resolve an IP address by querying a PTR record"""
    return self.resolve_in_cache(self.get_ptr_name_for_ip(ip_addr, version), 'PTR')
def query_dns(self, domain, rdtype_text):
    """Resolve a record directly over DNS using dnspython.

    Args:
        domain: domain name to query (without trailing dot).
        rdtype_text: textual record type ('A', 'MX', ...); 'ANY' is refused.

    Returns:
        A JSON document (bytes) shaped like the Google/Cloudflare DNS JSON
        API responses, or None when the query type is refused.

    Raises:
        RuntimeError: when dnspython is not installed.
    """
    if not HAVE_DNSPYTHON:
        raise RuntimeError("Using DNS requires dnspython. Either install it or use -g to use Google DNS API")
    # dnspython does not like DNS metaqueries such as ANY requests
    if rdtype_text == 'ANY':
        if not self.has_show_dnspython_any_warning:
            # Warn only once per run
            print("Warning: refusing to query DNS for type ANY (dnspython does not like it)")
            self.has_show_dnspython_any_warning = True
        return None
    print("Querying DNS for {} <{}>...".format(domain, rdtype_text))
    self.query_count += 1
    resolver = dns.resolver.Resolver()
    # Enable EDNS0 with the DO flag and a 4096-byte payload (DNSSEC data)
    resolver.use_edns(0, dns.flags.DO, 4096)
    dot_domain = domain + '.'
    rdtype = dns.rdatatype.from_text(rdtype_text)
    rdclass = dns.rdataclass.IN
    # Build a response skeleton in the same shape as the DNS-JSON APIs,
    # so load_cache() can parse all backends uniformly
    result = {
        'Status': 0,
        'Question': [
            {
                'name': dot_domain,
                'type': rdtype,
            },
        ],
        'Answer': [],
    }
    try:
        # NOTE(review): Resolver.query() is the dnspython 1.x API (renamed
        # resolve() in 2.x) -- confirm which dnspython version is targeted.
        answers = resolver.query(dot_domain, rdtype, rdclass, True)
    except dns.resolver.NoAnswer:
        pass  # Empty answer is successful
    except dns.resolver.NXDOMAIN:
        assert dns.rcode.NXDOMAIN == 3
        result['Status'] = 3
    else:
        result['Flags'] = {
            'raw': answers.response.flags,
            'QR': bool(answers.response.flags & dns.flags.QR),  # Query Response (0x8000)
            'AA': bool(answers.response.flags & dns.flags.AA),  # Authoritative Answer (0x0400)
            'TC': bool(answers.response.flags & dns.flags.TC),  # Truncated Response (0x0200)
            'RD': bool(answers.response.flags & dns.flags.RD),  # Recursion Desired (0x0100)
            'RA': bool(answers.response.flags & dns.flags.RA),  # Recursion Available (0x0080)
            'AD': bool(answers.response.flags & dns.flags.AD),  # Authentic Data (0x0020)
            'CD': bool(answers.response.flags & dns.flags.CD),  # Checking Disabled (0x0010)
        }
        result['Answer'] = [
            {
                'name': answers.name.to_text(omit_final_dot=False),
                'type': answer.rdtype,
                'TTL': answers.ttl,
                'data': answer.to_text(),
            }
            for answer in answers
        ]
    return json.dumps(result).encode('ascii')
def _query_json_api(self, url, accept):
    """Fetch a DNS-over-HTTPS JSON API response (shared backend logic).

    Factored out of query_google() and query_cloudflare(), whose bodies
    were identical except for the URL and the Accept header.

    Args:
        url: full request URL, query string included.
        accept: value for the Accept header (providers differ).

    Returns:
        The raw response body (bytes).

    Raises:
        ValueError: on an unexpected HTTP status or an empty body.
    """
    ctx = ssl.create_default_context()
    if self.no_ssl:
        # Disable HTTPS certificate verification, for example when recording
        # the requests using a HTTPS proxy such as BurpSuite.
        ctx.check_hostname = False
        ctx.verify_mode = ssl.CERT_NONE  # noqa
    opener = urllib.request.build_opener(urllib.request.HTTPSHandler(context=ctx))
    req = urllib.request.Request(
        url,
        headers={
            'Accept': accept,
            'Connection': 'close',
        })
    with opener.open(req) as resp:
        if resp.status not in (200, 204):
            raise ValueError("Request to {} returned HTTP status {}".format(url, resp.status))
        # Trust Content-Length when present to avoid over-reading
        content_length = resp.getheader('Content-Length')
        if content_length:
            data = resp.read(int(content_length))
        else:
            data = resp.read()
        if not data:
            raise ValueError("No data in response to {}".format(url))
    return data

def query_google(self, domain, rtype):
    """Perform a DNS query using https://dns.google.com/ API"""
    print("Querying dns.google.com for {} <{}>...".format(domain, rtype))
    self.query_count += 1
    params = {
        'name': domain,
        'type': rtype,
    }
    url = 'https://dns.google.com/resolve?' + urllib.parse.urlencode(params)
    return self._query_json_api(url, 'application/json, text/plain, */*')

def query_cloudflare(self, domain, rtype):
    """Perform a DNS query using https://cloudflare-dns.com/ API"""
    print("Querying cloudflare-dns.com for {} <{}>...".format(domain, rtype))
    self.query_count += 1
    params = {
        'name': domain,
        'type': rtype,
    }
    url = 'https://cloudflare-dns.com/dns-query?' + urllib.parse.urlencode(params)
    return self._query_json_api(url, 'application/dns-json')
def dump_records(self, hide_dnssec=False):
    """Enumerate the DNS records as formatted, aligned text lines.

    Args:
        hide_dnssec: when True, skip DNSKEY/NSEC3PARAM/NSEC3/RRSIG records.

    Yields:
        One line per record ('domain TYPE data [# comments]'), sorted by
        domain name, where groups of identical records for the detector
        names are collapsed into synthetic '*.suffix' wildcard records.
    """
    comments_for_data = {}

    def add_comment(key, comment):
        # Accumulate human-readable annotations, keyed by record data
        if key not in comments_for_data:
            comments_for_data[key] = set()
        comments_for_data[key].add(comment)

    # Find out wildcard domains using resolutions for "b.domain",
    # "random.domain" and "xyz.domain"
    wildcard_detectors = ('b.', 'random.', 'xyz.')
    wildcard_witness = {}
    # Describe known providers
    for domain, rtype, data in self.dns_records:
        if hide_dnssec and rtype in ('DNSKEY', 'NSEC3PARAM', 'NSEC3', 'RRSIG'):
            continue
        comment = get_comment_for_record(domain, rtype, data)
        if comment:
            add_comment(data, comment)
        if domain.startswith(wildcard_detectors):
            # wildcard_witness[suffix][rtype][detector domain] = {data, ...}
            wild_suffix = domain.split('.', 1)[1]
            if wild_suffix not in wildcard_witness:
                wildcard_witness[wild_suffix] = {}
            if rtype not in wildcard_witness[wild_suffix]:
                wildcard_witness[wild_suffix][rtype] = {}
            if domain not in wildcard_witness[wild_suffix][rtype]:
                wildcard_witness[wild_suffix][rtype][domain] = set()
            wildcard_witness[wild_suffix][rtype][domain].add(data)
    # Compute wildcard records
    all_records = self.dns_records.copy()
    wildcard_records_by_data = {}
    for wild_suffix, suffix_types_witnesses in wildcard_witness.items():
        for rtype, witnesses in suffix_types_witnesses.items():
            # Only a wildcard when every detector name resolved for this type
            if len(witnesses) != len(wildcard_detectors):
                continue
            wild_several_data = None
            try:
                # All detector names must have returned the same data set
                for several_data in witnesses.values():
                    if wild_several_data is None:
                        wild_several_data = several_data
                    if wild_several_data != several_data:
                        raise ValueError
            except ValueError:
                # Not a wildcard
                # NOTE(review): this 'break' also skips the remaining record
                # types of the same suffix; 'continue' looks intended -- confirm.
                break
            assert wild_several_data is not None
            # Add a wildcard record and filter-out existing ones
            for data in wild_several_data:
                all_records.add(('*.' + wild_suffix, rtype, data))
                # Identify wildcard records by their data
                if (rtype, data) not in wildcard_records_by_data:
                    wildcard_records_by_data[(rtype, data)] = set()
                wildcard_records_by_data[(rtype, data)].add(wild_suffix)
    # Filter-out wildcard records and compute the maximum length of a domain name
    max_domain_len = 0
    deleted_records = set()
    for domain, rtype, data in all_records:
        # Never display HINFO RFC8482 entries from cloudflare DNS
        # cf. https://blog.cloudflare.com/rfc8482-saying-goodbye-to-any/
        if rtype == 'HINFO' and data == 'RFC8482 ':
            deleted_records.add((domain, rtype, data))
            continue
        is_deleted = False
        # Drop concrete records now covered by a '*.suffix' wildcard record
        for possible_wild_suffix in wildcard_records_by_data.get((rtype, data), []):
            if domain != '*.' + possible_wild_suffix and domain.endswith('.' + possible_wild_suffix):
                deleted_records.add((domain, rtype, data))
                is_deleted = True
                break
        if is_deleted:
            continue
        if rtype == 'PTR':
            # Ignore long PTR records in max_domain_len computation
            continue
        if max_domain_len < len(domain):
            max_domain_len = len(domain)
    for rec in deleted_records:
        all_records.remove(rec)
    # Sort by domain name, and place rPTR entries right after A and AAAA ones.
    items = sorted(
        all_records,
        key=lambda x: (dns_sortkey(x[0]), x[1].replace('rPTR', 'ArPTR'), x[2]))
    for domain, rtype, data in items:
        if hide_dnssec and rtype in ('DNSKEY', 'NSEC3PARAM', 'NSEC3', 'RRSIG'):
            continue
        # Right-align the type column by padding to the longest domain name
        padding = ' ' * (max_domain_len - len(domain)) if len(domain) < max_domain_len else ''
        line = '{}{} {:6} {}'.format(padding, domain, rtype, data)
        comments = comments_for_data.get(data)
        if comments:
            line += ' # ' + ', '.join(sorted(comments))
        yield line
def main(argv=None):
    """Command-line entry point.

    Resolves every domain listed in the input file (optionally with
    well-known prefixes and reverse PTR lookups), caches the JSON answers
    on disk, and prints or writes the collected records.

    Args:
        argv: argument list for argparse (None uses sys.argv).

    Exits with status 2 when the -L/--limit query budget is exhausted.
    """
    parser = argparse.ArgumentParser(description="Resolve DNS records")
    parser.add_argument('file', metavar="DOMAINS_FILE", type=Path,
                        help="file containing a list of domains to resolve")
    parser.add_argument('-d', '--directory', type=Path,
                        help="directory where DNS results are cached")
    parser.add_argument('-D', '--hide-dnssec', action='store_true',
                        help="hide entries related to DNSSEC")
    parser.add_argument('-F', '--filter-exist', action='store_true',
                        help="filter-out non-existing domains from the input file")
    parser.add_argument('-g', '--use-google', action='store_true',
                        help="use https://dns.google.com/ API")
    parser.add_argument('-C', '--use-cloudflare', action='store_true',
                        help="use https://cloudflare-dns.com/ API")
    parser.add_argument('-o', '--output', type=Path,
                        help="file where the DNS entries are written")
    parser.add_argument('-O', '--stdout', action='store_true',
                        help="print the results, when a file is also written")
    parser.add_argument('-i', '--ipaddr', metavar="IP_NETWORK",
                        nargs='*', type=ipaddress.ip_network,
                        help="resolve reverse (PTR) records for the IP addresses")
    parser.add_argument('-L', '--limit', type=int,
                        help="limit the number of DNS queries to perform")
    parser.add_argument('-M', '--merge-cache', action='store_true',
                        help="merge cache files together")
    parser.add_argument('-p', '--prefixes', action='store_true',
                        help="add some well-known prefixes to the domains")
    parser.add_argument('-s', '--sort', action='store_true',
                        help="sort the domains of the input file")
    parser.add_argument('-S', '--no-ssl', action='store_true',
                        help="disable security of HTTPS queries")
    parser.add_argument('-t', '--time-sleep', type=int, default=1,
                        help="number of seconds to sleep between DNS queries")
    args = parser.parse_args(argv)
    if args.directory is None:
        parser.error("please provide a cache directory with option -d/--directory")
    if args.use_google and args.use_cloudflare:
        parser.error("options to use a DNS-JSON provider are mutually exclusive")
    # Load the list of domains (normalized to lowercase, no trailing dot)
    with args.file.open(mode='r') as fdomains:
        raw_domains = [l.rstrip('\n') for l in fdomains.readlines()]
    domains = [l.strip().rstrip('.').lower() for l in raw_domains]
    domains_set = set(domains)
    if '' in domains_set:
        domains_set.remove('')
    if args.sort:
        sorted_domains = sorted(domains_set, key=dns_sortkey)
        if sorted_domains != raw_domains:
            # Write the sorted list back
            with args.file.open(mode='w') as fout:
                fout.write(''.join((d + '\n') for d in sorted_domains))
    # Create the cache directory, if it does not exist
    args.directory.mkdir(exist_ok=True)
    resolver = Resolver(
        cache_directory=args.directory,
        time_sleep=args.time_sleep,
        use_google=args.use_google,
        use_cloudflare=args.use_cloudflare,
        no_ssl=args.no_ssl,
    )
    # Fill the cache
    domains = list(domains_set)
    random.SystemRandom().shuffle(domains)  # Do not be predictable
    for domain in domains:
        # Treat SRV records in a special way, to restrict the requested record type
        resolving_types = DNS_SRV_TYPES if '._tcp.' in domain or '._udp.' in domain else DNS_TYPES
        for rtype in resolving_types:
            # Do not resolve PTR for normal domains
            if rtype != 'PTR':
                resolver.resolve_in_cache(domain, rtype)
                if args.limit and resolver.query_count >= args.limit:
                    print("Performed {} queries, stopping now".format(resolver.query_count))
                    sys.exit(2)
    # Resolve with well-known prefixes
    if args.prefixes:
        domains_with_prefixes = list(
            '{}.{}'.format(p, d)
            for p, d in itertools.product(WELLKNOWN_PREFIXES, domains))
        random.SystemRandom().shuffle(domains_with_prefixes)  # Do not be predictable
        for domain in domains_with_prefixes:
            resolving_types = DNS_SRV_TYPES if '._tcp.' in domain or '._udp.' in domain else DNS_TYPES
            for rtype in resolving_types:
                if rtype == 'PTR':
                    continue
                resolver.resolve_in_cache(domain, rtype)
                if args.limit and resolver.query_count >= args.limit:
                    print("Performed {} queries, stopping now".format(resolver.query_count))
                    sys.exit(2)
    # Load the cache
    resolver.load_cache(if_dirty=True)
    # Resolve PTR records given on the command line
    if args.ipaddr:
        for ip_net in args.ipaddr:
            resolver.resolve_ip(ip_net.network_address)
            if ip_net.num_addresses >= 2:
                for ip_addr in ip_net.hosts():
                    resolver.resolve_ip(ip_addr)
                    if args.limit and resolver.query_count >= args.limit:
                        print("Performed {} queries, stopping now".format(resolver.query_count))
                        sys.exit(2)
                resolver.resolve_ip(ip_net.broadcast_address)
        resolver.load_cache(if_dirty=True)
    # Get all the A records, in order to get PTR
    all_ipv4_addresses = set(x[2] for x in resolver.dns_records if x[1] == 'A')
    for ip_addr in all_ipv4_addresses:
        resolver.resolve_ip(ip_addr, version=4)
    # Get all the AAAA records, in order to get PTR
    all_ipv6_addresses = set(x[2] for x in resolver.dns_records if x[1] == 'AAAA')
    for ip_addr in all_ipv6_addresses:
        resolver.resolve_ip(ip_addr, version=6)
        if args.limit and resolver.query_count >= args.limit:
            print("Performed {} queries, stopping now".format(resolver.query_count))
            sys.exit(2)
    # Reload the cache, if needed
    resolver.load_cache(if_dirty=True)
    # Filter-out non-existing domains from the input file
    if args.filter_exist:
        found_domains = set(x[0].rstrip('.') for x in resolver.dns_records)
        sorted_domains = sorted(set(domains).intersection(found_domains), key=dns_sortkey)
        if sorted_domains != domains:
            # Write the sorted list back
            with args.file.open(mode='w') as fout:
                fout.write(''.join((d + '\n') for d in sorted_domains))
    # Produce the output
    if args.output:
        with args.output.open(mode='w') as fout:
            for line in resolver.dump_records(hide_dnssec=args.hide_dnssec):
                fout.write(line + '\n')
    if args.stdout or not args.output:
        for line in resolver.dump_records(hide_dnssec=args.hide_dnssec):
            print(line)
    # Merge all cache files together
    if args.merge_cache:
        resolver.merge_cache_files()
# Allow running this module as a standalone script
if __name__ == '__main__':
    main()
|
# Copyright (c) 2009-2016 The Regents of the University of Michigan
# This file is part of the HOOMD-blue project, released under the BSD 3-Clause License.
R""" Pair potentials.
Generally, pair forces are short range and are summed over all non-bonded particles
within a certain cutoff radius of each particle. Any number of pair forces
can be defined in a single simulation. The net force on each particle due to
all types of pair forces is summed.
Pair forces require that parameters be set for each unique type pair. Coefficients
are set through the aid of the :py:class:`coeff` class. To set these coefficients, specify
a pair force and save it in a variable::
my_force = pair.some_pair_force(arguments...)
Then the coefficients can be set using the saved variable::
my_force.pair_coeff.set('A', 'A', epsilon=1.0, sigma=1.0)
my_force.pair_coeff.set('A', 'B', epsilon=1.0, sigma=2.0)
my_force.pair_coeff.set('B', 'B', epsilon=2.0, sigma=1.0)
This example sets the parameters *epsilon* and *sigma*
(which are used in :py:class:`lj`). Different pair forces require that different
coefficients are set. Check the documentation of each to see the definition
of the coefficients.
"""
from hoomd import _hoomd
from hoomd.md import _md
from hoomd.md import force;
from hoomd.md import nlist as nl # to avoid naming conflicts
import hoomd;
import math;
import sys;
from collections import OrderedDict
class coeff:
    R""" Define pair coefficients

    All pair forces use :py:class:`coeff` to specify the coefficients between different
    pairs of particles indexed by type. The set of pair coefficients is a symmetric
    matrix defined over all possible pairs of particle types.

    There are two ways to set the coefficients for a particular pair force.
    The first way is to save the pair force in a variable and call :py:meth:`set()` directly.
    The second method is to build the :py:class:`coeff` class first and then assign it to the
    pair force. There are some advantages to this method in that you could specify a
    complicated set of pair coefficients in a separate python file and import it into
    your job script.

    Example (**force_field.py**)::

        from hoomd import md
        my_coeffs = md.pair.coeff();
        my_force.pair_coeff.set('A', 'A', epsilon=1.0, sigma=1.0)
        my_force.pair_coeff.set('A', 'B', epsilon=1.0, sigma=2.0)
        my_force.pair_coeff.set('B', 'B', epsilon=2.0, sigma=1.0)

    Example job script::

        from hoomd import md
        import force_field

        .....
        my_force = md.pair.some_pair_force(arguments...)
        my_force.pair_coeff = force_field.my_coeffs
    """

    def __init__(self):
        """Initialize an empty coefficient matrix."""
        # values[(type_i, type_j)] -> {coeff name: value}; only one ordering
        # of each unordered type pair is ever stored.
        self.values = {}
        # default_coeff[name] -> value applied when set() omits that coeff
        self.default_coeff = {}

    def get_metadata(self):
        """Return the coefficient matrix as a list of dicts for easy serialization."""
        serialized = []
        for (a, b) in self.values:
            item = OrderedDict()
            item['typei'] = a
            item['typej'] = b
            # renamed loop variable: the original shadowed the class name 'coeff'
            for coeff_name in self.values[(a, b)]:
                item[coeff_name] = self.values[(a, b)][coeff_name]
            serialized.append(item)
        return serialized

    def set_default_coeff(self, name, value):
        """Set a default value for coefficient *name*.

        Some coefficients have reasonable default values and the user should
        not be burdened with typing them in all the time; pairs that do not
        set *name* explicitly will receive *value*.
        """
        self.default_coeff[name] = value

    def set(self, a, b, **coeffs):
        R""" Sets parameters for one type pair.

        Args:
            a (str): First particle type in the pair (or a list of type names)
            b (str): Second particle type in the pair (or a list of type names)
            coeffs: Named coefficients (see below for examples)

        Calling :py:meth:`set()` results in one or more parameters being set for a single type pair
        or set of type pairs. Particle types are identified by name, and parameters are also added
        by name. Which parameters you need to specify depends on the pair force you are setting
        these coefficients for, see the corresponding documentation.

        All possible type pairs as defined in the simulation box must be specified before
        executing :py:class:`hoomd.run()`. You will receive an error if you fail to do so. It is
        not an error, however, to specify coefficients for particle types that do not exist in the
        simulation.

        There is no need to specify coefficients for both pairs 'A', 'B' and 'B', 'A'. Specifying
        only one is sufficient.

        To set the same coefficients between many particle types, provide a list of type names
        instead of a single one. All pairs between the two lists will be set to the same parameters.

        Examples::

            coeff.set('A', 'A', epsilon=1.0, sigma=1.0)
            coeff.set('B', 'B', epsilon=2.0, sigma=1.0)
            coeff.set('A', 'B', epsilon=1.5, sigma=1.0)
            coeff.set(['A', 'B', 'C', 'D'], 'F', epsilon=2.0)
            coeff.set(['A', 'B', 'C', 'D'], ['A', 'B', 'C', 'D'], epsilon=1.0)

        Note:
            Single parameters can be updated. If both epsilon and sigma have already been
            set for a type pair, then executing ``coeff.set('A', 'B', epsilon=1.1)`` will update
            the value of epsilon and leave sigma as it was previously set.

        Some pair potentials assign default values to certain parameters. If the default setting
        for a given coefficient (as documented in the respective pair command) is not set
        explicitly, the default will be used.
        """
        hoomd.util.print_status_line()
        # listify the inputs so single names and lists share one code path
        if isinstance(a, str):
            a = [a]
        if isinstance(b, str):
            b = [b]
        for ai in a:
            for bi in b:
                self.set_single(ai, bi, coeffs)

    def set_single(self, a, b, coeffs):
        """Set coefficients for the single (unordered) type pair (a, b)."""
        a = str(a)
        b = str(b)
        # create the pair if it hasn't been created yet
        if (a, b) not in self.values and (b, a) not in self.values:
            self.values[(a, b)] = {}
        # Find which ordering of the pair is stored
        if (a, b) in self.values:
            cur_pair = (a, b)
        elif (b, a) in self.values:
            cur_pair = (b, a)
        else:
            hoomd.context.msg.error("Bug detected in pair.coeff. Please report\n")
            raise RuntimeError("Error setting pair coeff")
        # update each of the values provided
        if len(coeffs) == 0:
            # BUGFIX: error message typo ("coefficents" -> "coefficients")
            hoomd.context.msg.error("No coefficients specified\n")
        for name, val in coeffs.items():
            self.values[cur_pair][name] = val
        # set the default values, without overriding a coeff already set
        for name, val in self.default_coeff.items():
            if name not in self.values[cur_pair]:
                self.values[cur_pair][name] = val

    def verify(self, required_coeffs):
        """Verify that the set parameters form a full matrix with all values set.

        Args:
            required_coeffs: list of coefficient names each pair must define.

        Returns:
            True when every type pair in the simulation has all required
            coefficients, False otherwise (details are logged).

        Can only be run after the system has been initialized.
        """
        # first, check that the system has been initialized
        if not hoomd.init.is_initialized():
            hoomd.context.msg.error("Cannot verify pair coefficients before initialization\n")
            raise RuntimeError('Error verifying pair coefficients')
        # get a list of types from the particle data
        pdata = hoomd.context.current.system_definition.getParticleData()
        ntypes = pdata.getNTypes()
        type_list = [pdata.getNameByType(i) for i in range(ntypes)]
        valid = True
        # loop over all possible pairs and verify that all required variables are set
        for i in range(ntypes):
            for j in range(i, ntypes):
                a = type_list[i]
                b = type_list[j]
                # find which half of the pair is set
                if (a, b) in self.values:
                    cur_pair = (a, b)
                elif (b, a) in self.values:
                    cur_pair = (b, a)
                else:
                    hoomd.context.msg.error("Type pair " + str((a, b)) + " not found in pair coeff\n")
                    valid = False
                    continue
                # verify that all required values are set by counting the matches
                count = 0
                for coeff_name in self.values[cur_pair].keys():
                    if coeff_name not in required_coeffs:
                        hoomd.context.msg.notice(2, "Notice: Possible typo? Pair coeff " + str(coeff_name) + " is specified for pair " + str((a, b)) +
                                                 ", but is not used by the pair force\n")
                    else:
                        count += 1
                if count != len(required_coeffs):
                    hoomd.context.msg.error("Type pair " + str((a, b)) + " is missing required coefficients\n")
                    valid = False
        return valid

    def get(self, a, b, coeff_name):
        """Return coefficient *coeff_name* for pair (a, b), or None when unset.

        Either ordering of the pair is accepted.
        """
        if (a, b) in self.values:
            cur_pair = (a, b)
        elif (b, a) in self.values:
            cur_pair = (b, a)
        else:
            return None
        return self.values[cur_pair].get(coeff_name)
class pair(force._force):
R""" Common pair potential documentation.
Users should not invoke :py:class:`pair` directly. It is a base command that provides common
features to all standard pair forces. Common documentation for all pair potentials is documented here.
All pair force commands specify that a given potential energy and force be computed on all non-excluded particle
pairs in the system within a short range cutoff distance :math:`r_{\mathrm{cut}}`.
The force :math:`\vec{F}` applied between each pair of particles is:
.. math::
:nowrap:
\begin{eqnarray*}
\vec{F} = & -\nabla V(r) & r < r_{\mathrm{cut}} \\
= & 0 & r \ge r_{\mathrm{cut}} \\
\end{eqnarray*}
where :math:`\vec{r}` is the vector pointing from one particle to the other in the pair, and :math:`V(r)` is
chosen by a mode switch (see :py:meth:`set_params()`):
.. math::
:nowrap:
\begin{eqnarray*}
V(r) = & V_{\mathrm{pair}}(r) & \mathrm{mode\ is\ no\_shift} \\
= & V_{\mathrm{pair}}(r) - V_{\mathrm{pair}}(r_{\mathrm{cut}}) & \mathrm{mode\ is\ shift} \\
= & S(r) \cdot V_{\mathrm{pair}}(r) & \mathrm{mode\ is\ xplor\ and\ } r_{\mathrm{on}} < r_{\mathrm{cut}} \\
= & V_{\mathrm{pair}}(r) - V_{\mathrm{pair}}(r_{\mathrm{cut}}) & \mathrm{mode\ is\ xplor\ and\ } r_{\mathrm{on}} \ge r_{\mathrm{cut}}
\end{eqnarray*}
:math:`S(r)` is the XPLOR smoothing function:
.. math::
:nowrap:
\begin{eqnarray*}
S(r) = & 1 & r < r_{\mathrm{on}} \\
= & \frac{(r_{\mathrm{cut}}^2 - r^2)^2 \cdot (r_{\mathrm{cut}}^2 + 2r^2 -
3r_{\mathrm{on}}^2)}{(r_{\mathrm{cut}}^2 - r_{\mathrm{on}}^2)^3}
& r_{\mathrm{on}} \le r \le r_{\mathrm{cut}} \\
= & 0 & r > r_{\mathrm{cut}} \\
\end{eqnarray*}
and :math:`V_{\mathrm{pair}}(r)` is the specific pair potential chosen by the respective command.
Enabling the XPLOR smoothing function :math:`S(r)` results in both the potential energy and the force going smoothly
to 0 at :math:`r = r_{\mathrm{cut}}`, reducing the rate of energy drift in long simulations.
:math:`r_{\mathrm{on}}` controls the point at which the smoothing starts, so it can be set to only slightly modify
the tail of the potential. It is suggested that you plot your potentials with various values of
:math:`r_{\mathrm{on}}` in order to find a good balance between a smooth potential function and minimal modification
of the original :math:`V_{\mathrm{pair}}(r)`. A good value for the LJ potential is
:math:`r_{\mathrm{on}} = 2 \cdot \sigma`.
The split smoothing / shifting of the potential when the mode is ``xplor`` is designed for use in mixed WCA / LJ
systems. The WCA potential and its first derivative already go smoothly to 0 at the cutoff, so there is no need
to apply the smoothing function. In such mixed systems, set :math:`r_{\mathrm{on}}` to a value greater than
:math:`r_{\mathrm{cut}}` for those pairs that interact via WCA in order to enable shifting of the WCA potential
to 0 at the cutoff.
The following coefficients must be set per unique pair of particle types. See :py:mod:`hoomd.md.pair` for information
on how to set coefficients:
- :math:`r_{\mathrm{cut}}` - *r_cut* (in distance units)
- *optional*: defaults to the global r_cut specified in the pair command
- :math:`r_{\mathrm{on}}` - *r_on* (in distance units)
- *optional*: defaults to the global r_cut specified in the pair command
When :math:`r_{\mathrm{cut}} \le 0` or is set to False, the particle type pair interaction is excluded from the neighbor
list. This mechanism can be used in conjunction with multiple neighbor lists to make efficient calculations in systems
with large size disparity. Functionally, this is equivalent to setting :math:`r_{\mathrm{cut}} = 0` in the pair force
because negative :math:`r_{\mathrm{cut}}` has no physical meaning.
"""
## \internal
# \brief Initialize the pair force
# \details
# The derived class must set
# - self.cpp_class (the pair class to instantiate)
# - self.required_coeffs (a list of the coeff names the derived class needs)
# - self.process_coeffs() (a method that takes in the coeffs and spits out a param struct to use in
# self.cpp_force.set_params())
def __init__(self, r_cut, nlist, name=None):
    """Base-class setup shared by all pair potentials.

    Stores the global cutoff, prepares the coefficient matrix with
    r_cut/r_on defaults, and registers with the neighbor list.
    """
    force._force.__init__(self, name)
    # A cutoff of False disables the interaction; encode it as -1.0
    self.global_r_cut = -1.0 if r_cut is False else r_cut
    # Coefficient matrix, with the global cutoff as the per-pair default
    self.pair_coeff = coeff()
    for default_name in ('r_cut', 'r_on'):
        self.pair_coeff.set_default_coeff(default_name, self.global_r_cut)
    # Subscribe so the neighbor list tracks this force's cutoffs
    self.nlist = nlist
    self.nlist.subscribe(lambda: self.get_rcut())
    self.nlist.update_rcut()
def set_params(self, mode=None):
    R""" Set parameters controlling the way forces are computed.

    Args:
        mode (str): (if set) Set the mode with which potentials are handled at the cutoff.

    Valid values for *mode* are: "none" (the default), "shift", and "xplor":

    - **none** - No shifting is performed and potentials are abruptly cut off
    - **shift** - A constant shift is applied to the entire potential so that it is 0 at the cutoff
    - **xplor** - A smoothing function is applied to gradually decrease both the force and potential
      to 0 at the cutoff when ron < rcut, and shifts the potential to 0 at the cutoff when
      ron >= rcut. See :py:class:`pair` for the equations.

    Examples::

        mypair.set_params(mode="shift")
        mypair.set_params(mode="no_shift")
        mypair.set_params(mode="xplor")

    Raises:
        RuntimeError: when *mode* is not one of the recognized names.
    """
    hoomd.util.print_status_line()
    if mode is None:
        return
    shift_enum = self.cpp_class.energyShiftMode
    dispatch = {
        'no_shift': shift_enum.no_shift,
        'shift': shift_enum.shift,
        'xplor': shift_enum.xplor,
    }
    if mode not in dispatch:
        hoomd.context.msg.error("Invalid mode\n")
        raise RuntimeError("Error changing parameters in pair force")
    self.cpp_force.setShiftMode(dispatch[mode])
def process_coeff(self, coeff):
    # Abstract hook: each concrete pair potential overrides this to turn
    # the coefficient dict into the C++ parameter struct.  Reaching this
    # base implementation means the subclass forgot to -- a bug.
    hoomd.context.msg.error("Bug in hoomd_script, please report\n");
    raise RuntimeError("Error processing coefficients");
def update_coeffs(self):
    """Verify the coefficient matrix and push it into the C++ force compute.

    Raises:
        RuntimeError: when any required coefficient is missing.
    """
    coeff_list = self.required_coeffs + ["r_cut", "r_on"]
    # Refuse to run with an incomplete coefficient matrix
    if not self.pair_coeff.verify(coeff_list):
        hoomd.context.msg.error("Not all pair coefficients are set\n")
        raise RuntimeError("Error updating pair coefficients")
    # Enumerate the particle types currently in the simulation
    pdata = hoomd.context.current.system_definition.getParticleData()
    ntypes = pdata.getNTypes()
    type_list = [pdata.getNameByType(idx) for idx in range(ntypes)]
    for i in range(ntypes):
        for j in range(i, ntypes):
            coeff_dict = {
                coeff_name: self.pair_coeff.get(type_list[i], type_list[j], coeff_name)
                for coeff_name in coeff_list
            }
            self.cpp_force.setParams(i, j, self.process_coeff(coeff_dict))
            # rcut can now have "invalid" C++ values, which we round up to zero
            self.cpp_force.setRcut(i, j, max(coeff_dict['r_cut'], 0.0))
            self.cpp_force.setRon(i, j, max(coeff_dict['r_on'], 0.0))
## \internal
# \brief Get the maximum r_cut value set for any type pair
# \pre update_coeffs must be called before get_max_rcut to verify that the coeffs are set
def get_max_rcut(self):
# go through the list of only the active particle types in the sim
ntypes = hoomd.context.current.system_definition.getParticleData().getNTypes();
type_list = [];
for i in range(0,ntypes):
type_list.append(hoomd.context.current.system_definition.getParticleData().getNameByType(i));
# find the maximum r_cut
max_rcut = 0.0;
for i in range(0,ntypes):
for j in range(i,ntypes):
# get the r_cut value
r_cut = self.pair_coeff.get(type_list[i], type_list[j], 'r_cut');
max_rcut = max(max_rcut, r_cut);
return max_rcut;
## \internal
# \brief Get the r_cut pair dictionary
# \returns The rcut(i,j) dict if logging is on, and None if logging is off
def get_rcut(self):
if not self.log:
return None
# go through the list of only the active particle types in the sim
ntypes = hoomd.context.current.system_definition.getParticleData().getNTypes();
type_list = [];
for i in range(0,ntypes):
type_list.append(hoomd.context.current.system_definition.getParticleData().getNameByType(i));
# update the rcut by pair type
r_cut_dict = nl.rcut();
for i in range(0,ntypes):
for j in range(i,ntypes):
# get the r_cut value
r_cut = self.pair_coeff.get(type_list[i], type_list[j], 'r_cut');
if r_cut is not None: # use the defined value
if r_cut is False: # interaction is turned off
r_cut_dict.set_pair(type_list[i],type_list[j], -1.0);
else:
r_cut_dict.set_pair(type_list[i],type_list[j], r_cut);
else: # use the global default
r_cut_dict.set_pair(type_list[i],type_list[j],self.global_r_cut);
return r_cut_dict;
## \internal
# \brief Return metadata for this pair potential
def get_metadata(self):
data = force._force.get_metadata(self)
# make sure all coefficients are set
self.update_coeffs()
data['pair_coeff'] = self.pair_coeff
return data
    def compute_energy(self, tags1, tags2):
        R""" Compute the energy between two sets of particles.
        Args:
            tags1 (``ndarray<int32>``): a numpy array of particle tags in the first group
            tags2 (``ndarray<int32>``): a numpy array of particle tags in the second group
        .. math::
            U = \sum_{i \in \mathrm{tags1}, j \in \mathrm{tags2}} V_{ij}(r)
        where :math:`V_{ij}(r)` is the pairwise energy between two particles :math:`i` and :math:`j`.
        Assumed properties of the sets *tags1* and *tags2* are:
        - *tags1* and *tags2* are disjoint
        - all elements in *tags1* and *tags2* are unique
        - *tags1* and *tags2* are contiguous numpy arrays of dtype int32
        None of these properties are validated.
        Examples::
            tags = numpy.arange(0, N, dtype=numpy.int32)
            # computes the energy between even and odd particles
            U = mypair.compute_energy(tags1=numpy.array(tags[0:N:2]), tags2=numpy.array(tags[1:N:2]))
        """
        # future versions could use np functions to test the assumptions above and raise an error if they occur.
        return self.cpp_force.computeEnergyBetweenSets(tags1, tags2);
class lj(pair):
    R""" Lennard-Jones pair potential.
    Args:
        r_cut (float): Default cutoff radius (in distance units).
        nlist (:py:mod:`hoomd.md.nlist`): Neighbor list
        name (str): Name of the force instance.
    :py:class:`lj` applies the Lennard-Jones pair potential between every
    non-excluded particle pair in the simulation.
    .. math::
        :nowrap:
        \begin{eqnarray*}
        V_{\mathrm{LJ}}(r)  = & 4 \varepsilon \left[ \left( \frac{\sigma}{r} \right)^{12} -
                          \alpha \left( \frac{\sigma}{r} \right)^{6} \right] & r < r_{\mathrm{cut}} \\
                            = & 0 & r \ge r_{\mathrm{cut}} \\
        \end{eqnarray*}
    See :py:class:`pair` for details on how forces are calculated and the available energy shifting and smoothing modes.
    Use :py:meth:`pair_coeff.set <coeff.set>` to set potential coefficients.
    The following coefficients must be set per unique pair of particle types:
    - :math:`\varepsilon` - *epsilon* (in energy units)
    - :math:`\sigma` - *sigma* (in distance units)
    - :math:`\alpha` - *alpha* (unitless) - *optional*: defaults to 1.0
    - :math:`r_{\mathrm{cut}}` - *r_cut* (in distance units)
      - *optional*: defaults to the global r_cut specified in the pair command
    - :math:`r_{\mathrm{on}}` - *r_on* (in distance units)
      - *optional*: defaults to the global r_cut specified in the pair command
    Example::
        nl = nlist.cell()
        lj = pair.lj(r_cut=3.0, nlist=nl)
        lj.pair_coeff.set('A', 'A', epsilon=1.0, sigma=1.0)
        lj.pair_coeff.set('A', 'B', epsilon=2.0, sigma=1.0, alpha=0.5, r_cut=3.0, r_on=2.0);
        lj.pair_coeff.set('B', 'B', epsilon=1.0, sigma=1.0, r_cut=2**(1.0/6.0), r_on=2.0);
        lj.pair_coeff.set(['A', 'B'], ['C', 'D'], epsilon=1.5, sigma=2.0)
    """
    def __init__(self, r_cut, nlist, name=None):
        hoomd.util.print_status_line();
        # initialize the base class
        pair.__init__(self, r_cut, nlist, name);
        # select the CPU or GPU C++ implementation
        if hoomd.context.exec_conf.isCUDAEnabled():
            self.nlist.cpp_nlist.setStorageMode(_md.NeighborList.storageMode.full);
            self.cpp_class = _md.PotentialPairLJGPU;
        else:
            self.cpp_class = _md.PotentialPairLJ;
        self.cpp_force = self.cpp_class(hoomd.context.current.system_definition, self.nlist.cpp_nlist, self.name);
        hoomd.context.current.system.addCompute(self.cpp_force, self.force_name);
        # declare the per-pair coefficients and their defaults
        self.required_coeffs = ['epsilon', 'sigma', 'alpha'];
        self.pair_coeff.set_default_coeff('alpha', 1.0);
    def process_coeff(self, coeff):
        # fold (epsilon, sigma, alpha) into the two prefactors used by C++:
        # lj1 = 4*eps*sigma^12, lj2 = alpha*4*eps*sigma^6
        eps = coeff['epsilon'];
        sig = coeff['sigma'];
        alpha = coeff['alpha'];
        lj1 = 4.0 * eps * math.pow(sig, 12.0);
        lj2 = alpha * 4.0 * eps * math.pow(sig, 6.0);
        return _hoomd.make_scalar2(lj1, lj2);
class gauss(pair):
    R""" Gaussian pair potential.
    Args:
        r_cut (float): Default cutoff radius (in distance units).
        nlist (:py:mod:`hoomd.md.nlist`): Neighbor list
        name (str): Name of the force instance.
    :py:class:`gauss` applies a Gaussian pair potential between every
    non-excluded particle pair in the simulation.
    .. math::
        :nowrap:
        \begin{eqnarray*}
        V_{\mathrm{gauss}}(r)  = & \varepsilon \exp \left[ -\frac{1}{2}\left( \frac{r}{\sigma} \right)^2 \right]
                                 & r < r_{\mathrm{cut}} \\
                               = & 0 & r \ge r_{\mathrm{cut}} \\
        \end{eqnarray*}
    See :py:class:`pair` for details on how forces are calculated and the available energy shifting and smoothing modes.
    Use :py:meth:`pair_coeff.set <coeff.set>` to set potential coefficients.
    The following coefficients must be set per unique pair of particle types:
    - :math:`\varepsilon` - *epsilon* (in energy units)
    - :math:`\sigma` - *sigma* (in distance units)
    - :math:`r_{\mathrm{cut}}` - *r_cut* (in distance units)
      - *optional*: defaults to the global r_cut specified in the pair command
    - :math:`r_{\mathrm{on}}` - *r_on* (in distance units)
      - *optional*: defaults to the global r_cut specified in the pair command
    Example::
        nl = nlist.cell()
        gauss = pair.gauss(r_cut=3.0, nlist=nl)
        gauss.pair_coeff.set('A', 'A', epsilon=1.0, sigma=1.0)
        gauss.pair_coeff.set('A', 'B', epsilon=2.0, sigma=1.0, r_cut=3.0, r_on=2.0);
        gauss.pair_coeff.set(['A', 'B'], ['C', 'D'], epsilon=3.0, sigma=0.5)
    """
    def __init__(self, r_cut, nlist, name=None):
        hoomd.util.print_status_line();
        # initialize the base class
        pair.__init__(self, r_cut, nlist, name);
        # select the CPU or GPU C++ implementation
        if hoomd.context.exec_conf.isCUDAEnabled():
            self.nlist.cpp_nlist.setStorageMode(_md.NeighborList.storageMode.full);
            self.cpp_class = _md.PotentialPairGaussGPU;
        else:
            self.cpp_class = _md.PotentialPairGauss;
        self.cpp_force = self.cpp_class(hoomd.context.current.system_definition, self.nlist.cpp_nlist, self.name);
        hoomd.context.current.system.addCompute(self.cpp_force, self.force_name);
        # declare the per-pair coefficients
        self.required_coeffs = ['epsilon', 'sigma'];
    def process_coeff(self, coeff):
        # (epsilon, sigma) are passed straight through to the C++ evaluator
        return _hoomd.make_scalar2(coeff['epsilon'], coeff['sigma']);
class slj(pair):
    R""" Shifted Lennard-Jones pair potential.
    Args:
        r_cut (float): Default cutoff radius (in distance units).
        nlist (:py:mod:`hoomd.md.nlist`): Neighbor list
        name (str): Name of the force instance.
        d_max (float): Maximum diameter particles in the simulation will have (in distance units)
    :py:class:`slj` specifies that a shifted Lennard-Jones type pair potential should be applied between every
    non-excluded particle pair in the simulation.
    .. math::
        :nowrap:
        \begin{eqnarray*}
        V_{\mathrm{SLJ}}(r)  = & 4 \varepsilon \left[ \left( \frac{\sigma}{r - \Delta} \right)^{12} -
                               \left( \frac{\sigma}{r - \Delta} \right)^{6} \right] & r < (r_{\mathrm{cut}} + \Delta) \\
                             = & 0 & r \ge (r_{\mathrm{cut}} + \Delta) \\
        \end{eqnarray*}
    where :math:`\Delta = (d_i + d_j)/2 - 1` and :math:`d_i` is the diameter of particle :math:`i`.
    See :py:class:`pair` for details on how forces are calculated and the available energy shifting and smoothing modes.
    Use :py:meth:`pair_coeff.set <coeff.set>` to set potential coefficients.
    The following coefficients must be set per unique pair of particle types:
    - :math:`\varepsilon` - *epsilon* (in energy units)
    - :math:`\sigma` - *sigma* (in distance units)
      - *optional*: defaults to 1.0
    - :math:`r_{\mathrm{cut}}` - *r_cut* (in distance units)
      - *optional*: defaults to the global r_cut specified in the pair command
    .. attention::
        Due to the way that pair.slj modifies the cutoff criteria, a shift_mode of *xplor* is not supported.
    The actual cutoff radius for pair.slj is shifted by the diameter of two particles interacting. Thus to determine
    the maximum possible actual r_cut in simulation
    pair.slj must know the maximum diameter of all the particles over the entire run, *d_max* .
    This value is either determined automatically from the initialization or can be set by the user and can be
    modified between runs with :py:meth:`hoomd.md.nlist.nlist.set_params()`. In most cases, the correct value can be
    identified automatically.
    The specified value of *d_max* will be used to properly determine the neighbor lists during the following
    :py:func:`hoomd.run()` commands. If not specified, :py:class:`slj` will set d_max to the largest diameter
    in particle data at the time it is initialized.
    If particle diameters change after initialization, it is **imperative** that *d_max* be the largest
    diameter that any particle will attain at any time during the following :py:func:`hoomd.run()` commands.
    If *d_max* is smaller than it should be, some particles will effectively have a smaller value of *r_cut*
    then was set and the simulation will be incorrect. *d_max* can be changed between runs by calling
    :py:meth:`hoomd.md.nlist.nlist.set_params()`.
    Example::
        nl = nlist.cell()
        slj = pair.slj(r_cut=3.0, nlist=nl, d_max = 2.0)
        slj.pair_coeff.set('A', 'A', epsilon=1.0)
        slj.pair_coeff.set('A', 'B', epsilon=2.0, r_cut=3.0);
        slj.pair_coeff.set('B', 'B', epsilon=1.0, r_cut=2**(1.0/6.0));
        slj.pair_coeff.set(['A', 'B'], ['C', 'D'], epsilon=2.0)
    """
    def __init__(self, r_cut, nlist, d_max=None, name=None):
        hoomd.util.print_status_line();
        # initialize the base class
        pair.__init__(self, r_cut, nlist, name);
        # determine the largest particle diameter when the user did not supply one
        if d_max is None :
            sysdef = hoomd.context.current.system_definition;
            d_max = sysdef.getParticleData().getMaxDiameter()
            hoomd.context.msg.notice(2, "Notice: slj set d_max=" + str(d_max) + "\n");
        # SLJ requires diameter shifting to be on
        self.nlist.cpp_nlist.setDiameterShift(True);
        self.nlist.cpp_nlist.setMaximumDiameter(d_max);
        # create the c++ mirror class
        if not hoomd.context.exec_conf.isCUDAEnabled():
            self.cpp_force = _md.PotentialPairSLJ(hoomd.context.current.system_definition, self.nlist.cpp_nlist, self.name);
            self.cpp_class = _md.PotentialPairSLJ;
        else:
            self.nlist.cpp_nlist.setStorageMode(_md.NeighborList.storageMode.full);
            self.cpp_force = _md.PotentialPairSLJGPU(hoomd.context.current.system_definition, self.nlist.cpp_nlist, self.name);
            self.cpp_class = _md.PotentialPairSLJGPU;
        hoomd.context.current.system.addCompute(self.cpp_force, self.force_name);
        # setup the coefficient options
        self.required_coeffs = ['epsilon', 'sigma', 'alpha'];
        self.pair_coeff.set_default_coeff('alpha', 1.0);
    def process_coeff(self, coeff):
        # fold (epsilon, sigma, alpha) into the two prefactors used by C++:
        # lj1 = 4*eps*sigma^12, lj2 = alpha*4*eps*sigma^6
        epsilon = coeff['epsilon'];
        sigma = coeff['sigma'];
        alpha = coeff['alpha'];
        lj1 = 4.0 * epsilon * math.pow(sigma, 12.0);
        lj2 = alpha * 4.0 * epsilon * math.pow(sigma, 6.0);
        return _hoomd.make_scalar2(lj1, lj2);
    def set_params(self, mode=None):
        R""" Set parameters controlling the way forces are computed.
        See :py:meth:`pair.set_params()`.
        Note:
            **xplor** is not a valid setting for :py:class:`slj`.
        """
        hoomd.util.print_status_line();
        # reject xplor: the shifted cutoff criteria is incompatible with it
        if mode == "xplor":
            hoomd.context.msg.error("XPLOR smoothing is not supported with slj\n");
            raise RuntimeError("Error changing parameters in pair force");
        pair.set_params(self, mode=mode);
class yukawa(pair):
    R""" Yukawa pair potential.
    Args:
        r_cut (float): Default cutoff radius (in distance units).
        nlist (:py:mod:`hoomd.md.nlist`): Neighbor list
        name (str): Name of the force instance.
    :py:class:`yukawa` specifies that a Yukawa pair potential should be applied between every
    non-excluded particle pair in the simulation.
    .. math::
        :nowrap:
        \begin{eqnarray*}
        V_{\mathrm{yukawa}}(r)  = & \varepsilon \frac{ \exp \left( -\kappa r \right) }{r} & r < r_{\mathrm{cut}} \\
                            = & 0 & r \ge r_{\mathrm{cut}} \\
        \end{eqnarray*}
    See :py:class:`pair` for details on how forces are calculated and the available energy shifting and smoothing modes.
    Use :py:meth:`pair_coeff.set <coeff.set>` to set potential coefficients.
    The following coefficients must be set per unique pair of particle types:
    - :math:`\varepsilon` - *epsilon* (in energy units)
    - :math:`\kappa` - *kappa* (in units of 1/distance)
    - :math:`r_{\mathrm{cut}}` - *r_cut* (in distance units)
      - *optional*: defaults to the global r_cut specified in the pair command
    - :math:`r_{\mathrm{on}}` - *r_on* (in distance units)
      - *optional*: defaults to the global r_cut specified in the pair command
    Example::
        nl = nlist.cell()
        yukawa = pair.yukawa(r_cut=3.0, nlist=nl)
        yukawa.pair_coeff.set('A', 'A', epsilon=1.0, kappa=1.0)
        yukawa.pair_coeff.set('A', 'B', epsilon=2.0, kappa=0.5, r_cut=3.0, r_on=2.0);
        yukawa.pair_coeff.set(['A', 'B'], ['C', 'D'], epsilon=0.5, kappa=3.0)
    """
    def __init__(self, r_cut, nlist, name=None):
        hoomd.util.print_status_line();
        # initialize the base class
        pair.__init__(self, r_cut, nlist, name);
        # create the c++ mirror class (CPU or GPU variant)
        if not hoomd.context.exec_conf.isCUDAEnabled():
            self.cpp_force = _md.PotentialPairYukawa(hoomd.context.current.system_definition, self.nlist.cpp_nlist, self.name);
            self.cpp_class = _md.PotentialPairYukawa;
        else:
            self.nlist.cpp_nlist.setStorageMode(_md.NeighborList.storageMode.full);
            self.cpp_force = _md.PotentialPairYukawaGPU(hoomd.context.current.system_definition, self.nlist.cpp_nlist, self.name);
            self.cpp_class = _md.PotentialPairYukawaGPU;
        hoomd.context.current.system.addCompute(self.cpp_force, self.force_name);
        # setup the coefficient options
        self.required_coeffs = ['epsilon', 'kappa'];
    def process_coeff(self, coeff):
        # (epsilon, kappa) are passed straight through to the C++ evaluator
        epsilon = coeff['epsilon'];
        kappa = coeff['kappa'];
        return _hoomd.make_scalar2(epsilon, kappa);
class ewald(pair):
    R""" Ewald pair potential.
    Args:
        r_cut (float): Default cutoff radius (in distance units).
        nlist (:py:mod:`hoomd.md.nlist`): Neighbor list
        name (str): Name of the force instance.
    :py:class:`ewald` specifies that a Ewald pair potential should be applied between every
    non-excluded particle pair in the simulation.
    .. math::
        :nowrap:
        \begin{eqnarray*}
         V_{\mathrm{ewald}}(r)  = & q_i q_j \left[\mathrm{erfc}\left(\kappa r + \frac{\alpha}{2\kappa}\right) \exp(\alpha r) +
                                    \mathrm{erfc}\left(\kappa r - \frac{\alpha}{2 \kappa}\right) \exp(-\alpha r)\right] & r < r_{\mathrm{cut}} \\
                            = & 0 & r \ge r_{\mathrm{cut}} \\
        \end{eqnarray*}
    The Ewald potential is designed to be used in conjunction with :py:class:`hoomd.md.charge.pppm`.
    See :py:class:`pair` for details on how forces are calculated and the available energy shifting and smoothing modes.
    Use :py:meth:`pair_coeff.set <coeff.set>` to set potential coefficients.
    The following coefficients must be set per unique pair of particle types:
    - :math:`\kappa` - *kappa* (Splitting parameter, in 1/distance units)
    - :math:`\alpha` - *alpha* (Debye screening length, in 1/distance units)
      .. versionadded:: 2.1
    - :math:`r_{\mathrm{cut}}` - *r_cut* (in distance units)
      - *optional*: defaults to the global r_cut specified in the pair command
    - :math:`r_{\mathrm{on}}` - *r_on* (in distance units)
      - *optional*: defaults to the global r_cut specified in the pair command
    Example::
        nl = nlist.cell()
        ewald = pair.ewald(r_cut=3.0, nlist=nl)
        ewald.pair_coeff.set('A', 'A', kappa=1.0)
        ewald.pair_coeff.set('A', 'A', kappa=1.0, alpha=1.5)
        ewald.pair_coeff.set('A', 'B', kappa=1.0, r_cut=3.0, r_on=2.0);
    Warning:
        **DO NOT** use in conjunction with :py:class:`hoomd.md.charge.pppm`. It automatically creates and configures
        :py:class:`ewald` for you.
    """
    def __init__(self, r_cut, nlist, name=None):
        hoomd.util.print_status_line();
        # initialize the base class
        pair.__init__(self, r_cut, nlist, name);
        # create the c++ mirror class (CPU or GPU variant)
        if not hoomd.context.exec_conf.isCUDAEnabled():
            self.cpp_force = _md.PotentialPairEwald(hoomd.context.current.system_definition, self.nlist.cpp_nlist, self.name);
            self.cpp_class = _md.PotentialPairEwald;
        else:
            self.nlist.cpp_nlist.setStorageMode(_md.NeighborList.storageMode.full);
            self.cpp_force = _md.PotentialPairEwaldGPU(hoomd.context.current.system_definition, self.nlist.cpp_nlist, self.name);
            self.cpp_class = _md.PotentialPairEwaldGPU;
        hoomd.context.current.system.addCompute(self.cpp_force, self.force_name);
        # setup the coefficient options
        self.required_coeffs = ['kappa','alpha'];
        self.pair_coeff.set_default_coeff('alpha', 0.0);
    def process_coeff(self, coeff):
        # (kappa, alpha) are passed straight through to the C++ evaluator
        kappa = coeff['kappa'];
        alpha = coeff['alpha'];
        return _hoomd.make_scalar2(kappa, alpha)
    def set_params(self, coeff):
        """ :py:class:`ewald` has no energy shift modes """
        # deliberately disabled; the old message incorrectly referenced a
        # different potential ("DPD Conservative")
        raise RuntimeError('Energy shift modes are not supported by pair.ewald');
def _table_eval(r, rmin, rmax, V, F, width):
dr = (rmax - rmin) / float(width-1);
i = int(round((r - rmin)/dr))
return (V[i], F[i])
class table(force._force):
    R""" Tabulated pair potential.
    Args:
        width (int): Number of points to use to interpolate V and F.
        nlist (:py:mod:`hoomd.md.nlist`): Neighbor list (default of None automatically creates a global cell-list based neighbor list)
        name (str): Name of the force instance
    :py:class:`table` specifies that a tabulated pair potential should be applied between every
    non-excluded particle pair in the simulation.
    The force :math:`\vec{F}` is (in force units):
    .. math::
        :nowrap:
        \begin{eqnarray*}
        \vec{F}(\vec{r})     = & 0                           & r < r_{\mathrm{min}} \\
                             = & F_{\mathrm{user}}(r)\hat{r} & r_{\mathrm{min}} \le r < r_{\mathrm{max}} \\
                             = & 0                           & r \ge r_{\mathrm{max}} \\
        \end{eqnarray*}
    and the potential :math:`V(r)` is (in energy units)
    .. math::
        :nowrap:
        \begin{eqnarray*}
        V(r)  = & 0                    & r < r_{\mathrm{min}} \\
              = & V_{\mathrm{user}}(r) & r_{\mathrm{min}} \le r < r_{\mathrm{max}} \\
              = & 0                    & r \ge r_{\mathrm{max}} \\
        \end{eqnarray*}
    where :math:`\vec{r}` is the vector pointing from one particle to the other in the pair.
    :math:`F_{\mathrm{user}}(r)` and :math:`V_{\mathrm{user}}(r)` are evaluated on *width* grid points between
    :math:`r_{\mathrm{min}}` and :math:`r_{\mathrm{max}}`. Values are interpolated linearly between grid points.
    For correctness, you must specify the force defined by: :math:`F = -\frac{\partial V}{\partial r}`.
    The following coefficients must be set per unique pair of particle types:
    - :math:`V_{\mathrm{user}}(r)` and :math:`F_{\mathrm{user}}(r)` - evaluated by ``func`` (see example)
    - coefficients passed to ``func`` - *coeff* (see example)
    - :math:`r_{\mathrm{min}}` - *rmin* (in distance units)
    - :math:`r_{\mathrm{max}}` - *rmax* (in distance units)
    .. rubric:: Set table from a given function
    When you have a functional form for V and F, you can enter that
    directly into python. :py:class:`table` will evaluate the given function over *width* points between
    *rmin* and *rmax* and use the resulting values in the table::
        def lj(r, rmin, rmax, epsilon, sigma):
            V = 4 * epsilon * ( (sigma / r)**12 - (sigma / r)**6);
            F = 4 * epsilon / r * ( 12 * (sigma / r)**12 - 6 * (sigma / r)**6);
            return (V, F)
        table = pair.table(width=1000)
        table.pair_coeff.set('A', 'A', func=lj, rmin=0.8, rmax=3.0, coeff=dict(epsilon=1.5, sigma=1.0))
        table.pair_coeff.set('A', 'B', func=lj, rmin=0.8, rmax=3.0, coeff=dict(epsilon=2.0, sigma=1.2))
        table.pair_coeff.set('B', 'B', func=lj, rmin=0.8, rmax=3.0, coeff=dict(epsilon=0.5, sigma=1.0))
    .. rubric:: Set a table from a file
    When you have no function for for *V* or *F*, or you otherwise have the data listed in a file,
    :py:class:`table` can use the given values directly. You must first specify the number of rows
    in your tables when initializing pair.table. Then use :py:meth:`set_from_file()` to read the file::
        nl = nlist.cell()
        table = pair.table(width=1000, nlist=nl)
        table.set_from_file('A', 'A', filename='table_AA.dat')
        table.set_from_file('A', 'B', filename='table_AB.dat')
        table.set_from_file('B', 'B', filename='table_BB.dat')
    Note:
        For potentials that diverge near r=0, make sure to set *rmin* to a reasonable value. If a potential does
        not diverge near r=0, then a setting of *rmin=0* is valid.
    """
    def __init__(self, width, nlist, name=None):
        hoomd.util.print_status_line();
        # initialize the base class
        force._force.__init__(self, name);
        # setup the coefficient matrix
        self.pair_coeff = coeff();
        # subscribe to the neighbor list so it can query our cutoffs
        self.nlist = nlist
        self.nlist.subscribe(lambda:self.get_rcut())
        self.nlist.update_rcut()
        # create the c++ mirror class (CPU or GPU variant)
        if not hoomd.context.exec_conf.isCUDAEnabled():
            self.cpp_force = _md.TablePotential(hoomd.context.current.system_definition, self.nlist.cpp_nlist, int(width), self.name);
        else:
            self.nlist.cpp_nlist.setStorageMode(_md.NeighborList.storageMode.full);
            self.cpp_force = _md.TablePotentialGPU(hoomd.context.current.system_definition, self.nlist.cpp_nlist, int(width), self.name);
        hoomd.context.current.system.addCompute(self.cpp_force, self.force_name);
        # stash the width for later use
        self.width = width;
    def update_pair_table(self, typei, typej, func, rmin, rmax, coeff):
        # Evaluate func on the `width` grid points and hand the resulting
        # V/F tables to the C++ compute for the (typei, typej) pair.
        Vtable = _hoomd.std_vector_scalar();
        Ftable = _hoomd.std_vector_scalar();
        # calculate dr
        dr = (rmax - rmin) / float(self.width-1);
        # evaluate each point of the function
        for i in range(0, self.width):
            r = rmin + dr * i;
            (V,F) = func(r, rmin, rmax, **coeff);
            # fill out the tables
            Vtable.append(V);
            Ftable.append(F);
        # pass the tables on to the underlying cpp compute
        self.cpp_force.setTable(typei, typej, Vtable, Ftable, rmin, rmax);
    ## \internal
    # \brief Get the r_cut pair dictionary
    # \returns rcut(i,j) dict if logging is on, and None otherwise
    def get_rcut(self):
        if not self.log:
            return None
        # go through the list of only the active particle types in the sim
        ntypes = hoomd.context.current.system_definition.getParticleData().getNTypes();
        type_list = [];
        for i in range(0,ntypes):
            type_list.append(hoomd.context.current.system_definition.getParticleData().getNameByType(i));
        # update the rcut by pair type; rmax doubles as the cutoff radius
        r_cut_dict = nl.rcut();
        for i in range(0,ntypes):
            for j in range(i,ntypes):
                rmax = self.pair_coeff.get(type_list[i], type_list[j], 'rmax');
                r_cut_dict.set_pair(type_list[i],type_list[j], rmax);
        return r_cut_dict;
    def get_max_rcut(self):
        # loop only over current particle types
        ntypes = hoomd.context.current.system_definition.getParticleData().getNTypes();
        type_list = [];
        for i in range(0,ntypes):
            type_list.append(hoomd.context.current.system_definition.getParticleData().getNameByType(i));
        # loop through all of the unique type pairs and find the maximum rmax
        maxrmax = 0.0;
        for i in range(0,ntypes):
            for j in range(i,ntypes):
                rmax = self.pair_coeff.get(type_list[i], type_list[j], "rmax");
                maxrmax = max(maxrmax, rmax);
        return maxrmax;
    def update_coeffs(self):
        # check that the pair coefficients are valid
        if not self.pair_coeff.verify(["func", "rmin", "rmax", "coeff"]):
            hoomd.context.msg.error("Not all pair coefficients are set for pair.table\n");
            raise RuntimeError("Error updating pair coefficients");
        # set all the params
        ntypes = hoomd.context.current.system_definition.getParticleData().getNTypes();
        type_list = [];
        for i in range(0,ntypes):
            type_list.append(hoomd.context.current.system_definition.getParticleData().getNameByType(i));
        # loop through all of the unique type pairs and evaluate the table
        for i in range(0,ntypes):
            for j in range(i,ntypes):
                func = self.pair_coeff.get(type_list[i], type_list[j], "func");
                rmin = self.pair_coeff.get(type_list[i], type_list[j], "rmin");
                rmax = self.pair_coeff.get(type_list[i], type_list[j], "rmax");
                coeff = self.pair_coeff.get(type_list[i], type_list[j], "coeff");
                self.update_pair_table(i, j, func, rmin, rmax, coeff);
    def set_from_file(self, a, b, filename):
        R""" Set a pair interaction from a file.
        Args:
            a (str): Name of type A in pair
            b (str): Name of type B in pair
            filename (str): Name of the file to read
        The provided file specifies V and F at equally spaced r values.
        Example::
            #r  V    F
            1.0 2.0 -3.0
            1.1 3.0 -4.0
            1.2 2.0 -3.0
            1.3 1.0 -2.0
            1.4 0.0 -1.0
            1.5 -1.0 0.0
        The first r value sets *rmin*, the last sets *rmax*. Any line with # as the first non-whitespace character
        is treated as a comment. The *r* values must monotonically increase and be equally spaced. The table is read
        directly into the grid points used to evaluate :math:`F_{\mathrm{user}}(r)` and :math:`V_{\mathrm{user}}(r)`.
        Raises:
            RuntimeError: If the file does not have exactly 3 columns, does not
                have exactly *width* rows, or the r values are not evenly spaced.
        """
        hoomd.util.print_status_line();
        r_table = [];
        V_table = [];
        F_table = [];
        # read in lines from the file; the with-statement guarantees the
        # handle is closed even when a parse error raises
        with open(filename) as f:
            for line in f.readlines():
                line = line.strip();
                # skip blank lines and comment lines (blank lines would
                # otherwise raise IndexError on line[0])
                if len(line) == 0 or line[0] == '#':
                    continue;
                # split out the columns
                cols = line.split();
                values = [float(c) for c in cols];
                # validate the input
                if len(values) != 3:
                    hoomd.context.msg.error("pair.table: file must have exactly 3 columns\n");
                    raise RuntimeError("Error reading table file");
                # append to the tables
                r_table.append(values[0]);
                V_table.append(values[1]);
                F_table.append(values[2]);
        # validate input
        if self.width != len(r_table):
            hoomd.context.msg.error("pair.table: file must have exactly " + str(self.width) + " rows\n");
            raise RuntimeError("Error reading table file");
        # extract rmin and rmax
        rmin_table = r_table[0];
        rmax_table = r_table[-1];
        # check for even spacing
        dr = (rmax_table - rmin_table) / float(self.width-1);
        for i in range(0,self.width):
            r = rmin_table + dr * i;
            if math.fabs(r - r_table[i]) > 1e-3:
                hoomd.context.msg.error("pair.table: r must be monotonically increasing and evenly spaced\n");
                raise RuntimeError("Error reading table file");
        hoomd.util.quiet_status();
        self.pair_coeff.set(a, b, func=_table_eval, rmin=rmin_table, rmax=rmax_table, coeff=dict(V=V_table, F=F_table, width=self.width))
        hoomd.util.unquiet_status();
class morse(pair):
    R""" Morse pair potential.
    Args:
        r_cut (float): Default cutoff radius (in distance units).
        nlist (:py:mod:`hoomd.md.nlist`): Neighbor list
        name (str): Name of the force instance.
    :py:class:`morse` applies a Morse pair potential between every
    non-excluded particle pair in the simulation.
    .. math::
        :nowrap:
        \begin{eqnarray*}
        V_{\mathrm{morse}}(r) = & D_0 \left[ \exp \left(-2\alpha\left(r-r_0\right)\right) -2\exp \left(-\alpha\left(r-r_0\right)\right) \right] & r < r_{\mathrm{cut}} \\
                              = & 0 & r \ge r_{\mathrm{cut}} \\
        \end{eqnarray*}
    See :py:class:`pair` for details on how forces are calculated and the available energy shifting and smoothing modes.
    Use :py:meth:`pair_coeff.set <coeff.set>` to set potential coefficients.
    The following coefficients must be set per unique pair of particle types:
    - :math:`D_0` - *D0*, depth of the potential at its minimum (in energy units)
    - :math:`\alpha` - *alpha*, controls the width of the potential well (in units of 1/distance)
    - :math:`r_0` - *r0*, position of the minimum (in distance units)
    - :math:`r_{\mathrm{cut}}` - *r_cut* (in distance units)
      - *optional*: defaults to the global r_cut specified in the pair command
    - :math:`r_{\mathrm{on}}` - *r_on* (in distance units)
      - *optional*: defaults to the global r_cut specified in the pair command
    Example::
        nl = nlist.cell()
        morse = pair.morse(r_cut=3.0, nlist=nl)
        morse.pair_coeff.set('A', 'A', D0=1.0, alpha=3.0, r0=1.0)
        morse.pair_coeff.set('A', 'B', D0=1.0, alpha=3.0, r0=1.0, r_cut=3.0, r_on=2.0);
        morse.pair_coeff.set(['A', 'B'], ['C', 'D'], D0=1.0, alpha=3.0)
    """
    def __init__(self, r_cut, nlist, name=None):
        hoomd.util.print_status_line();
        # initialize the base class
        pair.__init__(self, r_cut, nlist, name);
        # select the CPU or GPU C++ implementation
        if hoomd.context.exec_conf.isCUDAEnabled():
            self.nlist.cpp_nlist.setStorageMode(_md.NeighborList.storageMode.full);
            self.cpp_class = _md.PotentialPairMorseGPU;
        else:
            self.cpp_class = _md.PotentialPairMorse;
        self.cpp_force = self.cpp_class(hoomd.context.current.system_definition, self.nlist.cpp_nlist, self.name);
        hoomd.context.current.system.addCompute(self.cpp_force, self.force_name);
        # declare the per-pair coefficients
        self.required_coeffs = ['D0', 'alpha', 'r0'];
    def process_coeff(self, coeff):
        # pack (D0, alpha, r0) into a scalar4; the fourth component is 0.0
        return _hoomd.make_scalar4(coeff['D0'], coeff['alpha'], coeff['r0'], 0.0);
class dpd(pair):
R""" Dissipative Particle Dynamics.
Args:
r_cut (float): Default cutoff radius (in distance units).
nlist (:py:mod:`hoomd.md.nlist`): Neighbor list
kT (:py:mod:`hoomd.variant` or :py:obj:`float`): Temperature of thermostat (in energy units).
seed (int): seed for the PRNG in the DPD thermostat.
name (str): Name of the force instance.
:py:class:`dpd` specifies that a DPD pair force should be applied between every
non-excluded particle pair in the simulation, including an interaction potential,
pairwise drag force, and pairwise random force. See `Groot and Warren 1997 <http://dx.doi.org/10.1063/1.474784>`_.
.. math::
:nowrap:
\begin{eqnarray*}
F = F_{\mathrm{C}}(r) + F_{\mathrm{R,ij}}(r_{ij}) + F_{\mathrm{D,ij}}(v_{ij}) \\
\end{eqnarray*}
.. math::
:nowrap:
\begin{eqnarray*}
F_{\mathrm{C}}(r) = & A \cdot w(r_{ij}) \\
F_{\mathrm{R, ij}}(r_{ij}) = & - \theta_{ij}\sqrt{3} \sqrt{\frac{2k_b\gamma T}{\Delta t}}\cdot w(r_{ij}) \\
F_{\mathrm{D, ij}}(r_{ij}) = & - \gamma w^2(r_{ij})\left( \hat r_{ij} \circ v_{ij} \right) \\
\end{eqnarray*}
.. math::
:nowrap:
\begin{eqnarray*}
w(r_{ij}) = &\left( 1 - r/r_{\mathrm{cut}} \right) & r < r_{\mathrm{cut}} \\
= & 0 & r \ge r_{\mathrm{cut}} \\
\end{eqnarray*}
where :math:`\hat r_{ij}` is a normalized vector from particle i to particle j, :math:`v_{ij} = v_i - v_j`,
and :math:`\theta_{ij}` is a uniformly distributed random number in the range [-1, 1].
:py:class:`dpd` generates random numbers by hashing together the particle tags in the pair, the user seed,
    and the current time step index.
.. attention::
Change the seed if you reset the simulation time step to 0. If you keep the same seed, the simulation
will continue with the same sequence of random numbers used previously and may cause unphysical correlations.
`C. L. Phillips et. al. 2011 <http://dx.doi.org/10.1016/j.jcp.2011.05.021>`_ describes the DPD implementation
details in HOOMD-blue. Cite it if you utilize the DPD functionality in your work.
:py:class:`dpd` does not implement and energy shift / smoothing modes due to the function of the force.
Use :py:meth:`pair_coeff.set <coeff.set>` to set potential coefficients.
The following coefficients must be set per unique pair of particle types:
- :math:`A` - *A* (in force units)
- :math:`\gamma` - *gamma* (in units of force/velocity)
- :math:`r_{\mathrm{cut}}` - *r_cut* (in distance units)
- *optional*: defaults to the global r_cut specified in the pair command
To use the DPD thermostat, an :py:class:`hoomd.md.integrate.nve` integrator must be applied to the system and
the user must specify a temperature. Use of the dpd thermostat pair force with other integrators will result
in unphysical behavior. To use pair.dpd with a different conservative potential than :math:`F_C`,
set A to zero and define the conservative pair potential separately. Note that DPD thermostats
are often defined in terms of :math:`\sigma` where :math:`\sigma = \sqrt{2k_b\gamma T}`.
Example::
nl = nlist.cell()
dpd = pair.dpd(r_cut=1.0, nlist=nl, kT=1.0, seed=0)
dpd.pair_coeff.set('A', 'A', A=25.0, gamma = 4.5)
dpd.pair_coeff.set('A', 'B', A=40.0, gamma = 4.5)
dpd.pair_coeff.set('B', 'B', A=25.0, gamma = 4.5)
dpd.pair_coeff.set(['A', 'B'], ['C', 'D'], A=12.0, gamma = 1.2)
dpd.set_params(kT = 1.0)
integrate.mode_standard(dt=0.02)
integrate.nve(group=group.all())
"""
def __init__(self, r_cut, nlist, kT, seed, name=None):
    """Construct the DPD thermostat pair force.

    Args:
        r_cut (float): Default cutoff radius (in distance units).
        nlist: Neighbor list this force subscribes to.
        kT: Thermostat temperature (in energy units); a float or a
            :py:mod:`hoomd.variant` (coerced via ``_setup_variant_input`` below).
        seed (int): Seed for the PRNG used by the DPD thermostat.
        name (str): Optional name for the force instance.
    """
    hoomd.util.print_status_line();

    # register the citation for the DPD implementation paper
    c = hoomd.cite.article(cite_key='phillips2011',
                           author=['C L Phillips', 'J A Anderson', 'S C Glotzer'],
                           title='Pseudo-random number generation for Brownian Dynamics and Dissipative Particle Dynamics simulations on GPU devices',
                           journal='Journal of Computational Physics',
                           volume=230,
                           number=19,
                           pages='7191--7201',
                           month='Aug',
                           year='2011',
                           doi='10.1016/j.jcp.2011.05.021',
                           feature='DPD')
    hoomd.cite._ensure_global_bib().add(c)

    # initialize the base class
    pair.__init__(self, r_cut, nlist, name);

    # create the c++ mirror class matching the execution configuration
    if not hoomd.context.exec_conf.isCUDAEnabled():
        self.cpp_force = _md.PotentialPairDPDThermoDPD(hoomd.context.current.system_definition, self.nlist.cpp_nlist, self.name);
        self.cpp_class = _md.PotentialPairDPDThermoDPD;
    else:
        # the GPU implementation requires the full (not half) neighbor list
        self.nlist.cpp_nlist.setStorageMode(_md.NeighborList.storageMode.full);
        self.cpp_force = _md.PotentialPairDPDThermoDPDGPU(hoomd.context.current.system_definition, self.nlist.cpp_nlist, self.name);
        self.cpp_class = _md.PotentialPairDPDThermoDPDGPU;

    hoomd.context.current.system.addCompute(self.cpp_force, self.force_name);

    # coefficients that must be supplied per type pair
    self.required_coeffs = ['A', 'gamma'];

    # set the seed for dpd thermostat
    self.cpp_force.setSeed(seed);

    # set the temperature: accept either a float or a variant
    kT = hoomd.variant._setup_variant_input(kT);
    self.cpp_force.setT(kT.cpp_variant);
def set_params(self, kT=None):
    R""" Changes parameters.

    Args:
        kT (:py:mod:`hoomd.variant` or :py:obj:`float`): Temperature of thermostat (in energy units).

    Example::

        dpd.set_params(kT=2.0)
    """
    hoomd.util.print_status_line()
    self.check_initialization()

    # nothing to do unless a new temperature was supplied
    if kT is None:
        return

    # accept either a float or a variant, then push it to the C++ force
    kT_variant = hoomd.variant._setup_variant_input(kT)
    self.cpp_force.setT(kT_variant.cpp_variant)
def process_coeff(self, coeff):
    # pack (A, gamma) into the scalar2 consumed by the C++ class
    return _hoomd.make_scalar2(coeff['A'], coeff['gamma'])
class dpd_conservative(pair):
    R""" DPD Conservative pair force.

    Args:
        r_cut (float): Default cutoff radius (in distance units).
        nlist (:py:mod:`hoomd.md.nlist`): Neighbor list
        name (str): Name of the force instance.

    :py:class:`dpd_conservative` specifies the conservative part of the DPD pair potential should be applied between
    every non-excluded particle pair in the simulation. No thermostat (e.g. Drag Force and Random Force) is applied,
    as is in :py:class:`dpd`.

    .. math::
        :nowrap:

        \begin{eqnarray*}
        V_{\mathrm{DPD-C}}(r) = & A \cdot \left( r_{\mathrm{cut}} - r \right)
        - \frac{1}{2} \cdot \frac{A}{r_{\mathrm{cut}}} \cdot \left(r_{\mathrm{cut}}^2 - r^2 \right)
        & r < r_{\mathrm{cut}} \\
        = & 0 & r \ge r_{\mathrm{cut}} \\
        \end{eqnarray*}

    :py:class:`dpd_conservative` does not implement any energy shift / smoothing modes due to the functional form of the force.

    Use :py:meth:`pair_coeff.set <coeff.set>` to set potential coefficients.
    The following coefficients must be set per unique pair of particle types:

    - :math:`A` - *A* (in force units)
    - :math:`r_{\mathrm{cut}}` - *r_cut* (in distance units)
      - *optional*: defaults to the global r_cut specified in the pair command

    Example::

        nl = nlist.cell()
        dpdc = pair.dpd_conservative(r_cut=3.0, nlist=nl)
        dpdc.pair_coeff.set('A', 'A', A=1.0)
        dpdc.pair_coeff.set('A', 'B', A=2.0, r_cut = 1.0)
        dpdc.pair_coeff.set('B', 'B', A=1.0)
        dpdc.pair_coeff.set(['A', 'B'], ['C', 'D'], A=5.0)
    """
    def __init__(self, r_cut, nlist, name=None):
        """Register the DPD citation and create the C++ mirror compute."""
        hoomd.util.print_status_line();

        # register the citation for the DPD implementation paper
        c = hoomd.cite.article(cite_key='phillips2011',
                               author=['C L Phillips', 'J A Anderson', 'S C Glotzer'],
                               title='Pseudo-random number generation for Brownian Dynamics and Dissipative Particle Dynamics simulations on GPU devices',
                               journal='Journal of Computational Physics',
                               volume=230,
                               number=19,
                               pages='7191--7201',
                               month='Aug',
                               year='2011',
                               doi='10.1016/j.jcp.2011.05.021',
                               feature='DPD')
        hoomd.cite._ensure_global_bib().add(c)

        # initialize the base class
        pair.__init__(self, r_cut, nlist, name);

        # create the c++ mirror class matching the execution configuration
        if not hoomd.context.exec_conf.isCUDAEnabled():
            self.cpp_force = _md.PotentialPairDPD(hoomd.context.current.system_definition, self.nlist.cpp_nlist, self.name);
            self.cpp_class = _md.PotentialPairDPD;
        else:
            # the GPU implementation requires the full (not half) neighbor list
            self.nlist.cpp_nlist.setStorageMode(_md.NeighborList.storageMode.full);
            self.cpp_force = _md.PotentialPairDPDGPU(hoomd.context.current.system_definition, self.nlist.cpp_nlist, self.name);
            self.cpp_class = _md.PotentialPairDPDGPU;

        hoomd.context.current.system.addCompute(self.cpp_force, self.force_name);

        # coefficients that must be supplied per type pair
        self.required_coeffs = ['A'];

    def process_coeff(self, coeff):
        """Pack coefficients for C++; gamma is 0 because no thermostat is applied."""
        a = coeff['A'];
        gamma = 0;
        return _hoomd.make_scalar2(a, gamma);

    def set_params(self, coeff):
        """ :py:class:`dpd_conservative` has no energy shift modes """
        # deliberately disabled: the base-class shift modes do not apply here
        raise RuntimeError('Not implemented for DPD Conservative');
        return;
class dpdlj(pair):
    R""" Dissipative Particle Dynamics with a LJ conservative force

    Args:
        r_cut (float): Default cutoff radius (in distance units).
        nlist (:py:mod:`hoomd.md.nlist`): Neighbor list
        kT (:py:mod:`hoomd.variant` or :py:obj:`float`): Temperature of thermostat (in energy units).
        seed (int): seed for the PRNG in the DPD thermostat.
        name (str): Name of the force instance.

    :py:class:`dpdlj` specifies that a DPD thermostat and a Lennard-Jones pair potential should be applied between
    every non-excluded particle pair in the simulation.

    `C. L. Phillips et. al. 2011 <http://dx.doi.org/10.1016/j.jcp.2011.05.021>`_ describes the DPD implementation
    details in HOOMD-blue. Cite it if you utilize the DPD functionality in your work.

    .. math::
        :nowrap:

        \begin{eqnarray*}
        F = F_{\mathrm{C}}(r) + F_{\mathrm{R,ij}}(r_{ij}) + F_{\mathrm{D,ij}}(v_{ij}) \\
        \end{eqnarray*}

    .. math::
        :nowrap:

        \begin{eqnarray*}
        F_{\mathrm{C}}(r) = & \partial V_{\mathrm{LJ}} / \partial r \\
        F_{\mathrm{R, ij}}(r_{ij}) = & - \theta_{ij}\sqrt{3} \sqrt{\frac{2k_b\gamma T}{\Delta t}}\cdot w(r_{ij}) \\
        F_{\mathrm{D, ij}}(r_{ij}) = & - \gamma w^2(r_{ij})\left( \hat r_{ij} \circ v_{ij} \right) \\
        \end{eqnarray*}

    .. math::
        :nowrap:

        \begin{eqnarray*}
        V_{\mathrm{LJ}}(r) = & 4 \varepsilon \left[ \left( \frac{\sigma}{r} \right)^{12} -
        \alpha \left( \frac{\sigma}{r} \right)^{6} \right] & r < r_{\mathrm{cut}} \\
        = & 0 & r \ge r_{\mathrm{cut}} \\
        \end{eqnarray*}

    .. math::
        :nowrap:

        \begin{eqnarray*}
        w(r_{ij}) = &\left( 1 - r/r_{\mathrm{cut}} \right) & r < r_{\mathrm{cut}} \\
        = & 0 & r \ge r_{\mathrm{cut}} \\
        \end{eqnarray*}

    where :math:`\hat r_{ij}` is a normalized vector from particle i to particle j, :math:`v_{ij} = v_i - v_j`,
    and :math:`\theta_{ij}` is a uniformly distributed random number in the range [-1, 1].

    Use :py:meth:`pair_coeff.set <coeff.set>` to set potential coefficients.
    The following coefficients must be set per unique pair of particle types:

    - :math:`\varepsilon` - *epsilon* (in energy units)
    - :math:`\sigma` - *sigma* (in distance units)
    - :math:`\alpha` - *alpha* (unitless)
      - *optional*: defaults to 1.0
    - :math:`\gamma` - *gamma* (in units of force/velocity)
    - :math:`r_{\mathrm{cut}}` - *r_cut* (in distance units)
      - *optional*: defaults to the global r_cut specified in the pair command

    To use the DPD thermostat, an :py:class:`hoomd.md.integrate.nve` integrator must be applied to the system and
    the user must specify a temperature. Use of the dpd thermostat pair force with other integrators will result
    in unphysical behavior.

    Example::

        nl = nlist.cell()
        dpdlj = pair.dpdlj(r_cut=2.5, nlist=nl, kT=1.0, seed=0)
        dpdlj.pair_coeff.set('A', 'A', epsilon=1.0, sigma = 1.0, gamma = 4.5)
        dpdlj.pair_coeff.set('A', 'B', epsilon=0.0, sigma = 1.0, gamma = 4.5)
        dpdlj.pair_coeff.set('B', 'B', epsilon=1.0, sigma = 1.0, gamma = 4.5, r_cut = 2.0**(1.0/6.0))
        dpdlj.pair_coeff.set(['A', 'B'], ['C', 'D'], epsilon = 3.0, sigma = 1.0, gamma = 1.2)
        dpdlj.set_params(kT = 1.0)
        integrate.mode_standard(dt=0.005)
        integrate.nve(group=group.all())
    """
    def __init__(self, r_cut, nlist, kT, seed, name=None):
        """Construct the DPD-LJ pair force.

        Args:
            r_cut (float): Default cutoff radius (in distance units).
            nlist: Neighbor list this force subscribes to.
            kT: Thermostat temperature (in energy units); float or variant.
            seed (int): Seed for the DPD thermostat PRNG.
            name (str): Optional name for the force instance.
        """
        hoomd.util.print_status_line();

        # register the citation for the DPD implementation paper
        c = hoomd.cite.article(cite_key='phillips2011',
                               author=['C L Phillips', 'J A Anderson', 'S C Glotzer'],
                               title='Pseudo-random number generation for Brownian Dynamics and Dissipative Particle Dynamics simulations on GPU devices',
                               journal='Journal of Computational Physics',
                               volume=230,
                               number=19,
                               pages='7191--7201',
                               month='Aug',
                               year='2011',
                               doi='10.1016/j.jcp.2011.05.021',
                               feature='DPD')
        hoomd.cite._ensure_global_bib().add(c)

        # initialize the base class
        pair.__init__(self, r_cut, nlist, name);

        # create the c++ mirror class matching the execution configuration
        if not hoomd.context.exec_conf.isCUDAEnabled():
            self.cpp_force = _md.PotentialPairDPDLJThermoDPD(hoomd.context.current.system_definition, self.nlist.cpp_nlist, self.name);
            self.cpp_class = _md.PotentialPairDPDLJThermoDPD;
        else:
            # the GPU implementation requires the full (not half) neighbor list
            self.nlist.cpp_nlist.setStorageMode(_md.NeighborList.storageMode.full);
            self.cpp_force = _md.PotentialPairDPDLJThermoDPDGPU(hoomd.context.current.system_definition, self.nlist.cpp_nlist, self.name);
            self.cpp_class = _md.PotentialPairDPDLJThermoDPDGPU;

        hoomd.context.current.system.addCompute(self.cpp_force, self.force_name);

        # coefficients that must be supplied per type pair; alpha defaults to 1.0
        self.required_coeffs = ['epsilon','sigma', 'alpha', 'gamma'];
        self.pair_coeff.set_default_coeff('alpha', 1.0);

        # set the seed for dpdlj thermostat
        self.cpp_force.setSeed(seed);

        # set the temperature: accept either a float or a variant
        kT = hoomd.variant._setup_variant_input(kT);
        self.cpp_force.setT(kT.cpp_variant);

    def set_params(self, kT=None, mode=None):
        R""" Changes parameters.

        Args:
            kT (:py:mod:`hoomd.variant` or :py:obj:`float`): Temperature (if set) (in energy units)
            mode (str): energy shift/smoothing mode (default noshift).

        Examples::

            dpdlj.set_params(kT=variant.linear_interp(points = [(0, 1.0), (1e5, 2.0)]))
            dpdlj.set_params(kT=2.0, mode="shift")
        """
        hoomd.util.print_status_line();
        self.check_initialization();

        # change the temperature if one was supplied
        if kT is not None:
            kT = hoomd.variant._setup_variant_input(kT);
            self.cpp_force.setT(kT.cpp_variant);

        if mode is not None:
            # xplor smoothing is the one base-class mode this potential cannot do
            if mode == "xplor":
                hoomd.context.msg.error("XPLOR smoothing is not supported with pair.dpdlj\n");
                raise RuntimeError("Error changing parameters in pair force");

            # use the inherited set_params for the remaining modes
            pair.set_params(self, mode=mode)

    def process_coeff(self, coeff):
        """Pack (lj1, lj2, gamma) into the scalar4 consumed by the C++ class."""
        epsilon = coeff['epsilon'];
        sigma = coeff['sigma'];
        gamma = coeff['gamma'];
        alpha = coeff['alpha'];

        # standard LJ prefactors: lj1 = 4*eps*sigma^12, lj2 = alpha*4*eps*sigma^6
        lj1 = 4.0 * epsilon * math.pow(sigma, 12.0);
        lj2 = alpha * 4.0 * epsilon * math.pow(sigma, 6.0);
        return _hoomd.make_scalar4(lj1, lj2, gamma, 0.0);
class force_shifted_lj(pair):
    R""" Force-shifted Lennard-Jones pair potential.

    Args:
        r_cut (float): Default cutoff radius (in distance units).
        nlist (:py:mod:`hoomd.md.nlist`): Neighbor list
        name (str): Name of the force instance.

    :py:class:`force_shifted_lj` specifies that a modified Lennard-Jones pair force should be applied between
    non-excluded particle pair in the simulation. The force differs from the one calculated by :py:class:`lj`
    by the subtraction of the value of the force at :math:`r_{\mathrm{cut}}`, such that the force smoothly goes
    to zero at the cut-off. The potential is modified by a linear function. This potential can be used as a
    substitute for :py:class:`lj`, when the exact analytical form of the latter is not required but a smaller
    cut-off radius is desired for computational efficiency. See
    `Toxvaerd et. al. 2011 <http://dx.doi.org/10.1063/1.3558787>`_ for a discussion of this potential.

    .. math::
        :nowrap:

        \begin{eqnarray*}
        V(r) = & 4 \varepsilon \left[ \left( \frac{\sigma}{r} \right)^{12} -
        \alpha \left( \frac{\sigma}{r} \right)^{6} \right] + \Delta V(r) & r < r_{\mathrm{cut}}\\
        = & 0 & r \ge r_{\mathrm{cut}} \\
        \end{eqnarray*}

    .. math::

        \Delta V(r) = -(r - r_{\mathrm{cut}}) \frac{\partial V_{\mathrm{LJ}}}{\partial r}(r_{\mathrm{cut}})

    See :py:class:`pair` for details on how forces are calculated and the available energy shifting and smoothing modes.
    Use :py:meth:`pair_coeff.set <coeff.set>` to set potential coefficients.
    The following coefficients must be set per unique pair of particle types:

    - :math:`\varepsilon` - *epsilon* (in energy units)
    - :math:`\sigma` - *sigma* (in distance units)
    - :math:`\alpha` - *alpha* (unitless) - *optional*: defaults to 1.0
    - :math:`r_{\mathrm{cut}}` - *r_cut* (in distance units)
      - *optional*: defaults to the global r_cut specified in the pair command
    - :math:`r_{\mathrm{on}}` - *r_on* (in distance units)
      - *optional*: defaults to the global r_cut specified in the pair command

    Example::

        nl = nlist.cell()
        fslj = pair.force_shifted_lj(r_cut=1.5, nlist=nl)
        fslj.pair_coeff.set('A', 'A', epsilon=1.0, sigma=1.0)
    """
    def __init__(self, r_cut, nlist, name=None):
        """Create the force-shifted LJ compute for the current device."""
        hoomd.util.print_status_line()

        # wire up the generic pair-potential machinery
        pair.__init__(self, r_cut, nlist, name)

        # pick the C++ implementation for the current execution configuration
        if hoomd.context.exec_conf.isCUDAEnabled():
            # the GPU kernel needs the full neighbor list, not the half list
            self.nlist.cpp_nlist.setStorageMode(_md.NeighborList.storageMode.full)
            self.cpp_force = _md.PotentialPairForceShiftedLJGPU(hoomd.context.current.system_definition, self.nlist.cpp_nlist, self.name)
            self.cpp_class = _md.PotentialPairForceShiftedLJGPU
        else:
            self.cpp_force = _md.PotentialPairForceShiftedLJ(hoomd.context.current.system_definition, self.nlist.cpp_nlist, self.name)
            self.cpp_class = _md.PotentialPairForceShiftedLJ
        hoomd.context.current.system.addCompute(self.cpp_force, self.force_name)

        # per-pair coefficients; alpha is optional with a default of 1.0
        self.required_coeffs = ['epsilon', 'sigma', 'alpha']
        self.pair_coeff.set_default_coeff('alpha', 1.0)

    def process_coeff(self, coeff):
        """Convert (epsilon, sigma, alpha) into the lj1/lj2 prefactors for C++."""
        eps = coeff['epsilon']
        sig = coeff['sigma']
        alph = coeff['alpha']
        lj1 = 4.0 * eps * math.pow(sig, 12.0)
        lj2 = alph * 4.0 * eps * math.pow(sig, 6.0)
        return _hoomd.make_scalar2(lj1, lj2)
class moliere(pair):
    R""" Moliere pair potential.

    Args:
        r_cut (float): Default cutoff radius (in distance units).
        nlist (:py:mod:`hoomd.md.nlist`): Neighbor list
        name (str): Name of the force instance.

    :py:class:`moliere` specifies that a Moliere type pair potential should be applied between every
    non-excluded particle pair in the simulation.

    .. math::
        :nowrap:

        \begin{eqnarray*}
        V_{\mathrm{Moliere}}(r) = & \frac{Z_i Z_j e^2}{4 \pi \epsilon_0 r_{ij}} \left[ 0.35 \exp \left( -0.3 \frac{r_{ij}}{a_F} \right) + 0.55 \exp \left( -1.2 \frac{r_{ij}}{a_F} \right) + 0.10 \exp \left( -6.0 \frac{r_{ij}}{a_F} \right) \right] & r < r_{\mathrm{cut}} \\
        = & 0 & r > r_{\mathrm{cut}} \\
        \end{eqnarray*}

    See :py:class:`pair` for details on how forces are calculated and the available energy shifting and smoothing modes.
    Use :py:meth:`pair_coeff.set <coeff.set>` to set potential coefficients.
    The following coefficients must be set per unique pair of particle types:

    - :math:`Z_i` - *Z_i* - Atomic number of species i (unitless)
    - :math:`Z_j` - *Z_j* - Atomic number of species j (unitless)
    - :math:`e` - *elementary_charge* - The elementary charge (in charge units)
    - :math:`a_0` - *a_0* - The Bohr radius (in distance units)
    - :math:`r_{\mathrm{cut}}` - *r_cut* (in distance units)
      - *optional*: defaults to the global r_cut specified in the pair command
    - :math:`r_{\mathrm{on}}` - *r_on* (in distance units)
      - *optional*: defaults to the global r_cut specified in the pair command

    Example::

        nl = nlist.cell()
        moliere = pair.moliere(r_cut = 3.0, nlist=nl)
        moliere.pair_coeff.set('A', 'B', Z_i = 54.0, Z_j = 7.0, elementary_charge = 1.0, a_0 = 1.0);
    """
    def __init__(self, r_cut, nlist, name=None):
        """Create the Moliere pair compute for the current device."""
        hoomd.util.print_status_line()

        # wire up the generic pair-potential machinery
        pair.__init__(self, r_cut, nlist, name)

        # pick the C++ implementation for the current execution configuration
        if hoomd.context.exec_conf.isCUDAEnabled():
            # the GPU kernel needs the full neighbor list, not the half list
            self.nlist.cpp_nlist.setStorageMode(_md.NeighborList.storageMode.full)
            self.cpp_force = _md.PotentialPairMoliereGPU(hoomd.context.current.system_definition, self.nlist.cpp_nlist, self.name)
            self.cpp_class = _md.PotentialPairMoliereGPU
        else:
            self.cpp_force = _md.PotentialPairMoliere(hoomd.context.current.system_definition, self.nlist.cpp_nlist, self.name)
            self.cpp_class = _md.PotentialPairMoliere
        hoomd.context.current.system.addCompute(self.cpp_force, self.force_name)

        # per-pair coefficients with defaults for the optional ones
        self.required_coeffs = ['Z_i', 'Z_j', 'elementary_charge', 'a_0']
        self.pair_coeff.set_default_coeff('elementary_charge', 1.0)
        self.pair_coeff.set_default_coeff('a_0', 1.0)

    def process_coeff(self, coeff):
        """Pack (Z_i*Z_j*e^2, screening length) into a scalar2 for C++."""
        charge = coeff['elementary_charge']
        zsq = coeff['Z_i'] * coeff['Z_j'] * charge * charge

        # screening length; fall back to 1.0 when both atomic numbers are
        # zero, which would otherwise produce a zero denominator
        if coeff['Z_i'] != 0 or coeff['Z_j'] != 0:
            a_f = 0.8853 * coeff['a_0'] / math.pow(math.sqrt(coeff['Z_i']) + math.sqrt(coeff['Z_j']), 2.0 / 3.0)
        else:
            a_f = 1.0
        return _hoomd.make_scalar2(zsq, a_f)
class zbl(pair):
    R""" ZBL pair potential.

    Args:
        r_cut (float): Default cutoff radius (in distance units).
        nlist (:py:mod:`hoomd.md.nlist`): Neighbor list
        name (str): Name of the force instance.

    :py:class:`zbl` specifies that a Ziegler-Biersack-Littmark pair potential should be applied between every
    non-excluded particle pair in the simulation.

    .. math::
        :nowrap:

        \begin{eqnarray*}
        V_{\mathrm{ZBL}}(r) = & \frac{Z_i Z_j e^2}{4 \pi \epsilon_0 r_{ij}} \left[ 0.1818 \exp \left( -3.2 \frac{r_{ij}}{a_F} \right) + 0.5099 \exp \left( -0.9423 \frac{r_{ij}}{a_F} \right) + 0.2802 \exp \left( -0.4029 \frac{r_{ij}}{a_F} \right) + 0.02817 \exp \left( -0.2016 \frac{r_{ij}}{a_F} \right) \right], & r < r_{\mathrm{cut}} \\
        = & 0, & r > r_{\mathrm{cut}} \\
        \end{eqnarray*}

    See :py:class:`pair` for details on how forces are calculated and the available energy shifting and smoothing modes.
    Use :py:meth:`pair_coeff.set <coeff.set>` to set potential coefficients.
    The following coefficients must be set per unique pair of particle types:

    - :math:`Z_i` - *Z_i* - Atomic number of species i (unitless)
    - :math:`Z_j` - *Z_j* - Atomic number of species j (unitless)
    - :math:`e` - *elementary_charge* - The elementary charge (in charge units)
    - :math:`a_0` - *a_0* - The Bohr radius (in distance units)
    - :math:`r_{\mathrm{cut}}` - *r_cut* (in distance units)
      - *optional*: defaults to the global r_cut specified in the pair command
    - :math:`r_{\mathrm{on}}` - *r_on* (in distance units)
      - *optional*: defaults to the global r_cut specified in the pair command

    Example::

        nl = nlist.cell()
        zbl = pair.zbl(r_cut = 3.0, nlist=nl)
        zbl.pair_coeff.set('A', 'B', Z_i = 54.0, Z_j = 7.0, elementary_charge = 1.0, a_0 = 1.0);
    """
    def __init__(self, r_cut, nlist, name=None):
        """Create the ZBL pair compute for the current device."""
        hoomd.util.print_status_line();

        # initialize the base class
        pair.__init__(self, r_cut, nlist, name);

        # create the c++ mirror class matching the execution configuration
        if not hoomd.context.exec_conf.isCUDAEnabled():
            self.cpp_force = _md.PotentialPairZBL(hoomd.context.current.system_definition, self.nlist.cpp_nlist, self.name);
            self.cpp_class = _md.PotentialPairZBL;
        else:
            # the GPU implementation requires the full (not half) neighbor list
            self.nlist.cpp_nlist.setStorageMode(_md.NeighborList.storageMode.full);
            self.cpp_force = _md.PotentialPairZBLGPU(hoomd.context.current.system_definition, self.nlist.cpp_nlist, self.name);
            self.cpp_class = _md.PotentialPairZBLGPU;

        hoomd.context.current.system.addCompute(self.cpp_force, self.force_name);

        # per-pair coefficients with defaults for the optional ones
        self.required_coeffs = ['Z_i', 'Z_j', 'elementary_charge', 'a_0'];
        self.pair_coeff.set_default_coeff('elementary_charge', 1.0);
        self.pair_coeff.set_default_coeff('a_0', 1.0);

    def process_coeff(self, coeff):
        """Pack (Z_i*Z_j*e^2, screening length) into a scalar2 for C++."""
        Z_i = coeff['Z_i'];
        Z_j = coeff['Z_j'];
        elementary_charge = coeff['elementary_charge'];
        a_0 = coeff['a_0'];

        Zsq = Z_i * Z_j * elementary_charge * elementary_charge;

        # screening length; fall back to 1.0 when both atomic numbers are
        # zero, which would otherwise produce a zero denominator
        if (not (Z_i == 0)) or (not (Z_j == 0)):
            aF = 0.88534 * a_0 / ( math.pow( Z_i, 0.23 ) + math.pow( Z_j, 0.23 ) );
        else:
            aF = 1.0;
        return _hoomd.make_scalar2(Zsq, aF);

    def set_params(self, coeff):
        """ :py:class:`zbl` has no energy shift modes """
        # BUG FIX: the message previously said 'DPD Conservative' (copy-paste)
        raise RuntimeError('Not implemented for ZBL');
        return;
class tersoff(pair):
    R""" Tersoff Potential.

    Args:
        r_cut (float): Default cutoff radius (in distance units).
        nlist (:py:mod:`hoomd.md.nlist`): Neighbor list
        name (str): Name of the force instance.

    :py:class:`tersoff` specifies that the Tersoff three-body potential should be applied to every
    non-bonded particle pair in the simulation. Despite the fact that the Tersoff potential accounts
    for the effects of third bodies, it is included in the pair potentials because the species of the
    third body is irrelevant. It can thus use type-pair parameters similar to those of the pair potentials.

    The Tersoff potential is a bond-order potential based on the Morse potential that accounts for the weakening of
    individual bonds with increasing coordination number. It does this by computing a modifier to the
    attractive term of the potential. The modifier contains the effects of third-bodies on the bond
    energies. The potential also includes a smoothing function around the cutoff. The smoothing function
    used in this work is exponential in nature as opposed to the sinusoid used by Tersoff. The exponential
    function provides continuity up to (we believe) the second derivative.
    """
    def __init__(self, r_cut, nlist, name=None):
        """Create the Tersoff three-body compute and register coefficient defaults."""
        hoomd.util.print_status_line();

        # initialize the base class
        pair.__init__(self, r_cut, nlist, name);

        # this potential cannot handle a half neighbor list
        self.nlist.cpp_nlist.setStorageMode(_md.NeighborList.storageMode.full);

        # create the c++ mirror class matching the execution configuration
        if not hoomd.context.exec_conf.isCUDAEnabled():
            self.cpp_force = _md.PotentialTersoff(hoomd.context.current.system_definition, self.nlist.cpp_nlist, self.name);
            self.cpp_class = _md.PotentialTersoff;
        else:
            self.cpp_force = _md.PotentialTersoffGPU(hoomd.context.current.system_definition, self.nlist.cpp_nlist, self.name);
            self.cpp_class = _md.PotentialTersoffGPU;

        hoomd.context.current.system.addCompute(self.cpp_force, self.force_name);

        # per-pair coefficients; every one has a default so that users only
        # need to override the values relevant to their parameterization
        self.required_coeffs = ['cutoff_thickness', 'C1', 'C2', 'lambda1', 'lambda2', 'dimer_r', 'n', 'gamma', 'lambda3', 'c', 'd', 'm', 'alpha']
        self.pair_coeff.set_default_coeff('cutoff_thickness', 0.2);
        self.pair_coeff.set_default_coeff('dimer_r', 1.5);
        self.pair_coeff.set_default_coeff('C1', 1.0);
        self.pair_coeff.set_default_coeff('C2', 1.0);
        self.pair_coeff.set_default_coeff('lambda1', 2.0);
        self.pair_coeff.set_default_coeff('lambda2', 1.0);
        self.pair_coeff.set_default_coeff('lambda3', 0.0);
        self.pair_coeff.set_default_coeff('n', 0.0);
        self.pair_coeff.set_default_coeff('m', 0.0);
        self.pair_coeff.set_default_coeff('c', 0.0);
        self.pair_coeff.set_default_coeff('d', 1.0);
        self.pair_coeff.set_default_coeff('gamma', 0.0);
        self.pair_coeff.set_default_coeff('alpha', 3.0);

    def process_coeff(self, coeff):
        """Pack the per-pair coefficients into the C++ tersoff_params struct."""
        cutoff_d = coeff['cutoff_thickness'];
        C1 = coeff['C1'];
        C2 = coeff['C2'];
        lambda1 = coeff['lambda1'];
        lambda2 = coeff['lambda2'];
        dimer_r = coeff['dimer_r'];
        n = coeff['n'];
        gamma = coeff['gamma'];
        lambda3 = coeff['lambda3'];
        c = coeff['c'];
        d = coeff['d'];
        m = coeff['m'];
        alpha = coeff['alpha'];

        # precompute derived quantities so the C++ kernel does not have to
        gamman = math.pow(gamma, n);
        c2 = c * c;
        d2 = d * d;
        lambda3_cube = lambda3 * lambda3 * lambda3;

        # group related coefficients into the scalar tuples the C++ API expects
        tersoff_coeffs = _hoomd.make_scalar2(C1, C2);
        exp_consts = _hoomd.make_scalar2(lambda1, lambda2);
        ang_consts = _hoomd.make_scalar3(c2, d2, m);

        return _md.make_tersoff_params(cutoff_d, tersoff_coeffs, exp_consts, dimer_r, n, gamman, lambda3_cube, ang_consts, alpha);
class mie(pair):
    R""" Mie pair potential.

    Args:
        r_cut (float): Default cutoff radius (in distance units).
        nlist (:py:mod:`hoomd.md.nlist`): Neighbor list
        name (str): Name of the force instance.

    :py:class:`mie` specifies that a Mie pair potential should be applied between every
    non-excluded particle pair in the simulation.

    .. math::
        :nowrap:

        \begin{eqnarray*}
        V_{\mathrm{mie}}(r) = & \left( \frac{n}{n-m} \right) {\left( \frac{n}{m} \right)}^{\frac{m}{n-m}} \varepsilon \left[ \left( \frac{\sigma}{r} \right)^{n} -
        \left( \frac{\sigma}{r} \right)^{m} \right] & r < r_{\mathrm{cut}} \\
        = & 0 & r \ge r_{\mathrm{cut}} \\
        \end{eqnarray*}

    See :py:class:`pair` for details on how forces are calculated and the available energy shifting and smoothing modes.
    Use :py:meth:`pair_coeff.set <coeff.set>` to set potential coefficients.
    The following coefficients must be set per unique pair of particle types:

    - :math:`\varepsilon` - *epsilon* (in energy units)
    - :math:`\sigma` - *sigma* (in distance units)
    - :math:`n` - *n* (unitless)
    - :math:`m` - *m* (unitless)
    - :math:`r_{\mathrm{cut}}` - *r_cut* (in distance units)
      - *optional*: defaults to the global r_cut specified in the pair command
    - :math:`r_{\mathrm{on}}` - *r_on* (in distance units)
      - *optional*: defaults to the global r_cut specified in the pair command

    Example::

        nl = nlist.cell()
        mie = pair.mie(r_cut=3.0, nlist=nl)
        mie.pair_coeff.set('A', 'A', epsilon=1.0, sigma=1.0, n=12, m=6)
        mie.pair_coeff.set('A', 'B', epsilon=2.0, sigma=1.0, n=14, m=7, r_cut=3.0, r_on=2.0);
        mie.pair_coeff.set('B', 'B', epsilon=1.0, sigma=1.0, n=15.1, m=6.5, r_cut=2**(1.0/6.0), r_on=2.0);
        mie.pair_coeff.set(['A', 'B'], ['C', 'D'], epsilon=1.5, sigma=2.0)
    """
    def __init__(self, r_cut, nlist, name=None):
        """Create the Mie pair compute for the current device."""
        hoomd.util.print_status_line()

        # wire up the generic pair-potential machinery
        pair.__init__(self, r_cut, nlist, name)

        # pick the C++ implementation for the current execution configuration
        if hoomd.context.exec_conf.isCUDAEnabled():
            # the GPU kernel needs the full neighbor list, not the half list
            self.nlist.cpp_nlist.setStorageMode(_md.NeighborList.storageMode.full)
            self.cpp_force = _md.PotentialPairMieGPU(hoomd.context.current.system_definition, self.nlist.cpp_nlist, self.name)
            self.cpp_class = _md.PotentialPairMieGPU
        else:
            self.cpp_force = _md.PotentialPairMie(hoomd.context.current.system_definition, self.nlist.cpp_nlist, self.name)
            self.cpp_class = _md.PotentialPairMie
        hoomd.context.current.system.addCompute(self.cpp_force, self.force_name)

        # coefficients that must be supplied for every type pair
        self.required_coeffs = ['epsilon', 'sigma', 'n', 'm']

    def process_coeff(self, coeff):
        """Pack the Mie prefactors and exponents into a scalar4 for C++."""
        # coerce to float so the exponent arithmetic below is well defined
        eps = float(coeff['epsilon'])
        sig = float(coeff['sigma'])
        n_exp = float(coeff['n'])
        m_exp = float(coeff['m'])

        # prefactors of the repulsive (r^-n) and attractive (r^-m) terms
        mie1 = eps * math.pow(sig, n_exp) * (n_exp / (n_exp - m_exp)) * math.pow(n_exp / m_exp, m_exp / (n_exp - m_exp))
        mie2 = eps * math.pow(sig, m_exp) * (n_exp / (n_exp - m_exp)) * math.pow(n_exp / m_exp, m_exp / (n_exp - m_exp))
        return _hoomd.make_scalar4(mie1, mie2, n_exp, m_exp)
class ai_pair(pair):
    R""" Generic anisotropic pair potential.

    Users should not instantiate :py:class:`ai_pair` directly. It is a base class that
    provides common features to all anisotropic pair forces. Rather than repeating all of that documentation in a
    dozen different places, it is collected here.

    All anisotropic pair potential commands specify that a given potential energy, force and torque be computed
    on all non-excluded particle pairs in the system within a short range cutoff distance :math:`r_{\mathrm{cut}}`.
    The interaction energy, forces and torque depend on the inter-particle separation
    :math:`\vec r` and on the orientations :math:`\vec q_i`, :math:`\vec q_j`, of the particles.
    """

    ## \internal
    # \brief Initialize the pair force
    # \details
    # The derived class must set
    #  - self.cpp_class (the pair class to instantiate)
    #  - self.required_coeffs (a list of the coeff names the derived class needs)
    #  - self.process_coeffs() (a method that takes in the coeffs and spits out a param struct to use in
    #       self.cpp_force.set_params())
    def __init__(self, r_cut, nlist, name=None):
        """Set up coefficient storage and subscribe to the neighbor list.

        Args:
            r_cut (float): Default (global) cutoff radius in distance units.
            nlist: Neighbor list object this force subscribes to.
            name (str): Optional name for the force instance.
        """
        # initialize the base class
        force._force.__init__(self, name);

        self.global_r_cut = r_cut;

        # setup the coefficient matrix; r_cut falls back to the global value
        self.pair_coeff = coeff();
        self.pair_coeff.set_default_coeff('r_cut', self.global_r_cut);

        # setup the neighbor list: subscribe so the nlist can query this
        # force's r_cut whenever it rebuilds
        self.nlist = nlist
        self.nlist.subscribe(lambda:self.get_rcut())
        self.nlist.update_rcut()

    def set_params(self, mode=None):
        R""" Set parameters controlling the way forces are computed.

        Args:
            mode (str): (if set) Set the mode with which potentials are handled at the cutoff

        valid values for mode are: "none" (the default) and "shift":

        - *none* - No shifting is performed and potentials are abruptly cut off
        - *shift* - A constant shift is applied to the entire potential so that it is 0 at the cutoff

        Examples::

            mypair.set_params(mode="shift")
            mypair.set_params(mode="no_shift")
        """
        hoomd.util.print_status_line();

        if mode is not None:
            if mode == "no_shift":
                self.cpp_force.setShiftMode(self.cpp_class.energyShiftMode.no_shift)
            elif mode == "shift":
                self.cpp_force.setShiftMode(self.cpp_class.energyShiftMode.shift)
            else:
                hoomd.context.msg.error("Invalid mode\n");
                raise RuntimeError("Error changing parameters in pair force");

    def update_coeffs(self):
        """Validate and push all per-type-pair coefficients to the C++ force."""
        coeff_list = self.required_coeffs + ["r_cut"];

        # check that the pair coefficients are valid
        if not self.pair_coeff.verify(coeff_list):
            hoomd.context.msg.error("Not all pair coefficients are set\n");
            raise RuntimeError("Error updating pair coefficients");

        # resolve the name of every particle type in the system
        ntypes = hoomd.context.current.system_definition.getParticleData().getNTypes();
        type_list = [];
        for i in range(0,ntypes):
            type_list.append(hoomd.context.current.system_definition.getParticleData().getNameByType(i));

        # set params for every unordered pair (i, j) with j >= i
        for i in range(0,ntypes):
            for j in range(i,ntypes):
                # build a dict of the coeffs to pass to process_coeff
                coeff_dict = {};
                for name in coeff_list:
                    coeff_dict[name] = self.pair_coeff.get(type_list[i], type_list[j], name);

                param = self.process_coeff(coeff_dict);
                self.cpp_force.setParams(i, j, param);
                self.cpp_force.setRcut(i, j, coeff_dict['r_cut']);
class gb(ai_pair):
    R""" Gay-Berne anisotropic pair potential.

    Args:
        r_cut (float): Default cutoff radius (in distance units).
        nlist (:py:mod:`hoomd.md.nlist`): Neighbor list
        name (str): Name of the force instance.

    :py:class:`gb` computes the Gay-Berne potential between anisotropic particles.
    This version of the Gay-Berne potential supports identical pairs of uniaxial ellipsoids,
    with orientation-independent energy-well depth.

    The interaction energy for this anisotropic pair potential is
    (`Allen et. al. 2006 <http://dx.doi.org/10.1080/00268970601075238>`_):

    .. math::
        :nowrap:

        \begin{eqnarray*}
        V_{\mathrm{GB}}(\vec r, \vec e_i, \vec e_j) = & 4 \varepsilon \left[ \zeta^{-12} -
        \zeta^{-6} \right] & \zeta < \zeta_{\mathrm{cut}} \\
        = & 0 & \zeta \ge \zeta_{\mathrm{cut}} \\
        \end{eqnarray*}

    .. math::

        \zeta = \left(\frac{r-\sigma+\sigma_{\mathrm{min}}}{\sigma_{\mathrm{min}}}\right)

        \sigma^{-2} = \frac{1}{2} \hat{\vec{r}}\cdot\vec{H^{-1}}\cdot\hat{\vec{r}}

        \vec{H} = 2 \ell_\perp^2 \vec{1} + (\ell_\parallel^2 - \ell_\perp^2) (\vec{e_i} \otimes \vec{e_i} + \vec{e_j} \otimes \vec{e_j})

    with :math:`\sigma_{\mathrm{min}} = 2 \min(\ell_\perp, \ell_\parallel)`.

    The cut-off parameter :math:`r_{\mathrm{cut}}` is defined for two particles oriented parallel along
    the **long** axis, i.e.
    :math:`\zeta_{\mathrm{cut}} = \left(\frac{r-\sigma_{\mathrm{max}} + \sigma_{\mathrm{min}}}{\sigma_{\mathrm{min}}}\right)`
    where :math:`\sigma_{\mathrm{max}} = 2 \max(\ell_\perp, \ell_\parallel)` .

    The quantities :math:`\ell_\parallel` and :math:`\ell_\perp` denote the semi-axis lengths parallel
    and perpendicular to particle orientation.

    Use :py:meth:`pair_coeff.set <coeff.set>` to set potential coefficients.
    The following coefficients must be set per unique pair of particle types:

    - :math:`\varepsilon` - *epsilon* (in energy units)
    - :math:`\ell_\perp` - *lperp* (in distance units)
    - :math:`\ell_\parallel` - *lpar* (in distance units)
    - :math:`r_{\mathrm{cut}}` - *r_cut* (in distance units)
      - *optional*: defaults to the global r_cut specified in the pair command

    Example::

        nl = nlist.cell()
        gb = pair.gb(r_cut=2.5, nlist=nl)
        gb.pair_coeff.set('A', 'A', epsilon=1.0, lperp=0.45, lpar=0.5)
        gb.pair_coeff.set('A', 'B', epsilon=2.0, lperp=0.45, lpar=0.5, r_cut=2**(1.0/6.0));
    """
    def __init__(self, r_cut, nlist, name=None):
        """Create the Gay-Berne anisotropic pair compute for the current device."""
        hoomd.util.print_status_line()

        # hook up the generic anisotropic pair machinery
        ai_pair.__init__(self, r_cut, nlist, name)

        # instantiate the C++ compute appropriate for the execution mode
        if hoomd.context.exec_conf.isCUDAEnabled():
            # the GPU kernel needs the full neighbor list, not the half list
            self.nlist.cpp_nlist.setStorageMode(_md.NeighborList.storageMode.full)
            self.cpp_force = _md.AnisoPotentialPairGBGPU(hoomd.context.current.system_definition, self.nlist.cpp_nlist, self.name)
            self.cpp_class = _md.AnisoPotentialPairGBGPU
        else:
            self.cpp_force = _md.AnisoPotentialPairGB(hoomd.context.current.system_definition, self.nlist.cpp_nlist, self.name)
            self.cpp_class = _md.AnisoPotentialPairGB
        hoomd.context.current.system.addCompute(self.cpp_force, self.force_name)

        # coefficients required for every type pair
        self.required_coeffs = ['epsilon', 'lperp', 'lpar']

    def process_coeff(self, coeff):
        """Pack (epsilon, lperp, lpar) into a scalar3 for the C++ class."""
        return _hoomd.make_scalar3(coeff['epsilon'], coeff['lperp'], coeff['lpar'])
class dipole(ai_pair):
    R""" Screened dipole-dipole interactions.
    Args:
        r_cut (float): Default cutoff radius (in distance units).
        nlist (:py:mod:`hoomd.md.nlist`): Neighbor list
        name (str): Name of the force instance.
    :py:class:`dipole` computes the (screened) interaction between pairs of
    particles with dipoles and electrostatic charges. The total energy
    computed is:
    .. math::
        U_{dipole} = U_{dd} + U_{de} + U_{ee}
        U_{dd} = A e^{-\kappa r} \left(\frac{\vec{\mu_i}\cdot\vec{\mu_j}}{r^3} - 3\frac{(\vec{\mu_i}\cdot \vec{r_{ji}})(\vec{\mu_j}\cdot \vec{r_{ji}})}{r^5}\right)
        U_{de} = A e^{-\kappa r} \left(\frac{(\vec{\mu_j}\cdot \vec{r_{ji}})q_i}{r^3} - \frac{(\vec{\mu_i}\cdot \vec{r_{ji}})q_j}{r^3}\right)
        U_{ee} = A e^{-\kappa r} \frac{q_i q_j}{r}
    Note:
        :py:class:`dipole` does not support energy shifting or smoothing modes:
        the potential is abruptly truncated at :math:`r_{\mathrm{cut}}`.
    Use :py:meth:`pair_coeff.set <coeff.set>` to set potential coefficients.
    The following coefficients must be set per unique pair of particle types:
    - mu - magnitude of :math:`\vec{\mu} = \mu (1, 0, 0)` in the particle local reference frame
    - A - electrostatic energy scale :math:`A` (default value 1.0)
    - kappa - inverse screening length :math:`\kappa`
    Example::
        # A/A interact only with screened electrostatics
        dipole.pair_coeff.set('A', 'A', mu=0.0, A=1.0, kappa=1.0)
        dipole.pair_coeff.set('A', 'B', mu=0.5, kappa=1.0)
    """
    def __init__(self, r_cut, nlist, name=None):
        hoomd.util.print_status_line();

        # initialize the base class
        ai_pair.__init__(self, r_cut, nlist, name);

        # create the c++ mirror class; the GPU path needs a full neighbor list
        if not hoomd.context.exec_conf.isCUDAEnabled():
            self.cpp_force = _md.AnisoPotentialPairDipole(hoomd.context.current.system_definition, self.nlist.cpp_nlist, self.name);
            self.cpp_class = _md.AnisoPotentialPairDipole;
        else:
            self.nlist.cpp_nlist.setStorageMode(_md.NeighborList.storageMode.full);
            self.cpp_force = _md.AnisoPotentialPairDipoleGPU(hoomd.context.current.system_definition, self.nlist.cpp_nlist, self.name);
            self.cpp_class = _md.AnisoPotentialPairDipoleGPU;

        hoomd.context.current.system.addCompute(self.cpp_force, self.force_name);

        # setup the coefficient options; A is optional and defaults to 1.0
        self.required_coeffs = ['mu', 'A', 'kappa'];
        self.pair_coeff.set_default_coeff('A', 1.0)

    def process_coeff(self, coeff):
        # pack (mu, A, kappa) into the scalar3 parameter struct read by C++
        mu = float(coeff['mu']);
        A = float(coeff['A']);
        kappa = float(coeff['kappa']);

        params = _hoomd.make_scalar3(mu, A, kappa)
        return params
class reaction_field(pair):
    R""" Onsager reaction field pair potential.
    Args:
        r_cut (float): Default cutoff radius (in distance units).
        nlist (:py:mod:`hoomd.md.nlist`): Neighbor list
        name (str): Name of the force instance.
    :py:class:`reaction_field` applies an Onsager reaction field pair potential between every
    non-excluded particle pair in the simulation.
    Reaction field electrostatics approximates the screened electrostatic interaction by
    treating the medium outside the cutoff sphere of radius :math:`r_{\mathrm{cut}}` as an
    electrostatic continuum of dielectric constant :math:`\epsilon_{RF}`.
    See: `Barker et al. 1973 <http://dx.doi.org/10.1080/00268977300102101>`_.
    .. math::
       V_{\mathrm{RF}}(r) = \varepsilon \left[ \frac{1}{r} +
           \frac{(\epsilon_{RF}-1) r^2}{(2 \epsilon_{RF} + 1) r_c^3} \right]
    By default, the reaction field potential does not require charge or diameter to be set.
    Two parameters, :math:`\varepsilon` and :math:`\epsilon_{RF}`, are needed. Specifying
    :math:`\epsilon_{RF} = 0` represents an infinite dielectric constant.
    If *use_charge* is set to True, the following formula is evaluated instead:
    .. math::
       V_{\mathrm{RF}}(r) = q_i q_j \varepsilon \left[ \frac{1}{r} +
           \frac{(\epsilon_{RF}-1) r^2}{(2 \epsilon_{RF} + 1) r_c^3} \right]
    where :math:`q_i` and :math:`q_j` are the charges of the particle pair.
    See :py:class:`pair` for details on how forces are calculated and the available
    energy shifting and smoothing modes.
    Use :py:meth:`pair_coeff.set <coeff.set>` to set potential coefficients.
    The following coefficients must be set per unique pair of particle types:
    - :math:`\varepsilon` - *epsilon* (in units of energy*distance)
    - :math:`\epsilon_{RF}` - *eps_rf* (dimensionless)
    - :math:`r_{\mathrm{cut}}` - *r_cut* (in units of distance)
      - *optional*: defaults to the global r_cut specified in the pair command
    - :math:`r_{\mathrm{on}}` - *r_on* (in units of distance)
      - *optional*: defaults to the global r_cut specified in the pair command
    - *use_charge* (boolean), evaluate potential using particle charges
      - *optional*: defaults to False
    .. versionadded:: 2.1
    Example::
        nl = nlist.cell()
        reaction_field = pair.reaction_field(r_cut=3.0, nlist=nl)
        reaction_field.pair_coeff.set('A', 'A', epsilon=1.0, eps_rf=1.0)
        reaction_field.pair_coeff.set('A', 'B', epsilon=-1.0, eps_rf=0.0)
        reaction_field.pair_coeff.set('B', 'B', epsilon=1.0, eps_rf=0.0)
        reaction_field.pair_coeff.set(system.particles.types, system.particles.types, epsilon=1.0, eps_rf=0.0, use_charge=True)
    """
    def __init__(self, r_cut, nlist, name=None):
        hoomd.util.print_status_line();

        # common pair-potential setup lives in the base class
        pair.__init__(self, r_cut, nlist, name);

        # select the CPU or GPU C++ implementation
        if hoomd.context.exec_conf.isCUDAEnabled():
            # GPU kernels require a full neighbor list
            self.nlist.cpp_nlist.setStorageMode(_md.NeighborList.storageMode.full);
            self.cpp_class = _md.PotentialPairReactionFieldGPU;
        else:
            self.cpp_class = _md.PotentialPairReactionField;
        self.cpp_force = self.cpp_class(hoomd.context.current.system_definition, self.nlist.cpp_nlist, self.name);

        hoomd.context.current.system.addCompute(self.cpp_force, self.force_name);

        # per-pair coefficients; use_charge is optional and off by default
        self.required_coeffs = ['epsilon', 'eps_rf', 'use_charge'];
        self.pair_coeff.set_default_coeff('use_charge', False)

    def process_coeff(self, coeff):
        # pack (epsilon, eps_rf, use_charge encoded as a scalar) for the C++ evaluator
        charge_flag = _hoomd.int_as_scalar(int(coeff['use_charge']));
        return _hoomd.make_scalar3(coeff['epsilon'], coeff['eps_rf'], charge_flag);
Document that md.pair.dipole has no shift modes.
Fixes #178.
The reporter of #178 has not commented on or taken any action for
several months, so this issue is resolved by documenting the current
behavior.
# Copyright (c) 2009-2016 The Regents of the University of Michigan
# This file is part of the HOOMD-blue project, released under the BSD 3-Clause License.
R""" Pair potentials.
Generally, pair forces are short range and are summed over all non-bonded particles
within a certain cutoff radius of each particle. Any number of pair forces
can be defined in a single simulation. The net force on each particle due to
all types of pair forces is summed.
Pair forces require that parameters be set for each unique type pair. Coefficients
are set through the aid of the :py:class:`coeff` class. To set these coefficients, specify
a pair force and save it in a variable::
my_force = pair.some_pair_force(arguments...)
Then the coefficients can be set using the saved variable::
my_force.pair_coeff.set('A', 'A', epsilon=1.0, sigma=1.0)
my_force.pair_coeff.set('A', 'B', epsilon=1.0, sigma=2.0)
my_force.pair_coeff.set('B', 'B', epsilon=2.0, sigma=1.0)
This example sets the parameters *epsilon* and *sigma*
(which are used in :py:class:`lj`). Different pair forces require that different
coefficients are set. Check the documentation of each to see the definition
of the coefficients.
"""
from hoomd import _hoomd
from hoomd.md import _md
from hoomd.md import force;
from hoomd.md import nlist as nl # to avoid naming conflicts
import hoomd;
import math;
import sys;
from collections import OrderedDict
class coeff:
    R""" Define pair coefficients
    All pair forces use :py:class:`coeff` to specify the coefficients between different
    pairs of particles indexed by type. The set of pair coefficients is a symmetric
    matrix defined over all possible pairs of particle types.
    There are two ways to set the coefficients for a particular pair force.
    The first way is to save the pair force in a variable and call :py:meth:`set()` directly.
    The second method is to build the :py:class:`coeff` class first and then assign it to the
    pair force. There are some advantages to this method in that you could specify a
    complicated set of pair coefficients in a separate python file and import it into
    your job script.
    Example (**force_field.py**)::
        from hoomd import md
        my_coeffs = md.pair.coeff();
        my_force.pair_coeff.set('A', 'A', epsilon=1.0, sigma=1.0)
        my_force.pair_coeff.set('A', 'B', epsilon=1.0, sigma=2.0)
        my_force.pair_coeff.set('B', 'B', epsilon=2.0, sigma=1.0)
    Example job script::
        from hoomd import md
        import force_field
        .....
        my_force = md.pair.some_pair_force(arguments...)
        my_force.pair_coeff = force_field.my_coeffs
    """

    ## \internal
    # \brief Initializes the class
    def __init__(self):
        # symmetric matrix of per-pair parameter dicts, keyed by a (type_a, type_b) tuple;
        # each logical pair is stored under exactly one of its two key orderings
        self.values = {};
        # fallback values applied when a pair does not explicitly set a coefficient
        self.default_coeff = {}

    ## \internal
    # \brief Return a compact representation of the pair coefficients
    def get_metadata(self):
        # flatten the pair matrix into a list of ordered dicts for easy serialization
        flattened = []
        for (a, b), params in self.values.items():
            entry = OrderedDict()
            entry['typei'] = a
            entry['typej'] = b
            for key, value in params.items():
                entry[key] = value
            flattened.append(entry)
        return flattened

    ## \internal
    # \brief Sets a default value for a given coefficient
    # \param name Name of the coefficient for which to set the default
    # \param value Default value to set
    #
    # Some coefficients have reasonable default values and the user should not be
    # burdened with typing them in all the time.
    def set_default_coeff(self, name, value):
        self.default_coeff[name] = value;

    def set(self, a, b, **coeffs):
        R""" Sets parameters for one type pair.
        Args:
            a (str): First particle type in the pair (or a list of type names)
            b (str): Second particle type in the pair (or a list of type names)
            coeffs: Named coefficients (see below for examples)
        Calling :py:meth:`set()` results in one or more parameters being set for a single
        type pair or set of type pairs. Particle types are identified by name, and
        parameters are also added by name. Which parameters you need to specify depends
        on the pair force you are setting these coefficients for; see the corresponding
        documentation.
        All possible type pairs as defined in the simulation box must be specified before
        executing :py:class:`hoomd.run()`. You will receive an error if you fail to do so.
        It is not an error, however, to specify coefficients for particle types that do
        not exist in the simulation; this can be useful when defining a force field for
        many particle types even when some simulations only include a subset.
        The matrix is symmetric: there is no need to specify coefficients for both
        ('A', 'B') and ('B', 'A') — either one is sufficient.
        To apply the same coefficients to many particle types, pass a list of type names
        instead of a single one. All pairs between the two lists will be set to the same
        parameters.
        Examples::
            coeff.set('A', 'A', epsilon=1.0, sigma=1.0)
            coeff.set('B', 'B', epsilon=2.0, sigma=1.0)
            coeff.set('A', 'B', epsilon=1.5, sigma=1.0)
            coeff.set(['A', 'B', 'C', 'D'], 'F', epsilon=2.0)
            coeff.set(['A', 'B', 'C', 'D'], ['A', 'B', 'C', 'D'], epsilon=1.0)
            system = init.read_xml('init.xml')
            coeff.set(system.particles.types, system.particles.types, epsilon=2.0)
            coeff.set('A', system.particles.types, epsilon=1.2)
        Note:
            Single parameters can be updated. If both epsilon and sigma have already been
            set for a type pair, then executing ``coeff.set('A', 'B', epsilon=1.1)`` will
            update epsilon and leave sigma unchanged.
            Coefficients with documented defaults are filled in automatically when not
            set explicitly.
        """
        hoomd.util.print_status_line();

        # accept single type names as well as lists of names
        a_list = [a] if isinstance(a, str) else a
        b_list = [b] if isinstance(b, str) else b

        for type_a in a_list:
            for type_b in b_list:
                self.set_single(type_a, type_b, coeffs);

    ## \internal
    # \brief Sets the coefficients for one concrete (a, b) pair
    def set_single(self, a, b, coeffs):
        a = str(a);
        b = str(b);

        # the matrix is symmetric: create storage only if neither ordering exists yet
        if (a, b) not in self.values and (b, a) not in self.values:
            self.values[(a, b)] = {};

        # locate which ordering holds this pair's parameters
        if (a, b) in self.values:
            cur_pair = (a, b);
        elif (b, a) in self.values:
            cur_pair = (b, a);
        else:
            hoomd.context.msg.error("Bug detected in pair.coeff. Please report\n");
            raise RuntimeError("Error setting pair coeff");

        if not coeffs:
            hoomd.context.msg.error("No coefficents specified\n");

        # apply the caller's values, then fill in any defaults not overridden
        self.values[cur_pair].update(coeffs)
        for name, val in self.default_coeff.items():
            self.values[cur_pair].setdefault(name, val)

    ## \internal
    # \brief Verifies that the set parameters form a full matrix with all values set
    # \param required_coeffs list of required coefficient names
    #
    # This can only be run after the system has been initialized
    def verify(self, required_coeffs):
        # verification needs the particle data, which only exists after initialization
        if not hoomd.init.is_initialized():
            hoomd.context.msg.error("Cannot verify pair coefficients before initialization\n");
            raise RuntimeError('Error verifying pair coefficients');

        # collect the names of all particle types present in the simulation
        pdata = hoomd.context.current.system_definition.getParticleData();
        ntypes = pdata.getNTypes();
        type_list = [pdata.getNameByType(i) for i in range(ntypes)];

        valid = True;
        # every unordered pair of types must have all required coefficients set
        for i in range(ntypes):
            for j in range(i, ntypes):
                a = type_list[i];
                b = type_list[j];

                # find which ordering of the pair is stored
                if (a, b) in self.values:
                    cur_pair = (a, b);
                elif (b, a) in self.values:
                    cur_pair = (b, a);
                else:
                    hoomd.context.msg.error("Type pair " + str((a,b)) + " not found in pair coeff\n");
                    valid = False;
                    continue;

                # count how many of the required coefficients are present,
                # warning about extras that the pair force will never read
                matched = 0;
                for coeff_name in self.values[cur_pair]:
                    if coeff_name in required_coeffs:
                        matched += 1;
                    else:
                        hoomd.context.msg.notice(2, "Notice: Possible typo? Pair coeff " + str(coeff_name) + " is specified for pair " + str((a,b)) + ", but is not used by the pair force\n");

                if matched != len(required_coeffs):
                    hoomd.context.msg.error("Type pair " + str((a,b)) + " is missing required coefficients\n");
                    valid = False;

        return valid;

    ## \internal
    # \brief Fetch a single pair coefficient, or None if unset
    # \param a First name in the type pair
    # \param b Second name in the type pair
    # \param coeff_name Coefficient to get
    def get(self, a, b, coeff_name):
        # the pair may be stored under either key ordering
        for key in ((a, b), (b, a)):
            if key in self.values:
                return self.values[key].get(coeff_name);
        return None;
class pair(force._force):
    R""" Common pair potential documentation.
    Users should not invoke :py:class:`pair` directly. It is a base command that provides common
    features to all standard pair forces. Common documentation for all pair potentials is documented here.
    All pair force commands specify that a given potential energy and force be computed on all non-excluded particle
    pairs in the system within a short range cutoff distance :math:`r_{\mathrm{cut}}`.
    The force :math:`\vec{F}` applied between each pair of particles is:
    .. math::
        :nowrap:
        \begin{eqnarray*}
        \vec{F}  = & -\nabla V(r) & r < r_{\mathrm{cut}} \\
                 = & 0            & r \ge r_{\mathrm{cut}} \\
        \end{eqnarray*}
    where :math:`\vec{r}` is the vector pointing from one particle to the other in the pair, and :math:`V(r)` is
    chosen by a mode switch (see :py:meth:`set_params()`):
    .. math::
        :nowrap:
        \begin{eqnarray*}
        V(r)  = & V_{\mathrm{pair}}(r) & \mathrm{mode\ is\ no\_shift} \\
              = & V_{\mathrm{pair}}(r) - V_{\mathrm{pair}}(r_{\mathrm{cut}}) & \mathrm{mode\ is\ shift} \\
              = & S(r) \cdot V_{\mathrm{pair}}(r) & \mathrm{mode\ is\ xplor\ and\ } r_{\mathrm{on}} < r_{\mathrm{cut}} \\
              = & V_{\mathrm{pair}}(r) - V_{\mathrm{pair}}(r_{\mathrm{cut}}) & \mathrm{mode\ is\ xplor\ and\ } r_{\mathrm{on}} \ge r_{\mathrm{cut}}
        \end{eqnarray*}
    :math:`S(r)` is the XPLOR smoothing function:
    .. math::
        :nowrap:
        \begin{eqnarray*}
        S(r) = & 1 & r < r_{\mathrm{on}} \\
             = & \frac{(r_{\mathrm{cut}}^2 - r^2)^2 \cdot (r_{\mathrm{cut}}^2 + 2r^2 -
                 3r_{\mathrm{on}}^2)}{(r_{\mathrm{cut}}^2 - r_{\mathrm{on}}^2)^3}
               & r_{\mathrm{on}} \le r \le r_{\mathrm{cut}} \\
             = & 0 & r > r_{\mathrm{cut}} \\
        \end{eqnarray*}
    and :math:`V_{\mathrm{pair}}(r)` is the specific pair potential chosen by the respective command.
    Enabling the XPLOR smoothing function :math:`S(r)` results in both the potential energy and the force going smoothly
    to 0 at :math:`r = r_{\mathrm{cut}}`, reducing the rate of energy drift in long simulations.
    :math:`r_{\mathrm{on}}` controls the point at which the smoothing starts, so it can be set to only slightly modify
    the tail of the potential. It is suggested that you plot your potentials with various values of
    :math:`r_{\mathrm{on}}` in order to find a good balance between a smooth potential function and minimal modification
    of the original :math:`V_{\mathrm{pair}}(r)`. A good value for the LJ potential is
    :math:`r_{\mathrm{on}} = 2 \cdot \sigma`.
    The split smoothing / shifting of the potential when the mode is ``xplor`` is designed for use in mixed WCA / LJ
    systems. The WCA potential and its first derivative already go smoothly to 0 at the cutoff, so there is no need
    to apply the smoothing function. In such mixed systems, set :math:`r_{\mathrm{on}}` to a value greater than
    :math:`r_{\mathrm{cut}}` for those pairs that interact via WCA in order to enable shifting of the WCA potential
    to 0 at the cutoff.
    The following coefficients must be set per unique pair of particle types. See :py:mod:`hoomd.md.pair` for information
    on how to set coefficients:
    - :math:`r_{\mathrm{cut}}` - *r_cut* (in distance units)
      - *optional*: defaults to the global r_cut specified in the pair command
    - :math:`r_{\mathrm{on}}` - *r_on* (in distance units)
      - *optional*: defaults to the global r_cut specified in the pair command
    When :math:`r_{\mathrm{cut}} \le 0` or is set to False, the particle type pair interaction is excluded from the neighbor
    list. This mechanism can be used in conjunction with multiple neighbor lists to make efficient calculations in systems
    with large size disparity. Functionally, this is equivalent to setting :math:`r_{\mathrm{cut}} = 0` in the pair force
    because negative :math:`r_{\mathrm{cut}}` has no physical meaning.
    """

    ## \internal
    # \brief Initialize the pair force
    # \details
    # The derived class must set
    #  - self.cpp_class (the pair class to instantiate)
    #  - self.required_coeffs (a list of the coeff names the derived class needs)
    #  - self.process_coeffs() (a method that takes in the coeffs and spits out a param struct to use in
    #    self.cpp_force.set_params())
    def __init__(self, r_cut, nlist, name=None):
        # initialize the base class
        force._force.__init__(self, name);

        # convert r_cut False to a floating point type; -1 marks "interaction disabled"
        if r_cut is False:
            r_cut = -1.0
        self.global_r_cut = r_cut;

        # setup the coefficient matrix; r_cut and r_on default to the global cutoff
        self.pair_coeff = coeff();
        self.pair_coeff.set_default_coeff('r_cut', self.global_r_cut);
        self.pair_coeff.set_default_coeff('r_on', self.global_r_cut);

        # setup the neighbor list and let it query our per-pair cutoffs
        self.nlist = nlist
        self.nlist.subscribe(lambda:self.get_rcut())
        self.nlist.update_rcut()

    def set_params(self, mode=None):
        R""" Set parameters controlling the way forces are computed.
        Args:
            mode (str): (if set) Set the mode with which potentials are handled at the cutoff.
        Valid values for *mode* are: "no_shift" (the default), "shift", and "xplor":
        - **no_shift** - No shifting is performed and potentials are abruptly cut off
        - **shift** - A constant shift is applied to the entire potential so that it is 0 at the cutoff
        - **xplor** - A smoothing function is applied to gradually decrease both the force and potential to 0 at the
          cutoff when ron < rcut, and shifts the potential to 0 at the cutoff when ron >= rcut.
        See :py:class:`pair` for the equations.
        Examples::
            mypair.set_params(mode="shift")
            mypair.set_params(mode="no_shift")
            mypair.set_params(mode="xplor")
        Raises:
            RuntimeError: if *mode* is not one of the valid values above.
        """
        hoomd.util.print_status_line();

        if mode is not None:
            # map the mode string onto the C++ energy shift enum
            if mode == "no_shift":
                self.cpp_force.setShiftMode(self.cpp_class.energyShiftMode.no_shift)
            elif mode == "shift":
                self.cpp_force.setShiftMode(self.cpp_class.energyShiftMode.shift)
            elif mode == "xplor":
                self.cpp_force.setShiftMode(self.cpp_class.energyShiftMode.xplor)
            else:
                hoomd.context.msg.error("Invalid mode\n");
                raise RuntimeError("Error changing parameters in pair force");

    def process_coeff(self, coeff):
        # derived classes must override this; reaching it indicates a bug
        hoomd.context.msg.error("Bug in hoomd_script, please report\n");
        raise RuntimeError("Error processing coefficients");

    def update_coeffs(self):
        # every pair potential implicitly requires r_cut and r_on in addition
        # to the coefficients declared by the derived class
        coeff_list = self.required_coeffs + ["r_cut", "r_on"];
        # check that the pair coefficients are valid
        if not self.pair_coeff.verify(coeff_list):
            hoomd.context.msg.error("Not all pair coefficients are set\n");
            raise RuntimeError("Error updating pair coefficients");

        # set all the params
        ntypes = hoomd.context.current.system_definition.getParticleData().getNTypes();
        type_list = [];
        for i in range(0,ntypes):
            type_list.append(hoomd.context.current.system_definition.getParticleData().getNameByType(i));

        for i in range(0,ntypes):
            for j in range(i,ntypes):
                # build a dict of the coeffs to pass to process_coeff
                coeff_dict = {};
                for name in coeff_list:
                    coeff_dict[name] = self.pair_coeff.get(type_list[i], type_list[j], name);

                param = self.process_coeff(coeff_dict);
                self.cpp_force.setParams(i, j, param);
                # rcut can now have "invalid" C++ values, which we round up to zero
                self.cpp_force.setRcut(i, j, max(coeff_dict['r_cut'], 0.0));
                self.cpp_force.setRon(i, j, max(coeff_dict['r_on'], 0.0));

    ## \internal
    # \brief Get the maximum r_cut value set for any type pair
    # \pre update_coeffs must be called before get_max_rcut to verify that the coeffs are set
    def get_max_rcut(self):
        # go through the list of only the active particle types in the sim
        ntypes = hoomd.context.current.system_definition.getParticleData().getNTypes();
        type_list = [];
        for i in range(0,ntypes):
            type_list.append(hoomd.context.current.system_definition.getParticleData().getNameByType(i));

        # find the maximum r_cut
        max_rcut = 0.0;
        for i in range(0,ntypes):
            for j in range(i,ntypes):
                # get the r_cut value
                r_cut = self.pair_coeff.get(type_list[i], type_list[j], 'r_cut');
                max_rcut = max(max_rcut, r_cut);

        return max_rcut;

    ## \internal
    # \brief Get the r_cut pair dictionary
    # \returns The rcut(i,j) dict if logging is on, and None if logging is off
    def get_rcut(self):
        if not self.log:
            return None

        # go through the list of only the active particle types in the sim
        ntypes = hoomd.context.current.system_definition.getParticleData().getNTypes();
        type_list = [];
        for i in range(0,ntypes):
            type_list.append(hoomd.context.current.system_definition.getParticleData().getNameByType(i));

        # update the rcut by pair type
        r_cut_dict = nl.rcut();
        for i in range(0,ntypes):
            for j in range(i,ntypes):
                # get the r_cut value
                r_cut = self.pair_coeff.get(type_list[i], type_list[j], 'r_cut');
                if r_cut is not None: # use the defined value
                    if r_cut is False: # interaction is turned off
                        r_cut_dict.set_pair(type_list[i],type_list[j], -1.0);
                    else:
                        r_cut_dict.set_pair(type_list[i],type_list[j], r_cut);
                else: # use the global default
                    r_cut_dict.set_pair(type_list[i],type_list[j],self.global_r_cut);

        return r_cut_dict;

    ## \internal
    # \brief Return metadata for this pair potential
    def get_metadata(self):
        data = force._force.get_metadata(self)

        # make sure all coefficients are set
        self.update_coeffs()

        data['pair_coeff'] = self.pair_coeff
        return data

    def compute_energy(self, tags1, tags2):
        R""" Compute the energy between two sets of particles.
        Args:
            tags1 (``ndarray<int32>``): a numpy array of particle tags in the first group
            tags2 (``ndarray<int32>``): a numpy array of particle tags in the second group
        .. math::
            U = \sum_{i \in \mathrm{tags1}, j \in \mathrm{tags2}} V_{ij}(r)
        where :math:`V_{ij}(r)` is the pairwise energy between two particles :math:`i` and :math:`j`.
        Assumed properties of the sets *tags1* and *tags2* are:
        - *tags1* and *tags2* are disjoint
        - all elements in *tags1* and *tags2* are unique
        - *tags1* and *tags2* are contiguous numpy arrays of dtype int32
        None of these properties are validated.
        Examples::
            tags=numpy.linspace(0,N-1,1, dtype=numpy.int32)
            # computes the energy between even and odd particles
            U = mypair.compute_energy(tags1=numpy.array(tags[0:N:2]), tags2=numpy.array(tags[1:N:2]))
        """
        # future versions could use np functions to test the assumptions above and raise an error if they occur.
        return self.cpp_force.computeEnergyBetweenSets(tags1, tags2);
class lj(pair):
    R""" Lennard-Jones pair potential.
    Args:
        r_cut (float): Default cutoff radius (in distance units).
        nlist (:py:mod:`hoomd.md.nlist`): Neighbor list
        name (str): Name of the force instance.
    :py:class:`lj` applies the Lennard-Jones pair potential between every
    non-excluded particle pair in the simulation:
    .. math::
        :nowrap:
        \begin{eqnarray*}
        V_{\mathrm{LJ}}(r)  = & 4 \varepsilon \left[ \left( \frac{\sigma}{r} \right)^{12} -
                                \alpha \left( \frac{\sigma}{r} \right)^{6} \right] & r < r_{\mathrm{cut}} \\
                            = & 0 & r \ge r_{\mathrm{cut}} \\
        \end{eqnarray*}
    See :py:class:`pair` for details on how forces are calculated and the available
    energy shifting and smoothing modes.
    Use :py:meth:`pair_coeff.set <coeff.set>` to set potential coefficients.
    The following coefficients must be set per unique pair of particle types:
    - :math:`\varepsilon` - *epsilon* (in energy units)
    - :math:`\sigma` - *sigma* (in distance units)
    - :math:`\alpha` - *alpha* (unitless) - *optional*: defaults to 1.0
    - :math:`r_{\mathrm{cut}}` - *r_cut* (in distance units)
      - *optional*: defaults to the global r_cut specified in the pair command
    - :math:`r_{\mathrm{on}}` - *r_on* (in distance units)
      - *optional*: defaults to the global r_cut specified in the pair command
    Example::
        nl = nlist.cell()
        lj = pair.lj(r_cut=3.0, nlist=nl)
        lj.pair_coeff.set('A', 'A', epsilon=1.0, sigma=1.0)
        lj.pair_coeff.set('A', 'B', epsilon=2.0, sigma=1.0, alpha=0.5, r_cut=3.0, r_on=2.0);
        lj.pair_coeff.set('B', 'B', epsilon=1.0, sigma=1.0, r_cut=2**(1.0/6.0), r_on=2.0);
        lj.pair_coeff.set(['A', 'B'], ['C', 'D'], epsilon=1.5, sigma=2.0)
    """
    def __init__(self, r_cut, nlist, name=None):
        hoomd.util.print_status_line();

        # base class sets up the coefficient matrix and neighbor list hooks
        pair.__init__(self, r_cut, nlist, name);

        # instantiate the matching CPU/GPU C++ compute
        if hoomd.context.exec_conf.isCUDAEnabled():
            # GPU kernels require a full neighbor list
            self.nlist.cpp_nlist.setStorageMode(_md.NeighborList.storageMode.full);
            self.cpp_class = _md.PotentialPairLJGPU;
        else:
            self.cpp_class = _md.PotentialPairLJ;
        self.cpp_force = self.cpp_class(hoomd.context.current.system_definition, self.nlist.cpp_nlist, self.name);

        hoomd.context.current.system.addCompute(self.cpp_force, self.force_name);

        # per-pair coefficients; alpha is optional and defaults to 1.0
        self.required_coeffs = ['epsilon', 'sigma', 'alpha'];
        self.pair_coeff.set_default_coeff('alpha', 1.0);

    def process_coeff(self, coeff):
        # precompute the two prefactors consumed by the C++ evaluator:
        #   V(r) = lj1 / r^12 - lj2 / r^6
        epsilon = coeff['epsilon'];
        sigma = coeff['sigma'];
        alpha = coeff['alpha'];

        lj1 = 4.0 * epsilon * math.pow(sigma, 12.0);
        lj2 = alpha * 4.0 * epsilon * math.pow(sigma, 6.0);
        return _hoomd.make_scalar2(lj1, lj2);
class gauss(pair):
    R""" Gaussian pair potential.
    Args:
        r_cut (float): Default cutoff radius (in distance units).
        nlist (:py:mod:`hoomd.md.nlist`): Neighbor list
        name (str): Name of the force instance.
    :py:class:`gauss` applies a Gaussian pair potential between every
    non-excluded particle pair in the simulation:
    .. math::
        :nowrap:
        \begin{eqnarray*}
        V_{\mathrm{gauss}}(r)  = & \varepsilon \exp \left[ -\frac{1}{2}\left( \frac{r}{\sigma} \right)^2 \right]
                                 & r < r_{\mathrm{cut}} \\
                               = & 0 & r \ge r_{\mathrm{cut}} \\
        \end{eqnarray*}
    See :py:class:`pair` for details on how forces are calculated and the available
    energy shifting and smoothing modes.
    Use :py:meth:`pair_coeff.set <coeff.set>` to set potential coefficients.
    The following coefficients must be set per unique pair of particle types:
    - :math:`\varepsilon` - *epsilon* (in energy units)
    - :math:`\sigma` - *sigma* (in distance units)
    - :math:`r_{\mathrm{cut}}` - *r_cut* (in distance units)
      - *optional*: defaults to the global r_cut specified in the pair command
    - :math:`r_{\mathrm{on}}` - *r_on* (in distance units)
      - *optional*: defaults to the global r_cut specified in the pair command
    Example::
        nl = nlist.cell()
        gauss = pair.gauss(r_cut=3.0, nlist=nl)
        gauss.pair_coeff.set('A', 'A', epsilon=1.0, sigma=1.0)
        gauss.pair_coeff.set('A', 'B', epsilon=2.0, sigma=1.0, r_cut=3.0, r_on=2.0);
        gauss.pair_coeff.set(['A', 'B'], ['C', 'D'], epsilon=3.0, sigma=0.5)
    """
    def __init__(self, r_cut, nlist, name=None):
        hoomd.util.print_status_line();

        # base class handles the coefficient matrix and neighbor list hookup
        pair.__init__(self, r_cut, nlist, name);

        # choose the CPU or GPU C++ compute
        if hoomd.context.exec_conf.isCUDAEnabled():
            # GPU kernels require a full neighbor list
            self.nlist.cpp_nlist.setStorageMode(_md.NeighborList.storageMode.full);
            self.cpp_class = _md.PotentialPairGaussGPU;
        else:
            self.cpp_class = _md.PotentialPairGauss;
        self.cpp_force = self.cpp_class(hoomd.context.current.system_definition, self.nlist.cpp_nlist, self.name);

        hoomd.context.current.system.addCompute(self.cpp_force, self.force_name);

        # every type pair must provide epsilon and sigma
        self.required_coeffs = ['epsilon', 'sigma'];

    def process_coeff(self, coeff):
        # the C++ evaluator consumes (epsilon, sigma) directly
        return _hoomd.make_scalar2(coeff['epsilon'], coeff['sigma']);
class slj(pair):
    R""" Shifted Lennard-Jones pair potential.

    Args:
        r_cut (float): Default cutoff radius (in distance units).
        nlist (:py:mod:`hoomd.md.nlist`): Neighbor list
        name (str): Name of the force instance.
        d_max (float): Maximum diameter particles in the simulation will have (in distance units)

    :py:class:`slj` specifies that a shifted Lennard-Jones type pair potential should be applied between every
    non-excluded particle pair in the simulation.

    .. math::
        :nowrap:

        \begin{eqnarray*}
        V_{\mathrm{SLJ}}(r) = & 4 \varepsilon \left[ \left( \frac{\sigma}{r - \Delta} \right)^{12} -
                               \left( \frac{\sigma}{r - \Delta} \right)^{6} \right] & r < (r_{\mathrm{cut}} + \Delta) \\
                            = & 0 & r \ge (r_{\mathrm{cut}} + \Delta) \\
        \end{eqnarray*}

    where :math:`\Delta = (d_i + d_j)/2 - 1` and :math:`d_i` is the diameter of particle :math:`i`.

    See :py:class:`pair` for details on how forces are calculated and the available energy shifting and smoothing modes.
    Use :py:meth:`pair_coeff.set <coeff.set>` to set potential coefficients.

    The following coefficients must be set per unique pair of particle types:

    - :math:`\varepsilon` - *epsilon* (in energy units)
    - :math:`\sigma` - *sigma* (in distance units)
      - *optional*: defaults to 1.0
    - :math:`r_{\mathrm{cut}}` - *r_cut* (in distance units)
      - *optional*: defaults to the global r_cut specified in the pair command

    .. attention::
        Due to the way that pair.slj modifies the cutoff criteria, a shift_mode of *xplor* is not supported.

    The actual cutoff radius for pair.slj is shifted by the diameter of two particles interacting. Thus to determine
    the maximum possible actual r_cut in simulation
    pair.slj must know the maximum diameter of all the particles over the entire run, *d_max* .
    This value is either determined automatically from the initialization or can be set by the user and can be
    modified between runs with :py:meth:`hoomd.md.nlist.nlist.set_params()`. In most cases, the correct value can be
    identified automatically.

    The specified value of *d_max* will be used to properly determine the neighbor lists during the following
    :py:func:`hoomd.run()` commands. If not specified, :py:class:`slj` will set d_max to the largest diameter
    in particle data at the time it is initialized.

    If particle diameters change after initialization, it is **imperative** that *d_max* be the largest
    diameter that any particle will attain at any time during the following :py:func:`hoomd.run()` commands.
    If *d_max* is smaller than it should be, some particles will effectively have a smaller value of *r_cut*
    than was set and the simulation will be incorrect. *d_max* can be changed between runs by calling
    :py:meth:`hoomd.md.nlist.nlist.set_params()`.

    Example::

        nl = nlist.cell()
        slj = pair.slj(r_cut=3.0, nlist=nl, d_max = 2.0)
        slj.pair_coeff.set('A', 'A', epsilon=1.0)
        slj.pair_coeff.set('A', 'B', epsilon=2.0, r_cut=3.0);
        slj.pair_coeff.set('B', 'B', epsilon=1.0, r_cut=2**(1.0/6.0));
        slj.pair_coeff.set(['A', 'B'], ['C', 'D'], epsilon=2.0)
    """
    def __init__(self, r_cut, nlist, d_max=None, name=None):
        hoomd.util.print_status_line();

        # initialize the base class
        pair.__init__(self, r_cut, nlist, name);

        # when not provided, determine d_max from the current particle data
        if d_max is None :
            sysdef = hoomd.context.current.system_definition;
            d_max = sysdef.getParticleData().getMaxDiameter()
            hoomd.context.msg.notice(2, "Notice: slj set d_max=" + str(d_max) + "\n");

        # SLJ requires diameter shifting to be on
        self.nlist.cpp_nlist.setDiameterShift(True);
        self.nlist.cpp_nlist.setMaximumDiameter(d_max);

        # create the c++ mirror class (GPU variant needs a full neighbor list)
        if not hoomd.context.exec_conf.isCUDAEnabled():
            self.cpp_force = _md.PotentialPairSLJ(hoomd.context.current.system_definition, self.nlist.cpp_nlist, self.name);
            self.cpp_class = _md.PotentialPairSLJ;
        else:
            self.nlist.cpp_nlist.setStorageMode(_md.NeighborList.storageMode.full);
            self.cpp_force = _md.PotentialPairSLJGPU(hoomd.context.current.system_definition, self.nlist.cpp_nlist, self.name);
            self.cpp_class = _md.PotentialPairSLJGPU;

        hoomd.context.current.system.addCompute(self.cpp_force, self.force_name);

        # setup the coefficient options
        self.required_coeffs = ['epsilon', 'sigma', 'alpha'];
        self.pair_coeff.set_default_coeff('alpha', 1.0);

    def process_coeff(self, coeff):
        # precompute the lj1/lj2 prefactors used directly by the C++ evaluator
        epsilon = coeff['epsilon'];
        sigma = coeff['sigma'];
        alpha = coeff['alpha'];

        lj1 = 4.0 * epsilon * math.pow(sigma, 12.0);
        lj2 = alpha * 4.0 * epsilon * math.pow(sigma, 6.0);
        return _hoomd.make_scalar2(lj1, lj2);

    def set_params(self, mode=None):
        R""" Set parameters controlling the way forces are computed.

        See :py:meth:`pair.set_params()`.

        Note:
            **xplor** is not a valid setting for :py:class:`slj`.
        """
        hoomd.util.print_status_line();

        if mode == "xplor":
            # fixed garbled grammar in the user-facing error message
            # (was: "XPLOR is smoothing is not supported with slj")
            hoomd.context.msg.error("XPLOR smoothing is not supported with slj\n");
            raise RuntimeError("Error changing parameters in pair force");

        pair.set_params(self, mode=mode);
class yukawa(pair):
    R""" Yukawa pair potential.

    Args:
        r_cut (float): Default cutoff radius (in distance units).
        nlist (:py:mod:`hoomd.md.nlist`): Neighbor list
        name (str): Name of the force instance.

    :py:class:`yukawa` specifies that a Yukawa pair potential should be applied between every
    non-excluded particle pair in the simulation.

    .. math::
        :nowrap:

        \begin{eqnarray*}
        V_{\mathrm{yukawa}}(r) = & \varepsilon \frac{ \exp \left( -\kappa r \right) }{r} & r < r_{\mathrm{cut}} \\
                               = & 0 & r \ge r_{\mathrm{cut}} \\
        \end{eqnarray*}

    See :py:class:`pair` for details on how forces are calculated and the available energy shifting and smoothing modes.
    Use :py:meth:`pair_coeff.set <coeff.set>` to set potential coefficients.

    The following coefficients must be set per unique pair of particle types:

    - :math:`\varepsilon` - *epsilon* (in energy units)
    - :math:`\kappa` - *kappa* (in units of 1/distance)
    - :math:`r_{\mathrm{cut}}` - *r_cut* (in distance units)
      - *optional*: defaults to the global r_cut specified in the pair command
    - :math:`r_{\mathrm{on}}` - *r_on* (in distance units)
      - *optional*: defaults to the global r_cut specified in the pair command

    Example::

        nl = nlist.cell()
        yukawa = pair.yukawa(r_cut=3.0, nlist=nl)
        yukawa.pair_coeff.set('A', 'A', epsilon=1.0, kappa=1.0)
        yukawa.pair_coeff.set('A', 'B', epsilon=2.0, kappa=0.5, r_cut=3.0, r_on=2.0);
        yukawa.pair_coeff.set(['A', 'B'], ['C', 'D'], epsilon=0.5, kappa=3.0)
    """
    def __init__(self, r_cut, nlist, name=None):
        hoomd.util.print_status_line();

        # initialize the base class (sets up the coefficient matrix and r_cut handling)
        pair.__init__(self, r_cut, nlist, name);

        # create the c++ mirror class; the GPU variant requires a full neighbor list
        if not hoomd.context.exec_conf.isCUDAEnabled():
            self.cpp_force = _md.PotentialPairYukawa(hoomd.context.current.system_definition, self.nlist.cpp_nlist, self.name);
            self.cpp_class = _md.PotentialPairYukawa;
        else:
            self.nlist.cpp_nlist.setStorageMode(_md.NeighborList.storageMode.full);
            self.cpp_force = _md.PotentialPairYukawaGPU(hoomd.context.current.system_definition, self.nlist.cpp_nlist, self.name);
            self.cpp_class = _md.PotentialPairYukawaGPU;

        hoomd.context.current.system.addCompute(self.cpp_force, self.force_name);

        # setup the coefficient options
        self.required_coeffs = ['epsilon', 'kappa'];

    def process_coeff(self, coeff):
        # pack (epsilon, kappa) into the scalar2 expected by the C++ evaluator
        epsilon = coeff['epsilon'];
        kappa = coeff['kappa'];
        return _hoomd.make_scalar2(epsilon, kappa);
class ewald(pair):
    R""" Ewald pair potential.

    Args:
        r_cut (float): Default cutoff radius (in distance units).
        nlist (:py:mod:`hoomd.md.nlist`): Neighbor list
        name (str): Name of the force instance.

    :py:class:`ewald` specifies that a Ewald pair potential should be applied between every
    non-excluded particle pair in the simulation.

    .. math::
        :nowrap:

        \begin{eqnarray*}
        V_{\mathrm{ewald}}(r) = & q_i q_j \left[\mathrm{erfc}\left(\kappa r + \frac{\alpha}{2\kappa}\right) \exp(\alpha r) +
                                \mathrm{erfc}\left(\kappa r - \frac{\alpha}{2 \kappa}\right) \exp(-\alpha r)\right] & r < r_{\mathrm{cut}} \\
                              = & 0 & r \ge r_{\mathrm{cut}} \\
        \end{eqnarray*}

    The Ewald potential is designed to be used in conjunction with :py:class:`hoomd.md.charge.pppm`.

    See :py:class:`pair` for details on how forces are calculated and the available energy shifting and smoothing modes.
    Use :py:meth:`pair_coeff.set <coeff.set>` to set potential coefficients.

    The following coefficients must be set per unique pair of particle types:

    - :math:`\kappa` - *kappa* (Splitting parameter, in 1/distance units)
    - :math:`\alpha` - *alpha* (Debye screening length, in 1/distance units)

      .. versionadded:: 2.1

    - :math:`r_{\mathrm{cut}}` - *r_cut* (in distance units)
      - *optional*: defaults to the global r_cut specified in the pair command
    - :math:`r_{\mathrm{on}}` - *r_on* (in distance units)
      - *optional*: defaults to the global r_cut specified in the pair command

    Example::

        nl = nlist.cell()
        ewald = pair.ewald(r_cut=3.0, nlist=nl)
        ewald.pair_coeff.set('A', 'A', kappa=1.0)
        ewald.pair_coeff.set('A', 'A', kappa=1.0, alpha=1.5)
        ewald.pair_coeff.set('A', 'B', kappa=1.0, r_cut=3.0, r_on=2.0);

    Warning:
        **DO NOT** use in conjunction with :py:class:`hoomd.md.charge.pppm`. It automatically creates and configures
        :py:class:`ewald` for you.
    """
    def __init__(self, r_cut, nlist, name=None):
        hoomd.util.print_status_line();

        # initialize the base class (sets up the coefficient matrix and r_cut handling)
        pair.__init__(self, r_cut, nlist, name);

        # create the c++ mirror class; the GPU variant requires a full neighbor list
        if not hoomd.context.exec_conf.isCUDAEnabled():
            self.cpp_force = _md.PotentialPairEwald(hoomd.context.current.system_definition, self.nlist.cpp_nlist, self.name);
            self.cpp_class = _md.PotentialPairEwald;
        else:
            self.nlist.cpp_nlist.setStorageMode(_md.NeighborList.storageMode.full);
            self.cpp_force = _md.PotentialPairEwaldGPU(hoomd.context.current.system_definition, self.nlist.cpp_nlist, self.name);
            self.cpp_class = _md.PotentialPairEwaldGPU;

        hoomd.context.current.system.addCompute(self.cpp_force, self.force_name);

        # setup the coefficient options; alpha defaults to 0 (no screening)
        self.required_coeffs = ['kappa','alpha'];
        self.pair_coeff.set_default_coeff('alpha', 0.0);

    def process_coeff(self, coeff):
        # pack (kappa, alpha) into the scalar2 expected by the C++ evaluator
        kappa = coeff['kappa'];
        alpha = coeff['alpha'];
        return _hoomd.make_scalar2(kappa, alpha)

    def set_params(self, coeff):
        """ :py:class:`ewald` has no energy shift modes """
        # fixed copy-paste error: the message previously said
        # 'Not implemented for DPD Conservative', which refers to a different potential
        raise RuntimeError('Energy shift modes are not supported by pair.ewald');
def _table_eval(r, rmin, rmax, V, F, width):
dr = (rmax - rmin) / float(width-1);
i = int(round((r - rmin)/dr))
return (V[i], F[i])
class table(force._force):
    R""" Tabulated pair potential.

    Args:
        width (int): Number of points to use to interpolate V and F.
        nlist (:py:mod:`hoomd.md.nlist`): Neighbor list.
        name (str): Name of the force instance

    :py:class:`table` specifies that a tabulated pair potential should be applied between every
    non-excluded particle pair in the simulation.

    The force :math:`\vec{F}` is (in force units):

    .. math::
        :nowrap:

        \begin{eqnarray*}
        \vec{F}(\vec{r}) = & 0 & r < r_{\mathrm{min}} \\
                         = & F_{\mathrm{user}}(r)\hat{r} & r_{\mathrm{min}} \le r < r_{\mathrm{max}} \\
                         = & 0 & r \ge r_{\mathrm{max}} \\
        \end{eqnarray*}

    and the potential :math:`V(r)` is (in energy units)

    .. math::
        :nowrap:

        \begin{eqnarray*}
        V(r) = & 0 & r < r_{\mathrm{min}} \\
             = & V_{\mathrm{user}}(r) & r_{\mathrm{min}} \le r < r_{\mathrm{max}} \\
             = & 0 & r \ge r_{\mathrm{max}} \\
        \end{eqnarray*}

    where :math:`\vec{r}` is the vector pointing from one particle to the other in the pair.

    :math:`F_{\mathrm{user}}(r)` and :math:`V_{\mathrm{user}}(r)` are evaluated on *width* grid points between
    :math:`r_{\mathrm{min}}` and :math:`r_{\mathrm{max}}`. Values are interpolated linearly between grid points.
    For correctness, you must specify the force defined by: :math:`F = -\frac{\partial V}{\partial r}`.

    The following coefficients must be set per unique pair of particle types:

    - :math:`V_{\mathrm{user}}(r)` and :math:`F_{\mathrm{user}}(r)` - evaluated by ``func`` (see example)
    - coefficients passed to ``func`` - *coeff* (see example)
    - :math:`r_{\mathrm{min}}` - *rmin* (in distance units)
    - :math:`r_{\mathrm{max}}` - *rmax* (in distance units)

    .. rubric:: Set table from a given function

    When you have a functional form for V and F, you can enter that
    directly into python. :py:class:`table` will evaluate the given function over *width* points between
    *rmin* and *rmax* and use the resulting values in the table::

        def lj(r, rmin, rmax, epsilon, sigma):
            V = 4 * epsilon * ( (sigma / r)**12 - (sigma / r)**6);
            F = 4 * epsilon / r * ( 12 * (sigma / r)**12 - 6 * (sigma / r)**6);
            return (V, F)

        nl = nlist.cell()
        table = pair.table(width=1000, nlist=nl)
        table.pair_coeff.set('A', 'A', func=lj, rmin=0.8, rmax=3.0, coeff=dict(epsilon=1.5, sigma=1.0))
        table.pair_coeff.set('A', 'B', func=lj, rmin=0.8, rmax=3.0, coeff=dict(epsilon=2.0, sigma=1.2))
        table.pair_coeff.set('B', 'B', func=lj, rmin=0.8, rmax=3.0, coeff=dict(epsilon=0.5, sigma=1.0))

    .. rubric:: Set a table from a file

    When you have no functional form for *V* or *F*, or you otherwise have the data listed in a file,
    :py:class:`table` can use the given values directly. You must first specify the number of rows
    in your tables when initializing pair.table. Then use :py:meth:`set_from_file()` to read the file::

        nl = nlist.cell()
        table = pair.table(width=1000, nlist=nl)
        table.set_from_file('A', 'A', filename='table_AA.dat')
        table.set_from_file('A', 'B', filename='table_AB.dat')
        table.set_from_file('B', 'B', filename='table_BB.dat')

    Note:
        For potentials that diverge near r=0, make sure to set *rmin* to a reasonable value. If a potential does
        not diverge near r=0, then a setting of *rmin=0* is valid.
    """
    def __init__(self, width, nlist, name=None):
        hoomd.util.print_status_line();

        # initialize the base class
        force._force.__init__(self, name);

        # setup the coefficient matrix
        self.pair_coeff = coeff();

        # subscribe this force's r_cut requirements to the neighbor list
        self.nlist = nlist
        self.nlist.subscribe(lambda:self.get_rcut())
        self.nlist.update_rcut()

        # create the c++ mirror class; the GPU variant requires a full neighbor list
        if not hoomd.context.exec_conf.isCUDAEnabled():
            self.cpp_force = _md.TablePotential(hoomd.context.current.system_definition, self.nlist.cpp_nlist, int(width), self.name);
        else:
            self.nlist.cpp_nlist.setStorageMode(_md.NeighborList.storageMode.full);
            self.cpp_force = _md.TablePotentialGPU(hoomd.context.current.system_definition, self.nlist.cpp_nlist, int(width), self.name);

        hoomd.context.current.system.addCompute(self.cpp_force, self.force_name);

        # stash the width for later use
        self.width = width;

    def update_pair_table(self, typei, typej, func, rmin, rmax, coeff):
        # Evaluate `func` on `self.width` evenly spaced points in [rmin, rmax]
        # and push the resulting V/F tables to the C++ compute for one type pair.

        # allocate arrays to store V and F
        Vtable = _hoomd.std_vector_scalar();
        Ftable = _hoomd.std_vector_scalar();

        # calculate dr
        dr = (rmax - rmin) / float(self.width-1);

        # evaluate each point of the function
        for i in range(0, self.width):
            r = rmin + dr * i;
            (V,F) = func(r, rmin, rmax, **coeff);

            # fill out the tables
            Vtable.append(V);
            Ftable.append(F);

        # pass the tables on to the underlying cpp compute
        self.cpp_force.setTable(typei, typej, Vtable, Ftable, rmin, rmax);

    ## \internal
    # \brief Get the r_cut pair dictionary
    # \returns rcut(i,j) dict if logging is on, and None otherwise
    def get_rcut(self):
        if not self.log:
            return None

        # go through the list of only the active particle types in the sim
        ntypes = hoomd.context.current.system_definition.getParticleData().getNTypes();
        type_list = [];
        for i in range(0,ntypes):
            type_list.append(hoomd.context.current.system_definition.getParticleData().getNameByType(i));

        # update the rcut by pair type
        # NOTE(review): `nl` is presumably the hoomd.md.nlist module imported
        # under that alias at the top of this file -- confirm against the imports.
        r_cut_dict = nl.rcut();
        for i in range(0,ntypes):
            for j in range(i,ntypes):
                # for tables, rmax doubles as the cutoff radius for the pair
                rmax = self.pair_coeff.get(type_list[i], type_list[j], 'rmax');
                r_cut_dict.set_pair(type_list[i],type_list[j], rmax);

        return r_cut_dict;

    def get_max_rcut(self):
        # the largest rmax over all type pairs bounds the neighbor list cutoff

        # loop only over current particle types
        ntypes = hoomd.context.current.system_definition.getParticleData().getNTypes();
        type_list = [];
        for i in range(0,ntypes):
            type_list.append(hoomd.context.current.system_definition.getParticleData().getNameByType(i));

        # find the maximum rmax to update the neighbor list with
        maxrmax = 0.0;

        # loop through all of the unique type pairs and find the maximum rmax
        for i in range(0,ntypes):
            for j in range(i,ntypes):
                rmax = self.pair_coeff.get(type_list[i], type_list[j], "rmax");
                maxrmax = max(maxrmax, rmax);

        return maxrmax;

    def update_coeffs(self):
        # check that the pair coefficients are valid
        if not self.pair_coeff.verify(["func", "rmin", "rmax", "coeff"]):
            hoomd.context.msg.error("Not all pair coefficients are set for pair.table\n");
            raise RuntimeError("Error updating pair coefficients");

        # set all the params
        ntypes = hoomd.context.current.system_definition.getParticleData().getNTypes();
        type_list = [];
        for i in range(0,ntypes):
            type_list.append(hoomd.context.current.system_definition.getParticleData().getNameByType(i));

        # loop through all of the unique type pairs and evaluate the table
        for i in range(0,ntypes):
            for j in range(i,ntypes):
                func = self.pair_coeff.get(type_list[i], type_list[j], "func");
                rmin = self.pair_coeff.get(type_list[i], type_list[j], "rmin");
                rmax = self.pair_coeff.get(type_list[i], type_list[j], "rmax");
                coeff = self.pair_coeff.get(type_list[i], type_list[j], "coeff");

                self.update_pair_table(i, j, func, rmin, rmax, coeff);

    def set_from_file(self, a, b, filename):
        R""" Set a pair interaction from a file.

        Args:
            a (str): Name of type A in pair
            b (str): Name of type B in pair
            filename (str): Name of the file to read

        The provided file specifies V and F at equally spaced r values.

        Example::

            #r V F
            1.0 2.0 -3.0
            1.1 3.0 -4.0
            1.2 2.0 -3.0
            1.3 1.0 -2.0
            1.4 0.0 -1.0
            1.5 -1.0 0.0

        The first r value sets *rmin*, the last sets *rmax*. Any line with # as the first non-whitespace character is
        treated as a comment. The *r* values must monotonically increase and be equally spaced. The table is read
        directly into the grid points used to evaluate :math:`F_{\mathrm{user}}(r)` and :math:`V_{\mathrm{user}}(r)`.
        """
        hoomd.util.print_status_line();

        # open the file
        f = open(filename);

        r_table = [];
        V_table = [];
        F_table = [];

        # read in lines from the file
        for line in f.readlines():
            line = line.strip();

            # skip comment lines
            # NOTE(review): a fully blank line would raise IndexError here
            # ('' has no [0]); assumes input files contain no blank lines -- confirm.
            if line[0] == '#':
                continue;

            # split out the columns
            cols = line.split();
            values = [float(f) for f in cols];

            # validate the input
            if len(values) != 3:
                hoomd.context.msg.error("pair.table: file must have exactly 3 columns\n");
                raise RuntimeError("Error reading table file");

            # append to the tables
            r_table.append(values[0]);
            V_table.append(values[1]);
            F_table.append(values[2]);

        # validate input: the file must match the table width declared at construction
        if self.width != len(r_table):
            hoomd.context.msg.error("pair.table: file must have exactly " + str(self.width) + " rows\n");
            raise RuntimeError("Error reading table file");

        # extract rmin and rmax
        rmin_table = r_table[0];
        rmax_table = r_table[-1];

        # check for even spacing
        dr = (rmax_table - rmin_table) / float(self.width-1);
        for i in range(0,self.width):
            r = rmin_table + dr * i;
            if math.fabs(r - r_table[i]) > 1e-3:
                hoomd.context.msg.error("pair.table: r must be monotonically increasing and evenly spaced\n");
                raise RuntimeError("Error reading table file");

        hoomd.util.quiet_status();
        self.pair_coeff.set(a, b, func=_table_eval, rmin=rmin_table, rmax=rmax_table, coeff=dict(V=V_table, F=F_table, width=self.width))
        hoomd.util.unquiet_status();
class morse(pair):
    R""" Morse pair potential.

    :py:class:`morse` specifies that a Morse pair potential should be applied between every
    non-excluded particle pair in the simulation.

    .. math::
        :nowrap:

        \begin{eqnarray*}
        V_{\mathrm{morse}}(r) = & D_0 \left[ \exp \left(-2\alpha\left(r-r_0\right)\right) -2\exp \left(-\alpha\left(r-r_0\right)\right) \right] & r < r_{\mathrm{cut}} \\
                              = & 0 & r \ge r_{\mathrm{cut}} \\
        \end{eqnarray*}

    See :py:class:`pair` for details on how forces are calculated and the available energy shifting and smoothing modes.
    Use :py:meth:`pair_coeff.set <coeff.set>` to set potential coefficients.

    The following coefficients must be set per unique pair of particle types:

    - :math:`D_0` - *D0*, depth of the potential at its minimum (in energy units)
    - :math:`\alpha` - *alpha*, controls the width of the potential well (in units of 1/distance)
    - :math:`r_0` - *r0*, position of the minimum (in distance units)
    - :math:`r_{\mathrm{cut}}` - *r_cut* (in distance units)
      - *optional*: defaults to the global r_cut specified in the pair command
    - :math:`r_{\mathrm{on}}` - *r_on* (in distance units)
      - *optional*: defaults to the global r_cut specified in the pair command

    Example::

        nl = nlist.cell()
        morse = pair.morse(r_cut=3.0, nlist=nl)
        morse.pair_coeff.set('A', 'A', D0=1.0, alpha=3.0, r0=1.0)
        morse.pair_coeff.set('A', 'B', D0=1.0, alpha=3.0, r0=1.0, r_cut=3.0, r_on=2.0);
        morse.pair_coeff.set(['A', 'B'], ['C', 'D'], D0=1.0, alpha=3.0)
    """
    def __init__(self, r_cut, nlist, name=None):
        hoomd.util.print_status_line();

        # common pair-potential setup (coefficient matrix, r_cut bookkeeping)
        pair.__init__(self, r_cut, nlist, name);

        # pick the CPU or GPU implementation of the C++ compute
        if hoomd.context.exec_conf.isCUDAEnabled():
            # the GPU kernel requires a full neighbor list
            self.nlist.cpp_nlist.setStorageMode(_md.NeighborList.storageMode.full);
            self.cpp_class = _md.PotentialPairMorseGPU;
        else:
            self.cpp_class = _md.PotentialPairMorse;
        self.cpp_force = self.cpp_class(hoomd.context.current.system_definition, self.nlist.cpp_nlist, self.name);

        hoomd.context.current.system.addCompute(self.cpp_force, self.force_name);

        # per-pair coefficients the user must provide
        self.required_coeffs = ['D0', 'alpha', 'r0'];

    def process_coeff(self, coeff):
        # pack (D0, alpha, r0) into a scalar4; the fourth component is unused
        return _hoomd.make_scalar4(coeff['D0'], coeff['alpha'], coeff['r0'], 0.0);
class dpd(pair):
    R""" Dissipative Particle Dynamics.

    Args:
        r_cut (float): Default cutoff radius (in distance units).
        nlist (:py:mod:`hoomd.md.nlist`): Neighbor list
        kT (:py:mod:`hoomd.variant` or :py:obj:`float`): Temperature of thermostat (in energy units).
        seed (int): seed for the PRNG in the DPD thermostat.
        name (str): Name of the force instance.

    :py:class:`dpd` specifies that a DPD pair force should be applied between every
    non-excluded particle pair in the simulation, including an interaction potential,
    pairwise drag force, and pairwise random force. See `Groot and Warren 1997 <http://dx.doi.org/10.1063/1.474784>`_.

    .. math::
        :nowrap:

        \begin{eqnarray*}
        F = F_{\mathrm{C}}(r) + F_{\mathrm{R,ij}}(r_{ij}) + F_{\mathrm{D,ij}}(v_{ij}) \\
        \end{eqnarray*}

    .. math::
        :nowrap:

        \begin{eqnarray*}
        F_{\mathrm{C}}(r) = & A \cdot w(r_{ij}) \\
        F_{\mathrm{R, ij}}(r_{ij}) = & - \theta_{ij}\sqrt{3} \sqrt{\frac{2k_b\gamma T}{\Delta t}}\cdot w(r_{ij}) \\
        F_{\mathrm{D, ij}}(r_{ij}) = & - \gamma w^2(r_{ij})\left( \hat r_{ij} \circ v_{ij} \right) \\
        \end{eqnarray*}

    .. math::
        :nowrap:

        \begin{eqnarray*}
        w(r_{ij}) = &\left( 1 - r/r_{\mathrm{cut}} \right) & r < r_{\mathrm{cut}} \\
                  = & 0 & r \ge r_{\mathrm{cut}} \\
        \end{eqnarray*}

    where :math:`\hat r_{ij}` is a normalized vector from particle i to particle j, :math:`v_{ij} = v_i - v_j`,
    and :math:`\theta_{ij}` is a uniformly distributed random number in the range [-1, 1].

    :py:class:`dpd` generates random numbers by hashing together the particle tags in the pair, the user seed,
    and the current time step index.

    .. attention::
        Change the seed if you reset the simulation time step to 0. If you keep the same seed, the simulation
        will continue with the same sequence of random numbers used previously and may cause unphysical correlations.

    `C. L. Phillips et. al. 2011 <http://dx.doi.org/10.1016/j.jcp.2011.05.021>`_ describes the DPD implementation
    details in HOOMD-blue. Cite it if you utilize the DPD functionality in your work.

    :py:class:`dpd` does not implement any energy shift / smoothing modes due to the function of the force.
    Use :py:meth:`pair_coeff.set <coeff.set>` to set potential coefficients.

    The following coefficients must be set per unique pair of particle types:

    - :math:`A` - *A* (in force units)
    - :math:`\gamma` - *gamma* (in units of force/velocity)
    - :math:`r_{\mathrm{cut}}` - *r_cut* (in distance units)
      - *optional*: defaults to the global r_cut specified in the pair command

    To use the DPD thermostat, an :py:class:`hoomd.md.integrate.nve` integrator must be applied to the system and
    the user must specify a temperature. Use of the dpd thermostat pair force with other integrators will result
    in unphysical behavior. To use pair.dpd with a different conservative potential than :math:`F_C`,
    set A to zero and define the conservative pair potential separately. Note that DPD thermostats
    are often defined in terms of :math:`\sigma` where :math:`\sigma = \sqrt{2k_b\gamma T}`.

    Example::

        nl = nlist.cell()
        dpd = pair.dpd(r_cut=1.0, nlist=nl, kT=1.0, seed=0)
        dpd.pair_coeff.set('A', 'A', A=25.0, gamma = 4.5)
        dpd.pair_coeff.set('A', 'B', A=40.0, gamma = 4.5)
        dpd.pair_coeff.set('B', 'B', A=25.0, gamma = 4.5)
        dpd.pair_coeff.set(['A', 'B'], ['C', 'D'], A=12.0, gamma = 1.2)
        dpd.set_params(kT = 1.0)
        integrate.mode_standard(dt=0.02)
        integrate.nve(group=group.all())
    """
    def __init__(self, r_cut, nlist, kT, seed, name=None):
        hoomd.util.print_status_line();

        # register the citation for the DPD implementation paper
        c = hoomd.cite.article(cite_key='phillips2011',
                               author=['C L Phillips', 'J A Anderson', 'S C Glotzer'],
                               title='Pseudo-random number generation for Brownian Dynamics and Dissipative Particle Dynamics simulations on GPU devices',
                               journal='Journal of Computational Physics',
                               volume=230,
                               number=19,
                               pages='7191--7201',
                               month='Aug',
                               year='2011',
                               doi='10.1016/j.jcp.2011.05.021',
                               feature='DPD')
        hoomd.cite._ensure_global_bib().add(c)

        # initialize the base class (sets up the coefficient matrix and r_cut handling)
        pair.__init__(self, r_cut, nlist, name);

        # create the c++ mirror class; the GPU variant requires a full neighbor list
        if not hoomd.context.exec_conf.isCUDAEnabled():
            self.cpp_force = _md.PotentialPairDPDThermoDPD(hoomd.context.current.system_definition, self.nlist.cpp_nlist, self.name);
            self.cpp_class = _md.PotentialPairDPDThermoDPD;
        else:
            self.nlist.cpp_nlist.setStorageMode(_md.NeighborList.storageMode.full);
            self.cpp_force = _md.PotentialPairDPDThermoDPDGPU(hoomd.context.current.system_definition, self.nlist.cpp_nlist, self.name);
            self.cpp_class = _md.PotentialPairDPDThermoDPDGPU;

        hoomd.context.current.system.addCompute(self.cpp_force, self.force_name);

        # setup the coefficient options
        self.required_coeffs = ['A', 'gamma'];

        # set the seed for dpd thermostat
        self.cpp_force.setSeed(seed);

        # set the temperature; accept either a constant or a hoomd.variant
        kT = hoomd.variant._setup_variant_input(kT);
        self.cpp_force.setT(kT.cpp_variant);

    def set_params(self, kT=None):
        R""" Changes parameters.

        Args:
            kT (:py:mod:`hoomd.variant` or :py:obj:`float`): Temperature of thermostat (in energy units).

        Example::

            dpd.set_params(kT=2.0)
        """
        hoomd.util.print_status_line();
        self.check_initialization();

        # change the parameters
        if kT is not None:
            # setup the variant inputs
            kT = hoomd.variant._setup_variant_input(kT);
            self.cpp_force.setT(kT.cpp_variant);

    def process_coeff(self, coeff):
        # pack (A, gamma) into the scalar2 expected by the C++ evaluator
        a = coeff['A'];
        gamma = coeff['gamma'];
        return _hoomd.make_scalar2(a, gamma);
class dpd_conservative(pair):
    R""" DPD Conservative pair force.

    Args:
        r_cut (float): Default cutoff radius (in distance units).
        nlist (:py:mod:`hoomd.md.nlist`): Neighbor list
        name (str): Name of the force instance.

    :py:class:`dpd_conservative` specifies the conservative part of the DPD pair potential should be applied between
    every non-excluded particle pair in the simulation. No thermostat (e.g. Drag Force and Random Force) is applied,
    as is in :py:class:`dpd`.

    .. math::
        :nowrap:

        \begin{eqnarray*}
        V_{\mathrm{DPD-C}}(r) = & A \cdot \left( r_{\mathrm{cut}} - r \right)
                                - \frac{1}{2} \cdot \frac{A}{r_{\mathrm{cut}}} \cdot \left(r_{\mathrm{cut}}^2 - r^2 \right)
                                & r < r_{\mathrm{cut}} \\
                              = & 0 & r \ge r_{\mathrm{cut}} \\
        \end{eqnarray*}

    :py:class:`dpd_conservative` does not implement any energy shift / smoothing modes due to the function of the force.
    Use :py:meth:`pair_coeff.set <coeff.set>` to set potential coefficients.

    The following coefficients must be set per unique pair of particle types:

    - :math:`A` - *A* (in force units)
    - :math:`r_{\mathrm{cut}}` - *r_cut* (in distance units)
      - *optional*: defaults to the global r_cut specified in the pair command

    Example::

        nl = nlist.cell()
        dpdc = pair.dpd_conservative(r_cut=3.0, nlist=nl)
        dpdc.pair_coeff.set('A', 'A', A=1.0)
        dpdc.pair_coeff.set('A', 'B', A=2.0, r_cut = 1.0)
        dpdc.pair_coeff.set('B', 'B', A=1.0)
        dpdc.pair_coeff.set(['A', 'B'], ['C', 'D'], A=5.0)
    """
    def __init__(self, r_cut, nlist, name=None):
        hoomd.util.print_status_line();

        # register the citation for the DPD implementation paper
        c = hoomd.cite.article(cite_key='phillips2011',
                               author=['C L Phillips', 'J A Anderson', 'S C Glotzer'],
                               title='Pseudo-random number generation for Brownian Dynamics and Dissipative Particle Dynamics simulations on GPU devices',
                               journal='Journal of Computational Physics',
                               volume=230,
                               number=19,
                               pages='7191--7201',
                               month='Aug',
                               year='2011',
                               doi='10.1016/j.jcp.2011.05.021',
                               feature='DPD')
        hoomd.cite._ensure_global_bib().add(c)

        # initialize the base class (sets up the coefficient matrix and r_cut handling)
        pair.__init__(self, r_cut, nlist, name);

        # create the c++ mirror class; the GPU variant requires a full neighbor list
        if not hoomd.context.exec_conf.isCUDAEnabled():
            self.cpp_force = _md.PotentialPairDPD(hoomd.context.current.system_definition, self.nlist.cpp_nlist, self.name);
            self.cpp_class = _md.PotentialPairDPD;
        else:
            self.nlist.cpp_nlist.setStorageMode(_md.NeighborList.storageMode.full);
            self.cpp_force = _md.PotentialPairDPDGPU(hoomd.context.current.system_definition, self.nlist.cpp_nlist, self.name);
            self.cpp_class = _md.PotentialPairDPDGPU;

        hoomd.context.current.system.addCompute(self.cpp_force, self.force_name);

        # setup the coefficient options
        self.required_coeffs = ['A'];

    def process_coeff(self, coeff):
        # only the conservative amplitude A is user-settable; gamma is fixed
        # to zero because this potential applies no thermostat
        a = coeff['A'];
        gamma = 0;
        return _hoomd.make_scalar2(a, gamma);

    def set_params(self, coeff):
        """ :py:class:`dpd_conservative` has no energy shift modes """
        raise RuntimeError('Not implemented for DPD Conservative');
        return;
class dpdlj(pair):
R""" Dissipative Particle Dynamics with a LJ conservative force
Args:
r_cut (float): Default cutoff radius (in distance units).
nlist (:py:mod:`hoomd.md.nlist`): Neighbor list
kT (:py:mod:`hoomd.variant` or :py:obj:`float`): Temperature of thermostat (in energy units).
seed (int): seed for the PRNG in the DPD thermostat.
name (str): Name of the force instance.
:py:class:`dpdlj` specifies that a DPD thermostat and a Lennard-Jones pair potential should be applied between
every non-excluded particle pair in the simulation.
`C. L. Phillips et. al. 2011 <http://dx.doi.org/10.1016/j.jcp.2011.05.021>`_ describes the DPD implementation
details in HOOMD-blue. Cite it if you utilize the DPD functionality in your work.
.. math::
:nowrap:
\begin{eqnarray*}
F = F_{\mathrm{C}}(r) + F_{\mathrm{R,ij}}(r_{ij}) + F_{\mathrm{D,ij}}(v_{ij}) \\
\end{eqnarray*}
.. math::
:nowrap:
\begin{eqnarray*}
F_{\mathrm{C}}(r) = & \partial V_{\mathrm{LJ}} / \partial r \\
F_{\mathrm{R, ij}}(r_{ij}) = & - \theta_{ij}\sqrt{3} \sqrt{\frac{2k_b\gamma T}{\Delta t}}\cdot w(r_{ij}) \\
F_{\mathrm{D, ij}}(r_{ij}) = & - \gamma w^2(r_{ij})\left( \hat r_{ij} \circ v_{ij} \right) \\
\end{eqnarray*}
.. math::
:nowrap:
\begin{eqnarray*}
V_{\mathrm{LJ}}(r) = & 4 \varepsilon \left[ \left( \frac{\sigma}{r} \right)^{12} -
\alpha \left( \frac{\sigma}{r} \right)^{6} \right] & r < r_{\mathrm{cut}} \\
= & 0 & r \ge r_{\mathrm{cut}} \\
\end{eqnarray*}
.. math::
:nowrap:
\begin{eqnarray*}
w(r_{ij}) = &\left( 1 - r/r_{\mathrm{cut}} \right) & r < r_{\mathrm{cut}} \\
= & 0 & r \ge r_{\mathrm{cut}} \\
\end{eqnarray*}
where :math:`\hat r_{ij}` is a normalized vector from particle i to particle j, :math:`v_{ij} = v_i - v_j`,
and :math:`\theta_{ij}` is a uniformly distributed random number in the range [-1, 1].
Use :py:meth:`pair_coeff.set <coeff.set>` to set potential coefficients.
The following coefficients must be set per unique pair of particle types:
- :math:`\varepsilon` - *epsilon* (in energy units)
- :math:`\sigma` - *sigma* (in distance units)
- :math:`\alpha` - *alpha* (unitless)
- *optional*: defaults to 1.0
- :math:`\gamma` - *gamma* (in units of force/velocity)
- :math:`r_{\mathrm{cut}}` - *r_cut* (in distance units)
- *optional*: defaults to the global r_cut specified in the pair command
To use the DPD thermostat, an :py:class:`hoomd.md.integrate.nve` integrator must be applied to the system and
the user must specify a temperature. Use of the dpd thermostat pair force with other integrators will result
in unphysical behavior.
Example::
nl = nlist.cell()
dpdlj = pair.dpdlj(r_cut=2.5, nlist=nl, kT=1.0, seed=0)
dpdlj.pair_coeff.set('A', 'A', epsilon=1.0, sigma = 1.0, gamma = 4.5)
dpdlj.pair_coeff.set('A', 'B', epsilon=0.0, sigma = 1.0, gamma = 4.5)
dpdlj.pair_coeff.set('B', 'B', epsilon=1.0, sigma = 1.0, gamma = 4.5, r_cut = 2.0**(1.0/6.0))
dpdlj.pair_coeff.set(['A', 'B'], ['C', 'D'], epsilon = 3.0, sigma=1.0, gamma = 1.2)
dpdlj.set_params(kT = 1.0)
integrate.mode_standard(dt=0.005)
integrate.nve(group=group.all())
"""
def __init__(self, r_cut, nlist, kT, seed, name=None):
hoomd.util.print_status_line();
# register the citation
c = hoomd.cite.article(cite_key='phillips2011',
author=['C L Phillips', 'J A Anderson', 'S C Glotzer'],
title='Pseudo-random number generation for Brownian Dynamics and Dissipative Particle Dynamics simulations on GPU devices',
journal='Journal of Computational Physics',
volume=230,
number=19,
pages='7191--7201',
month='Aug',
year='2011',
doi='10.1016/j.jcp.2011.05.021',
feature='DPD')
hoomd.cite._ensure_global_bib().add(c)
# tell the base class how we operate
# initialize the base class
pair.__init__(self, r_cut, nlist, name);
# create the c++ mirror class
if not hoomd.context.exec_conf.isCUDAEnabled():
self.cpp_force = _md.PotentialPairDPDLJThermoDPD(hoomd.context.current.system_definition, self.nlist.cpp_nlist, self.name);
self.cpp_class = _md.PotentialPairDPDLJThermoDPD;
else:
self.nlist.cpp_nlist.setStorageMode(_md.NeighborList.storageMode.full);
self.cpp_force = _md.PotentialPairDPDLJThermoDPDGPU(hoomd.context.current.system_definition, self.nlist.cpp_nlist, self.name);
self.cpp_class = _md.PotentialPairDPDLJThermoDPDGPU;
hoomd.context.current.system.addCompute(self.cpp_force, self.force_name);
# setup the coefficent options
self.required_coeffs = ['epsilon','sigma', 'alpha', 'gamma'];
self.pair_coeff.set_default_coeff('alpha', 1.0);
# set the seed for dpdlj thermostat
self.cpp_force.setSeed(seed);
# set the temperature
# setup the variant inputs
kT = hoomd.variant._setup_variant_input(kT);
self.cpp_force.setT(kT.cpp_variant);
def set_params(self, kT=None, mode=None):
R""" Changes parameters.
Args:
T (:py:mod:`hoomd.variant` or :py:obj:`float`): Temperature (if set) (in energy units)
mode (str): energy shift/smoothing mode (default noshift).
Examples::
dpdlj.set_params(kT=variant.linear_interp(points = [(0, 1.0), (1e5, 2.0)]))
dpdlj.set_params(kT=2.0, mode="shift")
"""
hoomd.util.print_status_line();
self.check_initialization();
# change the parameters
if kT is not None:
# setup the variant inputs
kT = hoomd.variant._setup_variant_input(kT);
self.cpp_force.setT(kT.cpp_variant);
if mode is not None:
if mode == "xplor":
hoomd.context.msg.error("XPLOR is smoothing is not supported with pair.dpdlj\n");
raise RuntimeError("Error changing parameters in pair force");
#use the inherited set_params
pair.set_params(self, mode=mode)
def process_coeff(self, coeff):
epsilon = coeff['epsilon'];
sigma = coeff['sigma'];
gamma = coeff['gamma'];
alpha = coeff['alpha'];
lj1 = 4.0 * epsilon * math.pow(sigma, 12.0);
lj2 = alpha * 4.0 * epsilon * math.pow(sigma, 6.0);
return _hoomd.make_scalar4(lj1, lj2, gamma, 0.0);
class force_shifted_lj(pair):
    R""" Force-shifted Lennard-Jones pair potential.

    Args:
        r_cut (float): Default cutoff radius (in distance units).
        nlist (:py:mod:`hoomd.md.nlist`): Neighbor list
        name (str): Name of the force instance.

    :py:class:`force_shifted_lj` specifies that a modified Lennard-Jones pair force should be applied between
    non-excluded particle pair in the simulation. The force differs from the one calculated by :py:class:`lj`
    by the subtraction of the value of the force at :math:`r_{\mathrm{cut}}`, such that the force smoothly goes
    to zero at the cut-off. The potential is modified by a linear function. This potential can be used as a
    substitute for :py:class:`lj`, when the exact analytical form of the latter is not required but a smaller
    cut-off radius is desired for computational efficiency. See
    `Toxvaerd et. al. 2011 <http://dx.doi.org/10.1063/1.3558787>`_ for a discussion of this potential.

    .. math::
        :nowrap:

        \begin{eqnarray*}
        V(r)  = & 4 \varepsilon \left[ \left( \frac{\sigma}{r} \right)^{12} -
                  \alpha \left( \frac{\sigma}{r} \right)^{6} \right] + \Delta V(r) & r < r_{\mathrm{cut}}\\
              = & 0 & r \ge r_{\mathrm{cut}} \\
        \end{eqnarray*}

    .. math::

        \Delta V(r) = -(r - r_{\mathrm{cut}}) \frac{\partial V_{\mathrm{LJ}}}{\partial r}(r_{\mathrm{cut}})

    See :py:class:`pair` for details on how forces are calculated and the available energy shifting and smoothing modes.
    Use :py:meth:`pair_coeff.set <coeff.set>` to set potential coefficients.

    The following coefficients must be set per unique pair of particle types:

    - :math:`\varepsilon` - *epsilon* (in energy units)
    - :math:`\sigma` - *sigma* (in distance units)
    - :math:`\alpha` - *alpha* (unitless) - *optional*: defaults to 1.0
    - :math:`r_{\mathrm{cut}}` - *r_cut* (in distance units)
      - *optional*: defaults to the global r_cut specified in the pair command
    - :math:`r_{\mathrm{on}}`- *r_on* (in distance units)
      - *optional*: defaults to the global r_cut specified in the pair command

    Example::

        nl = nlist.cell()
        fslj = pair.force_shifted_lj(r_cut=1.5, nlist=nl)
        fslj.pair_coeff.set('A', 'A', epsilon=1.0, sigma=1.0)

    """
    def __init__(self, r_cut, nlist, name=None):
        hoomd.util.print_status_line();

        # initialize the base pair class
        pair.__init__(self, r_cut, nlist, name);

        # construct the C++ mirror class; GPU variant needs full neighbor storage
        if hoomd.context.exec_conf.isCUDAEnabled():
            self.nlist.cpp_nlist.setStorageMode(_md.NeighborList.storageMode.full);
            self.cpp_force = _md.PotentialPairForceShiftedLJGPU(hoomd.context.current.system_definition, self.nlist.cpp_nlist, self.name);
            self.cpp_class = _md.PotentialPairForceShiftedLJGPU;
        else:
            self.cpp_force = _md.PotentialPairForceShiftedLJ(hoomd.context.current.system_definition, self.nlist.cpp_nlist, self.name);
            self.cpp_class = _md.PotentialPairForceShiftedLJ;
        hoomd.context.current.system.addCompute(self.cpp_force, self.force_name);

        # per-pair coefficients; alpha is optional with a default of 1.0
        self.required_coeffs = ['epsilon', 'sigma', 'alpha'];
        self.pair_coeff.set_default_coeff('alpha', 1.0);

    def process_coeff(self, coeff):
        # standard LJ prefactors: lj1 multiplies r^-12, lj2 multiplies r^-6
        eps = coeff['epsilon'];
        sig = coeff['sigma'];
        alp = coeff['alpha'];
        lj1 = 4.0 * eps * math.pow(sig, 12.0);
        lj2 = alp * 4.0 * eps * math.pow(sig, 6.0);
        return _hoomd.make_scalar2(lj1, lj2);
class moliere(pair):
    R""" Moliere pair potential.

    Args:
        r_cut (float): Default cutoff radius (in distance units).
        nlist (:py:mod:`hoomd.md.nlist`): Neighbor list
        name (str): Name of the force instance.

    :py:class:`moliere` specifies that a Moliere type pair potential should be applied between every
    non-excluded particle pair in the simulation.

    .. math::
        :nowrap:

        \begin{eqnarray*}
        V_{\mathrm{Moliere}}(r)  = & \frac{Z_i Z_j e^2}{4 \pi \epsilon_0 r_{ij}} \left[ 0.35 \exp \left( -0.3 \frac{r_{ij}}{a_F} \right) + 0.55 \exp \left( -1.2 \frac{r_{ij}}{a_F} \right) + 0.10 \exp \left( -6.0 \frac{r_{ij}}{a_F} \right) \right] & r < r_{\mathrm{cut}} \\
                                 = & 0 & r > r_{\mathrm{cut}} \\
        \end{eqnarray*}

    See :py:class:`pair` for details on how forces are calculated and the available energy shifting and smoothing modes.
    Use :py:meth:`pair_coeff.set <coeff.set>` to set potential coefficients.

    The following coefficients must be set per unique pair of particle types:

    - :math:`Z_i` - *Z_i* - Atomic number of species i (unitless)
    - :math:`Z_j` - *Z_j* - Atomic number of species j (unitless)
    - :math:`e` - *elementary_charge* - The elementary charge (in charge units)
    - :math:`a_0` - *a_0* - The Bohr radius (in distance units)
    - :math:`r_{\mathrm{cut}}` - *r_cut* (in distance units)
      - *optional*: defaults to the global r_cut specified in the pair command
    - :math:`r_{\mathrm{on}}`- *r_on* (in distance units)
      - *optional*: defaults to the global r_cut specified in the pair command

    Example::

        nl = nlist.cell()
        moliere = pair.moliere(r_cut = 3.0, nlist=nl)
        moliere.pair_coeff.set('A', 'B', Z_i = 54.0, Z_j = 7.0, elementary_charge = 1.0, a_0 = 1.0);

    """
    def __init__(self, r_cut, nlist, name=None):
        hoomd.util.print_status_line();

        # initialize the base pair class
        pair.__init__(self, r_cut, nlist, name);

        # construct the C++ mirror class; GPU variant needs full neighbor storage
        if hoomd.context.exec_conf.isCUDAEnabled():
            self.nlist.cpp_nlist.setStorageMode(_md.NeighborList.storageMode.full);
            self.cpp_force = _md.PotentialPairMoliereGPU(hoomd.context.current.system_definition, self.nlist.cpp_nlist, self.name);
            self.cpp_class = _md.PotentialPairMoliereGPU;
        else:
            self.cpp_force = _md.PotentialPairMoliere(hoomd.context.current.system_definition, self.nlist.cpp_nlist, self.name);
            self.cpp_class = _md.PotentialPairMoliere;
        hoomd.context.current.system.addCompute(self.cpp_force, self.force_name);

        # per-pair coefficients; the charge and Bohr radius have unit defaults
        self.required_coeffs = ['Z_i', 'Z_j', 'elementary_charge', 'a_0'];
        self.pair_coeff.set_default_coeff('elementary_charge', 1.0);
        self.pair_coeff.set_default_coeff('a_0', 1.0);

    def process_coeff(self, coeff):
        Z_i = coeff['Z_i'];
        Z_j = coeff['Z_j'];
        e = coeff['elementary_charge'];
        a_0 = coeff['a_0'];

        # interaction prefactor Z_i Z_j e^2
        Zsq = Z_i * Z_j * e * e;

        # screening length; fall back to 1.0 when both species are neutral
        if Z_i == 0 and Z_j == 0:
            aF = 1.0;
        else:
            aF = 0.8853 * a_0 / math.pow(math.sqrt(Z_i) + math.sqrt(Z_j), 2.0 / 3.0);
        return _hoomd.make_scalar2(Zsq, aF);
class zbl(pair):
    R""" ZBL pair potential.

    Args:
        r_cut (float): Default cutoff radius (in distance units).
        nlist (:py:mod:`hoomd.md.nlist`): Neighbor list
        name (str): Name of the force instance.

    :py:class:`zbl` specifies that a Ziegler-Biersack-Littmark pair potential should be applied between every
    non-excluded particle pair in the simulation.

    .. math::
        :nowrap:

        \begin{eqnarray*}
        V_{\mathrm{ZBL}}(r)  = & \frac{Z_i Z_j e^2}{4 \pi \epsilon_0 r_{ij}} \left[ 0.1818 \exp \left( -3.2 \frac{r_{ij}}{a_F} \right) + 0.5099 \exp \left( -0.9423 \frac{r_{ij}}{a_F} \right) + 0.2802 \exp \left( -0.4029 \frac{r_{ij}}{a_F} \right) + 0.02817 \exp \left( -0.2016 \frac{r_{ij}}{a_F} \right) \right], & r < r_{\mathrm{cut}} \\
                             = & 0, & r > r_{\mathrm{cut}} \\
        \end{eqnarray*}

    See :py:class:`pair` for details on how forces are calculated and the available energy shifting and smoothing modes.
    Use :py:meth:`pair_coeff.set <coeff.set>` to set potential coefficients.

    The following coefficients must be set per unique pair of particle types:

    - :math:`Z_i` - *Z_i* - Atomic number of species i (unitless)
    - :math:`Z_j` - *Z_j* - Atomic number of species j (unitless)
    - :math:`e` - *elementary_charge* - The elementary charge (in charge units)
    - :math:`a_0` - *a_0* - The Bohr radius (in distance units)
    - :math:`r_{\mathrm{cut}}` - *r_cut* (in distance units)
      - *optional*: defaults to the global r_cut specified in the pair command
    - :math:`r_{\mathrm{on}}`- *r_on* (in distance units)
      - *optional*: defaults to the global r_cut specified in the pair command

    Example::

        nl = nlist.cell()
        zbl = pair.zbl(r_cut = 3.0, nlist=nl)
        zbl.pair_coeff.set('A', 'B', Z_i = 54.0, Z_j = 7.0, elementary_charge = 1.0, a_0 = 1.0);

    """
    def __init__(self, r_cut, nlist, name=None):
        hoomd.util.print_status_line();

        # tell the base class how we operate
        # initialize the base class
        pair.__init__(self, r_cut, nlist, name);

        # create the c++ mirror class; the GPU variant needs a full neighbor list
        if not hoomd.context.exec_conf.isCUDAEnabled():
            self.cpp_force = _md.PotentialPairZBL(hoomd.context.current.system_definition, self.nlist.cpp_nlist, self.name);
            self.cpp_class = _md.PotentialPairZBL;
        else:
            self.nlist.cpp_nlist.setStorageMode(_md.NeighborList.storageMode.full);
            self.cpp_force = _md.PotentialPairZBLGPU(hoomd.context.current.system_definition, self.nlist.cpp_nlist, self.name);
            self.cpp_class = _md.PotentialPairZBLGPU;
        hoomd.context.current.system.addCompute(self.cpp_force, self.force_name);

        # setup the coefficient options; charge and Bohr radius default to 1.0
        self.required_coeffs = ['Z_i', 'Z_j', 'elementary_charge', 'a_0'];
        self.pair_coeff.set_default_coeff('elementary_charge', 1.0);
        self.pair_coeff.set_default_coeff('a_0', 1.0);

    def process_coeff(self, coeff):
        Z_i = coeff['Z_i'];
        Z_j = coeff['Z_j'];
        elementary_charge = coeff['elementary_charge'];
        a_0 = coeff['a_0'];

        # interaction prefactor Z_i Z_j e^2
        Zsq = Z_i * Z_j * elementary_charge * elementary_charge;

        # universal (ZBL) screening length; fall back to 1.0 when both species are neutral
        if (not (Z_i == 0)) or (not (Z_j == 0)):
            aF = 0.88534 * a_0 / ( math.pow( Z_i, 0.23 ) + math.pow( Z_j, 0.23 ) );
        else:
            aF = 1.0;

        return _hoomd.make_scalar2(Zsq, aF);

    def set_params(self, coeff):
        """ :py:class:`zbl` has no energy shift modes """
        # FIX: the message previously said 'Not implemented for DPD Conservative',
        # a copy/paste error from another potential; it now names this class.
        # (The unreachable 'return' after the raise was also removed.)
        raise RuntimeError('Not implemented for zbl');
class tersoff(pair):
    R""" Tersoff Potential.

    Args:
        r_cut (float): Default cutoff radius (in distance units).
        nlist (:py:mod:`hoomd.md.nlist`): Neighbor list
        name (str): Name of the force instance.

    :py:class:`tersoff` specifies that the Tersoff three-body potential should be applied to every
    non-bonded particle pair in the simulation. Despite the fact that the Tersoff potential accounts
    for the effects of third bodies, it is included in the pair potentials because the species of the
    third body is irrelevant. It can thus use type-pair parameters similar to those of the pair potentials.

    The Tersoff potential is a bond-order potential based on the Morse potential that accounts for the weakening of
    individual bonds with increasing coordination number. It does this by computing a modifier to the
    attractive term of the potential. The modifier contains the effects of third-bodies on the bond
    energies. The potential also includes a smoothing function around the cutoff. The smoothing function
    used here is exponential in nature as opposed to the sinusoid used by Tersoff; it is believed to
    provide continuity up to the second derivative.
    """
    def __init__(self, r_cut, nlist, name=None):
        hoomd.util.print_status_line();

        # initialize the base pair class
        pair.__init__(self, r_cut, nlist, name);

        # a three-body potential always needs the full neighbor list
        self.nlist.cpp_nlist.setStorageMode(_md.NeighborList.storageMode.full);

        # construct the C++ mirror class
        if hoomd.context.exec_conf.isCUDAEnabled():
            self.cpp_force = _md.PotentialTersoffGPU(hoomd.context.current.system_definition, self.nlist.cpp_nlist, self.name);
            self.cpp_class = _md.PotentialTersoffGPU;
        else:
            self.cpp_force = _md.PotentialTersoff(hoomd.context.current.system_definition, self.nlist.cpp_nlist, self.name);
            self.cpp_class = _md.PotentialTersoff;
        hoomd.context.current.system.addCompute(self.cpp_force, self.force_name);

        # per-pair coefficients and their default values
        self.required_coeffs = ['cutoff_thickness', 'C1', 'C2', 'lambda1', 'lambda2', 'dimer_r', 'n', 'gamma', 'lambda3', 'c', 'd', 'm', 'alpha']
        defaults = [('cutoff_thickness', 0.2),
                    ('dimer_r', 1.5),
                    ('C1', 1.0),
                    ('C2', 1.0),
                    ('lambda1', 2.0),
                    ('lambda2', 1.0),
                    ('lambda3', 0.0),
                    ('n', 0.0),
                    ('m', 0.0),
                    ('c', 0.0),
                    ('d', 1.0),
                    ('gamma', 0.0),
                    ('alpha', 3.0)]
        for coeff_name, value in defaults:
            self.pair_coeff.set_default_coeff(coeff_name, value);

    def process_coeff(self, coeff):
        # values passed straight through to the evaluator
        cutoff_d = coeff['cutoff_thickness'];
        dimer_r = coeff['dimer_r'];
        n = coeff['n'];
        m = coeff['m'];
        alpha = coeff['alpha'];

        # derived quantities expected by the C++ side
        gamman = math.pow(coeff['gamma'], n);
        c2 = coeff['c'] * coeff['c'];
        d2 = coeff['d'] * coeff['d'];
        lam3 = coeff['lambda3'];
        lambda3_cube = lam3 * lam3 * lam3;

        tersoff_coeffs = _hoomd.make_scalar2(coeff['C1'], coeff['C2']);
        exp_consts = _hoomd.make_scalar2(coeff['lambda1'], coeff['lambda2']);
        ang_consts = _hoomd.make_scalar3(c2, d2, m);

        return _md.make_tersoff_params(cutoff_d, tersoff_coeffs, exp_consts, dimer_r, n, gamman, lambda3_cube, ang_consts, alpha);
class mie(pair):
    R""" Mie pair potential.

    Args:
        r_cut (float): Default cutoff radius (in distance units).
        nlist (:py:mod:`hoomd.md.nlist`): Neighbor list
        name (str): Name of the force instance.

    :py:class:`mie` specifies that a Mie pair potential should be applied between every
    non-excluded particle pair in the simulation.

    .. math::
        :nowrap:

        \begin{eqnarray*}
        V_{\mathrm{mie}}(r)  = & \left( \frac{n}{n-m} \right) {\left( \frac{n}{m} \right)}^{\frac{m}{n-m}} \varepsilon \left[ \left( \frac{\sigma}{r} \right)^{n} -
                               \left( \frac{\sigma}{r} \right)^{m} \right] & r < r_{\mathrm{cut}} \\
                             = & 0 & r \ge r_{\mathrm{cut}} \\
        \end{eqnarray*}

    See :py:class:`pair` for details on how forces are calculated and the available energy shifting and smoothing modes.
    Use :py:meth:`pair_coeff.set <coeff.set>` to set potential coefficients.

    The following coefficients must be set per unique pair of particle types:

    - :math:`\varepsilon` - *epsilon* (in energy units)
    - :math:`\sigma` - *sigma* (in distance units)
    - :math:`n` - *n* (unitless)
    - :math:`m` - *m* (unitless)
    - :math:`r_{\mathrm{cut}}` - *r_cut* (in distance units)
      - *optional*: defaults to the global r_cut specified in the pair command
    - :math:`r_{\mathrm{on}}`- *r_on* (in distance units)
      - *optional*: defaults to the global r_cut specified in the pair command

    Example::

        nl = nlist.cell()
        mie = pair.mie(r_cut=3.0, nlist=nl)
        mie.pair_coeff.set('A', 'A', epsilon=1.0, sigma=1.0, n=12, m=6)
        mie.pair_coeff.set('A', 'B', epsilon=2.0, sigma=1.0, n=14, m=7, r_cut=3.0, r_on=2.0);
        mie.pair_coeff.set('B', 'B', epsilon=1.0, sigma=1.0, n=15.1, m=6.5, r_cut=2**(1.0/6.0), r_on=2.0);
        mie.pair_coeff.set(['A', 'B'], ['C', 'D'], epsilon=1.5, sigma=2.0)

    """
    def __init__(self, r_cut, nlist, name=None):
        hoomd.util.print_status_line();

        # initialize the base pair class
        pair.__init__(self, r_cut, nlist, name);

        # construct the C++ mirror class; GPU variant needs full neighbor storage
        if hoomd.context.exec_conf.isCUDAEnabled():
            self.nlist.cpp_nlist.setStorageMode(_md.NeighborList.storageMode.full);
            self.cpp_force = _md.PotentialPairMieGPU(hoomd.context.current.system_definition, self.nlist.cpp_nlist, self.name);
            self.cpp_class = _md.PotentialPairMieGPU;
        else:
            self.cpp_force = _md.PotentialPairMie(hoomd.context.current.system_definition, self.nlist.cpp_nlist, self.name);
            self.cpp_class = _md.PotentialPairMie;
        hoomd.context.current.system.addCompute(self.cpp_force, self.force_name);

        # per-pair coefficients (all four are required)
        self.required_coeffs = ['epsilon', 'sigma', 'n', 'm'];

    def process_coeff(self, coeff):
        # coerce everything to float so integer exponents behave uniformly
        epsilon = float(coeff['epsilon']);
        sigma = float(coeff['sigma']);
        n = float(coeff['n']);
        m = float(coeff['m']);

        # mie1/mie2 are the repulsive/attractive prefactors including the
        # Mie normalization constant; the exponents ride along as mie3/mie4
        mie1 = epsilon * math.pow(sigma, n) * (n/(n-m)) * math.pow(n/m,m/(n-m));
        mie2 = epsilon * math.pow(sigma, m) * (n/(n-m)) * math.pow(n/m,m/(n-m));
        return _hoomd.make_scalar4(mie1, mie2, n, m);
class ai_pair(pair):
    R""" Generic anisotropic pair potential.

    Users should not instantiate :py:class:`ai_pair` directly. It is a base class that
    provides common features to all anisotropic pair forces. Rather than repeating all of that documentation in a
    dozen different places, it is collected here.

    All anisotropic pair potential commands specify that a given potential energy, force and torque be computed
    on all non-excluded particle pairs in the system within a short range cutoff distance :math:`r_{\mathrm{cut}}`.
    The interaction energy, forces and torque depend on the inter-particle separation
    :math:`\vec r` and on the orientations :math:`\vec q_i`, :math:`\vec q_j`, of the particles.
    """

    ## \internal
    # \brief Initialize the pair force
    # \details
    # The derived class must set
    #  - self.cpp_class (the pair class to instantiate)
    #  - self.required_coeffs (a list of the coeff names the derived class needs)
    #  - self.process_coeffs() (a method that takes in the coeffs and spits out a param struct to use in
    #    self.cpp_force.set_params())
    def __init__(self, r_cut, nlist, name=None):
        # initialize the base force class
        force._force.__init__(self, name);

        # remember the global cutoff and make it the per-pair default
        self.global_r_cut = r_cut;
        self.pair_coeff = coeff();
        self.pair_coeff.set_default_coeff('r_cut', self.global_r_cut);

        # hook this force up to the neighbor list
        self.nlist = nlist
        self.nlist.subscribe(lambda:self.get_rcut())
        self.nlist.update_rcut()

    def set_params(self, mode=None):
        R""" Set parameters controlling the way forces are computed.

        Args:
            mode (str): (if set) Set the mode with which potentials are handled at the cutoff

        valid values for mode are: "none" (the default) and "shift":

        - *none* - No shifting is performed and potentials are abruptly cut off
        - *shift* - A constant shift is applied to the entire potential so that it is 0 at the cutoff

        Examples::

            mypair.set_params(mode="shift")
            mypair.set_params(mode="no_shift")

        """
        hoomd.util.print_status_line();

        # nothing to do when no mode was requested
        if mode is None:
            return

        if mode == "no_shift":
            self.cpp_force.setShiftMode(self.cpp_class.energyShiftMode.no_shift)
        elif mode == "shift":
            self.cpp_force.setShiftMode(self.cpp_class.energyShiftMode.shift)
        else:
            hoomd.context.msg.error("Invalid mode\n");
            raise RuntimeError("Error changing parameters in pair force");

    def update_coeffs(self):
        coeff_list = self.required_coeffs + ["r_cut"];

        # refuse to run with an incomplete coefficient matrix
        if not self.pair_coeff.verify(coeff_list):
            hoomd.context.msg.error("Not all pair coefficients are set\n");
            raise RuntimeError("Error updating pair coefficients");

        # resolve type indices to names once
        pdata = hoomd.context.current.system_definition.getParticleData();
        ntypes = pdata.getNTypes();
        type_list = [pdata.getNameByType(i) for i in range(ntypes)];

        # push parameters and cutoffs for every unique (i, j) pair
        for i in range(ntypes):
            for j in range(i, ntypes):
                coeff_dict = dict((nm, self.pair_coeff.get(type_list[i], type_list[j], nm)) for nm in coeff_list);
                param = self.process_coeff(coeff_dict);
                self.cpp_force.setParams(i, j, param);
                self.cpp_force.setRcut(i, j, coeff_dict['r_cut']);
class gb(ai_pair):
    R""" Gay-Berne anisotropic pair potential.

    Args:
        r_cut (float): Default cutoff radius (in distance units).
        nlist (:py:mod:`hoomd.md.nlist`): Neighbor list
        name (str): Name of the force instance.

    :py:class:`gb` computes the Gay-Berne potential between anisotropic particles.

    This version of the Gay-Berne potential supports identical pairs of uniaxial ellipsoids,
    with orientation-independent energy-well depth.

    The interaction energy for this anisotropic pair potential is
    (`Allen et. al. 2006 <http://dx.doi.org/10.1080/00268970601075238>`_):

    .. math::
        :nowrap:

        \begin{eqnarray*}
        V_{\mathrm{GB}}(\vec r, \vec e_i, \vec e_j)  = & 4 \varepsilon \left[ \zeta^{-12} -
                              \zeta^{-6} \right] & \zeta < \zeta_{\mathrm{cut}} \\
                            = & 0 & \zeta \ge \zeta_{\mathrm{cut}} \\
        \end{eqnarray*}

    .. math::

        \zeta = \left(\frac{r-\sigma+\sigma_{\mathrm{min}}}{\sigma_{\mathrm{min}}}\right)

        \sigma^{-2} = \frac{1}{2} \hat{\vec{r}}\cdot\vec{H^{-1}}\cdot\hat{\vec{r}}

        \vec{H} = 2 \ell_\perp^2 \vec{1} + (\ell_\parallel^2 - \ell_\perp^2) (\vec{e_i} \otimes \vec{e_i} + \vec{e_j} \otimes \vec{e_j})

    with :math:`\sigma_{\mathrm{min}} = 2 \min(\ell_\perp, \ell_\parallel)`.

    The cut-off parameter :math:`r_{\mathrm{cut}}` is defined for two particles oriented parallel along
    the **long** axis, i.e.
    :math:`\zeta_{\mathrm{cut}} = \left(\frac{r-\sigma_{\mathrm{max}} + \sigma_{\mathrm{min}}}{\sigma_{\mathrm{min}}}\right)`
    where :math:`\sigma_{\mathrm{max}} = 2 \max(\ell_\perp, \ell_\parallel)` .

    The quantities :math:`\ell_\parallel` and :math:`\ell_\perp` denote the semi-axis lengths parallel
    and perpendicular to particle orientation.

    Use :py:meth:`pair_coeff.set <coeff.set>` to set potential coefficients.

    The following coefficients must be set per unique pair of particle types:

    - :math:`\varepsilon` - *epsilon* (in energy units)
    - :math:`\ell_\perp` - *lperp* (in distance units)
    - :math:`\ell_\parallel` - *lpar* (in distance units)
    - :math:`r_{\mathrm{cut}}` - *r_cut* (in distance units)
      - *optional*: defaults to the global r_cut specified in the pair command

    Example::

        nl = nlist.cell()
        gb = pair.gb(r_cut=2.5, nlist=nl)
        gb.pair_coeff.set('A', 'A', epsilon=1.0, lperp=0.45, lpar=0.5)
        gb.pair_coeff.set('A', 'B', epsilon=2.0, lperp=0.45, lpar=0.5, r_cut=2**(1.0/6.0));

    """
    def __init__(self, r_cut, nlist, name=None):
        hoomd.util.print_status_line();

        # initialize the anisotropic base pair class
        ai_pair.__init__(self, r_cut, nlist, name);

        # construct the C++ mirror class; GPU variant needs full neighbor storage
        if hoomd.context.exec_conf.isCUDAEnabled():
            self.nlist.cpp_nlist.setStorageMode(_md.NeighborList.storageMode.full);
            self.cpp_force = _md.AnisoPotentialPairGBGPU(hoomd.context.current.system_definition, self.nlist.cpp_nlist, self.name);
            self.cpp_class = _md.AnisoPotentialPairGBGPU;
        else:
            self.cpp_force = _md.AnisoPotentialPairGB(hoomd.context.current.system_definition, self.nlist.cpp_nlist, self.name);
            self.cpp_class = _md.AnisoPotentialPairGB;
        hoomd.context.current.system.addCompute(self.cpp_force, self.force_name);

        # per-pair coefficients (all required)
        self.required_coeffs = ['epsilon', 'lperp', 'lpar'];

    def process_coeff(self, coeff):
        # pack (epsilon, lperp, lpar) for the C++ evaluator
        return _hoomd.make_scalar3(coeff['epsilon'], coeff['lperp'], coeff['lpar']);
class dipole(ai_pair):
    R""" Screened dipole-dipole interactions.

    Args:
        r_cut (float): Default cutoff radius (in distance units).
        nlist (:py:mod:`hoomd.md.nlist`): Neighbor list
        name (str): Name of the force instance.

    :py:class:`dipole` computes the (screened) interaction between pairs of
    particles with dipoles and electrostatic charges. The total energy
    computed is:

    .. math::

        U_{dipole} = U_{dd} + U_{de} + U_{ee}

        U_{dd} = A e^{-\kappa r} \left(\frac{\vec{\mu_i}\cdot\vec{\mu_j}}{r^3} - 3\frac{(\vec{\mu_i}\cdot \vec{r_{ji}})(\vec{\mu_j}\cdot \vec{r_{ji}})}{r^5}\right)

        U_{de} = A e^{-\kappa r} \left(\frac{(\vec{\mu_j}\cdot \vec{r_{ji}})q_i}{r^3} - \frac{(\vec{\mu_i}\cdot \vec{r_{ji}})q_j}{r^3}\right)

        U_{ee} = A e^{-\kappa r} \frac{q_i q_j}{r}

    Use :py:meth:`pair_coeff.set <coeff.set>` to set potential coefficients.
    :py:class:`dipole` does not implement any energy shift / smoothing modes due to the function of the force.

    The following coefficients must be set per unique pair of particle types:

    - mu - magnitude of :math:`\vec{\mu} = \mu (1, 0, 0)` in the particle local reference frame
    - A - electrostatic energy scale :math:`A` (default value 1.0)
    - kappa - inverse screening length :math:`\kappa`

    Example::

        # A/A interact only with screened electrostatics
        dipole.pair_coeff.set('A', 'A', mu=0.0, A=1.0, kappa=1.0)
        dipole.pair_coeff.set('A', 'B', mu=0.5, kappa=1.0)

    """
    def __init__(self, r_cut, nlist, name=None):
        hoomd.util.print_status_line();

        # initialize the anisotropic base pair class
        ai_pair.__init__(self, r_cut, nlist, name);

        # construct the C++ mirror class; GPU variant needs full neighbor storage
        if hoomd.context.exec_conf.isCUDAEnabled():
            self.nlist.cpp_nlist.setStorageMode(_md.NeighborList.storageMode.full);
            self.cpp_force = _md.AnisoPotentialPairDipoleGPU(hoomd.context.current.system_definition, self.nlist.cpp_nlist, self.name);
            self.cpp_class = _md.AnisoPotentialPairDipoleGPU;
        else:
            self.cpp_force = _md.AnisoPotentialPairDipole(hoomd.context.current.system_definition, self.nlist.cpp_nlist, self.name);
            self.cpp_class = _md.AnisoPotentialPairDipole;
        hoomd.context.current.system.addCompute(self.cpp_force, self.force_name);

        # per-pair coefficients; the energy scale A defaults to 1.0
        self.required_coeffs = ['mu', 'A', 'kappa'];
        self.pair_coeff.set_default_coeff('A', 1.0)

    def process_coeff(self, coeff):
        # pack (mu, A, kappa) as floats for the C++ evaluator
        mu = float(coeff['mu']);
        A = float(coeff['A']);
        kappa = float(coeff['kappa']);
        return _hoomd.make_scalar3(mu, A, kappa)

    def set_params(self, coeff):
        """ :py:class:`dipole` has no energy shift modes """
        raise RuntimeError('Not implemented for dipole');
class reaction_field(pair):
    R""" Onsager reaction field pair potential.

    Args:
        r_cut (float): Default cutoff radius (in distance units).
        nlist (:py:mod:`hoomd.md.nlist`): Neighbor list
        name (str): Name of the force instance.

    :py:class:`reaction_field` specifies that an Onsager reaction field pair potential should be applied between every
    non-excluded particle pair in the simulation.

    Reaction field electrostatics is an approximation to the screened electrostatic interaction,
    which assumes that the medium can be treated as an electrostatic continuum of dielectric
    constant :math:`\epsilon_{RF}` outside the cutoff sphere of radius :math:`r_{\mathrm{cut}}`.
    See: `Barker et. al. 1973 <http://dx.doi.org/10.1080/00268977300102101>`_.

    .. math::

       V_{\mathrm{RF}}(r) = \varepsilon \left[ \frac{1}{r} +
           \frac{(\epsilon_{RF}-1) r^2}{(2 \epsilon_{RF} + 1) r_c^3} \right]

    By default, the reaction field potential does not require charge or diameter to be set. Two parameters,
    :math:`\varepsilon` and :math:`\epsilon_{RF}` are needed. If :math:`\epsilon_{RF}` is specified as zero,
    it will represent infinity.

    If *use_charge* is set to True, the following formula is evaluated instead:

    .. math::

        V_{\mathrm{RF}}(r) = q_i q_j \varepsilon \left[ \frac{1}{r} +
            \frac{(\epsilon_{RF}-1) r^2}{(2 \epsilon_{RF} + 1) r_c^3} \right]

    where :math:`q_i` and :math:`q_j` are the charges of the particle pair.

    See :py:class:`pair` for details on how forces are calculated and the available energy shifting and smoothing modes.
    Use :py:meth:`pair_coeff.set <coeff.set>` to set potential coefficients.

    The following coefficients must be set per unique pair of particle types:

    - :math:`\varepsilon` - *epsilon* (in units of energy*distance)
    - :math:`\epsilon_{RF}` - *eps_rf* (dimensionless)
    - :math:`r_{\mathrm{cut}}` - *r_cut* (in units of distance)
      - *optional*: defaults to the global r_cut specified in the pair command
    - :math:`r_{\mathrm{on}}` - *r_on* (in units of distance)
      - *optional*: defaults to the global r_cut specified in the pair command
    - *use_charge* (boolean), evaluate potential using particle charges
      - *optional*: defaults to False

    .. versionadded:: 2.1

    Example::

        nl = nlist.cell()
        reaction_field = pair.reaction_field(r_cut=3.0, nlist=nl)
        reaction_field.pair_coeff.set('A', 'A', epsilon=1.0, eps_rf=1.0)
        reaction_field.pair_coeff.set('A', 'B', epsilon=-1.0, eps_rf=0.0)
        reaction_field.pair_coeff.set('B', 'B', epsilon=1.0, eps_rf=0.0)
        reaction_field.pair_coeff.set(system.particles.types, system.particles.types, epsilon=1.0, eps_rf=0.0, use_charge=True)

    """
    def __init__(self, r_cut, nlist, name=None):
        hoomd.util.print_status_line();

        # initialize the base pair class
        pair.__init__(self, r_cut, nlist, name);

        # construct the C++ mirror class; GPU variant needs full neighbor storage
        if hoomd.context.exec_conf.isCUDAEnabled():
            self.nlist.cpp_nlist.setStorageMode(_md.NeighborList.storageMode.full);
            self.cpp_force = _md.PotentialPairReactionFieldGPU(hoomd.context.current.system_definition, self.nlist.cpp_nlist, self.name);
            self.cpp_class = _md.PotentialPairReactionFieldGPU;
        else:
            self.cpp_force = _md.PotentialPairReactionField(hoomd.context.current.system_definition, self.nlist.cpp_nlist, self.name);
            self.cpp_class = _md.PotentialPairReactionField;
        hoomd.context.current.system.addCompute(self.cpp_force, self.force_name);

        # per-pair coefficients; plain (chargeless) evaluation is the default
        self.required_coeffs = ['epsilon', 'eps_rf', 'use_charge'];
        self.pair_coeff.set_default_coeff('use_charge', False)

    def process_coeff(self, coeff):
        # use_charge is encoded as a scalar-packed boolean for the C++ side
        return _hoomd.make_scalar3(coeff['epsilon'], coeff['eps_rf'],
                                   _hoomd.int_as_scalar(int(coeff['use_charge'])));
|
"""cffLib.py -- read/write tools for Adobe CFF fonts."""
__version__ = "1.0b1"
__author__ = "jvr"
import struct, sstruct
import string
import types
import psCharStrings
cffHeaderFormat = """
major: B
minor: B
hdrSize: B
offSize: B
"""
class CFFFontSet:
    # In-memory representation of a CFF font set -- one CFF blob can hold
    # several fonts. NOTE(review): this is Python 2 era code (tuple parameter
    # in fromXML, list-returning map()); it will not parse under Python 3.

    def __init__(self):
        # mapping: font name -> CFFFont instance, filled in by decompile()
        self.fonts = {}

    def decompile(self, data):
        # Parse a binary CFF blob: fixed 4-byte header, then the Name, Top DICT,
        # String and Global Subr INDEX structures back to back.
        sstruct.unpack(cffHeaderFormat, data[:4], self)
        assert self.major == 1 and self.minor == 0, \
                "unknown CFF format: %d.%d" % (self.major, self.minor)
        # anything between hdrSize and the end is the sequence of INDEXes
        restdata = data[self.hdrSize:]
        self.fontNames, restdata = readINDEX(restdata)
        topDicts, restdata = readINDEX(restdata)
        strings, restdata = readINDEX(restdata)
        # wrap the raw custom strings so SIDs resolve against the standard table too
        strings = IndexedStrings(strings)
        globalSubrs, restdata = readINDEX(restdata)
        # Python 2: map() returns a list of T2CharString objects here
        self.GlobalSubrs = map(psCharStrings.T2CharString, globalSubrs)
        # build one CFFFont per entry; fontNames and topDicts are parallel lists
        for i in range(len(topDicts)):
            font = self.fonts[self.fontNames[i]] = CFFFont()
            font.GlobalSubrs = self.GlobalSubrs  # shared across fonts -- original author was unsure ("Hmm.")
            font.decompile(data, topDicts[i], strings, self)  # maybe only 'on demand'?

    def compile(self):
        # Unfinished stub: 'XXXX' is a placeholder and raises NameError if this
        # method is ever called.
        strings = IndexedStrings()
        XXXX

    def toXML(self, xmlWriter, progress=None):
        # Dump every font, then the shared GlobalSubrs, as XML elements.
        xmlWriter.newline()
        for fontName in self.fontNames:
            xmlWriter.begintag("CFFFont", name=fontName)
            xmlWriter.newline()
            font = self.fonts[fontName]
            font.toXML(xmlWriter, progress)
            xmlWriter.endtag("CFFFont")
            xmlWriter.newline()
            xmlWriter.newline()
        xmlWriter.begintag("GlobalSubrs")
        xmlWriter.newline()
        for i in range(len(self.GlobalSubrs)):
            xmlWriter.newline()
            xmlWriter.begintag("CharString", id=i)
            xmlWriter.newline()
            self.GlobalSubrs[i].toXML(xmlWriter)
            xmlWriter.endtag("CharString")
            xmlWriter.newline()
            xmlWriter.newline()
        xmlWriter.endtag("GlobalSubrs")
        xmlWriter.newline()
        xmlWriter.newline()

    def fromXML(self, (name, attrs, content)):
        # Unfinished stub (Python 2 tuple parameter); 'xxx' raises NameError if called.
        xxx
class IndexedStrings:
    """The CFF string INDEX: SIDs below cffStandardStringCount refer to the
    built-in standard strings, SIDs at or above it to the custom strings
    stored in this object.  (Python 2 code.)"""

    def __init__(self, strings=None):
        # avoid a shared mutable default argument
        if strings is None:
            strings = []
        self.strings = strings

    def __getitem__(self, SID):
        """Return the string for a SID, standard or custom."""
        if SID < cffStandardStringCount:
            return cffStandardStrings[SID]
        else:
            return self.strings[SID - cffStandardStringCount]

    def getSID(self, s):
        """Return the SID for string `s`, adding it to the custom strings
        when it is neither a standard string nor already registered."""
        if not hasattr(self, "stringMapping"):
            self.buildStringMapping()
        # Bug fix: a standard string must yield its standard SID and must NOT
        # fall through -- the original code went on to append it to
        # self.strings and overwrote SID with a fresh custom value.
        if cffStandardStringMapping.has_key(s):
            return cffStandardStringMapping[s]
        if self.stringMapping.has_key(s):
            SID = self.stringMapping[s]
        else:
            SID = len(self.strings) + cffStandardStringCount
            self.strings.append(s)
            self.stringMapping[s] = SID
        return SID

    def getStrings(self):
        """Return the list of custom (non-standard) strings."""
        return self.strings

    def buildStringMapping(self):
        # custom string -> SID (offset past the 391 standard strings)
        self.stringMapping = {}
        for index in range(len(self.strings)):
            self.stringMapping[self.strings[index]] = index + cffStandardStringCount
class CFFFont:
    """A single font inside a CFFFontSet.

    Top-dict entries are set as instance attributes by decompile(); any
    attribute not explicitly set falls back to
    psCharStrings.topDictDefaults via __getattr__.  (Python 2 code.)
    """

    defaults = psCharStrings.topDictDefaults

    def __init__(self):
        pass

    def __getattr__(self, attr):
        # unset top-dict keys fall back to the shared defaults table
        if not self.defaults.has_key(attr):
            raise AttributeError, attr
        return self.defaults[attr]

    def fromDict(self, dict):
        # bulk-install top-dict entries as attributes
        self.__dict__.update(dict)

    def decompile(self, data, topDictData, strings, fontSet):
        """Parse this font's top dict, private dict, charset and charstrings.

        `data` is the complete CFF blob (the top dict stores absolute
        offsets into it); `topDictData` is this font's slice of the
        TopDict INDEX.
        """
        top = psCharStrings.TopDictDecompiler(strings)
        top.decompile(topDictData)
        self.fromDict(top.getDict())
        # get private dict -- stored in the top dict as a (size, offset) pair
        size, offset = self.Private
        #print "YYY Private (size, offset):", size, offset
        privateData = data[offset:offset+size]
        self.Private = PrivateDict()
        # pass data from `offset` onward: the Subrs offset inside the private
        # dict is relative to the start of the private dict itself
        self.Private.decompile(data[offset:], privateData, strings)
        # get raw charstrings
        #print "YYYY CharStrings offset:", self.CharStrings
        rawCharStrings, restdata = readINDEX(data[self.CharStrings:])
        nGlyphs = len(rawCharStrings)
        # get charset (or rather: get glyphNames)
        charsetOffset = self.charset
        if charsetOffset == 0:
            xxx  # standard charset -- not implemented yet
        else:
            #print "YYYYY charsetOffset:", charsetOffset
            # the byte at the offset selects the charset format
            format = ord(data[charsetOffset])
            if format == 0:
                xxx
            elif format == 1:
                charSet = parseCharsetFormat1(nGlyphs,
                        data[charsetOffset+1:], strings)
            elif format == 2:
                charSet = parseCharsetFormat2(nGlyphs,
                        data[charsetOffset+1:], strings)
            elif format == 3:
                xxx
            else:
                xxx
        # replace the raw offset with the resolved glyph-name list
        self.charset = charSet
        assert len(charSet) == nGlyphs
        self.CharStrings = charStrings = {}
        if self.CharstringType == 2:
            # Type 2 CharStrings
            charStringClass = psCharStrings.T2CharString
        else:
            # Type 1 CharStrings
            charStringClass = psCharStrings.T1CharString
        for i in range(nGlyphs):
            charStrings[charSet[i]] = charStringClass(rawCharStrings[i])
        assert len(charStrings) == nGlyphs
        # XXX Encoding!
        encoding = self.Encoding
        if encoding not in (0, 1):
            # encoding is an _offset_ from the beginning of 'data' to an encoding subtable
            XXX
        self.Encoding = encoding

    def getGlyphOrder(self):
        # glyph order == charset (list of glyph names)
        return self.charset

    def setGlyphOrder(self, glyphOrder):
        self.charset = glyphOrder

    def decompileAllCharStrings(self):
        """Force decompilation of every charstring (needed before XML dump)."""
        if self.CharstringType == 2:
            # Type 2 CharStrings
            decompiler = psCharStrings.SimpleT2Decompiler(self.Private.Subrs, self.GlobalSubrs)
            for charString in self.CharStrings.values():
                if charString.needsDecompilation():
                    decompiler.reset()
                    decompiler.execute(charString)
        else:
            # Type 1 CharStrings
            for charString in self.CharStrings.values():
                charString.decompile()

    def toXML(self, xmlWriter, progress=None):
        """Dump this font: simple values, private dict, then charstrings."""
        xmlWriter.newline()
        # first dump the simple values
        self.toXMLSimpleValues(xmlWriter)
        # dump charset
        # XXX
        # decompile all charstrings
        if progress:
            progress.setlabel("Decompiling CharStrings...")
        self.decompileAllCharStrings()
        # dump private dict
        xmlWriter.begintag("Private")
        xmlWriter.newline()
        self.Private.toXML(xmlWriter)
        xmlWriter.endtag("Private")
        xmlWriter.newline()
        self.toXMLCharStrings(xmlWriter, progress)

    def toXMLSimpleValues(self, xmlWriter):
        # everything except the four compound attributes is a "simple" value
        keys = self.__dict__.keys()
        keys.remove("CharStrings")
        keys.remove("Private")
        keys.remove("charset")
        keys.remove("GlobalSubrs")
        keys.sort()
        for key in keys:
            value = getattr(self, key)
            if key == "Encoding":
                if value == 0:
                    # encoding is (Adobe) Standard Encoding
                    value = "StandardEncoding"
                elif value == 1:
                    # encoding is Expert Encoding
                    value = "ExpertEncoding"
            if type(value) == types.ListType:
                # lists become one space-separated string
                value = string.join(map(str, value), " ")
            else:
                value = str(value)
            xmlWriter.begintag(key)
            if hasattr(value, "toXML"):
                xmlWriter.newline()
                value.toXML(xmlWriter)
                xmlWriter.newline()
            else:
                xmlWriter.write(value)
            xmlWriter.endtag(key)
            xmlWriter.newline()
        xmlWriter.newline()

    def toXMLCharStrings(self, xmlWriter, progress=None):
        """Dump all charstrings, sorted by glyph name."""
        charStrings = self.CharStrings
        xmlWriter.newline()
        xmlWriter.begintag("CharStrings")
        xmlWriter.newline()
        glyphNames = charStrings.keys()
        glyphNames.sort()
        for glyphName in glyphNames:
            if progress:
                progress.setlabel("Dumping 'CFF ' table... (%s)" % glyphName)
                progress.increment()
            xmlWriter.newline()
            charString = charStrings[glyphName]
            xmlWriter.begintag("CharString", name=glyphName)
            xmlWriter.newline()
            charString.toXML(xmlWriter)
            xmlWriter.endtag("CharString")
            xmlWriter.newline()
            xmlWriter.newline()
        xmlWriter.endtag("CharStrings")
        xmlWriter.newline()
class PrivateDict:
    """A font's private dict; unset keys fall back to
    psCharStrings.privateDictDefaults via __getattr__.  (Python 2 code.)"""

    defaults = psCharStrings.privateDictDefaults

    def __init__(self):
        pass

    def decompile(self, data, privateData, strings):
        """Parse the private dict and its local subroutines.

        `data` starts at the private dict's own offset, because the Subrs
        offset stored inside the dict is relative to that point.
        """
        p = psCharStrings.PrivateDictDecompiler(strings)
        p.decompile(privateData)
        self.fromDict(p.getDict())
        # get local subrs
        #print "YYY Private.Subrs:", self.Subrs
        chunk = data[self.Subrs:]
        localSubrs, restdata = readINDEX(chunk)
        # Python 2 map() -> list of T2CharString objects
        self.Subrs = map(psCharStrings.T2CharString, localSubrs)

    def toXML(self, xmlWriter):
        """Dump the simple values, then the local subroutines."""
        xmlWriter.newline()
        keys = self.__dict__.keys()
        keys.remove("Subrs")
        for key in keys:
            value = getattr(self, key)
            if type(value) == types.ListType:
                # lists become one space-separated string
                value = string.join(map(str, value), " ")
            else:
                value = str(value)
            xmlWriter.begintag(key)
            xmlWriter.write(value)
            xmlWriter.endtag(key)
            xmlWriter.newline()
        # write subroutines
        xmlWriter.newline()
        xmlWriter.begintag("Subrs")
        xmlWriter.newline()
        for i in range(len(self.Subrs)):
            xmlWriter.newline()
            xmlWriter.begintag("CharString", id=i)
            xmlWriter.newline()
            self.Subrs[i].toXML(xmlWriter)
            xmlWriter.endtag("CharString")
            xmlWriter.newline()
            xmlWriter.newline()
        xmlWriter.endtag("Subrs")
        xmlWriter.newline()
        xmlWriter.newline()

    def __getattr__(self, attr):
        # unset private-dict keys fall back to the shared defaults table
        if not self.defaults.has_key(attr):
            raise AttributeError, attr
        return self.defaults[attr]

    def fromDict(self, dict):
        # bulk-install private-dict entries as attributes
        self.__dict__.update(dict)
def readINDEX(data):
    """Read one CFF INDEX from the head of `data`.

    Layout: uint16 count, uint8 offSize, then (count+1) big-endian
    offsets of offSize bytes each (1-based, relative to the byte before
    the object data), then the concatenated object data.
    Returns (list_of_item_strings, remaining_data).

    NOTE(review): per the CFF spec an *empty* INDEX is encoded as just the
    2-byte zero count (no offSize/offsets); this code would misparse that
    case and also hit an undefined `next` below -- confirm inputs never
    contain an empty INDEX.
    """
    count, = struct.unpack(">H", data[:2])
    count = int(count)
    offSize = ord(data[2])
    data = data[3:]
    offsets = []
    for index in range(count+1):
        # left-pad each offset to 4 bytes so it unpacks as a big-endian long
        chunk = data[index * offSize: (index+1) * offSize]
        chunk = '\0' * (4 - offSize) + chunk
        offset, = struct.unpack(">L", chunk)
        offset = int(offset)
        offsets.append(offset)
    data = data[(count+1) * offSize:]
    prev = offsets[0]
    stuff = []
    for next in offsets[1:]:
        # offsets are 1-based, hence the -1 corrections
        chunk = data[prev-1:next-1]
        assert len(chunk) == next - prev
        stuff.append(chunk)
        prev = next
    data = data[next-1:]
    return stuff, data
def parseCharsetFormat1(nGlyphs, data, strings):
    """Expand a format-1 charset into a glyph-name list.

    Each range is a uint16 first-SID plus a uint8 nLeft count; glyph 0 is
    always '.notdef'.
    """
    charSet = ['.notdef']
    count = 1
    while count < nGlyphs:
        first = int(struct.unpack(">H", data[:2])[0])
        nLeft = ord(data[2])
        data = data[3:]
        # each range covers SIDs first..first+nLeft inclusive
        for SID in range(first, first+nLeft+1):
            charSet.append(strings[SID])
        count = count + nLeft + 1
    return charSet
def parseCharsetFormat2(nGlyphs, data, strings):
    """Expand a format-2 charset into a glyph-name list.

    Identical to format 1 except nLeft is a uint16; glyph 0 is always
    '.notdef'.
    """
    charSet = ['.notdef']
    count = 1
    while count < nGlyphs:
        first = int(struct.unpack(">H", data[:2])[0])
        nLeft = int(struct.unpack(">H", data[2:4])[0])
        data = data[4:]
        # each range covers SIDs first..first+nLeft inclusive
        for SID in range(first, first+nLeft+1):
            charSet.append(strings[SID])
        count = count + nLeft + 1
    return charSet
# The 391 Standard Strings as used in the CFF format.
# from Adobe Technical Note #5176, version 1.0, 18 March 1998
cffStandardStrings = ['.notdef', 'space', 'exclam', 'quotedbl', 'numbersign',
'dollar', 'percent', 'ampersand', 'quoteright', 'parenleft', 'parenright',
'asterisk', 'plus', 'comma', 'hyphen', 'period', 'slash', 'zero', 'one',
'two', 'three', 'four', 'five', 'six', 'seven', 'eight', 'nine', 'colon',
'semicolon', 'less', 'equal', 'greater', 'question', 'at', 'A', 'B', 'C',
'D', 'E', 'F', 'G', 'H', 'I', 'J', 'K', 'L', 'M', 'N', 'O', 'P', 'Q', 'R',
'S', 'T', 'U', 'V', 'W', 'X', 'Y', 'Z', 'bracketleft', 'backslash',
'bracketright', 'asciicircum', 'underscore', 'quoteleft', 'a', 'b', 'c',
'd', 'e', 'f', 'g', 'h', 'i', 'j', 'k', 'l', 'm', 'n', 'o', 'p', 'q', 'r',
's', 't', 'u', 'v', 'w', 'x', 'y', 'z', 'braceleft', 'bar', 'braceright',
'asciitilde', 'exclamdown', 'cent', 'sterling', 'fraction', 'yen', 'florin',
'section', 'currency', 'quotesingle', 'quotedblleft', 'guillemotleft',
'guilsinglleft', 'guilsinglright', 'fi', 'fl', 'endash', 'dagger',
'daggerdbl', 'periodcentered', 'paragraph', 'bullet', 'quotesinglbase',
'quotedblbase', 'quotedblright', 'guillemotright', 'ellipsis', 'perthousand',
'questiondown', 'grave', 'acute', 'circumflex', 'tilde', 'macron', 'breve',
'dotaccent', 'dieresis', 'ring', 'cedilla', 'hungarumlaut', 'ogonek', 'caron',
'emdash', 'AE', 'ordfeminine', 'Lslash', 'Oslash', 'OE', 'ordmasculine', 'ae',
'dotlessi', 'lslash', 'oslash', 'oe', 'germandbls', 'onesuperior',
'logicalnot', 'mu', 'trademark', 'Eth', 'onehalf', 'plusminus', 'Thorn',
'onequarter', 'divide', 'brokenbar', 'degree', 'thorn', 'threequarters',
'twosuperior', 'registered', 'minus', 'eth', 'multiply', 'threesuperior',
'copyright', 'Aacute', 'Acircumflex', 'Adieresis', 'Agrave', 'Aring',
'Atilde', 'Ccedilla', 'Eacute', 'Ecircumflex', 'Edieresis', 'Egrave',
'Iacute', 'Icircumflex', 'Idieresis', 'Igrave', 'Ntilde', 'Oacute',
'Ocircumflex', 'Odieresis', 'Ograve', 'Otilde', 'Scaron', 'Uacute',
'Ucircumflex', 'Udieresis', 'Ugrave', 'Yacute', 'Ydieresis', 'Zcaron',
'aacute', 'acircumflex', 'adieresis', 'agrave', 'aring', 'atilde', 'ccedilla',
'eacute', 'ecircumflex', 'edieresis', 'egrave', 'iacute', 'icircumflex',
'idieresis', 'igrave', 'ntilde', 'oacute', 'ocircumflex', 'odieresis',
'ograve', 'otilde', 'scaron', 'uacute', 'ucircumflex', 'udieresis', 'ugrave',
'yacute', 'ydieresis', 'zcaron', 'exclamsmall', 'Hungarumlautsmall',
'dollaroldstyle', 'dollarsuperior', 'ampersandsmall', 'Acutesmall',
'parenleftsuperior', 'parenrightsuperior', 'twodotenleader', 'onedotenleader',
'zerooldstyle', 'oneoldstyle', 'twooldstyle', 'threeoldstyle', 'fouroldstyle',
'fiveoldstyle', 'sixoldstyle', 'sevenoldstyle', 'eightoldstyle',
'nineoldstyle', 'commasuperior', 'threequartersemdash', 'periodsuperior',
'questionsmall', 'asuperior', 'bsuperior', 'centsuperior', 'dsuperior',
'esuperior', 'isuperior', 'lsuperior', 'msuperior', 'nsuperior', 'osuperior',
'rsuperior', 'ssuperior', 'tsuperior', 'ff', 'ffi', 'ffl', 'parenleftinferior',
'parenrightinferior', 'Circumflexsmall', 'hyphensuperior', 'Gravesmall',
'Asmall', 'Bsmall', 'Csmall', 'Dsmall', 'Esmall', 'Fsmall', 'Gsmall', 'Hsmall',
'Ismall', 'Jsmall', 'Ksmall', 'Lsmall', 'Msmall', 'Nsmall', 'Osmall', 'Psmall',
'Qsmall', 'Rsmall', 'Ssmall', 'Tsmall', 'Usmall', 'Vsmall', 'Wsmall', 'Xsmall',
'Ysmall', 'Zsmall', 'colonmonetary', 'onefitted', 'rupiah', 'Tildesmall',
'exclamdownsmall', 'centoldstyle', 'Lslashsmall', 'Scaronsmall', 'Zcaronsmall',
'Dieresissmall', 'Brevesmall', 'Caronsmall', 'Dotaccentsmall', 'Macronsmall',
'figuredash', 'hypheninferior', 'Ogoneksmall', 'Ringsmall', 'Cedillasmall',
'questiondownsmall', 'oneeighth', 'threeeighths', 'fiveeighths', 'seveneighths',
'onethird', 'twothirds', 'zerosuperior', 'foursuperior', 'fivesuperior',
'sixsuperior', 'sevensuperior', 'eightsuperior', 'ninesuperior', 'zeroinferior',
'oneinferior', 'twoinferior', 'threeinferior', 'fourinferior', 'fiveinferior',
'sixinferior', 'seveninferior', 'eightinferior', 'nineinferior', 'centinferior',
'dollarinferior', 'periodinferior', 'commainferior', 'Agravesmall',
'Aacutesmall', 'Acircumflexsmall', 'Atildesmall', 'Adieresissmall', 'Aringsmall',
'AEsmall', 'Ccedillasmall', 'Egravesmall', 'Eacutesmall', 'Ecircumflexsmall',
'Edieresissmall', 'Igravesmall', 'Iacutesmall', 'Icircumflexsmall',
'Idieresissmall', 'Ethsmall', 'Ntildesmall', 'Ogravesmall', 'Oacutesmall',
'Ocircumflexsmall', 'Otildesmall', 'Odieresissmall', 'OEsmall', 'Oslashsmall',
'Ugravesmall', 'Uacutesmall', 'Ucircumflexsmall', 'Udieresissmall',
'Yacutesmall', 'Thornsmall', 'Ydieresissmall', '001.000', '001.001', '001.002',
'001.003', 'Black', 'Bold', 'Book', 'Light', 'Medium', 'Regular', 'Roman',
'Semibold'
]
# SIDs below this count are standard strings; higher SIDs are custom.
cffStandardStringCount = 391
assert len(cffStandardStrings) == cffStandardStringCount
# build reverse mapping
cffStandardStringMapping = {}
for _i in range(cffStandardStringCount):
    cffStandardStringMapping[cffStandardStrings[_i]] = _i
added $Id$ tag.
git-svn-id: 05b73559aeb8bace4cf49b5ea964569f1305eff8@25 4cde692c-a291-49d1-8350-778aa11640f8
"""cffLib.py -- read/write tools for Adobe CFF fonts."""
__version__ = "$Id: cffLib.py,v 1.2 1999-12-20 21:59:49 Just Exp $"
__author__ = "jvr"
import struct, sstruct
import string
import types
import psCharStrings
cffHeaderFormat = """
major: B
minor: B
hdrSize: B
offSize: B
"""
class CFFFontSet:
    """Top-level CFF object: parses the CFF header and the four leading
    INDEX structures (font names, top dicts, strings, global subrs) and
    builds one CFFFont per top dict.  (Python 2 code.)"""

    def __init__(self):
        # font name -> CFFFont
        self.fonts = {}

    def decompile(self, data):
        """Parse a complete binary CFF blob."""
        # 4-byte header: major, minor, hdrSize, offSize (one byte each)
        sstruct.unpack(cffHeaderFormat, data[:4], self)
        assert self.major == 1 and self.minor == 0, \
            "unknown CFF format: %d.%d" % (self.major, self.minor)
        restdata = data[self.hdrSize:]
        self.fontNames, restdata = readINDEX(restdata)
        topDicts, restdata = readINDEX(restdata)
        strings, restdata = readINDEX(restdata)
        strings = IndexedStrings(strings)
        globalSubrs, restdata = readINDEX(restdata)
        # Python 2 map() -> list of T2CharString objects
        self.GlobalSubrs = map(psCharStrings.T2CharString, globalSubrs)
        for i in range(len(topDicts)):
            font = self.fonts[self.fontNames[i]] = CFFFont()
            font.GlobalSubrs = self.GlobalSubrs  # Hmm.
            # top dicts hold absolute offsets, so the full blob is passed too
            font.decompile(data, topDicts[i], strings, self)  # maybe only 'on demand'?

    def compile(self):
        strings = IndexedStrings()
        # not implemented yet -- intentional NameError placeholder
        XXXX

    def toXML(self, xmlWriter, progress=None):
        """Dump every font plus the global subroutines as XML."""
        xmlWriter.newline()
        for fontName in self.fontNames:
            xmlWriter.begintag("CFFFont", name=fontName)
            xmlWriter.newline()
            font = self.fonts[fontName]
            font.toXML(xmlWriter, progress)
            xmlWriter.endtag("CFFFont")
            xmlWriter.newline()
            xmlWriter.newline()
        xmlWriter.begintag("GlobalSubrs")
        xmlWriter.newline()
        for i in range(len(self.GlobalSubrs)):
            xmlWriter.newline()
            xmlWriter.begintag("CharString", id=i)
            xmlWriter.newline()
            self.GlobalSubrs[i].toXML(xmlWriter)
            xmlWriter.endtag("CharString")
            xmlWriter.newline()
            xmlWriter.newline()
        xmlWriter.endtag("GlobalSubrs")
        xmlWriter.newline()
        xmlWriter.newline()

    def fromXML(self, (name, attrs, content)):
        # Python 2 tuple parameter; not implemented yet (NameError placeholder)
        xxx
class IndexedStrings:
    """The CFF string INDEX: SIDs below cffStandardStringCount refer to the
    built-in standard strings, SIDs at or above it to the custom strings
    stored in this object.  (Python 2 code.)"""

    def __init__(self, strings=None):
        # avoid a shared mutable default argument
        if strings is None:
            strings = []
        self.strings = strings

    def __getitem__(self, SID):
        """Return the string for a SID, standard or custom."""
        if SID < cffStandardStringCount:
            return cffStandardStrings[SID]
        else:
            return self.strings[SID - cffStandardStringCount]

    def getSID(self, s):
        """Return the SID for string `s`, adding it to the custom strings
        when it is neither a standard string nor already registered."""
        if not hasattr(self, "stringMapping"):
            self.buildStringMapping()
        # Bug fix: a standard string must yield its standard SID and must NOT
        # fall through -- the original code went on to append it to
        # self.strings and overwrote SID with a fresh custom value.
        if cffStandardStringMapping.has_key(s):
            return cffStandardStringMapping[s]
        if self.stringMapping.has_key(s):
            SID = self.stringMapping[s]
        else:
            SID = len(self.strings) + cffStandardStringCount
            self.strings.append(s)
            self.stringMapping[s] = SID
        return SID

    def getStrings(self):
        """Return the list of custom (non-standard) strings."""
        return self.strings

    def buildStringMapping(self):
        # custom string -> SID (offset past the 391 standard strings)
        self.stringMapping = {}
        for index in range(len(self.strings)):
            self.stringMapping[self.strings[index]] = index + cffStandardStringCount
class CFFFont:
    """A single font inside a CFFFontSet.

    Top-dict entries are set as instance attributes by decompile(); any
    attribute not explicitly set falls back to
    psCharStrings.topDictDefaults via __getattr__.  (Python 2 code.)
    """

    defaults = psCharStrings.topDictDefaults

    def __init__(self):
        pass

    def __getattr__(self, attr):
        # unset top-dict keys fall back to the shared defaults table
        if not self.defaults.has_key(attr):
            raise AttributeError, attr
        return self.defaults[attr]

    def fromDict(self, dict):
        # bulk-install top-dict entries as attributes
        self.__dict__.update(dict)

    def decompile(self, data, topDictData, strings, fontSet):
        """Parse this font's top dict, private dict, charset and charstrings.

        `data` is the complete CFF blob (the top dict stores absolute
        offsets into it); `topDictData` is this font's slice of the
        TopDict INDEX.
        """
        top = psCharStrings.TopDictDecompiler(strings)
        top.decompile(topDictData)
        self.fromDict(top.getDict())
        # get private dict -- stored in the top dict as a (size, offset) pair
        size, offset = self.Private
        #print "YYY Private (size, offset):", size, offset
        privateData = data[offset:offset+size]
        self.Private = PrivateDict()
        # pass data from `offset` onward: the Subrs offset inside the private
        # dict is relative to the start of the private dict itself
        self.Private.decompile(data[offset:], privateData, strings)
        # get raw charstrings
        #print "YYYY CharStrings offset:", self.CharStrings
        rawCharStrings, restdata = readINDEX(data[self.CharStrings:])
        nGlyphs = len(rawCharStrings)
        # get charset (or rather: get glyphNames)
        charsetOffset = self.charset
        if charsetOffset == 0:
            xxx  # standard charset -- not implemented yet
        else:
            #print "YYYYY charsetOffset:", charsetOffset
            # the byte at the offset selects the charset format
            format = ord(data[charsetOffset])
            if format == 0:
                xxx
            elif format == 1:
                charSet = parseCharsetFormat1(nGlyphs,
                        data[charsetOffset+1:], strings)
            elif format == 2:
                charSet = parseCharsetFormat2(nGlyphs,
                        data[charsetOffset+1:], strings)
            elif format == 3:
                xxx
            else:
                xxx
        # replace the raw offset with the resolved glyph-name list
        self.charset = charSet
        assert len(charSet) == nGlyphs
        self.CharStrings = charStrings = {}
        if self.CharstringType == 2:
            # Type 2 CharStrings
            charStringClass = psCharStrings.T2CharString
        else:
            # Type 1 CharStrings
            charStringClass = psCharStrings.T1CharString
        for i in range(nGlyphs):
            charStrings[charSet[i]] = charStringClass(rawCharStrings[i])
        assert len(charStrings) == nGlyphs
        # XXX Encoding!
        encoding = self.Encoding
        if encoding not in (0, 1):
            # encoding is an _offset_ from the beginning of 'data' to an encoding subtable
            XXX
        self.Encoding = encoding

    def getGlyphOrder(self):
        # glyph order == charset (list of glyph names)
        return self.charset

    def setGlyphOrder(self, glyphOrder):
        self.charset = glyphOrder

    def decompileAllCharStrings(self):
        """Force decompilation of every charstring (needed before XML dump)."""
        if self.CharstringType == 2:
            # Type 2 CharStrings
            decompiler = psCharStrings.SimpleT2Decompiler(self.Private.Subrs, self.GlobalSubrs)
            for charString in self.CharStrings.values():
                if charString.needsDecompilation():
                    decompiler.reset()
                    decompiler.execute(charString)
        else:
            # Type 1 CharStrings
            for charString in self.CharStrings.values():
                charString.decompile()

    def toXML(self, xmlWriter, progress=None):
        """Dump this font: simple values, private dict, then charstrings."""
        xmlWriter.newline()
        # first dump the simple values
        self.toXMLSimpleValues(xmlWriter)
        # dump charset
        # XXX
        # decompile all charstrings
        if progress:
            progress.setlabel("Decompiling CharStrings...")
        self.decompileAllCharStrings()
        # dump private dict
        xmlWriter.begintag("Private")
        xmlWriter.newline()
        self.Private.toXML(xmlWriter)
        xmlWriter.endtag("Private")
        xmlWriter.newline()
        self.toXMLCharStrings(xmlWriter, progress)

    def toXMLSimpleValues(self, xmlWriter):
        # everything except the four compound attributes is a "simple" value
        keys = self.__dict__.keys()
        keys.remove("CharStrings")
        keys.remove("Private")
        keys.remove("charset")
        keys.remove("GlobalSubrs")
        keys.sort()
        for key in keys:
            value = getattr(self, key)
            if key == "Encoding":
                if value == 0:
                    # encoding is (Adobe) Standard Encoding
                    value = "StandardEncoding"
                elif value == 1:
                    # encoding is Expert Encoding
                    value = "ExpertEncoding"
            if type(value) == types.ListType:
                # lists become one space-separated string
                value = string.join(map(str, value), " ")
            else:
                value = str(value)
            xmlWriter.begintag(key)
            if hasattr(value, "toXML"):
                xmlWriter.newline()
                value.toXML(xmlWriter)
                xmlWriter.newline()
            else:
                xmlWriter.write(value)
            xmlWriter.endtag(key)
            xmlWriter.newline()
        xmlWriter.newline()

    def toXMLCharStrings(self, xmlWriter, progress=None):
        """Dump all charstrings, sorted by glyph name."""
        charStrings = self.CharStrings
        xmlWriter.newline()
        xmlWriter.begintag("CharStrings")
        xmlWriter.newline()
        glyphNames = charStrings.keys()
        glyphNames.sort()
        for glyphName in glyphNames:
            if progress:
                progress.setlabel("Dumping 'CFF ' table... (%s)" % glyphName)
                progress.increment()
            xmlWriter.newline()
            charString = charStrings[glyphName]
            xmlWriter.begintag("CharString", name=glyphName)
            xmlWriter.newline()
            charString.toXML(xmlWriter)
            xmlWriter.endtag("CharString")
            xmlWriter.newline()
            xmlWriter.newline()
        xmlWriter.endtag("CharStrings")
        xmlWriter.newline()
class PrivateDict:
    """A font's private dict; unset keys fall back to
    psCharStrings.privateDictDefaults via __getattr__.  (Python 2 code.)"""

    defaults = psCharStrings.privateDictDefaults

    def __init__(self):
        pass

    def decompile(self, data, privateData, strings):
        """Parse the private dict and its local subroutines.

        `data` starts at the private dict's own offset, because the Subrs
        offset stored inside the dict is relative to that point.
        """
        p = psCharStrings.PrivateDictDecompiler(strings)
        p.decompile(privateData)
        self.fromDict(p.getDict())
        # get local subrs
        #print "YYY Private.Subrs:", self.Subrs
        chunk = data[self.Subrs:]
        localSubrs, restdata = readINDEX(chunk)
        # Python 2 map() -> list of T2CharString objects
        self.Subrs = map(psCharStrings.T2CharString, localSubrs)

    def toXML(self, xmlWriter):
        """Dump the simple values, then the local subroutines."""
        xmlWriter.newline()
        keys = self.__dict__.keys()
        keys.remove("Subrs")
        for key in keys:
            value = getattr(self, key)
            if type(value) == types.ListType:
                # lists become one space-separated string
                value = string.join(map(str, value), " ")
            else:
                value = str(value)
            xmlWriter.begintag(key)
            xmlWriter.write(value)
            xmlWriter.endtag(key)
            xmlWriter.newline()
        # write subroutines
        xmlWriter.newline()
        xmlWriter.begintag("Subrs")
        xmlWriter.newline()
        for i in range(len(self.Subrs)):
            xmlWriter.newline()
            xmlWriter.begintag("CharString", id=i)
            xmlWriter.newline()
            self.Subrs[i].toXML(xmlWriter)
            xmlWriter.endtag("CharString")
            xmlWriter.newline()
            xmlWriter.newline()
        xmlWriter.endtag("Subrs")
        xmlWriter.newline()
        xmlWriter.newline()

    def __getattr__(self, attr):
        # unset private-dict keys fall back to the shared defaults table
        if not self.defaults.has_key(attr):
            raise AttributeError, attr
        return self.defaults[attr]

    def fromDict(self, dict):
        # bulk-install private-dict entries as attributes
        self.__dict__.update(dict)
def readINDEX(data):
    """Read one CFF INDEX from the head of `data`.

    Layout: uint16 count, uint8 offSize, then (count+1) big-endian
    offsets of offSize bytes each (1-based, relative to the byte before
    the object data), then the concatenated object data.
    Returns (list_of_item_strings, remaining_data).

    NOTE(review): per the CFF spec an *empty* INDEX is encoded as just the
    2-byte zero count (no offSize/offsets); this code would misparse that
    case and also hit an undefined `next` below -- confirm inputs never
    contain an empty INDEX.
    """
    count, = struct.unpack(">H", data[:2])
    count = int(count)
    offSize = ord(data[2])
    data = data[3:]
    offsets = []
    for index in range(count+1):
        # left-pad each offset to 4 bytes so it unpacks as a big-endian long
        chunk = data[index * offSize: (index+1) * offSize]
        chunk = '\0' * (4 - offSize) + chunk
        offset, = struct.unpack(">L", chunk)
        offset = int(offset)
        offsets.append(offset)
    data = data[(count+1) * offSize:]
    prev = offsets[0]
    stuff = []
    for next in offsets[1:]:
        # offsets are 1-based, hence the -1 corrections
        chunk = data[prev-1:next-1]
        assert len(chunk) == next - prev
        stuff.append(chunk)
        prev = next
    data = data[next-1:]
    return stuff, data
def parseCharsetFormat1(nGlyphs, data, strings):
    """Expand a format-1 charset into a glyph-name list.

    Each range is a uint16 first-SID plus a uint8 nLeft count; glyph 0 is
    always '.notdef'.
    """
    charSet = ['.notdef']
    count = 1
    while count < nGlyphs:
        first = int(struct.unpack(">H", data[:2])[0])
        nLeft = ord(data[2])
        data = data[3:]
        # each range covers SIDs first..first+nLeft inclusive
        for SID in range(first, first+nLeft+1):
            charSet.append(strings[SID])
        count = count + nLeft + 1
    return charSet
def parseCharsetFormat2(nGlyphs, data, strings):
    """Expand a format-2 charset into a glyph-name list.

    Identical to format 1 except nLeft is a uint16; glyph 0 is always
    '.notdef'.
    """
    charSet = ['.notdef']
    count = 1
    while count < nGlyphs:
        first = int(struct.unpack(">H", data[:2])[0])
        nLeft = int(struct.unpack(">H", data[2:4])[0])
        data = data[4:]
        # each range covers SIDs first..first+nLeft inclusive
        for SID in range(first, first+nLeft+1):
            charSet.append(strings[SID])
        count = count + nLeft + 1
    return charSet
# The 391 Standard Strings as used in the CFF format.
# from Adobe Technical Note #5176, version 1.0, 18 March 1998
cffStandardStrings = ['.notdef', 'space', 'exclam', 'quotedbl', 'numbersign',
'dollar', 'percent', 'ampersand', 'quoteright', 'parenleft', 'parenright',
'asterisk', 'plus', 'comma', 'hyphen', 'period', 'slash', 'zero', 'one',
'two', 'three', 'four', 'five', 'six', 'seven', 'eight', 'nine', 'colon',
'semicolon', 'less', 'equal', 'greater', 'question', 'at', 'A', 'B', 'C',
'D', 'E', 'F', 'G', 'H', 'I', 'J', 'K', 'L', 'M', 'N', 'O', 'P', 'Q', 'R',
'S', 'T', 'U', 'V', 'W', 'X', 'Y', 'Z', 'bracketleft', 'backslash',
'bracketright', 'asciicircum', 'underscore', 'quoteleft', 'a', 'b', 'c',
'd', 'e', 'f', 'g', 'h', 'i', 'j', 'k', 'l', 'm', 'n', 'o', 'p', 'q', 'r',
's', 't', 'u', 'v', 'w', 'x', 'y', 'z', 'braceleft', 'bar', 'braceright',
'asciitilde', 'exclamdown', 'cent', 'sterling', 'fraction', 'yen', 'florin',
'section', 'currency', 'quotesingle', 'quotedblleft', 'guillemotleft',
'guilsinglleft', 'guilsinglright', 'fi', 'fl', 'endash', 'dagger',
'daggerdbl', 'periodcentered', 'paragraph', 'bullet', 'quotesinglbase',
'quotedblbase', 'quotedblright', 'guillemotright', 'ellipsis', 'perthousand',
'questiondown', 'grave', 'acute', 'circumflex', 'tilde', 'macron', 'breve',
'dotaccent', 'dieresis', 'ring', 'cedilla', 'hungarumlaut', 'ogonek', 'caron',
'emdash', 'AE', 'ordfeminine', 'Lslash', 'Oslash', 'OE', 'ordmasculine', 'ae',
'dotlessi', 'lslash', 'oslash', 'oe', 'germandbls', 'onesuperior',
'logicalnot', 'mu', 'trademark', 'Eth', 'onehalf', 'plusminus', 'Thorn',
'onequarter', 'divide', 'brokenbar', 'degree', 'thorn', 'threequarters',
'twosuperior', 'registered', 'minus', 'eth', 'multiply', 'threesuperior',
'copyright', 'Aacute', 'Acircumflex', 'Adieresis', 'Agrave', 'Aring',
'Atilde', 'Ccedilla', 'Eacute', 'Ecircumflex', 'Edieresis', 'Egrave',
'Iacute', 'Icircumflex', 'Idieresis', 'Igrave', 'Ntilde', 'Oacute',
'Ocircumflex', 'Odieresis', 'Ograve', 'Otilde', 'Scaron', 'Uacute',
'Ucircumflex', 'Udieresis', 'Ugrave', 'Yacute', 'Ydieresis', 'Zcaron',
'aacute', 'acircumflex', 'adieresis', 'agrave', 'aring', 'atilde', 'ccedilla',
'eacute', 'ecircumflex', 'edieresis', 'egrave', 'iacute', 'icircumflex',
'idieresis', 'igrave', 'ntilde', 'oacute', 'ocircumflex', 'odieresis',
'ograve', 'otilde', 'scaron', 'uacute', 'ucircumflex', 'udieresis', 'ugrave',
'yacute', 'ydieresis', 'zcaron', 'exclamsmall', 'Hungarumlautsmall',
'dollaroldstyle', 'dollarsuperior', 'ampersandsmall', 'Acutesmall',
'parenleftsuperior', 'parenrightsuperior', 'twodotenleader', 'onedotenleader',
'zerooldstyle', 'oneoldstyle', 'twooldstyle', 'threeoldstyle', 'fouroldstyle',
'fiveoldstyle', 'sixoldstyle', 'sevenoldstyle', 'eightoldstyle',
'nineoldstyle', 'commasuperior', 'threequartersemdash', 'periodsuperior',
'questionsmall', 'asuperior', 'bsuperior', 'centsuperior', 'dsuperior',
'esuperior', 'isuperior', 'lsuperior', 'msuperior', 'nsuperior', 'osuperior',
'rsuperior', 'ssuperior', 'tsuperior', 'ff', 'ffi', 'ffl', 'parenleftinferior',
'parenrightinferior', 'Circumflexsmall', 'hyphensuperior', 'Gravesmall',
'Asmall', 'Bsmall', 'Csmall', 'Dsmall', 'Esmall', 'Fsmall', 'Gsmall', 'Hsmall',
'Ismall', 'Jsmall', 'Ksmall', 'Lsmall', 'Msmall', 'Nsmall', 'Osmall', 'Psmall',
'Qsmall', 'Rsmall', 'Ssmall', 'Tsmall', 'Usmall', 'Vsmall', 'Wsmall', 'Xsmall',
'Ysmall', 'Zsmall', 'colonmonetary', 'onefitted', 'rupiah', 'Tildesmall',
'exclamdownsmall', 'centoldstyle', 'Lslashsmall', 'Scaronsmall', 'Zcaronsmall',
'Dieresissmall', 'Brevesmall', 'Caronsmall', 'Dotaccentsmall', 'Macronsmall',
'figuredash', 'hypheninferior', 'Ogoneksmall', 'Ringsmall', 'Cedillasmall',
'questiondownsmall', 'oneeighth', 'threeeighths', 'fiveeighths', 'seveneighths',
'onethird', 'twothirds', 'zerosuperior', 'foursuperior', 'fivesuperior',
'sixsuperior', 'sevensuperior', 'eightsuperior', 'ninesuperior', 'zeroinferior',
'oneinferior', 'twoinferior', 'threeinferior', 'fourinferior', 'fiveinferior',
'sixinferior', 'seveninferior', 'eightinferior', 'nineinferior', 'centinferior',
'dollarinferior', 'periodinferior', 'commainferior', 'Agravesmall',
'Aacutesmall', 'Acircumflexsmall', 'Atildesmall', 'Adieresissmall', 'Aringsmall',
'AEsmall', 'Ccedillasmall', 'Egravesmall', 'Eacutesmall', 'Ecircumflexsmall',
'Edieresissmall', 'Igravesmall', 'Iacutesmall', 'Icircumflexsmall',
'Idieresissmall', 'Ethsmall', 'Ntildesmall', 'Ogravesmall', 'Oacutesmall',
'Ocircumflexsmall', 'Otildesmall', 'Odieresissmall', 'OEsmall', 'Oslashsmall',
'Ugravesmall', 'Uacutesmall', 'Ucircumflexsmall', 'Udieresissmall',
'Yacutesmall', 'Thornsmall', 'Ydieresissmall', '001.000', '001.001', '001.002',
'001.003', 'Black', 'Bold', 'Book', 'Light', 'Medium', 'Regular', 'Roman',
'Semibold'
]
# SIDs below this count are standard strings; higher SIDs are custom.
cffStandardStringCount = 391
assert len(cffStandardStrings) == cffStandardStringCount
# build reverse mapping
cffStandardStringMapping = {}
for _i in range(cffStandardStringCount):
    cffStandardStringMapping[cffStandardStrings[_i]] = _i
|
import connect
from engine.event import *
from engine.action import *
from engine.code import *
from engine.player import *
from engine.round import *
from engine.team import *
from engine.spawn import *
from flask import Flask, render_template, request, json, session, jsonify
import os
import json
app = Flask(__name__)
SESSION_TYPE = 'Redis'
app.config.from_object(__name__)
# NOTE(review): a hard-coded secret key lets anyone forge session cookies.
# os.urandom(24) was commented out, presumably to keep sessions valid across
# restarts -- move the secret to configuration/environment instead.
app.secret_key = "huehuehuehuehuehuehue"#os.urandom(24)
# START BLOCK
# Player registration
def registration_template():
    """Render the player registration form."""
    return render_template("regi")
def pending_template():
    """Render the 'waiting for a round' screen for the session user."""
    return render_template("pending", user=session["user"], phone=session["phone"])
def playing_template():
    """Render the in-game player stats screen."""
    return render_template("p_stats")
def login_status():
    """Return True when a user name is stored in the session.

    A session whose "user" key is missing or explicitly None counts as
    not logged in.
    """
    try:
        # identity check: `is None` instead of the original `== None`
        return session["user"] is not None
    except KeyError:
        # no "user" key at all -> not logged in
        return False
@app.route("/")
def is_logged_in():
    """Landing page: playing stats, pending screen, or registration form."""
    # NOTE(review): is_free() returns *strings* ("True"/"False"/...) or None;
    # the string "False" is truthy here, so a non-free player may still get
    # the playing screen -- confirm the intended values of nowInLiberty.
    if login_status() and is_free():
        return playing_template()
    elif login_status():
        return pending_template()
    else:
        return registration_template()
@app.route("/register", methods=["GET"])
def save_new():
    """Register a new player from the ?user= and ?phone= query parameters.

    On success the user is stored in the session and routed onward; on any
    failure the registration form is shown again.
    """
    _user = request.args.get("user")
    _phone = request.args.get("phone")
    # Bug fix: validate and register BEFORE touching the session.  The
    # original stored user/phone unconditionally, so a failed registration
    # left a half-logged-in session behind (login_status() returned True).
    if _user and _phone and Action.addPlayerWOEmail(_user, _phone):
        session["user"] = _user
        session["phone"] = _phone
        return is_logged_in()
    return registration_template()
@app.route("/wrongInfo")
def wrong_info():
    """Delete the session user's player record and reset the session."""
    current_user = session["user"]
    Player.delPlayer(current_user)
    session.clear()
    return "User data removed"
@app.route("/jf")
def is_free():
    """Report whether the session user is currently "in liberty".

    Returns str(player["nowInLiberty"]) for the matching player, "False"
    when no round is active, and an empty (None) response otherwise.
    """
    # Bug fix: the original tested `if login_status:` -- the function object,
    # which is always truthy -- instead of calling it.
    if login_status():
        user = session["user"]
        with open('stats.json') as data_file:
            stats = json.load(data_file)
        if stats["roundName"] is not None:
            for player in stats["teamlessPlayers"]:
                if player["name"] == user:
                    return str(player["nowInLiberty"])
            for team in stats["teams"]:
                for player in team["players"]:
                    if player["name"] == user:
                        return str(player["nowInLiberty"])
            # NOTE(review): a logged-in user not found in any list falls
            # through and returns None -- confirm that is intended.
        else:
            return "False"
# Player registration
# END BLOCK
# START BLOCK
# Getting data
@app.route("/user")
def userName():
    """Return the logged-in user's name; empty response when not logged in."""
    if not login_status():
        return None
    return session["user"]
@app.route("/events")
def events():
    """Serve the contents of events.json as a JSON response."""
    try:
        with open('events.json') as data_file:
            events = json.load(data_file)
    except (OSError, ValueError):
        # Narrowed from a bare `except:`: only a missing/unreadable file or
        # invalid JSON gets the original best-effort reply.
        return "File not found"
    return jsonify(events)
@app.route("/stats")
def stats():
    """Serve the contents of stats.json as a JSON response."""
    try:
        with open('stats.json') as data_file:
            stats = json.load(data_file)
    except (OSError, ValueError):
        # Narrowed from a bare `except:`: only a missing/unreadable file or
        # invalid JSON gets the original best-effort reply.
        return "File not found"
    return jsonify(stats)
# Getting data
# END BLOCK
# START BLOCK
# Spawnmaster screen
@app.route("/masterlogin")
def masterLoginTemplate():
    """Render the spawnmaster authentication page."""
    return render_template("m_auth")
def masterView():
    """Render the spawnmaster dashboard with current rounds and the
    teamless players."""
    # Removed a leftover debug loop that printed every teamless player to
    # stdout on each request.
    players, teamless = Stats.playersDetailed()
    rounds = Round.getRounds()
    # NOTE(review): `players` is unused here; Stats.playersDetailed() is kept
    # for its teamless listing -- confirm whether `players` should be passed
    # to the template as well.
    return render_template("master", rounds=rounds, teamless=teamless)
def isMaster():
    """Return True when the session is flagged as an authenticated
    spawnmaster (session["master"] == 1)."""
    # dict-style .get() replaces the original try/except KeyError ladder;
    # a missing key or any value other than 1 yields False, as before.
    return session.get("master") == 1
@app.route("/spawn")
def spawnmaster():
    """Spawnmaster page: the dashboard when authenticated, otherwise the
    login page."""
    if not isMaster():
        return masterLoginTemplate()
    return masterView()
@app.route("/login", methods=["GET"])
def masterLogin():
    """Authenticate the spawnmaster against the stored account and, on
    success, mark the session and show the dashboard."""
    try:
        _user = request.args.get("user")
        _pw = request.args.get("pw")
        acc = Spawn.login()
        if _user == acc["name"][0] and _pw == acc["pw"][0]:
            session["master"] = 1
            return spawnmaster()
        else:
            # typo fix: the failure message used to read "foridden"
            return "Connection forbidden"
    except Exception:
        # NOTE(review): this broad except hides DB/lookup errors; kept so a
        # failed Spawn.login() still answers with a denial rather than a 500.
        return "Connection forbidden"
@app.route("/masterout")
def masterLogout():
    """Log the spawnmaster out by clearing the whole session."""
    session.clear()
    # typo fix: was "Spanwmaster"
    return "Spawnmaster has logged out"
@app.route("/s")
def hue():
    """Render the combined stats screen from the two JSON snapshot files."""
    with open('events.json') as ev_file:
        event_data = json.load(ev_file)
    with open('stats.json') as st_file:
        stat_data = json.load(st_file)
    return render_template("stats", events=event_data, stats=stat_data)
# Spawnmaster screen
# END BLOCK
# START BLOCK
# Spawnmaster's actions
# Adding a new round
@app.route("/addRound", methods=["GET"])
def startRound():
    """Create a new round from the roundName/roundLength/startsIn query
    parameters (lengths in minutes)."""
    roundName = request.args.get("roundName")
    # How many minutes does the round last
    roundLength = request.args.get("roundLength")
    # In how many minutes does the round begin
    startsIn = request.args.get("startsIn")
    # Bug fix: check presence FIRST.  The original converted before checking,
    # so a missing parameter hit int(None) -> uncaught TypeError (HTTP 500).
    if not roundName or not roundLength or not startsIn:
        return "Puudulik info uue roundi jaoks."
    try:
        length_min = int(roundLength)
        delay_min = int(startsIn)
    except ValueError:
        return "Round length and starttime has to be entered as integers."
    # NOTE(review): `datetime` and `dateformat` are expected to come from the
    # engine.* star imports -- confirm.
    startTime = datetime.datetime.now() + datetime.timedelta(seconds=delay_min * 60)
    endTime = startTime + datetime.timedelta(seconds=length_min * 60)
    startTimeString = format(startTime, dateformat)
    endTimeString = format(endTime, dateformat)
    if Round.add(roundName, startTimeString, endTimeString):
        return "New round \"" + roundName + "\" start time " + startTimeString + ", end time " + endTimeString + "."
    else:
        return "Error: New round has overlapping time. not added: \"" + roundName + "\" start time " + startTimeString + ", end time " + endTimeString + "."
# Adding player to a team in a round
@app.route("/addToTeam", methods = ["GET"])
def addToTeam():
    """Add a player to a team within a round (ids via query parameters)."""
    roundId = request.args.get("roundId")
    playerId = request.args.get("playerId")
    if roundId and playerId:
        try:
            # NOTE(review): lowercase `team` does not appear to be defined at
            # module level -- a NameError here would be swallowed by the bare
            # except below and misreported as "invalid values".  Confirm
            # against the engine.team star import (Team.add?).
            team.add(playerId, roundId)
            return "Player " + Player.getNameById(playerId) + " added to round" + roundId
        except:
            return "Round or player id were given as invalid values."
    else:
        return "Missing round or player id."
# Spawnmaster's actions
# END BLOCK
# START BLOCK
# Routes to player screen templates
@app.route("/tagging")
def tagging():
user = "rjurik"
team = 2
if team == 1:
team = "blue"
else:
team = "red"
score = 520
rank = 3
return render_template("playerPlaying.html", user=user, team=team, score=score, rank=rank)
@app.route("/tagged")
def tagged():
user = "rjurik"
team = 1
if team == 1:
team = "blue"
else:
team = "red"
score = 520
rank = 3
tagger = "LOLer"
return render_template("playerTagged.html", team=team, tagger=tagger, user=user, score=score, rank=rank)
# Routes to player screen templates
# END BLOCK
# Start program
try:
    connection = connect.connectDB()
except Exception:
    # Bug fix: swallowing the error here made the next line fail with a
    # confusing NameError on `connection`; log, then re-raise the cause.
    print("Problem with the database connection")
    raise
cursor = connection.cursor()
Action.initAllConnect(cursor)
Round.updateActiveId()
if __name__ == "__main__":
    app.run(debug=True)
Make gameserver directly executable.
#!/usr/bin/python3
import connect
from engine.event import *
from engine.action import *
from engine.code import *
from engine.player import *
from engine.round import *
from engine.team import *
from engine.spawn import *
from flask import Flask, render_template, request, json, session, jsonify
import os
import json
app = Flask(__name__)
SESSION_TYPE = 'Redis'
app.config.from_object(__name__)
app.secret_key = "huehuehuehuehuehuehue"#os.urandom(24)
# START BLOCK
# Player registration
def registration_template():
    # Registration form shown to visitors without a session.
    return render_template("regi")
def pending_template():
    # Waiting-room view; echoes the registered name/phone from the session.
    return render_template("pending", user=session["user"], phone=session["phone"])
def playing_template():
    # Player statistics view ("p_stats" template) for an active player.
    return render_template("p_stats")
def login_status():
    """Return True when a non-None user name is stored in the session.

    Idiom fix: replaces try/except KeyError plus an ``== None`` comparison
    with the equivalent dict-style ``.get()`` and ``is not None``.
    """
    return session.get("user") is not None
@app.route("/")
def is_logged_in():
    # Landing page: dispatch to the view matching the visitor's state.
    # NOTE(review): is_free() returns the *string* "False" (truthy) when the
    # player is not playing, so the first branch may fire more often than
    # intended -- verify.
    if login_status() and is_free():
        return playing_template()
    elif login_status():
        return pending_template()
    else:
        return registration_template()
@app.route("/register", methods=["GET"])
def save_new():
    # Register a new player from ?user=...&phone=... and log them in.
    _user = request.args.get("user")
    _phone = request.args.get("phone")
    # NOTE(review): the session is populated before validation, so a failed
    # registration still leaves user/phone in the session.
    session["user"] = _user
    session["phone"] = _phone
    if _user and _phone:
        if Action.addPlayerWOEmail(_user, _phone):
            return is_logged_in()
        else:
            return registration_template()
    else:
        return registration_template()
@app.route("/wrongInfo")
def wrong_info():
    # Undo a registration: remove the player record and drop the session.
    Player.delPlayer(session["user"])
    session.clear()
    return "User data removed"
@app.route("/jf")
def is_free():
    # Report whether the logged-in player is currently "in liberty".
    # Returns str(player's nowInLiberty flag) when the player is found in
    # the stats snapshot, otherwise the string "False".
    #
    # Bug fixes: the original tested ``if login_status:`` (the function
    # object, which is always truthy) instead of calling it, and fell
    # through to an implicit None return when the player was not found in
    # an active round.
    if login_status():
        user = session["user"]
        with open('stats.json') as data_file:
            stats = json.load(data_file)
        if stats["roundName"] is not None:
            for player in stats["teamlessPlayers"]:
                if player["name"] == user:
                    return str(player["nowInLiberty"])
            # `squad` avoids shadowing the engine `team` module name.
            for squad in stats["teams"]:
                for player in squad["players"]:
                    if player["name"] == user:
                        return str(player["nowInLiberty"])
    return "False"
# Player registration
# END BLOCK
# START BLOCK
# Getting data
@app.route("/user")
def userName():
    # Return the logged-in user's name.
    # NOTE(review): falls through to None (a 500 in Flask) when nobody is
    # logged in -- verify that callers only hit this route when logged in.
    if login_status():
        return session["user"]
@app.route("/events")
def events():
    # Serve the events snapshot file verbatim as JSON.
    try:
        with open('events.json') as data_file:
            events = json.load(data_file)
        response = jsonify(events)
        return response
    # Narrowed from a bare except: only a missing/unreadable file or
    # malformed JSON yields the fallback; other errors surface normally.
    except (IOError, ValueError):
        return "File not found"
@app.route("/stats")
def stats():
    # Serve the stats snapshot file verbatim as JSON.
    try:
        with open('stats.json') as data_file:
            stats = json.load(data_file)
        response = jsonify(stats)
        return response
    # Narrowed from a bare except: only a missing/unreadable file or
    # malformed JSON yields the fallback; other errors surface normally.
    except (IOError, ValueError):
        return "File not found"
# Getting data
# END BLOCK
# START BLOCK
# Spawnmaster screen
@app.route("/masterlogin")
def masterLoginTemplate():
    # Login form for the spawnmaster.
    return render_template("m_auth")
def masterView():
    # Spawnmaster dashboard: all rounds plus players without a team.
    # NOTE(review): `players` is unpacked but never used by the template.
    players, teamless = Stats.playersDetailed()
    # Debug print of the teamless players -- consider removing.
    for person in teamless:
        print(person)
    rounds = Round.getRounds()
    return render_template("master", rounds=rounds, teamless=teamless)
def isMaster():
    """Return True when the current session is an authenticated spawnmaster.

    Idiom fix: ``session.get("master") == 1`` reproduces the original
    try/except KeyError logic (missing key -> False) in one expression.
    """
    return session.get("master") == 1
@app.route("/spawn")
def spawnmaster():
    """Show the spawnmaster dashboard, or the master login form when the
    session is not authenticated as spawnmaster."""
    if not isMaster():
        return masterLoginTemplate()
    return masterView()
@app.route("/login", methods=["GET"])
def masterLogin():
    # Authenticate the spawnmaster from ?user=...&pw=... query parameters.
    # On success, mark the session and show the dashboard; otherwise deny.
    try:
        _user = request.args.get("user")
        _pw = request.args.get("pw")
        acc = Spawn.login()
        if _user == acc["name"][0] and _pw == acc["pw"][0]:
            session["master"] = 1
            return spawnmaster()
        else:
            # Bug fix: message previously read "Connection foridden".
            return "Connection forbidden"
    # Narrowed from a bare except; any lookup failure denies access.
    except Exception:
        return "Connection forbidden"
@app.route("/masterout")
def masterLogout():
    # Log the spawnmaster out by dropping the whole session.
    session.clear()
    # Bug fix: message previously read "Spanwmaster".
    return "Spawnmaster has logged out"
@app.route("/s")
def hue():
    # Render the combined statistics view from the two JSON snapshot files.
    with open('events.json') as data_file:
        events = json.load(data_file)
    with open('stats.json') as data_file:
        stats = json.load(data_file)
    return render_template("stats", events = events, stats = stats)
# Spawnmaster screen
# END BLOCK
# START BLOCK
# Spawnmaster's actions
# Adding a new round
@app.route("/addRound", methods=["GET"])
def startRound():
    # Create a new round from query parameters:
    #   roundName   -- display name
    #   roundLength -- duration in minutes
    #   startsIn    -- minutes until the round begins
    roundName = request.args.get("roundName")
    roundLength = request.args.get("roundLength")
    startsIn = request.args.get("startsIn")
    # Bug fix: validate presence first -- int(None) raises an uncaught
    # TypeError (not ValueError), so the missing-parameter check must run
    # before the integer-format check.
    if not roundName or not roundLength or not startsIn:
        return "Puudulik info uue roundi jaoks."
    try:
        int(roundLength)
        int(startsIn)
    except ValueError:
        return "Round length and starttime has to be entered as integers."
    startTime = datetime.datetime.now() + datetime.timedelta(seconds = int(startsIn) * 60)
    endTime = startTime + datetime.timedelta(seconds = int(roundLength) * 60)
    # NOTE(review): `datetime` and `dateformat` are not imported in this
    # file's visible header -- presumably supplied by a wildcard engine
    # import; confirm.
    startTimeString = format(startTime, dateformat)
    endTimeString = format(endTime, dateformat)
    if Round.add(roundName, startTimeString, endTimeString):
        return "New round \"" + roundName + "\" start time " + startTimeString + ", end time " + endTimeString + "."
    else:
        return "Error: New round has overlapping time. not added: \"" + roundName + "\" start time " + startTimeString + ", end time " + endTimeString + "."
# Adding player to a team in a round
@app.route("/addToTeam", methods = ["GET"])
def addToTeam():
    # Assign a player to a round; both ids arrive as query parameters.
    roundId = request.args.get("roundId")
    playerId = request.args.get("playerId")
    if roundId and playerId:
        try:
            # NOTE(review): `team` is presumably the engine.team module
            # pulled in via the wildcard import -- confirm.
            team.add(playerId, roundId)
            return "Player " + Player.getNameById(playerId) + " added to round" + roundId
        # NOTE(review): bare except also hides unrelated failures (DB
        # errors etc.) behind the "invalid values" message.
        except:
            return "Round or player id were given as invalid values."
    else:
        return "Missing round or player id."
# Spawnmaster's actions
# END BLOCK
# START BLOCK
# Routes to player screen templates
@app.route("/tagging")
def tagging():
    """Demo player screen while playing, rendered with placeholder data."""
    user = "rjurik"
    score = 520
    rank = 3
    team = 2
    team = "blue" if team == 1 else "red"
    return render_template("playerPlaying.html", user=user, team=team, score=score, rank=rank)
@app.route("/tagged")
def tagged():
    """Demo player screen after being tagged, rendered with placeholder data."""
    user = "rjurik"
    score = 520
    rank = 3
    tagger = "LOLer"
    team = 1
    team = "blue" if team == 1 else "red"
    return render_template("playerTagged.html", team=team, tagger=tagger, user=user, score=score, rank=rank)
# Routes to player screen templates
# END BLOCK
# Start program
try:
    connection = connect.connectDB()
except Exception:
    # Bug fix: swallowing the error here made the next line fail with a
    # confusing NameError on `connection`; log, then re-raise the cause.
    print("Problem with the database connection")
    raise
cursor = connection.cursor()
Action.initAllConnect(cursor)
Round.updateActiveId()
if __name__ == "__main__":
    app.run(debug=True)
|
"""Iterables of DOT source code lines (including final newline)."""
import typing
from . import copying
__all__ = ['Base']
class LineIterator:
    """Iterable over the lines of generated DOT source code."""
    def __iter__(self) -> typing.Iterator[str]:
        r"""Yield the generated DOT source one line at a time.

        Yields: Each line, terminated with a newline (``'\n'``).
        """
        message = 'to be implemented by concrete subclasses'
        raise NotImplementedError(message)
# Common base interface for all exposed classes
class Base(LineIterator, copying.Copy):
    """LineIterator with ``.source`` attribute, that it returns for ``str()``."""
    @property
    def source(self) -> str:
        # Concrete subclasses must return the generated DOT source here.
        raise NotImplementedError('to be implemented by concrete subclasses')
    def __str__(self):
        """The DOT source code as string."""
        return self.source
Rename the base class from LineIterator to LineIterable.
"""Iterables of DOT source code lines (including final newline)."""
import typing
from . import copying
__all__ = ['Base']
class LineIterable:
    """Iterable over generated DOT source code lines
    (mimics ``file`` objects in text mode)."""
    def __iter__(self) -> typing.Iterator[str]:
        r"""Yield the generated DOT source one line at a time.

        Yields: Each line, terminated with a newline (``'\n'``).
        """
        message = 'to be implemented by concrete subclasses'
        raise NotImplementedError(message)
# Common base interface for all exposed classes
class Base(LineIterable, copying.Copy):
    """LineIterable with ``.source`` attribute, that it returns for ``str()``."""
    @property
    def source(self) -> str:
        # Concrete subclasses must return the generated DOT source here.
        raise NotImplementedError('to be implemented by concrete subclasses')
    def __str__(self):
        """The DOT source code as string."""
        return self.source
|
from flask import Flask, jsonify, abort, request
from goshna import *
from goshna import ApiFunctions, DisplayFlight
from datetime import date, time;
class Flight:
    """In-memory representation of one row of the flights table."""
    def __init__(self, id, date, airline_id, source_airport_id, dest_airport_id, flight_number, departure_time):
        # Field names mirror the database column names.
        self.id = id
        self.date = date
        self.airline_id = airline_id
        self.source_airport_id = source_airport_id
        self.dest_airport_id = dest_airport_id
        self.flight_number = flight_number
        self.departure_time = departure_time
    def to_json(self):
        """Return the flight as a plain dict suitable for jsonify()."""
        return dict(
            id=self.id,
            date=self.date,
            airline_id=self.airline_id,
            source_airport_id=self.source_airport_id,
            dest_airport_id=self.dest_airport_id,
            flight_number=self.flight_number,
            departure_time=self.departure_time,
        )
@app.route('/goshna/api/flights', methods=['POST'])
def create_flight():
    # Create a flight from the JSON request body; 400 when any required
    # field is missing, otherwise 201 with the new row id.
    if not request.json or not 'airline_id' in request.json or not 'source_airport_id' in request.json or not 'dest_airport_id' in request.json or not 'flight_number' in request.json or not 'departure_time' in request.json or not 'date' in request.json:
        abort(400)
    date = request.json['date']
    airline_id = request.json['airline_id']
    source_airport_id = request.json['source_airport_id']
    dest_airport_id = request.json['dest_airport_id']
    flight_number = request.json['flight_number']
    departure_time = request.json['departure_time']
    # Values are bound positionally; NULL is the id column.
    result = ApiFunctions.post_db("INSERT INTO flights VALUES (NULL, ?, ?, ?, ?, ?, ?)", [date, airline_id, source_airport_id, dest_airport_id, flight_number, departure_time]);
    # NOTE(review): `c` is not defined in this module -- presumably a
    # cursor exported by `goshna`; confirm, otherwise this raises NameError.
    inserted_id = c.lastrowid
    print u'Inserted new flight at row ' + str(inserted_id)
    return jsonify({'id': str(inserted_id)}), 201
@app.route('/goshna/api/flights/find', methods=['POST'])
def find_flights():
    # Search flights; requires airline_id and airport_id in the JSON body.
    if not request.json or not 'airline_id' in request.json or not 'airport_id' in request.json:
        abort(400)
    airline_id = request.json['airline_id']
    airport_id = request.json['airport_id']
    # NOTE(review): the two filter values above are read but never applied;
    # the query below returns *every* flight. Presumably filtering was
    # intended -- confirm.
    flights = []
    results = ApiFunctions.query_db("SELECT * FROM flights")
    # N+1 query pattern: three extra lookups per flight row.
    for row in results:
        airline = ApiFunctions.query_db("SELECT * FROM airlines WHERE id = ?", [row['airline_id']], one=True)
        dest_airport = ApiFunctions.query_db("SELECT * FROM airports WHERE id = ?", [row['dest_airport_id']], one=True)
        source_airport = ApiFunctions.query_db("SELECT * FROM airports WHERE id = ?", [row['source_airport_id']], one=True)
        flight = DisplayFlight.DisplayFlight(
            row['id'],
            row['date'],
            airline['airline_short'],
            source_airport['airport_short'],
            dest_airport['airport_short'],
            row['flight_number'],
            row['departure_time'],
            row['airline_id'],
            row['dest_airport_id'],
            row['source_airport_id']
        )
        flights.append(flight.to_json())
    return jsonify({'flights': flights})
@app.route('/goshna/api/flights', methods=['GET'])
def get_flights():
    """Return every row of the flights table as a JSON list."""
    rows = ApiFunctions.query_db("SELECT * FROM flights")
    flights = [
        Flight(
            row['id'],
            row['date'],
            row['airline_id'],
            row['source_airport_id'],
            row['dest_airport_id'],
            row['flight_number'],
            row['departure_time'],
        ).to_json()
        for row in rows
    ]
    return jsonify({'flights': flights})
@app.route('/goshna/api/flights/<int:flight_id>', methods=['GET'])
def get_flight(flight_id):
    # Return one flight as JSON, or 404 when no row has that id.
    row = ApiFunctions.query_db("SELECT * FROM flights WHERE id = ?", [flight_id], one=True)
    if row is None:
        abort(404)
    flight = Flight(
        row['id'],
        row['date'],
        row['airline_id'],
        row['source_airport_id'],
        row['dest_airport_id'],
        row['flight_number'],
        row['departure_time']
    )
    return jsonify({'flight': flight.to_json()})
@app.route('/goshna/api/flights/<int:flight_id>', methods=['DELETE'])
def delete_flight(flight_id):
ApiFunctions.post_db("DELETE FROM flights WHERE id=?", [flight_id])
print u'Deleted flight with ID ' + str(inserted_id)
return jsonify({'result': True})
Added optional parameters to the find_flights() function
# Please enter the commit message for your changes. Lines starting
# with '#' will be ignored, and an empty message aborts the commit.
# On branch master
# Your branch is up-to-date with 'origin/master'.
#
# Changes to be committed:
# modified: Flight.py
#
# Changes not staged for commit:
# modified: goshna.sqlite
#
# Untracked files:
# .Flight.py.swp
#
# ------------------------ >8 ------------------------
# Do not touch the line above.
# Everything below will be removed.
diff --git a/goshna/Flight.py b/goshna/Flight.py
index cf36ff8..8ba78ea 100644
--- a/goshna/Flight.py
+++ b/goshna/Flight.py
@@ -44,14 +44,36 @@ class Flight:
@app.route('/goshna/api/flights/find', methods=['POST'])
def find_flights():
- if not request.json or not 'airline_id' in request.json or not 'airport_id' in request.json:
- abort(400)
+ airline_id = 0
+ airport_id = 0
- airline_id = request.json['airline_id']
- airport_id = request.json['airport_id']
+ if request.json and 'airline_id' in request.json:
+ airline_id = request.json['airline_id']
+
+ if request.json and 'airport_id' in request.json:
+ airport_id = request.json['airport_id']
flights = []
- results = ApiFunctions.query_db("SELECT * FROM flights")
+
+ if(airline_id == 0):
+
+ # Airport and Airline are 'ALL'
+ if(airport_id == 0):
+ results = ApiFunctions.query_db("SELECT * FROM flights", [])
+
+ # Airline is 'ALL'
+ else:
+ results = ApiFunctions.query_db("SELECT * FROM flights where source_airport_id=?", [airport_id])
+
+ # Airport is 'ALL'
+ elif(airport_id == 0):
+ results = ApiFunctions.query_db("SELECT * FROM flights where airline_id=?", [airline_id])
+
+ # Neither is 'ALL'
+ else:
+ results = ApiFunctions.query_db("SELECT * FROM flights where airline_id=? and source_airport_id=?", [airline_id, airport_id])
+
+
for row in results:
airline = ApiFunctions.query_db("SELECT * FROM airlines WHERE id = ?", [row['airline_id']], one=True)
dest_airport = ApiFunctions.query_db("SELECT * FROM airports WHERE id = ?", [row['dest_airport_id']], one=True)
from flask import Flask, jsonify, abort, request
from goshna import *
from goshna import ApiFunctions, DisplayFlight
from datetime import date, time;
class Flight:
def __init__(self, id, date, airline_id, source_airport_id, dest_airport_id, flight_number, departure_time):
self.id = id
self.date = date;
self.airline_id = airline_id;
self.source_airport_id = source_airport_id;
self.dest_airport_id = dest_airport_id;
self.flight_number = flight_number;
self.departure_time = departure_time;
def to_json(self):
return {
'id': self.id,
'date': self.date,
'airline_id': self.airline_id,
'source_airport_id': self.source_airport_id,
'dest_airport_id': self.dest_airport_id,
'flight_number': self.flight_number,
'departure_time': self.departure_time
}
@app.route('/goshna/api/flights', methods=['POST'])
def create_flight():
if not request.json or not 'airline_id' in request.json or not 'source_airport_id' in request.json or not 'dest_airport_id' in request.json or not 'flight_number' in request.json or not 'departure_time' in request.json or not 'date' in request.json:
abort(400)
date = request.json['date']
airline_id = request.json['airline_id']
source_airport_id = request.json['source_airport_id']
dest_airport_id = request.json['dest_airport_id']
flight_number = request.json['flight_number']
departure_time = request.json['departure_time']
result = ApiFunctions.post_db("INSERT INTO flights VALUES (NULL, ?, ?, ?, ?, ?, ?)", [date, airline_id, source_airport_id, dest_airport_id, flight_number, departure_time]);
inserted_id = c.lastrowid
print u'Inserted new flight at row ' + str(inserted_id)
return jsonify({'id': str(inserted_id)}), 201
@app.route('/goshna/api/flights/find', methods=['POST'])
def find_flights():
    # Search flights, optionally filtered by airline and/or source airport.
    # JSON body keys 'airline_id' and 'airport_id' are optional; 0 (or
    # absence) means 'ALL' for that dimension.
    airline_id = 0
    airport_id = 0
    if request.json and 'airline_id' in request.json:
        airline_id = request.json['airline_id']
    if request.json and 'airport_id' in request.json:
        airport_id = request.json['airport_id']
    # Build the WHERE clause dynamically instead of enumerating all four
    # airline/airport combinations; produces the same SQL as before.
    conditions = []
    params = []
    if airline_id != 0:
        conditions.append("airline_id=?")
        params.append(airline_id)
    if airport_id != 0:
        conditions.append("source_airport_id=?")
        params.append(airport_id)
    query = "SELECT * FROM flights"
    if conditions:
        query += " where " + " and ".join(conditions)
    results = ApiFunctions.query_db(query, params)
    flights = []
    # N+1 query pattern: three extra lookups per flight row.
    for row in results:
        airline = ApiFunctions.query_db("SELECT * FROM airlines WHERE id = ?", [row['airline_id']], one=True)
        dest_airport = ApiFunctions.query_db("SELECT * FROM airports WHERE id = ?", [row['dest_airport_id']], one=True)
        source_airport = ApiFunctions.query_db("SELECT * FROM airports WHERE id = ?", [row['source_airport_id']], one=True)
        flight = DisplayFlight.DisplayFlight(
            row['id'],
            row['date'],
            airline['airline_short'],
            source_airport['airport_short'],
            dest_airport['airport_short'],
            row['flight_number'],
            row['departure_time'],
            row['airline_id'],
            row['dest_airport_id'],
            row['source_airport_id']
        )
        flights.append(flight.to_json())
    return jsonify({'flights': flights})
@app.route('/goshna/api/flights', methods=['GET'])
def get_flights():
flights = []
results = ApiFunctions.query_db("SELECT * FROM flights")
for row in results:
flight = Flight(
row['id'],
row['date'],
row['airline_id'],
row['source_airport_id'],
row['dest_airport_id'],
row['flight_number'],
row['departure_time']
)
flights.append(flight.to_json())
return jsonify({'flights': flights})
@app.route('/goshna/api/flights/<int:flight_id>', methods=['GET'])
def get_flight(flight_id):
row = ApiFunctions.query_db("SELECT * FROM flights WHERE id = ?", [flight_id], one=True)
if row is None:
abort(404)
flight = Flight(
row['id'],
row['date'],
row['airline_id'],
row['source_airport_id'],
row['dest_airport_id'],
row['flight_number'],
row['departure_time']
)
return jsonify({'flight': flight.to_json()})
@app.route('/goshna/api/flights/<int:flight_id>', methods=['DELETE'])
def delete_flight(flight_id):
ApiFunctions.post_db("DELETE FROM flights WHERE id=?", [flight_id])
print u'Deleted flight with ID ' + str(inserted_id)
return jsonify({'result': True})
|
from math import floor
from decimal import Decimal
import settings
def format_timedelta(value, time_format="{days} days, {hours2}:{minutes2}:{seconds2}"):
    """Format *value* (a timedelta or a number of seconds) via *time_format*.

    The format string may reference per-unit remainders (``seconds``,
    ``minutes``, ``hours``, ``days``, ``years``, plus zero-padded
    ``seconds2``/``minutes2``/``hours2``) and running totals
    (``seconds_total`` ... ``years_total``).
    """
    if hasattr(value, 'seconds'):
        total_seconds = value.seconds + value.days * 24 * 3600
    else:
        total_seconds = int(value)
    # Successive divmods peel off one unit at a time; a year is a flat
    # 365 days here (no leap handling).
    minutes_total, seconds = divmod(total_seconds, 60)
    hours_total, minutes = divmod(minutes_total, 60)
    days_total, hours = divmod(hours_total, 24)
    years_total, days = divmod(days_total, 365)
    return time_format.format(**{
        'seconds': seconds,
        'seconds2': str(seconds).zfill(2),
        'minutes': minutes,
        'minutes2': str(minutes).zfill(2),
        'hours': hours,
        'hours2': str(hours).zfill(2),
        'days': days,
        'years': years_total,
        'seconds_total': total_seconds,
        'minutes_total': minutes_total,
        'hours_total': hours_total,
        'days_total': days_total,
        'years_total': years_total,
    })
def parseAmount(amountStr):
    # Parse a monetary amount into a Decimal, enforcing a positive value of
    # at least one cent and no sub-cent digits.
    # NOTE(review): Decimal() raises decimal.InvalidOperation (not
    # ValueError) on non-numeric input, so callers catching ValueError
    # will not see that case -- confirm intended behavior.
    d = Decimal(amountStr)
    if d < settings.SMALLESTAMOUNT:
        raise ValueError("Invalid amount. The amount needs to be a positive number at least equal to one cent.")
    if d.remainder_near(settings.SMALLESTAMOUNT) != 0:
        raise ValueError("Invalid amount. The amount parameter should not contain sub-cent digits.")
    return d
Added regex matching
from math import floor
from decimal import Decimal
import settings
import re
r = re.compile(r"^Y?\d+(\.\d{2})?$")
def format_timedelta(value, time_format="{days} days, {hours2}:{minutes2}:{seconds2}"):
if hasattr(value, 'seconds'):
seconds = value.seconds + value.days * 24 * 3600
else:
seconds = int(value)
seconds_total = seconds
minutes = int(floor(seconds / 60))
minutes_total = minutes
seconds -= minutes * 60
hours = int(floor(minutes / 60))
hours_total = hours
minutes -= hours * 60
days = int(floor(hours / 24))
days_total = days
hours -= days * 24
years = int(floor(days / 365))
years_total = years
days -= years * 365
return time_format.format(**{
'seconds': seconds,
'seconds2': str(seconds).zfill(2),
'minutes': minutes,
'minutes2': str(minutes).zfill(2),
'hours': hours,
'hours2': str(hours).zfill(2),
'days': days,
'years': years,
'seconds_total': seconds_total,
'minutes_total': minutes_total,
'hours_total': hours_total,
'days_total': days_total,
'years_total': years_total,
})
def parseAmount(amountStr):
    # Parse a monetary amount ("12", "12.34", optionally prefixed with "Y")
    # into a Decimal of at least one cent with no sub-cent digits.
    # Raises ValueError on any malformed or out-of-range input.
    if len(r.findall(amountStr)) != 1:
        raise ValueError("Invalid amount. The amount needs to be a positive number at least equal to one cent. You can write it with or without a leading Y, with two or zero decimal places.")
    # Bug fix: the regex accepts an optional leading "Y", but Decimal()
    # cannot parse it (decimal.InvalidOperation), so strip it first.
    if amountStr.startswith("Y"):
        amountStr = amountStr[1:]
    d = Decimal(amountStr)
    if d < settings.SMALLESTAMOUNT:
        raise ValueError("Invalid amount. The amount needs to be a positive number at least equal to one cent.")
    if d.remainder_near(settings.SMALLESTAMOUNT) != 0:
        raise ValueError("Invalid amount. The amount parameter should not contain sub-cent digits.")
    return d
#!/usr/bin/python
# Based on the topo.py script from the P4 tutorial
# Copyright 2013-present Barefoot Networks, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from mininet.net import Mininet
from mininet.topo import Topo
from mininet.log import setLogLevel, info
from mininet.cli import CLI
from mininet.link import TCLink
import json
from p4_mininet import P4Switch, P4Host
import argparse
from time import sleep
import os
import signal
import subprocess
import time
_THIS_DIR = os.path.dirname(os.path.realpath(__file__))
_THRIFT_BASE_PORT = 22222
parser = argparse.ArgumentParser(description='Mininet demo')
parser.add_argument('--behavioral-exe', help='Path to behavioral executable',
type=str, action="store", required=True)
parser.add_argument('--spec', help='Path to NetKAT+ spec file',
type=str, action="store", required=True)
parser.add_argument('--cfg', help='Path to NetKAT+ config file',
type=str, action="store", required=True)
parser.add_argument('--cli', help='Path to BM CLI',
type=str, action="store", required=True)
parser.add_argument('--nkp', help='Path to NetKAT+ compiler',
type=str, action="store", required=True)
parser.add_argument('--p4c', help='Path to P4C-to-json compiler',
type=str, action="store", required=True)
args = parser.parse_args()
class MyTopo(Topo):
def __init__(self, sw_path, topology, netname, netdir, **opts):
# Initialize topology and default options
Topo.__init__(self, **opts)
thrift_port = _THRIFT_BASE_PORT
for sw in topology['switches']:
hostname = sw['opts']['hostname']
switch = self.addSwitch(hostname,
sw_path = sw_path,
json_path = os.path.join(netdir, netname) + '.' + hostname + '.' + 'json',
thrift_port = _THRIFT_BASE_PORT + sw['opts']['nodeNum'],
pcap_dump = True,
device_id = sw['opts']['nodeNum'])
for h in topology['hosts']:
host = self.addHost(h['opts']['hostname'])
for link in topology['links']:
self.addLink(link['src'], link['dest'], port1 = link['srcport'], port2 = link['destport'])
def updateConfig(nkp, loadedTopology, oldts):
    # Ask the resident NetKAT+ compiler to regenerate switch configs, then
    # push any config files whose mtime is newer than `oldts`.
    # send signal to the netkat+ process
    nkp.send_signal(signal.SIGHUP)
    # read output until magic line appears
    # NOTE(review): file iteration yields lines WITH their '\n', so this
    # equality never matches and the loop only ends at EOF -- verify.
    for line in nkp.stdout:
        print "netkat+: " + line
        if line == "Network configuration complete":
            break
    if nkp.poll() != None:
        raise Exception(args.nkp + " terminated with error code " + str(nkp.returncode))
    # re-apply switch configuration files whose timestamps are newer than the previous timestamp
    for sw in loadedTopology['switches']:
        hostname = sw['opts']['hostname']
        # NOTE(review): `netdir` and `netname` are locals of main() and are
        # not visible here -- this raises NameError when reached.
        cmd = [args.cli, "--json", os.path.join(netdir, netname) + '.' + hostname + '.' + 'json',
            "--thrift-port", str(_THRIFT_BASE_PORT + sw['opts']['nodeNum'])]
        swcfgpath = os.path.join(netdir, netname) + '.' + hostname + '.' + 'txt'
        if os.path.getmtime(swcfgpath) > oldts:
            with open(swcfgpath, "r") as f:
                print " ".join(cmd)
                try:
                    # Clear the switch's runtime state first...
                    p = subprocess.Popen(cmd, stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
                    output = p.communicate("reset_state")
                    print output
                    # ...then stream the fresh configuration file into it.
                    output = subprocess.check_output(cmd, stdin = f)
                    print output
                except subprocess.CalledProcessError as e:
                    print e
                    print e.output
    sleep(1)
def main():
oldts = time.time()
# Start the NetKAT+ process. Wait for it to generate network topology,
# and leave it running for future network updates
cmd = [args.nkp, args.spec]
print " ".join(cmd)
nkp = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
for line in nkp.stdout:
print "netkat+: " + line
if line == "Network generation complete":
break
if nkp.poll() != None:
raise Exception(args.nkp + " terminated with error code " + str(nkp.returncode))
specdir, specfname = os.path.split(args.spec)
netname, specext = os.path.splitext(specfname)
netdir = os.path.join(specdir, specname)
mnfname = os.path.join(netdir, netname+".mn")
print "Loading network topology from " + mnfname
mnfile = open(mnfname, "r")
loadedTopology = json.load(mnfile)
# convert .p4 switches to json
for sw in loadedTopology['switches']:
hostname = sw['opts']['hostname']
cmd = [args.p4c,
os.path.join(netdir, netname) + '.' + hostname + '.' + 'p4',
"--json", os.path.join(netdir, netname) + '.' + hostname + '.' + 'json']
print " ".join(cmd)
try:
output = subprocess.check_output(cmd)
print output
except subprocess.CalledProcessError as e:
print e
# build mininet topology
topo = MyTopo(args.behavioral_exe, loadedTopology, netname, netdir)
net = Mininet(topo = topo,
host = P4Host,
switch = P4Switch,
controller = None )
net.start()
# configure hosts
for n in loadedTopology['hosts']:
hostname = n['opts']['hostname']
h = net.get(hostname)
ip = n['opts']['ip4']
for off in ["rx", "tx", "sg"]:
cmd = "/sbin/ethtool --offload eth0 %s off" % off
print cmd
h.cmd(cmd)
print hostname + ": set IP address " + ip
h.cmd("ifconfig eth0 " + ip)
print "disable ipv6"
h.cmd("sysctl -w net.ipv6.conf.all.disable_ipv6=1")
h.cmd("sysctl -w net.ipv6.conf.default.disable_ipv6=1")
h.cmd("sysctl -w net.ipv6.conf.lo.disable_ipv6=1")
h.cmd("sysctl -w net.ipv4.tcp_congestion_control=reno")
h.cmd("iptables -I OUTPUT -p icmp --icmp-type destination-unreachable -j DROP")
sleep(1)
while True:
newts = time.time()
updateConfig(nkp, loadedTopology, oldts)
oldts = newts
CLI( net )
net.stop()
if __name__ == '__main__':
setLogLevel( 'info' )
main()
Correctly read the netkat+ process output in run_network.py.
#!/usr/bin/python
# Based on the topo.py script from the P4 tutorial
# Copyright 2013-present Barefoot Networks, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from mininet.net import Mininet
from mininet.topo import Topo
from mininet.log import setLogLevel, info
from mininet.cli import CLI
from mininet.link import TCLink
import json
from p4_mininet import P4Switch, P4Host
import argparse
from time import sleep
import os
import signal
import subprocess
import time
import sys
_THIS_DIR = os.path.dirname(os.path.realpath(__file__))
_THRIFT_BASE_PORT = 22222
parser = argparse.ArgumentParser(description='Mininet demo')
parser.add_argument('--behavioral-exe', help='Path to behavioral executable',
type=str, action="store", required=True)
parser.add_argument('--spec', help='Path to NetKAT+ spec file',
type=str, action="store", required=True)
parser.add_argument('--cfg', help='Path to NetKAT+ config file',
type=str, action="store", required=True)
parser.add_argument('--cli', help='Path to BM CLI',
type=str, action="store", required=True)
parser.add_argument('--nkp', help='Path to NetKAT+ compiler',
type=str, action="store", required=True)
parser.add_argument('--p4c', help='Path to P4C-to-json compiler',
type=str, action="store", required=True)
args = parser.parse_args()
class MyTopo(Topo):
    # Mininet topology built from the NetKAT+-generated JSON description:
    # one P4 switch per entry in topology['switches'], one host per entry
    # in topology['hosts'], plus the listed links.
    def __init__(self, sw_path, topology, netname, netdir, **opts):
        # Initialize topology and default options
        Topo.__init__(self, **opts)
        # NOTE(review): `thrift_port` is assigned but never used; each
        # switch derives its own port from its nodeNum below.
        thrift_port = _THRIFT_BASE_PORT
        for sw in topology['switches']:
            hostname = sw['opts']['hostname']
            # Each switch loads its own compiled JSON program and gets a
            # unique thrift port / device id offset by its nodeNum.
            switch = self.addSwitch(hostname,
                                sw_path = sw_path,
                                json_path = os.path.join(netdir, netname) + '.' + hostname + '.' + 'json',
                                thrift_port = _THRIFT_BASE_PORT + sw['opts']['nodeNum'],
                                pcap_dump = True,
                                device_id = sw['opts']['nodeNum'])
        for h in topology['hosts']:
            host = self.addHost(h['opts']['hostname'])
        for link in topology['links']:
            self.addLink(link['src'], link['dest'], port1 = link['srcport'], port2 = link['destport'])
def updateConfig(nkp, loadedTopology):
    # Signal the resident NetKAT+ compiler to regenerate the network
    # configuration and wait until it reports completion.
    # send signal to the netkat+ process
    nkp.send_signal(signal.SIGHUP)
    # read output until magic line appears
    while True:
        line = nkp.stdout.readline()
        sys.stdout.write("netkat+: " + line)
        # readline() keeps the trailing newline, hence the "\n" in the match.
        if line == "Network configuration complete\n":
            break
        if nkp.poll() != None:
            raise Exception(args.nkp + " terminated with error code " + str(nkp.returncode))
def applyConfig(loadedTopology, netdir, netname, oldts):
    # Push each switch's config file to the switch via the BM CLI, but only
    # when the file changed since `oldts`.
    # re-apply switch configuration files whose timestamps are newer than the previous timestamp
    for sw in loadedTopology['switches']:
        hostname = sw['opts']['hostname']
        cmd = [args.cli, "--json", os.path.join(netdir, netname) + '.' + hostname + '.' + 'json',
            "--thrift-port", str(_THRIFT_BASE_PORT + sw['opts']['nodeNum'])]
        swcfgpath = os.path.join(netdir, netname) + '.' + hostname + '.' + 'txt'
        if os.path.getmtime(swcfgpath) > oldts:
            with open(swcfgpath, "r") as f:
                print " ".join(cmd)
                try:
                    # Clear the switch's runtime state first...
                    p = subprocess.Popen(cmd, stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
                    output = p.communicate("reset_state")
                    print output
                    # ...then stream the fresh configuration file into it.
                    output = subprocess.check_output(cmd, stdin = f)
                    print output
                except subprocess.CalledProcessError as e:
                    print e
                    print e.output
    sleep(1)
def main():
    """Drive the full pipeline: run NetKAT+, compile the generated P4
    programs to JSON, build/start the Mininet network, configure hosts,
    apply switch configs, then drop into the Mininet CLI.

    (Python 2 module: uses print statements.)
    """
    oldts = time.time()
    # Start the NetKAT+ process. Wait for it to generate network topology,
    # and leave it running for future network updates
    cmd = [args.nkp, args.spec, args.cfg]
    print " ".join(cmd)
    nkp = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
    while True:
        line = nkp.stdout.readline() # This blocks until it receives a newline.
        sys.stdout.write("netkat+: " + line)
        if line == "Network generation complete\n":
            break
        # NOTE(review): `is not None` would be the idiomatic comparison here.
        if nkp.poll() != None:
            raise Exception(args.nkp + " terminated with error code " + str(nkp.returncode))
    # Derive output names from the spec path: <specdir>/<netname>/<netname>.*
    specdir, specfname = os.path.split(args.spec)
    netname, specext = os.path.splitext(specfname)
    netdir = os.path.join(specdir, netname)
    mnfname = os.path.join(netdir, netname+".mn")
    print "Loading network topology from " + mnfname
    # NOTE(review): mnfile is never closed; harmless for a short-lived
    # script but a `with` block would be cleaner.
    mnfile = open(mnfname, "r")
    loadedTopology = json.load(mnfile)
    # convert .p4 switches to json
    for sw in loadedTopology['switches']:
        hostname = sw['opts']['hostname']
        cmd = [args.p4c,
               os.path.join(netdir, netname) + '.' + hostname + '.' + 'p4',
               "--json", os.path.join(netdir, netname) + '.' + hostname + '.' + 'json']
        print " ".join(cmd)
        try:
            output = subprocess.check_output(cmd)
            print output
        except subprocess.CalledProcessError as e:
            # Best effort: report compile failure and keep going.
            print e
    # build mininet topology
    topo = MyTopo(args.behavioral_exe, loadedTopology, netname, netdir)
    net = Mininet(topo = topo,
                  host = P4Host,
                  switch = P4Switch,
                  controller = None )
    net.start()
    # configure hosts
    for n in loadedTopology['hosts']:
        hostname = n['opts']['hostname']
        h = net.get(hostname)
        ip = n['opts']['ip4']
        # Disable NIC offloads so the software switch sees real packets.
        for off in ["rx", "tx", "sg"]:
            cmd = "/sbin/ethtool --offload eth0 %s off" % off
            print cmd
            h.cmd(cmd)
        print hostname + ": set IP address " + ip
        h.cmd("ifconfig eth0 " + ip)
        print "disable ipv6"
        h.cmd("sysctl -w net.ipv6.conf.all.disable_ipv6=1")
        h.cmd("sysctl -w net.ipv6.conf.default.disable_ipv6=1")
        h.cmd("sysctl -w net.ipv6.conf.lo.disable_ipv6=1")
        h.cmd("sysctl -w net.ipv4.tcp_congestion_control=reno")
        h.cmd("iptables -I OUTPUT -p icmp --icmp-type destination-unreachable -j DROP")
    sleep(1)
    # NOTE(review): this newts/oldts round-trip is a no-op left over from the
    # commented-out update loop below.
    newts = oldts
    applyConfig(loadedTopology, netdir, netname, oldts)
    oldts = newts
    # while True:
    #     updateConfig(nkp, loadedTopology)
    #     newts = time.time()
    #     applyConfig(loadedTopology, netdir, netname, oldts)
    #     oldts = newts
    CLI( net )
    net.stop()
# Script entry point: enable Mininet info-level logging and run the pipeline.
if __name__ == '__main__':
    setLogLevel( 'info' )
    main()
|
#
# This file is part of Bakefile (http://www.bakefile.org)
#
# Copyright (C) 2012 Vaclav Slavik
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to
# deal in the Software without restriction, including without limitation the
# rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
# sell copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
#
"""
Toolsets for Visual Studio 2003, 2005 and 2008.
"""
from bkl.plugins.vsbase import *
from bkl.expr import concat
# Misc constants for obscure numbers in the output format:

# <Configuration> ConfigurationType values:
typeApplication = 1
typeDynamicLibrary = 2
typeStaticLibrary = 4
# VCLinkerTool LinkIncremental values:
linkIncrementalDefault = 0
linkIncrementalNo = 1
linkIncrementalYes = 2
# VCLinkerTool OptimizeReferences values:
optReferencesDefault = 0
optNoReferences = 1
optReferences = 2
# VCLinkerTool EnableCOMDATFolding values:
optFoldingDefault = 0
optNoFolding = 1
optFolding = 2
# VCCLCompilerTool Optimization values:
optimizeDisabled = 0
optimizeMinSpace = 1
optimizeMaxSpeed = 2
optimizeFull = 3
optimizeCustom = 4
# VCLinkerTool TargetMachine values:
machineX86 = 1
machineARM = 3
machineAMD64 = 17
# VCLinkerTool SubSystem values:
subSystemNotSet = 0
subSystemConsole = 1
subSystemWindows = 2
# VCCLCompilerTool UsePrecompiledHeader values:
pchNone = 0
pchCreateUsingSpecific = 1
pchGenerateAuto = 2
# NOTE(review): name has a typo ("Specificic"); left unchanged in case it
# is referenced elsewhere.
pchUseUsingSpecificic = 3
# VCCLCompilerTool RuntimeLibrary values:
rtMultiThreaded = 0
rtMultiThreadedDebug = 1
rtMultiThreadedDLL = 2
rtMultiThreadedDebugDLL = 3
rtSingleThreaded = 4
rtSingleThreadedDebug = 5
# VCCLCompilerTool DebugInformationFormat values:
debugDisabled = 0
debugOldStyleInfo = 1
debugLineInfoOnly = 2
debugEnabled = 3
debugEditAndContinue = 4

# Character set declared in (and used to encode) generated .vcproj files.
VCPROJ_CHARSET = "Windows-1252"
class VS200xExprFormatter(VSExprFormatter):
    """Expression formatter for VS 200x project files.

    Identical to the base formatter except that literal values have any
    embedded double quotes backslash-escaped, since they end up inside
    quoted XML attribute values.
    """
    def literal(self, e):
        """Return the literal's value with ``"`` escaped as ``\\"``."""
        value = e.value
        if '"' not in value:
            return value
        return value.replace('"', '\\"')
class VS200xXmlFormatter(XmlFormatter):
    """
    XmlFormatter for VS 200x output.
    """
    # VS writes vcproj files with tab indentation.
    indent_step = "\t"

    ExprFormatter = VS200xExprFormatter

    def __init__(self, paths_info):
        super(VS200xXmlFormatter, self).__init__(paths_info, charset=VCPROJ_CHARSET)

    # these are always written as <foo>\n<foo>, not <foo/>
    elems_not_collapsed = set(["References",
                               "Globals",
                               "File",
                               "Filter",
                               "ToolFiles"])

    def format_node(self, name, attrs, text, children_markup, indent):
        """
        Formats given Node instance, indented with *indent* text.

        Content is either *text* or *children_markup*; the other is None.
        """
        s = "%s<%s" % (indent, name)
        # Each attribute goes on its own line, one extra tab in, matching
        # the layout VS itself produces.
        if attrs:
            for key, value in attrs:
                s += "\n%s\t%s=%s" % (indent, key, value)
        if text:
            s += ">%s</%s>\n" % (text, name)
        elif children_markup:
            if attrs:
                s += "\n%s\t" % indent
            s += ">\n%s%s</%s>\n" % (children_markup, indent, name)
        else:
            # Empty element: VS keeps some elements expanded (<x></x>)
            # rather than collapsed (<x/>).
            if name in self.elems_not_collapsed:
                if attrs:
                    s += "\n%s\t" % indent
                s += ">\n%s</%s>\n" % (indent, name)
            else:
                s += "\n%s/>\n" % indent
        return s
# TODO: Put more content into these classes, use them properly
class VS200xProject(VSProjectBase):
    """Minimal description of one generated VS 200x project (name, GUID,
    project file path and its dependencies), as consumed by the solution
    writer."""
    def __init__(self, name, guid, projectfile, deps):
        self.name = name
        self.guid = guid
        self.projectfile = projectfile
        self.dependencies = deps
class VS2003Project(VS200xProject):
    """Project file in Visual Studio 2003 format (version 7.1)."""
    version = 7.1
class VS2005Project(VS200xProject):
    """Project file in Visual Studio 2005 format (version 8)."""
    version = 8
class VS2008Project(VS200xProject):
    """Project file in Visual Studio 2008 format (version 9)."""
    version = 9
class VS2003Solution(VSSolutionBase):
    """Solution file for VS2003; 2003 solutions carry no human-readable
    version comment."""
    format_version = "8.00"
    human_version = None
class VS2005Solution(VSSolutionBase):
    """Solution file for VS2005."""
    format_version = "9.00"
    human_version = "2005"
class VS2008Solution(VSSolutionBase):
    """Solution file for VS2008."""
    format_version = "10.00"
    human_version = "2008"
class VS200xToolsetBase(VSToolsetBase):
    """Base class for VS200{358} toolsets."""

    #: Extension of format files
    proj_extension = "vcproj"

    # TODO: temporary hardcoded configs
    configs = ["Debug", "Release"]

    def gen_for_target(self, target):
        """Write the .vcproj file for *target* and return its Project object.

        Returns None (generating nothing useful) for target types other
        than exe/library/dll.
        """
        projectfile = target["%s.projectfile" % self.name]
        filename = projectfile.as_native_path_for_output(target)
        paths_info = bkl.expr.PathAnchorsInfo(
            dirsep="\\",
            outfile=filename,
            builddir=self.get_builddir_for(target).as_native_path_for_output(target),
            model=target)
        guid = target["%s.guid" % self.name]
        root = Node("VisualStudioProject")
        root["ProjectType"] = "Visual C++"
        root["Version"] = "%.2f" % self.version
        root["Name"] = target.name
        root["ProjectGUID"] = guid
        root["RootNamespace"] = target.name
        root["Keyword"] = "Win32Proj"
        root["TargetFrameworkVersion"] = 196613
        self._add_extra_options_to_node(target, root)
        n_platforms = Node("Platforms")
        n_platforms.add("Platform", Name="Win32")
        root.add(n_platforms)
        root.add(Node("ToolFiles"))
        n_configs = Node("Configurations")
        root.add(n_configs)
        for c in self.configs:
            n = Node("Configuration", Name="%s|Win32" % c)
            n_configs.add(n)
            # TODO: handle the defaults in a nicer way
            if target["outputdir"].as_native_path(paths_info) != paths_info.builddir_abs:
                n["OutputDirectory"] = target["outputdir"]
            else:
                n["OutputDirectory"] = "$(SolutionDir)$(ConfigurationName)"
            n["IntermediateDirectory"] = "$(ConfigurationName)"
            if is_exe(target):
                n["ConfigurationType"] = typeApplication
            elif is_library(target):
                n["ConfigurationType"] = typeStaticLibrary
            elif is_dll(target):
                n["ConfigurationType"] = typeDynamicLibrary
            else:
                # Unsupported target type: bail out without writing anything.
                return None
            if target["win32-unicode"]:
                n["CharacterSet"] = 1
            self._add_extra_options_to_node(target, n)
            # Tools with a matching method below get generated content; the
            # rest are emitted as empty <Tool> placeholder nodes.
            for tool in self.tool_functions:
                if hasattr(self, tool):
                    f_tool = getattr(self, tool)
                    n_tool = f_tool(target, c)
                else:
                    n_tool = Node("Tool", Name=tool)
                if n_tool:
                    self._add_extra_options_to_node(target, n_tool)
                    n.add(n_tool)
        root.add(Node("References"))
        root.add(self.build_files_list(target))
        root.add(Node("Globals"))
        f = OutputFile(filename, EOL_WINDOWS, charset=VCPROJ_CHARSET,
                       creator=self, create_for=target)
        f.write(VS200xXmlFormatter(paths_info).format(root))
        f.commit()
        target_deps = target["deps"].as_py()
        return self.Project(target.name, guid, projectfile, target_deps)

    def VCPreBuildEventTool(self, target, cfg):
        """Emit pre-build commands, CRLF-separated as VS expects."""
        n = Node("Tool", Name="VCPreBuildEventTool")
        n["CommandLine"] = VSList("\r\n", target["pre-build-commands"])
        return n

    def VCPostBuildEventTool(self, target, cfg):
        """Emit post-build commands, CRLF-separated as VS expects."""
        n = Node("Tool", Name="VCPostBuildEventTool")
        n["CommandLine"] = VSList("\r\n", target["post-build-commands"])
        return n

    def VCAppVerifierTool(self, target, cfg):
        """Emit VCAppVerifierTool for executables only."""
        return Node("Tool", Name="VCAppVerifierTool") if is_exe(target) else None

    def VCCLCompilerTool(self, target, cfg):
        """Emit the compiler settings node for configuration *cfg*."""
        n = Node("Tool", Name="VCCLCompilerTool")
        # Currently we don't make any distinction between preprocessor, C
        # and C++ flags as they're basically all the same at MSVS level
        # too and all go into the same place in the IDE and same
        # AdditionalOptions node in the project file.
        all_cflags = VSList(" ", target["compiler-options"],
                            target["c-compiler-options"],
                            target["cxx-compiler-options"])
        all_cflags.append("/MP") # parallel compilation
        n["AdditionalOptions"] = all_cflags
        n["Optimization"] = optimizeMaxSpeed if cfg == "Release" else optimizeDisabled
        if cfg == "Release":
            n["EnableIntrinsicFunctions"] = True
        n["AdditionalIncludeDirectories"] = target["includedirs"]
        n["PreprocessorDefinitions"] = list(target["defines"]) + self.get_std_defines(target, cfg)
        # Runtime library choice follows the CRT linkage plus Debug/Release.
        if target["win32-crt-linkage"] == "dll":
            n["RuntimeLibrary"] = rtMultiThreadedDebugDLL if cfg == "Debug" else rtMultiThreadedDLL
        else:
            n["RuntimeLibrary"] = rtMultiThreadedDebug if cfg == "Debug" else rtMultiThreaded
        if cfg == "Release":
            n["EnableFunctionLevelLinking"] = True
        n["UsePrecompiledHeader"] = pchNone
        n["WarningLevel"] = 3
        n["DebugInformationFormat"] = debugEditAndContinue if cfg == "Debug" else debugEnabled
        return n

    def VCLinkerTool(self, target, cfg):
        """Emit linker settings; static libraries use the librarian instead."""
        if is_library(target):
            return None
        n = Node("Tool", Name="VCLinkerTool")
        n["AdditionalOptions"] = VSList(" ", target["link-options"])
        libs = target["libs"]
        if libs:
            n["AdditionalDependencies"] = VSList(" ", ("%s.lib" % x.as_py() for x in libs))
        # Only emit OutputFile when the basename differs from the default.
        targetname = target[target.type.basename_prop]
        if targetname != target.name:
            n["OutputFile"] = concat("$(OutDir)\\", targetname, ".", target.type.target_file(self, target).get_extension())
        if cfg == "Debug":
            n["LinkIncremental"] = linkIncrementalYes
        else:
            n["LinkIncremental"] = linkIncrementalNo
        # VS: creates debug info for release too; TODO: make this configurable
        n["GenerateDebugInformation"] = True
        if is_exe(target) and target["win32-subsystem"] == "console":
            n["SubSystem"] = subSystemConsole
        else:
            n["SubSystem"] = subSystemWindows
        if cfg == "Release":
            n["OptimizeReferences"] = optReferences
            n["EnableCOMDATFolding"] = optFolding
        n["TargetMachine"] = machineX86
        return n

    def VCLibrarianTool(self, target, cfg):
        """Emit librarian settings; applicable only to static libraries."""
        if not is_library(target):
            return None
        n = Node("Tool", Name="VCLibrarianTool")
        targetname = target[target.type.basename_prop]
        if targetname != target.name:
            n["OutputFile"] = concat("$(OutDir)\\", targetname, ".", target.type.target_file(self, target).get_extension())
        return n

    #: List of functions to call to generate <Configuration> children. Note
    #: that the order is slightly different for different VC versions and not
    #: all nodes are present in all versions.
    tool_functions = [
        "VCPreBuildEventTool",
        "VCCustomBuildTool",
        "VCXMLDataGeneratorTool",
        "VCWebServiceProxyGeneratorTool",
        "VCMIDLTool",
        "VCCLCompilerTool",
        "VCManagedResourceCompilerTool",
        "VCResourceCompilerTool",
        "VCPreLinkEventTool",
        "VCLibrarianTool",
        "VCLinkerTool",
        "VCALinkTool",
        "VCManifestTool",
        "VCXDCMakeTool",
        "VCBscMakeTool",
        "VCFxCopTool",
        "VCAppVerifierTool",
        "VCPostBuildEventTool",
    ]

    def build_files_list(self, target):
        """Build the <Files> section: Source, Header and Resource filters."""
        files = Node("Files")
        # TODO: use groups definition, filter into groups, add Resource Files
        sources = Node("Filter", Name="Source Files")
        sources["Filter"] = "cpp;c;cc;cxx;def;odl;idl;hpj;bat;asm;asmx"
        sources["UniqueIdentifier"] = "{4FC737F1-C7A5-4376-A066-2A32D752A2FF}"
        for sfile in target.sources:
            ext = sfile.filename.get_extension()
            # TODO: share this code with VS2010
            # FIXME: make this more solid
            if ext in ['cpp', 'cxx', 'cc', 'c']:
                sources.add("File", RelativePath=sfile.filename)
            else:
                # Non-C/C++ source: run it through a custom build step that
                # generates a .cpp in the builddir, then compile that.
                # FIXME: handle both compilation into cpp and c files
                genfiletype = bkl.compilers.CxxFileType.get()
                genname = bkl.expr.PathExpr([bkl.expr.LiteralExpr(sfile.filename.get_basename())],
                                            bkl.expr.ANCHOR_BUILDDIR,
                                            pos=sfile.filename.pos).change_extension("cpp")
                ft_from = bkl.compilers.get_file_type(ext)
                compiler = bkl.compilers.get_compiler(self, ft_from, genfiletype)
                n_file = Node("File", RelativePath=sfile.filename)
                sources.add(n_file)
                for cfg in self.configs:
                    n_cfg = Node("FileConfiguration", Name=cfg)
                    tool = Node("Tool", Name="VCCustomBuildTool")
                    tool["CommandLine"] = compiler.commands(self, target, sfile.filename, genname)
                    tool["Outputs"] = genname
                    n_cfg.add(tool)
                    n_file.add(n_cfg)
                sources.add("File", RelativePath=genname)
        files.add(sources)
        headers = Node("Filter", Name="Header Files")
        headers["Filter"] = "h;hpp;hxx;hm;inl;inc;xsd"
        headers["UniqueIdentifier"] = "{93995380-89BD-4b04-88EB-625FBE52EBFB}"
        for sfile in target.headers:
            headers.add("File", RelativePath=sfile.filename)
        files.add(headers)
        resources = Node("Filter", Name="Resource Files")
        resources["Filter"] = "rc;ico;cur;bmp;dlg;rc2;rct;bin;rgs;gif;jpg;jpeg;jpe;resx;tiff;tif;png;wav"
        resources["UniqueIdentifier"] = "{67DA6AB6-F800-4c08-8B7A-83BB121AAD01}"
        files.add(resources)
        return files

    def _add_extra_options_to_node(self, target, node):
        """Add extra native options specified in vs200x.option.* properties."""
        # Scope "" selects root-level options, "Configuration" the per-config
        # ones, and anything else matches the node's Name (a tool name).
        if node.name == "VisualStudioProject":
            scope = ""
        elif node.name == "Configuration":
            scope = "Configuration"
        else:
            scope = node["Name"]
        for key, value in self.collect_extra_options_for_node(target, scope):
            node[key] = value

    def get_builddir_for(self, target):
        """Return the build directory expression for *target*: the project
        file's directory plus a $(ConfigurationName) component."""
        prj = target["%s.projectfile" % self.name]
        # TODO: reference Configuration setting properly, as bkl setting, move this to vsbase
        return bkl.expr.PathExpr(prj.components[:-1] + [bkl.expr.LiteralExpr("$(ConfigurationName)")], prj.anchor)
class VS2008Toolset(VS200xToolsetBase):
    """
    Visual Studio 2008.

    Special properties
    ------------------
    In addition to the properties described below, it's possible to specify any
    of the ``vcproj`` properties directly in a bakefile. To do so, you have to
    set specially named variables on the target.

    The variables are prefixed with ``vs2008.option.``, followed by tool name and
    attribute name. For example:

      - ``vs2008.option.VCCLCompilerTool.EnableFunctionLevelLinking``
      - ``vs2008.option.VCLinkerTool.EnableCOMDATFolding``

    Additionally, the following options are supported for non-tool nodes:

      - ``vs2008.option.*`` (attributes of the root ``VisualStudioProject`` node)
      - ``vs2008.option.Configuration.*`` (``Configuration`` node attributes)

    Examples:

    .. code-block:: bkl

       vs2008.option.VCCLCompilerTool.EnableFunctionLevelLinking = false;
    """
    name = "vs2008"
    version = 9
    proj_versions = [9]
    Solution = VS2008Solution
    Project = VS2008Project
# Add Visual Studio 2005 toolset.
#
# This file is part of Bakefile (http://www.bakefile.org)
#
# Copyright (C) 2012 Vaclav Slavik
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to
# deal in the Software without restriction, including without limitation the
# rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
# sell copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
#
"""
Toolsets for Visual Studio 2003, 2005 and 2008.
"""
from bkl.plugins.vsbase import *
from bkl.expr import concat
# Misc constants for obscure numbers in the output format:

# <Configuration> ConfigurationType values:
typeApplication = 1
typeDynamicLibrary = 2
typeStaticLibrary = 4
# VCLinkerTool LinkIncremental values:
linkIncrementalDefault = 0
linkIncrementalNo = 1
linkIncrementalYes = 2
# VCLinkerTool OptimizeReferences values:
optReferencesDefault = 0
optNoReferences = 1
optReferences = 2
# VCLinkerTool EnableCOMDATFolding values:
optFoldingDefault = 0
optNoFolding = 1
optFolding = 2
# VCCLCompilerTool Optimization values:
optimizeDisabled = 0
optimizeMinSpace = 1
optimizeMaxSpeed = 2
optimizeFull = 3
optimizeCustom = 4
# VCLinkerTool TargetMachine values:
machineX86 = 1
machineARM = 3
machineAMD64 = 17
# VCLinkerTool SubSystem values:
subSystemNotSet = 0
subSystemConsole = 1
subSystemWindows = 2
# VCCLCompilerTool UsePrecompiledHeader values:
pchNone = 0
pchCreateUsingSpecific = 1
pchGenerateAuto = 2
# NOTE(review): name has a typo ("Specificic"); left unchanged in case it
# is referenced elsewhere.
pchUseUsingSpecificic = 3
# VCCLCompilerTool RuntimeLibrary values:
rtMultiThreaded = 0
rtMultiThreadedDebug = 1
rtMultiThreadedDLL = 2
rtMultiThreadedDebugDLL = 3
rtSingleThreaded = 4
rtSingleThreadedDebug = 5
# VCCLCompilerTool DebugInformationFormat values:
debugDisabled = 0
debugOldStyleInfo = 1
debugLineInfoOnly = 2
debugEnabled = 3
debugEditAndContinue = 4

# Character set declared in (and used to encode) generated .vcproj files.
VCPROJ_CHARSET = "Windows-1252"
class VS200xExprFormatter(VSExprFormatter):
    """Expression formatter for VS 200x project files.

    Identical to the base formatter except that literal values have any
    embedded double quotes backslash-escaped, since they end up inside
    quoted XML attribute values.
    """
    def literal(self, e):
        """Return the literal's value with ``"`` escaped as ``\\"``."""
        value = e.value
        if '"' not in value:
            return value
        return value.replace('"', '\\"')
class VS200xXmlFormatter(XmlFormatter):
    """
    XmlFormatter for VS 200x output.
    """
    # VS writes vcproj files with tab indentation.
    indent_step = "\t"

    ExprFormatter = VS200xExprFormatter

    def __init__(self, paths_info):
        super(VS200xXmlFormatter, self).__init__(paths_info, charset=VCPROJ_CHARSET)

    # these are always written as <foo>\n<foo>, not <foo/>
    elems_not_collapsed = set(["References",
                               "Globals",
                               "File",
                               "Filter",
                               "ToolFiles"])

    def format_node(self, name, attrs, text, children_markup, indent):
        """
        Formats given Node instance, indented with *indent* text.

        Content is either *text* or *children_markup*; the other is None.
        """
        s = "%s<%s" % (indent, name)
        # Each attribute goes on its own line, one extra tab in, matching
        # the layout VS itself produces.
        if attrs:
            for key, value in attrs:
                s += "\n%s\t%s=%s" % (indent, key, value)
        if text:
            s += ">%s</%s>\n" % (text, name)
        elif children_markup:
            if attrs:
                s += "\n%s\t" % indent
            s += ">\n%s%s</%s>\n" % (children_markup, indent, name)
        else:
            # Empty element: VS keeps some elements expanded (<x></x>)
            # rather than collapsed (<x/>).
            if name in self.elems_not_collapsed:
                if attrs:
                    s += "\n%s\t" % indent
                s += ">\n%s</%s>\n" % (indent, name)
            else:
                s += "\n%s/>\n" % indent
        return s
# TODO: Put more content into these classes, use them properly
class VS200xProject(VSProjectBase):
    """Minimal description of one generated VS 200x project (name, GUID,
    project file path and its dependencies), as consumed by the solution
    writer."""
    def __init__(self, name, guid, projectfile, deps):
        self.name = name
        self.guid = guid
        self.projectfile = projectfile
        self.dependencies = deps
class VS2003Project(VS200xProject):
    """Project file in Visual Studio 2003 format (version 7.1)."""
    version = 7.1
class VS2005Project(VS200xProject):
    """Project file in Visual Studio 2005 format (version 8)."""
    version = 8
class VS2008Project(VS200xProject):
    """Project file in Visual Studio 2008 format (version 9)."""
    version = 9
class VS2003Solution(VSSolutionBase):
    """Solution file for VS2003; 2003 solutions carry no human-readable
    version comment."""
    format_version = "8.00"
    human_version = None
class VS2005Solution(VSSolutionBase):
    """Solution file for VS2005."""
    format_version = "9.00"
    human_version = "2005"
class VS2008Solution(VSSolutionBase):
    """Solution file for VS2008."""
    format_version = "10.00"
    human_version = "2008"
class VS200xToolsetBase(VSToolsetBase):
    """Base class for VS200{358} toolsets."""

    #: Extension of format files
    proj_extension = "vcproj"

    # TODO: temporary hardcoded configs
    configs = ["Debug", "Release"]

    #: Whether /MP switch is supported
    has_parallel_compilation = False

    #: Whether Detect64BitPortabilityProblems is supported
    detect_64bit_problems = True

    def gen_for_target(self, target):
        """Write the .vcproj file for *target* and return its Project object.

        Returns None (generating nothing useful) for target types other
        than exe/library/dll.
        """
        projectfile = target["%s.projectfile" % self.name]
        filename = projectfile.as_native_path_for_output(target)
        paths_info = bkl.expr.PathAnchorsInfo(
            dirsep="\\",
            outfile=filename,
            builddir=self.get_builddir_for(target).as_native_path_for_output(target),
            model=target)
        guid = target["%s.guid" % self.name]
        root = Node("VisualStudioProject")
        root["ProjectType"] = "Visual C++"
        root["Version"] = "%.2f" % self.version
        root["Name"] = target.name
        root["ProjectGUID"] = guid
        root["RootNamespace"] = target.name
        root["Keyword"] = "Win32Proj"
        self._add_extra_options_to_node(target, root)
        n_platforms = Node("Platforms")
        n_platforms.add("Platform", Name="Win32")
        root.add(n_platforms)
        root.add(Node("ToolFiles"))
        n_configs = Node("Configurations")
        root.add(n_configs)
        for c in self.configs:
            n = Node("Configuration", Name="%s|Win32" % c)
            n_configs.add(n)
            # TODO: handle the defaults in a nicer way
            if target["outputdir"].as_native_path(paths_info) != paths_info.builddir_abs:
                n["OutputDirectory"] = target["outputdir"]
            else:
                n["OutputDirectory"] = "$(SolutionDir)$(ConfigurationName)"
            n["IntermediateDirectory"] = "$(ConfigurationName)"
            if is_exe(target):
                n["ConfigurationType"] = typeApplication
            elif is_library(target):
                n["ConfigurationType"] = typeStaticLibrary
            elif is_dll(target):
                n["ConfigurationType"] = typeDynamicLibrary
            else:
                # Unsupported target type: bail out without writing anything.
                return None
            if target["win32-unicode"]:
                n["CharacterSet"] = 1
            self._add_extra_options_to_node(target, n)
            # Tools with a matching method below get generated content; the
            # rest are emitted as empty <Tool> placeholder nodes.
            for tool in self.tool_functions:
                if hasattr(self, tool):
                    f_tool = getattr(self, tool)
                    n_tool = f_tool(target, c)
                else:
                    n_tool = Node("Tool", Name=tool)
                if n_tool:
                    self._add_extra_options_to_node(target, n_tool)
                    n.add(n_tool)
        root.add(Node("References"))
        root.add(self.build_files_list(target))
        root.add(Node("Globals"))
        f = OutputFile(filename, EOL_WINDOWS, charset=VCPROJ_CHARSET,
                       creator=self, create_for=target)
        f.write(VS200xXmlFormatter(paths_info).format(root))
        f.commit()
        target_deps = target["deps"].as_py()
        return self.Project(target.name, guid, projectfile, target_deps)

    def VCPreBuildEventTool(self, target, cfg):
        """Emit pre-build commands, CRLF-separated as VS expects."""
        n = Node("Tool", Name="VCPreBuildEventTool")
        n["CommandLine"] = VSList("\r\n", target["pre-build-commands"])
        return n

    def VCPostBuildEventTool(self, target, cfg):
        """Emit post-build commands, CRLF-separated as VS expects."""
        n = Node("Tool", Name="VCPostBuildEventTool")
        n["CommandLine"] = VSList("\r\n", target["post-build-commands"])
        return n

    def VCAppVerifierTool(self, target, cfg):
        """Emit VCAppVerifierTool for executables only."""
        return Node("Tool", Name="VCAppVerifierTool") if is_exe(target) else None

    def VCWebDeploymentTool(self, target, cfg):
        """Emit VCWebDeploymentTool for executables only (overridden to None
        in toolsets where VS removed the tool)."""
        return Node("Tool", Name="VCWebDeploymentTool") if is_exe(target) else None

    def VCCLCompilerTool(self, target, cfg):
        """Emit the compiler settings node for configuration *cfg*."""
        n = Node("Tool", Name="VCCLCompilerTool")
        # Currently we don't make any distinction between preprocessor, C
        # and C++ flags as they're basically all the same at MSVS level
        # too and all go into the same place in the IDE and same
        # AdditionalOptions node in the project file.
        all_cflags = VSList(" ", target["compiler-options"],
                            target["c-compiler-options"],
                            target["cxx-compiler-options"])
        if self.has_parallel_compilation:
            all_cflags.append("/MP") # parallel compilation
        n["AdditionalOptions"] = all_cflags
        n["Optimization"] = optimizeMaxSpeed if cfg == "Release" else optimizeDisabled
        if cfg == "Release":
            n["EnableIntrinsicFunctions"] = True
        n["AdditionalIncludeDirectories"] = target["includedirs"]
        n["PreprocessorDefinitions"] = list(target["defines"]) + self.get_std_defines(target, cfg)
        # MinimalRebuild conflicts with /MP, so it is only enabled where
        # parallel compilation is unavailable.
        if not self.has_parallel_compilation and cfg == "Debug":
            n["MinimalRebuild"] = True
        # Runtime library choice follows the CRT linkage plus Debug/Release.
        if target["win32-crt-linkage"] == "dll":
            n["RuntimeLibrary"] = rtMultiThreadedDebugDLL if cfg == "Debug" else rtMultiThreadedDLL
        else:
            n["RuntimeLibrary"] = rtMultiThreadedDebug if cfg == "Debug" else rtMultiThreaded
        if cfg == "Release":
            n["EnableFunctionLevelLinking"] = True
        n["UsePrecompiledHeader"] = pchNone
        n["WarningLevel"] = 3
        if self.detect_64bit_problems:
            n["Detect64BitPortabilityProblems"] = True
        n["DebugInformationFormat"] = debugEditAndContinue if cfg == "Debug" else debugEnabled
        return n

    def VCLinkerTool(self, target, cfg):
        """Emit linker settings; static libraries use the librarian instead."""
        if is_library(target):
            return None
        n = Node("Tool", Name="VCLinkerTool")
        n["AdditionalOptions"] = VSList(" ", target["link-options"])
        libs = target["libs"]
        if libs:
            n["AdditionalDependencies"] = VSList(" ", ("%s.lib" % x.as_py() for x in libs))
        # Only emit OutputFile when the basename differs from the default.
        targetname = target[target.type.basename_prop]
        if targetname != target.name:
            n["OutputFile"] = concat("$(OutDir)\\", targetname, ".", target.type.target_file(self, target).get_extension())
        if cfg == "Debug":
            n["LinkIncremental"] = linkIncrementalYes
        else:
            n["LinkIncremental"] = linkIncrementalNo
        # VS: creates debug info for release too; TODO: make this configurable
        n["GenerateDebugInformation"] = True
        if is_exe(target) and target["win32-subsystem"] == "console":
            n["SubSystem"] = subSystemConsole
        else:
            n["SubSystem"] = subSystemWindows
        if cfg == "Release":
            n["OptimizeReferences"] = optReferences
            n["EnableCOMDATFolding"] = optFolding
        n["TargetMachine"] = machineX86
        return n

    def VCLibrarianTool(self, target, cfg):
        """Emit librarian settings; applicable only to static libraries."""
        if not is_library(target):
            return None
        n = Node("Tool", Name="VCLibrarianTool")
        targetname = target[target.type.basename_prop]
        if targetname != target.name:
            n["OutputFile"] = concat("$(OutDir)\\", targetname, ".", target.type.target_file(self, target).get_extension())
        return n

    #: List of functions to call to generate <Configuration> children. Note
    #: that the order is slightly different for different VC versions and not
    #: all nodes are present in all versions.
    tool_functions = [
        "VCPreBuildEventTool",
        "VCCustomBuildTool",
        "VCXMLDataGeneratorTool",
        "VCWebServiceProxyGeneratorTool",
        "VCMIDLTool",
        "VCCLCompilerTool",
        "VCManagedResourceCompilerTool",
        "VCResourceCompilerTool",
        "VCPreLinkEventTool",
        "VCLibrarianTool",
        "VCLinkerTool",
        "VCALinkTool",
        "VCManifestTool",
        "VCXDCMakeTool",
        "VCBscMakeTool",
        "VCFxCopTool",
        "VCAppVerifierTool",
        "VCWebDeploymentTool",
        "VCPostBuildEventTool",
    ]

    def build_files_list(self, target):
        """Build the <Files> section: Source, Header and Resource filters."""
        files = Node("Files")
        # TODO: use groups definition, filter into groups, add Resource Files
        sources = Node("Filter", Name="Source Files")
        sources["Filter"] = "cpp;c;cc;cxx;def;odl;idl;hpj;bat;asm;asmx"
        sources["UniqueIdentifier"] = "{4FC737F1-C7A5-4376-A066-2A32D752A2FF}"
        for sfile in target.sources:
            ext = sfile.filename.get_extension()
            # TODO: share this code with VS2010
            # FIXME: make this more solid
            if ext in ['cpp', 'cxx', 'cc', 'c']:
                sources.add("File", RelativePath=sfile.filename)
            else:
                # Non-C/C++ source: run it through a custom build step that
                # generates a .cpp in the builddir, then compile that.
                # FIXME: handle both compilation into cpp and c files
                genfiletype = bkl.compilers.CxxFileType.get()
                genname = bkl.expr.PathExpr([bkl.expr.LiteralExpr(sfile.filename.get_basename())],
                                            bkl.expr.ANCHOR_BUILDDIR,
                                            pos=sfile.filename.pos).change_extension("cpp")
                ft_from = bkl.compilers.get_file_type(ext)
                compiler = bkl.compilers.get_compiler(self, ft_from, genfiletype)
                n_file = Node("File", RelativePath=sfile.filename)
                sources.add(n_file)
                for cfg in self.configs:
                    n_cfg = Node("FileConfiguration", Name=cfg)
                    tool = Node("Tool", Name="VCCustomBuildTool")
                    tool["CommandLine"] = compiler.commands(self, target, sfile.filename, genname)
                    tool["Outputs"] = genname
                    n_cfg.add(tool)
                    n_file.add(n_cfg)
                sources.add("File", RelativePath=genname)
        files.add(sources)
        headers = Node("Filter", Name="Header Files")
        headers["Filter"] = "h;hpp;hxx;hm;inl;inc;xsd"
        headers["UniqueIdentifier"] = "{93995380-89BD-4b04-88EB-625FBE52EBFB}"
        for sfile in target.headers:
            headers.add("File", RelativePath=sfile.filename)
        files.add(headers)
        resources = Node("Filter", Name="Resource Files")
        resources["Filter"] = "rc;ico;cur;bmp;dlg;rc2;rct;bin;rgs;gif;jpg;jpeg;jpe;resx;tiff;tif;png;wav"
        resources["UniqueIdentifier"] = "{67DA6AB6-F800-4c08-8B7A-83BB121AAD01}"
        files.add(resources)
        return files

    def _add_extra_options_to_node(self, target, node):
        """Add extra native options specified in vs200x.option.* properties."""
        # Scope "" selects root-level options, "Configuration" the per-config
        # ones, and anything else matches the node's Name (a tool name).
        if node.name == "VisualStudioProject":
            scope = ""
        elif node.name == "Configuration":
            scope = "Configuration"
        else:
            scope = node["Name"]
        for key, value in self.collect_extra_options_for_node(target, scope):
            node[key] = value

    def get_builddir_for(self, target):
        """Return the build directory expression for *target*: the project
        file's directory plus a $(ConfigurationName) component."""
        prj = target["%s.projectfile" % self.name]
        # TODO: reference Configuration setting properly, as bkl setting, move this to vsbase
        return bkl.expr.PathExpr(prj.components[:-1] + [bkl.expr.LiteralExpr("$(ConfigurationName)")], prj.anchor)
class VS2008Toolset(VS200xToolsetBase):
    """
    Visual Studio 2008.

    Special properties
    ------------------
    In addition to the properties described below, it's possible to specify any
    of the ``vcproj`` properties directly in a bakefile. To do so, you have to
    set specially named variables on the target.

    The variables are prefixed with ``vs2008.option.``, followed by tool name and
    attribute name. For example:

      - ``vs2008.option.VCCLCompilerTool.EnableFunctionLevelLinking``
      - ``vs2008.option.VCLinkerTool.EnableCOMDATFolding``

    Additionally, the following options are supported for non-tool nodes:

      - ``vs2008.option.*`` (attributes of the root ``VisualStudioProject`` node)
      - ``vs2008.option.Configuration.*`` (``Configuration`` node attributes)

    Examples:

    .. code-block:: bkl

       vs2008.option.VCCLCompilerTool.EnableFunctionLevelLinking = false;
    """
    name = "vs2008"
    version = 9
    proj_versions = [9]
    Solution = VS2008Solution
    Project = VS2008Project
    # VS2008 gained /MP and dropped the 64-bit portability warnings.
    has_parallel_compilation = True
    detect_64bit_problems = False

    def VCWebDeploymentTool(self, target, cfg):
        # This tool was removed in 2008
        return None
class VS2005Toolset(VS200xToolsetBase):
    """
    Visual Studio 2005.

    Special properties
    ------------------
    This toolset supports the same special properties as
    :ref:`ref_toolset_vs2008`. The only difference is that they are prefixed
    with ``vs2005.option.`` instead of ``vs2008.option.``.
    """
    name = "vs2005"
    version = 8
    proj_versions = [8]
    Solution = VS2005Solution
    Project = VS2005Project
    # VS2005 has no /MP support and still emits 64-bit portability warnings.
    has_parallel_compilation = False
    detect_64bit_problems = True
|
# -*- coding: utf-8 -*-
from flask import Blueprint, current_app, flash, jsonify, redirect, render_template, request, url_for
from sqlalchemy.exc import IntegrityError
from sqlalchemy.orm import joinedload
from doorman.forms import (
AddDistributedQueryForm,
CreateQueryForm,
UpdateQueryForm,
CreateTagForm,
UploadPackForm,
FilePathForm,
)
from doorman.database import db
from doorman.models import DistributedQuery, FilePath, Node, Pack, Query, Tag
from doorman.utils import create_query_pack_from_upload, flash_errors
# Blueprint for the management UI; every view below is mounted under /manage.
blueprint = Blueprint('manage', __name__,
                      template_folder='./templates/manage',
                      url_prefix='/manage')
@blueprint.context_processor
def inject_models():
    """Make the ORM model classes available in every template rendered by
    this blueprint."""
    return {
        'Node': Node,
        'Pack': Pack,
        'Query': Query,
        'Tag': Tag,
        'DistributedQuery': DistributedQuery,
    }
@blueprint.route('/')
def index():
    """Render the management landing page."""
    return render_template('index.html')
@blueprint.route('/nodes')
def nodes():
    """List every enrolled node."""
    return render_template('nodes.html', nodes=Node.query.all())
@blueprint.route('/nodes/add', methods=['GET', 'POST'])
def add_node():
    # NOTE(review): this is a stub — it accepts GET/POST but performs no
    # creation, always bouncing back to the node list. Confirm whether node
    # enrollment is handled elsewhere before extending it.
    return redirect(url_for('.nodes'))
@blueprint.route('/nodes/tagged/<string:tags>')
def nodes_by_tag(tags):
    """List nodes matching any of the comma-separated tag values in *tags*.

    The special value ``null`` selects nodes with no tags at all.
    """
    if tags == 'null':
        matching = Node.query.filter(Node.tags == None).all()
    else:
        wanted = [t.strip() for t in tags.split(',')]
        matching = Node.query.filter(Node.tags.any(Tag.value.in_(wanted))).all()
    return render_template('nodes.html', nodes=matching)
@blueprint.route('/node/<int:node_id>')
def get_node(node_id):
    """Show the detail page for one node; .one() raises if it is missing."""
    node = Node.query.filter(Node.id == node_id).one()
    return render_template('node.html', node=node)
@blueprint.route('/node/<int:node_id>/activity')
def node_activity(node_id):
    """Show the activity page for one node; .one() raises if it is missing."""
    node = Node.query.filter(Node.id == node_id).one()
    return render_template('activity.html', node=node)
@blueprint.route('/node/<int:node_id>/tags', methods=['GET', 'POST'])
def tag_node(node_id):
    """Replace a node's tags via XHR POST; otherwise redirect to its page."""
    node = Node.query.filter(Node.id == node_id).one()
    if request.is_xhr and request.method == 'POST':
        # The JSON body is a list of tag values; create_tags (defined
        # elsewhere in this module — TODO confirm) maps them to Tag rows.
        node.tags = create_tags(*request.get_json())
        node.save()
        # 202 Accepted with an empty body for the AJAX caller.
        return jsonify({}), 202

    return redirect(url_for('.get_node', node_id=node.id))
@blueprint.route('/node/<int:node_id>/distributed/result/<string:guid>')
def get_distributed_result(node_id, guid):
    """Show the result of one distributed query run on one node.

    .one() raises if the node or the (guid, node) query pair is missing.
    """
    node = Node.query.filter(Node.id == node_id).one()
    query = DistributedQuery.query.filter(
        DistributedQuery.guid == guid,
        DistributedQuery.node == node,
    ).one()
    return render_template('distributed.result.html', node=node, query=query)
@blueprint.route('/packs')
def packs():
packs = Pack.query.options(joinedload(Pack.queries).joinedload(Query.packs)).all()
return render_template('packs.html', packs=packs)
@blueprint.route('/packs/add', methods=['GET', 'POST'])
@blueprint.route('/packs/upload', methods=['POST'])
def add_pack():
    """Upload a query pack file and create its queries.

    Served under two URLs: /packs/add (form page and submit) and
    /packs/upload (submit only).
    """
    form = UploadPackForm()
    if form.validate_on_submit():
        pack = create_query_pack_from_upload(form.pack)
        # Only redirect back to the pack list if everything was successful
        if pack is not None:
            return redirect(url_for('.packs', _anchor=pack.name))
    # Validation or pack creation failed: re-render the form with errors.
    flash_errors(form)
    return render_template('pack.html', form=form)
@blueprint.route('/pack/<string:pack_name>/tags', methods=['GET', 'POST'])
def tag_pack(pack_name):
pack = Pack.query.filter(Pack.name == pack_name).one()
if request.is_xhr:
if request.method == 'POST':
pack.tags = create_tags(*request.get_json())
pack.save()
return jsonify(tags=[t.value for t in pack.tags])
return redirect(url_for('.packs'))
@blueprint.route('/queries')
def queries():
queries = Query.query.options(joinedload(Query.packs)).all()
return render_template('queries.html', queries=queries)
@blueprint.route('/queries/add', methods=['GET', 'POST'])
def add_query():
    """Create a new scheduled query from a CreateQueryForm submission.

    On success, saves the query (with any newline-separated tags) and
    redirects to its detail page; otherwise re-renders the form with
    its errors flashed.
    """
    form = CreateQueryForm()
    form.set_choices()
    if form.validate_on_submit():
        query = Query(name=form.name.data,
                      sql=form.sql.data,
                      interval=form.interval.data,
                      platform=form.platform.data,
                      version=form.version.data,
                      description=form.description.data,
                      value=form.value.data,
                      removed=form.removed.data)
        # One tag per line in the submitted text field.
        query.tags = create_tags(*form.tags.data.splitlines())
        query.save()
        return redirect(url_for('.query', query_id=query.id))
    flash_errors(form)
    return render_template('query.html', form=form)
@blueprint.route('/queries/distributed')
@blueprint.route('/queries/distributed/<any(new, pending, complete):status>')
@blueprint.route('/node/<int:node_id>/distributed/<any(new, pending, complete):status>')
def distributed(node_id=None, status=None):
    """List distributed queries, optionally filtered by status and/or node.

    An absent status leaves the query unfiltered; the template receives
    the query object itself (lazy), not a materialized list.
    """
    status_filters = {
        'new': DistributedQuery.NEW,
        'pending': DistributedQuery.PENDING,
        'complete': DistributedQuery.COMPLETE,
    }
    queries = DistributedQuery.query
    if status in status_filters:
        queries = queries.filter(
            DistributedQuery.status == status_filters[status])
    if node_id:
        # .one() raises if the node id is unknown.
        node = Node.query.filter(Node.id == node_id).one()
        queries = queries.filter(DistributedQuery.node_id == node.id)
    return render_template('distributed.html', queries=queries, status=status)
@blueprint.route('/queries/distributed/add', methods=['GET', 'POST'])
def add_distributed():
    """Create an ad-hoc (distributed) query for a set of nodes.

    Targets are selected by node key and/or tag; when neither is
    supplied, every node receives the query. On success the browser is
    redirected to the list of new distributed queries.
    """
    form = AddDistributedQueryForm()
    form.set_choices()
    if form.validate_on_submit():
        nodes = []
        # BUG FIX: was `form.tag.data` -- the form field is `tags`
        # (see its use just below).
        if not form.nodes.data and not form.tags.data:
            # all nodes get this query
            nodes = Node.query.all()
        if form.nodes.data:
            nodes.extend(
                Node.query.filter(
                    Node.node_key.in_(form.nodes.data)
                ).all()
            )
        if form.tags.data:
            # BUG FIX: `tag_names` was undefined here (NameError); build
            # it from the submitted tag values.
            # NOTE(review): assumes form.tags is a multi-select whose
            # .data is a list of strings -- confirm against the form.
            tag_names = [t.strip() for t in form.tags.data]
            nodes.extend(
                Node.query.filter(
                    Node.tags.any(
                        Tag.value.in_(tag_names)
                    )
                ).all()
            )
        for node in nodes:
            query = DistributedQuery(sql=form.sql.data,
                                     node=node,
                                     not_before=form.not_before.data)
            db.session.add(query)
        # The original `for/else` always ran (the loop has no `break`);
        # commit once after queueing every query.
        db.session.commit()
        return redirect(url_for('.distributed', status='new'))
    flash_errors(form)
    return render_template('distributed.html', form=form)
@blueprint.route('/queries/tagged/<string:tags>')
def queries_by_tag(tags):
tag_names = [t.strip() for t in tags.split(',')]
queries = Query.query.filter(Query.tags.any(Tag.value.in_(tag_names))).all()
return render_template('queries.html', queries=queries)
@blueprint.route('/query/<int:query_id>', methods=['GET', 'POST'])
def query(query_id):
    """Show (GET) or update (POST) a single scheduled query.

    Uses .one(), so an unknown id raises rather than returning 404.
    """
    query = Query.query.filter(Query.id == query_id).one()
    form = UpdateQueryForm(request.form)
    if form.validate_on_submit():
        if form.packs.data:
            query.packs = Pack.query.filter(Pack.name.in_(form.packs.data)).all()
        else:
            query.packs = []
        # One tag per line in the submitted text field.
        query.tags = create_tags(*form.tags.data.splitlines())
        # NOTE(review): update() is assumed to persist and return the
        # instance -- confirm against the model mixin.
        query = query.update(name=form.name.data,
                             sql=form.sql.data,
                             interval=form.interval.data,
                             platform=form.platform.data,
                             version=form.version.data,
                             description=form.description.data,
                             value=form.value.data,
                             removed=form.removed.data)
        return redirect(url_for('.query', query_id=query.id))
    # GET (or failed validation): rebuild the form pre-populated from the
    # stored query so current values are shown.
    form = UpdateQueryForm(request.form, obj=query)
    flash_errors(form)
    return render_template('query.html', form=form, query=query)
@blueprint.route('/query/<int:query_id>/tags', methods=['GET', 'POST'])
def tag_query(query_id):
query = Query.query.filter(Query.id == query_id).one()
if request.is_xhr:
if request.method == 'POST':
query.tags = create_tags(*request.get_json())
query.save()
return jsonify(tags=[t.value for t in query.tags])
return redirect(url_for('.query', query_id=query.id))
@blueprint.route('/files')
def files():
file_paths = FilePath.query.all()
return render_template('files.html', file_paths=file_paths)
@blueprint.route('/files/add', methods=['GET', 'POST'])
def add_file():
form = FilePathForm()
if form.validate_on_submit():
FilePath.create(category=form.category.data,
target_paths=form.target_paths.data.splitlines())
return redirect(url_for('.files'))
flash_errors(form)
return render_template('file.html', form=form)
@blueprint.route('/file/<int:file_path_id>/tags', methods=['GET', 'POST'])
def tag_file(file_path_id):
file_path = FilePath.query.filter(FilePath.id == file_path_id).one()
if request.is_xhr:
if request.method == 'POST':
file_path.tags = create_tags(*request.get_json())
file_path.save()
return jsonify(tags=[t.value for t in file_path.tags])
return redirect(url_for('.files'))
@blueprint.route('/tags')
def tags():
if request.is_xhr:
return jsonify(tags=[t.value for t in Tag.query.all()])
return render_template('tags.html', tags=Tag.query)
@blueprint.route('/tags/add', methods=['GET', 'POST'])
def add_tag():
form = CreateTagForm()
if form.validate_on_submit():
create_tags(*form.value.data.splitlines())
return redirect(url_for('.tags'))
flash_errors(form)
return render_template('tag.html', form=form)
@blueprint.route('/tag/<string:tag_value>')
def get_tag(tag_value):
tag = Tag.query.filter(Tag.value == tag_value).one()
return render_template('tag.html', tag=tag)
@blueprint.route('/tag/<string:tag_value>', methods=['DELETE'])
def delete_tag(tag_value):
tag = Tag.query.filter(Tag.value == tag_value).one()
tag.delete()
return jsonify({}), 204
def create_tags(*tags):
    """Return Tag instances for *tags*, creating any that don't exist yet.

    Duplicate and blank values are ignored. Newly created tags are
    reported to the user in a single flash message.
    """
    values = []
    existing = []
    # create a set, because we haven't yet done our association_proxy in
    # sqlalchemy
    for value in (v.strip() for v in set(tags) if v.strip()):
        tag = Tag.query.filter(Tag.value == value).first()
        if not tag:
            values.append(Tag.create(value=value))
        else:
            existing.append(tag)
    # The original used `for/else` here; with no `break` in the loop the
    # else-block always ran, so it is hoisted out for clarity.
    if values:
        flash(u"Created tag{0} {1}".format(
            's' if len(values) > 1 else '',
            ', '.join(tag.value for tag in values)),
            'info')
    return values + existing
# NOTE: the module copy above has a NameError in add_distributed():
# `tag_names` is used without being defined (and `form.tag.data` should
# read `form.tags.data`); derive tag_names from form.tags.data.
# -*- coding: utf-8 -*-
from flask import Blueprint, current_app, flash, jsonify, redirect, render_template, request, url_for
from sqlalchemy.exc import IntegrityError
from sqlalchemy.orm import joinedload
from doorman.forms import (
AddDistributedQueryForm,
CreateQueryForm,
UpdateQueryForm,
CreateTagForm,
UploadPackForm,
FilePathForm,
)
from doorman.database import db
from doorman.models import DistributedQuery, FilePath, Node, Pack, Query, Tag
from doorman.utils import create_query_pack_from_upload, flash_errors
blueprint = Blueprint('manage', __name__,
template_folder='./templates/manage',
url_prefix='/manage')
@blueprint.context_processor
def inject_models():
return dict(Node=Node, Pack=Pack, Query=Query, Tag=Tag,
DistributedQuery=DistributedQuery)
@blueprint.route('/')
def index():
return render_template('index.html')
@blueprint.route('/nodes')
def nodes():
nodes = Node.query.all()
return render_template('nodes.html', nodes=nodes)
@blueprint.route('/nodes/add', methods=['GET', 'POST'])
def add_node():
return redirect(url_for('.nodes'))
@blueprint.route('/nodes/tagged/<string:tags>')
def nodes_by_tag(tags):
if tags == 'null':
nodes = Node.query.filter(Node.tags == None).all()
else:
tag_names = [t.strip() for t in tags.split(',')]
nodes = Node.query.filter(Node.tags.any(Tag.value.in_(tag_names))).all()
return render_template('nodes.html', nodes=nodes)
@blueprint.route('/node/<int:node_id>')
def get_node(node_id):
node = Node.query.filter(Node.id == node_id).one()
return render_template('node.html', node=node)
@blueprint.route('/node/<int:node_id>/activity')
def node_activity(node_id):
node = Node.query.filter(Node.id == node_id).one()
return render_template('activity.html', node=node)
@blueprint.route('/node/<int:node_id>/tags', methods=['GET', 'POST'])
def tag_node(node_id):
node = Node.query.filter(Node.id == node_id).one()
if request.is_xhr and request.method == 'POST':
node.tags = create_tags(*request.get_json())
node.save()
return jsonify({}), 202
return redirect(url_for('.get_node', node_id=node.id))
@blueprint.route('/node/<int:node_id>/distributed/result/<string:guid>')
def get_distributed_result(node_id, guid):
node = Node.query.filter(Node.id == node_id).one()
query = DistributedQuery.query.filter(
DistributedQuery.guid == guid,
DistributedQuery.node == node,
).one()
return render_template('distributed.result.html', node=node, query=query)
@blueprint.route('/packs')
def packs():
packs = Pack.query.options(joinedload(Pack.queries).joinedload(Query.packs)).all()
return render_template('packs.html', packs=packs)
@blueprint.route('/packs/add', methods=['GET', 'POST'])
@blueprint.route('/packs/upload', methods=['POST'])
def add_pack():
form = UploadPackForm()
if form.validate_on_submit():
pack = create_query_pack_from_upload(form.pack)
# Only redirect back to the pack list if everything was successful
if pack is not None:
return redirect(url_for('.packs', _anchor=pack.name))
flash_errors(form)
return render_template('pack.html', form=form)
@blueprint.route('/pack/<string:pack_name>/tags', methods=['GET', 'POST'])
def tag_pack(pack_name):
    """Read (XHR GET) or replace (XHR POST) a pack's tags as JSON.

    Non-XHR requests are redirected to the pack list.
    """
    pack = Pack.query.filter(Pack.name == pack_name).one()
    if request.is_xhr:
        if request.method == 'POST':
            # Request body is a JSON list of tag values.
            pack.tags = create_tags(*request.get_json())
            pack.save()
        return jsonify(tags=[t.value for t in pack.tags])
    return redirect(url_for('.packs'))
@blueprint.route('/queries')
def queries():
queries = Query.query.options(joinedload(Query.packs)).all()
return render_template('queries.html', queries=queries)
@blueprint.route('/queries/add', methods=['GET', 'POST'])
def add_query():
form = CreateQueryForm()
form.set_choices()
if form.validate_on_submit():
query = Query(name=form.name.data,
sql=form.sql.data,
interval=form.interval.data,
platform=form.platform.data,
version=form.version.data,
description=form.description.data,
value=form.value.data,
removed=form.removed.data)
query.tags = create_tags(*form.tags.data.splitlines())
query.save()
return redirect(url_for('.query', query_id=query.id))
flash_errors(form)
return render_template('query.html', form=form)
@blueprint.route('/queries/distributed')
@blueprint.route('/queries/distributed/<any(new, pending, complete):status>')
@blueprint.route('/node/<int:node_id>/distributed/<any(new, pending, complete):status>')
def distributed(node_id=None, status=None):
    """List distributed queries, optionally filtered by status and/or node.

    An absent status leaves the query unfiltered; the template receives
    the query object itself (lazy), not a materialized list.
    """
    if status == 'new':
        queries = DistributedQuery.query.filter(
            DistributedQuery.status == DistributedQuery.NEW)
    elif status == 'pending':
        queries = DistributedQuery.query.filter(
            DistributedQuery.status == DistributedQuery.PENDING)
    elif status == 'complete':
        queries = DistributedQuery.query.filter(
            DistributedQuery.status == DistributedQuery.COMPLETE)
    else:
        queries = DistributedQuery.query
    if node_id:
        # .one() raises if the node id is unknown.
        node = Node.query.filter(Node.id == node_id).one()
        queries = queries.filter(DistributedQuery.node_id == node.id)
    return render_template('distributed.html', queries=queries, status=status)
@blueprint.route('/queries/distributed/add', methods=['GET', 'POST'])
def add_distributed():
    """Create an ad-hoc (distributed) query for a set of nodes.

    Targets are selected by node key and/or tag; when neither is
    supplied, every node receives the query. On success the browser is
    redirected to the list of new distributed queries.
    """
    form = AddDistributedQueryForm()
    form.set_choices()
    if form.validate_on_submit():
        nodes = []
        if not form.nodes.data and not form.tags.data:
            # all nodes get this query
            nodes = Node.query.all()
        if form.nodes.data:
            nodes.extend(
                Node.query.filter(
                    Node.node_key.in_(form.nodes.data)
                ).all()
            )
        if form.tags.data:
            # BUG FIX: `tag_names` was undefined here (NameError); build
            # it from the submitted tag values.
            # NOTE(review): assumes form.tags is a multi-select whose
            # .data is a list of strings -- confirm against the form.
            tag_names = [t.strip() for t in form.tags.data]
            nodes.extend(
                Node.query.filter(
                    Node.tags.any(
                        Tag.value.in_(tag_names)
                    )
                ).all()
            )
        for node in nodes:
            query = DistributedQuery(sql=form.sql.data,
                                     node=node,
                                     not_before=form.not_before.data)
            db.session.add(query)
        # The original `for/else` always ran (the loop has no `break`);
        # commit once after queueing every query.
        db.session.commit()
        return redirect(url_for('.distributed', status='new'))
    flash_errors(form)
    return render_template('distributed.html', form=form)
@blueprint.route('/queries/tagged/<string:tags>')
def queries_by_tag(tags):
tag_names = [t.strip() for t in tags.split(',')]
queries = Query.query.filter(Query.tags.any(Tag.value.in_(tag_names))).all()
return render_template('queries.html', queries=queries)
@blueprint.route('/query/<int:query_id>', methods=['GET', 'POST'])
def query(query_id):
    """Show (GET) or update (POST) a single scheduled query.

    Uses .one(), so an unknown id raises rather than returning 404.
    """
    query = Query.query.filter(Query.id == query_id).one()
    form = UpdateQueryForm(request.form)
    if form.validate_on_submit():
        if form.packs.data:
            query.packs = Pack.query.filter(Pack.name.in_(form.packs.data)).all()
        else:
            query.packs = []
        # One tag per line in the submitted text field.
        query.tags = create_tags(*form.tags.data.splitlines())
        # NOTE(review): update() is assumed to persist and return the
        # instance -- confirm against the model mixin.
        query = query.update(name=form.name.data,
                             sql=form.sql.data,
                             interval=form.interval.data,
                             platform=form.platform.data,
                             version=form.version.data,
                             description=form.description.data,
                             value=form.value.data,
                             removed=form.removed.data)
        return redirect(url_for('.query', query_id=query.id))
    # GET (or failed validation): rebuild the form pre-populated from the
    # stored query so current values are shown.
    form = UpdateQueryForm(request.form, obj=query)
    flash_errors(form)
    return render_template('query.html', form=form, query=query)
@blueprint.route('/query/<int:query_id>/tags', methods=['GET', 'POST'])
def tag_query(query_id):
query = Query.query.filter(Query.id == query_id).one()
if request.is_xhr:
if request.method == 'POST':
query.tags = create_tags(*request.get_json())
query.save()
return jsonify(tags=[t.value for t in query.tags])
return redirect(url_for('.query', query_id=query.id))
@blueprint.route('/files')
def files():
file_paths = FilePath.query.all()
return render_template('files.html', file_paths=file_paths)
@blueprint.route('/files/add', methods=['GET', 'POST'])
def add_file():
form = FilePathForm()
if form.validate_on_submit():
FilePath.create(category=form.category.data,
target_paths=form.target_paths.data.splitlines())
return redirect(url_for('.files'))
flash_errors(form)
return render_template('file.html', form=form)
@blueprint.route('/file/<int:file_path_id>/tags', methods=['GET', 'POST'])
def tag_file(file_path_id):
file_path = FilePath.query.filter(FilePath.id == file_path_id).one()
if request.is_xhr:
if request.method == 'POST':
file_path.tags = create_tags(*request.get_json())
file_path.save()
return jsonify(tags=[t.value for t in file_path.tags])
return redirect(url_for('.files'))
@blueprint.route('/tags')
def tags():
if request.is_xhr:
return jsonify(tags=[t.value for t in Tag.query.all()])
return render_template('tags.html', tags=Tag.query)
@blueprint.route('/tags/add', methods=['GET', 'POST'])
def add_tag():
form = CreateTagForm()
if form.validate_on_submit():
create_tags(*form.value.data.splitlines())
return redirect(url_for('.tags'))
flash_errors(form)
return render_template('tag.html', form=form)
@blueprint.route('/tag/<string:tag_value>')
def get_tag(tag_value):
tag = Tag.query.filter(Tag.value == tag_value).one()
return render_template('tag.html', tag=tag)
@blueprint.route('/tag/<string:tag_value>', methods=['DELETE'])
def delete_tag(tag_value):
tag = Tag.query.filter(Tag.value == tag_value).one()
tag.delete()
return jsonify({}), 204
def create_tags(*tags):
    """Return Tag instances for *tags*, creating any that don't exist yet.

    Duplicate and blank values are ignored. Newly created tags are
    reported to the user in a single flash message.
    """
    values = []
    existing = []
    # create a set, because we haven't yet done our association_proxy in
    # sqlalchemy
    for value in (v.strip() for v in set(tags) if v.strip()):
        tag = Tag.query.filter(Tag.value == value).first()
        if not tag:
            values.append(Tag.create(value=value))
        else:
            existing.append(tag)
    # The original used `for/else` here; with no `break` in the loop the
    # else-block always ran, so it is hoisted out for clarity.
    if values:
        flash(u"Created tag{0} {1}".format(
            's' if len(values) > 1 else '',
            ', '.join(tag.value for tag in values)),
            'info')
    return values + existing
|
# Copyright 2013 Google, Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0(the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Google Author(s): Behdad Esfahbod
"""Python OpenType Layout Subsetter.
Later grown into full OpenType subsetter, supporting all standard tables.
"""
import sys
import struct
import time
import array
import fontTools.ttLib
import fontTools.ttLib.tables
import fontTools.ttLib.tables.otTables
import fontTools.cffLib
import fontTools.misc.psCharStrings
import fontTools.pens.basePen
def _add_method(*clazzes):
"""Returns a decorator function that adds a new method to one or
more classes."""
def wrapper(method):
for clazz in clazzes:
assert clazz.__name__ != 'DefaultTable', 'Oops, table class not found.'
assert not hasattr(clazz, method.func_name), \
"Oops, class '%s' has method '%s'." % (clazz.__name__,
method.func_name)
setattr(clazz, method.func_name, method)
return None
return wrapper
def _uniq_sort(l):
return sorted(set(l))
def _set_update(s, *others):
# Jython's set.update only takes one other argument.
# Emulate real set.update...
for other in others:
s.update(other)
@_add_method(fontTools.ttLib.tables.otTables.Coverage)
def intersect(self, glyphs):
  "Returns ascending list of matching coverage values."
  # Coverage indices whose glyph is in the given glyph set; enumerate()
  # walks self.glyphs in order, so the resulting indices are ascending.
  return [i for i,g in enumerate(self.glyphs) if g in glyphs]
@_add_method(fontTools.ttLib.tables.otTables.Coverage)
def intersect_glyphs(self, glyphs):
"Returns set of intersecting glyphs."
return set(g for g in self.glyphs if g in glyphs)
@_add_method(fontTools.ttLib.tables.otTables.Coverage)
def subset(self, glyphs):
  "Returns ascending list of remaining coverage values."
  # Record the surviving (pre-subset) indices first, then drop the
  # non-retained glyphs in place.
  indices = self.intersect(glyphs)
  self.glyphs = [g for g in self.glyphs if g in glyphs]
  return indices
@_add_method(fontTools.ttLib.tables.otTables.Coverage)
def remap(self, coverage_map):
"Remaps coverage."
self.glyphs = [self.glyphs[i] for i in coverage_map]
@_add_method(fontTools.ttLib.tables.otTables.ClassDef)
def intersect(self, glyphs):
"Returns ascending list of matching class values."
return _uniq_sort(
([0] if any(g not in self.classDefs for g in glyphs) else []) +
[v for g,v in self.classDefs.iteritems() if g in glyphs])
@_add_method(fontTools.ttLib.tables.otTables.ClassDef)
def intersect_class(self, glyphs, klass):
"Returns set of glyphs matching class."
if klass == 0:
return set(g for g in glyphs if g not in self.classDefs)
return set(g for g,v in self.classDefs.iteritems()
if v == klass and g in glyphs)
@_add_method(fontTools.ttLib.tables.otTables.ClassDef)
def subset(self, glyphs, remap=False):
  "Returns ascending list of remaining classes."
  # Drop classDefs entries for glyphs outside the subset.
  self.classDefs = dict((g,v) for g,v in self.classDefs.iteritems() if g in glyphs)
  # Note: while class 0 has the special meaning of "not matched",
  # if no glyph will ever /not match/, we can optimize class 0 out too.
  indices = _uniq_sort(
    ([0] if any(g not in self.classDefs for g in glyphs) else []) +
    self.classDefs.values())
  if remap:
    # Renumber the surviving classes to be contiguous.
    self.remap(indices)
  return indices
@_add_method(fontTools.ttLib.tables.otTables.ClassDef)
def remap(self, class_map):
"Remaps classes."
self.classDefs = dict((g,class_map.index(v))
for g,v in self.classDefs.iteritems())
@_add_method(fontTools.ttLib.tables.otTables.SingleSubst)
def closure_glyphs(self, s, cur_glyphs=None):
if cur_glyphs == None: cur_glyphs = s.glyphs
if self.Format in [1, 2]:
s.glyphs.update(v for g,v in self.mapping.iteritems() if g in cur_glyphs)
else:
assert 0, "unknown format: %s" % self.Format
@_add_method(fontTools.ttLib.tables.otTables.SingleSubst)
def subset_glyphs(self, s):
if self.Format in [1, 2]:
self.mapping = dict((g,v) for g,v in self.mapping.iteritems()
if g in s.glyphs and v in s.glyphs)
return bool(self.mapping)
else:
assert 0, "unknown format: %s" % self.Format
@_add_method(fontTools.ttLib.tables.otTables.MultipleSubst)
def closure_glyphs(self, s, cur_glyphs=None):
if cur_glyphs == None: cur_glyphs = s.glyphs
if self.Format == 1:
indices = self.Coverage.intersect(cur_glyphs)
_set_update(s.glyphs, *(self.Sequence[i].Substitute for i in indices))
else:
assert 0, "unknown format: %s" % self.Format
@_add_method(fontTools.ttLib.tables.otTables.MultipleSubst)
def subset_glyphs(self, s):
if self.Format == 1:
indices = self.Coverage.subset(s.glyphs)
self.Sequence = [self.Sequence[i] for i in indices]
# Now drop rules generating glyphs we don't want
indices = [i for i,seq in enumerate(self.Sequence)
if all(sub in s.glyphs for sub in seq.Substitute)]
self.Sequence = [self.Sequence[i] for i in indices]
self.Coverage.remap(indices)
self.SequenceCount = len(self.Sequence)
return bool(self.SequenceCount)
else:
assert 0, "unknown format: %s" % self.Format
@_add_method(fontTools.ttLib.tables.otTables.AlternateSubst)
def closure_glyphs(self, s, cur_glyphs=None):
if cur_glyphs == None: cur_glyphs = s.glyphs
if self.Format == 1:
_set_update(s.glyphs, *(vlist for g,vlist in self.alternates.iteritems()
if g in cur_glyphs))
else:
assert 0, "unknown format: %s" % self.Format
@_add_method(fontTools.ttLib.tables.otTables.AlternateSubst)
def subset_glyphs(self, s):
if self.Format == 1:
self.alternates = dict((g,vlist)
for g,vlist in self.alternates.iteritems()
if g in s.glyphs and
all(v in s.glyphs for v in vlist))
return bool(self.alternates)
else:
assert 0, "unknown format: %s" % self.Format
@_add_method(fontTools.ttLib.tables.otTables.LigatureSubst)
def closure_glyphs(self, s, cur_glyphs=None):
if cur_glyphs == None: cur_glyphs = s.glyphs
if self.Format == 1:
_set_update(s.glyphs, *([seq.LigGlyph for seq in seqs
if all(c in s.glyphs for c in seq.Component)]
for g,seqs in self.ligatures.iteritems()
if g in cur_glyphs))
else:
assert 0, "unknown format: %s" % self.Format
@_add_method(fontTools.ttLib.tables.otTables.LigatureSubst)
def subset_glyphs(self, s):
if self.Format == 1:
self.ligatures = dict((g,v) for g,v in self.ligatures.iteritems()
if g in s.glyphs)
self.ligatures = dict((g,[seq for seq in seqs
if seq.LigGlyph in s.glyphs and
all(c in s.glyphs for c in seq.Component)])
for g,seqs in self.ligatures.iteritems())
self.ligatures = dict((g,v) for g,v in self.ligatures.iteritems() if v)
return bool(self.ligatures)
else:
assert 0, "unknown format: %s" % self.Format
@_add_method(fontTools.ttLib.tables.otTables.ReverseChainSingleSubst)
def closure_glyphs(self, s, cur_glyphs=None):
if cur_glyphs == None: cur_glyphs = s.glyphs
if self.Format == 1:
indices = self.Coverage.intersect(cur_glyphs)
if(not indices or
not all(c.intersect(s.glyphs)
for c in self.LookAheadCoverage + self.BacktrackCoverage)):
return
s.glyphs.update(self.Substitute[i] for i in indices)
else:
assert 0, "unknown format: %s" % self.Format
@_add_method(fontTools.ttLib.tables.otTables.ReverseChainSingleSubst)
def subset_glyphs(self, s):
  if self.Format == 1:
    # Keep only coverage entries whose glyph survives, and the matching
    # substitutes.
    indices = self.Coverage.subset(s.glyphs)
    self.Substitute = [self.Substitute[i] for i in indices]
    # Now drop rules generating glyphs we don't want
    indices = [i for i,sub in enumerate(self.Substitute)
               if sub in s.glyphs]
    self.Substitute = [self.Substitute[i] for i in indices]
    self.Coverage.remap(indices)
    self.GlyphCount = len(self.Substitute)
    # Keep the lookup only if substitutes remain and every backtrack /
    # lookahead coverage still matches something. NOTE(review): all()
    # short-circuits, so later coverages may be left un-subsetted when
    # an earlier one comes up empty -- harmless only because the whole
    # lookup is then dropped; confirm callers rely on that.
    return bool(self.GlyphCount and
                all(c.subset(s.glyphs)
                    for c in self.LookAheadCoverage+self.BacktrackCoverage))
  else:
    assert 0, "unknown format: %s" % self.Format
@_add_method(fontTools.ttLib.tables.otTables.SinglePos)
def subset_glyphs(self, s):
if self.Format == 1:
return len(self.Coverage.subset(s.glyphs))
elif self.Format == 2:
indices = self.Coverage.subset(s.glyphs)
self.Value = [self.Value[i] for i in indices]
self.ValueCount = len(self.Value)
return bool(self.ValueCount)
else:
assert 0, "unknown format: %s" % self.Format
@_add_method(fontTools.ttLib.tables.otTables.SinglePos)
def prune_post_subset(self, options):
if not options.hinting:
# Drop device tables
self.ValueFormat &= ~0x00F0
return True
@_add_method(fontTools.ttLib.tables.otTables.PairPos)
def subset_glyphs(self, s):
  if self.Format == 1:
    # Glyph-pair kerning: drop pair sets whose first glyph is gone, then
    # records whose second glyph is gone, then newly-empty pair sets.
    indices = self.Coverage.subset(s.glyphs)
    self.PairSet = [self.PairSet[i] for i in indices]
    for p in self.PairSet:
      p.PairValueRecord = [r for r in p.PairValueRecord
                           if r.SecondGlyph in s.glyphs]
      p.PairValueCount = len(p.PairValueRecord)
    self.PairSet = [p for p in self.PairSet if p.PairValueCount]
    self.PairSetCount = len(self.PairSet)
    return bool(self.PairSetCount)
  elif self.Format == 2:
    # Class-pair kerning: subset both class defs (renumbering classes)
    # and slice the class matrix down to the surviving classes.
    class1_map = self.ClassDef1.subset(s.glyphs, remap=True)
    class2_map = self.ClassDef2.subset(s.glyphs, remap=True)
    self.Class1Record = [self.Class1Record[i] for i in class1_map]
    for c in self.Class1Record:
      c.Class2Record = [c.Class2Record[i] for i in class2_map]
    self.Class1Count = len(class1_map)
    self.Class2Count = len(class2_map)
    # Coverage.subset also mutates the coverage in place; its truthiness
    # decides whether the lookup is kept.
    return bool(self.Class1Count and
                self.Class2Count and
                self.Coverage.subset(s.glyphs))
  else:
    assert 0, "unknown format: %s" % self.Format
@_add_method(fontTools.ttLib.tables.otTables.PairPos)
def prune_post_subset(self, options):
if not options.hinting:
# Drop device tables
self.ValueFormat1 &= ~0x00F0
self.ValueFormat2 &= ~0x00F0
return True
@_add_method(fontTools.ttLib.tables.otTables.CursivePos)
def subset_glyphs(self, s):
if self.Format == 1:
indices = self.Coverage.subset(s.glyphs)
self.EntryExitRecord = [self.EntryExitRecord[i] for i in indices]
self.EntryExitCount = len(self.EntryExitRecord)
return bool(self.EntryExitCount)
else:
assert 0, "unknown format: %s" % self.Format
@_add_method(fontTools.ttLib.tables.otTables.Anchor)
def prune_hints(self):
# Drop device tables / contour anchor point
self.Format = 1
@_add_method(fontTools.ttLib.tables.otTables.CursivePos)
def prune_post_subset(self, options):
if not options.hinting:
for rec in self.EntryExitRecord:
if rec.EntryAnchor: rec.EntryAnchor.prune_hints()
if rec.ExitAnchor: rec.ExitAnchor.prune_hints()
return True
@_add_method(fontTools.ttLib.tables.otTables.MarkBasePos)
def subset_glyphs(self, s):
if self.Format == 1:
mark_indices = self.MarkCoverage.subset(s.glyphs)
self.MarkArray.MarkRecord = [self.MarkArray.MarkRecord[i]
for i in mark_indices]
self.MarkArray.MarkCount = len(self.MarkArray.MarkRecord)
base_indices = self.BaseCoverage.subset(s.glyphs)
self.BaseArray.BaseRecord = [self.BaseArray.BaseRecord[i]
for i in base_indices]
self.BaseArray.BaseCount = len(self.BaseArray.BaseRecord)
# Prune empty classes
class_indices = _uniq_sort(v.Class for v in self.MarkArray.MarkRecord)
self.ClassCount = len(class_indices)
for m in self.MarkArray.MarkRecord:
m.Class = class_indices.index(m.Class)
for b in self.BaseArray.BaseRecord:
b.BaseAnchor = [b.BaseAnchor[i] for i in class_indices]
return bool(self.ClassCount and
self.MarkArray.MarkCount and
self.BaseArray.BaseCount)
else:
assert 0, "unknown format: %s" % self.Format
@_add_method(fontTools.ttLib.tables.otTables.MarkBasePos)
def prune_post_subset(self, options):
if not options.hinting:
for m in self.MarkArray.MarkRecord:
m.MarkAnchor.prune_hints()
for b in self.BaseArray.BaseRecord:
for a in b.BaseAnchor:
a.prune_hints()
return True
@_add_method(fontTools.ttLib.tables.otTables.MarkLigPos)
def subset_glyphs(self, s):
if self.Format == 1:
mark_indices = self.MarkCoverage.subset(s.glyphs)
self.MarkArray.MarkRecord = [self.MarkArray.MarkRecord[i]
for i in mark_indices]
self.MarkArray.MarkCount = len(self.MarkArray.MarkRecord)
ligature_indices = self.LigatureCoverage.subset(s.glyphs)
self.LigatureArray.LigatureAttach = [self.LigatureArray.LigatureAttach[i]
for i in ligature_indices]
self.LigatureArray.LigatureCount = len(self.LigatureArray.LigatureAttach)
# Prune empty classes
class_indices = _uniq_sort(v.Class for v in self.MarkArray.MarkRecord)
self.ClassCount = len(class_indices)
for m in self.MarkArray.MarkRecord:
m.Class = class_indices.index(m.Class)
for l in self.LigatureArray.LigatureAttach:
for c in l.ComponentRecord:
c.LigatureAnchor = [c.LigatureAnchor[i] for i in class_indices]
return bool(self.ClassCount and
self.MarkArray.MarkCount and
self.LigatureArray.LigatureCount)
else:
assert 0, "unknown format: %s" % self.Format
@_add_method(fontTools.ttLib.tables.otTables.MarkLigPos)
def prune_post_subset(self, options):
if not options.hinting:
for m in self.MarkArray.MarkRecord:
m.MarkAnchor.prune_hints()
for l in self.LigatureArray.LigatureAttach:
for c in l.ComponentRecord:
for a in c.LigatureAnchor:
a.prune_hints()
return True
@_add_method(fontTools.ttLib.tables.otTables.MarkMarkPos)
def subset_glyphs(self, s):
if self.Format == 1:
mark1_indices = self.Mark1Coverage.subset(s.glyphs)
self.Mark1Array.MarkRecord = [self.Mark1Array.MarkRecord[i]
for i in mark1_indices]
self.Mark1Array.MarkCount = len(self.Mark1Array.MarkRecord)
mark2_indices = self.Mark2Coverage.subset(s.glyphs)
self.Mark2Array.Mark2Record = [self.Mark2Array.Mark2Record[i]
for i in mark2_indices]
self.Mark2Array.MarkCount = len(self.Mark2Array.Mark2Record)
# Prune empty classes
class_indices = _uniq_sort(v.Class for v in self.Mark1Array.MarkRecord)
self.ClassCount = len(class_indices)
for m in self.Mark1Array.MarkRecord:
m.Class = class_indices.index(m.Class)
for b in self.Mark2Array.Mark2Record:
b.Mark2Anchor = [b.Mark2Anchor[i] for i in class_indices]
return bool(self.ClassCount and
self.Mark1Array.MarkCount and
self.Mark2Array.MarkCount)
else:
assert 0, "unknown format: %s" % self.Format
@_add_method(fontTools.ttLib.tables.otTables.MarkMarkPos)
def prune_post_subset(self, options):
  """When hinting is not requested, strip device tables / contour anchor
  points from every mark1 and mark2 anchor."""
  if not options.hinting:
    # Drop device tables or contour anchor point
    for m in self.Mark1Array.MarkRecord:
      m.MarkAnchor.prune_hints()
    for b in self.Mark2Array.Mark2Record:
      # BUG FIX: was `rec.Mark2Anchor` -- `rec` is undefined (NameError);
      # the loop variable is `b` (same shape as MarkBasePos above).
      for m in b.Mark2Anchor:
        m.prune_hints()
  return True
@_add_method(fontTools.ttLib.tables.otTables.SingleSubst,
fontTools.ttLib.tables.otTables.MultipleSubst,
fontTools.ttLib.tables.otTables.AlternateSubst,
fontTools.ttLib.tables.otTables.LigatureSubst,
fontTools.ttLib.tables.otTables.ReverseChainSingleSubst,
fontTools.ttLib.tables.otTables.SinglePos,
fontTools.ttLib.tables.otTables.PairPos,
fontTools.ttLib.tables.otTables.CursivePos,
fontTools.ttLib.tables.otTables.MarkBasePos,
fontTools.ttLib.tables.otTables.MarkLigPos,
fontTools.ttLib.tables.otTables.MarkMarkPos)
def subset_lookups(self, lookup_indices):
pass
@_add_method(fontTools.ttLib.tables.otTables.SingleSubst,
fontTools.ttLib.tables.otTables.MultipleSubst,
fontTools.ttLib.tables.otTables.AlternateSubst,
fontTools.ttLib.tables.otTables.LigatureSubst,
fontTools.ttLib.tables.otTables.ReverseChainSingleSubst,
fontTools.ttLib.tables.otTables.SinglePos,
fontTools.ttLib.tables.otTables.PairPos,
fontTools.ttLib.tables.otTables.CursivePos,
fontTools.ttLib.tables.otTables.MarkBasePos,
fontTools.ttLib.tables.otTables.MarkLigPos,
fontTools.ttLib.tables.otTables.MarkMarkPos)
def collect_lookups(self):
return []
@_add_method(fontTools.ttLib.tables.otTables.SingleSubst,
fontTools.ttLib.tables.otTables.MultipleSubst,
fontTools.ttLib.tables.otTables.AlternateSubst,
fontTools.ttLib.tables.otTables.LigatureSubst,
fontTools.ttLib.tables.otTables.ContextSubst,
fontTools.ttLib.tables.otTables.ChainContextSubst,
fontTools.ttLib.tables.otTables.ReverseChainSingleSubst,
fontTools.ttLib.tables.otTables.SinglePos,
fontTools.ttLib.tables.otTables.PairPos,
fontTools.ttLib.tables.otTables.CursivePos,
fontTools.ttLib.tables.otTables.MarkBasePos,
fontTools.ttLib.tables.otTables.MarkLigPos,
fontTools.ttLib.tables.otTables.MarkMarkPos,
fontTools.ttLib.tables.otTables.ContextPos,
fontTools.ttLib.tables.otTables.ChainContextPos)
def prune_pre_subset(self, options):
return True
@_add_method(fontTools.ttLib.tables.otTables.SingleSubst,
             fontTools.ttLib.tables.otTables.MultipleSubst,
             fontTools.ttLib.tables.otTables.AlternateSubst,
             fontTools.ttLib.tables.otTables.LigatureSubst,
             fontTools.ttLib.tables.otTables.ReverseChainSingleSubst,
             fontTools.ttLib.tables.otTables.ContextSubst,
             fontTools.ttLib.tables.otTables.ChainContextSubst,
             fontTools.ttLib.tables.otTables.ContextPos,
             fontTools.ttLib.tables.otTables.ChainContextPos)
def prune_post_subset(self, options):
  """Default post-subset pruning: nothing to prune for these subtable
  types.  Returns True meaning the subtable should be kept."""
  return True
@_add_method(fontTools.ttLib.tables.otTables.SingleSubst,
             fontTools.ttLib.tables.otTables.AlternateSubst,
             fontTools.ttLib.tables.otTables.ReverseChainSingleSubst)
def may_have_non_1to1(self):
  """These substitution types always map one glyph to one glyph, so
  glyph closure never needs to widen its input set through them."""
  return False
@_add_method(fontTools.ttLib.tables.otTables.MultipleSubst,
             fontTools.ttLib.tables.otTables.LigatureSubst,
             fontTools.ttLib.tables.otTables.ContextSubst,
             fontTools.ttLib.tables.otTables.ChainContextSubst)
def may_have_non_1to1(self):
  """These substitution types can map one glyph to many (or many to
  one), so closure through them cannot assume a 1:1 glyph mapping."""
  return True
@_add_method(fontTools.ttLib.tables.otTables.ContextSubst,
             fontTools.ttLib.tables.otTables.ChainContextSubst,
             fontTools.ttLib.tables.otTables.ContextPos,
             fontTools.ttLib.tables.otTables.ChainContextPos)
def __classify_context(self):
  """Return a cached ContextHelper describing how to access this
  contextual subtable uniformly, abstracting over Subst vs Pos,
  plain vs Chain, and subtable Formats 1/2/3 (which use different
  attribute names for coverage, rules and lookup records).
  Returns None for unknown formats."""
  class ContextHelper(object):
    def __init__(self, klass, Format):
      # Derive the short ('Sub'/'Pos') and long ('Subst'/'Pos') tags
      # used to compose OT attribute names below.
      if klass.__name__.endswith('Subst'):
        Typ = 'Sub'
        Type = 'Subst'
      else:
        Typ = 'Pos'
        Type = 'Pos'
      if klass.__name__.startswith('Chain'):
        Chain = 'Chain'
      else:
        Chain = ''
      ChainTyp = Chain+Typ
      self.Typ = Typ
      self.Type = Type
      self.Chain = Chain
      self.ChainTyp = ChainTyp
      # e.g. 'SubstLookupRecord' / 'PosLookupRecord'
      self.LookupRecord = Type+'LookupRecord'
      if Format == 1:
        # Format 1: glyph-based rules; context data is implicit.
        Coverage = lambda r: r.Coverage
        ChainCoverage = lambda r: r.Coverage
        ContextData = lambda r:(None,)
        ChainContextData = lambda r:(None, None, None)
        RuleData = lambda r:(r.Input,)
        ChainRuleData = lambda r:(r.Backtrack, r.Input, r.LookAhead)
        SetRuleData = None
        ChainSetRuleData = None
      elif Format == 2:
        # Format 2: class-based rules; ContextData carries the class
        # definitions in the same order RuleData yields class lists.
        Coverage = lambda r: r.Coverage
        ChainCoverage = lambda r: r.Coverage
        ContextData = lambda r:(r.ClassDef,)
        ChainContextData = lambda r:(r.LookAheadClassDef,
                                     r.InputClassDef,
                                     r.BacktrackClassDef)
        RuleData = lambda r:(r.Class,)
        ChainRuleData = lambda r:(r.LookAhead, r.Input, r.Backtrack)
        # Format 2 is the only format whose rule data can be written
        # back (classes are remapped after subsetting).
        def SetRuleData(r, d):(r.Class,) = d
        def ChainSetRuleData(r, d):(r.LookAhead, r.Input, r.Backtrack) = d
      elif Format == 3:
        # Format 3: coverage-based; a single inline rule.
        Coverage = lambda r: r.Coverage[0]
        ChainCoverage = lambda r: r.InputCoverage[0]
        ContextData = None
        ChainContextData = None
        RuleData = lambda r: r.Coverage
        ChainRuleData = lambda r:(r.LookAheadCoverage +
                                  r.InputCoverage +
                                  r.BacktrackCoverage)
        SetRuleData = None
        ChainSetRuleData = None
      else:
        assert 0, "unknown format: %s" % Format
      # Select the plain or Chain variant of each accessor.
      if Chain:
        self.Coverage = ChainCoverage
        self.ContextData = ChainContextData
        self.RuleData = ChainRuleData
        self.SetRuleData = ChainSetRuleData
      else:
        self.Coverage = Coverage
        self.ContextData = ContextData
        self.RuleData = RuleData
        self.SetRuleData = SetRuleData
      # Attribute names for the rule containers, plus an Intersect
      # predicate matching how each format encodes rule positions
      # (glyph names for Format 1, class values for Format 2).
      if Format == 1:
        self.Rule = ChainTyp+'Rule'
        self.RuleCount = ChainTyp+'RuleCount'
        self.RuleSet = ChainTyp+'RuleSet'
        self.RuleSetCount = ChainTyp+'RuleSetCount'
        self.Intersect = lambda glyphs, c, r: [r] if r in glyphs else []
      elif Format == 2:
        self.Rule = ChainTyp+'ClassRule'
        self.RuleCount = ChainTyp+'ClassRuleCount'
        self.RuleSet = ChainTyp+'ClassSet'
        self.RuleSetCount = ChainTyp+'ClassSetCount'
        self.Intersect = lambda glyphs, c, r: c.intersect_class(glyphs, r)
        self.ClassDef = 'InputClassDef' if Chain else 'ClassDef'
        self.Input = 'Input' if Chain else 'Class'
  if self.Format not in [1, 2, 3]:
    return None # Don't shoot the messenger; let it go
  # Cache one helper per (class, Format) on the class itself.
  if not hasattr(self.__class__, "__ContextHelpers"):
    self.__class__.__ContextHelpers = {}
  if self.Format not in self.__class__.__ContextHelpers:
    helper = ContextHelper(self.__class__, self.Format)
    self.__class__.__ContextHelpers[self.Format] = helper
  return self.__class__.__ContextHelpers[self.Format]
@_add_method(fontTools.ttLib.tables.otTables.ContextSubst,
             fontTools.ttLib.tables.otTables.ChainContextSubst)
def closure_glyphs(self, s, cur_glyphs=None):
  """Add to s.glyphs every glyph reachable through this contextual
  substitution from the current glyph set.

  For each rule whose full context intersects s.glyphs, recurse into
  the referenced lookups, narrowing cur_glyphs to the glyphs that can
  occupy the rule position.  Once any earlier lookup in the rule may
  be non-1:1 ("chaos"), the position can no longer be tracked and all
  of s.glyphs must be assumed.
  """
  # Fix: identity test against None ('is', not '=='); '==' could
  # invoke __eq__ on a custom glyph-set object.
  if cur_glyphs is None: cur_glyphs = s.glyphs
  c = self.__classify_context()
  indices = c.Coverage(self).intersect(s.glyphs)
  if not indices:
    return []
  cur_glyphs = c.Coverage(self).intersect_glyphs(s.glyphs)
  if self.Format == 1:
    ContextData = c.ContextData(self)
    rss = getattr(self, c.RuleSet)
    for i in indices:
      if not rss[i]: continue
      for r in getattr(rss[i], c.Rule):
        if not r: continue
        # Rule applies only if every position's glyphs intersect s.glyphs.
        if all(all(c.Intersect(s.glyphs, cd, k) for k in klist)
               for cd,klist in zip(ContextData, c.RuleData(r))):
          chaos = False
          for ll in getattr(r, c.LookupRecord):
            if not ll: continue
            seqi = ll.SequenceIndex
            if chaos:
              pos_glyphs = s.glyphs
            else:
              if seqi == 0:
                # First position is constrained by the Coverage glyph.
                pos_glyphs = set([c.Coverage(self).glyphs[i]])
              else:
                pos_glyphs = set([r.Input[seqi - 1]])
            lookup = s.table.LookupList.Lookup[ll.LookupListIndex]
            chaos = chaos or lookup.may_have_non_1to1()
            lookup.closure_glyphs(s, cur_glyphs=pos_glyphs)
  elif self.Format == 2:
    ClassDef = getattr(self, c.ClassDef)
    indices = ClassDef.intersect(cur_glyphs)
    ContextData = c.ContextData(self)
    rss = getattr(self, c.RuleSet)
    for i in indices:
      if not rss[i]: continue
      for r in getattr(rss[i], c.Rule):
        if not r: continue
        if all(all(c.Intersect(s.glyphs, cd, k) for k in klist)
               for cd,klist in zip(ContextData, c.RuleData(r))):
          chaos = False
          for ll in getattr(r, c.LookupRecord):
            if not ll: continue
            seqi = ll.SequenceIndex
            if chaos:
              pos_glyphs = s.glyphs
            else:
              if seqi == 0:
                pos_glyphs = ClassDef.intersect_class(cur_glyphs, i)
              else:
                pos_glyphs = ClassDef.intersect_class(s.glyphs,
                                                      getattr(r, c.Input)[seqi - 1])
            lookup = s.table.LookupList.Lookup[ll.LookupListIndex]
            chaos = chaos or lookup.may_have_non_1to1()
            lookup.closure_glyphs(s, cur_glyphs=pos_glyphs)
  elif self.Format == 3:
    # Single inline rule; all coverages must intersect for it to apply.
    if not all(x.intersect(s.glyphs) for x in c.RuleData(self)):
      return []
    r = self
    chaos = False
    for ll in getattr(r, c.LookupRecord):
      if not ll: continue
      seqi = ll.SequenceIndex
      if chaos:
        pos_glyphs = s.glyphs
      else:
        if seqi == 0:
          pos_glyphs = cur_glyphs
        else:
          pos_glyphs = r.InputCoverage[seqi].intersect_glyphs(s.glyphs)
      lookup = s.table.LookupList.Lookup[ll.LookupListIndex]
      chaos = chaos or lookup.may_have_non_1to1()
      lookup.closure_glyphs(s, cur_glyphs=pos_glyphs)
  else:
    assert 0, "unknown format: %s" % self.Format
@_add_method(fontTools.ttLib.tables.otTables.ContextSubst,
             fontTools.ttLib.tables.otTables.ContextPos,
             fontTools.ttLib.tables.otTables.ChainContextSubst,
             fontTools.ttLib.tables.otTables.ChainContextPos)
def subset_glyphs(self, s):
  """Drop rules and rule sets that reference glyphs/classes not in
  s.glyphs.  Returns True if anything survives (False means the whole
  subtable can be dropped)."""
  c = self.__classify_context()
  if self.Format == 1:
    # Glyph-based: keep rule sets whose coverage glyph survives, then
    # keep only rules whose every position glyph survives.
    indices = self.Coverage.subset(s.glyphs)
    rss = getattr(self, c.RuleSet)
    rss = [rss[i] for i in indices]
    for rs in rss:
      if not rs: continue
      ss = getattr(rs, c.Rule)
      ss = [r for r in ss
            if r and all(all(g in s.glyphs for g in glist)
                         for glist in c.RuleData(r))]
      setattr(rs, c.Rule, ss)
      setattr(rs, c.RuleCount, len(ss))
    # Prune empty subrulesets
    rss = [rs for rs in rss if rs and getattr(rs, c.Rule)]
    setattr(self, c.RuleSet, rss)
    setattr(self, c.RuleSetCount, len(rss))
    return bool(rss)
  elif self.Format == 2:
    if not self.Coverage.subset(s.glyphs):
      return False
    # Class-based: subset class definitions, keep rules whose classes
    # all survive, then renumber classes via klass_maps.
    indices = getattr(self, c.ClassDef).subset(self.Coverage.glyphs,
                                               remap=False)
    rss = getattr(self, c.RuleSet)
    rss = [rss[i] for i in indices]
    ContextData = c.ContextData(self)
    klass_maps = [x.subset(s.glyphs, remap=True) for x in ContextData]
    for rs in rss:
      if not rs: continue
      ss = getattr(rs, c.Rule)
      ss = [r for r in ss
            if r and all(all(k in klass_map for k in klist)
                         for klass_map,klist in zip(klass_maps, c.RuleData(r)))]
      setattr(rs, c.Rule, ss)
      setattr(rs, c.RuleCount, len(ss))
      # Remap rule classes
      for r in ss:
        c.SetRuleData(r, [[klass_map.index(k) for k in klist]
                          for klass_map,klist in zip(klass_maps, c.RuleData(r))])
    # Prune empty subrulesets
    rss = [rs for rs in rss if rs and getattr(rs, c.Rule)]
    setattr(self, c.RuleSet, rss)
    setattr(self, c.RuleSetCount, len(rss))
    return bool(rss)
  elif self.Format == 3:
    # Coverage-based: survives only if every coverage still matches.
    return all(x.subset(s.glyphs) for x in c.RuleData(self))
  else:
    assert 0, "unknown format: %s" % self.Format
@_add_method(fontTools.ttLib.tables.otTables.ContextSubst,
             fontTools.ttLib.tables.otTables.ChainContextSubst,
             fontTools.ttLib.tables.otTables.ContextPos,
             fontTools.ttLib.tables.otTables.ChainContextPos)
def subset_lookups(self, lookup_indices):
  """Drop lookup records that reference removed lookups and renumber
  the survivors to their positions in lookup_indices."""
  c = self.__classify_context()
  if self.Format in [1, 2]:
    for rs in getattr(self, c.RuleSet):
      if not rs: continue
      for r in getattr(rs, c.Rule):
        if not r: continue
        # Keep only records pointing at retained lookups...
        setattr(r, c.LookupRecord,
                [ll for ll in getattr(r, c.LookupRecord)
                 if ll and ll.LookupListIndex in lookup_indices])
        # ...then remap old lookup indices to new ones.
        for ll in getattr(r, c.LookupRecord):
          if not ll: continue
          ll.LookupListIndex = lookup_indices.index(ll.LookupListIndex)
  elif self.Format == 3:
    # Format 3 stores its records directly on the subtable.
    setattr(self, c.LookupRecord,
            [ll for ll in getattr(self, c.LookupRecord)
             if ll and ll.LookupListIndex in lookup_indices])
    for ll in getattr(self, c.LookupRecord):
      if not ll: continue
      ll.LookupListIndex = lookup_indices.index(ll.LookupListIndex)
  else:
    assert 0, "unknown format: %s" % self.Format
@_add_method(fontTools.ttLib.tables.otTables.ContextSubst,
             fontTools.ttLib.tables.otTables.ChainContextSubst,
             fontTools.ttLib.tables.otTables.ContextPos,
             fontTools.ttLib.tables.otTables.ChainContextPos)
def collect_lookups(self):
  """Return the LookupListIndex of every lookup record referenced by
  this contextual subtable (duplicates included)."""
  c = self.__classify_context()
  if self.Format in [1, 2]:
    found = []
    for ruleset in getattr(self, c.RuleSet):
      if not ruleset: continue
      for rule in getattr(ruleset, c.Rule):
        if not rule: continue
        for record in getattr(rule, c.LookupRecord):
          if record:
            found.append(record.LookupListIndex)
    return found
  elif self.Format == 3:
    records = getattr(self, c.LookupRecord)
    return [record.LookupListIndex for record in records if record]
  else:
    assert 0, "unknown format: %s" % self.Format
@_add_method(fontTools.ttLib.tables.otTables.ExtensionSubst)
def closure_glyphs(self, s, cur_glyphs=None):
  """Delegate glyph closure to the wrapped extension subtable."""
  assert self.Format == 1, "unknown format: %s" % self.Format
  self.ExtSubTable.closure_glyphs(s, cur_glyphs)
@_add_method(fontTools.ttLib.tables.otTables.ExtensionSubst)
def may_have_non_1to1(self):
  """Delegate the 1:1-mapping query to the wrapped extension subtable."""
  assert self.Format == 1, "unknown format: %s" % self.Format
  return self.ExtSubTable.may_have_non_1to1()
@_add_method(fontTools.ttLib.tables.otTables.ExtensionSubst,
             fontTools.ttLib.tables.otTables.ExtensionPos)
def prune_pre_subset(self, options):
  """Delegate pre-subset pruning to the wrapped extension subtable."""
  assert self.Format == 1, "unknown format: %s" % self.Format
  return self.ExtSubTable.prune_pre_subset(options)
@_add_method(fontTools.ttLib.tables.otTables.ExtensionSubst,
             fontTools.ttLib.tables.otTables.ExtensionPos)
def subset_glyphs(self, s):
  """Delegate glyph subsetting to the wrapped extension subtable."""
  assert self.Format == 1, "unknown format: %s" % self.Format
  return self.ExtSubTable.subset_glyphs(s)
@_add_method(fontTools.ttLib.tables.otTables.ExtensionSubst,
             fontTools.ttLib.tables.otTables.ExtensionPos)
def prune_post_subset(self, options):
  """Delegate post-subset pruning to the wrapped extension subtable."""
  assert self.Format == 1, "unknown format: %s" % self.Format
  return self.ExtSubTable.prune_post_subset(options)
@_add_method(fontTools.ttLib.tables.otTables.ExtensionSubst,
             fontTools.ttLib.tables.otTables.ExtensionPos)
def subset_lookups(self, lookup_indices):
  """Delegate lookup-index remapping to the wrapped extension subtable."""
  assert self.Format == 1, "unknown format: %s" % self.Format
  return self.ExtSubTable.subset_lookups(lookup_indices)
@_add_method(fontTools.ttLib.tables.otTables.ExtensionSubst,
             fontTools.ttLib.tables.otTables.ExtensionPos)
def collect_lookups(self):
  """Delegate lookup collection to the wrapped extension subtable."""
  assert self.Format == 1, "unknown format: %s" % self.Format
  return self.ExtSubTable.collect_lookups()
@_add_method(fontTools.ttLib.tables.otTables.Lookup)
def closure_glyphs(self, s, cur_glyphs=None):
  """Run glyph closure over every non-empty subtable of this lookup."""
  for subtable in self.SubTable:
    if subtable:
      subtable.closure_glyphs(s, cur_glyphs)
@_add_method(fontTools.ttLib.tables.otTables.Lookup)
def prune_pre_subset(self, options):
  """Pre-subset-prune every subtable; True if any reported a change.
  The list is materialized first so every subtable is visited even
  after one returns True."""
  results = [subtable.prune_pre_subset(options)
             for subtable in self.SubTable if subtable]
  return any(results)
@_add_method(fontTools.ttLib.tables.otTables.Lookup)
def subset_glyphs(self, s):
  """Subset each subtable and drop those left empty; True if any remain."""
  kept = []
  for subtable in self.SubTable:
    if subtable and subtable.subset_glyphs(s):
      kept.append(subtable)
  self.SubTable = kept
  self.SubTableCount = len(kept)
  return len(kept) > 0
@_add_method(fontTools.ttLib.tables.otTables.Lookup)
def prune_post_subset(self, options):
  """Post-subset-prune every subtable; True if any reported a change.
  The list is materialized first so every subtable is visited."""
  results = [subtable.prune_post_subset(options)
             for subtable in self.SubTable if subtable]
  return any(results)
@_add_method(fontTools.ttLib.tables.otTables.Lookup)
def subset_lookups(self, lookup_indices):
  """Remap lookup references inside every subtable.

  Fix: skip None subtables, for consistency with the sibling Lookup
  methods (closure_glyphs, prune_pre_subset, subset_glyphs, ...) which
  all guard against empty entries; iterating one here would raise
  AttributeError.
  """
  for st in self.SubTable:
    if not st: continue
    st.subset_lookups(lookup_indices)
@_add_method(fontTools.ttLib.tables.otTables.Lookup)
def collect_lookups(self):
  """Gather lookup indices referenced by all subtables, sorted unique."""
  found = []
  for subtable in self.SubTable:
    if subtable:
      found.extend(subtable.collect_lookups())
  return _uniq_sort(found)
@_add_method(fontTools.ttLib.tables.otTables.Lookup)
def may_have_non_1to1(self):
  """True if any subtable may substitute other than one-to-one."""
  for subtable in self.SubTable:
    if subtable and subtable.may_have_non_1to1():
      return True
  return False
@_add_method(fontTools.ttLib.tables.otTables.LookupList)
def prune_pre_subset(self, options):
  """Pre-subset-prune every lookup; True if any reported a change.
  Materialized so every lookup runs even after one returns True."""
  results = [lookup.prune_pre_subset(options)
             for lookup in self.Lookup if lookup]
  return any(results)
@_add_method(fontTools.ttLib.tables.otTables.LookupList)
def subset_glyphs(self, s):
  "Returns the indices of nonempty lookups."
  indices = []
  for i, lookup in enumerate(self.Lookup):
    if lookup and lookup.subset_glyphs(s):
      indices.append(i)
  return indices
@_add_method(fontTools.ttLib.tables.otTables.LookupList)
def prune_post_subset(self, options):
  """Post-subset-prune every lookup; True if any reported a change.
  Materialized so every lookup runs even after one returns True."""
  results = [lookup.prune_post_subset(options)
             for lookup in self.Lookup if lookup]
  return any(results)
@_add_method(fontTools.ttLib.tables.otTables.LookupList)
def subset_lookups(self, lookup_indices):
  """Keep only the lookups at lookup_indices (in that order) and let
  each survivor remap its internal lookup references."""
  count = self.LookupCount
  self.Lookup = [self.Lookup[i] for i in lookup_indices if i < count]
  self.LookupCount = len(self.Lookup)
  for lookup in self.Lookup:
    lookup.subset_lookups(lookup_indices)
@_add_method(fontTools.ttLib.tables.otTables.LookupList)
def closure_lookups(self, lookup_indices):
  """Return the transitive closure of lookup_indices as a sorted
  unique list: all lookups reachable from the given ones through
  contextual lookup records."""
  lookup_indices = _uniq_sort(lookup_indices)
  recurse = lookup_indices
  while True:
    # Lookups referenced by the frontier discovered last round.
    recurse_lookups = sum((self.Lookup[i].collect_lookups()
                           for i in recurse if i < self.LookupCount), [])
    # Keep only new, in-range indices.
    recurse_lookups = [l for l in recurse_lookups
                       if l not in lookup_indices and l < self.LookupCount]
    if not recurse_lookups:
      return _uniq_sort(lookup_indices)
    recurse_lookups = _uniq_sort(recurse_lookups)
    lookup_indices.extend(recurse_lookups)
    recurse = recurse_lookups
@_add_method(fontTools.ttLib.tables.otTables.Feature)
def subset_lookups(self, lookup_indices):
  """Keep only lookups listed in lookup_indices and renumber them to
  their new positions; return the number kept."""
  survivors = [l for l in self.LookupListIndex if l in lookup_indices]
  self.LookupListIndex = [lookup_indices.index(l) for l in survivors]
  self.LookupCount = len(self.LookupListIndex)
  return self.LookupCount
@_add_method(fontTools.ttLib.tables.otTables.Feature)
def collect_lookups(self):
  """Return a copy of this feature's lookup index list."""
  return list(self.LookupListIndex)
@_add_method(fontTools.ttLib.tables.otTables.FeatureList)
def subset_lookups(self, lookup_indices):
  "Returns the indices of nonempty features."
  feature_indices = []
  for i, record in enumerate(self.FeatureRecord):
    if record.Feature.subset_lookups(lookup_indices):
      feature_indices.append(i)
  self.subset_features(feature_indices)
  return feature_indices
@_add_method(fontTools.ttLib.tables.otTables.FeatureList)
def collect_lookups(self, feature_indices):
  """Gather lookup indices from the given (in-range) features,
  sorted unique."""
  found = []
  for i in feature_indices:
    if i < self.FeatureCount:
      found.extend(self.FeatureRecord[i].Feature.collect_lookups())
  return _uniq_sort(found)
@_add_method(fontTools.ttLib.tables.otTables.FeatureList)
def subset_features(self, feature_indices):
  """Keep only the feature records at the given indices; True if any
  remain."""
  kept = [self.FeatureRecord[i] for i in feature_indices]
  self.FeatureRecord = kept
  self.FeatureCount = len(kept)
  return bool(self.FeatureCount)
@_add_method(fontTools.ttLib.tables.otTables.DefaultLangSys,
             fontTools.ttLib.tables.otTables.LangSys)
def subset_features(self, feature_indices):
  """Keep only features listed in feature_indices, renumbering them to
  their new positions; remap or clear (0xFFFF) the required feature.

  Returns True if the language system still references any feature.
  (The original filtered FeatureIndex once and then filtered it again
  while mapping; the two passes are merged here.)
  """
  if self.ReqFeatureIndex in feature_indices:
    self.ReqFeatureIndex = feature_indices.index(self.ReqFeatureIndex)
  else:
    self.ReqFeatureIndex = 65535  # 0xFFFF = no required feature
  # Filter and remap in a single pass.
  self.FeatureIndex = [feature_indices.index(f) for f in self.FeatureIndex
                       if f in feature_indices]
  self.FeatureCount = len(self.FeatureIndex)
  return bool(self.FeatureCount or self.ReqFeatureIndex != 65535)
@_add_method(fontTools.ttLib.tables.otTables.DefaultLangSys,
             fontTools.ttLib.tables.otTables.LangSys)
def collect_features(self):
  """Return sorted unique feature indices used by this language
  system, including the required feature if set (!= 0xFFFF)."""
  indices = list(self.FeatureIndex)
  if self.ReqFeatureIndex != 65535:
    indices.append(self.ReqFeatureIndex)
  return _uniq_sort(indices)
@_add_method(fontTools.ttLib.tables.otTables.Script)
def subset_features(self, feature_indices):
  """Subset features in the default and per-language systems, dropping
  any that end up empty; True if the script still has content."""
  if self.DefaultLangSys:
    if not self.DefaultLangSys.subset_features(feature_indices):
      self.DefaultLangSys = None
  kept = [record for record in self.LangSysRecord
          if record.LangSys.subset_features(feature_indices)]
  self.LangSysRecord = kept
  self.LangSysCount = len(kept)
  return bool(self.LangSysCount or self.DefaultLangSys)
@_add_method(fontTools.ttLib.tables.otTables.Script)
def collect_features(self):
  """Gather feature indices from all language systems (including the
  default one), sorted unique."""
  found = []
  for record in self.LangSysRecord:
    found.extend(record.LangSys.collect_features())
  if self.DefaultLangSys:
    found.extend(self.DefaultLangSys.collect_features())
  return _uniq_sort(found)
@_add_method(fontTools.ttLib.tables.otTables.ScriptList)
def subset_features(self, feature_indices):
  """Subset features in every script, dropping scripts left empty;
  True if any script remains."""
  kept = [record for record in self.ScriptRecord
          if record.Script.subset_features(feature_indices)]
  self.ScriptRecord = kept
  self.ScriptCount = len(kept)
  return bool(self.ScriptCount)
@_add_method(fontTools.ttLib.tables.otTables.ScriptList)
def collect_features(self):
  """Gather feature indices from all scripts, sorted unique."""
  found = []
  for record in self.ScriptRecord:
    found.extend(record.Script.collect_features())
  return _uniq_sort(found)
@_add_method(fontTools.ttLib.getTableClass('GSUB'))
def closure_glyphs(self, s):
  """Iterate all referenced GSUB lookups to a fixed point, adding to
  s.glyphs every glyph reachable via substitution from the current
  glyph set.  s.table is temporarily set so nested closures can
  resolve lookup indices."""
  s.table = self.table
  feature_indices = self.table.ScriptList.collect_features()
  lookup_indices = self.table.FeatureList.collect_lookups(feature_indices)
  while True:
    orig_glyphs = s.glyphs.copy()
    for i in lookup_indices:
      if i >= self.table.LookupList.LookupCount: continue
      if not self.table.LookupList.Lookup[i]: continue
      self.table.LookupList.Lookup[i].closure_glyphs(s)
    # Fixed point reached: a full pass added no new glyphs.
    if orig_glyphs == s.glyphs:
      break
  del s.table
@_add_method(fontTools.ttLib.getTableClass('GSUB'),
             fontTools.ttLib.getTableClass('GPOS'))
def subset_glyphs(self, s):
  """Subset the lookup list against the post-GSUB-closure glyph set,
  then drop and prune lookups that became empty or unreferenced."""
  s.glyphs = s.glyphs_gsubed
  surviving = self.table.LookupList.subset_glyphs(s)
  self.subset_lookups(surviving)
  self.prune_lookups()
  return True
@_add_method(fontTools.ttLib.getTableClass('GSUB'),
             fontTools.ttLib.getTableClass('GPOS'))
def subset_lookups(self, lookup_indices):
  """Retains specified lookups, then removes empty features, language
  systems, and scripts."""
  self.table.LookupList.subset_lookups(lookup_indices)
  feature_indices = self.table.FeatureList.subset_lookups(lookup_indices)
  self.table.ScriptList.subset_features(feature_indices)
@_add_method(fontTools.ttLib.getTableClass('GSUB'),
             fontTools.ttLib.getTableClass('GPOS'))
def prune_lookups(self):
  "Remove unreferenced lookups"
  features = self.table.ScriptList.collect_features()
  lookups = self.table.FeatureList.collect_lookups(features)
  # Include lookups reachable indirectly through contextual rules.
  lookups = self.table.LookupList.closure_lookups(lookups)
  self.subset_lookups(lookups)
@_add_method(fontTools.ttLib.getTableClass('GSUB'),
             fontTools.ttLib.getTableClass('GPOS'))
def subset_feature_tags(self, feature_tags):
  """Keep only features whose tag appears in feature_tags."""
  kept = []
  for i, record in enumerate(self.table.FeatureList.FeatureRecord):
    if record.FeatureTag in feature_tags:
      kept.append(i)
  self.table.FeatureList.subset_features(kept)
  self.table.ScriptList.subset_features(kept)
@_add_method(fontTools.ttLib.getTableClass('GSUB'),
             fontTools.ttLib.getTableClass('GPOS'))
def prune_pre_subset(self, options):
  """Drop unwanted layout features (unless '*' keeps all), prune
  unreferenced lookups, then pre-subset-prune the lookup list."""
  if '*' not in options.layout_features:
    self.subset_feature_tags(options.layout_features)
  self.prune_lookups()
  self.table.LookupList.prune_pre_subset(options)
  return True
@_add_method(fontTools.ttLib.getTableClass('GSUB'),
             fontTools.ttLib.getTableClass('GPOS'))
def prune_post_subset(self, options):
  """Run the post-subset pruning pass over the lookup list."""
  self.table.LookupList.prune_post_subset(options)
  return True
@_add_method(fontTools.ttLib.getTableClass('GDEF'))
def subset_glyphs(self, s):
  """Subset the four optional GDEF sub-lists (ligature carets, mark
  attachment classes, glyph classes, attachment points) against the
  post-GSUB glyph set, dropping any sub-list left empty.  Returns True
  if anything remains."""
  glyphs = s.glyphs_gsubed
  table = self.table
  if table.LigCaretList:
    # Coverage.subset returns indices of surviving coverage entries.
    indices = table.LigCaretList.Coverage.subset(glyphs)
    table.LigCaretList.LigGlyph = [table.LigCaretList.LigGlyph[i]
                                   for i in indices]
    table.LigCaretList.LigGlyphCount = len(table.LigCaretList.LigGlyph)
    if not table.LigCaretList.LigGlyphCount:
      table.LigCaretList = None
  if table.MarkAttachClassDef:
    table.MarkAttachClassDef.classDefs = dict((g,v) for g,v in
                                              table.MarkAttachClassDef.
                                                classDefs.iteritems()
                                              if g in glyphs)
    if not table.MarkAttachClassDef.classDefs:
      table.MarkAttachClassDef = None
  if table.GlyphClassDef:
    table.GlyphClassDef.classDefs = dict((g,v) for g,v in
                                         table.GlyphClassDef.
                                           classDefs.iteritems()
                                         if g in glyphs)
    if not table.GlyphClassDef.classDefs:
      table.GlyphClassDef = None
  if table.AttachList:
    indices = table.AttachList.Coverage.subset(glyphs)
    table.AttachList.AttachPoint = [table.AttachList.AttachPoint[i]
                                    for i in indices]
    table.AttachList.GlyphCount = len(table.AttachList.AttachPoint)
    if not table.AttachList.GlyphCount:
      table.AttachList = None
  # Keep the table only if at least one sub-list survived.
  return bool(table.LigCaretList or
              table.MarkAttachClassDef or
              table.GlyphClassDef or
              table.AttachList)
@_add_method(fontTools.ttLib.getTableClass('kern'))
def prune_pre_subset(self, options):
  # Prune unknown kern table types
  recognized = [t for t in self.kernTables if hasattr(t, 'kernTable')]
  self.kernTables = recognized
  return bool(recognized)
@_add_method(fontTools.ttLib.getTableClass('kern'))
def subset_glyphs(self, s):
  """Keep only kern pairs where both glyphs survive; drop subtables
  left without pairs."""
  glyphs = s.glyphs_gsubed
  for t in self.kernTables:
    surviving = dict((pair, value)
                     for pair, value in t.kernTable.iteritems()
                     if pair[0] in glyphs and pair[1] in glyphs)
    t.kernTable = surviving
  self.kernTables = [t for t in self.kernTables if t.kernTable]
  return bool(self.kernTables)
@_add_method(fontTools.ttLib.getTableClass('vmtx'),
             fontTools.ttLib.getTableClass('hmtx'))
def subset_glyphs(self, s):
  """Keep metric entries only for retained glyphs."""
  kept = dict((name, metric) for name, metric in self.metrics.iteritems()
              if name in s.glyphs)
  self.metrics = kept
  return bool(kept)
@_add_method(fontTools.ttLib.getTableClass('hdmx'))
def subset_glyphs(self, s):
  # Keep, for each ppem size, only device-metric entries of retained
  # glyphs.
  # NOTE(review): `_dict` is not a builtin; presumably a module-level
  # alias for dict defined elsewhere in this file -- confirm it exists,
  # otherwise this line raises NameError at runtime.
  self.hdmx = dict((sz,_dict((g,v) for g,v in l.iteritems() if g in s.glyphs))
                   for sz,l in self.hdmx.iteritems())
  return bool(self.hdmx)
@_add_method(fontTools.ttLib.getTableClass('VORG'))
def subset_glyphs(self, s):
  """Keep vertical-origin records only for retained glyphs."""
  surviving = dict((g, rec) for g, rec in self.VOriginRecords.iteritems()
                   if g in s.glyphs)
  self.VOriginRecords = surviving
  self.numVertOriginYMetrics = len(surviving)
  return True # Never drop; has default metrics
@_add_method(fontTools.ttLib.getTableClass('post'))
def prune_pre_subset(self, options):
  """Switch to format 3.0 (no glyph names stored) unless glyph names
  were explicitly requested."""
  if not options.glyph_names:
    self.formatType = 3.0
  return True
@_add_method(fontTools.ttLib.getTableClass('post'))
def subset_glyphs(self, s):
  """Clear the extra-names list; that is all 'post' subsetting needs."""
  self.extraNames = []
  return True
@_add_method(fontTools.ttLib.getTableModule('glyf').Glyph)
def getComponentNamesFast(self, glyfTable):
  """Return component glyph names of a composite glyph by scanning its
  raw binary data, without fully expanding the glyph.  Returns [] for
  empty or non-composite glyphs (numberOfContours >= 0)."""
  if not self.data or struct.unpack(">h", self.data[:2])[0] >= 0:
    return [] # Not composite
  data = self.data
  i = 10  # skip numberOfContours (2) + bounding box (4 * 2)
  components = []
  more = 1
  while more:
    flags, glyphID = struct.unpack(">HH", data[i:i+4])
    i += 4
    flags = int(flags)
    components.append(glyfTable.getGlyphName(int(glyphID)))
    # Skip argument/transform payload per the flag bits.
    if flags & 0x0001: i += 4 # ARG_1_AND_2_ARE_WORDS
    else: i += 2
    if flags & 0x0008: i += 2 # WE_HAVE_A_SCALE
    elif flags & 0x0040: i += 4 # WE_HAVE_AN_X_AND_Y_SCALE
    elif flags & 0x0080: i += 8 # WE_HAVE_A_TWO_BY_TWO
    more = flags & 0x0020 # MORE_COMPONENTS
  return components
@_add_method(fontTools.ttLib.getTableModule('glyf').Glyph)
def remapComponentsFast(self, indices):
  """Rewrite component glyph IDs in a composite glyph's raw binary
  data: each old ID is replaced by its position in `indices` (the list
  of retained old glyph IDs).  No-op for empty/non-composite glyphs."""
  if not self.data or struct.unpack(">h", self.data[:2])[0] >= 0:
    return # Not composite
  data = array.array("B", self.data)
  i = 10  # skip numberOfContours + bounding box
  more = 1
  while more:
    flags =(data[i] << 8) | data[i+1]
    glyphID =(data[i+2] << 8) | data[i+3]
    # Remap
    glyphID = indices.index(glyphID)
    data[i+2] = glyphID >> 8
    data[i+3] = glyphID & 0xFF
    i += 4
    flags = int(flags)
    # Skip argument/transform payload per the flag bits.
    if flags & 0x0001: i += 4 # ARG_1_AND_2_ARE_WORDS
    else: i += 2
    if flags & 0x0008: i += 2 # WE_HAVE_A_SCALE
    elif flags & 0x0040: i += 4 # WE_HAVE_AN_X_AND_Y_SCALE
    elif flags & 0x0080: i += 8 # WE_HAVE_A_TWO_BY_TWO
    more = flags & 0x0020 # MORE_COMPONENTS
  self.data = data.tostring()
@_add_method(fontTools.ttLib.getTableModule('glyf').Glyph)
def dropInstructionsFast(self):
  """Strip TrueType instructions from a glyph's raw binary data.

  Simple glyphs: zero the instruction length and splice the
  instruction bytes out.  Composite glyphs: clear the
  WE_HAVE_INSTRUCTIONS flag on every component and truncate the data
  after the last component.  Pads the result to a 4-byte boundary.
  """
  if not self.data:
    return
  numContours = struct.unpack(">h", self.data[:2])[0]
  data = array.array("B", self.data)
  i = 10  # skip numberOfContours + bounding box
  if numContours >= 0:
    # Simple glyph: instructions follow the endPtsOfContours array.
    i += 2 * numContours # endPtsOfContours
    instructionLen =(data[i] << 8) | data[i+1]
    # Zero it
    data[i] = data [i+1] = 0
    i += 2
    if instructionLen:
      # Splice it out
      data = data[:i] + data[i+instructionLen:]
  else:
    # Composite glyph: walk components, clearing the instruction flag.
    more = 1
    while more:
      flags =(data[i] << 8) | data[i+1]
      # Turn instruction flag off
      flags &= ~0x0100 # WE_HAVE_INSTRUCTIONS
      data[i+0] = flags >> 8
      data[i+1] = flags & 0xFF
      i += 4
      flags = int(flags)
      if flags & 0x0001: i += 4 # ARG_1_AND_2_ARE_WORDS
      else: i += 2
      if flags & 0x0008: i += 2 # WE_HAVE_A_SCALE
      elif flags & 0x0040: i += 4 # WE_HAVE_AN_X_AND_Y_SCALE
      elif flags & 0x0080: i += 8 # WE_HAVE_A_TWO_BY_TWO
      more = flags & 0x0020 # MORE_COMPONENTS
    # Cut off
    data = data[:i]
  if len(data) % 4:
    # add pad bytes (note: reuses loop variable `i`, harmlessly --
    # `i` is not read after this point)
    nPadBytes = 4 -(len(data) % 4)
    for i in range(nPadBytes):
      data.append(0)
  self.data = data.tostring()
@_add_method(fontTools.ttLib.getTableClass('glyf'))
def closure_glyphs(self, s):
  """Add to s.glyphs all component glyphs referenced (transitively) by
  composite glyphs in the current set."""
  decompose = s.glyphs
  # I don't know if component glyphs can be composite themselves.
  # We handle them anyway.
  while True:
    components = set()
    for g in decompose:
      if g not in self.glyphs:
        continue
      gl = self.glyphs[g]
      if hasattr(gl, "data"):
        # Still in raw binary form: use the fast scanner.
        for c in gl.getComponentNamesFast(self):
          if c not in s.glyphs:
            components.add(c)
      else:
        # TTX seems to expand gid0..3 always
        if gl.isComposite():
          for c in gl.components:
            if c.glyphName not in s.glyphs:
              components.add(c.glyphName)
    components = set(c for c in components if c not in s.glyphs)
    if not components:
      break
    # Next round: decompose only the newly-found components.
    decompose = components
    s.glyphs.update(components)
@_add_method(fontTools.ttLib.getTableClass('glyf'))
def prune_pre_subset(self, options):
  """Blank out the .notdef outline when .notdef is kept but its
  outline is not wanted."""
  if options.notdef_glyph and not options.notdef_outline:
    notdef = self[self.glyphOrder[0]]
    # Yay, easy!
    notdef.__dict__.clear()
    notdef.data = ""
  return True
@_add_method(fontTools.ttLib.getTableClass('glyf'))
def subset_glyphs(self, s):
  """Keep only retained glyphs and remap component glyph IDs in raw
  composite data to the new, compacted glyph order."""
  self.glyphs = dict((g,v) for g,v in self.glyphs.iteritems() if g in s.glyphs)
  # Old glyph IDs that survive, in order; index = new glyph ID.
  indices = [i for i,g in enumerate(self.glyphOrder) if g in s.glyphs]
  for v in self.glyphs.itervalues():
    if hasattr(v, "data"):
      v.remapComponentsFast(indices)
    else:
      pass # No need
  self.glyphOrder = [g for g in self.glyphOrder if g in s.glyphs]
  # Don't drop empty 'glyf' tables, otherwise 'loca' doesn't get subset.
  return True
@_add_method(fontTools.ttLib.getTableClass('glyf'))
def prune_post_subset(self, options):
  """When hinting is not wanted, strip TrueType instructions from all
  glyphs: fast binary path for raw glyphs, empty Program otherwise."""
  if not options.hinting:
    for v in self.glyphs.itervalues():
      if hasattr(v, "data"):
        v.dropInstructionsFast()
      else:
        # Expanded glyph: replace its program with an empty one.
        v.program = fontTools.ttLib.tables.ttProgram.Program()
        v.program.fromBytecode([])
  return True
@_add_method(fontTools.ttLib.getTableClass('CFF '))
def prune_pre_subset(self, options):
  """Reduce the CFF to a single font and, if requested, replace the
  .notdef charstring with a bare endchar (no outline)."""
  cff = self.cff
  # CFF table must have one font only
  cff.fontNames = cff.fontNames[:1]
  if options.notdef_glyph and not options.notdef_outline:
    for name in cff.keys():
      charstring,_ = cff[name].CharStrings.getItemAndSelector('.notdef')
      charstring.bytecode = '\x0e' # endchar
      charstring.program = None
  return True # bool(cff.fontNames)
@_add_method(fontTools.ttLib.getTableClass('CFF '))
def subset_glyphs(self, s):
  """Subset each CFF font's charstrings, charset and (when present)
  FDSelect down to s.glyphs."""
  cff = self.cff
  for fontname in cff.keys():
    font = cff[fontname]
    cs = font.CharStrings
    # Load all glyphs
    for g in font.charset:
      if g not in s.glyphs: continue
      c,sel = cs.getItemAndSelector(g)
    if cs.charStringsAreIndexed:
      # Rebuild the charstrings INDEX with only surviving entries and
      # renumber the name->index map accordingly.
      indices = [i for i,g in enumerate(font.charset) if g in s.glyphs]
      csi = cs.charStringsIndex
      csi.items = [csi.items[i] for i in indices]
      csi.count = len(csi.items)
      # Drop lazy-loading state; everything is in memory now.
      del csi.file, csi.offsets
      if hasattr(font, "FDSelect"):
        sel = font.FDSelect
        sel.format = None
        sel.gidArray = [sel.gidArray[i] for i in indices]
      cs.charStrings = dict((g,indices.index(v))
                            for g,v in cs.charStrings.iteritems()
                            if g in s.glyphs)
    else:
      cs.charStrings = dict((g,v)
                            for g,v in cs.charStrings.iteritems()
                            if g in s.glyphs)
    font.charset = [g for g in font.charset if g in s.glyphs]
    font.numGlyphs = len(font.charset)
  return True # any(cff[fontname].numGlyphs for fontname in cff.keys())
@_add_method(fontTools.misc.psCharStrings.T2CharString)
def subset_subroutines(self, subrs, gsubrs):
  """Renumber callsubr/callgsubr operands after subroutine subsetting.

  subrs/gsubrs carry _used (retained old biased indices), _old_bias
  and _new_bias; each operand is rebased from old to new numbering.
  NOTE(review): _used is created as a set by _MarkingT2Decompiler but
  .index() is called here -- presumably it is converted to a sorted
  list before this runs; confirm at the call site.
  """
  p = self.program
  assert len(p)
  # Operand precedes its operator, hence p[i-1].
  for i in xrange(1, len(p)):
    if p[i] == 'callsubr':
      assert type(p[i-1]) is int
      p[i-1] = subrs._used.index(p[i-1] + subrs._old_bias) - subrs._new_bias
    elif p[i] == 'callgsubr':
      assert type(p[i-1]) is int
      p[i-1] = gsubrs._used.index(p[i-1] + gsubrs._old_bias) - gsubrs._new_bias
@_add_method(fontTools.misc.psCharStrings.T2CharString)
def drop_hints(self):
  """Strip hints previously located by _DehintingT2Decompiler (stored
  in self._hints): cut the program up to the last hint stem,
  re-insert the advance width if a dropped stem had consumed it, and
  delete hintmask/cntrmask operators together with their mask data."""
  hints = self._hints
  if hints.has_hint:
    self.program = self.program[hints.last_hint:]
    if hasattr(self, 'width'):
      # Insert width back if needed
      if self.width != self.private.defaultWidthX:
        self.program.insert(0, self.width - self.private.nominalWidthX)
  if hints.has_hintmask:
    i = 0
    p = self.program
    while i < len(p):
      if p[i] in ['hintmask', 'cntrmask']:
        assert i + 1 <= len(p)
        # Remove the operator and its mask argument.
        del p[i:i+2]
        continue
      i += 1
  assert len(self.program)
  del self._hints
class _MarkingT2Decompiler(fontTools.misc.psCharStrings.SimpleT2Decompiler):
  """T2 charstring decompiler that records which local/global
  subroutines are actually called, accumulating their biased indices
  in the subrs' _used sets."""
  def __init__(self, localSubrs, globalSubrs):
    fontTools.misc.psCharStrings.SimpleT2Decompiler.__init__(self,
                                                             localSubrs,
                                                             globalSubrs)
    # Attach a _used set to each subr index (once, shared across runs).
    for subrs in [localSubrs, globalSubrs]:
      if subrs and not hasattr(subrs, "_used"):
        subrs._used = set()
  def op_callsubr(self, index):
    # Record the biased local subr index being called.
    self.localSubrs._used.add(self.operandStack[-1]+self.localBias)
    fontTools.misc.psCharStrings.SimpleT2Decompiler.op_callsubr(self, index)
  def op_callgsubr(self, index):
    # Record the biased global subr index being called.
    self.globalSubrs._used.add(self.operandStack[-1]+self.globalBias)
    fontTools.misc.psCharStrings.SimpleT2Decompiler.op_callgsubr(self, index)
class _DehintingT2Decompiler(fontTools.misc.psCharStrings.SimpleT2Decompiler):
class Hints:
def __init__(self):
# Whether calling this charstring produces any hint stems
self.has_hint = False
# Index to start at to drop all hints
self.last_hint = 0
# Index up to which we know more hints are possible. Only
# relevant if status is 0 or 1.
self.last_checked = 0
# The status means:
# 0: after dropping hints, this charstring is empty
# 1: after dropping hints, there may be more hints continuing after this
# 2: no more hints possible after this charstring
self.status = 0
# Has hintmask instructions; not recursive
self.has_hintmask = False
pass
def __init__(self, css, localSubrs, globalSubrs):
self._css = css
fontTools.misc.psCharStrings.SimpleT2Decompiler.__init__(self,
localSubrs,
globalSubrs)
def execute(self, charString):
old_hints = charString._hints if hasattr(charString, '_hints') else None
charString._hints = self.Hints()
fontTools.misc.psCharStrings.SimpleT2Decompiler.execute(self, charString)
hints = charString._hints
if hints.has_hint or hints.has_hintmask:
self._css.add(charString)
if hints.status != 2:
# Check from last_check, make sure we didn't have any operators.
for i in xrange(hints.last_checked, len(charString.program) - 1):
if type(charString.program[i]) == str:
hints.status = 2
break;
else:
hints.status = 1 # There's *something* here
hints.last_checked = len(charString.program)
if old_hints:
assert hints.__dict__ == old_hints.__dict__
def op_callsubr(self, index):
subr = self.localSubrs[self.operandStack[-1]+self.localBias]
fontTools.misc.psCharStrings.SimpleT2Decompiler.op_callsubr(self, index)
self.processSubr(index, subr)
def op_callgsubr(self, index):
subr = self.globalSubrs[self.operandStack[-1]+self.globalBias]
fontTools.misc.psCharStrings.SimpleT2Decompiler.op_callgsubr(self, index)
self.processSubr(index, subr)
def op_hstem(self, index):
fontTools.misc.psCharStrings.SimpleT2Decompiler.op_hstem(self, index)
self.processHint(index)
def op_vstem(self, index):
fontTools.misc.psCharStrings.SimpleT2Decompiler.op_vstem(self, index)
self.processHint(index)
def op_hstemhm(self, index):
fontTools.misc.psCharStrings.SimpleT2Decompiler.op_hstemhm(self, index)
self.processHint(index)
def op_vstemhm(self, index):
fontTools.misc.psCharStrings.SimpleT2Decompiler.op_vstemhm(self, index)
self.processHint(index)
def op_hintmask(self, index):
fontTools.misc.psCharStrings.SimpleT2Decompiler.op_hintmask(self, index)
self.processHintmask(index)
def op_cntrmask(self, index):
fontTools.misc.psCharStrings.SimpleT2Decompiler.op_cntrmask(self, index)
self.processHintmask(index)
  def processHintmask(self, index):
    # A hintmask right after stem hints plus bare numbers acts as an
    # implicit vstemhm; detect that and extend the hint section.
    cs = self.callingStack[-1]
    hints = cs._hints
    hints.has_hintmask = True
    if hints.status != 2 and hints.has_hint:
      # Check from last_check, see if we may be an implicit vstem
      for i in xrange(hints.last_checked, index - 1):
        if type(cs.program[i]) == str:
          hints.status = 2
          break;
      if hints.status != 2:
        # We are an implicit vstem
        hints.last_hint = index + 1
        hints.status = 0
    hints.last_checked = index + 1
def processHint(self, index):
cs = self.callingStack[-1]
hints = cs._hints
hints.has_hint = True
hints.last_hint = index
hints.last_checked = index
def processSubr(self, index, subr):
cs = self.callingStack[-1]
hints = cs._hints
subr_hints = subr._hints
if subr_hints.has_hint:
if hints.status != 2:
hints.has_hint = True
self.last_checked = index
self.status = subr_hints.status
# Decide where to chop off from
if subr_hints.status == 0:
self.last_hint = index
else:
self.last_hint = index - 2 # Leave the subr call in
else:
# In my understanding, this is a font bug. Ie. it has hint stems
# *after* path construction. I've seen this in widespread fonts.
# Best to ignore the hints I suppose...
pass
#assert 0
else:
hints.status = max(hints.status, subr_hints.status)
if hints.status != 2:
# Check from last_check, make sure we didn't have
# any operators.
for i in xrange(hints.last_checked, index - 1):
if type(cs.program[i]) == str:
hints.status = 2
break;
hints.last_checked = index
@_add_method(fontTools.ttLib.getTableClass('CFF '))
def prune_post_subset(self, options):
  # Post-subset cleanup of the CFF table: drop unused FontDicts, strip
  # hints when not wanted, and renumber/garbage-collect subroutines.
  cff = self.cff
  for fontname in cff.keys():
    font = cff[fontname]
    cs = font.CharStrings
    #
    # Drop unused FontDictionaries
    #
    if hasattr(font, "FDSelect"):
      sel = font.FDSelect
      indices = _uniq_sort(sel.gidArray)
      sel.gidArray = [indices.index (ss) for ss in sel.gidArray]
      arr = font.FDArray
      arr.items = [arr[i] for i in indices]
      arr.count = len(arr.items)
      del arr.file, arr.offsets
    #
    # Drop hints if not needed
    #
    if not options.hinting:
      #
      # This can be tricky, but doesn't have to. What we do is:
      #
      # - Run all used glyph charstrings and recurse into subroutines,
      # - For each charstring (including subroutines), if it has any
      #   of the hint stem operators, we mark it as such. Upon returning,
      #   for each charstring we note all the subroutine calls it makes
      #   that (recursively) contain a stem,
      # - Dropping hinting then consists of the following two ops:
      #   * Drop the piece of the program in each charstring before the
      #     last call to a stem op or a stem-calling subroutine,
      #   * Drop all hintmask operations.
      # - It's trickier... A hintmask right after hints and a few numbers
      #   will act as an implicit vstemhm. As such, we track whether
      #   we have seen any non-hint operators so far and do the right
      #   thing, recursively... Good luck understanding that :(
      #
      css = set()
      for g in font.charset:
        c,sel = cs.getItemAndSelector(g)
        # Make sure it's decompiled. We want our "decompiler" to walk
        # the program, not the bytecode.
        c.draw(fontTools.pens.basePen.NullPen())
        subrs = getattr(c.private, "Subrs", [])
        decompiler = _DehintingT2Decompiler(css, subrs, c.globalSubrs)
        decompiler.execute(c)
      for charstring in css:
        charstring.drop_hints()
    #
    # Renumber subroutines to remove unused ones
    #
    # Mark all used subroutines
    for g in font.charset:
      c,sel = cs.getItemAndSelector(g)
      subrs = getattr(c.private, "Subrs", [])
      decompiler = _MarkingT2Decompiler(subrs, c.globalSubrs)
      decompiler.execute(c)
    all_subrs = [font.GlobalSubrs]
    if hasattr(font, 'FDSelect'):
      all_subrs.extend(fd.Private.Subrs for fd in font.FDArray if hasattr(fd.Private, 'Subrs') and fd.Private.Subrs)
    elif hasattr(font.Private, 'Subrs') and font.Private.Subrs:
      all_subrs.append(font.Private.Subrs)
    # NOTE(review): `subrs` here is the leftover loop variable from the
    # marking pass above and is immediately rebound by the loop below —
    # this line looks vestigial; confirm against upstream history.
    subrs = set(subrs) # Remove duplicates
    # Prepare
    for subrs in all_subrs:
      if not hasattr(subrs, '_used'):
        subrs._used = set()
      subrs._used = _uniq_sort(subrs._used)
      subrs._old_bias = fontTools.misc.psCharStrings.calcSubrBias(subrs)
      subrs._new_bias = fontTools.misc.psCharStrings.calcSubrBias(subrs._used)
    # Renumber glyph charstrings
    for g in font.charset:
      c,sel = cs.getItemAndSelector(g)
      subrs = getattr(c.private, "Subrs", [])
      c.subset_subroutines (subrs, font.GlobalSubrs)
    # Renumber subroutines themselves
    for subrs in all_subrs:
      if subrs == font.GlobalSubrs:
        if not hasattr(font, 'FDSelect') and hasattr(font.Private, 'Subrs'):
          local_subrs = font.Private.Subrs
        else:
          local_subrs = []
      else:
        local_subrs = subrs
      subrs.items = [subrs.items[i] for i in subrs._used]
      subrs.count = len(subrs.items)
      del subrs.file
      if hasattr(subrs, 'offsets'):
        del subrs.offsets
      for i in xrange (subrs.count):
        subrs[i].subset_subroutines (local_subrs, font.GlobalSubrs)
    # Cleanup
    for subrs in all_subrs:
      del subrs._used, subrs._old_bias, subrs._new_bias
  return True
@_add_method(fontTools.ttLib.getTableClass('cmap'))
def closure_glyphs(self, s):
  # Map each requested Unicode to a glyph via the Windows Unicode
  # subtables (platform 3, encoding 1 or 10); log the ones not found.
  tables = [t for t in self.tables
            if t.platformID == 3 and t.platEncID in [1, 10]]
  for u in s.unicodes_requested:
    for table in tables:
      if u in table.cmap:
        s.glyphs.add(table.cmap[u])
        break
    else:
      s.log("No glyph for Unicode value %s; skipping." % u)
@_add_method(fontTools.ttLib.getTableClass('cmap'))
def prune_pre_subset(self, options):
  # Pick which cmap subtables to keep before glyph closure; returns
  # False (drop the whole table) when nothing survives.
  if not options.legacy_cmap:
    # Drop non-Unicode / non-Symbol cmaps
    self.tables = [t for t in self.tables
                   if t.platformID == 3 and t.platEncID in [0, 1, 10]]
  if not options.symbol_cmap:
    self.tables = [t for t in self.tables
                   if t.platformID == 3 and t.platEncID in [1, 10]]
  # TODO(behdad) Only keep one subtable?
  # For now, drop format=0 which can't be subset_glyphs easily?
  self.tables = [t for t in self.tables if t.format != 0]
  return bool(self.tables)
@_add_method(fontTools.ttLib.getTableClass('cmap'))
def subset_glyphs(self, s):
  # Restrict every surviving cmap subtable to the retained glyph set.
  s.glyphs = s.glyphs_cmaped
  for t in self.tables:
    # For reasons I don't understand I need this here
    # to force decompilation of the cmap format 14.
    try:
      getattr(t, "asdf")
    except AttributeError:
      pass
    if t.format == 14:
      # TODO(behdad) XXX We drop all the default-UVS mappings(g==None).
      t.uvsDict = dict((v,[(u,g) for u,g in l if g in s.glyphs])
                       for v,l in t.uvsDict.iteritems())
      t.uvsDict = dict((v,l) for v,l in t.uvsDict.iteritems() if l)
    else:
      t.cmap = dict((u,g) for u,g in t.cmap.iteritems()
                    if g in s.glyphs_requested or u in s.unicodes_requested)
  # A subtable survives only if it still maps something.
  self.tables = [t for t in self.tables
                 if (t.cmap if t.format != 14 else t.uvsDict)]
  # TODO(behdad) Convert formats when needed.
  # In particular, if we have a format=12 without non-BMP
  # characters, either drop format=12 one or convert it
  # to format=4 if there's not one.
  return bool(self.tables)
@_add_method(fontTools.ttLib.getTableClass('name'))
def prune_pre_subset(self, options):
  # Filter name records by ID, platform/encoding, and language,
  # according to the subsetting options.
  names = self.names
  if '*' not in options.name_IDs:
    names = [n for n in names if n.nameID in options.name_IDs]
  if not options.name_legacy:
    names = [n for n in names
             if n.platformID == 3 and n.platEncID == 1]
  if '*' not in options.name_languages:
    names = [n for n in names if n.langID in options.name_languages]
  self.names = names
  return True # Retain even if empty
# TODO(behdad) OS/2 ulUnicodeRange / ulCodePageRange?
# TODO(behdad) Drop unneeded GSUB/GPOS Script/LangSys entries.
# TODO(behdad) Drop empty GSUB/GPOS, and GDEF if no GSUB/GPOS left
# TODO(behdad) Drop GDEF subitems if unused by lookups
# TODO(behdad) Avoid recursing too much (in GSUB/GPOS and in CFF)
# TODO(behdad) Text direction considerations.
# TODO(behdad) Text script / language considerations.
class Options(object):

  """Subsetting options, settable via keyword arguments or a
  command-line-style --option=value list (see parse_opts)."""

  class UnknownOptionError(Exception):
    # Raised by set()/parse_opts() for an option name with no default.
    pass

  # Tables dropped outright.
  _drop_tables_default = ['BASE', 'JSTF', 'DSIG', 'EBDT', 'EBLC', 'EBSC', 'SVG ',
                          'PCLT', 'LTSH']
  _drop_tables_default += ['Feat', 'Glat', 'Gloc', 'Silf', 'Sill'] # Graphite
  _drop_tables_default += ['CBLC', 'CBDT', 'sbix', 'COLR', 'CPAL'] # Color
  # Tables kept verbatim (no per-glyph subsetting needed).
  _no_subset_tables_default = ['gasp', 'head', 'hhea', 'maxp', 'vhea', 'OS/2',
                               'loca', 'name', 'cvt ', 'fpgm', 'prep']
  # Tables dropped when hinting is disabled.
  _hinting_tables_default = ['cvt ', 'fpgm', 'prep', 'hdmx', 'VDMX']

  # Based on HarfBuzz shapers
  _layout_features_groups = {
    # Default shaper
    'common': ['ccmp', 'liga', 'locl', 'mark', 'mkmk', 'rlig'],
    'horizontal': ['calt', 'clig', 'curs', 'kern', 'rclt'],
    'vertical': ['valt', 'vert', 'vkrn', 'vpal', 'vrt2'],
    'ltr': ['ltra', 'ltrm'],
    'rtl': ['rtla', 'rtlm'],
    # Complex shapers
    'arabic': ['init', 'medi', 'fina', 'isol', 'med2', 'fin2', 'fin3',
               'cswh', 'mset'],
    'hangul': ['ljmo', 'vjmo', 'tjmo'],
    'tibetal': ['abvs', 'blws', 'abvm', 'blwm'],
    'indic': ['nukt', 'akhn', 'rphf', 'rkrf', 'pref', 'blwf', 'half',
              'abvf', 'pstf', 'cfar', 'vatu', 'cjct', 'init', 'pres',
              'abvs', 'blws', 'psts', 'haln', 'dist', 'abvm', 'blwm'],
  }
  _layout_features_default = _uniq_sort(sum(
      _layout_features_groups.itervalues(), []))

  # NOTE(review): these list defaults are mutable class attributes; the
  # '+='/'-=' handling in parse_opts mutates them in place, so changes
  # leak across Options instances in the same process.
  drop_tables = _drop_tables_default
  no_subset_tables = _no_subset_tables_default
  hinting_tables = _hinting_tables_default
  layout_features = _layout_features_default
  hinting = False
  glyph_names = False
  legacy_cmap = False
  symbol_cmap = False
  name_IDs = [1, 2] # Family and Style
  name_legacy = False
  name_languages = [0x0409] # English
  notdef_glyph = True # gid0 for TrueType / .notdef for CFF
  notdef_outline = False # No need for notdef to have an outline really
  recommended_glyphs = False # gid1, gid2, gid3 for TrueType
  recalc_bounds = False # Recalculate font bounding boxes
  canonical_order = False # Order tables as recommended
  flavor = None # May be 'woff'

  def __init__(self, **kwargs):
    self.set(**kwargs)

  def set(self, **kwargs):
    # Assign known options; reject unknown names.
    for k,v in kwargs.iteritems():
      if not hasattr(self, k):
        raise self.UnknownOptionError("Unknown option '%s'" % k)
      setattr(self, k, v)

  def parse_opts(self, argv, ignore_unknown=False):
    # Parse --option[=value] arguments; '--no-x' clears a flag, and
    # list options support '+='/'-=' to extend/remove. Returns the
    # arguments that were not consumed.
    ret = []
    opts = {}
    for a in argv:
      orig_a = a
      if not a.startswith('--'):
        ret.append(a)
        continue
      a = a[2:]
      i = a.find('=')
      op = '='
      if i == -1:
        if a.startswith("no-"):
          k = a[3:]
          v = False
        else:
          k = a
          v = True
      else:
        k = a[:i]
        if k[-1] in "-+":
          op = k[-1]+'=' # Ops is '-=' or '+=' now.
          k = k[:-1]
        v = a[i+1:]
      k = k.replace('-', '_')
      if not hasattr(self, k):
        if ignore_unknown == True or k in ignore_unknown:
          ret.append(orig_a)
          continue
        else:
          raise self.UnknownOptionError("Unknown option '%s'" % a)
      ov = getattr(self, k)
      if isinstance(ov, bool):
        # NOTE(review): bool('0') is True — '--flag=0' enables the flag.
        v = bool(v)
      elif isinstance(ov, int):
        v = int(v)
      elif isinstance(ov, list):
        vv = v.split(',')
        if vv == ['']:
          vv = []
        # Items that look numeric are parsed as ints (base auto-detected).
        vv = [int(x, 0) if len(x) and x[0] in "0123456789" else x for x in vv]
        if op == '=':
          v = vv
        elif op == '+=':
          v = ov
          v.extend(vv)
        elif op == '-=':
          v = ov
          for x in vv:
            if x in v:
              v.remove(x)
        else:
          assert 0
      opts[k] = v
    self.set(**opts)
    return ret
class Subsetter(object):

  """Drives the subsetting pipeline: prune tables, close the glyph set
  over cmap/GSUB/glyf, subset each table, then prune again."""

  def __init__(self, options=None, log=None):
    if not log:
      log = Logger()
    if not options:
      options = Options()
    self.options = options
    self.log = log
    self.unicodes_requested = set()
    self.glyphs_requested = set()
    self.glyphs = set()

  def populate(self, glyphs=[], unicodes=[], text=""):
    # Add glyph names, Unicode values, and/or text characters to the
    # requested subset. (Mutable defaults are never mutated here.)
    self.unicodes_requested.update(unicodes)
    if isinstance(text, str):
      text = text.decode("utf8")
    for u in text:
      self.unicodes_requested.add(ord(u))
    self.glyphs_requested.update(glyphs)
    self.glyphs.update(glyphs)

  def _prune_pre_subset(self, font):
    # Drop unwanted tables and run per-table prune_pre_subset hooks.
    for tag in font.keys():
      if tag == 'GlyphOrder': continue
      if(tag in self.options.drop_tables or
         (tag in self.options.hinting_tables and not self.options.hinting)):
        self.log(tag, "dropped")
        del font[tag]
        continue
      clazz = fontTools.ttLib.getTableClass(tag)
      if hasattr(clazz, 'prune_pre_subset'):
        table = font[tag]
        retain = table.prune_pre_subset(self.options)
        self.log.lapse("prune '%s'" % tag)
        if not retain:
          self.log(tag, "pruned to empty; dropped")
          del font[tag]
          continue
        else:
          self.log(tag, "pruned")

  def _closure_glyphs(self, font):
    # Expand the requested glyph set with cmap lookups, notdef /
    # recommended glyphs, GSUB substitution closure, and glyf
    # composite-component closure. Snapshots the set at each stage.
    self.glyphs = self.glyphs_requested.copy()
    if 'cmap' in font:
      font['cmap'].closure_glyphs(self)
    self.glyphs_cmaped = self.glyphs
    if self.options.notdef_glyph:
      if 'glyf' in font:
        self.glyphs.add(font.getGlyphName(0))
        self.log("Added gid0 to subset")
      else:
        self.glyphs.add('.notdef')
        self.log("Added .notdef to subset")
    if self.options.recommended_glyphs:
      if 'glyf' in font:
        for i in range(4):
          self.glyphs.add(font.getGlyphName(i))
        self.log("Added first four glyphs to subset")
    if 'GSUB' in font:
      self.log("Closing glyph list over 'GSUB': %d glyphs before" %
               len(self.glyphs))
      self.log.glyphs(self.glyphs, font=font)
      font['GSUB'].closure_glyphs(self)
      self.log("Closed glyph list over 'GSUB': %d glyphs after" %
               len(self.glyphs))
      self.log.glyphs(self.glyphs, font=font)
      self.log.lapse("close glyph list over 'GSUB'")
    self.glyphs_gsubed = self.glyphs.copy()
    if 'glyf' in font:
      self.log("Closing glyph list over 'glyf': %d glyphs before" %
               len(self.glyphs))
      self.log.glyphs(self.glyphs, font=font)
      font['glyf'].closure_glyphs(self)
      self.log("Closed glyph list over 'glyf': %d glyphs after" %
               len(self.glyphs))
      self.log.glyphs(self.glyphs, font=font)
      self.log.lapse("close glyph list over 'glyf'")
    self.glyphs_glyfed = self.glyphs.copy()
    self.glyphs_all = self.glyphs.copy()
    self.log("Retaining %d glyphs: " % len(self.glyphs_all))

  def _subset_glyphs(self, font):
    # Run per-table subset_glyphs hooks; tables with no hook and not in
    # the no-subset list are dropped. Finally trims the glyph order.
    for tag in font.keys():
      if tag == 'GlyphOrder': continue
      clazz = fontTools.ttLib.getTableClass(tag)
      if tag in self.options.no_subset_tables:
        self.log(tag, "subsetting not needed")
      elif hasattr(clazz, 'subset_glyphs'):
        table = font[tag]
        self.glyphs = self.glyphs_all
        retain = table.subset_glyphs(self)
        self.glyphs = self.glyphs_all
        self.log.lapse("subset '%s'" % tag)
        if not retain:
          self.log(tag, "subsetted to empty; dropped")
          del font[tag]
        else:
          self.log(tag, "subsetted")
      else:
        self.log(tag, "NOT subset; don't know how to subset; dropped")
        del font[tag]
    glyphOrder = font.getGlyphOrder()
    glyphOrder = [g for g in glyphOrder if g in self.glyphs_all]
    font.setGlyphOrder(glyphOrder)
    font._buildReverseGlyphOrderDict()
    self.log.lapse("subset GlyphOrder")

  def _prune_post_subset(self, font):
    # Run per-table prune_post_subset hooks (e.g. CFF subr GC).
    for tag in font.keys():
      if tag == 'GlyphOrder': continue
      clazz = fontTools.ttLib.getTableClass(tag)
      if hasattr(clazz, 'prune_post_subset'):
        table = font[tag]
        retain = table.prune_post_subset(self.options)
        self.log.lapse("prune '%s'" % tag)
        if not retain:
          self.log(tag, "pruned to empty; dropped")
          del font[tag]
        else:
          self.log(tag, "pruned")

  def subset(self, font):
    # Full pipeline, mutating `font` in place.
    self._prune_pre_subset(font)
    self._closure_glyphs(font)
    self._subset_glyphs(font)
    self._prune_post_subset(font)
class Logger(object):

  """Minimal logger: optional verbose messages, optional per-stage
  timing, and optional XML dump of the resulting font."""

  def __init__(self, verbose=False, xml=False, timing=False):
    self.verbose = verbose
    self.xml = xml
    self.timing = timing
    # lapse() reports deltas from last_time; start_time is kept so main
    # can report total time.
    self.last_time = self.start_time = time.time()

  def parse_opts(self, argv):
    # Consume --verbose/--xml/--timing; returns the remaining argv.
    argv = argv[:]
    for v in ['verbose', 'xml', 'timing']:
      if "--"+v in argv:
        setattr(self, v, True)
        argv.remove("--"+v)
    return argv

  def __call__(self, *things):
    # Print a message when verbose mode is on.
    if not self.verbose:
      return
    print ' '.join(str(x) for x in things)

  def lapse(self, *things):
    # Report time elapsed since the previous lapse() call.
    if not self.timing:
      return
    new_time = time.time()
    print "Took %0.3fs to %s" %(new_time - self.last_time,
                                ' '.join(str(x) for x in things))
    self.last_time = new_time

  def glyphs(self, glyphs, font=None):
    # Verbose dump of a glyph set by name and (if a font is given) gid.
    self("Names: ", sorted(glyphs))
    if font:
      reverseGlyphMap = font.getReverseGlyphMap()
      self("Gids : ", sorted(reverseGlyphMap[g] for g in glyphs))

  def font(self, font, file=sys.stdout):
    # Dump the font as TTX-style XML when --xml was given.
    if not self.xml:
      return
    import xmlWriter
    writer = xmlWriter.XMLWriter(file)
    font.disassembleInstructions = False # Work around ttLib bug
    for tag in font.keys():
      writer.begintag(tag)
      writer.newline()
      font[tag].toXML(writer, font)
      writer.endtag(tag)
      writer.newline()
def load_font(fontFile,
              options,
              checkChecksums=False,
              dontLoadGlyphNames=False):
  """Load a TTFont, optionally skipping glyph-name decoding in 'post'.

  Returns the loaded fontTools.ttLib.TTFont instance."""
  font = fontTools.ttLib.TTFont(fontFile,
                                checkChecksums=checkChecksums,
                                recalcBBoxes=options.recalc_bounds)
  # Hack:
  #
  # If we don't need glyph names, change 'post' class to not try to
  # load them. It avoid lots of headache with broken fonts as well
  # as loading time.
  #
  # Ideally ttLib should provide a way to ask it to skip loading
  # glyph names. But it currently doesn't provide such a thing.
  #
  if dontLoadGlyphNames:
    post = fontTools.ttLib.getTableClass('post')
    saved = post.decode_format_2_0
    post.decode_format_2_0 = post.decode_format_3_0
    # BUGFIX: restore the class method even if loading 'post' raises,
    # so later loads in the same process are not silently affected.
    try:
      f = font['post']
      if f.formatType == 2.0:
        f.formatType = 3.0
    finally:
      post.decode_format_2_0 = saved
  return font
def save_font(font, outfile, options):
  """Write `font` to `outfile`, applying the requested flavor
  (e.g. 'woff') and table ordering from `options`."""
  wants_flavor = bool(options.flavor)
  if wants_flavor and not hasattr(font, 'flavor'):
    raise Exception("fonttools version does not support flavors.")
  font.flavor = options.flavor
  font.save(outfile, reorderTables=options.canonical_order)
def main(args):
  # Command-line entry point: parse logger/subsetter options, load the
  # font, resolve glyph specifiers, subset, and save to <font>.subset.
  log = Logger()
  args = log.parse_opts(args)
  options = Options()
  args = options.parse_opts(args, ignore_unknown=['text'])
  if len(args) < 2:
    print >>sys.stderr, "usage: pyftsubset font-file glyph... [--text=ABC]... [--option=value]..."
    sys.exit(1)
  fontfile = args[0]
  args = args[1:]
  # Skip loading glyph names entirely when every specifier is numeric
  # (gid/glyph/uni/U+ forms) and names were not explicitly requested.
  dontLoadGlyphNames =(not options.glyph_names and
                       all(any(g.startswith(p)
                               for p in ['gid', 'glyph', 'uni', 'U+'])
                           for g in args))
  font = load_font(fontfile, options, dontLoadGlyphNames=dontLoadGlyphNames)
  subsetter = Subsetter(options=options, log=log)
  log.lapse("load font")
  names = font.getGlyphNames()
  log.lapse("loading glyph names")
  glyphs = []
  unicodes = []
  text = ""
  # Each argument is a glyph name, '*', '--text=', a Unicode value
  # (uniXXXX / U+XXXX), or a glyph id (gidN / glyphN).
  for g in args:
    if g == '*':
      glyphs.extend(font.getGlyphOrder())
      continue
    if g in names:
      glyphs.append(g)
      continue
    if g.startswith('--text='):
      text += g[7:]
      continue
    if g.startswith('uni') or g.startswith('U+'):
      if g.startswith('uni') and len(g) > 3:
        g = g[3:]
      elif g.startswith('U+') and len(g) > 2:
        g = g[2:]
      u = int(g, 16)
      unicodes.append(u)
      continue
    if g.startswith('gid') or g.startswith('glyph'):
      if g.startswith('gid') and len(g) > 3:
        g = g[3:]
      elif g.startswith('glyph') and len(g) > 5:
        g = g[5:]
      try:
        glyphs.append(font.getGlyphName(int(g), requireReal=1))
      except ValueError:
        raise Exception("Invalid glyph identifier: %s" % g)
      continue
    raise Exception("Invalid glyph identifier: %s" % g)
  log.lapse("compile glyph list")
  log("Unicodes:", unicodes)
  log("Glyphs:", glyphs)
  subsetter.populate(glyphs=glyphs, unicodes=unicodes, text=text)
  subsetter.subset(font)
  outfile = fontfile + '.subset'
  save_font (font, outfile, options)
  log.lapse("compile and save font")
  log.last_time = log.start_time
  log.lapse("make one with everything(TOTAL TIME)")
  if log.verbose:
    import os
    log("Input font: %d bytes" % os.path.getsize(fontfile))
    log("Subset font: %d bytes" % os.path.getsize(outfile))
  log.font(font)
  font.close()
# Public API of this module.
__all__ = [
  'Options',
  'Subsetter',
  'Logger',
  'load_font',
  'save_font',
  'main'
]

if __name__ == '__main__':
  main(sys.argv[1:])
[subset] Minor verbose output improvement
# Copyright 2013 Google, Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0(the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Google Author(s): Behdad Esfahbod
"""Python OpenType Layout Subsetter.
Later grown into full OpenType subsetter, supporting all standard tables.
"""
import sys
import struct
import time
import array
import fontTools.ttLib
import fontTools.ttLib.tables
import fontTools.ttLib.tables.otTables
import fontTools.cffLib
import fontTools.misc.psCharStrings
import fontTools.pens.basePen
def _add_method(*clazzes):
  """Returns a decorator that installs the decorated function as a new
  method on each of the given classes."""
  def wrapper(method):
    name = method.func_name
    for clazz in clazzes:
      # A DefaultTable here means the table tag didn't resolve to a
      # real table class.
      assert clazz.__name__ != 'DefaultTable', 'Oops, table class not found.'
      assert not hasattr(clazz, name), \
          "Oops, class '%s' has method '%s'." % (clazz.__name__,
                                                 name)
      setattr(clazz, name, method)
    return None
  return wrapper
def _uniq_sort(l):
return sorted(set(l))
def _set_update(s, *others):
# Jython's set.update only takes one other argument.
# Emulate real set.update...
for other in others:
s.update(other)
@_add_method(fontTools.ttLib.tables.otTables.Coverage)
def intersect(self, glyphs):
  "Returns ascending list of matching coverage values."
  matches = []
  for i, g in enumerate(self.glyphs):
    if g in glyphs:
      matches.append(i)
  return matches
@_add_method(fontTools.ttLib.tables.otTables.Coverage)
def intersect_glyphs(self, glyphs):
  "Returns set of intersecting glyphs."
  hits = set()
  for g in self.glyphs:
    if g in glyphs:
      hits.add(g)
  return hits
@_add_method(fontTools.ttLib.tables.otTables.Coverage)
def subset(self, glyphs):
  "Returns ascending list of remaining coverage values."
  # Single pass: collect surviving indices and glyphs together.
  indices = []
  retained = []
  for i, g in enumerate(self.glyphs):
    if g in glyphs:
      indices.append(i)
      retained.append(g)
  self.glyphs = retained
  return indices
@_add_method(fontTools.ttLib.tables.otTables.Coverage)
def remap(self, coverage_map):
  "Remaps coverage."
  remapped = []
  for i in coverage_map:
    remapped.append(self.glyphs[i])
  self.glyphs = remapped
@_add_method(fontTools.ttLib.tables.otTables.ClassDef)
def intersect(self, glyphs):
  "Returns ascending list of matching class values."
  classes = [v for g,v in self.classDefs.iteritems() if g in glyphs]
  # Class 0 is implicit: it matches when some glyph is not listed.
  if any(g not in self.classDefs for g in glyphs):
    classes.append(0)
  return _uniq_sort(classes)
@_add_method(fontTools.ttLib.tables.otTables.ClassDef)
def intersect_class(self, glyphs, klass):
  "Returns set of glyphs matching class."
  defs = self.classDefs
  if klass == 0:
    # Class 0 is implicit: any glyph not listed at all.
    return set(g for g in glyphs if g not in defs)
  matching = set()
  for g, v in defs.iteritems():
    if v == klass and g in glyphs:
      matching.add(g)
  return matching
@_add_method(fontTools.ttLib.tables.otTables.ClassDef)
def subset(self, glyphs, remap=False):
  "Returns ascending list of remaining classes."
  # Keep only entries for surviving glyphs, then compute which class
  # values are still referenced (optionally renumbering them densely).
  self.classDefs = dict((g,v) for g,v in self.classDefs.iteritems() if g in glyphs)
  # Note: while class 0 has the special meaning of "not matched",
  # if no glyph will ever /not match/, we can optimize class 0 out too.
  indices = _uniq_sort(
     ([0] if any(g not in self.classDefs for g in glyphs) else []) +
     self.classDefs.values())
  if remap:
    self.remap(indices)
  return indices
@_add_method(fontTools.ttLib.tables.otTables.ClassDef)
def remap(self, class_map):
  "Remaps classes."
  renumbered = {}
  for g, v in self.classDefs.iteritems():
    renumbered[g] = class_map.index(v)
  self.classDefs = renumbered
@_add_method(fontTools.ttLib.tables.otTables.SingleSubst)
def closure_glyphs(self, s, cur_glyphs=None):
  # Add every substitute reachable from the current glyph set.
  if cur_glyphs is None:
    cur_glyphs = s.glyphs
  assert self.Format in [1, 2], "unknown format: %s" % self.Format
  for glyph, sub in self.mapping.iteritems():
    if glyph in cur_glyphs:
      s.glyphs.add(sub)
@_add_method(fontTools.ttLib.tables.otTables.SingleSubst)
def subset_glyphs(self, s):
  # Keep only rules whose input and output both survive.
  assert self.Format in [1, 2], "unknown format: %s" % self.Format
  kept = {}
  for glyph, sub in self.mapping.iteritems():
    if glyph in s.glyphs and sub in s.glyphs:
      kept[glyph] = sub
  self.mapping = kept
  return bool(kept)
@_add_method(fontTools.ttLib.tables.otTables.MultipleSubst)
def closure_glyphs(self, s, cur_glyphs=None):
  # Add all output sequences of rules triggered by the current set.
  if cur_glyphs is None:
    cur_glyphs = s.glyphs
  assert self.Format == 1, "unknown format: %s" % self.Format
  for i in self.Coverage.intersect(cur_glyphs):
    s.glyphs.update(self.Sequence[i].Substitute)
@_add_method(fontTools.ttLib.tables.otTables.MultipleSubst)
def subset_glyphs(self, s):
  # First restrict coverage to surviving inputs, then drop rules whose
  # output sequence contains any dropped glyph, remapping coverage.
  if self.Format == 1:
    indices = self.Coverage.subset(s.glyphs)
    self.Sequence = [self.Sequence[i] for i in indices]
    # Now drop rules generating glyphs we don't want
    indices = [i for i,seq in enumerate(self.Sequence)
               if all(sub in s.glyphs for sub in seq.Substitute)]
    self.Sequence = [self.Sequence[i] for i in indices]
    self.Coverage.remap(indices)
    self.SequenceCount = len(self.Sequence)
    return bool(self.SequenceCount)
  else:
    assert 0, "unknown format: %s" % self.Format
@_add_method(fontTools.ttLib.tables.otTables.AlternateSubst)
def closure_glyphs(self, s, cur_glyphs=None):
  # Add every alternate of glyphs in the current set.
  if cur_glyphs is None:
    cur_glyphs = s.glyphs
  assert self.Format == 1, "unknown format: %s" % self.Format
  for g, vlist in self.alternates.iteritems():
    if g in cur_glyphs:
      s.glyphs.update(vlist)
@_add_method(fontTools.ttLib.tables.otTables.AlternateSubst)
def subset_glyphs(self, s):
  # A rule survives only when its input glyph and all alternates do.
  assert self.Format == 1, "unknown format: %s" % self.Format
  kept = {}
  for g, vlist in self.alternates.iteritems():
    if g in s.glyphs and all(v in s.glyphs for v in vlist):
      kept[g] = vlist
  self.alternates = kept
  return bool(kept)
@_add_method(fontTools.ttLib.tables.otTables.LigatureSubst)
def closure_glyphs(self, s, cur_glyphs=None):
  # Add ligature glyphs whose first component is in the current set and
  # whose remaining components are all already retained.
  if cur_glyphs == None: cur_glyphs = s.glyphs
  if self.Format == 1:
    _set_update(s.glyphs, *([seq.LigGlyph for seq in seqs
                             if all(c in s.glyphs for c in seq.Component)]
                            for g,seqs in self.ligatures.iteritems()
                            if g in cur_glyphs))
  else:
    assert 0, "unknown format: %s" % self.Format
@_add_method(fontTools.ttLib.tables.otTables.LigatureSubst)
def subset_glyphs(self, s):
  # Three passes: drop entries for dropped first glyphs, then drop
  # sequences whose ligature or components are gone, then drop now
  # empty entries.
  if self.Format == 1:
    self.ligatures = dict((g,v) for g,v in self.ligatures.iteritems()
                          if g in s.glyphs)
    self.ligatures = dict((g,[seq for seq in seqs
                              if seq.LigGlyph in s.glyphs and
                              all(c in s.glyphs for c in seq.Component)])
                          for g,seqs in self.ligatures.iteritems())
    self.ligatures = dict((g,v) for g,v in self.ligatures.iteritems() if v)
    return bool(self.ligatures)
  else:
    assert 0, "unknown format: %s" % self.Format
@_add_method(fontTools.ttLib.tables.otTables.ReverseChainSingleSubst)
def closure_glyphs(self, s, cur_glyphs=None):
  # Substitutes apply only if the whole backtrack/lookahead context can
  # still match; otherwise nothing is added.
  if cur_glyphs == None: cur_glyphs = s.glyphs
  if self.Format == 1:
    indices = self.Coverage.intersect(cur_glyphs)
    if(not indices or
       not all(c.intersect(s.glyphs)
               for c in self.LookAheadCoverage + self.BacktrackCoverage)):
      return
    s.glyphs.update(self.Substitute[i] for i in indices)
  else:
    assert 0, "unknown format: %s" % self.Format
@_add_method(fontTools.ttLib.tables.otTables.ReverseChainSingleSubst)
def subset_glyphs(self, s):
  if self.Format == 1:
    indices = self.Coverage.subset(s.glyphs)
    self.Substitute = [self.Substitute[i] for i in indices]
    # Now drop rules generating glyphs we don't want
    indices = [i for i,sub in enumerate(self.Substitute)
               if sub in s.glyphs]
    self.Substitute = [self.Substitute[i] for i in indices]
    self.Coverage.remap(indices)
    self.GlyphCount = len(self.Substitute)
    # NOTE: `and` short-circuits — when GlyphCount is 0 the context
    # coverages are not subset, but the lookup is dropped anyway.
    return bool(self.GlyphCount and
                all(c.subset(s.glyphs)
                    for c in self.LookAheadCoverage+self.BacktrackCoverage))
  else:
    assert 0, "unknown format: %s" % self.Format
@_add_method(fontTools.ttLib.tables.otTables.SinglePos)
def subset_glyphs(self, s):
  # Format 1 shares one value record; format 2 has one per glyph.
  if self.Format == 1:
    return len(self.Coverage.subset(s.glyphs))
  if self.Format == 2:
    indices = self.Coverage.subset(s.glyphs)
    self.Value = [self.Value[i] for i in indices]
    self.ValueCount = len(self.Value)
    return bool(self.ValueCount)
  assert 0, "unknown format: %s" % self.Format
@_add_method(fontTools.ttLib.tables.otTables.SinglePos)
def prune_post_subset(self, options):
  if options.hinting:
    return True
  # Drop device tables (the 0x00F0 bits of ValueFormat).
  self.ValueFormat &= ~0x00F0
  return True
@_add_method(fontTools.ttLib.tables.otTables.PairPos)
def subset_glyphs(self, s):
  if self.Format == 1:
    # Glyph-pair form: trim pair sets by first glyph, then each set's
    # records by second glyph.
    indices = self.Coverage.subset(s.glyphs)
    self.PairSet = [self.PairSet[i] for i in indices]
    for p in self.PairSet:
      p.PairValueRecord = [r for r in p.PairValueRecord
                           if r.SecondGlyph in s.glyphs]
      p.PairValueCount = len(p.PairValueRecord)
    self.PairSet = [p for p in self.PairSet if p.PairValueCount]
    self.PairSetCount = len(self.PairSet)
    return bool(self.PairSetCount)
  elif self.Format == 2:
    # Class-pair form: subset both class defs (renumbering densely) and
    # slice the record matrix accordingly.
    class1_map = self.ClassDef1.subset(s.glyphs, remap=True)
    class2_map = self.ClassDef2.subset(s.glyphs, remap=True)
    self.Class1Record = [self.Class1Record[i] for i in class1_map]
    for c in self.Class1Record:
      c.Class2Record = [c.Class2Record[i] for i in class2_map]
    self.Class1Count = len(class1_map)
    self.Class2Count = len(class2_map)
    return bool(self.Class1Count and
                self.Class2Count and
                self.Coverage.subset(s.glyphs))
  else:
    assert 0, "unknown format: %s" % self.Format
@_add_method(fontTools.ttLib.tables.otTables.PairPos)
def prune_post_subset(self, options):
  if options.hinting:
    return True
  # Drop device tables from both value records.
  self.ValueFormat1 &= ~0x00F0
  self.ValueFormat2 &= ~0x00F0
  return True
@_add_method(fontTools.ttLib.tables.otTables.CursivePos)
def subset_glyphs(self, s):
  # Keep entry/exit records only for covered, surviving glyphs.
  assert self.Format == 1, "unknown format: %s" % self.Format
  indices = self.Coverage.subset(s.glyphs)
  records = [self.EntryExitRecord[i] for i in indices]
  self.EntryExitRecord = records
  self.EntryExitCount = len(records)
  return bool(self.EntryExitCount)
@_add_method(fontTools.ttLib.tables.otTables.Anchor)
def prune_hints(self):
  # Drop device tables / contour anchor point
  # (Format 1 is the plain x,y anchor with no hinting data.)
  self.Format = 1
@_add_method(fontTools.ttLib.tables.otTables.CursivePos)
def prune_post_subset(self, options):
  if options.hinting:
    return True
  # Strip hinting data from every present entry/exit anchor.
  for rec in self.EntryExitRecord:
    if rec.EntryAnchor:
      rec.EntryAnchor.prune_hints()
    if rec.ExitAnchor:
      rec.ExitAnchor.prune_hints()
  return True
@_add_method(fontTools.ttLib.tables.otTables.MarkBasePos)
def subset_glyphs(self, s):
  if self.Format == 1:
    # Trim mark and base arrays to covered, surviving glyphs.
    mark_indices = self.MarkCoverage.subset(s.glyphs)
    self.MarkArray.MarkRecord = [self.MarkArray.MarkRecord[i]
                                 for i in mark_indices]
    self.MarkArray.MarkCount = len(self.MarkArray.MarkRecord)
    base_indices = self.BaseCoverage.subset(s.glyphs)
    self.BaseArray.BaseRecord = [self.BaseArray.BaseRecord[i]
                                 for i in base_indices]
    self.BaseArray.BaseCount = len(self.BaseArray.BaseRecord)
    # Prune empty classes
    class_indices = _uniq_sort(v.Class for v in self.MarkArray.MarkRecord)
    self.ClassCount = len(class_indices)
    for m in self.MarkArray.MarkRecord:
      m.Class = class_indices.index(m.Class)
    for b in self.BaseArray.BaseRecord:
      b.BaseAnchor = [b.BaseAnchor[i] for i in class_indices]
    return bool(self.ClassCount and
                self.MarkArray.MarkCount and
                self.BaseArray.BaseCount)
  else:
    assert 0, "unknown format: %s" % self.Format
@_add_method(fontTools.ttLib.tables.otTables.MarkBasePos)
def prune_post_subset(self, options):
  if options.hinting:
    return True
  # Strip hinting data from all mark and base anchors.
  for mark in self.MarkArray.MarkRecord:
    mark.MarkAnchor.prune_hints()
  for base in self.BaseArray.BaseRecord:
    for anchor in base.BaseAnchor:
      anchor.prune_hints()
  return True
@_add_method(fontTools.ttLib.tables.otTables.MarkLigPos)
def subset_glyphs(self, s):
  if self.Format == 1:
    # Trim mark and ligature arrays to covered, surviving glyphs.
    mark_indices = self.MarkCoverage.subset(s.glyphs)
    self.MarkArray.MarkRecord = [self.MarkArray.MarkRecord[i]
                                 for i in mark_indices]
    self.MarkArray.MarkCount = len(self.MarkArray.MarkRecord)
    ligature_indices = self.LigatureCoverage.subset(s.glyphs)
    self.LigatureArray.LigatureAttach = [self.LigatureArray.LigatureAttach[i]
                                         for i in ligature_indices]
    self.LigatureArray.LigatureCount = len(self.LigatureArray.LigatureAttach)
    # Prune empty classes
    class_indices = _uniq_sort(v.Class for v in self.MarkArray.MarkRecord)
    self.ClassCount = len(class_indices)
    for m in self.MarkArray.MarkRecord:
      m.Class = class_indices.index(m.Class)
    for l in self.LigatureArray.LigatureAttach:
      for c in l.ComponentRecord:
        c.LigatureAnchor = [c.LigatureAnchor[i] for i in class_indices]
    return bool(self.ClassCount and
                self.MarkArray.MarkCount and
                self.LigatureArray.LigatureCount)
  else:
    assert 0, "unknown format: %s" % self.Format
@_add_method(fontTools.ttLib.tables.otTables.MarkLigPos)
def prune_post_subset(self, options):
  if options.hinting:
    return True
  # Strip hinting data from mark anchors and every ligature component
  # anchor.
  for mark in self.MarkArray.MarkRecord:
    mark.MarkAnchor.prune_hints()
  for attach in self.LigatureArray.LigatureAttach:
    for component in attach.ComponentRecord:
      for anchor in component.LigatureAnchor:
        anchor.prune_hints()
  return True
@_add_method(fontTools.ttLib.tables.otTables.MarkMarkPos)
def subset_glyphs(self, s):
  if self.Format == 1:
    # Trim both mark arrays to covered, surviving glyphs.
    mark1_indices = self.Mark1Coverage.subset(s.glyphs)
    self.Mark1Array.MarkRecord = [self.Mark1Array.MarkRecord[i]
                                  for i in mark1_indices]
    self.Mark1Array.MarkCount = len(self.Mark1Array.MarkRecord)
    mark2_indices = self.Mark2Coverage.subset(s.glyphs)
    self.Mark2Array.Mark2Record = [self.Mark2Array.Mark2Record[i]
                                   for i in mark2_indices]
    self.Mark2Array.MarkCount = len(self.Mark2Array.Mark2Record)
    # Prune empty classes
    class_indices = _uniq_sort(v.Class for v in self.Mark1Array.MarkRecord)
    self.ClassCount = len(class_indices)
    for m in self.Mark1Array.MarkRecord:
      m.Class = class_indices.index(m.Class)
    for b in self.Mark2Array.Mark2Record:
      b.Mark2Anchor = [b.Mark2Anchor[i] for i in class_indices]
    return bool(self.ClassCount and
                self.Mark1Array.MarkCount and
                self.Mark2Array.MarkCount)
  else:
    assert 0, "unknown format: %s" % self.Format
@_add_method(fontTools.ttLib.tables.otTables.MarkMarkPos)
def prune_post_subset(self, options):
  # Strip hinting data from all anchors when hints are being dropped.
  if not options.hinting:
    # Drop device tables or contour anchor point
    for m in self.Mark1Array.MarkRecord:
      m.MarkAnchor.prune_hints()
    for b in self.Mark2Array.Mark2Record:
      # BUGFIX: this loop used to read `rec.Mark2Anchor`, but `rec` is
      # undefined here (NameError at runtime); the anchors live on the
      # Mark2Record being iterated, `b`.
      for m in b.Mark2Anchor:
        m.prune_hints()
  return True
@_add_method(fontTools.ttLib.tables.otTables.SingleSubst,
             fontTools.ttLib.tables.otTables.MultipleSubst,
             fontTools.ttLib.tables.otTables.AlternateSubst,
             fontTools.ttLib.tables.otTables.LigatureSubst,
             fontTools.ttLib.tables.otTables.ReverseChainSingleSubst,
             fontTools.ttLib.tables.otTables.SinglePos,
             fontTools.ttLib.tables.otTables.PairPos,
             fontTools.ttLib.tables.otTables.CursivePos,
             fontTools.ttLib.tables.otTables.MarkBasePos,
             fontTools.ttLib.tables.otTables.MarkLigPos,
             fontTools.ttLib.tables.otTables.MarkMarkPos)
def subset_lookups(self, lookup_indices):
    # Non-contextual subtables never reference other lookups, so there is
    # nothing to remap here.
    pass
@_add_method(fontTools.ttLib.tables.otTables.SingleSubst,
             fontTools.ttLib.tables.otTables.MultipleSubst,
             fontTools.ttLib.tables.otTables.AlternateSubst,
             fontTools.ttLib.tables.otTables.LigatureSubst,
             fontTools.ttLib.tables.otTables.ReverseChainSingleSubst,
             fontTools.ttLib.tables.otTables.SinglePos,
             fontTools.ttLib.tables.otTables.PairPos,
             fontTools.ttLib.tables.otTables.CursivePos,
             fontTools.ttLib.tables.otTables.MarkBasePos,
             fontTools.ttLib.tables.otTables.MarkLigPos,
             fontTools.ttLib.tables.otTables.MarkMarkPos)
def collect_lookups(self):
    # Non-contextual subtables reference no other lookups.
    return []
@_add_method(fontTools.ttLib.tables.otTables.SingleSubst,
             fontTools.ttLib.tables.otTables.MultipleSubst,
             fontTools.ttLib.tables.otTables.AlternateSubst,
             fontTools.ttLib.tables.otTables.LigatureSubst,
             fontTools.ttLib.tables.otTables.ContextSubst,
             fontTools.ttLib.tables.otTables.ChainContextSubst,
             fontTools.ttLib.tables.otTables.ReverseChainSingleSubst,
             fontTools.ttLib.tables.otTables.SinglePos,
             fontTools.ttLib.tables.otTables.PairPos,
             fontTools.ttLib.tables.otTables.CursivePos,
             fontTools.ttLib.tables.otTables.MarkBasePos,
             fontTools.ttLib.tables.otTables.MarkLigPos,
             fontTools.ttLib.tables.otTables.MarkMarkPos,
             fontTools.ttLib.tables.otTables.ContextPos,
             fontTools.ttLib.tables.otTables.ChainContextPos)
def prune_pre_subset(self, options):
    # Default pre-subset pruning: nothing to prune; keep the subtable.
    return True
@_add_method(fontTools.ttLib.tables.otTables.SingleSubst,
             fontTools.ttLib.tables.otTables.MultipleSubst,
             fontTools.ttLib.tables.otTables.AlternateSubst,
             fontTools.ttLib.tables.otTables.LigatureSubst,
             fontTools.ttLib.tables.otTables.ReverseChainSingleSubst,
             fontTools.ttLib.tables.otTables.ContextSubst,
             fontTools.ttLib.tables.otTables.ChainContextSubst,
             fontTools.ttLib.tables.otTables.ContextPos,
             fontTools.ttLib.tables.otTables.ChainContextPos)
def prune_post_subset(self, options):
    # Default post-subset pruning: these carry no hint data; keep as-is.
    return True
@_add_method(fontTools.ttLib.tables.otTables.SingleSubst,
             fontTools.ttLib.tables.otTables.AlternateSubst,
             fontTools.ttLib.tables.otTables.ReverseChainSingleSubst)
def may_have_non_1to1(self):
    # These substitutions always map one input glyph to one output glyph,
    # so closure can track positions precisely through them.
    return False
@_add_method(fontTools.ttLib.tables.otTables.MultipleSubst,
             fontTools.ttLib.tables.otTables.LigatureSubst,
             fontTools.ttLib.tables.otTables.ContextSubst,
             fontTools.ttLib.tables.otTables.ChainContextSubst)
def may_have_non_1to1(self):
    # These can change sequence length (one-to-many, many-to-one) or
    # rewrite arbitrary positions, so position tracking becomes unsafe.
    return True
@_add_method(fontTools.ttLib.tables.otTables.ContextSubst,
             fontTools.ttLib.tables.otTables.ChainContextSubst,
             fontTools.ttLib.tables.otTables.ContextPos,
             fontTools.ttLib.tables.otTables.ChainContextPos)
def __classify_context(self):
    """Return a (per-class, per-format cached) ContextHelper for this subtable.

    The helper abstracts the naming differences between the
    Context/ChainContext x Subst/Pos x Format 1/2/3 variants so the generic
    closure/subset/collect code can treat all of them uniformly.
    """

    class ContextHelper(object):
        def __init__(self, klass, Format):
            # Derive the attribute-name prefixes from the class name:
            # 'Sub'/'Pos', plus an optional 'Chain' prefix.
            if klass.__name__.endswith('Subst'):
                Typ = 'Sub'
                Type = 'Subst'
            else:
                Typ = 'Pos'
                Type = 'Pos'
            if klass.__name__.startswith('Chain'):
                Chain = 'Chain'
            else:
                Chain = ''
            ChainTyp = Chain+Typ

            self.Typ = Typ
            self.Type = Type
            self.Chain = Chain
            self.ChainTyp = ChainTyp

            # e.g. 'SubstLookupRecord' / 'PosLookupRecord'
            self.LookupRecord = Type+'LookupRecord'

            if Format == 1:
                # Format 1: glyph-based rules; no per-position class data.
                Coverage = lambda r: r.Coverage
                ChainCoverage = lambda r: r.Coverage
                ContextData = lambda r:(None,)
                ChainContextData = lambda r:(None, None, None)
                RuleData = lambda r:(r.Input,)
                ChainRuleData = lambda r:(r.Backtrack, r.Input, r.LookAhead)
                SetRuleData = None
                ChainSetRuleData = None
            elif Format == 2:
                # Format 2: class-based rules.  NOTE(review): the chain
                # tuples are deliberately ordered (LookAhead, Input,
                # Backtrack); Data and SetData must stay in sync.
                Coverage = lambda r: r.Coverage
                ChainCoverage = lambda r: r.Coverage
                ContextData = lambda r:(r.ClassDef,)
                ChainContextData = lambda r:(r.LookAheadClassDef,
                                             r.InputClassDef,
                                             r.BacktrackClassDef)
                RuleData = lambda r:(r.Class,)
                ChainRuleData = lambda r:(r.LookAhead, r.Input, r.Backtrack)
                def SetRuleData(r, d): (r.Class,) = d
                def ChainSetRuleData(r, d): (r.LookAhead, r.Input, r.Backtrack) = d
            elif Format == 3:
                # Format 3: coverage-based rules; the subtable IS the rule.
                Coverage = lambda r: r.Coverage[0]
                ChainCoverage = lambda r: r.InputCoverage[0]
                ContextData = None
                ChainContextData = None
                RuleData = lambda r: r.Coverage
                ChainRuleData = lambda r:(r.LookAheadCoverage +
                                          r.InputCoverage +
                                          r.BacktrackCoverage)
                SetRuleData = None
                ChainSetRuleData = None
            else:
                assert 0, "unknown format: %s" % Format

            # Select the chain or plain accessor set once, up front.
            if Chain:
                self.Coverage = ChainCoverage
                self.ContextData = ChainContextData
                self.RuleData = ChainRuleData
                self.SetRuleData = ChainSetRuleData
            else:
                self.Coverage = Coverage
                self.ContextData = ContextData
                self.RuleData = RuleData
                self.SetRuleData = SetRuleData

            if Format == 1:
                self.Rule = ChainTyp+'Rule'
                self.RuleCount = ChainTyp+'RuleCount'
                self.RuleSet = ChainTyp+'RuleSet'
                self.RuleSetCount = ChainTyp+'RuleSetCount'
                self.Intersect = lambda glyphs, c, r: [r] if r in glyphs else []
            elif Format == 2:
                self.Rule = ChainTyp+'ClassRule'
                self.RuleCount = ChainTyp+'ClassRuleCount'
                self.RuleSet = ChainTyp+'ClassSet'
                self.RuleSetCount = ChainTyp+'ClassSetCount'
                self.Intersect = lambda glyphs, c, r: c.intersect_class(glyphs, r)

                self.ClassDef = 'InputClassDef' if Chain else 'ClassDef'
                self.Input = 'Input' if Chain else 'Class'

    if self.Format not in [1, 2, 3]:
        return None  # Don't shoot the messenger; let it go

    # Cache one helper per (class, format) on the class object.
    if not hasattr(self.__class__, "__ContextHelpers"):
        self.__class__.__ContextHelpers = {}
    if self.Format not in self.__class__.__ContextHelpers:
        helper = ContextHelper(self.__class__, self.Format)
        self.__class__.__ContextHelpers[self.Format] = helper
    return self.__class__.__ContextHelpers[self.Format]
@_add_method(fontTools.ttLib.tables.otTables.ContextSubst,
             fontTools.ttLib.tables.otTables.ChainContextSubst)
def closure_glyphs(self, s, cur_glyphs=None):
    """Run glyph closure through the lookups referenced by this contextual
    substitution, restricting each referenced lookup to the glyphs that can
    actually occur at its sequence position.

    `chaos` turns True once any applied lookup may change sequence length
    (non-1:1); after that, position tracking is abandoned and the full
    glyph set is used conservatively.
    """
    # NOTE(review): '== None' should idiomatically be 'is None'.
    if cur_glyphs == None: cur_glyphs = s.glyphs
    c = self.__classify_context()

    indices = c.Coverage(self).intersect(s.glyphs)
    if not indices:
        return []
    cur_glyphs = c.Coverage(self).intersect_glyphs(s.glyphs);

    if self.Format == 1:
        ContextData = c.ContextData(self)
        rss = getattr(self, c.RuleSet)
        for i in indices:
            if not rss[i]: continue
            for r in getattr(rss[i], c.Rule):
                if not r: continue
                # The rule can fire only if every context position is
                # matchable by a glyph we are keeping.
                if all(all(c.Intersect(s.glyphs, cd, k) for k in klist)
                       for cd,klist in zip(ContextData, c.RuleData(r))):
                    chaos = False
                    for ll in getattr(r, c.LookupRecord):
                        if not ll: continue
                        seqi = ll.SequenceIndex
                        if chaos:
                            pos_glyphs = s.glyphs
                        else:
                            if seqi == 0:
                                # First position: the coverage glyph.
                                pos_glyphs = set([c.Coverage(self).glyphs[i]])
                            else:
                                pos_glyphs = set([r.Input[seqi - 1]])
                        lookup = s.table.LookupList.Lookup[ll.LookupListIndex]
                        chaos = chaos or lookup.may_have_non_1to1()
                        lookup.closure_glyphs(s, cur_glyphs=pos_glyphs)
    elif self.Format == 2:
        ClassDef = getattr(self, c.ClassDef)
        indices = ClassDef.intersect(cur_glyphs)
        ContextData = c.ContextData(self)
        rss = getattr(self, c.RuleSet)
        for i in indices:
            if not rss[i]: continue
            for r in getattr(rss[i], c.Rule):
                if not r: continue
                if all(all(c.Intersect(s.glyphs, cd, k) for k in klist)
                       for cd,klist in zip(ContextData, c.RuleData(r))):
                    chaos = False
                    for ll in getattr(r, c.LookupRecord):
                        if not ll: continue
                        seqi = ll.SequenceIndex
                        if chaos:
                            pos_glyphs = s.glyphs
                        else:
                            if seqi == 0:
                                # First position: glyphs of class i.
                                pos_glyphs = ClassDef.intersect_class(cur_glyphs, i)
                            else:
                                pos_glyphs = ClassDef.intersect_class(s.glyphs,
                                                                      getattr(r, c.Input)[seqi - 1])
                        lookup = s.table.LookupList.Lookup[ll.LookupListIndex]
                        chaos = chaos or lookup.may_have_non_1to1()
                        lookup.closure_glyphs(s, cur_glyphs=pos_glyphs)
    elif self.Format == 3:
        # Every coverage slot must intersect the kept set, else no match.
        if not all(x.intersect(s.glyphs) for x in c.RuleData(self)):
            return []
        r = self
        chaos = False
        for ll in getattr(r, c.LookupRecord):
            if not ll: continue
            seqi = ll.SequenceIndex
            if chaos:
                pos_glyphs = s.glyphs
            else:
                if seqi == 0:
                    pos_glyphs = cur_glyphs
                else:
                    pos_glyphs = r.InputCoverage[seqi].intersect_glyphs(s.glyphs)
            lookup = s.table.LookupList.Lookup[ll.LookupListIndex]
            chaos = chaos or lookup.may_have_non_1to1()
            lookup.closure_glyphs(s, cur_glyphs=pos_glyphs)
    else:
        assert 0, "unknown format: %s" % self.Format
@_add_method(fontTools.ttLib.tables.otTables.ContextSubst,
             fontTools.ttLib.tables.otTables.ContextPos,
             fontTools.ttLib.tables.otTables.ChainContextSubst,
             fontTools.ttLib.tables.otTables.ChainContextPos)
def subset_glyphs(self, s):
    """Drop contextual rules that can no longer match after subsetting.

    Returns True if any rule (or, for Format 3, all coverage sets)
    survives."""
    c = self.__classify_context()

    if self.Format == 1:
        indices = self.Coverage.subset(s.glyphs)
        rss = getattr(self, c.RuleSet)
        rss = [rss[i] for i in indices]
        for rs in rss:
            if not rs: continue
            ss = getattr(rs, c.Rule)
            # Keep a rule only if every glyph it mentions survives.
            ss = [r for r in ss
                  if r and all(all(g in s.glyphs for g in glist)
                               for glist in c.RuleData(r))]
            setattr(rs, c.Rule, ss)
            setattr(rs, c.RuleCount, len(ss))
        # Prune empty subrulesets
        rss = [rs for rs in rss if rs and getattr(rs, c.Rule)]
        setattr(self, c.RuleSet, rss)
        setattr(self, c.RuleSetCount, len(rss))
        return bool(rss)
    elif self.Format == 2:
        if not self.Coverage.subset(s.glyphs):
            return False
        # remap=False: rule sets are indexed by class value; the classes
        # are renumbered separately below via klass_maps.
        indices = getattr(self, c.ClassDef).subset(self.Coverage.glyphs,
                                                   remap=False)
        rss = getattr(self, c.RuleSet)
        rss = [rss[i] for i in indices]
        ContextData = c.ContextData(self)
        klass_maps = [x.subset(s.glyphs, remap=True) for x in ContextData]
        for rs in rss:
            if not rs: continue
            ss = getattr(rs, c.Rule)
            # Keep a rule only if every class it mentions survived.
            ss = [r for r in ss
                  if r and all(all(k in klass_map for k in klist)
                               for klass_map,klist in zip(klass_maps, c.RuleData(r)))]
            setattr(rs, c.Rule, ss)
            setattr(rs, c.RuleCount, len(ss))
            # Remap rule classes
            for r in ss:
                c.SetRuleData(r, [[klass_map.index(k) for k in klist]
                                  for klass_map,klist in zip(klass_maps, c.RuleData(r))])
        # Prune empty subrulesets
        rss = [rs for rs in rss if rs and getattr(rs, c.Rule)]
        setattr(self, c.RuleSet, rss)
        setattr(self, c.RuleSetCount, len(rss))
        return bool(rss)
    elif self.Format == 3:
        return all(x.subset(s.glyphs) for x in c.RuleData(self))
    else:
        assert 0, "unknown format: %s" % self.Format
@_add_method(fontTools.ttLib.tables.otTables.ContextSubst,
             fontTools.ttLib.tables.otTables.ChainContextSubst,
             fontTools.ttLib.tables.otTables.ContextPos,
             fontTools.ttLib.tables.otTables.ChainContextPos)
def subset_lookups(self, lookup_indices):
    """Drop lookup records that reference removed lookups, then renumber
    the survivors to the new lookup-list order."""
    c = self.__classify_context()

    if self.Format in [1, 2]:
        for rs in getattr(self, c.RuleSet):
            if not rs: continue
            for r in getattr(rs, c.Rule):
                if not r: continue
                setattr(r, c.LookupRecord,
                        [ll for ll in getattr(r, c.LookupRecord)
                         if ll and ll.LookupListIndex in lookup_indices])
                # Remap indices into the subset lookup list.
                for ll in getattr(r, c.LookupRecord):
                    if not ll: continue
                    ll.LookupListIndex = lookup_indices.index(ll.LookupListIndex)
    elif self.Format == 3:
        # Format 3 keeps its lookup records directly on the subtable.
        setattr(self, c.LookupRecord,
                [ll for ll in getattr(self, c.LookupRecord)
                 if ll and ll.LookupListIndex in lookup_indices])
        for ll in getattr(self, c.LookupRecord):
            if not ll: continue
            ll.LookupListIndex = lookup_indices.index(ll.LookupListIndex)
    else:
        assert 0, "unknown format: %s" % self.Format
@_add_method(fontTools.ttLib.tables.otTables.ContextSubst,
             fontTools.ttLib.tables.otTables.ChainContextSubst,
             fontTools.ttLib.tables.otTables.ContextPos,
             fontTools.ttLib.tables.otTables.ChainContextPos)
def collect_lookups(self):
    """Return (possibly with duplicates) the lookup-list indices referenced
    by this contextual subtable's lookup records."""
    c = self.__classify_context()
    if self.Format in [1, 2]:
        found = []
        for rule_set in getattr(self, c.RuleSet):
            if not rule_set:
                continue
            for rule in getattr(rule_set, c.Rule):
                if not rule:
                    continue
                for record in getattr(rule, c.LookupRecord):
                    if record:
                        found.append(record.LookupListIndex)
        return found
    elif self.Format == 3:
        return [record.LookupListIndex
                for record in getattr(self, c.LookupRecord) if record]
    else:
        assert 0, "unknown format: %s" % self.Format
@_add_method(fontTools.ttLib.tables.otTables.ExtensionSubst)
def closure_glyphs(self, s, cur_glyphs=None):
    """Delegate glyph closure to the wrapped subtable."""
    if self.Format != 1:
        assert 0, "unknown format: %s" % self.Format
    self.ExtSubTable.closure_glyphs(s, cur_glyphs)
@_add_method(fontTools.ttLib.tables.otTables.ExtensionSubst)
def may_have_non_1to1(self):
    """Delegate the non-1:1 query to the wrapped subtable."""
    if self.Format != 1:
        assert 0, "unknown format: %s" % self.Format
    return self.ExtSubTable.may_have_non_1to1()
@_add_method(fontTools.ttLib.tables.otTables.ExtensionSubst,
             fontTools.ttLib.tables.otTables.ExtensionPos)
def prune_pre_subset(self, options):
    """Delegate pre-subset pruning to the wrapped subtable."""
    if self.Format != 1:
        assert 0, "unknown format: %s" % self.Format
    return self.ExtSubTable.prune_pre_subset(options)
@_add_method(fontTools.ttLib.tables.otTables.ExtensionSubst,
             fontTools.ttLib.tables.otTables.ExtensionPos)
def subset_glyphs(self, s):
    """Delegate glyph subsetting to the wrapped subtable."""
    if self.Format != 1:
        assert 0, "unknown format: %s" % self.Format
    return self.ExtSubTable.subset_glyphs(s)
@_add_method(fontTools.ttLib.tables.otTables.ExtensionSubst,
             fontTools.ttLib.tables.otTables.ExtensionPos)
def prune_post_subset(self, options):
    """Delegate post-subset pruning to the wrapped subtable."""
    if self.Format != 1:
        assert 0, "unknown format: %s" % self.Format
    return self.ExtSubTable.prune_post_subset(options)
@_add_method(fontTools.ttLib.tables.otTables.ExtensionSubst,
             fontTools.ttLib.tables.otTables.ExtensionPos)
def subset_lookups(self, lookup_indices):
    """Delegate lookup-index remapping to the wrapped subtable."""
    if self.Format != 1:
        assert 0, "unknown format: %s" % self.Format
    return self.ExtSubTable.subset_lookups(lookup_indices)
@_add_method(fontTools.ttLib.tables.otTables.ExtensionSubst,
             fontTools.ttLib.tables.otTables.ExtensionPos)
def collect_lookups(self):
    """Delegate lookup collection to the wrapped subtable."""
    if self.Format != 1:
        assert 0, "unknown format: %s" % self.Format
    return self.ExtSubTable.collect_lookups()
@_add_method(fontTools.ttLib.tables.otTables.Lookup)
def closure_glyphs(self, s, cur_glyphs=None):
    """Run glyph closure on every non-empty subtable of this lookup."""
    for subtable in self.SubTable:
        if subtable:
            subtable.closure_glyphs(s, cur_glyphs)
@_add_method(fontTools.ttLib.tables.otTables.Lookup)
def prune_pre_subset(self, options):
    """Pre-subset-prune every subtable; True if any reported a change.
    Every subtable is visited (no short-circuit) for its side effects."""
    results = [subtable.prune_pre_subset(options)
               for subtable in self.SubTable if subtable]
    return any(results)
@_add_method(fontTools.ttLib.tables.otTables.Lookup)
def subset_glyphs(self, s):
    """Subset each subtable, dropping those that become empty.
    Returns True if any subtable survives."""
    kept = []
    for subtable in self.SubTable:
        if subtable and subtable.subset_glyphs(s):
            kept.append(subtable)
    self.SubTable = kept
    self.SubTableCount = len(kept)
    return len(kept) > 0
@_add_method(fontTools.ttLib.tables.otTables.Lookup)
def prune_post_subset(self, options):
    """Post-subset-prune every subtable; True if any reported a change.
    Every subtable is visited (no short-circuit) for its side effects."""
    results = [subtable.prune_post_subset(options)
               for subtable in self.SubTable if subtable]
    return any(results)
@_add_method(fontTools.ttLib.tables.otTables.Lookup)
def subset_lookups(self, lookup_indices):
    """Forward lookup-index remapping to every subtable."""
    for subtable in self.SubTable:
        subtable.subset_lookups(lookup_indices)
@_add_method(fontTools.ttLib.tables.otTables.Lookup)
def collect_lookups(self):
    """Return the sorted, de-duplicated lookup indices referenced by any
    subtable of this lookup."""
    collected = []
    for subtable in self.SubTable:
        if subtable:
            collected.extend(subtable.collect_lookups())
    return _uniq_sort(collected)
@_add_method(fontTools.ttLib.tables.otTables.Lookup)
def may_have_non_1to1(self):
    """True if any subtable may perform a non-1:1 substitution."""
    for subtable in self.SubTable:
        if subtable and subtable.may_have_non_1to1():
            return True
    return False
@_add_method(fontTools.ttLib.tables.otTables.LookupList)
def prune_pre_subset(self, options):
    """Pre-subset-prune every lookup; True if any reported a change.
    Every lookup is visited (no short-circuit) for its side effects."""
    results = [lookup.prune_pre_subset(options)
               for lookup in self.Lookup if lookup]
    return any(results)
@_add_method(fontTools.ttLib.tables.otTables.LookupList)
def subset_glyphs(self, s):
    "Returns the indices of nonempty lookups."
    surviving = []
    for index, lookup in enumerate(self.Lookup):
        if lookup and lookup.subset_glyphs(s):
            surviving.append(index)
    return surviving
@_add_method(fontTools.ttLib.tables.otTables.LookupList)
def prune_post_subset(self, options):
    """Post-subset-prune every lookup; True if any reported a change.
    Every lookup is visited (no short-circuit) for its side effects."""
    results = [lookup.prune_post_subset(options)
               for lookup in self.Lookup if lookup]
    return any(results)
@_add_method(fontTools.ttLib.tables.otTables.LookupList)
def subset_lookups(self, lookup_indices):
    """Keep only the lookups named in lookup_indices (in that order) and
    remap the cross-references inside the survivors."""
    count = self.LookupCount
    self.Lookup = [self.Lookup[i] for i in lookup_indices if i < count]
    self.LookupCount = len(self.Lookup)
    for lookup in self.Lookup:
        lookup.subset_lookups(lookup_indices)
@_add_method(fontTools.ttLib.tables.otTables.LookupList)
def closure_lookups(self, lookup_indices):
    """Return the transitive closure of lookup_indices over lookup
    cross-references (contextual lookups referencing other lookups),
    as a sorted, de-duplicated list."""
    lookup_indices = _uniq_sort(lookup_indices)
    recurse = lookup_indices
    while True:
        # Collect everything referenced by the lookups found last round.
        recurse_lookups = sum((self.Lookup[i].collect_lookups()
                               for i in recurse if i < self.LookupCount), [])
        recurse_lookups = [l for l in recurse_lookups
                           if l not in lookup_indices and l < self.LookupCount]
        if not recurse_lookups:
            return _uniq_sort(lookup_indices)
        recurse_lookups = _uniq_sort(recurse_lookups)
        lookup_indices.extend(recurse_lookups)
        recurse = recurse_lookups
@_add_method(fontTools.ttLib.tables.otTables.Feature)
def subset_lookups(self, lookup_indices):
    """Keep only lookups present in lookup_indices and remap them to the
    new lookup-list positions.  Returns the surviving lookup count."""
    # Filter and remap in a single pass.
    self.LookupListIndex = [lookup_indices.index(l)
                            for l in self.LookupListIndex
                            if l in lookup_indices]
    self.LookupCount = len(self.LookupListIndex)
    return self.LookupCount
@_add_method(fontTools.ttLib.tables.otTables.Feature)
def collect_lookups(self):
    """Return a copy of this feature's lookup-list indices."""
    return list(self.LookupListIndex)
@_add_method(fontTools.ttLib.tables.otTables.FeatureList)
def subset_lookups(self, lookup_indices):
    "Returns the indices of nonempty features."
    feature_indices = []
    for index, record in enumerate(self.FeatureRecord):
        if record.Feature.subset_lookups(lookup_indices):
            feature_indices.append(index)
    self.subset_features(feature_indices)
    return feature_indices
@_add_method(fontTools.ttLib.tables.otTables.FeatureList)
def collect_lookups(self, feature_indices):
    """Return the sorted, de-duplicated lookup indices used by the given
    (in-range) features."""
    collected = []
    for i in feature_indices:
        if i < self.FeatureCount:
            collected.extend(self.FeatureRecord[i].Feature.collect_lookups())
    return _uniq_sort(collected)
@_add_method(fontTools.ttLib.tables.otTables.FeatureList)
def subset_features(self, feature_indices):
    """Keep only the feature records named in feature_indices (in order).
    Returns True if any feature remains."""
    self.FeatureRecord = [self.FeatureRecord[i] for i in feature_indices]
    self.FeatureCount = len(self.FeatureRecord)
    return self.FeatureCount > 0
@_add_method(fontTools.ttLib.tables.otTables.DefaultLangSys,
             fontTools.ttLib.tables.otTables.LangSys)
def subset_features(self, feature_indices):
    """Keep only surviving features and remap their indices; 0xFFFF marks
    a dropped required feature.  Returns True if anything remains."""
    if self.ReqFeatureIndex in feature_indices:
        self.ReqFeatureIndex = feature_indices.index(self.ReqFeatureIndex)
    else:
        self.ReqFeatureIndex = 65535
    # Filter and remap in a single pass.
    self.FeatureIndex = [feature_indices.index(f)
                         for f in self.FeatureIndex
                         if f in feature_indices]
    self.FeatureCount = len(self.FeatureIndex)
    return bool(self.FeatureCount or self.ReqFeatureIndex != 65535)
@_add_method(fontTools.ttLib.tables.otTables.DefaultLangSys,
             fontTools.ttLib.tables.otTables.LangSys)
def collect_features(self):
    """Return this language system's feature indices, including the
    required feature (0xFFFF means none), sorted and de-duplicated."""
    indices = list(self.FeatureIndex)
    if self.ReqFeatureIndex != 65535:
        indices.append(self.ReqFeatureIndex)
    return _uniq_sort(indices)
@_add_method(fontTools.ttLib.tables.otTables.Script)
def subset_features(self, feature_indices):
    """Subset the default and per-language systems of this script, dropping
    the ones that become empty.  Returns True if anything remains."""
    if self.DefaultLangSys:
        if not self.DefaultLangSys.subset_features(feature_indices):
            self.DefaultLangSys = None
    self.LangSysRecord = [record for record in self.LangSysRecord
                          if record.LangSys.subset_features(feature_indices)]
    self.LangSysCount = len(self.LangSysRecord)
    return bool(self.LangSysCount or self.DefaultLangSys)
@_add_method(fontTools.ttLib.tables.otTables.Script)
def collect_features(self):
    """Return the sorted, de-duplicated feature indices used by all
    language systems of this script (including the default one)."""
    merged = []
    for record in self.LangSysRecord:
        merged.extend(record.LangSys.collect_features())
    if self.DefaultLangSys:
        merged.extend(self.DefaultLangSys.collect_features())
    return _uniq_sort(merged)
@_add_method(fontTools.ttLib.tables.otTables.ScriptList)
def subset_features(self, feature_indices):
    """Subset every script, dropping scripts that become empty.
    Returns True if any script remains."""
    self.ScriptRecord = [record for record in self.ScriptRecord
                         if record.Script.subset_features(feature_indices)]
    self.ScriptCount = len(self.ScriptRecord)
    return self.ScriptCount > 0
@_add_method(fontTools.ttLib.tables.otTables.ScriptList)
def collect_features(self):
    """Return the sorted, de-duplicated feature indices used anywhere in
    the script list."""
    collected = []
    for record in self.ScriptRecord:
        collected.extend(record.Script.collect_features())
    return _uniq_sort(collected)
@_add_method(fontTools.ttLib.getTableClass('GSUB'))
def closure_glyphs(self, s):
    """Grow s.glyphs with every glyph reachable through GSUB substitution,
    iterating until a fixed point is reached (substitution outputs can
    themselves be substitution inputs)."""
    # Lookups need access to the whole table during closure.
    s.table = self.table
    feature_indices = self.table.ScriptList.collect_features()
    lookup_indices = self.table.FeatureList.collect_lookups(feature_indices)
    while True:
        orig_glyphs = s.glyphs.copy()
        for i in lookup_indices:
            if i >= self.table.LookupList.LookupCount: continue
            if not self.table.LookupList.Lookup[i]: continue
            self.table.LookupList.Lookup[i].closure_glyphs(s)
        # Stop once a full pass adds nothing new.
        if orig_glyphs == s.glyphs:
            break
    del s.table
@_add_method(fontTools.ttLib.getTableClass('GSUB'),
             fontTools.ttLib.getTableClass('GPOS'))
def subset_glyphs(self, s):
    """Subset the lookup list to the GSUB-closed glyph set, then drop
    lookups/features/scripts that became empty or unreferenced."""
    s.glyphs = s.glyphs_gsubed
    surviving_lookups = self.table.LookupList.subset_glyphs(s)
    self.subset_lookups(surviving_lookups)
    self.prune_lookups()
    return True
@_add_method(fontTools.ttLib.getTableClass('GSUB'),
             fontTools.ttLib.getTableClass('GPOS'))
def subset_lookups(self, lookup_indices):
    """Retains specified lookups, then removes empty features, language
    systems, and scripts."""
    self.table.LookupList.subset_lookups(lookup_indices)
    feature_indices = self.table.FeatureList.subset_lookups(lookup_indices)
    self.table.ScriptList.subset_features(feature_indices)
@_add_method(fontTools.ttLib.getTableClass('GSUB'),
             fontTools.ttLib.getTableClass('GPOS'))
def prune_lookups(self):
    "Remove unreferenced lookups"
    feature_indices = self.table.ScriptList.collect_features()
    referenced = self.table.FeatureList.collect_lookups(feature_indices)
    # Include lookups reachable indirectly via contextual rules.
    closure = self.table.LookupList.closure_lookups(referenced)
    self.subset_lookups(closure)
@_add_method(fontTools.ttLib.getTableClass('GSUB'),
             fontTools.ttLib.getTableClass('GPOS'))
def subset_feature_tags(self, feature_tags):
    """Keep only features whose tag is in feature_tags, updating both the
    feature list and the script list."""
    keep = [index
            for index, record in enumerate(self.table.FeatureList.FeatureRecord)
            if record.FeatureTag in feature_tags]
    self.table.FeatureList.subset_features(keep)
    self.table.ScriptList.subset_features(keep)
@_add_method(fontTools.ttLib.getTableClass('GSUB'),
             fontTools.ttLib.getTableClass('GPOS'))
def prune_pre_subset(self, options):
    """Before glyph subsetting: restrict to requested layout features
    (unless '*'), drop unreferenced lookups, and pre-prune the rest."""
    if '*' not in options.layout_features:
        self.subset_feature_tags(options.layout_features)
    self.prune_lookups()
    self.table.LookupList.prune_pre_subset(options)
    return True
@_add_method(fontTools.ttLib.getTableClass('GSUB'),
             fontTools.ttLib.getTableClass('GPOS'))
def prune_post_subset(self, options):
    """After glyph subsetting, post-prune the lookup list (e.g. drop hint
    data).  Always keeps the table."""
    self.table.LookupList.prune_post_subset(options)
    return True
@_add_method(fontTools.ttLib.getTableClass('GDEF'))
def subset_glyphs(self, s):
    """Subset all GDEF sub-structures to the GSUB-closed glyph set,
    dropping any sub-structure that becomes empty.

    Returns True if anything at all remains in the table.
    """
    glyphs = s.glyphs_gsubed
    table = self.table
    if table.LigCaretList:
        indices = table.LigCaretList.Coverage.subset(glyphs)
        table.LigCaretList.LigGlyph = [table.LigCaretList.LigGlyph[i]
                                       for i in indices]
        table.LigCaretList.LigGlyphCount = len(table.LigCaretList.LigGlyph)
        if not table.LigCaretList.LigGlyphCount:
            table.LigCaretList = None
    if table.MarkAttachClassDef:
        # Python 2 dict API (iteritems), consistent with the rest of the file.
        table.MarkAttachClassDef.classDefs = dict((g,v) for g,v in
                                                  table.MarkAttachClassDef.
                                                  classDefs.iteritems()
                                                  if g in glyphs)
        if not table.MarkAttachClassDef.classDefs:
            table.MarkAttachClassDef = None
    if table.GlyphClassDef:
        table.GlyphClassDef.classDefs = dict((g,v) for g,v in
                                             table.GlyphClassDef.
                                             classDefs.iteritems()
                                             if g in glyphs)
        if not table.GlyphClassDef.classDefs:
            table.GlyphClassDef = None
    if table.AttachList:
        indices = table.AttachList.Coverage.subset(glyphs)
        table.AttachList.AttachPoint = [table.AttachList.AttachPoint[i]
                                        for i in indices]
        table.AttachList.GlyphCount = len(table.AttachList.AttachPoint)
        if not table.AttachList.GlyphCount:
            table.AttachList = None
    return bool(table.LigCaretList or
                table.MarkAttachClassDef or
                table.GlyphClassDef or
                table.AttachList)
@_add_method(fontTools.ttLib.getTableClass('kern'))
def prune_pre_subset(self, options):
    # Prune unknown kern table types (they expose no 'kernTable' mapping).
    self.kernTables = [table for table in self.kernTables
                       if hasattr(table, 'kernTable')]
    return len(self.kernTables) > 0
@_add_method(fontTools.ttLib.getTableClass('kern'))
def subset_glyphs(self, s):
    """Keep only kern pairs where both glyphs survive; drop subtables that
    become empty.  Returns True if any subtable remains."""
    glyphs = s.glyphs_gsubed
    surviving = []
    for table in self.kernTables:
        table.kernTable = dict((pair, value)
                               for pair, value in table.kernTable.iteritems()
                               if pair[0] in glyphs and pair[1] in glyphs)
        if table.kernTable:
            surviving.append(table)
    self.kernTables = surviving
    return bool(self.kernTables)
@_add_method(fontTools.ttLib.getTableClass('vmtx'),
             fontTools.ttLib.getTableClass('hmtx'))
def subset_glyphs(self, s):
    """Keep metrics only for surviving glyphs; True if any remain."""
    self.metrics = dict((glyph, metric)
                        for glyph, metric in self.metrics.iteritems()
                        if glyph in s.glyphs)
    return bool(self.metrics)
@_add_method(fontTools.ttLib.getTableClass('hdmx'))
def subset_glyphs(self, s):
    """Filter each per-ppem-size device-metrics row down to surviving
    glyphs.  True if any row remains."""
    pruned = {}
    for size, row in self.hdmx.iteritems():
        pruned[size] = _dict((g, v) for g, v in row.iteritems()
                             if g in s.glyphs)
    self.hdmx = pruned
    return bool(self.hdmx)
@_add_method(fontTools.ttLib.getTableClass('VORG'))
def subset_glyphs(self, s):
    """Keep vertical-origin records only for surviving glyphs."""
    self.VOriginRecords = dict((glyph, origin)
                               for glyph, origin in self.VOriginRecords.iteritems()
                               if glyph in s.glyphs)
    self.numVertOriginYMetrics = len(self.VOriginRecords)
    # Never drop this table; it carries a default vertical origin.
    return True
@_add_method(fontTools.ttLib.getTableClass('post'))
def prune_pre_subset(self, options):
    """Switch to post format 3.0 (no glyph names stored) unless glyph
    names were requested."""
    if not options.glyph_names:
        self.formatType = 3.0
    return True
@_add_method(fontTools.ttLib.getTableClass('post'))
def subset_glyphs(self, s):
    """Clear the non-standard name list; that is all the subsetting the
    'post' table needs."""
    self.extraNames = []
    return True
@_add_method(fontTools.ttLib.getTableModule('glyf').Glyph)
def getComponentNamesFast(self, glyfTable):
    """Return the component glyph names of a composite glyph by scanning
    its raw binary data, without fully decompiling the glyph.

    Returns [] for empty or simple glyphs (numberOfContours >= 0).
    """
    if not self.data or struct.unpack(">h", self.data[:2])[0] >= 0:
        return []  # Not composite
    data = self.data
    i = 10  # Skip numberOfContours (2 bytes) + bounding box (4 x 2 bytes).
    components = []
    more = 1
    while more:
        flags, glyphID = struct.unpack(">HH", data[i:i+4])
        i += 4
        flags = int(flags)
        components.append(glyfTable.getGlyphName(int(glyphID)))

        # Skip the component arguments and transform, sized per flag bits.
        if flags & 0x0001: i += 4  # ARG_1_AND_2_ARE_WORDS
        else: i += 2
        if flags & 0x0008: i += 2  # WE_HAVE_A_SCALE
        elif flags & 0x0040: i += 4  # WE_HAVE_AN_X_AND_Y_SCALE
        elif flags & 0x0080: i += 8  # WE_HAVE_A_TWO_BY_TWO
        more = flags & 0x0020  # MORE_COMPONENTS
    return components
@_add_method(fontTools.ttLib.getTableModule('glyf').Glyph)
def remapComponentsFast(self, indices):
    """Remap component glyph IDs of a composite glyph in place on the raw
    binary data.  `indices` is the list of retained old glyph IDs; a
    component's new ID is its position in that list.  No-op for empty or
    simple glyphs."""
    if not self.data or struct.unpack(">h", self.data[:2])[0] >= 0:
        return  # Not composite
    data = array.array("B", self.data)
    i = 10  # Skip numberOfContours + bounding box.
    more = 1
    while more:
        flags = (data[i] << 8) | data[i+1]
        glyphID = (data[i+2] << 8) | data[i+3]
        # Remap
        glyphID = indices.index(glyphID)
        data[i+2] = glyphID >> 8
        data[i+3] = glyphID & 0xFF
        i += 4
        flags = int(flags)

        # Skip the component arguments and transform, sized per flag bits.
        if flags & 0x0001: i += 4  # ARG_1_AND_2_ARE_WORDS
        else: i += 2
        if flags & 0x0008: i += 2  # WE_HAVE_A_SCALE
        elif flags & 0x0040: i += 4  # WE_HAVE_AN_X_AND_Y_SCALE
        elif flags & 0x0080: i += 8  # WE_HAVE_A_TWO_BY_TWO
        more = flags & 0x0020  # MORE_COMPONENTS
    self.data = data.tostring()
@_add_method(fontTools.ttLib.getTableModule('glyf').Glyph)
def dropInstructionsFast(self):
    """Strip TrueType instructions from the raw glyph data in place.

    Simple glyphs: zero the instruction length and splice the bytecode out.
    Composite glyphs: clear WE_HAVE_INSTRUCTIONS on every component and
    truncate the data after the last component.  Output is padded to a
    4-byte boundary.
    """
    if not self.data:
        return
    numContours = struct.unpack(">h", self.data[:2])[0]
    data = array.array("B", self.data)
    i = 10  # Skip numberOfContours + bounding box.
    if numContours >= 0:
        i += 2 * numContours  # endPtsOfContours
        instructionLen = (data[i] << 8) | data[i+1]
        # Zero it
        data[i] = data[i+1] = 0
        i += 2
        if instructionLen:
            # Splice it out
            data = data[:i] + data[i+instructionLen:]
    else:
        more = 1
        while more:
            flags = (data[i] << 8) | data[i+1]
            # Turn instruction flag off
            flags &= ~0x0100  # WE_HAVE_INSTRUCTIONS
            data[i+0] = flags >> 8
            data[i+1] = flags & 0xFF
            i += 4
            flags = int(flags)

            # Skip the component arguments and transform per flag bits.
            if flags & 0x0001: i += 4  # ARG_1_AND_2_ARE_WORDS
            else: i += 2
            if flags & 0x0008: i += 2  # WE_HAVE_A_SCALE
            elif flags & 0x0040: i += 4  # WE_HAVE_AN_X_AND_Y_SCALE
            elif flags & 0x0080: i += 8  # WE_HAVE_A_TWO_BY_TWO
            more = flags & 0x0020  # MORE_COMPONENTS
        # Cut off
        data = data[:i]
    if len(data) % 4:
        # add pad bytes
        nPadBytes = 4 - (len(data) % 4)
        for i in range(nPadBytes):
            data.append(0)
    self.data = data.tostring()
@_add_method(fontTools.ttLib.getTableClass('glyf'))
def closure_glyphs(self, s):
    """Add to s.glyphs all glyphs referenced as components of composite
    glyphs, transitively."""
    decompose = s.glyphs
    # I don't know if component glyphs can be composite themselves.
    # We handle them anyway.
    while True:
        components = set()
        for g in decompose:
            if g not in self.glyphs:
                continue
            gl = self.glyphs[g]
            if hasattr(gl, "data"):
                # Raw (undecompiled) glyph: scan its binary data directly.
                for c in gl.getComponentNamesFast(self):
                    if c not in s.glyphs:
                        components.add(c)
            else:
                # TTX seems to expand gid0..3 always
                if gl.isComposite():
                    for c in gl.components:
                        if c.glyphName not in s.glyphs:
                            components.add(c.glyphName)
        components = set(c for c in components if c not in s.glyphs)
        if not components:
            break
        # Next round only needs to inspect the newly added glyphs.
        decompose = components
        s.glyphs.update(components)
@_add_method(fontTools.ttLib.getTableClass('glyf'))
def prune_pre_subset(self, options):
    """If a .notdef glyph is kept but its outline is not wanted, blank the
    first glyph in the glyph order in place."""
    if options.notdef_glyph and not options.notdef_outline:
        notdef = self[self.glyphOrder[0]]
        # Wiping the instance dict and raw data empties the glyph.
        notdef.__dict__.clear()
        notdef.data = ""
    return True
@_add_method(fontTools.ttLib.getTableClass('glyf'))
def subset_glyphs(self, s):
    """Drop glyphs not in s.glyphs and remap composite component glyph IDs
    to the new (compacted) glyph order."""
    self.glyphs = dict((g,v) for g,v in self.glyphs.iteritems() if g in s.glyphs)
    # Old glyph IDs kept, in order: a component's new ID is its position here.
    indices = [i for i,g in enumerate(self.glyphOrder) if g in s.glyphs]
    for v in self.glyphs.itervalues():
        if hasattr(v, "data"):
            v.remapComponentsFast(indices)
        else:
            pass  # No need; decompiled glyphs reference components by name.
    self.glyphOrder = [g for g in self.glyphOrder if g in s.glyphs]
    # Don't drop empty 'glyf' tables, otherwise 'loca' doesn't get subset.
    return True
@_add_method(fontTools.ttLib.getTableClass('glyf'))
def prune_post_subset(self, options):
    """Strip TrueType instructions from every glyph unless hinting output
    was requested.  Always keeps the table."""
    if options.hinting:
        return True
    for glyph in self.glyphs.itervalues():
        if hasattr(glyph, "data"):
            # Raw glyph: edit the binary data directly.
            glyph.dropInstructionsFast()
        else:
            # Decompiled glyph: replace its program with an empty one.
            glyph.program = fontTools.ttLib.tables.ttProgram.Program()
            glyph.program.fromBytecode([])
    return True
@_add_method(fontTools.ttLib.getTableClass('CFF '))
def prune_pre_subset(self, options):
    """Reduce the CFF table to a single font and, unless a .notdef outline
    was requested, replace .notdef's charstring with a bare 'endchar'."""
    cff = self.cff
    # CFF table must have one font only
    cff.fontNames = cff.fontNames[:1]

    if options.notdef_glyph and not options.notdef_outline:
        for fontname in cff.keys():
            font = cff[fontname]
            c,_ = font.CharStrings.getItemAndSelector('.notdef')
            # 0x0e is the Type 2 'endchar' operator: an empty outline.
            c.bytecode = '\x0e'
            c.program = None

    return True  # bool(cff.fontNames)
@_add_method(fontTools.ttLib.getTableClass('CFF '))
def subset_glyphs(self, s):
    """Subset the charstrings, FDSelect, and charset of every font in the
    CFF table to s.glyphs."""
    cff = self.cff
    for fontname in cff.keys():
        font = cff[fontname]
        cs = font.CharStrings

        # Load all glyphs
        for g in font.charset:
            if g not in s.glyphs: continue
            c,sel = cs.getItemAndSelector(g)

        if cs.charStringsAreIndexed:
            # Charstrings addressed by index: compact the index and remap.
            indices = [i for i,g in enumerate(font.charset) if g in s.glyphs]
            csi = cs.charStringsIndex
            csi.items = [csi.items[i] for i in indices]
            csi.count = len(csi.items)
            # Drop the lazy-loading machinery; everything is loaded above.
            del csi.file, csi.offsets
            if hasattr(font, "FDSelect"):
                sel = font.FDSelect
                sel.format = None
                sel.gidArray = [sel.gidArray[i] for i in indices]
            cs.charStrings = dict((g,indices.index(v))
                                  for g,v in cs.charStrings.iteritems()
                                  if g in s.glyphs)
        else:
            # Charstrings addressed directly by name: plain filter.
            cs.charStrings = dict((g,v)
                                  for g,v in cs.charStrings.iteritems()
                                  if g in s.glyphs)
        font.charset = [g for g in font.charset if g in s.glyphs]
        font.numGlyphs = len(font.charset)

    return True  # any(cff[fontname].numGlyphs for fontname in cff.keys())
@_add_method(fontTools.misc.psCharStrings.T2CharString)
def subset_subroutines(self, subrs, gsubrs):
    """Renumber callsubr/callgsubr operands after subroutine subsetting.

    Stored operands are biased; un-bias with the old bias, find the
    subroutine's position among the kept ones (`_used`), then re-bias
    with the new bias.
    """
    p = self.program
    assert len(p)
    # An operand always precedes its call operator, so start at index 1.
    for i in xrange(1, len(p)):
        if p[i] == 'callsubr':
            assert type(p[i-1]) is int
            p[i-1] = subrs._used.index(p[i-1] + subrs._old_bias) - subrs._new_bias
        elif p[i] == 'callgsubr':
            assert type(p[i-1]) is int
            p[i-1] = gsubrs._used.index(p[i-1] + gsubrs._old_bias) - gsubrs._new_bias
@_add_method(fontTools.misc.psCharStrings.T2CharString)
def drop_hints(self):
    """Remove hint operators from this charstring, based on the analysis
    previously stored in self._hints (see _DehintingT2Decompiler)."""
    hints = self._hints

    if hints.has_hint:
        # Everything up to the last hint operator is hint data; drop it.
        self.program = self.program[hints.last_hint:]
        if hasattr(self, 'width'):
            # Insert width back if needed
            if self.width != self.private.defaultWidthX:
                self.program.insert(0, self.width - self.private.nominalWidthX)

    if hints.has_hintmask:
        i = 0
        p = self.program
        while i < len(p):
            if p[i] in ['hintmask', 'cntrmask']:
                assert i + 1 <= len(p)
                # Remove the operator together with its mask argument.
                del p[i:i+2]
                continue
            i += 1

    assert len(self.program)

    del self._hints
class _MarkingT2Decompiler(fontTools.misc.psCharStrings.SimpleT2Decompiler):
    """T2 decompiler that records which local/global subroutines are
    actually called, in a `_used` set attached to each subroutine index."""

    def __init__(self, localSubrs, globalSubrs):
        fontTools.misc.psCharStrings.SimpleT2Decompiler.__init__(self,
                                                                 localSubrs,
                                                                 globalSubrs)
        for subrs in [localSubrs, globalSubrs]:
            if subrs and not hasattr(subrs, "_used"):
                subrs._used = set()

    def op_callsubr(self, index):
        # The stack operand is biased; record the absolute subr index.
        self.localSubrs._used.add(self.operandStack[-1]+self.localBias)
        fontTools.misc.psCharStrings.SimpleT2Decompiler.op_callsubr(self, index)

    def op_callgsubr(self, index):
        self.globalSubrs._used.add(self.operandStack[-1]+self.globalBias)
        fontTools.misc.psCharStrings.SimpleT2Decompiler.op_callgsubr(self, index)
class _DehintingT2Decompiler(fontTools.misc.psCharStrings.SimpleT2Decompiler):
    """T2 decompiler that analyzes where hint code ends in each charstring.

    Executes glyph charstrings (recursing into subroutines) and attaches a
    Hints record to each.  Charstrings containing hint stems or hintmasks
    are collected into the ``css`` set so drop_hints() can edit them later.
    """

    class Hints(object):
        def __init__(self):
            # Whether calling this charstring produces any hint stems
            self.has_hint = False
            # Index to start at to drop all hints
            self.last_hint = 0
            # Index up to which we know more hints are possible. Only
            # relevant if status is 0 or 1.
            self.last_checked = 0
            # The status means:
            # 0: after dropping hints, this charstring is empty
            # 1: after dropping hints, there may be more hints continuing after this
            # 2: no more hints possible after this charstring
            self.status = 0
            # Has hintmask instructions; not recursive
            self.has_hintmask = False

    def __init__(self, css, localSubrs, globalSubrs):
        # css: set collecting charstrings that need dehinting.
        self._css = css
        fontTools.misc.psCharStrings.SimpleT2Decompiler.__init__(self,
                                                                 localSubrs,
                                                                 globalSubrs)

    def execute(self, charString):
        old_hints = charString._hints if hasattr(charString, '_hints') else None
        charString._hints = self.Hints()
        fontTools.misc.psCharStrings.SimpleT2Decompiler.execute(self, charString)
        hints = charString._hints

        if hints.has_hint or hints.has_hintmask:
            self._css.add(charString)

        if hints.status != 2:
            # Check from last_checked; any operator (str) means no further
            # hints can follow this charstring.
            for i in xrange(hints.last_checked, len(charString.program) - 1):
                if type(charString.program[i]) == str:
                    hints.status = 2
                    break
            else:
                hints.status = 1  # There's *something* here
            hints.last_checked = len(charString.program)

        if old_hints:
            # A subroutine reached via two paths must analyze identically.
            assert hints.__dict__ == old_hints.__dict__

    def op_callsubr(self, index):
        subr = self.localSubrs[self.operandStack[-1]+self.localBias]
        fontTools.misc.psCharStrings.SimpleT2Decompiler.op_callsubr(self, index)
        self.processSubr(index, subr)

    def op_callgsubr(self, index):
        subr = self.globalSubrs[self.operandStack[-1]+self.globalBias]
        fontTools.misc.psCharStrings.SimpleT2Decompiler.op_callgsubr(self, index)
        self.processSubr(index, subr)

    def op_hstem(self, index):
        fontTools.misc.psCharStrings.SimpleT2Decompiler.op_hstem(self, index)
        self.processHint(index)

    def op_vstem(self, index):
        fontTools.misc.psCharStrings.SimpleT2Decompiler.op_vstem(self, index)
        self.processHint(index)

    def op_hstemhm(self, index):
        fontTools.misc.psCharStrings.SimpleT2Decompiler.op_hstemhm(self, index)
        self.processHint(index)

    def op_vstemhm(self, index):
        fontTools.misc.psCharStrings.SimpleT2Decompiler.op_vstemhm(self, index)
        self.processHint(index)

    def op_hintmask(self, index):
        fontTools.misc.psCharStrings.SimpleT2Decompiler.op_hintmask(self, index)
        self.processHintmask(index)

    def op_cntrmask(self, index):
        fontTools.misc.psCharStrings.SimpleT2Decompiler.op_cntrmask(self, index)
        self.processHintmask(index)

    def processHintmask(self, index):
        cs = self.callingStack[-1]
        hints = cs._hints
        hints.has_hintmask = True
        if hints.status != 2 and hints.has_hint:
            # Check from last_checked, see if we may be an implicit vstem
            for i in xrange(hints.last_checked, index - 1):
                if type(cs.program[i]) == str:
                    hints.status = 2
                    break
            if hints.status != 2:
                # We are an implicit vstem
                hints.last_hint = index + 1
                hints.status = 0
        hints.last_checked = index + 1

    def processHint(self, index):
        cs = self.callingStack[-1]
        hints = cs._hints
        hints.has_hint = True
        hints.last_hint = index
        hints.last_checked = index

    def processSubr(self, index, subr):
        cs = self.callingStack[-1]
        hints = cs._hints
        subr_hints = subr._hints

        if subr_hints.has_hint:
            if hints.status != 2:
                hints.has_hint = True
                # BUGFIX: these fields were previously assigned to `self`
                # (the decompiler) instead of `hints` (the caller's record),
                # so hints reached through subroutines were never recorded
                # on the calling charstring.
                hints.last_checked = index
                hints.status = subr_hints.status
                # Decide where to chop off from
                if subr_hints.status == 0:
                    hints.last_hint = index
                else:
                    hints.last_hint = index - 2  # Leave the subr call in
            else:
                # In my understanding, this is a font bug. Ie. it has hint stems
                # *after* path construction. I've seen this in widespread fonts.
                # Best to ignore the hints I suppose...
                pass
                #assert 0
        else:
            hints.status = max(hints.status, subr_hints.status)
            if hints.status != 2:
                # Check from last_checked, make sure we didn't have
                # any operators.
                for i in xrange(hints.last_checked, index - 1):
                    if type(cs.program[i]) == str:
                        hints.status = 2
                        break
            hints.last_checked = index
@_add_method(fontTools.ttLib.getTableClass('CFF '))
def prune_post_subset(self, options):
    """Post-subset cleanup for CFF: drop unused FDs, hints and subroutines."""
    cff = self.cff
    for fontname in cff.keys():
        font = cff[fontname]
        cs = font.CharStrings

        #
        # Drop unused FontDictionaries
        #
        if hasattr(font, "FDSelect"):
            sel = font.FDSelect
            # Remap FD indices to a dense 0..n-1 range.
            indices = _uniq_sort(sel.gidArray)
            sel.gidArray = [indices.index (ss) for ss in sel.gidArray]
            arr = font.FDArray
            arr.items = [arr[i] for i in indices]
            arr.count = len(arr.items)
            # Drop lazy-loading state so the index is recompiled from items.
            del arr.file, arr.offsets

        #
        # Drop hints if not needed
        #
        if not options.hinting:
            #
            # This can be tricky, but doesn't have to. What we do is:
            #
            # - Run all used glyph charstrings and recurse into subroutines,
            # - For each charstring (including subroutines), if it has any
            #   of the hint stem operators, we mark it as such.  Upon returning,
            #   for each charstring we note all the subroutine calls it makes
            #   that (recursively) contain a stem,
            # - Dropping hinting then consists of the following two ops:
            #   * Drop the piece of the program in each charstring before the
            #     last call to a stem op or a stem-calling subroutine,
            #   * Drop all hintmask operations.
            # - It's trickier... A hintmask right after hints and a few numbers
            #   will act as an implicit vstemhm.  As such, we track whether
            #   we have seen any non-hint operators so far and do the right
            #   thing, recursively...  Good luck understanding that :(
            #
            css = set()
            for g in font.charset:
                c,sel = cs.getItemAndSelector(g)
                # Make sure it's decompiled.  We want our "decompiler" to walk
                # the program, not the bytecode.
                c.draw(fontTools.pens.basePen.NullPen())
                subrs = getattr(c.private, "Subrs", [])
                decompiler = _DehintingT2Decompiler(css, subrs, c.globalSubrs)
                decompiler.execute(c)
            for charstring in css:
                charstring.drop_hints()

        #
        # Renumber subroutines to remove unused ones
        #

        # Mark all used subroutines
        for g in font.charset:
            c,sel = cs.getItemAndSelector(g)
            subrs = getattr(c.private, "Subrs", [])
            decompiler = _MarkingT2Decompiler(subrs, c.globalSubrs)
            decompiler.execute(c)

        all_subrs = [font.GlobalSubrs]
        if hasattr(font, 'FDSelect'):
            all_subrs.extend(fd.Private.Subrs for fd in font.FDArray if hasattr(fd.Private, 'Subrs') and fd.Private.Subrs)
        elif hasattr(font.Private, 'Subrs') and font.Private.Subrs:
            all_subrs.append(font.Private.Subrs)

        # NOTE(review): this line looks vestigial — `subrs` is immediately
        # reassigned by the loop below and this set is never read; confirm
        # before removing.
        subrs = set(subrs) # Remove duplicates

        # Prepare: compute old/new biases and the sorted list of used indices.
        for subrs in all_subrs:
            if not hasattr(subrs, '_used'):
                subrs._used = set()
            subrs._used = _uniq_sort(subrs._used)
            subrs._old_bias = fontTools.misc.psCharStrings.calcSubrBias(subrs)
            subrs._new_bias = fontTools.misc.psCharStrings.calcSubrBias(subrs._used)

        # Renumber glyph charstrings
        for g in font.charset:
            c,sel = cs.getItemAndSelector(g)
            subrs = getattr(c.private, "Subrs", [])
            c.subset_subroutines (subrs, font.GlobalSubrs)

        # Renumber subroutines themselves
        for subrs in all_subrs:
            if subrs == font.GlobalSubrs:
                # Global subrs renumber against the (single) local subrs, if any.
                if not hasattr(font, 'FDSelect') and hasattr(font.Private, 'Subrs'):
                    local_subrs = font.Private.Subrs
                else:
                    local_subrs = []
            else:
                local_subrs = subrs
            # Keep only used subroutines and recompile the index.
            subrs.items = [subrs.items[i] for i in subrs._used]
            subrs.count = len(subrs.items)
            del subrs.file
            if hasattr(subrs, 'offsets'):
                del subrs.offsets
            for i in xrange (subrs.count):
                subrs[i].subset_subroutines (local_subrs, font.GlobalSubrs)

        # Cleanup temporary analysis attributes.
        for subrs in all_subrs:
            del subrs._used, subrs._old_bias, subrs._new_bias

    return True
@_add_method(fontTools.ttLib.getTableClass('cmap'))
def closure_glyphs(self, s):
    """Add to s.glyphs the glyph each requested Unicode value maps to."""
    # Only consult Windows Unicode subtables (platform 3, encoding 1 or 10).
    unicode_tables = [t for t in self.tables
                      if t.platformID == 3 and t.platEncID in [1, 10]]
    for u in s.unicodes_requested:
        for table in unicode_tables:
            if u in table.cmap:
                s.glyphs.add(table.cmap[u])
                break
        else:
            # No subtable maps this codepoint.
            s.log("No glyph for Unicode value %s; skipping." % u)
@_add_method(fontTools.ttLib.getTableClass('cmap'))
def prune_pre_subset(self, options):
    """Drop cmap subtables the options say we don't need; keep table if any remain."""
    kept = self.tables
    if not options.legacy_cmap:
        # Drop non-Unicode / non-Symbol cmaps
        kept = [t for t in kept
                if t.platformID == 3 and t.platEncID in [0, 1, 10]]
    if not options.symbol_cmap:
        kept = [t for t in kept
                if t.platformID == 3 and t.platEncID in [1, 10]]
    # TODO(behdad) Only keep one subtable?
    # For now, drop format=0 which can't be subset_glyphs easily?
    self.tables = [t for t in kept if t.format != 0]
    return bool(self.tables)
@_add_method(fontTools.ttLib.getTableClass('cmap'))
def subset_glyphs(self, s):
    """Restrict every cmap subtable to the retained glyph set."""
    s.glyphs = s.glyphs_cmaped
    for table in self.tables:
        # For reasons I don't understand I need this here
        # to force decompilation of the cmap format 14.
        try:
            getattr(table, "asdf")
        except AttributeError:
            pass
        if table.format == 14:
            # TODO(behdad) XXX We drop all the default-UVS mappings(g==None).
            pruned = {}
            for v, mappings in table.uvsDict.iteritems():
                kept = [(u, g) for u, g in mappings if g in s.glyphs]
                if kept:
                    pruned[v] = kept
            table.uvsDict = pruned
        else:
            table.cmap = dict((u, g) for u, g in table.cmap.iteritems()
                              if g in s.glyphs_requested or u in s.unicodes_requested)
    # Discard subtables that ended up empty.
    self.tables = [table for table in self.tables
                   if (table.cmap if table.format != 14 else table.uvsDict)]
    # TODO(behdad) Convert formats when needed.
    # In particular, if we have a format=12 without non-BMP
    # characters, either drop format=12 one or convert it
    # to format=4 if there's not one.
    return bool(self.tables)
@_add_method(fontTools.ttLib.getTableClass('name'))
def prune_pre_subset(self, options):
    """Filter name records by ID, platform/encoding and language."""
    kept = self.names
    if '*' not in options.name_IDs:
        kept = [rec for rec in kept if rec.nameID in options.name_IDs]
    if not options.name_legacy:
        # Keep only Windows Unicode BMP records.
        kept = [rec for rec in kept
                if rec.platformID == 3 and rec.platEncID == 1]
    if '*' not in options.name_languages:
        kept = [rec for rec in kept if rec.langID in options.name_languages]
    self.names = kept
    return True  # Retain even if empty
# TODO(behdad) OS/2 ulUnicodeRange / ulCodePageRange?
# TODO(behdad) Drop unneeded GSUB/GPOS Script/LangSys entries.
# TODO(behdad) Drop empty GSUB/GPOS, and GDEF if no GSUB/GPOS left
# TODO(behdad) Drop GDEF subitems if unused by lookups
# TODO(behdad) Avoid recursing too much (in GSUB/GPOS and in CFF)
# TODO(behdad) Text direction considerations.
# TODO(behdad) Text script / language considerations.
class Options(object):
    """Subsetting options, settable via keyword args or command-line flags.

    parse_opts() understands --opt, --no-opt, --opt=value, --opt+=v1,v2 and
    --opt-=v1,v2 forms; option names use '-' on the command line and '_' as
    attributes.
    """

    class UnknownOptionError(Exception):
        pass

    _drop_tables_default = ['BASE', 'JSTF', 'DSIG', 'EBDT', 'EBLC', 'EBSC', 'SVG ',
                            'PCLT', 'LTSH']
    _drop_tables_default += ['Feat', 'Glat', 'Gloc', 'Silf', 'Sill']  # Graphite
    _drop_tables_default += ['CBLC', 'CBDT', 'sbix', 'COLR', 'CPAL']  # Color
    _no_subset_tables_default = ['gasp', 'head', 'hhea', 'maxp', 'vhea', 'OS/2',
                                 'loca', 'name', 'cvt ', 'fpgm', 'prep']
    _hinting_tables_default = ['cvt ', 'fpgm', 'prep', 'hdmx', 'VDMX']

    # Based on HarfBuzz shapers
    _layout_features_groups = {
        # Default shaper
        'common': ['ccmp', 'liga', 'locl', 'mark', 'mkmk', 'rlig'],
        'horizontal': ['calt', 'clig', 'curs', 'kern', 'rclt'],
        'vertical': ['valt', 'vert', 'vkrn', 'vpal', 'vrt2'],
        'ltr': ['ltra', 'ltrm'],
        'rtl': ['rtla', 'rtlm'],
        # Complex shapers
        'arabic': ['init', 'medi', 'fina', 'isol', 'med2', 'fin2', 'fin3',
                   'cswh', 'mset'],
        'hangul': ['ljmo', 'vjmo', 'tjmo'],
        # NOTE: key misspells 'tibetan'; harmless (keys are organizational
        # only) and kept as-is for compatibility.
        'tibetal': ['abvs', 'blws', 'abvm', 'blwm'],
        'indic': ['nukt', 'akhn', 'rphf', 'rkrf', 'pref', 'blwf', 'half',
                  'abvf', 'pstf', 'cfar', 'vatu', 'cjct', 'init', 'pres',
                  'abvs', 'blws', 'psts', 'haln', 'dist', 'abvm', 'blwm'],
    }
    _layout_features_default = _uniq_sort(sum(
        _layout_features_groups.itervalues(), []))

    drop_tables = _drop_tables_default
    no_subset_tables = _no_subset_tables_default
    hinting_tables = _hinting_tables_default
    layout_features = _layout_features_default
    hinting = False
    glyph_names = False
    legacy_cmap = False
    symbol_cmap = False
    name_IDs = [1, 2]  # Family and Style
    name_legacy = False
    name_languages = [0x0409]  # English
    notdef_glyph = True  # gid0 for TrueType / .notdef for CFF
    notdef_outline = False  # No need for notdef to have an outline really
    recommended_glyphs = False  # gid1, gid2, gid3 for TrueType
    recalc_bounds = False  # Recalculate font bounding boxes
    canonical_order = False  # Order tables as recommended
    flavor = None  # May be 'woff'

    def __init__(self, **kwargs):
        self.set(**kwargs)

    def set(self, **kwargs):
        """Assign known options; raise UnknownOptionError for anything else."""
        for k,v in kwargs.iteritems():
            if not hasattr(self, k):
                raise self.UnknownOptionError("Unknown option '%s'" % k)
            setattr(self, k, v)

    def parse_opts(self, argv, ignore_unknown=False):
        """Consume --options from argv; return the non-option arguments.

        ignore_unknown may be True (pass all unknown options through) or a
        list of option names to pass through.
        """
        ret = []
        opts = {}
        for a in argv:
            orig_a = a
            if not a.startswith('--'):
                ret.append(a)
                continue
            a = a[2:]
            i = a.find('=')
            op = '='
            if i == -1:
                # Bare flag: --opt enables, --no-opt disables.
                if a.startswith("no-"):
                    k = a[3:]
                    v = False
                else:
                    k = a
                    v = True
            else:
                k = a[:i]
                if k[-1] in "-+":
                    op = k[-1]+'='  # Op is '-=' or '+=' now.
                    k = k[:-1]
                v = a[i+1:]
            k = k.replace('-', '_')
            if not hasattr(self, k):
                # BUGFIX: with the default ignore_unknown=False the original
                # evaluated `k in False`, raising TypeError instead of the
                # intended UnknownOptionError.
                if ignore_unknown is True or (ignore_unknown and k in ignore_unknown):
                    ret.append(orig_a)
                    continue
                else:
                    raise self.UnknownOptionError("Unknown option '%s'" % a)

            ov = getattr(self, k)
            if isinstance(ov, bool):
                # NOTE: any non-empty value string (including '0') is truthy.
                v = bool(v)
            elif isinstance(ov, int):
                v = int(v)
            elif isinstance(ov, list):
                vv = v.split(',')
                if vv == ['']:
                    vv = []
                vv = [int(x, 0) if len(x) and x[0] in "0123456789" else x for x in vv]
                if op == '=':
                    v = vv
                elif op == '+=':
                    v = ov
                    v.extend(vv)
                elif op == '-=':
                    v = ov
                    for x in vv:
                        if x in v:
                            v.remove(x)
                else:
                    assert 0

            opts[k] = v
        self.set(**opts)
        return ret
class Subsetter(object):
    """Drives subsetting: pre-prune, glyph closure, per-table subsetting,
    post-prune.  Call populate() then subset(font)."""

    def __init__(self, options=None, log=None):
        if not log:
            log = Logger()
        if not options:
            options = Options()

        self.options = options
        self.log = log
        self.unicodes_requested = set()
        self.glyphs_requested = set()
        self.glyphs = set()

    def populate(self, glyphs=[], unicodes=[], text=""):
        """Register glyph names, Unicode codepoints and/or literal text to keep.

        NOTE(review): mutable default arguments; safe here only because they
        are never mutated.
        """
        self.unicodes_requested.update(unicodes)
        if isinstance(text, str):
            # Python 2: bytes -> unicode so ord() yields codepoints.
            text = text.decode("utf8")
        for u in text:
            self.unicodes_requested.add(ord(u))
        self.glyphs_requested.update(glyphs)
        self.glyphs.update(glyphs)

    def _prune_pre_subset(self, font):
        # Drop whole tables per options, then give each remaining table a
        # chance to prune itself before the glyph closure is computed.
        for tag in font.keys():
            if tag == 'GlyphOrder': continue

            if(tag in self.options.drop_tables or
               (tag in self.options.hinting_tables and not self.options.hinting)):
                self.log(tag, "dropped")
                del font[tag]
                continue

            clazz = fontTools.ttLib.getTableClass(tag)
            if hasattr(clazz, 'prune_pre_subset'):
                table = font[tag]
                self.log.lapse("load '%s'" % tag)
                retain = table.prune_pre_subset(self.options)
                self.log.lapse("prune '%s'" % tag)
                if not retain:
                    self.log(tag, "pruned to empty; dropped")
                    del font[tag]
                    continue
                else:
                    self.log(tag, "pruned")

    def _closure_glyphs(self, font):
        # Expand the requested set with cmap mappings, notdef/recommended
        # glyphs, GSUB substitutions and glyf composite components.
        self.glyphs = self.glyphs_requested.copy()

        if 'cmap' in font:
            font['cmap'].closure_glyphs(self)
        self.glyphs_cmaped = self.glyphs

        if self.options.notdef_glyph:
            if 'glyf' in font:
                self.glyphs.add(font.getGlyphName(0))
                self.log("Added gid0 to subset")
            else:
                self.glyphs.add('.notdef')
                self.log("Added .notdef to subset")
        if self.options.recommended_glyphs:
            if 'glyf' in font:
                for i in range(4):
                    self.glyphs.add(font.getGlyphName(i))
                self.log("Added first four glyphs to subset")

        if 'GSUB' in font:
            self.log("Closing glyph list over 'GSUB': %d glyphs before" %
                     len(self.glyphs))
            self.log.glyphs(self.glyphs, font=font)
            font['GSUB'].closure_glyphs(self)
            self.log("Closed glyph list over 'GSUB': %d glyphs after" %
                     len(self.glyphs))
            self.log.glyphs(self.glyphs, font=font)
            self.log.lapse("close glyph list over 'GSUB'")
        self.glyphs_gsubed = self.glyphs.copy()

        if 'glyf' in font:
            self.log("Closing glyph list over 'glyf': %d glyphs before" %
                     len(self.glyphs))
            self.log.glyphs(self.glyphs, font=font)
            font['glyf'].closure_glyphs(self)
            self.log("Closed glyph list over 'glyf': %d glyphs after" %
                     len(self.glyphs))
            self.log.glyphs(self.glyphs, font=font)
            self.log.lapse("close glyph list over 'glyf'")
        self.glyphs_glyfed = self.glyphs.copy()

        self.glyphs_all = self.glyphs.copy()

        self.log("Retaining %d glyphs: " % len(self.glyphs_all))

    def _subset_glyphs(self, font):
        for tag in font.keys():
            if tag == 'GlyphOrder': continue
            clazz = fontTools.ttLib.getTableClass(tag)

            if tag in self.options.no_subset_tables:
                self.log(tag, "subsetting not needed")
            elif hasattr(clazz, 'subset_glyphs'):
                table = font[tag]
                self.glyphs = self.glyphs_all
                retain = table.subset_glyphs(self)
                # Restore: subset_glyphs may have swapped in a narrower set.
                self.glyphs = self.glyphs_all
                self.log.lapse("subset '%s'" % tag)
                if not retain:
                    self.log(tag, "subsetted to empty; dropped")
                    del font[tag]
                else:
                    self.log(tag, "subsetted")
            else:
                self.log(tag, "NOT subset; don't know how to subset; dropped")
                del font[tag]

        glyphOrder = font.getGlyphOrder()
        glyphOrder = [g for g in glyphOrder if g in self.glyphs_all]
        font.setGlyphOrder(glyphOrder)
        font._buildReverseGlyphOrderDict()
        self.log.lapse("subset GlyphOrder")

    def _prune_post_subset(self, font):
        for tag in font.keys():
            if tag == 'GlyphOrder': continue
            clazz = fontTools.ttLib.getTableClass(tag)
            if hasattr(clazz, 'prune_post_subset'):
                table = font[tag]
                retain = table.prune_post_subset(self.options)
                self.log.lapse("prune '%s'" % tag)
                if not retain:
                    self.log(tag, "pruned to empty; dropped")
                    del font[tag]
                else:
                    self.log(tag, "pruned")

    def subset(self, font):
        """Run the full subsetting pipeline on font, in place."""
        self._prune_pre_subset(font)
        self._closure_glyphs(font)
        self._subset_glyphs(font)
        self._prune_post_subset(font)
class Logger(object):
    """Minimal console logger with optional timing and XML font dumps.

    Python 2 module: uses print statements.
    """

    def __init__(self, verbose=False, xml=False, timing=False):
        self.verbose = verbose
        self.xml = xml
        self.timing = timing
        self.last_time = self.start_time = time.time()

    def parse_opts(self, argv):
        """Consume --verbose/--xml/--timing from argv; return the rest."""
        argv = argv[:]
        for v in ['verbose', 'xml', 'timing']:
            if "--"+v in argv:
                setattr(self, v, True)
                argv.remove("--"+v)
        return argv

    def __call__(self, *things):
        # Space-joined message, only when verbose.
        if not self.verbose:
            return
        print ' '.join(str(x) for x in things)

    def lapse(self, *things):
        # Report elapsed time since the previous lapse() call.
        if not self.timing:
            return
        new_time = time.time()
        print "Took %0.3fs to %s" %(new_time - self.last_time,
                                    ' '.join(str(x) for x in things))
        self.last_time = new_time

    def glyphs(self, glyphs, font=None):
        self("Names: ", sorted(glyphs))
        if font:
            reverseGlyphMap = font.getReverseGlyphMap()
            self("Gids : ", sorted(reverseGlyphMap[g] for g in glyphs))

    def font(self, font, file=sys.stdout):
        # Dump the whole font as TTX-style XML, only when --xml was given.
        if not self.xml:
            return
        import xmlWriter
        writer = xmlWriter.XMLWriter(file)
        font.disassembleInstructions = False  # Work around ttLib bug
        for tag in font.keys():
            writer.begintag(tag)
            writer.newline()
            font[tag].toXML(writer, font)
            writer.endtag(tag)
            writer.newline()
def load_font(fontFile,
              options,
              checkChecksums=False,
              dontLoadGlyphNames=False):
    """Open fontFile as a TTFont, optionally skipping glyph-name decoding.

    Skipping glyph names (dontLoadGlyphNames=True) avoids slow and fragile
    'post' table parsing on broken fonts.
    """
    font = fontTools.ttLib.TTFont(fontFile,
                                  checkChecksums=checkChecksums,
                                  recalcBBoxes=options.recalc_bounds)
    #
    # Hack:
    #
    # If we don't need glyph names, change 'post' class to not try to
    # load them.  It avoids lots of headache with broken fonts as well
    # as loading time.
    #
    # Ideally ttLib should provide a way to ask it to skip loading
    # glyph names.  But it currently doesn't provide such a thing.
    #
    if dontLoadGlyphNames:
        post = fontTools.ttLib.getTableClass('post')
        saved = post.decode_format_2_0
        post.decode_format_2_0 = post.decode_format_3_0
        # BUGFIX: restore the class-level decoder even if loading the table
        # raises, so a failure here doesn't poison later 'post' loads.
        try:
            f = font['post']
            if f.formatType == 2.0:
                f.formatType = 3.0
        finally:
            post.decode_format_2_0 = saved
    return font
def save_font(font, outfile, options):
    """Write *font* to *outfile*, honoring flavor and table-order options."""
    # A flavor can only be requested if this fontTools build supports it.
    if options.flavor and not hasattr(font, 'flavor'):
        raise Exception("fonttools version does not support flavors.")
    # Propagate the requested flavor (possibly None) before saving.
    font.flavor = options.flavor
    font.save(outfile, reorderTables=options.canonical_order)
def main(args):
    """Command-line entry point: pyftsubset font-file glyph... [--options]."""
    log = Logger()
    args = log.parse_opts(args)

    options = Options()
    # '--text=' is handled here, not by Options, so let it pass through.
    args = options.parse_opts(args, ignore_unknown=['text'])

    if len(args) < 2:
        print >>sys.stderr, "usage: pyftsubset font-file glyph... [--text=ABC]... [--option=value]..."
        sys.exit(1)

    fontfile = args[0]
    args = args[1:]

    # Glyph names can be skipped only when every identifier is numeric-style
    # (gid/glyph/uni/U+), since none of those require the name table.
    dontLoadGlyphNames =(not options.glyph_names and
                         all(any(g.startswith(p)
                                 for p in ['gid', 'glyph', 'uni', 'U+'])
                             for g in args))

    font = load_font(fontfile, options, dontLoadGlyphNames=dontLoadGlyphNames)
    subsetter = Subsetter(options=options, log=log)
    log.lapse("load font")

    names = font.getGlyphNames()
    log.lapse("loading glyph names")

    # Classify each argument as glyph name, text, Unicode value, or gid.
    glyphs = []
    unicodes = []
    text = ""
    for g in args:
        if g == '*':
            # Keep every glyph in the font.
            glyphs.extend(font.getGlyphOrder())
            continue
        if g in names:
            glyphs.append(g)
            continue
        if g.startswith('--text='):
            text += g[7:]
            continue
        if g.startswith('uni') or g.startswith('U+'):
            if g.startswith('uni') and len(g) > 3:
                g = g[3:]
            elif g.startswith('U+') and len(g) > 2:
                g = g[2:]
            u = int(g, 16)
            unicodes.append(u)
            continue
        if g.startswith('gid') or g.startswith('glyph'):
            if g.startswith('gid') and len(g) > 3:
                g = g[3:]
            elif g.startswith('glyph') and len(g) > 5:
                g = g[5:]
            try:
                glyphs.append(font.getGlyphName(int(g), requireReal=1))
            except ValueError:
                raise Exception("Invalid glyph identifier: %s" % g)
            continue
        raise Exception("Invalid glyph identifier: %s" % g)
    log.lapse("compile glyph list")
    log("Unicodes:", unicodes)
    log("Glyphs:", glyphs)

    subsetter.populate(glyphs=glyphs, unicodes=unicodes, text=text)
    subsetter.subset(font)

    outfile = fontfile + '.subset'
    save_font (font, outfile, options)
    log.lapse("compile and save font")

    log.last_time = log.start_time
    log.lapse("make one with everything(TOTAL TIME)")

    if log.verbose:
        import os
        log("Input font: %d bytes" % os.path.getsize(fontfile))
        log("Subset font: %d bytes" % os.path.getsize(outfile))

    log.font(font)
    font.close()
# Public API of this module.
__all__ = [
    'Options',
    'Subsetter',
    'Logger',
    'load_font',
    'save_font',
    'main'
]

if __name__ == '__main__':
    main(sys.argv[1:])
|
# coding: utf-8
from __future__ import unicode_literals, division, print_function
"""
Workflows for GW calculations:
VaspGWFWWorkFlow fireworks wf for vasp
SingleAbinitGWWorkFlow workflow for abinit
Under construction:
general GW workflow that should manage all the code independent logic
"""
__author__ = "Michiel van Setten"
__copyright__ = " "
__version__ = "0.9"
__maintainer__ = "Michiel van Setten"
__email__ = "mjvansetten@gmail.com"
__date__ = "May 2014"
import copy
import logging
import os
import os.path

from pymatgen.io.abinitio.abiobjects import asabistructure
from pymatgen.io.abinitio.calculations import g0w0_extended
from pymatgen.io.abinitio.flows import AbinitFlow
from pymatgen.io.abinitio.pseudos import PseudoTable
from pymatgen.io.abinitio.tasks import TaskManager
from pymatgen.io.gwwrapper.GWtasks import *
from pymatgen.io.gwwrapper.helpers import now, s_name, expand, read_grid_from_file, is_converged
from pymatgen.io.gwwrapper.helpers import read_extra_abivars
MODULE_DIR = os.path.dirname(os.path.abspath(__file__))
logger = logging.getLogger(__name__)
class GWWorkflow(object):
    """
    UNDER CONSTRUCTION
    Base class for GW workflows. The code specific implementations should
    extend this one; the base class contains the convergence bookkeeping.
    """

    @property
    def grid(self):
        # Convergence grid index; 0 until read from the <name>.full_res file.
        return self._grid

    @property
    def all_done(self):
        # True once the convergence study for this structure has finished.
        return self._all_done

    @property
    def workdir(self):
        # Working directory for the current grid, or None if not yet known.
        return self._workdir

    def set_status(self, structure):
        """Initialize grid/all_done/workdir from <name>.full_res, if present.

        Defect fixed: the file was previously read and parsed twice (once per
        field); it is now read a single time.
        """
        self._grid = 0
        self._all_done = False
        self._workdir = None
        self._converged = is_converged(False, structure)
        try:
            full_res = read_grid_from_file(s_name(structure) + ".full_res")
            self._grid = full_res['grid']
            self._all_done = full_res['all_done']
            self._workdir = os.path.join(s_name(structure), 'work_' + str(self.grid))
        except (IOError, OSError):
            # No status file yet: keep the defaults set above.
            pass
class VaspGWFWWorkFlow():
    """
    Object containing a VASP GW workflow for a single structure
    """

    def __init__(self):
        self.work_list = []    # FireWork objects, in creation order
        self.connections = {}  # fw_id -> list of dependent fw_ids
        self.fw_id = 1         # next FireWork id to assign
        self.prep_id = 1       # id of the preparation FireWork
        self.wf = []           # becomes a fireworks Workflow in create()

    def add_work(self, parameters):
        """Build one FireWork ('prep' or a GW job) and queue it in work_list."""
        from fireworks.core.firework import FireWork
        tasks = []
        job = parameters['job']
        print('adding job ' + job + ' to the workslist as ', self.fw_id)
        if job == 'prep':
            launch_spec = {'task_type': 'Preparation job', '_category': 'cluster', '_queueadapter': 'qadapterdict'}
            task = VaspGWInputTask(parameters)
            tasks.append(task)
            task = VaspGWExecuteTask(parameters)
            tasks.append(task)
            task = VaspGWToDiagTask(parameters)
            tasks.append(task)
            task = VaspGWExecuteTask(parameters)
            tasks.append(task)
            fw = FireWork(tasks, spec=launch_spec, name=job, created_on=now(), fw_id=self.fw_id)
            self.connections[self.fw_id] = []
            # Remember the prep job so GW jobs can depend on it.
            self.prep_id = self.fw_id
            self.fw_id += 1
            print(self.connections)
        elif job in ['G0W0', 'GW0', 'scGW0']:
            launch_spec = {'task_type': 'GW job', '_category': 'cluster', '_queueadapter': 'qadapterdict'}
            task = VaspGWInputTask(parameters)
            tasks.append(task)
            task = VaspGWGetPrepResTask(parameters)
            tasks.append(task)
            task = VaspGWExecuteTask(parameters)
            tasks.append(task)
            if parameters['spec']['converge']:
                # Convergence runs additionally write and test convergence data.
                task = VaspGWWriteConDatTask(parameters)
                tasks.append(task)
                task = VaspGWTestConTask(parameters)
                tasks.append(task)
            fw = FireWork(tasks, spec=launch_spec, name=job, created_on=now(), fw_id=self.fw_id)
            self.connections[self.fw_id] = []
            # GW jobs run after the preparation job.
            self.connections[self.prep_id].append(self.fw_id)
            self.fw_id += 1
        else:
            fw = []
            print('unspecified job, this should have been captured before !!')
            exit()
        self.work_list.append(fw)

    def create(self):
        """Assemble the queued FireWorks into a fireworks Workflow."""
        from fireworks.core.firework import Workflow
        self.wf = Workflow(self.work_list, self.connections, name='VaspGWFWWorkFlow', created_on=now())
        print('creating workflow')

    def add_to_db(self):
        """Register the workflow with the LaunchPad from $FW_CONFIG_DIR."""
        from fireworks.core.launchpad import LaunchPad
        launchpad_file = os.path.join(os.environ['FW_CONFIG_DIR'], 'my_launchpad.yaml')
        lp = LaunchPad.from_file(launchpad_file)
        lp.add_wf(self.wf)
class SingleAbinitGWWorkFlow():
    """
    GW workflow for Abinit
    """
    # Plasmon-pole / response models understood by the screening step.
    RESPONSE_MODELS = ["cd", "godby", "hybersten", "linden", "farid"]
    # Single-shot test ranges per parameter.
    TESTS = {'ecuteps': {'test_range': (10, 14), 'method': 'direct', 'control': "gap", 'level': "sigma"},
             'nscf_nbands': {'test_range': (30, 40), 'method': 'set_bands', 'control': "gap", 'level': "nscf"},
             'response_model': {'test_range': RESPONSE_MODELS, 'method': 'direct', 'control': 'gap', 'level': 'screening'}}
    # scf level test are run independently, the last value will be used in the nscf and sigma tests
    #'test': {'test_range': (1, 2, 3), 'method': 'direct', 'control': "e_ks_max", 'level': "scf"},
    CONVS = {'ecut': {'test_range': (28, 32, 36, 40, 44), 'method': 'direct', 'control': "e_ks_max", 'level': "scf"},
             'ecuteps': {'test_range': (4, 8, 12, 16, 20), 'method': 'direct', 'control': "gap", 'level': "sigma"},
             'nscf_nbands': {'test_range': (5, 10, 20, 30, 40), 'method': 'set_bands', 'control': "gap", 'level': "nscf"}}

    def __init__(self, structure, spec, option=None):
        self.structure = structure
        self.spec = spec
        # option: dict of converged parameter values (or values under test).
        self.option = option
        self.tests = self.__class__.get_defaults_tests()
        self.convs = self.__class__.get_defaults_convs()
        self.response_models = self.__class__.get_response_models()
        # all_converged: option carries a value for every convergence parameter.
        if self.option is None:
            self.all_converged = False
        elif len(self.option) == len(self.convs):
            self.all_converged = True
        else:
            self.all_converged = False
        path_add = '.conv' if self.all_converged else ''
        self.work_dir = s_name(self.structure)+path_add
        # Build the pseudopotential table from $ABINIT_PS / $ABINIT_PS_EXT.
        abi_pseudo = os.environ['ABINIT_PS_EXT']
        abi_pseudo_dir = os.environ['ABINIT_PS']
        pseudos = []
        for element in self.structure.composition.element_composition:
            pseudo = os.path.join(abi_pseudo_dir, str(element) + abi_pseudo)
            pseudos.append(pseudo)
        self.pseudo_table = PseudoTable(pseudos)

    @classmethod
    def get_defaults_tests(cls):
        # Deep copies so instances can mutate their own settings.
        return copy.deepcopy(cls.TESTS)

    @classmethod
    def get_defaults_convs(cls):
        return copy.deepcopy(cls.CONVS)

    @classmethod
    def get_response_models(cls):
        return copy.deepcopy(cls.RESPONSE_MODELS)

    def get_electrons(self, structure):
        """
        Method for retrieving the number of valence electrons
        """
        electrons = 0
        for element in structure.species:
            entries = self.pseudo_table.pseudos_with_symbol(element.symbol)
            assert len(entries) == 1
            pseudo = entries[0]
            electrons += pseudo.Z_val
        return electrons

    def get_bands(self, structure):
        """
        Method for retrieving the standard number of bands
        """
        # Filled bands plus one extra band per site.
        bands = self.get_electrons(structure) / 2 + len(structure)
        return int(bands)

    def get_work_dir(self):
        # Per-test directory name while converging; plain name once converged.
        name = s_name(self.structure)
        if not self.all_converged:
            return str(name)+'_'+str(self.option['test'])+'_'+str(self.option['value'])
        else:
            return str(name)

    def create(self):
        """
        create single abinit G0W0 flow
        """
        # NOTE(review): this value is overwritten by TaskManager.from_user_config()
        # below; the 'slurm'/'shell' choice is effectively dead code.
        manager = 'slurm' if 'ceci' in self.spec['mode'] else 'shell'
        # an AbiStructure object has an overwritten version of get_sorted_structure that sorts according to Z
        # this could also be pulled into the constructor of Abistructure
        abi_structure = asabistructure(self.structure).get_sorted_structure()
        manager = TaskManager.from_user_config()
        # Initialize the flow.
        # flow = AbinitFlow(self.work_dir, manager, pickle_protocol=0)
        flow = AbinitFlow(self.work_dir, manager)

        # kpoint grid defined over density 40 > ~ 3 3 3
        if self.spec['converge'] and not self.all_converged:
            # (2x2x2) gamma centered mesh for the convergence test on nbands and ecuteps
            # if kp_in is present in the specs a kp_in X kp_in x kp_in mesh is used for the convergence studie
            if 'kp_in' in self.spec.keys():
                if self.spec['kp_in'] > 9:
                    print('WARNING:\nkp_in should be < 10 to generate an n x n x n mesh\nfor larger values a grid with '
                          'density kp_in will be generated')
                scf_kppa = self.spec['kp_in']
            else:
                scf_kppa = 2
        else:
            # use the specified density for the final calculation with the converged nbands and ecuteps of other
            # stand alone calculations
            scf_kppa = self.spec['kp_grid_dens']
        gamma = True

        # 'standard' parameters for stand alone calculation
        nb = self.get_bands(self.structure)
        nscf_nband = [10 * nb]
        ecuteps = [8]
        ecutsigx = 44

        extra_abivars = dict(
            paral_kgb=1,
            inclvkb=2,
            ecut=44,
            pawecutdg=88,
            gwmem='10',
            getden=-1,
            istwfk="*1",
            timopt=-1,
            nbdbuf=8
        )

        # read user defined extra abivars from file 'extra_abivars' should be dictionary
        extra_abivars.update(read_extra_abivars())

        response_models = ['godby']
        if 'ppmodel' in extra_abivars.keys():
            # A user-supplied ppmodel replaces the default response model.
            response_models = [extra_abivars.pop('ppmodel')]

        if self.option is not None:
            # Apply converged values; ecuteps/nscf_nbands are handled below.
            for k in self.option.keys():
                if k in ['ecuteps', 'nscf_nbands']:
                    pass
                else:
                    extra_abivars.update({k: self.option[k]})
                    if k == 'ecut':
                        extra_abivars.update({'pawecutdg': self.option[k]*2})

        # Pick up any previous convergence state from the .full_res file.
        try:
            grid = read_grid_from_file(s_name(self.structure)+".full_res")['grid']
            all_done = read_grid_from_file(s_name(self.structure)+".full_res")['all_done']
            workdir = os.path.join(s_name(self.structure), 'w'+str(grid))
        except (IOError, OSError):
            grid = 0
            all_done = False
            workdir = None

        if not all_done:
            if (self.spec['test'] or self.spec['converge']) and not self.all_converged:
                if self.spec['test']:
                    print('| setting test calculation')
                    tests = SingleAbinitGWWorkFlow(self.structure, self.spec).tests
                    response_models = []
                else:
                    if grid == 0:
                        print('| setting convergence calculations for grid 0')
                        tests = SingleAbinitGWWorkFlow(self.structure, self.spec).convs
                    else:
                        print('| extending grid')
                        # Expand the previous grid of convergence parameters.
                        tests = expand(SingleAbinitGWWorkFlow(self.structure, self.spec).convs, grid)
                ecuteps = []
                nscf_nband = []
                for test in tests:
                    if tests[test]['level'] == 'scf':
                        # scf-level parameters go straight into the abivars,
                        # either as a series ('_s') or as a fixed value.
                        if self.option is None:
                            extra_abivars.update({test + '_s': tests[test]['test_range']})
                        elif test in self.option:
                            extra_abivars.update({test: self.option[test]})
                        else:
                            extra_abivars.update({test + '_s': tests[test]['test_range']})
                    else:
                        for value in tests[test]['test_range']:
                            if test == 'nscf_nbands':
                                nscf_nband.append(value * self.get_bands(self.structure))
                                #scr_nband takes nscf_nbands if not specified
                                #sigma_nband takes scr_nbands if not specified
                            if test == 'ecuteps':
                                ecuteps.append(value)
                            if test == 'response_model':
                                response_models.append(value)
            elif self.all_converged:
                print('| setting up for testing the converged values at the high kp grid ')
                # in this case a convergence study has already been performed.
                # The resulting parameters are passed as option
                ecuteps = [self.option['ecuteps'], self.option['ecuteps'] + self.convs['ecuteps']['test_range'][1] -
                           self.convs['ecuteps']['test_range'][0]]
                nscf_nband = [self.option['nscf_nbands'], self.option['nscf_nbands'] + self.convs['nscf_nbands'][
                    'test_range'][1] - self.convs['nscf_nbands']['test_range'][0]]
                # for option in self.option:
                #    if option not in ['ecuteps', 'nscf_nband']:
                #        extra_abivars.update({option + '_s': self.option[option]})
        else:
            print('| all is done for this material')
            return

        # NOTE(review): logger.info is given extra positional args without
        # format placeholders; these messages likely render incompletely.
        logger.info('ecuteps : ', ecuteps)
        logger.info('extra   : ', extra_abivars)
        logger.info('nscf_nb : ', nscf_nband)

        work = g0w0_extended(abi_structure, self.pseudo_table, scf_kppa, nscf_nband, ecuteps, ecutsigx,
                             accuracy="normal", spin_mode="unpolarized", smearing=None, response_models=response_models,
                             charge=0.0, sigma_nband=None, scr_nband=None, gamma=gamma,
                             **extra_abivars)

        flow.register_work(work, workdir=workdir)
        return flow.allocate()

    def create_job_file(self, serial=True):
        """
        Create the jobfile for starting all schedulers manually
        serial = True creates a list that can be submitted as job that runs all schedulers as a batch job
        (the job header needs to be added)
        serial = False creates a list that can be used to start all schedulers on the frontend in the background
        """
        job_file = open("job_collection", mode='a')
        if serial:
            job_file.write('abirun.py ' + self.work_dir + ' scheduler > ' + self.work_dir + '.log\n')
        else:
            job_file.write('nohup abirun.py ' + self.work_dir + ' scheduler > ' + self.work_dir + '.log & \n')
            job_file.write('sleep 2\n')
        job_file.close()
pickle fix
Former-commit-id: a2712eaef60f871e66234ac4b1984c428a3059bf [formerly 0f896b118c5a342504c35b954d48cbcab6330767]
Former-commit-id: 666177f485983801242bb557e7ebad28cd9ae9ba
# coding: utf-8
from __future__ import unicode_literals, division, print_function
"""
Workflows for GW calculations:
VaspGWFWWorkFlow fireworks wf for vasp
SingleAbinitGWWorkFlow workflow for abinit
Under construction:
general GW workflow that should manage all the code independent logic
"""
__author__ = "Michiel van Setten"
__copyright__ = " "
__version__ = "0.9"
__maintainer__ = "Michiel van Setten"
__email__ = "mjvansetten@gmail.com"
__date__ = "May 2014"
import copy
import logging
import os
import os.path

from pymatgen.io.abinitio.abiobjects import asabistructure
from pymatgen.io.abinitio.calculations import g0w0_extended
from pymatgen.io.abinitio.flows import AbinitFlow
from pymatgen.io.abinitio.tasks import TaskManager
from pymatgen.io.abinitio.pseudos import PseudoTable
from pymatgen.io.gwwrapper.GWtasks import *
from pymatgen.io.gwwrapper.helpers import now, s_name, expand, read_grid_from_file, is_converged
from pymatgen.io.gwwrapper.helpers import read_extra_abivars
MODULE_DIR = os.path.dirname(os.path.abspath(__file__))
logger = logging.getLogger(__name__)
class GWWorkflow(object):
    """
    UNDER CONSTRUCTION
    Base class for GW workflows; the code-specific implementations extend it.
    Carries the status bookkeeping shared by all convergence calculations.
    """

    @property
    def grid(self):
        """Index of the most recently read convergence grid."""
        return self._grid

    @property
    def all_done(self):
        """Whether the full-resolution report marks this material as finished."""
        return self._all_done

    @property
    def workdir(self):
        """Working directory for the current grid (None until known)."""
        return self._workdir

    def set_status(self, structure):
        """Reset the status flags, then refresh them from '<name>.full_res'.

        A missing or unreadable report file is not an error: the defaults
        (grid 0, not done, no workdir) are simply kept.
        """
        self._grid, self._all_done, self._workdir = 0, False, None
        self._converged = is_converged(False, structure)
        try:
            self._grid = read_grid_from_file(s_name(structure)+".full_res")['grid']
            self._all_done = read_grid_from_file(s_name(structure)+".full_res")['all_done']
            self._workdir = os.path.join(s_name(structure), 'work_{0}'.format(self.grid))
        except (IOError, OSError):
            pass
class VaspGWFWWorkFlow():
    """
    Object containing a VASP GW workflow for a single structure
    """
    def __init__(self):
        # fireworks accumulated so far and the parent->child id links between them
        self.work_list = []
        self.connections = {}
        # fw_id is the next id to assign; prep_id remembers the preparation
        # firework so GW jobs can be chained after it
        self.fw_id = 1
        self.prep_id = 1
        self.wf = []
    def add_work(self, parameters):
        """Append one FireWork built from *parameters* ('job' selects the task chain)."""
        from fireworks.core.firework import FireWork
        tasks = []
        job = parameters['job']
        print('adding job ' + job + ' to the workslist as ', self.fw_id)
        if job == 'prep':
            # preparation chain: input -> execute -> diagonalization setup -> execute
            launch_spec = {'task_type': 'Preparation job', '_category': 'cluster', '_queueadapter': 'qadapterdict'}
            task = VaspGWInputTask(parameters)
            tasks.append(task)
            task = VaspGWExecuteTask(parameters)
            tasks.append(task)
            task = VaspGWToDiagTask(parameters)
            tasks.append(task)
            task = VaspGWExecuteTask(parameters)
            tasks.append(task)
            fw = FireWork(tasks, spec=launch_spec, name=job, created_on=now(), fw_id=self.fw_id)
            self.connections[self.fw_id] = []
            self.prep_id = self.fw_id
            self.fw_id += 1
            print(self.connections)
        elif job in ['G0W0', 'GW0', 'scGW0']:
            # GW chain: input -> pull prep results -> execute (+ convergence tasks)
            launch_spec = {'task_type': 'GW job', '_category': 'cluster', '_queueadapter': 'qadapterdict'}
            task = VaspGWInputTask(parameters)
            tasks.append(task)
            task = VaspGWGetPrepResTask(parameters)
            tasks.append(task)
            task = VaspGWExecuteTask(parameters)
            tasks.append(task)
            if parameters['spec']['converge']:
                task = VaspGWWriteConDatTask(parameters)
                tasks.append(task)
                task = VaspGWTestConTask(parameters)
                tasks.append(task)
            fw = FireWork(tasks, spec=launch_spec, name=job, created_on=now(), fw_id=self.fw_id)
            self.connections[self.fw_id] = []
            # GW fireworks depend on the preparation firework
            self.connections[self.prep_id].append(self.fw_id)
            self.fw_id += 1
        else:
            fw = []
            # NOTE(review): bare exit() aborts the whole process; sys.exit would
            # be the conventional call here — confirm intent.
            print('unspecified job, this should have been captured before !!')
            exit()
        self.work_list.append(fw)
    def create(self):
        """Assemble the accumulated fireworks into a fireworks Workflow."""
        from fireworks.core.firework import Workflow
        self.wf = Workflow(self.work_list, self.connections, name='VaspGWFWWorkFlow', created_on=now())
        print('creating workflow')
    def add_to_db(self):
        """Insert the workflow into the LaunchPad configured in $FW_CONFIG_DIR."""
        from fireworks.core.launchpad import LaunchPad
        launchpad_file = os.path.join(os.environ['FW_CONFIG_DIR'], 'my_launchpad.yaml')
        lp = LaunchPad.from_file(launchpad_file)
        lp.add_wf(self.wf)
class SingleAbinitGWWorkFlow():
"""
GW workflow for Abinit
"""
RESPONSE_MODELS = ["cd", "godby", "hybersten", "linden", "farid"]
TESTS = {'ecuteps': {'test_range': (10, 14), 'method': 'direct', 'control': "gap", 'level': "sigma"},
'nscf_nbands': {'test_range': (30, 40), 'method': 'set_bands', 'control': "gap", 'level': "nscf"},
'response_model': {'test_range': RESPONSE_MODELS, 'method': 'direct', 'control': 'gap', 'level': 'screening'}}
# scf level test are run independently, the last value will be used in the nscf and sigma tests
#'test': {'test_range': (1, 2, 3), 'method': 'direct', 'control': "e_ks_max", 'level': "scf"},
CONVS = {'ecut': {'test_range': (28, 32, 36, 40, 44), 'method': 'direct', 'control': "e_ks_max", 'level': "scf"},
'ecuteps': {'test_range': (4, 8, 12, 16, 20), 'method': 'direct', 'control': "gap", 'level': "sigma"},
'nscf_nbands': {'test_range': (5, 10, 20, 30, 40), 'method': 'set_bands', 'control': "gap", 'level': "nscf"}}
def __init__(self, structure, spec, option=None):
self.structure = structure
self.spec = spec
self.option = option
self.tests = self.__class__.get_defaults_tests()
self.convs = self.__class__.get_defaults_convs()
self.response_models = self.__class__.get_response_models()
if self.option is None:
self.all_converged = False
elif len(self.option) == len(self.convs):
self.all_converged = True
else:
self.all_converged = False
path_add = '.conv' if self.all_converged else ''
self.work_dir = s_name(self.structure)+path_add
abi_pseudo = os.environ['ABINIT_PS_EXT']
abi_pseudo_dir = os.environ['ABINIT_PS']
pseudos = []
for element in self.structure.composition.element_composition:
pseudo = os.path.join(abi_pseudo_dir, str(element) + abi_pseudo)
pseudos.append(pseudo)
self.pseudo_table = PseudoTable(pseudos)
@classmethod
def get_defaults_tests(cls):
return copy.deepcopy(cls.TESTS)
@classmethod
def get_defaults_convs(cls):
return copy.deepcopy(cls.CONVS)
@classmethod
def get_response_models(cls):
return copy.deepcopy(cls.RESPONSE_MODELS)
def get_electrons(self, structure):
"""
Method for retrieving the number of valence electrons
"""
electrons = 0
for element in structure.species:
entries = self.pseudo_table.pseudos_with_symbol(element.symbol)
assert len(entries) == 1
pseudo = entries[0]
electrons += pseudo.Z_val
return electrons
def get_bands(self, structure):
"""
Method for retrieving the standard number of bands
"""
bands = self.get_electrons(structure) / 2 + len(structure)
return int(bands)
def get_work_dir(self):
name = s_name(self.structure)
if not self.all_converged:
return str(name)+'_'+str(self.option['test'])+'_'+str(self.option['value'])
else:
return str(name)
def create(self):
"""
create single abinit G0W0 flow
"""
manager = 'slurm' if 'ceci' in self.spec['mode'] else 'shell'
# an AbiStructure object has an overwritten version of get_sorted_structure that sorts according to Z
# this could also be pulled into the constructor of Abistructure
abi_structure = asabistructure(self.structure).get_sorted_structure()
manager = TaskManager.from_user_config()
# Initialize the flow.
flow = AbinitFlow(self.work_dir, manager, pickle_protocol=0)
# flow = AbinitFlow(self.work_dir, manager)
# kpoint grid defined over density 40 > ~ 3 3 3
if self.spec['converge'] and not self.all_converged:
# (2x2x2) gamma centered mesh for the convergence test on nbands and ecuteps
# if kp_in is present in the specs a kp_in X kp_in x kp_in mesh is used for the convergence studie
if 'kp_in' in self.spec.keys():
if self.spec['kp_in'] > 9:
print('WARNING:\nkp_in should be < 10 to generate an n x n x n mesh\nfor larger values a grid with '
'density kp_in will be generated')
scf_kppa = self.spec['kp_in']
else:
scf_kppa = 2
else:
# use the specified density for the final calculation with the converged nbands and ecuteps of other
# stand alone calculations
scf_kppa = self.spec['kp_grid_dens']
gamma = True
# 'standard' parameters for stand alone calculation
nb = self.get_bands(self.structure)
nscf_nband = [10 * nb]
ecuteps = [8]
ecutsigx = 44
extra_abivars = dict(
paral_kgb=1,
inclvkb=2,
ecut=44,
pawecutdg=88,
gwmem='10',
getden=-1,
istwfk="*1",
timopt=-1,
nbdbuf=8
)
# read user defined extra abivars from file 'extra_abivars' should be dictionary
extra_abivars.update(read_extra_abivars())
response_models = ['godby']
if 'ppmodel' in extra_abivars.keys():
response_models = [extra_abivars.pop('ppmodel')]
if self.option is not None:
for k in self.option.keys():
if k in ['ecuteps', 'nscf_nbands']:
pass
else:
extra_abivars.update({k: self.option[k]})
if k == 'ecut':
extra_abivars.update({'pawecutdg': self.option[k]*2})
try:
grid = read_grid_from_file(s_name(self.structure)+".full_res")['grid']
all_done = read_grid_from_file(s_name(self.structure)+".full_res")['all_done']
workdir = os.path.join(s_name(self.structure), 'w'+str(grid))
except (IOError, OSError):
grid = 0
all_done = False
workdir = None
if not all_done:
if (self.spec['test'] or self.spec['converge']) and not self.all_converged:
if self.spec['test']:
print('| setting test calculation')
tests = SingleAbinitGWWorkFlow(self.structure, self.spec).tests
response_models = []
else:
if grid == 0:
print('| setting convergence calculations for grid 0')
tests = SingleAbinitGWWorkFlow(self.structure, self.spec).convs
else:
print('| extending grid')
tests = expand(SingleAbinitGWWorkFlow(self.structure, self.spec).convs, grid)
ecuteps = []
nscf_nband = []
for test in tests:
if tests[test]['level'] == 'scf':
if self.option is None:
extra_abivars.update({test + '_s': tests[test]['test_range']})
elif test in self.option:
extra_abivars.update({test: self.option[test]})
else:
extra_abivars.update({test + '_s': tests[test]['test_range']})
else:
for value in tests[test]['test_range']:
if test == 'nscf_nbands':
nscf_nband.append(value * self.get_bands(self.structure))
#scr_nband takes nscf_nbands if not specified
#sigma_nband takes scr_nbands if not specified
if test == 'ecuteps':
ecuteps.append(value)
if test == 'response_model':
response_models.append(value)
elif self.all_converged:
print('| setting up for testing the converged values at the high kp grid ')
# in this case a convergence study has already been performed.
# The resulting parameters are passed as option
ecuteps = [self.option['ecuteps'], self.option['ecuteps'] + self.convs['ecuteps']['test_range'][1] -
self.convs['ecuteps']['test_range'][0]]
nscf_nband = [self.option['nscf_nbands'], self.option['nscf_nbands'] + self.convs['nscf_nbands'][
'test_range'][1] - self.convs['nscf_nbands']['test_range'][0]]
# for option in self.option:
# if option not in ['ecuteps', 'nscf_nband']:
# extra_abivars.update({option + '_s': self.option[option]})
else:
print('| all is done for this material')
return
logger.info('ecuteps : ', ecuteps)
logger.info('extra : ', extra_abivars)
logger.info('nscf_nb : ', nscf_nband)
work = g0w0_extended(abi_structure, self.pseudo_table, scf_kppa, nscf_nband, ecuteps, ecutsigx,
accuracy="normal", spin_mode="unpolarized", smearing=None, response_models=response_models,
charge=0.0, sigma_nband=None, scr_nband=None, gamma=gamma,
**extra_abivars)
flow.register_work(work, workdir=workdir)
return flow.allocate()
def create_job_file(self, serial=True):
"""
Create the jobfile for starting all schedulers manually
serial = True creates a list that can be submitted as job that runs all schedulers a a batch job
(the job header needs to be added)
serial = False creates a list that can be used to start all schedulers on the frontend in the background
"""
job_file = open("job_collection", mode='a')
if serial:
job_file.write('abirun.py ' + self.work_dir + ' scheduler > ' + self.work_dir + '.log\n')
else:
job_file.write('nohup abirun.py ' + self.work_dir + ' scheduler > ' + self.work_dir + '.log & \n')
job_file.write('sleep 2\n')
job_file.close()
|
""" Classes to represent and manipulate gravity's stored configuration and
state data.
"""
import enum
import errno
import os
import sys
from collections import defaultdict
import yaml
from gravity.util import AttributeDict
# Relative path (under galaxy_root) of the sample config shipped with Galaxy.
GALAXY_YML_SAMPLE_PATH = "lib/galaxy/config/sample/galaxy.yml.sample"
# Base environment for Galaxy processes; "{galaxy_conf}" is substituted later
# with the service's format variables.
DEFAULT_GALAXY_ENVIRONMENT = {
    "PYTHONPATH": "lib",
    "GALAXY_CONFIG_FILE": "{galaxy_conf}",
}
# File name (relative to state_dir) of celery-beat's schedule database.
CELERY_BEAT_DB_FILENAME = "celery-beat-schedule"
class GracefulMethod(enum.Enum):
    """How a running service can be restarted/reloaded gracefully."""
    DEFAULT = 0
    # service reloads itself when sent SIGHUP (e.g. gunicorn's graceful reload)
    SIGHUP = 1
class Service(AttributeDict):
    """Base description of one managed process.

    Subclasses override the class-level identifiers and templates; instances
    are dict-like (AttributeDict) records loaded from the state file.
    """
    service_type = "service"
    service_name = "_default_"
    environment_from = None
    default_environment = {}
    add_virtualenv_to_path = False
    graceful_method = GracefulMethod.DEFAULT
    command_arguments = {}

    def __init__(self, *args, **kwargs):
        super(Service, self).__init__(*args, **kwargs)
        # fall back to the class-level identifiers unless the caller
        # supplied them explicitly as keyword arguments
        for field in ("service_type", "service_name"):
            if field not in kwargs:
                self[field] = getattr(self.__class__, field)

    def __eq__(self, other):
        """Services match when config type, service type and name all agree."""
        return all(self[key] == other[key] for key in ("config_type", "service_type", "service_name"))

    def full_match(self, other):
        """Strict comparison: identical key sets and identical public values."""
        if set(self.keys()) != set(other.keys()):
            return False
        return all(self[key] == other[key] for key in self if not key.startswith("_"))

    def get_environment(self):
        """Return a fresh copy of the service's default environment."""
        return dict(self.default_environment)

    def get_command_arguments(self, attribs, format_vars):
        """Render this service's settings into command-line fragments.

        Settings with a registered template are formatted with *format_vars*
        when truthy (and collapse to "" otherwise); all other settings pass
        through unchanged.
        """
        rendered = {}
        for setting, value in attribs.get(self.service_type, {}).items():
            template = self.command_arguments.get(setting)
            if template is None:
                rendered[setting] = value
            elif value:
                # FIXME: this truthiness testing of value is probably not the best
                rendered[setting] = template.format(**format_vars)
            else:
                rendered[setting] = ""
        return rendered
class GalaxyGunicornService(Service):
    """Galaxy web frontend run under gunicorn."""
    service_type = "gunicorn"
    service_name = "gunicorn"
    default_environment = DEFAULT_GALAXY_ENVIRONMENT
    # SIGHUP triggers gunicorn's graceful reload
    graceful_method = GracefulMethod.SIGHUP
    command_template = "{virtualenv_bin}gunicorn 'galaxy.webapps.galaxy.fast_factory:factory()'" \
                       " --timeout {gunicorn[timeout]}" \
                       " --pythonpath lib" \
                       " -k galaxy.webapps.galaxy.workers.Worker" \
                       " -b {gunicorn[bind]}" \
                       " --workers={gunicorn[workers]}" \
                       " --config python:galaxy.web_stack.gunicorn_config" \
                       " {gunicorn[preload]}" \
                       " {gunicorn[extra_args]}"
    def get_environment(self):
        # Works around https://github.com/galaxyproject/galaxy/issues/11821
        environment = self.default_environment.copy()
        if sys.platform == 'darwin':
            environment["OBJC_DISABLE_INITIALIZE_FORK_SAFETY"] = "YES"
        return environment
class GalaxyUnicornHerderService(Service):
    """Galaxy web frontend run under unicornherder, which supervises gunicorn."""
    service_type = "unicornherder"
    service_name = "unicornherder"
    # NOTE(review): consumed elsewhere — appears to source environment settings
    # from the 'gunicorn' config section; confirm against the process manager.
    environment_from = "gunicorn"
    graceful_method = GracefulMethod.SIGHUP
    default_environment = DEFAULT_GALAXY_ENVIRONMENT
    command_template = "{virtualenv_bin}unicornherder --pidfile {supervisor_state_dir}/{program_name}.pid --" \
                       " 'galaxy.webapps.galaxy.fast_factory:factory()'" \
                       " --timeout {gunicorn[timeout]}" \
                       " --pythonpath lib" \
                       " -k galaxy.webapps.galaxy.workers.Worker" \
                       " -b {gunicorn[bind]}" \
                       " --workers={gunicorn[workers]}" \
                       " --config python:galaxy.web_stack.gunicorn_config" \
                       " {gunicorn[preload]}" \
                       " {gunicorn[extra_args]}"
    def get_environment(self):
        # Same macOS fork-safety workaround as the gunicorn service, plus
        # redirect Galaxy's log output to the gunicorn log file.
        environment = self.default_environment.copy()
        if sys.platform == 'darwin':
            environment["OBJC_DISABLE_INITIALIZE_FORK_SAFETY"] = "YES"
        environment["GALAXY_CONFIG_LOG_DESTINATION"] = "{log_dir}/gunicorn.log"
        return environment
class GalaxyCeleryService(Service):
    """Celery worker pool for Galaxy background tasks."""
    service_type = "celery"
    service_name = "celery"
    default_environment = DEFAULT_GALAXY_ENVIRONMENT
    command_template = "{virtualenv_bin}celery" \
                       " --app galaxy.celery worker" \
                       " --concurrency {celery[concurrency]}" \
                       " --loglevel {celery[loglevel]}" \
                       " --pool {celery[pool]}" \
                       " --queues {celery[queues]}" \
                       " {celery[extra_args]}"
class GalaxyCeleryBeatService(Service):
    """Celery beat scheduler; persists its schedule DB under state_dir."""
    service_type = "celery-beat"
    service_name = "celery-beat"
    default_environment = DEFAULT_GALAXY_ENVIRONMENT
    command_template = "{virtualenv_bin}celery" \
                       " --app galaxy.celery" \
                       " beat" \
                       " --loglevel {celery[loglevel]}" \
                       " --schedule {state_dir}/" + CELERY_BEAT_DB_FILENAME
class GalaxyGxItProxyService(Service):
    """Interactive-tools proxy (node-based gx-it-proxy) launched via npx."""
    service_type = "gx-it-proxy"
    service_name = "gx-it-proxy"
    # auto-confirm npx's package installation prompt
    default_environment = {
        "npm_config_yes": "true",
    }
    # the npx shebang is $!/usr/bin/env node, so $PATH has to be correct
    add_virtualenv_to_path = True
    # optional flags rendered by Service.get_command_arguments only when the
    # corresponding setting is truthy
    command_arguments = {
        "forward_ip": "--forwardIP {gx_it_proxy[forward_ip]}",
        "forward_port": "--forwardPort {gx_it_proxy[forward_port]}",
        "reverse_proxy": "--reverseProxy",
    }
    command_template = "{virtualenv_bin}npx gx-it-proxy --ip {gx_it_proxy[ip]} --port {gx_it_proxy[port]}" \
                       " --sessions {gx_it_proxy[sessions]} {gx_it_proxy[verbose]}" \
                       " {command_arguments[forward_ip]} {command_arguments[forward_port]}" \
                       " {command_arguments[reverse_proxy]}"
class GalaxyTUSDService(Service):
    """tusd resumable-upload server wired to Galaxy's upload hooks API."""
    service_type = "tusd"
    service_name = "tusd"
    command_template = "{tusd[tusd_path]} -host={tusd[host]} -port={tusd[port]} -upload-dir={tusd[upload_dir]}" \
                       " -hooks-http={galaxy_infrastructure_url}/api/upload/hooks" \
                       " -hooks-http-forward-headers=X-Api-Key,Cookie {tusd[extra_args]}" \
                       " -hooks-enabled-events {tusd[hooks_enabled_events]}"
class GalaxyStandaloneService(Service):
    """Classic standalone Galaxy server process (lib/galaxy/main.py)."""
    service_type = "standalone"
    service_name = "standalone"
    # FIXME: supervisor-specific
    command_template = "{virtualenv_bin}python ./lib/galaxy/main.py -c {galaxy_conf} --server-name={server_name}{attach_to_pool_opt}" \
                       " --pid-file={supervisor_state_dir}/{program_name}.pid"
    def get_environment(self):
        # use the per-service "environment" setting verbatim; no Galaxy defaults
        return self.get("environment") or {}
class ConfigFile(AttributeDict):
    """One managed Galaxy configuration file plus its list of services."""
    def __init__(self, *args, **kwargs):
        super(ConfigFile, self).__init__(*args, **kwargs)
        # rehydrate plain service dicts into their Service subclasses,
        # falling back to the generic Service for unknown types
        services = []
        for service in self.get("services", []):
            service_class = SERVICE_CLASS_MAP.get(service["service_type"], Service)
            services.append(service_class(**service))
        self.services = services
    @property
    def defaults(self):
        """Format variables common to every service of this config."""
        return {
            "instance_name": self["instance_name"],
            "galaxy_root": self["attribs"]["galaxy_root"],
            "log_dir": self["attribs"]["log_dir"],
            "gunicorn": self.gunicorn_config,
        }
    @property
    def gunicorn_config(self):
        # We used to store bind_address and bind_port instead of a gunicorn config key, so restore from here
        gunicorn = self["attribs"].get("gunicorn")
        if not gunicorn and 'bind_address' in self["attribs"]:
            return {'bind': f'{self["attribs"]["bind_address"]}:{self["attribs"]["bind_port"]}'}
        return gunicorn
class GravityState(AttributeDict):
    """Gravity's on-disk state (YAML). Usable as a context manager that
    writes itself back to disk on exit."""

    @classmethod
    def open(cls, name):
        """Load state from *name*, creating an empty state file if missing.

        Bug fix: a non-ENOENT OSError was previously swallowed, leaving ``s``
        unbound and raising NameError below — such errors are now re-raised.
        File handles are closed via context managers instead of leaking.
        """
        try:
            with open(name) as fh:
                s = cls.loads(fh.read())
        except (OSError, IOError) as exc:
            if exc.errno != errno.ENOENT:
                raise
            with open(name, "w") as fh:
                yaml.dump({}, fh)
            s = cls()
        s._name = name
        return s

    def __init__(self, *args, **kwargs):
        """Normalize 'config_files': resolve each path and wrap each entry
        in a ConfigFile."""
        super(GravityState, self).__init__(*args, **kwargs)
        normalized_state = defaultdict(dict)
        for key in ("config_files",):
            if key not in self:
                self[key] = {}
            for config_file, config_dict in self[key].items():
                # resolve path, so we always deal with absolute and symlink-resolved paths
                config_file = os.path.realpath(config_file)
                if config_file.endswith(GALAXY_YML_SAMPLE_PATH):
                    # prefer the real galaxy.yml over the shipped sample if one exists
                    root_dir = config_dict['attribs']['galaxy_root']
                    non_sample_path = os.path.join(root_dir, 'config', 'galaxy.yml')
                    if os.path.exists(non_sample_path):
                        config_file = non_sample_path
                normalized_state[key][config_file] = ConfigFile(config_dict)
        self.update(normalized_state)

    def __enter__(self):
        return self

    def __exit__(self, exc_type, exc_value, traceback):
        # persist any mutations made inside the `with` block
        with open(self._name, "w") as fh:
            self.dump(fh)

    def set_name(self, name):
        self._name = name
def service_for_service_type(service_type):
    """Return the Service subclass registered for *service_type*.

    Raises RuntimeError for unregistered types.
    """
    service_class = SERVICE_CLASS_MAP.get(service_type)
    if service_class is None:
        raise RuntimeError(f"Unknown service type: {service_type}")
    return service_class
# TODO: better to pull this from __class__.service_type
# Registry: config "service_type" string -> Service subclass.
SERVICE_CLASS_MAP = {
    "gunicorn": GalaxyGunicornService,
    "unicornherder": GalaxyUnicornHerderService,
    "celery": GalaxyCeleryService,
    "celery-beat": GalaxyCeleryBeatService,
    "gx-it-proxy": GalaxyGxItProxyService,
    "tusd": GalaxyTUSDService,
    "standalone": GalaxyStandaloneService,
}
Made graceful method for gunicorn dependent on preload
""" Classes to represent and manipulate gravity's stored configuration and
state data.
"""
import enum
import errno
import os
import sys
from collections import defaultdict
import yaml
from gravity.util import AttributeDict
# Relative path (under galaxy_root) of the sample config shipped with Galaxy.
GALAXY_YML_SAMPLE_PATH = "lib/galaxy/config/sample/galaxy.yml.sample"
# Base environment for Galaxy processes; "{galaxy_conf}" is substituted later
# with the service's format variables.
DEFAULT_GALAXY_ENVIRONMENT = {
    "PYTHONPATH": "lib",
    "GALAXY_CONFIG_FILE": "{galaxy_conf}",
}
# File name (relative to state_dir) of celery-beat's schedule database.
CELERY_BEAT_DB_FILENAME = "celery-beat-schedule"
class GracefulMethod(enum.Enum):
    """How a running service can be restarted/reloaded gracefully."""
    DEFAULT = 0
    # service reloads itself when sent SIGHUP (e.g. gunicorn's graceful reload)
    SIGHUP = 1
class Service(AttributeDict):
    """Base description of one managed process.

    Subclasses override the class-level identifiers and templates; instances
    are dict-like (AttributeDict) records loaded from the state file.
    """
    service_type = "service"
    service_name = "_default_"
    environment_from = None
    default_environment = {}
    add_virtualenv_to_path = False
    graceful_method = GracefulMethod.DEFAULT
    command_arguments = {}

    def __init__(self, *args, **kwargs):
        super(Service, self).__init__(*args, **kwargs)
        # fall back to the class-level identifiers unless the caller
        # supplied them explicitly as keyword arguments
        for field in ("service_type", "service_name"):
            if field not in kwargs:
                self[field] = getattr(self.__class__, field)

    def __eq__(self, other):
        """Services match when config type, service type and name all agree."""
        return all(self[key] == other[key] for key in ("config_type", "service_type", "service_name"))

    def full_match(self, other):
        """Strict comparison: identical key sets and identical public values."""
        if set(self.keys()) != set(other.keys()):
            return False
        return all(self[key] == other[key] for key in self if not key.startswith("_"))

    def get_environment(self):
        """Return a fresh copy of the service's default environment."""
        return dict(self.default_environment)

    def get_command_arguments(self, attribs, format_vars):
        """Render this service's settings into command-line fragments.

        Settings with a registered template are formatted with *format_vars*
        when truthy (and collapse to "" otherwise); all other settings pass
        through unchanged.
        """
        rendered = {}
        for setting, value in attribs.get(self.service_type, {}).items():
            template = self.command_arguments.get(setting)
            if template is None:
                rendered[setting] = value
            elif value:
                # FIXME: this truthiness testing of value is probably not the best
                rendered[setting] = template.format(**format_vars)
            else:
                rendered[setting] = ""
        return rendered
class GalaxyGunicornService(Service):
    """Galaxy web frontend run under gunicorn."""
    service_type = "gunicorn"
    service_name = "gunicorn"
    default_environment = DEFAULT_GALAXY_ENVIRONMENT
    command_template = "{virtualenv_bin}gunicorn 'galaxy.webapps.galaxy.fast_factory:factory()'" \
                       " --timeout {gunicorn[timeout]}" \
                       " --pythonpath lib" \
                       " -k galaxy.webapps.galaxy.workers.Worker" \
                       " -b {gunicorn[bind]}" \
                       " --workers={gunicorn[workers]}" \
                       " --config python:galaxy.web_stack.gunicorn_config" \
                       " {gunicorn[preload]}" \
                       " {gunicorn[extra_args]}"
    @property
    def graceful_method(self):
        # With preload enabled a SIGHUP reload is insufficient — presumably
        # because the app is loaded pre-fork, so fall back to a full restart;
        # confirm against gunicorn's reload semantics.
        if self.get("preload"):
            return GracefulMethod.DEFAULT
        else:
            return GracefulMethod.SIGHUP
    def get_environment(self):
        # Works around https://github.com/galaxyproject/galaxy/issues/11821
        environment = self.default_environment.copy()
        if sys.platform == 'darwin':
            environment["OBJC_DISABLE_INITIALIZE_FORK_SAFETY"] = "YES"
        return environment
class GalaxyUnicornHerderService(Service):
    """Galaxy web frontend run under unicornherder, which supervises gunicorn."""
    service_type = "unicornherder"
    service_name = "unicornherder"
    # NOTE(review): consumed elsewhere — appears to source environment settings
    # from the 'gunicorn' config section; confirm against the process manager.
    environment_from = "gunicorn"
    graceful_method = GracefulMethod.SIGHUP
    default_environment = DEFAULT_GALAXY_ENVIRONMENT
    command_template = "{virtualenv_bin}unicornherder --pidfile {supervisor_state_dir}/{program_name}.pid --" \
                       " 'galaxy.webapps.galaxy.fast_factory:factory()'" \
                       " --timeout {gunicorn[timeout]}" \
                       " --pythonpath lib" \
                       " -k galaxy.webapps.galaxy.workers.Worker" \
                       " -b {gunicorn[bind]}" \
                       " --workers={gunicorn[workers]}" \
                       " --config python:galaxy.web_stack.gunicorn_config" \
                       " {gunicorn[preload]}" \
                       " {gunicorn[extra_args]}"
    def get_environment(self):
        # Same macOS fork-safety workaround as the gunicorn service, plus
        # redirect Galaxy's log output to the gunicorn log file.
        environment = self.default_environment.copy()
        if sys.platform == 'darwin':
            environment["OBJC_DISABLE_INITIALIZE_FORK_SAFETY"] = "YES"
        environment["GALAXY_CONFIG_LOG_DESTINATION"] = "{log_dir}/gunicorn.log"
        return environment
class GalaxyCeleryService(Service):
    """Celery worker pool for Galaxy background tasks."""
    service_type = "celery"
    service_name = "celery"
    default_environment = DEFAULT_GALAXY_ENVIRONMENT
    command_template = "{virtualenv_bin}celery" \
                       " --app galaxy.celery worker" \
                       " --concurrency {celery[concurrency]}" \
                       " --loglevel {celery[loglevel]}" \
                       " --pool {celery[pool]}" \
                       " --queues {celery[queues]}" \
                       " {celery[extra_args]}"
class GalaxyCeleryBeatService(Service):
    """Celery beat scheduler; persists its schedule DB under state_dir."""
    service_type = "celery-beat"
    service_name = "celery-beat"
    default_environment = DEFAULT_GALAXY_ENVIRONMENT
    command_template = "{virtualenv_bin}celery" \
                       " --app galaxy.celery" \
                       " beat" \
                       " --loglevel {celery[loglevel]}" \
                       " --schedule {state_dir}/" + CELERY_BEAT_DB_FILENAME
class GalaxyGxItProxyService(Service):
    """Interactive-tools proxy (node-based gx-it-proxy) launched via npx."""
    service_type = "gx-it-proxy"
    service_name = "gx-it-proxy"
    # auto-confirm npx's package installation prompt
    default_environment = {
        "npm_config_yes": "true",
    }
    # the npx shebang is $!/usr/bin/env node, so $PATH has to be correct
    add_virtualenv_to_path = True
    # optional flags rendered by Service.get_command_arguments only when the
    # corresponding setting is truthy
    command_arguments = {
        "forward_ip": "--forwardIP {gx_it_proxy[forward_ip]}",
        "forward_port": "--forwardPort {gx_it_proxy[forward_port]}",
        "reverse_proxy": "--reverseProxy",
    }
    command_template = "{virtualenv_bin}npx gx-it-proxy --ip {gx_it_proxy[ip]} --port {gx_it_proxy[port]}" \
                       " --sessions {gx_it_proxy[sessions]} {gx_it_proxy[verbose]}" \
                       " {command_arguments[forward_ip]} {command_arguments[forward_port]}" \
                       " {command_arguments[reverse_proxy]}"
class GalaxyTUSDService(Service):
    """tusd resumable-upload server wired to Galaxy's upload hooks API."""
    service_type = "tusd"
    service_name = "tusd"
    command_template = "{tusd[tusd_path]} -host={tusd[host]} -port={tusd[port]} -upload-dir={tusd[upload_dir]}" \
                       " -hooks-http={galaxy_infrastructure_url}/api/upload/hooks" \
                       " -hooks-http-forward-headers=X-Api-Key,Cookie {tusd[extra_args]}" \
                       " -hooks-enabled-events {tusd[hooks_enabled_events]}"
class GalaxyStandaloneService(Service):
    """Classic standalone Galaxy server process (lib/galaxy/main.py)."""
    service_type = "standalone"
    service_name = "standalone"
    # FIXME: supervisor-specific
    command_template = "{virtualenv_bin}python ./lib/galaxy/main.py -c {galaxy_conf} --server-name={server_name}{attach_to_pool_opt}" \
                       " --pid-file={supervisor_state_dir}/{program_name}.pid"
    def get_environment(self):
        # use the per-service "environment" setting verbatim; no Galaxy defaults
        return self.get("environment") or {}
class ConfigFile(AttributeDict):
    """One managed Galaxy configuration file plus its list of services."""
    def __init__(self, *args, **kwargs):
        super(ConfigFile, self).__init__(*args, **kwargs)
        # rehydrate plain service dicts into their Service subclasses,
        # falling back to the generic Service for unknown types
        services = []
        for service in self.get("services", []):
            service_class = SERVICE_CLASS_MAP.get(service["service_type"], Service)
            services.append(service_class(**service))
        self.services = services
    @property
    def defaults(self):
        """Format variables common to every service of this config."""
        return {
            "instance_name": self["instance_name"],
            "galaxy_root": self["attribs"]["galaxy_root"],
            "log_dir": self["attribs"]["log_dir"],
            "gunicorn": self.gunicorn_config,
        }
    @property
    def gunicorn_config(self):
        # We used to store bind_address and bind_port instead of a gunicorn config key, so restore from here
        gunicorn = self["attribs"].get("gunicorn")
        if not gunicorn and 'bind_address' in self["attribs"]:
            return {'bind': f'{self["attribs"]["bind_address"]}:{self["attribs"]["bind_port"]}'}
        return gunicorn
class GravityState(AttributeDict):
    """Gravity's on-disk state (YAML). Usable as a context manager that
    writes itself back to disk on exit."""

    @classmethod
    def open(cls, name):
        """Load state from *name*, creating an empty state file if missing.

        Bug fix: a non-ENOENT OSError was previously swallowed, leaving ``s``
        unbound and raising NameError below — such errors are now re-raised.
        File handles are closed via context managers instead of leaking.
        """
        try:
            with open(name) as fh:
                s = cls.loads(fh.read())
        except (OSError, IOError) as exc:
            if exc.errno != errno.ENOENT:
                raise
            with open(name, "w") as fh:
                yaml.dump({}, fh)
            s = cls()
        s._name = name
        return s

    def __init__(self, *args, **kwargs):
        """Normalize 'config_files': resolve each path and wrap each entry
        in a ConfigFile."""
        super(GravityState, self).__init__(*args, **kwargs)
        normalized_state = defaultdict(dict)
        for key in ("config_files",):
            if key not in self:
                self[key] = {}
            for config_file, config_dict in self[key].items():
                # resolve path, so we always deal with absolute and symlink-resolved paths
                config_file = os.path.realpath(config_file)
                if config_file.endswith(GALAXY_YML_SAMPLE_PATH):
                    # prefer the real galaxy.yml over the shipped sample if one exists
                    root_dir = config_dict['attribs']['galaxy_root']
                    non_sample_path = os.path.join(root_dir, 'config', 'galaxy.yml')
                    if os.path.exists(non_sample_path):
                        config_file = non_sample_path
                normalized_state[key][config_file] = ConfigFile(config_dict)
        self.update(normalized_state)

    def __enter__(self):
        return self

    def __exit__(self, exc_type, exc_value, traceback):
        # persist any mutations made inside the `with` block
        with open(self._name, "w") as fh:
            self.dump(fh)

    def set_name(self, name):
        self._name = name
def service_for_service_type(service_type):
    """Return the Service subclass registered for *service_type*.

    Raises RuntimeError for unregistered types.
    """
    service_class = SERVICE_CLASS_MAP.get(service_type)
    if service_class is None:
        raise RuntimeError(f"Unknown service type: {service_type}")
    return service_class
# TODO: better to pull this from __class__.service_type
# Registry: config "service_type" string -> Service subclass.
SERVICE_CLASS_MAP = {
    "gunicorn": GalaxyGunicornService,
    "unicornherder": GalaxyUnicornHerderService,
    "celery": GalaxyCeleryService,
    "celery-beat": GalaxyCeleryBeatService,
    "gx-it-proxy": GalaxyGxItProxyService,
    "tusd": GalaxyTUSDService,
    "standalone": GalaxyStandaloneService,
}
|
__author__ = 'Nicolas'
import t411
import os
import transmissionrpc
import base64
import inspect
#https://api.t411.me/
class cmdLoop:
__result_len_limit__ = 20
def command(command_string):
def decorator(func):
func.command_string = command_string
return func
return decorator
def __init__(self):
self.__load_commands()
self.__create_menu()
return
try:
print 'Connecting to T411'
self.t411 = t411.T411()
except Exception as e:
print 'Could not connect to T411: '+str(e)
try:
print 'Connecting to Transmission'
self.transmission = transmissionrpc.Client(address='nicorasp.local', port=9091)
except Exception as e:
print 'Could not connect to Transmission: '+str(e)
self.clear()
def __load_commands(self):
self.commands = [{'cmd': method[1].command_string, 'method': method[1], 'doc': inspect.getdoc(method[1])}
for method in inspect.getmembers(self, predicate=inspect.ismethod) if hasattr(method[1], 'command_string')]
print self.commands
def __create_menu(self):
self.menu = 'T411:\n'
for cmd in self.commands:
doclines = cmd['doc'].splitlines()
self.menu += '\t-{!s}:\t{!s}\n'.format(cmd['cmd'], doclines[0])
if len(doclines)>1:
for line in doclines[1:]:
self.menu += '\t\t{!s}\n'.format(line)
print self.menu
def clear(self, *args):
self.offset = 0
self.last_search_result = dict()
self.last_query_string = ''
def __main_menu(self):
print 'Choose between'
print '\t-\'s\'\tSearch'
print '\t-\'d\'\tDownload'
print '\t-\'n\'\tNext'
print '\t-\'p\'\tPrevious'
print '\t-\'c\'\tClear'
print '\t-\'i\'\tInfo'
print '\t-\'u\'\tUser'
print '\t-\'c\'\tCat'
print '\t-\'q\'\tQuit'
return raw_input('Prompt: ')
def get_search_string(self, query, filters):
query_filters_names = ('cid',)
base_search_string = query+'?offset='+str(self.offset)+'&limit='+str(self.__result_len_limit__)
query_filters = [(index, filter['type'], filter['arg']) for index, filter in enumerate(filters) if filter['type'] in query_filters_names]
if query_filters:
for filter in query_filters:
base_search_string += '&{!s}={!s}'.format(filter[1], filter[2])
return base_search_string
def print_search_results(self):
print 'Found {!s} torrent'.format(self.last_search_result['total'])
if self.last_search_result:
for i, torrent in enumerate(self.last_search_result['torrents']):
print '\t-{!s} {} [{!s} -{!s}-]'.format(i,
torrent['name'].encode('utf-8'),
torrent['categoryname'].encode('utf-8'),
torrent['category'])
else:
print 'Nothing found.'
def search_t411(self, filters):
return self.t411.search(self.get_search_string(self.last_query_string, filters)).json()
@command('s')
def search(self, cmdArgs, filters):
"""
Search Torrent
yrst
"""
self.last_query_string = str(cmdArgs)
self.last_search_result = self.search_t411(filters)
self.print_search_results()
@command('i')
def info(self, cmdArgs, filters):
"""
Get Torrent Info
"""
infos = self.t411.details(self.last_search_result['torrents'][int(cmdArgs)]['id']).json()
for key, value in infos['terms'].iteritems():
print '\t- ' + key + ':\t' + value
def user(self, cmdArgs, filters):
infos = self.t411.me().json()
print 'Uploaded:\t'+str(infos['uploaded'])+' bytes'
print 'Downloaded:\t'+str(infos['downloaded'])+' bytes'
print 'Ratio:\t'+str(float(infos['uploaded'])/float(infos['downloaded']))
def next(self, cmdArgs, filterss):
if self.last_search_result:
self.offset += self.__result_len_limit__
self.last_search_result = self.search_t411()
self.print_search_results()
else:
print 'You need to make a search first.'
def previous(self, cmdArgs, filters):
if self.last_search_result:
self.offset -= self.__result_len_limit__
self.offset = max(0, self.offset)
self.last_search_result = self.search_t411()
self.print_search_results()
else:
print 'You need to make a search first.'
def cat(self, cmdArgs, filters):
cat_list = self.t411.categories().json()
for cat_id, cat_info in cat_list.iteritems():
if 'id' in cat_info:
print '\t-{!s}:\t{!s}'.format(cat_id, cat_info['name'].encode('utf-8'))
if 'cats' in cat_info:
for subcat_id, subcat_info in cat_info['cats'].iteritems():
print '\t\t-{!s}:\t{!s}'.format(subcat_id, subcat_info['name'].encode('utf-8'))
def download(self, cmdArgs, filters):
torrent = self.t411.download(self.last_search_result['torrents'][int(cmdArgs)]['id'])
self.transmission.add_torrent(base64.b64encode(torrent.content))
@staticmethod
def parse_command_line(line):
filters = list()
cmdArgs = ''
cmd = ''
for i, ele in enumerate(line.split('|')):
if i:
filters.append({'type': (ele.split())[0], 'arg': (ele.split())[1:]})
else:
cmd = (ele.split())[0]
if len(ele.split())>1:
cmdArgs = (ele.split())[1:][0]
return cmd, cmdArgs, filters
def run(self):
"""Main REPL: read a command line, dispatch it, loop until 'q'."""
cmd = ''
# NOTE(review): the key 'c' appears twice below; the later entry
# (self.cat) silently wins, so self.clear is unreachable from this
# menu — confirm which key each command was meant to have.
actions = {'s': self.search,
'd': self.download,
'n': self.next,
'p': self.previous,
'c': self.clear,
'i': self.info,
'u': self.user,
'c': self.cat}
while cmd != 'q':
# NOTE(review): parse_command_line raises IndexError on an empty
# input line, and this call sits outside the try block below.
cmd, cmdArgs, filters = self.parse_command_line(self.__main_menu())
try:
actions[cmd](cmdArgs, filters)
except KeyError as e:
print 'Command {!s} not recognized-{!s}-'.format(cmd, e)
except Exception as e:
print 'Command {!s} failed -{!s}-'.format((cmd.split())[1:], e)
# Script entry point: build the interactive CLI and run its REPL.
if __name__ == '__main__':
cli = cmdLoop()
cli.run()
Getting there!
__author__ = 'Nicolas'
import t411
import os
import transmissionrpc
import base64
import inspect
#https://api.t411.me/
class cmdLoop:
__result_len_limit__ = 20
def command(command_string):
def decorator(func):
func.command_string = command_string
return func
return decorator
def __init__(self):
self.__load_commands()
self.__create_menu()
try:
print 'Connecting to T411'
self.t411 = t411.T411()
except Exception as e:
print 'Could not connect to T411: '+str(e)
try:
print 'Connecting to Transmission'
self.transmission = transmissionrpc.Client(address='nicorasp.local', port=9091)
except Exception as e:
print 'Could not connect to Transmission: '+str(e)
self.clear()
print 'Type \'help\' for help'
def __load_commands(self):
self.commands = {method[1].command_string: {'method': method[1], 'doc': inspect.getdoc(method[1])}
for method in inspect.getmembers(self, predicate=inspect.ismethod)
if hasattr(method[1], 'command_string')}
def __create_menu(self):
self.menu = 'T411:\n'
for str, cmd in self.commands.iteritems():
try:
doclines = cmd['doc'].splitlines()
self.menu += '\t-{!s}:\t\t{!s}\n'.format(str, doclines[0])
if len(doclines)>1:
for line in doclines[1:]:
self.menu += '\t\t\th{!s}\n'.format(line)
except:
self.menu += '\t-{!s}\n'.format(str)
def __main_menu(self):
return raw_input('T411: ')
def get_search_string(self, query, filters):
query_filters_names = ('cid',)
base_search_string = query+'?offset='+str(self.offset)+'&limit='+str(self.__result_len_limit__)
query_filters = [(index, filter['type'], filter['arg']) for index, filter in enumerate(filters) if filter['type'] in query_filters_names]
if query_filters:
for filter in query_filters:
base_search_string += '&{!s}={!s}'.format(filter[1], filter[2])
return base_search_string
def print_search_results(self):
print 'Found {!s} torrent'.format(self.last_search_result['total'])
if self.last_search_result:
for i, torrent in enumerate(self.last_search_result['torrents']):
print '\t-{!s} {} [{!s} -{!s}-]'.format(i,
torrent['name'].encode('utf-8'),
torrent['categoryname'].encode('utf-8'),
torrent['category'])
else:
print 'Nothing found.'
def search_t411(self, filters):
return self.t411.search(self.get_search_string(self.last_query_string, filters)).json()
@command('clear')
def clear(self, *args):
"""
Clear previous results
"""
self.offset = 0
self.last_search_result = dict()
self.last_query_string = ''
@command('search')
def search(self, cmdArgs, filters):
"""
[query string] -> Search Torrent
accept filters:
| cuid category_id
"""
self.last_query_string = str(cmdArgs)
self.last_search_result = self.search_t411(filters)
self.print_search_results()
@command('help')
def help(self, cmdArgs, filters):
print self.menu
@command('info')
def info(self, cmdArgs, filters):
"""
[torrentID] -> Get Torrent Info
"""
infos = self.t411.details(self.last_search_result['torrents'][int(cmdArgs)]['id']).json()
for key, value in infos['terms'].iteritems():
print '\t- ' + key + ':\t' + value
@command('user')
def user(self, cmdArgs, filters):
"""
Show user data (ratio...)
"""
infos = self.t411.me().json()
print 'Uploaded:\t'+str(infos['uploaded'])+' bytes'
print 'Downloaded:\t'+str(infos['downloaded'])+' bytes'
print 'Ratio:\t'+str(float(infos['uploaded'])/float(infos['downloaded']))
@command('next')
def next(self, cmdArgs, filterss):
"""
Shows next results for last query
"""
if self.last_search_result:
self.offset += self.__result_len_limit__
self.last_search_result = self.search_t411()
self.print_search_results()
else:
print 'You need to make a search first.'
@command('previous')
def previous(self, cmdArgs, filters):
"""
Shows previous results for last query
"""
if self.last_search_result:
self.offset -= self.__result_len_limit__
self.offset = max(0, self.offset)
self.last_search_result = self.search_t411()
self.print_search_results()
else:
print 'You need to make a search first.'
@command('cat')
def cat(self, cmdArgs, filters):
"""
List categories
"""
cat_list = self.t411.categories().json()
for cat_id, cat_info in cat_list.iteritems():
if 'id' in cat_info:
print '\t-{!s}:\t{!s}'.format(cat_id, cat_info['name'].encode('utf-8'))
if 'cats' in cat_info:
for subcat_id, subcat_info in cat_info['cats'].iteritems():
print '\t\t-{!s}:\t{!s}'.format(subcat_id, subcat_info['name'].encode('utf-8'))
@command('download')
def download(self, cmdArgs, filters):
"""
[search result index] -> Download torrent
"""
torrent = self.t411.download(self.last_search_result['torrents'][int(cmdArgs)]['id'])
self.transmission.add_torrent(base64.b64encode(torrent.content))
@staticmethod
def parse_command_line(line):
filters = list()
cmdArgs = ''
cmd = ''
for i, ele in enumerate(line.split('|')):
if i:
filters.append({'type': (ele.split())[0], 'arg': (ele.split())[1:]})
else:
cmd = (ele.split())[0]
if len(ele.split())>1:
cmdArgs = (ele.split())[1:][0]
return cmd, cmdArgs, filters
def run(self):
cmd = ''
while cmd != 'q':
cmd, cmdArgs, filters = self.parse_command_line(self.__main_menu())
try:
self.commands[cmd]['method'](cmdArgs, filters)
except KeyError as e:
print 'Command {!s} not recognized-{!s}-'.format(cmd, e)
except Exception as e:
print 'Command {!s} failed -{!s}-'.format((cmd.split())[1:], e)
# Script entry point: build the interactive CLI and run its REPL.
if __name__ == '__main__':
cli = cmdLoop()
cli.run()
|
# -*- coding: utf-8 -*-
"""
Google Cloud Storage pythonic interface
"""
from __future__ import print_function
import decorator
import array
from base64 import b64encode
import google.auth as gauth
import google.auth.compute_engine
from google.auth.transport.requests import AuthorizedSession
from google.auth.exceptions import GoogleAuthError
from google.oauth2.credentials import Credentials
from google_auth_oauthlib.flow import InstalledAppFlow
from google.oauth2 import service_account
from hashlib import md5
import io
import json
import logging
import traceback
import os
import posixpath
import pickle
import re
import requests
import sys
import time
import warnings
from requests.exceptions import RequestException
from .utils import HtmlError, is_retriable, read_block
# True when running under Python 2 (used for the FileNotFoundError shim below).
PY2 = sys.version_info.major == 2
logger = logging.getLogger(__name__)
# Allow optional tracing of call locations for api calls.
# Disabled by default to avoid *massive* test logs.
_TRACE_METHOD_INVOCATIONS = False
# Decorator: log every call to the wrapped method at DEBUG level, optionally
# with the full call stack, then delegate to the original function.
@decorator.decorator
def _tracemethod(f, self, *args, **kwargs):
logger.debug("%s(args=%s, kwargs=%s)", f.__name__, args, kwargs)
# Stack dumps sit behind an extra flag AND a sub-DEBUG level because they
# make test logs enormous.
if _TRACE_METHOD_INVOCATIONS and logger.isEnabledFor(logging.DEBUG-1):
tb_io = io.StringIO()
traceback.print_stack(file=tb_io)
logger.log(logging.DEBUG - 1, tb_io.getvalue())
return f(self, *args, **kwargs)
# client created 2018-01-16
not_secret = {"client_id": "586241054156-0asut23a7m10790r2ik24309flribp7j"
".apps.googleusercontent.com",
"client_secret": "w6VkI99jS6e9mECscNztXvQv"}
# OAuth client configuration used by the 'browser' authentication flow.
client_config = {'installed': {
'client_id': not_secret['client_id'],
'client_secret': not_secret['client_secret'],
"auth_uri": "https://accounts.google.com/o/oauth2/auth",
"token_uri": "https://accounts.google.com/o/oauth2/token"
}}
# Where cached gcsfs credentials are pickled between sessions.
tfile = os.path.join(os.path.expanduser("~"), '.gcs_tokens')
# Valid predefined object ACL names and bucket ACL names, respectively.
ACLs = {"authenticatedread", "bucketownerfullcontrol", "bucketownerread",
"private", "projectprivate", "publicread"}
bACLs = {"authenticatedRead", "private", "projectPrivate", "publicRead",
"publicReadWrite"}
DEFAULT_PROJECT = os.environ.get('GCSFS_DEFAULT_PROJECT', '')
# 256KB and 5MB block sizes; presumably GCS upload constraints — confirm.
GCS_MIN_BLOCK_SIZE = 2 ** 18
DEFAULT_BLOCK_SIZE = 5 * 2 ** 20
# Python 2 has no FileNotFoundError; fall back to IOError.
if PY2:
FileNotFoundError = IOError
def quote_plus(s):
    """Make a URL path element HTTP-safe.

    Not the same as in urllib: only '/' and ' ' are escaped, so characters
    such as parentheses and commas pass through untouched.

    Parameters
    ----------
    s: input URL/portion

    Returns
    -------
    corrected URL
    """
    return s.replace('/', '%2F').replace(' ', '%20')
def norm_path(path):
    """Canonicalize path to '{bucket}/{name}' form."""
    bucket, name = split_path(path)
    return bucket + "/" + name
def split_path(path):
    """
    Normalise GCS path string into bucket and key.

    Parameters
    ----------
    path : string
        Input path, like `gcs://mybucket/path/to/file`.
        Path is of the form: '[gs|gcs://]bucket[/key]'

    Returns
    -------
    (bucket, key) tuple

    Examples
    --------
    >>> split_path("gcs://mybucket/path/to/file")
    ['mybucket', 'path/to/file']
    >>> split_path("mybucket/path/to/file")
    ['mybucket', 'path/to/file']
    >>> split_path("gs://mybucket")
    ['mybucket', '']
    """
    # Drop a recognised scheme prefix, then at most one leading slash.
    for scheme in ('gcs://', 'gs://'):
        if path.startswith(scheme):
            path = path[len(scheme):]
    if path.startswith('/'):
        path = path[1:]
    # Bucket-only paths have an empty key part.
    return path.split('/', 1) if '/' in path else (path, "")
def validate_response(r, path):
    """
    Check the requests object r, raise error if it's not ok.

    Parameters
    ----------
    r: requests response object
    path: associated URL path, for error messages

    Raises
    ------
    FileNotFoundError (404), IOError (403), ValueError ("invalid" body),
    HtmlError (structured API error) or RuntimeError, in that priority.
    """
    if not r.ok:
        m = str(r.content)
        error = None
        try:
            error = r.json()['error']
            msg = error['message']
        except (ValueError, KeyError, TypeError):
            # Narrowed from a bare except: body is not JSON (ValueError),
            # or lacks the expected 'error'/'message' structure.
            msg = str(r.content)
        if r.status_code == 404:
            raise FileNotFoundError(path)
        elif r.status_code == 403:
            raise IOError("Forbidden: %s\n%s" % (path, msg))
        elif "invalid" in m:
            raise ValueError("Bad Request: %s\n%s" % (path, msg))
        elif error:
            raise HtmlError(error)
        else:
            raise RuntimeError(m)
class GCSFileSystem(object):
"""
Connect to Google Cloud Storage.
The following modes of authentication are supported:
- ``token=None``, GCSFS will attempt to guess your credentials in the
following order: gcloud CLI default, gcsfs cached token, google compute
metadata service, anonymous.
- ``token='google_default'``, your default gcloud credentials will be used,
which are typically established by doing ``gcloud login`` in a terminal.
- ``token='cache'``, credentials from previously successful gcsfs
authentication will be used (use this after "browser" auth succeeded)
- ``token='anon'``, no authentication is performed, and you can only
access data which is accessible to allUsers (in this case, the project and
access level parameters are meaningless)
- ``token='browser'``, you get an access code with which you can
authenticate via a specially provided URL
- if ``token='cloud'``, we assume we are running within google compute
or google container engine, and query the internal metadata directly for
a token.
- you may supply a token generated by the
[gcloud](https://cloud.google.com/sdk/docs/)
utility; this is either a python dictionary, the name of a file
containing the JSON returned by logging in with the gcloud CLI tool,
or a Credentials object. gcloud typically stores its tokens in locations
such as
``~/.config/gcloud/application_default_credentials.json``,
`` ~/.config/gcloud/credentials``, or
``~\AppData\Roaming\gcloud\credentials``, etc.
Objects
-------
Specific methods, (eg. `ls`, `info`, ...) may return object details from GCS.
These detailed listings include the
[object resource](https://cloud.google.com/storage/docs/json_api/v1/objects#resource)
with additional properties:
- "path" : string
The "{bucket}/{name}" path of the object, used in calls to GCSFileSystem or GCSFile.
GCS *does not* include "directory" objects but instead generates directories by splitting
[object names](https://cloud.google.com/storage/docs/key-terms). This means that, for example,
a directory does not need to exist for an object to be created within it. Creating an object
implicitly creates its parent directories, and removing all objects from a directory implicitly
deletes the empty directory.
`GCSFileSystem` generates listing entries for these implied directories in listing apis with the
object properties:
- "path" : string
The "{bucket}/{name}" path of the dir, used in calls to GCSFileSystem or GCSFile.
- "bucket" : string
The name of the bucket containing this object.
- "name" : string
The "/" terminated name of the directory within the bucket.
- "kind" : 'storage#object'
- "size" : 0
- "storageClass" : 'DIRECTORY'
Caching
-------
GCSFileSystem maintains a per-implied-directory cache of object listings and fulfills all
object information and listing requests from cache. This implies, for example, that objects
created via other processes *will not* be visible to the GCSFileSystem until the cache
refreshed. Calls to GCSFileSystem.open and calls to GCSFile are not affected by this cache.
In the default case the cache is never expired. This may be controlled via the `cache_timeout`
GCSFileSystem parameter or via explicit calls to `GCSFileSystem.invalidate_cache`.
Parameters
----------
project : string
project_id to work under. Note that this is not the same as, but often
very similar to, the project name.
This is required in order
to list all the buckets you have access to within a project and to
create/delete buckets, or update their access policies.
If ``token='google_default'``, the value is overridden by the default,
if ``token='anon'``, the value is ignored.
access : one of {'read_only', 'read_write', 'full_control'}
Full control implies read/write as well as modifying metadata,
e.g., access control.
token: None, dict or string
(see description of authentication methods, above)
consistency: 'none', 'size', 'md5'
Check method when writing files. Can be overridden in open().
cache_timeout: float, seconds
Cache expiration time in seconds for object metadata cache.
Set cache_timeout <= 0 for no caching, None for no cache expiration.
secure_serialize: bool
If True, instances re-establish auth upon deserialization; if False,
token is passed directly, which may be a security risk if passed
across an insecure network.
check_connection: bool
When token=None, gcsfs will attempt various methods of establishing
credentials, falling back to anon. It is possible for a method to
find credentials in the system that turn out not to be valid. Setting
this parameter to True will ensure that an actual operation is
attempted before deciding that credentials are valid.
"""
scopes = {'read_only', 'read_write', 'full_control'}
retries = 4 # number of retries on http failure
base = "https://www.googleapis.com/storage/v1/"
_singleton = [None]
_singleton_pars = [None]
default_block_size = DEFAULT_BLOCK_SIZE
def __init__(self, project=DEFAULT_PROJECT, access='full_control',
             token=None, block_size=None, consistency='none',
             cache_timeout=None, secure_serialize=True,
             check_connection=True):
    """Create (or reuse) a connection; see the class docstring for parameters."""
    # Key used to decide whether the process-wide singleton can be reused.
    pars = (project, access, token, block_size, consistency, cache_timeout)
    if access not in self.scopes:
        # BUG FIX: the message was passed as two separate arguments and
        # never formatted; callers saw "('access must be one of {}', {...})".
        raise ValueError('access must be one of {}'.format(self.scopes))
    if project is None:
        warnings.warn('GCS project not set - cannot list or create buckets')
    if block_size is not None:
        self.default_block_size = block_size
    self.project = project
    self.access = access
    self.scope = "https://www.googleapis.com/auth/devstorage." + access
    self.consistency = consistency
    self.token = token
    self.cache_timeout = cache_timeout
    self.check_credentials = check_connection
    if pars == self._singleton_pars[0]:
        # Same parameters as the last instance: share its session and cache.
        inst = self._singleton[0]
        self.session = inst.session
        self._listing_cache = inst._listing_cache
        self.token = inst.token
    else:
        self.session = None
        self.connect(method=token)
        self._listing_cache = {}
        self._singleton[0] = self
        self._singleton_pars[0] = pars
    if not secure_serialize:
        # Carry live credentials through pickling (a risk on untrusted wires).
        self.token = self.session.credentials
@classmethod
def current(cls):
    """Return the most recently created GCSFileSystem.

    If no GCSFileSystem has been created, then create one.
    """
    instance = cls._singleton[0]
    if instance:
        return instance
    return GCSFileSystem()
@staticmethod
def load_tokens():
"""Load cached credentials from the token file into GCSFileSystem.tokens."""
try:
with open(tfile, 'rb') as f:
tokens = pickle.load(f)
# backwards compatability
tokens = {k: (GCSFileSystem._dict_to_credentials(v)
if isinstance(v, dict) else v)
for k, v in tokens.items()}
except Exception:
# A missing or corrupt cache file is not fatal; start empty.
tokens = {}
GCSFileSystem.tokens = tokens
def _connect_google_default(self):
# Use the environment's default credentials; gauth.default also yields
# the default project, which overrides whatever was passed in.
credentials, project = gauth.default(scopes=[self.scope])
self.project = project
self.session = AuthorizedSession(credentials)
def _connect_cloud(self):
# Obtain credentials from the compute-engine metadata service.
credentials = gauth.compute_engine.Credentials()
self.session = AuthorizedSession(credentials)
def _connect_cache(self):
# Reuse credentials cached by a previous successful authentication,
# keyed by (project, access); silently does nothing on a cache miss.
project, access = self.project, self.access
if (project, access) in self.tokens:
credentials = self.tokens[(project, access)]
self.session = AuthorizedSession(credentials)
def _dict_to_credentials(self, token):
"""
Convert old dict-style token.

Does not preserve access token itself, assumes refresh required.
"""
# Access token is deliberately None: the refresh_token is exchanged
# for a fresh access token on first use.
return Credentials(
None, refresh_token=token['refresh_token'],
client_secret=token['client_secret'],
client_id=token['client_id'],
token_uri='https://www.googleapis.com/oauth2/v4/token',
scopes=[self.scope]
)
def _connect_token(self, token):
"""
Connect using a concrete token
Parameters
----------
token: str, dict or Credentials
If a str, try to load as a Service file, or next as a JSON; if
dict, try to interpret as credentials; if Credentials, use directly.
"""
if isinstance(token, str):
if not os.path.exists(token):
raise FileNotFoundError(token)
try:
# is this a "service" token?
self._connect_service(token)
return
except:
# some other kind of token file
# will raise exception if is not json
token = json.load(open(token))
if isinstance(token, dict):
credentials = self._dict_to_credentials(token)
elif isinstance(token, Credentials):
credentials = token
else:
raise ValueError('Token format no understood')
self.session = AuthorizedSession(credentials)
def _connect_service(self, fn):
"""Authenticate from a service-account JSON key file at path fn."""
# raises exception if file does not match expectation
credentials = service_account.Credentials.from_service_account_file(
fn, scopes=[self.scope])
self.session = AuthorizedSession(credentials)
def _connect_anon(self):
# Plain unauthenticated session; only publicly readable data works.
self.session = requests.Session()
def _connect_browser(self):
"""Interactive OAuth console flow; caches the resulting credentials."""
flow = InstalledAppFlow.from_client_config(client_config, [self.scope])
credentials = flow.run_console()
self.tokens[(self.project, self.access)] = credentials
self._save_tokens()
self.session = AuthorizedSession(credentials)
def connect(self, method=None):
    """
    Establish session token. A new token will be requested if the current
    one is within 100s of expiry.

    Parameters
    ----------
    method: str (google_default|cache|cloud|token|anon|browser) or None
        Type of authorisation to implement - calls `_connect_*` methods.
        If None, will try sequence of methods.
    """
    if method not in ['google_default', 'cache', 'cloud', 'token', 'anon',
                      'browser', None]:
        # Any unrecognised value is assumed to be a concrete token.
        self._connect_token(method)
    elif method is None:
        for meth in ['google_default', 'cache', 'cloud', 'anon']:
            try:
                self.connect(method=meth)
                # BUG FIX: this previously compared the outer `method`
                # (always None in this branch) instead of the candidate
                # `meth`, so even anonymous sessions were probed with a
                # listing call.
                if self.check_credentials and meth != 'anon':
                    self.ls('anaconda-public-data')
            except Exception:
                # Narrowed from a bare except; failure just moves on to
                # the next candidate method.
                logger.debug('Connection with method "%s" failed' % meth)
            if self.session:
                break
    else:
        self.__getattribute__('_connect_' + method)()
    self.method = method
@staticmethod
def _save_tokens():
"""Persist GCSFileSystem.tokens to the cache file; failure is non-fatal."""
try:
with open(tfile, 'wb') as f:
pickle.dump(GCSFileSystem.tokens, f, 2)
except Exception as e:
warnings.warn('Saving token cache failed: ' + str(e))
@_tracemethod
def _call(self, method, path, *args, **kwargs):
"""Issue one API request with retries; return parsed JSON or raw bytes.

method: HTTP verb, looked up on the session ('get', 'post', 'delete'...).
path: URL tail; '{}' slots are filled (URL-quoted) from *args.
kwargs: query parameters (None values dropped); 'json' becomes the body.
"""
for k, v in list(kwargs.items()):
# only pass parameters that have values
if v is None:
del kwargs[k]
json = kwargs.pop('json', None)
meth = getattr(self.session, method)
if args:
path = path.format(*[quote_plus(p) for p in args])
for retry in range(self.retries):
try:
# Exponential backoff: 0s, 1s, 3s, 7s before successive attempts.
time.sleep(2**retry - 1)
r = meth(self.base + path, params=kwargs, json=json)
validate_response(r, path)
break
except (HtmlError, RequestException, GoogleAuthError) as e:
logger.exception("_call exception: %s", e)
if retry == self.retries - 1:
raise e
if is_retriable(e):
# retry
continue
raise e
try:
out = r.json()
except ValueError:
# Non-JSON payload (e.g. media download): return raw content.
out = r.content
return out
@property
def buckets(self):
"""Return list of available project buckets."""
# Relies on self.project being set; see _list_buckets.
return [b["name"] for b in self._list_buckets()["items"]]
@classmethod
def _process_object(cls, bucket, object_metadata):
    """Process object resource into gcsfs object information format.

    Process GCS object resource via type casting and attribute updates to
    the cache-able gcsf object information format. Returns an updated copy
    of the object resource.

    (See https://cloud.google.com/storage/docs/json_api/v1/objects#resource)
    """
    # FIX: the first parameter of this @classmethod was misleadingly
    # named `self`; it receives the class, so it is now `cls`.
    # Copy so the caller's dict is never mutated.
    result = dict(object_metadata)
    result["size"] = int(object_metadata.get("size", 0))
    result["path"] = posixpath.join(bucket, object_metadata["name"])
    return result
@_tracemethod
def _get_object(self, path):
"""Return object information at the given path."""
bucket, key = split_path(path)
# Check if parent dir is in listing cache
parent = "/".join([bucket, posixpath.dirname(key.rstrip("/"))]) + "/"
parent_cache = self._maybe_get_cached_listing(parent)
if parent_cache:
# A cached parent listing is authoritative: a miss there raises
# FileNotFoundError without hitting the API at all.
cached_obj = [o for o in parent_cache["items"] if o["name"] == key]
if cached_obj:
logger.debug("found cached object: %s", cached_obj)
return cached_obj[0]
else:
logger.debug("object not found cached parent listing")
raise FileNotFoundError(path)
if not key:
# Attempt to "get" the bucket root, return error instead of
# listing.
raise FileNotFoundError(path)
result = self._process_object(bucket, self._call('get', 'b/{}/o/{}',
bucket, key))
return result
@_tracemethod
def _maybe_get_cached_listing(self, path):
"""Return the cached listing for path, or None if absent or expired."""
logger.debug("_maybe_get_cached_listing: %s", path)
if path in self._listing_cache:
retrieved_time, listing = self._listing_cache[path]
cache_age = time.time() - retrieved_time
# cache_timeout=None means entries never expire.
if self.cache_timeout is not None and cache_age > self.cache_timeout:
logger.debug(
"expired cache path: %s retrieved_time: %.3f cache_age: "
"%.3f cache_timeout: %.3f",
path, retrieved_time, cache_age, self.cache_timeout
)
del self._listing_cache[path]
return None
return listing
return None
@_tracemethod
def _list_objects(self, path):
"""List path via the cache, fetching from the API and caching on a miss."""
path = norm_path(path)
clisting = self._maybe_get_cached_listing(path)
if clisting:
return clisting
listing = self._do_list_objects(path)
retrieved_time = time.time()
self._listing_cache[path] = (retrieved_time, listing)
return listing
@_tracemethod
def _do_list_objects(self, path, max_results=None):
    """Object listing for the given {bucket}/{prefix}/ path.

    Follows nextPageToken pagination until the listing is exhausted and
    returns a single combined 'storage#objects' resource.
    """
    bucket, prefix = split_path(path)
    if not prefix:
        prefix = None

    prefixes = []
    items = []
    page_token = None
    # REFACTOR: the first request and the pagination loop were duplicated
    # verbatim; a pageToken of None is dropped by _call, so one loop does.
    while True:
        page = self._call(
            'get', 'b/{}/o/', bucket, delimiter="/", prefix=prefix,
            maxResults=max_results, pageToken=page_token)
        assert page["kind"] == "storage#objects"
        prefixes.extend(page.get("prefixes", []))
        items.extend(page.get("items", []))
        page_token = page.get('nextPageToken', None)
        if page_token is None:
            break

    result = {
        "kind": "storage#objects",
        "prefixes": prefixes,
        "items": [self._process_object(bucket, i) for i in items],
    }
    return result
@_tracemethod
def _list_buckets(self):
    """Return list of all buckets under the current project.

    Follows nextPageToken pagination; returns one combined
    'storage#buckets' resource.
    """
    items = []
    page_token = None
    while True:
        # BUG FIX: the pagination request read `self.roject`, raising
        # AttributeError on any listing longer than one page; it now
        # uses `self.project`. (pageToken=None is dropped by _call.)
        page = self._call(
            'get', 'b/', project=self.project, pageToken=page_token)
        assert page["kind"] == "storage#buckets"
        items.extend(page.get("items", []))
        page_token = page.get('nextPageToken', None)
        if page_token is None:
            break
    result = {
        "kind": "storage#buckets",
        "items": items,
    }
    return result
@_tracemethod
def invalidate_cache(self, path=None):
    """
    Invalidate listing cache for given path, it is reloaded on next use.

    Parameters
    ----------
    path: string or None
        If None, clear all listings cached else listings at or under given
        path.
    """
    if not path:
        logger.debug("invalidate_cache clearing cache")
        self._listing_cache.clear()
        return
    prefix = norm_path(path)
    # Materialise the stale keys first; never mutate a dict mid-iteration.
    stale = [key for key in self._listing_cache if key.startswith(prefix)]
    for key in stale:
        self._listing_cache.pop(key, None)
@_tracemethod
def mkdir(self, bucket, acl='projectPrivate',
default_acl='bucketOwnerFullControl'):
"""
New bucket

Parameters
----------
bucket: str
bucket name
acl: string, one of bACLs
access for the bucket itself
default_acl: str, one of ACLs
default ACL for objects created in this bucket
"""
self._call('post', 'b/', predefinedAcl=acl, project=self.project,
predefinedDefaultObjectAcl=default_acl,
json={"name": bucket})
# Any stale cached listing under this bucket name must be refetched.
self.invalidate_cache(bucket)
@_tracemethod
def rmdir(self, bucket):
"""Delete an empty bucket"""
# Errors (e.g. bucket not empty) surface via validate_response in _call.
self._call('delete', 'b/' + bucket)
self.invalidate_cache(bucket)
@_tracemethod
def ls(self, path, detail=False):
"""List objects under the given '/{bucket}/{prefix} path."""
path = norm_path(path)
if path in ['/', '']:
# Root: list buckets rather than objects.
return self.buckets
elif path.endswith("/"):
return self._ls(path, detail)
else:
# Ambiguous: the path may name an object and/or a "directory";
# merge both listings.
combined_listing = self._ls(path, detail) + self._ls(path + "/",
detail)
if detail:
combined_entries = dict(
(l["path"], l) for l in combined_listing)
# Drop the placeholder entry for the directory itself.
combined_entries.pop(path + "/", None)
return list(combined_entries.values())
else:
return list(set(combined_listing) - {path + "/"})
@_tracemethod
def _ls(self, path, detail=False):
    """Single-level listing: paths (detail=False) or detail dicts plus
    synthesised pseudo-directory entries (detail=True)."""
    listing = self._list_objects(path)
    bucket, key = split_path(path)
    if not detail:
        # Convert item listing into list of 'item' and 'subdir/'
        # entries. Items may be of form "key/", in which case there
        # will be duplicate entries in prefix and item_names.
        item_names = [f["name"] for f in listing["items"] if f["name"]]
        # FIX: the prefixes list was previously copied element-by-element
        # via a no-op comprehension; use it directly.
        return [
            posixpath.join(bucket, n)
            for n in set(item_names + listing["prefixes"])
        ]
    else:
        item_details = listing["items"]
        # Synthesise a DIRECTORY entry for each implied directory prefix.
        pseudodirs = [{
            'bucket': bucket,
            'name': prefix,
            'path': bucket + "/" + prefix,
            'kind': 'storage#object',
            'size': 0,
            'storageClass': 'DIRECTORY',
        }
            for prefix in listing["prefixes"]
        ]
        return item_details + pseudodirs
@_tracemethod
def walk(self, path, detail=False):
""" Return all real keys below path. """
path = norm_path(path)
if path in ("/", ""):
raise ValueError("path must include at least target bucket")
if path.endswith('/'):
# Directory form: recurse into every DIRECTORY entry.
listing = self.ls(path, detail=True)
files = [l for l in listing if l["storageClass"] != "DIRECTORY"]
dirs = [l for l in listing if l["storageClass"] == "DIRECTORY"]
for d in dirs:
files.extend(self.walk(d["path"], detail=True))
else:
# Bare path: walk it as a directory, then add the object itself
# (if one exists at exactly this key).
files = self.walk(path + "/", detail=True)
try:
obj = self.info(path)
if obj["storageClass"] != "DIRECTORY":
files.append(obj)
except FileNotFoundError:
pass
if detail:
return files
else:
return [f["path"] for f in files]
@_tracemethod
def du(self, path, total=False, deep=False):
    """Disk usage: {path: size} mapping, or a summed total if total=True.

    Parameters
    ----------
    path: string
    total: bool
        If True, return the summed size in bytes instead of a mapping.
    deep: bool
        If True, recurse via walk(); otherwise only one listing level.
    """
    if deep:
        files = self.walk(path, True)
    else:
        # FIX: ls(..., True) already returns a list; the former wrapper
        # comprehension added nothing but an extra copy.
        files = self.ls(path, True)
    if total:
        return sum(f['size'] for f in files)
    return {f['path']: f['size'] for f in files}
@_tracemethod
def glob(self, path):
"""
Find files by glob-matching.

Note that the bucket part of the path must not contain a "*"
"""
path = path.rstrip('/')
bucket, key = split_path(path)
path = '/'.join([bucket, key])
if "*" in bucket:
raise ValueError('Bucket cannot contain a "*"')
if '*' not in path:
# No wildcard: treat as a directory and match everything inside it.
path = path.rstrip('/') + '/*'
if '/' in path[:path.index('*')]:
# Walk only from the deepest wildcard-free directory.
ind = path[:path.index('*')].rindex('/')
root = path[:ind + 1]
else:
root = ''
allfiles = self.walk(root)
# Translate the glob into a regex: '**' crosses '/', '*' does not,
# '?' matches a single character.
pattern = re.compile("^" + path.replace('//', '/')
.rstrip('/').replace('**', '.+')
.replace('*', '[^/]+')
.replace('?', '.') + "$")
out = [f for f in allfiles if re.match(pattern,
f.replace('//', '/').rstrip('/'))]
return out
@_tracemethod
def exists(self, path):
"""Whether the given bucket or object path exists."""
bucket, key = split_path(path)
try:
if key:
return bool(self.info(path))
else:
if bucket in self.buckets:
return True
else:
try:
# Bucket may be present & viewable, but not owned by
# the current project. Attempt to list.
self._list_objects(path)
return True
except (FileNotFoundError, IOError, ValueError):
# bucket listing failed as it doesn't exist or we can't
# see it
return False
except FileNotFoundError:
return False
@_tracemethod
def info(self, path):
"""Detail dict for an object, a bucket root, or an implied directory."""
bucket, key = split_path(path)
if not key:
# Return a pseudo dir for the bucket root
# TODO: check that it exists (either is in bucket list,
# or can list it)
return {
'bucket': bucket,
'name': "/",
'path': bucket + "/",
'kind': 'storage#object',
'size': 0,
'storageClass': 'DIRECTORY',
}
try:
return self._get_object(path)
except FileNotFoundError:
logger.debug("info FileNotFound at path: %s", path)
# ls containing directory of path to determine
# if a pseudodirectory is needed for this entry.
ikey = key.rstrip("/")
dkey = ikey + "/"
assert ikey, "Stripped path resulted in root object."
parent_listing = self.ls(
posixpath.join(bucket, posixpath.dirname(ikey)), detail=True)
pseudo_listing = [
i for i in parent_listing
if i["storageClass"] == "DIRECTORY" and i["name"] == dkey ]
if pseudo_listing:
return pseudo_listing[0]
else:
raise
@_tracemethod
def url(self, path):
"""Return the object's media-download URL (its 'mediaLink' property)."""
return self.info(path)['mediaLink']
@_tracemethod
def cat(self, path):
""" Simple one-shot get of file data """
details = self.info(path)
# _fetch_range is presumably defined later in this module — not visible here.
return _fetch_range(details, self.session)
@_tracemethod
def get(self, rpath, lpath, blocksize=5 * 2 ** 20):
"""Download remote file rpath to local path lpath, streaming by blocksize."""
with self.open(rpath, 'rb', block_size=blocksize) as f1:
with open(lpath, 'wb') as f2:
while True:
d = f1.read(blocksize)
if not d:
break
f2.write(d)
@_tracemethod
def put(self, lpath, rpath, blocksize=5 * 2 ** 20, acl=None):
"""Upload local file lpath to remote path rpath, streaming by blocksize."""
with self.open(rpath, 'wb', block_size=blocksize, acl=acl) as f1:
with open(lpath, 'rb') as f2:
while True:
d = f2.read(blocksize)
if not d:
break
f1.write(d)
@_tracemethod
def head(self, path, size=1024):
"""Return the first `size` bytes of the file at path."""
with self.open(path, 'rb') as f:
return f.read(size)
@_tracemethod
def tail(self, path, size=1024):
"""Return the last `size` bytes of the file at path."""
if size > self.info(path)['size']:
# Asked for more than the whole file: just return everything.
return self.cat(path)
with self.open(path, 'rb') as f:
f.seek(-size, 2)
return f.read()
@_tracemethod
def merge(self, path, paths, acl=None):
"""Concatenate objects within a single bucket"""
bucket, key = split_path(path)
# Only key names are sent; all sources must live in the destination bucket.
source = [{'name': split_path(p)[1]} for p in paths]
self._call('post', 'b/{}/o/{}/compose', bucket, key,
destinationPredefinedAcl=acl,
json={'sourceObjects': source,
"kind": "storage#composeRequest",
'destination': {'name': key, 'bucket': bucket}})
@_tracemethod
def copy(self, path1, path2, acl=None):
"""Server-side copy of the object at path1 to path2."""
b1, k1 = split_path(path1)
b2, k2 = split_path(path2)
self._call('post', 'b/{}/o/{}/copyTo/b/{}/o/{}', b1, k1, b2, k2,
destinationPredefinedAcl=acl)
@_tracemethod
def mv(self, path1, path2, acl=None):
"""Move: server-side copy, then delete the source object."""
self.copy(path1, path2, acl)
self.rm(path1)
@_tracemethod
def rm(self, path, recursive=False):
"""Delete keys.

If a list, batch-delete all keys in one go (can span buckets)

Returns whether operation succeeded (a list if input was a list)

If recursive, delete all keys given by walk(path)
"""
if isinstance(path, (tuple, list)):
# One multipart/mixed batch request; each part is a DELETE.
template = ('\n--===============7330845974216740156==\n'
'Content-Type: application/http\n'
'Content-Transfer-Encoding: binary\n'
'Content-ID: <b29c5de2-0db4-490b-b421-6a51b598bd11+{i}>'
'\n\nDELETE /storage/v1/b/{bucket}/o/{key} HTTP/1.1\n'
'Content-Type: application/json\n'
'accept: application/json\ncontent-length: 0\n')
body = "".join([template.format(i=i+1, bucket=p.split('/', 1)[0],
key=quote_plus(p.split('/', 1)[1]))
for i, p in enumerate(path)])
r = self.session.post('https://www.googleapis.com/batch', headers={
'Content-Type':
'multipart/mixed; boundary="==============='
'7330845974216740156=="'},
data=body + "\n--==============="
"7330845974216740156==--")
# actually can have some succeed and some fail
validate_response(r, path)
boundary = r.headers['Content-Type'].split('=', 1)[1]
# Drop cached listings for every affected parent directory.
parents = {posixpath.dirname(norm_path(p)) for p in path}
[self.invalidate_cache(parent) for parent in parents]
# Per-part status scraped from the multipart response body.
return ['200 OK' in c or '204 No Content' in c for c in
r.text.split(boundary)][1:-1]
elif recursive:
return self.rm(self.walk(path))
else:
bucket, key = split_path(path)
self._call('delete', "b/{}/o/{}", bucket, key)
self.invalidate_cache(posixpath.dirname(norm_path(path)))
return True
@_tracemethod
def open(self, path, mode='rb', block_size=None, acl=None,
consistency=None, metadata=None):
"""
See ``GCSFile``.
consistency: None or str
If None, use default for this instance
"""
if block_size is None:
block_size = self.default_block_size
const = consistency or self.consistency
if 'b' in mode:
return GCSFile(self, path, mode, block_size, consistency=const,
metadata=metadata)
else:
mode = mode.replace('t', '') + 'b'
return io.TextIOWrapper(
GCSFile(self, path, mode, block_size, consistency=const,
metadata=metadata))
@_tracemethod
def touch(self, path):
with self.open(path, 'wb'):
pass
    @_tracemethod
    def read_block(self, fn, offset, length, delimiter=None):
        """ Read a block of bytes from a GCS file

        Starting at ``offset`` of the file, read ``length`` bytes. If
        ``delimiter`` is set then we ensure that the read starts and stops at
        delimiter boundaries that follow the locations ``offset`` and ``offset
        + length``. If ``offset`` is zero then we start at zero. The
        bytestring returned WILL include the end delimiter string.

        If offset+length is beyond the eof, reads to eof.

        Parameters
        ----------
        fn: string
            Path to filename on GCS
        offset: int
            Byte offset to start read
        length: int
            Number of bytes to read
        delimiter: bytes (optional)
            Ensure reading starts and stops at delimiter bytestring

        Examples
        --------
        >>> gcs.read_block('data/file.csv', 0, 13)  # doctest: +SKIP
        b'Alice, 100\\nBo'
        >>> gcs.read_block('data/file.csv', 0, 13, delimiter=b'\\n')  # doctest: +SKIP
        b'Alice, 100\\nBob, 200\\n'

        Use ``length=None`` to read to the end of the file.
        >>> gcs.read_block('data/file.csv', 0, None, delimiter=b'\\n')  # doctest: +SKIP
        b'Alice, 100\\nBob, 200\\nCharlie, 300'

        See Also
        --------
        distributed.utils.read_block
        """
        with self.open(fn, 'rb') as f:
            size = f.size
            if length is None:
                length = size
            # Clamp the requested range to the end of the file.
            if offset + length > size:
                length = size - offset
            # Delimiter-aware slicing is delegated to the shared utility.
            bytes = read_block(f, offset, length, delimiter)
        return bytes
def __getstate__(self):
d = self.__dict__.copy()
d["_listing_cache"] = {}
logger.debug("Serialize with state: %s", d)
return d
def __setstate__(self, state):
self.__dict__.update(state)
self.connect(self.token)
GCSFileSystem.load_tokens()
class GCSFile:
    """File-like object for one GCS key.

    Read mode maintains a single contiguous cached byte range of the remote
    object (``self.cache`` covering ``[self.start, self.end)``); write mode
    buffers locally and uploads either in one request (small files, see
    ``_simple_upload``) or through a GCS resumable upload session
    (``_initiate_upload`` / ``_upload_chunk``).
    """
    @_tracemethod
    def __init__(self, gcsfs, path, mode='rb', block_size=DEFAULT_BLOCK_SIZE,
                 acl=None, consistency='md5', metadata=None):
        """
        Open a file.

        Parameters
        ----------
        gcsfs: instance of GCSFileSystem
        path: str
            location in GCS, like 'bucket/path/to/file'
        mode: str
            Normal file modes. Currently only 'wb' and 'rb'.
        block_size: int
            Buffer size for reading or writing
        acl: str
            ACL to apply, if any, one of ``ACLs``. New files are normally
            "bucketownerfullcontrol", but a default can be configured per
            bucket.
        consistency: str, 'none', 'size', 'md5'
            Check for success in writing, applied at file close.
            'size' ensures that the number of bytes reported by GCS matches
            the number we wrote; 'md5' does a full checksum. Any value other
            than 'size' or 'md5' is assumed to mean no checking.
        metadata: dict
            Custom metadata, in key/value pairs, added at file creation
        """
        bucket, key = split_path(path)
        if not key:
            raise OSError('Attempt to open a bucket')
        self.gcsfs = gcsfs
        self.bucket = bucket
        self.key = key
        self.metadata = metadata
        self.mode = mode
        self.blocksize = block_size
        # Read-mode cache: bytes for the remote range [self.start, self.end)
        self.cache = b""
        self.loc = 0
        self.acl = acl
        self.end = None
        self.start = None
        self.closed = False
        # When True, read() drops fully-consumed leading blocks of the cache
        self.trim = True
        self.consistency = consistency
        if self.consistency == 'md5':
            self.md5 = md5()
        if mode not in {'rb', 'wb'}:
            raise NotImplementedError('File mode not supported')
        if mode == 'rb':
            self.details = gcsfs.info(path)
            self.size = self.details['size']
        else:
            if block_size < GCS_MIN_BLOCK_SIZE:
                warnings.warn('Setting block size to minimum value, 2**18')
                self.blocksize = GCS_MIN_BLOCK_SIZE
            self.buffer = io.BytesIO()
            # Bytes already handed to the resumable-upload session
            self.offset = 0
            # Set once flush(force=True) has run; no further writes allowed
            self.forced = False
            # Resumable-upload session URL (None until initiated)
            self.location = None
    def info(self):
        """ File information about this path """
        return self.details
    def url(self):
        # Direct media link for downloading this object's content
        return self.details['mediaLink']
    def tell(self):
        """ Current file location """
        return self.loc
    @_tracemethod
    def seek(self, loc, whence=0):
        """ Set current file location

        Parameters
        ----------
        loc : int
            byte location
        whence : {0, 1, 2}
            from start of file, current location or end of file, resp.
        """
        if not self.mode == 'rb':
            raise ValueError('Seek only available in read mode')
        if whence == 0:
            nloc = loc
        elif whence == 1:
            nloc = self.loc + loc
        elif whence == 2:
            nloc = self.size + loc
        else:
            raise ValueError(
                "invalid whence (%s, should be 0, 1 or 2)" % whence)
        if nloc < 0:
            raise ValueError('Seek before start of file')
        self.loc = nloc
        return self.loc
    def readline(self, length=-1):
        """
        Read and return a line from the stream.

        If length is specified, at most size bytes will be read.
        """
        self._fetch(self.loc, self.loc + 1)
        while True:
            # find() is relative to the cache; +1 turns "index of newline"
            # into "number of bytes up to and including the newline" (0 if
            # no newline found).
            found = self.cache[self.loc - self.start:].find(b'\n') + 1
            if 0 < length < found:
                return self.read(length)
            if found:
                return self.read(found)
            # NOTE(review): strict '>' here; appears to rely on _fetch
            # extending self.end past EOF before this can trigger — confirm
            if self.end > self.size:
                return self.read(length)
            self._fetch(self.start, self.end + self.blocksize)
    def __next__(self):
        data = self.readline()
        if data:
            return data
        else:
            raise StopIteration
    # Python 2 iterator-protocol alias
    next = __next__
    def __iter__(self):
        return self
    def readlines(self):
        """ Return all lines in a file as a list """
        return list(self)
    def write(self, data):
        """
        Write data to buffer.

        Buffer only sent to GCS on flush() or if buffer is greater than
        or equal to blocksize.

        Parameters
        ----------
        data : bytes
            Set of bytes to be written.
        """
        if self.mode not in {'wb', 'ab'}:
            raise ValueError('File not in write mode')
        if self.closed:
            raise ValueError('I/O operation on closed file.')
        if self.forced:
            raise ValueError('This file has been force-flushed, can only close')
        out = self.buffer.write(ensure_writable(data))
        self.loc += out
        if self.buffer.tell() >= self.blocksize:
            self.flush()
        return out
    @_tracemethod
    def flush(self, force=False):
        """
        Write buffered data to GCS.

        Uploads the current buffer, if it is larger than the block-size, or if
        the file is being closed.

        Parameters
        ----------
        force : bool
            When closing, write the last block even if it is smaller than
            blocks are allowed to be. Disallows further writing to this file.
        """
        if self.closed:
            raise ValueError('Flush on closed file')
        if force and self.forced:
            raise ValueError("Force flush cannot be called more than once")
        if self.mode not in {'wb', 'ab'}:
            assert not hasattr(self, "buffer"), "flush on read-mode file with non-empty buffer"
            return
        if self.buffer.tell() == 0 and not force:
            # no data in the buffer to write
            return
        if self.buffer.tell() < GCS_MIN_BLOCK_SIZE and not force:
            # GCS rejects non-final resumable chunks below the minimum size
            logger.debug(
                "flush(force=False) with buffer (%i) < min size (2 ** 18), "
                "skipping block upload.", self.buffer.tell()
            )
            return
        if not self.offset:
            if force and self.buffer.tell() <= self.blocksize:
                # Force-write a buffer below blocksize with a single write
                self._simple_upload()
            elif not force and self.buffer.tell() <= self.blocksize:
                # Defer initialization of multipart upload, *may* still
                # be able to simple upload.
                return
            else:
                # Initialize a multipart upload, setting self.location
                self._initiate_upload()
        if self.location is not None:
            # Multipart upload has been initialized; send the current chunk
            self._upload_chunk(final=force)
        if force:
            self.forced = True
    @_tracemethod
    def _upload_chunk(self, final=False):
        # Send the buffered bytes to the resumable-upload session.
        self.buffer.seek(0)
        data = self.buffer.read()
        head = {}
        # Buffer position is at the end after read(), so tell() == len(data)
        l = self.buffer.tell()
        if final:
            if l:
                # Final chunk: total size becomes known (offset + l)
                head['Content-Range'] = 'bytes %i-%i/%i' % (
                    self.offset, self.offset + l - 1, self.offset + l)
            else:
                # closing when buffer is empty
                head['Content-Range'] = 'bytes */%i' % self.offset
                data = None
        else:
            assert l >= GCS_MIN_BLOCK_SIZE, "Non-final chunk write below min size."
            # '*' total: object size still unknown to the server
            head['Content-Range'] = 'bytes %i-%i/*' % (
                self.offset, self.offset + l - 1)
        head.update({'Content-Type': 'application/octet-stream',
                     'Content-Length': str(l)})
        r = self.gcsfs.session.post(
            self.location, params={'uploadType': 'resumable'},
            headers=head, data=data)
        validate_response(r, self.location)
        if 'Range' in r.headers:
            # Server accepted only part of the chunk; keep the shortfall
            # bytes in a fresh buffer for the next attempt.
            assert not final, "Response looks like upload is partial"
            shortfall = (self.offset + l - 1) - int(
                r.headers['Range'].split('-')[1])
            if shortfall:
                if self.consistency == 'md5':
                    self.md5.update(data[:-shortfall])
                self.buffer = io.BytesIO(data[-shortfall:])
                self.buffer.seek(shortfall)
            else:
                if self.consistency == 'md5':
                    self.md5.update(data)
                self.buffer = io.BytesIO()
            self.offset += l - shortfall
        else:
            # Upload finished: verify according to the consistency mode
            assert final, "Response looks like upload is over"
            size, md5 = int(r.json()['size']), r.json()['md5Hash']
            if self.consistency == 'size':
                assert size == self.buffer.tell() + self.offset, "Size mismatch"
            if self.consistency == 'md5':
                assert b64encode(
                    self.md5.digest()) == md5.encode(), "MD5 checksum failed"
            self.buffer = io.BytesIO()
            self.offset += l
    @_tracemethod
    def _initiate_upload(self):
        # Start a resumable upload session; the Location response header is
        # the session URL that subsequent chunks are POSTed to.
        r = self.gcsfs.session.post(
            'https://www.googleapis.com/upload/storage/v1/b/%s/o'
            % quote_plus(self.bucket),
            params={'uploadType': 'resumable'},
            json={'name': self.key, 'metadata': self.metadata})
        self.location = r.headers['Location']
    @_tracemethod
    def _simple_upload(self):
        """One-shot upload, less than 5MB"""
        self.buffer.seek(0)
        data = self.buffer.read()
        path = ('https://www.googleapis.com/upload/storage/v1/b/%s/o'
                % quote_plus(self.bucket))
        r = self.gcsfs.session.post(
            path, params={'uploadType': 'media', 'name': self.key}, data=data)
        validate_response(r, path)
        size, md5 = int(r.json()['size']), r.json()['md5Hash']
        if self.consistency == 'size':
            assert size == self.buffer.tell(), "Size mismatch"
        if self.consistency == 'md5':
            self.md5.update(data)
            assert b64encode(self.md5.digest()) == md5.encode(), "MD5 checksum failed"
    @_tracemethod
    def _fetch(self, start, end):
        # Ensure the cache covers [start, end); extend or refetch as needed.
        if self.start is None and self.end is None:
            # First read
            self.start = start
            self.end = end + self.blocksize
            self.cache = _fetch_range(self.details, self.gcsfs.session,
                                      self.start, self.end)
        if start < self.start:
            if self.end - end > self.blocksize:
                # Requested range is well before the cache: start afresh
                self.start = start
                self.end = end + self.blocksize
                self.cache = _fetch_range(self.details, self.gcsfs.session,
                                          self.start, self.end)
            else:
                # Extend the cache backwards
                new = _fetch_range(self.details, self.gcsfs.session,
                                   start, self.start)
                self.start = start
                self.cache = new + self.cache
        if end > self.end:
            if self.end > self.size:
                # Already cached through EOF; nothing more to fetch
                return
            if end - self.end > self.blocksize:
                # Requested range is well past the cache: start afresh
                self.start = start
                self.end = end + self.blocksize
                self.cache = _fetch_range(self.details, self.gcsfs.session,
                                          self.start, self.end)
            else:
                # Extend the cache forwards
                new = _fetch_range(self.details, self.gcsfs.session, self.end,
                                   end + self.blocksize)
                self.end = end + self.blocksize
                self.cache = self.cache + new
    def read(self, length=-1):
        """
        Return data from cache, or fetch pieces as necessary

        Parameters
        ----------
        length : int (-1)
            Number of bytes to read; if <0, all remaining bytes.
        """
        if self.mode != 'rb':
            raise ValueError('File not in read mode')
        if length < 0:
            length = self.size
        if self.closed:
            raise ValueError('I/O operation on closed file.')
        self._fetch(self.loc, self.loc + length)
        out = self.cache[self.loc - self.start:
                         self.loc - self.start + length]
        self.loc += len(out)
        if self.trim:
            # Drop whole consumed blocks from the head of the cache to bound
            # memory use during sequential reads.
            num = (self.loc - self.start) // self.blocksize - 1
            if num > 0:
                self.start += self.blocksize * num
                self.cache = self.cache[self.blocksize * num:]
        return out
    @_tracemethod
    def close(self):
        """ Close file """
        if self.closed:
            return
        if self.mode == 'rb':
            self.cache = None
        else:
            if not self.forced:
                # Upload whatever remains in the buffer
                self.flush(force=True)
            else:
                logger.debug("close with forced=True, bypassing final flush.")
                assert self.buffer.tell() == 0
            self.gcsfs.invalidate_cache(
                posixpath.dirname("/".join([self.bucket, self.key])))
        self.closed = True
    def readable(self):
        """Return whether the GCSFile was opened for reading"""
        return self.mode == 'rb'
    def seekable(self):
        """Return whether the GCSFile is seekable (only in read mode)"""
        return self.readable()
    def writable(self):
        """Return whether the GCSFile was opened for writing"""
        return self.mode in {'wb', 'ab'}
    @_tracemethod
    def __del__(self):
        self.close()
    def __str__(self):
        return "<GCSFile %s/%s>" % (self.bucket, self.key)
    __repr__ = __str__
    @_tracemethod
    def __enter__(self):
        return self
    @_tracemethod
    def __exit__(self, *args):
        self.close()
@_tracemethod
def _fetch_range(obj_dict, session, start=None, end=None):
    """Download an object's bytes from GCS, optionally a byte sub-range.

    obj_dict is an entry from ls()/info(); session is a requests.Session.
    When either bound is given, a Range header limits the download.
    """
    if start is None and end is None:
        headers = None
    else:
        range_start = start if start is not None else 0
        range_end = end if end is not None else 0
        headers = {'Range': 'bytes=%i-%i' % (range_start, range_end - 1)}
    resp = session.get(obj_dict['mediaLink'], headers=headers)
    content = resp.content
    # GCS answers an unsatisfiable range with this literal body
    if content == b'Request range not satisfiable':
        return b''
    return content
def put_object(credentials, bucket, name, data, session):
    """Simple one-shot upload, up to 5MB of data.

    Parameters
    ----------
    credentials : from auth()
    bucket : string
    name : object name
    data : binary
    session: requests.Session instance
    """
    out = session.post('https://www.googleapis.com/upload/storage/'
                       'v1/b/%s/o?uploadType=media&name=%s' % (
                           quote_plus(bucket), quote_plus(name)),
                       headers={'Authorization': 'Bearer ' +
                                credentials.access_token,
                                'Content-Type': 'application/octet-stream',
                                # HTTP header values must be strings;
                                # passing the raw int breaks requests
                                'Content-Length': str(len(data))}, data=data)
    assert out.status_code == 200
def ensure_writable(b):
    """Return a bytes-like object that requests can send.

    Python 2's ``array.array`` is not accepted directly, so it is converted
    to a byte string there; everything else passes through unchanged.
    """
    return b.tostring() if PY2 and isinstance(b, array.array) else b
# reset session if connect failed
# -*- coding: utf-8 -*-
"""
Google Cloud Storage pythonic interface
"""
from __future__ import print_function
import decorator
import array
from base64 import b64encode
import google.auth as gauth
import google.auth.compute_engine
from google.auth.transport.requests import AuthorizedSession
from google.auth.exceptions import GoogleAuthError
from google.oauth2.credentials import Credentials
from google_auth_oauthlib.flow import InstalledAppFlow
from google.oauth2 import service_account
from hashlib import md5
import io
import json
import logging
import traceback
import os
import posixpath
import pickle
import re
import requests
import sys
import time
import warnings
from requests.exceptions import RequestException
from .utils import HtmlError, is_retriable, read_block
# True when running under Python 2 (used for bytes/str compatibility shims).
PY2 = sys.version_info.major == 2
logger = logging.getLogger(__name__)
# Allow optional tracing of call locations for api calls.
# Disabled by default to avoid *massive* test logs.
_TRACE_METHOD_INVOCATIONS = False
@decorator.decorator
def _tracemethod(f, self, *args, **kwargs):
    # Log every decorated call; when _TRACE_METHOD_INVOCATIONS is switched
    # on and the logger is verbose enough, also log the full call stack at
    # level DEBUG-1.
    logger.debug("%s(args=%s, kwargs=%s)", f.__name__, args, kwargs)
    if _TRACE_METHOD_INVOCATIONS and logger.isEnabledFor(logging.DEBUG-1):
        tb_io = io.StringIO()
        traceback.print_stack(file=tb_io)
        logger.log(logging.DEBUG - 1, tb_io.getvalue())
    return f(self, *args, **kwargs)
# client created 2018-01-16
# OAuth "installed app" client used by the 'browser' auth flow; deliberately
# published (hence the name), per Google's installed-app model.
not_secret = {"client_id": "586241054156-0asut23a7m10790r2ik24309flribp7j"
                           ".apps.googleusercontent.com",
              "client_secret": "w6VkI99jS6e9mECscNztXvQv"}
client_config = {'installed': {
    'client_id': not_secret['client_id'],
    'client_secret': not_secret['client_secret'],
    "auth_uri": "https://accounts.google.com/o/oauth2/auth",
    "token_uri": "https://accounts.google.com/o/oauth2/token"
}}
# On-disk location of the pickled token cache.
tfile = os.path.join(os.path.expanduser("~"), '.gcs_tokens')
# Predefined per-object and per-bucket ACL names accepted by the API.
ACLs = {"authenticatedread", "bucketownerfullcontrol", "bucketownerread",
        "private", "projectprivate", "publicread"}
bACLs = {"authenticatedRead", "private", "projectPrivate", "publicRead",
         "publicReadWrite"}
DEFAULT_PROJECT = os.environ.get('GCSFS_DEFAULT_PROJECT', '')
# GCS resumable uploads require non-final chunks of at least 256KB.
GCS_MIN_BLOCK_SIZE = 2 ** 18
DEFAULT_BLOCK_SIZE = 5 * 2 ** 20
if PY2:
    # Python 2 has no FileNotFoundError; alias it to IOError.
    FileNotFoundError = IOError
def quote_plus(s):
    """Escape the few URL characters GCS object names need encoded.

    Deliberately narrower than ``urllib``'s version: characters such as
    parentheses and commas are passed through untouched; only '/' and ' '
    are percent-encoded.

    Parameters
    ----------
    s: input URL/portion

    Returns
    -------
    corrected URL
    """
    return s.replace('/', '%2F').replace(' ', '%20')
def norm_path(path):
    """Canonicalize path to '{bucket}/{name}' form."""
    bucket, name = split_path(path)
    return bucket + "/" + name
def split_path(path):
    """
    Normalise GCS path string into bucket and key.

    Parameters
    ----------
    path : string
        Input path, like `gcs://mybucket/path/to/file`.
        Path is of the form: '[gs|gcs://]bucket[/key]'

    Returns
    -------
    [bucket, key] list

    Examples
    --------
    >>> split_path("gcs://mybucket/path/to/file")
    ['mybucket', 'path/to/file']
    >>> split_path("mybucket/path/to/file")
    ['mybucket', 'path/to/file']
    >>> split_path("gs://mybucket")
    ['mybucket', '']
    """
    # Strip an optional protocol prefix and any leading slash.
    if path.startswith('gcs://'):
        path = path[6:]
    if path.startswith('gs://'):
        path = path[5:]
    if path.startswith('/'):
        path = path[1:]
    # Always return a two-element list, matching the doctest examples
    # (previously a bucket-only path returned a tuple, the others a list).
    if '/' not in path:
        return [path, ""]
    else:
        return path.split('/', 1)
def validate_response(r, path):
    """
    Check the requests object r, raise error if it's not ok.

    Parameters
    ----------
    r: requests response object
    path: associated URL path, for error messages
    """
    if not r.ok:
        m = str(r.content)
        error = None
        try:
            error = r.json()['error']
            msg = error['message']
        except (ValueError, KeyError, TypeError):
            # body is not JSON, or lacks the expected 'error' structure
            # (was a bare except, which also swallowed KeyboardInterrupt)
            msg = str(r.content)
        if r.status_code == 404:
            raise FileNotFoundError(path)
        elif r.status_code == 403:
            raise IOError("Forbidden: %s\n%s" % (path, msg))
        elif "invalid" in m:
            raise ValueError("Bad Request: %s\n%s" % (path, msg))
        elif error:
            raise HtmlError(error)
        else:
            raise RuntimeError(m)
class GCSFileSystem(object):
"""
Connect to Google Cloud Storage.
The following modes of authentication are supported:
- ``token=None``, GCSFS will attempt to guess your credentials in the
following order: gcloud CLI default, gcsfs cached token, google compute
metadata service, anonymous.
- ``token='google_default'``, your default gcloud credentials will be used,
which are typically established by doing ``gcloud login`` in a terminal.
    - ``token='cache'``, credentials from previously successful gcsfs
authentication will be used (use this after "browser" auth succeeded)
    - ``token='anon'``, no authentication is performed, and you can only
access data which is accessible to allUsers (in this case, the project and
access level parameters are meaningless)
- ``token='browser'``, you get an access code with which you can
authenticate via a specially provided URL
- if ``token='cloud'``, we assume we are running within google compute
or google container engine, and query the internal metadata directly for
a token.
- you may supply a token generated by the
[gcloud](https://cloud.google.com/sdk/docs/)
utility; this is either a python dictionary, the name of a file
containing the JSON returned by logging in with the gcloud CLI tool,
or a Credentials object. gcloud typically stores its tokens in locations
such as
``~/.config/gcloud/application_default_credentials.json``,
`` ~/.config/gcloud/credentials``, or
``~\AppData\Roaming\gcloud\credentials``, etc.
Objects
-------
Specific methods, (eg. `ls`, `info`, ...) may return object details from GCS.
These detailed listings include the
[object resource](https://cloud.google.com/storage/docs/json_api/v1/objects#resource)
with additional properties:
- "path" : string
The "{bucket}/{name}" path of the object, used in calls to GCSFileSystem or GCSFile.
GCS *does not* include "directory" objects but instead generates directories by splitting
[object names](https://cloud.google.com/storage/docs/key-terms). This means that, for example,
a directory does not need to exist for an object to be created within it. Creating an object
    implicitly creates its parent directories, and removing all objects from a directory implicitly
deletes the empty directory.
`GCSFileSystem` generates listing entries for these implied directories in listing apis with the
    object properties:
- "path" : string
The "{bucket}/{name}" path of the dir, used in calls to GCSFileSystem or GCSFile.
- "bucket" : string
The name of the bucket containing this object.
- "name" : string
The "/" terminated name of the directory within the bucket.
- "kind" : 'storage#object'
- "size" : 0
- "storageClass" : 'DIRECTORY'
Caching
-------
GCSFileSystem maintains a per-implied-directory cache of object listings and fulfills all
object information and listing requests from cache. This implied, for example, that objects
created via other processes *will not* be visible to the GCSFileSystem until the cache
refreshed. Calls to GCSFileSystem.open and calls to GCSFile are not effected by this cache.
In the default case the cache is never expired. This may be controlled via the `cache_timeout`
GCSFileSystem parameter or via explicit calls to `GCSFileSystem.invalidate_cache`.
Parameters
----------
project : string
        project_id to work under. Note that this is not the same as, but often
very similar to, the project name.
This is required in order
to list all the buckets you have access to within a project and to
create/delete buckets, or update their access policies.
        If ``token='google_default'``, the value is overridden by the default,
if ``token='anon'``, the value is ignored.
access : one of {'read_only', 'read_write', 'full_control'}
Full control implies read/write as well as modifying metadata,
e.g., access control.
token: None, dict or string
(see description of authentication methods, above)
consistency: 'none', 'size', 'md5'
Check method when writing files. Can be overridden in open().
cache_timeout: float, seconds
Cache expiration time in seconds for object metadata cache.
Set cache_timeout <= 0 for no caching, None for no cache expiration.
secure_serialize: bool
If True, instances re-establish auth upon deserialization; if False,
token is passed directly, which may be a security risk if passed
across an insecure network.
check_connection: bool
When token=None, gcsfs will attempt various methods of establishing
        credentials, falling back to anon. It is possible for a method to
find credentials in the system that turn out not to be valid. Setting
this parameter to True will ensure that an actual operation is
attempted before deciding that credentials are valid.
"""
scopes = {'read_only', 'read_write', 'full_control'}
retries = 4 # number of retries on http failure
base = "https://www.googleapis.com/storage/v1/"
_singleton = [None]
_singleton_pars = [None]
default_block_size = DEFAULT_BLOCK_SIZE
def __init__(self, project=DEFAULT_PROJECT, access='full_control',
token=None, block_size=None, consistency='none',
cache_timeout=None, secure_serialize=True,
check_connection=True):
pars = (project, access, token, block_size, consistency, cache_timeout)
if access not in self.scopes:
raise ValueError('access must be one of {}', self.scopes)
if project is None:
warnings.warn('GCS project not set - cannot list or create buckets')
if block_size is not None:
self.default_block_size = block_size
self.project = project
self.access = access
self.scope = "https://www.googleapis.com/auth/devstorage." + access
self.consistency = consistency
self.token = token
self.cache_timeout = cache_timeout
self.check_credentials = check_connection
if pars == self._singleton_pars[0]:
inst = self._singleton[0]
self.session = inst.session
self._listing_cache = inst._listing_cache
self.token = inst.token
else:
self.session = None
self.connect(method=token)
self._listing_cache = {}
self._singleton[0] = self
self._singleton_pars[0] = pars
if not secure_serialize:
self.token = self.session.credentials
@classmethod
def current(cls):
""" Return the most recently created GCSFileSystem
If no GCSFileSystem has been created, then create one
"""
if not cls._singleton[0]:
return GCSFileSystem()
else:
return cls._singleton[0]
@staticmethod
def load_tokens():
try:
with open(tfile, 'rb') as f:
tokens = pickle.load(f)
# backwards compatability
tokens = {k: (GCSFileSystem._dict_to_credentials(v)
if isinstance(v, dict) else v)
for k, v in tokens.items()}
except Exception:
tokens = {}
GCSFileSystem.tokens = tokens
def _connect_google_default(self):
credentials, project = gauth.default(scopes=[self.scope])
self.project = project
self.session = AuthorizedSession(credentials)
def _connect_cloud(self):
credentials = gauth.compute_engine.Credentials()
self.session = AuthorizedSession(credentials)
def _connect_cache(self):
project, access = self.project, self.access
if (project, access) in self.tokens:
credentials = self.tokens[(project, access)]
self.session = AuthorizedSession(credentials)
def _dict_to_credentials(self, token):
"""
Convert old dict-style token.
Does not preserve access token itself, assumes refresh required.
"""
return Credentials(
None, refresh_token=token['refresh_token'],
client_secret=token['client_secret'],
client_id=token['client_id'],
token_uri='https://www.googleapis.com/oauth2/v4/token',
scopes=[self.scope]
)
def _connect_token(self, token):
"""
Connect using a concrete token
Parameters
----------
token: str, dict or Credentials
If a str, try to load as a Service file, or next as a JSON; if
dict, try to interpret as credentials; if Credentials, use directly.
"""
if isinstance(token, str):
if not os.path.exists(token):
raise FileNotFoundError(token)
try:
# is this a "service" token?
self._connect_service(token)
return
except:
# some other kind of token file
# will raise exception if is not json
token = json.load(open(token))
if isinstance(token, dict):
credentials = self._dict_to_credentials(token)
elif isinstance(token, Credentials):
credentials = token
else:
raise ValueError('Token format no understood')
self.session = AuthorizedSession(credentials)
def _connect_service(self, fn):
# raises exception if file does not match expectation
credentials = service_account.Credentials.from_service_account_file(
fn, scopes=[self.scope])
self.session = AuthorizedSession(credentials)
    def _connect_anon(self):
        # Unauthenticated session: only data readable by allUsers will work.
        self.session = requests.Session()
    def _connect_browser(self):
        # Run the OAuth installed-app console flow, then cache the resulting
        # credentials on disk so later 'cache' connections can reuse them.
        flow = InstalledAppFlow.from_client_config(client_config, [self.scope])
        credentials = flow.run_console()
        self.tokens[(self.project, self.access)] = credentials
        self._save_tokens()
        self.session = AuthorizedSession(credentials)
def connect(self, method=None):
"""
Establish session token. A new token will be requested if the current
one is within 100s of expiry.
Parameters
----------
method: str (google_default|cache|cloud|token|anon|browser) or None
Type of authorisation to implement - calls `_connect_*` methods.
If None, will try sequence of methods.
"""
if method not in ['google_default', 'cache', 'cloud', 'token', 'anon',
'browser', None]:
self._connect_token(method)
elif method is None:
for meth in ['google_default', 'cache', 'cloud', 'anon']:
try:
self.connect(method=meth)
if self.check_credentials and method != 'anon':
self.ls('anaconda-public-data')
except:
self.session = None
logger.debug('Connection with method "%s" failed' % meth)
if self.session:
break
else:
self.__getattribute__('_connect_' + method)()
self.method = method
@staticmethod
def _save_tokens():
try:
with open(tfile, 'wb') as f:
pickle.dump(GCSFileSystem.tokens, f, 2)
except Exception as e:
warnings.warn('Saving token cache failed: ' + str(e))
    @_tracemethod
    def _call(self, method, path, *args, **kwargs):
        # Issue one JSON-API request with retries and exponential backoff.
        for k, v in list(kwargs.items()):
            # only pass parameters that have values
            if v is None:
                del kwargs[k]
        json = kwargs.pop('json', None)
        meth = getattr(self.session, method)
        if args:
            # positional args are URL path components; escape them
            path = path.format(*[quote_plus(p) for p in args])
        for retry in range(self.retries):
            try:
                time.sleep(2**retry - 1)  # 0s on first attempt, then backoff
                r = meth(self.base + path, params=kwargs, json=json)
                validate_response(r, path)
                break
            except (HtmlError, RequestException, GoogleAuthError) as e:
                logger.exception("_call exception: %s", e)
                if retry == self.retries - 1:
                    # out of attempts: surface the last error
                    raise e
                if is_retriable(e):
                    # retry
                    continue
                raise e
        try:
            out = r.json()
        except ValueError:
            # non-JSON response body (e.g. media download)
            out = r.content
        return out
    @property
    def buckets(self):
        """Return list of available project buckets."""
        # Requires self.project to be set for the underlying list call.
        return [b["name"] for b in self._list_buckets()["items"]]
@classmethod
def _process_object(self, bucket, object_metadata):
"""Process object resource into gcsfs object information format.
Process GCS object resource via type casting and attribute updates to
the cache-able gcsf object information format. Returns an updated copy
of the object resource.
(See https://cloud.google.com/storage/docs/json_api/v1/objects#resource)
"""
result = dict(object_metadata)
result["size"] = int(object_metadata.get("size", 0))
result["path"] = posixpath.join(bucket, object_metadata["name"])
return result
    @_tracemethod
    def _get_object(self, path):
        """Return object information at the given path."""
        bucket, key = split_path(path)

        # Check if parent dir is in listing cache
        parent = "/".join([bucket, posixpath.dirname(key.rstrip("/"))]) + "/"
        parent_cache = self._maybe_get_cached_listing(parent)
        if parent_cache:
            cached_obj = [o for o in parent_cache["items"] if o["name"] == key]
            if cached_obj:
                logger.debug("found cached object: %s", cached_obj)
                return cached_obj[0]
            else:
                # A cached parent listing is treated as authoritative:
                # absence from it means the object does not exist.
                logger.debug("object not found cached parent listing")
                raise FileNotFoundError(path)

        if not key:
            # Attempt to "get" the bucket root, return error instead of
            # listing.
            raise FileNotFoundError(path)

        result = self._process_object(bucket, self._call('get', 'b/{}/o/{}',
                                                         bucket, key))
        return result
@_tracemethod
def _maybe_get_cached_listing(self, path):
logger.debug("_maybe_get_cached_listing: %s", path)
if path in self._listing_cache:
retrieved_time, listing = self._listing_cache[path]
cache_age = time.time() - retrieved_time
if self.cache_timeout is not None and cache_age > self.cache_timeout:
logger.debug(
"expired cache path: %s retrieved_time: %.3f cache_age: "
"%.3f cache_timeout: %.3f",
path, retrieved_time, cache_age, self.cache_timeout
)
del self._listing_cache[path]
return None
return listing
return None
@_tracemethod
def _list_objects(self, path):
path = norm_path(path)
clisting = self._maybe_get_cached_listing(path)
if clisting:
return clisting
listing = self._do_list_objects(path)
retrieved_time = time.time()
self._listing_cache[path] = (retrieved_time, listing)
return listing
@_tracemethod
def _do_list_objects(self, path, max_results = None):
"""Object listing for the given {bucket}/{prefix}/ path."""
bucket, prefix = split_path(path)
if not prefix:
prefix = None
prefixes = []
items = []
page = self._call(
'get', 'b/{}/o/', bucket, delimiter="/", prefix=prefix,
maxResults=max_results)
assert page["kind"] == "storage#objects"
prefixes.extend(page.get("prefixes", []))
items.extend(page.get("items", []))
next_page_token = page.get('nextPageToken', None)
while next_page_token is not None:
page = self._call(
'get', 'b/{}/o/', bucket, delimiter="/", prefix=prefix,
maxResults=max_results, pageToken=next_page_token)
assert page["kind"] == "storage#objects"
prefixes.extend(page.get("prefixes", []))
items.extend(page.get("items", []))
next_page_token = page.get('nextPageToken', None)
result = {
"kind": "storage#objects",
"prefixes": prefixes,
"items": [self._process_object(bucket, i) for i in items],
}
return result
@_tracemethod
def _list_buckets(self):
"""Return list of all buckets under the current project."""
items = []
page = self._call(
'get', 'b/', project=self.project
)
assert page["kind"] == "storage#buckets"
items.extend(page.get("items", []))
next_page_token = page.get('nextPageToken', None)
while next_page_token is not None:
page = self._call(
'get', 'b/', project=self.roject, pageToken=next_page_token)
assert page["kind"] == "storage#buckets"
items.extend(page.get("items", []))
next_page_token = page.get('nextPageToken', None)
result = {
"kind": "storage#buckets",
"items": items,
}
return result
@_tracemethod
def invalidate_cache(self, path=None):
"""
Invalidate listing cache for given path, it is reloaded on next use.
Parameters
----------
path: string or None
If None, clear all listings cached else listings at or under given
path.
"""
if not path:
logger.debug("invalidate_cache clearing cache")
self._listing_cache.clear()
else:
path = norm_path(path)
invalid_keys = [k for k in self._listing_cache
if k.startswith(path)]
for k in invalid_keys:
self._listing_cache.pop(k, None)
    @_tracemethod
    def mkdir(self, bucket, acl='projectPrivate',
              default_acl='bucketOwnerFullControl'):
        """
        New bucket

        Parameters
        ----------
        bucket: str
            bucket name
        acl: string, one of bACLs
            access for the bucket itself
        default_acl: str, one of ACLs
            default ACL for objects created in this bucket
        """
        # Bucket creation is a project-level operation (needs self.project).
        self._call('post', 'b/', predefinedAcl=acl, project=self.project,
                   predefinedDefaultObjectAcl=default_acl,
                   json={"name": bucket})
        self.invalidate_cache(bucket)
    @_tracemethod
    def rmdir(self, bucket):
        """Delete an empty bucket; the server rejects non-empty buckets."""
        self._call('delete', 'b/' + bucket)
        self.invalidate_cache(bucket)
@_tracemethod
def ls(self, path, detail=False):
"""List objects under the given '/{bucket}/{prefix} path."""
path = norm_path(path)
if path in ['/', '']:
return self.buckets
elif path.endswith("/"):
return self._ls(path, detail)
else:
combined_listing = self._ls(path, detail) + self._ls(path + "/",
detail)
if detail:
combined_entries = dict(
(l["path"], l) for l in combined_listing)
combined_entries.pop(path + "/", None)
return list(combined_entries.values())
else:
return list(set(combined_listing) - {path + "/"})
@_tracemethod
def _ls(self, path, detail=False):
listing = self._list_objects(path)
bucket, key = split_path(path)
if not detail:
# Convert item listing into list of 'item' and 'subdir/'
# entries. Items may be of form "key/", in which case there
# will be duplicate entries in prefix and item_names.
item_names = [f["name"] for f in listing["items"] if f["name"]]
prefixes = [p for p in listing["prefixes"]]
return [
posixpath.join(bucket, n) for n in set(item_names + prefixes)
]
else:
item_details = listing["items"]
pseudodirs = [{
'bucket': bucket,
'name': prefix,
'path': bucket + "/" + prefix,
'kind': 'storage#object',
'size': 0,
'storageClass': 'DIRECTORY',
}
for prefix in listing["prefixes"]
]
return item_details + pseudodirs
@_tracemethod
def walk(self, path, detail=False):
""" Return all real keys belows path. """
path = norm_path(path)
if path in ("/", ""):
raise ValueError("path must include at least target bucket")
if path.endswith('/'):
listing = self.ls(path, detail=True)
files = [l for l in listing if l["storageClass"] != "DIRECTORY"]
dirs = [l for l in listing if l["storageClass"] == "DIRECTORY"]
for d in dirs:
files.extend(self.walk(d["path"], detail=True))
else:
files = self.walk(path + "/", detail=True)
try:
obj = self.info(path)
if obj["storageClass"] != "DIRECTORY":
files.append(obj)
except FileNotFoundError:
pass
if detail:
return files
else:
return [f["path"] for f in files]
@_tracemethod
def du(self, path, total=False, deep=False):
if deep:
files = self.walk(path, True)
else:
files = [f for f in self.ls(path, True)]
if total:
return sum(f['size'] for f in files)
return {f['path']: f['size'] for f in files}
    @_tracemethod
    def glob(self, path):
        """
        Find files by glob-matching.
        Note that the bucket part of the path must not contain a "*"
        """
        path = path.rstrip('/')
        bucket, key = split_path(path)
        path = '/'.join([bucket, key])
        if "*" in bucket:
            raise ValueError('Bucket cannot contain a "*"')
        # A wildcard-free path means "everything directly under this path"
        if '*' not in path:
            path = path.rstrip('/') + '/*'
        # Walk from the deepest directory prefix that contains no wildcard
        if '/' in path[:path.index('*')]:
            ind = path[:path.index('*')].rindex('/')
            root = path[:ind + 1]
        else:
            root = ''
        allfiles = self.walk(root)
        # Translate the glob into a regex: '**' spans path separators,
        # '*' stays within a single segment, '?' matches one character.
        pattern = re.compile("^" + path.replace('//', '/')
                             .rstrip('/').replace('**', '.+')
                             .replace('*', '[^/]+')
                             .replace('?', '.') + "$")
        out = [f for f in allfiles if re.match(pattern,
               f.replace('//', '/').rstrip('/'))]
        return out
@_tracemethod
def exists(self, path):
bucket, key = split_path(path)
try:
if key:
return bool(self.info(path))
else:
if bucket in self.buckets:
return True
else:
try:
# Bucket may be present & viewable, but not owned by
# the current project. Attempt to list.
self._list_objects(path)
return True
except (FileNotFoundError, IOError, ValueError):
# bucket listing failed as it doesn't exist or we can't
# see it
return False
except FileNotFoundError:
return False
    @_tracemethod
    def info(self, path):
        """Detailed entry for one object, bucket root, or pseudo-directory.
        Raises FileNotFoundError when nothing matches ``path``.
        """
        bucket, key = split_path(path)
        if not key:
            # Return a pseudo dir for the bucket root
            # TODO: check that it exists (either is in bucket list,
            # or can list it)
            return {
                'bucket': bucket,
                'name': "/",
                'path': bucket + "/",
                'kind': 'storage#object',
                'size': 0,
                'storageClass': 'DIRECTORY',
            }
        try:
            return self._get_object(path)
        except FileNotFoundError:
            logger.debug("info FileNotFound at path: %s", path)
            # ls containing directory of path to determine
            # if a pseudodirectory is needed for this entry.
            ikey = key.rstrip("/")
            dkey = ikey + "/"
            assert ikey, "Stripped path resulted in root object."
            parent_listing = self.ls(
                posixpath.join(bucket, posixpath.dirname(ikey)), detail=True)
            pseudo_listing = [
                i for i in parent_listing
                if i["storageClass"] == "DIRECTORY" and i["name"] == dkey]
            if pseudo_listing:
                return pseudo_listing[0]
            else:
                # Neither a real object nor a pseudo-dir: re-raise
                raise
    @_tracemethod
    def url(self, path):
        """Return the direct download URL ('mediaLink') for ``path``."""
        return self.info(path)['mediaLink']
    @_tracemethod
    def cat(self, path):
        """ Simple one-shot get of file data """
        # Whole object in a single request, bypassing GCSFile caching
        details = self.info(path)
        return _fetch_range(details, self.session)
@_tracemethod
def get(self, rpath, lpath, blocksize=5 * 2 ** 20):
with self.open(rpath, 'rb', block_size=blocksize) as f1:
with open(lpath, 'wb') as f2:
while True:
d = f1.read(blocksize)
if not d:
break
f2.write(d)
@_tracemethod
def put(self, lpath, rpath, blocksize=5 * 2 ** 20, acl=None):
with self.open(rpath, 'wb', block_size=blocksize, acl=acl) as f1:
with open(lpath, 'rb') as f2:
while True:
d = f2.read(blocksize)
if not d:
break
f1.write(d)
    @_tracemethod
    def head(self, path, size=1024):
        """Return the first ``size`` bytes of the object at ``path``."""
        with self.open(path, 'rb') as f:
            return f.read(size)
    @_tracemethod
    def tail(self, path, size=1024):
        """Return the last ``size`` bytes of the object at ``path``."""
        # Smaller file than the requested tail: just return all of it
        if size > self.info(path)['size']:
            return self.cat(path)
        with self.open(path, 'rb') as f:
            f.seek(-size, 2)
            return f.read()
    @_tracemethod
    def merge(self, path, paths, acl=None):
        """Concatenate objects within a single bucket"""
        # Uses the GCS "compose" API: every source must live in the same
        # bucket as the destination, so only the key part of each source
        # path is sent.
        bucket, key = split_path(path)
        source = [{'name': split_path(p)[1]} for p in paths]
        self._call('post', 'b/{}/o/{}/compose', bucket, key,
                   destinationPredefinedAcl=acl,
                   json={'sourceObjects': source,
                         "kind": "storage#composeRequest",
                         'destination': {'name': key, 'bucket': bucket}})
    @_tracemethod
    def copy(self, path1, path2, acl=None):
        """Server-side copy of one object to another location.
        Parameters
        ----------
        path1: str
            source '{bucket}/{key}'
        path2: str
            destination '{bucket}/{key}' (may be a different bucket)
        acl: str, optional
            predefined ACL applied to the new object
        """
        b1, k1 = split_path(path1)
        b2, k2 = split_path(path2)
        self._call('post', 'b/{}/o/{}/copyTo/b/{}/o/{}', b1, k1, b2, k2,
                   destinationPredefinedAcl=acl)
    @_tracemethod
    def mv(self, path1, path2, acl=None):
        """Move an object: server-side copy then delete. Not atomic."""
        self.copy(path1, path2, acl)
        self.rm(path1)
    @_tracemethod
    def rm(self, path, recursive=False):
        """Delete keys.
        If a list, batch-delete all keys in one go (can span buckets)
        Returns whether operation succeeded (a list if input was a list)
        If recursive, delete all keys given by walk(path)
        """
        if isinstance(path, (tuple, list)):
            # Build a multipart/mixed batch request: one nested DELETE per
            # key, all separated by the fixed MIME boundary below.
            template = ('\n--===============7330845974216740156==\n'
                        'Content-Type: application/http\n'
                        'Content-Transfer-Encoding: binary\n'
                        'Content-ID: <b29c5de2-0db4-490b-b421-6a51b598bd11+{i}>'
                        '\n\nDELETE /storage/v1/b/{bucket}/o/{key} HTTP/1.1\n'
                        'Content-Type: application/json\n'
                        'accept: application/json\ncontent-length: 0\n')
            body = "".join([template.format(i=i+1, bucket=p.split('/', 1)[0],
                                            key=quote_plus(p.split('/', 1)[1]))
                            for i, p in enumerate(path)])
            r = self.session.post('https://www.googleapis.com/batch', headers={
                'Content-Type':
                    'multipart/mixed; boundary="==============='
                    '7330845974216740156=="'},
                data=body + "\n--==============="
                            "7330845974216740156==--")
            # actually can have some succeed and some fail
            validate_response(r, path)
            boundary = r.headers['Content-Type'].split('=', 1)[1]
            # Drop cached listings for every parent directory touched
            parents = {posixpath.dirname(norm_path(p)) for p in path}
            [self.invalidate_cache(parent) for parent in parents]
            # One response part per key between boundaries; the first and
            # last splits are the multipart preamble/epilogue.
            return ['200 OK' in c or '204 No Content' in c for c in
                    r.text.split(boundary)][1:-1]
        elif recursive:
            return self.rm(self.walk(path))
        else:
            bucket, key = split_path(path)
            self._call('delete', "b/{}/o/{}", bucket, key)
            self.invalidate_cache(posixpath.dirname(norm_path(path)))
            return True
@_tracemethod
def open(self, path, mode='rb', block_size=None, acl=None,
consistency=None, metadata=None):
"""
See ``GCSFile``.
consistency: None or str
If None, use default for this instance
"""
if block_size is None:
block_size = self.default_block_size
const = consistency or self.consistency
if 'b' in mode:
return GCSFile(self, path, mode, block_size, consistency=const,
metadata=metadata)
else:
mode = mode.replace('t', '') + 'b'
return io.TextIOWrapper(
GCSFile(self, path, mode, block_size, consistency=const,
metadata=metadata))
    @_tracemethod
    def touch(self, path):
        """Create an empty object at ``path``."""
        with self.open(path, 'wb'):
            pass
    @_tracemethod
    def read_block(self, fn, offset, length, delimiter=None):
        """ Read a block of bytes from a GCS file
        Starting at ``offset`` of the file, read ``length`` bytes. If
        ``delimiter`` is set then we ensure that the read starts and stops at
        delimiter boundaries that follow the locations ``offset`` and ``offset
        + length``. If ``offset`` is zero then we start at zero. The
        bytestring returned WILL include the end delimiter string.
        If offset+length is beyond the eof, reads to eof.
        Parameters
        ----------
        fn: string
            Path to filename on GCS
        offset: int
            Byte offset to start read
        length: int
            Number of bytes to read
        delimiter: bytes (optional)
            Ensure reading starts and stops at delimiter bytestring
        Examples
        --------
        >>> gcs.read_block('data/file.csv', 0, 13)  # doctest: +SKIP
        b'Alice, 100\\nBo'
        >>> gcs.read_block('data/file.csv', 0, 13, delimiter=b'\\n')  # doctest: +SKIP
        b'Alice, 100\\nBob, 200\\n'
        Use ``length=None`` to read to the end of the file.
        >>> gcs.read_block('data/file.csv', 0, None, delimiter=b'\\n')  # doctest: +SKIP
        b'Alice, 100\\nBob, 200\\nCharlie, 300'
        See Also
        --------
        distributed.utils.read_block
        """
        with self.open(fn, 'rb') as f:
            size = f.size
            if length is None:
                length = size
            if offset + length > size:
                # Clamp reads extending past end-of-file
                length = size - offset
            bytes = read_block(f, offset, length, delimiter)
        return bytes
    def __getstate__(self):
        """Support pickling: the listing cache is dropped from the state."""
        d = self.__dict__.copy()
        # Cached listings would be stale (and bulky) after unpickling
        d["_listing_cache"] = {}
        logger.debug("Serialize with state: %s", d)
        return d
    def __setstate__(self, state):
        """Restore pickled state and re-establish the connection."""
        self.__dict__.update(state)
        # Reconnect with the saved token; the live session is not pickled
        self.connect(self.token)
GCSFileSystem.load_tokens()
class GCSFile:
    """File-like object for reading and writing a single GCS object.
    Read mode maintains a sliding cached window of the remote data; write
    mode buffers locally and uploads either in one shot (small files) or
    through GCS resumable uploads in chunks of at least GCS_MIN_BLOCK_SIZE.
    """
    @_tracemethod
    def __init__(self, gcsfs, path, mode='rb', block_size=DEFAULT_BLOCK_SIZE,
                 acl=None, consistency='md5', metadata=None):
        """
        Open a file.
        Parameters
        ----------
        gcsfs: instance of GCSFileSystem
        path: str
            location in GCS, like 'bucket/path/to/file'
        mode: str
            Normal file modes. Currently only 'wb' and 'rb'.
        block_size: int
            Buffer size for reading or writing
        acl: str
            ACL to apply, if any, one of ``ACLs``. New files are normally
            "bucketownerfullcontrol", but a default can be configured per
            bucket.
        consistency: str, 'none', 'size', 'md5'
            Check for success in writing, applied at file close.
            'size' ensures that the number of bytes reported by GCS matches
            the number we wrote; 'md5' does a full checksum. Any value other
            than 'size' or 'md5' is assumed to mean no checking.
        metadata: dict
            Custom metadata, in key/value pairs, added at file creation
        """
        bucket, key = split_path(path)
        if not key:
            raise OSError('Attempt to open a bucket')
        self.gcsfs = gcsfs
        self.bucket = bucket
        self.key = key
        self.metadata = metadata
        self.mode = mode
        self.blocksize = block_size
        self.cache = b""  # read-mode cache of the current remote window
        self.loc = 0  # current logical file position
        self.acl = acl
        self.end = None  # exclusive end of the cached window
        self.start = None  # inclusive start of the cached window
        self.closed = False
        self.trim = True  # drop fully-consumed cache blocks in read()
        self.consistency = consistency
        if self.consistency == 'md5':
            self.md5 = md5()
        if mode not in {'rb', 'wb'}:
            raise NotImplementedError('File mode not supported')
        if mode == 'rb':
            self.details = gcsfs.info(path)
            self.size = self.details['size']
        else:
            # Resumable uploads reject chunks below the service minimum
            if block_size < GCS_MIN_BLOCK_SIZE:
                warnings.warn('Setting block size to minimum value, 2**18')
                self.blocksize = GCS_MIN_BLOCK_SIZE
            self.buffer = io.BytesIO()
            self.offset = 0  # bytes already uploaded to GCS
            self.forced = False  # True after a final (force) flush
            self.location = None  # resumable-upload session URL, once begun
    def info(self):
        """ File information about this path """
        return self.details
    def url(self):
        """Direct download link ('mediaLink') for this object."""
        return self.details['mediaLink']
    def tell(self):
        """ Current file location """
        return self.loc
    @_tracemethod
    def seek(self, loc, whence=0):
        """ Set current file location
        Parameters
        ----------
        loc : int
            byte location
        whence : {0, 1, 2}
            from start of file, current location or end of file, resp.
        """
        if not self.mode == 'rb':
            raise ValueError('Seek only available in read mode')
        if whence == 0:
            nloc = loc
        elif whence == 1:
            nloc = self.loc + loc
        elif whence == 2:
            nloc = self.size + loc
        else:
            raise ValueError(
                "invalid whence (%s, should be 0, 1 or 2)" % whence)
        if nloc < 0:
            raise ValueError('Seek before start of file')
        self.loc = nloc
        return self.loc
    def readline(self, length=-1):
        """
        Read and return a line from the stream.
        If length is specified, at most size bytes will be read.
        """
        self._fetch(self.loc, self.loc + 1)
        while True:
            # Look for a newline in the cached window beyond current loc
            found = self.cache[self.loc - self.start:].find(b'\n') + 1
            if 0 < length < found:
                # A newline exists but lies beyond the length limit
                return self.read(length)
            if found:
                return self.read(found)
            if self.end > self.size:
                # Cached through EOF with no newline: return what remains
                return self.read(length)
            # Extend the cached window by one block and retry
            self._fetch(self.start, self.end + self.blocksize)
    def __next__(self):
        # Line iteration; an empty line signals end-of-file
        data = self.readline()
        if data:
            return data
        else:
            raise StopIteration
    next = __next__  # Python 2 iterator-protocol compatibility
    def __iter__(self):
        return self
    def readlines(self):
        """ Return all lines in a file as a list """
        return list(self)
    def write(self, data):
        """
        Write data to buffer.
        Buffer only sent to GCS on flush() or if buffer is greater than
        or equal to blocksize.
        Parameters
        ----------
        data : bytes
            Set of bytes to be written.
        """
        if self.mode not in {'wb', 'ab'}:
            raise ValueError('File not in write mode')
        if self.closed:
            raise ValueError('I/O operation on closed file.')
        if self.forced:
            raise ValueError('This file has been force-flushed, can only close')
        out = self.buffer.write(ensure_writable(data))
        self.loc += out
        if self.buffer.tell() >= self.blocksize:
            self.flush()
        return out
    @_tracemethod
    def flush(self, force=False):
        """
        Write buffered data to GCS.
        Uploads the current buffer, if it is larger than the block-size, or if
        the file is being closed.
        Parameters
        ----------
        force : bool
            When closing, write the last block even if it is smaller than
            blocks are allowed to be. Disallows further writing to this file.
        """
        if self.closed:
            raise ValueError('Flush on closed file')
        if force and self.forced:
            raise ValueError("Force flush cannot be called more than once")
        if self.mode not in {'wb', 'ab'}:
            # Read-mode files have no write buffer; flush is a no-op
            assert not hasattr(self, "buffer"), "flush on read-mode file with non-empty buffer"
            return
        if self.buffer.tell() == 0 and not force:
            # no data in the buffer to write
            return
        if self.buffer.tell() < GCS_MIN_BLOCK_SIZE and not force:
            logger.debug(
                "flush(force=False) with buffer (%i) < min size (2 ** 18), "
                "skipping block upload.", self.buffer.tell()
            )
            return
        if not self.offset:
            # Nothing uploaded yet: decide between one-shot and resumable
            if force and self.buffer.tell() <= self.blocksize:
                # Force-write a buffer below blocksize with a single write
                self._simple_upload()
            elif not force and self.buffer.tell() <= self.blocksize:
                # Defer initialization of multipart upload, *may* still
                # be able to simple upload.
                return
            else:
                # At initialize a multipart upload, setting self.location
                self._initiate_upload()
        if self.location is not None:
            # Continue with multipart upload has been initialized
            self._upload_chunk(final=force)
        if force:
            self.forced = True
    @_tracemethod
    def _upload_chunk(self, final=False):
        """Send the buffered bytes to the resumable-upload session.
        If ``final``, the Content-Range also carries the total size, which
        tells GCS to complete the upload.
        """
        self.buffer.seek(0)
        data = self.buffer.read()
        head = {}
        # After read(), tell() is the end position, i.e. the chunk length
        l = self.buffer.tell()
        if final:
            if l:
                head['Content-Range'] = 'bytes %i-%i/%i' % (
                    self.offset, self.offset + l - 1, self.offset + l)
            else:
                # closing when buffer is empty
                head['Content-Range'] = 'bytes */%i' % self.offset
                data = None
        else:
            assert l >= GCS_MIN_BLOCK_SIZE, "Non-final chunk write below min size."
            # Total size is unknown ('*') until the final chunk
            head['Content-Range'] = 'bytes %i-%i/*' % (
                self.offset, self.offset + l - 1)
        head.update({'Content-Type': 'application/octet-stream',
                     'Content-Length': str(l)})
        r = self.gcsfs.session.post(
            self.location, params={'uploadType': 'resumable'},
            headers=head, data=data)
        validate_response(r, self.location)
        if 'Range' in r.headers:
            # Server acknowledged only part of the chunk
            assert not final, "Response looks like upload is partial"
            shortfall = (self.offset + l - 1) - int(
                r.headers['Range'].split('-')[1])
            if shortfall:
                if self.consistency == 'md5':
                    self.md5.update(data[:-shortfall])
                # Keep the unacknowledged tail to resend with next chunk;
                # seek to its end so subsequent writes append after it
                self.buffer = io.BytesIO(data[-shortfall:])
                self.buffer.seek(shortfall)
            else:
                if self.consistency == 'md5':
                    self.md5.update(data)
                self.buffer = io.BytesIO()
            self.offset += l - shortfall
        else:
            # Upload complete; run the requested consistency check.
            # NOTE: local ``md5`` here shadows the module-level md5 factory.
            assert final, "Response looks like upload is over"
            size, md5 = int(r.json()['size']), r.json()['md5Hash']
            if self.consistency == 'size':
                assert size == self.buffer.tell() + self.offset, "Size mismatch"
            if self.consistency == 'md5':
                assert b64encode(
                    self.md5.digest()) == md5.encode(), "MD5 checksum failed"
            self.buffer = io.BytesIO()
            self.offset += l
    @_tracemethod
    def _initiate_upload(self):
        """Start a resumable upload session; GCS returns its URL in
        the 'Location' header."""
        r = self.gcsfs.session.post(
            'https://www.googleapis.com/upload/storage/v1/b/%s/o'
            % quote_plus(self.bucket),
            params={'uploadType': 'resumable'},
            json={'name': self.key, 'metadata': self.metadata})
        self.location = r.headers['Location']
    @_tracemethod
    def _simple_upload(self):
        """One-shot upload, less than 5MB"""
        self.buffer.seek(0)
        data = self.buffer.read()
        path = ('https://www.googleapis.com/upload/storage/v1/b/%s/o'
                % quote_plus(self.bucket))
        r = self.gcsfs.session.post(
            path, params={'uploadType': 'media', 'name': self.key}, data=data)
        validate_response(r, path)
        # NOTE: local ``md5`` here shadows the module-level md5 factory
        size, md5 = int(r.json()['size']), r.json()['md5Hash']
        if self.consistency == 'size':
            assert size == self.buffer.tell(), "Size mismatch"
        if self.consistency == 'md5':
            self.md5.update(data)
            assert b64encode(self.md5.digest()) == md5.encode(), "MD5 checksum failed"
    @_tracemethod
    def _fetch(self, start, end):
        """Ensure bytes [start, end) are present in ``self.cache``.
        Maintains one contiguous cached window [self.start, self.end);
        requests slightly outside the window extend it, while requests far
        outside it (more than one block) replace the cache wholesale.
        """
        if self.start is None and self.end is None:
            # First read
            self.start = start
            self.end = end + self.blocksize
            self.cache = _fetch_range(self.details, self.gcsfs.session,
                                      self.start, self.end)
        if start < self.start:
            if self.end - end > self.blocksize:
                # Requested range lies well before the window: start over
                self.start = start
                self.end = end + self.blocksize
                self.cache = _fetch_range(self.details, self.gcsfs.session,
                                          self.start, self.end)
            else:
                # Prepend the missing head to the existing cache
                new = _fetch_range(self.details, self.gcsfs.session,
                                   start, self.start)
                self.start = start
                self.cache = new + self.cache
        if end > self.end:
            if self.end > self.size:
                # Already cached through EOF; nothing more to fetch
                return
            if end - self.end > self.blocksize:
                # Requested range lies well past the window: start over
                self.start = start
                self.end = end + self.blocksize
                self.cache = _fetch_range(self.details, self.gcsfs.session,
                                          self.start, self.end)
            else:
                # Append the missing tail (plus one block of readahead)
                new = _fetch_range(self.details, self.gcsfs.session, self.end,
                                   end + self.blocksize)
                self.end = end + self.blocksize
                self.cache = self.cache + new
    def read(self, length=-1):
        """
        Return data from cache, or fetch pieces as necessary
        Parameters
        ----------
        length : int (-1)
            Number of bytes to read; if <0, all remaining bytes.
        """
        if self.mode != 'rb':
            raise ValueError('File not in read mode')
        if length < 0:
            length = self.size
        if self.closed:
            raise ValueError('I/O operation on closed file.')
        self._fetch(self.loc, self.loc + length)
        out = self.cache[self.loc - self.start:
                         self.loc - self.start + length]
        self.loc += len(out)
        if self.trim:
            # Discard whole blocks already read past, keeping at least one
            # block of history before the current position
            num = (self.loc - self.start) // self.blocksize - 1
            if num > 0:
                self.start += self.blocksize * num
                self.cache = self.cache[self.blocksize * num:]
        return out
    @_tracemethod
    def close(self):
        """ Close file """
        if self.closed:
            return
        if self.mode == 'rb':
            self.cache = None
        else:
            if not self.forced:
                # Upload whatever remains in the buffer
                self.flush(force=True)
            else:
                logger.debug("close with forced=True, bypassing final flush.")
                assert self.buffer.tell() == 0
            # Parent listing is now stale: a new object exists
            self.gcsfs.invalidate_cache(
                posixpath.dirname("/".join([self.bucket, self.key])))
        self.closed = True
    def readable(self):
        """Return whether the GCSFile was opened for reading"""
        return self.mode == 'rb'
    def seekable(self):
        """Return whether the GCSFile is seekable (only in read mode)"""
        return self.readable()
    def writable(self):
        """Return whether the GCSFile was opened for writing"""
        return self.mode in {'wb', 'ab'}
    @_tracemethod
    def __del__(self):
        # Best-effort close at garbage collection time
        self.close()
    def __str__(self):
        return "<GCSFile %s/%s>" % (self.bucket, self.key)
    __repr__ = __str__
    @_tracemethod
    def __enter__(self):
        return self
    @_tracemethod
    def __exit__(self, *args):
        self.close()
@_tracemethod
def _fetch_range(obj_dict, session, start=None, end=None):
    """ Get data from GCS
    obj_dict : an entry from ls() or info()
    session: requests.Session instance
    start, end : None or integers
        if not both None, fetch only given range
    """
    if start is not None or end is not None:
        start = start or 0
        end = end or 0
        # NOTE(review): if only ``start`` is given, ``end`` defaults to 0
        # and the header becomes 'bytes=<start>--1', which is not a valid
        # range -- confirm that callers always pass both bounds.
        head = {'Range': 'bytes=%i-%i' % (start, end - 1)}
    else:
        head = None
    back = session.get(obj_dict['mediaLink'], headers=head)
    data = back.content
    # GCS returns this literal body for an unsatisfiable range request
    # (e.g. reading past end-of-file); treat it as empty
    if data == b'Request range not satisfiable':
        return b''
    return data
def put_object(credentials, bucket, name, data, session):
    """ Simple put, up to 5MB of data
    credentials : from auth()
    bucket : string
    name : object name
    data : binary
    session: requests.Session instance
    """
    out = session.post('https://www.googleapis.com/upload/storage/'
                       'v1/b/%s/o?uploadType=media&name=%s' % (
                           quote_plus(bucket), quote_plus(name)),
                       headers={'Authorization': 'Bearer ' +
                                credentials.access_token,
                                'Content-Type': 'application/octet-stream',
                                'Content-Length': len(data)}, data=data)
    # NOTE(review): ``assert`` is stripped under ``python -O``; raising an
    # explicit exception on a non-200 status would be more robust here.
    assert out.status_code == 200
def ensure_writable(b):
    """Return ``b`` in a form accepted by the BytesIO write buffer.
    On Python 2, ``array.array`` values are converted to their raw byte
    string; anything else is returned unchanged.
    """
    if PY2 and isinstance(b, array.array):
        return b.tostring()
    return b
|
#!/usr/bin/env python
#=============================================================================================
# MODULE DOCSTRING
#=============================================================================================
"""
Tools to build Yank experiments from a YAML configuration file.
"""
#=============================================================================================
# GLOBAL IMPORTS
#=============================================================================================
import os
import re
import copy
import yaml
import logging
logger = logging.getLogger(__name__)
import numpy as np
import openmoltools
from simtk import unit, openmm
import utils
import pipeline
from yank import Yank
from repex import ReplicaExchange, ThermodynamicState
from sampling import ModifiedHamiltonianExchange
#=============================================================================================
# UTILITY FUNCTIONS
#=============================================================================================
def compute_min_dist(mol_positions, *args):
    """Compute the minimum distance between a molecule and other molecules.

    All positions must be expressed in the same unit of measure.

    Parameters
    ----------
    mol_positions : numpy.ndarray
        Nx3 array (N atoms) with the positions of the molecule for which
        the minimum distance from the others is computed.

    Other parameters
    ----------------
    args
        One numpy.ndarray of atom positions per other molecule.

    Returns
    -------
    min_dist : float
        Smallest inter-atomic distance between mol_positions and any of
        the other position sets.
    """
    for other_positions in args:
        # Squared distances: one row per atom of mol_positions, holding
        # its distances to every atom of the other molecule
        sq_dists = np.array([((other_positions - atom) ** 2).sum(1)
                             for atom in mol_positions])
        nearest = np.sqrt(sq_dists.min())
        # Accumulate the running minimum; on the first pass min_dist is
        # still unbound, so seed it instead
        try:
            min_dist = min(min_dist, nearest)
        except UnboundLocalError:
            min_dist = nearest
    return min_dist
def remove_overlap(mol_positions, *args, **kwargs):
    """Remove any eventual overlap between a molecule and a set of others.
    The method both randomly shifts and rotates the molecule (when overlapping atoms
    are detected) until it does not clash with any other given molecule anymore. All
    the others are kept fixed.
    All the positions must be expressed in the same unit of measure.
    Parameters
    ----------
    mol_positions : numpy.ndarray
        An Nx3 array where, N is the number of atoms, containing the positions of
        the atoms of the molecule that we want to not clash with the others.
    min_distance : float, optional keyword (default 1.0)
        The minimum distance accepted to consider the molecule not clashing with
        the others. Must be in the same unit of measure of the positions.
    sigma : float, optional keyword (default 1.0)
        The maximum displacement for a single step. Must be in the same unit of
        measure of the positions.
    Other parameters
    ----------------
    args
        A series of numpy.ndarrays containing the positions of the atoms of the
        molecules that are kept fixed.
    Returns
    -------
    x : numpy.ndarray
        Positions of the atoms of the given molecules that do not clash.
    """
    # Work on a copy; the input positions are never modified
    x = np.copy(mol_positions)
    sigma = kwargs.get('sigma', 1.0)
    min_distance = kwargs.get('min_distance', 1.0)
    # Try until we have a non-overlapping conformation w.r.t. all fixed molecules
    while compute_min_dist(x, *args) <= min_distance:
        # Compute center of geometry
        x0 = x.mean(0)
        # Randomize orientation of ligand.
        q = ModifiedHamiltonianExchange._generate_uniform_quaternion()
        Rq = ModifiedHamiltonianExchange._rotation_matrix_from_quaternion(q)
        # Rotate rigidly about the center of geometry
        x = ((Rq * np.matrix(x - x0).T).T + x0).A
        # Choose a random displacement vector and translate
        x += sigma * np.random.randn(3)
    return x
def to_openmm_app(value):
    """Converter function to be used with validate_parameters().

    Parameters
    ----------
    value : str
        Name of an attribute of ``simtk.openmm.app`` (e.g. 'PME', 'OBC2').

    Returns
    -------
    The corresponding ``openmm.app`` attribute.

    Raises
    ------
    AttributeError
        If ``openmm.app`` has no attribute of that name.
    """
    # Parameter renamed from ``str``, which shadowed the builtin; all call
    # sites in this module invoke the converter positionally.
    return getattr(openmm.app, value)
#=============================================================================================
# BUILDER CLASS
#=============================================================================================
class YamlParseError(Exception):
    """Error raised while parsing a Yank YAML script.

    The message is also logged at error level on construction, so the
    failure reason is recorded even if the exception is caught upstream.
    """
    def __init__(self, message):
        logger.error(message)
        super(YamlParseError, self).__init__(message)
class YamlBuilder:
"""Parse YAML configuration file and build the experiment.
The relative paths indicated in the script are assumed to be relative to
the script directory. However, if YamlBuilder is initiated with a string
rather than a file path, the paths will be relative to the user's working
directory.
The class firstly perform a dry run to check if this is going to overwrite
some files and raises an exception if it finds already existing output folders
unless the options resume_setup or resume_simulation are True.
Properties
----------
yank_options : dict
The options specified in the parsed YAML file that will be passed to Yank.
These are not the full range of options specified in the script since some
of them are used to configure YamlBuilder and not the Yank object.
Examples
--------
>>> import textwrap
>>> setup_dir = utils.get_data_filename(os.path.join('..', 'examples',
... 'p-xylene-implicit', 'setup'))
>>> pxylene_path = os.path.join(setup_dir, 'ligand.tripos.mol2')
>>> lysozyme_path = os.path.join(setup_dir, 'receptor.pdbfixer.pdb')
>>> with utils.temporary_directory() as tmp_dir:
... yaml_content = '''
... ---
... options:
... number_of_iterations: 1
... output_dir: {}
... molecules:
... T4lysozyme:
... filepath: {}
... parameters: oldff/leaprc.ff99SBildn
... p-xylene:
... filepath: {}
... parameters: antechamber
... solvents:
... vacuum:
... nonbonded_method: NoCutoff
... experiment:
... components:
... receptor: T4lysozyme
... ligand: p-xylene
... solvent: vacuum
... '''.format(tmp_dir, lysozyme_path, pxylene_path)
>>> yaml_builder = YamlBuilder(textwrap.dedent(yaml_content))
>>> yaml_builder.build_experiment()
"""
SETUP_DIR = 'setup'
SETUP_SYSTEMS_DIR = os.path.join(SETUP_DIR, 'systems')
SETUP_MOLECULES_DIR = os.path.join(SETUP_DIR, 'molecules')
EXPERIMENTS_DIR = 'experiments'
DEFAULT_OPTIONS = {
'verbose': False,
'mpi': False,
'resume_setup': False,
'resume_simulation': False,
'output_dir': 'output/',
'temperature': 298 * unit.kelvin,
'pressure': 1 * unit.atmosphere,
'constraints': openmm.app.HBonds,
'hydrogen_mass': 1 * unit.amu
}
    @property
    def yank_options(self):
        # Script options that configure Yank rather than YamlBuilder itself
        return self._isolate_yank_options(self.options)
    def __init__(self, yaml_source):
        """Parse the given YAML configuration file.
        This does not build the actual experiment but simply checks that the syntax
        is correct and loads the configuration into memory.
        Parameters
        ----------
        yaml_source : str
            A path to the YAML script or the YAML content.
        Raises
        ------
        YamlParseError
            If the script is empty or any section fails validation.
        """
        self._oe_molecules = {}  # molecules generated by OpenEye
        self._fixed_pos_cache = {}  # positions of molecules given as files
        # TODO check version of yank-yaml language
        # TODO what if there are multiple streams in the YAML file?
        # Load YAML script and decide working directory for relative paths.
        # SECURITY NOTE(review): yaml.load without SafeLoader can execute
        # arbitrary Python via YAML tags; prefer yaml.safe_load if scripts
        # may come from untrusted sources.
        try:
            with open(yaml_source, 'r') as f:
                yaml_content = yaml.load(f)
            self._script_dir = os.path.dirname(yaml_source)
        except IOError:
            # Not a readable file path: treat yaml_source as YAML text
            yaml_content = yaml.load(yaml_source)
            self._script_dir = os.getcwd()
        if yaml_content is None:
            raise YamlParseError('The YAML file is empty!')
        # Save raw YAML content that will be needed when generating the YAML files
        self._raw_yaml = copy.deepcopy({key: yaml_content.get(key, {})
                                        for key in ['options', 'molecules', 'solvents']})
        # Parse each section
        self._parse_options(yaml_content)
        self._parse_molecules(yaml_content)
        self._parse_solvents(yaml_content)
        self._parse_experiments(yaml_content)
def build_experiment(self):
"""Set up and run all the Yank experiments."""
# Throw exception if there are no experiments
if len(self._experiments) == 0:
raise YamlParseError('No experiments specified!')
# Run all experiments with paths relative to the script directory
with utils.temporary_cd(self._script_dir):
self._check_setup_resume()
for output_dir, combination in self._expand_experiments():
self._run_experiment(combination, output_dir)
def _validate_options(self, options):
"""Return a dictionary with YamlBuilder and Yank options validated."""
template_options = self.DEFAULT_OPTIONS.copy()
template_options.update(Yank.default_parameters)
template_options.update(ReplicaExchange.default_parameters)
openmm_app_type = {'constraints': to_openmm_app}
try:
valid = utils.validate_parameters(options, template_options, check_unknown=True,
process_units_str=True, float_to_int=True,
special_conversions=openmm_app_type)
except (TypeError, ValueError) as e:
raise YamlParseError(str(e))
return valid
def _isolate_yank_options(self, options):
"""Return the options that do not belong to YamlBuilder."""
return {opt: val for opt, val in options.items()
if opt not in self.DEFAULT_OPTIONS}
def _parse_options(self, yaml_content):
"""Validate and store options in the script.
Parameters
----------
yaml_content : dict
The dictionary representing the YAML script loaded by yaml.load()
"""
# Merge options and metadata and validate
temp_options = yaml_content.get('options', {})
temp_options.update(yaml_content.get('metadata', {}))
# Validate options and fill in default values
self.options = self.DEFAULT_OPTIONS.copy()
self.options.update(self._validate_options(temp_options))
    def _parse_molecules(self, yaml_content):
        """Load molecules information and check that their syntax is correct.
        One and only one source must be specified (e.g. filepath, name). Also
        the parameters must be specified, and the extension of filepath must
        match one of the supported file formats.
        Parameters
        ----------
        yaml_content : dict
            The dictionary representing the YAML script loaded by yaml.load()
        """
        file_formats = set(['mol2', 'pdb'])
        sources = set(['filepath', 'name', 'smiles'])
        # Template for validate_parameters: values indicate expected types
        template_mol = {'filepath': 'str', 'name': 'str', 'smiles': 'str',
                        'parameters': 'str', 'epik': 0}
        self._molecules = yaml_content.get('molecules', {})
        # First validate and convert
        for molecule_id, molecule in self._molecules.items():
            try:
                self._molecules[molecule_id] = utils.validate_parameters(molecule, template_mol,
                                                                         check_unknown=True)
            except (TypeError, ValueError) as e:
                raise YamlParseError(str(e))
        err_msg = ''
        for molecule_id, molecule in self._molecules.items():
            fields = set(molecule.keys())
            # Check that only one source is specified
            specified_sources = sources & fields
            if not specified_sources or len(specified_sources) > 1:
                err_msg = ('need only one between {} for molecule {}').format(
                    ', '.join(list(sources)), molecule_id)
            # Check supported file formats
            elif 'filepath' in specified_sources:
                extension = os.path.splitext(molecule['filepath'])[1][1:]  # remove '.'
                if extension not in file_formats:
                    err_msg = 'molecule {}, only {} files supported'.format(
                        molecule_id, ', '.join(file_formats))
            # Check that parameters are specified
            if 'parameters' not in fields:
                err_msg = 'no parameters specified for molecule {}'.format(molecule_id)
            # Raise on the first molecule found invalid
            if err_msg != '':
                raise YamlParseError(err_msg)
def _parse_solvents(self, yaml_content):
    """Load solvents information and check that their syntax is correct.

    The option nonbonded_method must be specified. All quantities are converted
    to simtk.unit.Quantity objects or openmm.app.TYPE (e.g. app.PME, app.OBC2).
    This also performs some consistency checks to verify that the user did not
    mix implicit and explicit solvent parameters.

    Parameters
    ----------
    yaml_content : dict
        The dictionary representing the YAML script loaded by yaml.load()

    Raises
    ------
    YamlParseError
        If a solvent has unknown/invalid options, misses nonbonded_method, or
        mixes implicit and explicit solvent parameters.
    """
    template_parameters = {'nonbonded_method': openmm.app.PME, 'nonbonded_cutoff': 1 * unit.nanometer,
                           'implicit_solvent': openmm.app.OBC2, 'clearance': 10.0 * unit.angstroms}
    # These options arrive as strings in the YAML and must be converted to
    # the corresponding openmm.app objects (e.g. 'PME' -> openmm.app.PME)
    openmm_app_type = ('nonbonded_method', 'implicit_solvent')
    openmm_app_type = {option: to_openmm_app for option in openmm_app_type}

    self._solvents = yaml_content.get('solvents', {})

    # First validate and convert
    for solvent_id, solvent in self._solvents.items():
        try:
            self._solvents[solvent_id] = utils.validate_parameters(solvent, template_parameters,
                                                                   check_unknown=True, process_units_str=True,
                                                                   special_conversions=openmm_app_type)
        except (TypeError, ValueError, AttributeError) as e:
            raise YamlParseError(str(e))

    for solvent_id, solvent in self._solvents.items():
        err_msg = ''

        # Test mandatory parameters
        if 'nonbonded_method' not in solvent:
            err_msg = 'solvent {} must specify nonbonded_method'.format(solvent_id)
            raise YamlParseError(err_msg)

        # Test solvent consistency
        nonbonded_method = solvent['nonbonded_method']
        if nonbonded_method == openmm.app.NoCutoff:
            # NoCutoff means vacuum or implicit solvent: a cutoff is meaningless.
            # BUG FIX: the message previously read '... and and nonbonded_cutoff'.
            if 'nonbonded_cutoff' in solvent:
                err_msg = ('solvent {} specify both nonbonded_method: NoCutoff '
                           'and nonbonded_cutoff').format(solvent_id)
        else:
            # A cutoff-based method implies explicit solvent
            if 'implicit_solvent' in solvent:
                err_msg = ('solvent {} specify both nonbonded_method: {} '
                           'and implicit_solvent').format(solvent_id, nonbonded_method)
            elif 'clearance' not in solvent:
                err_msg = ('solvent {} uses explicit solvent but '
                           'no clearance specified').format(solvent_id)

        # Raise error
        if err_msg != '':
            raise YamlParseError(err_msg)
def _expand_experiments(self):
"""Generates all possible combinations of experiment.
Each generated experiment is uniquely named.
Returns
-------
output_dir : str
A unique path where to save the experiment output files relative to
the main output directory specified by the user in the options.
combination : dict
The dictionary describing a single experiment.
"""
output_dir = ''
for exp_name, experiment in self._experiments.items():
if len(self._experiments) > 1:
output_dir = exp_name
# Loop over all combinations
for name, combination in experiment.named_combinations(separator='_', max_name_length=40):
yield os.path.join(output_dir, name), combination
def _parse_experiments(self, yaml_content):
    """Perform dry run and validate components and options of every combination.

    Receptor, ligand and solvent must be already loaded. If they are not found
    an exception is raised. Experiments options are validated as well.

    Parameters
    ----------
    yaml_content : dict
        The dictionary representing the YAML script loaded by yaml.load()

    Raises
    ------
    YamlParseError
        If an experiment has missing/unknown components or invalid options.
    """
    experiment_template = {'components': {}, 'options': {}}
    components_template = {'receptor': 'str', 'ligand': 'str', 'solvent': 'str'}

    if 'experiments' not in yaml_content:
        self._experiments = {}
        return

    # The experiments section is either a single experiment or a list of
    # names of top-level sections, each describing one experiment.
    if isinstance(yaml_content['experiments'], list):
        self._experiments = {exp_name: utils.CombinatorialTree(yaml_content[exp_name])
                             for exp_name in yaml_content['experiments']}
    else:
        self._experiments = {'experiments': utils.CombinatorialTree(yaml_content['experiments'])}

    # Check validity of every experiment combination
    for exp_name, exp in self._expand_experiments():
        if exp_name == '':
            exp_name = 'experiments'

        # Check if we can identify components
        if 'components' not in exp:
            raise YamlParseError('Cannot find components for {}'.format(exp_name))
        components = exp['components']

        # Validate and check for unknowns
        try:
            utils.validate_parameters(exp, experiment_template, check_unknown=True)
            utils.validate_parameters(components, components_template, check_unknown=True)
            self._validate_options(exp.get('options', {}))
        except (ValueError, TypeError) as e:
            raise YamlParseError(str(e))

        # Check that components have been specified.
        # BUG FIX: use .get() so that a missing component key raises a clear
        # YamlParseError instead of an uncaught KeyError; also raise on the
        # first failing component so earlier errors cannot be masked.
        if components.get('receptor') not in self._molecules:
            raise YamlParseError('Cannot identify receptor for {}'.format(exp_name))
        if components.get('ligand') not in self._molecules:
            raise YamlParseError('Cannot identify ligand for {}'.format(exp_name))
        if components.get('solvent') not in self._solvents:
            raise YamlParseError('Cannot identify solvent for {}'.format(exp_name))
def _check_molecule_setup(self, output_dir, molecule_id):
"""Check whether the molecule has been set up already.
The molecule must be set up if it needs to be parametrize by antechamber
(and the gaff.mol2 and frcmod files do not exist) or if the molecule must
be generated by OpenEye. We set up the molecule in the second case even if
the final output files already exist since its initial position may change
from system to system in order to avoid overlapping atoms.
Parameters
----------
output_dir : str
The path to the main output directory specified by the user in the YAML options.
molecule_id : str
The id of the molecule indicated by the user in the YAML file.
Returns
-------
is_setup : bool
True if the molecule has been already set up.
molecule_dir : str
Directory where the files of the molecule are (or should) be stored.
parameters : str
If is_setup is true and the molecule must be parametrized, this is
the path to the parameters file to be used for the molecule. Otherwise
this is an empty string.
filepath : str
If is_setup is true and the molecule must be parametrized, this is
the path to the file describing the molecule. Otherwise this is an
empty string.
"""
filepath = ''
parameters = ''
is_setup = False
raw_molecule_descr = self._raw_yaml['molecules'][molecule_id]
molecule_dir = os.path.join(output_dir, self.SETUP_MOLECULES_DIR, molecule_id)
# Check that this molecule doesn't have be generated by OpenEye
# OpenEye and that the eventual antechamber output already exists
if 'filepath' in raw_molecule_descr:
# If it has to be parametrized, the antechamber files must exist
if raw_molecule_descr['parameters'] == 'antechamber':
parameters = os.path.join(molecule_dir, molecule_id + '.frcmod')
filepath = os.path.join(molecule_dir, molecule_id + '.gaff.mol2')
if os.path.isfile(parameters) and os.path.isfile(filepath):
is_setup = True
else:
is_setup = True
return is_setup, molecule_dir, parameters, filepath
@classmethod
def _check_system_setup(cls, output_dir, receptor_id, ligand_id, solvent_id):
"""Check whether the system has been set up already.
Parameters
----------
output_dir : str
The path to the main output directory specified by the user in the YAML options.
receptor_id : str
The id of the receptor indicated by the user in the YAML file.
ligand_id : str
The id of the ligand indicated by the user in the YAML file.
solvent_id : str
The id of the solvent indicated by the user in the YAML file.
Returns
-------
is_setup : bool
True if the system has been already set up.
system_dir : str
Directory where the files of the system are (or should) be stored.
"""
system_dir = '_'.join((receptor_id, ligand_id, solvent_id))
system_dir = os.path.join(output_dir, cls.SETUP_SYSTEMS_DIR, system_dir)
is_setup = (os.path.exists(os.path.join(system_dir, 'complex.prmtop')) and
os.path.exists(os.path.join(system_dir, 'complex.inpcrd')) and
os.path.exists(os.path.join(system_dir, 'solvent.prmtop')) and
os.path.exists(os.path.join(system_dir, 'solvent.inpcrd')))
return is_setup, system_dir
@classmethod
def _get_experiment_dir(cls, output_dir, experiment_dir):
"""Return directory where the experiment output files should be stored."""
return os.path.join(output_dir, cls.EXPERIMENTS_DIR, experiment_dir)
def _check_setup_resume(self):
    """Perform dry run to check if we are going to overwrite files.

    If we find folders that YamlBuilder should create we throw an Exception
    unless resume_setup or resume_simulation are found, in which case we
    assume we need to use the existing files. We never overwrite files, the
    user is responsible to delete them or move them.

    It's important to check all possible combinations at the beginning to
    avoid interrupting the user simulation after few experiments.

    Raises
    ------
    YamlParseError
        If an existing output directory is found and the corresponding
        resume option is not set.
    """
    err_msg = ''
    for exp_sub_dir, combination in self._expand_experiments():
        # Experiment-specific options (if any) take precedence over the
        # general ones parsed from the script's options section.
        try:
            output_dir = combination['options']['output_dir']
        except KeyError:
            output_dir = self.options['output_dir']
        try:
            resume_setup = combination['options']['resume_setup']
        except KeyError:
            resume_setup = self.options['resume_setup']
        try:
            resume_sim = combination['options']['resume_simulation']
        except KeyError:
            resume_sim = self.options['resume_simulation']

        # Identify components
        components = combination['components']
        receptor_id = components['receptor']
        ligand_id = components['ligand']
        solvent_id = components['solvent']

        # Check experiment dir: an existing one is an error unless the user
        # asked to resume the simulation.
        experiment_dir = self._get_experiment_dir(output_dir, exp_sub_dir)
        if os.path.exists(experiment_dir) and not resume_sim:
            err_msg = 'experiment directory {}'.format(experiment_dir)
            solving_option = 'resume_simulation'
        else:
            # Check system and molecule setup dirs; both are resolved by the
            # resume_setup option.
            is_sys_setup, system_dir = self._check_system_setup(output_dir, receptor_id,
                                                                ligand_id, solvent_id)
            if is_sys_setup and not resume_setup:
                err_msg = 'system setup directory {}'.format(system_dir)
            else:
                for molecule_id in [receptor_id, ligand_id]:
                    # param != '' means the molecule is parametrized with
                    # antechamber, i.e. setup files are actually produced.
                    is_setup, mol_dir, param = self._check_molecule_setup(output_dir, molecule_id)[:3]
                    if is_setup and param != '' and not resume_setup:
                        err_msg = 'molecule setup directory {}'.format(mol_dir)
                        break
            if err_msg != '':
                solving_option = 'resume_setup'

        # Check for errors
        if err_msg != '':
            err_msg += (' already exists; cowardly refusing to proceed. Move/delete '
                        'directory or set {} options').format(solving_option)
            raise YamlParseError(err_msg)
def _generate_molecule(self, molecule_id):
    """Generate a molecule using the OpenEye toolkit from its name or smiles.

    The molecule is charged with OpenEye's recommended AM1BCC charge
    selection scheme.

    Parameters
    ----------
    molecule_id : str
        The id of the molecule as given in the YAML script.

    Returns
    -------
    molecule : OEMol
        The generated molecule.

    Raises
    ------
    YamlParseError
        If the molecule description contains neither a name nor a smiles
        entry, or if the OpenEye toolkit is not installed.
    """
    mol_descr = self._molecules[molecule_id]
    try:
        if 'name' in mol_descr:
            molecule = openmoltools.openeye.iupac_to_oemol(mol_descr['name'])
        elif 'smiles' in mol_descr:
            molecule = openmoltools.openeye.smiles_to_oemol(mol_descr['smiles'])
        else:
            # BUG FIX: previously this fell through with `molecule` unbound,
            # producing an UnboundLocalError instead of a clear parse error.
            raise YamlParseError('Cannot generate molecule {}: '
                                 'no name or smiles specified'.format(molecule_id))
        molecule = openmoltools.openeye.get_charges(molecule, keep_confs=1)
    except ImportError as e:
        error_msg = ('requested molecule generation from name or smiles but '
                     'could not find OpenEye toolkit: ' + str(e))
        raise YamlParseError(error_msg)
    return molecule
def _setup_molecules(self, output_dir, *args):
    """Set up the files needed to generate the system for all the molecules.

    If OpenEye tools are installed, this generates the molecules when the source
    is not a file. If two (or more) molecules generated by OpenEye have
    overlapping atoms, the molecules are randomly shifted and rotated until the
    clash is resolved. With the OpenEye toolkit installed, we also perform a
    sanity check to verify that the molecules from files do not have overlapping
    atoms. An exception is raised if this is not the case.

    If the Schrodinger suite is installed, this can enumerate tautomeric and
    protonation states with epik when requested.

    This also parametrizes the molecule with antechamber when requested.

    Parameters
    ----------
    output_dir : str
        The path to the main output directory specified by the user in the YAML options

    Other parameters
    ----------------
    args
        All the molecule ids that compose the system. These molecules are the only
        ones considered when trying to resolve the overlapping atoms.
    """
    # Determine which molecules should have fixed positions
    # At the end of parametrization we update the 'filepath' key also for OpenEye-generated
    # molecules so we need to check that the molecule is not in self._oe_molecules as well
    file_mol_ids = {mol_id for mol_id in args if 'filepath' in self._molecules[mol_id] and
                    mol_id not in self._oe_molecules}

    # Generate missing molecules with OpenEye
    self._oe_molecules.update({mol_id: self._generate_molecule(mol_id) for mol_id in args
                               if mol_id not in file_mol_ids and mol_id not in self._oe_molecules})

    # Check that non-generated molecules don't have overlapping atoms
    # TODO this check should be available even without OpenEye
    # TODO also there should be an option allowing to solve the overlap in this case too?
    fixed_pos = {}  # positions of molecules from files of THIS setup
    if utils.is_openeye_installed():
        # We need positions as a list so we separate the ids and positions in two lists
        mol_id_list = list(file_mol_ids)
        positions = [0 for _ in mol_id_list]
        for i, mol_id in enumerate(mol_id_list):
            try:
                # Reuse positions cached by a previous setup when available
                positions[i] = self._fixed_pos_cache[mol_id]
            except KeyError:
                positions[i] = utils.get_oe_mol_positions(utils.read_oe_molecule(
                    self._molecules[mol_id]['filepath']))

        # Verify that distances between any pair of fixed molecules is big enough
        for i in range(len(positions) - 1):
            posi = positions[i]
            if compute_min_dist(posi, *positions[i+1:]) < 0.1:
                raise YamlParseError('The given molecules have overlapping atoms!')

        # Convert positions list to dictionary, this is needed to solve overlaps
        fixed_pos = {mol_id_list[i]: positions[i] for i in range(len(mol_id_list))}

        # Cache positions for future molecule setups
        self._fixed_pos_cache.update(fixed_pos)

    # Find and solve overlapping atoms in OpenEye generated molecules
    for mol_id in args:
        # Retrieve OpenEye-generated molecule; file-based molecules are skipped
        try:
            molecule = self._oe_molecules[mol_id]
        except KeyError:
            continue
        molecule_pos = utils.get_oe_mol_positions(molecule)

        # Remove overlap and save new positions
        if fixed_pos:
            molecule_pos = remove_overlap(molecule_pos, *(fixed_pos.values()),
                                          min_distance=1.0, sigma=1.0)
            utils.set_oe_mol_positions(molecule, molecule_pos)

        # Update fixed positions for next round
        fixed_pos[mol_id] = molecule_pos

    # Save parametrized molecules
    for mol_id in args:
        mol_descr = self._molecules[mol_id]
        is_setup, mol_dir, parameters, filepath = self._check_molecule_setup(output_dir, mol_id)

        # Have we already processed this molecule? Do we have to do it at all?
        # We don't want to create the output folder if we don't need to
        if is_setup:
            # Be sure that filepath and parameters point to the correct file
            # if this molecule must be parametrized with antechamber
            if parameters != '':
                mol_descr['parameters'] = parameters
                mol_descr['filepath'] = filepath
            continue

        # Create output directory if it doesn't exist
        if not os.path.exists(mol_dir):
            os.makedirs(mol_dir)

        # Write OpenEye generated molecules in mol2 files
        if mol_id in self._oe_molecules:
            # We update the 'filepath' key in the molecule description
            mol_descr['filepath'] = os.path.join(mol_dir, mol_id + '.mol2')

            # We set the residue name as the first three uppercase letters
            residue_name = re.sub('[^A-Za-z]+', '', mol_id.upper())
            # BUG FIX: write the molecule belonging to this id. The previous
            # code wrote the stale `molecule` variable left over from the
            # overlap-resolution loop (always the LAST OpenEye molecule),
            # saving the wrong structure when several molecules are generated.
            openmoltools.openeye.molecule_to_mol2(self._oe_molecules[mol_id],
                                                  mol_descr['filepath'],
                                                  residue_name=residue_name)

        # Enumerate protonation states with epik
        if 'epik' in mol_descr:
            epik_idx = mol_descr['epik']
            epik_output_file = os.path.join(mol_dir, mol_id + '-epik.mol2')
            utils.run_epik(mol_descr['filepath'], epik_output_file, extract_range=epik_idx)
            mol_descr['filepath'] = epik_output_file

        # Parametrize the molecule with antechamber
        if mol_descr['parameters'] == 'antechamber':
            # Generate parameters
            input_mol_path = os.path.abspath(mol_descr['filepath'])
            with utils.temporary_cd(mol_dir):
                openmoltools.amber.run_antechamber(mol_id, input_mol_path)

            # Save new parameters paths, this way if we try to
            # setup the molecule again it will just be skipped
            mol_descr['filepath'] = os.path.join(mol_dir, mol_id + '.gaff.mol2')
            mol_descr['parameters'] = os.path.join(mol_dir, mol_id + '.frcmod')
def _setup_system(self, output_dir, components):
    """Create the prmtop and inpcrd files from the given components.

    This calls _setup_molecules() so there's no need to call it ahead. The
    system files are generated with tleap. If no molecule specifies a general
    force field, leaprc.ff14SB is loaded.

    Parameters
    ----------
    output_dir : str
        The path to the main output directory specified by the user in the YAML options
    components : dict
        A dictionary containing the keys 'receptor', 'ligand' and 'solvent' with the ids
        of molecules and solvents

    Returns
    -------
    system_dir : str
        The path to the directory containing the prmtop and inpcrd files
    """
    # Identify components
    receptor_id = components['receptor']
    ligand_id = components['ligand']
    solvent_id = components['solvent']
    receptor = self._molecules[receptor_id]
    ligand = self._molecules[ligand_id]
    solvent = self._solvents[solvent_id]

    # Check if system has been already processed
    is_setup, system_dir = self._check_system_setup(output_dir, receptor_id,
                                                    ligand_id, solvent_id)
    if is_setup:
        return system_dir

    # We still need to check if system_dir exists because the set up may
    # have been interrupted
    if not os.path.exists(system_dir):
        os.makedirs(system_dir)

    # Setup molecules (generation/epik/antechamber as needed)
    self._setup_molecules(output_dir, receptor_id, ligand_id)

    # Create tleap script
    tleap = utils.TLeap()
    tleap.new_section('Load GAFF parameters')
    tleap.load_parameters('leaprc.gaff')

    # Check that AMBER force field is specified; fall back to ff14SB when
    # neither molecule's parameters entry names a leaprc file
    if not ('leaprc.' in receptor['parameters'] or 'leaprc.' in ligand['parameters']):
        tleap.load_parameters('leaprc.ff14SB')

    # Load receptor and ligand
    for group_name in ['receptor', 'ligand']:
        group = self._molecules[components[group_name]]
        tleap.new_section('Load ' + group_name)
        tleap.load_parameters(group['parameters'])
        tleap.load_group(name=group_name, file_path=group['filepath'])

    # Create complex
    tleap.new_section('Create complex')
    tleap.combine('complex', 'receptor', 'ligand')

    # Configure solvent. _parse_solvents() guarantees that non-NoCutoff
    # methods come with a clearance entry.
    if solvent['nonbonded_method'] == openmm.app.NoCutoff:
        if 'implicit_solvent' in solvent:  # GBSA implicit solvent
            tleap.new_section('Set GB radii to recommended values for OBC')
            tleap.add_commands('set default PBRadii mbondi2')
    else:  # explicit solvent
        tleap.new_section('Solvate systems')
        clearance = float(solvent['clearance'].value_in_unit(unit.angstroms))
        tleap.solvate(group='complex', water_model='TIP3PBOX', clearance=clearance)
        tleap.solvate(group='ligand', water_model='TIP3PBOX', clearance=clearance)

    # Check charge
    tleap.new_section('Check charge')
    tleap.add_commands('check complex', 'charge complex')

    # Save prmtop and inpcrd files.
    # NOTE(review): _check_system_setup() expects complex.inpcrd and
    # solvent.inpcrd to appear in system_dir — presumably TLeap.save_group
    # with a .prmtop path also writes the matching .inpcrd; confirm.
    tleap.new_section('Save prmtop and inpcrd files')
    tleap.save_group('complex', os.path.join(system_dir, 'complex.prmtop'))
    tleap.save_group('complex', os.path.join(system_dir, 'complex.pdb'))
    tleap.save_group('ligand', os.path.join(system_dir, 'solvent.prmtop'))
    tleap.save_group('ligand', os.path.join(system_dir, 'solvent.pdb'))

    # Save tleap script for reference
    tleap.export_script(os.path.join(system_dir, 'leap.in'))

    # Run tleap!
    tleap.run()

    return system_dir
def _generate_yaml(self, experiment, file_path):
"""Generate the minimum YAML file needed to reproduce the experiment.
Parameters
----------
experiment : dict
The dictionary describing a single experiment.
file_path : str
The path to the file to save.
"""
components = set(experiment['components'].values())
# Molecules section data
mol_section = {mol_id: molecule for mol_id, molecule in self._raw_yaml['molecules'].items()
if mol_id in components}
# Solvents section data
sol_section = {solvent_id: solvent for solvent_id, solvent in self._raw_yaml['solvents'].items()
if solvent_id in components}
# We pop the options section in experiment and merge it to the general one
exp_section = experiment.copy()
opt_section = self._raw_yaml['options'].copy()
opt_section.update(exp_section.pop('options', {}))
# Create YAML with the sections in order
yaml_content = yaml.dump({'options': opt_section}, default_flow_style=False, line_break='\n', explicit_start=True)
yaml_content += yaml.dump({'molecules': mol_section}, default_flow_style=False, line_break='\n')
yaml_content += yaml.dump({'solvents': sol_section}, default_flow_style=False, line_break='\n')
yaml_content += yaml.dump({'experiments': exp_section}, default_flow_style=False, line_break='\n')
# Export YAML into a file
with open(file_path, 'w') as f:
f.write(yaml_content)
def _run_experiment(self, experiment, experiment_dir):
    """Prepare and run a single experiment.

    Parameters
    ----------
    experiment : dict
        A dictionary describing a single experiment
    experiment_dir : str
        The directory where to store the output files relative to the main
        output directory as specified by the user in the YAML script
    """
    components = experiment['components']
    # '' means the single, unnamed experiment (see _expand_experiments)
    exp_name = 'experiments' if experiment_dir == '' else os.path.basename(experiment_dir)

    # Get and validate experiment sub-options; they override the global ones
    exp_opts = self.options.copy()
    exp_opts.update(self._validate_options(experiment.get('options', {})))
    yank_opts = self._isolate_yank_options(exp_opts)

    # Configure MPI, if requested
    if exp_opts['mpi']:
        from mpi4py import MPI
        # Synchronize all processes before starting the experiment
        MPI.COMM_WORLD.barrier()
        mpicomm = MPI.COMM_WORLD
    else:
        mpicomm = None

    # TODO configure platform and precision when they are fixed in Yank

    # Create directory and configure logger for this experiment.
    # An already-existing directory means a previous run to resume
    # (_check_setup_resume() has already vetted this).
    results_dir = self._get_experiment_dir(exp_opts['output_dir'], experiment_dir)
    if not os.path.isdir(results_dir):
        os.makedirs(results_dir)
        resume = False
    else:
        resume = True
    utils.config_root_logger(exp_opts['verbose'], os.path.join(results_dir, exp_name + '.log'))

    # Initialize simulation
    yank = Yank(results_dir, mpicomm=mpicomm, **yank_opts)

    if resume:
        yank.resume()
    else:
        # Export YAML file for reproducibility
        self._generate_yaml(experiment, os.path.join(results_dir, exp_name + '.yaml'))

        # Determine system files path (sets up molecules/system as needed)
        system_dir = self._setup_system(exp_opts['output_dir'], components)

        # Get ligand resname for alchemical atom selection; fall back to the
        # conventional 'MOL' residue name when the mol2 has none
        ligand_dsl = utils.get_mol2_resname(self._molecules[components['ligand']]['filepath'])
        if ligand_dsl is None:
            ligand_dsl = 'MOL'
        ligand_dsl = 'resname ' + ligand_dsl

        # System configuration: solvent-level settings first, then
        # experiment options override them
        create_system_filter = set(('nonbonded_method', 'nonbonded_cutoff', 'implicit_solvent',
                                    'constraints', 'hydrogen_mass'))
        solvent = self._solvents[components['solvent']]
        system_pars = {opt: solvent[opt] for opt in create_system_filter if opt in solvent}
        system_pars.update({opt: exp_opts[opt] for opt in create_system_filter
                            if opt in exp_opts})

        # Convert underscore_parameters to camelCase for OpenMM API
        system_pars = {utils.underscore_to_camelcase(opt): value
                       for opt, value in system_pars.items()}

        # Prepare system
        phases, systems, positions, atom_indices = pipeline.prepare_amber(system_dir, ligand_dsl, system_pars)

        # Create thermodynamic state
        thermodynamic_state = ThermodynamicState(temperature=exp_opts['temperature'],
                                                 pressure=exp_opts['pressure'])

        # Create new simulation
        yank.create(phases, systems, positions, atom_indices, thermodynamic_state)

    # Run the simulation!
    yank.run()
if __name__ == "__main__":
    # When executed as a script, run this module's doctests (e.g. the
    # YamlBuilder usage example).
    import doctest
    doctest.testmod()
# Fix YamlBuilder doctest
#!/usr/bin/env python
#=============================================================================================
# MODULE DOCSTRING
#=============================================================================================
"""
Tools to build Yank experiments from a YAML configuration file.
"""
#=============================================================================================
# GLOBAL IMPORTS
#=============================================================================================
import os
import re
import copy
import yaml
import logging
logger = logging.getLogger(__name__)
import numpy as np
import openmoltools
from simtk import unit, openmm
import utils
import pipeline
from yank import Yank
from repex import ReplicaExchange, ThermodynamicState
from sampling import ModifiedHamiltonianExchange
#=============================================================================================
# UTILITY FUNCTIONS
#=============================================================================================
def compute_min_dist(mol_positions, *args):
    """Compute the minimum distance between a molecule and a set of other molecules.

    All the positions must be expressed in the same unit of measure.

    Parameters
    ----------
    mol_positions : numpy.ndarray
        An Nx3 array, where N is the number of atoms, containing the positions
        of the atoms of the molecule for which we want to compute the minimum
        distance from the others.

    Other parameters
    ----------------
    args
        A series of numpy.ndarrays containing the positions of the atoms of
        the other molecules.

    Returns
    -------
    min_dist : float or None
        The minimum distance between mol_positions and the other sets of
        positions, or None when no other positions are given. (The previous
        implementation raised UnboundLocalError in that case, abusing the
        exception for control flow.)
    """
    min_dist = None
    for other_positions in args:
        # Pairwise squared distances via broadcasting:
        # shape (len(other_positions), len(mol_positions))
        deltas = other_positions[:, np.newaxis, :] - mol_positions[np.newaxis, :, :]
        current_min = np.sqrt((deltas ** 2).sum(-1).min())
        if min_dist is None or current_min < min_dist:
            min_dist = current_min
    return min_dist
def remove_overlap(mol_positions, *args, **kwargs):
    """Remove any eventual overlap between a molecule and a set of others.

    The method both randomly shifts and rotates the molecule (when overlapping
    atoms are detected) until it does not clash with any other given molecule
    anymore. All the others are kept fixed.

    All the positions must be expressed in the same unit of measure.

    Parameters
    ----------
    mol_positions : numpy.ndarray
        An Nx3 array, where N is the number of atoms, containing the positions
        of the atoms of the molecule that we want to not clash with the others.
    min_distance : float, optional
        The minimum distance accepted to consider the molecule not clashing
        with the others. Must be in the same unit of measure of the positions
        (default 1.0).
    sigma : float, optional
        The maximum displacement for a single step. Must be in the same unit
        of measure of the positions (default 1.0).

    Other parameters
    ----------------
    args
        A series of numpy.ndarrays containing the positions of the atoms of
        the molecules that are kept fixed.

    Returns
    -------
    x : numpy.ndarray
        Positions of the atoms of the given molecule that do not clash.

    Notes
    -----
    The search is stochastic; it is not guaranteed to terminate if the fixed
    molecules leave no room for the moving one.
    """
    x = np.copy(mol_positions)
    sigma = kwargs.get('sigma', 1.0)
    min_distance = kwargs.get('min_distance', 1.0)

    # Try until we have a non-overlapping conformation w.r.t. all fixed molecules
    while compute_min_dist(x, *args) <= min_distance:
        # Compute center of geometry
        x0 = x.mean(0)

        # Randomize orientation of ligand: rigid rotation about the center of
        # geometry (np.matrix algebra; .A converts back to a plain ndarray).
        q = ModifiedHamiltonianExchange._generate_uniform_quaternion()
        Rq = ModifiedHamiltonianExchange._rotation_matrix_from_quaternion(q)
        x = ((Rq * np.matrix(x - x0).T).T + x0).A

        # Choose a random displacement vector and translate the whole
        # molecule rigidly (the (3,) vector broadcasts over all atoms)
        x += sigma * np.random.randn(3)

    return x
def to_openmm_app(name):
    """Converter function to be used with validate_parameters().

    Parameters
    ----------
    name : str
        Name of an attribute of simtk.openmm.app (e.g. 'PME', 'NoCutoff',
        'OBC2', 'HBonds').

    Returns
    -------
    The corresponding openmm.app object.

    Notes
    -----
    The parameter was previously called ``str``, shadowing the builtin.
    """
    return getattr(openmm.app, name)
#=============================================================================================
# BUILDER CLASS
#=============================================================================================
class YamlParseError(Exception):
    """Signal an error while parsing or validating a Yank YAML script.

    The message is also sent to the module logger so the problem is recorded
    even when the exception is caught upstream.
    """
    def __init__(self, message):
        super(YamlParseError, self).__init__(message)
        # Record the problem in the log as well
        logger.error(message)
class YamlBuilder:
"""Parse YAML configuration file and build the experiment.
The relative paths indicated in the script are assumed to be relative to
the script directory. However, if YamlBuilder is initiated with a string
rather than a file path, the paths will be relative to the user's working
directory.
The class first performs a dry run to check whether it is going to overwrite
existing files, and raises an exception if it finds already existing output folders,
unless the options resume_setup or resume_simulation are True.
Properties
----------
yank_options : dict
The options specified in the parsed YAML file that will be passed to Yank.
These are not the full range of options specified in the script since some
of them are used to configure YamlBuilder and not the Yank object.
Examples
--------
>>> import textwrap
>>> setup_dir = utils.get_data_filename(os.path.join('..', 'examples',
... 'p-xylene-implicit', 'setup'))
>>> pxylene_path = os.path.join(setup_dir, 'ligand.tripos.mol2')
>>> lysozyme_path = os.path.join(setup_dir, 'receptor.pdbfixer.pdb')
>>> with utils.temporary_directory() as tmp_dir:
... yaml_content = '''
... ---
... options:
... number_of_iterations: 1
... output_dir: {}
... molecules:
... T4lysozyme:
... filepath: {}
... parameters: oldff/leaprc.ff99SBildn
... p-xylene:
... filepath: {}
... parameters: antechamber
... solvents:
... vacuum:
... nonbonded_method: NoCutoff
... experiments:
... components:
... receptor: T4lysozyme
... ligand: p-xylene
... solvent: vacuum
... '''.format(tmp_dir, lysozyme_path, pxylene_path)
>>> yaml_builder = YamlBuilder(textwrap.dedent(yaml_content))
>>> yaml_builder.build_experiment()
"""
# Layout of the output directory (relative to the user-specified output_dir)
SETUP_DIR = 'setup'
SETUP_SYSTEMS_DIR = os.path.join(SETUP_DIR, 'systems')
SETUP_MOLECULES_DIR = os.path.join(SETUP_DIR, 'molecules')
EXPERIMENTS_DIR = 'experiments'

# Default values for the options that configure YamlBuilder itself; every
# other option in the script is forwarded to Yank (see _isolate_yank_options)
DEFAULT_OPTIONS = {
    'verbose': False,
    'mpi': False,
    'resume_setup': False,
    'resume_simulation': False,
    'output_dir': 'output/',
    'temperature': 298 * unit.kelvin,
    'pressure': 1 * unit.atmosphere,
    'constraints': openmm.app.HBonds,
    'hydrogen_mass': 1 * unit.amu
}
@property
def yank_options(self):
    """dict: the validated script options to be passed to the Yank object.

    Excludes the options (DEFAULT_OPTIONS) that configure YamlBuilder itself.
    """
    return self._isolate_yank_options(self.options)
def __init__(self, yaml_source):
    """Parse the given YAML configuration file.

    This does not build the actual experiment but simply checks that the syntax
    is correct and loads the configuration into memory.

    Parameters
    ----------
    yaml_source : str
        A path to the YAML script or the YAML content itself.

    Raises
    ------
    YamlParseError
        If the YAML source is empty or any of its sections is invalid.
    """
    self._oe_molecules = {}  # molecules generated by OpenEye
    self._fixed_pos_cache = {}  # positions of molecules given as files

    # TODO check version of yank-yaml language
    # TODO what if there are multiple streams in the YAML file?
    # Load YAML script and decide working directory for relative paths:
    # if yaml_source is a readable file, paths are relative to its directory,
    # otherwise it is treated as the script content and paths are relative
    # to the current working directory.
    try:
        with open(yaml_source, 'r') as f:
            # NOTE(review): yaml.load without an explicit Loader can execute
            # arbitrary Python via YAML tags — do not parse untrusted scripts
            # (yaml.safe_load would be the safe alternative).
            yaml_content = yaml.load(f)
        self._script_dir = os.path.dirname(yaml_source)
    except IOError:
        yaml_content = yaml.load(yaml_source)
        self._script_dir = os.getcwd()

    if yaml_content is None:
        raise YamlParseError('The YAML file is empty!')

    # Save raw YAML content that will be needed when generating the YAML files
    self._raw_yaml = copy.deepcopy({key: yaml_content.get(key, {})
                                    for key in ['options', 'molecules', 'solvents']})

    # Parse each section
    self._parse_options(yaml_content)
    self._parse_molecules(yaml_content)
    self._parse_solvents(yaml_content)
    self._parse_experiments(yaml_content)
def build_experiment(self):
    """Set up and run every Yank experiment described by the script.

    Raises
    ------
    YamlParseError
        If no experiments were specified or an output folder would be
        overwritten (see _check_setup_resume).
    """
    if not self._experiments:
        raise YamlParseError('No experiments specified!')

    # All relative paths in the script refer to the script's directory
    with utils.temporary_cd(self._script_dir):
        # Fail early rather than after some experiments have already run
        self._check_setup_resume()
        for exp_output_dir, combination in self._expand_experiments():
            self._run_experiment(combination, exp_output_dir)
def _validate_options(self, options):
    """Return a dictionary with YamlBuilder and Yank options validated.

    Parameters
    ----------
    options : dict
        The raw options to validate.

    Raises
    ------
    YamlParseError
        If an option is unknown or its value cannot be converted.
    """
    # Accepted options are YamlBuilder's own plus those understood by the
    # Yank and ReplicaExchange constructors
    template_options = dict(self.DEFAULT_OPTIONS)
    template_options.update(Yank.default_parameters)
    template_options.update(ReplicaExchange.default_parameters)

    # 'constraints' arrives as a string and must become an openmm.app object
    special_conversions = {'constraints': to_openmm_app}
    try:
        return utils.validate_parameters(options, template_options, check_unknown=True,
                                         process_units_str=True, float_to_int=True,
                                         special_conversions=special_conversions)
    except (TypeError, ValueError) as e:
        raise YamlParseError(str(e))
def _isolate_yank_options(self, options):
    """Return a copy of options without the YamlBuilder-specific entries."""
    yank_only = {}
    for option_name, option_value in options.items():
        # Everything not claimed by DEFAULT_OPTIONS belongs to Yank
        if option_name not in self.DEFAULT_OPTIONS:
            yank_only[option_name] = option_value
    return yank_only
def _parse_options(self, yaml_content):
"""Validate and store options in the script.
Parameters
----------
yaml_content : dict
The dictionary representing the YAML script loaded by yaml.load()
"""
# Merge options and metadata and validate
temp_options = yaml_content.get('options', {})
temp_options.update(yaml_content.get('metadata', {}))
# Validate options and fill in default values
self.options = self.DEFAULT_OPTIONS.copy()
self.options.update(self._validate_options(temp_options))
    def _parse_molecules(self, yaml_content):
        """Load molecules information and check that their syntax is correct.

        One and only one source must be specified (e.g. filepath, name). Also
        the parameters must be specified, and the extension of filepath must
        match one of the supported file formats.

        Parameters
        ----------
        yaml_content : dict
            The dictionary representing the YAML script loaded by yaml.load()

        Raises
        ------
        YamlParseError
            If a molecule has unknown/ill-typed options, zero or multiple
            sources, an unsupported file extension, or no parameters.
        """
        file_formats = set(['mol2', 'pdb'])
        # Exactly one of these keys must identify where the molecule comes from.
        sources = set(['filepath', 'name', 'smiles'])
        # Template gives the expected type of each option ('epik' is an int index).
        template_mol = {'filepath': 'str', 'name': 'str', 'smiles': 'str',
                        'parameters': 'str', 'epik': 0}
        self._molecules = yaml_content.get('molecules', {})
        # First validate and convert
        for molecule_id, molecule in self._molecules.items():
            try:
                self._molecules[molecule_id] = utils.validate_parameters(molecule, template_mol,
                                                                         check_unknown=True)
            except (TypeError, ValueError) as e:
                raise YamlParseError(str(e))
        err_msg = ''
        for molecule_id, molecule in self._molecules.items():
            fields = set(molecule.keys())
            # Check that only one source is specified
            specified_sources = sources & fields
            if not specified_sources or len(specified_sources) > 1:
                err_msg = ('need only one between {} for molecule {}').format(
                    ', '.join(list(sources)), molecule_id)
            # Check supported file formats
            elif 'filepath' in specified_sources:
                extension = os.path.splitext(molecule['filepath'])[1][1:] # remove '.'
                if extension not in file_formats:
                    err_msg = 'molecule {}, only {} files supported'.format(
                        molecule_id, ', '.join(file_formats))
            # Check that parameters are specified
            # NOTE(review): this check can overwrite an earlier source/format
            # error for the same molecule; only the last message is raised.
            if 'parameters' not in fields:
                err_msg = 'no parameters specified for molecule {}'.format(molecule_id)
            if err_msg != '':
                raise YamlParseError(err_msg)
def _parse_solvents(self, yaml_content):
"""Load solvents information and check that their syntax is correct.
The option nonbonded_method must be specified. All quantities are converted to
simtk.app.Quantity objects or openmm.app.TYPE (e.g. app.PME, app.OBC2). This
also perform some consistency checks to verify that the user did not mix
implicit and explicit solvent parameters.
Parameters
----------
yaml_content : dict
The dictionary representing the YAML script loaded by yaml.load()
"""
template_parameters = {'nonbonded_method': openmm.app.PME, 'nonbonded_cutoff': 1 * unit.nanometer,
'implicit_solvent': openmm.app.OBC2, 'clearance': 10.0 * unit.angstroms}
openmm_app_type = ('nonbonded_method', 'implicit_solvent')
openmm_app_type = {option: to_openmm_app for option in openmm_app_type}
self._solvents = yaml_content.get('solvents', {})
# First validate and convert
for solvent_id, solvent in self._solvents.items():
try:
self._solvents[solvent_id] = utils.validate_parameters(solvent, template_parameters,
check_unknown=True, process_units_str=True,
special_conversions=openmm_app_type)
except (TypeError, ValueError, AttributeError) as e:
raise YamlParseError(str(e))
err_msg = ''
for solvent_id, solvent in self._solvents.items():
# Test mandatory parameters
if 'nonbonded_method' not in solvent:
err_msg = 'solvent {} must specify nonbonded_method'.format(solvent_id)
raise YamlParseError(err_msg)
# Test solvent consistency
nonbonded_method = solvent['nonbonded_method']
if nonbonded_method == openmm.app.NoCutoff:
if 'nonbonded_cutoff' in solvent:
err_msg = ('solvent {} specify both nonbonded_method: NoCutoff and '
'and nonbonded_cutoff').format(solvent_id)
else:
if 'implicit_solvent' in solvent:
err_msg = ('solvent {} specify both nonbonded_method: {} '
'and implicit_solvent').format(solvent_id, nonbonded_method)
elif 'clearance' not in solvent:
err_msg = ('solvent {} uses explicit solvent but '
'no clearance specified').format(solvent_id)
# Raise error
if err_msg != '':
raise YamlParseError(err_msg)
def _expand_experiments(self):
"""Generates all possible combinations of experiment.
Each generated experiment is uniquely named.
Returns
-------
output_dir : str
A unique path where to save the experiment output files relative to
the main output directory specified by the user in the options.
combination : dict
The dictionary describing a single experiment.
"""
output_dir = ''
for exp_name, experiment in self._experiments.items():
if len(self._experiments) > 1:
output_dir = exp_name
# Loop over all combinations
for name, combination in experiment.named_combinations(separator='_', max_name_length=40):
yield os.path.join(output_dir, name), combination
    def _parse_experiments(self, yaml_content):
        """Perform dry run and validate components and options of every combination.

        Receptor, ligand and solvent must be already loaded. If they are not found
        an exception is raised. Experiments options are validated as well.

        Parameters
        ----------
        yaml_content : dict
            The dictionary representing the YAML script loaded by yaml.load()
        """
        # Templates used only for type/unknown-key validation below.
        experiment_template = {'components': {}, 'options': {}}
        components_template = {'receptor': 'str', 'ligand': 'str', 'solvent': 'str'}
        if 'experiments' not in yaml_content:
            self._experiments = {}
            return
        # Check if there is a sequence of experiments or a single one
        if isinstance(yaml_content['experiments'], list):
            # NOTE(review): in the list form each entry is treated as the name
            # of a top-level section of the script that describes that
            # experiment (yaml_content[exp_name]) -- confirm against the
            # YAML language spec this code targets.
            self._experiments = {exp_name: utils.CombinatorialTree(yaml_content[exp_name])
                                 for exp_name in yaml_content['experiments']}
        else:
            self._experiments = {'experiments': utils.CombinatorialTree(yaml_content['experiments'])}
        # Check validity of every experiment combination
        err_msg = ''
        for exp_name, exp in self._expand_experiments():
            if exp_name == '':
                exp_name = 'experiments'
            # Check if we can identify components
            if 'components' not in exp:
                raise YamlParseError('Cannot find components for {}'.format(exp_name))
            components = exp['components']
            # Validate and check for unknowns
            try:
                utils.validate_parameters(exp, experiment_template, check_unknown=True)
                utils.validate_parameters(components, components_template, check_unknown=True)
                self._validate_options(exp.get('options', {}))
            except (ValueError, TypeError) as e:
                raise YamlParseError(str(e))
            # Check that components have been specified
            if components['receptor'] not in self._molecules:
                err_msg = 'Cannot identify receptor for {}'.format(exp_name)
            elif components['ligand'] not in self._molecules:
                err_msg = 'Cannot identify ligand for {}'.format(exp_name)
            elif components['solvent'] not in self._solvents:
                err_msg = 'Cannot identify solvent for {}'.format(exp_name)
            if err_msg != '':
                raise YamlParseError(err_msg)
def _check_molecule_setup(self, output_dir, molecule_id):
"""Check whether the molecule has been set up already.
The molecule must be set up if it needs to be parametrize by antechamber
(and the gaff.mol2 and frcmod files do not exist) or if the molecule must
be generated by OpenEye. We set up the molecule in the second case even if
the final output files already exist since its initial position may change
from system to system in order to avoid overlapping atoms.
Parameters
----------
output_dir : str
The path to the main output directory specified by the user in the YAML options.
molecule_id : str
The id of the molecule indicated by the user in the YAML file.
Returns
-------
is_setup : bool
True if the molecule has been already set up.
molecule_dir : str
Directory where the files of the molecule are (or should) be stored.
parameters : str
If is_setup is true and the molecule must be parametrized, this is
the path to the parameters file to be used for the molecule. Otherwise
this is an empty string.
filepath : str
If is_setup is true and the molecule must be parametrized, this is
the path to the file describing the molecule. Otherwise this is an
empty string.
"""
filepath = ''
parameters = ''
is_setup = False
raw_molecule_descr = self._raw_yaml['molecules'][molecule_id]
molecule_dir = os.path.join(output_dir, self.SETUP_MOLECULES_DIR, molecule_id)
# Check that this molecule doesn't have be generated by OpenEye
# OpenEye and that the eventual antechamber output already exists
if 'filepath' in raw_molecule_descr:
# If it has to be parametrized, the antechamber files must exist
if raw_molecule_descr['parameters'] == 'antechamber':
parameters = os.path.join(molecule_dir, molecule_id + '.frcmod')
filepath = os.path.join(molecule_dir, molecule_id + '.gaff.mol2')
if os.path.isfile(parameters) and os.path.isfile(filepath):
is_setup = True
else:
is_setup = True
return is_setup, molecule_dir, parameters, filepath
@classmethod
def _check_system_setup(cls, output_dir, receptor_id, ligand_id, solvent_id):
"""Check whether the system has been set up already.
Parameters
----------
output_dir : str
The path to the main output directory specified by the user in the YAML options.
receptor_id : str
The id of the receptor indicated by the user in the YAML file.
ligand_id : str
The id of the ligand indicated by the user in the YAML file.
solvent_id : str
The id of the solvent indicated by the user in the YAML file.
Returns
-------
is_setup : bool
True if the system has been already set up.
system_dir : str
Directory where the files of the system are (or should) be stored.
"""
system_dir = '_'.join((receptor_id, ligand_id, solvent_id))
system_dir = os.path.join(output_dir, cls.SETUP_SYSTEMS_DIR, system_dir)
is_setup = (os.path.exists(os.path.join(system_dir, 'complex.prmtop')) and
os.path.exists(os.path.join(system_dir, 'complex.inpcrd')) and
os.path.exists(os.path.join(system_dir, 'solvent.prmtop')) and
os.path.exists(os.path.join(system_dir, 'solvent.inpcrd')))
return is_setup, system_dir
@classmethod
def _get_experiment_dir(cls, output_dir, experiment_dir):
"""Return directory where the experiment output files should be stored."""
return os.path.join(output_dir, cls.EXPERIMENTS_DIR, experiment_dir)
    def _check_setup_resume(self):
        """Perform dry run to check if we are going to overwrite files.

        If we find folders that YamlBuilder should create we throw an Exception
        unless resume_setup or resume_simulation are found, in which case we
        assume we need to use the existing files. We never overwrite files, the
        user is responsible to delete them or move them.

        It's important to check all possible combinations at the beginning to
        avoid interrupting the user simulation after few experiments.

        Raises
        ------
        YamlParseError
            If an experiment/setup directory exists and the corresponding
            resume option is not set.
        """
        err_msg = ''
        for exp_sub_dir, combination in self._expand_experiments():
            # Per-experiment options override the global ones when present.
            try:
                output_dir = combination['options']['output_dir']
            except KeyError:
                output_dir = self.options['output_dir']
            try:
                resume_setup = combination['options']['resume_setup']
            except KeyError:
                resume_setup = self.options['resume_setup']
            try:
                resume_sim = combination['options']['resume_simulation']
            except KeyError:
                resume_sim = self.options['resume_simulation']
            # Identify components
            components = combination['components']
            receptor_id = components['receptor']
            ligand_id = components['ligand']
            solvent_id = components['solvent']
            # Check experiment dir
            experiment_dir = self._get_experiment_dir(output_dir, exp_sub_dir)
            if os.path.exists(experiment_dir) and not resume_sim:
                err_msg = 'experiment directory {}'.format(experiment_dir)
                solving_option = 'resume_simulation'
            else:
                # Check system and molecule setup dirs
                is_sys_setup, system_dir = self._check_system_setup(output_dir, receptor_id,
                                                                    ligand_id, solvent_id)
                if is_sys_setup and not resume_setup:
                    err_msg = 'system setup directory {}'.format(system_dir)
                else:
                    for molecule_id in [receptor_id, ligand_id]:
                        # Only the first three return values matter here; a
                        # non-empty 'param' means antechamber output exists.
                        is_setup, mol_dir, param = self._check_molecule_setup(output_dir, molecule_id)[:3]
                        if is_setup and param != '' and not resume_setup:
                            err_msg = 'molecule setup directory {}'.format(mol_dir)
                            break
                if err_msg != '':
                    solving_option = 'resume_setup'
            # Check for errors
            if err_msg != '':
                err_msg += (' already exists; cowardly refusing to proceed. Move/delete '
                            'directory or set {} options').format(solving_option)
                raise YamlParseError(err_msg)
def _generate_molecule(self, molecule_id):
"""Generate molecule using the OpenEye toolkit from name or smiles.
The molecules is charged with OpenEye's recommended AM1BCC charge
selection scheme.
Parameters
----------
molecule_id : str
The id of the molecule as given in the YAML script
Returns
-------
molecule : OEMol
The generated molecule.
"""
mol_descr = self._molecules[molecule_id]
try:
if 'name' in mol_descr:
molecule = openmoltools.openeye.iupac_to_oemol(mol_descr['name'])
elif 'smiles' in mol_descr:
molecule = openmoltools.openeye.smiles_to_oemol(mol_descr['smiles'])
molecule = openmoltools.openeye.get_charges(molecule, keep_confs=1)
except ImportError as e:
error_msg = ('requested molecule generation from name or smiles but '
'could not find OpenEye toolkit: ' + str(e))
raise YamlParseError(error_msg)
return molecule
    def _setup_molecules(self, output_dir, *args):
        """Set up the files needed to generate the system for all the molecules.

        If OpenEye tools are installed, this generate the molecules when the source is
        not a file. If two (or more) molecules generated by OpenEye have overlapping
        atoms, the molecules are randomly shifted and rotated until the clash is resolved.
        With the OpenEye toolkit installed, we also perform a sanity check to verify that
        the molecules from files do not have overlapping atoms. An Exception is raised if
        this is not the case.

        If the Schrodinger's suite is install, this can enumerate tautomeric and protonation
        states with epik when requested.

        This also parametrize the molecule with antechamber when requested.

        Parameters
        ----------
        output_dir : str
            The path to the main output directory specified by the user in the YAML options

        Other parameters
        ----------------
        args
            All the molecules ids that compose the system. These molecules are the only
            ones considered when trying to resolve the overlapping atoms.
        """
        # Determine which molecules should have fixed positions
        # At the end of parametrization we update the 'filepath' key also for OpenEye-generated
        # molecules so we need to check that the molecule is not in self._oe_molecules as well
        file_mol_ids = {mol_id for mol_id in args if 'filepath' in self._molecules[mol_id] and
                        mol_id not in self._oe_molecules}
        # Generate missing molecules with OpenEye
        self._oe_molecules.update({mol_id: self._generate_molecule(mol_id) for mol_id in args
                                   if mol_id not in file_mol_ids and mol_id not in self._oe_molecules})
        # Check that non-generated molecules don't have overlapping atoms
        # TODO this check should be available even without OpenEye
        # TODO also there should be an option allowing to solve the overlap in this case too?
        fixed_pos = {}  # positions of molecules from files of THIS setup
        if utils.is_openeye_installed():
            # We need positions as a list so we separate the ids and positions in two lists
            mol_id_list = list(file_mol_ids)
            positions = [0 for _ in mol_id_list]
            for i, mol_id in enumerate(mol_id_list):
                # Reuse cached positions when this molecule was read before.
                try:
                    positions[i] = self._fixed_pos_cache[mol_id]
                except KeyError:
                    positions[i] = utils.get_oe_mol_positions(utils.read_oe_molecule(
                        self._molecules[mol_id]['filepath']))
            # Verify that distances between any pair of fixed molecules is big enough
            for i in range(len(positions) - 1):
                posi = positions[i]
                if compute_min_dist(posi, *positions[i+1:]) < 0.1:
                    raise YamlParseError('The given molecules have overlapping atoms!')
            # Convert positions list to dictionary, this is needed to solve overlaps
            fixed_pos = {mol_id_list[i]: positions[i] for i in range(len(mol_id_list))}
            # Cache positions for future molecule setups
            self._fixed_pos_cache.update(fixed_pos)
        # Find and solve overlapping atoms in OpenEye generated molecules
        for mol_id in args:
            # Retrive OpenEye-generated molecule
            try:
                molecule = self._oe_molecules[mol_id]
            except KeyError:
                continue
            molecule_pos = utils.get_oe_mol_positions(molecule)
            # Remove overlap and save new positions
            if fixed_pos:
                molecule_pos = remove_overlap(molecule_pos, *(fixed_pos.values()),
                                              min_distance=1.0, sigma=1.0)
                utils.set_oe_mol_positions(molecule, molecule_pos)
            # Update fixed positions for next round
            fixed_pos[mol_id] = molecule_pos
        # Save parametrized molecules
        for mol_id in args:
            mol_descr = self._molecules[mol_id]
            is_setup, mol_dir, parameters, filepath = self._check_molecule_setup(output_dir, mol_id)
            # Have we already processed this molecule? Do we have to do it at all?
            # We don't want to create the output folder if we don't need to
            if is_setup:
                # Be sure that filepath and parameters point to the correct file
                # if this molecule must be parametrize with antechamber
                if parameters != '':
                    mol_descr['parameters'] = parameters
                    mol_descr['filepath'] = filepath
                continue
            # Create output directory if it doesn't exist
            if not os.path.exists(mol_dir):
                os.makedirs(mol_dir)
            # Write OpenEye generated molecules in mol2 files
            if mol_id in self._oe_molecules:
                # We update the 'filepath' key in the molecule description
                mol_descr['filepath'] = os.path.join(mol_dir, mol_id + '.mol2')
                # We set the residue name as the first three uppercase letters
                residue_name = re.sub('[^A-Za-z]+', '', mol_id.upper())
                # NOTE(review): 'molecule' is the leftover loop variable from the
                # overlap-resolution loop above; presumably this should be
                # self._oe_molecules[mol_id] -- works only when the last OE
                # molecule processed is mol_id's. Confirm and fix upstream.
                openmoltools.openeye.molecule_to_mol2(molecule, mol_descr['filepath'],
                                                      residue_name=residue_name)
            # Enumerate protonation states with epik
            if 'epik' in mol_descr:
                epik_idx = mol_descr['epik']
                epik_output_file = os.path.join(mol_dir, mol_id + '-epik.mol2')
                utils.run_epik(mol_descr['filepath'], epik_output_file, extract_range=epik_idx)
                mol_descr['filepath'] = epik_output_file
            # Parametrize the molecule with antechamber
            if mol_descr['parameters'] == 'antechamber':
                # Generate parameters
                input_mol_path = os.path.abspath(mol_descr['filepath'])
                with utils.temporary_cd(mol_dir):
                    openmoltools.amber.run_antechamber(mol_id, input_mol_path)
                # Save new parameters paths, this way if we try to
                # setup the molecule again it will just be skipped
                mol_descr['filepath'] = os.path.join(mol_dir, mol_id + '.gaff.mol2')
                mol_descr['parameters'] = os.path.join(mol_dir, mol_id + '.frcmod')
    def _setup_system(self, output_dir, components):
        """Create the prmtop and inpcrd files from the given components.

        This calls _setup_molecules() so there's no need to call it ahead. The
        system files are generated with tleap. If no molecule specify a general
        force field, leaprc.ff14SB is loaded.

        Parameters
        ----------
        output_dir : str
            The path to the main output directory specified by the user in the YAML options
        components : dict
            A dictionary containing the keys 'receptor', 'ligand' and 'solvent' with the ids
            of molecules and solvents

        Returns
        -------
        system_dir : str
            The path to the directory containing the prmtop and inpcrd files
        """
        # Identify components
        receptor_id = components['receptor']
        ligand_id = components['ligand']
        solvent_id = components['solvent']
        receptor = self._molecules[receptor_id]
        ligand = self._molecules[ligand_id]
        solvent = self._solvents[solvent_id]
        # Check if system has been already processed
        is_setup, system_dir = self._check_system_setup(output_dir, receptor_id,
                                                        ligand_id, solvent_id)
        if is_setup:
            return system_dir
        # We still need to check if system_dir exists because the set up may
        # have been interrupted
        if not os.path.exists(system_dir):
            os.makedirs(system_dir)
        # Setup molecules
        self._setup_molecules(output_dir, receptor_id, ligand_id)
        # Create tleap script
        tleap = utils.TLeap()
        tleap.new_section('Load GAFF parameters')
        tleap.load_parameters('leaprc.gaff')
        # Check that AMBER force field is specified
        if not ('leaprc.' in receptor['parameters'] or 'leaprc.' in ligand['parameters']):
            tleap.load_parameters('leaprc.ff14SB')
        # Load receptor and ligand
        for group_name in ['receptor', 'ligand']:
            group = self._molecules[components[group_name]]
            tleap.new_section('Load ' + group_name)
            tleap.load_parameters(group['parameters'])
            tleap.load_group(name=group_name, file_path=group['filepath'])
        # Create complex
        tleap.new_section('Create complex')
        tleap.combine('complex', 'receptor', 'ligand')
        # Configure solvent
        if solvent['nonbonded_method'] == openmm.app.NoCutoff:
            if 'implicit_solvent' in solvent:  # GBSA implicit solvent
                tleap.new_section('Set GB radii to recommended values for OBC')
                tleap.add_commands('set default PBRadii mbondi2')
        else:  # explicit solvent
            tleap.new_section('Solvate systems')
            # 'clearance' is a simtk Quantity; tleap wants a plain number in angstroms.
            clearance = float(solvent['clearance'].value_in_unit(unit.angstroms))
            tleap.solvate(group='complex', water_model='TIP3PBOX', clearance=clearance)
            tleap.solvate(group='ligand', water_model='TIP3PBOX', clearance=clearance)
        # Check charge
        tleap.new_section('Check charge')
        tleap.add_commands('check complex', 'charge complex')
        # Save prmtop and inpcrd files
        # NOTE(review): _check_system_setup() looks for complex/solvent .inpcrd
        # files while groups here are saved as .prmtop/.pdb -- presumably
        # TLeap.save_group also emits the inpcrd; confirm.
        tleap.new_section('Save prmtop and inpcrd files')
        tleap.save_group('complex', os.path.join(system_dir, 'complex.prmtop'))
        tleap.save_group('complex', os.path.join(system_dir, 'complex.pdb'))
        tleap.save_group('ligand', os.path.join(system_dir, 'solvent.prmtop'))
        tleap.save_group('ligand', os.path.join(system_dir, 'solvent.pdb'))
        # Save tleap script for reference
        tleap.export_script(os.path.join(system_dir, 'leap.in'))
        # Run tleap!
        tleap.run()
        return system_dir
def _generate_yaml(self, experiment, file_path):
"""Generate the minimum YAML file needed to reproduce the experiment.
Parameters
----------
experiment : dict
The dictionary describing a single experiment.
file_path : str
The path to the file to save.
"""
components = set(experiment['components'].values())
# Molecules section data
mol_section = {mol_id: molecule for mol_id, molecule in self._raw_yaml['molecules'].items()
if mol_id in components}
# Solvents section data
sol_section = {solvent_id: solvent for solvent_id, solvent in self._raw_yaml['solvents'].items()
if solvent_id in components}
# We pop the options section in experiment and merge it to the general one
exp_section = experiment.copy()
opt_section = self._raw_yaml['options'].copy()
opt_section.update(exp_section.pop('options', {}))
# Create YAML with the sections in order
yaml_content = yaml.dump({'options': opt_section}, default_flow_style=False, line_break='\n', explicit_start=True)
yaml_content += yaml.dump({'molecules': mol_section}, default_flow_style=False, line_break='\n')
yaml_content += yaml.dump({'solvents': sol_section}, default_flow_style=False, line_break='\n')
yaml_content += yaml.dump({'experiments': exp_section}, default_flow_style=False, line_break='\n')
# Export YAML into a file
with open(file_path, 'w') as f:
f.write(yaml_content)
    def _run_experiment(self, experiment, experiment_dir):
        """Prepare and run a single experiment.

        Parameters
        ----------
        experiment : dict
            A dictionary describing a single experiment
        experiment_dir : str
            The directory where to store the output files relative to the main
            output directory as specified by the user in the YAML script
        """
        components = experiment['components']
        # With a single experiment the sub-dir is '' and the log/yaml files use
        # the generic name 'experiments'.
        exp_name = 'experiments' if experiment_dir == '' else os.path.basename(experiment_dir)
        # Get and validate experiment sub-options
        exp_opts = self.options.copy()
        exp_opts.update(self._validate_options(experiment.get('options', {})))
        yank_opts = self._isolate_yank_options(exp_opts)
        # Configure MPI, if requested
        if exp_opts['mpi']:
            from mpi4py import MPI
            # Synchronize all ranks before starting the experiment.
            MPI.COMM_WORLD.barrier()
            mpicomm = MPI.COMM_WORLD
        else:
            mpicomm = None
        # TODO configure platform and precision when they are fixed in Yank
        # Create directory and configure logger for this experiment
        results_dir = self._get_experiment_dir(exp_opts['output_dir'], experiment_dir)
        # An existing results directory means a previous run to resume from.
        if not os.path.isdir(results_dir):
            os.makedirs(results_dir)
            resume = False
        else:
            resume = True
        utils.config_root_logger(exp_opts['verbose'], os.path.join(results_dir, exp_name + '.log'))
        # Initialize simulation
        yank = Yank(results_dir, mpicomm=mpicomm, **yank_opts)
        if resume:
            yank.resume()
        else:
            # Export YAML file for reproducibility
            self._generate_yaml(experiment, os.path.join(results_dir, exp_name + '.yaml'))
            # Determine system files path
            system_dir = self._setup_system(exp_opts['output_dir'], components)
            # Get ligand resname for alchemical atom selection
            ligand_dsl = utils.get_mol2_resname(self._molecules[components['ligand']]['filepath'])
            if ligand_dsl is None:
                # Fall back to the conventional 'MOL' residue name.
                ligand_dsl = 'MOL'
            ligand_dsl = 'resname ' + ligand_dsl
            # System configuration
            create_system_filter = set(('nonbonded_method', 'nonbonded_cutoff', 'implicit_solvent',
                                        'constraints', 'hydrogen_mass'))
            solvent = self._solvents[components['solvent']]
            # Solvent options first; experiment options override them.
            system_pars = {opt: solvent[opt] for opt in create_system_filter if opt in solvent}
            system_pars.update({opt: exp_opts[opt] for opt in create_system_filter
                                if opt in exp_opts})
            # Convert underscore_parameters to camelCase for OpenMM API
            system_pars = {utils.underscore_to_camelcase(opt): value
                           for opt, value in system_pars.items()}
            # Prepare system
            phases, systems, positions, atom_indices = pipeline.prepare_amber(system_dir, ligand_dsl, system_pars)
            # Create thermodynamic state
            thermodynamic_state = ThermodynamicState(temperature=exp_opts['temperature'],
                                                     pressure=exp_opts['pressure'])
            # Create new simulation
            yank.create(phases, systems, positions, atom_indices, thermodynamic_state)
        # Run the simulation!
        yank.run()
# Run this module's doctests when it is executed directly.
if __name__ == "__main__":
    import doctest
    doctest.testmod()
|
#!/usr/bin/env python2
"""Standardize images."""
from __future__ import division
import argparse
import numpy as np
import scipy as sp
import dwi.dataset
import dwi.mask
import dwi.plot
import dwi.util
def parse_args():
    """Parse command-line arguments."""
    parser = argparse.ArgumentParser(description=__doc__)
    parser.add_argument('--verbose', '-v', action='count',
                        help='increase verbosity')
    parser.add_argument('--samplelist',
                        help='sample list file')
    parser.add_argument('--subregiondir',
                        help='subregion bounding box directory')
    parser.add_argument('--pmapdir', default='dicoms_Mono_combinedDICOM',
                        help='input parametric map directory')
    parser.add_argument('--param', default='ADCm',
                        help='image parameter to use')
    parser.add_argument('--pc', metavar='I', nargs=2, type=float,
                        default=[0, 99.8],
                        help='minimum and maximum percentiles')
    parser.add_argument('--scale', metavar='I', nargs=2, type=int,
                        default=[1, 4095],
                        help='standard scale minimum and maximum')
    parser.add_argument('--cases', metavar='I', nargs='*', type=int, default=[],
                        help='case numbers')
    parser.add_argument('--scans', metavar='S', nargs='*', default=[],
                        help='scan identifiers')
    return parser.parse_args()
def set_landmarks(data, pc1, pc2):
    """Store percentile landmarks for each image dict in *data*.

    Adds keys 'p1'/'p2' (scores at percentiles pc1/pc2) and
    'landmarks'/'scores' (the deciles 10..90 and their percentile scores).
    """
    from scipy.stats import scoreatpercentile
    for item in data:
        img = item['img']
        item['p1'] = scoreatpercentile(img, pc1)
        item['p2'] = scoreatpercentile(img, pc2)
        deciles = list(range(10, 100, 10))
        item['landmarks'] = deciles
        item['scores'] = [scoreatpercentile(img, q) for q in deciles]
def map_landmarks(data, s1, s2):
    """Map each image's landmark scores onto the standard scale [s1, s2]."""
    for item in data:
        lo, hi = item['p1'], item['p2']
        item['mapped_scores'] = [int(map_onto_scale(lo, hi, s1, s2, score))
                                 for score in item['scores']]
def map_onto_scale(p1, p2, s1, s2, v):
    """Map value v from original scale [p1, p2] onto standard scale [s1, s2]."""
    assert p1 <= p2, (p1, p2)
    assert s1 <= s2, (s1, s2)
    if p1 == p2:
        # Degenerate original scale: the target scale must be degenerate too.
        assert s1 == s2, (s1, s2)
        return s1
    # Linear interpolation of v's relative position within [p1, p2].
    fraction = (v - p1) / (p2 - p1)
    return fraction * (s2 - s1) + s1
def transform(img, pc1, pc2, landmarks, s1, s2, mapped_scores):
    """Transform image onto standard scale.

    Builds a piecewise-linear map from the image's own percentile scores at
    [pc1, landmarks..., pc2] to the standard-scale values
    [s1, mapped_scores..., s2] and applies it element-wise.
    """
    from scipy.stats import scoreatpercentile
    pc = [pc1] + list(landmarks) + [pc2]
    mapped = [s1] + list(mapped_scores) + [s2]
    scores = [scoreatpercentile(img, i) for i in pc]
    # NOTE(review): np.int is removed in modern NumPy; this is Python 2 era code.
    r = np.zeros_like(img, dtype=np.int)
    for pos, v in np.ndenumerate(img):
        # Locate the segment of the piecewise-linear map containing v,
        # clamping out-of-range values to the first/last segment.
        slot = sum(v > s for s in scores)
        slot = np.clip(slot, 1, len(scores)-1)
        r[pos] = map_onto_scale(scores[slot-1], scores[slot],
                                mapped[slot-1], mapped[slot], v)
    print dwi.util.fivenum(r)
    return r
def plot(data, s1, s2):
    """Plot histograms of the original and standardized images, the landmark
    score curves, and finally the original/scaled image pairs."""
    import pylab as pl
    # Histograms of the original images.
    for d in data:
        img = d['img']
        hist, bin_edges = np.histogram(img, bins=1000, density=True)
        pl.plot(bin_edges[:-1], hist)
    pl.show()
    pl.close()
    # Histograms of the standardized images.
    # BUGFIX: the key used to be 'img_normalized', but the caller stores the
    # standardized image under 'img_scaled' (see the transform() call site),
    # so this raised KeyError.
    for d in data:
        img = d['img_scaled']
        hist, bin_edges = np.histogram(img, bins=1000, density=True)
        pl.plot(bin_edges[:-1], hist)
    pl.show()
    pl.close()
    # Landmark score curves, one per image.
    for d in data:
        y = d['scores']
        x = range(len(y))
        pl.plot(x, y)
    pl.show()
    pl.close()
    dwi.plot.show_images([[d['img'], d['img_scaled']] for d in data], vmin=s1,
                         vmax=s2)
# Script entry point: landmark-based intensity standardization of the images.
args = parse_args()
pc1, pc2 = args.pc
s1, s2 = args.scale
if args.verbose:
    print 'Reading data...'
data = dwi.dataset.dataset_read_samplelist(args.samplelist, args.cases,
                                           args.scans)
if args.subregiondir:
    dwi.dataset.dataset_read_subregions(data, args.subregiondir)
dwi.dataset.dataset_read_pmaps(data, args.pmapdir, [args.param])
if args.verbose:
    print 'Data:'
# Use slice 15 of the first parameter as the working image.
# NOTE(review): the hard-coded slice index assumes every volume has at least
# 16 slices -- confirm against the dataset.
for d in data:
    d['img'] = d['image'][15,...,0]
    print d['case'], d['scan'], d['img'].shape, dwi.util.fivenum(d['img'])
set_landmarks(data, pc1, pc2)
if args.verbose:
    print 'Landmark scores:'
    for d in data:
        print d['case'], d['scan'], (d['p1'], d['p2']), d['scores']
map_landmarks(data, s1, s2)
if args.verbose:
    print 'Mapped landmark scores:'
    for d in data:
        print d['case'], d['scan'], (s1, s2), d['mapped_scores']
# Average the mapped landmark scores over all images to obtain the standard scale.
mapped_scores = np.array([d['mapped_scores'] for d in data],
                         dtype=np.int)
print mapped_scores.shape
print np.mean(mapped_scores, axis=0, dtype=np.int)
print dwi.util.median(mapped_scores, axis=0, dtype=np.int)
mapped_scores = np.mean(mapped_scores, axis=0, dtype=np.int)
# Transform each image onto the standard scale and show the results.
for d in data:
    d['img_scaled'] = transform(d['img'], pc1, pc2, d['landmarks'], s1, s2,
                                mapped_scores)
plot(data, s1, s2)
fix, outfile
#!/usr/bin/env python2
"""Standardize images."""
from __future__ import division
import argparse
import numpy as np
import scipy as sp
import dwi.dataset
import dwi.mask
import dwi.plot
import dwi.util
def parse_args():
    """Parse command-line arguments."""
    ap = argparse.ArgumentParser(description=__doc__)
    add = ap.add_argument
    add('--verbose', '-v', action='count', help='increase verbosity')
    add('--samplelist', help='sample list file')
    add('--subregiondir', help='subregion bounding box directory')
    add('--pmapdir', default='dicoms_Mono_combinedDICOM',
        help='input parametric map directory')
    add('--param', default='ADCm', help='image parameter to use')
    add('--pc', metavar='I', nargs=2, type=float, default=[0, 99.8],
        help='minimum and maximum percentiles')
    add('--scale', metavar='I', nargs=2, type=int, default=[1, 4095],
        help='standard scale minimum and maximum')
    add('--cases', metavar='I', nargs='*', type=int, default=[],
        help='case numbers')
    add('--scans', metavar='S', nargs='*', default=[],
        help='scan identifiers')
    return ap.parse_args()
def set_landmarks(data, pc1, pc2):
    """Record percentile landmarks (p1, p2 and the nine deciles) per image."""
    from scipy.stats import scoreatpercentile
    for item in data:
        img = item['img']
        item['p1'] = scoreatpercentile(img, pc1)
        item['p2'] = scoreatpercentile(img, pc2)
        deciles = [10 * k for k in range(1, 10)]
        item['landmarks'] = deciles
        item['scores'] = [scoreatpercentile(img, q) for q in deciles]
def map_landmarks(data, s1, s2):
    """Map every image's landmark scores onto the standard scale [s1, s2]."""
    for item in data:
        mapped = []
        for score in item['scores']:
            mapped.append(int(map_onto_scale(item['p1'], item['p2'], s1, s2, score)))
        item['mapped_scores'] = mapped
def map_onto_scale(p1, p2, s1, s2, v):
    """Map value v from original scale [p1, p2] onto standard scale [s1, s2]."""
    assert p1 <= p2, (p1, p2)
    assert s1 <= s2, (s1, s2)
    if p1 == p2:
        assert s1 == s2, (s1, s2)
        return s1
    # Re-express v's relative position within [p1, p2] on the target scale.
    t = (v - p1) / (p2 - p1)
    return (s2 - s1) * t + s1
def transform(img, pc1, pc2, landmarks, s1, s2, mapped_scores):
    """Transform image onto standard scale.

    Builds a piecewise-linear map from the image's own percentile scores at
    [pc1, landmarks..., pc2] to the standard-scale values
    [s1, mapped_scores..., s2] and applies it element-wise.
    """
    from scipy.stats import scoreatpercentile
    pc = [pc1] + list(landmarks) + [pc2]
    mapped = [s1] + list(mapped_scores) + [s2]
    scores = [scoreatpercentile(img, i) for i in pc]
    # NOTE(review): np.int is removed in modern NumPy; this is Python 2 era code.
    r = np.zeros_like(img, dtype=np.int)
    for pos, v in np.ndenumerate(img):
        # Locate the map segment containing v, clamping out-of-range values.
        slot = sum(v > s for s in scores)
        slot = np.clip(slot, 1, len(scores)-1)
        r[pos] = map_onto_scale(scores[slot-1], scores[slot],
                                mapped[slot-1], mapped[slot], v)
    print dwi.util.fivenum(r)
    return r
def plot(data, s1, s2, outfile):
    """Show histograms of original and standardized images, the landmark
    score curves, and save the original/scaled image pairs to *outfile*."""
    import pylab as pl

    def _histogram_figure(key):
        # Overlay one normalized histogram per image, then display the figure.
        for item in data:
            hist, edges = np.histogram(item[key], bins=1000, density=True)
            pl.plot(edges[:-1], hist)
        pl.show()
        pl.close()

    _histogram_figure('img')
    _histogram_figure('img_scaled')
    # Landmark score curves, one per image.
    for item in data:
        scores = item['scores']
        pl.plot(range(len(scores)), scores)
    pl.show()
    pl.close()
    dwi.plot.show_images([[item['img'], item['img_scaled']] for item in data],
                         vmin=s1, vmax=s2, outfile=outfile)
# ---- Script body: standardize image intensity scale across cases ----
args = parse_args()
pc1, pc2 = args.pc       # percentile cutoffs
s1, s2 = args.scale      # standard scale min/max
if args.verbose:
    print 'Reading data...'
data = dwi.dataset.dataset_read_samplelist(args.samplelist, args.cases,
                                           args.scans)
if args.subregiondir:
    dwi.dataset.dataset_read_subregions(data, args.subregiondir)
dwi.dataset.dataset_read_pmaps(data, args.pmapdir, [args.param])
if args.verbose:
    print 'Data:'
for d in data:
    # NOTE(review): fixed slice 15 of the first parameter is hard-coded --
    # confirm this is the intended slice for all cases.
    d['img'] = d['image'][15,...,0]
    print d['case'], d['scan'], d['img'].shape, dwi.util.fivenum(d['img'])

set_landmarks(data, pc1, pc2)
if args.verbose:
    print 'Landmark scores:'
    for d in data:
        print d['case'], d['scan'], (d['p1'], d['p2']), d['scores']

map_landmarks(data, s1, s2)
if args.verbose:
    print 'Mapped landmark scores:'
    for d in data:
        print d['case'], d['scan'], (s1, s2), d['mapped_scores']

# Average the mapped landmarks over all cases to get the shared scale.
mapped_scores = np.array([d['mapped_scores'] for d in data],
                         dtype=np.int)
print mapped_scores.shape
print np.mean(mapped_scores, axis=0, dtype=np.int)
print dwi.util.median(mapped_scores, axis=0, dtype=np.int)
mapped_scores = np.mean(mapped_scores, axis=0, dtype=np.int)
for d in data:
    d['img_scaled'] = transform(d['img'], pc1, pc2, d['landmarks'], s1, s2,
                                mapped_scores)
plot(data, s1, s2, 'std.png')
|
# Copyright (c) 2011 Tencent Inc.
# All rights reserved.
#
# Author: Huan Yu <huanyu@tencent.com>
# Feng Chen <phongchen@tencent.com>
# Yi Wang <yiwang@tencent.com>
# Chong Peng <michaelpeng@tencent.com>
# Wenting Li <wentingli@tencent.com>
# Date: October 20, 2011
"""
This is the scons rules helper module which should be
imported by Scons script
"""
import os
import py_compile
import shutil
import signal
import socket
import stat
import string
import subprocess
import sys
import tempfile
import time
import zipfile
import glob
import SCons
import SCons.Action
import SCons.Builder
import SCons.Scanner
import SCons.Scanner.Prog
import blade_util
import console
from console import colors
# Whether builder actions echo their full command lines; presumably set
# elsewhere (read via 'global option_verbose' in MakeAction/_generate_jar).
option_verbose = False

# Mount point of the tmpfs used for fast linking; assigned in
# setup_fast_link_builders().
linking_tmp_dir = ''
def generate_python_egg(target, source, env):
    """SCons action: build a python egg from setup.py/__init__.py/sources.

    Expected source layout: [setup.py,] __init__.py, dep sources...  When
    source[0] is not a setup.py, a minimal default one is generated.
    Returns 0 on success; failures abort the build via console.error_exit.
    """
    setup_file = ''
    if not str(source[0]).endswith('setup.py'):
        console.warning('setup.py not existed to generate target %s, '
                        'blade will generate a default one for you' %
                        str(target[0]))
    else:
        setup_file = str(source[0])
    init_file = ''
    source_index = 2
    if not setup_file:
        # No setup.py: source[0] is the package __init__.py instead.
        source_index = 1
        init_file = str(source[0])
    else:
        init_file = str(source[1])
    init_file_dir = os.path.dirname(init_file)

    dep_source_list = []
    for s in source[source_index:]:
        dep_source_list.append(str(s))
    target_file = str(target[0])
    target_file_dir_list = target_file.split('/')
    # First path component is the build profile directory.
    target_profile = target_file_dir_list[0]
    target_dir = '/'.join(target_file_dir_list[0:-1])
    if not os.path.exists(target_dir):
        os.makedirs(target_dir)

    if setup_file:
        shutil.copyfile(setup_file, os.path.join(target_dir, 'setup.py'))
    else:
        # Package name defaults to the directory holding __init__.py.
        target_name = os.path.basename(init_file_dir)
        if not target_name:
            console.error_exit('invalid package for target %s' % str(target[0]))
        # generate default setup.py for user
        setup_str = """
#!/usr/bin/env python
# This file was generated by blade
from setuptools import find_packages, setup
setup(
    name='%s',
    version='0.1.0',
    packages=find_packages(),
    zip_safe=True
)
""" % target_name
        default_setup_file = open(os.path.join(target_dir, 'setup.py'), 'w')
        default_setup_file.write(setup_str)
        default_setup_file.close()

    package_dir = os.path.join(target_profile, init_file_dir)
    if os.path.exists(package_dir):
        shutil.rmtree(package_dir, ignore_errors=True)

    # Copy the whole package directory into the target dir.
    cmd = 'cp -r %s %s' % (init_file_dir, target_dir)
    p = subprocess.Popen(
        cmd,
        env={},
        stdout=subprocess.PIPE,
        stderr=subprocess.PIPE,
        shell=True,
        universal_newlines=True)
    std_out, std_err = p.communicate()
    if p.returncode:
        console.info(std_out)
        console.info(std_err)
        console.error_exit('failed to copy source files from %s to %s' % (
            init_file_dir, target_dir))
        return p.returncode

    # copy file to package_dir
    for f in dep_source_list:
        dep_file_basename = os.path.basename(f)
        dep_file_dir = os.path.dirname(f)
        sub_dir = ''
        sub_dir_list = dep_file_dir.split('/')
        if len(sub_dir_list) > 1:
            # Drop the first component (the package root itself).
            sub_dir = '/'.join(dep_file_dir.split('/')[1:])
        if sub_dir:
            package_sub_dir = os.path.join(package_dir, sub_dir)
            if not os.path.exists(package_sub_dir):
                os.makedirs(package_sub_dir)
            # Ensure every sub dir is an importable package.
            sub_init_file = os.path.join(package_sub_dir, '__init__.py')
            if not os.path.exists(sub_init_file):
                sub_f = open(sub_init_file, 'w')
                sub_f.close()
            # NOTE(review): deps without a sub dir appear not to be copied
            # here -- confirm whether top-level deps are handled elsewhere.
            shutil.copyfile(f, os.path.join(package_sub_dir, dep_file_basename))

    # Build the egg with setuptools in the target dir.
    make_egg_cmd = 'python setup.py bdist_egg'
    p = subprocess.Popen(
        make_egg_cmd,
        env={},
        cwd=target_dir,
        stdout=subprocess.PIPE,
        stderr=subprocess.PIPE,
        shell=True,
        universal_newlines=True)
    std_out, std_err = p.communicate()
    if p.returncode:
        console.info(std_out)
        console.info(std_err)
        console.error_exit('failed to generate python egg in %s' % target_dir)
        return p.returncode
    return 0
def _compile_python(src, build_dir):
if src.startswith(build_dir):
pyc = src + 'c'
else:
pyc = os.path.join(build_dir, src) + 'c'
py_compile.compile(src, pyc)
return pyc
def generate_python_library(target, source, env):
    """SCons action: write a .pylib manifest (base_dir + source list) and
    byte-compile each python source into the build dir."""
    manifest_file = open(str(target[0]), 'w')
    build_dir = env['BUILD_DIR']
    compiled_srcs = []
    for node in source:
        path = str(node)
        _compile_python(path, build_dir)
        compiled_srcs.append(path)
    # Keep insertion order base_dir -> srcs so the repr stays stable.
    manifest = dict()
    manifest['base_dir'] = env.get('BASE_DIR', '')
    manifest['srcs'] = compiled_srcs
    manifest_file.write(str(manifest))
    manifest_file.close()
    return None
def _update_init_py_dirs(arcname, dirs, dirs_with_init_py):
dir = os.path.dirname(arcname)
if os.path.basename(arcname) == '__init__.py':
dirs_with_init_py.add(dir)
while dir:
dirs.add(dir)
dir = os.path.dirname(dir)
def generate_python_binary(target, source, env):
    """The action for generate python executable file.

    Packs all python sources (and .pylib library manifests) into a zip,
    synthesizes missing __init__.py entries, then prepends a shell
    bootstrap so the file is both executable and a valid zip.
    """
    target_name = str(target[0])
    build_dir = env['BUILD_DIR']
    target_file = zipfile.ZipFile(target_name, 'w', zipfile.ZIP_DEFLATED)
    # Every dir seen in the archive, and those already holding __init__.py.
    dirs = set()
    dirs_with_init_py = set()
    for s in source:
        src = str(s)
        if src.endswith('.pylib'):
            # A .pylib is the repr'ed dict written by generate_python_library.
            libfile = open(src)
            data = eval(libfile.read())  # NOTE(review): eval of a build artifact -- trusted input only
            libfile.close()
            base_dir = data['base_dir']
            for libsrc in data['srcs']:
                arcname = os.path.relpath(libsrc, base_dir)
                _update_init_py_dirs(arcname, dirs, dirs_with_init_py)
                target_file.write(libsrc, arcname)
        else:
            _compile_python(src, build_dir)
            _update_init_py_dirs(src, dirs, dirs_with_init_py)
            target_file.write(src)

    # insert __init__.py into each dir if missing
    dirs_missing_init_py = dirs - dirs_with_init_py
    for dir in dirs_missing_init_py:
        target_file.writestr(os.path.join(dir, '__init__.py'), '')
    target_file.writestr('__init__.py', '')
    target_file.close()

    target_file = open(target_name, 'rb')
    zip_content = target_file.read()
    target_file.close()

    # Insert bootstrap before zip, it is also a valid zip file.
    # unzip will seek actually start until meet the zip magic number.
    entry = env['ENTRY']
    bootstrap = (
        '#!/bin/sh\n'
        '\n'
        'PYTHONPATH="$0:$PYTHONPATH" exec python -m "%s" "$@"\n') % entry
    target_file = open(target_name, 'wb')
    target_file.write(bootstrap)
    target_file.write(zip_content)
    target_file.close()
    os.chmod(target_name, 0775)  # Python 2 octal literal: rwxrwxr-x
    return None
def generate_resource_index(target, source, env):
    """SCons action: write the resource index .c/.h pair for a
    resource_library, one BladeResourceEntry per source file.

    Python 2 "print >>file" syntax is used throughout.
    """
    res_source_path = str(target[0])
    res_header_path = str(target[1])

    if not os.path.exists(os.path.dirname(res_header_path)):
        os.mkdir(os.path.dirname(res_header_path))
    h = open(res_header_path, 'w')
    c = open(res_source_path, 'w')

    source_path = env["SOURCE_PATH"]
    full_name = blade_util.regular_variable_name("%s/%s" % (source_path, env["TARGET_NAME"]))
    guard_name = 'BLADE_RESOURCE_%s_H' % full_name.upper()
    print >>h, '#ifndef %s\n#define %s' % (guard_name, guard_name)
    print >>h, '''
// This file was automatically generated by blade
#ifdef __cplusplus
extern "C" {
#endif
#ifndef BLADE_RESOURCE_TYPE_DEFINED
#define BLADE_RESOURCE_TYPE_DEFINED
struct BladeResourceEntry {
    const char* name;
    const char* data;
    unsigned int size;
};
#endif
'''
    res_index_name = 'RESOURCE_INDEX_%s' % full_name
    print >>c, '// This file was automatically generated by blade\n'
    print >>c, '#include "%s"\n' % res_header_path
    print >>c, 'const struct BladeResourceEntry %s[] = {' % res_index_name
    for s in source:
        src = str(s)
        var_name = blade_util.regular_variable_name(src)
        org_src = blade_util.relative_path(src, source_path)
        # Declare the resource blob and its length in the header...
        print >>h, '// %s' % org_src
        print >>h, 'extern const char RESOURCE_%s[%d];' % (var_name, s.get_size())
        print >>h, 'extern const unsigned RESOURCE_%s_len;\n' % var_name
        # ...and add its entry to the index table in the source file.
        print >>c, ' { "%s", RESOURCE_%s, %s },' % (org_src, var_name, s.get_size())
    print >>c, '};'
    print >>c, 'const unsigned %s_len = %s;' % (res_index_name, len(source))
    print >>h, '// Resource index'
    print >>h, 'extern const struct BladeResourceEntry %s[];' % res_index_name
    print >>h, 'extern const unsigned %s_len;' % res_index_name
    print >>h, '\n#ifdef __cplusplus\n} // extern "C"\n#endif\n'
    print >>h, '\n#endif // %s' % guard_name
    c.close()
    h.close()
    return None
def generate_resource_file(target, source, env):
    """Generate resource source file in resource_library"""
    src_path = str(source[0])
    new_src_path = str(target[0])
    # xxd dumps the file as a C array; sed renames the symbols with a
    # RESOURCE_ prefix and makes them const.
    cmd = ('xxd -i %s | sed -e "s/^unsigned char /const char RESOURCE_/g" '
           '-e "s/^unsigned int /const unsigned int RESOURCE_/g"> %s') % (
               src_path, new_src_path)
    proc = subprocess.Popen(
        cmd,
        env={},
        stdout=subprocess.PIPE,
        stderr=subprocess.PIPE,
        shell=True,
        universal_newlines=True)
    _, err = proc.communicate()
    # Anything on stderr counts as failure, not only a bad exit code.
    if proc.returncode or err:
        message = 'failed to generate resource file'
        if err:
            message = message + ': ' + err
        console.error_exit(message)
    return proc.returncode
def _java_resource_file_target_path(path):
""" Return relative target path in target dir, see
https://maven.apache.org/guides/introduction/introduction-to-the-standard-directory-layout.html
for the rules
"""
path = str(path)
segs = [
'/src/main/resources/',
'/src/test/resources/',
'/resources/',
]
for seg in segs:
pos = path.find(seg)
if pos != -1:
return path[pos + len(seg):]
return ''
# emitter function is used by scons builder to determine target files from
# source files.
def _emit_java_resources(target, source, env):
    """Create and return lists of source resource files
    and their corresponding target resource files.
    """
    target[0].must_be_same(SCons.Node.FS.Dir)
    classdir = target[0]
    slist = []
    for entry in source:
        entry = entry.rentry().disambiguate()
        if isinstance(entry, SCons.Node.FS.File):
            slist.append(entry)
        elif isinstance(entry, SCons.Node.FS.Dir):
            # Collect every file under the directory; the OrderedDict keys
            # act as an order-preserving, de-duplicated set of File nodes.
            result = SCons.Util.OrderedDict()
            dirnode = entry.rdir()
            def find_files(arg, dirpath, filenames):
                mydir = dirnode.Dir(dirpath)
                for name in filenames:
                    if os.path.isfile(os.path.join(str(dirpath), name)):
                        arg[mydir.File(name)] = True
            for dirpath, dirnames, filenames in os.walk(dirnode.get_abspath()):
                find_files(result, dirpath, filenames)
            # NOTE(review): entry.walk() appears to traverse the same tree a
            # second time; duplicates are absorbed by the dict keys, but
            # confirm whether both traversals are really needed.
            entry.walk(find_files, result)
            slist.extend(list(result.keys()))
        else:
            raise SCons.Errors.UserError("Java resource must be File or Dir, not '%s'" % entry.__class__)

    tlist = []
    for f in slist:
        # Derive the in-jar path from maven's standard directory layout.
        target_path = _java_resource_file_target_path(f.rfile().get_abspath())
        if target_path:
            d = target[0]
            t = d.File(target_path)
            # Stash the copy destination info on the nodes for the action.
            t.attributes.java_classdir = classdir
            f.attributes.target_path = target_path
            t.set_specific_source([f])
            tlist.append(t)
        else:
            console.warning('java resource file "%s" does not match any '
                            'resource path pattern of maven standard directory '
                            'layout, ignored. \nsee '
                            'https://maven.apache.org/guides/introduction/introduction-to-the-standard-directory-layout.html' % f)
    return tlist, slist
def process_java_resources(target, source, env):
    """Copy each source resource file into the .resources dir, preserving
    the target_path recorded by the emitter."""
    dest_root = str(target[0].attributes.java_classdir)
    for res in source:
        dest = os.path.join(dest_root, res.attributes.target_path)
        shutil.copy2(str(res), dest)
    return None
def _generate_jar(target, sources, resources, env):
    """Generate a jar containing the sources and resources.

    Builds a 'jar cf' command line from the .classes/.resources sibling
    dirs of the target and runs it; returns the jar exit code.
    """
    classes_dir = target.replace('.jar', '.classes')
    resources_dir = target.replace('.jar', '.resources')
    cmd = []
    cmd.append('%s cf %s' % (env['JAR'], target))
    jar_path_set = set()
    if os.path.exists(classes_dir):
        for source in sources:
            if not source.endswith('.class'):
                continue
            jar_path = os.path.relpath(source, classes_dir)
            if jar_path not in jar_path_set:
                jar_path_set.add(jar_path)
            # Outer classes pull in their inner classes, which javac emits
            # as Outer$*.class files next to them.
            if '$' not in source:
                inner_classes = glob.glob(source[:-6] + '$*.class')
                for inner_class in inner_classes:
                    # Skip inner classes older than the outer class file;
                    # those are leftovers from a previous compile.
                    if os.path.getmtime(inner_class) >= os.path.getmtime(source):
                        jar_path = os.path.relpath(inner_class, classes_dir)
                        if jar_path not in jar_path_set:
                            jar_path_set.add(jar_path)
        for path in jar_path_set:
            # Add quotes for file names with $
            cmd.append("-C '%s' '%s'" % (classes_dir, path))
    if os.path.exists(resources_dir):
        for resource in resources:
            cmd.append("-C '%s' '%s'" % (resources_dir,
                                         os.path.relpath(resource, resources_dir)))
    cmd = ' '.join(cmd)
    global option_verbose
    if option_verbose:
        print cmd  # Python 2 print statement
    p = subprocess.Popen(cmd, env=os.environ, shell=True)
    p.communicate()
    return p.returncode
def generate_jar(target, source, env):
    """SCons action: split source into the leading run of .class files and
    trailing resources, then delegate to _generate_jar."""
    # Sources are ordered with all .class files first.
    class_files = []
    for node in source:
        if not str(node).endswith('.class'):
            break
        class_files.append(str(node))
    resources = [str(node) for node in source[len(class_files):]]
    return _generate_jar(str(target[0]), class_files, resources, env)
# Path to one-jar-boot.jar; assigned in setup_java_builders().
_one_jar_boot_path = None
def _generate_one_jar(target,
main_class,
main_jar,
deps_jar,
one_jar_boot_path):
target_dir = os.path.dirname(target)
if not os.path.exists(target_dir):
os.makedirs(target_dir)
target_one_jar = zipfile.ZipFile(target, 'w')
# Copy files from one-jar-boot.jar to the target jar
zip_file = zipfile.ZipFile(one_jar_boot_path, 'r')
name_list = zip_file.namelist()
for name in name_list:
if not name.lower().endswith('manifest.mf'): # Exclude manifest
target_one_jar.writestr(name, zip_file.read(name))
zip_file.close()
# Main jar and dependencies
target_one_jar.write(main_jar, os.path.join('main',
os.path.basename(main_jar)))
for dep in deps_jar:
dep_name = os.path.basename(dep)
target_one_jar.write(dep, os.path.join('lib', dep_name))
# Manifest
# Note that the manifest file must end with a new line or carriage return
target_one_jar.writestr(os.path.join('META-INF', 'MANIFEST.MF'),
'''Manifest-Version: 1.0
Main-Class: com.simontuffs.onejar.Boot
One-Jar-Main-Class: %s
''' % main_class)
target_one_jar.close()
return None
def generate_one_jar(target, source, env):
    """SCons action: build a one-jar from [main_class, main_jar, deps...]."""
    if len(source) < 2:
        console.error_exit('Failed to generate java binary from %s: '
                           'Source should at least contain main class '
                           'and main jar' % ','.join(str(s) for s in source))
    main_class = str(source[0])
    main_jar = str(source[1])
    extra_jars = [str(dep) for dep in source[2:]]
    return _generate_one_jar(str(target[0]), main_class, main_jar,
                             extra_jars, _one_jar_boot_path)
def _generate_fat_jar(target, deps_jar):
"""Generate a fat jar containing the contents of all the jar dependencies. """
target_dir = os.path.dirname(target)
if not os.path.exists(target_dir):
os.makedirs(target_dir)
target_fat_jar = zipfile.ZipFile(target, 'w')
manifest = os.path.join('META-INF', 'MANIFEST.MF')
first_jar = True
# Record paths written in the fat jar to avoid duplicate writing
zip_path_set = set()
for jar in deps_jar:
jar = zipfile.ZipFile(jar, 'r')
name_list = jar.namelist()
for name in name_list:
if name.upper() == manifest:
# Use the MANIFEST file of the first jar
if first_jar:
target_fat_jar.writestr(name, jar.read(name))
first_jar = False
else:
if name not in zip_path_set:
target_fat_jar.writestr(name, jar.read(name))
zip_path_set.add(name)
jar.close()
target_fat_jar.close()
return None
def generate_fat_jar(target, source, env):
    """SCons action: merge every source jar into one fat jar."""
    return _generate_fat_jar(str(target[0]), [str(dep) for dep in source])
def _generate_java_binary(target_name, onejar_path, jvm_flags, run_args):
"""generate a wrapper shell script to run jar"""
onejar_name = os.path.basename(onejar_path)
target_file = open(target_name, 'w')
target_file.write(
"""#!/bin/sh
# Auto generated wrapper shell script by blade
# *.one.jar must be in same dir
jar=`dirname "$0"`/"%s"
exec java %s -jar "$jar" %s $@
""" % (onejar_name, jvm_flags, run_args))
os.chmod(target_name, 0755)
target_file.close()
return None
def generate_java_binary(target, source, env):
    """SCons action: emit the wrapper shell script for a java binary
    (no extra JVM flags or run arguments)."""
    return _generate_java_binary(str(target[0]), str(source[0]), '', '')
def _get_all_test_class_names_in_jar(jar):
"""Returns a list of test class names in the jar file. """
test_class_names = []
zip_file = zipfile.ZipFile(jar, 'r')
name_list = zip_file.namelist()
for name in name_list:
basename = os.path.basename(name)
# Exclude inner class and Test.class
if (basename.endswith('Test.class') and
len(basename) > len('Test.class') and
not '$' in basename):
class_name = name.replace('/', '.')[:-6] # Remove .class suffix
test_class_names.append(class_name)
zip_file.close()
return test_class_names
def generate_java_test(target, source, env):
    """SCons action: emit a wrapper script that runs every *Test class
    found in the test jars (source[1:]) via the one-jar (source[0])."""
    wrapper = str(target[0])
    onejar = str(source[0])
    classes = []
    for jar in source[1:]:
        classes.extend(_get_all_test_class_names_in_jar(str(jar)))
    return _generate_java_binary(wrapper, onejar, '', ' '.join(classes))
def MakeAction(cmd, cmdstr):
    """Wrap cmd in an SCons Action; drop the pretty cmdstr when verbose
    output was requested so the raw command line shows instead."""
    global option_verbose
    if option_verbose:
        return SCons.Action.Action(cmd)
    return SCons.Action.Action(cmd, cmdstr)
# Substrings that mark a line of compiler/linker output as an error.
_ERRORS = [': error:', ': fatal error:', ': undefined reference to',
           ': cannot find ', ': ld returned 1 exit status',
           ' is not defined'
           ]
# Substrings that mark a line of compiler output as a warning.
_WARNINGS = [': warning:', ': note: ', '] Warning: ']
def error_colorize(message):
    """Colorize build output per line: errors red, warnings yellow, clang
    column indicators green, everything else cyan."""
    pieces = []
    for line in message.splitlines(True):  # keepends
        color = 'cyan'
        if line.strip().startswith('^'):
            # Clang column indicator such as '^~~~~~'.
            color = 'green'
        else:
            # Errors take precedence over warnings when both match.
            if any(marker in line for marker in _WARNINGS):
                color = 'yellow'
            if any(marker in line for marker in _ERRORS):
                color = 'red'
        pieces.append(console.colors(color))
        pieces.append(line)
        pieces.append(console.colors('end'))
    return console.inerasable(''.join(pieces))
def _colored_echo(stdout, stderr):
"""Echo error colored message"""
if stdout:
sys.stdout.write(error_colorize(stdout))
if stderr:
sys.stderr.write(error_colorize(stderr))
def echospawn(sh, escape, cmd, args, env):
    """SCons SPAWN replacement: run the command and colorize its output.

    Returns the subprocess exit code so SCons can react to failures.
    """
    # convert env from unicode strings (Python 2 iteritems)
    asciienv = {}
    for key, value in env.iteritems():
        asciienv[key] = str(value)
    cmdline = ' '.join(args)
    p = subprocess.Popen(
        cmdline,
        env=asciienv,
        stderr=subprocess.PIPE,
        stdout=subprocess.PIPE,
        shell=True,
        universal_newlines=True)
    (stdout, stderr) = p.communicate()

    # NOTE(review): both branches echo identically; the only real effect
    # of the returncode check is to swallow output on SIGINT -- confirm.
    if p.returncode:
        if p.returncode != -signal.SIGINT:
            # Error
            _colored_echo(stdout, stderr)
    else:
        # Only warnings
        _colored_echo(stdout, stderr)
    return p.returncode
def _blade_action_postfunc(closing_message):
    """Run post jobs after one of blade's own actions fails to build."""
    console.info(closing_message)
    # Flush the sconsign database in case a failed link is retried later;
    # a cleaner way to avoid rebuilding after our own builders/actions
    # fail remains to be worked out.
    SCons.SConsign.write()
def _fast_link_helper(target, source, env, link_com):
    """fast link helper function.

    Links into a temporary file under linking_tmp_dir (a tmpfs mount) and
    moves the result onto the real target on success.
    """
    target_file = str(target[0])
    prefix_str = 'blade_%s' % target_file.replace('/', '_').replace('.', '_')
    fd, temporary_file = tempfile.mkstemp(suffix='xianxian',
                                          prefix=prefix_str,
                                          dir=linking_tmp_dir)
    os.close(fd)
    sources = []
    for s in source:
        sources.append(str(s))
    # link_com is a string.Template with FL_TARGET/FL_SOURCE placeholders.
    link_com_str = link_com.substitute(
        FL_TARGET=temporary_file,
        FL_SOURCE=' '.join(sources))
    p = subprocess.Popen(
        link_com_str,
        env=os.environ,
        stdout=subprocess.PIPE,
        stderr=subprocess.PIPE,
        shell=True,
        universal_newlines=True)
    std_out, std_err = p.communicate()
    if std_out:
        print std_out  # Python 2 print statement
    if std_err:
        print std_err
    if p.returncode == 0:
        shutil.move(temporary_file, target_file)
        if not os.path.exists(target_file):
            # NOTE(review): 'genreate' typo is in the user-visible message.
            console.warning('failed to genreate %s in link on tmpfs mode' % target_file)
    else:
        _blade_action_postfunc('failed while fast linking')
    return p.returncode
def fast_link_sharelib_action(target, source, env):
    """Link a shared library via the tmpfs fast-link helper."""
    # $SHLINK -o $TARGET $SHLINKFLAGS $__RPATH $SOURCES $_LIBDIRFLAGS $_LIBFLAGS
    parts = (env.subst('$SHLINK'),
             env.subst('$SHLINKFLAGS'),
             env.subst('$__RPATH'),
             env.subst('$_LIBDIRFLAGS'),
             env.subst('$_LIBFLAGS'))
    link_com = string.Template('%s -o $FL_TARGET %s %s $FL_SOURCE %s %s' % parts)
    return _fast_link_helper(target, source, env, link_com)
def fast_link_prog_action(target, source, env):
    """Link a program via the tmpfs fast-link helper."""
    # $LINK -o $TARGET $LINKFLAGS $__RPATH $SOURCES $_LIBDIRFLAGS $_LIBFLAGS
    parts = (env.subst('$LINK'),
             env.subst('$LINKFLAGS'),
             env.subst('$__RPATH'),
             env.subst('$_LIBDIRFLAGS'),
             env.subst('$_LIBFLAGS'))
    link_com = string.Template('%s -o $FL_TARGET %s %s $FL_SOURCE %s %s' % parts)
    return _fast_link_helper(target, source, env, link_com)
def setup_fast_link_prog_builder(top_env):
    """Overwrite the top-level Program builder with the fast-link variant.

    Used when the blade output lives on a distributed file system, where
    the linker's random reads/writes badly degrade build performance;
    linking happens on tmpfs instead.
    """
    link_action = MakeAction(fast_link_prog_action, '$LINKCOMSTR')
    top_env['BUILDERS']['Program'] = SCons.Builder.Builder(
        action=link_action,
        emitter='$PROGEMITTER',
        prefix='$PROGPREFIX',
        suffix='$PROGSUFFIX',
        src_suffix='$OBJSUFFIX',
        src_builder='Object',
        target_scanner=SCons.Scanner.Prog.ProgramScanner())
def setup_fast_link_sharelib_builder(top_env):
    """Overwrite the top-level SharedLibrary builder with the fast-link
    variant.

    Used when the blade output lives on a distributed file system, where
    the linker's random reads/writes badly degrade build performance;
    linking happens on tmpfs instead.
    """
    actions = [
        SCons.Defaults.SharedCheck,
        MakeAction(fast_link_sharelib_action, '$SHLINKCOMSTR'),
    ]
    top_env['BUILDERS']['SharedLibrary'] = SCons.Builder.Builder(
        action=actions,
        emitter='$SHLIBEMITTER',
        prefix='$SHLIBPREFIX',
        suffix='$SHLIBSUFFIX',
        target_scanner=SCons.Scanner.Prog.ProgramScanner(),
        src_suffix='$SHOBJSUFFIX',
        src_builder='SharedObject')
def setup_fast_link_builders(top_env):
    """Creates fast link builders - Program and SharedLibrary. """
    # Probe for a tmpfs mount to link into; falls back to the stock
    # builders when no suitable mount point exists.
    probe_cmd = "df | grep tmpfs | awk '{print $5, $6}'"
    proc = subprocess.Popen(
        probe_cmd,
        env=os.environ,
        stdout=subprocess.PIPE,
        stderr=subprocess.PIPE,
        shell=True,
        universal_newlines=True)
    out, err = proc.communicate()

    if proc.returncode:
        # Probing itself failed: do not overwrite the builders.
        console.warning('you have link on tmp enabled, but it is not fullfilled to make it.')
        return
    if not out:
        # No tmpfs mounted.
        console.warning('you have link on tmp enabled, but there is no tmpfs to make it.')
        return

    # Use the first tmpfs entry; remember its mount point globally.
    global linking_tmp_dir
    usage, linking_tmp_dir = tuple(out.splitlines(False)[0].split())
    usage = int(usage.replace('%', ''))
    if usage > 90:
        # Nearly full: not worth linking there.
        console.warning('you have link on tmp enabled, '
                        'but there is not enough space on %s to make it.' %
                        linking_tmp_dir)
        return

    console.info('building in link on tmpfs mode')
    setup_fast_link_sharelib_builder(top_env)
    setup_fast_link_prog_builder(top_env)
def make_top_env(build_dir):
    """Make the top level scons environment object."""
    # Force a stable locale so tool output parses predictably.
    os.environ['LC_ALL'] = 'C'
    env = SCons.Environment.Environment(ENV=os.environ)
    # Speed options, see http://www.scons.org/wiki/GoFastButton
    env.Decider('MD5-timestamp')
    env.SetOption('implicit_cache', 1)
    env.SetOption('max_drift', 1)
    env.VariantDir(build_dir, '.', duplicate=0)
    return env
def get_compile_source_message():
    """Pretty (erasable) progress message shown while compiling a source."""
    fmt = '%sCompiling %s$SOURCE%s%s'
    return console.erasable(fmt % (colors('cyan'), colors('purple'),
                                   colors('cyan'), colors('end')))
def get_link_program_message():
    """Pretty (inerasable) progress message shown while linking a program."""
    fmt = '%sLinking Program %s$TARGET%s%s'
    return console.inerasable(fmt % (colors('green'), colors('purple'),
                                     colors('green'), colors('end')))
def setup_compliation_verbose(top_env, color_enabled, verbose):
    """Generates color and verbose message. """
    console.color_enabled = color_enabled
    if not verbose:
        # Non-verbose builds route commands through our colorizing spawn.
        top_env["SPAWN"] = echospawn

    def _erasable(fmt):
        # Cyan/purple scheme for transient per-file progress lines.
        return console.erasable(fmt % (colors('cyan'), colors('purple'),
                                       colors('cyan'), colors('end')))

    def _inerasable(fmt):
        # Green/purple scheme for lines that should stay on screen.
        return console.inerasable(fmt % (colors('green'), colors('purple'),
                                         colors('green'), colors('end')))

    compile_source_message = get_compile_source_message()
    link_program_message = get_link_program_message()
    assembling_source_message = _erasable('%sAssembling %s$SOURCE%s%s')
    link_library_message = _inerasable('%sCreating Static Library %s$TARGET%s%s')
    ranlib_library_message = _inerasable('%sRanlib Library %s$TARGET%s%s')
    link_shared_library_message = _inerasable('%sLinking Shared Library %s$TARGET%s%s')
    jar_message = _inerasable('%sCreating Jar %s$TARGET%s%s')

    if not verbose:
        top_env.Append(
            CXXCOMSTR=compile_source_message,
            CCCOMSTR=compile_source_message,
            ASCOMSTR=assembling_source_message,
            SHCCCOMSTR=compile_source_message,
            SHCXXCOMSTR=compile_source_message,
            ARCOMSTR=link_library_message,
            RANLIBCOMSTR=ranlib_library_message,
            SHLINKCOMSTR=link_shared_library_message,
            LINKCOMSTR=link_program_message,
            JAVACCOMSTR=compile_source_message,
            JARCOMSTR=jar_message,
            LEXCOMSTR=compile_source_message)
def setup_proto_builders(top_env, build_dir, protoc_bin, protobuf_path,
                         protobuf_incs_str,
                         protoc_php_plugin, protobuf_php_path):
    """Register the protoc code-generation builders: Proto (C++),
    ProtoJava, ProtoPhp and ProtoPython."""
    # Per-language progress messages (erasable: overwritten in place).
    compile_proto_cc_message = console.erasable('%sCompiling %s$SOURCE%s to cc source%s' % \
        (colors('cyan'), colors('purple'), colors('cyan'), colors('end')))
    compile_proto_java_message = console.erasable('%sCompiling %s$SOURCE%s to java source%s' % \
        (colors('cyan'), colors('purple'), colors('cyan'), colors('end')))
    compile_proto_php_message = console.erasable('%sCompiling %s$SOURCE%s to php source%s' % \
        (colors('cyan'), colors('purple'), colors('cyan'), colors('end')))
    compile_proto_python_message = console.erasable('%sCompiling %s$SOURCE%s to python source%s' % \
        (colors('cyan'), colors('purple'), colors('cyan'), colors('end')))

    # C++ generation into the build dir.
    proto_bld = SCons.Builder.Builder(action = MakeAction(
        "%s --proto_path=. -I. %s -I=`dirname $SOURCE` --cpp_out=%s $SOURCE" % (
            protoc_bin, protobuf_incs_str, build_dir),
        compile_proto_cc_message))
    top_env.Append(BUILDERS = {"Proto" : proto_bld})

    # Java generation next to the source under the build dir.
    proto_java_bld = SCons.Builder.Builder(action = MakeAction(
        "%s --proto_path=. --proto_path=%s --java_out=%s/`dirname $SOURCE` $SOURCE" % (
            protoc_bin, protobuf_path, build_dir),
        compile_proto_java_message))
    top_env.Append(BUILDERS = {"ProtoJava" : proto_java_bld})

    # PHP generation via the protoc-gen-php plugin.
    proto_php_bld = SCons.Builder.Builder(action = MakeAction(
        "%s --proto_path=. --plugin=protoc-gen-php=%s -I. %s -I%s -I=`dirname $SOURCE` --php_out=%s/`dirname $SOURCE` $SOURCE" % (
            protoc_bin, protoc_php_plugin, protobuf_incs_str, protobuf_php_path, build_dir),
        compile_proto_php_message))
    top_env.Append(BUILDERS = {"ProtoPhp" : proto_php_bld})

    # Python generation into the build dir.
    proto_python_bld = SCons.Builder.Builder(action = MakeAction(
        "%s --proto_path=. -I. %s -I=`dirname $SOURCE` --python_out=%s $SOURCE" % (
            protoc_bin, protobuf_incs_str, build_dir),
        compile_proto_python_message))
    top_env.Append(BUILDERS = {"ProtoPython" : proto_python_bld})
def setup_thrift_builders(top_env, build_dir, thrift_bin, thrift_incs_str):
    """Register the thrift code-generation builders: Thrift (C++),
    ThriftJava and ThriftPython."""
    compile_thrift_cc_message = console.erasable('%sCompiling %s$SOURCE%s to cc source%s' % \
        (colors('cyan'), colors('purple'), colors('cyan'), colors('end')))
    compile_thrift_java_message = console.erasable('%sCompiling %s$SOURCE%s to java source%s' % \
        (colors('cyan'), colors('purple'), colors('cyan'), colors('end')))
    compile_thrift_python_message = console.erasable( '%sCompiling %s$SOURCE%s to python source%s' % \
        (colors('cyan'), colors('purple'), colors('cyan'), colors('end')))

    # C++ generation with include prefixes and pure enums.
    thrift_bld = SCons.Builder.Builder(action = MakeAction(
        '%s --gen cpp:include_prefix,pure_enums -I . %s -I `dirname $SOURCE`'
        ' -out %s/`dirname $SOURCE` $SOURCE' % (
            thrift_bin, thrift_incs_str, build_dir),
        compile_thrift_cc_message))
    top_env.Append(BUILDERS = {"Thrift" : thrift_bld})

    thrift_java_bld = SCons.Builder.Builder(action = MakeAction(
        "%s --gen java -I . %s -I `dirname $SOURCE` -out %s/`dirname $SOURCE` $SOURCE" % (
            thrift_bin, thrift_incs_str, build_dir),
        compile_thrift_java_message))
    top_env.Append(BUILDERS = {"ThriftJava" : thrift_java_bld})

    thrift_python_bld = SCons.Builder.Builder(action = MakeAction(
        "%s --gen py -I . %s -I `dirname $SOURCE` -out %s/`dirname $SOURCE` $SOURCE" % (
            thrift_bin, thrift_incs_str, build_dir),
        compile_thrift_python_message))
    top_env.Append(BUILDERS = {"ThriftPython" : thrift_python_bld})
def setup_fbthrift_builders(top_env, build_dir, fbthrift1_bin, fbthrift2_bin, fbthrift_incs_str):
    """Register the fbthrift code-generation builders: FBThrift1 (cpp) and
    FBThrift2 (cpp2)."""
    compile_fbthrift_cpp_message = console.erasable('%sCompiling %s$SOURCE%s to cpp source%s' % \
        (colors('cyan'), colors('purple'), colors('cyan'), colors('end')))
    compile_fbthrift_cpp2_message = console.erasable('%sCompiling %s$SOURCE%s to cpp2 source%s' % \
        (colors('cyan'), colors('purple'), colors('cyan'), colors('end')))

    # thrift1: classic cpp generator with cob style and strict enums.
    fbthrift1_bld = SCons.Builder.Builder(action = MakeAction(
        '%s --gen cpp:templates,cob_style,include_prefix,enum_strict -I . %s -I `dirname $SOURCE`'
        ' -o %s/`dirname $SOURCE` $SOURCE' % (
            fbthrift1_bin, fbthrift_incs_str, build_dir),
        compile_fbthrift_cpp_message))
    top_env.Append(BUILDERS = {"FBThrift1" : fbthrift1_bld})

    # thrift2: cpp2 generator with futures support.
    fbthrift2_bld = SCons.Builder.Builder(action = MakeAction(
        '%s --gen=cpp2:cob_style,include_prefix,future -I . %s -I `dirname $SOURCE` '
        '-o %s/`dirname $SOURCE` $SOURCE' % (
            fbthrift2_bin, fbthrift_incs_str, build_dir),
        compile_fbthrift_cpp2_message))
    top_env.Append(BUILDERS = {"FBThrift2" : fbthrift2_bld})
def setup_cuda_builders(top_env, nvcc_str, cuda_incs_str):
    """Register the NvccObject and NvccBinary builders that invoke nvcc."""
    nvcc_object_bld = SCons.Builder.Builder(action = MakeAction(
        "%s -ccbin g++ %s $NVCCFLAGS -o $TARGET -c $SOURCE" % (nvcc_str, cuda_incs_str),
        get_compile_source_message()))
    top_env.Append(BUILDERS = {"NvccObject" : nvcc_object_bld})

    # NOTE(review): the link command carries no $SOURCES placeholder --
    # nvcc only gets '-o $TARGET'; confirm sources are appended elsewhere.
    nvcc_binary_bld = SCons.Builder.Builder(action = MakeAction(
        "%s %s $NVCCFLAGS -o $TARGET" % (nvcc_str, cuda_incs_str),
        get_link_program_message()))
    top_env.Append(NVCC=nvcc_str)
    top_env.Append(BUILDERS = {"NvccBinary" : nvcc_binary_bld})
def setup_java_builders(top_env, java_home, one_jar_boot_path):
    """Install all java-related builders on top_env.

    java_home, if non-empty, points JAVAC/JAR at that JDK's binaries.
    one_jar_boot_path is remembered in a module global for later use by
    generate_one_jar().
    """
    if java_home:
        top_env.Replace(JAVAC=os.path.join(java_home, 'bin/javac'))
        top_env.Replace(JAR=os.path.join(java_home, 'bin/jar'))

    # Jar an already-built directory tree as-is.
    blade_jar_bld = SCons.Builder.Builder(action = MakeAction(
        'jar cf $TARGET -C `dirname $SOURCE` .',
        '$JARCOMSTR'))
    top_env.Append(BUILDERS = {"BladeJar" : blade_jar_bld})

    # Scons has many bugs with generated sources file,
    # such as can't obtain class file path correctly.
    # so just build all sources to jar directly
    generated_jar_bld = SCons.Builder.Builder(action = MakeAction(
        'rm -fr ${TARGET}.classes && mkdir -p ${TARGET}.classes && '
        '$JAVAC $JAVACFLAGS $_JAVABOOTCLASSPATH $_JAVACLASSPATH -d ${TARGET}.classes $SOURCES && '
        '$JAR $JARFLAGS ${TARGET} -C ${TARGET}.classes . && '
        'rm -fr ${TARGET}.classes',
        '$JARCOMSTR'))
    top_env.Append(BUILDERS = {"GeneratedJavaJar" : generated_jar_bld})

    # Scons Java builder has bugs on detecting generated .class files
    # produced by javac: anonymous inner classes are missing in the results
    # of Java builder no matter which JAVAVERSION(1.5, 1.6) is specified
    # See: http://scons.tigris.org/issues/show_bug.cgi?id=1594
    #      http://scons.tigris.org/issues/show_bug.cgi?id=2742
    blade_java_jar_bld = SCons.Builder.Builder(action = MakeAction(
        generate_jar, '$JARCOMSTR'))
    top_env.Append(BUILDERS = {"BladeJavaJar" : blade_java_jar_bld})

    # Copies maven-layout resource files into the jar's resource dir; the
    # emitter maps each source file to its in-jar target path.
    resource_message = console.erasable('%sProcess jar resource %s$SOURCES%s%s' % ( \
        colors('cyan'), colors('purple'), colors('cyan'), colors('end')))
    java_resource_bld = SCons.Builder.Builder(
        action = MakeAction(
            process_java_resources, resource_message),
        emitter = _emit_java_resources,
        target_factory = SCons.Node.FS.Entry,
        source_factory = SCons.Node.FS.Entry)
    top_env.Append(BUILDERS = {"JavaResource" : java_resource_bld})

    # Remember the one-jar boot jar for generate_one_jar().
    global _one_jar_boot_path
    _one_jar_boot_path = one_jar_boot_path

    one_java_message = console.erasable('%sGenerating one jar %s$TARGET%s%s' % ( \
        colors('cyan'), colors('purple'), colors('cyan'), colors('end')))
    one_jar_bld = SCons.Builder.Builder(action = MakeAction(generate_one_jar,
        one_java_message))
    top_env.Append(BUILDERS = {'OneJar' : one_jar_bld})

    fat_java_message = console.inerasable('%sCreating fat jar %s$TARGET%s%s' % ( \
        colors('green'), colors('purple'), colors('green'), colors('end')))
    fat_jar_bld = SCons.Builder.Builder(action = MakeAction(generate_fat_jar,
        fat_java_message))
    top_env.Append(BUILDERS = {'FatJar' : fat_jar_bld})

    java_binary_message = console.inerasable('%sGenerating java binary %s$TARGET%s%s' % \
        (colors('green'), colors('purple'), colors('green'), colors('end')))
    java_binary_bld = SCons.Builder.Builder(action = MakeAction(
        generate_java_binary, java_binary_message))
    top_env.Append(BUILDERS = {"JavaBinary" : java_binary_bld})

    java_test_message = console.inerasable('%sGenerating java test %s$TARGET%s%s' % \
        (colors('green'), colors('purple'), colors('green'), colors('end')))
    java_test_bld = SCons.Builder.Builder(action = MakeAction(
        generate_java_test, java_test_message))
    top_env.Append(BUILDERS = {"JavaTest" : java_test_bld})
def setup_yacc_builders(top_env):
    """Install the Yacc builder, which runs bison on grammar sources."""
    message = console.erasable('%sYacc %s$SOURCE%s to $TARGET%s' % \
        (colors('cyan'), colors('purple'), colors('cyan'), colors('end')))
    top_env.Append(BUILDERS={"Yacc": SCons.Builder.Builder(
        action=MakeAction('bison $YACCFLAGS -d -o $TARGET $SOURCE', message))})
def setup_resource_builders(top_env):
    """Install ResourceIndex/ResourceFile builders used by resource_library."""
    index_msg = console.erasable('%sGenerating resource index for %s$SOURCE_PATH/$TARGET_NAME%s%s' % \
        (colors('cyan'), colors('purple'), colors('cyan'), colors('end')))
    file_msg = console.erasable('%sCompiling %s$SOURCE%s as resource file%s' % \
        (colors('cyan'), colors('purple'), colors('cyan'), colors('end')))
    top_env.Append(BUILDERS={
        "ResourceIndex": SCons.Builder.Builder(
            action=MakeAction(generate_resource_index, index_msg)),
        "ResourceFile": SCons.Builder.Builder(
            action=MakeAction(generate_resource_file, file_msg)),
    })
def setup_python_builders(top_env):
    """Install the PythonEgg, PythonLibrary and PythonBinary builders."""
    egg_msg = console.erasable('%sGenerating python egg %s$TARGET%s%s' % \
        (colors('cyan'), colors('purple'), colors('cyan'), colors('end')))
    lib_msg = console.erasable('%sGenerating python library %s$TARGET%s%s' % \
        (colors('cyan'), colors('purple'), colors('cyan'), colors('end')))
    bin_msg = console.inerasable('%sGenerating python binary %s$TARGET%s%s' % \
        (colors('cyan'), colors('purple'), colors('cyan'), colors('end')))
    top_env.Append(BUILDERS={
        "PythonEgg": SCons.Builder.Builder(
            action=MakeAction(generate_python_egg, egg_msg)),
        "PythonLibrary": SCons.Builder.Builder(
            action=MakeAction(generate_python_library, lib_msg)),
        "PythonBinary": SCons.Builder.Builder(
            action=MakeAction(generate_python_binary, bin_msg)),
    })
def setup_other_builders(top_env):
    """Install the remaining builders: yacc, resource and python."""
    for setup in (setup_yacc_builders,
                  setup_resource_builders,
                  setup_python_builders):
        setup(top_env)
def setup_swig_builders(top_env, build_dir):
    """Install the swig builders generating python/java/php wrapper sources."""
    py_msg = console.erasable('%sCompiling %s$SOURCE%s to python source%s' % \
        (colors('cyan'), colors('purple'), colors('cyan'), colors('end')))
    java_msg = console.erasable('%sCompiling %s$SOURCE%s to java source%s' % \
        (colors('cyan'), colors('purple'), colors('cyan'), colors('end')))
    php_msg = console.erasable('%sCompiling %s$SOURCE%s to php source%s' % \
        (colors('cyan'), colors('purple'), colors('cyan'), colors('end')))
    # Python
    top_env.Append(BUILDERS={"SwigPython": SCons.Builder.Builder(
        action=MakeAction(
            'swig -python -threads $SWIGPYTHONFLAGS -c++ -I%s -o $TARGET $SOURCE' % (build_dir),
            py_msg))})
    # Java
    top_env.Append(BUILDERS={'SwigJava': SCons.Builder.Builder(
        action=MakeAction(
            'swig -java $SWIGJAVAFLAGS -c++ -I%s -o $TARGET $SOURCE' % (build_dir),
            java_msg))})
    # PHP
    top_env.Append(BUILDERS={"SwigPhp": SCons.Builder.Builder(
        action=MakeAction(
            'swig -php $SWIGPHPFLAGS -c++ -I%s -o $TARGET $SOURCE' % (build_dir),
            php_msg))})
def _exec_get_version_info(cmd, cwd, dirname):
lc_all_env = os.environ
lc_all_env['LC_ALL'] = 'POSIX'
p = subprocess.Popen(cmd,
env=lc_all_env,
cwd=cwd,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
shell=True)
stdout, stderr = p.communicate()
if p.returncode:
return None
else:
return stdout.replace('\n', '\\n\\\n')
def _get_version_info(blade_root_dir, svn_roots):
    """Collect version control info for the workspace.

    Returns a dict mapping directory -> version info text.  A git checkout
    at the workspace root wins; otherwise each svn root is queried via svn,
    falling back to git metadata inside the directory.
    """
    svn_info_map = {}
    if os.path.exists("%s/.git" % blade_root_dir):
        dirname = os.path.dirname(blade_root_dir)
        version_info = _exec_get_version_info("git log -n 1", None, dirname)
        if version_info:
            svn_info_map[dirname] = version_info
        return svn_info_map
    for root_dir in svn_roots:
        root_dir_realpath = os.path.realpath(root_dir)
        # Ask svn first, from the parent of the checkout.
        version_info = _exec_get_version_info(
            'svn info %s' % os.path.basename(root_dir_realpath),
            os.path.dirname(root_dir_realpath),
            root_dir)
        if not version_info:
            # Fall back to git metadata inside the directory itself.
            version_info = _exec_get_version_info(
                'git ls-remote --get-url && git branch | grep "*" && git log -n 1',
                root_dir_realpath,
                root_dir)
        if not version_info:
            console.warning('failed to get version control info in %s' % root_dir)
            continue
        svn_info_map[root_dir] = version_info
    return svn_info_map
def generate_version_file(top_env, blade_root_dir, build_dir,
                          profile, gcc_version, svn_roots):
    """Generate version information files.

    Writes build_dir/version.cpp defining the binary_version symbols
    (VCS info, build type/time, builder name, host, compiler) and returns
    the SharedObject node compiled from it.
    """
    svn_info_map = _get_version_info(blade_root_dir, svn_roots)
    svn_info_len = len(svn_info_map)
    if not os.path.exists(build_dir):
        os.makedirs(build_dir)
    filename = '%s/version.cpp' % build_dir
    version_cpp = open(filename, 'w')
    # NOTE: python 2 print-to-file syntax is used throughout this function.
    print >>version_cpp, '/* This file was generated by blade */'
    print >>version_cpp, 'extern "C" {'
    print >>version_cpp, 'namespace binary_version {'
    print >>version_cpp, 'extern const int kSvnInfoCount = %d;' % svn_info_len
    # Build the C array initializer from the collected version strings.
    svn_info_array = '{'
    for idx in range(svn_info_len):
        key_with_idx = svn_info_map.keys()[idx]  # py2: keys() returns a list
        svn_info_line = '"%s"' % svn_info_map[key_with_idx]
        svn_info_array += svn_info_line
        if idx != (svn_info_len - 1):
            svn_info_array += ','
    svn_info_array += '}'
    print >>version_cpp, 'extern const char* const kSvnInfo[%d] = %s;' % (
        svn_info_len, svn_info_array)
    print >>version_cpp, 'extern const char kBuildType[] = "%s";' % profile
    print >>version_cpp, 'extern const char kBuildTime[] = "%s";' % time.asctime()
    print >>version_cpp, 'extern const char kBuilderName[] = "%s";' % os.getenv('USER')
    print >>version_cpp, (
        'extern const char kHostName[] = "%s";' % socket.gethostname())
    compiler = 'GCC %s' % gcc_version
    print >>version_cpp, 'extern const char kCompiler[] = "%s";' % compiler
    print >>version_cpp, '}}'
    version_cpp.close()
    # Compile version.cpp with a short status line instead of the full
    # compiler command.
    env_version = top_env.Clone()
    env_version.Replace(SHCXXCOMSTR=console.erasable(
        '%sUpdating version information%s' % (
            colors('cyan'), colors('end'))))
    return env_version.SharedObject(filename)
# More comments
# Copyright (c) 2011 Tencent Inc.
# All rights reserved.
#
# Author: Huan Yu <huanyu@tencent.com>
# Feng Chen <phongchen@tencent.com>
# Yi Wang <yiwang@tencent.com>
# Chong Peng <michaelpeng@tencent.com>
# Wenting Li <wentingli@tencent.com>
# Date: October 20, 2011
"""
This is the scons rules helper module which should be
imported by Scons script
"""
import os
import py_compile
import shutil
import signal
import socket
import stat
import string
import subprocess
import sys
import tempfile
import time
import zipfile
import glob
import SCons
import SCons.Action
import SCons.Builder
import SCons.Scanner
import SCons.Scanner.Prog
import blade_util
import console
from console import colors
# option_verbose to indicate print verbose or not
option_verbose = False
# linking tmp dir
linking_tmp_dir = ''
def generate_python_egg(target, source, env):
    """Builder action: build a python egg from the given sources.

    Source layout: an optional setup.py first, then the package's
    __init__.py, then the remaining dependency sources.  The package tree
    is copied under the target directory and `python setup.py bdist_egg`
    is run there.  Returns 0 on success, the failing subprocess return
    code otherwise (console.error_exit normally aborts first).
    """
    setup_file = ''
    if not str(source[0]).endswith('setup.py'):
        console.warning('setup.py not existed to generate target %s, '
                        'blade will generate a default one for you' %
                        str(target[0]))
    else:
        setup_file = str(source[0])
    init_file = ''
    source_index = 2
    if not setup_file:
        # No setup.py was provided: source[0] is the package __init__.py.
        source_index = 1
        init_file = str(source[0])
    else:
        init_file = str(source[1])

    init_file_dir = os.path.dirname(init_file)

    dep_source_list = []
    for s in source[source_index:]:
        dep_source_list.append(str(s))

    target_file = str(target[0])
    target_file_dir_list = target_file.split('/')
    target_profile = target_file_dir_list[0]  # e.g. the build dir component
    target_dir = '/'.join(target_file_dir_list[0:-1])

    if not os.path.exists(target_dir):
        os.makedirs(target_dir)

    if setup_file:
        shutil.copyfile(setup_file, os.path.join(target_dir, 'setup.py'))
    else:
        target_name = os.path.basename(init_file_dir)
        if not target_name:
            console.error_exit('invalid package for target %s' % str(target[0]))
        # generate default setup.py for user
        setup_str = """
#!/usr/bin/env python
# This file was generated by blade
from setuptools import find_packages, setup
setup(
    name='%s',
    version='0.1.0',
    packages=find_packages(),
    zip_safe=True
)
""" % target_name
        default_setup_file = open(os.path.join(target_dir, 'setup.py'), 'w')
        default_setup_file.write(setup_str)
        default_setup_file.close()

    package_dir = os.path.join(target_profile, init_file_dir)
    if os.path.exists(package_dir):
        # Remove any stale copy before re-copying the package tree.
        shutil.rmtree(package_dir, ignore_errors=True)

    cmd = 'cp -r %s %s' % (init_file_dir, target_dir)
    p = subprocess.Popen(
        cmd,
        env={},
        stdout=subprocess.PIPE,
        stderr=subprocess.PIPE,
        shell=True,
        universal_newlines=True)
    std_out, std_err = p.communicate()
    if p.returncode:
        console.info(std_out)
        console.info(std_err)
        console.error_exit('failed to copy source files from %s to %s' % (
            init_file_dir, target_dir))
        return p.returncode

    # copy file to package_dir
    for f in dep_source_list:
        dep_file_basename = os.path.basename(f)
        dep_file_dir = os.path.dirname(f)
        sub_dir = ''
        sub_dir_list = dep_file_dir.split('/')
        if len(sub_dir_list) > 1:
            # Path of the file relative to the package root.
            sub_dir = '/'.join(dep_file_dir.split('/')[1:])
        if sub_dir:
            package_sub_dir = os.path.join(package_dir, sub_dir)
            if not os.path.exists(package_sub_dir):
                os.makedirs(package_sub_dir)
            # Each sub-package needs an __init__.py to be importable.
            sub_init_file = os.path.join(package_sub_dir, '__init__.py')
            if not os.path.exists(sub_init_file):
                sub_f = open(sub_init_file, 'w')
                sub_f.close()
            shutil.copyfile(f, os.path.join(package_sub_dir, dep_file_basename))

    make_egg_cmd = 'python setup.py bdist_egg'
    p = subprocess.Popen(
        make_egg_cmd,
        env={},
        cwd=target_dir,
        stdout=subprocess.PIPE,
        stderr=subprocess.PIPE,
        shell=True,
        universal_newlines=True)
    std_out, std_err = p.communicate()
    if p.returncode:
        console.info(std_out)
        console.info(std_err)
        console.error_exit('failed to generate python egg in %s' % target_dir)
        return p.returncode
    return 0
def _compile_python(src, build_dir):
if src.startswith(build_dir):
pyc = src + 'c'
else:
pyc = os.path.join(build_dir, src) + 'c'
py_compile.compile(src, pyc)
return pyc
def generate_python_library(target, source, env):
    """Write a .pylib manifest: the base dir plus the list of sources,
    byte-compiling each source into the build dir as a side effect."""
    manifest = open(str(target[0]), 'w')
    build_dir = env['BUILD_DIR']
    srcs = []
    for s in source:
        src = str(s)
        _compile_python(src, build_dir)
        srcs.append(src)
    data = dict()
    data['base_dir'] = env.get('BASE_DIR', '')
    data['srcs'] = srcs
    # The manifest is the repr of a dict, read back via eval() by
    # generate_python_binary.
    manifest.write(str(data))
    manifest.close()
    return None
def _update_init_py_dirs(arcname, dirs, dirs_with_init_py):
dir = os.path.dirname(arcname)
if os.path.basename(arcname) == '__init__.py':
dirs_with_init_py.add(dir)
while dir:
dirs.add(dir)
dir = os.path.dirname(dir)
def generate_python_binary(target, source, env):
    """The action for generate python executable file.

    The target is a zip of all python sources with a shell bootstrap
    prepended; unzip skips the bootstrap because it searches forward for
    the zip magic number.
    """
    target_name = str(target[0])
    build_dir = env['BUILD_DIR']
    target_file = zipfile.ZipFile(target_name, 'w', zipfile.ZIP_DEFLATED)
    dirs = set()                # every directory seen inside the archive
    dirs_with_init_py = set()   # directories that already ship an __init__.py
    for s in source:
        src = str(s)
        if src.endswith('.pylib'):
            # A .pylib is the dict-literal manifest written by
            # generate_python_library ('base_dir' and 'srcs' keys).
            libfile = open(src)
            data = eval(libfile.read())  # NOTE(review): eval of a build artifact; trusted input assumed
            libfile.close()
            base_dir = data['base_dir']
            for libsrc in data['srcs']:
                arcname = os.path.relpath(libsrc, base_dir)
                _update_init_py_dirs(arcname, dirs, dirs_with_init_py)
                target_file.write(libsrc, arcname)
        else:
            _compile_python(src, build_dir)
            _update_init_py_dirs(src, dirs, dirs_with_init_py)
            target_file.write(src)
    # insert __init__.py into each dir if missing
    dirs_missing_init_py = dirs - dirs_with_init_py
    for dir in dirs_missing_init_py:
        target_file.writestr(os.path.join(dir, '__init__.py'), '')
    target_file.writestr('__init__.py', '')
    target_file.close()
    target_file = open(target_name, 'rb')
    zip_content = target_file.read()
    target_file.close()
    # Insert bootstrap before zip, it is also a valid zip file.
    # unzip will seek actually start until meet the zip magic number.
    entry = env['ENTRY']
    bootstrap = (
        '#!/bin/sh\n'
        '\n'
        'PYTHONPATH="$0:$PYTHONPATH" exec python -m "%s" "$@"\n') % entry
    target_file = open(target_name, 'wb')
    target_file.write(bootstrap)
    target_file.write(zip_content)
    target_file.close()
    os.chmod(target_name, 0775)  # rwxrwxr-x (python 2 octal literal)
    return None
def generate_resource_index(target, source, env):
    """Builder action: emit the C index (source + header pair) listing all
    resource files compiled into a resource_library target."""
    res_source_path = str(target[0])
    res_header_path = str(target[1])
    if not os.path.exists(os.path.dirname(res_header_path)):
        os.mkdir(os.path.dirname(res_header_path))
    h = open(res_header_path, 'w')
    c = open(res_source_path, 'w')
    source_path = env["SOURCE_PATH"]
    full_name = blade_util.regular_variable_name("%s/%s" % (source_path, env["TARGET_NAME"]))
    guard_name = 'BLADE_RESOURCE_%s_H' % full_name.upper()
    # NOTE: python 2 print-to-file syntax is used throughout this function.
    print >>h, '#ifndef %s\n#define %s' % (guard_name, guard_name)
    print >>h, '''
// This file was automatically generated by blade
#ifdef __cplusplus
extern "C" {
#endif
#ifndef BLADE_RESOURCE_TYPE_DEFINED
#define BLADE_RESOURCE_TYPE_DEFINED
struct BladeResourceEntry {
const char* name;
const char* data;
unsigned int size;
};
#endif
'''
    res_index_name = 'RESOURCE_INDEX_%s' % full_name
    print >>c, '// This file was automatically generated by blade\n'
    print >>c, '#include "%s"\n' % res_header_path
    print >>c, 'const struct BladeResourceEntry %s[] = {' % res_index_name
    for s in source:
        src = str(s)
        var_name = blade_util.regular_variable_name(src)
        org_src = blade_util.relative_path(src, source_path)
        # One extern declaration pair per resource in the header...
        print >>h, '// %s' % org_src
        print >>h, 'extern const char RESOURCE_%s[%d];' % (var_name, s.get_size())
        print >>h, 'extern const unsigned RESOURCE_%s_len;\n' % var_name
        # ...and one entry in the index array in the source file.
        print >>c, ' { "%s", RESOURCE_%s, %s },' % (org_src, var_name, s.get_size())
    print >>c, '};'
    print >>c, 'const unsigned %s_len = %s;' % (res_index_name, len(source))
    print >>h, '// Resource index'
    print >>h, 'extern const struct BladeResourceEntry %s[];' % res_index_name
    print >>h, 'extern const unsigned %s_len;' % res_index_name
    print >>h, '\n#ifdef __cplusplus\n} // extern "C"\n#endif\n'
    print >>h, '\n#endif // %s' % guard_name
    c.close()
    h.close()
    return None
def generate_resource_file(target, source, env):
    """Generate resource source file in resource_library."""
    src_path = str(source[0])
    new_src_path = str(target[0])
    # xxd emits a C byte array; sed renames its symbols with a RESOURCE_
    # prefix so the generated index can reference them.
    cmd = ('xxd -i %s | sed -e "s/^unsigned char /const char RESOURCE_/g" '
           '-e "s/^unsigned int /const unsigned int RESOURCE_/g"> %s') % (
               src_path, new_src_path)
    p = subprocess.Popen(cmd,
                         env={},
                         stdout=subprocess.PIPE,
                         stderr=subprocess.PIPE,
                         shell=True,
                         universal_newlines=True)
    _, stderr = p.communicate()
    # Any stderr output is treated as failure, not only a bad exit code.
    if p.returncode or stderr:
        error = 'failed to generate resource file'
        if stderr:
            error = error + ': ' + stderr
        console.error_exit(error)
    return p.returncode
def _java_resource_file_target_path(path):
""" Return relative target path in target dir, see
https://maven.apache.org/guides/introduction/introduction-to-the-standard-directory-layout.html
for the rules
"""
path = str(path)
segs = [
'/src/main/resources/',
'/src/test/resources/',
'/resources/',
]
for seg in segs:
pos = path.find(seg)
if pos != -1:
return path[pos + len(seg):]
return ''
# emitter function is used by scons builder to determine target files from
# source files.
def _emit_java_resources(target, source, env):
    """Create and return lists of source resource files
    and their corresponding target resource files.

    target[0] is the class directory node; each source entry is either a
    File (used as-is) or a Dir (walked recursively for plain files).
    """
    target[0].must_be_same(SCons.Node.FS.Dir)
    classdir = target[0]
    # Expand the source list into individual file nodes.
    slist = []
    for entry in source:
        entry = entry.rentry().disambiguate()
        if isinstance(entry, SCons.Node.FS.File):
            slist.append(entry)
        elif isinstance(entry, SCons.Node.FS.Dir):
            result = SCons.Util.OrderedDict()
            dirnode = entry.rdir()
            def find_files(arg, dirpath, filenames):
                # Collect File nodes for every regular file under dirpath.
                mydir = dirnode.Dir(dirpath)
                for name in filenames:
                    if os.path.isfile(os.path.join(str(dirpath), name)):
                        arg[mydir.File(name)] = True
            for dirpath, dirnames, filenames in os.walk(dirnode.get_abspath()):
                find_files(result, dirpath, filenames)
            # NOTE(review): entry.walk() traverses the directory again after
            # the os.walk() loop above; the OrderedDict de-duplicates the
            # entries, but confirm whether both traversals are intended.
            entry.walk(find_files, result)
            slist.extend(list(result.keys()))
        else:
            raise SCons.Errors.UserError("Java resource must be File or Dir, not '%s'" % entry.__class__)
    # Map each source file to its target path inside the class dir per the
    # maven standard directory layout; files outside the layout are skipped.
    tlist = []
    for f in slist:
        target_path = _java_resource_file_target_path(f.rfile().get_abspath())
        if target_path:
            d = target[0]
            t = d.File(target_path)
            # Remember the destination dir and relative path for the action.
            t.attributes.java_classdir = classdir
            f.attributes.target_path = target_path
            t.set_specific_source([f])
            tlist.append(t)
        else:
            console.warning('java resource file "%s" does not match any '
                            'resource path pattern of maven standard directory '
                            'layout, ignored. \nsee '
                            'https://maven.apache.org/guides/introduction/introduction-to-the-standard-directory-layout.html' % f)
    return tlist, slist
def process_java_resources(target, source, env):
    """Copy every source resource file into the jar's resources directory,
    at the target path recorded by the emitter."""
    dest_dir = str(target[0].attributes.java_classdir)
    for src in source:
        destination = os.path.join(dest_dir, src.attributes.target_path)
        shutil.copy2(str(src), destination)
    return None
def _generate_jar(target, sources, resources, env):
    """Generate a jar containing the sources and resources.

    Class files are expected under <target>.classes and resources under
    <target>.resources.  Returns the jar command's exit status.
    """
    classes_dir = target.replace('.jar', '.classes')
    resources_dir = target.replace('.jar', '.resources')
    cmd = []
    cmd.append('%s cf %s' % (env['JAR'], target))
    jar_path_set = set()  # relative paths already queued, avoids duplicates
    if os.path.exists(classes_dir):
        for source in sources:
            if not source.endswith('.class'):
                continue
            # Add the source from sources produced by Java builder
            # no matter it's a normal class or inner class
            jar_path = os.path.relpath(source, classes_dir)
            if jar_path not in jar_path_set:
                jar_path_set.add(jar_path)
            if '$' not in source:
                # Also pick up inner classes (Foo$Bar.class) compiled from
                # this class, which the Java builder fails to report.
                inner_classes = glob.glob(source[:-6] + '$*.class')
                for inner_class in inner_classes:
                    # Only take inner classes at least as new as the outer
                    # class, i.e. produced by the same compilation.
                    if os.path.getmtime(inner_class) >= os.path.getmtime(source):
                        jar_path = os.path.relpath(inner_class, classes_dir)
                        if jar_path not in jar_path_set:
                            jar_path_set.add(jar_path)
        for path in jar_path_set:
            # Add quotes for file names with $
            cmd.append("-C '%s' '%s'" % (classes_dir, path))
    if os.path.exists(resources_dir):
        for resource in resources:
            cmd.append("-C '%s' '%s'" % (resources_dir,
                                         os.path.relpath(resource, resources_dir)))
    cmd = ' '.join(cmd)
    global option_verbose
    if option_verbose:
        print cmd  # python 2 print statement
    p = subprocess.Popen(cmd, env=os.environ, shell=True)
    p.communicate()
    return p.returncode
def generate_jar(target, source, env):
    """Builder action: split source into the leading run of .class files
    and the trailing resources, then jar them together."""
    jar = str(target[0])
    classes = []
    split_at = 0
    for src in source:
        if not str(src).endswith('.class'):
            break
        classes.append(str(src))
        split_at += 1
    resources = [str(src) for src in source[split_at:]]
    return _generate_jar(jar, classes, resources, env)
_one_jar_boot_path = None
def _generate_one_jar(target,
main_class,
main_jar,
deps_jar,
one_jar_boot_path):
target_dir = os.path.dirname(target)
if not os.path.exists(target_dir):
os.makedirs(target_dir)
target_one_jar = zipfile.ZipFile(target, 'w')
# Copy files from one-jar-boot.jar to the target jar
zip_file = zipfile.ZipFile(one_jar_boot_path, 'r')
name_list = zip_file.namelist()
for name in name_list:
if not name.lower().endswith('manifest.mf'): # Exclude manifest
target_one_jar.writestr(name, zip_file.read(name))
zip_file.close()
# Main jar and dependencies
target_one_jar.write(main_jar, os.path.join('main',
os.path.basename(main_jar)))
for dep in deps_jar:
dep_name = os.path.basename(dep)
target_one_jar.write(dep, os.path.join('lib', dep_name))
# Manifest
# Note that the manifest file must end with a new line or carriage return
target_one_jar.writestr(os.path.join('META-INF', 'MANIFEST.MF'),
'''Manifest-Version: 1.0
Main-Class: com.simontuffs.onejar.Boot
One-Jar-Main-Class: %s
''' % main_class)
target_one_jar.close()
return None
def generate_one_jar(target, source, env):
    """Builder action: source[0] is the main class name file, source[1] the
    main jar, the rest are dependency jars; writes a one-jar at target[0]."""
    if len(source) < 2:
        console.error_exit('Failed to generate java binary from %s: '
                           'Source should at least contain main class '
                           'and main jar' % ','.join(str(s) for s in source))
    main_class = str(source[0])
    main_jar = str(source[1])
    deps_jar = [str(dep) for dep in source[2:]]
    return _generate_one_jar(str(target[0]), main_class, main_jar, deps_jar,
                             _one_jar_boot_path)
def _generate_fat_jar(target, deps_jar):
"""Generate a fat jar containing the contents of all the jar dependencies. """
target_dir = os.path.dirname(target)
if not os.path.exists(target_dir):
os.makedirs(target_dir)
target_fat_jar = zipfile.ZipFile(target, 'w')
manifest = os.path.join('META-INF', 'MANIFEST.MF')
first_jar = True
# Record paths written in the fat jar to avoid duplicate writing
zip_path_set = set()
for jar in deps_jar:
jar = zipfile.ZipFile(jar, 'r')
name_list = jar.namelist()
for name in name_list:
if name.upper() == manifest:
# Use the MANIFEST file of the first jar
if first_jar:
target_fat_jar.writestr(name, jar.read(name))
first_jar = False
else:
if name not in zip_path_set:
target_fat_jar.writestr(name, jar.read(name))
zip_path_set.add(name)
jar.close()
target_fat_jar.close()
return None
def generate_fat_jar(target, source, env):
    """Builder action: merge every source jar into one fat jar at target[0]."""
    return _generate_fat_jar(str(target[0]), [str(dep) for dep in source])
def _generate_java_binary(target_name, onejar_path, jvm_flags, run_args):
    """generate a wrapper shell script to run jar

    The wrapper resolves the one-jar relative to its own location, so the
    script and the jar must be deployed side by side.
    """
    onejar_name = os.path.basename(onejar_path)
    target_file = open(target_name, 'w')
    target_file.write(
"""#!/bin/sh
# Auto generated wrapper shell script by blade
# *.one.jar must be in same dir
jar=`dirname "$0"`/"%s"
exec java %s -jar "$jar" %s $@
""" % (onejar_name, jvm_flags, run_args))
    os.chmod(target_name, 0755)  # rwxr-xr-x (python 2 octal literal)
    target_file.close()
    return None
def generate_java_binary(target, source, env):
    """Builder action: write a wrapper shell script that runs the one-jar
    at source[0], with no extra jvm flags or arguments."""
    return _generate_java_binary(str(target[0]), str(source[0]), '', '')
def _get_all_test_class_names_in_jar(jar):
"""Returns a list of test class names in the jar file. """
test_class_names = []
zip_file = zipfile.ZipFile(jar, 'r')
name_list = zip_file.namelist()
for name in name_list:
basename = os.path.basename(name)
# Exclude inner class and Test.class
if (basename.endswith('Test.class') and
len(basename) > len('Test.class') and
not '$' in basename):
class_name = name.replace('/', '.')[:-6] # Remove .class suffix
test_class_names.append(class_name)
zip_file.close()
return test_class_names
def generate_java_test(target, source, env):
    """Builder action: write a wrapper script that runs every *Test class
    found in the jars source[1:] through the one-jar at source[0]."""
    test_classes = []
    for jar in source[1:]:
        test_classes += _get_all_test_class_names_in_jar(str(jar))
    return _generate_java_binary(str(target[0]), str(source[0]), '',
                                 ' '.join(test_classes))
def MakeAction(cmd, cmdstr):
    """Wrap cmd in a SCons Action; in verbose mode the raw command is
    echoed instead of the short status message cmdstr."""
    global option_verbose
    if option_verbose:
        return SCons.Action.Action(cmd)
    return SCons.Action.Action(cmd, cmdstr)
# Substrings that mark a compiler/linker output line as an error or warning.
_ERRORS = [': error:', ': fatal error:', ': undefined reference to',
           ': cannot find ', ': ld returned 1 exit status',
           ' is not defined'
          ]
_WARNINGS = [': warning:', ': note: ', '] Warning: ']


def error_colorize(message):
    """Colorize tool output line by line: errors red, warnings yellow,
    clang column indicators green, everything else cyan."""
    pieces = []
    for line in message.splitlines(True):  # keepends
        color = 'cyan'
        # For clang column indicator, such as '^~~~~~'
        if line.strip().startswith('^'):
            color = 'green'
        else:
            if any(w in line for w in _WARNINGS):
                color = 'yellow'
            # An error marker wins over a warning marker on the same line.
            if any(w in line for w in _ERRORS):
                color = 'red'
        pieces.append(console.colors(color))
        pieces.append(line)
        pieces.append(console.colors('end'))
    return console.inerasable(''.join(pieces))
def _colored_echo(stdout, stderr):
    """Write colorized tool output to the matching standard streams."""
    for text, stream in ((stdout, sys.stdout), (stderr, sys.stderr)):
        if text:
            stream.write(error_colorize(text))
def echospawn(sh, escape, cmd, args, env):
    """SCons SPAWN replacement: run the command through a shell, capture
    its output, and echo it colorized."""
    # convert env from unicode strings
    asciienv = {}
    for key, value in env.iteritems():
        asciienv[key] = str(value)
    p = subprocess.Popen(' '.join(args),
                         env=asciienv,
                         stderr=subprocess.PIPE,
                         stdout=subprocess.PIPE,
                         shell=True,
                         universal_newlines=True)
    stdout, stderr = p.communicate()
    # Echo the (error or warning) output unless the build was interrupted.
    # Equivalent to the original's two identical branches: every return
    # code except -SIGINT gets its output echoed.
    if p.returncode != -signal.SIGINT:
        _colored_echo(stdout, stderr)
    return p.returncode
def _blade_action_postfunc(closing_message):
    """To do post jobs if blade's own actions failed to build.

    Prints the closing message and flushes scons' signature database so
    the next run knows what was (not) built.
    """
    console.info(closing_message)
    # Remember to write the dblite incase of re-linking once fail to
    # build last time. We should elaborate a way to avoid rebuilding
    # after failure of our own builders or actions.
    SCons.SConsign.write()
def _fast_link_helper(target, source, env, link_com):
    """fast link helper function.

    Links into a unique temporary file under the tmpfs directory and then
    moves the result into place, so the linker's random I/O hits memory
    instead of the (possibly distributed) build file system.
    """
    target_file = str(target[0])
    prefix_str = 'blade_%s' % target_file.replace('/', '_').replace('.', '_')
    fd, temporary_file = tempfile.mkstemp(suffix='xianxian',
                                          prefix=prefix_str,
                                          dir=linking_tmp_dir)
    os.close(fd)
    sources = []
    for s in source:
        sources.append(str(s))
    # link_com is a string.Template with $FL_TARGET/$FL_SOURCE placeholders
    # standing in for scons' $TARGET/$SOURCES.
    link_com_str = link_com.substitute(
        FL_TARGET=temporary_file,
        FL_SOURCE=' '.join(sources))
    p = subprocess.Popen(
        link_com_str,
        env=os.environ,
        stdout=subprocess.PIPE,
        stderr=subprocess.PIPE,
        shell=True,
        universal_newlines=True)
    std_out, std_err = p.communicate()
    if std_out:
        print std_out
    if std_err:
        print std_err
    if p.returncode == 0:
        shutil.move(temporary_file, target_file)
        if not os.path.exists(target_file):
            console.warning('failed to genreate %s in link on tmpfs mode' % target_file)
    else:
        # Flush scons' sconsign so a later run re-links after the failure.
        _blade_action_postfunc('failed while fast linking')
    return p.returncode
def fast_link_sharelib_action(target, source, env):
    """Link a shared library on tmpfs, then move it into place."""
    # $SHLINK -o $TARGET $SHLINKFLAGS $__RPATH $SOURCES $_LIBDIRFLAGS $_LIBFLAGS
    parts = tuple(env.subst(v) for v in (
        '$SHLINK', '$SHLINKFLAGS', '$__RPATH', '$_LIBDIRFLAGS', '$_LIBFLAGS'))
    link_com = string.Template('%s -o $FL_TARGET %s %s $FL_SOURCE %s %s' % parts)
    return _fast_link_helper(target, source, env, link_com)
def fast_link_prog_action(target, source, env):
    """Link a program on tmpfs, then move it into place."""
    # $LINK -o $TARGET $LINKFLAGS $__RPATH $SOURCES $_LIBDIRFLAGS $_LIBFLAGS
    parts = tuple(env.subst(v) for v in (
        '$LINK', '$LINKFLAGS', '$__RPATH', '$_LIBDIRFLAGS', '$_LIBFLAGS'))
    link_com = string.Template('%s -o $FL_TARGET %s %s $FL_SOURCE %s %s' % parts)
    return _fast_link_helper(target, source, env, link_com)
def setup_fast_link_prog_builder(top_env):
    """Replace the Program builder with one that links on tmpfs.

    Used when blade output is placed on a distributed file system whose
    random-access writes would otherwise dominate the linker's run time.
    """
    program = SCons.Builder.Builder(
        action=MakeAction(fast_link_prog_action, '$LINKCOMSTR'),
        emitter='$PROGEMITTER',
        prefix='$PROGPREFIX',
        suffix='$PROGSUFFIX',
        src_suffix='$OBJSUFFIX',
        src_builder='Object',
        target_scanner=SCons.Scanner.Prog.ProgramScanner())
    top_env['BUILDERS']['Program'] = program
def setup_fast_link_sharelib_builder(top_env):
    """Replace the SharedLibrary builder with one that links on tmpfs.

    Same rationale as setup_fast_link_prog_builder: keep the linker's
    random I/O off slow distributed storage.
    """
    actions = [
        SCons.Defaults.SharedCheck,
        MakeAction(fast_link_sharelib_action, '$SHLINKCOMSTR'),
    ]
    sharedlib = SCons.Builder.Builder(
        action=actions,
        emitter='$SHLIBEMITTER',
        prefix='$SHLIBPREFIX',
        suffix='$SHLIBSUFFIX',
        target_scanner=SCons.Scanner.Prog.ProgramScanner(),
        src_suffix='$SHOBJSUFFIX',
        src_builder='SharedObject')
    top_env['BUILDERS']['SharedLibrary'] = sharedlib
def setup_fast_link_builders(top_env):
    """Creates fast link builders - Program and SharedLibrary. """
    # Check requirement: a mounted tmpfs with enough free space.
    acquire_temp_place = "df | grep tmpfs | awk '{print $5, $6}'"
    p = subprocess.Popen(acquire_temp_place,
                         env=os.environ,
                         stdout=subprocess.PIPE,
                         stderr=subprocess.PIPE,
                         shell=True,
                         universal_newlines=True)
    stdout, stderr = p.communicate()
    if p.returncode:
        # Do not try to overwrite builder with error
        console.warning('you have link on tmp enabled, but it is not fullfilled to make it.')
        return
    if not stdout:
        # No tmpfs to do fastlink, will not overwrite the builder
        console.warning('you have link on tmp enabled, but there is no tmpfs to make it.')
        return
    # Use the first tmpfs mount found ("use% mountpoint" per line).
    global linking_tmp_dir
    usage, linking_tmp_dir = tuple(stdout.splitlines(False)[0].split())
    usage = int(usage.replace('%', ''))
    if usage > 90:
        # Do not try to do that if there is no memory space left
        console.warning('you have link on tmp enabled, '
                        'but there is not enough space on %s to make it.' %
                        linking_tmp_dir)
        return
    console.info('building in link on tmpfs mode')
    setup_fast_link_sharelib_builder(top_env)
    setup_fast_link_prog_builder(top_env)
def make_top_env(build_dir):
    """Create and configure the top-level SCons environment object."""
    # Force a stable locale so tool output is predictable to parse.
    os.environ['LC_ALL'] = 'C'
    env = SCons.Environment.Environment(ENV=os.environ)
    # Optimization options, see http://www.scons.org/wiki/GoFastButton
    env.Decider('MD5-timestamp')
    env.SetOption('implicit_cache', 1)
    env.SetOption('max_drift', 1)
    env.VariantDir(build_dir, '.', duplicate=0)
    return env
def get_compile_source_message():
    """Return the erasable colored status line shown while compiling."""
    cyan, purple, end = colors('cyan'), colors('purple'), colors('end')
    return console.erasable('%sCompiling %s$SOURCE%s%s' % (cyan, purple, cyan, end))
def get_link_program_message():
    """Return the sticky colored status line shown while linking a program."""
    green, purple, end = colors('green'), colors('purple'), colors('end')
    return console.inerasable('%sLinking Program %s$TARGET%s%s' % (green, purple, green, end))
def setup_compliation_verbose(top_env, color_enabled, verbose):
    """Generates color and verbose message. """
    console.color_enabled = color_enabled
    if not verbose:
        # Quiet mode goes through echospawn to suppress raw command echo.
        top_env["SPAWN"] = echospawn
    compile_source_message = get_compile_source_message()
    link_program_message = get_link_program_message()
    assembling_source_message = console.erasable('%sAssembling %s$SOURCE%s%s' % (
        colors('cyan'), colors('purple'), colors('cyan'), colors('end')))
    link_library_message = console.inerasable('%sCreating Static Library %s$TARGET%s%s' % (
        colors('green'), colors('purple'), colors('green'), colors('end')))
    ranlib_library_message = console.inerasable('%sRanlib Library %s$TARGET%s%s' % (
        colors('green'), colors('purple'), colors('green'), colors('end')))
    link_shared_library_message = console.inerasable('%sLinking Shared Library %s$TARGET%s%s' % (
        colors('green'), colors('purple'), colors('green'), colors('end')))
    jar_message = console.inerasable('%sCreating Jar %s$TARGET%s%s' % (
        colors('green'), colors('purple'), colors('green'), colors('end')))
    if verbose:
        return
    # Quiet mode: replace each raw tool command line with a friendly message.
    top_env.Append(
        CXXCOMSTR=compile_source_message,
        CCCOMSTR=compile_source_message,
        ASCOMSTR=assembling_source_message,
        SHCCCOMSTR=compile_source_message,
        SHCXXCOMSTR=compile_source_message,
        ARCOMSTR=link_library_message,
        RANLIBCOMSTR=ranlib_library_message,
        SHLINKCOMSTR=link_shared_library_message,
        LINKCOMSTR=link_program_message,
        JAVACCOMSTR=compile_source_message,
        JARCOMSTR=jar_message,
        LEXCOMSTR=compile_source_message)
def setup_proto_builders(top_env, build_dir, protoc_bin, protobuf_path,
                         protobuf_incs_str,
                         protoc_php_plugin, protobuf_php_path):
    """Register protoc code-generation builders for C++, Java, PHP and Python.

    Generated sources land under `build_dir`, mirroring the source tree.
    `protobuf_incs_str` is assumed to be a pre-built string of -I flags
    (TODO confirm against caller).
    """
    compile_proto_cc_message = console.erasable('%sCompiling %s$SOURCE%s to cc source%s' % \
        (colors('cyan'), colors('purple'), colors('cyan'), colors('end')))
    compile_proto_java_message = console.erasable('%sCompiling %s$SOURCE%s to java source%s' % \
        (colors('cyan'), colors('purple'), colors('cyan'), colors('end')))
    compile_proto_php_message = console.erasable('%sCompiling %s$SOURCE%s to php source%s' % \
        (colors('cyan'), colors('purple'), colors('cyan'), colors('end')))
    compile_proto_python_message = console.erasable('%sCompiling %s$SOURCE%s to python source%s' % \
        (colors('cyan'), colors('purple'), colors('cyan'), colors('end')))
    # C++ output goes below build_dir.
    proto_bld = SCons.Builder.Builder(action = MakeAction(
        "%s --proto_path=. -I. %s -I=`dirname $SOURCE` --cpp_out=%s $SOURCE" % (
            protoc_bin, protobuf_incs_str, build_dir),
        compile_proto_cc_message))
    top_env.Append(BUILDERS = {"Proto" : proto_bld})
    # Java output is placed next to the source dir, below build_dir.
    proto_java_bld = SCons.Builder.Builder(action = MakeAction(
        "%s --proto_path=. --proto_path=%s --java_out=%s/`dirname $SOURCE` $SOURCE" % (
            protoc_bin, protobuf_path, build_dir),
        compile_proto_java_message))
    top_env.Append(BUILDERS = {"ProtoJava" : proto_java_bld})
    # PHP generation needs the external protoc-gen-php plugin.
    proto_php_bld = SCons.Builder.Builder(action = MakeAction(
        "%s --proto_path=. --plugin=protoc-gen-php=%s -I. %s -I%s -I=`dirname $SOURCE` --php_out=%s/`dirname $SOURCE` $SOURCE" % (
            protoc_bin, protoc_php_plugin, protobuf_incs_str, protobuf_php_path, build_dir),
        compile_proto_php_message))
    top_env.Append(BUILDERS = {"ProtoPhp" : proto_php_bld})
    proto_python_bld = SCons.Builder.Builder(action = MakeAction(
        "%s --proto_path=. -I. %s -I=`dirname $SOURCE` --python_out=%s $SOURCE" % (
            protoc_bin, protobuf_incs_str, build_dir),
        compile_proto_python_message))
    top_env.Append(BUILDERS = {"ProtoPython" : proto_python_bld})
def setup_thrift_builders(top_env, build_dir, thrift_bin, thrift_incs_str):
    """Register Apache Thrift codegen builders for C++, Java and Python."""
    compile_thrift_cc_message = console.erasable('%sCompiling %s$SOURCE%s to cc source%s' % \
        (colors('cyan'), colors('purple'), colors('cyan'), colors('end')))
    compile_thrift_java_message = console.erasable('%sCompiling %s$SOURCE%s to java source%s' % \
        (colors('cyan'), colors('purple'), colors('cyan'), colors('end')))
    compile_thrift_python_message = console.erasable( '%sCompiling %s$SOURCE%s to python source%s' % \
        (colors('cyan'), colors('purple'), colors('cyan'), colors('end')))
    # C++: include_prefix keeps generated #include paths source-root relative.
    thrift_bld = SCons.Builder.Builder(action = MakeAction(
        '%s --gen cpp:include_prefix,pure_enums -I . %s -I `dirname $SOURCE`'
        ' -out %s/`dirname $SOURCE` $SOURCE' % (
            thrift_bin, thrift_incs_str, build_dir),
        compile_thrift_cc_message))
    top_env.Append(BUILDERS = {"Thrift" : thrift_bld})
    thrift_java_bld = SCons.Builder.Builder(action = MakeAction(
        "%s --gen java -I . %s -I `dirname $SOURCE` -out %s/`dirname $SOURCE` $SOURCE" % (
            thrift_bin, thrift_incs_str, build_dir),
        compile_thrift_java_message))
    top_env.Append(BUILDERS = {"ThriftJava" : thrift_java_bld})
    thrift_python_bld = SCons.Builder.Builder(action = MakeAction(
        "%s --gen py -I . %s -I `dirname $SOURCE` -out %s/`dirname $SOURCE` $SOURCE" % (
            thrift_bin, thrift_incs_str, build_dir),
        compile_thrift_python_message))
    top_env.Append(BUILDERS = {"ThriftPython" : thrift_python_bld})
def setup_fbthrift_builders(top_env, build_dir, fbthrift1_bin, fbthrift2_bin, fbthrift_incs_str):
    """Register Facebook Thrift builders (thrift1 'cpp' and thrift2 'cpp2')."""
    compile_fbthrift_cpp_message = console.erasable('%sCompiling %s$SOURCE%s to cpp source%s' % \
        (colors('cyan'), colors('purple'), colors('cyan'), colors('end')))
    compile_fbthrift_cpp2_message = console.erasable('%sCompiling %s$SOURCE%s to cpp2 source%s' % \
        (colors('cyan'), colors('purple'), colors('cyan'), colors('end')))
    # thrift1: classic cpp generator with cob-style callbacks.
    fbthrift1_bld = SCons.Builder.Builder(action = MakeAction(
        '%s --gen cpp:templates,cob_style,include_prefix,enum_strict -I . %s -I `dirname $SOURCE`'
        ' -o %s/`dirname $SOURCE` $SOURCE' % (
            fbthrift1_bin, fbthrift_incs_str, build_dir),
        compile_fbthrift_cpp_message))
    top_env.Append(BUILDERS = {"FBThrift1" : fbthrift1_bld})
    # thrift2: cpp2 generator with futures support.
    fbthrift2_bld = SCons.Builder.Builder(action = MakeAction(
        '%s --gen=cpp2:cob_style,include_prefix,future -I . %s -I `dirname $SOURCE` '
        '-o %s/`dirname $SOURCE` $SOURCE' % (
            fbthrift2_bin, fbthrift_incs_str, build_dir),
        compile_fbthrift_cpp2_message))
    top_env.Append(BUILDERS = {"FBThrift2" : fbthrift2_bld})
def setup_cuda_builders(top_env, nvcc_str, cuda_incs_str):
    """Register NvccObject/NvccBinary builders for CUDA compilation."""
    top_env.Append(NVCC=nvcc_str)
    object_builder = SCons.Builder.Builder(action=MakeAction(
        "%s -ccbin g++ %s $NVCCFLAGS -o $TARGET -c $SOURCE" % (nvcc_str, cuda_incs_str),
        get_compile_source_message()))
    binary_builder = SCons.Builder.Builder(action=MakeAction(
        "%s %s $NVCCFLAGS -o $TARGET" % (nvcc_str, cuda_incs_str),
        get_link_program_message()))
    top_env.Append(BUILDERS={"NvccObject": object_builder})
    top_env.Append(BUILDERS={"NvccBinary": binary_builder})
def setup_java_builders(top_env, java_home, one_jar_boot_path):
    """Register all Java builders (jar, one-jar, fat-jar, binary, test).

    When `java_home` is set, javac/jar are pinned to that JDK.
    `one_jar_boot_path` is stashed in the module global `_one_jar_boot_path`
    for the OneJar action to read.
    """
    if java_home:
        top_env.Replace(JAVAC=os.path.join(java_home, 'bin/javac'))
        top_env.Replace(JAR=os.path.join(java_home, 'bin/jar'))
    # Pack an existing class directory into a jar.
    blade_jar_bld = SCons.Builder.Builder(action = MakeAction(
        'jar cf $TARGET -C `dirname $SOURCE` .',
        '$JARCOMSTR'))
    top_env.Append(BUILDERS = {"BladeJar" : blade_jar_bld})
    # Scons has many bugs with generated sources file,
    # such as can't obtain class file path correctly.
    # so just build all sources to jar directly
    generated_jar_bld = SCons.Builder.Builder(action = MakeAction(
        'rm -fr ${TARGET}.classes && mkdir -p ${TARGET}.classes && '
        '$JAVAC $JAVACFLAGS $_JAVABOOTCLASSPATH $_JAVACLASSPATH -d ${TARGET}.classes $SOURCES && '
        '$JAR $JARFLAGS ${TARGET} -C ${TARGET}.classes . && '
        'rm -fr ${TARGET}.classes',
        '$JARCOMSTR'))
    top_env.Append(BUILDERS = {"GeneratedJavaJar" : generated_jar_bld})
    # Scons Java builder has bugs on detecting generated .class files
    # produced by javac: anonymous inner classes are missing in the results
    # of Java builder no matter which JAVAVERSION(1.5, 1.6) is specified
    # See: http://scons.tigris.org/issues/show_bug.cgi?id=1594
    #      http://scons.tigris.org/issues/show_bug.cgi?id=2742
    blade_java_jar_bld = SCons.Builder.Builder(action = MakeAction(
        generate_jar, '$JARCOMSTR'))
    top_env.Append(BUILDERS = {"BladeJavaJar" : blade_java_jar_bld})
    resource_message = console.erasable('%sProcess jar resource %s$SOURCES%s%s' % ( \
        colors('cyan'), colors('purple'), colors('cyan'), colors('end')))
    # Entry factories let targets/sources be plain dirs as well as files.
    java_resource_bld = SCons.Builder.Builder(
        action = MakeAction(
            process_java_resources, resource_message),
        emitter = _emit_java_resources,
        target_factory = SCons.Node.FS.Entry,
        source_factory = SCons.Node.FS.Entry)
    top_env.Append(BUILDERS = {"JavaResource" : java_resource_bld})
    # Remember the one-jar boot jar for generate_one_jar.
    global _one_jar_boot_path
    _one_jar_boot_path = one_jar_boot_path
    one_java_message = console.erasable('%sGenerating one jar %s$TARGET%s%s' % ( \
        colors('cyan'), colors('purple'), colors('cyan'), colors('end')))
    one_jar_bld = SCons.Builder.Builder(action = MakeAction(generate_one_jar,
        one_java_message))
    top_env.Append(BUILDERS = {'OneJar' : one_jar_bld})
    fat_java_message = console.inerasable('%sCreating fat jar %s$TARGET%s%s' % ( \
        colors('green'), colors('purple'), colors('green'), colors('end')))
    fat_jar_bld = SCons.Builder.Builder(action = MakeAction(generate_fat_jar,
        fat_java_message))
    top_env.Append(BUILDERS = {'FatJar' : fat_jar_bld})
    java_binary_message = console.inerasable('%sGenerating java binary %s$TARGET%s%s' % \
        (colors('green'), colors('purple'), colors('green'), colors('end')))
    java_binary_bld = SCons.Builder.Builder(action = MakeAction(
        generate_java_binary, java_binary_message))
    top_env.Append(BUILDERS = {"JavaBinary" : java_binary_bld})
    java_test_message = console.inerasable('%sGenerating java test %s$TARGET%s%s' % \
        (colors('green'), colors('purple'), colors('green'), colors('end')))
    java_test_bld = SCons.Builder.Builder(action = MakeAction(
        generate_java_test, java_test_message))
    top_env.Append(BUILDERS = {"JavaTest" : java_test_bld})
def setup_yacc_builders(top_env):
    """Register the Yacc builder (bison, with -d to also emit a header)."""
    message = console.erasable('%sYacc %s$SOURCE%s to $TARGET%s' % \
        (colors('cyan'), colors('purple'), colors('cyan'), colors('end')))
    builder = SCons.Builder.Builder(action=MakeAction(
        'bison $YACCFLAGS -d -o $TARGET $SOURCE', message))
    top_env.Append(BUILDERS={"Yacc": builder})
def setup_resource_builders(top_env):
    """Register builders that embed data files as linkable resources."""
    index_message = console.erasable('%sGenerating resource index for %s$SOURCE_PATH/$TARGET_NAME%s%s' % \
        (colors('cyan'), colors('purple'), colors('cyan'), colors('end')))
    file_message = console.erasable('%sCompiling %s$SOURCE%s as resource file%s' % \
        (colors('cyan'), colors('purple'), colors('cyan'), colors('end')))
    index_builder = SCons.Builder.Builder(action=MakeAction(generate_resource_index,
                                                            index_message))
    file_builder = SCons.Builder.Builder(action=MakeAction(generate_resource_file,
                                                           file_message))
    top_env.Append(BUILDERS={"ResourceIndex": index_builder})
    top_env.Append(BUILDERS={"ResourceFile": file_builder})
def setup_python_builders(top_env):
    """Register PythonEgg, PythonLibrary and PythonBinary builders."""
    specs = (
        ('PythonEgg', generate_python_egg, console.erasable(
            '%sGenerating python egg %s$TARGET%s%s' % (
                colors('cyan'), colors('purple'), colors('cyan'), colors('end')))),
        ('PythonLibrary', generate_python_library, console.erasable(
            '%sGenerating python library %s$TARGET%s%s' % (
                colors('cyan'), colors('purple'), colors('cyan'), colors('end')))),
        ('PythonBinary', generate_python_binary, console.inerasable(
            '%sGenerating python binary %s$TARGET%s%s' % (
                colors('cyan'), colors('purple'), colors('cyan'), colors('end')))),
    )
    for name, action, message in specs:
        builder = SCons.Builder.Builder(action=MakeAction(action, message))
        top_env.Append(BUILDERS={name: builder})
def setup_other_builders(top_env):
    """Register the remaining builders: yacc, resources and python."""
    for setup in (setup_yacc_builders, setup_resource_builders,
                  setup_python_builders):
        setup(top_env)
def setup_swig_builders(top_env, build_dir):
    """Register swig builders wrapping C++ for Python, Java and PHP."""
    py_message = console.erasable('%sCompiling %s$SOURCE%s to python source%s' % \
        (colors('cyan'), colors('purple'), colors('cyan'), colors('end')))
    java_message = console.erasable('%sCompiling %s$SOURCE%s to java source%s' % \
        (colors('cyan'), colors('purple'), colors('cyan'), colors('end')))
    php_message = console.erasable('%sCompiling %s$SOURCE%s to php source%s' % \
        (colors('cyan'), colors('purple'), colors('cyan'), colors('end')))
    # Python wrapper (with thread support enabled).
    py_builder = SCons.Builder.Builder(action=MakeAction(
        'swig -python -threads $SWIGPYTHONFLAGS -c++ -I%s -o $TARGET $SOURCE' % (build_dir),
        py_message))
    top_env.Append(BUILDERS={"SwigPython" : py_builder})
    # Java wrapper.
    java_builder = SCons.Builder.Builder(action=MakeAction(
        'swig -java $SWIGJAVAFLAGS -c++ -I%s -o $TARGET $SOURCE' % (build_dir),
        java_message))
    top_env.Append(BUILDERS={'SwigJava' : java_builder})
    # PHP wrapper.
    php_builder = SCons.Builder.Builder(action=MakeAction(
        'swig -php $SWIGPHPFLAGS -c++ -I%s -o $TARGET $SOURCE' % (build_dir),
        php_message))
    top_env.Append(BUILDERS={"SwigPhp" : php_builder})
def _exec_get_version_info(cmd, cwd, dirname):
lc_all_env = os.environ
lc_all_env['LC_ALL'] = 'POSIX'
p = subprocess.Popen(cmd,
env=lc_all_env,
cwd=cwd,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
shell=True)
stdout, stderr = p.communicate()
if p.returncode:
return None
else:
return stdout.replace('\n', '\\n\\\n')
def _get_version_info(blade_root_dir, svn_roots):
    """Gets svn root dir info. 

    Maps a VCS root path to a printable version string.  A git checkout
    at the blade root is tried first; otherwise each `svn_roots` entry is
    probed with `svn info`, falling back to git commands.
    """
    svn_info_map = {}
    if os.path.exists("%s/.git" % blade_root_dir):
        cmd = "git log -n 1"
        dirname = os.path.dirname(blade_root_dir)
        version_info = _exec_get_version_info(cmd, None, dirname)
        if version_info:
            svn_info_map[dirname] = version_info
            # NOTE(review): returns immediately, skipping svn_roots --
            # presumably intentional when blade itself lives in git; confirm.
            return svn_info_map
    for root_dir in svn_roots:
        root_dir_realpath = os.path.realpath(root_dir)
        svn_working_dir = os.path.dirname(root_dir_realpath)
        svn_dir = os.path.basename(root_dir_realpath)
        cmd = 'svn info %s' % svn_dir
        cwd = svn_working_dir
        version_info = _exec_get_version_info(cmd, cwd, root_dir)
        if not version_info:
            # Not an svn checkout: try git metadata (remote, branch, HEAD).
            cmd = 'git ls-remote --get-url && git branch | grep "*" && git log -n 1'
            cwd = root_dir_realpath
            version_info = _exec_get_version_info(cmd, cwd, root_dir)
            if not version_info:
                console.warning('failed to get version control info in %s' % root_dir)
                continue
        svn_info_map[root_dir] = version_info
    return svn_info_map
def generate_version_file(top_env, blade_root_dir, build_dir,
                          profile, gcc_version, svn_roots):
    """Generate version information files. 

    Writes <build_dir>/version.cpp embedding VCS info, build type, build
    time, user, host and compiler, and returns its SharedObject node.
    NOTE: Python 2 only -- uses `print >>` and indexes dict.keys().
    """
    svn_info_map = _get_version_info(blade_root_dir, svn_roots)
    svn_info_len = len(svn_info_map)
    if not os.path.exists(build_dir):
        os.makedirs(build_dir)
    filename = '%s/version.cpp' % build_dir
    version_cpp = open(filename, 'w')
    print >>version_cpp, '/* This file was generated by blade */'
    print >>version_cpp, 'extern "C" {'
    print >>version_cpp, 'namespace binary_version {'
    print >>version_cpp, 'extern const int kSvnInfoCount = %d;' % svn_info_len
    # Build a C array initializer: {"info1","info2",...}
    svn_info_array = '{'
    for idx in range(svn_info_len):
        key_with_idx = svn_info_map.keys()[idx]
        svn_info_line = '"%s"' % svn_info_map[key_with_idx]
        svn_info_array += svn_info_line
        if idx != (svn_info_len - 1):
            svn_info_array += ','
    svn_info_array += '}'
    print >>version_cpp, 'extern const char* const kSvnInfo[%d] = %s;' % (
        svn_info_len, svn_info_array)
    print >>version_cpp, 'extern const char kBuildType[] = "%s";' % profile
    print >>version_cpp, 'extern const char kBuildTime[] = "%s";' % time.asctime()
    print >>version_cpp, 'extern const char kBuilderName[] = "%s";' % os.getenv('USER')
    print >>version_cpp, (
        'extern const char kHostName[] = "%s";' % socket.gethostname())
    compiler = 'GCC %s' % gcc_version
    print >>version_cpp, 'extern const char kCompiler[] = "%s";' % compiler
    print >>version_cpp, '}}'
    version_cpp.close()
    # Compile version.cpp quietly, with a short status line instead of the
    # full compiler command.
    env_version = top_env.Clone()
    env_version.Replace(SHCXXCOMSTR=console.erasable(
        '%sUpdating version information%s' % (
            colors('cyan'), colors('end'))))
    return env_version.SharedObject(filename)
|
#!/usr/bin/env python
######################################################
#
# howdoi - instant coding answers via the command line
# written by Benjamin Gleitzman (gleitz@mit.edu)
# inspired by Rich Jones (rich@anomos.info)
#
######################################################
from __future__ import print_function
import gc
gc.disable() # disable right at the start, we don't need it
import argparse
import os
import appdirs
import re
from cachelib import FileSystemCache, NullCache
import requests
import sys
from . import __version__
from pygments import highlight
from pygments.lexers import guess_lexer, get_lexer_by_name
from pygments.formatters.terminal import TerminalFormatter
from pygments.util import ClassNotFound
from pyquery import PyQuery as pq
from requests.exceptions import ConnectionError
from requests.exceptions import SSLError
# Handle imports for Python 2 and 3
# (lexicographic compare: '2.x...' < '3' is True only on Python 2)
if sys.version < '3':
    import codecs
    from urllib import quote as url_quote
    from urllib import getproxies

    # Handling Unicode: http://stackoverflow.com/a/6633040/305414
    def u(x):
        # Interpret backslash escapes (e.g. '\u2605') into a unicode object.
        return codecs.unicode_escape_decode(x)[0]
else:
    from urllib.request import getproxies
    from urllib.parse import quote as url_quote

    # Python 3 strings are already unicode; u() is the identity.
    def u(x):
        return x
# rudimentary standardized 3-level log output
_print_err = lambda x: print("[ERROR] " + x)
_print_ok = print  # plain output, no prefix
_print_dbg = lambda x: print("[DEBUG] " + x)

# HOWDOI_DISABLE_SSL downgrades every request to plain HTTP and disables
# certificate verification.
if os.getenv('HOWDOI_DISABLE_SSL'):  # Set http instead of https
    SCHEME = 'http://'
    VERIFY_SSL_CERTIFICATE = False
else:
    SCHEME = 'https://'
    VERIFY_SSL_CERTIFICATE = True

# Site to restrict search results to (overridable via HOWDOI_URL).
URL = os.getenv('HOWDOI_URL') or 'stackoverflow.com'

# Pool of desktop browser user agents; one is picked at random per request.
USER_AGENTS = ('Mozilla/5.0 (Macintosh; Intel Mac OS X 10.7; rv:11.0) Gecko/20100101 Firefox/11.0',
               'Mozilla/5.0 (X11; Ubuntu; Linux x86_64; rv:22.0) Gecko/20100 101 Firefox/22.0',
               'Mozilla/5.0 (Windows NT 6.1; rv:11.0) Gecko/20100101 Firefox/11.0',
               ('Mozilla/5.0 (Macintosh; Intel Mac OS X 10_7_4) AppleWebKit/536.5 (KHTML, like Gecko) '
                'Chrome/19.0.1084.46 Safari/536.5'),
               ('Mozilla/5.0 (Windows; Windows NT 6.1) AppleWebKit/536.5 (KHTML, like Gecko) Chrome/19.0.1084.46'
                'Safari/536.5'), )

# Search URL templates: {0} = site (URL above), {1} = quoted query.
SEARCH_URLS = {
    'bing': SCHEME + 'www.bing.com/search?q=site:{0}%20{1}',
    'google': SCHEME + 'www.google.com/search?q=site:{0}%20{1}'
}

# Substrings identifying a captcha / abuse-detection block page.
BLOCK_INDICATORS = (
    'form id="captcha-form"',
    'This page appears when Google automatically detects requests coming from your computer network which appear to be in violation of the <a href="//www.google.com/policies/terms/">Terms of Service'
)

STAR_HEADER = u('\u2605')  # BLACK STAR, frames answer headers
ANSWER_HEADER = u('{2} Answer from {0} {2}\n{1}')
NO_ANSWER_MSG = '< no answer given >'

# Cache settings; CACHE_EMPTY_VAL is a sentinel for a remembered "no result".
CACHE_EMPTY_VAL = "NULL"
CACHE_DIR = appdirs.user_cache_dir('howdoi')
CACHE_ENTRY_MAX = 128

if os.getenv('HOWDOI_DISABLE_CACHE'):
    cache = NullCache()  # works like an always empty cache, cleaner than 'if cache:' everywhere
else:
    cache = FileSystemCache(CACHE_DIR, CACHE_ENTRY_MAX, default_timeout=0)

# Shared HTTP session so connections are reused across requests.
howdoi_session = requests.session()
class BlockError(Exception):
    """Raised when the search engine serves a block page (e.g. captcha)."""
def _random_int(width):
bres = os.urandom(width)
if sys.version < '3':
ires = int(bres.encode('hex'), 16)
else:
ires = int.from_bytes(bres, 'little')
return ires
def _random_choice(seq):
    """Pick a random element of `seq` using one urandom byte."""
    index = _random_int(1) % len(seq)
    return seq[index]
def get_proxies():
    """Return system http(s) proxies, normalizing bare host:port values."""
    filtered = {}
    for key, value in getproxies().items():
        if not key.startswith('http'):
            continue
        if value.startswith('http'):
            filtered[key] = value
        else:
            filtered[key] = 'http://%s' % value
    return filtered
def _get_result(url):
    """Fetch `url` and return the response body text.

    On SSL failure, prints a hint about HOWDOI_DISABLE_SSL and re-raises.
    """
    headers = {'User-Agent': _random_choice(USER_AGENTS)}
    try:
        response = howdoi_session.get(url, headers=headers,
                                      proxies=get_proxies(),
                                      verify=VERIFY_SSL_CERTIFICATE)
        return response.text
    except requests.exceptions.SSLError as e:
        _print_err('Encountered an SSL Error. Try using HTTP instead of '
                   'HTTPS by setting the environment variable "HOWDOI_DISABLE_SSL".\n')
        raise e
def _add_links_to_text(element):
    """Rewrite each <a> inside `element` as markdown-style `[text](href)`.

    Anchors whose text equals their href are left as the bare text.
    """
    for anchor in element.find('a'):
        wrapped = pq(anchor)
        href = anchor.attrib['href']
        text = wrapped.text()
        replacement = text if text == href else "[{0}]({1})".format(text, href)
        wrapped.replace_with(replacement)
def get_text(element):
    ''' return inner text in pyquery element, with links as markdown '''
    _add_links_to_text(element)
    try:
        # squash_space is unsupported by older pyquery versions (TypeError).
        return element.text(squash_space=False)
    except TypeError:
        return element.text()
def _extract_links_from_bing(html):
    """Pull result hrefs out of a Bing results page."""
    html.remove_namespaces()
    anchors = html('.b_algo')('h2')('a')
    return [anchor.attrib['href'] for anchor in anchors]
def _extract_links_from_google(html):
    """Pull result hrefs out of a Google results page (either layout)."""
    primary = [a.attrib['href'] for a in html('.l')]
    if primary:
        return primary
    return [a.attrib['href'] for a in html('.r')('a')]
def _extract_links(html, search_engine):
    """Dispatch result-link extraction based on the search engine."""
    extractor = (_extract_links_from_bing if search_engine == 'bing'
                 else _extract_links_from_google)
    return extractor(html)
def _get_search_url(search_engine):
    """Return the URL template for `search_engine`, defaulting to Google."""
    try:
        return SEARCH_URLS[search_engine]
    except KeyError:
        return SEARCH_URLS['google']
def _detect_block(page):
    """Return True when `page` looks like a captcha/abuse block page."""
    return any(indicator in page for indicator in BLOCK_INDICATORS)
def _get_links(query):
    """Search for `query` and return the raw candidate result links.

    Raises BlockError when the search engine serves a block page.
    """
    engine = os.getenv('HOWDOI_SEARCH_ENGINE', 'google')
    result = _get_result(_get_search_url(engine).format(URL, url_quote(query)))
    if _detect_block(result):
        _print_err('Unable to find an answer because the search engine temporarily blocked the request. '
                   'Please wait a few minutes or select a different search engine.')
        raise BlockError("Temporary block by search engine")
    return _extract_links(pq(result), engine)
def get_link_at_pos(links, position):
    """Return the 1-based `position`-th link, clamped to the last one.

    Returns False when `links` is empty.
    """
    if not links:
        return False
    return links[min(position, len(links)) - 1]
def _format_output(code, args):
    """Syntax-highlight `code` for the terminal when --color is enabled."""
    if not args['color']:
        return code
    # Try the query words and StackOverflow tags as lexer names first.
    for keyword in args['query'].split() + args['tags']:
        try:
            return highlight(code, get_lexer_by_name(keyword),
                             TerminalFormatter(bg='dark'))
        except ClassNotFound:
            pass
    # No lexer by name: fall back to content-based guessing.
    try:
        lexer = guess_lexer(code)
    except ClassNotFound:
        return code
    return highlight(code, lexer, TerminalFormatter(bg='dark'))
def _is_question(link):
return re.search(r'questions/\d+/', link)
def _get_questions(links):
    """Keep only links that point at question pages."""
    return list(filter(_is_question, links))
def _get_answer(args, links):
    """Fetch and render the answer at position args['pos'] from `links`.

    Returns False when no link is available, the bare link when --link
    is set, otherwise the rendered answer text.
    Side effect: sets args['tags'] to the question's tag names.
    """
    link = get_link_at_pos(links, args['pos'])
    if not link:
        return False
    if args.get('link'):
        return link
    cache_key = link
    page = cache.get(link)
    if not page:
        # answertab=votes sorts answers so .answer eq(0) is the top-voted one.
        page = _get_result(link + '?answertab=votes')
        cache.set(cache_key, page)
    html = pq(page)
    first_answer = html('.answer').eq(0)
    # Prefer code blocks; fall back below to the whole post text.
    instructions = first_answer.find('pre') or first_answer.find('code')
    args['tags'] = [t.text for t in html('.post-tag')]
    if not instructions and not args['all']:
        text = get_text(first_answer.find('.post-text').eq(0))
    elif args['all']:
        # Full answer: walk every child of the post, highlighting code parts.
        texts = []
        for html_tag in first_answer.items('.post-text > *'):
            current_text = get_text(html_tag)
            if current_text:
                if html_tag[0].tag in ['pre', 'code']:
                    texts.append(_format_output(current_text, args))
                else:
                    texts.append(current_text)
        text = '\n'.join(texts)
    else:
        # Default: just the first code block.
        text = _format_output(get_text(instructions.eq(0)), args)
    if text is None:
        text = NO_ANSWER_MSG
    text = text.strip()
    return text
def _get_links_with_cache(query):
    """Return question links for `query`, memoized in the cache.

    The cache stores CACHE_EMPTY_VAL as a sentinel for "no results" so
    negative lookups are remembered too; it is translated back to False.
    """
    cache_key = query + "-links"
    res = cache.get(cache_key)
    if res:
        if res == CACHE_EMPTY_VAL:
            res = False
        return res
    question_links = _get_questions(_get_links(query))
    # Fix: the old code wrote CACHE_EMPTY_VAL twice when the search came
    # back empty; a single write of the final value is equivalent.
    cache.set(cache_key, question_links or CACHE_EMPTY_VAL)
    return question_links
def _get_instructions(args):
    """Collect args['num_answers'] answers starting at args['pos'].

    Returns False when no question links are found, otherwise the
    answers joined by a horizontal rule separator.
    """
    question_links = _get_links_with_cache(args['query'])
    if not question_links:
        return False
    only_hyperlinks = args.get('link')
    star_headers = (args['num_answers'] > 1 or args['all'])
    answers = []
    initial_position = args['pos']
    separator = '\n' + '=' * 80 + '\n\n'
    for offset in range(args['num_answers']):
        current_position = initial_position + offset
        args['pos'] = current_position  # _get_answer reads the position from args
        link = get_link_at_pos(question_links, current_position)
        answer = _get_answer(args, question_links)
        if not answer:
            continue
        if not only_hyperlinks:
            answer = format_answer(link, answer, star_headers)
        answers.append(answer + '\n')
    return separator.join(answers)
def format_answer(link, answer, star_headers):
    """Prefix `answer` with a starred source header when requested."""
    return ANSWER_HEADER.format(link, answer, STAR_HEADER) if star_headers else answer
def _clear_cache():
    """Wipe the answer cache, creating the backend first if needed."""
    global cache
    if not cache:
        cache = FileSystemCache(CACHE_DIR, CACHE_ENTRY_MAX, 0)
    # Returns the backend's success value.
    return cache.clear()
def howdoi(args):
    """Answer the parsed query in `args`, caching the rendered result."""
    args['query'] = ' '.join(args['query']).replace('?', '')
    cache_key = str(args)
    cached = cache.get(cache_key)
    if cached:
        return cached
    try:
        res = _get_instructions(args)
        if not res:
            res = 'Sorry, couldn\'t find any help with that topic\n'
        cache.set(cache_key, res)
        return res
    except (ConnectionError, SSLError):
        return 'Failed to establish network connection\n'
def get_parser():
    """Build the argparse parser for the howdoi command line."""
    parser = argparse.ArgumentParser(
        description='instant coding answers via the command line')
    parser.add_argument('query', metavar='QUERY', type=str, nargs='*',
                        help='the question to answer')
    parser.add_argument('-p', '--pos', type=int, default=1,
                        help='select answer in specified position (default: 1)')
    parser.add_argument('-a', '--all', action='store_true',
                        help='display the full text of the answer')
    parser.add_argument('-l', '--link', action='store_true',
                        help='display only the answer link')
    parser.add_argument('-c', '--color', action='store_true',
                        help='enable colorized output')
    parser.add_argument('-n', '--num-answers', type=int, default=1,
                        help='number of answers to return')
    parser.add_argument('-C', '--clear-cache', action='store_true',
                        help='clear the cache')
    parser.add_argument('-v', '--version', action='store_true',
                        help='displays the current version of howdoi')
    return parser
def command_line_runner():
    """CLI entry point: parse argv, handle meta flags, print the answer."""
    parser = get_parser()
    args = vars(parser.parse_args())
    if args['version']:
        _print_ok(__version__)
        return
    if args['clear_cache']:
        if _clear_cache():
            _print_ok('Cache cleared successfully')
        else:
            _print_err('Clearing cache failed')
        return
    if not args['query']:
        parser.print_help()
        return
    # HOWDOI_COLORIZE forces colored output even without -c.
    if os.getenv('HOWDOI_COLORIZE'):
        args['color'] = True
    encoded_answer = howdoi(args).encode('utf-8', 'ignore')
    if sys.version < '3':
        print(encoded_answer)
    else:
        # Write UTF-8 to stdout: https://stackoverflow.com/a/3603160
        sys.stdout.buffer.write(encoded_answer)
    # close the session to release connection
    howdoi_session.close()
# Allow running this module directly as a script.
if __name__ == '__main__':
    command_line_runner()
# --- revision note: "exception cosmetics" -- the following copy changes BlockError's base class from Exception to RuntimeError ---
#!/usr/bin/env python
######################################################
#
# howdoi - instant coding answers via the command line
# written by Benjamin Gleitzman (gleitz@mit.edu)
# inspired by Rich Jones (rich@anomos.info)
#
######################################################
from __future__ import print_function
import gc
gc.disable() # disable right at the start, we don't need it
import argparse
import os
import appdirs
import re
from cachelib import FileSystemCache, NullCache
import requests
import sys
from . import __version__
from pygments import highlight
from pygments.lexers import guess_lexer, get_lexer_by_name
from pygments.formatters.terminal import TerminalFormatter
from pygments.util import ClassNotFound
from pyquery import PyQuery as pq
from requests.exceptions import ConnectionError
from requests.exceptions import SSLError
# Handle imports for Python 2 and 3
if sys.version < '3':
import codecs
from urllib import quote as url_quote
from urllib import getproxies
# Handling Unicode: http://stackoverflow.com/a/6633040/305414
def u(x):
return codecs.unicode_escape_decode(x)[0]
else:
from urllib.request import getproxies
from urllib.parse import quote as url_quote
def u(x):
return x
# rudimentary standardized 3-level log output
_print_err = lambda x: print("[ERROR] " + x)
_print_ok = print  # plain output, no prefix
_print_dbg = lambda x: print("[DEBUG] " + x)

# HOWDOI_DISABLE_SSL downgrades every request to plain HTTP and disables
# certificate verification.
if os.getenv('HOWDOI_DISABLE_SSL'):  # Set http instead of https
    SCHEME = 'http://'
    VERIFY_SSL_CERTIFICATE = False
else:
    SCHEME = 'https://'
    VERIFY_SSL_CERTIFICATE = True

# Site to restrict search results to (overridable via HOWDOI_URL).
URL = os.getenv('HOWDOI_URL') or 'stackoverflow.com'

# Pool of desktop browser user agents; one is picked at random per request.
USER_AGENTS = ('Mozilla/5.0 (Macintosh; Intel Mac OS X 10.7; rv:11.0) Gecko/20100101 Firefox/11.0',
               'Mozilla/5.0 (X11; Ubuntu; Linux x86_64; rv:22.0) Gecko/20100 101 Firefox/22.0',
               'Mozilla/5.0 (Windows NT 6.1; rv:11.0) Gecko/20100101 Firefox/11.0',
               ('Mozilla/5.0 (Macintosh; Intel Mac OS X 10_7_4) AppleWebKit/536.5 (KHTML, like Gecko) '
                'Chrome/19.0.1084.46 Safari/536.5'),
               ('Mozilla/5.0 (Windows; Windows NT 6.1) AppleWebKit/536.5 (KHTML, like Gecko) Chrome/19.0.1084.46'
                'Safari/536.5'), )

# Search URL templates: {0} = site (URL above), {1} = quoted query.
SEARCH_URLS = {
    'bing': SCHEME + 'www.bing.com/search?q=site:{0}%20{1}',
    'google': SCHEME + 'www.google.com/search?q=site:{0}%20{1}'
}

# Substrings identifying a captcha / abuse-detection block page.
BLOCK_INDICATORS = (
    'form id="captcha-form"',
    'This page appears when Google automatically detects requests coming from your computer network which appear to be in violation of the <a href="//www.google.com/policies/terms/">Terms of Service'
)

STAR_HEADER = u('\u2605')  # BLACK STAR, frames answer headers
ANSWER_HEADER = u('{2} Answer from {0} {2}\n{1}')
NO_ANSWER_MSG = '< no answer given >'

# Cache settings; CACHE_EMPTY_VAL is a sentinel for a remembered "no result".
CACHE_EMPTY_VAL = "NULL"
CACHE_DIR = appdirs.user_cache_dir('howdoi')
CACHE_ENTRY_MAX = 128

if os.getenv('HOWDOI_DISABLE_CACHE'):
    cache = NullCache()  # works like an always empty cache, cleaner than 'if cache:' everywhere
else:
    cache = FileSystemCache(CACHE_DIR, CACHE_ENTRY_MAX, default_timeout=0)

# Shared HTTP session so connections are reused across requests.
howdoi_session = requests.session()
class BlockError(RuntimeError):
    """Raised when the search engine serves a block page (e.g. captcha)."""
def _random_int(width):
bres = os.urandom(width)
if sys.version < '3':
ires = int(bres.encode('hex'), 16)
else:
ires = int.from_bytes(bres, 'little')
return ires
def _random_choice(seq):
    """Pick a random element of `seq` using one urandom byte."""
    index = _random_int(1) % len(seq)
    return seq[index]
def get_proxies():
    """Return system http(s) proxies, normalizing bare host:port values."""
    filtered = {}
    for key, value in getproxies().items():
        if not key.startswith('http'):
            continue
        if value.startswith('http'):
            filtered[key] = value
        else:
            filtered[key] = 'http://%s' % value
    return filtered
def _get_result(url):
    """Fetch `url` and return the response body text.

    On SSL failure, prints a hint about HOWDOI_DISABLE_SSL and re-raises.
    """
    headers = {'User-Agent': _random_choice(USER_AGENTS)}
    try:
        response = howdoi_session.get(url, headers=headers,
                                      proxies=get_proxies(),
                                      verify=VERIFY_SSL_CERTIFICATE)
        return response.text
    except requests.exceptions.SSLError as e:
        _print_err('Encountered an SSL Error. Try using HTTP instead of '
                   'HTTPS by setting the environment variable "HOWDOI_DISABLE_SSL".\n')
        raise e
def _add_links_to_text(element):
    """Rewrite each <a> inside `element` as markdown-style `[text](href)`.

    Anchors whose text equals their href are left as the bare text.
    """
    for anchor in element.find('a'):
        wrapped = pq(anchor)
        href = anchor.attrib['href']
        text = wrapped.text()
        replacement = text if text == href else "[{0}]({1})".format(text, href)
        wrapped.replace_with(replacement)
def get_text(element):
    ''' return inner text in pyquery element, with links as markdown '''
    _add_links_to_text(element)
    try:
        # squash_space is unsupported by older pyquery versions (TypeError).
        return element.text(squash_space=False)
    except TypeError:
        return element.text()
def _extract_links_from_bing(html):
    """Pull result hrefs out of a Bing results page."""
    html.remove_namespaces()
    anchors = html('.b_algo')('h2')('a')
    return [anchor.attrib['href'] for anchor in anchors]
def _extract_links_from_google(html):
    """Pull result hrefs out of a Google results page (either layout)."""
    primary = [a.attrib['href'] for a in html('.l')]
    if primary:
        return primary
    return [a.attrib['href'] for a in html('.r')('a')]
def _extract_links(html, search_engine):
    """Dispatch result-link extraction based on the search engine."""
    extractor = (_extract_links_from_bing if search_engine == 'bing'
                 else _extract_links_from_google)
    return extractor(html)
def _get_search_url(search_engine):
    """Return the URL template for `search_engine`, defaulting to Google."""
    try:
        return SEARCH_URLS[search_engine]
    except KeyError:
        return SEARCH_URLS['google']
def _detect_block(page):
    """Return True when `page` looks like a captcha/abuse block page."""
    return any(indicator in page for indicator in BLOCK_INDICATORS)
def _get_links(query):
    """Search the web for *query* and return the raw result links.

    The engine is chosen via HOWDOI_SEARCH_ENGINE (default 'google').
    Raises BlockError when the engine served a block/captcha page.
    """
    search_engine = os.getenv('HOWDOI_SEARCH_ENGINE', 'google')
    search_url = _get_search_url(search_engine)
    result = _get_result(search_url.format(URL, url_quote(query)))
    if _detect_block(result):
        _print_err('Unable to find an answer because the search engine temporarily blocked the request. '
                   'Please wait a few minutes or select a different search engine.')
        raise BlockError("Temporary block by search engine")
    html = pq(result)
    return _extract_links(html, search_engine)
def get_link_at_pos(links, position):
    """Return the link at 1-based *position*, clamping to the last link.

    Returns False for an empty link list.
    """
    if not links:
        return False
    index = position - 1 if len(links) >= position else -1
    return links[index]
def _format_output(code, args):
    """Syntax-highlight *code* for the terminal when --color was requested.

    A lexer is looked up from the StackOverflow tags and the query words;
    if none matches, pygments guesses from the content. The code is
    returned unchanged when colour is off or no lexer can be determined.
    """
    if not args['color']:
        return code

    lexer = None
    # tags and query words are the best lexer hints
    for keyword in args['query'].split() + args['tags']:
        try:
            lexer = get_lexer_by_name(keyword)
            break
        except ClassNotFound:
            pass

    if lexer is None:
        # fall back to content-based guessing
        try:
            lexer = guess_lexer(code)
        except ClassNotFound:
            return code

    return highlight(code, lexer, TerminalFormatter(bg='dark'))
def _is_question(link):
return re.search(r'questions/\d+/', link)
def _get_questions(links):
    """Filter *links* down to StackOverflow question URLs, preserving order."""
    return list(filter(_is_question, links))
def _get_answer(args, links):
    """Fetch and render the answer for the question at args['pos'] in *links*.

    Returns False when no link is available, the bare link when --link was
    passed, and the (possibly highlighted) answer text otherwise.
    Side effect: fills args['tags'] with the question's tags, which
    _format_output later uses as lexer hints.
    """
    link = get_link_at_pos(links, args['pos'])
    if not link:
        return False
    if args.get('link'):
        return link
    cache_key = link
    page = cache.get(link)
    if not page:
        # '?answertab=votes' presumably requests the vote-sorted answer tab
        page = _get_result(link + '?answertab=votes')
        cache.set(cache_key, page)
    html = pq(page)
    first_answer = html('.answer').eq(0)
    # prefer explicit code blocks; fall back to the whole post text below
    instructions = first_answer.find('pre') or first_answer.find('code')
    args['tags'] = [t.text for t in html('.post-tag')]
    if not instructions and not args['all']:
        text = get_text(first_answer.find('.post-text').eq(0))
    elif args['all']:
        # --all: stitch together every child of the post, highlighting code
        texts = []
        for html_tag in first_answer.items('.post-text > *'):
            current_text = get_text(html_tag)
            if current_text:
                if html_tag[0].tag in ['pre', 'code']:
                    texts.append(_format_output(current_text, args))
                else:
                    texts.append(current_text)
        text = '\n'.join(texts)
    else:
        text = _format_output(get_text(instructions.eq(0)), args)
    if text is None:
        text = NO_ANSWER_MSG
    text = text.strip()
    return text
def _get_links_with_cache(query):
    """Return question links for *query*, consulting the link cache first.

    Returns False when a previous search was cached as empty, and an empty
    list when a fresh search yields nothing.
    """
    cache_key = query + "-links"
    res = cache.get(cache_key)
    if res:
        if res == CACHE_EMPTY_VAL:
            res = False
        return res
    links = _get_links(query)
    if not links:
        # FIX: previously fell through here, filtering an empty list and
        # writing CACHE_EMPTY_VAL to the cache a second time. Record the
        # miss once and stop.
        cache.set(cache_key, CACHE_EMPTY_VAL)
        return []
    question_links = _get_questions(links)
    cache.set(cache_key, question_links or CACHE_EMPTY_VAL)
    return question_links
def _get_instructions(args):
    """Collect args['num_answers'] answers starting at args['pos'].

    Returns False when no question links are found, otherwise the answers
    joined with an '=' separator line. NOTE(review): args['pos'] is mutated
    inside the loop and is left at its final value when this returns.
    """
    question_links = _get_links_with_cache(args['query'])
    if not question_links:
        return False
    only_hyperlinks = args.get('link')
    # headers are only useful when more than one answer (or full text) shows
    star_headers = (args['num_answers'] > 1 or args['all'])
    answers = []
    initial_position = args['pos']
    spliter_length = 80
    answer_spliter = '\n' + '=' * spliter_length + '\n\n'
    for answer_number in range(args['num_answers']):
        current_position = answer_number + initial_position
        # _get_answer reads the position out of args, so update it in place
        args['pos'] = current_position
        link = get_link_at_pos(question_links, current_position)
        answer = _get_answer(args, question_links)
        if not answer:
            continue
        if not only_hyperlinks:
            answer = format_answer(link, answer, star_headers)
        answer += '\n'
        answers.append(answer)
    return answer_spliter.join(answers)
def format_answer(link, answer, star_headers):
    """Wrap *answer* in the link header block when headers are requested."""
    return ANSWER_HEADER.format(link, answer, STAR_HEADER) if star_headers else answer
def _clear_cache():
    """Clear the on-disk cache, lazily creating the cache object if needed.

    Returns the value of cache.clear() (truthy on success).
    """
    global cache
    if not cache:
        cache = FileSystemCache(CACHE_DIR, CACHE_ENTRY_MAX, 0)
    return cache.clear()
def howdoi(args):
    """Answer the query described by the *args* dict and return the text.

    Results are cached keyed on the stringified argument dict; network
    failures are reported as a friendly message instead of raising.
    Side effect: args['query'] is joined into a single '?'-stripped string.
    """
    args['query'] = ' '.join(args['query']).replace('?', '')
    cache_key = str(args)
    res = cache.get(cache_key)
    if res:
        return res
    try:
        res = _get_instructions(args)
        if not res:
            res = 'Sorry, couldn\'t find any help with that topic\n'
        cache.set(cache_key, res)
        return res
    except (ConnectionError, SSLError):
        return 'Failed to establish network connection\n'
def get_parser():
    """Build the argparse parser for the howdoi command line."""
    parser = argparse.ArgumentParser(description='instant coding answers via the command line')
    flag = lambda *names, **kw: parser.add_argument(*names, action='store_true', **kw)
    parser.add_argument('query', metavar='QUERY', type=str, nargs='*',
                        help='the question to answer')
    parser.add_argument('-p', '--pos', default=1, type=int,
                        help='select answer in specified position (default: 1)')
    flag('-a', '--all', help='display the full text of the answer')
    flag('-l', '--link', help='display only the answer link')
    flag('-c', '--color', help='enable colorized output')
    parser.add_argument('-n', '--num-answers', default=1, type=int,
                        help='number of answers to return')
    flag('-C', '--clear-cache', help='clear the cache')
    flag('-v', '--version', help='displays the current version of howdoi')
    return parser
def command_line_runner():
    """CLI entry point: parse arguments and print the UTF-8 encoded answer."""
    parser = get_parser()
    args = vars(parser.parse_args())
    if args['version']:
        _print_ok(__version__)
        return
    if args['clear_cache']:
        if _clear_cache():
            _print_ok('Cache cleared successfully')
        else:
            _print_err('Clearing cache failed')
        return
    if not args['query']:
        parser.print_help()
        return
    # the HOWDOI_COLORIZE env var forces colour even without -c
    if os.getenv('HOWDOI_COLORIZE'):
        args['color'] = True
    utf8_result = howdoi(args).encode('utf-8', 'ignore')
    if sys.version < '3':
        print(utf8_result)
    else:
        # Write UTF-8 to stdout: https://stackoverflow.com/a/3603160
        sys.stdout.buffer.write(utf8_result)
    # close the session to release connection
    howdoi_session.close()
# Script entry point.
if __name__ == '__main__':
    command_line_runner()
|
#!/usr/bin/env python
# Software License Agreement (BSD License)
#
# Copyright (c) 2011, Georgia Institute of Technology
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided
# with the distribution.
# * Neither the name of the Georgia Institute of Technology nor the names of
# its contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# 'AS IS' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
import roslib; roslib.load_manifest('tabletop_pushing')
import rospy
from geometry_msgs.msg import PoseStamped
from geometry_msgs.msg import TwistStamped
# import rosdoc_rosorg
# import JinvTeleopController
import tf
import tf.transformations as tf_trans
import sys
class TestNode:
    """Minimal ROS node that publishes a single test pose on 'command_pose'.

    Fixes over the previous revision: rospy has no `Publish` (the class is
    `Publisher`), the message type is `PoseStamped` (not `PostStamped`),
    `talk` was missing `self`, and `run` called an unbound `talk()`.
    """
    def __init__(self):
        rospy.init_node('test_node', log_level=rospy.DEBUG)

    def talk(self):
        """Publish one pose at (1, 1, 0) and pause briefly."""
        pub = rospy.Publisher('command_pose', PoseStamped)
        rospy.loginfo('created the publisher obj')
        pose = PoseStamped()
        pose.pose.position.x = 1
        pose.pose.position.y = 1
        pose.pose.position.z = 0
        pub.publish(pose)
        rospy.sleep(1.0)

    def run(self):
        self.talk()
# Manual test entry point: construct the node and publish once.
if __name__ == '__main__':
    node = TestNode()
    node.run()
temporary: cartesian controller testing for visual servo
git-svn-id: 9d271b02f21ef92375d53f3a729673c5fe5e0adf@2675 5fc10e88-4e10-11de-b850-9977aa68d080
#!/usr/bin/env python
# Software License Agreement (BSD License)
#
# Copyright (c) 2011, Georgia Institute of Technology
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided
# with the distribution.
# * Neither the name of the Georgia Institute of Technology nor the names of
# its contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# 'AS IS' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
import roslib; roslib.load_manifest('tabletop_pushing')
import rospy
from geometry_msgs.msg import PoseStamped
from geometry_msgs.msg import TwistStamped
import tf
import tf.transformations as tf_trans
import sys
class TestNode:
    """Publishes a fixed test pose to the r_cart cartesian controller until shutdown."""

    def __init__(self):
        rospy.init_node('test_node', log_level=rospy.DEBUG)

    def talk(self):
        """Publish the same target pose every 2 seconds until ROS shuts down."""
        publisher = rospy.Publisher('r_cart/command_pose', PoseStamped)
        rospy.loginfo('created the publisher obj')
        target = PoseStamped()
        target.header.frame_id = '/torso_lift_link'
        target.header.stamp = rospy.Time(0)
        target.pose.position.x = 0.5
        target.pose.position.y = 0.5
        target.pose.position.z = 0
        while not rospy.is_shutdown():
            rospy.loginfo('Publishing following message: %s' % target)
            publisher.publish(target)
            rospy.sleep(2.0)
# Run the publisher loop; Ctrl-C raises ROSInterruptException, which is swallowed.
if __name__ == '__main__':
    try:
        node = TestNode()
        node.talk()
    except rospy.ROSInterruptException: pass
|
# Copyright 2001-2012 by Vinay Sajip. All Rights Reserved.
#
# Permission to use, copy, modify, and distribute this software and its
# documentation for any purpose and without fee is hereby granted,
# provided that the above copyright notice appear in all copies and that
# both that copyright notice and this permission notice appear in
# supporting documentation, and that the name of Vinay Sajip
# not be used in advertising or publicity pertaining to distribution
# of the software without specific, written prior permission.
# VINAY SAJIP DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE, INCLUDING
# ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL
# VINAY SAJIP BE LIABLE FOR ANY SPECIAL, INDIRECT OR CONSEQUENTIAL DAMAGES OR
# ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER
# IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
"""
Logging package for Python. Based on PEP 282 and comments thereto in
comp.lang.python.
Copyright (C) 2001-2012 Vinay Sajip. All Rights Reserved.
To use, simply 'import logging' and log away!
"""
import sys, os, time, cStringIO, traceback, warnings, weakref
__all__ = ['BASIC_FORMAT', 'BufferingFormatter', 'CRITICAL', 'DEBUG', 'ERROR',
'FATAL', 'FileHandler', 'Filter', 'Formatter', 'Handler', 'INFO',
'LogRecord', 'Logger', 'LoggerAdapter', 'NOTSET', 'NullHandler',
'StreamHandler', 'WARN', 'WARNING', 'addLevelName', 'basicConfig',
'captureWarnings', 'critical', 'debug', 'disable', 'error',
'exception', 'fatal', 'getLevelName', 'getLogger', 'getLoggerClass',
'info', 'log', 'makeLogRecord', 'setLoggerClass', 'warn', 'warning']
try:
import codecs
except ImportError:
codecs = None
try:
import thread
import threading
except ImportError:
thread = None
__author__ = "Vinay Sajip <vinay_sajip@red-dove.com>"
__status__ = "production"
__version__ = "0.5.1.2"
__date__ = "07 February 2010"
#---------------------------------------------------------------------------
# Miscellaneous module data
#---------------------------------------------------------------------------
try:
unicode
_unicode = True
except NameError:
_unicode = False
#
# _srcfile is used when walking the stack to check when we've got the first
# caller stack frame.
#
if hasattr(sys, 'frozen'): #support for py2exe
_srcfile = "logging%s__init__%s" % (os.sep, __file__[-4:])
elif __file__[-4:].lower() in ['.pyc', '.pyo']:
_srcfile = __file__[:-4] + '.py'
else:
_srcfile = __file__
_srcfile = os.path.normcase(_srcfile)
# next bit filched from 1.5.2's inspect.py
def currentframe():
    """Return the frame object for the caller's stack frame."""
    # Raise and immediately catch to get hold of a traceback, then walk
    # one frame up from the point of the raise.
    try:
        raise Exception
    except Exception:
        return sys.exc_info()[2].tb_frame.f_back
if hasattr(sys, '_getframe'): currentframe = lambda: sys._getframe(3)
# done filching
# _srcfile is only used in conjunction with sys._getframe().
# To provide compatibility with older versions of Python, set _srcfile
# to None if _getframe() is not available; this value will prevent
# findCaller() from being called.
#if not hasattr(sys, "_getframe"):
# _srcfile = None
#
#_startTime is used as the base when calculating the relative time of events
#
_startTime = time.time()
#
#raiseExceptions is used to see if exceptions during handling should be
#propagated
#
raiseExceptions = 1
#
# If you don't want threading information in the log, set this to zero
#
logThreads = 1
#
# If you don't want multiprocessing information in the log, set this to zero
#
logMultiprocessing = 1
#
# If you don't want process information in the log, set this to zero
#
logProcesses = 1
#---------------------------------------------------------------------------
# Level related stuff
#---------------------------------------------------------------------------
#
# Default levels and level names, these can be replaced with any positive set
# of values having corresponding names. There is a pseudo-level, NOTSET, which
# is only really there as a lower limit for user-defined levels. Handlers and
# loggers are initialized with NOTSET so that they will log all messages, even
# at user-defined levels.
#
# Numeric severity levels, highest first. FATAL and WARN are legacy aliases.
CRITICAL = 50
FATAL = CRITICAL
ERROR = 40
WARNING = 30
WARN = WARNING
INFO = 20
DEBUG = 10
NOTSET = 0
# Bidirectional map: numeric level -> name AND name -> numeric level
# (see getLevelName/addLevelName, which rely on both directions).
_levelNames = {
    CRITICAL : 'CRITICAL',
    ERROR : 'ERROR',
    WARNING : 'WARNING',
    INFO : 'INFO',
    DEBUG : 'DEBUG',
    NOTSET : 'NOTSET',
    'CRITICAL' : CRITICAL,
    'ERROR' : ERROR,
    'WARN' : WARNING,
    'WARNING' : WARNING,
    'INFO' : INFO,
    'DEBUG' : DEBUG,
    'NOTSET' : NOTSET,
}
def getLevelName(level):
    """
    Return the textual representation of logging level 'level'.

    The _levelNames map is bidirectional, so a known number yields its
    name, a known name yields its number, and anything else yields
    "Level %s" % level.
    """
    try:
        return _levelNames[level]
    except KeyError:
        return "Level %s" % level
def addLevelName(level, levelName):
    """
    Associate 'levelName' with 'level'.
    This is used when converting levels to text during message formatting.
    """
    _acquireLock()
    try: #unlikely to cause an exception, but you never know...
        # register the mapping in both directions (number->name, name->number)
        _levelNames[level] = levelName
        _levelNames[levelName] = level
    finally:
        _releaseLock()
def _checkLevel(level):
    """Normalise *level* to an int, accepting ints and registered level names."""
    if isinstance(level, int):
        return level
    # str(level) == level is the original string test (covers str and, on
    # Python 2, unicode) — kept as-is rather than isinstance(level, str)
    if str(level) == level:
        if level not in _levelNames:
            raise ValueError("Unknown level: %r" % level)
        return _levelNames[level]
    raise TypeError("Level not an integer or a valid string: %r" % level)
#---------------------------------------------------------------------------
# Thread-related stuff
#---------------------------------------------------------------------------
#
#_lock is used to serialize access to shared data structures in this module.
#This needs to be an RLock because fileConfig() creates and configures
#Handlers, and so might arbitrary user threads. Since Handler code updates the
#shared dictionary _handlers, it needs to acquire the lock. But if configuring,
#the lock would already have been acquired - so we need an RLock.
#The same argument applies to Loggers and Manager.loggerDict.
#
# Re-entrant lock so configuration code that already holds the lock can
# create handlers/loggers without deadlocking; None disables locking.
if thread:
    _lock = threading.RLock()
else:
    _lock = None
def _acquireLock():
    """
    Acquire the module-level lock for serializing access to shared data.
    This should be released with _releaseLock(). A no-op when threading
    is unavailable (_lock is None).
    """
    if _lock is not None:
        _lock.acquire()
def _releaseLock():
    """
    Release the module-level lock acquired by calling _acquireLock().
    A no-op when threading is unavailable (_lock is None).
    """
    if _lock is not None:
        _lock.release()
#---------------------------------------------------------------------------
# The logging record
#---------------------------------------------------------------------------
class LogRecord(object):
    """
    A LogRecord instance represents an event being logged.
    LogRecord instances are created every time something is logged. They
    contain all the information pertinent to the event being logged. The
    main information passed in is in msg and args, which are combined
    using str(msg) % args to create the message field of the record. The
    record also includes information such as when the record was created,
    the source line where the logging call was made, and any exception
    information to be logged.
    """
    def __init__(self, name, level, pathname, lineno,
                 msg, args, exc_info, func=None):
        """
        Initialize a logging record with interesting information.
        """
        # capture the creation time first so all timing fields agree
        ct = time.time()
        self.name = name
        self.msg = msg
        #
        # The following statement allows passing of a dictionary as a sole
        # argument, so that you can do something like
        # logging.debug("a %(a)d b %(b)s", {'a':1, 'b':2})
        # Suggested by Stefan Behnel.
        # Note that without the test for args[0], we get a problem because
        # during formatting, we test to see if the arg is present using
        # 'if self.args:'. If the event being logged is e.g. 'Value is %d'
        # and if the passed arg fails 'if self.args:' then no formatting
        # is done. For example, logger.warn('Value is %d', 0) would log
        # 'Value is %d' instead of 'Value is 0'.
        # For the use case of passing a dictionary, this should not be a
        # problem.
        if args and len(args) == 1 and isinstance(args[0], dict) and args[0]:
            args = args[0]
        self.args = args
        self.levelname = getLevelName(level)
        self.levelno = level
        self.pathname = pathname
        try:
            self.filename = os.path.basename(pathname)
            self.module = os.path.splitext(self.filename)[0]
        except (TypeError, ValueError, AttributeError):
            # pathname may be None or otherwise unusable; degrade gracefully
            self.filename = pathname
            self.module = "Unknown module"
        self.exc_info = exc_info
        self.exc_text = None # used to cache the traceback text
        self.lineno = lineno
        self.funcName = func
        self.created = ct
        # long() is Python 2; truncates to whole seconds for the ms remainder
        self.msecs = (ct - long(ct)) * 1000
        self.relativeCreated = (self.created - _startTime) * 1000
        if logThreads and thread:
            self.thread = thread.get_ident()
            self.threadName = threading.current_thread().name
        else:
            self.thread = None
            self.threadName = None
        if not logMultiprocessing:
            self.processName = None
        else:
            self.processName = 'MainProcess'
            mp = sys.modules.get('multiprocessing')
            if mp is not None:
                # Errors may occur if multiprocessing has not finished loading
                # yet - e.g. if a custom import hook causes third-party code
                # to run when multiprocessing calls import. See issue 8200
                # for an example
                try:
                    self.processName = mp.current_process().name
                except StandardError:
                    # StandardError is Python 2 only
                    pass
        if logProcesses and hasattr(os, 'getpid'):
            self.process = os.getpid()
        else:
            self.process = None
    def __str__(self):
        return '<LogRecord: %s, %s, %s, %s, "%s">'%(self.name, self.levelno,
            self.pathname, self.lineno, self.msg)
    def getMessage(self):
        """
        Return the message for this LogRecord.
        Return the message for this LogRecord after merging any user-supplied
        arguments with the message.
        """
        if not _unicode: #if no unicode support...
            msg = str(self.msg)
        else:
            msg = self.msg
            if not isinstance(msg, basestring):
                try:
                    msg = str(self.msg)
                except UnicodeError:
                    msg = self.msg #Defer encoding till later
        if self.args:
            msg = msg % self.args
        return msg
def makeLogRecord(dict):
    """
    Make a LogRecord whose attributes are defined by the specified dictionary,
    This function is useful for converting a logging event received over
    a socket connection (which is sent as a dictionary) into a LogRecord
    instance.
    """
    # start from an empty record, then overlay the supplied attributes
    record = LogRecord(None, None, "", 0, "", (), None, None)
    record.__dict__.update(dict)
    return record
#---------------------------------------------------------------------------
# Formatter classes and functions
#---------------------------------------------------------------------------
class Formatter(object):
    """
    Formatter instances are used to convert a LogRecord to text.
    Formatters need to know how a LogRecord is constructed. They are
    responsible for converting a LogRecord to (usually) a string which can
    be interpreted by either a human or an external system. The base Formatter
    allows a formatting string to be specified. If none is supplied, the
    default value of "%s(message)\\n" is used.
    The Formatter can be initialized with a format string which makes use of
    knowledge of the LogRecord attributes - e.g. the default value mentioned
    above makes use of the fact that the user's message and arguments are pre-
    formatted into a LogRecord's message attribute. Currently, the useful
    attributes in a LogRecord are described by:
    %(name)s Name of the logger (logging channel)
    %(levelno)s Numeric logging level for the message (DEBUG, INFO,
    WARNING, ERROR, CRITICAL)
    %(levelname)s Text logging level for the message ("DEBUG", "INFO",
    "WARNING", "ERROR", "CRITICAL")
    %(pathname)s Full pathname of the source file where the logging
    call was issued (if available)
    %(filename)s Filename portion of pathname
    %(module)s Module (name portion of filename)
    %(lineno)d Source line number where the logging call was issued
    (if available)
    %(funcName)s Function name
    %(created)f Time when the LogRecord was created (time.time()
    return value)
    %(asctime)s Textual time when the LogRecord was created
    %(msecs)d Millisecond portion of the creation time
    %(relativeCreated)d Time in milliseconds when the LogRecord was created,
    relative to the time the logging module was loaded
    (typically at application startup time)
    %(thread)d Thread ID (if available)
    %(threadName)s Thread name (if available)
    %(process)d Process ID (if available)
    %(message)s The result of record.getMessage(), computed just as
    the record is emitted
    """
    # class attribute shared by all instances; override per-instance or on
    # the class (e.g. time.gmtime for GMT timestamps)
    converter = time.localtime
    def __init__(self, fmt=None, datefmt=None):
        """
        Initialize the formatter with specified format strings.
        Initialize the formatter either with the specified format string, or a
        default as described above. Allow for specialized date formatting with
        the optional datefmt argument (if omitted, you get the ISO8601 format).
        """
        if fmt:
            self._fmt = fmt
        else:
            self._fmt = "%(message)s"
        self.datefmt = datefmt
    def formatTime(self, record, datefmt=None):
        """
        Return the creation time of the specified LogRecord as formatted text.
        This method should be called from format() by a formatter which
        wants to make use of a formatted time. This method can be overridden
        in formatters to provide for any specific requirement, but the
        basic behaviour is as follows: if datefmt (a string) is specified,
        it is used with time.strftime() to format the creation time of the
        record. Otherwise, the ISO8601 format is used. The resulting
        string is returned. This function uses a user-configurable function
        to convert the creation time to a tuple. By default, time.localtime()
        is used; to change this for a particular formatter instance, set the
        'converter' attribute to a function with the same signature as
        time.localtime() or time.gmtime(). To change it for all formatters,
        for example if you want all logging times to be shown in GMT,
        set the 'converter' attribute in the Formatter class.
        """
        ct = self.converter(record.created)
        if datefmt:
            s = time.strftime(datefmt, ct)
        else:
            # ISO8601-style default with milliseconds appended manually
            t = time.strftime("%Y-%m-%d %H:%M:%S", ct)
            s = "%s,%03d" % (t, record.msecs)
        return s
    def formatException(self, ei):
        """
        Format and return the specified exception information as a string.
        This default implementation just uses
        traceback.print_exception()
        """
        # cStringIO is Python 2; collects the traceback rendering in memory
        sio = cStringIO.StringIO()
        traceback.print_exception(ei[0], ei[1], ei[2], None, sio)
        s = sio.getvalue()
        sio.close()
        # drop the trailing newline print_exception leaves behind
        if s[-1:] == "\n":
            s = s[:-1]
        return s
    def usesTime(self):
        """
        Check if the format uses the creation time of the record.
        """
        return self._fmt.find("%(asctime)") >= 0
    def format(self, record):
        """
        Format the specified record as text.
        The record's attribute dictionary is used as the operand to a
        string formatting operation which yields the returned string.
        Before formatting the dictionary, a couple of preparatory steps
        are carried out. The message attribute of the record is computed
        using LogRecord.getMessage(). If the formatting string uses the
        time (as determined by a call to usesTime(), formatTime() is
        called to format the event time. If there is exception information,
        it is formatted using formatException() and appended to the message.
        """
        record.message = record.getMessage()
        if self.usesTime():
            record.asctime = self.formatTime(record, self.datefmt)
        s = self._fmt % record.__dict__
        if record.exc_info:
            # Cache the traceback text to avoid converting it multiple times
            # (it's constant anyway)
            if not record.exc_text:
                record.exc_text = self.formatException(record.exc_info)
            if record.exc_text:
                if s[-1:] != "\n":
                    s = s + "\n"
                try:
                    s = s + record.exc_text
                except UnicodeError:
                    # Sometimes filenames have non-ASCII chars, which can lead
                    # to errors when s is Unicode and record.exc_text is str
                    # See issue 8924.
                    # We also use replace for when there are multiple
                    # encodings, e.g. UTF-8 for the filesystem and latin-1
                    # for a script. See issue 13232.
                    s = s + record.exc_text.decode(sys.getfilesystemencoding(),
                                                   'replace')
        return s
#
# The default formatter to use when no other is specified
#
_defaultFormatter = Formatter()
class BufferingFormatter(object):
    """
    A formatter suitable for formatting a number of records.
    """
    def __init__(self, linefmt=None):
        """
        Optionally specify a formatter which will be used to format each
        individual record; the module default formatter is used otherwise.
        """
        self.linefmt = linefmt if linefmt else _defaultFormatter
    def formatHeader(self, records):
        """
        Return the header string for the specified records.
        """
        return ""
    def formatFooter(self, records):
        """
        Return the footer string for the specified records.
        """
        return ""
    def format(self, records):
        """
        Format the specified records and return the result as a string:
        header, one formatted line per record, footer. Empty input yields
        an empty string.
        """
        if not records:
            return ""
        parts = [self.formatHeader(records)]
        parts.extend(self.linefmt.format(record) for record in records)
        parts.append(self.formatFooter(records))
        return "".join(parts)
#---------------------------------------------------------------------------
# Filter classes and functions
#---------------------------------------------------------------------------
class Filter(object):
    """
    Filter instances are used to perform arbitrary filtering of LogRecords.
    Loggers and Handlers can optionally use Filter instances to filter
    records as desired. The base filter class only allows events which are
    below a certain point in the logger hierarchy. For example, a filter
    initialized with "A.B" will allow events logged by loggers "A.B",
    "A.B.C", "A.B.C.D", "A.B.D" etc. but not "A.BB", "B.A.B" etc. If
    initialized with the empty string, all events are passed.
    """
    def __init__(self, name=''):
        """
        Initialize a filter.
        Initialize with the name of the logger which, together with its
        children, will have its events allowed through the filter. If no
        name is specified, allow every event.
        """
        self.name = name
        self.nlen = len(name)
    def filter(self, record):
        """
        Determine if the specified record is to be logged.
        Returns a false value to drop the record, a true value to keep it.
        If deemed appropriate, the record may be modified in-place.
        """
        if self.nlen == 0:
            return 1
        if self.name == record.name:
            return 1
        # prefix must match AND be followed by a '.' (i.e. a child logger)
        if not record.name.startswith(self.name):
            return 0
        return (record.name[self.nlen] == ".")
class Filterer(object):
    """
    A base class for loggers and handlers which allows them to share
    common code.
    """
    def __init__(self):
        """
        Start with an empty list of filters.
        """
        self.filters = []
    def addFilter(self, filter):
        """
        Attach *filter* to this handler unless it is already present.
        """
        if filter not in self.filters:
            self.filters.append(filter)
    def removeFilter(self, filter):
        """
        Detach *filter* from this handler; unknown filters are ignored.
        """
        if filter in self.filters:
            self.filters.remove(filter)
    def filter(self, record):
        """
        Determine if a record is loggable by consulting all the filters.
        Any single filter can veto the record. Returns a zero value if a
        record is to be dropped, else non-zero.
        """
        for active_filter in self.filters:
            if not active_filter.filter(record):
                return 0
        return 1
#---------------------------------------------------------------------------
# Handler classes and functions
#---------------------------------------------------------------------------
_handlers = weakref.WeakValueDictionary() #map of handler names to handlers
_handlerList = [] # added to allow handlers to be removed in reverse of order initialized
def _removeHandlerRef(wr):
    """
    Remove a handler reference from the internal cleanup list.
    """
    # This function can be called during module teardown, when globals are
    # set to None. If _acquireLock is None, assume this is the case and do
    # nothing.
    # (wr is the dead weakref itself, passed in by the weakref callback.)
    if _acquireLock is not None:
        _acquireLock()
        try:
            if wr in _handlerList:
                _handlerList.remove(wr)
        finally:
            _releaseLock()
def _addHandlerRef(handler):
    """
    Add a handler to the internal cleanup list using a weak reference,
    so a dead handler is dropped from the list automatically.
    """
    _acquireLock()
    try:
        ref = weakref.ref(handler, _removeHandlerRef)
        _handlerList.append(ref)
    finally:
        _releaseLock()
class Handler(Filterer):
"""
Handler instances dispatch logging events to specific destinations.
The base handler class. Acts as a placeholder which defines the Handler
interface. Handlers can optionally use Formatter instances to format
records as desired. By default, no formatter is specified; in this case,
the 'raw' message as determined by record.message is logged.
"""
    def __init__(self, level=NOTSET):
        """
        Initializes the instance - basically setting the formatter to None
        and the filter list to empty.
        """
        Filterer.__init__(self)
        self._name = None
        # validate/convert the level once, up front (int or level name)
        self.level = _checkLevel(level)
        self.formatter = None
        # Add the handler to the global _handlerList (for cleanup on shutdown)
        _addHandlerRef(self)
        self.createLock()
    def get_name(self):
        # Accessor backing the 'name' property below.
        return self._name
    def set_name(self, name):
        # Re-key this handler in the module-wide _handlers map under the lock.
        _acquireLock()
        try:
            if self._name in _handlers:
                del _handlers[self._name]
            self._name = name
            if name:
                _handlers[name] = self
        finally:
            _releaseLock()
    name = property(get_name, set_name)
def createLock(self):
"""
Acquire a thread lock for serializing access to the underlying I/O.
"""
if thread:
self.lock = threading.RLock()
else:
self.lock = None
def acquire(self):
"""
Acquire the I/O thread lock.
"""
if self.lock:
self.lock.acquire()
def release(self):
"""
Release the I/O thread lock.
"""
if self.lock:
self.lock.release()
    def setLevel(self, level):
        """
        Set the logging level of this handler. Accepts an int or a
        registered level name (see _checkLevel).
        """
        self.level = _checkLevel(level)
def format(self, record):
"""
Format the specified record.
If a formatter is set, use it. Otherwise, use the default formatter
for the module.
"""
if self.formatter:
fmt = self.formatter
else:
fmt = _defaultFormatter
return fmt.format(record)
def emit(self, record):
"""
Do whatever it takes to actually log the specified logging record.
This version is intended to be implemented by subclasses and so
raises a NotImplementedError.
"""
raise NotImplementedError('emit must be implemented '
'by Handler subclasses')
def handle(self, record):
"""
Conditionally emit the specified logging record.
Emission depends on filters which may have been added to the handler.
Wrap the actual emission of the record with acquisition/release of
the I/O thread lock. Returns whether the filter passed the record for
emission.
"""
rv = self.filter(record)
if rv:
self.acquire()
try:
self.emit(record)
finally:
self.release()
return rv
def setFormatter(self, fmt):
"""
Set the formatter for this handler.
"""
self.formatter = fmt
def flush(self):
"""
Ensure all logging output has been flushed.
This version does nothing and is intended to be implemented by
subclasses.
"""
pass
def close(self):
"""
Tidy up any resources used by the handler.
This version removes the handler from an internal map of handlers,
_handlers, which is used for handler lookup by name. Subclasses
should ensure that this gets called from overridden close()
methods.
"""
#get the module data lock, as we're updating a shared structure.
_acquireLock()
try: #unlikely to raise an exception, but you never know...
if self._name and self._name in _handlers:
del _handlers[self._name]
finally:
_releaseLock()
def handleError(self, record):
"""
Handle errors which occur during an emit() call.
This method should be called from handlers when an exception is
encountered during an emit() call. If raiseExceptions is false,
exceptions get silently ignored. This is what is mostly wanted
for a logging system - most users will not care about errors in
the logging system, they are more interested in application errors.
You could, however, replace this with a custom handler if you wish.
The record which was being processed is passed in to this method.
"""
if raiseExceptions and sys.stderr: # see issue 13807
ei = sys.exc_info()
try:
traceback.print_exception(ei[0], ei[1], ei[2],
None, sys.stderr)
sys.stderr.write('Logged from file %s, line %s\n' % (
record.filename, record.lineno))
except IOError:
pass # see issue 5971
finally:
del ei
class StreamHandler(Handler):
    """
    A handler class which writes logging records, appropriately formatted,
    to a stream. Note that this class does not close the stream, as
    sys.stdout or sys.stderr may be used.
    """
    def __init__(self, stream=None):
        """
        Initialize the handler.

        If stream is not specified, sys.stderr is used.
        """
        Handler.__init__(self)
        if stream is None:
            stream = sys.stderr
        self.stream = stream
    def flush(self):
        """
        Flushes the stream.
        """
        self.acquire()
        try:
            # hasattr guard: the stream may be a minimal file-like object
            if self.stream and hasattr(self.stream, "flush"):
                self.stream.flush()
        finally:
            self.release()
    def emit(self, record):
        """
        Emit a record.

        If a formatter is specified, it is used to format the record.
        The record is then written to the stream with a trailing newline. If
        exception information is present, it is formatted using
        traceback.print_exception and appended to the stream. If the stream
        has an 'encoding' attribute, it is used to determine how to do the
        output to the stream.
        """
        try:
            msg = self.format(record)
            stream = self.stream
            fs = "%s\n"
            if not _unicode: #if no unicode support...
                stream.write(fs % msg)
            else:
                try:
                    if (isinstance(msg, unicode) and
                        getattr(stream, 'encoding', None)):
                        # Python 2: decode the format string so the % join
                        # below stays unicode instead of coercing msg to str.
                        ufs = fs.decode(stream.encoding)
                        try:
                            stream.write(ufs % msg)
                        except UnicodeEncodeError:
                            #Printing to terminals sometimes fails. For example,
                            #with an encoding of 'cp1251', the above write will
                            #work if written to a stream opened or wrapped by
                            #the codecs module, but fail when writing to a
                            #terminal even when the codepage is set to cp1251.
                            #An extra encoding step seems to be needed.
                            stream.write((ufs % msg).encode(stream.encoding))
                    else:
                        stream.write(fs % msg)
                except UnicodeError:
                    # last resort: emit the message as UTF-8 bytes
                    stream.write(fs % msg.encode("UTF-8"))
            self.flush()
        except (KeyboardInterrupt, SystemExit):
            # never swallow interpreter-exit requests
            raise
        except:
            self.handleError(record)
class FileHandler(StreamHandler):
    """
    A handler that writes formatted logging records to a disk file.
    """
    def __init__(self, filename, mode='a', encoding=None, delay=0):
        """
        Use *filename* as the logging stream, opened with *mode* (and,
        when the codecs module is available, *encoding*). When *delay* is
        true, opening is postponed until the first emit().
        """
        # Without the codecs module an encoded open is impossible.
        if codecs is None:
            encoding = None
        # Keep the absolute path so later re-opens survive a chdir().
        self.baseFilename = os.path.abspath(filename)
        self.mode = mode
        self.encoding = encoding
        if not delay:
            StreamHandler.__init__(self, self._open())
        else:
            # Skip opening the file, but still run the Handler constructor
            # so level, formatter and lock get set up.
            Handler.__init__(self)
            self.stream = None
    def close(self):
        """
        Flush and close the stream, then run the StreamHandler cleanup.
        """
        self.acquire()
        try:
            if self.stream:
                self.flush()
                if hasattr(self.stream, "close"):
                    self.stream.close()
                StreamHandler.close(self)
                self.stream = None
        finally:
            self.release()
    def _open(self):
        """
        Open the base file with the stored mode/encoding and return the
        resulting stream.
        """
        if self.encoding is not None:
            return codecs.open(self.baseFilename, self.mode, self.encoding)
        return open(self.baseFilename, self.mode)
    def emit(self, record):
        """
        Emit a record, first opening the stream if construction was
        deferred via 'delay'.
        """
        if self.stream is None:
            self.stream = self._open()
        StreamHandler.emit(self, record)
#---------------------------------------------------------------------------
#   Manager classes and functions
#---------------------------------------------------------------------------

class PlaceHolder(object):
    """
    Stand-in node in the Manager logger hierarchy for a name that has child
    loggers but no logger of its own yet. Internal use only; not part of
    the public API.
    """
    def __init__(self, alogger):
        """Start the child map with *alogger* as the sole entry."""
        # Values are unused; the dict is only a de-duplicating container.
        self.loggerMap = {alogger: None}
    def append(self, alogger):
        """Record *alogger* as a child of this placeholder (idempotent)."""
        if alogger in self.loggerMap:
            return
        self.loggerMap[alogger] = None
#
#   Determine which class to use when instantiating loggers.
#
# Rebound to Logger once that class is defined below; setLoggerClass()
# lets applications substitute a Logger subclass.
_loggerClass = None
def setLoggerClass(klass):
    """
    Set the class used when instantiating new loggers. *klass* must be
    Logger itself or a subclass; its __init__() should require only a name
    argument and call Logger.__init__().

    Raises TypeError for a class not derived from Logger.
    """
    global _loggerClass
    if klass != Logger and not issubclass(klass, Logger):
        raise TypeError("logger not derived from logging.Logger: "
                        + klass.__name__)
    _loggerClass = klass
def getLoggerClass():
    """
    Return the class to be used when instantiating a logger: the class most
    recently passed to setLoggerClass(), or the default Logger class.
    """
    return _loggerClass
class Manager(object):
    """
    There is [under normal circumstances] just one Manager instance, which
    holds the hierarchy of loggers.
    """
    def __init__(self, rootnode):
        """
        Initialize the manager with the root node of the logger hierarchy.
        """
        self.root = rootnode
        self.disable = 0                   # global cutoff read by Logger.isEnabledFor
        self.emittedNoHandlerWarning = 0   # one-shot flag used by Logger.callHandlers
        self.loggerDict = {}               # name -> Logger or PlaceHolder
        self.loggerClass = None            # per-manager override of _loggerClass
    def getLogger(self, name):
        """
        Get a logger with the specified name (channel name), creating it
        if it doesn't yet exist. This name is a dot-separated hierarchical
        name, such as "a", "a.b", "a.b.c" or similar.

        If a PlaceHolder existed for the specified name [i.e. the logger
        didn't exist but a child of it did], replace it with the created
        logger and fix up the parent/child references which pointed to the
        placeholder to now point to the logger.
        """
        rv = None
        if not isinstance(name, basestring):
            raise TypeError('A logger name must be string or Unicode')
        if isinstance(name, unicode):
            # normalize unicode names so dict keys stay byte strings (Py2)
            name = name.encode('utf-8')
        _acquireLock()
        try:
            if name in self.loggerDict:
                rv = self.loggerDict[name]
                if isinstance(rv, PlaceHolder):
                    # A descendant logger was created first; swap the
                    # placeholder for a real logger and rewire relatives.
                    ph = rv
                    rv = (self.loggerClass or _loggerClass)(name)
                    rv.manager = self
                    self.loggerDict[name] = rv
                    self._fixupChildren(ph, rv)
                    self._fixupParents(rv)
            else:
                rv = (self.loggerClass or _loggerClass)(name)
                rv.manager = self
                self.loggerDict[name] = rv
                self._fixupParents(rv)
        finally:
            _releaseLock()
        return rv
    def setLoggerClass(self, klass):
        """
        Set the class to be used when instantiating a logger with this Manager.
        Raises TypeError for a class not derived from Logger.
        """
        if klass != Logger:
            if not issubclass(klass, Logger):
                raise TypeError("logger not derived from logging.Logger: "
                                + klass.__name__)
        self.loggerClass = klass
    def _fixupParents(self, alogger):
        """
        Ensure that there are either loggers or placeholders all the way
        from the specified logger to the root of the logger hierarchy.
        """
        name = alogger.name
        i = name.rfind(".")
        rv = None
        # Walk up the dotted name, creating placeholders for missing
        # ancestors, until an existing Logger is found (or we hit the top).
        while (i > 0) and not rv:
            substr = name[:i]
            if substr not in self.loggerDict:
                self.loggerDict[substr] = PlaceHolder(alogger)
            else:
                obj = self.loggerDict[substr]
                if isinstance(obj, Logger):
                    rv = obj
                else:
                    assert isinstance(obj, PlaceHolder)
                    obj.append(alogger)
            i = name.rfind(".", 0, i - 1)
        if not rv:
            rv = self.root
        alogger.parent = rv
    def _fixupChildren(self, ph, alogger):
        """
        Ensure that children of the placeholder ph are connected to the
        specified logger.
        """
        name = alogger.name
        namelen = len(name)
        for c in ph.loggerMap.keys():
            #The if means ... if not c.parent.name.startswith(nm)
            # Only re-parent children whose current parent lies above the
            # new logger; others already point below it.
            if c.parent.name[:namelen] != name:
                alogger.parent = c.parent
                c.parent = alogger
#---------------------------------------------------------------------------
#   Logger classes and functions
#---------------------------------------------------------------------------

class Logger(Filterer):
    """
    Instances of the Logger class represent a single logging channel. A
    "logging channel" indicates an area of an application. Exactly how an
    "area" is defined is up to the application developer. Since an
    application can have any number of areas, logging channels are identified
    by a unique string. Application areas can be nested (e.g. an area
    of "input processing" might include sub-areas "read CSV files", "read
    XLS files" and "read Gnumeric files"). To cater for this natural nesting,
    channel names are organized into a namespace hierarchy where levels are
    separated by periods, much like the Java or Python package namespace. So
    in the instance given above, channel names might be "input" for the upper
    level, and "input.csv", "input.xls" and "input.gnu" for the sub-levels.
    There is no arbitrary limit to the depth of nesting.
    """
    def __init__(self, name, level=NOTSET):
        """
        Initialize the logger with a name and an optional level.
        """
        Filterer.__init__(self)
        self.name = name
        self.level = _checkLevel(level)
        self.parent = None      # wired up by Manager._fixupParents()
        self.propagate = 1      # pass records up the hierarchy by default
        self.handlers = []
        self.disabled = 0
    def setLevel(self, level):
        """
        Set the logging level of this logger.  level must be an int or a
        registered level name.
        """
        self.level = _checkLevel(level)
    def debug(self, msg, *args, **kwargs):
        """
        Log 'msg % args' with severity 'DEBUG'.

        To pass exception information, use the keyword argument exc_info with
        a true value, e.g.

        logger.debug("Houston, we have a %s", "thorny problem", exc_info=1)
        """
        if self.isEnabledFor(DEBUG):
            self._log(DEBUG, msg, args, **kwargs)
    def info(self, msg, *args, **kwargs):
        """
        Log 'msg % args' with severity 'INFO'.

        To pass exception information, use the keyword argument exc_info with
        a true value, e.g.

        logger.info("Houston, we have a %s", "interesting problem", exc_info=1)
        """
        if self.isEnabledFor(INFO):
            self._log(INFO, msg, args, **kwargs)
    def warning(self, msg, *args, **kwargs):
        """
        Log 'msg % args' with severity 'WARNING'.

        To pass exception information, use the keyword argument exc_info with
        a true value, e.g.

        logger.warning("Houston, we have a %s", "bit of a problem", exc_info=1)
        """
        if self.isEnabledFor(WARNING):
            self._log(WARNING, msg, args, **kwargs)
    warn = warning
    def error(self, msg, *args, **kwargs):
        """
        Log 'msg % args' with severity 'ERROR'.

        To pass exception information, use the keyword argument exc_info with
        a true value, e.g.

        logger.error("Houston, we have a %s", "major problem", exc_info=1)
        """
        if self.isEnabledFor(ERROR):
            self._log(ERROR, msg, args, **kwargs)
    def exception(self, msg, *args, **kwargs):
        """
        Convenience method for logging an ERROR with exception information.

        Fix for issue #15541: the previous signature (self, msg, *args)
        forwarded via self.error(msg, exc_info=1, *args), which made it
        impossible for callers to pass other keyword arguments such as
        'extra'. Accept **kwargs and force exc_info instead; any exc_info
        supplied by the caller is deliberately overridden.
        """
        kwargs['exc_info'] = 1
        self.error(msg, *args, **kwargs)
    def critical(self, msg, *args, **kwargs):
        """
        Log 'msg % args' with severity 'CRITICAL'.

        To pass exception information, use the keyword argument exc_info with
        a true value, e.g.

        logger.critical("Houston, we have a %s", "major disaster", exc_info=1)
        """
        if self.isEnabledFor(CRITICAL):
            self._log(CRITICAL, msg, args, **kwargs)
    fatal = critical
    def log(self, level, msg, *args, **kwargs):
        """
        Log 'msg % args' with the integer severity 'level'.

        To pass exception information, use the keyword argument exc_info with
        a true value, e.g.

        logger.log(level, "We have a %s", "mysterious problem", exc_info=1)
        """
        if not isinstance(level, int):
            if raiseExceptions:
                raise TypeError("level must be an integer")
            else:
                return
        if self.isEnabledFor(level):
            self._log(level, msg, args, **kwargs)
    def findCaller(self):
        """
        Find the stack frame of the caller so that we can note the source
        file name, line number and function name.
        """
        f = currentframe()
        #On some versions of IronPython, currentframe() returns None if
        #IronPython isn't run with -X:Frames.
        if f is not None:
            f = f.f_back
        rv = "(unknown file)", 0, "(unknown function)"
        while hasattr(f, "f_code"):
            co = f.f_code
            filename = os.path.normcase(co.co_filename)
            # skip frames originating from this module itself
            if filename == _srcfile:
                f = f.f_back
                continue
            rv = (co.co_filename, f.f_lineno, co.co_name)
            break
        return rv
    def makeRecord(self, name, level, fn, lno, msg, args, exc_info, func=None, extra=None):
        """
        A factory method which can be overridden in subclasses to create
        specialized LogRecords.
        """
        rv = LogRecord(name, level, fn, lno, msg, args, exc_info, func)
        if extra is not None:
            for key in extra:
                # protect computed attributes and anything LogRecord set
                if (key in ["message", "asctime"]) or (key in rv.__dict__):
                    raise KeyError("Attempt to overwrite %r in LogRecord" % key)
                rv.__dict__[key] = extra[key]
        return rv
    def _log(self, level, msg, args, exc_info=None, extra=None):
        """
        Low-level logging routine which creates a LogRecord and then calls
        all the handlers of this logger to handle the record.
        """
        if _srcfile:
            #IronPython doesn't track Python frames, so findCaller throws an
            #exception on some versions of IronPython. We trap it here so that
            #IronPython can use logging.
            try:
                fn, lno, func = self.findCaller()
            except ValueError:
                fn, lno, func = "(unknown file)", 0, "(unknown function)"
        else:
            fn, lno, func = "(unknown file)", 0, "(unknown function)"
        if exc_info:
            # a truthy non-tuple (e.g. exc_info=1) means "capture current"
            if not isinstance(exc_info, tuple):
                exc_info = sys.exc_info()
        record = self.makeRecord(self.name, level, fn, lno, msg, args, exc_info, func, extra)
        self.handle(record)
    def handle(self, record):
        """
        Call the handlers for the specified record.

        This method is used for unpickled records received from a socket, as
        well as those created locally. Logger-level filtering is applied.
        """
        if (not self.disabled) and self.filter(record):
            self.callHandlers(record)
    def addHandler(self, hdlr):
        """
        Add the specified handler to this logger (no-op if already present).
        """
        _acquireLock()
        try:
            if not (hdlr in self.handlers):
                self.handlers.append(hdlr)
        finally:
            _releaseLock()
    def removeHandler(self, hdlr):
        """
        Remove the specified handler from this logger (no-op if absent).
        """
        _acquireLock()
        try:
            if hdlr in self.handlers:
                self.handlers.remove(hdlr)
        finally:
            _releaseLock()
    def callHandlers(self, record):
        """
        Pass a record to all relevant handlers.

        Loop through all handlers for this logger and its parents in the
        logger hierarchy. If no handler was found, output a one-off error
        message to sys.stderr. Stop searching up the hierarchy whenever a
        logger with the "propagate" attribute set to zero is found - that
        will be the last logger whose handlers are called.
        """
        c = self
        found = 0
        while c:
            for hdlr in c.handlers:
                found = found + 1
                if record.levelno >= hdlr.level:
                    hdlr.handle(record)
            if not c.propagate:
                c = None    #break out
            else:
                c = c.parent
        if (found == 0) and raiseExceptions and not self.manager.emittedNoHandlerWarning:
            sys.stderr.write("No handlers could be found for logger"
                             " \"%s\"\n" % self.name)
            self.manager.emittedNoHandlerWarning = 1
    def getEffectiveLevel(self):
        """
        Get the effective level for this logger.

        Loop through this logger and its parents in the logger hierarchy,
        looking for a non-zero logging level. Return the first one found.
        """
        logger = self
        while logger:
            if logger.level:
                return logger.level
            logger = logger.parent
        return NOTSET
    def isEnabledFor(self, level):
        """
        Is this logger enabled for level 'level'?
        """
        # Manager.disable is a global cutoff that overrides per-logger levels.
        if self.manager.disable >= level:
            return 0
        return level >= self.getEffectiveLevel()
    def getChild(self, suffix):
        """
        Get a logger which is a descendant to this one.

        This is a convenience method, such that

        logging.getLogger('abc').getChild('def.ghi')

        is the same as

        logging.getLogger('abc.def.ghi')

        It's useful, for example, when the parent logger is named using
        __name__ rather than a literal string.
        """
        # self.root is assigned at module level (Logger.root = root)
        if self.root is not self:
            suffix = '.'.join((self.name, suffix))
        return self.manager.getLogger(suffix)
class RootLogger(Logger):
    """
    A root logger is not that different to any other logger, except that
    it must have a logging level and there is only one instance of it in
    the hierarchy.
    """
    def __init__(self, level):
        """
        Initialize the logger with the name "root".
        """
        Logger.__init__(self, "root", level)
_loggerClass = Logger
class LoggerAdapter(object):
    """
    An adapter for loggers which makes it easier to specify contextual
    information in logging output.
    """

    def __init__(self, logger, extra):
        """
        Wrap *logger*, remembering *extra*, a dict-like object that supplies
        contextual information. The signature allows adapters to be stacked,
        and keyword-style context can be passed as:

            adapter = LoggerAdapter(someLogger, dict(p1=v1, p2="v2"))
        """
        self.logger = logger
        self.extra = extra

    def process(self, msg, kwargs):
        """
        Inject contextual information into a logging call. The default
        implementation stores self.extra under the 'extra' keyword and leaves
        the message untouched; subclasses may rewrite either or both. Returns
        the (possibly modified) message and keyword dict. Normally this is
        the only method a LoggerAdapter subclass needs to override.
        """
        kwargs["extra"] = self.extra
        return msg, kwargs

    def debug(self, msg, *args, **kwargs):
        """Forward a DEBUG call to the wrapped logger, context added."""
        processed, kw = self.process(msg, kwargs)
        self.logger.debug(processed, *args, **kw)

    def info(self, msg, *args, **kwargs):
        """Forward an INFO call to the wrapped logger, context added."""
        processed, kw = self.process(msg, kwargs)
        self.logger.info(processed, *args, **kw)

    def warning(self, msg, *args, **kwargs):
        """Forward a WARNING call to the wrapped logger, context added."""
        processed, kw = self.process(msg, kwargs)
        self.logger.warning(processed, *args, **kw)

    def error(self, msg, *args, **kwargs):
        """Forward an ERROR call to the wrapped logger, context added."""
        processed, kw = self.process(msg, kwargs)
        self.logger.error(processed, *args, **kw)

    def exception(self, msg, *args, **kwargs):
        """Forward an ERROR call with exception info, context added."""
        processed, kw = self.process(msg, kwargs)
        kw["exc_info"] = 1
        self.logger.error(processed, *args, **kw)

    def critical(self, msg, *args, **kwargs):
        """Forward a CRITICAL call to the wrapped logger, context added."""
        processed, kw = self.process(msg, kwargs)
        self.logger.critical(processed, *args, **kw)

    def log(self, level, msg, *args, **kwargs):
        """Forward an arbitrary-level call to the wrapped logger."""
        processed, kw = self.process(msg, kwargs)
        self.logger.log(level, processed, *args, **kw)

    def isEnabledFor(self, level):
        """Report whether the wrapped logger is enabled for *level*."""
        return self.logger.isEnabledFor(level)
# The singleton root logger is created eagerly at import time, so the
# module-level convenience functions always have a logger to delegate to.
root = RootLogger(WARNING)
Logger.root = root
Logger.manager = Manager(Logger.root)

#---------------------------------------------------------------------------
# Configuration classes and functions
#---------------------------------------------------------------------------

# Default format string installed by basicConfig() when no 'format' keyword
# is supplied.
BASIC_FORMAT = "%(levelname)s:%(name)s:%(message)s"
def basicConfig(**kwargs):
    """
    Do basic configuration for the logging system.

    A no-op if the root logger already has handlers. Otherwise creates a
    handler (StreamHandler on sys.stderr by default), gives it a Formatter
    built from BASIC_FORMAT, and attaches it to the root logger. Intended
    for one-shot configuration by simple scripts.

    Recognized keyword arguments:

    filename  Create a FileHandler for this file instead of a StreamHandler.
    filemode  Mode used to open 'filename' (default 'a').
    format    Format string for the handler (default BASIC_FORMAT).
    datefmt   Date/time format string.
    level     If given, set the root logger to this level.
    stream    Stream for the StreamHandler; ignored when 'filename' is also
              present.

    A stream created via open(filename, mode) may be passed instead of the
    filename/mode pair, but note that StreamHandler never closes its stream
    (it may be sys.stdout/sys.stderr) whereas FileHandler closes its file
    when the handler is closed.
    """
    # Serialize against concurrent (mis)use of basicConfig from threads.
    _acquireLock()
    try:
        if root.handlers:
            return
        filename = kwargs.get("filename")
        if filename:
            hdlr = FileHandler(filename, kwargs.get("filemode", 'a'))
        else:
            hdlr = StreamHandler(kwargs.get("stream"))
        fmt = Formatter(kwargs.get("format", BASIC_FORMAT),
                        kwargs.get("datefmt", None))
        hdlr.setFormatter(fmt)
        root.addHandler(hdlr)
        level = kwargs.get("level")
        if level is not None:
            root.setLevel(level)
    finally:
        _releaseLock()
#---------------------------------------------------------------------------
# Utility functions at module level.
# Basically delegate everything to the root logger.
#---------------------------------------------------------------------------

def getLogger(name=None):
    """
    Return a logger with the specified name, creating it if necessary.
    With no name (or an empty one), return the root logger.
    """
    if not name:
        return root
    return Logger.manager.getLogger(name)
#def getRootLogger():
# """
# Return the root logger.
#
# Note that getLogger('') now does the same thing, so this function is
# deprecated and may disappear in the future.
# """
# return root
def critical(msg, *args, **kwargs):
    """
    Log a message with severity 'CRITICAL' on the root logger, installing a
    default handler via basicConfig() first if none is configured.
    """
    if not root.handlers:
        basicConfig()
    root.critical(msg, *args, **kwargs)

# Backward-compatible alias.
fatal = critical
def error(msg, *args, **kwargs):
    """
    Log a message with severity 'ERROR' on the root logger, installing a
    default handler via basicConfig() first if none is configured.
    """
    if not root.handlers:
        basicConfig()
    root.error(msg, *args, **kwargs)
def exception(msg, *args, **kwargs):
    """
    Log a message with severity 'ERROR' on the root logger, with exception
    information taken from sys.exc_info().

    Fix for issue #15541: the previous signature (msg, *args) forwarded via
    error(msg, exc_info=1, *args), which made it impossible for callers to
    pass other keyword arguments such as 'extra'. Accept **kwargs and force
    exc_info instead; any exc_info supplied by the caller is deliberately
    overridden.
    """
    kwargs['exc_info'] = 1
    error(msg, *args, **kwargs)
def warning(msg, *args, **kwargs):
    """
    Log a message with severity 'WARNING' on the root logger, installing a
    default handler via basicConfig() first if none is configured.
    """
    if not root.handlers:
        basicConfig()
    root.warning(msg, *args, **kwargs)

# Backward-compatible alias.
warn = warning
def info(msg, *args, **kwargs):
    """
    Log a message with severity 'INFO' on the root logger, installing a
    default handler via basicConfig() first if none is configured.
    """
    if not root.handlers:
        basicConfig()
    root.info(msg, *args, **kwargs)
def debug(msg, *args, **kwargs):
    """
    Log a message with severity 'DEBUG' on the root logger, installing a
    default handler via basicConfig() first if none is configured.
    """
    if not root.handlers:
        basicConfig()
    root.debug(msg, *args, **kwargs)
def log(level, msg, *args, **kwargs):
    """
    Log 'msg % args' with the integer severity 'level' on the root logger,
    installing a default handler via basicConfig() first if none is
    configured.
    """
    if not root.handlers:
        basicConfig()
    root.log(level, msg, *args, **kwargs)
def disable(level):
    """
    Disable all logging calls of severity 'level' and below.

    Implemented by setting Manager.disable, which Logger.isEnabledFor()
    consults before the per-logger effective level.
    """
    root.manager.disable = level
def shutdown(handlerList=_handlerList):
    """
    Perform any cleanup actions in the logging system (e.g. flushing
    buffers).

    Should be called at application exit.

    handlerList is a list of weak references to handlers (default: the
    module-global _handlerList). A copy is traversed in reverse, so the
    most recently added handlers are closed first.
    """
    for wr in reversed(handlerList[:]):
        #errors might occur, for example, if files are locked
        #we just ignore them if raiseExceptions is not set
        try:
            h = wr()    # dereference the weakref; None if already collected
            if h:
                try:
                    h.acquire()
                    h.flush()
                    h.close()
                except (IOError, ValueError):
                    # Ignore errors which might be caused
                    # because handlers have been closed but
                    # references to them are still around at
                    # application exit.
                    pass
                finally:
                    # NOTE(review): release() runs even when acquire() itself
                    # raised, i.e. on a lock that was never acquired -- confirm
                    # that this is acceptable for the lock implementation used.
                    h.release()
        except:
            if raiseExceptions:
                raise
            #else, swallow
#Let's try and shutdown automatically on application exit...
# Registered last-in, first-out by atexit, so shutdown() runs before
# earlier-registered exit handlers.
import atexit
atexit.register(shutdown)
# Null handler

class NullHandler(Handler):
    """
    A do-nothing handler. Library code that logs events should attach one of
    these to its top-level logger: if the application never configures
    logging, the one-off "No handlers could be found for logger XXX" warning
    is then suppressed instead of being printed.
    """

    def handle(self, record):
        """Discard the record without filtering or locking."""

    def emit(self, record):
        """Discard the record."""

    def createLock(self):
        """No I/O is serialized by this handler, so no lock is created."""
        self.lock = None
# Warnings integration

# Saved reference to the original warnings.showwarning; non-None only while
# captureWarnings(True) is in effect (see captureWarnings below).
_warnings_showwarning = None
def _showwarning(message, category, filename, lineno, file=None, line=None):
    """
    Replacement for warnings.showwarning that routes warnings to logging.

    With no explicit file, the formatted warning is logged at WARNING level
    to the "py.warnings" logger (which gets a NullHandler if it has none).
    With a file, the call is delegated to the saved original implementation,
    if any.
    """
    if file is None:
        s = warnings.formatwarning(message, category, filename, lineno, line)
        wlogger = getLogger("py.warnings")
        if not wlogger.handlers:
            wlogger.addHandler(NullHandler())
        wlogger.warning("%s", s)
    elif _warnings_showwarning is not None:
        _warnings_showwarning(message, category, filename, lineno, file, line)
def captureWarnings(capture):
    """
    Toggle redirection of warnings into the logging package.

    capture=True installs _showwarning as warnings.showwarning (saving the
    original); capture=False restores the saved original. Both directions
    are idempotent.
    """
    global _warnings_showwarning
    if capture and _warnings_showwarning is None:
        _warnings_showwarning = warnings.showwarning
        warnings.showwarning = _showwarning
    elif not capture and _warnings_showwarning is not None:
        warnings.showwarning = _warnings_showwarning
        _warnings_showwarning = None
# Issue #15541: Correct anomaly in logging.exception. Thanks to Ned Batchelder for the report.
# Copyright 2001-2012 by Vinay Sajip. All Rights Reserved.
#
# Permission to use, copy, modify, and distribute this software and its
# documentation for any purpose and without fee is hereby granted,
# provided that the above copyright notice appear in all copies and that
# both that copyright notice and this permission notice appear in
# supporting documentation, and that the name of Vinay Sajip
# not be used in advertising or publicity pertaining to distribution
# of the software without specific, written prior permission.
# VINAY SAJIP DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE, INCLUDING
# ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL
# VINAY SAJIP BE LIABLE FOR ANY SPECIAL, INDIRECT OR CONSEQUENTIAL DAMAGES OR
# ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER
# IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
"""
Logging package for Python. Based on PEP 282 and comments thereto in
comp.lang.python.
Copyright (C) 2001-2012 Vinay Sajip. All Rights Reserved.
To use, simply 'import logging' and log away!
"""
import sys, os, time, cStringIO, traceback, warnings, weakref
# Public API of the logging package.
__all__ = ['BASIC_FORMAT', 'BufferingFormatter', 'CRITICAL', 'DEBUG', 'ERROR',
           'FATAL', 'FileHandler', 'Filter', 'Formatter', 'Handler', 'INFO',
           'LogRecord', 'Logger', 'LoggerAdapter', 'NOTSET', 'NullHandler',
           'StreamHandler', 'WARN', 'WARNING', 'addLevelName', 'basicConfig',
           'captureWarnings', 'critical', 'debug', 'disable', 'error',
           'exception', 'fatal', 'getLevelName', 'getLogger', 'getLoggerClass',
           'info', 'log', 'makeLogRecord', 'setLoggerClass', 'warn', 'warning']

# codecs is optional: when unavailable, FileHandler forces encoding=None and
# uses the builtin open().
try:
    import codecs
except ImportError:
    codecs = None

# threading is optional: when unavailable, 'thread' stays None and both the
# module lock and per-handler locks are None (lock ops become no-ops).
try:
    import thread
    import threading
except ImportError:
    thread = None

__author__  = "Vinay Sajip <vinay_sajip@red-dove.com>"
__status__  = "production"
__version__ = "0.5.1.2"
__date__    = "07 February 2010"
#---------------------------------------------------------------------------
#   Miscellaneous module data
#---------------------------------------------------------------------------

# Probe for the Python 2 'unicode' builtin; when absent, StreamHandler.emit
# skips its unicode-handling path entirely.
try:
    unicode
    _unicode = True
except NameError:
    _unicode = False

#
# _srcfile is used when walking the stack to check when we've got the first
# caller stack frame.
#
if hasattr(sys, 'frozen'): #support for py2exe
    _srcfile = "logging%s__init__%s" % (os.sep, __file__[-4:])
elif __file__[-4:].lower() in ['.pyc', '.pyo']:
    # map the compiled-file name back to the .py source so it matches the
    # co_filename seen in stack frames
    _srcfile = __file__[:-4] + '.py'
else:
    _srcfile = __file__
_srcfile = os.path.normcase(_srcfile)
# next bit filched from 1.5.2's inspect.py
def currentframe():
    """Return the frame object for the caller's stack frame."""
    try:
        raise Exception
    except:
        # the traceback's frame is where the exception was raised; f_back is
        # therefore the caller's frame
        return sys.exc_info()[2].tb_frame.f_back

# Faster replacement where sys._getframe exists (CPython). NOTE(review): the
# hard-coded depth of 3 presumably compensates for the lambda's own frame so
# callers such as Logger.findCaller see the same frame as the pure-Python
# version -- confirm before changing either side.
if hasattr(sys, '_getframe'): currentframe = lambda: sys._getframe(3)
# done filching

# _srcfile is only used in conjunction with sys._getframe().
# To provide compatibility with older versions of Python, set _srcfile
# to None if _getframe() is not available; this value will prevent
# findCaller() from being called.
#if not hasattr(sys, "_getframe"):
#    _srcfile = None

#
#_startTime is used as the base when calculating the relative time of events
#
_startTime = time.time()

#
#raiseExceptions is used to see if exceptions during handling should be
#propagated
#
raiseExceptions = 1

#
# If you don't want threading information in the log, set this to zero
#
logThreads = 1

#
# If you don't want multiprocessing information in the log, set this to zero
#
logMultiprocessing = 1

#
# If you don't want process information in the log, set this to zero
#
logProcesses = 1
#---------------------------------------------------------------------------
#   Level related stuff
#---------------------------------------------------------------------------
#
# Default levels and level names, these can be replaced with any positive set
# of values having corresponding names. There is a pseudo-level, NOTSET, which
# is only really there as a lower limit for user-defined levels. Handlers and
# loggers are initialized with NOTSET so that they will log all messages, even
# at user-defined levels.
#

CRITICAL = 50
FATAL = CRITICAL
ERROR = 40
WARNING = 30
WARN = WARNING
INFO = 20
DEBUG = 10
NOTSET = 0

# Bidirectional map: numeric level -> name AND name -> numeric level.
# addLevelName() inserts new entries in both directions.
_levelNames = {
    CRITICAL : 'CRITICAL',
    ERROR : 'ERROR',
    WARNING : 'WARNING',
    INFO : 'INFO',
    DEBUG : 'DEBUG',
    NOTSET : 'NOTSET',
    'CRITICAL' : CRITICAL,
    'ERROR' : ERROR,
    'WARN' : WARNING,
    'WARNING' : WARNING,
    'INFO' : INFO,
    'DEBUG' : DEBUG,
    'NOTSET' : NOTSET,
}
def getLevelName(level):
"""
Return the textual representation of logging level 'level'.
If the level is one of the predefined levels (CRITICAL, ERROR, WARNING,
INFO, DEBUG) then you get the corresponding string. If you have
associated levels with names using addLevelName then the name you have
associated with 'level' is returned.
If a numeric value corresponding to one of the defined levels is passed
in, the corresponding string representation is returned.
Otherwise, the string "Level %s" % level is returned.
"""
return _levelNames.get(level, ("Level %s" % level))
def addLevelName(level, levelName):
    """
    Associate 'levelName' with 'level'.

    This is used when converting levels to text during message formatting.
    The mapping is registered in both directions (number -> name and
    name -> number) under the module-level lock.
    """
    _acquireLock()
    try:  # unlikely to cause an exception, but you never know...
        _levelNames[level] = levelName
        _levelNames[levelName] = level
    finally:
        _releaseLock()
def _checkLevel(level):
    """
    Coerce 'level' (an int or a registered level name) to an int.

    Raises ValueError for an unknown level name and TypeError for any
    other kind of argument.
    """
    if isinstance(level, int):
        return level
    # 'str(level) == level' is a cheap "is this a string?" test that also
    # accepts unicode strings whose str() round-trips (Python 2 idiom).
    if str(level) == level:
        if level not in _levelNames:
            raise ValueError("Unknown level: %r" % level)
        return _levelNames[level]
    raise TypeError("Level not an integer or a valid string: %r" % level)
#---------------------------------------------------------------------------
# Thread-related stuff
#---------------------------------------------------------------------------
#
#_lock is used to serialize access to shared data structures in this module.
#This needs to be an RLock because fileConfig() creates and configures
#Handlers, and so might arbitrary user threads. Since Handler code updates the
#shared dictionary _handlers, it needs to acquire the lock. But if configuring,
#the lock would already have been acquired - so we need an RLock.
#The same argument applies to Loggers and Manager.loggerDict.
#
# The module lock must be an RLock: fileConfig() creates and configures
# Handlers (and arbitrary user threads may too) while already holding it.
# It is None when the 'thread' module is unavailable.
_lock = threading.RLock() if thread else None

def _acquireLock():
    """
    Acquire the module-level lock for serializing access to shared data.

    This should be released with _releaseLock().
    """
    if _lock is not None:
        _lock.acquire()

def _releaseLock():
    """
    Release the module-level lock acquired by calling _acquireLock().
    """
    if _lock is not None:
        _lock.release()
#---------------------------------------------------------------------------
# The logging record
#---------------------------------------------------------------------------
class LogRecord(object):
    """
    A LogRecord instance represents an event being logged.

    LogRecord instances are created every time something is logged. They
    contain all the information pertinent to the event being logged. The
    main information passed in is in msg and args, which are combined
    using str(msg) % args to create the message field of the record. The
    record also includes information such as when the record was created,
    the source line where the logging call was made, and any exception
    information to be logged.
    """
    def __init__(self, name, level, pathname, lineno,
                 msg, args, exc_info, func=None):
        """
        Initialize a logging record with interesting information.

        'name' is the logger name, 'level' the numeric severity, 'pathname'
        and 'lineno' locate the logging call, 'msg'/'args' form the message,
        'exc_info' is an exception triple (or None) and 'func' the caller's
        function name.
        """
        ct = time.time()
        self.name = name
        self.msg = msg
        #
        # The following statement allows passing of a dictionary as a sole
        # argument, so that you can do something like
        #  logging.debug("a %(a)d b %(b)s", {'a':1, 'b':2})
        # Suggested by Stefan Behnel.
        # Note that without the test for args[0], we get a problem because
        # during formatting, we test to see if the arg is present using
        # 'if self.args:'. If the event being logged is e.g. 'Value is %d'
        # and if the passed arg fails 'if self.args:' then no formatting
        # is done. For example, logger.warn('Value is %d', 0) would log
        # 'Value is %d' instead of 'Value is 0'.
        # For the use case of passing a dictionary, this should not be a
        # problem.
        if args and len(args) == 1 and isinstance(args[0], dict) and args[0]:
            args = args[0]
        self.args = args
        self.levelname = getLevelName(level)
        self.levelno = level
        self.pathname = pathname
        try:
            self.filename = os.path.basename(pathname)
            self.module = os.path.splitext(self.filename)[0]
        except (TypeError, ValueError, AttributeError):
            # pathname may be None or otherwise unusable (e.g. from
            # makeLogRecord); fall back to placeholders.
            self.filename = pathname
            self.module = "Unknown module"
        self.exc_info = exc_info
        self.exc_text = None  # used to cache the traceback text
        self.lineno = lineno
        self.funcName = func
        self.created = ct
        # long() is the Python 2 integer truncation; msecs is the fractional
        # part of the creation time expressed in milliseconds.
        self.msecs = (ct - long(ct)) * 1000
        self.relativeCreated = (self.created - _startTime) * 1000
        if logThreads and thread:
            self.thread = thread.get_ident()
            self.threadName = threading.current_thread().name
        else:
            self.thread = None
            self.threadName = None
        if not logMultiprocessing:
            self.processName = None
        else:
            self.processName = 'MainProcess'
            mp = sys.modules.get('multiprocessing')
            if mp is not None:
                # Errors may occur if multiprocessing has not finished loading
                # yet - e.g. if a custom import hook causes third-party code
                # to run when multiprocessing calls import. See issue 8200
                # for an example
                try:
                    self.processName = mp.current_process().name
                except StandardError:
                    pass
        if logProcesses and hasattr(os, 'getpid'):
            self.process = os.getpid()
        else:
            self.process = None
    def __str__(self):
        # Debug-friendly representation; not used for actual formatting.
        return '<LogRecord: %s, %s, %s, %s, "%s">'%(self.name, self.levelno,
            self.pathname, self.lineno, self.msg)
    def getMessage(self):
        """
        Return the message for this LogRecord.

        Return the message for this LogRecord after merging any user-supplied
        arguments with the message (via the % operator).
        """
        if not _unicode: #if no unicode support...
            msg = str(self.msg)
        else:
            msg = self.msg
            if not isinstance(msg, basestring):
                try:
                    msg = str(self.msg)
                except UnicodeError:
                    msg = self.msg  #Defer encoding till later
        if self.args:
            msg = msg % self.args
        return msg
def makeLogRecord(dict):
    """
    Make a LogRecord whose attributes are defined by the specified dictionary.

    This function is useful for converting a logging event received over
    a socket connection (which is sent as a dictionary) into a LogRecord
    instance.
    """
    # Build an empty record, then overwrite its attributes wholesale from
    # the supplied mapping. (NB: the parameter name shadows the builtin
    # 'dict'; kept for backward compatibility with keyword callers.)
    rv = LogRecord(None, None, "", 0, "", (), None, None)
    rv.__dict__.update(dict)
    return rv
#---------------------------------------------------------------------------
# Formatter classes and functions
#---------------------------------------------------------------------------
class Formatter(object):
    """
    Formatter instances are used to convert a LogRecord to text.

    Formatters need to know how a LogRecord is constructed. They are
    responsible for converting a LogRecord to (usually) a string which can
    be interpreted by either a human or an external system. The base Formatter
    allows a formatting string to be specified. If none is supplied, the
    default value of "%(message)s" is used.

    The Formatter can be initialized with a format string which makes use of
    knowledge of the LogRecord attributes - e.g. the default value mentioned
    above makes use of the fact that the user's message and arguments are pre-
    formatted into a LogRecord's message attribute. Currently, the useful
    attributes in a LogRecord are described by:

    %(name)s            Name of the logger (logging channel)
    %(levelno)s         Numeric logging level for the message (DEBUG, INFO,
                        WARNING, ERROR, CRITICAL)
    %(levelname)s       Text logging level for the message ("DEBUG", "INFO",
                        "WARNING", "ERROR", "CRITICAL")
    %(pathname)s        Full pathname of the source file where the logging
                        call was issued (if available)
    %(filename)s        Filename portion of pathname
    %(module)s          Module (name portion of filename)
    %(lineno)d          Source line number where the logging call was issued
                        (if available)
    %(funcName)s        Function name
    %(created)f         Time when the LogRecord was created (time.time()
                        return value)
    %(asctime)s         Textual time when the LogRecord was created
    %(msecs)d           Millisecond portion of the creation time
    %(relativeCreated)d Time in milliseconds when the LogRecord was created,
                        relative to the time the logging module was loaded
                        (typically at application startup time)
    %(thread)d          Thread ID (if available)
    %(threadName)s      Thread name (if available)
    %(process)d         Process ID (if available)
    %(message)s         The result of record.getMessage(), computed just as
                        the record is emitted
    """
    # Converts record.created to a struct_time; override (e.g. with
    # time.gmtime) to change how times are rendered.
    converter = time.localtime
    def __init__(self, fmt=None, datefmt=None):
        """
        Initialize the formatter with specified format strings.

        Initialize the formatter either with the specified format string, or a
        default as described above. Allow for specialized date formatting with
        the optional datefmt argument (if omitted, you get the ISO8601 format).
        """
        if fmt:
            self._fmt = fmt
        else:
            self._fmt = "%(message)s"
        self.datefmt = datefmt
    def formatTime(self, record, datefmt=None):
        """
        Return the creation time of the specified LogRecord as formatted text.

        This method should be called from format() by a formatter which
        wants to make use of a formatted time. This method can be overridden
        in formatters to provide for any specific requirement, but the
        basic behaviour is as follows: if datefmt (a string) is specified,
        it is used with time.strftime() to format the creation time of the
        record. Otherwise, the ISO8601 format is used. The resulting
        string is returned. This function uses a user-configurable function
        to convert the creation time to a tuple. By default, time.localtime()
        is used; to change this for a particular formatter instance, set the
        'converter' attribute to a function with the same signature as
        time.localtime() or time.gmtime(). To change it for all formatters,
        for example if you want all logging times to be shown in GMT,
        set the 'converter' attribute in the Formatter class.
        """
        ct = self.converter(record.created)
        if datefmt:
            s = time.strftime(datefmt, ct)
        else:
            # ISO8601-ish default, with milliseconds appended manually since
            # strftime has no millisecond directive.
            t = time.strftime("%Y-%m-%d %H:%M:%S", ct)
            s = "%s,%03d" % (t, record.msecs)
        return s
    def formatException(self, ei):
        """
        Format and return the specified exception information as a string.

        This default implementation just uses
        traceback.print_exception()
        """
        sio = cStringIO.StringIO()
        traceback.print_exception(ei[0], ei[1], ei[2], None, sio)
        s = sio.getvalue()
        sio.close()
        # Strip the trailing newline print_exception leaves behind.
        if s[-1:] == "\n":
            s = s[:-1]
        return s
    def usesTime(self):
        """
        Check if the format uses the creation time of the record.
        """
        return self._fmt.find("%(asctime)") >= 0
    def format(self, record):
        """
        Format the specified record as text.

        The record's attribute dictionary is used as the operand to a
        string formatting operation which yields the returned string.
        Before formatting the dictionary, a couple of preparatory steps
        are carried out. The message attribute of the record is computed
        using LogRecord.getMessage(). If the formatting string uses the
        time (as determined by a call to usesTime()), formatTime() is
        called to format the event time. If there is exception information,
        it is formatted using formatException() and appended to the message.
        """
        record.message = record.getMessage()
        if self.usesTime():
            record.asctime = self.formatTime(record, self.datefmt)
        s = self._fmt % record.__dict__
        if record.exc_info:
            # Cache the traceback text to avoid converting it multiple times
            # (it's constant anyway)
            if not record.exc_text:
                record.exc_text = self.formatException(record.exc_info)
        if record.exc_text:
            if s[-1:] != "\n":
                s = s + "\n"
            try:
                s = s + record.exc_text
            except UnicodeError:
                # Sometimes filenames have non-ASCII chars, which can lead
                # to errors when s is Unicode and record.exc_text is str
                # See issue 8924.
                # We also use replace for when there are multiple
                # encodings, e.g. UTF-8 for the filesystem and latin-1
                # for a script. See issue 13232.
                s = s + record.exc_text.decode(sys.getfilesystemencoding(),
                                               'replace')
        return s
#
# The default formatter to use when no other is specified by a handler;
# with no explicit format string it renders just "%(message)s".
#
_defaultFormatter = Formatter()
class BufferingFormatter(object):
    """
    A formatter suitable for formatting a batch of records in one string,
    with optional header and footer text around them.
    """
    def __init__(self, linefmt=None):
        """
        Optionally specify a formatter which will be used to format each
        individual record; the module default formatter is used otherwise.
        """
        self.linefmt = linefmt if linefmt else _defaultFormatter
    def formatHeader(self, records):
        """
        Return the header string for the specified records.
        """
        return ""
    def formatFooter(self, records):
        """
        Return the footer string for the specified records.
        """
        return ""
    def format(self, records):
        """
        Format the specified records and return the result as a string.
        """
        if not records:
            return ""
        pieces = [self.formatHeader(records)]
        pieces.extend(self.linefmt.format(record) for record in records)
        pieces.append(self.formatFooter(records))
        return "".join(pieces)
#---------------------------------------------------------------------------
# Filter classes and functions
#---------------------------------------------------------------------------
class Filter(object):
    """
    Filter instances are used to perform arbitrary filtering of LogRecords.

    Loggers and Handlers can optionally use Filter instances to filter
    records as desired. The base filter class only allows events which are
    below a certain point in the logger hierarchy. For example, a filter
    initialized with "A.B" will allow events logged by loggers "A.B",
    "A.B.C", "A.B.C.D", "A.B.D" etc. but not "A.BB", "B.A.B" etc. If
    initialized with the empty string, all events are passed.
    """
    def __init__(self, name=''):
        """
        Initialize a filter.

        Initialize with the name of the logger which, together with its
        children, will have its events allowed through the filter. If no
        name is specified, allow every event.
        """
        self.name = name
        self.nlen = len(name)
    def filter(self, record):
        """
        Determine if the specified record is to be logged.

        Is the specified record to be logged? Returns 0 for no, nonzero for
        yes. If deemed appropriate, the record may be modified in-place.
        """
        if not self.nlen:
            # Empty filter name: everything passes.
            return 1
        if self.name == record.name:
            return 1
        if record.name.find(self.name, 0, self.nlen) != 0:
            # record.name does not start with our name at all.
            return 0
        # Prefix matched; accept only if it is a dotted-child boundary.
        return (record.name[self.nlen] == ".")
class Filterer(object):
    """
    A base class for loggers and handlers which allows them to share
    common filter-management code.
    """
    def __init__(self):
        """
        Initialize the list of filters to be an empty list.
        """
        self.filters = []
    def addFilter(self, filter):
        """
        Add the specified filter to this handler (duplicates are ignored).
        """
        if filter not in self.filters:
            self.filters.append(filter)
    def removeFilter(self, filter):
        """
        Remove the specified filter from this handler, if present.
        """
        if filter in self.filters:
            self.filters.remove(filter)
    def filter(self, record):
        """
        Determine if a record is loggable by consulting all the filters.

        The default is to allow the record to be logged; any filter can veto
        this and the record is then dropped. Returns a zero value if a record
        is to be dropped, else non-zero.
        """
        for each in self.filters:
            if not each.filter(record):
                return 0
        return 1
#---------------------------------------------------------------------------
# Handler classes and functions
#---------------------------------------------------------------------------
# Module-level handler registries. _handlers maps handler names to live
# Handler instances weakly, so an entry vanishes when its handler is
# garbage-collected. _handlerList holds weak references (added by
# _addHandlerRef) so handlers can be cleaned up in reverse creation order.
_handlers = weakref.WeakValueDictionary()  #map of handler names to handlers
_handlerList = [] # added to allow handlers to be removed in reverse of order initialized
def _removeHandlerRef(wr):
    """
    Remove a handler reference from the internal cleanup list.

    Used as the weakref callback for entries in _handlerList.
    """
    # This function can be called during module teardown, when globals are
    # set to None. If _acquireLock is None, assume this is the case and do
    # nothing.
    acquire = _acquireLock
    if acquire is None:
        return
    acquire()
    try:
        if wr in _handlerList:
            _handlerList.remove(wr)
    finally:
        _releaseLock()
def _addHandlerRef(handler):
    """
    Add a handler to the internal cleanup list using a weak reference.

    The weak reference's callback (_removeHandlerRef) drops the entry
    again once the handler is garbage-collected.
    """
    _acquireLock()
    try:
        _handlerList.append(weakref.ref(handler, _removeHandlerRef))
    finally:
        _releaseLock()
class Handler(Filterer):
    """
    Handler instances dispatch logging events to specific destinations.

    The base handler class. Acts as a placeholder which defines the Handler
    interface. Handlers can optionally use Formatter instances to format
    records as desired. By default, no formatter is specified; in this case,
    the 'raw' message as determined by record.message is logged.
    """
    def __init__(self, level=NOTSET):
        """
        Initializes the instance - basically setting the formatter to None
        and the filter list to empty.

        'level' may be an int or a registered level name (see _checkLevel).
        """
        Filterer.__init__(self)
        self._name = None
        self.level = _checkLevel(level)
        self.formatter = None
        # Add the handler to the global _handlerList (for cleanup on shutdown)
        _addHandlerRef(self)
        self.createLock()
    def get_name(self):
        # Accessor backing the 'name' property defined below.
        return self._name
    def set_name(self, name):
        # Rename the handler, keeping the module-level _handlers registry
        # (name -> handler) consistent under the module lock.
        _acquireLock()
        try:
            if self._name in _handlers:
                del _handlers[self._name]
            self._name = name
            if name:
                _handlers[name] = self
        finally:
            _releaseLock()
    name = property(get_name, set_name)
    def createLock(self):
        """
        Acquire a thread lock for serializing access to the underlying I/O.
        """
        # self.lock is None when the 'thread' module is unavailable; in
        # that case acquire()/release() are no-ops.
        if thread:
            self.lock = threading.RLock()
        else:
            self.lock = None
    def acquire(self):
        """
        Acquire the I/O thread lock.
        """
        if self.lock:
            self.lock.acquire()
    def release(self):
        """
        Release the I/O thread lock.
        """
        if self.lock:
            self.lock.release()
    def setLevel(self, level):
        """
        Set the logging level of this handler.

        'level' may be an int or a registered level name (see _checkLevel).
        """
        self.level = _checkLevel(level)
    def format(self, record):
        """
        Format the specified record.

        If a formatter is set, use it. Otherwise, use the default formatter
        for the module.
        """
        if self.formatter:
            fmt = self.formatter
        else:
            fmt = _defaultFormatter
        return fmt.format(record)
    def emit(self, record):
        """
        Do whatever it takes to actually log the specified logging record.

        This version is intended to be implemented by subclasses and so
        raises a NotImplementedError.
        """
        raise NotImplementedError('emit must be implemented '
                                  'by Handler subclasses')
    def handle(self, record):
        """
        Conditionally emit the specified logging record.

        Emission depends on filters which may have been added to the handler.
        Wrap the actual emission of the record with acquisition/release of
        the I/O thread lock. Returns whether the filter passed the record for
        emission.
        """
        rv = self.filter(record)
        if rv:
            self.acquire()
            try:
                self.emit(record)
            finally:
                self.release()
        return rv
    def setFormatter(self, fmt):
        """
        Set the formatter for this handler.
        """
        self.formatter = fmt
    def flush(self):
        """
        Ensure all logging output has been flushed.

        This version does nothing and is intended to be implemented by
        subclasses.
        """
        pass
    def close(self):
        """
        Tidy up any resources used by the handler.

        This version removes the handler from an internal map of handlers,
        _handlers, which is used for handler lookup by name. Subclasses
        should ensure that this gets called from overridden close()
        methods.
        """
        #get the module data lock, as we're updating a shared structure.
        _acquireLock()
        try:    #unlikely to raise an exception, but you never know...
            if self._name and self._name in _handlers:
                del _handlers[self._name]
        finally:
            _releaseLock()
    def handleError(self, record):
        """
        Handle errors which occur during an emit() call.

        This method should be called from handlers when an exception is
        encountered during an emit() call. If raiseExceptions is false,
        exceptions get silently ignored. This is what is mostly wanted
        for a logging system - most users will not care about errors in
        the logging system, they are more interested in application errors.
        You could, however, replace this with a custom handler if you wish.
        The record which was being processed is passed in to this method.
        """
        if raiseExceptions and sys.stderr:  # see issue 13807
            ei = sys.exc_info()
            try:
                traceback.print_exception(ei[0], ei[1], ei[2],
                                          None, sys.stderr)
                sys.stderr.write('Logged from file %s, line %s\n' % (
                                 record.filename, record.lineno))
            except IOError:
                pass    # see issue 5971
            finally:
                # Break the traceback reference cycle promptly.
                del ei
class StreamHandler(Handler):
    """
    A handler class which writes logging records, appropriately formatted,
    to a stream. Note that this class does not close the stream, as
    sys.stdout or sys.stderr may be used.
    """
    def __init__(self, stream=None):
        """
        Initialize the handler.

        If stream is not specified, sys.stderr is used.
        """
        Handler.__init__(self)
        if stream is None:
            stream = sys.stderr
        self.stream = stream
    def flush(self):
        """
        Flushes the stream (under the handler's I/O lock).
        """
        self.acquire()
        try:
            # Guard both against a closed/absent stream and streams that
            # don't implement flush().
            if self.stream and hasattr(self.stream, "flush"):
                self.stream.flush()
        finally:
            self.release()
    def emit(self, record):
        """
        Emit a record.

        If a formatter is specified, it is used to format the record.
        The record is then written to the stream with a trailing newline. If
        exception information is present, it is formatted using
        traceback.print_exception and appended to the stream. If the stream
        has an 'encoding' attribute, it is used to determine how to do the
        output to the stream.
        """
        try:
            msg = self.format(record)
            stream = self.stream
            fs = "%s\n"
            if not _unicode: #if no unicode support...
                stream.write(fs % msg)
            else:
                try:
                    # Python 2 str/unicode handling: if the message is
                    # unicode and the stream declares an encoding, write
                    # via a unicode format string.
                    if (isinstance(msg, unicode) and
                        getattr(stream, 'encoding', None)):
                        ufs = fs.decode(stream.encoding)
                        try:
                            stream.write(ufs % msg)
                        except UnicodeEncodeError:
                            #Printing to terminals sometimes fails. For example,
                            #with an encoding of 'cp1251', the above write will
                            #work if written to a stream opened or wrapped by
                            #the codecs module, but fail when writing to a
                            #terminal even when the codepage is set to cp1251.
                            #An extra encoding step seems to be needed.
                            stream.write((ufs % msg).encode(stream.encoding))
                    else:
                        stream.write(fs % msg)
                except UnicodeError:
                    # Last resort: encode the message as UTF-8 bytes.
                    stream.write(fs % msg.encode("UTF-8"))
            self.flush()
        except (KeyboardInterrupt, SystemExit):
            raise
        except:
            # Any other failure is routed through the error-handling hook
            # so logging problems don't crash the application.
            self.handleError(record)
class FileHandler(StreamHandler):
    """
    A handler class which writes formatted logging records to disk files.
    """
    def __init__(self, filename, mode='a', encoding=None, delay=0):
        """
        Open the specified file and use it as the stream for logging.

        If 'delay' is true, the file is not opened until the first emit().
        """
        #keep the absolute path, otherwise derived classes which use this
        #may come a cropper when the current directory changes
        if codecs is None:
            encoding = None
        self.baseFilename = os.path.abspath(filename)
        self.mode = mode
        self.encoding = encoding
        if not delay:
            StreamHandler.__init__(self, self._open())
        else:
            #We don't open the stream, but we still need to call the
            #Handler constructor to set level, formatter, lock etc.
            Handler.__init__(self)
            self.stream = None
    def close(self):
        """
        Flush and close the stream (if open), then close the handler.
        """
        self.acquire()
        try:
            stream = self.stream
            if stream:
                self.flush()
                close_method = getattr(stream, "close", None)
                if close_method is not None:
                    close_method()
                StreamHandler.close(self)
                self.stream = None
        finally:
            self.release()
    def _open(self):
        """
        Open the current base file with the (original) mode and encoding.
        Return the resulting stream.
        """
        if self.encoding is not None:
            return codecs.open(self.baseFilename, self.mode, self.encoding)
        return open(self.baseFilename, self.mode)
    def emit(self, record):
        """
        Emit a record, first opening the stream if opening was delayed.
        """
        if self.stream is None:
            self.stream = self._open()
        StreamHandler.emit(self, record)
#---------------------------------------------------------------------------
# Manager classes and functions
#---------------------------------------------------------------------------
class PlaceHolder(object):
    """
    PlaceHolder instances occupy nodes in the Manager logger hierarchy for
    which no logger has been created yet. This class is intended for
    internal use only and not as part of the public API.
    """
    def __init__(self, alogger):
        """
        Initialize with the specified logger being a child of this placeholder.
        """
        # A dict is used purely as a set of child loggers (values unused).
        self.loggerMap = {alogger: None}
    def append(self, alogger):
        """
        Add the specified logger as a child of this placeholder.
        """
        if alogger not in self.loggerMap:
            self.loggerMap[alogger] = None
#
# Determine which class to use when instantiating loggers.
#
_loggerClass = None

def setLoggerClass(klass):
    """
    Set the class to be used when instantiating a logger. The class should
    define __init__() such that only a name argument is required, and the
    __init__() should call Logger.__init__()
    """
    global _loggerClass
    if klass != Logger and not issubclass(klass, Logger):
        raise TypeError("logger not derived from logging.Logger: "
                        + klass.__name__)
    _loggerClass = klass

def getLoggerClass():
    """
    Return the class to be used when instantiating a logger.
    """
    return _loggerClass
class Manager(object):
    """
    There is [under normal circumstances] just one Manager instance, which
    holds the hierarchy of loggers.
    """
    def __init__(self, rootnode):
        """
        Initialize the manager with the root node of the logger hierarchy.
        """
        self.root = rootnode
        # Global threshold: Logger.isEnabledFor() rejects any level <= this.
        self.disable = 0
        self.emittedNoHandlerWarning = 0
        # Maps dotted logger names to Logger or PlaceHolder instances.
        self.loggerDict = {}
        # Optional per-manager override of the module-level _loggerClass.
        self.loggerClass = None
    def getLogger(self, name):
        """
        Get a logger with the specified name (channel name), creating it
        if it doesn't yet exist. This name is a dot-separated hierarchical
        name, such as "a", "a.b", "a.b.c" or similar.

        If a PlaceHolder existed for the specified name [i.e. the logger
        didn't exist but a child of it did], replace it with the created
        logger and fix up the parent/child references which pointed to the
        placeholder to now point to the logger.
        """
        rv = None
        # Python 2 string handling: accept str or unicode, normalize to
        # UTF-8 encoded str for use as a dict key.
        if not isinstance(name, basestring):
            raise TypeError('A logger name must be string or Unicode')
        if isinstance(name, unicode):
            name = name.encode('utf-8')
        _acquireLock()
        try:
            if name in self.loggerDict:
                rv = self.loggerDict[name]
                if isinstance(rv, PlaceHolder):
                    # Replace the placeholder with a real logger and rewire
                    # the hierarchy links that pointed at it.
                    ph = rv
                    rv = (self.loggerClass or _loggerClass)(name)
                    rv.manager = self
                    self.loggerDict[name] = rv
                    self._fixupChildren(ph, rv)
                    self._fixupParents(rv)
            else:
                rv = (self.loggerClass or _loggerClass)(name)
                rv.manager = self
                self.loggerDict[name] = rv
                self._fixupParents(rv)
        finally:
            _releaseLock()
        return rv
    def setLoggerClass(self, klass):
        """
        Set the class to be used when instantiating a logger with this Manager.
        """
        if klass != Logger:
            if not issubclass(klass, Logger):
                raise TypeError("logger not derived from logging.Logger: "
                                + klass.__name__)
        self.loggerClass = klass
    def _fixupParents(self, alogger):
        """
        Ensure that there are either loggers or placeholders all the way
        from the specified logger to the root of the logger hierarchy.
        """
        name = alogger.name
        i = name.rfind(".")
        rv = None
        # Walk the dotted prefixes outwards until an existing Logger is
        # found; create placeholders for prefixes that don't exist yet.
        while (i > 0) and not rv:
            substr = name[:i]
            if substr not in self.loggerDict:
                self.loggerDict[substr] = PlaceHolder(alogger)
            else:
                obj = self.loggerDict[substr]
                if isinstance(obj, Logger):
                    rv = obj
                else:
                    assert isinstance(obj, PlaceHolder)
                    obj.append(alogger)
            i = name.rfind(".", 0, i - 1)
        if not rv:
            # No ancestor logger exists; parent directly to the root.
            rv = self.root
        alogger.parent = rv
    def _fixupChildren(self, ph, alogger):
        """
        Ensure that children of the placeholder ph are connected to the
        specified logger.
        """
        name = alogger.name
        namelen = len(name)
        for c in ph.loggerMap.keys():
            #The if means ... if not c.parent.name.startswith(nm)
            # i.e. only re-parent children whose current parent lies outside
            # alogger's namespace; alogger is inserted between them.
            if c.parent.name[:namelen] != name:
                alogger.parent = c.parent
                c.parent = alogger
#---------------------------------------------------------------------------
# Logger classes and functions
#---------------------------------------------------------------------------
class Logger(Filterer):
"""
Instances of the Logger class represent a single logging channel. A
"logging channel" indicates an area of an application. Exactly how an
"area" is defined is up to the application developer. Since an
application can have any number of areas, logging channels are identified
by a unique string. Application areas can be nested (e.g. an area
of "input processing" might include sub-areas "read CSV files", "read
XLS files" and "read Gnumeric files"). To cater for this natural nesting,
channel names are organized into a namespace hierarchy where levels are
separated by periods, much like the Java or Python package namespace. So
in the instance given above, channel names might be "input" for the upper
level, and "input.csv", "input.xls" and "input.gnu" for the sub-levels.
There is no arbitrary limit to the depth of nesting.
"""
def __init__(self, name, level=NOTSET):
"""
Initialize the logger with a name and an optional level.
"""
Filterer.__init__(self)
self.name = name
self.level = _checkLevel(level)
self.parent = None
self.propagate = 1
self.handlers = []
self.disabled = 0
    def setLevel(self, level):
        """
        Set the logging level of this logger.

        'level' may be an int or a registered level name (see _checkLevel).
        """
        self.level = _checkLevel(level)
def debug(self, msg, *args, **kwargs):
"""
Log 'msg % args' with severity 'DEBUG'.
To pass exception information, use the keyword argument exc_info with
a true value, e.g.
logger.debug("Houston, we have a %s", "thorny problem", exc_info=1)
"""
if self.isEnabledFor(DEBUG):
self._log(DEBUG, msg, args, **kwargs)
def info(self, msg, *args, **kwargs):
"""
Log 'msg % args' with severity 'INFO'.
To pass exception information, use the keyword argument exc_info with
a true value, e.g.
logger.info("Houston, we have a %s", "interesting problem", exc_info=1)
"""
if self.isEnabledFor(INFO):
self._log(INFO, msg, args, **kwargs)
def warning(self, msg, *args, **kwargs):
"""
Log 'msg % args' with severity 'WARNING'.
To pass exception information, use the keyword argument exc_info with
a true value, e.g.
logger.warning("Houston, we have a %s", "bit of a problem", exc_info=1)
"""
if self.isEnabledFor(WARNING):
self._log(WARNING, msg, args, **kwargs)
warn = warning
def error(self, msg, *args, **kwargs):
"""
Log 'msg % args' with severity 'ERROR'.
To pass exception information, use the keyword argument exc_info with
a true value, e.g.
logger.error("Houston, we have a %s", "major problem", exc_info=1)
"""
if self.isEnabledFor(ERROR):
self._log(ERROR, msg, args, **kwargs)
def exception(self, msg, *args, **kwargs):
"""
Convenience method for logging an ERROR with exception information.
"""
kwargs['exc_info'] = 1
self.error(msg, *args, **kwargs)
def critical(self, msg, *args, **kwargs):
"""
Log 'msg % args' with severity 'CRITICAL'.
To pass exception information, use the keyword argument exc_info with
a true value, e.g.
logger.critical("Houston, we have a %s", "major disaster", exc_info=1)
"""
if self.isEnabledFor(CRITICAL):
self._log(CRITICAL, msg, args, **kwargs)
fatal = critical
def log(self, level, msg, *args, **kwargs):
"""
Log 'msg % args' with the integer severity 'level'.
To pass exception information, use the keyword argument exc_info with
a true value, e.g.
logger.log(level, "We have a %s", "mysterious problem", exc_info=1)
"""
if not isinstance(level, int):
if raiseExceptions:
raise TypeError("level must be an integer")
else:
return
if self.isEnabledFor(level):
self._log(level, msg, args, **kwargs)
    def findCaller(self):
        """
        Find the stack frame of the caller so that we can note the source
        file name, line number and function name.

        Returns a (filename, lineno, funcname) triple; frames that belong
        to this module (matched against _srcfile) are skipped.
        """
        f = currentframe()
        #On some versions of IronPython, currentframe() returns None if
        #IronPython isn't run with -X:Frames.
        if f is not None:
            f = f.f_back
        rv = "(unknown file)", 0, "(unknown function)"
        # hasattr guards against f becoming None while walking outwards.
        while hasattr(f, "f_code"):
            co = f.f_code
            filename = os.path.normcase(co.co_filename)
            if filename == _srcfile:
                # Still inside the logging module; keep walking out.
                f = f.f_back
                continue
            rv = (co.co_filename, f.f_lineno, co.co_name)
            break
        return rv
def makeRecord(self, name, level, fn, lno, msg, args, exc_info, func=None, extra=None):
"""
A factory method which can be overridden in subclasses to create
specialized LogRecords.
"""
rv = LogRecord(name, level, fn, lno, msg, args, exc_info, func)
if extra is not None:
for key in extra:
if (key in ["message", "asctime"]) or (key in rv.__dict__):
raise KeyError("Attempt to overwrite %r in LogRecord" % key)
rv.__dict__[key] = extra[key]
return rv
    def _log(self, level, msg, args, exc_info=None, extra=None):
        """
        Low-level logging routine which creates a LogRecord and then calls
        all the handlers of this logger to handle the record.

        'args' is the already-collected tuple of message arguments;
        'exc_info' may be an exc_info tuple or any true value (in which
        case sys.exc_info() is captured here); 'extra' is an optional dict
        of extra record attributes (see makeRecord).
        """
        if _srcfile:
            #IronPython doesn't track Python frames, so findCaller throws an
            #exception on some versions of IronPython. We trap it here so that
            #IronPython can use logging.
            try:
                fn, lno, func = self.findCaller()
            except ValueError:
                fn, lno, func = "(unknown file)", 0, "(unknown function)"
        else:
            fn, lno, func = "(unknown file)", 0, "(unknown function)"
        if exc_info:
            # A bare true value (e.g. exc_info=1) means "use the current
            # exception"; a tuple is assumed to be a full exc_info triple.
            if not isinstance(exc_info, tuple):
                exc_info = sys.exc_info()
        record = self.makeRecord(self.name, level, fn, lno, msg, args, exc_info, func, extra)
        self.handle(record)
def handle(self, record):
"""
Call the handlers for the specified record.
This method is used for unpickled records received from a socket, as
well as those created locally. Logger-level filtering is applied.
"""
if (not self.disabled) and self.filter(record):
self.callHandlers(record)
    def addHandler(self, hdlr):
        """
        Add the specified handler to this logger.

        The handler list is mutated under the module lock; adding the same
        handler twice is a no-op.
        """
        _acquireLock()
        try:
            if not (hdlr in self.handlers):
                self.handlers.append(hdlr)
        finally:
            _releaseLock()
    def removeHandler(self, hdlr):
        """
        Remove the specified handler from this logger.

        Removing a handler that is not attached is a silent no-op; the
        handler list is mutated under the module lock.
        """
        _acquireLock()
        try:
            if hdlr in self.handlers:
                self.handlers.remove(hdlr)
        finally:
            _releaseLock()
    def callHandlers(self, record):
        """
        Pass a record to all relevant handlers.

        Loop through all handlers for this logger and its parents in the
        logger hierarchy. If no handler was found, output a one-off error
        message to sys.stderr. Stop searching up the hierarchy whenever a
        logger with the "propagate" attribute set to zero is found - that
        will be the last logger whose handlers are called.
        """
        c = self
        found = 0
        while c:
            for hdlr in c.handlers:
                # A handler counts as "found" even when it rejects the
                # record because of its level threshold.
                found = found + 1
                if record.levelno >= hdlr.level:
                    hdlr.handle(record)
            if not c.propagate:
                c = None    #break out
            else:
                c = c.parent
        # The "no handlers" warning is emitted at most once per manager.
        if (found == 0) and raiseExceptions and not self.manager.emittedNoHandlerWarning:
            sys.stderr.write("No handlers could be found for logger"
                             " \"%s\"\n" % self.name)
            self.manager.emittedNoHandlerWarning = 1
def getEffectiveLevel(self):
"""
Get the effective level for this logger.
Loop through this logger and its parents in the logger hierarchy,
looking for a non-zero logging level. Return the first one found.
"""
logger = self
while logger:
if logger.level:
return logger.level
logger = logger.parent
return NOTSET
def isEnabledFor(self, level):
"""
Is this logger enabled for level 'level'?
"""
if self.manager.disable >= level:
return 0
return level >= self.getEffectiveLevel()
def getChild(self, suffix):
"""
Get a logger which is a descendant to this one.
This is a convenience method, such that
logging.getLogger('abc').getChild('def.ghi')
is the same as
logging.getLogger('abc.def.ghi')
It's useful, for example, when the parent logger is named using
__name__ rather than a literal string.
"""
if self.root is not self:
suffix = '.'.join((self.name, suffix))
return self.manager.getLogger(suffix)
class RootLogger(Logger):
    """
    A root logger is not that different to any other logger, except that
    it must have a logging level and there is only one instance of it in
    the hierarchy.
    """
    def __init__(self, level):
        """
        Initialize the logger with the name "root".
        """
        # The root logger is always literally named "root".
        Logger.__init__(self, "root", level)
# NOTE(review): presumably the factory class used when new loggers are
# created (the setLoggerClass machinery is not visible in this chunk).
_loggerClass = Logger
class LoggerAdapter(object):
    """
    An adapter for loggers which makes it easier to specify contextual
    information in logging output.
    """
    def __init__(self, logger, extra):
        """
        Initialize the adapter with a logger and a dict-like object which
        provides contextual information. This constructor signature allows
        easy stacking of LoggerAdapters, if so desired.

        You can effectively pass keyword arguments as shown in the
        following example:

        adapter = LoggerAdapter(someLogger, dict(p1=v1, p2="v2"))
        """
        self.logger = logger
        self.extra = extra
    def process(self, msg, kwargs):
        """
        Process the logging message and keyword arguments passed in to
        a logging call to insert contextual information. You can either
        manipulate the message itself, the keyword args or both. Return
        the message and kwargs modified (or not) to suit your needs.

        Normally, you'll only need to override this one method in a
        LoggerAdapter subclass for your specific needs.
        """
        # The adapter's context wins: it replaces any 'extra' supplied by
        # the caller of the individual logging methods.
        kwargs["extra"] = self.extra
        return msg, kwargs
    def debug(self, msg, *args, **kwargs):
        """
        Delegate a debug call to the underlying logger, after adding
        contextual information from this adapter instance.
        """
        msg, kwargs = self.process(msg, kwargs)
        self.logger.debug(msg, *args, **kwargs)
    def info(self, msg, *args, **kwargs):
        """
        Delegate an info call to the underlying logger, after adding
        contextual information from this adapter instance.
        """
        msg, kwargs = self.process(msg, kwargs)
        self.logger.info(msg, *args, **kwargs)
    def warning(self, msg, *args, **kwargs):
        """
        Delegate a warning call to the underlying logger, after adding
        contextual information from this adapter instance.
        """
        msg, kwargs = self.process(msg, kwargs)
        self.logger.warning(msg, *args, **kwargs)
    def error(self, msg, *args, **kwargs):
        """
        Delegate an error call to the underlying logger, after adding
        contextual information from this adapter instance.
        """
        msg, kwargs = self.process(msg, kwargs)
        self.logger.error(msg, *args, **kwargs)
    def exception(self, msg, *args, **kwargs):
        """
        Delegate an exception call to the underlying logger, after adding
        contextual information from this adapter instance.
        """
        msg, kwargs = self.process(msg, kwargs)
        # Always attach current exception info, as logging.exception() does.
        kwargs["exc_info"] = 1
        self.logger.error(msg, *args, **kwargs)
    def critical(self, msg, *args, **kwargs):
        """
        Delegate a critical call to the underlying logger, after adding
        contextual information from this adapter instance.
        """
        msg, kwargs = self.process(msg, kwargs)
        self.logger.critical(msg, *args, **kwargs)
    def log(self, level, msg, *args, **kwargs):
        """
        Delegate a log call to the underlying logger, after adding
        contextual information from this adapter instance.
        """
        msg, kwargs = self.process(msg, kwargs)
        self.logger.log(level, msg, *args, **kwargs)
    def isEnabledFor(self, level):
        """
        See if the underlying logger is enabled for the specified level.
        """
        return self.logger.isEnabledFor(level)
# The one and only root logger; WARNING is the default level.
root = RootLogger(WARNING)
Logger.root = root
Logger.manager = Manager(Logger.root)
#---------------------------------------------------------------------------
# Configuration classes and functions
#---------------------------------------------------------------------------
# Default format string used by basicConfig() when none is supplied.
BASIC_FORMAT = "%(levelname)s:%(name)s:%(message)s"
def basicConfig(**kwargs):
    """
    Do basic configuration for the logging system.

    This function does nothing if the root logger already has handlers
    configured. It is a convenience method intended for use by simple scripts
    to do one-shot configuration of the logging package.

    The default behaviour is to create a StreamHandler which writes to
    sys.stderr, set a formatter using the BASIC_FORMAT format string, and
    add the handler to the root logger.

    A number of optional keyword arguments may be specified, which can alter
    the default behaviour.

    filename  Specifies that a FileHandler be created, using the specified
              filename, rather than a StreamHandler.
    filemode  Specifies the mode to open the file, if filename is specified
              (if filemode is unspecified, it defaults to 'a').
    format    Use the specified format string for the handler.
    datefmt   Use the specified date/time format.
    level     Set the root logger level to the specified level.
    stream    Use the specified stream to initialize the StreamHandler. Note
              that this argument is incompatible with 'filename' - if both
              are present, 'stream' is ignored.

    Note that you could specify a stream created using open(filename, mode)
    rather than passing the filename and mode in. However, it should be
    remembered that StreamHandler does not close its stream (since it may be
    using sys.stdout or sys.stderr), whereas FileHandler closes its stream
    when the handler is closed.
    """
    # Add thread safety in case someone mistakenly calls
    # basicConfig() from multiple threads
    _acquireLock()
    try:
        # Only ever configure once: a root logger that already has handlers
        # is left untouched (except for an explicit 'level' request below).
        if len(root.handlers) == 0:
            filename = kwargs.get("filename")
            if filename:
                mode = kwargs.get("filemode", 'a')
                hdlr = FileHandler(filename, mode)
            else:
                stream = kwargs.get("stream")
                hdlr = StreamHandler(stream)
            fs = kwargs.get("format", BASIC_FORMAT)
            dfs = kwargs.get("datefmt", None)
            fmt = Formatter(fs, dfs)
            hdlr.setFormatter(fmt)
            root.addHandler(hdlr)
            level = kwargs.get("level")
            if level is not None:
                root.setLevel(level)
    finally:
        _releaseLock()
#---------------------------------------------------------------------------
# Utility functions at module level.
# Basically delegate everything to the root logger.
#---------------------------------------------------------------------------
def getLogger(name=None):
    """
    Return a logger with the specified name, creating it if necessary.

    If no name is specified, return the root logger.
    """
    if not name:
        return root
    return Logger.manager.getLogger(name)
#def getRootLogger():
# """
# Return the root logger.
#
# Note that getLogger('') now does the same thing, so this function is
# deprecated and may disappear in the future.
# """
# return root
def critical(msg, *args, **kwargs):
    """
    Log a message with severity 'CRITICAL' on the root logger.

    Performs one-shot basicConfig() if the root logger has no handlers yet.
    """
    if not root.handlers:
        basicConfig()
    root.critical(msg, *args, **kwargs)

# Backwards-compatible alias.
fatal = critical
def error(msg, *args, **kwargs):
    """
    Log a message with severity 'ERROR' on the root logger.

    Performs one-shot basicConfig() if the root logger has no handlers yet.
    """
    if not root.handlers:
        basicConfig()
    root.error(msg, *args, **kwargs)
def exception(msg, *args, **kwargs):
    """
    Log a message with severity 'ERROR' on the root logger,
    with exception information.
    """
    # Force inclusion of the active exception's traceback.
    kwargs.update(exc_info=1)
    error(msg, *args, **kwargs)
def warning(msg, *args, **kwargs):
    """
    Log a message with severity 'WARNING' on the root logger.

    Performs one-shot basicConfig() if the root logger has no handlers yet.
    """
    if not root.handlers:
        basicConfig()
    root.warning(msg, *args, **kwargs)

# Backwards-compatible alias.
warn = warning
def info(msg, *args, **kwargs):
    """
    Log a message with severity 'INFO' on the root logger.

    Performs one-shot basicConfig() if the root logger has no handlers yet.
    """
    if not root.handlers:
        basicConfig()
    root.info(msg, *args, **kwargs)
def debug(msg, *args, **kwargs):
    """
    Log a message with severity 'DEBUG' on the root logger.

    Performs one-shot basicConfig() if the root logger has no handlers yet.
    """
    if not root.handlers:
        basicConfig()
    root.debug(msg, *args, **kwargs)
def log(level, msg, *args, **kwargs):
    """
    Log 'msg % args' with the integer severity 'level' on the root logger.

    Performs one-shot basicConfig() if the root logger has no handlers yet.
    """
    if not root.handlers:
        basicConfig()
    root.log(level, msg, *args, **kwargs)
def disable(level):
    """
    Disable all logging calls of severity 'level' and below.

    Sets the manager-wide threshold consulted by Logger.isEnabledFor().
    """
    root.manager.disable = level
def shutdown(handlerList=_handlerList):
    """
    Perform any cleanup actions in the logging system (e.g. flushing
    buffers).

    Should be called at application exit. Handlers are flushed and closed
    in reverse order of registration; errors are swallowed unless
    raiseExceptions is set.
    """
    # Iterate over a copy; handlers may unregister themselves on close().
    for wr in reversed(handlerList[:]):
        #errors might occur, for example, if files are locked
        #we just ignore them if raiseExceptions is not set
        try:
            # Entries are weak references; a dead one yields None.
            h = wr()
            if h:
                try:
                    h.acquire()
                    h.flush()
                    h.close()
                except (IOError, ValueError):
                    # Ignore errors which might be caused
                    # because handlers have been closed but
                    # references to them are still around at
                    # application exit.
                    pass
                finally:
                    h.release()
        except:
            if raiseExceptions:
                raise
            #else, swallow
#Let's try and shutdown automatically on application exit...
# Flush and close every registered handler when the interpreter exits.
import atexit
atexit.register(shutdown)
# Null handler
class NullHandler(Handler):
    """
    This handler does nothing. It's intended to be used to avoid the
    "No handlers could be found for logger XXX" one-off warning. This is
    important for library code, which may contain code to log events. If a user
    of the library does not configure logging, the one-off warning might be
    produced; to avoid this, the library developer simply needs to instantiate
    a NullHandler and add it to the top-level logger of the library module or
    package.
    """
    def handle(self, record):
        # Intentionally discard the record.
        pass
    def emit(self, record):
        # Intentionally discard the record.
        pass
    def createLock(self):
        # No I/O happens, so no lock is needed; avoids locking overhead.
        self.lock = None
# Warnings integration

# Saved original warnings.showwarning, restored by captureWarnings(False);
# None while capture is inactive.
_warnings_showwarning = None

def _showwarning(message, category, filename, lineno, file=None, line=None):
    """
    Implementation of showwarnings which redirects to logging, which will first
    check to see if the file parameter is None. If a file is specified, it will
    delegate to the original warnings implementation of showwarning. Otherwise,
    it will call warnings.formatwarning and will log the resulting string to a
    warnings logger named "py.warnings" with level logging.WARNING.
    """
    if file is not None:
        if _warnings_showwarning is not None:
            _warnings_showwarning(message, category, filename, lineno, file, line)
    else:
        s = warnings.formatwarning(message, category, filename, lineno, line)
        logger = getLogger("py.warnings")
        # Attach a NullHandler so an unconfigured app gets no one-off
        # "no handlers" complaint from this logger.
        if not logger.handlers:
            logger.addHandler(NullHandler())
        logger.warning("%s", s)
def captureWarnings(capture):
    """
    If capture is true, redirect all warnings to the logging package.
    If capture is False, ensure that warnings are not redirected to logging
    but to their original destinations.

    Idempotent: repeated calls with the same flag are no-ops.
    """
    global _warnings_showwarning
    if capture and _warnings_showwarning is None:
        # Save the original hook so it can be restored later.
        _warnings_showwarning = warnings.showwarning
        warnings.showwarning = _showwarning
    elif not capture and _warnings_showwarning is not None:
        warnings.showwarning = _warnings_showwarning
        _warnings_showwarning = None
#---------------------------------------------------------------------------
"""
function for calculating the convergence of an x, y data set
main function:
test_conv(xs, ys, name, tol)
tries to fit multiple functions to the x, y data
calculates which function fits best
for tol < 0
returns the x value for which y is converged within tol of the assymtotic value
for tol > 0
returns the x_value for which dy(x)/dx < tol for all x >= x_value, conv is true is such a x_value exists
for the best fit a gnuplot line is printed plotting the data, the function and the assymthotic value
"""
from __future__ import division
__author__ = "Michiel van Setten"
__copyright__ = " "
__version__ = "0.9"
__maintainer__ = "Michiel van Setten"
__email__ = "mjvansetten@gmail.com"
__date__ = "May 2014"
import string
import random
def id_generator(size=8, chars=string.ascii_uppercase + string.digits):
    """Return a random identifier of `size` characters drawn from `chars`."""
    picks = [random.choice(chars) for _ in range(size)]
    return ''.join(picks)
class SplineInputError(Exception):
    """Raised when input data is unsuitable for spline interpolation."""

    def __init__(self, msg):
        # Pass the message to Exception so str(e)/repr(e) show it; the
        # original stored it on self.msg only, leaving str(e) empty.
        Exception.__init__(self, msg)
        self.msg = msg
def get_derivatives(xs, ys, fd=False):
    """
    return the derivatives of y(x) at the points x
    if scipy is available a spline is generated to calculate the derivatives
    if scipy is not available the left and right slopes are calculated, if both exist the average is returned
    putting fd to True always returns the finite difference slopes
    """
    d = None
    # Spline path: only when not forced to finite differences and there are
    # enough points for scipy's default cubic smoothing spline.
    if not fd and len(xs) >= 4:
        try:
            from scipy.interpolate import UnivariateSpline
            spline = UnivariateSpline(xs, ys)
            d = spline.derivative(1)(xs)
        except ImportError:
            d = None
    if d is None:
        # Finite-difference fallback: average of the available one-sided
        # slopes at each point.
        d = []
        for n in range(len(xs)):
            slopes = []
            # Left slope exists only from the second point on. (The original
            # used ys[n-1] at n == 0, which silently wrapped around to the
            # last element via negative indexing instead of being skipped.)
            if n > 0:
                slopes.append((ys[n] - ys[n - 1]) / (xs[n] - xs[n - 1]))
            # Right slope exists only up to the second-to-last point.
            if n < len(xs) - 1:
                slopes.append((ys[n + 1] - ys[n]) / (xs[n + 1] - xs[n]))
            # The original computed `left + right / m` -- a missing pair of
            # parentheses that broke the intended averaging.
            d.append(sum(slopes) / len(slopes))
    return d
"""
functions used in the fitting procedure, with initial guesses
"""
def reciprocal(x, a, b, n):
    """
    reciprocal function to the power n to fit convergence data

    The exponent n is clamped to the interval [1, 5].
    """
    import numpy as np
    n = min(max(n, 1), 5)
    if isinstance(x, list):
        return np.array([a + b / v ** n for v in x])
    return a + b / x ** n
def p0_reciprocal(xs, ys):
    """
    predictor for first guess for reciprocal

    Uses the last y as the asymptote guess and the first point to scale b.
    """
    a0 = ys[-1]
    b0 = ys[0] * xs[0] - a0 * xs[0]
    return [a0, b0, 1]
def exponential(x, a, b, n):
    """
    exponential function base n to fit convergence data

    n is clamped to [1.000001, 1.2] and b to [-10, 10] to keep the
    optimizer in a sane region.
    """
    import numpy as np
    n = min(max(n, 1.000001), 1.2)
    b = min(max(b, -10), 10)
    if isinstance(x, list):
        return np.array([a + b * n ** -v for v in x])
    return a + b * n ** -x
def p0_exponential(xs, ys):
    """Initial-guess predictor for `exponential`, anchored on the second and
    last data points with base n0 = 1.005."""
    n0 = 1.005
    tail, second = n0 ** -xs[-1], n0 ** -xs[1]
    b0 = (tail - second) / (ys[-1] - ys[1])
    a0 = ys[1] - b0 * second
    return [a0, b0, n0]
def single_reciprocal(x, a, b, c):
    """
    reciprocal function with shifted pole to fit convergence data
    """
    import numpy as np
    if isinstance(x, list):
        return np.array([a + b / (v - c) for v in x])
    return a + b / (x - c)
def p0_single_reciprocal(xs, ys):
    """Initial-guess predictor for `single_reciprocal` with pole guess c = 1."""
    c = 1
    span = ys[-1] - ys[1]
    b = (1.0 / (xs[-1] - c) - 1.0 / (xs[1] - c)) / span
    a = ys[1] - b / (xs[1] - c)
    return [a, b, c]
def simple_reciprocal(x, a, b):
    """
    a + b / x model to fit convergence data
    """
    import numpy as np
    if isinstance(x, list):
        return np.array([a + b / v for v in x])
    return a + b / x
def p0_simple_reciprocal(xs, ys):
    """Initial-guess predictor for `simple_reciprocal` (pole fixed at 0)."""
    b = (1.0 / xs[-1] - 1.0 / xs[1]) / (ys[-1] - ys[1])
    a = ys[1] - b / xs[1]
    return [a, b]
def simple_2reciprocal(x, a, b):
    """
    a + b / x**2 model to fit convergence data
    """
    import numpy as np
    c = 2
    if isinstance(x, list):
        return np.array([a + b / v ** c for v in x])
    return a + b / x ** c
def p0_simple_2reciprocal(xs, ys):
    """Initial-guess predictor for `simple_2reciprocal`."""
    b = (1.0 / xs[-1] ** 2 - 1.0 / xs[1] ** 2) / (ys[-1] - ys[1])
    a = ys[1] - b / xs[1] ** 2
    return [a, b]
def simple_4reciprocal(x, a, b):
    """
    a + b / x**4 model to fit convergence data
    """
    import numpy as np
    c = 4
    if isinstance(x, list):
        return np.array([a + b / v ** c for v in x])
    return a + b / x ** c
def p0_simple_4reciprocal(xs, ys):
    """Initial-guess predictor for `simple_4reciprocal`."""
    b = (1.0 / xs[-1] ** 4 - 1.0 / xs[1] ** 4) / (ys[-1] - ys[1])
    a = ys[1] - b / xs[1] ** 4
    return [a, b]
def measure(function, xs, ys, popt, weights):
    """
    measure the quality of the fit

    Returns the weighted sum of absolute residuals between ys and
    function(x, *popt); only 2- and 3-parameter models are supported.
    """
    total = 0
    for n, x in enumerate(xs):
        if len(popt) == 2:
            fitted = function(x, popt[0], popt[1])
        elif len(popt) == 3:
            fitted = function(x, popt[0], popt[1], popt[2])
        else:
            raise NotImplementedError
        total += abs(ys[n] - fitted) * weights[n]
    return total
def multy_curve_fit(xs, ys, verbose):
    """
    fit multiple functions to the x, y data, return the best fit

    Returns (popt, pcov, best) where best is (function, measure).
    Raises RuntimeError when none of the candidate functions could be
    fitted (the original raised an opaque KeyError('') in that case).
    """
    #functions = {exponential: p0_exponential, reciprocal: p0_reciprocal, single_reciprocal: p0_single_reciprocal}
    functions = {simple_reciprocal: p0_simple_reciprocal, simple_2reciprocal: p0_simple_2reciprocal,
                 simple_4reciprocal: p0_simple_4reciprocal}
    import numpy as np
    from scipy.optimize import curve_fit
    fit_results = {}
    best = ['', np.inf]
    for function in functions:
        try:
            ds = get_derivatives(xs, ys, fd=True)
            mind = np.inf
            for d in ds:
                mind = min(abs(d), mind)
            # Weight each point by how flat the curve is there, relative to
            # the flattest point (flatter regions get smaller sigma).
            weights = []
            for d in ds:
                weights.append(abs((1 / d) / (1 / mind)))
            if verbose:  # the original printed unconditionally (Py2 print)
                print(weights)
            popt, pcov = curve_fit(function, xs, ys, functions[function](xs, ys), maxfev=8000, sigma=weights)
            m = measure(function, xs, ys, popt, weights)
            perr = max(np.sqrt(np.diag(pcov)))
            fit_results.update({function: {'measure': m, 'perr': perr, 'popt': popt, 'pcov': pcov}})
        except RuntimeError:
            # curve_fit did not converge for this candidate; keep trying.
            if verbose:
                print('no fit found for %s' % function)
    if not fit_results:
        raise RuntimeError('no fit found for any candidate function')
    # Select the best fit once all candidates have been tried (the original
    # re-scanned fit_results inside the loop, which was redundant).
    for f in fit_results:
        if fit_results[f]['measure'] <= best[1]:
            best = f, fit_results[f]['measure']
    return fit_results[best[0]]['popt'], fit_results[best[0]]['pcov'], best
def print_plot_line(function, popt, xs, ys, name, extra=''):
    """
    print the gnuplot command line to plot the x, y data with the fitted function using the popt parameters

    Writes the raw data to 'convdat.<id>' and appends the gnuplot commands
    to the 'plot-fits' script file.
    """
    idp = id_generator()
    # Dump the raw x, y data for gnuplot to read; `with` guarantees the
    # file is closed even if a write fails (the original leaked the handle
    # on error).
    with open('convdat.' + str(idp), mode='w') as f:
        for n in range(0, len(ys), 1):
            f.write(str(xs[n]) + ' ' + str(ys[n]) + '\n')
        f.write('\n')
    line = ''
    if function is exponential:
        line = "plot %s + %s * %s ** -x, 'convdat.%s' pointsize 4 lt 0, %s" % \
               (popt[0], popt[1], min(max(1.00001, popt[2]), 1.2), idp, popt[0])
    elif function is reciprocal:
        line = "plot %s + %s / x**%s, 'convdat.%s' pointsize 4 lt 0, %s" % \
               (popt[0], popt[1], min(max(1, popt[2]), 5), idp, popt[0])
    elif function is single_reciprocal:
        line = "plot %s + %s / (x - %s), 'convdat.%s' pointsize 4 lt 0, %s" % \
               (popt[0], popt[1], popt[2], idp, popt[0])
    elif function is simple_reciprocal:
        line = "plot %s + %s / x, 'convdat.%s' pointsize 4 lt 0, %s" % \
               (popt[0], popt[1], idp, popt[0])
    elif function is simple_2reciprocal:
        line = "plot %s + %s / x**2, 'convdat.%s' pointsize 4 lt 0, %s" % \
               (popt[0], popt[1], idp, popt[0])
    elif function is simple_4reciprocal:
        line = "plot %s + %s / x**4, 'convdat.%s' pointsize 4 lt 0, %s" % \
               (popt[0], popt[1], idp, popt[0])
    # Append the plot commands so all fits accumulate in one gnuplot script.
    with open('plot-fits', mode='a') as f:
        f.write('pause -1 \n')
        f.write('set title "' + name + ' - ' + extra + '"\n')
        f.write("set output '" + name + '-' + idp + ".gif'" + '\n')
        f.write(line + '\n')
def test_conv(xs, ys, name, tol=0.0001, extra='', verbose=False):
    """
    Test at which x_value dy(x)/dx < tol for all x >= x_value; conv is true
    if such an x_value exists.

    For tol < 0 the criterion is instead |popt[0] - y| < |tol|, i.e.
    convergence towards the fitted asymptotic value.

    Returns [conv, x_value, y_value, n_value, popt[0], derivative] where
    derivative is ds[n_value] (None when nothing converged).
    """
    conv = False
    x_value = float('inf')
    y_value = None
    n_value = None
    popt = [None, None, None]
    if len(xs) > 2:
        ds = get_derivatives(xs[0:len(ys)], ys)
        try:
            import numpy as np
            from scipy.optimize import curve_fit
            if None not in ys:
                popt, pcov, func = multy_curve_fit(xs, ys, verbose)
                # todo print this to file via a method in helper, as dict
                f = open(name + '.fitdat', mode='a')
                try:
                    f.write('{')
                    f.write('"popt": ' + str(popt) + ', ')
                    f.write('"pcov": ' + str(pcov) + ', ')
                    f.write('"data": [')
                    for n in range(0, len(ys), 1):
                        f.write('[' + str(xs[n]) + ' ' + str(ys[n]) + ']')
                    f.write(']}\n')
                finally:
                    f.close()
                print_plot_line(func[0], popt, xs, ys, name, extra=extra)
        except ImportError:
            # Keep the 3-element placeholder: the original set popt = None
            # here and then crashed on popt[0] below.
            popt = [None, None, None]
        for n in range(0, len(ds), 1):
            if tol < 0:
                if popt[0] is not None:
                    test = abs(popt[0] - ys[n])
                else:
                    test = float('inf')
            else:
                test = abs(ds[n])
            if verbose:
                print(test)
            if test < abs(tol):
                if verbose:
                    print('converged')
                conv = True
                if xs[n] < x_value:
                    x_value = xs[n]
                    y_value = ys[n]
                    n_value = n
            else:
                if verbose:
                    print('not converged')
                conv = False
                x_value = float('inf')
        if n_value is None:
            return [conv, x_value, y_value, n_value, popt[0], None]
        else:
            return [conv, x_value, y_value, n_value, popt[0], ds[n_value]]
    else:
        return [conv, x_value, y_value, n_value, popt[0], None]
# convergence
# Former-commit-id: 48cf9488f047633adcc0884a56b51dddfbceae08 [formerly 801600b186db434fa8123ebf9333e3c33fdfbd1c]
# Former-commit-id: 18ea3a6321fc88f7ecccfae00bfea1702e41ae4f
"""
function for calculating the convergence of an x, y data set
main function:
test_conv(xs, ys, name, tol)
tries to fit multiple functions to the x, y data
calculates which function fits best
for tol < 0
returns the x value for which y is converged within tol of the assymtotic value
for tol > 0
returns the x_value for which dy(x)/dx < tol for all x >= x_value, conv is true is such a x_value exists
for the best fit a gnuplot line is printed plotting the data, the function and the assymthotic value
"""
from __future__ import division
__author__ = "Michiel van Setten"
__copyright__ = " "
__version__ = "0.9"
__maintainer__ = "Michiel van Setten"
__email__ = "mjvansetten@gmail.com"
__date__ = "May 2014"
import string
import random
def id_generator(size=8, chars=string.ascii_uppercase + string.digits):
    """Return a random identifier of `size` characters drawn from `chars`."""
    picks = [random.choice(chars) for _ in range(size)]
    return ''.join(picks)
class SplineInputError(Exception):
    """Raised when input data is unsuitable for spline interpolation."""

    def __init__(self, msg):
        # Pass the message to Exception so str(e)/repr(e) show it; the
        # original stored it on self.msg only, leaving str(e) empty.
        Exception.__init__(self, msg)
        self.msg = msg
def get_derivatives(xs, ys, fd=False):
    """
    return the derivatives of y(x) at the points x
    if scipy is available a spline is generated to calculate the derivatives
    if scipy is not available the left and right slopes are calculated, if both exist the average is returned
    putting fd to True always returns the finite difference slopes
    """
    d = None
    # Spline path: only when not forced to finite differences and there are
    # enough points for scipy's default cubic smoothing spline.
    if not fd and len(xs) >= 4:
        try:
            from scipy.interpolate import UnivariateSpline
            spline = UnivariateSpline(xs, ys)
            d = spline.derivative(1)(xs)
        except ImportError:
            d = None
    if d is None:
        # Finite-difference fallback: average of the available one-sided
        # slopes at each point.
        d = []
        for n in range(len(xs)):
            slopes = []
            # Left slope exists only from the second point on. (The original
            # used ys[n-1] at n == 0, which silently wrapped around to the
            # last element via negative indexing instead of being skipped.)
            if n > 0:
                slopes.append((ys[n] - ys[n - 1]) / (xs[n] - xs[n - 1]))
            # Right slope exists only up to the second-to-last point.
            if n < len(xs) - 1:
                slopes.append((ys[n + 1] - ys[n]) / (xs[n + 1] - xs[n]))
            # The original computed `left + right / m` -- a missing pair of
            # parentheses that broke the intended averaging.
            d.append(sum(slopes) / len(slopes))
    return d
"""
functions used in the fitting procedure, with initial guesses
"""
def reciprocal(x, a, b, n):
    """
    reciprocal function to the power n to fit convergence data

    The exponent n is clamped to the interval [1, 5].
    """
    import numpy as np
    n = min(max(n, 1), 5)
    if isinstance(x, list):
        return np.array([a + b / v ** n for v in x])
    return a + b / x ** n
def p0_reciprocal(xs, ys):
    """
    predictor for first guess for reciprocal

    Uses the last y as the asymptote guess and the first point to scale b.
    """
    a0 = ys[-1]
    b0 = ys[0] * xs[0] - a0 * xs[0]
    return [a0, b0, 1]
def exponential(x, a, b, n):
    """
    exponential function base n to fit convergence data

    n is clamped to [1.000001, 1.2] and b to [-10, 10] to keep the
    optimizer in a sane region.
    """
    import numpy as np
    n = min(max(n, 1.000001), 1.2)
    b = min(max(b, -10), 10)
    if isinstance(x, list):
        return np.array([a + b * n ** -v for v in x])
    return a + b * n ** -x
def p0_exponential(xs, ys):
    """Initial-guess predictor for `exponential`, anchored on the second and
    last data points with base n0 = 1.005."""
    n0 = 1.005
    tail, second = n0 ** -xs[-1], n0 ** -xs[1]
    b0 = (tail - second) / (ys[-1] - ys[1])
    a0 = ys[1] - b0 * second
    return [a0, b0, n0]
def single_reciprocal(x, a, b, c):
    """
    reciprocal function with shifted pole to fit convergence data
    """
    import numpy as np
    if isinstance(x, list):
        return np.array([a + b / (v - c) for v in x])
    return a + b / (x - c)
def p0_single_reciprocal(xs, ys):
    """Initial-guess predictor for `single_reciprocal` with pole guess c = 1."""
    c = 1
    span = ys[-1] - ys[1]
    b = (1.0 / (xs[-1] - c) - 1.0 / (xs[1] - c)) / span
    a = ys[1] - b / (xs[1] - c)
    return [a, b, c]
def simple_reciprocal(x, a, b):
    """
    a + b / x model to fit convergence data
    """
    import numpy as np
    if isinstance(x, list):
        return np.array([a + b / v for v in x])
    return a + b / x
def p0_simple_reciprocal(xs, ys):
    """Initial-guess predictor for `simple_reciprocal` (pole fixed at 0)."""
    b = (1.0 / xs[-1] - 1.0 / xs[1]) / (ys[-1] - ys[1])
    a = ys[1] - b / xs[1]
    return [a, b]
def simple_2reciprocal(x, a, b):
    """
    a + b / x**2 model to fit convergence data
    """
    import numpy as np
    c = 2
    if isinstance(x, list):
        return np.array([a + b / v ** c for v in x])
    return a + b / x ** c
def p0_simple_2reciprocal(xs, ys):
    """Initial-guess predictor for `simple_2reciprocal`."""
    b = (1.0 / xs[-1] ** 2 - 1.0 / xs[1] ** 2) / (ys[-1] - ys[1])
    a = ys[1] - b / xs[1] ** 2
    return [a, b]
def simple_4reciprocal(x, a, b):
    """
    a + b / x**4 model to fit convergence data
    """
    import numpy as np
    c = 4
    if isinstance(x, list):
        return np.array([a + b / v ** c for v in x])
    return a + b / x ** c
def p0_simple_4reciprocal(xs, ys):
    """Initial-guess predictor for `simple_4reciprocal`."""
    b = (1.0 / xs[-1] ** 4 - 1.0 / xs[1] ** 4) / (ys[-1] - ys[1])
    a = ys[1] - b / xs[1] ** 4
    return [a, b]
def measure(function, xs, ys, popt, weights):
    """
    measure the quality of the fit

    Returns the weighted sum of absolute residuals between ys and
    function(x, *popt); only 2- and 3-parameter models are supported.
    """
    total = 0
    for n, x in enumerate(xs):
        if len(popt) == 2:
            fitted = function(x, popt[0], popt[1])
        elif len(popt) == 3:
            fitted = function(x, popt[0], popt[1], popt[2])
        else:
            raise NotImplementedError
        total += abs(ys[n] - fitted) * weights[n]
    return total
def multy_curve_fit(xs, ys, verbose):
    """
    fit multiple functions to the x, y data, return the best fit

    Returns (popt, pcov, best) where best is (function, measure).
    Raises RuntimeError when none of the candidate functions could be
    fitted (the original raised an opaque KeyError('') in that case).
    """
    #functions = {exponential: p0_exponential, reciprocal: p0_reciprocal, single_reciprocal: p0_single_reciprocal}
    functions = {
        exponential: p0_exponential,
        reciprocal: p0_reciprocal,
        single_reciprocal: p0_single_reciprocal,
        simple_reciprocal: p0_simple_reciprocal,
        simple_2reciprocal: p0_simple_2reciprocal,
        simple_4reciprocal: p0_simple_4reciprocal
    }
    import numpy as np
    from scipy.optimize import curve_fit
    fit_results = {}
    best = ['', np.inf]
    for function in functions:
        try:
            ds = get_derivatives(xs, ys, fd=True)
            mind = np.inf
            for d in ds:
                mind = min(abs(d), mind)
            # Weight each point by how flat the curve is there, relative to
            # the flattest point (flatter regions get smaller sigma).
            weights = []
            for d in ds:
                weights.append(abs((1 / d) / (1 / mind)))
            if verbose:  # the original printed unconditionally (Py2 print)
                print(weights)
            popt, pcov = curve_fit(function, xs, ys, functions[function](xs, ys), maxfev=8000, sigma=weights)
            m = measure(function, xs, ys, popt, weights)
            perr = max(np.sqrt(np.diag(pcov)))
            fit_results.update({function: {'measure': m, 'perr': perr, 'popt': popt, 'pcov': pcov}})
        except RuntimeError:
            # curve_fit did not converge for this candidate; keep trying.
            if verbose:
                print('no fit found for %s' % function)
    if not fit_results:
        raise RuntimeError('no fit found for any candidate function')
    # Select the best fit once all candidates have been tried (the original
    # re-scanned fit_results inside the loop, which was redundant).
    for f in fit_results:
        if fit_results[f]['measure'] <= best[1]:
            best = f, fit_results[f]['measure']
    return fit_results[best[0]]['popt'], fit_results[best[0]]['pcov'], best
def print_plot_line(function, popt, xs, ys, name, extra=''):
    """
    print the gnuplot command line to plot the x, y data with the fitted function using the popt parameters

    Writes the raw data to 'convdat.<id>' and appends the gnuplot commands
    to the 'plot-fits' script file.
    """
    idp = id_generator()
    # Dump the raw x, y data for gnuplot to read; `with` guarantees the
    # file is closed even if a write fails (the original leaked the handle
    # on error).
    with open('convdat.' + str(idp), mode='w') as f:
        for n in range(0, len(ys), 1):
            f.write(str(xs[n]) + ' ' + str(ys[n]) + '\n')
        f.write('\n')
    line = ''
    if function is exponential:
        line = "plot %s + %s * %s ** -x, 'convdat.%s' pointsize 4 lt 0, %s" % \
               (popt[0], popt[1], min(max(1.00001, popt[2]), 1.2), idp, popt[0])
    elif function is reciprocal:
        line = "plot %s + %s / x**%s, 'convdat.%s' pointsize 4 lt 0, %s" % \
               (popt[0], popt[1], min(max(1, popt[2]), 5), idp, popt[0])
    elif function is single_reciprocal:
        line = "plot %s + %s / (x - %s), 'convdat.%s' pointsize 4 lt 0, %s" % \
               (popt[0], popt[1], popt[2], idp, popt[0])
    elif function is simple_reciprocal:
        line = "plot %s + %s / x, 'convdat.%s' pointsize 4 lt 0, %s" % \
               (popt[0], popt[1], idp, popt[0])
    elif function is simple_2reciprocal:
        line = "plot %s + %s / x**2, 'convdat.%s' pointsize 4 lt 0, %s" % \
               (popt[0], popt[1], idp, popt[0])
    elif function is simple_4reciprocal:
        line = "plot %s + %s / x**4, 'convdat.%s' pointsize 4 lt 0, %s" % \
               (popt[0], popt[1], idp, popt[0])
    # Append the plot commands so all fits accumulate in one gnuplot script.
    with open('plot-fits', mode='a') as f:
        f.write('pause -1 \n')
        f.write('set title "' + name + ' - ' + extra + '"\n')
        f.write("set output '" + name + '-' + idp + ".gif'" + '\n')
        f.write(line + '\n')
def test_conv(xs, ys, name, tol=0.0001, extra='', verbose=False):
    """
    Test at which x_value dy(x)/dx < tol for all x >= x_value; conv is true
    if such an x_value exists.

    For tol < 0 the criterion is instead |popt[0] - y| < |tol|, i.e.
    convergence towards the fitted asymptotic value.

    Returns [conv, x_value, y_value, n_value, popt[0], derivative] where
    derivative is ds[n_value] (None when nothing converged).
    """
    conv = False
    x_value = float('inf')
    y_value = None
    n_value = None
    popt = [None, None, None]
    if len(xs) > 2:
        ds = get_derivatives(xs[0:len(ys)], ys)
        try:
            import numpy as np
            from scipy.optimize import curve_fit
            if None not in ys:
                popt, pcov, func = multy_curve_fit(xs, ys, verbose)
                # todo print this to file via a method in helper, as dict
                f = open(name + '.fitdat', mode='a')
                try:
                    f.write('{')
                    f.write('"popt": ' + str(popt) + ', ')
                    f.write('"pcov": ' + str(pcov) + ', ')
                    f.write('"data": [')
                    for n in range(0, len(ys), 1):
                        f.write('[' + str(xs[n]) + ' ' + str(ys[n]) + ']')
                    f.write(']}\n')
                finally:
                    f.close()
                print_plot_line(func[0], popt, xs, ys, name, extra=extra)
        except ImportError:
            # Keep the 3-element placeholder: the original set popt = None
            # here and then crashed on popt[0] below.
            popt = [None, None, None]
        for n in range(0, len(ds), 1):
            if tol < 0:
                if popt[0] is not None:
                    test = abs(popt[0] - ys[n])
                else:
                    test = float('inf')
            else:
                test = abs(ds[n])
            if verbose:
                print(test)
            if test < abs(tol):
                if verbose:
                    print('converged')
                conv = True
                if xs[n] < x_value:
                    x_value = xs[n]
                    y_value = ys[n]
                    n_value = n
            else:
                if verbose:
                    print('not converged')
                conv = False
                x_value = float('inf')
        if n_value is None:
            return [conv, x_value, y_value, n_value, popt[0], None]
        else:
            return [conv, x_value, y_value, n_value, popt[0], ds[n_value]]
    else:
        return [conv, x_value, y_value, n_value, popt[0], None]
#---------------------------------------------------------------------------
# Copyright 2019-2021 VyOS maintainers and contributors <maintainers@vyos.io>
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library. If not, see <http://www.gnu.org/licenses/>.
from netifaces import interfaces
import os
import re
import json
import jmespath
from copy import deepcopy
from glob import glob
from ipaddress import IPv4Network
from netifaces import ifaddresses
# this is not the same as socket.AF_INET/INET6
from netifaces import AF_INET
from netifaces import AF_INET6
from vyos import ConfigError
from vyos.configdict import list_diff
from vyos.configdict import dict_merge
from vyos.configdict import get_vlan_ids
from vyos.template import render
from vyos.util import mac2eui64
from vyos.util import dict_search
from vyos.util import read_file
from vyos.util import get_interface_config
from vyos.util import is_systemd_service_active
from vyos.template import is_ipv4
from vyos.template import is_ipv6
from vyos.validate import is_intf_addr_assigned
from vyos.validate import is_ipv6_link_local
from vyos.validate import assert_boolean
from vyos.validate import assert_list
from vyos.validate import assert_mac
from vyos.validate import assert_mtu
from vyos.validate import assert_positive
from vyos.validate import assert_range
from vyos.ifconfig.control import Control
from vyos.ifconfig.vrrp import VRRP
from vyos.ifconfig.operational import Operational
from vyos.ifconfig import Section
from netaddr import EUI
from netaddr import mac_unix_expanded
class Interface(Control):
    # This is the class which will be used to create
    # self.operational, it allows subclasses, such as
    # WireGuard to modify their display behaviour
    OperationalClass = Operational
    # option keywords accepted via **kargs
    # NOTE(review): presumably consumed by the Control base class - confirm
    options = ['debug', 'create']
    # kwargs a subclass may declare as mandatory; checked by __init__()
    # before the interface is created
    required = []
    # default creation parameters; handed out (deep-copied) by get_config()
    default = {
        'debug': True,
        'create': True,
    }
    # per-class capability description; 'eternal' is a regex of interface
    # names that remove() must never delete
    definition = {
        'section': '',
        'prefixes': [],
        'vlan': False,
        'bondable': False,
        'broadcast': False,
        'bridgeable': False,
        'eternal': '',
    }
_command_get = {
'admin_state': {
'shellcmd': 'ip -json link show dev {ifname}',
'format': lambda j: 'up' if 'UP' in jmespath.search('[*].flags | [0]', json.loads(j)) else 'down',
},
'alias': {
'shellcmd': 'ip -json -detail link list dev {ifname}',
'format': lambda j: jmespath.search('[*].ifalias | [0]', json.loads(j)) or '',
},
'mac': {
'shellcmd': 'ip -json -detail link list dev {ifname}',
'format': lambda j: jmespath.search('[*].address | [0]', json.loads(j)),
},
'min_mtu': {
'shellcmd': 'ip -json -detail link list dev {ifname}',
'format': lambda j: jmespath.search('[*].min_mtu | [0]', json.loads(j)),
},
'max_mtu': {
'shellcmd': 'ip -json -detail link list dev {ifname}',
'format': lambda j: jmespath.search('[*].max_mtu | [0]', json.loads(j)),
},
'mtu': {
'shellcmd': 'ip -json -detail link list dev {ifname}',
'format': lambda j: jmespath.search('[*].mtu | [0]', json.loads(j)),
},
'oper_state': {
'shellcmd': 'ip -json -detail link list dev {ifname}',
'format': lambda j: jmespath.search('[*].operstate | [0]', json.loads(j)),
},
'vrf': {
'shellcmd': 'ip -json -detail link list dev {ifname}',
'format': lambda j: jmespath.search('[*].master | [0]', json.loads(j)),
},
}
_command_set = {
'admin_state': {
'validate': lambda v: assert_list(v, ['up', 'down']),
'shellcmd': 'ip link set dev {ifname} {value}',
},
'alias': {
'convert': lambda name: name if name else '',
'shellcmd': 'ip link set dev {ifname} alias "{value}"',
},
'bridge_port_isolation': {
'validate': lambda v: assert_list(v, ['on', 'off']),
'shellcmd': 'bridge link set dev {ifname} isolated {value}',
},
'mac': {
'validate': assert_mac,
'shellcmd': 'ip link set dev {ifname} address {value}',
},
'mtu': {
'validate': assert_mtu,
'shellcmd': 'ip link set dev {ifname} mtu {value}',
},
'vrf': {
'convert': lambda v: f'master {v}' if v else 'nomaster',
'shellcmd': 'ip link set dev {ifname} {value}',
},
}
_sysfs_set = {
'arp_cache_tmo': {
'location': '/proc/sys/net/ipv4/neigh/{ifname}/base_reachable_time_ms',
},
'arp_filter': {
'validate': assert_boolean,
'location': '/proc/sys/net/ipv4/conf/{ifname}/arp_filter',
},
'arp_accept': {
'validate': lambda arp: assert_range(arp,0,2),
'location': '/proc/sys/net/ipv4/conf/{ifname}/arp_accept',
},
'arp_announce': {
'validate': assert_boolean,
'location': '/proc/sys/net/ipv4/conf/{ifname}/arp_announce',
},
'arp_ignore': {
'validate': assert_boolean,
'location': '/proc/sys/net/ipv4/conf/{ifname}/arp_ignore',
},
'ipv4_forwarding': {
'validate': assert_boolean,
'location': '/proc/sys/net/ipv4/conf/{ifname}/forwarding',
},
'rp_filter': {
'validate': lambda flt: assert_range(flt,0,3),
'location': '/proc/sys/net/ipv4/conf/{ifname}/rp_filter',
},
'ipv6_accept_ra': {
'validate': lambda ara: assert_range(ara,0,3),
'location': '/proc/sys/net/ipv6/conf/{ifname}/accept_ra',
},
'ipv6_autoconf': {
'validate': lambda aco: assert_range(aco,0,2),
'location': '/proc/sys/net/ipv6/conf/{ifname}/autoconf',
},
'ipv6_forwarding': {
'validate': lambda fwd: assert_range(fwd,0,2),
'location': '/proc/sys/net/ipv6/conf/{ifname}/forwarding',
},
'ipv6_dad_transmits': {
'validate': assert_positive,
'location': '/proc/sys/net/ipv6/conf/{ifname}/dad_transmits',
},
'path_cost': {
# XXX: we should set a maximum
'validate': assert_positive,
'location': '/sys/class/net/{ifname}/brport/path_cost',
'errormsg': '{ifname} is not a bridge port member'
},
'path_priority': {
# XXX: we should set a maximum
'validate': assert_positive,
'location': '/sys/class/net/{ifname}/brport/priority',
'errormsg': '{ifname} is not a bridge port member'
},
'proxy_arp': {
'validate': assert_boolean,
'location': '/proc/sys/net/ipv4/conf/{ifname}/proxy_arp',
},
'proxy_arp_pvlan': {
'validate': assert_boolean,
'location': '/proc/sys/net/ipv4/conf/{ifname}/proxy_arp_pvlan',
},
# link_detect vs link_filter name weirdness
'link_detect': {
'validate': lambda link: assert_range(link,0,3),
'location': '/proc/sys/net/ipv4/conf/{ifname}/link_filter',
},
}
_sysfs_get = {
'arp_cache_tmo': {
'location': '/proc/sys/net/ipv4/neigh/{ifname}/base_reachable_time_ms',
},
'arp_filter': {
'location': '/proc/sys/net/ipv4/conf/{ifname}/arp_filter',
},
'arp_accept': {
'location': '/proc/sys/net/ipv4/conf/{ifname}/arp_accept',
},
'arp_announce': {
'location': '/proc/sys/net/ipv4/conf/{ifname}/arp_announce',
},
'arp_ignore': {
'location': '/proc/sys/net/ipv4/conf/{ifname}/arp_ignore',
},
'ipv4_forwarding': {
'location': '/proc/sys/net/ipv4/conf/{ifname}/forwarding',
},
'rp_filter': {
'location': '/proc/sys/net/ipv4/conf/{ifname}/rp_filter',
},
'ipv6_accept_ra': {
'location': '/proc/sys/net/ipv6/conf/{ifname}/accept_ra',
},
'ipv6_autoconf': {
'location': '/proc/sys/net/ipv6/conf/{ifname}/autoconf',
},
'ipv6_forwarding': {
'location': '/proc/sys/net/ipv6/conf/{ifname}/forwarding',
},
'ipv6_dad_transmits': {
'location': '/proc/sys/net/ipv6/conf/{ifname}/dad_transmits',
},
'proxy_arp': {
'location': '/proc/sys/net/ipv4/conf/{ifname}/proxy_arp',
},
'proxy_arp_pvlan': {
'location': '/proc/sys/net/ipv4/conf/{ifname}/proxy_arp_pvlan',
},
'link_detect': {
'location': '/proc/sys/net/ipv4/conf/{ifname}/link_filter',
},
}
@classmethod
def exists(cls, ifname):
return os.path.exists(f'/sys/class/net/{ifname}')
@classmethod
def get_config(cls):
"""
Some but not all interfaces require a configuration when they are added
using iproute2. This method will provide the configuration dictionary
used by this class.
"""
return deepcopy(cls.default)
    def __init__(self, ifname, **kargs):
        """
        This is the base interface class which supports basic IP/MAC address
        operations as well as DHCP(v6). Other interfaces which represent e.g.
        an ethernet bridge are implemented as derived classes adding all
        additional functionality.

        For creation you will need to provide the interface type, otherwise
        the existing interface is used.

        DEBUG:
        This class has embedded debugging (print) which can be enabled by
        creating the following file:
        vyos@vyos# touch /tmp/vyos.ifconfig.debug

        Example:
        >>> from vyos.ifconfig import Interface
        >>> i = Interface('eth0')
        """
        self.config = deepcopy(kargs)
        self.config['ifname'] = self.ifname = ifname
        # reference counter used by set_admin_state() to balance nested
        # 'down'/'up' requests from multiple callers
        self._admin_state_down_cnt = 0
        # we must have updated config before initialising the Interface
        super().__init__(**kargs)
        if not self.exists(ifname):
            # Any instance of Interface, such as Interface('eth0') can be used
            # safely to access the generic function in this class as 'type' is
            # unset, the class can not be created
            # NOTE(review): self.iftype is not defined in this base class;
            # it is presumably supplied by derived classes - confirm
            if not self.iftype:
                raise Exception(f'interface "{ifname}" not found')
            self.config['type'] = self.iftype
            # Should an Instance of a child class (EthernetIf, DummyIf, ..)
            # be required, then create should be set to False to not accidentally create it.
            # In case a subclass does not define it, we use get to set the default to True
            if self.config.get('create',True):
                for k in self.required:
                    if k not in kargs:
                        # NOTE(review): this base class' `default` dict has no
                        # 'type' key - presumably subclasses that declare
                        # `required` options also define one; verify
                        name = self.default['type']
                        raise ConfigError(f'missing required option {k} for {name} {ifname} creation')
                self._create()
            # If we can not connect to the interface then let the caller know
            # as the class could not be correctly initialised
            else:
                raise Exception(f'interface "{ifname}" not found!')
        # temporary list of assigned IP addresses
        self._addr = []
        self.operational = self.OperationalClass(ifname)
        self.vrrp = VRRP(ifname)
def _create(self):
cmd = 'ip link add dev {ifname} type {type}'.format(**self.config)
self._cmd(cmd)
def remove(self):
"""
Remove interface from operating system. Removing the interface
deconfigures all assigned IP addresses and clear possible DHCP(v6)
client processes.
Example:
>>> from vyos.ifconfig import Interface
>>> i = Interface('eth0')
>>> i.remove()
"""
# remove all assigned IP addresses from interface - this is a bit redundant
# as the kernel will remove all addresses on interface deletion, but we
# can not delete ALL interfaces, see below
self.flush_addrs()
# ---------------------------------------------------------------------
# Any class can define an eternal regex in its definition
# interface matching the regex will not be deleted
eternal = self.definition['eternal']
if not eternal:
self._delete()
elif not re.match(eternal, self.ifname):
self._delete()
def _delete(self):
# NOTE (Improvement):
# after interface removal no other commands should be allowed
# to be called and instead should raise an Exception:
cmd = 'ip link del dev {ifname}'.format(**self.config)
return self._cmd(cmd)
def _set_vrf_ct_zone(self, vrf):
"""
Add/Remove rules in nftables to associate traffic in VRF to an
individual conntack zone
"""
if vrf:
# Get routing table ID for VRF
vrf_table_id = get_interface_config(vrf).get('linkinfo', {}).get(
'info_data', {}).get('table')
# Add map element with interface and zone ID
if vrf_table_id:
self._cmd(f'nft add element inet vrf_zones ct_iface_map {{ "{self.ifname}" : {vrf_table_id} }}')
else:
nft_del_element = f'delete element inet vrf_zones ct_iface_map {{ "{self.ifname}" }}'
# Check if deleting is possible first to avoid raising errors
_, err = self._popen(f'nft -c {nft_del_element}')
if not err:
# Remove map element
self._cmd(f'nft {nft_del_element}')
def get_min_mtu(self):
"""
Get hardware minimum supported MTU
Example:
>>> from vyos.ifconfig import Interface
>>> Interface('eth0').get_min_mtu()
'60'
"""
return int(self.get_interface('min_mtu'))
def get_max_mtu(self):
"""
Get hardware maximum supported MTU
Example:
>>> from vyos.ifconfig import Interface
>>> Interface('eth0').get_max_mtu()
'9000'
"""
return int(self.get_interface('max_mtu'))
def get_mtu(self):
"""
Get/set interface mtu in bytes.
Example:
>>> from vyos.ifconfig import Interface
>>> Interface('eth0').get_mtu()
'1500'
"""
return int(self.get_interface('mtu'))
def set_mtu(self, mtu):
"""
Get/set interface mtu in bytes.
Example:
>>> from vyos.ifconfig import Interface
>>> Interface('eth0').set_mtu(1400)
>>> Interface('eth0').get_mtu()
'1400'
"""
tmp = self.get_interface('mtu')
if str(tmp) == mtu:
return None
return self.set_interface('mtu', mtu)
def get_mac(self):
"""
Get current interface MAC (Media Access Contrl) address used.
Example:
>>> from vyos.ifconfig import Interface
>>> Interface('eth0').get_mac()
'00:50:ab:cd:ef:00'
"""
return self.get_interface('mac')
    def get_mac_synthetic(self):
        """
        Get a synthetic MAC address. This is a common method which can be called
        from derived classes to overwrite the get_mac() call in a generic way.

        NOTE: Tunnel interfaces have no "MAC" address by default. The content
        of the 'address' file in /sys/class/net/device contains the
        local-ip thus we generate a random MAC address instead

        Example:
        >>> from vyos.ifconfig import Interface
        >>> Interface('eth0').get_mac()
        '00:50:ab:cd:ef:00'
        """
        from hashlib import sha256
        # Get processor ID number
        cpu_id = self._cmd('sudo dmidecode -t 4 | grep ID | head -n1 | sed "s/.*ID://;s/ //g"')
        # Get system eth0 base MAC address - every system has eth0
        eth0_mac = Interface('eth0').get_mac()
        sha = sha256()
        # Calculate SHA256 sum based on the CPU ID number, eth0 mac address and
        # this interface identifier - this is as predictable as an interface
        # MAC address and thus can be used in the same way
        sha.update(cpu_id.encode())
        sha.update(eth0_mac.encode())
        sha.update(self.ifname.encode())
        # take the most significant 48 bits from the SHA256 string
        tmp = sha.hexdigest()[:12]
        # Convert pseudo random string into EUI format which now represents a
        # MAC address
        tmp = EUI(tmp).value
        # set locally administered bit in MAC address
        # NOTE(review): 0xf2 sets the locally-administered bit (0x02) but also
        # forces the top nibble of the first octet to 0xf - confirm intent
        tmp |= 0xf20000000000
        # convert integer to "real" MAC address representation
        mac = EUI(hex(tmp).split('x')[-1])
        # change dialect to use : as delimiter instead of -
        mac.dialect = mac_unix_expanded
        return str(mac)
def set_mac(self, mac):
"""
Set interface MAC (Media Access Contrl) address to given value.
Example:
>>> from vyos.ifconfig import Interface
>>> Interface('eth0').set_mac('00:50:ab:cd:ef:01')
"""
# If MAC is unchanged, bail out early
if mac == self.get_mac():
return None
# MAC address can only be changed if interface is in 'down' state
prev_state = self.get_admin_state()
if prev_state == 'up':
self.set_admin_state('down')
self.set_interface('mac', mac)
# Turn an interface to the 'up' state if it was changed to 'down' by this fucntion
if prev_state == 'up':
self.set_admin_state('up')
def set_vrf(self, vrf):
"""
Add/Remove interface from given VRF instance.
Example:
>>> from vyos.ifconfig import Interface
>>> Interface('eth0').set_vrf('foo')
>>> Interface('eth0').set_vrf()
"""
tmp = self.get_interface('vrf')
if tmp == vrf:
return None
self.set_interface('vrf', vrf)
self._set_vrf_ct_zone(vrf)
def set_arp_cache_tmo(self, tmo):
"""
Set ARP cache timeout value in seconds. Internal Kernel representation
is in milliseconds.
Example:
>>> from vyos.ifconfig import Interface
>>> Interface('eth0').set_arp_cache_tmo(40)
"""
tmo = str(int(tmo) * 1000)
tmp = self.get_interface('arp_cache_tmo')
if tmp == tmo:
return None
return self.set_interface('arp_cache_tmo', tmo)
def set_tcp_ipv4_mss(self, mss):
"""
Set IPv4 TCP MSS value advertised when TCP SYN packets leave this
interface. Value is in bytes.
A value of 0 will disable the MSS adjustment
Example:
>>> from vyos.ifconfig import Interface
>>> Interface('eth0').set_tcp_ipv4_mss(1340)
"""
iptables_bin = 'iptables'
base_options = f'-A FORWARD -o {self.ifname} -p tcp -m tcp --tcp-flags SYN,RST SYN'
out = self._cmd(f'{iptables_bin}-save -t mangle')
for line in out.splitlines():
if line.startswith(base_options):
# remove OLD MSS mangling configuration
line = line.replace('-A FORWARD', '-D FORWARD')
self._cmd(f'{iptables_bin} -t mangle {line}')
cmd_mss = f'{iptables_bin} -t mangle {base_options} --jump TCPMSS'
if mss == 'clamp-mss-to-pmtu':
self._cmd(f'{cmd_mss} --clamp-mss-to-pmtu')
elif int(mss) > 0:
# probably add option to clamp only if bigger:
low_mss = str(int(mss) + 1)
self._cmd(f'{cmd_mss} -m tcpmss --mss {low_mss}:65535 --set-mss {mss}')
def set_tcp_ipv6_mss(self, mss):
"""
Set IPv6 TCP MSS value advertised when TCP SYN packets leave this
interface. Value is in bytes.
A value of 0 will disable the MSS adjustment
Example:
>>> from vyos.ifconfig import Interface
>>> Interface('eth0').set_tcp_mss(1320)
"""
iptables_bin = 'ip6tables'
base_options = f'-A FORWARD -o {self.ifname} -p tcp -m tcp --tcp-flags SYN,RST SYN'
out = self._cmd(f'{iptables_bin}-save -t mangle')
for line in out.splitlines():
if line.startswith(base_options):
# remove OLD MSS mangling configuration
line = line.replace('-A FORWARD', '-D FORWARD')
self._cmd(f'{iptables_bin} -t mangle {line}')
cmd_mss = f'{iptables_bin} -t mangle {base_options} --jump TCPMSS'
if mss == 'clamp-mss-to-pmtu':
self._cmd(f'{cmd_mss} --clamp-mss-to-pmtu')
elif int(mss) > 0:
# probably add option to clamp only if bigger:
low_mss = str(int(mss) + 1)
self._cmd(f'{cmd_mss} -m tcpmss --mss {low_mss}:65535 --set-mss {mss}')
def set_arp_filter(self, arp_filter):
"""
Filter ARP requests
1 - Allows you to have multiple network interfaces on the same
subnet, and have the ARPs for each interface be answered
based on whether or not the kernel would route a packet from
the ARP'd IP out that interface (therefore you must use source
based routing for this to work). In other words it allows control
of which cards (usually 1) will respond to an arp request.
0 - (default) The kernel can respond to arp requests with addresses
from other interfaces. This may seem wrong but it usually makes
sense, because it increases the chance of successful communication.
IP addresses are owned by the complete host on Linux, not by
particular interfaces. Only for more complex setups like load-
balancing, does this behaviour cause problems.
"""
tmp = self.get_interface('arp_filter')
if tmp == arp_filter:
return None
return self.set_interface('arp_filter', arp_filter)
def set_arp_accept(self, arp_accept):
"""
Define behavior for gratuitous ARP frames who's IP is not
already present in the ARP table:
0 - don't create new entries in the ARP table
1 - create new entries in the ARP table
Both replies and requests type gratuitous arp will trigger the
ARP table to be updated, if this setting is on.
If the ARP table already contains the IP address of the
gratuitous arp frame, the arp table will be updated regardless
if this setting is on or off.
"""
tmp = self.get_interface('arp_accept')
if tmp == arp_accept:
return None
return self.set_interface('arp_accept', arp_accept)
def set_arp_announce(self, arp_announce):
"""
Define different restriction levels for announcing the local
source IP address from IP packets in ARP requests sent on
interface:
0 - (default) Use any local address, configured on any interface
1 - Try to avoid local addresses that are not in the target's
subnet for this interface. This mode is useful when target
hosts reachable via this interface require the source IP
address in ARP requests to be part of their logical network
configured on the receiving interface. When we generate the
request we will check all our subnets that include the
target IP and will preserve the source address if it is from
such subnet.
Increasing the restriction level gives more chance for
receiving answer from the resolved target while decreasing
the level announces more valid sender's information.
"""
tmp = self.get_interface('arp_announce')
if tmp == arp_announce:
return None
return self.set_interface('arp_announce', arp_announce)
def set_arp_ignore(self, arp_ignore):
"""
Define different modes for sending replies in response to received ARP
requests that resolve local target IP addresses:
0 - (default): reply for any local target IP address, configured
on any interface
1 - reply only if the target IP address is local address
configured on the incoming interface
"""
tmp = self.get_interface('arp_ignore')
if tmp == arp_ignore:
return None
return self.set_interface('arp_ignore', arp_ignore)
def set_ipv4_forwarding(self, forwarding):
""" Configure IPv4 forwarding. """
tmp = self.get_interface('ipv4_forwarding')
if tmp == forwarding:
return None
return self.set_interface('ipv4_forwarding', forwarding)
def set_ipv4_source_validation(self, value):
"""
Help prevent attacks used by Spoofing IP Addresses. Reverse path
filtering is a Kernel feature that, when enabled, is designed to ensure
packets that are not routable to be dropped. The easiest example of this
would be and IP Address of the range 10.0.0.0/8, a private IP Address,
being received on the Internet facing interface of the router.
As per RFC3074.
"""
if value == 'strict':
value = 1
elif value == 'loose':
value = 2
else:
value = 0
all_rp_filter = int(read_file('/proc/sys/net/ipv4/conf/all/rp_filter'))
if all_rp_filter > value:
global_setting = 'disable'
if all_rp_filter == 1: global_setting = 'strict'
elif all_rp_filter == 2: global_setting = 'loose'
print(f'WARNING: Global source-validation is set to "{global_setting}\n"' \
'this overrides per interface setting!')
tmp = self.get_interface('rp_filter')
if int(tmp) == value:
return None
return self.set_interface('rp_filter', value)
def set_ipv6_accept_ra(self, accept_ra):
"""
Accept Router Advertisements; autoconfigure using them.
It also determines whether or not to transmit Router Solicitations.
If and only if the functional setting is to accept Router
Advertisements, Router Solicitations will be transmitted.
0 - Do not accept Router Advertisements.
1 - (default) Accept Router Advertisements if forwarding is disabled.
2 - Overrule forwarding behaviour. Accept Router Advertisements even if
forwarding is enabled.
"""
tmp = self.get_interface('ipv6_accept_ra')
if tmp == accept_ra:
return None
return self.set_interface('ipv6_accept_ra', accept_ra)
def set_ipv6_autoconf(self, autoconf):
"""
Autoconfigure addresses using Prefix Information in Router
Advertisements.
"""
tmp = self.get_interface('ipv6_autoconf')
if tmp == autoconf:
return None
return self.set_interface('ipv6_autoconf', autoconf)
def add_ipv6_eui64_address(self, prefix):
"""
Extended Unique Identifier (EUI), as per RFC2373, allows a host to
assign itself a unique IPv6 address based on a given IPv6 prefix.
Calculate the EUI64 from the interface's MAC, then assign it
with the given prefix to the interface.
"""
# T2863: only add a link-local IPv6 address if the interface returns
# a MAC address. This is not the case on e.g. WireGuard interfaces.
mac = self.get_mac()
if mac:
eui64 = mac2eui64(mac, prefix)
prefixlen = prefix.split('/')[1]
self.add_addr(f'{eui64}/{prefixlen}')
def del_ipv6_eui64_address(self, prefix):
"""
Delete the address based on the interface's MAC-based EUI64
combined with the prefix address.
"""
if is_ipv6(prefix):
eui64 = mac2eui64(self.get_mac(), prefix)
prefixlen = prefix.split('/')[1]
self.del_addr(f'{eui64}/{prefixlen}')
def set_ipv6_forwarding(self, forwarding):
"""
Configure IPv6 interface-specific Host/Router behaviour.
False:
By default, Host behaviour is assumed. This means:
1. IsRouter flag is not set in Neighbour Advertisements.
2. If accept_ra is TRUE (default), transmit Router
Solicitations.
3. If accept_ra is TRUE (default), accept Router
Advertisements (and do autoconfiguration).
4. If accept_redirects is TRUE (default), accept Redirects.
True:
If local forwarding is enabled, Router behaviour is assumed.
This means exactly the reverse from the above:
1. IsRouter flag is set in Neighbour Advertisements.
2. Router Solicitations are not sent unless accept_ra is 2.
3. Router Advertisements are ignored unless accept_ra is 2.
4. Redirects are ignored.
"""
tmp = self.get_interface('ipv6_forwarding')
if tmp == forwarding:
return None
return self.set_interface('ipv6_forwarding', forwarding)
def set_ipv6_dad_messages(self, dad):
"""
The amount of Duplicate Address Detection probes to send.
Default: 1
"""
tmp = self.get_interface('ipv6_dad_transmits')
if tmp == dad:
return None
return self.set_interface('ipv6_dad_transmits', dad)
def set_link_detect(self, link_filter):
"""
Configure kernel response in packets received on interfaces that are 'down'
0 - Allow packets to be received for the address on this interface
even if interface is disabled or no carrier.
1 - Ignore packets received if interface associated with the incoming
address is down.
2 - Ignore packets received if interface associated with the incoming
address is down or has no carrier.
Default value is 0. Note that some distributions enable it in startup
scripts.
Example:
>>> from vyos.ifconfig import Interface
>>> Interface('eth0').set_link_detect(1)
"""
tmp = self.get_interface('link_detect')
if tmp == link_filter:
return None
return self.set_interface('link_detect', link_filter)
def get_alias(self):
"""
Get interface alias name used by e.g. SNMP
Example:
>>> Interface('eth0').get_alias()
'interface description as set by user'
"""
return self.get_interface('alias')
def set_alias(self, ifalias=''):
"""
Set interface alias name used by e.g. SNMP
Example:
>>> from vyos.ifconfig import Interface
>>> Interface('eth0').set_alias('VyOS upstream interface')
to clear alias e.g. delete it use:
>>> Interface('eth0').set_ifalias('')
"""
tmp = self.get_interface('alias')
if tmp == ifalias:
return None
self.set_interface('alias', ifalias)
def get_admin_state(self):
"""
Get interface administrative state. Function will return 'up' or 'down'
Example:
>>> from vyos.ifconfig import Interface
>>> Interface('eth0').get_admin_state()
'up'
"""
return self.get_interface('admin_state')
def set_admin_state(self, state):
"""
Set interface administrative state to be 'up' or 'down'
Example:
>>> from vyos.ifconfig import Interface
>>> Interface('eth0').set_admin_state('down')
>>> Interface('eth0').get_admin_state()
'down'
"""
if state == 'up':
self._admin_state_down_cnt -= 1
if self._admin_state_down_cnt < 1:
return self.set_interface('admin_state', state)
else:
self._admin_state_down_cnt += 1
return self.set_interface('admin_state', state)
def set_path_cost(self, cost):
"""
Set interface path cost, only relevant for STP enabled interfaces
Example:
>>> from vyos.ifconfig import Interface
>>> Interface('eth0').set_path_cost(4)
"""
self.set_interface('path_cost', cost)
def set_path_priority(self, priority):
"""
Set interface path priority, only relevant for STP enabled interfaces
Example:
>>> from vyos.ifconfig import Interface
>>> Interface('eth0').set_path_priority(4)
"""
self.set_interface('path_priority', priority)
def set_port_isolation(self, on_or_off):
"""
Controls whether a given port will be isolated, which means it will be
able to communicate with non-isolated ports only. By default this flag
is off.
Use enable=1 to enable or enable=0 to disable
Example:
>>> from vyos.ifconfig import Interface
>>> Interface('eth1').set_port_isolation('on')
"""
self.set_interface('bridge_port_isolation', on_or_off)
def set_proxy_arp(self, enable):
"""
Set per interface proxy ARP configuration
Example:
>>> from vyos.ifconfig import Interface
>>> Interface('eth0').set_proxy_arp(1)
"""
tmp = self.get_interface('proxy_arp')
if tmp == enable:
return None
self.set_interface('proxy_arp', enable)
def set_proxy_arp_pvlan(self, enable):
"""
Private VLAN proxy arp.
Basically allow proxy arp replies back to the same interface
(from which the ARP request/solicitation was received).
This is done to support (ethernet) switch features, like RFC
3069, where the individual ports are NOT allowed to
communicate with each other, but they are allowed to talk to
the upstream router. As described in RFC 3069, it is possible
to allow these hosts to communicate through the upstream
router by proxy_arp'ing. Don't need to be used together with
proxy_arp.
This technology is known by different names:
In RFC 3069 it is called VLAN Aggregation.
Cisco and Allied Telesyn call it Private VLAN.
Hewlett-Packard call it Source-Port filtering or port-isolation.
Ericsson call it MAC-Forced Forwarding (RFC Draft).
Example:
>>> from vyos.ifconfig import Interface
>>> Interface('eth0').set_proxy_arp_pvlan(1)
"""
tmp = self.get_interface('proxy_arp_pvlan')
if tmp == enable:
return None
self.set_interface('proxy_arp_pvlan', enable)
def get_addr_v4(self):
"""
Retrieve assigned IPv4 addresses from given interface.
This is done using the netifaces and ipaddress python modules.
Example:
>>> from vyos.ifconfig import Interface
>>> Interface('eth0').get_addr_v4()
['172.16.33.30/24']
"""
ipv4 = []
if AF_INET in ifaddresses(self.config['ifname']):
for v4_addr in ifaddresses(self.config['ifname'])[AF_INET]:
# we need to manually assemble a list of IPv4 address/prefix
prefix = '/' + \
str(IPv4Network('0.0.0.0/' + v4_addr['netmask']).prefixlen)
ipv4.append(v4_addr['addr'] + prefix)
return ipv4
def get_addr_v6(self):
"""
Retrieve assigned IPv6 addresses from given interface.
This is done using the netifaces and ipaddress python modules.
Example:
>>> from vyos.ifconfig import Interface
>>> Interface('eth0').get_addr_v6()
['fe80::20c:29ff:fe11:a174/64']
"""
ipv6 = []
if AF_INET6 in ifaddresses(self.config['ifname']):
for v6_addr in ifaddresses(self.config['ifname'])[AF_INET6]:
# Note that currently expanded netmasks are not supported. That means
# 2001:db00::0/24 is a valid argument while 2001:db00::0/ffff:ff00:: not.
# see https://docs.python.org/3/library/ipaddress.html
prefix = '/' + v6_addr['netmask'].split('/')[-1]
# we alsoneed to remove the interface suffix on link local
# addresses
v6_addr['addr'] = v6_addr['addr'].split('%')[0]
ipv6.append(v6_addr['addr'] + prefix)
return ipv6
def get_addr(self):
"""
Retrieve assigned IPv4 and IPv6 addresses from given interface.
Example:
>>> from vyos.ifconfig import Interface
>>> Interface('eth0').get_addr()
['172.16.33.30/24', 'fe80::20c:29ff:fe11:a174/64']
"""
return self.get_addr_v4() + self.get_addr_v6()
def add_addr(self, addr):
"""
Add IP(v6) address to interface. Address is only added if it is not
already assigned to that interface. Address format must be validated
and compressed/normalized before calling this function.
addr: can be an IPv4 address, IPv6 address, dhcp or dhcpv6!
IPv4: add IPv4 address to interface
IPv6: add IPv6 address to interface
dhcp: start dhclient (IPv4) on interface
dhcpv6: start WIDE DHCPv6 (IPv6) on interface
Returns False if address is already assigned and wasn't re-added.
Example:
>>> from vyos.ifconfig import Interface
>>> j = Interface('eth0')
>>> j.add_addr('192.0.2.1/24')
>>> j.add_addr('2001:db8::ffff/64')
>>> j.get_addr()
['192.0.2.1/24', '2001:db8::ffff/64']
"""
# XXX: normalize/compress with ipaddress if calling functions don't?
# is subnet mask always passed, and in the same way?
# do not add same address twice
if addr in self._addr:
return False
addr_is_v4 = is_ipv4(addr)
# we can't have both DHCP and static IPv4 addresses assigned
for a in self._addr:
if ( ( addr == 'dhcp' and a != 'dhcpv6' and is_ipv4(a) ) or
( a == 'dhcp' and addr != 'dhcpv6' and addr_is_v4 ) ):
raise ConfigError((
"Can't configure both static IPv4 and DHCP address "
"on the same interface"))
# add to interface
if addr == 'dhcp':
self.set_dhcp(True)
elif addr == 'dhcpv6':
self.set_dhcpv6(True)
elif not is_intf_addr_assigned(self.ifname, addr):
self._cmd(f'ip addr add "{addr}" '
f'{"brd + " if addr_is_v4 else ""}dev "{self.ifname}"')
else:
return False
# add to cache
self._addr.append(addr)
return True
def del_addr(self, addr):
"""
Delete IP(v6) address from interface. Address is only deleted if it is
assigned to that interface. Address format must be exactly the same as
was used when adding the address.
addr: can be an IPv4 address, IPv6 address, dhcp or dhcpv6!
IPv4: delete IPv4 address from interface
IPv6: delete IPv6 address from interface
dhcp: stop dhclient (IPv4) on interface
dhcpv6: stop dhclient (IPv6) on interface
Returns False if address isn't already assigned and wasn't deleted.
Example:
>>> from vyos.ifconfig import Interface
>>> j = Interface('eth0')
>>> j.add_addr('2001:db8::ffff/64')
>>> j.add_addr('192.0.2.1/24')
>>> j.get_addr()
['192.0.2.1/24', '2001:db8::ffff/64']
>>> j.del_addr('192.0.2.1/24')
>>> j.get_addr()
['2001:db8::ffff/64']
"""
if not addr:
raise ValueError()
# remove from interface
if addr == 'dhcp':
self.set_dhcp(False)
elif addr == 'dhcpv6':
self.set_dhcpv6(False)
elif is_intf_addr_assigned(self.ifname, addr):
self._cmd(f'ip addr del "{addr}" dev "{self.ifname}"')
else:
return False
# remove from cache
if addr in self._addr:
self._addr.remove(addr)
return True
def flush_addrs(self):
    """
    Remove every address from the interface, including any DHCP/DHCPv6
    client which could otherwise re-add addresses behind our back.
    Raises an exception if the flush command fails.
    """
    # stop both DHCP client flavours first
    for client_toggle in (self.set_dhcp, self.set_dhcpv6):
        client_toggle(False)
    # drop whatever addresses are left on the link
    self._cmd(f'ip addr flush dev "{self.ifname}"')
def add_to_bridge(self, bridge_dict):
    """
    Add this interface as a port to one or more bridges.

    bridge_dict: mapping of bridge name -> per-port configuration with
                 the optional keys 'cost', 'priority', 'native_vlan'
                 and 'allowed_vlan'.

    All interface addresses are flushed first, as a bridge member must
    not carry its own addresses.
    """
    # drop all interface addresses first
    self.flush_addrs()

    ifname = self.ifname

    for bridge, bridge_config in bridge_dict.items():
        # add interface to bridge - use Section.klass to get BridgeIf class
        Section.klass(bridge)(bridge, create=True).add_port(self.ifname)

        # set bridge port path cost
        if 'cost' in bridge_config:
            self.set_path_cost(bridge_config['cost'])

        # set bridge port path priority
        # BUGFIX: this previously called set_path_cost(), writing the
        # priority value into the path-cost sysfs node instead of
        # /sys/class/net/<ifname>/brport/priority
        if 'priority' in bridge_config:
            self.set_path_priority(bridge_config['priority'])

        bridge_vlan_filter = Section.klass(bridge)(bridge, create=True).get_vlan_filter()

        if int(bridge_vlan_filter):
            cur_vlan_ids = get_vlan_ids(ifname)
            add_vlan = []
            native_vlan_id = None
            allowed_vlan_ids = []

            if 'native_vlan' in bridge_config:
                vlan_id = bridge_config['native_vlan']
                add_vlan.append(vlan_id)
                native_vlan_id = vlan_id

            if 'allowed_vlan' in bridge_config:
                for vlan in bridge_config['allowed_vlan']:
                    # 'allowed_vlan' entries are either a single VID or a
                    # range written as "<low>-<high>" - expand the latter
                    vlan_range = vlan.split('-')
                    if len(vlan_range) == 2:
                        for vlan_add in range(int(vlan_range[0]), int(vlan_range[1]) + 1):
                            add_vlan.append(str(vlan_add))
                            allowed_vlan_ids.append(str(vlan_add))
                    else:
                        add_vlan.append(vlan)
                        allowed_vlan_ids.append(vlan)

            # Remove redundant VLANs from the system
            for vlan in list_diff(cur_vlan_ids, add_vlan):
                cmd = f'bridge vlan del dev {ifname} vid {vlan} master'
                self._cmd(cmd)

            for vlan in allowed_vlan_ids:
                cmd = f'bridge vlan add dev {ifname} vid {vlan} master'
                self._cmd(cmd)

            # Setting native VLAN to system
            if native_vlan_id:
                cmd = f'bridge vlan add dev {ifname} vid {native_vlan_id} pvid untagged master'
                self._cmd(cmd)
def set_dhcp(self, enable):
    """
    Enable/Disable DHCP client on a given interface.

    enable: True (re)starts the dhclient systemd unit for this interface,
            False stops it and removes its runtime files.
    Raises ValueError if enable is not a boolean.
    Returns the result of the systemctl restart command when enabling,
    otherwise None.
    """
    if enable not in [True, False]:
        raise ValueError()

    ifname = self.ifname
    # per-interface dhclient runtime file locations
    config_base = r'/var/lib/dhcp/dhclient'
    config_file = f'{config_base}_{ifname}.conf'
    options_file = f'{config_base}_{ifname}.options'
    pid_file = f'{config_base}_{ifname}.pid'
    lease_file = f'{config_base}_{ifname}.leases'

    # Stop client with old config files to get the right IF_METRIC.
    systemd_service = f'dhclient@{ifname}.service'
    if is_systemd_service_active(systemd_service):
        self._cmd(f'systemctl stop {systemd_service}')

    # 'disable' in the cached config suppresses the client even when
    # enable=True (interface is administratively down)
    if enable and 'disable' not in self._config:
        if dict_search('dhcp_options.host_name', self._config) == None:
            # read configured system hostname.
            # maybe change to vyos hostd client ???
            hostname = 'vyos'
            with open('/etc/hostname', 'r') as f:
                hostname = f.read().rstrip('\n')
            # inject hostname as default dhcp_options.host_name without
            # overwriting anything set explicitly (dict_merge semantics)
            tmp = {'dhcp_options' : { 'host_name' : hostname}}
            self._config = dict_merge(tmp, self._config)

        render(options_file, 'dhcp-client/daemon-options.tmpl',
               self._config)
        render(config_file, 'dhcp-client/ipv4.tmpl',
               self._config)

        # 'up' check is mandatory b/c even if the interface is A/D, as soon as
        # the DHCP client is started the interface will be placed in u/u state.
        # This is not what we intended to do when disabling an interface.
        return self._cmd(f'systemctl restart {systemd_service}')
    else:
        # cleanup old config files
        for file in [config_file, options_file, pid_file, lease_file]:
            if os.path.isfile(file):
                os.remove(file)
def set_dhcpv6(self, enable):
    """
    Enable/Disable DHCPv6 client on a given interface.

    enable: True renders the dhcp6c config and (re)starts its systemd
            unit, False stops the unit and removes the config file.
    Raises ValueError if enable is not a boolean.
    """
    if enable not in [True, False]:
        raise ValueError()

    ifname = self.ifname
    config_file = f'/run/dhcp6c/dhcp6c.{ifname}.conf'
    systemd_service = f'dhcp6c@{ifname}.service'

    # 'disable' in the cached config suppresses the client even when
    # enable=True (interface is administratively down)
    if enable and 'disable' not in self._config:
        render(config_file, 'dhcp-client/ipv6.tmpl',
               self._config)

        # We must ignore any return codes. This is required to enable
        # DHCPv6-PD for interfaces which are yet not up and running.
        return self._popen(f'systemctl restart {systemd_service}')
    else:
        if is_systemd_service_active(systemd_service):
            self._cmd(f'systemctl stop {systemd_service}')
        if os.path.isfile(config_file):
            os.remove(config_file)
def set_mirror(self):
    """Install (or tear down) tc-based port mirroring rules for this
    interface, driven by the cached configuration dictionary."""
    # Please refer to the document for details
    #   - https://man7.org/linux/man-pages/man8/tc.8.html
    #   - https://man7.org/linux/man-pages/man8/tc-mirred.8.html
    # Depending if we are the source or the target interface of the port
    # mirror we need to setup some variables.
    source_if = self._config['ifname']
    config = self._config.get('mirror', None)

    # if we are the mirror *target*, the tc rules are installed on the
    # source interface instead of ourselves
    if 'is_mirror_intf' in self._config:
        source_if = next(iter(self._config['is_mirror_intf']))
        config = self._config['is_mirror_intf'][source_if].get('mirror', None)

    # Check configuration stored by old perl code before delete T3782
    if not 'redirect' in self._config:
        # Please do not clear the 'set $? = 0 '. It's meant to force a return of 0
        # Remove existing mirroring rules
        delete_tc_cmd = f'tc qdisc del dev {source_if} handle ffff: ingress 2> /dev/null;'
        delete_tc_cmd += f'tc qdisc del dev {source_if} handle 1: root prio 2> /dev/null;'
        delete_tc_cmd += 'set $?=0'
        self._popen(delete_tc_cmd)

    # Bail out early if nothing needs to be configured
    if not config:
        return

    for direction, mirror_if in config.items():
        # skip mirror targets which do not (yet) exist in the kernel
        if mirror_if not in interfaces():
            continue

        if direction == 'ingress':
            handle = 'ffff: ingress'
            parent = 'ffff:'
        elif direction == 'egress':
            handle = '1: root prio'
            parent = '1:'
        # NOTE(review): handle/parent are only bound for 'ingress'/'egress';
        # any other key would raise NameError below — presumably the CLI
        # restricts the keys to these two values; confirm against the schema.

        # Mirror egress traffic
        mirror_cmd = f'tc qdisc add dev {source_if} handle {handle}; '
        # Export the mirrored traffic to the interface
        mirror_cmd += f'tc filter add dev {source_if} parent {parent} protocol all prio 10 u32 match u32 0 0 flowid 1:1 action mirred egress mirror dev {mirror_if}'
        self._popen(mirror_cmd)
def set_xdp(self, state):
    """
    Enable or disable kernel XDP support on this interface.
    state must be a boolean; True loads the VyOS XDP router program,
    False unloads any attached program.

    Example:
    >>> from vyos.ifconfig import Interface
    >>> i = Interface('eth0')
    >>> i.set_xdp(True)
    """
    if not isinstance(state, bool):
        raise ValueError("Value out of range")

    # https://phabricator.vyos.net/T3448 - there is (yet) no RPI support for XDP
    if not os.path.exists('/usr/sbin/xdp_loader'):
        return

    ifname = self.config['ifname']
    if state:
        # Using 'xdp' will automatically decide if the driver supports
        # 'xdpdrv' or only 'xdpgeneric'. A user later sees which driver is
        # actually in use by calling 'ip a' or 'show interfaces ethernet'
        command = (f'xdp_loader -d {ifname} --auto-mode -F --progsec xdp_router '
                   f'--filename /usr/share/vyos/xdp/xdp_prog_kern.o && '
                   f'xdp_prog_user -d {ifname}')
    else:
        command = f'xdp_loader -d {ifname} -U --auto-mode'
    return self._cmd(command)
def update(self, config):
    """ General helper function which works on a dictionary retrieved by
    get_config_dict(). Its main intention is to consolidate the scattered
    interface setup code and provide a single point of entry when working
    on any interface.

    config: interface configuration dictionary as produced by
            get_config_dict(); it is cached on the instance so helpers
            like set_dhcp() can reuse it. """
    if self.debug:
        import pprint
        pprint.pprint(config)

    # Cache the configuration - it will be reused inside e.g. DHCP handler
    # XXX: maybe pass the option via __init__ in the future and rename this
    #      method to apply()?
    self._config = config

    # Change interface MAC address - re-set to real hardware address (hw-id)
    # if custom mac is removed. Skip if bond member.
    if 'is_bond_member' not in config:
        mac = config.get('hw_id')
        if 'mac' in config:
            mac = config.get('mac')
        if mac:
            self.set_mac(mac)

    # Update interface description
    self.set_alias(config.get('description', ''))

    # Ignore link state changes
    value = '2' if 'disable_link_detect' in config else '1'
    self.set_link_detect(value)

    # Configure assigned interface IP addresses. No longer
    # configured addresses will be removed first
    new_addr = config.get('address', [])

    # always ensure DHCP client is stopped (when not configured explicitly)
    if 'dhcp' not in new_addr:
        self.del_addr('dhcp')

    # always ensure DHCPv6 client is stopped (when not configured as client
    # for IPv6 address or prefix delegation)
    dhcpv6pd = dict_search('dhcpv6_options.pd', config)
    dhcpv6pd = dhcpv6pd != None and len(dhcpv6pd) != 0
    if 'dhcpv6' not in new_addr and not dhcpv6pd:
        self.del_addr('dhcpv6')

    # determine IP addresses which are assigned to the interface and build a
    # list of addresses which are no longer in the dict so they can be removed
    if 'address_old' in config:
        for addr in list_diff(config['address_old'], new_addr):
            # we will delete all interface specific IP addresses if they are not
            # explicitly configured on the CLI
            if is_ipv6_link_local(addr):
                # never delete the EUI-64 derived link-local address
                eui64 = mac2eui64(self.get_mac(), 'fe80::/64')
                if addr != f'{eui64}/64':
                    self.del_addr(addr)
            else:
                self.del_addr(addr)

    for addr in new_addr:
        self.add_addr(addr)

    # start DHCPv6 client when only PD was configured
    if dhcpv6pd:
        self.set_dhcpv6(True)

    # There are some items in the configuration which can only be applied
    # if this instance is not bound to a bridge. This should be checked
    # by the caller but better safe than sorry!
    if not any(k in ['is_bond_member', 'is_bridge_member'] for k in config):
        # Bind interface to given VRF or unbind it if vrf node is not set.
        # unbinding will call 'ip link set dev eth0 nomaster' which will
        # also drop the interface out of a bridge or bond - thus this is
        # checked before
        self.set_vrf(config.get('vrf', None))

    # Configure MSS value for IPv4 TCP connections
    tmp = dict_search('ip.adjust_mss', config)
    value = tmp if (tmp != None) else '0'
    self.set_tcp_ipv4_mss(value)

    # Configure MSS value for IPv6 TCP connections
    tmp = dict_search('ipv6.adjust_mss', config)
    value = tmp if (tmp != None) else '0'
    self.set_tcp_ipv6_mss(value)

    # Configure ARP cache timeout in milliseconds - has default value
    tmp = dict_search('ip.arp_cache_timeout', config)
    value = tmp if (tmp != None) else '30'
    self.set_arp_cache_tmo(value)

    # Configure ARP filter configuration
    tmp = dict_search('ip.disable_arp_filter', config)
    value = '0' if (tmp != None) else '1'
    self.set_arp_filter(value)

    # Configure ARP accept
    tmp = dict_search('ip.enable_arp_accept', config)
    value = '1' if (tmp != None) else '0'
    self.set_arp_accept(value)

    # Configure ARP announce
    tmp = dict_search('ip.enable_arp_announce', config)
    value = '1' if (tmp != None) else '0'
    self.set_arp_announce(value)

    # Configure ARP ignore
    tmp = dict_search('ip.enable_arp_ignore', config)
    value = '1' if (tmp != None) else '0'
    self.set_arp_ignore(value)

    # Enable proxy-arp on this interface
    tmp = dict_search('ip.enable_proxy_arp', config)
    value = '1' if (tmp != None) else '0'
    self.set_proxy_arp(value)

    # Enable private VLAN proxy ARP on this interface
    tmp = dict_search('ip.proxy_arp_pvlan', config)
    value = '1' if (tmp != None) else '0'
    self.set_proxy_arp_pvlan(value)

    # IPv4 forwarding
    tmp = dict_search('ip.disable_forwarding', config)
    value = '0' if (tmp != None) else '1'
    self.set_ipv4_forwarding(value)

    # IPv4 source-validation
    tmp = dict_search('ip.source_validation', config)
    value = tmp if (tmp != None) else '0'
    self.set_ipv4_source_validation(value)

    # IPv6 forwarding
    tmp = dict_search('ipv6.disable_forwarding', config)
    value = '0' if (tmp != None) else '1'
    self.set_ipv6_forwarding(value)

    # IPv6 router advertisements
    tmp = dict_search('ipv6.address.autoconf', config)
    value = '2' if (tmp != None) else '1'
    # accept_ra=2 also required when we are a DHCPv6 client
    if 'dhcpv6' in new_addr:
        value = '2'
    self.set_ipv6_accept_ra(value)

    # IPv6 address autoconfiguration
    tmp = dict_search('ipv6.address.autoconf', config)
    value = '1' if (tmp != None) else '0'
    self.set_ipv6_autoconf(value)

    # IPv6 Duplicate Address Detection (DAD) tries
    tmp = dict_search('ipv6.dup_addr_detect_transmits', config)
    value = tmp if (tmp != None) else '1'
    self.set_ipv6_dad_messages(value)

    # MTU - Maximum Transfer Unit
    if 'mtu' in config:
        self.set_mtu(config.get('mtu'))

    # Delete old IPv6 EUI64 addresses before changing MAC
    for addr in (dict_search('ipv6.address.eui64_old', config) or []):
        self.del_ipv6_eui64_address(addr)

    # Manage IPv6 link-local addresses
    if dict_search('ipv6.address.no_default_link_local', config) != None:
        self.del_ipv6_eui64_address('fe80::/64')
    else:
        self.add_ipv6_eui64_address('fe80::/64')

    # Add IPv6 EUI-based addresses
    tmp = dict_search('ipv6.address.eui64', config)
    if tmp:
        for addr in tmp:
            self.add_ipv6_eui64_address(addr)

    # re-add ourselves to any bridge we might have fallen out of
    if 'is_bridge_member' in config:
        bridge_dict = config.get('is_bridge_member')
        self.add_to_bridge(bridge_dict)

    # eXpress Data Path - highly experimental
    self.set_xdp('xdp' in config)

    # configure port mirror
    self.set_mirror()

    # Enable/Disable of an interface must always be done at the end of the
    # derived class to make use of the ref-counting set_admin_state()
    # function. We will only enable the interface if 'up' was called as
    # often as 'down'. This is required by some interface implementations
    # as certain parameters can only be changed when the interface is
    # in admin-down state. This ensures the link does not flap during
    # reconfiguration.
    state = 'down' if 'disable' in config else 'up'
    self.set_admin_state(state)

    # remove no longer required 802.1ad (Q-in-Q VLANs)
    ifname = config['ifname']
    for vif_s_id in config.get('vif_s_remove', {}):
        vif_s_ifname = f'{ifname}.{vif_s_id}'
        VLANIf(vif_s_ifname).remove()

    # create/update 802.1ad (Q-in-Q VLANs)
    for vif_s_id, vif_s_config in config.get('vif_s', {}).items():
        tmp = deepcopy(VLANIf.get_config())
        tmp['protocol'] = vif_s_config['protocol']
        tmp['source_interface'] = ifname
        tmp['vlan_id'] = vif_s_id

        vif_s_ifname = f'{ifname}.{vif_s_id}'
        vif_s_config['ifname'] = vif_s_ifname

        # It is not possible to change the VLAN encapsulation protocol
        # "on-the-fly". For this "quirk" we need to actively delete and
        # re-create the VIF-S interface.
        if self.exists(vif_s_ifname):
            cur_cfg = get_interface_config(vif_s_ifname)
            protocol = dict_search('linkinfo.info_data.protocol', cur_cfg).lower()
            if protocol != vif_s_config['protocol']:
                VLANIf(vif_s_ifname).remove()

        s_vlan = VLANIf(vif_s_ifname, **tmp)
        s_vlan.update(vif_s_config)

        # remove no longer required client VLAN (vif-c)
        for vif_c_id in vif_s_config.get('vif_c_remove', {}):
            vif_c_ifname = f'{vif_s_ifname}.{vif_c_id}'
            VLANIf(vif_c_ifname).remove()

        # create/update client VLAN (vif-c) interface
        for vif_c_id, vif_c_config in vif_s_config.get('vif_c', {}).items():
            tmp = deepcopy(VLANIf.get_config())
            tmp['source_interface'] = vif_s_ifname
            tmp['vlan_id'] = vif_c_id

            vif_c_ifname = f'{vif_s_ifname}.{vif_c_id}'
            vif_c_config['ifname'] = vif_c_ifname
            c_vlan = VLANIf(vif_c_ifname, **tmp)
            c_vlan.update(vif_c_config)

    # remove no longer required 802.1q VLAN interfaces
    for vif_id in config.get('vif_remove', {}):
        vif_ifname = f'{ifname}.{vif_id}'
        VLANIf(vif_ifname).remove()

    # create/update 802.1q VLAN interfaces
    for vif_id, vif_config in config.get('vif', {}).items():
        vif_ifname = f'{ifname}.{vif_id}'
        vif_config['ifname'] = vif_ifname

        tmp = deepcopy(VLANIf.get_config())
        tmp['source_interface'] = ifname
        tmp['vlan_id'] = vif_id

        # We need to ensure that the string format is consistent, and we need to exclude redundant spaces.
        sep = ' '
        if 'egress_qos' in vif_config:
            # Unwrap strings into arrays
            egress_qos_array = vif_config['egress_qos'].split()
            # The split array is spliced according to the fixed format
            tmp['egress_qos'] = sep.join(egress_qos_array)

        if 'ingress_qos' in vif_config:
            # Unwrap strings into arrays
            ingress_qos_array = vif_config['ingress_qos'].split()
            # The split array is spliced according to the fixed format
            tmp['ingress_qos'] = sep.join(ingress_qos_array)

        # Since setting the QoS control parameters in the later stage will
        # not completely delete the old settings,
        # we still need to delete the VLAN encapsulation interface in order to
        # ensure that the changed settings are effective.
        # NOTE(review): qos_str below accumulates a trailing space while
        # tmp['ingress_qos']/tmp['egress_qos'] do not contain one, so the
        # inequality seems to always hold when the kernel reports any QoS
        # mapping — presumably forcing a re-create; confirm this is intended.
        cur_cfg = get_interface_config(vif_ifname)
        qos_str = ''
        tmp2 = dict_search('linkinfo.info_data.ingress_qos', cur_cfg)
        if 'ingress_qos' in tmp and tmp2:
            for item in tmp2:
                from_key = item['from']
                to_key = item['to']
                qos_str += f'{from_key}:{to_key} '
            if qos_str != tmp['ingress_qos']:
                if self.exists(vif_ifname):
                    VLANIf(vif_ifname).remove()

        qos_str = ''
        tmp2 = dict_search('linkinfo.info_data.egress_qos', cur_cfg)
        if 'egress_qos' in tmp and tmp2:
            for item in tmp2:
                from_key = item['from']
                to_key = item['to']
                qos_str += f'{from_key}:{to_key} '
            if qos_str != tmp['egress_qos']:
                if self.exists(vif_ifname):
                    VLANIf(vif_ifname).remove()

        vlan = VLANIf(vif_ifname, **tmp)
        vlan.update(vif_config)
class VLANIf(Interface):
    """ Specific class which abstracts 802.1q and 802.1ad (Q-in-Q) VLAN interfaces """
    iftype = 'vlan'

    def _create(self):
        # nothing to do when the kernel device is already present
        if self.exists(f'{self.ifname}'):
            return

        # assemble the iproute2 command; optional VLAN features are only
        # appended when present in the interface configuration
        cmd = 'ip link add link {source_interface} name {ifname} type vlan id {vlan_id}'
        optional_parts = (('protocol', ' protocol {protocol}'),
                          ('ingress_qos', ' ingress-qos-map {ingress_qos}'),
                          ('egress_qos', ' egress-qos-map {egress_qos}'))
        for key, part in optional_parts:
            if key in self.config:
                cmd += part
        self._cmd(cmd.format(**self.config))

        # interface is always A/D down. It needs to be enabled explicitly
        self.set_admin_state('down')

    def set_admin_state(self, state):
        """
        Set this VLAN interface's administrative state to 'up' or 'down'.
        A VLAN interface can only be placed in admin-up state while its
        parent (lower) interface is up; otherwise the call is a no-op
        and None is returned.

        Example:
        >>> from vyos.ifconfig import Interface
        >>> Interface('eth0.10').set_admin_state('down')
        >>> Interface('eth0.10').get_admin_state()
        'down'
        """
        # read the parent interface's flag word from sysfs
        parent_flags_file = glob(f'/sys/class/net/{self.ifname}/lower*/flags')[0]
        with open(parent_flags_file, 'r') as f:
            flags = f.read()

        # IFF_UP is bit 0 - flags are defined in kernel source
        # include/uapi/linux/if.h; bail out if the parent is down
        if not int(flags, 16) & 1:
            return None

        return super().set_admin_state(state)

    def set_mirror(self):
        # port mirroring is not supported on VLAN interfaces
        return
vyos.ifconfig: T3883: bugfix VRF deletion
We cannot pass None as the VRF name; doing so raises an exception.
OSError: [Errno 255] failed to run command: ip link set dev eth2 master None
(cherry picked from commit e687502b1cf4a3e15c562a3662afcbe0776b1fe7)
# Copyright 2019-2021 VyOS maintainers and contributors <maintainers@vyos.io>
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library. If not, see <http://www.gnu.org/licenses/>.
from netifaces import interfaces
import os
import re
import json
import jmespath
from copy import deepcopy
from glob import glob
from ipaddress import IPv4Network
from netifaces import ifaddresses
# this is not the same as socket.AF_INET/INET6
from netifaces import AF_INET
from netifaces import AF_INET6
from vyos import ConfigError
from vyos.configdict import list_diff
from vyos.configdict import dict_merge
from vyos.configdict import get_vlan_ids
from vyos.template import render
from vyos.util import mac2eui64
from vyos.util import dict_search
from vyos.util import read_file
from vyos.util import get_interface_config
from vyos.util import is_systemd_service_active
from vyos.template import is_ipv4
from vyos.template import is_ipv6
from vyos.validate import is_intf_addr_assigned
from vyos.validate import is_ipv6_link_local
from vyos.validate import assert_boolean
from vyos.validate import assert_list
from vyos.validate import assert_mac
from vyos.validate import assert_mtu
from vyos.validate import assert_positive
from vyos.validate import assert_range
from vyos.ifconfig.control import Control
from vyos.ifconfig.vrrp import VRRP
from vyos.ifconfig.operational import Operational
from vyos.ifconfig import Section
from netaddr import EUI
from netaddr import mac_unix_expanded
class Interface(Control):
    # This is the class which will be used to create
    # self.operational, it allows subclasses, such as
    # WireGuard to modify their display behaviour
    OperationalClass = Operational

    # constructor keyword arguments recognised by every interface class
    options = ['debug', 'create']
    # keyword arguments a subclass may declare as mandatory for creation
    required = []
    # defaults merged into the per-instance configuration dictionary
    default = {
        'debug': True,
        'create': True,
    }
    # static metadata describing this interface class; 'eternal' is a
    # regex of interface names which must never be deleted (see remove())
    definition = {
        'section': '',
        'prefixes': [],
        'vlan': False,
        'bondable': False,
        'broadcast': False,
        'bridgeable': False,
        'eternal': '',
    }

    # getters implemented by running a shell command and post-processing
    # its JSON output with the given 'format' callable
    _command_get = {
        'admin_state': {
            'shellcmd': 'ip -json link show dev {ifname}',
            'format': lambda j: 'up' if 'UP' in jmespath.search('[*].flags | [0]', json.loads(j)) else 'down',
        },
        'alias': {
            'shellcmd': 'ip -json -detail link list dev {ifname}',
            'format': lambda j: jmespath.search('[*].ifalias | [0]', json.loads(j)) or '',
        },
        'mac': {
            'shellcmd': 'ip -json -detail link list dev {ifname}',
            'format': lambda j: jmespath.search('[*].address | [0]', json.loads(j)),
        },
        'min_mtu': {
            'shellcmd': 'ip -json -detail link list dev {ifname}',
            'format': lambda j: jmespath.search('[*].min_mtu | [0]', json.loads(j)),
        },
        'max_mtu': {
            'shellcmd': 'ip -json -detail link list dev {ifname}',
            'format': lambda j: jmespath.search('[*].max_mtu | [0]', json.loads(j)),
        },
        'mtu': {
            'shellcmd': 'ip -json -detail link list dev {ifname}',
            'format': lambda j: jmespath.search('[*].mtu | [0]', json.loads(j)),
        },
        'oper_state': {
            'shellcmd': 'ip -json -detail link list dev {ifname}',
            'format': lambda j: jmespath.search('[*].operstate | [0]', json.loads(j)),
        },
        'vrf': {
            # 'master' is the VRF (or bridge/bond) device this link is
            # enslaved to
            'shellcmd': 'ip -json -detail link list dev {ifname}',
            'format': lambda j: jmespath.search('[*].master | [0]', json.loads(j)),
        },
    }

    # setters implemented by running a shell command; 'validate' rejects
    # bad values, 'convert' rewrites the value before substitution
    _command_set = {
        'admin_state': {
            'validate': lambda v: assert_list(v, ['up', 'down']),
            'shellcmd': 'ip link set dev {ifname} {value}',
        },
        'alias': {
            'convert': lambda name: name if name else '',
            'shellcmd': 'ip link set dev {ifname} alias "{value}"',
        },
        'bridge_port_isolation': {
            'validate': lambda v: assert_list(v, ['on', 'off']),
            'shellcmd': 'bridge link set dev {ifname} isolated {value}',
        },
        'mac': {
            'validate': assert_mac,
            'shellcmd': 'ip link set dev {ifname} address {value}',
        },
        'mtu': {
            'validate': assert_mtu,
            'shellcmd': 'ip link set dev {ifname} mtu {value}',
        },
        'vrf': {
            # an empty/None value unbinds the interface from its master
            'convert': lambda v: f'master {v}' if v else 'nomaster',
            'shellcmd': 'ip link set dev {ifname} {value}',
        },
    }

    # setters implemented by writing the value into a sysfs/procfs node
    _sysfs_set = {
        'arp_cache_tmo': {
            'location': '/proc/sys/net/ipv4/neigh/{ifname}/base_reachable_time_ms',
        },
        'arp_filter': {
            'validate': assert_boolean,
            'location': '/proc/sys/net/ipv4/conf/{ifname}/arp_filter',
        },
        'arp_accept': {
            'validate': lambda arp: assert_range(arp,0,2),
            'location': '/proc/sys/net/ipv4/conf/{ifname}/arp_accept',
        },
        'arp_announce': {
            'validate': assert_boolean,
            'location': '/proc/sys/net/ipv4/conf/{ifname}/arp_announce',
        },
        'arp_ignore': {
            'validate': assert_boolean,
            'location': '/proc/sys/net/ipv4/conf/{ifname}/arp_ignore',
        },
        'ipv4_forwarding': {
            'validate': assert_boolean,
            'location': '/proc/sys/net/ipv4/conf/{ifname}/forwarding',
        },
        'rp_filter': {
            'validate': lambda flt: assert_range(flt,0,3),
            'location': '/proc/sys/net/ipv4/conf/{ifname}/rp_filter',
        },
        'ipv6_accept_ra': {
            'validate': lambda ara: assert_range(ara,0,3),
            'location': '/proc/sys/net/ipv6/conf/{ifname}/accept_ra',
        },
        'ipv6_autoconf': {
            'validate': lambda aco: assert_range(aco,0,2),
            'location': '/proc/sys/net/ipv6/conf/{ifname}/autoconf',
        },
        'ipv6_forwarding': {
            'validate': lambda fwd: assert_range(fwd,0,2),
            'location': '/proc/sys/net/ipv6/conf/{ifname}/forwarding',
        },
        'ipv6_dad_transmits': {
            'validate': assert_positive,
            'location': '/proc/sys/net/ipv6/conf/{ifname}/dad_transmits',
        },
        'path_cost': {
            # XXX: we should set a maximum
            'validate': assert_positive,
            'location': '/sys/class/net/{ifname}/brport/path_cost',
            'errormsg': '{ifname} is not a bridge port member'
        },
        'path_priority': {
            # XXX: we should set a maximum
            'validate': assert_positive,
            'location': '/sys/class/net/{ifname}/brport/priority',
            'errormsg': '{ifname} is not a bridge port member'
        },
        'proxy_arp': {
            'validate': assert_boolean,
            'location': '/proc/sys/net/ipv4/conf/{ifname}/proxy_arp',
        },
        'proxy_arp_pvlan': {
            'validate': assert_boolean,
            'location': '/proc/sys/net/ipv4/conf/{ifname}/proxy_arp_pvlan',
        },
        # link_detect vs link_filter name weirdness
        'link_detect': {
            'validate': lambda link: assert_range(link,0,3),
            'location': '/proc/sys/net/ipv4/conf/{ifname}/link_filter',
        },
    }

    # getters implemented by reading the value back from the same
    # sysfs/procfs nodes as above
    _sysfs_get = {
        'arp_cache_tmo': {
            'location': '/proc/sys/net/ipv4/neigh/{ifname}/base_reachable_time_ms',
        },
        'arp_filter': {
            'location': '/proc/sys/net/ipv4/conf/{ifname}/arp_filter',
        },
        'arp_accept': {
            'location': '/proc/sys/net/ipv4/conf/{ifname}/arp_accept',
        },
        'arp_announce': {
            'location': '/proc/sys/net/ipv4/conf/{ifname}/arp_announce',
        },
        'arp_ignore': {
            'location': '/proc/sys/net/ipv4/conf/{ifname}/arp_ignore',
        },
        'ipv4_forwarding': {
            'location': '/proc/sys/net/ipv4/conf/{ifname}/forwarding',
        },
        'rp_filter': {
            'location': '/proc/sys/net/ipv4/conf/{ifname}/rp_filter',
        },
        'ipv6_accept_ra': {
            'location': '/proc/sys/net/ipv6/conf/{ifname}/accept_ra',
        },
        'ipv6_autoconf': {
            'location': '/proc/sys/net/ipv6/conf/{ifname}/autoconf',
        },
        'ipv6_forwarding': {
            'location': '/proc/sys/net/ipv6/conf/{ifname}/forwarding',
        },
        'ipv6_dad_transmits': {
            'location': '/proc/sys/net/ipv6/conf/{ifname}/dad_transmits',
        },
        'proxy_arp': {
            'location': '/proc/sys/net/ipv4/conf/{ifname}/proxy_arp',
        },
        'proxy_arp_pvlan': {
            'location': '/proc/sys/net/ipv4/conf/{ifname}/proxy_arp_pvlan',
        },
        'link_detect': {
            'location': '/proc/sys/net/ipv4/conf/{ifname}/link_filter',
        },
    }
@classmethod
def exists(cls, ifname):
    """Return True if the kernel knows an interface with the given name,
    i.e. /sys/class/net/<ifname> is present."""
    return os.path.exists(f'/sys/class/net/{ifname}')
@classmethod
def get_config(cls):
    """
    Some but not all interfaces require a configuration when they are added
    using iproute2. This method will provide the configuration dictionary
    used by this class.

    Returns a deep copy of the class defaults so callers can mutate it
    without affecting other instances.
    """
    return deepcopy(cls.default)
def __init__(self, ifname, **kargs):
    """
    This is the base interface class which supports basic IP/MAC address
    operations as well as DHCP(v6). Other interface which represent e.g.
    and ethernet bridge are implemented as derived classes adding all
    additional functionality.

    For creation you will need to provide the interface type, otherwise
    the existing interface is used

    ifname: kernel interface name, e.g. 'eth0'
    kargs:  interface-specific options (see the class 'options' list),
            cached in self.config

    DEBUG:
    This class has embedded debugging (print) which can be enabled by
    creating the following file:
    vyos@vyos# touch /tmp/vyos.ifconfig.debug

    Example:
    >>> from vyos.ifconfig import Interface
    >>> i = Interface('eth0')
    """
    self.config = deepcopy(kargs)
    self.config['ifname'] = self.ifname = ifname
    # ref-counter used by set_admin_state(): the link only goes 'up'
    # once 'up' has been requested as often as 'down'
    self._admin_state_down_cnt = 0

    # we must have updated config before initialising the Interface
    super().__init__(**kargs)

    if not self.exists(ifname):
        # Any instance of Interface, such as Interface('eth0') can be used
        # safely to access the generic function in this class as 'type' is
        # unset, the class can not be created
        if not self.iftype:
            raise Exception(f'interface "{ifname}" not found')
        self.config['type'] = self.iftype

        # Should an Instance of a child class (EthernetIf, DummyIf, ..)
        # be required, then create should be set to False to not accidentally create it.
        # In case a subclass does not define it, we use get to set the default to True
        if self.config.get('create',True):
            for k in self.required:
                # NOTE(review): self.default has no 'type' key in this base
                # class — presumably subclasses provide it; confirm before
                # relying on this error path.
                if k not in kargs:
                    name = self.default['type']
                    raise ConfigError(f'missing required option {k} for {name} {ifname} creation')

            self._create()
        # If we can not connect to the interface then let the caller know
        # as the class could not be correctly initialised
        else:
            raise Exception(f'interface "{ifname}" not found!')

    # temporary list of assigned IP addresses
    self._addr = []

    self.operational = self.OperationalClass(ifname)
    self.vrrp = VRRP(ifname)
def _create(self):
    """Create the kernel device for this interface via iproute2, using the
    'ifname' and 'type' entries of the instance configuration."""
    self._cmd(f'ip link add dev {self.config["ifname"]} type {self.config["type"]}')
def remove(self):
    """
    Remove interface from operating system. Removing the interface
    deconfigures all assigned IP addresses and clear possible DHCP(v6)
    client processes.

    Example:
    >>> from vyos.ifconfig import Interface
    >>> i = Interface('eth0')
    >>> i.remove()
    """
    # remove all assigned IP addresses from interface - this is a bit redundant
    # as the kernel will remove all addresses on interface deletion, but we
    # can not delete ALL interfaces, see below
    self.flush_addrs()

    # ---------------------------------------------------------------------
    # Any class can define an eternal regex in its definition: interfaces
    # matching the regex will not be deleted. Short-circuit: the regex is
    # only evaluated when one is actually defined.
    eternal = self.definition['eternal']
    if not eternal or not re.match(eternal, self.ifname):
        self._delete()
def _delete(self):
    """Delete the kernel device backing this interface."""
    # NOTE (Improvement):
    # after interface removal no other commands should be allowed
    # to be called and instead should raise an Exception:
    return self._cmd(f'ip link del dev {self.config["ifname"]}')
def _set_vrf_ct_zone(self, vrf):
    """
    Add/Remove rules in nftables to associate traffic in VRF to an
    individual conntrack zone.

    vrf: VRF name to bind this interface to; a falsy value removes the
         interface's map entry instead.
    """
    if vrf:
        # Get routing table ID for VRF
        vrf_table_id = get_interface_config(vrf).get('linkinfo', {}).get(
            'info_data', {}).get('table')
        # Add map element with interface and zone ID
        if vrf_table_id:
            self._cmd(f'nft add element inet vrf_zones ct_iface_map {{ "{self.ifname}" : {vrf_table_id} }}')
    else:
        nft_del_element = f'delete element inet vrf_zones ct_iface_map {{ "{self.ifname}" }}'
        # Check if deleting is possible first to avoid raising errors
        # ('nft -c' is a dry-run)
        _, err = self._popen(f'nft -c {nft_del_element}')
        if not err:
            # Remove map element
            self._cmd(f'nft {nft_del_element}')
def get_min_mtu(self):
    """
    Get hardware minimum supported MTU as an integer.

    Example:
    >>> from vyos.ifconfig import Interface
    >>> Interface('eth0').get_min_mtu()
    60
    """
    return int(self.get_interface('min_mtu'))
def get_max_mtu(self):
    """
    Get hardware maximum supported MTU as an integer.

    Example:
    >>> from vyos.ifconfig import Interface
    >>> Interface('eth0').get_max_mtu()
    9000
    """
    return int(self.get_interface('max_mtu'))
def get_mtu(self):
    """
    Get the currently configured interface MTU in bytes as an integer.

    Example:
    >>> from vyos.ifconfig import Interface
    >>> Interface('eth0').get_mtu()
    1500
    """
    return int(self.get_interface('mtu'))
def set_mtu(self, mtu):
    """
    Set interface MTU (maximum transmission unit) in bytes.

    The value is only written when it differs from the currently active
    MTU; the result of the underlying set is returned in that case,
    otherwise None.

    Example:
    >>> from vyos.ifconfig import Interface
    >>> Interface('eth0').set_mtu(1400)
    >>> Interface('eth0').get_mtu()
    1400
    """
    tmp = self.get_interface('mtu')
    # Compare both sides as strings: the kernel reports an int while
    # callers may pass an int or a str (config values are strings). The
    # previous 'str(tmp) == mtu' never matched for int input, causing a
    # redundant re-set on every call.
    if str(tmp) == str(mtu):
        return None
    return self.set_interface('mtu', mtu)
def get_mac(self):
    """
    Get current interface MAC (Media Access Control) address used.

    Example:
    >>> from vyos.ifconfig import Interface
    >>> Interface('eth0').get_mac()
    '00:50:ab:cd:ef:00'
    """
    return self.get_interface('mac')
def get_mac_synthetic(self):
    """
    Get a synthetic MAC address. This is a common method which can be called
    from derived classes to overwrite the get_mac() call in a generic way.

    NOTE: Tunnel interfaces have no "MAC" address by default. The content
          of the 'address' file in /sys/class/net/device contains the
          local-ip thus we generate a random MAC address instead

    The result is deterministic for a given host and interface name: it
    is derived from the CPU ID, the eth0 MAC address and this interface's
    name.

    Example:
    >>> from vyos.ifconfig import Interface
    >>> Interface('eth0').get_mac()
    '00:50:ab:cd:ef:00'
    """
    from hashlib import sha256

    # Get processor ID number
    cpu_id = self._cmd('sudo dmidecode -t 4 | grep ID | head -n1 | sed "s/.*ID://;s/ //g"')
    # Get system eth0 base MAC address - every system has eth0
    eth0_mac = Interface('eth0').get_mac()

    sha = sha256()
    # Calculate SHA256 sum based on the CPU ID number, eth0 mac address and
    # this interface identifier - this is as predictable as an interface
    # MAC address and thus can be used in the same way
    sha.update(cpu_id.encode())
    sha.update(eth0_mac.encode())
    sha.update(self.ifname.encode())

    # take the most significant 48 bits from the SHA256 string
    tmp = sha.hexdigest()[:12]
    # Convert pseudo random string into EUI format which now represents a
    # MAC address
    tmp = EUI(tmp).value
    # set locally administered bit in MAC address
    # NOTE(review): the 0xf2 mask sets several high bits, not only the
    # locally-administered bit (0x02) — confirm this widening is intended.
    tmp |= 0xf20000000000
    # convert integer to "real" MAC address representation
    mac = EUI(hex(tmp).split('x')[-1])
    # change dialect to use : as delimiter instead of -
    mac.dialect = mac_unix_expanded
    return str(mac)
def set_mac(self, mac):
    """
    Set interface MAC (Media Access Control) address to given value.

    Example:
    >>> from vyos.ifconfig import Interface
    >>> Interface('eth0').set_mac('00:50:ab:cd:ef:01')
    """
    # Nothing to do when the requested address is already in use
    if self.get_mac() == mac:
        return None

    # The kernel only accepts a MAC change while the link is
    # administratively 'down' - remember the state so it can be restored
    was_up = self.get_admin_state() == 'up'
    if was_up:
        self.set_admin_state('down')

    self.set_interface('mac', mac)

    # Bring the interface back up if this function took it down
    if was_up:
        self.set_admin_state('up')
def set_vrf(self, vrf):
    """
    Add/Remove interface from given VRF instance.

    Example:
    >>> from vyos.ifconfig import Interface
    >>> Interface('eth0').set_vrf('foo')
    >>> Interface('eth0').set_vrf()
    """
    # bail out early when the VRF binding is unchanged
    if self.get_interface('vrf') == vrf:
        return None
    self.set_interface('vrf', vrf)
    self._set_vrf_ct_zone(vrf)
def set_arp_cache_tmo(self, tmo):
    """
    Set ARP cache timeout value in seconds. Internal Kernel representation
    is in milliseconds.

    Example:
    >>> from vyos.ifconfig import Interface
    >>> Interface('eth0').set_arp_cache_tmo(40)
    """
    # CLI value is seconds, the kernel expects milliseconds
    tmo = str(int(tmo) * 1000)
    if self.get_interface('arp_cache_tmo') == tmo:
        return None
    return self.set_interface('arp_cache_tmo', tmo)
def set_tcp_ipv4_mss(self, mss):
    """
    Set IPv4 TCP MSS value advertised when TCP SYN packets leave this
    interface. Value is in bytes.

    A value of 0 will disable the MSS adjustment

    Example:
    >>> from vyos.ifconfig import Interface
    >>> Interface('eth0').set_tcp_ipv4_mss(1340)
    """
    iptables_bin = 'iptables'
    base_options = f'-A FORWARD -o {self.ifname} -p tcp -m tcp --tcp-flags SYN,RST SYN'
    # Scan the saved mangle table and delete any previously installed MSS
    # rule for this interface before (re)adding the new one below
    out = self._cmd(f'{iptables_bin}-save -t mangle')
    for line in out.splitlines():
        if line.startswith(base_options):
            # remove OLD MSS mangling configuration
            line = line.replace('-A FORWARD', '-D FORWARD')
            self._cmd(f'{iptables_bin} -t mangle {line}')

    cmd_mss = f'{iptables_bin} -t mangle {base_options} --jump TCPMSS'
    if mss == 'clamp-mss-to-pmtu':
        self._cmd(f'{cmd_mss} --clamp-mss-to-pmtu')
    elif int(mss) > 0:
        # probably add option to clamp only if bigger:
        # only rewrite SYN segments whose MSS exceeds the configured value
        low_mss = str(int(mss) + 1)
        self._cmd(f'{cmd_mss} -m tcpmss --mss {low_mss}:65535 --set-mss {mss}')
def set_tcp_ipv6_mss(self, mss):
    """
    Set IPv6 TCP MSS value advertised when TCP SYN packets leave this
    interface. Value is in bytes.

    A value of 0 will disable the MSS adjustment

    Example:
    >>> from vyos.ifconfig import Interface
    >>> Interface('eth0').set_tcp_ipv6_mss(1320)
    """
    iptables_bin = 'ip6tables'
    base_options = f'-A FORWARD -o {self.ifname} -p tcp -m tcp --tcp-flags SYN,RST SYN'
    # Scan the saved mangle table and delete any previously installed MSS
    # rule for this interface before (re)adding the new one below
    out = self._cmd(f'{iptables_bin}-save -t mangle')
    for line in out.splitlines():
        if line.startswith(base_options):
            # remove OLD MSS mangling configuration
            line = line.replace('-A FORWARD', '-D FORWARD')
            self._cmd(f'{iptables_bin} -t mangle {line}')

    cmd_mss = f'{iptables_bin} -t mangle {base_options} --jump TCPMSS'
    if mss == 'clamp-mss-to-pmtu':
        self._cmd(f'{cmd_mss} --clamp-mss-to-pmtu')
    elif int(mss) > 0:
        # probably add option to clamp only if bigger:
        # only rewrite SYN segments whose MSS exceeds the configured value
        low_mss = str(int(mss) + 1)
        self._cmd(f'{cmd_mss} -m tcpmss --mss {low_mss}:65535 --set-mss {mss}')
def set_arp_filter(self, arp_filter):
    """
    Filter ARP requests

    1 - Allows you to have multiple network interfaces on the same
        subnet, and have the ARPs for each interface be answered
        based on whether or not the kernel would route a packet from
        the ARP'd IP out that interface (therefore you must use source
        based routing for this to work). In other words it allows control
        of which cards (usually 1) will respond to an arp request.

    0 - (default) The kernel can respond to arp requests with addresses
        from other interfaces. This may seem wrong but it usually makes
        sense, because it increases the chance of successful communication.
        IP addresses are owned by the complete host on Linux, not by
        particular interfaces. Only for more complex setups like load-
        balancing, does this behaviour cause problems.
    """
    if self.get_interface('arp_filter') == arp_filter:
        return None
    return self.set_interface('arp_filter', arp_filter)
def set_arp_accept(self, arp_accept):
    """
    Define behavior for gratuitous ARP frames who's IP is not
    already present in the ARP table:
    0 - don't create new entries in the ARP table
    1 - create new entries in the ARP table

    Both replies and requests type gratuitous arp will trigger the
    ARP table to be updated, if this setting is on.

    If the ARP table already contains the IP address of the
    gratuitous arp frame, the arp table will be updated regardless
    if this setting is on or off.
    """
    if self.get_interface('arp_accept') == arp_accept:
        return None
    return self.set_interface('arp_accept', arp_accept)
def set_arp_announce(self, arp_announce):
    """
    Define different restriction levels for announcing the local
    source IP address from IP packets in ARP requests sent on
    interface:
    0 - (default) Use any local address, configured on any interface
    1 - Try to avoid local addresses that are not in the target's
        subnet for this interface. This mode is useful when target
        hosts reachable via this interface require the source IP
        address in ARP requests to be part of their logical network
        configured on the receiving interface. When we generate the
        request we will check all our subnets that include the
        target IP and will preserve the source address if it is from
        such subnet.

    Increasing the restriction level gives more chance for
    receiving answer from the resolved target while decreasing
    the level announces more valid sender's information.
    """
    if self.get_interface('arp_announce') == arp_announce:
        return None
    return self.set_interface('arp_announce', arp_announce)
def set_arp_ignore(self, arp_ignore):
    """
    Define different modes for sending replies in response to received ARP
    requests that resolve local target IP addresses:

    0 - (default): reply for any local target IP address, configured
        on any interface
    1 - reply only if the target IP address is local address
        configured on the incoming interface
    """
    if self.get_interface('arp_ignore') == arp_ignore:
        return None
    return self.set_interface('arp_ignore', arp_ignore)
def set_ipv4_forwarding(self, forwarding):
    """ Configure IPv4 forwarding. """
    if self.get_interface('ipv4_forwarding') == forwarding:
        return None
    return self.set_interface('ipv4_forwarding', forwarding)
def set_ipv4_source_validation(self, value):
    """
    Help prevent attacks used by Spoofing IP Addresses. Reverse path
    filtering is a Kernel feature that, when enabled, is designed to ensure
    packets that are not routable to be dropped. The easiest example of this
    would be and IP Address of the range 10.0.0.0/8, a private IP Address,
    being received on the Internet facing interface of the router.

    As per RFC3074.

    :param value: 'strict', 'loose' or anything else to disable rp_filter
    """
    # Map the CLI keyword onto the kernel rp_filter sysctl value
    if value == 'strict':
        value = 1
    elif value == 'loose':
        value = 2
    else:
        value = 0

    # A stricter global setting overrides the per-interface one - warn
    # the user so the resulting behavior is not a surprise
    all_rp_filter = int(read_file('/proc/sys/net/ipv4/conf/all/rp_filter'))
    if all_rp_filter > value:
        global_setting = 'disable'
        if all_rp_filter == 1: global_setting = 'strict'
        elif all_rp_filter == 2: global_setting = 'loose'
        # BUGFIX: the newline used to sit *inside* the quoted value and the
        # two message parts were joined without a space
        print(f'WARNING: Global source-validation is set to "{global_setting}" '
              'this overrides per interface setting!\n')

    tmp = self.get_interface('rp_filter')
    if int(tmp) == value:
        return None
    return self.set_interface('rp_filter', value)
def set_ipv6_accept_ra(self, accept_ra):
    """
    Accept Router Advertisements; autoconfigure using them.

    It also determines whether or not to transmit Router Solicitations.
    If and only if the functional setting is to accept Router
    Advertisements, Router Solicitations will be transmitted.

    0 - Do not accept Router Advertisements.
    1 - (default) Accept Router Advertisements if forwarding is disabled.
    2 - Overrule forwarding behaviour. Accept Router Advertisements even if
        forwarding is enabled.
    """
    if self.get_interface('ipv6_accept_ra') == accept_ra:
        return None
    return self.set_interface('ipv6_accept_ra', accept_ra)
def set_ipv6_autoconf(self, autoconf):
    """
    Autoconfigure addresses using Prefix Information in Router
    Advertisements.
    """
    if self.get_interface('ipv6_autoconf') == autoconf:
        return None
    return self.set_interface('ipv6_autoconf', autoconf)
def add_ipv6_eui64_address(self, prefix):
    """
    Extended Unique Identifier (EUI), as per RFC2373, allows a host to
    assign itself a unique IPv6 address based on a given IPv6 prefix.

    Calculate the EUI64 from the interface's MAC, then assign it
    with the given prefix to the interface.
    """
    # T2863: only add a link-local IPv6 address if the interface returns
    # a MAC address. This is not the case on e.g. WireGuard interfaces.
    mac = self.get_mac()
    if not mac:
        return
    prefixlen = prefix.split('/')[1]
    eui64 = mac2eui64(mac, prefix)
    self.add_addr(f'{eui64}/{prefixlen}')
def del_ipv6_eui64_address(self, prefix):
    """
    Delete the address based on the interface's MAC-based EUI64
    combined with the prefix address.
    """
    if is_ipv6(prefix):
        # T2863: mirror add_ipv6_eui64_address() - interfaces without a
        # MAC address (e.g. WireGuard/tunnels) have no EUI64 address to
        # delete, and mac2eui64() would fail on an empty MAC
        mac = self.get_mac()
        if mac:
            eui64 = mac2eui64(mac, prefix)
            prefixlen = prefix.split('/')[1]
            self.del_addr(f'{eui64}/{prefixlen}')
def set_ipv6_forwarding(self, forwarding):
    """
    Configure IPv6 interface-specific Host/Router behaviour.

    False:

    By default, Host behaviour is assumed.  This means:

    1. IsRouter flag is not set in Neighbour Advertisements.
    2. If accept_ra is TRUE (default), transmit Router
       Solicitations.
    3. If accept_ra is TRUE (default), accept Router
       Advertisements (and do autoconfiguration).
    4. If accept_redirects is TRUE (default), accept Redirects.

    True:

    If local forwarding is enabled, Router behaviour is assumed.
    This means exactly the reverse from the above:

    1. IsRouter flag is set in Neighbour Advertisements.
    2. Router Solicitations are not sent unless accept_ra is 2.
    3. Router Advertisements are ignored unless accept_ra is 2.
    4. Redirects are ignored.
    """
    if self.get_interface('ipv6_forwarding') == forwarding:
        return None
    return self.set_interface('ipv6_forwarding', forwarding)
def set_ipv6_dad_messages(self, dad):
    """
    The amount of Duplicate Address Detection probes to send.

    Default: 1
    """
    if self.get_interface('ipv6_dad_transmits') == dad:
        return None
    return self.set_interface('ipv6_dad_transmits', dad)
def set_link_detect(self, link_filter):
    """
    Configure kernel response in packets received on interfaces that are 'down'

    0 - Allow packets to be received for the address on this interface
        even if interface is disabled or no carrier.
    1 - Ignore packets received if interface associated with the incoming
        address is down.
    2 - Ignore packets received if interface associated with the incoming
        address is down or has no carrier.

    Default value is 0. Note that some distributions enable it in startup
    scripts.

    Example:
    >>> from vyos.ifconfig import Interface
    >>> Interface('eth0').set_link_detect(1)
    """
    if self.get_interface('link_detect') == link_filter:
        return None
    return self.set_interface('link_detect', link_filter)
def get_alias(self):
    """
    Get interface alias name used by e.g. SNMP

    Example:
    >>> Interface('eth0').get_alias()
    'interface description as set by user'
    """
    return self.get_interface('alias')
def set_alias(self, ifalias=''):
    """
    Set interface alias name used by e.g. SNMP

    Example:
    >>> from vyos.ifconfig import Interface
    >>> Interface('eth0').set_alias('VyOS upstream interface')

    to clear the alias, pass an empty string:
    >>> Interface('eth0').set_alias('')
    """
    if self.get_interface('alias') == ifalias:
        return None
    self.set_interface('alias', ifalias)
def get_admin_state(self):
    """
    Get interface administrative state. Function will return 'up' or 'down'

    Example:
    >>> from vyos.ifconfig import Interface
    >>> Interface('eth0').get_admin_state()
    'up'
    """
    return self.get_interface('admin_state')
def set_admin_state(self, state):
    """
    Set interface administrative state to be 'up' or 'down'

    The state is reference counted: every 'down' call increments an
    internal counter and every 'up' call decrements it. The link is only
    really brought up once the counter drops below one, so nested code
    paths can temporarily disable an interface without flapping the link.

    Example:
    >>> from vyos.ifconfig import Interface
    >>> Interface('eth0').set_admin_state('down')
    >>> Interface('eth0').get_admin_state()
    'down'
    """
    if state == 'up':
        self._admin_state_down_cnt -= 1
        # only enable the link when no other caller still holds it down
        if self._admin_state_down_cnt < 1:
            return self.set_interface('admin_state', state)
    else:
        self._admin_state_down_cnt += 1
        return self.set_interface('admin_state', state)
def set_path_cost(self, cost):
    """
    Set interface path cost, only relevant for STP enabled interfaces

    Example:
    >>> from vyos.ifconfig import Interface
    >>> Interface('eth0').set_path_cost(4)
    """
    self.set_interface('path_cost', cost)
def set_path_priority(self, priority):
    """
    Set interface path priority, only relevant for STP enabled interfaces

    Example:
    >>> from vyos.ifconfig import Interface
    >>> Interface('eth0').set_path_priority(4)
    """
    self.set_interface('path_priority', priority)
def set_port_isolation(self, on_or_off):
    """
    Controls whether a given port will be isolated, which means it will be
    able to communicate with non-isolated ports only. By default this flag
    is off.

    Use 'on' to enable or 'off' to disable

    Example:
    >>> from vyos.ifconfig import Interface
    >>> Interface('eth1').set_port_isolation('on')
    """
    self.set_interface('bridge_port_isolation', on_or_off)
def set_proxy_arp(self, enable):
    """
    Set per interface proxy ARP configuration

    Example:
    >>> from vyos.ifconfig import Interface
    >>> Interface('eth0').set_proxy_arp(1)
    """
    if self.get_interface('proxy_arp') == enable:
        return None
    self.set_interface('proxy_arp', enable)
def set_proxy_arp_pvlan(self, enable):
    """
    Private VLAN proxy arp.

    Basically allow proxy arp replies back to the same interface
    (from which the ARP request/solicitation was received).

    This is done to support (ethernet) switch features, like RFC
    3069, where the individual ports are NOT allowed to
    communicate with each other, but they are allowed to talk to
    the upstream router. As described in RFC 3069, it is possible
    to allow these hosts to communicate through the upstream
    router by proxy_arp'ing. Don't need to be used together with
    proxy_arp.

    This technology is known by different names:
    In RFC 3069 it is called VLAN Aggregation.
    Cisco and Allied Telesyn call it Private VLAN.
    Hewlett-Packard call it Source-Port filtering or port-isolation.
    Ericsson call it MAC-Forced Forwarding (RFC Draft).

    Example:
    >>> from vyos.ifconfig import Interface
    >>> Interface('eth0').set_proxy_arp_pvlan(1)
    """
    if self.get_interface('proxy_arp_pvlan') == enable:
        return None
    self.set_interface('proxy_arp_pvlan', enable)
def get_addr_v4(self):
    """
    Retrieve assigned IPv4 addresses from given interface.
    This is done using the netifaces and ipaddress python modules.

    Example:
    >>> from vyos.ifconfig import Interface
    >>> Interface('eth0').get_addr_v4()
    ['172.16.33.30/24']
    """
    addresses = []
    ifname = self.config['ifname']
    if AF_INET in ifaddresses(ifname):
        for entry in ifaddresses(ifname)[AF_INET]:
            # netifaces reports a dotted-quad netmask - convert it to a
            # CIDR prefix length to assemble address/prefix notation
            prefixlen = IPv4Network('0.0.0.0/' + entry['netmask']).prefixlen
            addresses.append(entry['addr'] + '/' + str(prefixlen))
    return addresses
def get_addr_v6(self):
    """
    Retrieve assigned IPv6 addresses from given interface.
    This is done using the netifaces and ipaddress python modules.

    Example:
    >>> from vyos.ifconfig import Interface
    >>> Interface('eth0').get_addr_v6()
    ['fe80::20c:29ff:fe11:a174/64']
    """
    addresses = []
    ifname = self.config['ifname']
    if AF_INET6 in ifaddresses(ifname):
        for entry in ifaddresses(ifname)[AF_INET6]:
            # Note that currently expanded netmasks are not supported. That means
            # 2001:db00::0/24 is a valid argument while 2001:db00::0/ffff:ff00:: not.
            # see https://docs.python.org/3/library/ipaddress.html
            prefix = '/' + entry['netmask'].split('/')[-1]
            # we also need to remove the interface suffix on link local
            # addresses
            addr = entry['addr'].split('%')[0]
            addresses.append(addr + prefix)
    return addresses
def get_addr(self):
    """
    Retrieve assigned IPv4 and IPv6 addresses from given interface.

    Example:
    >>> from vyos.ifconfig import Interface
    >>> Interface('eth0').get_addr()
    ['172.16.33.30/24', 'fe80::20c:29ff:fe11:a174/64']
    """
    return self.get_addr_v4() + self.get_addr_v6()
def add_addr(self, addr):
    """
    Add IP(v6) address to interface. Address is only added if it is not
    already assigned to that interface. Address format must be validated
    and compressed/normalized before calling this function.

    addr: can be an IPv4 address, IPv6 address, dhcp or dhcpv6!
          IPv4: add IPv4 address to interface
          IPv6: add IPv6 address to interface
          dhcp: start dhclient (IPv4) on interface
          dhcpv6: start WIDE DHCPv6 (IPv6) on interface

    Returns False if address is already assigned and wasn't re-added.

    Example:
    >>> from vyos.ifconfig import Interface
    >>> j = Interface('eth0')
    >>> j.add_addr('192.0.2.1/24')
    >>> j.add_addr('2001:db8::ffff/64')
    >>> j.get_addr()
    ['192.0.2.1/24', '2001:db8::ffff/64']
    """
    # XXX: normalize/compress with ipaddress if calling functions don't?
    # is subnet mask always passed, and in the same way?

    # do not add same address twice
    if addr in self._addr:
        return False

    addr_is_v4 = is_ipv4(addr)

    # we can't have both DHCP and static IPv4 addresses assigned
    # (mixing DHCPv6 with static IPv4, or DHCP with IPv6, is fine)
    for a in self._addr:
        if ( ( addr == 'dhcp' and a != 'dhcpv6' and is_ipv4(a) ) or
                ( a == 'dhcp' and addr != 'dhcpv6' and addr_is_v4 ) ):
            raise ConfigError((
                "Can't configure both static IPv4 and DHCP address "
                "on the same interface"))

    # add to interface
    if addr == 'dhcp':
        self.set_dhcp(True)
    elif addr == 'dhcpv6':
        self.set_dhcpv6(True)
    elif not is_intf_addr_assigned(self.ifname, addr):
        # 'brd +' derives the broadcast address from the prefix (IPv4 only)
        self._cmd(f'ip addr add "{addr}" '
                f'{"brd + " if addr_is_v4 else ""}dev "{self.ifname}"')
    else:
        return False

    # add to cache
    self._addr.append(addr)

    return True
def del_addr(self, addr):
    """
    Delete IP(v6) address from interface. Address is only deleted if it is
    assigned to that interface. Address format must be exactly the same as
    was used when adding the address.

    addr: can be an IPv4 address, IPv6 address, dhcp or dhcpv6!
          IPv4: delete IPv4 address from interface
          IPv6: delete IPv6 address from interface
          dhcp: stop dhclient (IPv4) on interface
          dhcpv6: stop dhclient (IPv6) on interface

    Returns False if address isn't already assigned and wasn't deleted.

    Example:
    >>> from vyos.ifconfig import Interface
    >>> j = Interface('eth0')
    >>> j.add_addr('2001:db8::ffff/64')
    >>> j.add_addr('192.0.2.1/24')
    >>> j.get_addr()
    ['192.0.2.1/24', '2001:db8::ffff/64']
    >>> j.del_addr('192.0.2.1/24')
    >>> j.get_addr()
    ['2001:db8::ffff/64']
    """
    if not addr:
        raise ValueError()

    # remove from interface
    if addr == 'dhcp':
        self.set_dhcp(False)
    elif addr == 'dhcpv6':
        self.set_dhcpv6(False)
    elif is_intf_addr_assigned(self.ifname, addr):
        self._cmd(f'ip addr del "{addr}" dev "{self.ifname}"')
    else:
        return False

    # remove from cache
    # (DHCP entries may be present in the cache even when the clients were
    # already stopped, hence the membership check)
    if addr in self._addr:
        self._addr.remove(addr)

    return True
def flush_addrs(self):
    """
    Flush all addresses from an interface, including DHCP.

    Will raise an exception on error.
    """
    # make sure neither DHCP client re-adds an address afterwards
    self.set_dhcp(False)
    self.set_dhcpv6(False)
    # drop every remaining address from the kernel
    self._cmd(f'ip addr flush dev "{self.ifname}"')
def add_to_bridge(self, bridge_dict):
    """
    Adds the interface to the bridge with the passed port config.

    :param bridge_dict: mapping of bridge name -> per-port bridge config
                        (keys used: 'cost', 'priority', 'native_vlan',
                        'allowed_vlan')
    """
    # drop all interface addresses first
    self.flush_addrs()

    ifname = self.ifname

    for bridge, bridge_config in bridge_dict.items():
        # add interface to bridge - use Section.klass to get BridgeIf class
        Section.klass(bridge)(bridge, create=True).add_port(self.ifname)

        # set bridge port path cost
        if 'cost' in bridge_config:
            self.set_path_cost(bridge_config['cost'])

        # set bridge port path priority
        # BUGFIX: this used to call set_path_cost(), clobbering the port
        # cost with the priority value and never setting the priority
        if 'priority' in bridge_config:
            self.set_path_priority(bridge_config['priority'])

        bridge_vlan_filter = Section.klass(bridge)(bridge, create=True).get_vlan_filter()

        if int(bridge_vlan_filter):
            cur_vlan_ids = get_vlan_ids(ifname)
            add_vlan = []
            native_vlan_id = None
            allowed_vlan_ids= []

            if 'native_vlan' in bridge_config:
                vlan_id = bridge_config['native_vlan']
                add_vlan.append(vlan_id)
                native_vlan_id = vlan_id

            if 'allowed_vlan' in bridge_config:
                for vlan in bridge_config['allowed_vlan']:
                    vlan_range = vlan.split('-')
                    if len(vlan_range) == 2:
                        # expand 'A-B' ranges into individual VLAN IDs
                        for vlan_add in range(int(vlan_range[0]),int(vlan_range[1]) + 1):
                            add_vlan.append(str(vlan_add))
                            allowed_vlan_ids.append(str(vlan_add))
                    else:
                        add_vlan.append(vlan)
                        allowed_vlan_ids.append(vlan)

            # Remove redundant VLANs from the system
            for vlan in list_diff(cur_vlan_ids, add_vlan):
                cmd = f'bridge vlan del dev {ifname} vid {vlan} master'
                self._cmd(cmd)

            for vlan in allowed_vlan_ids:
                cmd = f'bridge vlan add dev {ifname} vid {vlan} master'
                self._cmd(cmd)

            # Setting native VLAN to system
            if native_vlan_id:
                cmd = f'bridge vlan add dev {ifname} vid {native_vlan_id} pvid untagged master'
                self._cmd(cmd)
def set_dhcp(self, enable):
    """
    Enable/Disable DHCP client on a given interface.

    :param enable: True to (re)start the dhclient systemd unit,
                   False to stop it and remove its generated files
    """
    if enable not in [True, False]:
        raise ValueError()

    ifname = self.ifname
    config_base = r'/var/lib/dhcp/dhclient'
    config_file = f'{config_base}_{ifname}.conf'
    options_file = f'{config_base}_{ifname}.options'
    pid_file = f'{config_base}_{ifname}.pid'
    lease_file = f'{config_base}_{ifname}.leases'

    # Stop client with old config files to get the right IF_METRIC.
    systemd_service = f'dhclient@{ifname}.service'
    if is_systemd_service_active(systemd_service):
        self._cmd(f'systemctl stop {systemd_service}')

    if enable and 'disable' not in self._config:
        # dhclient sends a host-name option; fall back to the system
        # hostname when the CLI did not configure one
        if dict_search('dhcp_options.host_name', self._config) == None:
            # read configured system hostname.
            # maybe change to vyos hostd client ???
            hostname = 'vyos'
            with open('/etc/hostname', 'r') as f:
                hostname = f.read().rstrip('\n')
            tmp = {'dhcp_options' : { 'host_name' : hostname}}
            self._config = dict_merge(tmp, self._config)

        render(options_file, 'dhcp-client/daemon-options.tmpl',
               self._config)
        render(config_file, 'dhcp-client/ipv4.tmpl',
               self._config)

        # 'up' check is mandatory b/c even if the interface is A/D, as soon as
        # the DHCP client is started the interface will be placed in u/u state.
        # This is not what we intended to do when disabling an interface.
        return self._cmd(f'systemctl restart {systemd_service}')
    else:
        # cleanup old config files
        for file in [config_file, options_file, pid_file, lease_file]:
            if os.path.isfile(file):
                os.remove(file)
def set_dhcpv6(self, enable):
    """
    Enable/Disable DHCPv6 client on a given interface.
    """
    if enable not in (True, False):
        raise ValueError()

    ifname = self.ifname
    config_file = f'/run/dhcp6c/dhcp6c.{ifname}.conf'
    systemd_service = f'dhcp6c@{ifname}.service'

    # Disable path: stop the client (if running) and clean up its config
    if not enable or 'disable' in self._config:
        if is_systemd_service_active(systemd_service):
            self._cmd(f'systemctl stop {systemd_service}')
        if os.path.isfile(config_file):
            os.remove(config_file)
        return None

    render(config_file, 'dhcp-client/ipv6.tmpl',
           self._config)

    # We must ignore any return codes. This is required to enable
    # DHCPv6-PD for interfaces which are yet not up and running.
    return self._popen(f'systemctl restart {systemd_service}')
def set_mirror(self):
    # Configure port mirroring (SPAN) for this interface via tc.
    # Please refer to the document for details
    #   - https://man7.org/linux/man-pages/man8/tc.8.html
    #   - https://man7.org/linux/man-pages/man8/tc-mirred.8.html
    # Depening if we are the source or the target interface of the port
    # mirror we need to setup some variables.
    source_if = self._config['ifname']

    config = self._config.get('mirror', None)
    if 'is_mirror_intf' in self._config:
        source_if = next(iter(self._config['is_mirror_intf']))
        config = self._config['is_mirror_intf'][source_if].get('mirror', None)

    # Check configuration stored by old perl code before delete T3782
    if not 'redirect' in self._config:
        # Please do not clear the 'set $? = 0 '. It's meant to force a return of 0
        # Remove existing mirroring rules
        delete_tc_cmd  = f'tc qdisc del dev {source_if} handle ffff: ingress 2> /dev/null;'
        delete_tc_cmd += f'tc qdisc del dev {source_if} handle 1: root prio 2> /dev/null;'
        delete_tc_cmd += 'set $?=0'
        self._popen(delete_tc_cmd)

    # Bail out early if nothing needs to be configured
    if not config:
        return

    for direction, mirror_if in config.items():
        # silently skip mirror targets that do not (yet) exist
        if mirror_if not in interfaces():
            continue

        if direction == 'ingress':
            handle = 'ffff: ingress'
            parent = 'ffff:'
        elif direction == 'egress':
            handle = '1: root prio'
            parent = '1:'

        # Mirror egress traffic
        mirror_cmd  = f'tc qdisc add dev {source_if} handle {handle}; '
        # Export the mirrored traffic to the interface
        mirror_cmd += f'tc filter add dev {source_if} parent {parent} protocol all prio 10 u32 match u32 0 0 flowid 1:1 action mirred egress mirror dev {mirror_if}'
        self._popen(mirror_cmd)
def set_xdp(self, state):
    """
    Enable Kernel XDP support. State can be either True or False.

    Example:
    >>> from vyos.ifconfig import Interface
    >>> i = Interface('eth0')
    >>> i.set_xdp(True)
    """
    if not isinstance(state, bool):
        raise ValueError("Value out of range")

    # https://phabricator.vyos.net/T3448 - there is (yet) no RPI support for XDP
    if not os.path.exists('/usr/sbin/xdp_loader'):
        return

    ifname = self.config['ifname']
    if state:
        # Using 'xdp' will automatically decide if the driver supports
        # 'xdpdrv' or only 'xdpgeneric'. A user later sees which driver is
        # actually in use by calling 'ip a' or 'show interfaces ethernet'
        cmd = f'xdp_loader -d {ifname} --auto-mode -F --progsec xdp_router ' \
              f'--filename /usr/share/vyos/xdp/xdp_prog_kern.o && ' \
              f'xdp_prog_user -d {ifname}'
    else:
        # unload any attached program
        cmd = f'xdp_loader -d {ifname} -U --auto-mode'
    return self._cmd(cmd)
def update(self, config):
    """ General helper function which works on a dictionary retrieved by
    get_config_dict(). It's main intention is to consolidate the scattered
    interface setup code and provide a single point of entry when working
    on any interface.

    Applies (in order): MAC, alias, link-detect, addresses/DHCP,
    VRF/MSS/ARP/forwarding sysctls, MTU, EUI64 addresses, bridge
    membership, XDP, mirroring, admin state and finally all VLAN
    sub-interfaces (vif-s/vif-c and vif). """
    if self.debug:
        import pprint
        pprint.pprint(config)

    # Cache the configuration - it will be reused inside e.g. DHCP handler
    # XXX: maybe pass the option via __init__ in the future and rename this
    # method to apply()?
    self._config = config

    # Change interface MAC address - re-set to real hardware address (hw-id)
    # if custom mac is removed. Skip if bond member.
    if 'is_bond_member' not in config:
        mac = config.get('hw_id')
        if 'mac' in config:
            mac = config.get('mac')
        if mac:
            self.set_mac(mac)

    # Update interface description
    self.set_alias(config.get('description', ''))

    # Ignore link state changes
    value = '2' if 'disable_link_detect' in config else '1'
    self.set_link_detect(value)

    # Configure assigned interface IP addresses. No longer
    # configured addresses will be removed first
    new_addr = config.get('address', [])

    # always ensure DHCP client is stopped (when not configured explicitly)
    if 'dhcp' not in new_addr:
        self.del_addr('dhcp')

    # always ensure DHCPv6 client is stopped (when not configured as client
    # for IPv6 address or prefix delegation)
    dhcpv6pd = dict_search('dhcpv6_options.pd', config)
    dhcpv6pd = dhcpv6pd != None and len(dhcpv6pd) != 0
    if 'dhcpv6' not in new_addr and not dhcpv6pd:
        self.del_addr('dhcpv6')

    # determine IP addresses which are assigned to the interface and build a
    # list of addresses which are no longer in the dict so they can be removed
    if 'address_old' in config:
        for addr in list_diff(config['address_old'], new_addr):
            # we will delete all interface specific IP addresses if they are not
            # explicitly configured on the CLI
            if is_ipv6_link_local(addr):
                # never delete the interface's own EUI64 link-local address
                eui64 = mac2eui64(self.get_mac(), 'fe80::/64')
                if addr != f'{eui64}/64':
                    self.del_addr(addr)
            else:
                self.del_addr(addr)

    for addr in new_addr:
        self.add_addr(addr)

    # start DHCPv6 client when only PD was configured
    if dhcpv6pd:
        self.set_dhcpv6(True)

    # There are some items in the configuration which can only be applied
    # if this instance is not bound to a bridge. This should be checked
    # by the caller but better save then sorry!
    if not any(k in ['is_bond_member', 'is_bridge_member'] for k in config):
        # Bind interface to given VRF or unbind it if vrf node is not set.
        # unbinding will call 'ip link set dev eth0 nomaster' which will
        # also drop the interface out of a bridge or bond - thus this is
        # checked before
        self.set_vrf(config.get('vrf', ''))

    # Configure MSS value for IPv4 TCP connections
    tmp = dict_search('ip.adjust_mss', config)
    value = tmp if (tmp != None) else '0'
    self.set_tcp_ipv4_mss(value)

    # Configure MSS value for IPv6 TCP connections
    tmp = dict_search('ipv6.adjust_mss', config)
    value = tmp if (tmp != None) else '0'
    self.set_tcp_ipv6_mss(value)

    # Configure ARP cache timeout in milliseconds - has default value
    tmp = dict_search('ip.arp_cache_timeout', config)
    value = tmp if (tmp != None) else '30'
    self.set_arp_cache_tmo(value)

    # Configure ARP filter configuration
    tmp = dict_search('ip.disable_arp_filter', config)
    value = '0' if (tmp != None) else '1'
    self.set_arp_filter(value)

    # Configure ARP accept
    tmp = dict_search('ip.enable_arp_accept', config)
    value = '1' if (tmp != None) else '0'
    self.set_arp_accept(value)

    # Configure ARP announce
    tmp = dict_search('ip.enable_arp_announce', config)
    value = '1' if (tmp != None) else '0'
    self.set_arp_announce(value)

    # Configure ARP ignore
    tmp = dict_search('ip.enable_arp_ignore', config)
    value = '1' if (tmp != None) else '0'
    self.set_arp_ignore(value)

    # Enable proxy-arp on this interface
    tmp = dict_search('ip.enable_proxy_arp', config)
    value = '1' if (tmp != None) else '0'
    self.set_proxy_arp(value)

    # Enable private VLAN proxy ARP on this interface
    tmp = dict_search('ip.proxy_arp_pvlan', config)
    value = '1' if (tmp != None) else '0'
    self.set_proxy_arp_pvlan(value)

    # IPv4 forwarding
    tmp = dict_search('ip.disable_forwarding', config)
    value = '0' if (tmp != None) else '1'
    self.set_ipv4_forwarding(value)

    # IPv4 source-validation
    tmp = dict_search('ip.source_validation', config)
    value = tmp if (tmp != None) else '0'
    self.set_ipv4_source_validation(value)

    # IPv6 forwarding
    tmp = dict_search('ipv6.disable_forwarding', config)
    value = '0' if (tmp != None) else '1'
    self.set_ipv6_forwarding(value)

    # IPv6 router advertisements
    tmp = dict_search('ipv6.address.autoconf', config)
    value = '2' if (tmp != None) else '1'
    if 'dhcpv6' in new_addr:
        value = '2'
    self.set_ipv6_accept_ra(value)

    # IPv6 address autoconfiguration
    tmp = dict_search('ipv6.address.autoconf', config)
    value = '1' if (tmp != None) else '0'
    self.set_ipv6_autoconf(value)

    # IPv6 Duplicate Address Detection (DAD) tries
    tmp = dict_search('ipv6.dup_addr_detect_transmits', config)
    value = tmp if (tmp != None) else '1'
    self.set_ipv6_dad_messages(value)

    # MTU - Maximum Transfer Unit
    if 'mtu' in config:
        self.set_mtu(config.get('mtu'))

    # Delete old IPv6 EUI64 addresses before changing MAC
    for addr in (dict_search('ipv6.address.eui64_old', config) or []):
        self.del_ipv6_eui64_address(addr)

    # Manage IPv6 link-local addresses
    if dict_search('ipv6.address.no_default_link_local', config) != None:
        self.del_ipv6_eui64_address('fe80::/64')
    else:
        self.add_ipv6_eui64_address('fe80::/64')

    # Add IPv6 EUI-based addresses
    tmp = dict_search('ipv6.address.eui64', config)
    if tmp:
        for addr in tmp:
            self.add_ipv6_eui64_address(addr)

    # re-add ourselves to any bridge we might have fallen out of
    if 'is_bridge_member' in config:
        bridge_dict = config.get('is_bridge_member')
        self.add_to_bridge(bridge_dict)

    # eXpress Data Path - highly experimental
    self.set_xdp('xdp' in config)

    # configure port mirror
    self.set_mirror()

    # Enable/Disable of an interface must always be done at the end of the
    # derived class to make use of the ref-counting set_admin_state()
    # function. We will only enable the interface if 'up' was called as
    # often as 'down'. This is required by some interface implementations
    # as certain parameters can only be changed when the interface is
    # in admin-down state. This ensures the link does not flap during
    # reconfiguration.
    state = 'down' if 'disable' in config else 'up'
    self.set_admin_state(state)

    # remove no longer required 802.1ad (Q-in-Q VLANs)
    ifname = config['ifname']
    for vif_s_id in config.get('vif_s_remove', {}):
        vif_s_ifname = f'{ifname}.{vif_s_id}'
        VLANIf(vif_s_ifname).remove()

    # create/update 802.1ad (Q-in-Q VLANs)
    for vif_s_id, vif_s_config in config.get('vif_s', {}).items():
        tmp = deepcopy(VLANIf.get_config())
        tmp['protocol'] = vif_s_config['protocol']
        tmp['source_interface'] = ifname
        tmp['vlan_id'] = vif_s_id

        vif_s_ifname = f'{ifname}.{vif_s_id}'
        vif_s_config['ifname'] = vif_s_ifname

        # It is not possible to change the VLAN encapsulation protocol
        # "on-the-fly". For this "quirk" we need to actively delete and
        # re-create the VIF-S interface.
        if self.exists(vif_s_ifname):
            cur_cfg = get_interface_config(vif_s_ifname)
            protocol = dict_search('linkinfo.info_data.protocol', cur_cfg).lower()
            if protocol != vif_s_config['protocol']:
                VLANIf(vif_s_ifname).remove()

        s_vlan = VLANIf(vif_s_ifname, **tmp)
        s_vlan.update(vif_s_config)

        # remove no longer required client VLAN (vif-c)
        for vif_c_id in vif_s_config.get('vif_c_remove', {}):
            vif_c_ifname = f'{vif_s_ifname}.{vif_c_id}'
            VLANIf(vif_c_ifname).remove()

        # create/update client VLAN (vif-c) interface
        for vif_c_id, vif_c_config in vif_s_config.get('vif_c', {}).items():
            tmp = deepcopy(VLANIf.get_config())
            tmp['source_interface'] = vif_s_ifname
            tmp['vlan_id'] = vif_c_id

            vif_c_ifname = f'{vif_s_ifname}.{vif_c_id}'
            vif_c_config['ifname'] = vif_c_ifname
            c_vlan = VLANIf(vif_c_ifname, **tmp)
            c_vlan.update(vif_c_config)

    # remove no longer required 802.1q VLAN interfaces
    for vif_id in config.get('vif_remove', {}):
        vif_ifname = f'{ifname}.{vif_id}'
        VLANIf(vif_ifname).remove()

    # create/update 802.1q VLAN interfaces
    for vif_id, vif_config in config.get('vif', {}).items():
        vif_ifname = f'{ifname}.{vif_id}'
        vif_config['ifname'] = vif_ifname

        tmp = deepcopy(VLANIf.get_config())
        tmp['source_interface'] = ifname
        tmp['vlan_id'] = vif_id

        # We need to ensure that the string format is consistent, and we need to exclude redundant spaces.
        sep = ' '
        if 'egress_qos' in vif_config:
            # Unwrap strings into arrays
            egress_qos_array = vif_config['egress_qos'].split()
            # The split array is spliced according to the fixed format
            tmp['egress_qos'] = sep.join(egress_qos_array)

        if 'ingress_qos' in vif_config:
            # Unwrap strings into arrays
            ingress_qos_array = vif_config['ingress_qos'].split()
            # The split array is spliced according to the fixed format
            tmp['ingress_qos'] = sep.join(ingress_qos_array)

        # Since setting the QoS control parameters in the later stage will
        # not completely delete the old settings,
        # we still need to delete the VLAN encapsulation interface in order to
        # ensure that the changed settings are effective.
        cur_cfg = get_interface_config(vif_ifname)

        qos_str = ''
        tmp2 = dict_search('linkinfo.info_data.ingress_qos', cur_cfg)
        if 'ingress_qos' in tmp and tmp2:
            for item in tmp2:
                from_key = item['from']
                to_key = item['to']
                qos_str += f'{from_key}:{to_key} '
            if qos_str != tmp['ingress_qos']:
                if self.exists(vif_ifname):
                    VLANIf(vif_ifname).remove()

        qos_str = ''
        tmp2 = dict_search('linkinfo.info_data.egress_qos', cur_cfg)
        if 'egress_qos' in tmp and tmp2:
            for item in tmp2:
                from_key = item['from']
                to_key = item['to']
                qos_str += f'{from_key}:{to_key} '
            if qos_str != tmp['egress_qos']:
                if self.exists(vif_ifname):
                    VLANIf(vif_ifname).remove()

        vlan = VLANIf(vif_ifname, **tmp)
        vlan.update(vif_config)
class VLANIf(Interface):
    """ Specific class which abstracts 802.1q and 802.1ad (Q-in-Q) VLAN interfaces """
    iftype = 'vlan'

    def _create(self):
        # Nothing to do when the kernel already knows this interface.
        if self.exists(f'{self.ifname}'):
            return

        cmd = 'ip link add link {source_interface} name {ifname} type vlan id {vlan_id}'
        # Append the optional "ip link" arguments only when they are configured.
        for option, fragment in (('protocol', ' protocol {protocol}'),
                                 ('ingress_qos', ' ingress-qos-map {ingress_qos}'),
                                 ('egress_qos', ' egress-qos-map {egress_qos}')):
            if option in self.config:
                cmd += fragment
        self._cmd(cmd.format(**self.config))

        # interface is always A/D down. It needs to be enabled explicitly
        self.set_admin_state('down')

    def set_admin_state(self, state):
        """
        Set interface administrative state to be 'up' or 'down'

        Example:
        >>> from vyos.ifconfig import Interface
        >>> Interface('eth0.10').set_admin_state('down')
        >>> Interface('eth0.10').get_admin_state()
        'down'
        """
        # A VLAN can only be placed in admin up state while its parent (lower)
        # device is up.  Flags are defined in kernel source
        # include/uapi/linux/if.h
        flags_file = glob(f'/sys/class/net/{self.ifname}/lower*/flags')[0]
        with open(flags_file, 'r') as f:
            parent_is_up = int(f.read(), 16) & 1
        if not parent_is_up:
            # Parent down - we can not bring up the VLAN.
            return None
        return super().set_admin_state(state)

    def set_mirror(self):
        return
|
# Copyright 2013 dotCloud inc.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from .client import Client
import auth
newline
# Copyright 2013 dotCloud inc.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from .client import Client
import auth
|
from corehq import Domain
from corehq.apps.locations.models import SQLLocation
from corehq.apps.products.models import SQLProduct
from corehq.apps.reports.datatables import DataTablesHeader, DataTablesColumn
from corehq.apps.reports.filters.fixtures import AsyncLocationFilter
from corehq.apps.reports.graph_models import PieChart
from custom.ewsghana import StockLevelsReport
from custom.ewsghana.reports import MultiReport, EWSData
from casexml.apps.stock.models import StockTransaction
from custom.ewsghana.utils import calculate_last_period, get_supply_points
from corehq.apps.reports.filters.dates import DatespanFilter
from custom.ilsgateway.tanzania import make_url
from custom.ilsgateway.tanzania.reports.utils import link_format
from django.utils.translation import ugettext as _
class AlertsData(EWSData):
    # Placeholder provider - alerts content is not implemented yet.
    pass
class ReportingRates(EWSData):
    """Pie chart of supply points that did vs. did not report in the last period."""
    show_table = False
    show_chart = True
    slug = 'reporting_rates'
    title = _('Reporting Rates')

    @property
    def rows(self):
        if not self.config['location_id']:
            return {}
        supply_points = get_supply_points(self.config['location_id'], self.config['domain'])
        period_start, period_end = calculate_last_period(self.config['enddate'])
        reported_count = StockTransaction.objects.filter(
            case_id__in=supply_points,
            report__date__range=[period_start, period_end]
        ).distinct('case_id').count()
        total = len(supply_points)
        return dict(
            total=total,
            reported=reported_count,
            non_reported=total - reported_count
        )

    @property
    def charts(self):
        data = self.rows
        if not data:
            return [PieChart('', '', [])]
        denominator = data['total'] or 1
        reported_pct = float(data['reported']) * 100 / denominator
        missing_pct = float(data['non_reported']) * 100 / denominator
        slices = [
            dict(value=reported_pct,
                 label=_('Reported'),
                 description=_("%.2f%% (%d) Reported (last 7 days)" % (reported_pct, data['total']))),
            dict(value=missing_pct,
                 label=_('Non-Reported'),
                 description=_("%.2f%% (%d) Non-Reported (last 7 days)" %
                               (missing_pct, data['total']))),
        ]
        return [PieChart('', '', slices)]
class ReportingDetails(EWSData):
    """Pie chart of complete vs. incomplete stock reports in the last period."""
    show_table = False
    show_chart = True
    slug = 'reporting_details'
    title = _('Reporting Details')

    @property
    def rows(self):
        if not self.config['location_id']:
            return {}
        period_start, period_end = calculate_last_period(self.config['enddate'])
        supply_points = get_supply_points(self.config['location_id'], self.config['domain'])
        products_count = SQLProduct.objects.filter(domain=self.config['domain'], is_archived=False).count()
        complete = 0
        incomplete = 0
        for supply_point in supply_points:
            reported_products = StockTransaction.objects.filter(
                case_id=supply_point,
                report__date__range=[period_start, period_end]
            ).distinct('product_id').count()
            # A report is complete when every active product was reported on.
            if reported_products == products_count:
                complete += 1
            else:
                incomplete += 1
        return dict(
            total=complete + incomplete,
            complete=complete,
            incomplete=incomplete
        )

    @property
    def charts(self):
        data = self.rows
        if not data:
            return [PieChart('', '', [])]
        denominator = data['total'] or 1
        complete_pct = float(data['complete']) * 100 / denominator
        incomplete_pct = float(data['incomplete']) * 100 / denominator
        slices = [
            dict(value=complete_pct,
                 label=_('Completed'),
                 description=_("%.2f%% (%d) Complete Reports in last 7 days" %
                               (complete_pct, data['total']))),
            dict(value=incomplete_pct,
                 label=_('Incompleted'),
                 description=_("%.2f%% (%d) Incomplete Reports in last 7 days" %
                               (incomplete_pct, data['total']))),
        ]
        return [PieChart('', '', slices)]
class SummaryReportingRates(EWSData):
    """Per-child-location reporting-rate table for administrative locations."""
    show_table = True
    show_chart = False
    slug = 'summary_reporting'
    title = _('Summary Reporting Rates')
    use_datatables = True

    @property
    def get_locations(self):
        domain_obj = Domain.get_by_name(self.config['domain'])
        # Only administrative location types (regions/districts, not facilities).
        admin_types = [loc_type.name
                       for loc_type in domain_obj.location_types
                       if loc_type.administrative]
        return SQLLocation.objects.filter(parent__location_id=self.config['location_id'],
                                          location_type__in=admin_types)

    @property
    def headers(self):
        if not self.config['location_id']:
            return []
        # assumes get_locations is non-empty when a location is selected - TODO confirm
        return DataTablesHeader(*[
            DataTablesColumn(_(self.get_locations[0].location_type.title())),
            DataTablesColumn(_('# Sites')),
            DataTablesColumn(_('# Reporting')),
            DataTablesColumn(_('Reporting Rate'))
        ])

    @property
    def rows(self):
        if not self.config['location_id']:
            return []
        period_start, period_end = calculate_last_period(self.config['enddate'])
        table = []
        for loc in self.get_locations:
            supply_points = get_supply_points(loc.location_id, loc.domain)
            sites = len(supply_points)
            reported = StockTransaction.objects.filter(
                case_id__in=supply_points,
                report__date__range=[period_start, period_end]
            ).distinct('case_id').count()
            rate = '%.2f%%' % (reported * 100 / (float(sites) or 1.0))
            url = make_url(
                ReportingRatesReport,
                self.config['domain'],
                '?location_id=%s&startdate=%s&enddate=%s',
                (loc.location_id, self.config['startdate'], self.config['enddate']))
            table.append([link_format(loc.name, url), sites, reported, rate])
        return table
class NonReporting(EWSData):
    """Table of supply points that sent no stock report in the last period."""
    show_table = True
    show_chart = False
    slug = 'non_reporting'
    use_datatables = True

    @property
    def title(self):
        # Bug fix: this property used to dereference location_id
        # unconditionally, which raised when the report is rendered before a
        # location has been selected.  Guard first, like every sibling
        # property in this module does.
        if self.config['location_id']:
            ltype = SQLLocation.objects.get(location_id=self.config['location_id']).location_type.lower()
            if ltype == 'country':
                return _('Non Reporting RMS and THs')
            else:
                return _('Non Reporting Facilities')
        return ''

    @property
    def headers(self):
        """Two columns: location name and the date of its last report."""
        if self.config['location_id']:
            return DataTablesHeader(*[
                DataTablesColumn(_('Name')),
                DataTablesColumn(_('Last Stock Report Received')),
            ])
        else:
            return []

    @property
    def rows(self):
        rows = []
        if self.config['location_id']:
            supply_points = get_supply_points(self.config['location_id'], self.config['domain'])
            last_period_st, last_period_end = calculate_last_period(self.config['enddate'])
            # Supply points that DID report during the last period ...
            reported = StockTransaction.objects.filter(case_id__in=supply_points,
                                                       report__date__range=[last_period_st,
                                                                            last_period_end]
                                                       ).values_list(*['case_id'], flat=True)
            # ... everything else below this location is non-reporting.
            not_reported = SQLLocation.objects.filter(location_type__in=self.location_types,
                                                      parent__location_id=self.config['location_id'])\
                .exclude(supply_point_id__in=reported)
            for loc in not_reported:
                url = make_url(
                    StockLevelsReport,
                    self.config['domain'],
                    '?location_id=%s&startdate=%s&enddate=%s',
                    (loc.location_id, self.config['startdate'], self.config['enddate']))
                st = StockTransaction.objects.filter(case_id=loc.supply_point_id).order_by('-report__date')
                if st:
                    date = st[0].report.date
                else:
                    date = _('---')
                rows.append([link_format(loc.name, url), date])
        return rows
class IncompliteReports(EWSData):
    """Table of child locations whose last-period report missed some products."""
    show_table = True
    show_chart = False
    slug = 'incomplete_reporting'
    title = _('Incomplete Reports')
    use_datatables = True

    @property
    def headers(self):
        if not self.config['location_id']:
            return []
        return DataTablesHeader(*[
            DataTablesColumn(_('Name')),
            DataTablesColumn(_('Last Stock Report Received')),
        ])

    @property
    def rows(self):
        if not self.config['location_id']:
            return []
        period_start, period_end = calculate_last_period(self.config['enddate'])
        locations = SQLLocation.objects.filter(parent__location_id=self.config['location_id'],
                                               location_type__in=self.location_types)
        products_count = SQLProduct.objects.filter(domain=self.config['domain'], is_archived=False).count()
        table = []
        for loc in locations:
            transactions = StockTransaction.objects.filter(
                case_id=loc.supply_point_id,
                report__date__range=[period_start, period_end]
            ).order_by('-report__date')
            # Complete means one transaction per active product - skip those.
            if products_count == transactions.distinct('product_id').count():
                continue
            last_report = transactions[0].report.date if transactions else '---'
            url = make_url(
                StockLevelsReport,
                self.config['domain'],
                '?location_id=%s&startdate=%s&enddate=%s',
                (loc.location_id, self.config['startdate'], self.config['enddate']))
            table.append([link_format(loc.name, url), last_report])
        return table
class ReportingRatesReport(MultiReport):
    """Top-level 'Reporting Page' combining all reporting data providers."""
    name = 'Reporting Page'
    title = 'Reporting Page'
    slug = 'reporting_page'
    fields = [AsyncLocationFilter, DatespanFilter]
    split = False

    @property
    def report_config(self):
        return dict(
            domain=self.domain,
            startdate=self.datespan.startdate_utc,
            enddate=self.datespan.enddate_utc,
            location_id=self.request.GET.get('location_id'),
        )

    @property
    def data_providers(self):
        config = self.report_config
        providers = [ReportingRates(config=config), ReportingDetails(config=config)]
        if config['location_id']:
            location = SQLLocation.objects.get(location_id=config['location_id'])
            # The summary table only makes sense above facility level.
            if location.location_type.lower() in ['country', 'region']:
                providers.append(SummaryReportingRates(config=config))
        providers.append(NonReporting(config=config))
        providers.append(IncompliteReports(config=config))
        return providers
small fix
from corehq import Domain
from corehq.apps.locations.models import SQLLocation
from corehq.apps.products.models import SQLProduct
from corehq.apps.reports.datatables import DataTablesHeader, DataTablesColumn
from corehq.apps.reports.filters.fixtures import AsyncLocationFilter
from corehq.apps.reports.graph_models import PieChart
from custom.ewsghana import StockLevelsReport
from custom.ewsghana.reports import MultiReport, EWSData
from casexml.apps.stock.models import StockTransaction
from custom.ewsghana.utils import calculate_last_period, get_supply_points
from corehq.apps.reports.filters.dates import DatespanFilter
from custom.ilsgateway.tanzania import make_url
from custom.ilsgateway.tanzania.reports.utils import link_format
from django.utils.translation import ugettext as _
class AlertsData(EWSData):
    # Placeholder provider - alerts content is not implemented yet.
    pass
class ReportingRates(EWSData):
    """Reported vs. non-reported supply points for the last period, as a pie."""
    show_table = False
    show_chart = True
    slug = 'reporting_rates'
    title = _('Reporting Rates')

    @property
    def rows(self):
        location_id = self.config['location_id']
        if not location_id:
            return {}
        points = get_supply_points(location_id, self.config['domain'])
        start, end = calculate_last_period(self.config['enddate'])
        n_reported = (StockTransaction.objects
                      .filter(case_id__in=points, report__date__range=[start, end])
                      .distinct('case_id').count())
        return {
            'total': len(points),
            'reported': n_reported,
            'non_reported': len(points) - n_reported,
        }

    @property
    def charts(self):
        data = self.rows
        chart_data = []
        if data:
            base = data['total'] or 1
            pct_reported = float(data['reported']) * 100 / base
            pct_missing = float(data['non_reported']) * 100 / base
            chart_data.append(dict(
                value=pct_reported,
                label=_('Reported'),
                description=_("%.2f%% (%d) Reported (last 7 days)" % (pct_reported, data['total']))))
            chart_data.append(dict(
                value=pct_missing,
                label=_('Non-Reported'),
                description=_("%.2f%% (%d) Non-Reported (last 7 days)" %
                              (pct_missing, data['total']))))
        return [PieChart('', '', chart_data)]
class ReportingDetails(EWSData):
    """Complete vs. incomplete reports for the last period, as a pie chart."""
    show_table = False
    show_chart = True
    slug = 'reporting_details'
    title = _('Reporting Details')

    @property
    def rows(self):
        location_id = self.config['location_id']
        if not location_id:
            return {}
        start, end = calculate_last_period(self.config['enddate'])
        points = get_supply_points(location_id, self.config['domain'])
        n_products = SQLProduct.objects.filter(domain=self.config['domain'], is_archived=False).count()
        n_complete = 0
        n_incomplete = 0
        for point in points:
            n_seen = (StockTransaction.objects
                      .filter(case_id=point, report__date__range=[start, end])
                      .distinct('product_id').count())
            # Complete = every active product appears in the report.
            if n_seen == n_products:
                n_complete += 1
            else:
                n_incomplete += 1
        return {
            'total': n_complete + n_incomplete,
            'complete': n_complete,
            'incomplete': n_incomplete,
        }

    @property
    def charts(self):
        data = self.rows
        chart_data = []
        if data:
            base = data['total'] or 1
            pct_complete = float(data['complete']) * 100 / base
            pct_incomplete = float(data['incomplete']) * 100 / base
            chart_data.append(dict(
                value=pct_complete,
                label=_('Completed'),
                description=_("%.2f%% (%d) Complete Reports in last 7 days" %
                              (pct_complete, data['total']))))
            chart_data.append(dict(
                value=pct_incomplete,
                label=_('Incompleted'),
                description=_("%.2f%% (%d) Incomplete Reports in last 7 days" %
                              (pct_incomplete, data['total']))))
        return [PieChart('', '', chart_data)]
class SummaryReportingRates(EWSData):
    """Reporting-rate summary table, one row per administrative child location."""
    show_table = True
    show_chart = False
    slug = 'summary_reporting'
    title = _('Summary Reporting Rates')
    use_datatables = True

    @property
    def get_locations(self):
        domain = Domain.get_by_name(self.config['domain'])
        # Administrative types only (e.g. region/district, not facility).
        admin_type_names = [lt.name for lt in domain.location_types if lt.administrative]
        return SQLLocation.objects.filter(parent__location_id=self.config['location_id'],
                                          location_type__in=admin_type_names)

    @property
    def headers(self):
        if not self.config['location_id']:
            return []
        # assumes get_locations is non-empty when a location is selected - TODO confirm
        return DataTablesHeader(
            DataTablesColumn(_(self.get_locations[0].location_type.title())),
            DataTablesColumn(_('# Sites')),
            DataTablesColumn(_('# Reporting')),
            DataTablesColumn(_('Reporting Rate')),
        )

    @property
    def rows(self):
        result = []
        if not self.config['location_id']:
            return result
        start, end = calculate_last_period(self.config['enddate'])
        for child in self.get_locations:
            points = get_supply_points(child.location_id, child.domain)
            n_sites = len(points)
            n_reported = (StockTransaction.objects
                          .filter(case_id__in=points, report__date__range=[start, end])
                          .distinct('case_id').count())
            rate = '%.2f%%' % (n_reported * 100 / (float(n_sites) or 1.0))
            url = make_url(
                ReportingRatesReport,
                self.config['domain'],
                '?location_id=%s&startdate=%s&enddate=%s',
                (child.location_id, self.config['startdate'], self.config['enddate']))
            result.append([link_format(child.name, url), n_sites, n_reported, rate])
        return result
class NonReporting(EWSData):
    """Table of supply points with no stock report in the last period."""
    show_table = True
    show_chart = False
    slug = 'non_reporting'
    use_datatables = True

    @property
    def title(self):
        if not self.config['location_id']:
            return ''
        ltype = SQLLocation.objects.get(location_id=self.config['location_id']).location_type.lower()
        # Country level lists RMS/TH facilities; lower levels are plain facilities.
        return _('Non Reporting RMS and THs') if ltype == 'country' else _('Non Reporting Facilities')

    @property
    def headers(self):
        if not self.config['location_id']:
            return []
        return DataTablesHeader(*[
            DataTablesColumn(_('Name')),
            DataTablesColumn(_('Last Stock Report Received')),
        ])

    @property
    def rows(self):
        if not self.config['location_id']:
            return []
        points = get_supply_points(self.config['location_id'], self.config['domain'])
        start, end = calculate_last_period(self.config['enddate'])
        # Supply points that DID report during the last period ...
        reported_ids = StockTransaction.objects.filter(
            case_id__in=points,
            report__date__range=[start, end]
        ).values_list('case_id', flat=True)
        # ... everything else directly below this location is non-reporting.
        silent = SQLLocation.objects.filter(
            location_type__in=self.location_types,
            parent__location_id=self.config['location_id']
        ).exclude(supply_point_id__in=reported_ids)
        table = []
        for loc in silent:
            url = make_url(
                StockLevelsReport,
                self.config['domain'],
                '?location_id=%s&startdate=%s&enddate=%s',
                (loc.location_id, self.config['startdate'], self.config['enddate']))
            transactions = StockTransaction.objects.filter(
                case_id=loc.supply_point_id).order_by('-report__date')
            last_report = transactions[0].report.date if transactions else _('---')
            table.append([link_format(loc.name, url), last_report])
        return table
class IncompliteReports(EWSData):
    """Table of child locations whose last-period report missed some products."""
    show_table = True
    show_chart = False
    slug = 'incomplete_reporting'
    title = _('Incomplete Reports')
    use_datatables = True

    @property
    def headers(self):
        if not self.config['location_id']:
            return []
        return DataTablesHeader(*[
            DataTablesColumn(_('Name')),
            DataTablesColumn(_('Last Stock Report Received')),
        ])

    @property
    def rows(self):
        if not self.config['location_id']:
            return []
        start, end = calculate_last_period(self.config['enddate'])
        children = SQLLocation.objects.filter(parent__location_id=self.config['location_id'],
                                              location_type__in=self.location_types)
        n_products = SQLProduct.objects.filter(domain=self.config['domain'], is_archived=False).count()
        table = []
        for child in children:
            transactions = StockTransaction.objects.filter(
                case_id=child.supply_point_id,
                report__date__range=[start, end]
            ).order_by('-report__date')
            # Complete means one distinct product per active product - skip those.
            if n_products == transactions.distinct('product_id').count():
                continue
            last_report = transactions[0].report.date if transactions else '---'
            url = make_url(
                StockLevelsReport,
                self.config['domain'],
                '?location_id=%s&startdate=%s&enddate=%s',
                (child.location_id, self.config['startdate'], self.config['enddate']))
            table.append([link_format(child.name, url), last_report])
        return table
class ReportingRatesReport(MultiReport):
    """'Reporting Page': rates, details, summary and non/incomplete reporting."""
    name = 'Reporting Page'
    title = 'Reporting Page'
    slug = 'reporting_page'
    fields = [AsyncLocationFilter, DatespanFilter]
    split = False

    @property
    def report_config(self):
        return dict(
            domain=self.domain,
            startdate=self.datespan.startdate_utc,
            enddate=self.datespan.enddate_utc,
            location_id=self.request.GET.get('location_id'),
        )

    @property
    def data_providers(self):
        config = self.report_config
        providers = [
            ReportingRates(config=config),
            ReportingDetails(config=config),
        ]
        location_id = config['location_id']
        if location_id:
            loc = SQLLocation.objects.get(location_id=location_id)
            # The summary table only applies above facility level.
            if loc.location_type.lower() in ['country', 'region']:
                providers.append(SummaryReportingRates(config=config))
        providers += [NonReporting(config=config), IncompliteReports(config=config)]
        return providers
import requests
import json
# Layer endpoint to scrape and the file the features are written to.
base_url = 'http://gis.co.hennepin.mn.us/ArcGIS/rest/services/Maps/PROPERTY/MapServer/0'
output_file = 'output.geojson'
# Layer metadata: overall extent, field definitions and geometry type.
metadata = requests.get(base_url, params={'f': 'json'}).json()
bounds = metadata['extent']
fields = metadata['fields']
geom_type = metadata['geometryType']
# OIDs of features already emitted, so overlapping cells don't duplicate them.
saved = set()
# Look for a field that to use as the deduping ID
oid_field = next(field['name'] for field in fields if field['type'] == 'esriFieldTypeOID')
if oid_field:
    print "Using '%s' as the OID field to dedupe." % oid_field
else:
    # NOTE(review): next() without a default raises StopIteration when no OID
    # field exists, so this branch looks unreachable as written - confirm.
    print "WARNING: Couldn't find the OID field to dedupe on, so you'll have duplicate data probably."
# Split the layer extent into a cells_x by cells_y grid of query windows.
cells_x = 3
cells_y = 3
x_step = (bounds['xmax'] - bounds['xmin']) / cells_x
y_step = (bounds['ymax'] - bounds['ymin']) / cells_y
def xfrange(start, stop=None, step=None):
    """Float analogue of range(): lazily yield start, start+step, ... < stop."""
    if stop is None:
        # Single-argument form: xfrange(stop) counts up from 0.0.
        start, stop = 0.0, float(start)
    if step is None:
        step = 1.0
    current = float(start)
    while current < stop:
        yield current
        current += step
def esrijson2geojson(geom_type, esrijson):
    """Convert an EsriJSON geometry dict into a GeoJSON geometry dict.

    Bug fix: the original read esrijson['rings'] unconditionally, which
    raised KeyError for polylines (which carry 'paths') and points (which
    carry 'x'/'y').  Each branch now reads the key that actually exists for
    its geometry type.  Unknown types are reported and yield an empty dict.
    """
    geojson = {}
    if geom_type == 'esriGeometryPolygon':
        geojson['type'] = 'Polygon'
        geojson['coordinates'] = esrijson['rings']
    elif geom_type == 'esriGeometryPolyline':
        # Esri 'paths' may contain several parts, so MultiLineString is the
        # faithful GeoJSON counterpart.
        geojson['type'] = 'MultiLineString'
        geojson['coordinates'] = esrijson['paths']
    elif geom_type == 'esriGeometryPoint':
        geojson['type'] = 'Point'
        geojson['coordinates'] = [esrijson['x'], esrijson['y']]
    else:
        print("I don't know how to convert esrijson of type '%s'." % geom_type)
    return geojson
# Accumulate every feature in memory, then dump the document once at the end.
geojson_doc = {
    "type": "FeatureCollection",
    "features": []
}
i = 0
for x in xfrange(bounds['xmin'], bounds['xmax'], x_step):
    for y in xfrange(bounds['ymin'], bounds['ymax'], y_step):
        # Esri polygon ring covering this grid cell, in the layer's own
        # spatial reference.
        bbox = (x, y, x + x_step, y + y_step)
        geometry = json.dumps({
            "rings": [
                [
                    [bbox[0], bbox[1]],
                    [bbox[0], bbox[3]],
                    [bbox[2], bbox[3]],
                    [bbox[2], bbox[1]],
                    [bbox[0], bbox[1]]
                ]
            ]
        })
        # Ask for every feature intersecting the cell, reprojected to WGS84.
        args = {
            'geometry': geometry,
            'geometryType': 'esriGeometryPolygon',
            'inSR': bounds['spatialReference']['wkid'],
            'spatialRel': 'esriSpatialRelIntersects',
            'returnCountOnly': 'false',
            'returnIdsOnly': 'false',
            'returnGeometry': 'true',
            'outSR': 4326,
            'outFields': '*',
            'f': 'json'
        }
        resp = requests.get(base_url + '/query', params=args)
        print resp.url
        for feature in resp.json()['features']:
            attrs = feature['attributes']
            oid = attrs.get(oid_field)
            # Features spanning a cell border come back from several queries.
            if oid in saved:
                continue
            geom = feature['geometry']
            geojson_doc['features'].append({
                "type": "Feature",
                "properties": attrs,
                "geometry": esrijson2geojson(geom_type, geom)
            })
            saved.add(oid)
        i += 1
        print "%s/%s cells, %s features." % (i, (cells_x * cells_y), len(saved))
# Single write of the whole FeatureCollection.
with open(output_file, 'w') as f:
    json.dump(geojson_doc, f)
Don't store everything in memory before dumping it.
import requests
import json
# Layer endpoint to scrape and the file the features are written to.
base_url = 'http://gisweb.co.aitkin.mn.us/arcgis/rest/services/MapLayers/MapServer/3'
output_file = 'output.geojson'
# Layer metadata: overall extent, field definitions and geometry type.
metadata = requests.get(base_url, params={'f': 'json'}).json()
bounds = metadata['extent']
fields = metadata['fields']
geom_type = metadata['geometryType']
# OIDs of features already emitted, so overlapping cells don't duplicate them.
saved = set()
# Look for a field that to use as the deduping ID
oid_field = next(field['name'] for field in fields if field['type'] == 'esriFieldTypeOID')
if oid_field:
    print "Using '%s' as the OID field to dedupe." % oid_field
else:
    # NOTE(review): next() without a default raises StopIteration when no OID
    # field exists, so this branch looks unreachable as written - confirm.
    print "WARNING: Couldn't find the OID field to dedupe on, so you'll have duplicate data probably."
# Split the layer extent into a cells_x by cells_y grid of query windows.
cells_x = 5
cells_y = 5
x_step = (bounds['xmax'] - bounds['xmin']) / cells_x
y_step = (bounds['ymax'] - bounds['ymin']) / cells_y
def xfrange(start, stop=None, step=None):
    """Like range(), but returns list of floats instead

    All numbers are generated on-demand using generators
    """
    if stop is None:
        # One-argument call: count up from zero.
        stop = float(start)
        start = 0.0
    step = 1.0 if step is None else step
    value = float(start)
    while value < stop:
        yield value
        value += step
def esrijson2geojson(geom_type, esrijson):
    """Translate one EsriJSON geometry into its GeoJSON equivalent."""
    # Each Esri geometry type stores its coordinates under a different key.
    if geom_type == 'esriGeometryPolygon':
        return {'type': 'Polygon', 'coordinates': esrijson['rings']}
    elif geom_type == 'esriGeometryPolyline':
        return {'type': 'MultiLineString', 'coordinates': esrijson['paths']}
    elif geom_type == 'esriGeometryPoint':
        return {'type': 'Point', 'coordinates': [esrijson['x'], esrijson['y']]}
    print("I don't know how to convert esrijson of type '%s'." % geom_type)
    return {}
i = 0
# Stream features to disk as they arrive instead of holding the whole
# FeatureCollection in memory.
f = open(output_file, 'w')
f.write("""{
    "type": "FeatureCollection",
    "features": [\n""")
# Bug fix: the previous version wrote ',\n' AFTER every feature, leaving a
# trailing comma before the closing ']' - invalid JSON.  Write the separator
# BETWEEN features instead.
first_feature = True
for x in xfrange(bounds['xmin'], bounds['xmax'], x_step):
    for y in xfrange(bounds['ymin'], bounds['ymax'], y_step):
        # Esri polygon ring covering this grid cell.
        bbox = (x, y, x + x_step, y + y_step)
        geometry = json.dumps({
            "rings": [
                [
                    [bbox[0], bbox[1]],
                    [bbox[0], bbox[3]],
                    [bbox[2], bbox[3]],
                    [bbox[2], bbox[1]],
                    [bbox[0], bbox[1]]
                ]
            ]
        })
        # Every feature intersecting the cell, reprojected to WGS84.
        args = {
            'geometry': geometry,
            'geometryType': 'esriGeometryPolygon',
            'inSR': bounds['spatialReference']['wkid'],
            'spatialRel': 'esriSpatialRelIntersects',
            'returnCountOnly': 'false',
            'returnIdsOnly': 'false',
            'returnGeometry': 'true',
            'outSR': 4326,
            'outFields': '*',
            'f': 'json'
        }
        resp = requests.get(base_url + '/query', params=args)
        for feature in resp.json()['features']:
            attrs = feature['attributes']
            oid = attrs.get(oid_field)
            # Features spanning a cell border come back from several queries.
            if oid in saved:
                continue
            geom = feature['geometry']
            if not first_feature:
                f.write(',\n')
            first_feature = False
            f.write(json.dumps({
                "type": "Feature",
                "properties": attrs,
                "geometry": esrijson2geojson(geom_type, geom)
            }))
            saved.add(oid)
        i += 1
        print("%s/%s cells, %s features." % (i, (cells_x * cells_y), len(saved)))
f.write("]\n}\n")
# Bug fix: the file was never closed/flushed before.
f.close()
|
# -*- coding: utf-8 -*-
"""
Google Cloud Storage pythonic interface
"""
from __future__ import print_function
import array
import io
import json
import logging
import oauth2client.client # version 1.5.2
import os
import pickle
import re
import requests
import sys
import time
import warnings
import webbrowser
from .utils import read_block
PY2 = sys.version_info.major == 2
logger = logging.getLogger(__name__)
# OAuth client credentials used for the browser device flow in
# GCSFileSystem.connect(); for installed applications these are not
# actually confidential (hence the name).
not_secret = {"client_id": "586241054156-is96mugvl2prnj0ib5gsg1l3q9m9jp7p."
              "apps.googleusercontent.com",
              "client_secret": "_F-W4r2HzuuoPvi6ROeaUB6o"}
# On-disk cache of refresh tokens, keyed by (project, access).
tfile = os.path.join(os.path.expanduser("~"), '.gcs_tokens')
# Valid predefined object ACLs and bucket ACLs, respectively.
ACLs = {"authenticatedread", "bucketownerfullcontrol", "bucketownerread",
        "private", "projectprivate", "publicread"}
bACLs = {"authenticatedRead", "private", "projectPrivate", "publicRead",
         "publicReadWrite"}
DEFAULT_PROJECT = os.environ.get('GCSFS_DEFAULT_PROJECT', '')
# When True, validate_response() prints the failing URL and headers.
DEBUG = False
if PY2:
    # Python 2 has no FileNotFoundError builtin; fall back to IOError.
    FileNotFoundError = IOError
def quote_plus(s):
    """
    Convert some URL elements to be HTTP-safe.

    Not the same as in urllib, because, for instance, parentheses and commas
    are passed through: only '/' and ' ' are percent-encoded here.

    Parameters
    ----------
    s: input URL/portion

    Returns
    -------
    corrected URL
    """
    return s.replace('/', '%2F').replace(' ', '%20')
def split_path(path):
    """
    Normalise GCS path string into bucket and key.

    Parameters
    ----------
    path : string
        Input path, like `gcs://mybucket/path/to/file`

    Examples
    --------
    >>> split_path("gcs://mybucket/path/to/file")
    ['mybucket', 'path/to/file']
    """
    # Strip either protocol prefix, in the same order the original checked.
    for prefix in ('gcs://', 'gs://'):
        if path.startswith(prefix):
            path = path[len(prefix):]
    if '/' in path:
        return path.split('/', 1)
    # Bucket only - no key component.
    return path, ""
def validate_response(r, path):
    """
    Check the requests object r, raise error if it's not ok.

    Parameters
    ----------
    r: requests response object
    path: associated URL path, for error messages
    """
    if r.ok:
        return
    msg = str(r.content)
    if DEBUG:
        print(r.url, r.headers, sep='\n')
    # Map the server's error text onto the closest Python exception.
    if "Not Found" in msg:
        raise FileNotFoundError(path)
    if "forbidden" in msg:
        raise IOError("Forbidden: %s" % path)
    if "invalid" in msg:
        raise ValueError("Bad Request: %s" % path)
    raise RuntimeError(msg)
class GCSFileSystem(object):
"""
Connect to Google Cloud Storage.
Two modes of authentication are supported:
- if ``token=None``, you will be given a "device code", which you must
enter into a browser where you are logged in with your Google identity.
- you may supply a token generated by the
[gcloud](https://cloud.google.com/sdk/docs/)
utility; this is either a python dictionary, or the name of a file
containing the JSON returned by logging in with the gcloud CLI tool. On
a posix system this may be at
``~/.config/gcloud/application_default_credentials.json``
We maintain a cache of refresh tokens in the file ~/.gcs_tokens, so for any
pair of (project, access), you will not need to log in once your credentials
are verified.
Parameters
----------
project : string
GCS users may only access to contents of one project in a single
instance of GCSFileSystem. This is required in order
to list all the buckets you have access to within a project.
access : one of {'read_only', 'read_write', 'full_control'}
Full control implies read/write as well as modifying metadata,
e.g., access control.
token: None, dict or string
(see description of authentication methods, above)
"""
scopes = {'read_only', 'read_write', 'full_control'}
retries = 10
base = "https://www.googleapis.com/storage/v1/"
_singleton = [None]
default_block_size = 5 * 2**20
    def __init__(self, project=DEFAULT_PROJECT, access='full_control',
                 token=None, block_size=None):
        # Validate the requested OAuth scope before doing any network work.
        # NOTE(review): the '{}' placeholder is never formatted here, so the
        # message renders as a two-element tuple - confirm intent.
        if access not in self.scopes:
            raise ValueError('access must be one of {}', self.scopes)
        if project is None:
            warnings.warn('GCS project not set - cannot list or create buckets')
        # Raw token as supplied by the caller; parsed lazily in connect().
        self.input_token = token
        if block_size is not None:
            # Per-instance override of the class-level default block size.
            self.default_block_size = block_size
        self.project = project
        self.access = access
        # Listing cache: '' -> buckets, '<bucket>' -> objects in that bucket.
        self.dirs = {}
        self.connect()
        # Remember the newest instance for GCSFileSystem.current().
        self._singleton[0] = self
@classmethod
def current(cls):
""" Return the most recently created GCSFileSystem
If no GCSFileSystem has been created, then create one
"""
if not cls._singleton[0]:
return GCSFileSystem()
else:
return cls._singleton[0]
@staticmethod
def _parse_gtoken(gt):
if isinstance(gt, str):
t = json.load(open(gt))
else:
t = gt.copy()
typ = t.pop('type')
if typ != "authorized_user":
raise ValueError("Only 'authorized_user' tokens accepted, got: %s"
% typ)
t['grant_type'] = 'refresh_token'
t['timestamp'] = time.time()
t['expires_in'] = 0
return t
@staticmethod
def load_tokens():
try:
with open(tfile, 'rb') as f:
tokens = pickle.load(f)
except Exception:
tokens = {}
GCSFileSystem.tokens = tokens
    def connect(self, refresh=False):
        """
        Establish session token. A new token will be requested if the current
        one is within 100s of expiry.

        Parameters
        ----------
        refresh: bool (False)
            Force refresh, even if the token is expired.
        """
        token = self.input_token
        project, access = self.project, self.access
        if token is not None:
            # A caller-supplied token overrides whatever is cached for this
            # (project, access) pair.  Strings are paths to gcloud JSON files;
            # dicts carrying a 'type' key are raw gcloud tokens.
            if 'type' in token or isinstance(token, str):
                token = self._parse_gtoken(token)
            self.tokens[(project, access)] = token
        if (project, access) in self.tokens:
            # cached credentials
            data = self.tokens[(project, access)]
        else:
            # no credentials - try to ask google in the browser
            scope = "https://www.googleapis.com/auth/devstorage." + access
            path = 'https://accounts.google.com/o/oauth2/device/code'
            r = requests.post(path,
                              params={'client_id': not_secret['client_id'],
                                      'scope': scope})
            validate_response(r, path)
            data = json.loads(r.content.decode())
            # OAuth device flow: the user enters a short code in the browser
            # while we poll the token endpoint below for the result.
            print('Enter the following code when prompted in the browser:')
            print(data['user_code'])
            webbrowser.open(data['verification_url'])
            for i in range(self.retries):
                time.sleep(2)
                r = requests.post(
                    "https://www.googleapis.com/oauth2/v4/token",
                    params={'client_id': not_secret['client_id'],
                            'client_secret': not_secret['client_secret'],
                            'code': data['device_code'],
                            'grant_type':
                            "http://oauth.net/grant_type/device/1.0"})
                data2 = json.loads(r.content.decode())
                if 'error' in data2:
                    # Not authorised yet - keep polling until retries run out.
                    # NOTE(review): the message below is missing a space
                    # between "browser" and "authentication".
                    if i == self.retries - 1:
                        raise RuntimeError("Waited too long for browser"
                                           "authentication.")
                    continue
                data = data2
                break
            data['timestamp'] = time.time()
            data.update(not_secret)
        if refresh or time.time() - data['timestamp'] > data['expires_in'] - 100:
            # token has expired, or is about to - call refresh
            path = "https://www.googleapis.com/oauth2/v4/token"
            r = requests.post(
                path,
                params={'client_id': data['client_id'],
                        'client_secret': data['client_secret'],
                        'refresh_token': data['refresh_token'],
                        'grant_type': "refresh_token"})
            validate_response(r, path)
            data['timestamp'] = time.time()
            data['access_token'] = json.loads(r.content.decode())['access_token']
        # Cache the (possibly refreshed) token and build the auth header used
        # by every subsequent _call().
        self.tokens[(project, access)] = data
        self.header = {'Authorization': 'Bearer ' + data['access_token']}
        self._save_tokens()
    @staticmethod
    def _save_tokens():
        # Best-effort persistence of the class-wide token cache to ~/.gcs_tokens;
        # pickle protocol 2 keeps the file readable from Python 2 as well.
        try:
            with open(tfile, 'wb') as f:
                pickle.dump(GCSFileSystem.tokens, f, 2)
        except Exception as e:
            # Failing to cache credentials is not fatal - just warn.
            warnings.warn('Saving token cache failed: ' + str(e))
def _call(self, method, path, *args, **kwargs):
for k, v in list(kwargs.items()):
# only pass parameters that have values
if v is None:
del kwargs[k]
json = kwargs.pop('json', None)
meth = getattr(requests, method)
if args:
path = path.format(*[quote_plus(p) for p in args])
r = meth(self.base + path, headers=self.header, params=kwargs,
json=json)
try:
out = r.json()
except ValueError:
out = r.content
validate_response(r, path)
return out
def _list_buckets(self):
if '' not in self.dirs:
out = self._call('get', 'b/', project=self.project)
dirs = out.get('items', [])
self.dirs[''] = dirs
return self.dirs['']
def _list_bucket(self, bucket):
if bucket not in self.dirs:
out = self._call('get', 'b/{}/o/', bucket)
dirs = out.get('items', [])
for f in dirs:
f['name'] = '%s/%s' % (bucket, f['name'])
f['size'] = int(f.get('size'), 0)
self.dirs[bucket] = dirs
return self.dirs[bucket]
def mkdir(self, bucket, acl='projectPrivate',
          default_acl='bucketOwnerFullControl'):
    """
    New bucket

    Parameters
    ----------
    bucket: str
        bucket name
    acl: string, one of bACLs
        access for the bucket itself
    default_acl: str, one of ACLs
        default ACL for objects created in this bucket
    """
    payload = {"name": bucket}
    self._call('post', 'b/', predefinedAcl=acl, project=self.project,
               predefinedDefaultObjectAcl=default_acl, json=payload)
    # Both the bucket's own listing and the bucket list are now stale.
    for stale in (bucket, ''):
        self.invalidate_cache(stale)

def rmdir(self, bucket):
    """Delete an empty bucket and drop it from the cached bucket list."""
    self._call('delete', 'b/' + bucket)
    # Remove the entry in place so existing references see the change.
    for entry in list(self.dirs.get('', [])):
        if entry['name'] == bucket:
            self.dirs[''].remove(entry)
    self.invalidate_cache(bucket)

def invalidate_cache(self, bucket=None):
    """
    Mark files cache as dirty, so that it is reloaded on next use.

    Parameters
    ----------
    bucket: string or None
        If None, clear all files cached; if a string, clear the files
        corresponding to that bucket.
    """
    if bucket is None or bucket in ('', '/'):
        self.dirs.clear()
    else:
        self.dirs.pop(bucket, None)
def ls(self, path, detail=False):
    """List one "directory" level: direct child objects plus synthesised
    pseudo-directory entries. '' or '/' lists the buckets themselves.

    With ``detail`` the full metadata dicts are returned, otherwise names.
    """
    if path in ['', '/']:
        out = self._list_buckets()
    else:
        bucket, prefix = split_path(path)
        path = '/'.join([bucket, prefix])
        files = self._list_bucket(bucket)
        # seek: prefix a child's name must start with; l: its length;
        # bit: separator spliced into synthesised directory names.
        seek, l, bit = (path, len(path), '') if path.endswith('/') else (
            path+'/', len(path)+1, '/')
        out = []
        for f in files:
            if (f['name'].startswith(seek) and '/' not in f['name'][l:] or
                    f['name'] == path):
                # Direct child object (or exactly the requested path).
                out.append(f)
            elif f['name'].startswith(seek) and '/' in f['name'][l:]:
                # Deeper object: surface its first path component as a
                # zero-size pseudo-directory entry (deduplicated).
                directory = {
                    'bucket': bucket, 'kind': 'storage#object',
                    'size': 0, 'storageClass': 'DIRECTORY',
                    'name': path+bit+f['name'][l:].split('/', 1)[0]+'/'}
                if directory not in out:
                    out.append(directory)
    if detail:
        return out
    else:
        return [f['name'] for f in out]
def walk(self, path, detail=False):
    """Recursively list every object at or below ``path``."""
    bucket, prefix = split_path(path)
    if not bucket:
        raise ValueError('Cannot walk all of GCS')
    path = '/'.join([bucket, prefix])
    listing = self._list_bucket(bucket)
    # A trailing slash denotes a directory; otherwise require either an
    # exact match or a child separated by '/'.
    prefix_match = path if path.endswith('/') else path + '/'
    hits = [f for f in listing
            if f['name'].startswith(prefix_match) or f['name'] == path]
    if detail:
        return hits
    return [f['name'] for f in hits]

def du(self, path, total=False, deep=False):
    """Sizes of objects under ``path``; a grand total when ``total``."""
    entries = self.walk(path, True) if deep else self.ls(path, True)
    if total:
        return sum(f['size'] for f in entries)
    return {f['name']: f['size'] for f in entries}
def glob(self, path):
    """
    Find files by glob-matching.

    Note that the bucket part of the path must not contain a "*"
    """
    path = path.rstrip('/')
    bucket, key = split_path(path)
    path = '/'.join([bucket, key])
    if "*" in bucket:
        raise ValueError('Bucket cannot contain a "*"')
    if '*' not in path:
        # No wildcard: treat the path as a directory, match its contents.
        path = path.rstrip('/') + '/*'
    # Walk from the deepest wildcard-free prefix to limit the listing.
    if '/' in path[:path.index('*')]:
        ind = path[:path.index('*')].rindex('/')
        root = path[:ind + 1]
    else:
        root = ''
    allfiles = self.walk(root)
    # Translate the glob to a regex; "*" must not cross "/" boundaries.
    # NOTE(review): "." in the path is not escaped, so it matches any
    # character — presumably acceptable for bucket listings.
    pattern = re.compile("^" + path.replace('//', '/')
                         .rstrip('/')
                         .replace('*', '[^/]*')
                         .replace('?', '.') + "$")
    out = [f for f in allfiles if re.match(pattern,
           f.replace('//', '/').rstrip('/'))]
    return out
def exists(self, path):
    """Whether a bucket (bare name) or object exists."""
    bucket, key = split_path(path)
    try:
        if not key:
            return bucket in self.ls('')
        return bool(self.info(path))
    except FileNotFoundError:
        return False

def info(self, path):
    """Metadata dict of one object; FileNotFoundError when absent."""
    path = '/'.join(split_path(path))
    matches = [f for f in self.ls(path, True) if f['name'] == path]
    if not matches:
        raise FileNotFoundError(path)
    return matches[0]
def url(self, path):
    """Media (download) URL for an object."""
    return self.info(path)['mediaLink']

def cat(self, path):
    """ Simple one-shot get of file data """
    return _fetch_range(self.header, self.info(path))
def get(self, rpath, lpath, blocksize=5 * 2 ** 20):
    """Download remote object ``rpath`` to local file ``lpath``."""
    with self.open(rpath, 'rb', block_size=blocksize) as src:
        with open(lpath, 'wb') as dst:
            while True:
                chunk = src.read(blocksize)
                if not chunk:
                    break
                dst.write(chunk)

def put(self, lpath, rpath, blocksize=5 * 2 ** 20, acl=None):
    """Upload local file ``lpath`` to remote object ``rpath``."""
    with self.open(rpath, 'wb', block_size=blocksize, acl=acl) as dst:
        with open(lpath, 'rb') as src:
            while True:
                chunk = src.read(blocksize)
                if not chunk:
                    break
                dst.write(chunk)
def head(self, path, size=1024):
    """First ``size`` bytes of a file."""
    with self.open(path, 'rb') as f:
        return f.read(size)

def tail(self, path, size=1024):
    """Last ``size`` bytes of a file (whole file if shorter)."""
    if size > self.info(path)['size']:
        return self.cat(path)
    with self.open(path, 'rb') as f:
        f.seek(-size, 2)
        return f.read()
def merge(self, path, paths, acl=None):
    """Concatenate objects within a single bucket"""
    bucket, key = split_path(path)
    sources = [{'name': split_path(p)[1]} for p in paths]
    body = {'sourceObjects': sources,
            "kind": "storage#composeRequest",
            'destination': {'name': key, 'bucket': bucket}}
    self._call('post', 'b/{}/o/{}/compose', bucket, key,
               destinationPredefinedAcl=acl, json=body)

def copy(self, path1, path2, acl=None):
    """Server-side copy of one object to another location."""
    src_bucket, src_key = split_path(path1)
    dst_bucket, dst_key = split_path(path2)
    self._call('post', 'b/{}/o/{}/copyTo/b/{}/o/{}',
               src_bucket, src_key, dst_bucket, dst_key,
               destinationPredefinedAcl=acl)

def mv(self, path1, path2, acl=None):
    """Move an object: copy, then delete the source."""
    self.copy(path1, path2, acl)
    self.rm(path1)

def rm(self, path):
    """Delete an object and invalidate its bucket's listing cache."""
    bucket, key = split_path(path)
    self._call('delete', "b/{}/o/{}", bucket, key)
    self.invalidate_cache(bucket)
def open(self, path, mode='rb', block_size=None, acl=None):
    """Open a remote object; returns a file-like GCSFile.

    Bug fix: ``acl`` was accepted but silently dropped; it is now
    forwarded to GCSFile so uploads can apply a canned ACL.
    """
    if block_size is None:
        block_size = self.default_block_size
    return GCSFile(self, path, mode, block_size, acl=acl)

def touch(self, path):
    """Create an empty object at ``path``."""
    with self.open(path, 'wb'):
        pass
def read_block(self, fn, offset, length, delimiter=None):
    """ Read a block of bytes from a GCS file

    Starting at ``offset`` of the file, read ``length`` bytes. If
    ``delimiter`` is set then we ensure that the read starts and stops at
    delimiter boundaries that follow the locations ``offset`` and ``offset
    + length``. If ``offset`` is zero then we start at zero. The
    bytestring returned WILL include the end delimiter string.

    If offset+length is beyond the eof, reads to eof.

    Parameters
    ----------
    fn: string
        Path to filename on GCS
    offset: int
        Byte offset to start read
    length: int
        Number of bytes to read
    delimiter: bytes (optional)
        Ensure reading starts and stops at delimiter bytestring

    Examples
    --------
    >>> gcs.read_block('data/file.csv', 0, 13)  # doctest: +SKIP
    b'Alice, 100\\nBo'
    >>> gcs.read_block('data/file.csv', 0, 13, delimiter=b'\\n')  # doctest: +SKIP
    b'Alice, 100\\nBob, 200\\n'

    Use ``length=None`` to read to the end of the file.
    >>> gcs.read_block('data/file.csv', 0, None, delimiter=b'\\n')  # doctest: +SKIP
    b'Alice, 100\\nBob, 200\\nCharlie, 300'

    See Also
    --------
    distributed.utils.read_block
    """
    with self.open(fn, 'rb') as f:
        size = f.size
        if length is None:
            length = size
        if offset + length > size:
            # Clamp the read to end-of-file.
            length = size - offset
        # This resolves to the module-level utility imported from .utils,
        # not to this method (methods are not in the function's scope).
        bytes = read_block(f, offset, length, delimiter)
    return bytes
def __getstate__(self):
d = self.__dict__.copy()
del d['header']
logger.debug("Serialize with state: %s", d)
return d
def __setstate__(self, state):
self.__dict__.update(state)
self.connect()
# Populate GCSFileSystem.tokens from the on-disk cache at import time.
GCSFileSystem.load_tokens()
class GCSFile:
    """File-like object for reading or writing a single GCS object."""
    def __init__(self, gcsfs, path, mode='rb', block_size=5 * 2 ** 20,
                 acl=None):
        # Split "bucket/key" once; both parts are needed for API URLs.
        bucket, key = split_path(path)
        self.gcsfs = gcsfs
        self.bucket = bucket
        self.key = key
        self.mode = mode
        self.blocksize = block_size
        self.cache = b""      # read cache covering bytes [start, end)
        self.loc = 0          # current logical file offset
        self.acl = acl
        self.end = None
        self.start = None
        self.closed = False
        self.trim = True      # drop fully-consumed cache blocks in read()
        if mode not in {'rb', 'wb'}:
            raise NotImplementedError('File mode not supported')
        if mode == 'rb':
            # Reading needs the object's metadata (size, mediaLink).
            self.details = gcsfs.info(path)
            self.size = self.details['size']
        else:
            # 2**18 is enforced as a floor for upload chunks — presumably
            # a resumable-upload requirement; TODO confirm against API docs.
            if block_size < 2**18:
                warnings.warn('Setting block size to minimum value, 2**18')
                self.blocksize = 2**18
            self.buffer = io.BytesIO()
            self.offset = 0        # bytes already sent to GCS
            self.forced = False    # whether a final (force) flush happened
def info(self):
    """Metadata dict for this file, as returned by ls()/info()."""
    return self.details

def url(self):
    """The object's media (download) link."""
    return self.details['mediaLink']

def tell(self):
    """Current logical byte offset within the file."""
    return self.loc
def seek(self, loc, whence=0):
    """ Set current file location

    Parameters
    ----------
    loc : int
        byte location
    whence : {0, 1, 2}
        from start of file, current location or end of file, resp.
    """
    if self.mode != 'rb':
        raise ValueError('Seek only available in read mode')
    # Map each whence value to the origin it is relative to.
    origins = {0: 0, 1: self.loc, 2: self.size}
    if whence not in origins:
        raise ValueError(
            "invalid whence (%s, should be 0, 1 or 2)" % whence)
    target = origins[whence] + loc
    if target < 0:
        raise ValueError('Seek before start of file')
    self.loc = target
    return self.loc
def readline(self, length=-1):
    """
    Read and return a line from the stream.

    If length is specified, at most size bytes will be read.
    """
    # Make sure at least one byte at the current offset is cached.
    self._fetch(self.loc, self.loc + 1)
    while True:
        # Offset (plus one) of the first newline in the unread cache;
        # 0 means "no newline found".
        found = self.cache[self.loc - self.start:].find(b'\n') + 1
        if 0 < length < found:
            # Newline lies beyond the caller's limit: honour the limit.
            return self.read(length)
        if found:
            return self.read(found)
        if self.end > self.size:
            # Cache reaches EOF without a newline: return what remains.
            return self.read(length)
        # Extend the cached window by one block and look again.
        self._fetch(self.start, self.end + self.blocksize)
def __next__(self):
data = self.readline()
if data:
return data
else:
raise StopIteration
next = __next__
def __iter__(self):
return self
def readlines(self):
""" Return all lines in a file as a list """
return list(self)
def write(self, data):
    """
    Write data to buffer.

    Buffer only sent to GCS on flush() or if buffer is greater than
    or equal to blocksize.

    Parameters
    ----------
    data : bytes
        Set of bytes to be written.
    """
    if self.mode not in {'wb', 'ab'}:
        raise ValueError('File not in write mode')
    if self.closed:
        raise ValueError('I/O operation on closed file.')
    written = self.buffer.write(ensure_writable(data))
    self.loc += written
    # Ship a chunk as soon as a whole block has accumulated.
    if self.buffer.tell() >= self.blocksize:
        self.flush()
    return written
def flush(self, force=False):
    """
    Write buffered data to GCS.

    Uploads the current buffer, if it is larger than the block-size, or if
    the file is being closed.

    Parameters
    ----------
    force : bool
        When closing, write the last block even if it is smaller than
        blocks are allowed to be.
    """
    if self.mode not in {'wb', 'ab'}:
        raise ValueError('Flush on a file not in write mode')
    if self.closed:
        raise ValueError('Flush on closed file')
    if self.buffer.tell() == 0 and not force:
        # no data in the buffer to write
        return
    if force and self.forced:
        raise ValueError("Force flush cannot be called more than once")
    if force:
        self.forced = True
    if force and not self.offset and self.buffer.tell() <= 5 * 2**20:
        # Small file, nothing sent yet: one simple upload suffices.
        self._simple_upload()
        return
    if not self.offset:
        # First chunk: open the resumable-upload session.
        self._initiate_upload()
    self._upload_chunk(final=force)
def _upload_chunk(self, final=False):
    """Send the buffered bytes to the resumable-upload session.

    When ``final`` the Content-Range carries the total size, telling GCS
    to finalize the object. If the server reports (via the "Range"
    response header) that fewer bytes were stored than sent, the unsent
    tail is kept in the buffer for the next call.

    Bug fix: removed a leftover debugging ``import pdb`` and the
    commented-out ``pdb.set_trace()`` call.
    """
    self.buffer.seek(0)
    data = self.buffer.read()
    head = self.gcsfs.header.copy()
    l = len(data)
    if final:
        if l:
            head['Content-Range'] = 'bytes %i-%i/%i' % (
                self.offset, self.offset + l - 1, self.offset + l)
        else:
            # closing when buffer is empty
            head['Content-Range'] = 'bytes */%i' % self.offset
            data = None
    else:
        head['Content-Range'] = 'bytes %i-%i/*' % (
            self.offset, self.offset + l - 1)
    head.update({'Content-Type': 'application/octet-stream',
                 'Content-Length': str(l)})
    r = requests.post(self.location, params={'uploadType': 'resumable'},
                      headers=head, data=data)
    validate_response(r, self.location)
    if 'Range' in r.headers:
        # Server accepted only part of the chunk: keep the tail buffered.
        shortfall = (self.offset + l - 1) - int(
            r.headers['Range'].split('-')[1])
        if shortfall:
            self.buffer = io.BytesIO(data[-shortfall:])
            self.buffer.seek(shortfall)
        else:
            self.buffer = io.BytesIO()
        self.offset += l - shortfall
    else:
        self.buffer = io.BytesIO()
        self.offset += l
def _initiate_upload(self):
    """Start a resumable upload session; remember its session URL."""
    endpoint = ('https://www.googleapis.com/upload/storage/v1/b/%s/o'
                % quote_plus(self.bucket))
    r = requests.post(endpoint, params={'uploadType': 'resumable'},
                      headers=self.gcsfs.header, json={'name': self.key})
    self.location = r.headers['Location']

def _simple_upload(self):
    """One-shot upload, less than 5MB"""
    self.buffer.seek(0)
    payload = self.buffer.read()
    path = ('https://www.googleapis.com/upload/storage/v1/b/%s/o'
            % quote_plus(self.bucket))
    r = requests.post(path,
                      params={'uploadType': 'media', 'name': self.key},
                      headers=self.gcsfs.header.copy(), data=payload)
    validate_response(r, path)
def _fetch(self, start, end):
    """Ensure bytes [start, end) are present in the read cache.

    Requests are aligned to 5MB boundaries and extended by one blocksize
    of read-ahead; the cache grows at either end as needed.
    """
    # force read to 5MB boundaries
    start = start // (5 * 2**20) * 5 * 2**20
    end = (end // (5 * 2 ** 20) + 1) * 5 * 2 ** 20
    if self.start is None and self.end is None:
        # First read
        self.start = start
        self.end = end + self.blocksize
        self.cache = _fetch_range(self.gcsfs.header, self.details,
                                  start, self.end)
    if start < self.start:
        # Grow the cache backwards to the new start.
        new = _fetch_range(self.gcsfs.header, self.details,
                           start, self.start)
        self.start = start
        self.cache = new + self.cache
    if end > self.end:
        if self.end > self.size:
            # Already cached through EOF; nothing more to fetch.
            return
        # Grow the cache forwards, including read-ahead.
        new = _fetch_range(self.gcsfs.header, self.details,
                           self.end, end + self.blocksize)
        self.end = end + self.blocksize
        self.cache = self.cache + new
def read(self, length=-1):
    """
    Return data from cache, or fetch pieces as necessary

    Parameters
    ----------
    length : int (-1)
        Number of bytes to read; if <0, all remaining bytes.
    """
    if self.mode != 'rb':
        raise ValueError('File not in read mode')
    if self.closed:
        raise ValueError('I/O operation on closed file.')
    if length < 0:
        length = self.size
    self._fetch(self.loc, self.loc + length)
    offset = self.loc - self.start
    out = self.cache[offset:offset + length]
    self.loc += len(out)
    if self.trim:
        # Discard whole cache blocks that have been fully consumed.
        blocks_done = (self.loc - self.start) // self.blocksize - 1
        if blocks_done > 0:
            self.start += self.blocksize * blocks_done
            self.cache = self.cache[self.blocksize * blocks_done:]
    return out
def close(self):
    """Flush pending writes (write mode), drop caches, mark closed."""
    if self.closed:
        return
    if self.mode == 'rb':
        self.cache = None
    else:
        self.flush(force=True)
        self.gcsfs.invalidate_cache(self.bucket)
    self.closed = True
def readable(self):
"""Return whether the GCSFile was opened for reading"""
return self.mode == 'rb'
def seekable(self):
"""Return whether the GCSFile is seekable (only in read mode)"""
return self.readable()
def writable(self):
"""Return whether the GCSFile was opened for writing"""
return self.mode in {'wb', 'ab'}
def __del__(self):
self.close()
def __str__(self):
return "<GCSFile %s/%s>" % (self.bucket, self.key)
__repr__ = __str__
def __enter__(self):
return self
def __exit__(self, *args):
self.close()
def _fetch_range(head, obj_dict, start=None, end=None):
    """ Get data from GCS

    head : dict
        Contains authorization header
    obj_dict : an entry from ls() or info()
    start, end : None or integers
        if not both None, fetch only given range
    """
    if DEBUG:
        print('Fetch: ', start, end)
    # Bug fix: the original used "{}" placeholders (logging interpolates
    # %-style, so nothing was substituted) and supplied four slots for
    # three arguments.
    logger.debug("Fetch: %s, %s-%s", obj_dict['name'], start, end)
    if start is not None or end is not None:
        start = start or 0
        end = end or 0
        # Copy before mutating: the caller's header dict must not gain
        # a Range entry.
        head = head.copy()
        head['Range'] = 'bytes=%i-%i' % (start, end - 1)
    back = requests.get(obj_dict['mediaLink'], headers=head)
    data = back.content
    if data == b'Request range not satisfiable':
        return b''
    return data
def put_object(credentials, bucket, name, data):
    """ Simple put, up to 5MB of data

    credentials : from auth()
    bucket : string
    name : object name
    data : binary
    """
    out = requests.post('https://www.googleapis.com/upload/storage/'
                        'v1/b/%s/o?uploadType=media&name=%s' % (
                            quote_plus(bucket), quote_plus(name)),
                        headers={'Authorization': 'Bearer ' +
                                 credentials.access_token,
                                 'Content-Type': 'application/octet-stream',
                                 # Bug fix: header values must be strings;
                                 # an int here breaks newer requests.
                                 'Content-Length': str(len(data))},
                        data=data)
    assert out.status_code == 200
def ensure_writable(b):
    """On Python 2, convert array.array payloads to bytes; otherwise
    pass the argument through unchanged."""
    return b.tostring() if PY2 and isinstance(b, array.array) else b
Enable cloud token auth: support token='cloud', fetching credentials from the GCE instance metadata service.
# -*- coding: utf-8 -*-
"""
Google Cloud Storage pythonic interface
"""
from __future__ import print_function
import array
import io
import json
import logging
import oauth2client.client # version 1.5.2
import os
import pickle
import re
import requests
import sys
import time
import warnings
import webbrowser
from .utils import read_block
# True when running under Python 2 (this module supports 2 and 3).
PY2 = sys.version_info.major == 2

logger = logging.getLogger(__name__)

# OAuth client registration for the device flow. "Not secret" by design:
# installed-application client secrets cannot be kept confidential.
not_secret = {"client_id": "586241054156-is96mugvl2prnj0ib5gsg1l3q9m9jp7p."
                           "apps.googleusercontent.com",
              "client_secret": "_F-W4r2HzuuoPvi6ROeaUB6o"}

# On-disk cache of refresh tokens, keyed by (project, access).
tfile = os.path.join(os.path.expanduser("~"), '.gcs_tokens')

# Canned ACL names accepted for objects (ACLs) and buckets (bACLs).
ACLs = {"authenticatedread", "bucketownerfullcontrol", "bucketownerread",
        "private", "projectprivate", "publicread"}
bACLs = {"authenticatedRead", "private", "projectPrivate", "publicRead",
         "publicReadWrite"}
DEFAULT_PROJECT = os.environ.get('GCSFS_DEFAULT_PROJECT', '')
DEBUG = False

if PY2:
    # Python 2 lacks FileNotFoundError; alias the closest builtin.
    FileNotFoundError = IOError
def quote_plus(s):
    """
    Convert some URL elements to be HTTP-safe.

    Not the same as in urllib, because, for instance, parentheses and commas
    are passed through.

    Parameters
    ----------
    s: input URL/portion

    Returns
    -------
    corrected URL
    """
    # Only '/' and ' ' need escaping for the storage API paths used here.
    return s.replace('/', '%2F').replace(' ', '%20')
def split_path(path):
    """
    Normalise GCS path string into bucket and key.

    Parameters
    ----------
    path : string
        Input path, like `gcs://mybucket/path/to/file`

    Examples
    --------
    >>> split_path("gcs://mybucket/path/to/file")
    ['mybucket', 'path/to/file']
    """
    # Drop any recognised protocol prefixes.
    for protocol in ('gcs://', 'gs://'):
        if path.startswith(protocol):
            path = path[len(protocol):]
    if '/' in path:
        return path.split('/', 1)
    return path, ""
def validate_response(r, path):
    """
    Check the requests object r, raise error if it's not ok.

    Parameters
    ----------
    r: requests response object
    path: associated URL path, for error messages
    """
    if r.ok:
        return
    msg = str(r.content)
    if DEBUG:
        print(r.url, r.headers, sep='\n')
    # Map well-known error texts onto idiomatic Python exceptions.
    if "Not Found" in msg:
        raise FileNotFoundError(path)
    elif "forbidden" in msg:
        raise IOError("Forbidden: %s" % path)
    elif "invalid" in msg:
        raise ValueError("Bad Request: %s" % path)
    else:
        raise RuntimeError(msg)
class GCSFileSystem(object):
    """
    Connect to Google Cloud Storage.

    Two modes of authentication are supported:
    - if ``token=None``, you will be given a "device code", which you must
      enter into a browser where you are logged in with your Google identity.
    - if ``token='cloud'``, we assume we are running within google compute
      or google container engine, and query the internal metadata directly for
      a token.
    - you may supply a token generated by the
      [gcloud](https://cloud.google.com/sdk/docs/)
      utility; this is either a python dictionary, or the name of a file
      containing the JSON returned by logging in with the gcloud CLI tool. On
      a posix system this may be at
      ``~/.config/gcloud/application_default_credentials.json``

    We maintain a cache of refresh tokens in the file ~/.gcs_tokens, so for any
    pair of (project, access), you will not need to log in once your credentials
    are verified.

    Parameters
    ----------
    project : string
        GCS users may only access to contents of one project in a single
        instance of GCSFileSystem. This is required in order
        to list all the buckets you have access to within a project.
    access : one of {'read_only', 'read_write', 'full_control'}
        Full control implies read/write as well as modifying metadata,
        e.g., access control.
    token: None, dict or string
        (see description of authentication methods, above)
    """
    # Permitted values for the ``access`` constructor argument.
    scopes = {'read_only', 'read_write', 'full_control'}
    # Number of 2-second polls while waiting for browser authentication.
    retries = 10
    # Root URL of the storage JSON API.
    base = "https://www.googleapis.com/storage/v1/"
    # Most recently created instance; returned by current().
    _singleton = [None]
    default_block_size = 5 * 2**20
def __init__(self, project=DEFAULT_PROJECT, access='full_control',
             token=None, block_size=None):
    """Validate arguments, connect, and register as the current instance.

    See the class docstring for parameter descriptions.
    """
    if access not in self.scopes:
        # Bug fix: the message and the scopes were passed as two separate
        # ValueError arguments instead of being formatted, so the valid
        # values never appeared in the error text.
        raise ValueError('access must be one of {}'.format(self.scopes))
    if project is None:
        warnings.warn('GCS project not set - cannot list or create buckets')
    self.input_token = token
    if block_size is not None:
        self.default_block_size = block_size
    self.project = project
    self.access = access
    self.dirs = {}
    self.connect()
    self._singleton[0] = self
@classmethod
def current(cls):
""" Return the most recently created GCSFileSystem
If no GCSFileSystem has been created, then create one
"""
if not cls._singleton[0]:
return GCSFileSystem()
else:
return cls._singleton[0]
@staticmethod
def _parse_gtoken(gt):
if isinstance(gt, str):
t = json.load(open(gt))
else:
t = gt.copy()
typ = t.pop('type')
if typ != "authorized_user":
raise ValueError("Only 'authorized_user' tokens accepted, got: %s"
% typ)
t['grant_type'] = 'refresh_token'
t['timestamp'] = time.time()
t['expires_in'] = 0
return t
@staticmethod
def load_tokens():
try:
with open(tfile, 'rb') as f:
tokens = pickle.load(f)
except Exception:
tokens = {}
GCSFileSystem.tokens = tokens
def connect(self, refresh=False):
    """
    Establish session token. A new token will be requested if the current
    one is within 100s of expiry.

    Parameters
    ----------
    refresh: bool (False)
        Force refresh, even if the token is expired.
    """
    token = self.input_token
    project, access = self.project, self.access
    if token is not None:
        # Explicit token supplied: normalise gcloud-style tokens (a dict
        # with a 'type' field, or a JSON filename) and cache them.
        if 'type' in token or isinstance(token, str):
            token = self._parse_gtoken(token)
        self.tokens[(project, access)] = token
    if (project, access) in self.tokens:
        # cached credentials
        data = self.tokens[(project, access)]
    elif token == 'cloud':
        # Running inside GCE/GKE: synthesise an already-expired entry so
        # the refresh branch below queries the metadata server right away.
        data = {'timestamp': time.time() - 3600, 'expires_in': 1,
                'type': 'cloud'}
    else:
        # no credentials - try to ask google in the browser
        scope = "https://www.googleapis.com/auth/devstorage." + access
        path = 'https://accounts.google.com/o/oauth2/device/code'
        r = requests.post(path,
                          params={'client_id': not_secret['client_id'],
                                  'scope': scope})
        validate_response(r, path)
        data = json.loads(r.content.decode())
        print('Enter the following code when prompted in the browser:')
        print(data['user_code'])
        webbrowser.open(data['verification_url'])
        # Poll until the user completes the browser flow (or give up).
        for i in range(self.retries):
            time.sleep(2)
            r = requests.post(
                "https://www.googleapis.com/oauth2/v4/token",
                params={'client_id': not_secret['client_id'],
                        'client_secret': not_secret['client_secret'],
                        'code': data['device_code'],
                        'grant_type':
                        "http://oauth.net/grant_type/device/1.0"})
            data2 = json.loads(r.content.decode())
            if 'error' in data2:
                if i == self.retries - 1:
                    raise RuntimeError("Waited too long for browser"
                                       "authentication.")
                continue
            data = data2
            break
        data['timestamp'] = time.time()
        data.update(not_secret)
    if refresh or time.time() - data['timestamp'] > data['expires_in'] - 100:
        # token has expired, or is about to - call refresh
        if data.get('type', None) == 'cloud':
            # On GCE, fresh tokens come from the metadata service.
            r = requests.get(
                'http://metadata.google.internal/computeMetadata/v1/'
                'instance/service-accounts/default/token',
                headers={'Metadata-Flavor': 'Google'})
            data = r.json()
            data['timestamp'] = time.time()
            data['type'] = 'cloud'
        else:
            path = "https://www.googleapis.com/oauth2/v4/token"
            r = requests.post(
                path,
                params={'client_id': data['client_id'],
                        'client_secret': data['client_secret'],
                        'refresh_token': data['refresh_token'],
                        'grant_type': "refresh_token"})
            validate_response(r, path)
            data['timestamp'] = time.time()
            data['access_token'] = r.json()['access_token']
    self.tokens[(project, access)] = data
    self.header = {'Authorization': 'Bearer ' + data['access_token']}
    self._save_tokens()
@staticmethod
def _save_tokens():
    """Best-effort persist of the class-level token cache to ``tfile``."""
    try:
        f = open(tfile, 'wb')
        try:
            pickle.dump(GCSFileSystem.tokens, f, 2)
        finally:
            f.close()
    except Exception as e:
        warnings.warn('Saving token cache failed: ' + str(e))

def _call(self, method, path, *args, **kwargs):
    """Issue one REST request against the storage API.

    Keyword parameters whose value is None are dropped, positional args
    are URL-quoted into ``path``, and the decoded JSON body (or the raw
    bytes when the response is not JSON) is returned. HTTP errors raise
    via validate_response.
    """
    # Only pass parameters that have values.
    params = {k: v for k, v in kwargs.items() if v is not None}
    payload = params.pop('json', None)
    if args:
        path = path.format(*[quote_plus(p) for p in args])
    requester = getattr(requests, method)
    r = requester(self.base + path, headers=self.header, params=params,
                  json=payload)
    try:
        out = r.json()
    except ValueError:
        out = r.content
    validate_response(r, path)
    return out
def _list_buckets(self):
if '' not in self.dirs:
out = self._call('get', 'b/', project=self.project)
dirs = out.get('items', [])
self.dirs[''] = dirs
return self.dirs['']
def _list_bucket(self, bucket):
if bucket not in self.dirs:
out = self._call('get', 'b/{}/o/', bucket)
dirs = out.get('items', [])
for f in dirs:
f['name'] = '%s/%s' % (bucket, f['name'])
f['size'] = int(f.get('size'), 0)
self.dirs[bucket] = dirs
return self.dirs[bucket]
def mkdir(self, bucket, acl='projectPrivate',
          default_acl='bucketOwnerFullControl'):
    """
    New bucket

    Parameters
    ----------
    bucket: str
        bucket name
    acl: string, one of bACLs
        access for the bucket itself
    default_acl: str, one of ACLs
        default ACL for objects created in this bucket
    """
    payload = {"name": bucket}
    self._call('post', 'b/', predefinedAcl=acl, project=self.project,
               predefinedDefaultObjectAcl=default_acl, json=payload)
    # Both the bucket's own listing and the bucket list are now stale.
    for stale in (bucket, ''):
        self.invalidate_cache(stale)

def rmdir(self, bucket):
    """Delete an empty bucket and drop it from the cached bucket list."""
    self._call('delete', 'b/' + bucket)
    # Remove the entry in place so existing references see the change.
    for entry in list(self.dirs.get('', [])):
        if entry['name'] == bucket:
            self.dirs[''].remove(entry)
    self.invalidate_cache(bucket)

def invalidate_cache(self, bucket=None):
    """
    Mark files cache as dirty, so that it is reloaded on next use.

    Parameters
    ----------
    bucket: string or None
        If None, clear all files cached; if a string, clear the files
        corresponding to that bucket.
    """
    if bucket is None or bucket in ('', '/'):
        self.dirs.clear()
    else:
        self.dirs.pop(bucket, None)
def ls(self, path, detail=False):
    """List one "directory" level: direct child objects plus synthesised
    pseudo-directory entries. '' or '/' lists the buckets themselves.

    With ``detail`` the full metadata dicts are returned, otherwise names.
    """
    if path in ['', '/']:
        out = self._list_buckets()
    else:
        bucket, prefix = split_path(path)
        path = '/'.join([bucket, prefix])
        files = self._list_bucket(bucket)
        # seek: prefix a child's name must start with; l: its length;
        # bit: separator spliced into synthesised directory names.
        seek, l, bit = (path, len(path), '') if path.endswith('/') else (
            path+'/', len(path)+1, '/')
        out = []
        for f in files:
            if (f['name'].startswith(seek) and '/' not in f['name'][l:] or
                    f['name'] == path):
                # Direct child object (or exactly the requested path).
                out.append(f)
            elif f['name'].startswith(seek) and '/' in f['name'][l:]:
                # Deeper object: surface its first path component as a
                # zero-size pseudo-directory entry (deduplicated).
                directory = {
                    'bucket': bucket, 'kind': 'storage#object',
                    'size': 0, 'storageClass': 'DIRECTORY',
                    'name': path+bit+f['name'][l:].split('/', 1)[0]+'/'}
                if directory not in out:
                    out.append(directory)
    if detail:
        return out
    else:
        return [f['name'] for f in out]
def walk(self, path, detail=False):
    """Recursively list every object at or below ``path``."""
    bucket, prefix = split_path(path)
    if not bucket:
        raise ValueError('Cannot walk all of GCS')
    path = '/'.join([bucket, prefix])
    listing = self._list_bucket(bucket)
    # A trailing slash denotes a directory; otherwise require either an
    # exact match or a child separated by '/'.
    prefix_match = path if path.endswith('/') else path + '/'
    hits = [f for f in listing
            if f['name'].startswith(prefix_match) or f['name'] == path]
    if detail:
        return hits
    return [f['name'] for f in hits]

def du(self, path, total=False, deep=False):
    """Sizes of objects under ``path``; a grand total when ``total``."""
    entries = self.walk(path, True) if deep else self.ls(path, True)
    if total:
        return sum(f['size'] for f in entries)
    return {f['name']: f['size'] for f in entries}
def glob(self, path):
    """
    Find files by glob-matching.

    Note that the bucket part of the path must not contain a "*"
    """
    path = path.rstrip('/')
    bucket, key = split_path(path)
    path = '/'.join([bucket, key])
    if "*" in bucket:
        raise ValueError('Bucket cannot contain a "*"')
    if '*' not in path:
        # No wildcard: treat the path as a directory, match its contents.
        path = path.rstrip('/') + '/*'
    # Walk from the deepest wildcard-free prefix to limit the listing.
    if '/' in path[:path.index('*')]:
        ind = path[:path.index('*')].rindex('/')
        root = path[:ind + 1]
    else:
        root = ''
    allfiles = self.walk(root)
    # Translate the glob to a regex; "*" must not cross "/" boundaries.
    # NOTE(review): "." in the path is not escaped, so it matches any
    # character — presumably acceptable for bucket listings.
    pattern = re.compile("^" + path.replace('//', '/')
                         .rstrip('/')
                         .replace('*', '[^/]*')
                         .replace('?', '.') + "$")
    out = [f for f in allfiles if re.match(pattern,
           f.replace('//', '/').rstrip('/'))]
    return out
def exists(self, path):
    """Whether a bucket (bare name) or object exists."""
    bucket, key = split_path(path)
    try:
        if not key:
            return bucket in self.ls('')
        return bool(self.info(path))
    except FileNotFoundError:
        return False

def info(self, path):
    """Metadata dict of one object; FileNotFoundError when absent."""
    path = '/'.join(split_path(path))
    matches = [f for f in self.ls(path, True) if f['name'] == path]
    if not matches:
        raise FileNotFoundError(path)
    return matches[0]
def url(self, path):
    """Media (download) URL for an object."""
    return self.info(path)['mediaLink']

def cat(self, path):
    """ Simple one-shot get of file data """
    return _fetch_range(self.header, self.info(path))
def get(self, rpath, lpath, blocksize=5 * 2 ** 20):
    """Download remote object ``rpath`` to local file ``lpath``."""
    with self.open(rpath, 'rb', block_size=blocksize) as src:
        with open(lpath, 'wb') as dst:
            while True:
                chunk = src.read(blocksize)
                if not chunk:
                    break
                dst.write(chunk)

def put(self, lpath, rpath, blocksize=5 * 2 ** 20, acl=None):
    """Upload local file ``lpath`` to remote object ``rpath``."""
    with self.open(rpath, 'wb', block_size=blocksize, acl=acl) as dst:
        with open(lpath, 'rb') as src:
            while True:
                chunk = src.read(blocksize)
                if not chunk:
                    break
                dst.write(chunk)
def head(self, path, size=1024):
    """First ``size`` bytes of a file."""
    with self.open(path, 'rb') as f:
        return f.read(size)

def tail(self, path, size=1024):
    """Last ``size`` bytes of a file (whole file if shorter)."""
    if size > self.info(path)['size']:
        return self.cat(path)
    with self.open(path, 'rb') as f:
        f.seek(-size, 2)
        return f.read()
def merge(self, path, paths, acl=None):
    """Concatenate objects within a single bucket"""
    bucket, key = split_path(path)
    sources = [{'name': split_path(p)[1]} for p in paths]
    body = {'sourceObjects': sources,
            "kind": "storage#composeRequest",
            'destination': {'name': key, 'bucket': bucket}}
    self._call('post', 'b/{}/o/{}/compose', bucket, key,
               destinationPredefinedAcl=acl, json=body)

def copy(self, path1, path2, acl=None):
    """Server-side copy of one object to another location."""
    src_bucket, src_key = split_path(path1)
    dst_bucket, dst_key = split_path(path2)
    self._call('post', 'b/{}/o/{}/copyTo/b/{}/o/{}',
               src_bucket, src_key, dst_bucket, dst_key,
               destinationPredefinedAcl=acl)

def mv(self, path1, path2, acl=None):
    """Move an object: copy, then delete the source."""
    self.copy(path1, path2, acl)
    self.rm(path1)

def rm(self, path):
    """Delete an object and invalidate its bucket's listing cache."""
    bucket, key = split_path(path)
    self._call('delete', "b/{}/o/{}", bucket, key)
    self.invalidate_cache(bucket)
def open(self, path, mode='rb', block_size=None, acl=None):
    """Open a remote object; returns a file-like GCSFile.

    Bug fix: ``acl`` was accepted but silently dropped; it is now
    forwarded to GCSFile so uploads can apply a canned ACL.
    """
    if block_size is None:
        block_size = self.default_block_size
    return GCSFile(self, path, mode, block_size, acl=acl)

def touch(self, path):
    """Create an empty object at ``path``."""
    with self.open(path, 'wb'):
        pass
def read_block(self, fn, offset, length, delimiter=None):
    """ Read a block of bytes from a GCS file

    Starting at ``offset`` of the file, read ``length`` bytes. If
    ``delimiter`` is set then we ensure that the read starts and stops at
    delimiter boundaries that follow the locations ``offset`` and ``offset
    + length``. If ``offset`` is zero then we start at zero. The
    bytestring returned WILL include the end delimiter string.

    If offset+length is beyond the eof, reads to eof.

    Parameters
    ----------
    fn: string
        Path to filename on GCS
    offset: int
        Byte offset to start read
    length: int
        Number of bytes to read
    delimiter: bytes (optional)
        Ensure reading starts and stops at delimiter bytestring

    Examples
    --------
    >>> gcs.read_block('data/file.csv', 0, 13)  # doctest: +SKIP
    b'Alice, 100\\nBo'
    >>> gcs.read_block('data/file.csv', 0, 13, delimiter=b'\\n')  # doctest: +SKIP
    b'Alice, 100\\nBob, 200\\n'

    Use ``length=None`` to read to the end of the file.
    >>> gcs.read_block('data/file.csv', 0, None, delimiter=b'\\n')  # doctest: +SKIP
    b'Alice, 100\\nBob, 200\\nCharlie, 300'

    See Also
    --------
    distributed.utils.read_block
    """
    with self.open(fn, 'rb') as f:
        size = f.size
        if length is None:
            length = size
        if offset + length > size:
            # Clamp the read to end-of-file.
            length = size - offset
        # This resolves to the module-level utility imported from .utils,
        # not to this method (methods are not in the function's scope).
        bytes = read_block(f, offset, length, delimiter)
    return bytes
def __getstate__(self):
d = self.__dict__.copy()
del d['header']
logger.debug("Serialize with state: %s", d)
return d
def __setstate__(self, state):
self.__dict__.update(state)
self.connect()
# Populate GCSFileSystem.tokens from the on-disk cache at import time.
GCSFileSystem.load_tokens()
class GCSFile:
    """File-like object for reading or writing a single GCS object."""
    def __init__(self, gcsfs, path, mode='rb', block_size=5 * 2 ** 20,
                 acl=None):
        # Split "bucket/key" once; both parts are needed for API URLs.
        bucket, key = split_path(path)
        self.gcsfs = gcsfs
        self.bucket = bucket
        self.key = key
        self.mode = mode
        self.blocksize = block_size
        self.cache = b""      # read cache covering bytes [start, end)
        self.loc = 0          # current logical file offset
        self.acl = acl
        self.end = None
        self.start = None
        self.closed = False
        self.trim = True      # drop fully-consumed cache blocks in read()
        if mode not in {'rb', 'wb'}:
            raise NotImplementedError('File mode not supported')
        if mode == 'rb':
            # Reading needs the object's metadata (size, mediaLink).
            self.details = gcsfs.info(path)
            self.size = self.details['size']
        else:
            # 2**18 is enforced as a floor for upload chunks — presumably
            # a resumable-upload requirement; TODO confirm against API docs.
            if block_size < 2**18:
                warnings.warn('Setting block size to minimum value, 2**18')
                self.blocksize = 2**18
            self.buffer = io.BytesIO()
            self.offset = 0        # bytes already sent to GCS
            self.forced = False    # whether a final (force) flush happened
def info(self):
    """Metadata dict for this file, as returned by ls()/info()."""
    return self.details

def url(self):
    """The object's media (download) link."""
    return self.details['mediaLink']

def tell(self):
    """Current logical byte offset within the file."""
    return self.loc
def seek(self, loc, whence=0):
    """ Set current file location

    Parameters
    ----------
    loc : int
        byte location
    whence : {0, 1, 2}
        from start of file, current location or end of file, resp.
    """
    if self.mode != 'rb':
        raise ValueError('Seek only available in read mode')
    # Map each whence value to the origin it is relative to.
    origins = {0: 0, 1: self.loc, 2: self.size}
    if whence not in origins:
        raise ValueError(
            "invalid whence (%s, should be 0, 1 or 2)" % whence)
    target = origins[whence] + loc
    if target < 0:
        raise ValueError('Seek before start of file')
    self.loc = target
    return self.loc
def readline(self, length=-1):
"""
Read and return a line from the stream.
If length is specified, at most size bytes will be read.
"""
self._fetch(self.loc, self.loc + 1)
while True:
found = self.cache[self.loc - self.start:].find(b'\n') + 1
if 0 < length < found:
return self.read(length)
if found:
return self.read(found)
if self.end > self.size:
return self.read(length)
self._fetch(self.start, self.end + self.blocksize)
def __next__(self):
data = self.readline()
if data:
return data
else:
raise StopIteration
next = __next__
def __iter__(self):
return self
def readlines(self):
""" Return all lines in a file as a list """
return list(self)
def write(self, data):
"""
Write data to buffer.
Buffer only sent to GCS on flush() or if buffer is greater than
or equal to blocksize.
Parameters
----------
data : bytes
Set of bytes to be written.
"""
if self.mode not in {'wb', 'ab'}:
raise ValueError('File not in write mode')
if self.closed:
raise ValueError('I/O operation on closed file.')
out = self.buffer.write(ensure_writable(data))
self.loc += out
if self.buffer.tell() >= self.blocksize:
self.flush()
return out
def flush(self, force=False):
"""
Write buffered data to GCS.
Uploads the current buffer, if it is larger than the block-size, or if
the file is being closed.
Parameters
----------
force : bool
When closing, write the last block even if it is smaller than
blocks are allowed to be.
"""
if self.mode not in {'wb', 'ab'}:
raise ValueError('Flush on a file not in write mode')
if self.closed:
raise ValueError('Flush on closed file')
if self.buffer.tell() == 0 and not force:
# no data in the buffer to write
return
if force and self.forced:
raise ValueError("Force flush cannot be called more than once")
if force:
self.forced = True
if force and not self.offset and self.buffer.tell() <= 5 * 2**20:
self._simple_upload()
return
if not self.offset:
self._initiate_upload()
self._upload_chunk(final=force)
def _upload_chunk(self, final=False):
self.buffer.seek(0)
data = self.buffer.read()
head = self.gcsfs.header.copy()
l = len(data)
if final:
if l:
head['Content-Range'] = 'bytes %i-%i/%i' % (
self.offset, self.offset + l - 1, self.offset + l)
else:
# closing when buffer is empty
head['Content-Range'] = 'bytes */%i' % self.offset
data = None
else:
head['Content-Range'] = 'bytes %i-%i/*' % (
self.offset, self.offset + l - 1)
head.update({'Content-Type': 'application/octet-stream',
'Content-Length': str(l)})
r = requests.post(self.location, params={'uploadType': 'resumable'},
headers=head, data=data)
validate_response(r, self.location)
if 'Range' in r.headers:
shortfall = (self.offset + l - 1) - int(
r.headers['Range'].split('-')[1])
if shortfall:
self.buffer = io.BytesIO(data[-shortfall:])
self.buffer.seek(shortfall)
else:
self.buffer = io.BytesIO()
import pdb
# pdb.set_trace()
self.offset += l - shortfall
else:
self.buffer = io.BytesIO()
self.offset += l
def _initiate_upload(self):
r = requests.post('https://www.googleapis.com/upload/storage/v1/b/%s/o'
% quote_plus(self.bucket),
params={'uploadType': 'resumable'},
headers=self.gcsfs.header, json={'name': self.key})
self.location = r.headers['Location']
def _simple_upload(self):
"""One-shot upload, less than 5MB"""
head = self.gcsfs.header.copy()
self.buffer.seek(0)
data = self.buffer.read()
path = ('https://www.googleapis.com/upload/storage/v1/b/%s/o'
% quote_plus(self.bucket))
r = requests.post(path,
params={'uploadType': 'media', 'name': self.key},
headers=head, data=data)
validate_response(r, path)
def _fetch(self, start, end):
# force read to 5MB boundaries
start = start // (5 * 2**20) * 5 * 2**20
end = (end // (5 * 2 ** 20) + 1) * 5 * 2 ** 20
if self.start is None and self.end is None:
# First read
self.start = start
self.end = end + self.blocksize
self.cache = _fetch_range(self.gcsfs.header, self.details,
start, self.end)
if start < self.start:
new = _fetch_range(self.gcsfs.header, self.details,
start, self.start)
self.start = start
self.cache = new + self.cache
if end > self.end:
if self.end > self.size:
return
new = _fetch_range(self.gcsfs.header, self.details,
self.end, end + self.blocksize)
self.end = end + self.blocksize
self.cache = self.cache + new
def read(self, length=-1):
"""
Return data from cache, or fetch pieces as necessary
Parameters
----------
length : int (-1)
Number of bytes to read; if <0, all remaining bytes.
"""
if self.mode != 'rb':
raise ValueError('File not in read mode')
if length < 0:
length = self.size
if self.closed:
raise ValueError('I/O operation on closed file.')
self._fetch(self.loc, self.loc + length)
out = self.cache[self.loc - self.start:
self.loc - self.start + length]
self.loc += len(out)
if self.trim:
num = (self.loc - self.start) // self.blocksize - 1
if num > 0:
self.start += self.blocksize * num
self.cache = self.cache[self.blocksize * num:]
return out
def close(self):
""" Close file """
if self.closed:
return
if self.mode == 'rb':
self.cache = None
else:
self.flush(force=True)
self.gcsfs.invalidate_cache(self.bucket)
self.closed = True
def readable(self):
"""Return whether the GCSFile was opened for reading"""
return self.mode == 'rb'
def seekable(self):
"""Return whether the GCSFile is seekable (only in read mode)"""
return self.readable()
def writable(self):
"""Return whether the GCSFile was opened for writing"""
return self.mode in {'wb', 'ab'}
def __del__(self):
self.close()
def __str__(self):
return "<GCSFile %s/%s>" % (self.bucket, self.key)
__repr__ = __str__
def __enter__(self):
return self
def __exit__(self, *args):
self.close()
def _fetch_range(head, obj_dict, start=None, end=None):
    """ Get data from GCS

    head : dict
        Contains authorization header
    obj_dict : an entry from ls() or info()
    start, end : None or integers
        if not both None, fetch only given range
    """
    if DEBUG:
        print('Fetch: ', start, end)
    # BUG FIX: logging uses %-style lazy formatting; the original '{}'
    # placeholders were never interpolated (and there were four of them for
    # three arguments).
    logger.debug("Fetch: %s, %s-%s", obj_dict['name'], start, end)
    if start is not None or end is not None:
        start = start or 0
        end = end or 0
        head = head.copy()
        # HTTP Range header is inclusive at both ends, hence end - 1
        head['Range'] = 'bytes=%i-%i' % (start, end - 1)
    back = requests.get(obj_dict['mediaLink'], headers=head)
    data = back.content
    if data == b'Request range not satisfiable':
        # requested range beyond EOF: treat as empty read
        return b''
    return data
def put_object(credentials, bucket, name, data):
    """ Simple put, up to 5MB of data

    credentials : from auth()
    bucket : string
    name : object name
    data : binary

    Raises RuntimeError if the upload is not accepted.
    """
    out = requests.post('https://www.googleapis.com/upload/storage/'
                        'v1/b/%s/o?uploadType=media&name=%s' % (
                            quote_plus(bucket), quote_plus(name)),
                        headers={'Authorization': 'Bearer ' +
                                 credentials.access_token,
                                 'Content-Type': 'application/octet-stream',
                                 # BUG FIX: header values must be strings;
                                 # passing the raw int breaks header encoding
                                 'Content-Length': str(len(data))}, data=data)
    # an assert would be stripped under ``python -O``; raise explicitly
    if out.status_code != 200:
        raise RuntimeError('PUT failed with status %s' % out.status_code)
def ensure_writable(b):
    """Return *b* in a form requests can send: on Python 2, ``array.array``
    lacks the needed buffer interface, so convert it to a byte string."""
    needs_conversion = PY2 and isinstance(b, array.array)
    return b.tostring() if needs_conversion else b
|
"""constants for YesssSMS."""
VERSION = "0.3.1a3"
_UNSUPPORTED_CHARS_STRING = "<strong>Achtung:</strong> Ihre SMS konnte nicht \
versendet werden, da sie folgende ungültige Zeichen enthält:"
_LOGIN_ERROR_STRING = "<strong>Login nicht erfolgreich"
_LOGIN_LOCKED_MESS = "Wegen 3 ungültigen Login-Versuchen ist Ihr Account für \
eine Stunde gesperrt."
_LOGIN_LOCKED_MESS_ENG = "because of 3 failed login-attempts, your account \
has been suspended for one hour"
_UNSUPPORTED_CHARS_STRING = "<strong>Achtung:</strong> Ihre SMS konnte nicht \
versendet werden, da sie folgende ungültige Zeichen enthält:"
_SMS_SENDING_SUCCESSFUL_STRING = ">Ihre SMS wurde erfolgreich verschickt!<"
# <div class='alert alert-warning'>Lieber yesss! Kunde,<br /><br />Ihre Karte \
# wurde deaktiviert, da Sie innerhalb der letzten 12 Monate nicht mehr \
# aufgeladen haben. Bitte laden Sie zur Aktivierung Ihrer SIM-Karte Ihr \
# Guthaben wieder auf, da andernfalls in Kürze die Rufnummer gelöscht wird.\
# <br /><br />Ihr yesss! Team</div>
_ACCOUNT_LOCKED_WARNING = ">Ihre Karte wurde deaktiviert, da Sie innerhalb \
der letzten 12 Monate nicht mehr aufgeladen haben."
HELP = {'to_help': 'Recipient phone number in the format: +436601234567',
'desc': 'Send an SMS via the yesss.at website',
'configfile': "Path of a config-file. Default paths are: \
'/etc/yessssms.conf' and '~/.config/yessssms.conf'. \
An example file is yessssms_sample.conf.",
'login': 'Your phone number (eg. 06501234567), used to login at \
yesss.at',
'password': """Your password, it\'s not recommended to use this. \
Use a config-file instead (see: -c, --configfile).""",
'message': 'Message to be sent by SMS',
'version': 'print version information.',
'test': 'send a test message to yourself',
'print-config-file': 'prints a sample config file, that can be piped \
into eg. ~/.config/yessssms.conf.',
}
CONFIG_FILE_CONTENT = """[YESSS_AT]
YESSS_LOGIN = 06501234567
YESSS_PASSWD = mySecretPassword
# you can define a default recipient (will be overridden by -t option)
# YESSS_TO = +43664123123123
"""
CONFIG_FILE_PATHS = ["/etc/yessssms.conf",
"~/.config/yessssms.conf",
]
version bump
"""constants for YesssSMS."""
VERSION = "0.3.1a4"
_UNSUPPORTED_CHARS_STRING = "<strong>Achtung:</strong> Ihre SMS konnte nicht \
versendet werden, da sie folgende ungültige Zeichen enthält:"
_LOGIN_ERROR_STRING = "<strong>Login nicht erfolgreich"
_LOGIN_LOCKED_MESS = "Wegen 3 ungültigen Login-Versuchen ist Ihr Account für \
eine Stunde gesperrt."
_LOGIN_LOCKED_MESS_ENG = "because of 3 failed login-attempts, your account \
has been suspended for one hour"
_UNSUPPORTED_CHARS_STRING = "<strong>Achtung:</strong> Ihre SMS konnte nicht \
versendet werden, da sie folgende ungültige Zeichen enthält:"
_SMS_SENDING_SUCCESSFUL_STRING = ">Ihre SMS wurde erfolgreich verschickt!<"
# <div class='alert alert-warning'>Lieber yesss! Kunde,<br /><br />Ihre Karte \
# wurde deaktiviert, da Sie innerhalb der letzten 12 Monate nicht mehr \
# aufgeladen haben. Bitte laden Sie zur Aktivierung Ihrer SIM-Karte Ihr \
# Guthaben wieder auf, da andernfalls in Kürze die Rufnummer gelöscht wird.\
# <br /><br />Ihr yesss! Team</div>
_ACCOUNT_LOCKED_WARNING = ">Ihre Karte wurde deaktiviert, da Sie innerhalb \
der letzten 12 Monate nicht mehr aufgeladen haben."
HELP = {'to_help': 'Recipient phone number in the format: +436601234567',
'desc': 'Send an SMS via the yesss.at website',
'configfile': "Path of a config-file. Default paths are: \
'/etc/yessssms.conf' and '~/.config/yessssms.conf'. \
An example file is yessssms_sample.conf.",
'login': 'Your phone number (eg. 06501234567), used to login at \
yesss.at',
'password': """Your password, it\'s not recommended to use this. \
Use a config-file instead (see: -c, --configfile).""",
'message': 'Message to be sent by SMS',
'version': 'print version information.',
'test': 'send a test message to yourself',
'print-config-file': 'prints a sample config file, that can be piped \
into eg. ~/.config/yessssms.conf.',
}
CONFIG_FILE_CONTENT = """[YESSS_AT]
YESSS_LOGIN = 06501234567
YESSS_PASSWD = mySecretPassword
# you can define a default recipient (will be overridden by -t option)
# YESSS_TO = +43664123123123
"""
CONFIG_FILE_PATHS = ["/etc/yessssms.conf",
"~/.config/yessssms.conf",
]
|
# -*- coding: utf-8 -*-
"""
gspread.utils
~~~~~~~~~~~~~
This module contains utility functions.
"""
import re
from functools import wraps
from collections import defaultdict
from itertools import chain
from xml.etree import ElementTree
from .exceptions import IncorrectCellLabel, NoValidUrlKeyFound
MAGIC_NUMBER = 64
CELL_ADDR_RE = re.compile(r'([A-Za-z]+)([1-9]\d*)')
URL_KEY_V1_RE = re.compile(r'key=([^&#]+)')
URL_KEY_V2_RE = re.compile(r'/spreadsheets/d/([a-zA-Z0-9-_]+)')
def finditem(func, seq):
    """Return the first element of *seq* for which ``func(element)`` is true.

    Raises StopIteration when no element matches (original behavior).
    """
    for candidate in seq:
        if func(candidate):
            return candidate
    raise StopIteration
# http://stackoverflow.com/questions/749796/pretty-printing-xml-in-python
# http://effbot.org/zone/element-lib.htm#prettyprint
def _indent(elem, level=0):
    # Pretty-print helper (effbot recipe): rewrites the whitespace-only
    # .text/.tail of every node in place so that serializing `elem` yields
    # one element per line, indented by nesting depth.
    i = "\n" + level * " "
    if len(elem):
        # Element has children: open its text on a new, deeper-indented line.
        if not elem.text or not elem.text.strip():
            elem.text = i + " "
        if not elem.tail or not elem.tail.strip():
            elem.tail = i
        # NOTE: the loop deliberately shadows `elem`; after the loop the
        # trailing `if` fixes up the *last child's* tail so the closing tag
        # of the parent lands on its own line.
        for elem in elem:
            _indent(elem, level + 1)
        if not elem.tail or not elem.tail.strip():
            elem.tail = i
    else:
        # Leaf node: only adjust the tail (never for the root, level 0).
        if level and (not elem.tail or not elem.tail.strip()):
            elem.tail = i
def _ds(elem):
    """ElementTree debug helper.

    Pretty-indents *elem* in place, then returns its serialized form.
    """
    _indent(elem)
    rendered = ElementTree.tostring(elem)
    return rendered
def numericise(value, empty2zero=False, default_blank=""):
    """Coerce a cell string to a number where possible.

    Conversion rules:
    - int if the string parses as an integer, else float if it parses as one
    - empty string -> 0 when *empty2zero* is set, otherwise *default_blank*
    - None and unparseable strings are returned unchanged

    >>> numericise("3")
    3
    >>> numericise("3.1")
    3.1
    >>> numericise("")
    ''
    >>> numericise("faa")
    'faa'
    """
    if value is None:
        return None
    for converter in (int, float):
        try:
            return converter(value)
        except ValueError:
            pass
    if value == "":
        return 0 if empty2zero else default_blank
    return value
def numericise_all(input, empty2zero=False, default_blank=""):
    """Apply :func:`numericise` to every element of *input*, as a list."""
    return list(map(lambda value: numericise(value, empty2zero, default_blank),
                    input))
def rowcol_to_a1(row, col):
    """Translate 1-based (row, col) coordinates into an A1-notation label.

    :param row: row number, starting at 1.
    :param col: column number, starting at 1.
    :returns: the cell's coordinates in A1 notation, e.g. ``'A1'``.
    :raises IncorrectCellLabel: if either coordinate is below 1.
    """
    row, col = int(row), int(col)
    if row < 1 or col < 1:
        raise IncorrectCellLabel('(%s, %s)' % (row, col))
    # Column letters form a bijective base-26 number (A=1 .. Z=26, no zero).
    letters = []
    remainder = col
    while remainder:
        remainder, digit = divmod(remainder, 26)
        if digit == 0:
            # no zero digit in bijective base 26: borrow from the next place
            digit = 26
            remainder -= 1
        letters.append(chr(digit + MAGIC_NUMBER))
    return '%s%s' % (''.join(reversed(letters)), row)
def a1_to_rowcol(label):
    """Translate an A1-notation label into a 1-based ``(row, col)`` tuple.

    :param label: cell label such as ``'B1'``; letter case is ignored.
    :raises IncorrectCellLabel: if *label* is not valid A1 notation.
    """
    m = CELL_ADDR_RE.match(label)
    if not m:
        raise IncorrectCellLabel(label)
    letters, digits = m.groups()
    # Horner evaluation of the bijective base-26 column letters.
    col = 0
    for ch in letters.upper():
        col = col * 26 + (ord(ch) - MAGIC_NUMBER)
    return (int(digits), col)
def cast_to_a1_notation(method):
    """Decorator for range methods: when called with numeric coordinates,
    translate them into a single A1-notation range string argument."""
    @wraps(method)
    def wrapper(self, *args, **kwargs):
        try:
            if args:
                # Raises ValueError for a non-numeric first argument, which
                # means the caller already supplied an A1 range string.
                int(args[0])
                start_label = rowcol_to_a1(*args[:2])
                end_label = rowcol_to_a1(*args[-2:])
                args = ('%s:%s' % (start_label, end_label),) + args[4:]
        except ValueError:
            pass
        return method(self, *args, **kwargs)
    return wrapper
def extract_id_from_url(url):
    """Return the spreadsheet key embedded in a Google Sheets URL.

    The modern ``/spreadsheets/d/<key>`` form is tried before the legacy
    ``?key=<key>`` form; raises NoValidUrlKeyFound when neither matches.
    """
    for regex in (URL_KEY_V2_RE, URL_KEY_V1_RE):
        found = regex.search(url)
        if found:
            return found.group(1)
    raise NoValidUrlKeyFound
def wid_to_gid(wid):
    """Calculate gid of a worksheet from its wid (legacy key de-obfuscation:
    base-36 decode, then XOR with a length-dependent constant)."""
    if len(wid) > 3:
        return str(int(wid[1:], 36) ^ 474)
    return str(int(wid, 36) ^ 31578)
def rightpad(row, max_len):
    """Pad *row* on the right with empty strings up to *max_len* entries.

    Returns *row* itself when it is already exactly *max_len* long.
    """
    missing = max_len - len(row)
    if missing:
        return row + [''] * missing
    return row
def fill_gaps(L, rows=None, cols=None):
    """Normalize jagged 2-D list *L* into a full rectangle of at least
    *rows* x *cols*, padding with empty strings / empty rows."""
    target_cols = cols if cols is not None else max(len(row) for row in L)
    target_rows = rows if rows is not None else len(L)
    missing_rows = target_rows - len(L)
    if missing_rows:
        L = L + [[]] * missing_rows
    return [rightpad(row, target_cols) for row in L]
def cell_list_to_rect(cell_list):
    """Arrange a flat list of cell objects (with .row/.col/.value) into a
    dense 2-D list of values, positioned relative to the first cell."""
    if not cell_list:
        return []
    first = cell_list[0]
    row_offset = first.row
    col_offset = first.col
    grid = defaultdict(lambda: defaultdict(str))
    for cell in cell_list:
        target_row = grid.setdefault(int(cell.row) - row_offset,
                                     defaultdict(str))
        target_row[cell.col - col_offset] = cell.value
    if not grid:
        return []
    widest = max(chain.from_iterable(r.keys() for r in grid.values()))
    tallest = max(grid.keys())
    return [[grid[r][c] for c in range(widest + 1)]
            for r in range(tallest + 1)]
# When run directly, execute the doctests embedded in this module's
# docstrings (e.g. in numericise).
if __name__ == '__main__':
    import doctest
    doctest.testmod()
Remove API v3 (xml) related code from `gspread.utils`
# -*- coding: utf-8 -*-
"""
gspread.utils
~~~~~~~~~~~~~
This module contains utility functions.
"""
import re
from functools import wraps
from collections import defaultdict
from itertools import chain
from .exceptions import IncorrectCellLabel, NoValidUrlKeyFound
MAGIC_NUMBER = 64
CELL_ADDR_RE = re.compile(r'([A-Za-z]+)([1-9]\d*)')
URL_KEY_V1_RE = re.compile(r'key=([^&#]+)')
URL_KEY_V2_RE = re.compile(r'/spreadsheets/d/([a-zA-Z0-9-_]+)')
def finditem(func, seq):
    """Return the first element of *seq* for which ``func(element)`` is true.

    Raises StopIteration when no element matches (original behavior).
    """
    for candidate in seq:
        if func(candidate):
            return candidate
    raise StopIteration
def numericise(value, empty2zero=False, default_blank=""):
    """Coerce a cell string to a number where possible.

    Conversion rules:
    - int if the string parses as an integer, else float if it parses as one
    - empty string -> 0 when *empty2zero* is set, otherwise *default_blank*
    - None and unparseable strings are returned unchanged

    >>> numericise("3")
    3
    >>> numericise("3.1")
    3.1
    >>> numericise("")
    ''
    >>> numericise("faa")
    'faa'
    """
    if value is None:
        return None
    for converter in (int, float):
        try:
            return converter(value)
        except ValueError:
            pass
    if value == "":
        return 0 if empty2zero else default_blank
    return value
def numericise_all(input, empty2zero=False, default_blank=""):
    """Apply :func:`numericise` to every element of *input*, as a list."""
    return list(map(lambda value: numericise(value, empty2zero, default_blank),
                    input))
def rowcol_to_a1(row, col):
    """Translate 1-based (row, col) coordinates into an A1-notation label.

    :param row: row number, starting at 1.
    :param col: column number, starting at 1.
    :returns: the cell's coordinates in A1 notation, e.g. ``'A1'``.
    :raises IncorrectCellLabel: if either coordinate is below 1.
    """
    row, col = int(row), int(col)
    if row < 1 or col < 1:
        raise IncorrectCellLabel('(%s, %s)' % (row, col))
    # Column letters form a bijective base-26 number (A=1 .. Z=26, no zero).
    letters = []
    remainder = col
    while remainder:
        remainder, digit = divmod(remainder, 26)
        if digit == 0:
            # no zero digit in bijective base 26: borrow from the next place
            digit = 26
            remainder -= 1
        letters.append(chr(digit + MAGIC_NUMBER))
    return '%s%s' % (''.join(reversed(letters)), row)
def a1_to_rowcol(label):
    """Translate an A1-notation label into a 1-based ``(row, col)`` tuple.

    :param label: cell label such as ``'B1'``; letter case is ignored.
    :raises IncorrectCellLabel: if *label* is not valid A1 notation.
    """
    m = CELL_ADDR_RE.match(label)
    if not m:
        raise IncorrectCellLabel(label)
    letters, digits = m.groups()
    # Horner evaluation of the bijective base-26 column letters.
    col = 0
    for ch in letters.upper():
        col = col * 26 + (ord(ch) - MAGIC_NUMBER)
    return (int(digits), col)
def cast_to_a1_notation(method):
    """Decorator for range methods: when called with numeric coordinates,
    translate them into a single A1-notation range string argument."""
    @wraps(method)
    def wrapper(self, *args, **kwargs):
        try:
            if args:
                # Raises ValueError for a non-numeric first argument, which
                # means the caller already supplied an A1 range string.
                int(args[0])
                start_label = rowcol_to_a1(*args[:2])
                end_label = rowcol_to_a1(*args[-2:])
                args = ('%s:%s' % (start_label, end_label),) + args[4:]
        except ValueError:
            pass
        return method(self, *args, **kwargs)
    return wrapper
def extract_id_from_url(url):
    """Return the spreadsheet key embedded in a Google Sheets URL.

    The modern ``/spreadsheets/d/<key>`` form is tried before the legacy
    ``?key=<key>`` form; raises NoValidUrlKeyFound when neither matches.
    """
    for regex in (URL_KEY_V2_RE, URL_KEY_V1_RE):
        found = regex.search(url)
        if found:
            return found.group(1)
    raise NoValidUrlKeyFound
def wid_to_gid(wid):
    """Calculate gid of a worksheet from its wid (legacy key de-obfuscation:
    base-36 decode, then XOR with a length-dependent constant)."""
    if len(wid) > 3:
        return str(int(wid[1:], 36) ^ 474)
    return str(int(wid, 36) ^ 31578)
def rightpad(row, max_len):
    """Pad *row* on the right with empty strings up to *max_len* entries.

    Returns *row* itself when it is already exactly *max_len* long.
    """
    missing = max_len - len(row)
    if missing:
        return row + [''] * missing
    return row
def fill_gaps(L, rows=None, cols=None):
    """Normalize jagged 2-D list *L* into a full rectangle of at least
    *rows* x *cols*, padding with empty strings / empty rows."""
    target_cols = cols if cols is not None else max(len(row) for row in L)
    target_rows = rows if rows is not None else len(L)
    missing_rows = target_rows - len(L)
    if missing_rows:
        L = L + [[]] * missing_rows
    return [rightpad(row, target_cols) for row in L]
def cell_list_to_rect(cell_list):
    """Arrange a flat list of cell objects (with .row/.col/.value) into a
    dense 2-D list of values, positioned relative to the first cell."""
    if not cell_list:
        return []
    first = cell_list[0]
    row_offset = first.row
    col_offset = first.col
    grid = defaultdict(lambda: defaultdict(str))
    for cell in cell_list:
        target_row = grid.setdefault(int(cell.row) - row_offset,
                                     defaultdict(str))
        target_row[cell.col - col_offset] = cell.value
    if not grid:
        return []
    widest = max(chain.from_iterable(r.keys() for r in grid.values()))
    tallest = max(grid.keys())
    return [[grid[r][c] for c in range(widest + 1)]
            for r in range(tallest + 1)]
# When run directly, execute the doctests embedded in this module's
# docstrings (e.g. in numericise).
if __name__ == '__main__':
    import doctest
    doctest.testmod()
|
import os
import sys
import json
import string
import argparse as ap
from os import path
from glob import glob
from collections import namedtuple
import jedi
global verbose_, quiet_
def log(msg):
    """Write *msg* to stderr, but only when verbose mode is enabled."""
    if not verbose_:
        return
    sys.stderr.write(msg + '\n')
def error(msg):
    """Write *msg* to stderr unless quiet mode is enabled."""
    if quiet_:
        return
    sys.stderr.write(msg + '\n')
def graph(dir_, pretty=False, verbose=False, quiet=False):
    """Print a JSON document of all Python defs/refs found under *dir_*.

    Parameters
    ----------
    dir_ : str
        Root directory of the source tree; becomes the working directory.
    pretty : bool
        Indent the JSON output.
    verbose, quiet : bool
        Control stderr logging via the module-level flags.
    """
    global verbose_, quiet_
    verbose_, quiet_ = verbose, quiet
    os.chdir(dir_)  # set working directory to be source directory
    source_files = get_source_files('.')
    modules_and_files = [(filename_to_module_name(f), f) for f in source_files]
    jedi.api.preload_module([mf[0] for mf in modules_and_files])
    defs = [d for d in get_defs(source_files)]
    refs = [r for r in get_refs(source_files)]
    # Emit one synthetic 'module' def per source file.
    for module, filename in modules_and_files:
        defs.append(Def(
            Path=module.replace('.', '/'),
            Kind='module',
            Name=string.split(module, '.')[-1],
            File=filename,
            DefStart=0,
            DefEnd=0,
            Exported=True,
            Docstring='',  # TODO: extract module/package-level doc
            Data=None,
        ))
    # De-duplicate definitions (local variables may be defined in more than one
    # place). Could do something smarter here, but for now, just take the first
    # definition that appears.
    unique_defs = []
    unique_def_paths = set([])
    for def_ in defs:
        if not def_.Path in unique_def_paths:
            unique_defs.append(def_)
            unique_def_paths.add(def_.Path)
    # Self-references, dedup
    unique_refs = []
    unique_ref_keys = set([])
    for def_ in unique_defs:
        ref = Ref(
            DefPath=def_.Path,
            DefFile=path.abspath(def_.File),
            Def=True,
            File=def_.File,
            Start=def_.DefStart,
            End=def_.DefEnd,
            ToBuiltin=False,
        )
        ref_key = (ref.DefPath, ref.DefFile, ref.File, ref.Start, ref.End)
        if ref_key not in unique_ref_keys:
            unique_ref_keys.add(ref_key)
            unique_refs.append(ref)
    for ref in refs:
        ref_key = (ref.DefPath, ref.DefFile, ref.File, ref.Start, ref.End)
        if ref_key not in unique_ref_keys:
            unique_ref_keys.add(ref_key)
            unique_refs.append(ref)
    json_indent = 2 if pretty else None
    # NOTE: Python 2 print statement; this module targets Python 2 only.
    print json.dumps({
        'Defs': [d.__dict__ for d in unique_defs],
        'Refs': [r.__dict__ for r in unique_refs],
    }, indent=json_indent)
def get_source_files(dir_):
    """Recursively collect paths (relative to *dir_*) of all .py files."""
    found = []
    for dirpath, _dirnames, filenames in os.walk(dir_):
        rel_dir = os.path.relpath(dirpath, dir_)
        found.extend(os.path.join(rel_dir, name)
                     for name in filenames
                     if os.path.splitext(name)[1] == '.py')
    return found
def get_defs(source_files):
    """Yield a Def record for every definition jedi finds in *source_files*.

    Files that fail to read or parse are reported via error() and skipped.
    """
    for i, source_file in enumerate(source_files):
        log('getting defs for source file (%d/%d) %s' % (i, len(source_files), source_file))
        try:
            source = None
            with open(source_file) as sf:
                source = unicode(sf.read())  # Python 2 text type
            linecoler = LineColToOffConverter(source)
            defs = jedi.api.defined_names(source, path=source_file)
            for def_ in defs:
                # recurse into each top-level definition
                for d in get_defs_(def_, source_file, linecoler):
                    yield d
        except Exception as e:
            error('failed to get defs for source file %s: %s' % (source_file, str(e)))
def get_defs_(def_, source_file, linecoler):
    """Recursively yield *def_* and its nested definitions as Def tuples."""
    # ignore import definitions because these just redefine things imported from elsewhere
    if def_.type == 'import':
        return
    def__, err = jedi_def_to_def(def_, source_file, linecoler)
    if err is None:
        yield def__
    else:
        error(err)
    # Only containers (functions/classes/modules) can hold nested defs.
    if def_.type not in ['function', 'class', 'module']:
        return
    subdefs = def_.defined_names()
    for subdef in subdefs:
        for d in get_defs_(subdef, source_file, linecoler):
            yield d
def jedi_def_to_def(def_, source_file, linecoler):
    """Convert a jedi definition into a Def tuple.

    Returns (Def, None) on success or (None, error_message) on failure.
    """
    full_name, err = full_name_of_def(def_)
    if err is not None:
        return None, err
    # Byte offset of the definition's name within the file.
    start_pos = linecoler.convert(def_.start_pos)
    return Def(
        Path=full_name.replace('.', '/'),
        Kind=def_.type,
        Name=def_.name,
        File=source_file,
        DefStart=start_pos,
        DefEnd=start_pos+len(def_.name),
        Exported=True,  # TODO: not all vars are exported
        Docstring=def_.docstring(),
        Data=None,
    ), None
def get_refs(source_files):
    """Yield a Ref record for every name reference found in *source_files*.

    Failures (per file and per individual reference) are reported via
    error() and skipped.
    """
    for i, source_file in enumerate(source_files):
        log('getting refs for source file (%d/%d) %s' % (i, len(source_files), source_file))
        try:
            parserContext = ParserContext(source_file)
            linecoler = LineColToOffConverter(parserContext.source)
            for name_part, def_ in parserContext.refs():
                try:
                    full_name, err = full_name_of_def(def_, from_ref=True)
                    if err is not None or full_name == '':
                        raise Exception(err)
                    # byte offsets of the referencing name within the file
                    start = linecoler.convert(name_part.start_pos)
                    end = linecoler.convert(name_part.end_pos)
                    yield Ref(
                        DefPath=full_name.replace('.', '/'),
                        DefFile=def_.module_path,
                        Def=False,
                        File=source_file,
                        Start=start,
                        End=end,
                        ToBuiltin=def_.in_builtin_module(),
                    )
                except Exception as e:
                    error('failed to get ref (%s) in source file %s: %s' % (str((name_part, def_)), source_file, str(e)))
        except Exception as e:
            error('failed to get refs for source file %s: %s' % (source_file, str(e)))
def full_name_of_def(def_, from_ref=False):
    """Compute the dotted, package-qualified name of a jedi definition.

    Returns (full_name, None) on success or (None, error_message).
    """
    # TODO: This function
    # - currently fails for tuple assignments (e.g., 'x, y = 1, 3')
    # - doesn't distinguish between m(module).n(submodule) and m(module).n(contained-variable)
    if def_.in_builtin_module():
        return def_.full_name, None
    # Statements/params only carry the containing scope in full_name, so the
    # definition's own name must be appended.
    full_name = ('%s.%s' % (def_.full_name, def_.name)) if def_.type in set(['statement', 'param']) else def_.full_name
    module_path = def_.module_path
    if from_ref:
        module_path, err = abs_module_path_to_relative_module_path(module_path)
        if err is not None:
            return None, err
    supermodule = supermodule_path(module_path).replace('/', '.')
    # definition definitions' full_name property contains only the proximal module, so we need to add back the parent
    # module components. Luckily, the module_path is relative in this case.
    return path.join(supermodule, full_name), None
def supermodule_path(module_path):
    """Return the directory path of the package containing *module_path*.

    For a package's ``__init__.py`` the package itself is the module, so the
    parent of its directory is returned instead.
    """
    parent = path.dirname(module_path)
    if path.basename(module_path) == '__init__.py':
        return path.dirname(parent)
    return parent
def abs_module_path_to_relative_module_path(module_path):
    """Convert an absolute module path into one relative to the source tree
    or, failing that, to the installed package root.

    Returns (relative_path, None) on success or (None, error_message).
    """
    relpath = path.relpath(module_path)  # relative from pwd (set in graph())
    if not relpath.startswith('..'):
        return relpath, None
    components = module_path.split(os.sep)
    # Fall back to chopping the path at a well-known installation directory:
    # first site-packages/dist-packages, then any 'python*' directory.
    # BUG FIX: the original used the loop variable `i` after the loop rather
    # than the saved `pIdx`, which only worked by coincidence; the duplicated
    # loops are also collapsed into one pass per predicate.
    for predicate in (
            lambda c: c in ('site-packages', 'dist-packages'),
            lambda c: c.startswith('python'),
    ):
        for idx, component in enumerate(components):
            if predicate(component):
                return path.join(*components[idx + 1:]), None
    return None, ("could not convert absolute module path %s to relative "
                  "module path" % module_path)
# Output record types serialized to JSON (via __dict__) by graph().
# Def: one definition; DefStart/DefEnd are byte offsets of its name.
Def = namedtuple('Def', ['Path', 'Kind', 'Name', 'File', 'DefStart', 'DefEnd', 'Exported', 'Docstring', 'Data'])
# Ref: one reference from File[Start:End] to the definition at DefPath.
Ref = namedtuple('Ref', ['DefPath', 'DefFile', 'Def', 'File', 'Start', 'End', "ToBuiltin"])
class ParserContext(object):
    """Wraps a jedi parse of one source file and enumerates the name
    references (import names and statement names) found in it."""

    def __init__(self, source_file):
        self.source_file = source_file
        with open(source_file) as sf:
            self.source = unicode(sf.read())  # Python 2 text type
        self.parser = jedi.parser.Parser(self.source, source_file)

    def refs(self):
        """Yield (name_part, definition) pairs for every reference."""
        for r in self.scope_refs(self.parser.module):
            yield r

    def scope_refs(self, scope):
        """Yield refs from a scope's imports, statements, returns and
        nested subscopes (recursively)."""
        for import_ in scope.imports:
            for r in self.import_refs(import_):
                yield r
        for stmt in scope.statements:
            for r in self.stmt_refs(stmt):
                yield r
        for ret in scope.returns:
            for r in self.stmt_refs(ret):
                yield r
        for subscope in scope.subscopes:
            for r in self.scope_refs(subscope):
                yield r

    def import_refs(self, import_):
        """Resolve each name in an import statement via goto_assignments."""
        for name in import_.get_all_import_names():
            for name_part in name.names:
                defs = jedi.api.Script(
                    path=self.source_file,
                    line=name_part.start_pos[0],
                    column=name_part.start_pos[1],
                ).goto_assignments()
                for def_ in defs:
                    yield (name_part, def_)

    def stmt_refs(self, stmt):
        """Resolve each Name token in a statement to its definition."""
        # Keyword statements and control-flow nodes carry no resolvable names.
        if isinstance(stmt, jedi.parser.representation.KeywordStatement):
            return
        if isinstance(stmt, jedi.parser.representation.Flow):
            return
        if stmt is None:
            return
        for token in stmt._token_list:
            if not isinstance(token, jedi.parser.representation.Name):
                continue
            for name_part in token.names:
                # Note: we call goto_definitions instead of goto_assignments,
                # because otherwise the reference will not follow imports (and
                # also generates bogus local definitions whose paths conflict
                # with those of actual definitions). This uses a modified
                # goto_definitions (resolve_variables_to_types option) that
                # *DOES NOT* follow assignment statements to resolve variables
                # to types (because that's not what we want).
                defs = jedi.api.Script(
                    path=self.source_file,
                    line=name_part.start_pos[0],
                    column=name_part.start_pos[1],
                    resolve_variables_to_types=False,
                ).goto_definitions()
                # Note(beyang): For now, only yield the first definition.
                # Otherwise, multiple references to multiple definitions will
                # yield dup references. In the future, might want to do
                # something smarter here.
                i = 0
                for def_ in defs:
                    if i > 0: break
                    yield (name_part, def_)
                    i += 1
def resolve_import_paths(scopes):
    """Replace jedi ImportWrapper scopes in *scopes* with the scopes they
    resolve to (recursively). Mutates and returns the set."""
    for s in scopes.copy():
        if isinstance(s, jedi.evaluate.imports.ImportWrapper):
            scopes.remove(s)
            scopes.update(resolve_import_paths(set(s.follow())))
    return scopes
def filename_to_module_name(filename):
    """Map a relative source-file path to its dotted Python module name."""
    if path.basename(filename) == '__init__.py':
        package_dir = path.dirname(filename)
        return package_dir.replace('/', '.')
    root, _ext = path.splitext(filename)
    return root.replace('/', '.')
class LineColToOffConverter(object):
    """Converts (line, col) positions in a source text into byte offsets."""

    def __init__(self, source):
        # Prefix sums of line lengths (+1 per newline): entry k is the
        # offset of the first character of 0-indexed line k.
        offsets = [0]
        for line in source.split('\n'):
            offsets.append(offsets[-1] + len(line) + 1)
        self._cumulative_off = offsets

    def convert(self, linecol):
        """Convert *linecol* (1-indexed line, 0-indexed col) to an offset.

        Returns the integer offset, or a (None, error-message) tuple when
        the line is out of bounds (quirk preserved from the original API).
        """
        line, col = linecol[0] - 1, linecol[1]  # line becomes 0-indexed
        if line >= len(self._cumulative_off):
            return None, 'requested line out of bounds %d > %d' % (line+1, len(self._cumulative_off)-1)
        return self._cumulative_off[line] + col
if __name__ == '__main__':
    # Command-line entry point: graph the tree rooted at `dir`.
    argser = ap.ArgumentParser(description='graph.py is a command that dumps all Python definitions and references found in code rooted at a directory')
    argser.add_argument('dir', help='path to root directory of code')
    argser.add_argument('--pretty', help='pretty print JSON output', action='store_true', default=False)
    argser.add_argument('--verbose', help='verbose', action='store_true', default=False)
    argser.add_argument('--quiet', help='quiet', action='store_true', default=False)
    args = argser.parse_args()
    if args.dir == '':
        error('target directory must not be empty')
        # BUG FIX: `os.exit` does not exist (it would raise AttributeError);
        # sys.exit is the correct way to terminate with a status code.
        sys.exit(1)
    graph(args.dir, pretty=args.pretty, verbose=args.verbose, quiet=args.quiet)
add optional # source files trunc option
import os
import sys
import json
import string
import argparse as ap
from os import path
from glob import glob
from collections import namedtuple
import jedi
global verbose_, quiet_
def log(msg):
    """Write *msg* to stderr, but only when verbose mode is enabled."""
    if not verbose_:
        return
    sys.stderr.write(msg + '\n')
def error(msg):
    """Write *msg* to stderr unless quiet mode is enabled."""
    if quiet_:
        return
    sys.stderr.write(msg + '\n')
def graph(dir_, pretty=False, verbose=False, quiet=False, nSourceFilesTrunc=None):
    """Dump all Python defs and refs found under *dir_* as JSON on stdout.

    dir_: root directory of the code to index (becomes the working dir).
    pretty: indent the JSON output.
    verbose/quiet: logging flags (stored in module-level globals).
    nSourceFilesTrunc: optional cap on the number of source files processed.
    """
    global verbose_, quiet_
    verbose_, quiet_ = verbose, quiet
    os.chdir(dir_)  # set working directory to be source directory
    source_files = get_source_files('.')
    if nSourceFilesTrunc is not None:
        source_files = source_files[:nSourceFilesTrunc]
    modules_and_files = [(filename_to_module_name(f), f) for f in source_files]
    # Warm jedi's module cache up front so per-file analysis is faster.
    jedi.api.preload_module([mf[0] for mf in modules_and_files])
    defs = list(get_defs(source_files))
    refs = list(get_refs(source_files))
    # Emit a synthetic "module" def for every file.
    for module, filename in modules_and_files:
        defs.append(Def(
            Path=module.replace('.', '/'),
            Kind='module',
            # FIX: use the str method instead of the deprecated
            # string.split(module, '.') module function.
            Name=module.split('.')[-1],
            File=filename,
            DefStart=0,
            DefEnd=0,
            Exported=True,
            Docstring='',  # TODO: extract module/package-level doc
            Data=None,
        ))
    # De-duplicate definitions (local variables may be defined in more than one
    # place). Could do something smarter here, but for now, just take the first
    # definition that appears.
    unique_defs = []
    unique_def_paths = set([])
    for def_ in defs:
        if not def_.Path in unique_def_paths:
            unique_defs.append(def_)
            unique_def_paths.add(def_.Path)
    # Self-references, dedup
    unique_refs = []
    unique_ref_keys = set([])
    for def_ in unique_defs:
        ref = Ref(
            DefPath=def_.Path,
            DefFile=path.abspath(def_.File),
            Def=True,
            File=def_.File,
            Start=def_.DefStart,
            End=def_.DefEnd,
            ToBuiltin=False,
        )
        ref_key = (ref.DefPath, ref.DefFile, ref.File, ref.Start, ref.End)
        if ref_key not in unique_ref_keys:
            unique_ref_keys.add(ref_key)
            unique_refs.append(ref)
    for ref in refs:
        ref_key = (ref.DefPath, ref.DefFile, ref.File, ref.Start, ref.End)
        if ref_key not in unique_ref_keys:
            unique_ref_keys.add(ref_key)
            unique_refs.append(ref)
    json_indent = 2 if pretty else None
    # FIX: print(...) with a single argument behaves identically on Python 2
    # and 3, unlike the original ``print`` statement which was 2-only.
    print(json.dumps({
        'Defs': [d.__dict__ for d in unique_defs],
        'Refs': [r.__dict__ for r in unique_refs],
    }, indent=json_indent))
def get_source_files(dir_):
    """Recursively collect paths (relative to *dir_*) of all .py files."""
    source_files = []
    for dirpath, _, filenames in os.walk(dir_):
        rel_dir = os.path.relpath(dirpath, dir_)
        python_files = (f for f in filenames if os.path.splitext(f)[1] == '.py')
        source_files.extend(os.path.join(rel_dir, f) for f in python_files)
    return source_files
def get_defs(source_files):
    # Generator: yields a Def for every definition jedi finds in each file.
    # NOTE: uses the Python-2-only ``unicode`` builtin; this module targets
    # Python 2.
    for i, source_file in enumerate(source_files):
        log('getting defs for source file (%d/%d) %s' % (i, len(source_files), source_file))
        try:
            source = None
            with open(source_file) as sf:
                source = unicode(sf.read())
            linecoler = LineColToOffConverter(source)
            defs = jedi.api.defined_names(source, path=source_file)
            for def_ in defs:
                for d in get_defs_(def_, source_file, linecoler):
                    yield d
        except Exception as e:
            # Best effort: one broken file must not abort the whole dump.
            error('failed to get defs for source file %s: %s' % (source_file, str(e)))
def get_defs_(def_, source_file, linecoler):
    """Recursively yield Defs for *def_* and its nested definitions."""
    # ignore import definitions because these just redefine things imported from elsewhere
    if def_.type == 'import':
        return
    converted, err = jedi_def_to_def(def_, source_file, linecoler)
    if err is not None:
        error(err)
    else:
        yield converted
    # Only functions, classes and modules can contain nested definitions.
    if def_.type in ('function', 'class', 'module'):
        for child in def_.defined_names():
            for d in get_defs_(child, source_file, linecoler):
                yield d
def jedi_def_to_def(def_, source_file, linecoler):
    # Convert one jedi definition object into our Def record.
    # Returns (Def, None) on success or (None, errmsg) on failure.
    full_name, err = full_name_of_def(def_)
    if err is not None:
        return None, err
    # Byte offset of the definition's name within the file.
    start_pos = linecoler.convert(def_.start_pos)
    return Def(
        Path=full_name.replace('.', '/'),
        Kind=def_.type,
        Name=def_.name,
        File=source_file,
        DefStart=start_pos,
        DefEnd=start_pos+len(def_.name),
        Exported=True,  # TODO: not all vars are exported
        Docstring=def_.docstring(),
        Data=None,
    ), None
def get_refs(source_files):
    # Generator: yields a Ref for every name reference jedi can resolve in
    # each file.  Failures are logged and skipped (best effort).
    for i, source_file in enumerate(source_files):
        log('getting refs for source file (%d/%d) %s' % (i, len(source_files), source_file))
        try:
            parserContext = ParserContext(source_file)
            linecoler = LineColToOffConverter(parserContext.source)
            for name_part, def_ in parserContext.refs():
                try:
                    full_name, err = full_name_of_def(def_, from_ref=True)
                    if err is not None or full_name == '':
                        raise Exception(err)
                    # Byte offsets of the referencing name within the file.
                    start = linecoler.convert(name_part.start_pos)
                    end = linecoler.convert(name_part.end_pos)
                    yield Ref(
                        DefPath=full_name.replace('.', '/'),
                        DefFile=def_.module_path,
                        Def=False,
                        File=source_file,
                        Start=start,
                        End=end,
                        ToBuiltin=def_.in_builtin_module(),
                    )
                except Exception as e:
                    # One unresolvable ref should not abort the file.
                    error('failed to get ref (%s) in source file %s: %s' % (str((name_part, def_)), source_file, str(e)))
        except Exception as e:
            error('failed to get refs for source file %s: %s' % (source_file, str(e)))
def full_name_of_def(def_, from_ref=False):
    # Compute the fully-qualified dotted name of a jedi definition.
    # Returns (name, None) on success or (None, errmsg) on failure.
    # TODO: This function
    # - currently fails for tuple assignments (e.g., 'x, y = 1, 3')
    # - doesn't distinguish between m(module).n(submodule) and m(module).n(contained-variable)
    if def_.in_builtin_module():
        return def_.full_name, None
    # Statements and params need their own name appended; for other kinds
    # full_name already includes it.
    full_name = ('%s.%s' % (def_.full_name, def_.name)) if def_.type in set(['statement', 'param']) else def_.full_name
    module_path = def_.module_path
    if from_ref:
        # References may point into installed packages; rebase their absolute
        # path onto the package root so names stay repo-relative.
        module_path, err = abs_module_path_to_relative_module_path(module_path)
        if err is not None:
            return None, err
    supermodule = supermodule_path(module_path).replace('/', '.')
    # definition definitions' full_name property contains only the proximal module, so we need to add back the parent
    # module components. Luckily, the module_path is relative in this case.
    return path.join(supermodule, full_name), None
def supermodule_path(module_path):
    """Return the path of the directory containing this module's package."""
    containing_dir = path.dirname(module_path)
    if path.basename(module_path) == '__init__.py':
        # A package's file sits inside its own directory; go up once more.
        return path.dirname(containing_dir)
    return containing_dir
def abs_module_path_to_relative_module_path(module_path):
    """Convert an absolute module path to one relative to its import root.

    Tries, in order: the current working directory, the nearest
    site-packages/dist-packages directory, then the python* install
    directory.  Returns (relative_path, None) on success, (None, errmsg)
    on failure.
    """
    relpath = path.relpath(module_path)  # relative from pwd (which is set in main)
    if not relpath.startswith('..'):
        return relpath, None
    components = module_path.split(os.sep)

    def _after_marker(is_marker):
        # Path after the first component matching is_marker, else None.
        # FIX: the original reused the loop variable ``i`` after the loop and
        # crashed with a TypeError (path.join with no args) when the marker
        # was the final component; an empty remainder is now "not found".
        for idx, component in enumerate(components):
            if is_marker(component):
                rest = components[idx + 1:]
                return path.join(*rest) if rest else None
        return None

    # Third-party install directories.
    rel = _after_marker(lambda c: c in ('site-packages', 'dist-packages'))
    if rel is not None:
        return rel, None
    # Standard-library directory (e.g. .../python2.7/...).
    rel = _after_marker(lambda c: c.startswith('python'))
    if rel is not None:
        return rel, None
    return None, ("could not convert absolute module path %s to relative module path" % module_path)
# Def describes one Python definition (module, class, function, statement...).
# DefStart/DefEnd are byte offsets of the definition's name within File.
Def = namedtuple('Def', ['Path', 'Kind', 'Name', 'File', 'DefStart', 'DefEnd', 'Exported', 'Docstring', 'Data'])
# Ref is a single reference to a Def; Def=True marks the definition site
# itself.  Start/End are byte offsets of the referencing name within File.
Ref = namedtuple('Ref', ['DefPath', 'DefFile', 'Def', 'File', 'Start', 'End', "ToBuiltin"])
class ParserContext(object):
    """Wraps jedi's parser for one source file and enumerates name references.

    NOTE: relies on the old jedi.parser API and on internal attributes such
    as ``Statement._token_list`` — tied to the jedi version this was written
    against.
    """
    def __init__(self, source_file):
        self.source_file = source_file
        with open(source_file) as sf:
            # Python-2-only ``unicode`` builtin; this module targets Python 2.
            self.source = unicode(sf.read())
        self.parser = jedi.parser.Parser(self.source, source_file)
    def refs(self):
        # Yield (name_part, definition) pairs for the whole module.
        for r in self.scope_refs(self.parser.module):
            yield r
    def scope_refs(self, scope):
        # Walk one scope: its imports, statements, returns, then subscopes.
        for import_ in scope.imports:
            for r in self.import_refs(import_):
                yield r
        for stmt in scope.statements:
            for r in self.stmt_refs(stmt):
                yield r
        for ret in scope.returns:
            for r in self.stmt_refs(ret):
                yield r
        for subscope in scope.subscopes:
            for r in self.scope_refs(subscope):
                yield r
    def import_refs(self, import_):
        # Resolve each dotted-name part of an import to its assignment target.
        for name in import_.get_all_import_names():
            for name_part in name.names:
                defs = jedi.api.Script(
                    path=self.source_file,
                    line=name_part.start_pos[0],
                    column=name_part.start_pos[1],
                ).goto_assignments()
                for def_ in defs:
                    yield (name_part, def_)
    def stmt_refs(self, stmt):
        # Resolve every Name token inside one statement; keyword statements,
        # flow constructs and missing statements carry no references.
        if isinstance(stmt, jedi.parser.representation.KeywordStatement):
            return
        if isinstance(stmt, jedi.parser.representation.Flow):
            return
        if stmt is None:
            return
        for token in stmt._token_list:
            if not isinstance(token, jedi.parser.representation.Name):
                continue
            for name_part in token.names:
                # Note: we call goto_definitions instead of goto_assignments,
                # because otherwise the reference will not follow imports (and
                # also generates bogus local definitions whose paths conflict
                # with those of actual definitions). This uses a modified
                # goto_definitions (resolve_variables_to_types option) that
                # *DOES NOT* follow assignment statements to resolve variables
                # to types (because that's not what we want).
                defs = jedi.api.Script(
                    path=self.source_file,
                    line=name_part.start_pos[0],
                    column=name_part.start_pos[1],
                    resolve_variables_to_types=False,
                ).goto_definitions()
                # Note(beyang): For now, only yield the first definition.
                # Otherwise, multiple references to multiple definitions will
                # yield dup references. In the future, might want to do
                # something smarter here.
                i = 0
                for def_ in defs:
                    if i > 0: break
                    yield (name_part, def_)
                    i += 1
def resolve_import_paths(scopes):
    """Recursively replace jedi ImportWrapper entries in *scopes* with the
    scopes they resolve to, mutating and returning the set."""
    for scope in scopes.copy():
        if not isinstance(scope, jedi.evaluate.imports.ImportWrapper):
            continue
        scopes.remove(scope)
        # Follow the import and splice the resolved scopes back in.
        scopes.update(resolve_import_paths(set(scope.follow())))
    return scopes
def filename_to_module_name(filename):
    # Map a relative .py path to its dotted module name.
    if path.basename(filename) == '__init__.py':
        # A package is named after its containing directory.
        return path.dirname(filename).replace('/', '.')
    return path.splitext(filename)[0].replace('/', '.')
class LineColToOffConverter(object):
    """Converts (line, col) positions into byte offsets for one source string.

    Lines are 1-indexed and columns 0-indexed (jedi's position convention).
    """
    def __init__(self, source):
        # _cumulative_off[i] is the offset of the first character of
        # 0-indexed line i; one trailing entry marks end-of-source.
        source_lines = source.split('\n')
        cumulative_off = [0]
        for line in source_lines:
            cumulative_off.append(cumulative_off[-1] + len(line) + 1)
        self._cumulative_off = cumulative_off

    def convert(self, linecol):
        """Return the byte offset of (1-indexed line, 0-indexed col).

        Raises ValueError when the line is out of bounds.  BUG FIX: the
        original returned a ``(None, errmsg)`` tuple on error while returning
        a plain int on success; callers used the result directly in integer
        arithmetic, so the error path surfaced as a confusing TypeError.
        Raising keeps the existing callers working (they wrap conversion in
        ``try/except Exception``) while making the failure explicit.
        """
        line, col = linecol[0] - 1, linecol[1]  # convert line to 0-indexed
        if line >= len(self._cumulative_off):
            raise ValueError('requested line out of bounds %d > %d'
                             % (line + 1, len(self._cumulative_off) - 1))
        return self._cumulative_off[line] + col
if __name__ == '__main__':
    # Command-line entry point: dump defs/refs for the given directory.
    argser = ap.ArgumentParser(description='graph.py is a command that dumps all Python definitions and references found in code rooted at a directory')
    argser.add_argument('dir', help='path to root directory of code')
    argser.add_argument('--pretty', help='pretty print JSON output', action='store_true', default=False)
    argser.add_argument('--verbose', help='verbose', action='store_true', default=False)
    argser.add_argument('--quiet', help='quiet', action='store_true', default=False)
    args = argser.parse_args()
    if args.dir == '':
        error('target directory must not be empty')
        # BUG FIX: the original called os.exit(1), which does not exist
        # (AttributeError); sys.exit is the correct call.
        sys.exit(1)
    graph(args.dir, pretty=args.pretty, verbose=args.verbose, quiet=args.quiet)
|
#!/usr/bin/env python
######################################################
#
# howdoi - instant coding answers via the command line
# written by Benjamin Gleitzman (gleitz@mit.edu)
# inspired by Rich Jones (rich@anomos.info)
#
######################################################
from __future__ import print_function
import gc
gc.disable() # noqa: E402
import argparse
import os
import appdirs
import re
from cachelib import FileSystemCache, NullCache
import requests
import sys
import json
from . import __version__
from pygments import highlight
from pygments.lexers import guess_lexer, get_lexer_by_name
from pygments.formatters.terminal import TerminalFormatter
from pygments.util import ClassNotFound
from pyquery import PyQuery as pq
from requests.exceptions import ConnectionError
from requests.exceptions import SSLError
# Handle imports for Python 2 and 3
# Handle imports for Python 2 and 3.
# FIX: compare sys.version_info rather than the sys.version string —
# lexicographic string comparison is fragile (it misorders double-digit
# major/minor versions), while version_info compares numerically.
if sys.version_info[0] < 3:
    import codecs
    from urllib import quote as url_quote
    from urllib import getproxies
    from urlparse import urlparse, parse_qs

    # Handling Unicode: http://stackoverflow.com/a/6633040/305414
    def u(x):
        return codecs.unicode_escape_decode(x)[0]
else:
    from urllib.request import getproxies
    from urllib.parse import quote as url_quote, urlparse, parse_qs

    def u(x):
        # Python 3 strings are already unicode.
        return x
# rudimentary standardized 3-level log output
def _print_err(x): print("[ERROR] " + x)
_print_ok = print # noqa: E305
def _print_dbg(x): print("[DEBUG] " + x) # noqa: E302
# Transport configuration: TLS can be disabled via environment variable.
if os.getenv('HOWDOI_DISABLE_SSL'):  # Set http instead of https
    SCHEME = 'http://'
    VERIFY_SSL_CERTIFICATE = False
else:
    SCHEME = 'https://'
    VERIFY_SSL_CERTIFICATE = True
# Engines accepted by the -e/--engine flag.
SUPPORTED_SEARCH_ENGINES = ('google', 'bing', 'duckduckgo')
# Site searched for answers (overridable with HOWDOI_URL).
URL = os.getenv('HOWDOI_URL') or 'stackoverflow.com'
# One of these is picked at random per request.
USER_AGENTS = ('Mozilla/5.0 (Macintosh; Intel Mac OS X 10.7; rv:11.0) Gecko/20100101 Firefox/11.0',
               'Mozilla/5.0 (X11; Ubuntu; Linux x86_64; rv:22.0) Gecko/20100 101 Firefox/22.0',
               'Mozilla/5.0 (Windows NT 6.1; rv:11.0) Gecko/20100101 Firefox/11.0',
               ('Mozilla/5.0 (Macintosh; Intel Mac OS X 10_7_4) AppleWebKit/536.5 (KHTML, like Gecko) '
                'Chrome/19.0.1084.46 Safari/536.5'),
               ('Mozilla/5.0 (Windows; Windows NT 6.1) AppleWebKit/536.5 (KHTML, like Gecko) Chrome/19.0.1084.46'
                'Safari/536.5'), )
# Per-engine query URL templates ({0}=site, {1}=url-quoted query).
SEARCH_URLS = {
    'bing': SCHEME + 'www.bing.com/search?q=site:{0}%20{1}&hl=en',
    'google': SCHEME + 'www.google.com/search?q=site:{0}%20{1}&hl=en',
    'duckduckgo': SCHEME + 'duckduckgo.com/?q=site:{0}%20{1}&t=hj&ia=web'
}
# Markers of a captcha/abuse interstitial instead of real results.
BLOCK_INDICATORS = (
    'form id="captcha-form"',
    'This page appears when Google automatically detects requests coming from your computer '
    'network which appear to be in violation of the <a href="//www.google.com/policies/terms/">Terms of Service'
)
# Result links that are never real questions.
BLOCKED_QUESTION_FRAGMENTS = (
    'webcache.googleusercontent.com',
)
STAR_HEADER = u('\u2605')
ANSWER_HEADER = u('{2} Answer from {0} {2}\n{1}')
NO_ANSWER_MSG = '< no answer given >'
# Sentinel cached in place of an empty result set.
CACHE_EMPTY_VAL = "NULL"
CACHE_DIR = appdirs.user_cache_dir('howdoi')
CACHE_ENTRY_MAX = 128
# Queries answered with the built-in help text instead of a web search.
SUPPORTED_HELP_QUERIES = ['use howdoi', 'howdoi', 'run howdoi',
                          'do howdoi', 'howdoi howdoi', 'howdoi use howdoi']
if os.getenv('HOWDOI_DISABLE_CACHE'):
    cache = NullCache()  # works like an always empty cache
else:
    cache = FileSystemCache(CACHE_DIR, CACHE_ENTRY_MAX, default_timeout=0)
# Shared HTTP session (connection pooling across requests).
howdoi_session = requests.session()
class BlockError(RuntimeError):
    """Raised when the search engine serves a block/captcha page."""
    pass
def _random_int(width):
bres = os.urandom(width)
if sys.version < '3':
ires = int(bres.encode('hex'), 16)
else:
ires = int.from_bytes(bres, 'little')
return ires
def _random_choice(seq):
    """Pick a pseudo-random element of *seq* using one byte of OS entropy."""
    index = _random_int(1) % len(seq)
    return seq[index]
def get_proxies():
    """Return the system proxy map, keeping only http* entries and making
    sure each value carries an explicit scheme."""
    filtered = {}
    for name, proxy_url in getproxies().items():
        if not name.startswith('http'):
            continue
        # Environment may supply a bare host:port; prepend a scheme.
        filtered[name] = proxy_url if proxy_url.startswith('http') else 'http://%s' % proxy_url
    return filtered
def _get_result(url):
    """GET *url* through the shared session and return the body text.

    Prints a remediation hint and re-raises on SSL errors.
    """
    headers = {'User-Agent': _random_choice(USER_AGENTS)}
    try:
        response = howdoi_session.get(url,
                                      headers=headers,
                                      proxies=get_proxies(),
                                      verify=VERIFY_SSL_CERTIFICATE)
        return response.text
    except requests.exceptions.SSLError as e:
        _print_err('Encountered an SSL Error. Try using HTTP instead of '
                   'HTTPS by setting the environment variable "HOWDOI_DISABLE_SSL".\n')
        raise e
def _add_links_to_text(element):
    """Rewrite every <a> inside *element* as a markdown-style '[text](href)'."""
    for anchor in element.find('a'):
        wrapped = pq(anchor)
        target = anchor.attrib['href']
        label = wrapped.text()
        # A bare URL is left as-is; anything else becomes a markdown link.
        replacement = label if (label == target) else "[{0}]({1})".format(label, target)
        wrapped.replace_with(replacement)
def get_text(element):
    """Return the inner text of a pyquery element, links rendered as markdown."""
    _add_links_to_text(element)
    try:
        # Newer pyquery supports whitespace-preserving extraction.
        return element.text(squash_space=False)
    except TypeError:
        # Older pyquery: no squash_space parameter.
        return element.text()
def _extract_links_from_bing(html):
    """Collect result hrefs from a Bing results page."""
    html.remove_namespaces()
    anchors = html('.b_algo')('h2')('a')
    return [anchor.attrib['href'] for anchor in anchors]
def _extract_links_from_google(html):
    """Collect result hrefs from a Google results page (old or new markup)."""
    primary = [anchor.attrib['href'] for anchor in html('.l')]
    if primary:
        return primary
    return [anchor.attrib['href'] for anchor in html('.r')('a')]
def _extract_links_from_duckduckgo(html):
    """Collect result hrefs from a DuckDuckGo results page."""
    html.remove_namespaces()
    results = []
    for anchor in html.find('a.result__a'):
        redirect = urlparse(anchor.attrib['href'])
        # DDG wraps the real URL in the redirect's 'uddg' query parameter.
        target = parse_qs(redirect.query).get('uddg', '')
        if target:
            results.append(target[0])
    return results
def _extract_links(html, search_engine):
    """Dispatch link extraction to the engine-specific parser (google default)."""
    extractors = {
        'bing': _extract_links_from_bing,
        'duckduckgo': _extract_links_from_duckduckgo,
    }
    extractor = extractors.get(search_engine, _extract_links_from_google)
    return extractor(html)
def _get_search_url(search_engine):
    """Return the query URL template for *search_engine*, defaulting to google."""
    if search_engine in SEARCH_URLS:
        return SEARCH_URLS[search_engine]
    return SEARCH_URLS['google']
def _is_blocked(page):
    """True when *page* looks like a captcha/abuse interstitial."""
    return any(indicator in page for indicator in BLOCK_INDICATORS)
def _get_links(query):
    """Search the configured engine for *query* and return raw result links.

    Raises BlockError when the engine serves a block page.
    """
    engine = os.getenv('HOWDOI_SEARCH_ENGINE', 'google')
    page = _get_result(_get_search_url(engine).format(URL, url_quote(query)))
    if _is_blocked(page):
        _print_err('Unable to find an answer because the search engine temporarily blocked the request. '
                   'Please wait a few minutes or select a different search engine.')
        raise BlockError("Temporary block by search engine")
    return _extract_links(pq(page), engine)
def get_link_at_pos(links, position):
    """Return the 1-indexed *position*-th link, clamping to the last one.

    Returns False when *links* is empty.
    """
    if not links:
        return False
    index = position - 1 if len(links) >= position else -1
    return links[index]
def _format_output(code, args):
    """Syntax-highlight *code* for the terminal when --color is requested;
    otherwise pass it through unchanged."""
    if not args['color']:
        return code
    lexer = None
    # First try the StackOverflow tags and the query words as lexer names.
    for keyword in args['query'].split() + args['tags']:
        try:
            lexer = get_lexer_by_name(keyword)
            break
        except ClassNotFound:
            pass
    if not lexer:
        # Nothing matched; fall back to content-based guessing.
        try:
            lexer = guess_lexer(code)
        except ClassNotFound:
            return code
    return highlight(code, lexer, TerminalFormatter(bg='dark'))
def _is_question(link):
    """True when *link* is a real question URL (not a cached copy)."""
    if any(fragment in link for fragment in BLOCKED_QUESTION_FRAGMENTS):
        return False
    return re.search(r'questions/\d+/', link)
def _get_questions(links):
    """Keep only links that point at question pages."""
    questions = []
    for link in links:
        if _is_question(link):
            questions.append(link)
    return questions
def _get_answer(args, links):
    """Fetch and extract the answer text for the question at args['pos'].

    Returns False when no link is available, otherwise the (possibly
    highlighted) answer text.  Side effect: fills args['tags'] with the
    question's tags.
    """
    link = get_link_at_pos(links, args['pos'])
    if not link:
        return False
    cache_key = link
    page = cache.get(link)
    if not page:
        # answertab=votes sorts answers so the first one is the top-voted.
        page = _get_result(link + '?answertab=votes')
        cache.set(cache_key, page)
    html = pq(page)
    first_answer = html('.answer').eq(0)
    # Prefer code blocks; fall back to the prose answer body below.
    instructions = first_answer.find('pre') or first_answer.find('code')
    args['tags'] = [t.text for t in html('.post-tag')]
    if not instructions and not args['all']:
        text = get_text(first_answer.find('.post-text').eq(0))
    elif args['all']:
        # --all: include the entire answer, highlighting code fragments.
        texts = []
        for html_tag in first_answer.items('.post-text > *'):
            current_text = get_text(html_tag)
            if current_text:
                if html_tag[0].tag in ['pre', 'code']:
                    texts.append(_format_output(current_text, args))
                else:
                    texts.append(current_text)
        text = '\n'.join(texts)
    else:
        text = _format_output(get_text(instructions.eq(0)), args)
    if text is None:
        text = NO_ANSWER_MSG
    text = text.strip()
    return text
def _get_links_with_cache(query):
    """Return question links for *query*, consulting the link cache first.

    Returns False (and caches the empty marker) when the search produced
    nothing.
    """
    cache_key = query + "-links"
    res = cache.get(cache_key)
    if res:
        # CACHE_EMPTY_VAL marks a remembered "no results" outcome.
        return False if res == CACHE_EMPTY_VAL else res
    links = _get_links(query)
    if not links:
        # BUG FIX: the original fell through here after caching the empty
        # marker, so the key was immediately cache.set() a second time below.
        cache.set(cache_key, CACHE_EMPTY_VAL)
        return False
    question_links = _get_questions(links)
    cache.set(cache_key, question_links or CACHE_EMPTY_VAL)
    return question_links
def build_splitter(splitter_character='=', spliter_length=80):
    """Return a horizontal rule: newline, repeated character, two newlines.

    (The 'spliter_length' misspelling is kept for call compatibility.)
    """
    rule = splitter_character * spliter_length
    return '\n{0}\n\n'.format(rule)
def _get_instructions(args):
    """
    @args: command-line arguments
    returns: json string with answers and other metadata
             False if unable to get answers
    """
    question_links = _get_links_with_cache(args['query'])
    if not question_links:
        return False
    res = []
    num_answers = args['num_answers']
    initial_position = args['pos']
    # Collect up to num_answers answers starting at the requested position.
    # (Removed dead locals from the original: init_pos, answers,
    # answer_spliter were assigned but never used.)
    for answer_number in range(num_answers):
        current_position = answer_number + initial_position
        args['pos'] = current_position
        link = get_link_at_pos(question_links, current_position)
        answer = _get_answer(args, question_links)
        if not answer:
            continue
        if not args['link'] and not args['json_output'] and not args['json_formatted']:
            star_headers = (num_answers > 1 or args['all'])
            answer = format_answer(link, answer, star_headers)
        res.append({
            'answer': answer,
            'link': link,
            # BUG FIX: was ``curr_pos`` — an undefined name that raised
            # NameError on the first collected answer.
            'position': current_position
        })
    return json.dumps(res)
def format_answer(link, answer, star_headers):
    """Wrap *answer* in a starred 'Answer from <link>' header when requested."""
    if not star_headers:
        return answer
    return ANSWER_HEADER.format(link, answer, STAR_HEADER)
def _clear_cache():
    """Wipe the on-disk cache, creating the backend first if needed."""
    global cache
    cache = cache or FileSystemCache(CACHE_DIR, CACHE_ENTRY_MAX, 0)
    return cache.clear()
def _is_help_query(query: str):
    """True when *query* is one of the canned 'how do I use howdoi' forms."""
    return query.lower() in SUPPORTED_HELP_QUERIES
def _parse_json(res, args):
"""
@res: json object with answers and metadata
@args: command-line arguments (used for parsing)
returns: formated string of text ready to be printed
"""
res = json.loads(res)
if "error" in res:
return res["error"]
splitter_length = 80
answer_splitter = '\n' + '=' * splitter_length + '\n\n'
formatted_answers = []
for answer in res:
next_ans = answer["answer"]
if args["link"]: # if we only want links
next_ans = answer["link"]
formatted_answers.append(next_ans)
def _get_help_instructions():
    """Return the canned help text listing common howdoi invocations."""
    query = 'print hello world in python'
    examples = [
        'Here are a few popular howdoi commands ',
        '>>> howdoi {} (default query)',
        '>>> howdoi {} -a (read entire answer)',
        '>>> howdoi {} -n [number] (retrieve n number of answers)',
        '>>> howdoi {} -l (display only a link to where the answer is from',
        '>>> howdoi {} -c (Add colors to the output)',
        '>>> howdoi {} -e (Specify the search engine you want to use e.g google,bing)'
    ]
    filled = [line.format(query) for line in examples]
    return build_splitter(' ', 60).join(filled)
def _format_json(res):
    # Helper: pretty-print the raw JSON answer payload.  The original code
    # called _format_json but never defined it anywhere in this module, so
    # the -jf/--json-formatted path raised NameError.
    return json.dumps(json.loads(res), indent=2)


def _render_result(res, args):
    # Render the raw JSON payload in the output mode the caller asked for.
    if args["json_output"]:
        return res  # default / raw json
    if args["json_formatted"]:
        return _format_json(res)  # clean json
    return _parse_json(res, args)  # string format


def howdoi(raw_query):
    """Answer *raw_query* (a raw command string or pre-parsed args dict).

    Returns the rendered answer text / JSON, or an error payload on
    network failure.
    """
    args = raw_query
    if type(raw_query) is str:  # you can pass either a raw or a parsed query
        parser = get_parser()
        args = vars(parser.parse_args(raw_query.split(' ')))
    args['query'] = ' '.join(args['query']).replace('?', '')
    cache_key = str(args)
    if _is_help_query(args['query']):
        return _get_help_instructions() + '\n'
    res = cache.get(cache_key)
    if res:
        return _render_result(res, args)
    try:
        res = _get_instructions(args)
        if not res:
            res = json.dumps({"error": "Sorry, couldn\'t find any help with that topic\n"})
        cache.set(cache_key, res)
    except (ConnectionError, SSLError):
        # BUG FIX: the original rendered the result inside a ``finally``
        # block whose ``return`` silently overrode this error return (and
        # would swallow any other exception); render on the normal path
        # instead so the network-error payload actually reaches the caller.
        return json.dumps({"error": "Failed to establish network connection\n"})
    return _render_result(res, args)
def get_parser():
    """Build the howdoi command-line argument parser."""
    parser = argparse.ArgumentParser(description='instant coding answers via the command line')
    parser.add_argument('query', metavar='QUERY', type=str, nargs='*',
                        help='the question to answer')
    parser.add_argument('-p', '--pos', type=int, default=1,
                        help='select answer in specified position (default: 1)')
    parser.add_argument('-a', '--all', action='store_true',
                        help='display the full text of the answer')
    parser.add_argument('-l', '--link', action='store_true',
                        help='display only the answer link')
    parser.add_argument('-c', '--color', action='store_true',
                        help='enable colorized output')
    parser.add_argument('-n', '--num-answers', type=int, default=1,
                        help='number of answers to return')
    parser.add_argument('-C', '--clear-cache', action='store_true',
                        help='clear the cache')
    parser.add_argument('-j', '--json-output', action='store_true',
                        help='return answers in raw json')
    parser.add_argument('-jf', '--json-formatted', action='store_true',
                        help='return answers in formatted json')
    parser.add_argument('-v', '--version', action='store_true',
                        help='displays the current version of howdoi')
    parser.add_argument('-e', '--engine', dest='search_engine', nargs="?", default='google',
                        help='change search engine for this query only (google, bing, duckduckgo)')
    return parser
def command_line_runner():
    # CLI entry point: parse arguments, handle the utility flags
    # (--version, --clear-cache, empty query), run the query, and write the
    # UTF-8-encoded result to stdout.
    parser = get_parser()
    args = vars(parser.parse_args())
    if args['version']:
        _print_ok(__version__)
        return
    if args['clear_cache']:
        if _clear_cache():
            _print_ok('Cache cleared successfully')
        else:
            _print_err('Clearing cache failed')
        return
    if not args['query']:
        parser.print_help()
        return
    # Environment variable can force colorized output.
    if os.getenv('HOWDOI_COLORIZE'):
        args['color'] = True
    if not args['search_engine'] in SUPPORTED_SEARCH_ENGINES:
        _print_err('Unsupported engine.\nThe supported engines are: %s' % ', '.join(SUPPORTED_SEARCH_ENGINES))
        return
    elif args['search_engine'] != 'google':
        # Non-default engine is passed down to _get_links via the environment.
        os.environ['HOWDOI_SEARCH_ENGINE'] = args['search_engine']
    utf8_result = howdoi(args).encode('utf-8', 'ignore')
    if sys.version < '3':
        print(utf8_result)
    else:
        # Write UTF-8 to stdout: https://stackoverflow.com/a/3603160
        sys.stdout.buffer.write(utf8_result)
    # close the session to release connection
    howdoi_session.close()
# Script entry point.
if __name__ == '__main__':
    command_line_runner()
# Removed the -jf flag; using json.tool for formatting instead
#!/usr/bin/env python
######################################################
#
# howdoi - instant coding answers via the command line
# written by Benjamin Gleitzman (gleitz@mit.edu)
# inspired by Rich Jones (rich@anomos.info)
#
######################################################
from __future__ import print_function
import gc
gc.disable() # noqa: E402
import argparse
import os
import appdirs
import re
from cachelib import FileSystemCache, NullCache
import requests
import sys
import json
from . import __version__
from pygments import highlight
from pygments.lexers import guess_lexer, get_lexer_by_name
from pygments.formatters.terminal import TerminalFormatter
from pygments.util import ClassNotFound
from pyquery import PyQuery as pq
from requests.exceptions import ConnectionError
from requests.exceptions import SSLError
# Handle imports for Python 2 and 3
# Handle imports for Python 2 and 3.
# FIX: compare sys.version_info rather than the sys.version string —
# lexicographic string comparison is fragile (it misorders double-digit
# major/minor versions), while version_info compares numerically.
if sys.version_info[0] < 3:
    import codecs
    from urllib import quote as url_quote
    from urllib import getproxies
    from urlparse import urlparse, parse_qs

    # Handling Unicode: http://stackoverflow.com/a/6633040/305414
    def u(x):
        return codecs.unicode_escape_decode(x)[0]
else:
    from urllib.request import getproxies
    from urllib.parse import quote as url_quote, urlparse, parse_qs

    def u(x):
        # Python 3 strings are already unicode.
        return x
# rudimentary standardized 3-level log output
def _print_err(x): print("[ERROR] " + x)
_print_ok = print # noqa: E305
def _print_dbg(x): print("[DEBUG] " + x) # noqa: E302
# Transport configuration: TLS can be disabled via environment variable.
if os.getenv('HOWDOI_DISABLE_SSL'):  # Set http instead of https
    SCHEME = 'http://'
    VERIFY_SSL_CERTIFICATE = False
else:
    SCHEME = 'https://'
    VERIFY_SSL_CERTIFICATE = True
# Engines accepted by the -e/--engine flag.
SUPPORTED_SEARCH_ENGINES = ('google', 'bing', 'duckduckgo')
# Site searched for answers (overridable with HOWDOI_URL).
URL = os.getenv('HOWDOI_URL') or 'stackoverflow.com'
# One of these is picked at random per request.
USER_AGENTS = ('Mozilla/5.0 (Macintosh; Intel Mac OS X 10.7; rv:11.0) Gecko/20100101 Firefox/11.0',
               'Mozilla/5.0 (X11; Ubuntu; Linux x86_64; rv:22.0) Gecko/20100 101 Firefox/22.0',
               'Mozilla/5.0 (Windows NT 6.1; rv:11.0) Gecko/20100101 Firefox/11.0',
               ('Mozilla/5.0 (Macintosh; Intel Mac OS X 10_7_4) AppleWebKit/536.5 (KHTML, like Gecko) '
                'Chrome/19.0.1084.46 Safari/536.5'),
               ('Mozilla/5.0 (Windows; Windows NT 6.1) AppleWebKit/536.5 (KHTML, like Gecko) Chrome/19.0.1084.46'
                'Safari/536.5'), )
# Per-engine query URL templates ({0}=site, {1}=url-quoted query).
SEARCH_URLS = {
    'bing': SCHEME + 'www.bing.com/search?q=site:{0}%20{1}&hl=en',
    'google': SCHEME + 'www.google.com/search?q=site:{0}%20{1}&hl=en',
    'duckduckgo': SCHEME + 'duckduckgo.com/?q=site:{0}%20{1}&t=hj&ia=web'
}
# Markers of a captcha/abuse interstitial instead of real results.
BLOCK_INDICATORS = (
    'form id="captcha-form"',
    'This page appears when Google automatically detects requests coming from your computer '
    'network which appear to be in violation of the <a href="//www.google.com/policies/terms/">Terms of Service'
)
# Result links that are never real questions.
BLOCKED_QUESTION_FRAGMENTS = (
    'webcache.googleusercontent.com',
)
STAR_HEADER = u('\u2605')
ANSWER_HEADER = u('{2} Answer from {0} {2}\n{1}')
NO_ANSWER_MSG = '< no answer given >'
# Sentinel cached in place of an empty result set.
CACHE_EMPTY_VAL = "NULL"
CACHE_DIR = appdirs.user_cache_dir('howdoi')
CACHE_ENTRY_MAX = 128
# Queries answered with the built-in help text instead of a web search.
SUPPORTED_HELP_QUERIES = ['use howdoi', 'howdoi', 'run howdoi',
                          'do howdoi', 'howdoi howdoi', 'howdoi use howdoi']
if os.getenv('HOWDOI_DISABLE_CACHE'):
    cache = NullCache()  # works like an always empty cache
else:
    cache = FileSystemCache(CACHE_DIR, CACHE_ENTRY_MAX, default_timeout=0)
# Shared HTTP session (connection pooling across requests).
howdoi_session = requests.session()
class BlockError(RuntimeError):
    """Raised when the search engine serves a block/captcha page."""
    pass
def _random_int(width):
    """Return a non-negative integer built from *width* bytes of OS entropy."""
    bres = os.urandom(width)
    if sys.version < '3':
        # Python 2: bytes expose .encode('hex')
        ires = int(bres.encode('hex'), 16)
    else:
        # Python 3: interpret the raw bytes directly (little-endian)
        ires = int.from_bytes(bres, 'little')
    return ires
def _random_choice(seq):
    """Pick a pseudo-random element of *seq* using one byte of OS entropy."""
    return seq[_random_int(1) % len(seq)]
def get_proxies():
    """Return the system proxy map, keeping only http* entries and making
    sure each value carries an explicit scheme."""
    proxies = getproxies()
    filtered_proxies = {}
    for key, value in proxies.items():
        if key.startswith('http'):
            if not value.startswith('http'):
                # Environment may supply a bare host:port; prepend a scheme.
                filtered_proxies[key] = 'http://%s' % value
            else:
                filtered_proxies[key] = value
    return filtered_proxies
def _get_result(url):
    """GET *url* through the shared session and return the body text.

    Prints a remediation hint and re-raises on SSL errors.
    """
    try:
        return howdoi_session.get(url, headers={'User-Agent': _random_choice(USER_AGENTS)},
                                  proxies=get_proxies(),
                                  verify=VERIFY_SSL_CERTIFICATE).text
    except requests.exceptions.SSLError as e:
        _print_err('Encountered an SSL Error. Try using HTTP instead of '
                   'HTTPS by setting the environment variable "HOWDOI_DISABLE_SSL".\n')
        raise e
def _add_links_to_text(element):
    """Rewrite every <a> inside *element* as a markdown-style '[text](href)'."""
    hyperlinks = element.find('a')
    for hyperlink in hyperlinks:
        pquery_object = pq(hyperlink)
        href = hyperlink.attrib['href']
        copy = pquery_object.text()
        if (copy == href):
            # A bare URL is left as-is.
            replacement = copy
        else:
            replacement = "[{0}]({1})".format(copy, href)
        pquery_object.replace_with(replacement)
def get_text(element):
    ''' return inner text in pyquery element '''
    _add_links_to_text(element)
    try:
        # Newer pyquery supports whitespace-preserving extraction.
        return element.text(squash_space=False)
    except TypeError:
        # Older pyquery: no squash_space parameter.
        return element.text()
def _extract_links_from_bing(html):
    """Collect result hrefs from a Bing results page."""
    html.remove_namespaces()
    return [a.attrib['href'] for a in html('.b_algo')('h2')('a')]
def _extract_links_from_google(html):
    """Collect result hrefs from a Google results page (old or new markup)."""
    return [a.attrib['href'] for a in html('.l')] or \
        [a.attrib['href'] for a in html('.r')('a')]
def _extract_links_from_duckduckgo(html):
    """Collect result hrefs from a DuckDuckGo results page (the real URL is
    wrapped in the redirect's 'uddg' query parameter)."""
    html.remove_namespaces()
    links_anchors = html.find('a.result__a')
    results = []
    for anchor in links_anchors:
        link = anchor.attrib['href']
        url_obj = urlparse(link)
        parsed_url = parse_qs(url_obj.query).get('uddg', '')
        if parsed_url:
            results.append(parsed_url[0])
    return results
def _extract_links(html, search_engine):
    """Dispatch link extraction to the engine-specific parser (google default)."""
    if search_engine == 'bing':
        return _extract_links_from_bing(html)
    if search_engine == 'duckduckgo':
        return _extract_links_from_duckduckgo(html)
    return _extract_links_from_google(html)
def _get_search_url(search_engine):
    """Return the query URL template for *search_engine*, defaulting to google."""
    return SEARCH_URLS.get(search_engine, SEARCH_URLS['google'])
def _is_blocked(page):
    """True when *page* looks like a captcha/abuse interstitial."""
    for indicator in BLOCK_INDICATORS:
        if page.find(indicator) != -1:
            return True
    return False
def _get_links(query):
    """Search the configured engine for *query* and return raw result links.

    Raises BlockError when the engine serves a block page.
    """
    search_engine = os.getenv('HOWDOI_SEARCH_ENGINE', 'google')
    search_url = _get_search_url(search_engine)
    result = _get_result(search_url.format(URL, url_quote(query)))
    if _is_blocked(result):
        _print_err('Unable to find an answer because the search engine temporarily blocked the request. '
                   'Please wait a few minutes or select a different search engine.')
        raise BlockError("Temporary block by search engine")
    html = pq(result)
    return _extract_links(html, search_engine)
def get_link_at_pos(links, position):
    """Return the 1-indexed *position*-th link, clamping to the last one.

    Returns False when *links* is empty.
    """
    if not links:
        return False
    if len(links) >= position:
        link = links[position - 1]
    else:
        # Position past the end: fall back to the last link.
        link = links[-1]
    return link
def _format_output(code, args):
    """Syntax-highlight *code* for the terminal when --color is requested;
    otherwise pass it through unchanged."""
    if not args['color']:
        return code
    lexer = None
    # try to find a lexer using the StackOverflow tags
    # or the query arguments
    for keyword in args['query'].split() + args['tags']:
        try:
            lexer = get_lexer_by_name(keyword)
            break
        except ClassNotFound:
            pass
    # no lexer found above, use the guesser
    if not lexer:
        try:
            lexer = guess_lexer(code)
        except ClassNotFound:
            return code
    return highlight(code,
                     lexer,
                     TerminalFormatter(bg='dark'))
def _is_question(link):
    """True when *link* is a real question URL (not a cached copy)."""
    for fragment in BLOCKED_QUESTION_FRAGMENTS:
        if fragment in link:
            return False
    return re.search(r'questions/\d+/', link)
def _get_questions(links):
    """Keep only links that point at question pages."""
    return [link for link in links if _is_question(link)]
def _get_answer(args, links):
    """Fetch and extract the answer text for the question at args['pos'].

    Returns False when no link is available, otherwise the (possibly
    highlighted) answer text.  Side effect: fills args['tags'] with the
    question's tags.
    """
    link = get_link_at_pos(links, args['pos'])
    if not link:
        return False
    cache_key = link
    page = cache.get(link)
    if not page:
        # answertab=votes sorts answers so the first one is the top-voted.
        page = _get_result(link + '?answertab=votes')
        cache.set(cache_key, page)
    html = pq(page)
    first_answer = html('.answer').eq(0)
    # Prefer code blocks; fall back to the prose answer body below.
    instructions = first_answer.find('pre') or first_answer.find('code')
    args['tags'] = [t.text for t in html('.post-tag')]
    if not instructions and not args['all']:
        text = get_text(first_answer.find('.post-text').eq(0))
    elif args['all']:
        # --all: include the entire answer, highlighting code fragments.
        texts = []
        for html_tag in first_answer.items('.post-text > *'):
            current_text = get_text(html_tag)
            if current_text:
                if html_tag[0].tag in ['pre', 'code']:
                    texts.append(_format_output(current_text, args))
                else:
                    texts.append(current_text)
        text = '\n'.join(texts)
    else:
        text = _format_output(get_text(instructions.eq(0)), args)
    if text is None:
        text = NO_ANSWER_MSG
    text = text.strip()
    return text
def _get_links_with_cache(query):
    """Return question links for *query*, consulting the link cache first.

    Returns False (and caches the empty marker) when the search produced
    nothing.
    """
    cache_key = query + "-links"
    res = cache.get(cache_key)
    if res:
        # CACHE_EMPTY_VAL marks a remembered "no results" outcome.
        return False if res == CACHE_EMPTY_VAL else res
    links = _get_links(query)
    if not links:
        # BUG FIX: the original fell through here after caching the empty
        # marker, so the key was immediately cache.set() a second time below.
        cache.set(cache_key, CACHE_EMPTY_VAL)
        return False
    question_links = _get_questions(links)
    cache.set(cache_key, question_links or CACHE_EMPTY_VAL)
    return question_links
def build_splitter(splitter_character='=', spliter_length=80):
    """Build a visual divider line used between multiple answers."""
    divider = splitter_character * spliter_length
    return '\n{}\n\n'.format(divider)
def _get_instructions(args):
    """
    @args: command-line arguments
    returns: json string with answers and other metadata
             False if unable to get answers
    """
    question_links = _get_links_with_cache(args['query'])
    if not question_links:
        return False

    initial_position = args['pos']
    num_answers = args['num_answers']
    res = []
    for answer_number in range(num_answers):
        current_position = answer_number + initial_position
        # _get_answer() reads the position from args, so keep it in sync.
        args['pos'] = current_position
        link = get_link_at_pos(question_links, current_position)
        answer = _get_answer(args, question_links)
        if not answer:
            continue
        if not args['link'] and not args['json_output']:
            # Star headers separate answers when more than one is shown
            # or when full answers are requested.
            star_headers = (num_answers > 1 or args['all'])
            answer = format_answer(link, answer, star_headers)
        res.append({
            'answer': answer,
            'link': link,
            # Bug fix: the original referenced an undefined name `curr_pos`,
            # raising NameError whenever an answer was found.
            'position': current_position,
        })
    return json.dumps(res)
def format_answer(link, answer, star_headers):
    """Wrap *answer* in the starred header block when *star_headers* is set."""
    if not star_headers:
        return answer
    return ANSWER_HEADER.format(link, answer, STAR_HEADER)
def _clear_cache():
    """Remove every cached page/answer; returns the backend's clear() result."""
    global cache
    if not cache:
        # The cache may not have been initialized yet (e.g. direct API use).
        cache = FileSystemCache(CACHE_DIR, CACHE_ENTRY_MAX, 0)
    return cache.clear()
def _is_help_query(query: str) -> bool:
    """Return True when *query* matches one of the built-in help queries."""
    # Idiom fix: a membership test replaces the original any([...]) over an
    # eagerly built list of equality comparisons — same result, no temp list.
    return query.lower() in SUPPORTED_HELP_QUERIES
def _parse_json(res, args):
"""
@res: json object with answers and metadata
@args: command-line arguments (used for parsing)
returns: formated string of text ready to be printed
"""
res = json.loads(res)
if "error" in res:
return res["error"]
splitter_length = 80
answer_splitter = '\n' + '=' * splitter_length + '\n\n'
formatted_answers = []
for answer in res:
next_ans = answer["answer"]
if args["link"]: # if we only want links
next_ans = answer["link"]
formatted_answers.append(next_ans)
def _get_help_instructions():
    """Return a formatted block of example howdoi invocations."""
    separator = build_splitter(' ', 60)
    sample_query = 'print hello world in python'
    templates = (
        'Here are a few popular howdoi commands ',
        '>>> howdoi {} (default query)',
        '>>> howdoi {} -a (read entire answer)',
        '>>> howdoi {} -n [number] (retrieve n number of answers)',
        '>>> howdoi {} -l (display only a link to where the answer is from',
        '>>> howdoi {} -c (Add colors to the output)',
        '>>> howdoi {} -e (Specify the search engine you want to use e.g google,bing)',
    )
    return separator.join(line.format(sample_query) for line in templates)
def howdoi(raw_query):
    """Resolve *raw_query* (a string or a parsed-args dict) to an answer.

    returns: raw JSON when the json-output flag is set, otherwise the
             human-readable answer text. Network failures produce an error
             payload instead of raising.
    """
    args = raw_query
    if type(raw_query) is str:  # you can pass either a raw or a parsed query
        parser = get_parser()
        args = vars(parser.parse_args(raw_query.split(' ')))

    args['query'] = ' '.join(args['query']).replace('?', '')
    cache_key = str(args)

    if _is_help_query(args['query']):
        return _get_help_instructions() + '\n'

    res = cache.get(cache_key)
    if not res:
        try:
            res = _get_instructions(args)
            if not res:
                res = json.dumps({"error": "Sorry, couldn\'t find any help with that topic\n"})
            cache.set(cache_key, res)
        except (ConnectionError, SSLError):
            # Bug fix: the original returned from a `finally` block, which
            # discarded this handler's return value and could then crash in
            # _parse_json on an unset result (flake8-bugbear B012).
            res = json.dumps({"error": "Failed to establish network connection\n"})

    if args["json_output"]:
        return res  # raw json format
    return _parse_json(res, args)  # normal string format
def get_parser():
    """Build the argparse parser for the howdoi command line."""
    parser = argparse.ArgumentParser(description='instant coding answers via the command line')
    add = parser.add_argument
    add('query', metavar='QUERY', type=str, nargs='*', help='the question to answer')
    add('-p', '--pos', help='select answer in specified position (default: 1)', default=1, type=int)
    add('-a', '--all', help='display the full text of the answer', action='store_true')
    add('-l', '--link', help='display only the answer link', action='store_true')
    add('-c', '--color', help='enable colorized output', action='store_true')
    add('-n', '--num-answers', help='number of answers to return', default=1, type=int)
    add('-C', '--clear-cache', help='clear the cache', action='store_true')
    add('-j', '--json-output', help='return answers in raw json format', action='store_true')
    add('-v', '--version', help='displays the current version of howdoi', action='store_true')
    add('-e', '--engine', help='change search engine for this query only (google, bing, duckduckgo)',
        dest='search_engine', nargs="?", default='google')
    return parser
def command_line_runner():
    """Entry point for the ``howdoi`` console script."""
    parser = get_parser()
    args = vars(parser.parse_args())

    # Informational flags short-circuit before any query handling.
    if args['version']:
        _print_ok(__version__)
        return

    if args['clear_cache']:
        if _clear_cache():
            _print_ok('Cache cleared successfully')
        else:
            _print_err('Clearing cache failed')
        return

    if not args['query']:
        parser.print_help()
        return

    # The environment can force colorized output on.
    if os.getenv('HOWDOI_COLORIZE'):
        args['color'] = True

    engine = args['search_engine']
    if engine not in SUPPORTED_SEARCH_ENGINES:
        _print_err('Unsupported engine.\nThe supported engines are: %s' % ', '.join(SUPPORTED_SEARCH_ENGINES))
        return
    if engine != 'google':
        os.environ['HOWDOI_SEARCH_ENGINE'] = engine

    utf8_result = howdoi(args).encode('utf-8', 'ignore')
    if sys.version < '3':
        print(utf8_result)
    else:
        # Write UTF-8 to stdout: https://stackoverflow.com/a/3603160
        sys.stdout.buffer.write(utf8_result)
    # close the session to release connection
    howdoi_session.close()
# Allow running the module directly; mirrors the installed console script.
if __name__ == '__main__':
    command_line_runner()
# --- end of module ---
import importlib
import inspect
import os
import re
import sys
import tempfile
from io import StringIO
from pathlib import Path
from django.conf.urls import url
from django.core import mail
from django.core.files.uploadedfile import SimpleUploadedFile
from django.db import DatabaseError, connection
from django.shortcuts import render
from django.template import TemplateDoesNotExist
from django.test import RequestFactory, SimpleTestCase, override_settings
from django.test.utils import LoggingCaptureMixin, patch_logger
from django.urls import reverse
from django.utils.encoding import force_bytes
from django.utils.functional import SimpleLazyObject
from django.views.debug import (
CLEANSED_SUBSTITUTE, CallableSettingWrapper, ExceptionReporter,
cleanse_setting, technical_500_response,
)
from ..views import (
custom_exception_reporter_filter_view, index_page,
multivalue_dict_key_error, non_sensitive_view, paranoid_view,
sensitive_args_function_caller, sensitive_kwargs_function_caller,
sensitive_method_view, sensitive_view,
)
PY36 = sys.version_info >= (3, 6)
class User:
    """Minimal stand-in for a request user; rendered as 'jacob' in reports."""
    def __str__(self):
        return 'jacob'
class WithoutEmptyPathUrls:
    # URLconf stand-in whose patterns do not include the empty path ''.
    urlpatterns = [url(r'url/$', index_page, name='url')]
class CallableSettingWrapperTests(SimpleTestCase):
    """Unit tests for CallableSettingWrapper."""

    def test_repr(self):
        # The wrapper must delegate repr() to the wrapped callable.
        class WrappedCallable:
            def __repr__(self):
                return "repr from the wrapped callable"

            def __call__(self):
                pass

        actual = repr(CallableSettingWrapper(WrappedCallable()))
        self.assertEqual(actual, "repr from the wrapped callable")
@override_settings(DEBUG=True, ROOT_URLCONF='view_tests.urls')
class DebugViewTests(LoggingCaptureMixin, SimpleTestCase):
    """Tests for the technical 404/500 debug views rendered when DEBUG=True."""

    def test_files(self):
        # Uploaded file *names* appear in the debug page, file *contents* do not.
        response = self.client.get('/raises/')
        self.assertEqual(response.status_code, 500)
        data = {
            'file_data.txt': SimpleUploadedFile('file_data.txt', b'haha'),
        }
        response = self.client.post('/raises/', data)
        self.assertContains(response, 'file_data.txt', status_code=500)
        self.assertNotContains(response, 'haha', status_code=500)

    def test_400(self):
        # When DEBUG=True, technical_500_template() is called.
        response = self.client.get('/raises400/')
        self.assertContains(response, '<div class="context" id="', status_code=400)

    # Ensure no 403.html template exists to test the default case.
    @override_settings(TEMPLATES=[{
        'BACKEND': 'django.template.backends.django.DjangoTemplates',
    }])
    def test_403(self):
        response = self.client.get('/raises403/')
        self.assertContains(response, '<h1>403 Forbidden</h1>', status_code=403)

    # Set up a test 403.html template.
    @override_settings(TEMPLATES=[{
        'BACKEND': 'django.template.backends.django.DjangoTemplates',
        'OPTIONS': {
            'loaders': [
                ('django.template.loaders.locmem.Loader', {
                    '403.html': 'This is a test template for a 403 error ({{ exception }}).',
                }),
            ],
        },
    }])
    def test_403_template(self):
        # A user-supplied 403.html is used and receives the exception message.
        response = self.client.get('/raises403/')
        self.assertContains(response, 'test template', status_code=403)
        self.assertContains(response, '(Insufficient Permissions).', status_code=403)

    def test_404(self):
        response = self.client.get('/raises404/')
        self.assertEqual(response.status_code, 404)

    def test_raised_404(self):
        # An explicitly raised Http404 shows the unmatched path in the page.
        response = self.client.get('/views/raises404/')
        self.assertContains(response, "<code>not-in-urls</code>, didn't match", status_code=404)

    def test_404_not_in_urls(self):
        # A plain unresolved URL has no "Raised by:" section.
        response = self.client.get('/not-in-urls')
        self.assertNotContains(response, "Raised by:", status_code=404)
        self.assertContains(response, "<code>not-in-urls</code>, didn't match", status_code=404)

    @override_settings(ROOT_URLCONF=WithoutEmptyPathUrls)
    def test_404_empty_path_not_in_urls(self):
        response = self.client.get('/')
        self.assertContains(response, "The empty path didn't match any of these.", status_code=404)

    def test_technical_404(self):
        # Http404 raised inside a view names the raising view.
        response = self.client.get('/views/technical404/')
        self.assertContains(response, "Raised by:", status_code=404)
        self.assertContains(response, "view_tests.views.technical404", status_code=404)

    def test_classbased_technical_404(self):
        response = self.client.get('/views/classbased404/')
        self.assertContains(response, "Raised by:", status_code=404)
        self.assertContains(response, "view_tests.views.Http404View", status_code=404)

    def test_non_l10ned_numeric_ids(self):
        """
        Numeric IDs and fancy traceback context blocks line numbers shouldn't be localized.
        """
        with self.settings(DEBUG=True, USE_L10N=True):
            response = self.client.get('/raises500/')
            # We look for a HTML fragment of the form
            # '<div class="context" id="c38123208">', not '<div class="context" id="c38,123,208"'
            self.assertContains(response, '<div class="context" id="', status_code=500)
            match = re.search(b'<div class="context" id="(?P<id>[^"]+)">', response.content)
            self.assertIsNotNone(match)
            id_repr = match.group('id')
            self.assertFalse(
                re.search(b'[^c0-9]', id_repr),
                "Numeric IDs in debug response HTML page shouldn't be localized (value: %s)." % id_repr.decode()
            )

    def test_template_exceptions(self):
        # The last traceback frame must be the template line that raised.
        try:
            self.client.get(reverse('template_exception'))
        except Exception:
            raising_loc = inspect.trace()[-1][-2][0].strip()
            self.assertNotEqual(
                raising_loc.find("raise Exception('boom')"), -1,
                "Failed to find 'raise Exception' in last frame of "
                "traceback, instead found: %s" % raising_loc
            )

    def test_template_loader_postmortem(self):
        """Tests for not existing file"""
        template_name = "notfound.html"
        with tempfile.NamedTemporaryFile(prefix=template_name) as tmpfile:
            tempdir = os.path.dirname(tmpfile.name)
            template_path = os.path.join(tempdir, template_name)
            with override_settings(TEMPLATES=[{
                'BACKEND': 'django.template.backends.django.DjangoTemplates',
                'DIRS': [tempdir],
            }]):
                response = self.client.get(reverse('raises_template_does_not_exist', kwargs={"path": template_name}))
            # The attempted path appears twice: once as text, once in the HTML list.
            self.assertContains(response, "%s (Source does not exist)" % template_path, status_code=500, count=2)
            # Assert as HTML.
            self.assertContains(
                response,
                '<li><code>django.template.loaders.filesystem.Loader</code>: '
                '%s (Source does not exist)</li>' % os.path.join(tempdir, 'notfound.html'),
                status_code=500,
                html=True,
            )

    def test_no_template_source_loaders(self):
        """
        Make sure if you don't specify a template, the debug view doesn't blow up.
        """
        with self.assertRaises(TemplateDoesNotExist):
            self.client.get('/render_no_template/')

    @override_settings(ROOT_URLCONF='view_tests.default_urls')
    def test_default_urlconf_template(self):
        """
        Make sure that the default URLconf template is shown instead
        of the technical 404 page, if the user has not altered their
        URLconf yet.
        """
        response = self.client.get('/')
        self.assertContains(
            response,
            "<h2>Congratulations on your first Django-powered page.</h2>"
        )

    @override_settings(ROOT_URLCONF='view_tests.regression_21530_urls')
    def test_regression_21530(self):
        """
        Regression test for bug #21530.
        If the admin app include is replaced with exactly one url
        pattern, then the technical 404 template should be displayed.
        The bug here was that an AttributeError caused a 500 response.
        """
        response = self.client.get('/')
        self.assertContains(
            response,
            "Page not found <span>(404)</span>",
            status_code=404
        )
class DebugViewQueriesAllowedTests(SimpleTestCase):
    # May need a query to initialize MySQL connection
    allow_database_queries = True

    def test_handle_db_exception(self):
        """
        Ensure the debug view works when a database exception is raised by
        performing an invalid query and passing the exception to the debug view.
        """
        with connection.cursor() as cursor:
            try:
                cursor.execute('INVALID SQL')
            except DatabaseError:
                exc_info = sys.exc_info()

        rf = RequestFactory()
        response = technical_500_response(rf.get('/'), *exc_info)
        self.assertContains(response, 'OperationalError at /', status_code=500)
@override_settings(
    DEBUG=True,
    ROOT_URLCONF='view_tests.urls',
    # No template directories are configured, so no templates will be found.
    TEMPLATES=[{
        'BACKEND': 'django.template.backends.dummy.TemplateStrings',
    }],
)
class NonDjangoTemplatesDebugViewTests(SimpleTestCase):
    """Debug views must still work with a non-Django template backend."""

    def test_400(self):
        # When DEBUG=True, technical_500_template() is called.
        with patch_logger('django.security.SuspiciousOperation', 'error'):
            response = self.client.get('/raises400/')
        self.assertContains(response, '<div class="context" id="', status_code=400)

    def test_403(self):
        response = self.client.get('/raises403/')
        self.assertContains(response, '<h1>403 Forbidden</h1>', status_code=403)

    def test_404(self):
        response = self.client.get('/raises404/')
        self.assertEqual(response.status_code, 404)

    def test_template_not_found_error(self):
        # Raises a TemplateDoesNotExist exception and shows the debug view.
        url = reverse('raises_template_does_not_exist', kwargs={"path": "notfound.html"})
        response = self.client.get(url)
        self.assertContains(response, '<div class="context" id="', status_code=500)
class ExceptionReporterTests(SimpleTestCase):
    """Tests for the HTML traceback pages produced by ExceptionReporter.

    Bug fix throughout: several expected-HTML literals contained raw
    apostrophes inside single-quoted strings (a SyntaxError); the debug page
    HTML-escapes apostrophes, so the expected fragments use ``&#39;``.
    """
    rf = RequestFactory()

    def test_request_and_exception(self):
        "A simple exception report can be generated"
        try:
            request = self.rf.get('/test_view/')
            request.user = User()
            raise ValueError("Can't find my keys")
        except ValueError:
            exc_type, exc_value, tb = sys.exc_info()
        reporter = ExceptionReporter(request, exc_type, exc_value, tb)
        html = reporter.get_traceback_html()
        self.assertInHTML('<h1>ValueError at /test_view/</h1>', html)
        # The apostrophe is HTML-escaped in the rendered page.
        self.assertIn('<pre class="exception_value">Can&#39;t find my keys</pre>', html)
        self.assertIn('<th>Request Method:</th>', html)
        self.assertIn('<th>Request URL:</th>', html)
        self.assertIn('<h3 id="user-info">USER</h3>', html)
        self.assertIn('<p>jacob</p>', html)
        self.assertIn('<th>Exception Type:</th>', html)
        self.assertIn('<th>Exception Value:</th>', html)
        self.assertIn('<h2>Traceback ', html)
        self.assertIn('<h2>Request information</h2>', html)
        self.assertNotIn('<p>Request data not supplied</p>', html)
        self.assertIn('<p>No POST data</p>', html)

    def test_no_request(self):
        "An exception report can be generated without request"
        try:
            raise ValueError("Can't find my keys")
        except ValueError:
            exc_type, exc_value, tb = sys.exc_info()
        reporter = ExceptionReporter(None, exc_type, exc_value, tb)
        html = reporter.get_traceback_html()
        self.assertInHTML('<h1>ValueError</h1>', html)
        self.assertIn('<pre class="exception_value">Can&#39;t find my keys</pre>', html)
        self.assertNotIn('<th>Request Method:</th>', html)
        self.assertNotIn('<th>Request URL:</th>', html)
        self.assertNotIn('<h3 id="user-info">USER</h3>', html)
        self.assertIn('<th>Exception Type:</th>', html)
        self.assertIn('<th>Exception Value:</th>', html)
        self.assertIn('<h2>Traceback ', html)
        self.assertIn('<h2>Request information</h2>', html)
        self.assertIn('<p>Request data not supplied</p>', html)

    def test_eol_support(self):
        """The ExceptionReporter supports Unix, Windows and Macintosh EOL markers"""
        LINES = ['print %d' % i for i in range(1, 6)]
        reporter = ExceptionReporter(None, None, None, None)
        for newline in ['\n', '\r\n', '\r']:
            fd, filename = tempfile.mkstemp(text=False)
            os.write(fd, force_bytes(newline.join(LINES) + newline))
            os.close(fd)
            try:
                self.assertEqual(
                    reporter._get_lines_from_file(filename, 3, 2),
                    (1, LINES[1:3], LINES[3], LINES[4:])
                )
            finally:
                os.unlink(filename)

    def test_no_exception(self):
        "An exception report can be generated for just a request"
        request = self.rf.get('/test_view/')
        reporter = ExceptionReporter(request, None, None, None)
        html = reporter.get_traceback_html()
        self.assertInHTML('<h1>Report at /test_view/</h1>', html)
        self.assertIn('<pre class="exception_value">No exception message supplied</pre>', html)
        self.assertIn('<th>Request Method:</th>', html)
        self.assertIn('<th>Request URL:</th>', html)
        self.assertNotIn('<th>Exception Type:</th>', html)
        self.assertNotIn('<th>Exception Value:</th>', html)
        self.assertNotIn('<h2>Traceback ', html)
        self.assertIn('<h2>Request information</h2>', html)
        self.assertNotIn('<p>Request data not supplied</p>', html)

    def test_reporting_of_nested_exceptions(self):
        request = self.rf.get('/test_view/')
        try:
            try:
                raise AttributeError('Top level')
            except AttributeError as explicit:
                try:
                    raise ValueError('Second exception') from explicit
                except ValueError:
                    raise IndexError('Final exception')
        except Exception:
            # Custom exception handler, just pass it into ExceptionReporter
            exc_type, exc_value, tb = sys.exc_info()

        explicit_exc = 'The above exception ({0}) was the direct cause of the following exception:'
        implicit_exc = 'During handling of the above exception ({0}), another exception occurred:'

        reporter = ExceptionReporter(request, exc_type, exc_value, tb)
        html = reporter.get_traceback_html()
        # Both messages are twice on page -- one rendered as html,
        # one as plain text (for pastebin)
        self.assertEqual(2, html.count(explicit_exc.format("Top level")))
        self.assertEqual(2, html.count(implicit_exc.format("Second exception")))

        text = reporter.get_traceback_text()
        self.assertIn(explicit_exc.format("Top level"), text)
        self.assertIn(implicit_exc.format("Second exception"), text)

    def test_request_and_message(self):
        "A message can be provided in addition to a request"
        request = self.rf.get('/test_view/')
        reporter = ExceptionReporter(request, None, "I'm a little teapot", None)
        html = reporter.get_traceback_html()
        self.assertInHTML('<h1>Report at /test_view/</h1>', html)
        self.assertIn('<pre class="exception_value">I&#39;m a little teapot</pre>', html)
        self.assertIn('<th>Request Method:</th>', html)
        self.assertIn('<th>Request URL:</th>', html)
        self.assertNotIn('<th>Exception Type:</th>', html)
        self.assertNotIn('<th>Exception Value:</th>', html)
        self.assertNotIn('<h2>Traceback ', html)
        self.assertIn('<h2>Request information</h2>', html)
        self.assertNotIn('<p>Request data not supplied</p>', html)

    def test_message_only(self):
        reporter = ExceptionReporter(None, None, "I'm a little teapot", None)
        html = reporter.get_traceback_html()
        self.assertInHTML('<h1>Report</h1>', html)
        self.assertIn('<pre class="exception_value">I&#39;m a little teapot</pre>', html)
        self.assertNotIn('<th>Request Method:</th>', html)
        self.assertNotIn('<th>Request URL:</th>', html)
        self.assertNotIn('<th>Exception Type:</th>', html)
        self.assertNotIn('<th>Exception Value:</th>', html)
        self.assertNotIn('<h2>Traceback ', html)
        self.assertIn('<h2>Request information</h2>', html)
        self.assertIn('<p>Request data not supplied</p>', html)

    def test_non_utf8_values_handling(self):
        "Non-UTF-8 exceptions/values should not make the output generation choke."
        try:
            class NonUtf8Output(Exception):
                def __repr__(self):
                    return b'EXC\xe9EXC'
            somevar = b'VAL\xe9VAL'  # NOQA
            raise NonUtf8Output()
        except Exception:
            exc_type, exc_value, tb = sys.exc_info()
        reporter = ExceptionReporter(None, exc_type, exc_value, tb)
        html = reporter.get_traceback_html()
        self.assertIn('VAL\\xe9VAL', html)
        self.assertIn('EXC\\xe9EXC', html)

    def test_unprintable_values_handling(self):
        "Unprintable values should not make the output generation choke."
        try:
            class OomOutput:
                def __repr__(self):
                    raise MemoryError('OOM')
            oomvalue = OomOutput()  # NOQA
            raise ValueError()
        except Exception:
            exc_type, exc_value, tb = sys.exc_info()
        reporter = ExceptionReporter(None, exc_type, exc_value, tb)
        html = reporter.get_traceback_html()
        self.assertIn('<td class="code"><pre>Error in formatting', html)

    def test_too_large_values_handling(self):
        "Large values should not create a large HTML."
        large = 256 * 1024
        repr_of_str_adds = len(repr(''))
        try:
            class LargeOutput:
                def __repr__(self):
                    return repr('A' * large)
            largevalue = LargeOutput()  # NOQA
            raise ValueError()
        except Exception:
            exc_type, exc_value, tb = sys.exc_info()
        reporter = ExceptionReporter(None, exc_type, exc_value, tb)
        html = reporter.get_traceback_html()
        self.assertEqual(len(html) // 1024 // 128, 0)  # still fit in 128Kb
        self.assertIn('<trimmed %d bytes string>' % (large + repr_of_str_adds,), html)

    def test_unfrozen_importlib(self):
        """
        importlib is not a frozen app, but its loader thinks it's frozen which
        results in an ImportError. Refs #21443.
        """
        try:
            request = self.rf.get('/test_view/')
            importlib.import_module('abc.def.invalid.name')
        except Exception:
            exc_type, exc_value, tb = sys.exc_info()
        reporter = ExceptionReporter(request, exc_type, exc_value, tb)
        html = reporter.get_traceback_html()
        self.assertInHTML('<h1>%sError at /test_view/</h1>' % ('ModuleNotFound' if PY36 else 'Import'), html)

    def test_ignore_traceback_evaluation_exceptions(self):
        """
        Don't trip over exceptions generated by crafted objects when
        evaluating them while cleansing (#24455).
        """
        class BrokenEvaluation(Exception):
            pass

        def broken_setup():
            raise BrokenEvaluation

        request = self.rf.get('/test_view/')
        broken_lazy = SimpleLazyObject(broken_setup)
        try:
            bool(broken_lazy)
        except BrokenEvaluation:
            exc_type, exc_value, tb = sys.exc_info()

        self.assertIn(
            "BrokenEvaluation",
            ExceptionReporter(request, exc_type, exc_value, tb).get_traceback_html(),
            "Evaluation exception reason not mentioned in traceback"
        )

    @override_settings(ALLOWED_HOSTS='example.com')
    def test_disallowed_host(self):
        "An exception report can be generated even for a disallowed host."
        request = self.rf.get('/', HTTP_HOST='evil.com')
        reporter = ExceptionReporter(request, None, None, None)
        html = reporter.get_traceback_html()
        self.assertIn("http://evil.com/", html)

    def test_request_with_items_key(self):
        """
        An exception report can be generated for requests with 'items' in
        request GET, POST, FILES, or COOKIES QueryDicts.
        """
        value = '<td>items</td><td class="code"><pre>&#39;Oops&#39;</pre></td>'
        # GET
        request = self.rf.get('/test_view/?items=Oops')
        reporter = ExceptionReporter(request, None, None, None)
        html = reporter.get_traceback_html()
        self.assertInHTML(value, html)
        # POST
        request = self.rf.post('/test_view/', data={'items': 'Oops'})
        reporter = ExceptionReporter(request, None, None, None)
        html = reporter.get_traceback_html()
        self.assertInHTML(value, html)
        # FILES
        fp = StringIO('filecontent')
        request = self.rf.post('/test_view/', data={'name': 'filename', 'items': fp})
        reporter = ExceptionReporter(request, None, None, None)
        html = reporter.get_traceback_html()
        self.assertInHTML(
            '<td>items</td><td class="code"><pre><InMemoryUploadedFile: '
            'items (application/octet-stream)></pre></td>',
            html
        )
        # COOKES
        rf = RequestFactory()
        rf.cookies['items'] = 'Oops'
        request = rf.get('/test_view/')
        reporter = ExceptionReporter(request, None, None, None)
        html = reporter.get_traceback_html()
        self.assertInHTML('<td>items</td><td class="code"><pre>&#39;Oops&#39;</pre></td>', html)

    def test_exception_fetching_user(self):
        """
        The error page can be rendered if the current user can't be retrieved
        (such as when the database is unavailable).
        """
        class ExceptionUser:
            def __str__(self):
                raise Exception()

        request = self.rf.get('/test_view/')
        request.user = ExceptionUser()

        try:
            raise ValueError('Oops')
        except ValueError:
            exc_type, exc_value, tb = sys.exc_info()

        reporter = ExceptionReporter(request, exc_type, exc_value, tb)
        html = reporter.get_traceback_html()
        self.assertInHTML('<h1>ValueError at /test_view/</h1>', html)
        self.assertIn('<pre class="exception_value">Oops</pre>', html)
        self.assertIn('<h3 id="user-info">USER</h3>', html)
        self.assertIn('<p>[unable to retrieve the current user]</p>', html)

        text = reporter.get_traceback_text()
        self.assertIn('USER: [unable to retrieve the current user]', text)
class PlainTextReportTests(SimpleTestCase):
    """Tests for the plain-text tracebacks produced by ExceptionReporter."""
    rf = RequestFactory()

    def test_request_and_exception(self):
        "A simple exception report can be generated"
        try:
            request = self.rf.get('/test_view/')
            request.user = User()
            raise ValueError("Can't find my keys")
        except ValueError:
            exc_type, exc_value, tb = sys.exc_info()
        reporter = ExceptionReporter(request, exc_type, exc_value, tb)
        text = reporter.get_traceback_text()
        self.assertIn('ValueError at /test_view/', text)
        self.assertIn("Can't find my keys", text)
        self.assertIn('Request Method:', text)
        self.assertIn('Request URL:', text)
        self.assertIn('USER: jacob', text)
        self.assertIn('Exception Type:', text)
        self.assertIn('Exception Value:', text)
        self.assertIn('Traceback:', text)
        self.assertIn('Request information:', text)
        self.assertNotIn('Request data not supplied', text)

    def test_no_request(self):
        "An exception report can be generated without request"
        try:
            raise ValueError("Can't find my keys")
        except ValueError:
            exc_type, exc_value, tb = sys.exc_info()
        reporter = ExceptionReporter(None, exc_type, exc_value, tb)
        text = reporter.get_traceback_text()
        self.assertIn('ValueError', text)
        self.assertIn("Can't find my keys", text)
        self.assertNotIn('Request Method:', text)
        self.assertNotIn('Request URL:', text)
        self.assertNotIn('USER:', text)
        self.assertIn('Exception Type:', text)
        self.assertIn('Exception Value:', text)
        self.assertIn('Traceback:', text)
        self.assertIn('Request data not supplied', text)

    def test_no_exception(self):
        "An exception report can be generated for just a request"
        # Only checks that rendering does not raise.
        request = self.rf.get('/test_view/')
        reporter = ExceptionReporter(request, None, None, None)
        reporter.get_traceback_text()

    def test_request_and_message(self):
        "A message can be provided in addition to a request"
        # Only checks that rendering does not raise.
        request = self.rf.get('/test_view/')
        reporter = ExceptionReporter(request, None, "I'm a little teapot", None)
        reporter.get_traceback_text()

    @override_settings(DEBUG=True)
    def test_template_exception(self):
        # Template errors embed the template source context into the report.
        request = self.rf.get('/test_view/')
        try:
            render(request, 'debug/template_error.html')
        except Exception:
            exc_type, exc_value, tb = sys.exc_info()
        reporter = ExceptionReporter(request, exc_type, exc_value, tb)
        text = reporter.get_traceback_text()
        templ_path = Path(Path(__file__).parent.parent, 'templates', 'debug', 'template_error.html')
        self.assertIn(
            'Template error:\n'
            'In template %(path)s, error at line 2\n'
            ' \'cycle\' tag requires at least two arguments\n'
            ' 1 : Template with error:\n'
            ' 2 : {%% cycle %%} \n'
            ' 3 : ' % {'path': templ_path},
            text
        )

    def test_request_with_items_key(self):
        """
        An exception report can be generated for requests with 'items' in
        request GET, POST, FILES, or COOKIES QueryDicts.
        """
        # GET
        request = self.rf.get('/test_view/?items=Oops')
        reporter = ExceptionReporter(request, None, None, None)
        text = reporter.get_traceback_text()
        self.assertIn("items = 'Oops'", text)
        # POST
        request = self.rf.post('/test_view/', data={'items': 'Oops'})
        reporter = ExceptionReporter(request, None, None, None)
        text = reporter.get_traceback_text()
        self.assertIn("items = 'Oops'", text)
        # FILES
        fp = StringIO('filecontent')
        request = self.rf.post('/test_view/', data={'name': 'filename', 'items': fp})
        reporter = ExceptionReporter(request, None, None, None)
        text = reporter.get_traceback_text()
        self.assertIn('items = <InMemoryUploadedFile:', text)
        # COOKES
        rf = RequestFactory()
        rf.cookies['items'] = 'Oops'
        request = rf.get('/test_view/')
        reporter = ExceptionReporter(request, None, None, None)
        text = reporter.get_traceback_text()
        self.assertIn("items = 'Oops'", text)

    def test_message_only(self):
        # Only checks that rendering does not raise.
        reporter = ExceptionReporter(None, None, "I'm a little teapot", None)
        reporter.get_traceback_text()

    @override_settings(ALLOWED_HOSTS='example.com')
    def test_disallowed_host(self):
        "An exception report can be generated even for a disallowed host."
        request = self.rf.get('/', HTTP_HOST='evil.com')
        reporter = ExceptionReporter(request, None, None, None)
        text = reporter.get_traceback_text()
        self.assertIn("http://evil.com/", text)
class ExceptionReportTestMixin:
# Mixin used in the ExceptionReporterFilterTests and
# AjaxResponseExceptionReporterFilter tests below
breakfast_data = {'sausage-key': 'sausage-value',
'baked-beans-key': 'baked-beans-value',
'hash-brown-key': 'hash-brown-value',
'bacon-key': 'bacon-value'}
def verify_unsafe_response(self, view, check_for_vars=True,
check_for_POST_params=True):
"""
Asserts that potentially sensitive info are displayed in the response.
"""
request = self.rf.post('/some_url/', self.breakfast_data)
response = view(request)
if check_for_vars:
# All variables are shown.
self.assertContains(response, 'cooked_eggs', status_code=500)
self.assertContains(response, 'scrambled', status_code=500)
self.assertContains(response, 'sauce', status_code=500)
self.assertContains(response, 'worcestershire', status_code=500)
if check_for_POST_params:
for k, v in self.breakfast_data.items():
# All POST parameters are shown.
self.assertContains(response, k, status_code=500)
self.assertContains(response, v, status_code=500)
def verify_safe_response(self, view, check_for_vars=True,
check_for_POST_params=True):
"""
Asserts that certain sensitive info are not displayed in the response.
"""
request = self.rf.post('/some_url/', self.breakfast_data)
response = view(request)
if check_for_vars:
# Non-sensitive variable's name and value are shown.
self.assertContains(response, 'cooked_eggs', status_code=500)
self.assertContains(response, 'scrambled', status_code=500)
# Sensitive variable's name is shown but not its value.
self.assertContains(response, 'sauce', status_code=500)
self.assertNotContains(response, 'worcestershire', status_code=500)
if check_for_POST_params:
for k, v in self.breakfast_data.items():
# All POST parameters' names are shown.
self.assertContains(response, k, status_code=500)
# Non-sensitive POST parameters' values are shown.
self.assertContains(response, 'baked-beans-value', status_code=500)
self.assertContains(response, 'hash-brown-value', status_code=500)
# Sensitive POST parameters' values are not shown.
self.assertNotContains(response, 'sausage-value', status_code=500)
self.assertNotContains(response, 'bacon-value', status_code=500)
def verify_paranoid_response(self, view, check_for_vars=True,
check_for_POST_params=True):
"""
Asserts that no variables or POST parameters are displayed in the response.
"""
request = self.rf.post('/some_url/', self.breakfast_data)
response = view(request)
if check_for_vars:
# Show variable names but not their values.
self.assertContains(response, 'cooked_eggs', status_code=500)
self.assertNotContains(response, 'scrambled', status_code=500)
self.assertContains(response, 'sauce', status_code=500)
self.assertNotContains(response, 'worcestershire', status_code=500)
if check_for_POST_params:
for k, v in self.breakfast_data.items():
# All POST parameters' names are shown.
self.assertContains(response, k, status_code=500)
# No POST parameters' values are shown.
self.assertNotContains(response, v, status_code=500)
def verify_unsafe_email(self, view, check_for_POST_params=True):
    """
    Asserts that potentially sensitive info are displayed in the email report.

    Triggers ``view`` with ADMINS configured so an error email is sent, then
    checks both the plain-text body and the HTML alternative.
    """
    with self.settings(ADMINS=[('Admin', 'admin@fattie-breakie.com')]):
        mail.outbox = []  # Empty outbox
        request = self.rf.post('/some_url/', self.breakfast_data)
        view(request)
        self.assertEqual(len(mail.outbox), 1)
        email = mail.outbox[0]

        # Frame vars are never shown in plain text email reports.
        body_plain = str(email.body)
        self.assertNotIn('cooked_eggs', body_plain)
        self.assertNotIn('scrambled', body_plain)
        self.assertNotIn('sauce', body_plain)
        self.assertNotIn('worcestershire', body_plain)

        # Frame vars are shown in html email reports.
        body_html = str(email.alternatives[0][0])
        self.assertIn('cooked_eggs', body_html)
        self.assertIn('scrambled', body_html)
        self.assertIn('sauce', body_html)
        self.assertIn('worcestershire', body_html)

        if check_for_POST_params:
            for k, v in self.breakfast_data.items():
                # All POST parameters are shown.
                self.assertIn(k, body_plain)
                self.assertIn(v, body_plain)
                self.assertIn(k, body_html)
                self.assertIn(v, body_html)
def verify_safe_email(self, view, check_for_POST_params=True):
    """
    Asserts that certain sensitive info are not displayed in the email report.
    """
    with self.settings(ADMINS=[('Admin', 'admin@fattie-breakie.com')]):
        mail.outbox = []  # Empty outbox
        request = self.rf.post('/some_url/', self.breakfast_data)
        view(request)
        self.assertEqual(len(mail.outbox), 1)
        email = mail.outbox[0]

        # Frame vars are never shown in plain text email reports.
        body_plain = str(email.body)
        self.assertNotIn('cooked_eggs', body_plain)
        self.assertNotIn('scrambled', body_plain)
        self.assertNotIn('sauce', body_plain)
        self.assertNotIn('worcestershire', body_plain)

        # Frame vars are shown in html email reports, but sensitive values
        # (e.g. 'worcestershire') are cleansed first.
        body_html = str(email.alternatives[0][0])
        self.assertIn('cooked_eggs', body_html)
        self.assertIn('scrambled', body_html)
        self.assertIn('sauce', body_html)
        self.assertNotIn('worcestershire', body_html)

        if check_for_POST_params:
            for k, v in self.breakfast_data.items():
                # All POST parameters' names are shown.
                self.assertIn(k, body_plain)
            # Non-sensitive POST parameters' values are shown.
            self.assertIn('baked-beans-value', body_plain)
            self.assertIn('hash-brown-value', body_plain)
            self.assertIn('baked-beans-value', body_html)
            self.assertIn('hash-brown-value', body_html)
            # Sensitive POST parameters' values are not shown.
            self.assertNotIn('sausage-value', body_plain)
            self.assertNotIn('bacon-value', body_plain)
            self.assertNotIn('sausage-value', body_html)
            self.assertNotIn('bacon-value', body_html)
def verify_paranoid_email(self, view):
    """
    Asserts that no variables or POST parameters are displayed in the email report.
    """
    with self.settings(ADMINS=[('Admin', 'admin@fattie-breakie.com')]):
        mail.outbox = []  # Empty outbox
        request = self.rf.post('/some_url/', self.breakfast_data)
        view(request)
        self.assertEqual(len(mail.outbox), 1)
        email = mail.outbox[0]

        # Frame vars are never shown in plain text email reports.
        body = str(email.body)
        self.assertNotIn('cooked_eggs', body)
        self.assertNotIn('scrambled', body)
        self.assertNotIn('sauce', body)
        self.assertNotIn('worcestershire', body)
        for k, v in self.breakfast_data.items():
            # All POST parameters' names are shown.
            self.assertIn(k, body)
            # No POST parameters' values are shown.
            self.assertNotIn(v, body)
@override_settings(ROOT_URLCONF='view_tests.urls')
class ExceptionReporterFilterTests(ExceptionReportTestMixin, LoggingCaptureMixin, SimpleTestCase):
    """
    Sensitive information can be filtered out of error reports (#14614).
    """
    rf = RequestFactory()

    def test_non_sensitive_request(self):
        """
        Everything (request info and frame variables) can be seen
        in the default error reports for non-sensitive requests.
        """
        with self.settings(DEBUG=True):
            self.verify_unsafe_response(non_sensitive_view)
            self.verify_unsafe_email(non_sensitive_view)

        with self.settings(DEBUG=False):
            self.verify_unsafe_response(non_sensitive_view)
            self.verify_unsafe_email(non_sensitive_view)

    def test_sensitive_request(self):
        """
        Sensitive POST parameters and frame variables cannot be
        seen in the default error reports for sensitive requests.
        """
        with self.settings(DEBUG=True):
            self.verify_unsafe_response(sensitive_view)
            self.verify_unsafe_email(sensitive_view)

        with self.settings(DEBUG=False):
            self.verify_safe_response(sensitive_view)
            self.verify_safe_email(sensitive_view)

    def test_paranoid_request(self):
        """
        No POST parameters and frame variables can be seen in the
        default error reports for "paranoid" requests.
        """
        with self.settings(DEBUG=True):
            self.verify_unsafe_response(paranoid_view)
            self.verify_unsafe_email(paranoid_view)

        with self.settings(DEBUG=False):
            self.verify_paranoid_response(paranoid_view)
            self.verify_paranoid_email(paranoid_view)

    def test_multivalue_dict_key_error(self):
        """
        #21098 -- Sensitive POST parameters cannot be seen in the
        error reports if request.POST['nonexistent_key'] throws an error.
        """
        with self.settings(DEBUG=True):
            self.verify_unsafe_response(multivalue_dict_key_error)
            self.verify_unsafe_email(multivalue_dict_key_error)

        with self.settings(DEBUG=False):
            self.verify_safe_response(multivalue_dict_key_error)
            self.verify_safe_email(multivalue_dict_key_error)

    def test_custom_exception_reporter_filter(self):
        """
        It's possible to assign an exception reporter filter to
        the request to bypass the one set in DEFAULT_EXCEPTION_REPORTER_FILTER.
        """
        with self.settings(DEBUG=True):
            self.verify_unsafe_response(custom_exception_reporter_filter_view)
            self.verify_unsafe_email(custom_exception_reporter_filter_view)

        with self.settings(DEBUG=False):
            self.verify_unsafe_response(custom_exception_reporter_filter_view)
            self.verify_unsafe_email(custom_exception_reporter_filter_view)

    def test_sensitive_method(self):
        """
        The sensitive_variables decorator works with object methods.
        """
        with self.settings(DEBUG=True):
            self.verify_unsafe_response(sensitive_method_view, check_for_POST_params=False)
            self.verify_unsafe_email(sensitive_method_view, check_for_POST_params=False)

        with self.settings(DEBUG=False):
            self.verify_safe_response(sensitive_method_view, check_for_POST_params=False)
            self.verify_safe_email(sensitive_method_view, check_for_POST_params=False)

    def test_sensitive_function_arguments(self):
        """
        Sensitive variables don't leak in the sensitive_variables decorator's
        frame, when those variables are passed as arguments to the decorated
        function.
        """
        with self.settings(DEBUG=True):
            self.verify_unsafe_response(sensitive_args_function_caller)
            self.verify_unsafe_email(sensitive_args_function_caller)

        with self.settings(DEBUG=False):
            self.verify_safe_response(sensitive_args_function_caller, check_for_POST_params=False)
            self.verify_safe_email(sensitive_args_function_caller, check_for_POST_params=False)

    def test_sensitive_function_keyword_arguments(self):
        """
        Sensitive variables don't leak in the sensitive_variables decorator's
        frame, when those variables are passed as keyword arguments to the
        decorated function.
        """
        with self.settings(DEBUG=True):
            self.verify_unsafe_response(sensitive_kwargs_function_caller)
            self.verify_unsafe_email(sensitive_kwargs_function_caller)

        with self.settings(DEBUG=False):
            self.verify_safe_response(sensitive_kwargs_function_caller, check_for_POST_params=False)
            self.verify_safe_email(sensitive_kwargs_function_caller, check_for_POST_params=False)

    def test_callable_settings(self):
        """
        Callable settings should not be evaluated in the debug page (#21345).
        """
        def callable_setting():
            return "This should not be displayed"
        with self.settings(DEBUG=True, FOOBAR=callable_setting):
            response = self.client.get('/raises500/')
            self.assertNotContains(response, "This should not be displayed", status_code=500)

    def test_callable_settings_forbidding_to_set_attributes(self):
        """
        Callable settings which forbid to set attributes should not break
        the debug page (#23070).
        """
        class CallableSettingWithSlots:
            __slots__ = []

            def __call__(self):
                return "This should not be displayed"

        with self.settings(DEBUG=True, WITH_SLOTS=CallableSettingWithSlots()):
            response = self.client.get('/raises500/')
            self.assertNotContains(response, "This should not be displayed", status_code=500)

    def test_dict_setting_with_non_str_key(self):
        """
        A dict setting containing a non-string key should not break the
        debug page (#12744).
        """
        with self.settings(DEBUG=True, FOOBAR={42: None}):
            response = self.client.get('/raises500/')
            self.assertContains(response, 'FOOBAR', status_code=500)

    def test_sensitive_settings(self):
        """
        The debug page should not show some sensitive settings
        (password, secret key, ...).
        """
        sensitive_settings = [
            'SECRET_KEY',
            'PASSWORD',
            'API_KEY',
            'AUTH_TOKEN',
        ]
        for setting in sensitive_settings:
            with self.settings(DEBUG=True, **{setting: "should not be displayed"}):
                response = self.client.get('/raises500/')
                self.assertNotContains(response, 'should not be displayed', status_code=500)

    def test_settings_with_sensitive_keys(self):
        """
        The debug page should filter out some sensitive information found in
        dict settings.
        """
        sensitive_settings = [
            'SECRET_KEY',
            'PASSWORD',
            'API_KEY',
            'AUTH_TOKEN',
        ]
        for setting in sensitive_settings:
            FOOBAR = {
                setting: "should not be displayed",
                'recursive': {setting: "should not be displayed"},
            }
            with self.settings(DEBUG=True, FOOBAR=FOOBAR):
                response = self.client.get('/raises500/')
                self.assertNotContains(response, 'should not be displayed', status_code=500)
class AjaxResponseExceptionReporterFilter(ExceptionReportTestMixin, LoggingCaptureMixin, SimpleTestCase):
    """
    Sensitive information can be filtered out of error reports.

    Here we specifically test the plain text 500 debug-only error page served
    when it has been detected the request was sent by JS code. We don't check
    for (non)existence of frames vars in the traceback information section of
    the response content because we don't include them in these error pages.
    Refs #14614.
    """
    # All requests made through this factory look like AJAX requests.
    rf = RequestFactory(HTTP_X_REQUESTED_WITH='XMLHttpRequest')

    def test_non_sensitive_request(self):
        """
        Request info can be seen in the default error reports for
        non-sensitive requests.
        """
        with self.settings(DEBUG=True):
            self.verify_unsafe_response(non_sensitive_view, check_for_vars=False)

        with self.settings(DEBUG=False):
            self.verify_unsafe_response(non_sensitive_view, check_for_vars=False)

    def test_sensitive_request(self):
        """
        Sensitive POST parameters cannot be seen in the default
        error reports for sensitive requests.
        """
        with self.settings(DEBUG=True):
            self.verify_unsafe_response(sensitive_view, check_for_vars=False)

        with self.settings(DEBUG=False):
            self.verify_safe_response(sensitive_view, check_for_vars=False)

    def test_paranoid_request(self):
        """
        No POST parameters can be seen in the default error reports
        for "paranoid" requests.
        """
        with self.settings(DEBUG=True):
            self.verify_unsafe_response(paranoid_view, check_for_vars=False)

        with self.settings(DEBUG=False):
            self.verify_paranoid_response(paranoid_view, check_for_vars=False)

    def test_custom_exception_reporter_filter(self):
        """
        It's possible to assign an exception reporter filter to
        the request to bypass the one set in DEFAULT_EXCEPTION_REPORTER_FILTER.
        """
        with self.settings(DEBUG=True):
            self.verify_unsafe_response(custom_exception_reporter_filter_view, check_for_vars=False)

        with self.settings(DEBUG=False):
            self.verify_unsafe_response(custom_exception_reporter_filter_view, check_for_vars=False)

    @override_settings(DEBUG=True, ROOT_URLCONF='view_tests.urls')
    def test_ajax_response_encoding(self):
        # The AJAX debug page is served as UTF-8 plain text.
        response = self.client.get('/raises500/', HTTP_X_REQUESTED_WITH='XMLHttpRequest')
        self.assertEqual(response['Content-Type'], 'text/plain; charset=utf-8')
class HelperFunctionTests(SimpleTestCase):
    """Tests for the cleanse_setting() helper used by the debug views."""

    def test_cleanse_setting_basic(self):
        # A harmless setting name passes its value through untouched,
        # while a sensitive name has its value replaced.
        self.assertEqual(cleanse_setting('TEST', 'TEST'), 'TEST')
        self.assertEqual(
            cleanse_setting('PASSWORD', 'super_secret'), CLEANSED_SUBSTITUTE)

    def test_cleanse_setting_ignore_case(self):
        # Matching of sensitive setting names is case-insensitive.
        cleansed = cleanse_setting('password', 'super_secret')
        self.assertEqual(cleansed, CLEANSED_SUBSTITUTE)

    def test_cleanse_setting_recurses_in_dictionary(self):
        # Sensitive keys nested inside a dict value are cleansed too.
        raw = {'login': 'cooper', 'password': 'secret'}
        cleansed = {'login': 'cooper', 'password': CLEANSED_SUBSTITUTE}
        self.assertEqual(cleanse_setting('SETTING_NAME', raw), cleansed)
Refs #5046 -- Tests the 'unicode hint' in the technical 500 debug view.
import importlib
import inspect
import os
import re
import sys
import tempfile
from io import StringIO
from pathlib import Path
from django.conf.urls import url
from django.core import mail
from django.core.files.uploadedfile import SimpleUploadedFile
from django.db import DatabaseError, connection
from django.shortcuts import render
from django.template import TemplateDoesNotExist
from django.test import RequestFactory, SimpleTestCase, override_settings
from django.test.utils import LoggingCaptureMixin, patch_logger
from django.urls import reverse
from django.utils.encoding import force_bytes
from django.utils.functional import SimpleLazyObject
from django.views.debug import (
CLEANSED_SUBSTITUTE, CallableSettingWrapper, ExceptionReporter,
cleanse_setting, technical_500_response,
)
from ..views import (
custom_exception_reporter_filter_view, index_page,
multivalue_dict_key_error, non_sensitive_view, paranoid_view,
sensitive_args_function_caller, sensitive_kwargs_function_caller,
sensitive_method_view, sensitive_view,
)
PY36 = sys.version_info >= (3, 6)
class User:
    """Minimal stand-in for a user object; only its str() is inspected."""
    def __str__(self):
        return 'jacob'
class WithoutEmptyPathUrls:
    """URLconf module stand-in with a single non-empty path pattern."""
    urlpatterns = [url(r'url/$', index_page, name='url')]
class CallableSettingWrapperTests(SimpleTestCase):
    """Unittests for CallableSettingWrapper."""

    def test_repr(self):
        # The wrapper must delegate repr() to the wrapped callable.
        class WrappedCallable:
            def __repr__(self):
                return "repr from the wrapped callable"

            def __call__(self):
                pass

        actual = repr(CallableSettingWrapper(WrappedCallable()))
        self.assertEqual(actual, "repr from the wrapped callable")
@override_settings(DEBUG=True, ROOT_URLCONF='view_tests.urls')
class DebugViewTests(LoggingCaptureMixin, SimpleTestCase):
    """Tests for the HTML debug error pages (technical 404/500 views)."""

    def test_files(self):
        # Uploaded file names are shown on the debug page, but not their
        # contents.
        response = self.client.get('/raises/')
        self.assertEqual(response.status_code, 500)

        data = {
            'file_data.txt': SimpleUploadedFile('file_data.txt', b'haha'),
        }
        response = self.client.post('/raises/', data)
        self.assertContains(response, 'file_data.txt', status_code=500)
        self.assertNotContains(response, 'haha', status_code=500)

    def test_400(self):
        # When DEBUG=True, technical_500_template() is called.
        response = self.client.get('/raises400/')
        self.assertContains(response, '<div class="context" id="', status_code=400)

    # Ensure no 403.html template exists to test the default case.
    @override_settings(TEMPLATES=[{
        'BACKEND': 'django.template.backends.django.DjangoTemplates',
    }])
    def test_403(self):
        response = self.client.get('/raises403/')
        self.assertContains(response, '<h1>403 Forbidden</h1>', status_code=403)

    # Set up a test 403.html template.
    @override_settings(TEMPLATES=[{
        'BACKEND': 'django.template.backends.django.DjangoTemplates',
        'OPTIONS': {
            'loaders': [
                ('django.template.loaders.locmem.Loader', {
                    '403.html': 'This is a test template for a 403 error ({{ exception }}).',
                }),
            ],
        },
    }])
    def test_403_template(self):
        # A user-supplied 403.html template overrides the default page and
        # receives the exception message in its context.
        response = self.client.get('/raises403/')
        self.assertContains(response, 'test template', status_code=403)
        self.assertContains(response, '(Insufficient Permissions).', status_code=403)

    def test_404(self):
        response = self.client.get('/raises404/')
        self.assertEqual(response.status_code, 404)

    def test_raised_404(self):
        response = self.client.get('/views/raises404/')
        self.assertContains(response, "<code>not-in-urls</code>, didn't match", status_code=404)

    def test_404_not_in_urls(self):
        response = self.client.get('/not-in-urls')
        self.assertNotContains(response, "Raised by:", status_code=404)
        self.assertContains(response, "<code>not-in-urls</code>, didn't match", status_code=404)

    @override_settings(ROOT_URLCONF=WithoutEmptyPathUrls)
    def test_404_empty_path_not_in_urls(self):
        response = self.client.get('/')
        self.assertContains(response, "The empty path didn't match any of these.", status_code=404)

    def test_technical_404(self):
        response = self.client.get('/views/technical404/')
        self.assertContains(response, "Raised by:", status_code=404)
        self.assertContains(response, "view_tests.views.technical404", status_code=404)

    def test_classbased_technical_404(self):
        response = self.client.get('/views/classbased404/')
        self.assertContains(response, "Raised by:", status_code=404)
        self.assertContains(response, "view_tests.views.Http404View", status_code=404)

    def test_non_l10ned_numeric_ids(self):
        """
        Numeric IDs and fancy traceback context blocks line numbers shouldn't be localized.
        """
        with self.settings(DEBUG=True, USE_L10N=True):
            response = self.client.get('/raises500/')
            # We look for a HTML fragment of the form
            # '<div class="context" id="c38123208">', not '<div class="context" id="c38,123,208"'
            self.assertContains(response, '<div class="context" id="', status_code=500)
            match = re.search(b'<div class="context" id="(?P<id>[^"]+)">', response.content)
            self.assertIsNotNone(match)
            id_repr = match.group('id')
            self.assertFalse(
                re.search(b'[^c0-9]', id_repr),
                "Numeric IDs in debug response HTML page shouldn't be localized (value: %s)." % id_repr.decode()
            )

    def test_template_exceptions(self):
        # The last traceback frame must point at the template-rendering
        # statement that actually raised.
        try:
            self.client.get(reverse('template_exception'))
        except Exception:
            raising_loc = inspect.trace()[-1][-2][0].strip()
            self.assertNotEqual(
                raising_loc.find("raise Exception('boom')"), -1,
                "Failed to find 'raise Exception' in last frame of "
                "traceback, instead found: %s" % raising_loc
            )

    def test_template_loader_postmortem(self):
        """Tests for not existing file"""
        template_name = "notfound.html"
        with tempfile.NamedTemporaryFile(prefix=template_name) as tmpfile:
            tempdir = os.path.dirname(tmpfile.name)
            template_path = os.path.join(tempdir, template_name)
            with override_settings(TEMPLATES=[{
                'BACKEND': 'django.template.backends.django.DjangoTemplates',
                'DIRS': [tempdir],
            }]):
                response = self.client.get(reverse('raises_template_does_not_exist', kwargs={"path": template_name}))
            # Appears twice: once in the HTML postmortem, once in the
            # plain-text copy of the traceback.
            self.assertContains(response, "%s (Source does not exist)" % template_path, status_code=500, count=2)
            # Assert as HTML.
            self.assertContains(
                response,
                '<li><code>django.template.loaders.filesystem.Loader</code>: '
                '%s (Source does not exist)</li>' % os.path.join(tempdir, 'notfound.html'),
                status_code=500,
                html=True,
            )

    def test_no_template_source_loaders(self):
        """
        Make sure if you don't specify a template, the debug view doesn't blow up.
        """
        with self.assertRaises(TemplateDoesNotExist):
            self.client.get('/render_no_template/')

    @override_settings(ROOT_URLCONF='view_tests.default_urls')
    def test_default_urlconf_template(self):
        """
        Make sure that the default URLconf template is shown instead
        of the technical 404 page, if the user has not altered their
        URLconf yet.
        """
        response = self.client.get('/')
        self.assertContains(
            response,
            "<h2>Congratulations on your first Django-powered page.</h2>"
        )

    @override_settings(ROOT_URLCONF='view_tests.regression_21530_urls')
    def test_regression_21530(self):
        """
        Regression test for bug #21530.

        If the admin app include is replaced with exactly one url
        pattern, then the technical 404 template should be displayed.

        The bug here was that an AttributeError caused a 500 response.
        """
        response = self.client.get('/')
        self.assertContains(
            response,
            "Page not found <span>(404)</span>",
            status_code=404
        )
class DebugViewQueriesAllowedTests(SimpleTestCase):
    # May need a query to initialize MySQL connection
    allow_database_queries = True

    def test_handle_db_exception(self):
        """
        Ensure the debug view works when a database exception is raised by
        performing an invalid query and passing the exception to the debug view.
        """
        with connection.cursor() as cursor:
            try:
                cursor.execute('INVALID SQL')
            except DatabaseError:
                exc_info = sys.exc_info()

        # Feed the captured exception directly to technical_500_response.
        rf = RequestFactory()
        response = technical_500_response(rf.get('/'), *exc_info)
        self.assertContains(response, 'OperationalError at /', status_code=500)
@override_settings(
    DEBUG=True,
    ROOT_URLCONF='view_tests.urls',
    # No template directories are configured, so no templates will be found.
    TEMPLATES=[{
        'BACKEND': 'django.template.backends.dummy.TemplateStrings',
    }],
)
class NonDjangoTemplatesDebugViewTests(SimpleTestCase):
    """The debug pages keep working when a non-Django template backend is used."""

    def test_400(self):
        # When DEBUG=True, technical_500_template() is called.
        with patch_logger('django.security.SuspiciousOperation', 'error'):
            response = self.client.get('/raises400/')
        self.assertContains(response, '<div class="context" id="', status_code=400)

    def test_403(self):
        response = self.client.get('/raises403/')
        self.assertContains(response, '<h1>403 Forbidden</h1>', status_code=403)

    def test_404(self):
        response = self.client.get('/raises404/')
        self.assertEqual(response.status_code, 404)

    def test_template_not_found_error(self):
        # Raises a TemplateDoesNotExist exception and shows the debug view.
        url = reverse('raises_template_does_not_exist', kwargs={"path": "notfound.html"})
        response = self.client.get(url)
        self.assertContains(response, '<div class="context" id="', status_code=500)
class ExceptionReporterTests(SimpleTestCase):
    """
    Tests for ExceptionReporter's HTML traceback rendering.

    Note: the debug page HTML-escapes apostrophes as ``&#x27;``, so the
    expected fragments below use the escaped form. (Several assertions in
    the original chunk contained a raw, unescaped ``'`` inside a
    single-quoted literal, which is a syntax error — fixed here.)
    """
    rf = RequestFactory()

    def test_request_and_exception(self):
        "A simple exception report can be generated"
        try:
            request = self.rf.get('/test_view/')
            request.user = User()
            raise ValueError("Can't find my keys")
        except ValueError:
            exc_type, exc_value, tb = sys.exc_info()
        reporter = ExceptionReporter(request, exc_type, exc_value, tb)
        html = reporter.get_traceback_html()
        self.assertInHTML('<h1>ValueError at /test_view/</h1>', html)
        # Apostrophe is HTML-escaped in the rendered page.
        self.assertIn('<pre class="exception_value">Can&#x27;t find my keys</pre>', html)
        self.assertIn('<th>Request Method:</th>', html)
        self.assertIn('<th>Request URL:</th>', html)
        self.assertIn('<h3 id="user-info">USER</h3>', html)
        self.assertIn('<p>jacob</p>', html)
        self.assertIn('<th>Exception Type:</th>', html)
        self.assertIn('<th>Exception Value:</th>', html)
        self.assertIn('<h2>Traceback ', html)
        self.assertIn('<h2>Request information</h2>', html)
        self.assertNotIn('<p>Request data not supplied</p>', html)
        self.assertIn('<p>No POST data</p>', html)

    def test_no_request(self):
        "An exception report can be generated without request"
        try:
            raise ValueError("Can't find my keys")
        except ValueError:
            exc_type, exc_value, tb = sys.exc_info()
        reporter = ExceptionReporter(None, exc_type, exc_value, tb)
        html = reporter.get_traceback_html()
        self.assertInHTML('<h1>ValueError</h1>', html)
        self.assertIn('<pre class="exception_value">Can&#x27;t find my keys</pre>', html)
        self.assertNotIn('<th>Request Method:</th>', html)
        self.assertNotIn('<th>Request URL:</th>', html)
        self.assertNotIn('<h3 id="user-info">USER</h3>', html)
        self.assertIn('<th>Exception Type:</th>', html)
        self.assertIn('<th>Exception Value:</th>', html)
        self.assertIn('<h2>Traceback ', html)
        self.assertIn('<h2>Request information</h2>', html)
        self.assertIn('<p>Request data not supplied</p>', html)

    def test_eol_support(self):
        """The ExceptionReporter supports Unix, Windows and Macintosh EOL markers"""
        LINES = ['print %d' % i for i in range(1, 6)]
        reporter = ExceptionReporter(None, None, None, None)

        for newline in ['\n', '\r\n', '\r']:
            fd, filename = tempfile.mkstemp(text=False)
            os.write(fd, force_bytes(newline.join(LINES) + newline))
            os.close(fd)

            try:
                self.assertEqual(
                    reporter._get_lines_from_file(filename, 3, 2),
                    (1, LINES[1:3], LINES[3], LINES[4:])
                )
            finally:
                os.unlink(filename)

    def test_no_exception(self):
        "An exception report can be generated for just a request"
        request = self.rf.get('/test_view/')
        reporter = ExceptionReporter(request, None, None, None)
        html = reporter.get_traceback_html()
        self.assertInHTML('<h1>Report at /test_view/</h1>', html)
        self.assertIn('<pre class="exception_value">No exception message supplied</pre>', html)
        self.assertIn('<th>Request Method:</th>', html)
        self.assertIn('<th>Request URL:</th>', html)
        self.assertNotIn('<th>Exception Type:</th>', html)
        self.assertNotIn('<th>Exception Value:</th>', html)
        self.assertNotIn('<h2>Traceback ', html)
        self.assertIn('<h2>Request information</h2>', html)
        self.assertNotIn('<p>Request data not supplied</p>', html)

    def test_reporting_of_nested_exceptions(self):
        request = self.rf.get('/test_view/')
        try:
            try:
                raise AttributeError('Top level')
            except AttributeError as explicit:
                try:
                    raise ValueError('Second exception') from explicit
                except ValueError:
                    raise IndexError('Final exception')
        except Exception:
            # Custom exception handler, just pass it into ExceptionReporter
            exc_type, exc_value, tb = sys.exc_info()

        explicit_exc = 'The above exception ({0}) was the direct cause of the following exception:'
        implicit_exc = 'During handling of the above exception ({0}), another exception occurred:'

        reporter = ExceptionReporter(request, exc_type, exc_value, tb)
        html = reporter.get_traceback_html()
        # Both messages are twice on page -- one rendered as html,
        # one as plain text (for pastebin)
        self.assertEqual(2, html.count(explicit_exc.format("Top level")))
        self.assertEqual(2, html.count(implicit_exc.format("Second exception")))

        text = reporter.get_traceback_text()
        self.assertIn(explicit_exc.format("Top level"), text)
        self.assertIn(implicit_exc.format("Second exception"), text)

    def test_request_and_message(self):
        "A message can be provided in addition to a request"
        request = self.rf.get('/test_view/')
        reporter = ExceptionReporter(request, None, "I'm a little teapot", None)
        html = reporter.get_traceback_html()
        self.assertInHTML('<h1>Report at /test_view/</h1>', html)
        self.assertIn('<pre class="exception_value">I&#x27;m a little teapot</pre>', html)
        self.assertIn('<th>Request Method:</th>', html)
        self.assertIn('<th>Request URL:</th>', html)
        self.assertNotIn('<th>Exception Type:</th>', html)
        self.assertNotIn('<th>Exception Value:</th>', html)
        self.assertNotIn('<h2>Traceback ', html)
        self.assertIn('<h2>Request information</h2>', html)
        self.assertNotIn('<p>Request data not supplied</p>', html)

    def test_message_only(self):
        reporter = ExceptionReporter(None, None, "I'm a little teapot", None)
        html = reporter.get_traceback_html()
        self.assertInHTML('<h1>Report</h1>', html)
        self.assertIn('<pre class="exception_value">I&#x27;m a little teapot</pre>', html)
        self.assertNotIn('<th>Request Method:</th>', html)
        self.assertNotIn('<th>Request URL:</th>', html)
        self.assertNotIn('<th>Exception Type:</th>', html)
        self.assertNotIn('<th>Exception Value:</th>', html)
        self.assertNotIn('<h2>Traceback ', html)
        self.assertIn('<h2>Request information</h2>', html)
        self.assertIn('<p>Request data not supplied</p>', html)

    def test_non_utf8_values_handling(self):
        "Non-UTF-8 exceptions/values should not make the output generation choke."
        try:
            class NonUtf8Output(Exception):
                def __repr__(self):
                    return b'EXC\xe9EXC'
            somevar = b'VAL\xe9VAL'  # NOQA
            raise NonUtf8Output()
        except Exception:
            exc_type, exc_value, tb = sys.exc_info()
        reporter = ExceptionReporter(None, exc_type, exc_value, tb)
        html = reporter.get_traceback_html()
        self.assertIn('VAL\\xe9VAL', html)
        self.assertIn('EXC\\xe9EXC', html)

    def test_unprintable_values_handling(self):
        "Unprintable values should not make the output generation choke."
        try:
            class OomOutput:
                def __repr__(self):
                    raise MemoryError('OOM')
            oomvalue = OomOutput()  # NOQA
            raise ValueError()
        except Exception:
            exc_type, exc_value, tb = sys.exc_info()
        reporter = ExceptionReporter(None, exc_type, exc_value, tb)
        html = reporter.get_traceback_html()
        self.assertIn('<td class="code"><pre>Error in formatting', html)

    def test_too_large_values_handling(self):
        "Large values should not create a large HTML."
        large = 256 * 1024
        repr_of_str_adds = len(repr(''))
        try:
            class LargeOutput:
                def __repr__(self):
                    return repr('A' * large)
            largevalue = LargeOutput()  # NOQA
            raise ValueError()
        except Exception:
            exc_type, exc_value, tb = sys.exc_info()
        reporter = ExceptionReporter(None, exc_type, exc_value, tb)
        html = reporter.get_traceback_html()
        self.assertEqual(len(html) // 1024 // 128, 0)  # still fit in 128Kb
        self.assertIn('&lt;trimmed %d bytes string&gt;' % (large + repr_of_str_adds,), html)

    def test_encoding_error(self):
        """A UnicodeError displays a portion of the problematic string."""
        try:
            'abcdefghijklmnὀpqrstuwxyz'.encode('ascii')
        except Exception:
            exc_type, exc_value, tb = sys.exc_info()
        reporter = ExceptionReporter(None, exc_type, exc_value, tb)
        html = reporter.get_traceback_html()
        self.assertIn('<h2>Unicode error hint</h2>', html)
        self.assertIn('The string that could not be encoded/decoded was: ', html)
        self.assertIn('<strong>jklmnὀpqrst</strong>', html)

    def test_unfrozen_importlib(self):
        """
        importlib is not a frozen app, but its loader thinks it's frozen which
        results in an ImportError. Refs #21443.
        """
        try:
            request = self.rf.get('/test_view/')
            importlib.import_module('abc.def.invalid.name')
        except Exception:
            exc_type, exc_value, tb = sys.exc_info()
        reporter = ExceptionReporter(request, exc_type, exc_value, tb)
        html = reporter.get_traceback_html()
        # Python 3.6+ raises ModuleNotFoundError rather than ImportError.
        self.assertInHTML('<h1>%sError at /test_view/</h1>' % ('ModuleNotFound' if PY36 else 'Import'), html)

    def test_ignore_traceback_evaluation_exceptions(self):
        """
        Don't trip over exceptions generated by crafted objects when
        evaluating them while cleansing (#24455).
        """
        class BrokenEvaluation(Exception):
            pass

        def broken_setup():
            raise BrokenEvaluation

        request = self.rf.get('/test_view/')
        broken_lazy = SimpleLazyObject(broken_setup)
        try:
            bool(broken_lazy)
        except BrokenEvaluation:
            exc_type, exc_value, tb = sys.exc_info()

        self.assertIn(
            "BrokenEvaluation",
            ExceptionReporter(request, exc_type, exc_value, tb).get_traceback_html(),
            "Evaluation exception reason not mentioned in traceback"
        )

    @override_settings(ALLOWED_HOSTS='example.com')
    def test_disallowed_host(self):
        "An exception report can be generated even for a disallowed host."
        request = self.rf.get('/', HTTP_HOST='evil.com')
        reporter = ExceptionReporter(request, None, None, None)
        html = reporter.get_traceback_html()
        self.assertIn("http://evil.com/", html)

    def test_request_with_items_key(self):
        """
        An exception report can be generated for requests with 'items' in
        request GET, POST, FILES, or COOKIES QueryDicts.
        """
        # Expected fragment uses the HTML-escaped apostrophe (&#x27;).
        value = '<td>items</td><td class="code"><pre>&#x27;Oops&#x27;</pre></td>'
        # GET
        request = self.rf.get('/test_view/?items=Oops')
        reporter = ExceptionReporter(request, None, None, None)
        html = reporter.get_traceback_html()
        self.assertInHTML(value, html)
        # POST
        request = self.rf.post('/test_view/', data={'items': 'Oops'})
        reporter = ExceptionReporter(request, None, None, None)
        html = reporter.get_traceback_html()
        self.assertInHTML(value, html)
        # FILES
        fp = StringIO('filecontent')
        request = self.rf.post('/test_view/', data={'name': 'filename', 'items': fp})
        reporter = ExceptionReporter(request, None, None, None)
        html = reporter.get_traceback_html()
        self.assertInHTML(
            '<td>items</td><td class="code"><pre>&lt;InMemoryUploadedFile: '
            'items (application/octet-stream)&gt;</pre></td>',
            html
        )
        # COOKES
        rf = RequestFactory()
        rf.cookies['items'] = 'Oops'
        request = rf.get('/test_view/')
        reporter = ExceptionReporter(request, None, None, None)
        html = reporter.get_traceback_html()
        self.assertInHTML('<td>items</td><td class="code"><pre>&#x27;Oops&#x27;</pre></td>', html)

    def test_exception_fetching_user(self):
        """
        The error page can be rendered if the current user can't be retrieved
        (such as when the database is unavailable).
        """
        class ExceptionUser:
            def __str__(self):
                raise Exception()

        request = self.rf.get('/test_view/')
        request.user = ExceptionUser()

        try:
            raise ValueError('Oops')
        except ValueError:
            exc_type, exc_value, tb = sys.exc_info()

        reporter = ExceptionReporter(request, exc_type, exc_value, tb)
        html = reporter.get_traceback_html()
        self.assertInHTML('<h1>ValueError at /test_view/</h1>', html)
        self.assertIn('<pre class="exception_value">Oops</pre>', html)
        self.assertIn('<h3 id="user-info">USER</h3>', html)
        self.assertIn('<p>[unable to retrieve the current user]</p>', html)

        text = reporter.get_traceback_text()
        self.assertIn('USER: [unable to retrieve the current user]', text)
class PlainTextReportTests(SimpleTestCase):
    """Tests for the plain-text exception report (ExceptionReporter.get_traceback_text)."""
    rf = RequestFactory()

    def test_request_and_exception(self):
        "A simple exception report can be generated"
        try:
            request = self.rf.get('/test_view/')
            request.user = User()
            raise ValueError("Can't find my keys")
        except ValueError:
            exc_type, exc_value, tb = sys.exc_info()
        reporter = ExceptionReporter(request, exc_type, exc_value, tb)
        text = reporter.get_traceback_text()
        self.assertIn('ValueError at /test_view/', text)
        self.assertIn("Can't find my keys", text)
        self.assertIn('Request Method:', text)
        self.assertIn('Request URL:', text)
        self.assertIn('USER: jacob', text)
        self.assertIn('Exception Type:', text)
        self.assertIn('Exception Value:', text)
        self.assertIn('Traceback:', text)
        self.assertIn('Request information:', text)
        self.assertNotIn('Request data not supplied', text)

    def test_no_request(self):
        "An exception report can be generated without request"
        try:
            raise ValueError("Can't find my keys")
        except ValueError:
            exc_type, exc_value, tb = sys.exc_info()
        reporter = ExceptionReporter(None, exc_type, exc_value, tb)
        text = reporter.get_traceback_text()
        self.assertIn('ValueError', text)
        self.assertIn("Can't find my keys", text)
        # Without a request, no request-specific sections appear.
        self.assertNotIn('Request Method:', text)
        self.assertNotIn('Request URL:', text)
        self.assertNotIn('USER:', text)
        self.assertIn('Exception Type:', text)
        self.assertIn('Exception Value:', text)
        self.assertIn('Traceback:', text)
        self.assertIn('Request data not supplied', text)

    def test_no_exception(self):
        "An exception report can be generated for just a request"
        request = self.rf.get('/test_view/')
        reporter = ExceptionReporter(request, None, None, None)
        reporter.get_traceback_text()

    def test_request_and_message(self):
        "A message can be provided in addition to a request"
        request = self.rf.get('/test_view/')
        reporter = ExceptionReporter(request, None, "I'm a little teapot", None)
        reporter.get_traceback_text()

    @override_settings(DEBUG=True)
    def test_template_exception(self):
        # A template syntax error produces a "Template error" section in the
        # plain-text report, with the failing line and surrounding context.
        request = self.rf.get('/test_view/')
        try:
            render(request, 'debug/template_error.html')
        except Exception:
            exc_type, exc_value, tb = sys.exc_info()
        reporter = ExceptionReporter(request, exc_type, exc_value, tb)
        text = reporter.get_traceback_text()
        templ_path = Path(Path(__file__).parent.parent, 'templates', 'debug', 'template_error.html')
        self.assertIn(
            'Template error:\n'
            'In template %(path)s, error at line 2\n'
            ' \'cycle\' tag requires at least two arguments\n'
            ' 1 : Template with error:\n'
            ' 2 : {%% cycle %%} \n'
            ' 3 : ' % {'path': templ_path},
            text
        )

    def test_request_with_items_key(self):
        """
        An exception report can be generated for requests with 'items' in
        request GET, POST, FILES, or COOKIES QueryDicts.
        """
        # GET
        request = self.rf.get('/test_view/?items=Oops')
        reporter = ExceptionReporter(request, None, None, None)
        text = reporter.get_traceback_text()
        self.assertIn("items = 'Oops'", text)
        # POST
        request = self.rf.post('/test_view/', data={'items': 'Oops'})
        reporter = ExceptionReporter(request, None, None, None)
        text = reporter.get_traceback_text()
        self.assertIn("items = 'Oops'", text)
        # FILES
        fp = StringIO('filecontent')
        request = self.rf.post('/test_view/', data={'name': 'filename', 'items': fp})
        reporter = ExceptionReporter(request, None, None, None)
        text = reporter.get_traceback_text()
        self.assertIn('items = <InMemoryUploadedFile:', text)
        # COOKIES
        rf = RequestFactory()
        rf.cookies['items'] = 'Oops'
        request = rf.get('/test_view/')
        reporter = ExceptionReporter(request, None, None, None)
        text = reporter.get_traceback_text()
        self.assertIn("items = 'Oops'", text)

    def test_message_only(self):
        # A report can be generated from a bare message with no request/exception.
        reporter = ExceptionReporter(None, None, "I'm a little teapot", None)
        reporter.get_traceback_text()

    @override_settings(ALLOWED_HOSTS='example.com')
    def test_disallowed_host(self):
        "An exception report can be generated even for a disallowed host."
        request = self.rf.get('/', HTTP_HOST='evil.com')
        reporter = ExceptionReporter(request, None, None, None)
        text = reporter.get_traceback_text()
        self.assertIn("http://evil.com/", text)
class ExceptionReportTestMixin:
    """Shared assertion helpers for verifying which request data / frame
    variables leak into debug responses and error-report emails."""
    # Mixin used in the ExceptionReporterFilterTests and
    # AjaxResponseExceptionReporterFilter tests below
    breakfast_data = {'sausage-key': 'sausage-value',
                      'baked-beans-key': 'baked-beans-value',
                      'hash-brown-key': 'hash-brown-value',
                      'bacon-key': 'bacon-value'}

    def verify_unsafe_response(self, view, check_for_vars=True,
                               check_for_POST_params=True):
        """
        Asserts that potentially sensitive info are displayed in the response.
        """
        request = self.rf.post('/some_url/', self.breakfast_data)
        response = view(request)
        if check_for_vars:
            # All variables are shown.
            self.assertContains(response, 'cooked_eggs', status_code=500)
            self.assertContains(response, 'scrambled', status_code=500)
            self.assertContains(response, 'sauce', status_code=500)
            self.assertContains(response, 'worcestershire', status_code=500)
        if check_for_POST_params:
            for k, v in self.breakfast_data.items():
                # All POST parameters are shown.
                self.assertContains(response, k, status_code=500)
                self.assertContains(response, v, status_code=500)

    def verify_safe_response(self, view, check_for_vars=True,
                             check_for_POST_params=True):
        """
        Asserts that certain sensitive info are not displayed in the response.
        """
        request = self.rf.post('/some_url/', self.breakfast_data)
        response = view(request)
        if check_for_vars:
            # Non-sensitive variable's name and value are shown.
            self.assertContains(response, 'cooked_eggs', status_code=500)
            self.assertContains(response, 'scrambled', status_code=500)
            # Sensitive variable's name is shown but not its value.
            self.assertContains(response, 'sauce', status_code=500)
            self.assertNotContains(response, 'worcestershire', status_code=500)
        if check_for_POST_params:
            for k, v in self.breakfast_data.items():
                # All POST parameters' names are shown.
                self.assertContains(response, k, status_code=500)
            # Non-sensitive POST parameters' values are shown.
            self.assertContains(response, 'baked-beans-value', status_code=500)
            self.assertContains(response, 'hash-brown-value', status_code=500)
            # Sensitive POST parameters' values are not shown.
            self.assertNotContains(response, 'sausage-value', status_code=500)
            self.assertNotContains(response, 'bacon-value', status_code=500)

    def verify_paranoid_response(self, view, check_for_vars=True,
                                 check_for_POST_params=True):
        """
        Asserts that no variables or POST parameters are displayed in the response.
        """
        request = self.rf.post('/some_url/', self.breakfast_data)
        response = view(request)
        if check_for_vars:
            # Show variable names but not their values.
            self.assertContains(response, 'cooked_eggs', status_code=500)
            self.assertNotContains(response, 'scrambled', status_code=500)
            self.assertContains(response, 'sauce', status_code=500)
            self.assertNotContains(response, 'worcestershire', status_code=500)
        if check_for_POST_params:
            for k, v in self.breakfast_data.items():
                # All POST parameters' names are shown.
                self.assertContains(response, k, status_code=500)
                # No POST parameters' values are shown.
                self.assertNotContains(response, v, status_code=500)

    def verify_unsafe_email(self, view, check_for_POST_params=True):
        """
        Asserts that potentially sensitive info are displayed in the email report.
        """
        with self.settings(ADMINS=[('Admin', 'admin@fattie-breakie.com')]):
            mail.outbox = []  # Empty outbox
            request = self.rf.post('/some_url/', self.breakfast_data)
            view(request)
            self.assertEqual(len(mail.outbox), 1)
            email = mail.outbox[0]
            # Frames vars are never shown in plain text email reports.
            body_plain = str(email.body)
            self.assertNotIn('cooked_eggs', body_plain)
            self.assertNotIn('scrambled', body_plain)
            self.assertNotIn('sauce', body_plain)
            self.assertNotIn('worcestershire', body_plain)
            # Frames vars are shown in html email reports.
            body_html = str(email.alternatives[0][0])
            self.assertIn('cooked_eggs', body_html)
            self.assertIn('scrambled', body_html)
            self.assertIn('sauce', body_html)
            self.assertIn('worcestershire', body_html)
            if check_for_POST_params:
                for k, v in self.breakfast_data.items():
                    # All POST parameters are shown.
                    self.assertIn(k, body_plain)
                    self.assertIn(v, body_plain)
                    self.assertIn(k, body_html)
                    self.assertIn(v, body_html)

    def verify_safe_email(self, view, check_for_POST_params=True):
        """
        Asserts that certain sensitive info are not displayed in the email report.
        """
        with self.settings(ADMINS=[('Admin', 'admin@fattie-breakie.com')]):
            mail.outbox = []  # Empty outbox
            request = self.rf.post('/some_url/', self.breakfast_data)
            view(request)
            self.assertEqual(len(mail.outbox), 1)
            email = mail.outbox[0]
            # Frames vars are never shown in plain text email reports.
            body_plain = str(email.body)
            self.assertNotIn('cooked_eggs', body_plain)
            self.assertNotIn('scrambled', body_plain)
            self.assertNotIn('sauce', body_plain)
            self.assertNotIn('worcestershire', body_plain)
            # Frames vars are shown in html email reports.
            body_html = str(email.alternatives[0][0])
            self.assertIn('cooked_eggs', body_html)
            self.assertIn('scrambled', body_html)
            self.assertIn('sauce', body_html)
            self.assertNotIn('worcestershire', body_html)
            if check_for_POST_params:
                for k, v in self.breakfast_data.items():
                    # All POST parameters' names are shown.
                    self.assertIn(k, body_plain)
                # Non-sensitive POST parameters' values are shown.
                self.assertIn('baked-beans-value', body_plain)
                self.assertIn('hash-brown-value', body_plain)
                self.assertIn('baked-beans-value', body_html)
                self.assertIn('hash-brown-value', body_html)
                # Sensitive POST parameters' values are not shown.
                self.assertNotIn('sausage-value', body_plain)
                self.assertNotIn('bacon-value', body_plain)
                self.assertNotIn('sausage-value', body_html)
                self.assertNotIn('bacon-value', body_html)

    def verify_paranoid_email(self, view):
        """
        Asserts that no variables or POST parameters are displayed in the email report.
        """
        with self.settings(ADMINS=[('Admin', 'admin@fattie-breakie.com')]):
            mail.outbox = []  # Empty outbox
            request = self.rf.post('/some_url/', self.breakfast_data)
            view(request)
            self.assertEqual(len(mail.outbox), 1)
            email = mail.outbox[0]
            # Frames vars are never shown in plain text email reports.
            body = str(email.body)
            self.assertNotIn('cooked_eggs', body)
            self.assertNotIn('scrambled', body)
            self.assertNotIn('sauce', body)
            self.assertNotIn('worcestershire', body)
            for k, v in self.breakfast_data.items():
                # All POST parameters' names are shown.
                self.assertIn(k, body)
                # No POST parameters' values are shown.
                self.assertNotIn(v, body)
@override_settings(ROOT_URLCONF='view_tests.urls')
class ExceptionReporterFilterTests(ExceptionReportTestMixin, LoggingCaptureMixin, SimpleTestCase):
    """
    Sensitive information can be filtered out of error reports (#14614).
    """
    rf = RequestFactory()

    def test_non_sensitive_request(self):
        """
        Everything (request info and frame variables) can be seen
        in the default error reports for non-sensitive requests.
        """
        with self.settings(DEBUG=True):
            self.verify_unsafe_response(non_sensitive_view)
            self.verify_unsafe_email(non_sensitive_view)
        with self.settings(DEBUG=False):
            self.verify_unsafe_response(non_sensitive_view)
            self.verify_unsafe_email(non_sensitive_view)

    def test_sensitive_request(self):
        """
        Sensitive POST parameters and frame variables cannot be
        seen in the default error reports for sensitive requests.
        """
        # With DEBUG on, everything is shown; filtering only kicks in with DEBUG off.
        with self.settings(DEBUG=True):
            self.verify_unsafe_response(sensitive_view)
            self.verify_unsafe_email(sensitive_view)
        with self.settings(DEBUG=False):
            self.verify_safe_response(sensitive_view)
            self.verify_safe_email(sensitive_view)

    def test_paranoid_request(self):
        """
        No POST parameters and frame variables can be seen in the
        default error reports for "paranoid" requests.
        """
        with self.settings(DEBUG=True):
            self.verify_unsafe_response(paranoid_view)
            self.verify_unsafe_email(paranoid_view)
        with self.settings(DEBUG=False):
            self.verify_paranoid_response(paranoid_view)
            self.verify_paranoid_email(paranoid_view)

    def test_multivalue_dict_key_error(self):
        """
        #21098 -- Sensitive POST parameters cannot be seen in the
        error reports for if request.POST['nonexistent_key'] throws an error.
        """
        with self.settings(DEBUG=True):
            self.verify_unsafe_response(multivalue_dict_key_error)
            self.verify_unsafe_email(multivalue_dict_key_error)
        with self.settings(DEBUG=False):
            self.verify_safe_response(multivalue_dict_key_error)
            self.verify_safe_email(multivalue_dict_key_error)

    def test_custom_exception_reporter_filter(self):
        """
        It's possible to assign an exception reporter filter to
        the request to bypass the one set in DEFAULT_EXCEPTION_REPORTER_FILTER.
        """
        with self.settings(DEBUG=True):
            self.verify_unsafe_response(custom_exception_reporter_filter_view)
            self.verify_unsafe_email(custom_exception_reporter_filter_view)
        with self.settings(DEBUG=False):
            self.verify_unsafe_response(custom_exception_reporter_filter_view)
            self.verify_unsafe_email(custom_exception_reporter_filter_view)

    def test_sensitive_method(self):
        """
        The sensitive_variables decorator works with object methods.
        """
        with self.settings(DEBUG=True):
            self.verify_unsafe_response(sensitive_method_view, check_for_POST_params=False)
            self.verify_unsafe_email(sensitive_method_view, check_for_POST_params=False)
        with self.settings(DEBUG=False):
            self.verify_safe_response(sensitive_method_view, check_for_POST_params=False)
            self.verify_safe_email(sensitive_method_view, check_for_POST_params=False)

    def test_sensitive_function_arguments(self):
        """
        Sensitive variables don't leak in the sensitive_variables decorator's
        frame, when those variables are passed as arguments to the decorated
        function.
        """
        with self.settings(DEBUG=True):
            self.verify_unsafe_response(sensitive_args_function_caller)
            self.verify_unsafe_email(sensitive_args_function_caller)
        with self.settings(DEBUG=False):
            self.verify_safe_response(sensitive_args_function_caller, check_for_POST_params=False)
            self.verify_safe_email(sensitive_args_function_caller, check_for_POST_params=False)

    def test_sensitive_function_keyword_arguments(self):
        """
        Sensitive variables don't leak in the sensitive_variables decorator's
        frame, when those variables are passed as keyword arguments to the
        decorated function.
        """
        with self.settings(DEBUG=True):
            self.verify_unsafe_response(sensitive_kwargs_function_caller)
            self.verify_unsafe_email(sensitive_kwargs_function_caller)
        with self.settings(DEBUG=False):
            self.verify_safe_response(sensitive_kwargs_function_caller, check_for_POST_params=False)
            self.verify_safe_email(sensitive_kwargs_function_caller, check_for_POST_params=False)

    def test_callable_settings(self):
        """
        Callable settings should not be evaluated in the debug page (#21345).
        """
        def callable_setting():
            return "This should not be displayed"
        with self.settings(DEBUG=True, FOOBAR=callable_setting):
            response = self.client.get('/raises500/')
            self.assertNotContains(response, "This should not be displayed", status_code=500)

    def test_callable_settings_forbidding_to_set_attributes(self):
        """
        Callable settings which forbid to set attributes should not break
        the debug page (#23070).
        """
        class CallableSettingWithSlots:
            # __slots__ prevents attribute assignment on instances.
            __slots__ = []

            def __call__(self):
                return "This should not be displayed"
        with self.settings(DEBUG=True, WITH_SLOTS=CallableSettingWithSlots()):
            response = self.client.get('/raises500/')
            self.assertNotContains(response, "This should not be displayed", status_code=500)

    def test_dict_setting_with_non_str_key(self):
        """
        A dict setting containing a non-string key should not break the
        debug page (#12744).
        """
        with self.settings(DEBUG=True, FOOBAR={42: None}):
            response = self.client.get('/raises500/')
            self.assertContains(response, 'FOOBAR', status_code=500)

    def test_sensitive_settings(self):
        """
        The debug page should not show some sensitive settings
        (password, secret key, ...).
        """
        sensitive_settings = [
            'SECRET_KEY',
            'PASSWORD',
            'API_KEY',
            'AUTH_TOKEN',
        ]
        for setting in sensitive_settings:
            with self.settings(DEBUG=True, **{setting: "should not be displayed"}):
                response = self.client.get('/raises500/')
                self.assertNotContains(response, 'should not be displayed', status_code=500)

    def test_settings_with_sensitive_keys(self):
        """
        The debug page should filter out some sensitive information found in
        dict settings.
        """
        sensitive_settings = [
            'SECRET_KEY',
            'PASSWORD',
            'API_KEY',
            'AUTH_TOKEN',
        ]
        for setting in sensitive_settings:
            # Filtering recurses into nested dicts as well.
            FOOBAR = {
                setting: "should not be displayed",
                'recursive': {setting: "should not be displayed"},
            }
            with self.settings(DEBUG=True, FOOBAR=FOOBAR):
                response = self.client.get('/raises500/')
                self.assertNotContains(response, 'should not be displayed', status_code=500)
class AjaxResponseExceptionReporterFilter(ExceptionReportTestMixin, LoggingCaptureMixin, SimpleTestCase):
    """
    Sensitive information can be filtered out of error reports.

    Here we specifically test the plain text 500 debug-only error page served
    when it has been detected the request was sent by JS code. We don't check
    for (non)existence of frames vars in the traceback information section of
    the response content because we don't include them in these error pages.
    Refs #14614.
    """
    rf = RequestFactory(HTTP_X_REQUESTED_WITH='XMLHttpRequest')

    def test_non_sensitive_request(self):
        """
        Request info can be seen in the default error reports for
        non-sensitive requests.
        """
        with self.settings(DEBUG=True):
            self.verify_unsafe_response(non_sensitive_view, check_for_vars=False)
        with self.settings(DEBUG=False):
            self.verify_unsafe_response(non_sensitive_view, check_for_vars=False)

    def test_sensitive_request(self):
        """
        Sensitive POST parameters cannot be seen in the default
        error reports for sensitive requests.
        """
        with self.settings(DEBUG=True):
            self.verify_unsafe_response(sensitive_view, check_for_vars=False)
        with self.settings(DEBUG=False):
            self.verify_safe_response(sensitive_view, check_for_vars=False)

    def test_paranoid_request(self):
        """
        No POST parameters can be seen in the default error reports
        for "paranoid" requests.
        """
        with self.settings(DEBUG=True):
            self.verify_unsafe_response(paranoid_view, check_for_vars=False)
        with self.settings(DEBUG=False):
            self.verify_paranoid_response(paranoid_view, check_for_vars=False)

    def test_custom_exception_reporter_filter(self):
        """
        It's possible to assign an exception reporter filter to
        the request to bypass the one set in DEFAULT_EXCEPTION_REPORTER_FILTER.
        """
        with self.settings(DEBUG=True):
            self.verify_unsafe_response(custom_exception_reporter_filter_view, check_for_vars=False)
        with self.settings(DEBUG=False):
            self.verify_unsafe_response(custom_exception_reporter_filter_view, check_for_vars=False)

    @override_settings(DEBUG=True, ROOT_URLCONF='view_tests.urls')
    def test_ajax_response_encoding(self):
        # The AJAX debug page is served as UTF-8 plain text.
        response = self.client.get('/raises500/', HTTP_X_REQUESTED_WITH='XMLHttpRequest')
        self.assertEqual(response['Content-Type'], 'text/plain; charset=utf-8')
class HelperFunctionTests(SimpleTestCase):
    """Unit tests for the cleanse_setting() debug helper."""

    def test_cleanse_setting_basic(self):
        # Non-sensitive names pass through untouched; sensitive ones are masked.
        self.assertEqual(cleanse_setting('TEST', 'TEST'), 'TEST')
        cleansed = cleanse_setting('PASSWORD', 'super_secret')
        self.assertEqual(cleansed, CLEANSED_SUBSTITUTE)

    def test_cleanse_setting_ignore_case(self):
        # Sensitive setting names match case-insensitively.
        cleansed = cleanse_setting('password', 'super_secret')
        self.assertEqual(cleansed, CLEANSED_SUBSTITUTE)

    def test_cleanse_setting_recurses_in_dictionary(self):
        # Values nested inside dict settings are cleansed as well.
        original = {'login': 'cooper', 'password': 'secret'}
        cleansed = cleanse_setting('SETTING_NAME', original)
        self.assertEqual(cleansed, {'login': 'cooper', 'password': CLEANSED_SUBSTITUTE})
|
"""
Marshals a collection of project information together into a dictionary called the `context`.
When an instance of a project is launched on AWS, we need to tweak things a bit
with no manual steps in some cases, or as few as possible in other cases.
Case 1: New, standardized environment
We launch journal--ci, a testing instance for the journal project within the `ci` environment.
Case 2: Ad-hoc instances
We launch journal--testsomething, a testing instance we will use to check something works as expected.
Case 3: Stack updates
We want to add an external volume to an EC2 instance to increase available space, so we partially update the CloudFormation template to create it.
"""
import os, json, copy
import re
from collections import OrderedDict, namedtuple
import deepdiff
import netaddr
from slugify import slugify
from . import utils, cloudformation, terraform, core, project, context_handler
from .utils import ensure, lmap
from .config import STACK_DIR
import logging
LOG = logging.getLogger(__name__)
# TODO: this function needs some TLC - it's getting fat.
def build_context(pname, **more_context): # pylint: disable=too-many-locals
    """wrangles parameters into a dictionary (context) that can be given to
    whatever renders the final template.

    `pname` must be a known project name; `more_context` must include a
    'stackname' and may include an 'alt-config' name, an 'existing_context'
    dict (values to preserve across regeneration, e.g. passwords) and any
    other overrides for the defaults below. Returns the completed context dict."""
    supported_projects = project.project_list()
    ensure(pname in supported_projects, "Unknown project %r. Known projects: %s" % (pname, supported_projects))

    # this is the context data from the currently existing template (if any)
    # by re-using current values we can avoid making unnecessary changes when
    # regenerating templates (like random passwords)
    existing_context = more_context.pop('existing_context', {})

    # order is important. always use the alt-config in more_context (explicit) also when regenerating
    alt_config = more_context.get('alt-config')
    project_data = project.project_data(pname)
    if alt_config and project_data.get('aws-alt', {}).get(alt_config):
        project_data = project.set_project_alt(project_data, 'aws', alt_config)

    defaults = {
        'project_name': pname,
        'project': project_data,

        'author': os.environ.get("LOGNAME") or 'unknown',
        'date_rendered': utils.ymd(), # TODO: if this value is used at all, more precision might be nice

        # a stackname looks like: <pname>--<instance_id>[--<cluster-id>]
        'stackname': None, # must be provided by whatever is calling this
        'instance_id': None, # derived from the stackname
        'cluster_id': None, # derived from the stackname

        'alt-config': None,

        'branch': project_data.get('default-branch'),
        'revision': None, # may be used in future to checkout a specific revision of project

        # TODO: shift these rds_ values under the 'rds' key
        'rds_dbname': None, # generated from the instance_id when present
        'rds_username': None, # could possibly live in the project data, but really no need.
        'rds_password': None,
        'rds_instance_id': None,
        'rds': {},

        'ec2': False,
        's3': {},
        'elb': False,
        'sns': [],
        'sqs': {},
        'ext': False,
        'cloudfront': False,
        'elasticache': False,
    }

    context = copy.deepcopy(defaults)
    context.update(more_context)

    # proceed with wrangling

    # TODO: don't like this. if a stackname is required, make it a parameter.
    # stackname used to be derived inside this func from pname + id + cluster number
    ensure(context['stackname'], "a stackname wasn't provided.")
    stackname = context['stackname']

    # stackname data
    bits = core.parse_stackname(stackname, all_bits=True, idx=True)
    ensure(bits['project_name'] == pname,
           "the project name %r derived from the given `stackname` %r doesn't match" % (bits['project_name'], pname))
    context.update(bits)

    # hostname data
    context.update(core.hostname_struct(stackname))

    # rds
    context.update(build_context_rds(context, existing_context))

    if 'ext' in context['project']['aws']:
        context['ext'] = context['project']['aws']['ext']

    # ec2
    # TODO: this is a problem. using the default 'True' preserves the behaviour of
    # when 'ec2: True' meant, 'use defaults with nothing changed'
    # but now I need to store master ip info there.
    #context['ec2'] = context['project']['aws'].get('ec2', True)
    context['ec2'] = context['project']['aws']['ec2']
    if context['ec2'] == True: # noqa: E712 - ec2 may be a dict; only a literal True means 'use defaults'
        context['ec2'] = {}
        context['project']['aws']['ec2'] = {}
        # fix: Logger.warn is a deprecated alias of Logger.warning
        LOG.warning("stack needs it's context refreshed: %s", stackname)

    # we can now assume these will always be dicts
    if isinstance(context['ec2'], dict): # the other case is aws.ec2 == False
        context['ec2']['type'] = context['project']['aws']['type'] # TODO: shift aws.type to aws.ec2.type in project file
        context = set_master_address(context)

    build_context_elb(context)

    def _parameterize(string):
        # expands '{instance}' placeholders in project template names
        return string.format(instance=context['instance_id'])

    for topic_template_name in context['project']['aws']['sns']:
        topic_name = _parameterize(topic_template_name)
        context['sns'].append(topic_name)

    for queue_template_name in context['project']['aws']['sqs']:
        queue_name = _parameterize(queue_template_name)
        queue_configuration = context['project']['aws']['sqs'][queue_template_name]
        subscriptions = lmap(_parameterize, queue_configuration.get('subscriptions', []))
        context['sqs'][queue_name] = subscriptions

    # future: build what is necessary for buildercore.bootstrap.setup_s3()
    default_bucket_configuration = {
        'sqs-notifications': {},
        'deletion-policy': 'delete',
        'website-configuration': None,
        'cors': None,
        'public': False,
    }
    for bucket_template_name in context['project']['aws']['s3']:
        bucket_name = _parameterize(bucket_template_name)
        configuration = context['project']['aws']['s3'][bucket_template_name]
        context['s3'][bucket_name] = default_bucket_configuration.copy()
        context['s3'][bucket_name].update(configuration if configuration else {})

    build_context_cloudfront(context, parameterize=_parameterize)
    build_context_fastly(context, parameterize=_parameterize)
    build_context_subdomains(context)
    build_context_elasticache(context)

    return context
def set_master_address(data, master_ip=None):
    "can update both context and buildvars data"
    master_ip = master_ip or data['ec2'].get('master_ip') # or data['project']['aws']['ec2']['master_ip']
    ensure(master_ip, "a master-ip was neither explicitly given nor found in the data provided")
    data['ec2']['master_ip'] = master_ip
    if 'aws' in data['project']:
        # context (rather than buildvars)
        ec2_section = data['project']['aws']['ec2']
        ec2_section['master_ip'] = master_ip
        if data['ec2'].get('masterless'):
            # this is a masterless instance, delete key
            del ec2_section['master_ip']
    return data
def build_context_rds(context, existing_context):
    """returns the RDS-specific portion of the context as a dict,
    or an empty dict when the project has no 'rds' section"""
    project_aws = context['project']['aws']
    if 'rds' not in project_aws:
        return {}
    stackname = context['stackname']

    # deletion policy
    deletion_policy = utils.lookup(context, 'project.aws.rds.deletion-policy', 'Snapshot')

    # used to give mysql a range of valid ip addresses to connect from
    subnet_cidr = netaddr.IPNetwork(project_aws['subnet-cidr'])
    networkmask = "%s/%s" % (subnet_cidr.network, subnet_cidr.netmask) # ll: 10.0.2.0/255.255.255.0

    # pull password from existing context, if it exists
    generated_password = utils.random_alphanumeric(length=32)
    rds_password = existing_context.get('rds_password', generated_password)

    return {
        'netmask': networkmask,
        'rds_username': 'root',
        'rds_password': rds_password,
        # alpha-numeric only
        # TODO: investigate possibility of ambiguous RDS naming here
        'rds_dbname': context.get('rds_dbname') or slugify(stackname, separator=""), # *must* use 'or' here
        'rds_instance_id': slugify(stackname), # *completely* different to database name
        'rds_params': project_aws['rds'].get('params', []),
        'rds': {
            'deletion-policy': deletion_policy
        }
    }
def build_context_elb(context):
    "fills in the 'elb' section of the context when the project uses an ELB; no-op otherwise"
    aws = context['project']['aws']
    if 'elb' not in aws:
        return
    # a dict value carries project-specific ELB settings;
    # any other truthy/falsy value means 'use defaults'
    context['elb'] = aws['elb'] if isinstance(aws['elb'], dict) else {}
    # the ELB always spans both of the project's subnets
    context['elb']['subnets'] = [
        aws['subnet-id'],
        aws['redundant-subnet-id'],
    ]
def build_context_cloudfront(context, parameterize):
    "fills in the 'cloudfront' section of the context, or sets it to False when the project doesn't use CloudFront"
    def build_subdomain(x):
        return complete_domain(parameterize(x), context['domain'])

    if 'cloudfront' not in context['project']['aws']:
        context['cloudfront'] = False
        return

    cf = context['project']['aws']['cloudfront']

    # optional custom error-page configuration
    errors = None
    if cf['errors']:
        errors = {
            'domain': parameterize(cf['errors']['domain']),
            'pattern': cf['errors']['pattern'],
            'codes': cf['errors']['codes'],
            'protocol': cf['errors']['protocol'],
        }

    # origins preserve their declaration order
    origins = OrderedDict()
    for origin_id, origin in cf['origins'].items():
        origins[origin_id] = {
            'hostname': parameterize(origin['hostname']),
            'pattern': origin.get('pattern'),
            'headers': origin.get('headers', []),
            'cookies': origin.get('cookies', []),
        }

    context['cloudfront'] = {
        'subdomains': [build_subdomain(s) for s in cf['subdomains']],
        'subdomains-without-dns': [build_subdomain(s) for s in cf['subdomains-without-dns']],
        'certificate_id': cf['certificate_id'],
        'cookies': cf['cookies'],
        'compress': cf['compress'],
        'headers': cf['headers'],
        'default-ttl': cf['default-ttl'],
        'errors': errors,
        'logging': cf.get('logging', False),
        'origins': origins,
    }
def build_context_fastly(context, parameterize):
    "fills in the 'fastly' (CDN) section of the context, or sets it to False when the project doesn't use Fastly"
    if 'fastly' not in context['project']['aws']:
        context['fastly'] = False
        return

    fastly = context['project']['aws']['fastly']

    def build_subdomain(x):
        return complete_domain(parameterize(x), context['domain'])

    context['fastly'] = {
        'subdomains': [build_subdomain(s) for s in fastly['subdomains']],
        'subdomains-without-dns': [build_subdomain(s) for s in fastly['subdomains-without-dns']],
        'dns': fastly['dns'],
        # TODO: add templating of bucket name
        'gcslogging': fastly['gcslogging'],
    }
def complete_domain(host, default_main):
    """expands a possibly-partial hostname into a fully-qualified one.
    an empty host means 'use the main domain'; a host already containing a
    dot is considered complete; anything else is prefixed to the main domain."""
    if host == '':
        return default_main
    if '.' in host:
        return host
    return host + '.' + default_main # something + '.' + elifesciences.org
def build_context_subdomains(context):
    "expands each of the project's (optional) subdomains against the project's main domain"
    main_domain = context['project']['domain']
    declared = context['project']['aws'].get('subdomains', [])
    context['subdomains'] = [complete_domain(sub, main_domain) for sub in declared]
def build_context_elasticache(context):
    "copies the project's elasticache configuration (if any) into the context"
    aws_section = context['project']['aws']
    if 'elasticache' in aws_section:
        context['elasticache'] = aws_section['elasticache']
def choose_alt_config(stackname):
    """returns the name of the alt-config you think the user would want, based on given stackname.
    returns None when no alt-config matches."""
    pname, instance_id = core.parse_stackname(stackname)
    pdata = project.project_data(pname)
    # an instance_id exactly matching an alternative config name is assumed deliberate
    if instance_id in project.project_alt_config_names(pdata):
        return instance_id
    return None
#
#
#
# TODO: move to cloudformation.py
def write_cloudformation_template(stackname, contents):
    """writes a json version of the python cloudformation template to the stacks directory.
    returns the path of the file written."""
    output_fname = os.path.join(STACK_DIR, stackname + ".json")
    # fix: use a context manager so the file handle is flushed and closed
    # promptly instead of relying on garbage collection
    with open(output_fname, 'w') as fh:
        fh.write(contents)
    return output_fname
# TODO: prefer this single dispatch function for handling creation of template files
def write_template(stackname, contents):
    "writes any provider templates and returns a list of paths to templates"
    # NOTE(review): not yet implemented - currently a no-op returning None.
    # intended to replace the separate cloudformation/terraform write calls:
    # cfn = cloudformation.write_template(stackname, contents)
    # tfm = terraform.write_template(stackname, contents)
    # return [cfn, tfm]
    pass
# TODO: move implementation to cloudformation.py
# TODO: perhaps add terraform support?
def read_template(stackname):
    """returns the contents of a cloudformation template as a python data structure.
    raises an IOError/OSError if the stack's template file doesn't exist."""
    output_fname = os.path.join(STACK_DIR, stackname + ".json")
    # fix: close the file handle after parsing instead of leaking it
    with open(output_fname, 'r') as fh:
        return json.load(fh)
#
#
#
def more_validation(json_template_str):
    "local cloudformation template checks. complements the validation AWS does"
    try:
        template = json.loads(json_template_str)
        # case: when "DBInstanceIdentifier" == "lax--temp2"
        # The parameter Filter: db-instance-id is not a valid identifier. Identifiers must begin with a letter;
        # must contain only ASCII letters, digits, and hyphens; and must not end with a hyphen or contain two consecutive hyphens.
        db_instance_id = utils.lookup(template, 'Resources.AttachedDB.Properties.DBInstanceIdentifier', False)
        if db_instance_id:
            ensure('--' not in db_instance_id, "database instance identifier contains a double hyphen: %r" % db_instance_id)
        return True
    except BaseException:
        # deliberately broad: log whatever went wrong, then let it propagate
        LOG.exception("uncaught error attempting to validate cloudformation template")
        raise
#
#
#
def validate_project(pname, **extra):
    """validates all of project's possible cloudformation templates.
    only called during testing"""
    # NOTE(review): the `extra` kwargs accepted here are never read - they are
    # overwritten inside the loop below. confirm callers rely on this signature.
    LOG.info('validating %s', pname)
    # validate the project's default configuration
    template = quick_render(pname)
    pdata = project.project_data(pname)
    cloudformation.validate_template(pname, template)
    more_validation(template)
    # validate all alternative configurations
    # (fix: removed dead `altconfig = None` initialisation - the name is only
    # used inside the loop)
    for altconfig in pdata.get('aws-alt', {}).keys():
        LOG.info('validating %s, %s', pname, altconfig)
        extra = {
            'alt-config': altconfig
        }
        template = quick_render(pname, **extra)
        cloudformation.validate_template(pname, template)
#
# create new template
#
def quick_render(project_name, **more_context):
    """generates a representative Cloudformation template for given project with dummy values.
    only called during testing.

    returns the rendered template (a JSON string)."""
    # set a dummy instance id if one hasn't been set.
    more_context['stackname'] = more_context.get('stackname', core.mk_stackname(project_name, 'dummy'))
    context = build_context(project_name, **more_context)
    return cloudformation.render_template(context)
def generate_stack(pname, **more_context):
    """given a project name and any context overrides, generates a Cloudformation
    stack file, writes it to file and returns a triple of
    (context, cloudformation_template_file, terraform_template_file)"""
    context = build_context(pname, **more_context)
    cloudformation_template = cloudformation.render_template(context)
    terraform_template = terraform.render(context)
    stackname = context['stackname']
    # persist the context first so the stack can be regenerated/inspected later
    context_handler.write_context(stackname, context)
    cloudformation_template_file = write_cloudformation_template(stackname, cloudformation_template)
    terraform_template_file = terraform.write_template(stackname, terraform_template)
    return context, cloudformation_template_file, terraform_template_file
#
# update existing template
#
# can't add ExtDNS: it changes dynamically when we start/stop instances and should not be touched after creation
# resource titles matching these regexes may be modified in-place during a stack update
UPDATABLE_TITLE_PATTERNS = ['^CloudFront.*', '^ElasticLoadBalancer.*', '^EC2Instance.*', '.*Bucket$', '.*BucketPolicy', '^StackSecurityGroup$', '^ELBSecurityGroup$', '^CnameDNS.+$', 'FastlyDNS\\d+$', '^AttachedDB$', '^AttachedDBSubnet$', '^ExtraStorage.+$', '^MountPoint.+$', '^IntDNS.*$', '^ElastiCache.*$']
# resource titles matching these regexes may be deleted during a stack update
REMOVABLE_TITLE_PATTERNS = ['^CloudFront.*', '^CnameDNS\\d+$', 'FastlyDNS\\d+$', '^ExtDNS$', '^ExtraStorage.+$', '^MountPoint.+$', '^.+Queue$', '^EC2Instance.+$', '^IntDNS.*$', '^ElastiCache.*$', '^.+Topic$']
# EC2 properties ignored when diffing: updating them isn't useful and can cause a needless reboot
EC2_NOT_UPDATABLE_PROPERTIES = ['ImageId', 'Tags', 'UserData']
# CloudFormation is nicely chopped up into:
# * what to add
# * what to modify
# * what to remove
# What we see here is the new Terraform generated.tf file, containing all resources (just Fastly so far).
# We can do a diff with the current one which would already be an improvement, but ultimately the source of truth
# is changing it and running a terraform plan to see proposed changes. We should however roll it back if the user
# doesn't confirm.
class Delta(namedtuple('Delta', ['plus', 'edit', 'minus', 'terraform'])):
    """a delta between an old and a new generated template.

    'plus', 'edit' and 'minus' are {'Resources': ..., 'Outputs': ...}
    dictionaries of CloudFormation additions, updates and removals;
    'terraform' holds the Terraform side of the delta (or None)."""
    @property
    def non_empty(self):
        "True when the delta contains any CloudFormation or Terraform change"
        cfn_sections = (self.plus, self.edit, self.minus)
        flags = [section[key] for section in cfn_sections for key in ('Resources', 'Outputs')]
        flags.append(self.terraform)
        return any(flags)
_empty_cloudformation_dictionary = {'Resources': {}, 'Outputs': {}}
Delta.__new__.__defaults__ = (_empty_cloudformation_dictionary,) * 3 + (None,)
class TerraformDelta(namedtuple('TerraformDelta', ['old_contents', 'new_contents'])):
    """pair of (old, new) Terraform template strings."""
    def __str__(self):
        # rendering the delta shows the proposed (new) template contents
        return self.new_contents
    def diff(self):
        "structured comparison of the parsed old and new templates"
        old, new = map(json.loads, (self.old_contents, self.new_contents))
        return deepdiff.DeepDiff(old, new)
def template_delta(context):
    """given an already existing template, regenerates it and produces a delta containing only the new resources.
    Some of the existing resources are treated as immutable and not put in the delta. Most that support non-destructive updates like CloudFront are instead included"""
    old_template = read_template(context['stackname'])
    template = json.loads(cloudformation.render_template(context))
    # default to empty JSON documents so TerraformDelta always has parseable contents
    old_terraform_template_file = '{}'
    new_terraform_template_file = '{}'
    if context['fastly']:
        old_terraform_template_file = terraform.read_template(context['stackname'])
        new_terraform_template_file = terraform.render(context)
    def _related_to_ec2(output):
        # an Output relates to EC2 when its Value references an EC2Instance*
        # resource, either directly (Ref) or via an attribute (Fn::GetAtt)
        if 'Value' in output:
            if 'Ref' in output['Value']:
                return 'EC2Instance' in output['Value']['Ref']
            if 'Fn::GetAtt' in output['Value']:
                return 'EC2Instance' in output['Value']['Fn::GetAtt'][0]
        return False
    def _title_is_updatable(title):
        # title matches at least one of the updatable patterns
        return len([p for p in UPDATABLE_TITLE_PATTERNS if re.match(p, title)]) > 0
    def _title_is_removable(title):
        # title matches at least one of the removable patterns
        return len([p for p in REMOVABLE_TITLE_PATTERNS if re.match(p, title)]) > 0
    # TODO: investigate if this is still necessary
    # start backward compatibility code
    # back for when EC2Instance was the title rather than EC2Instance1
    if 'EC2Instance' in old_template['Resources']:
        # rewrite references in the *new* template to point at the legacy title
        if 'ExtraStorage' in template['Resources']:
            template['Resources']['ExtraStorage']['Properties']['AvailabilityZone']['Fn::GetAtt'][0] = 'EC2Instance'
        if 'MountPoint' in template['Resources']:
            template['Resources']['MountPoint']['Properties']['InstanceId']['Ref'] = 'EC2Instance'
        if 'IntDNS' in template['Resources']:
            template['Resources']['IntDNS']['Properties']['ResourceRecords'][0]['Fn::GetAtt'][0] = 'EC2Instance'
        if 'ExtDNS' in template['Resources']:
            template['Resources']['ExtDNS']['Properties']['ResourceRecords'][0]['Fn::GetAtt'][0] = 'EC2Instance'
    # end backward compatibility code
    def _title_has_been_updated(title, section):
        # title was there before with a deprecated name, leave it alone
        # e.g. 'EC2Instance' rather than 'EC2Instance1'
        if not title in old_template[section]:
            return False
        # NOTE(review): dict() is a *shallow* copy — blanking nested Properties
        # below also mutates the underlying templates; confirm this is acceptable
        title_in_old = dict(old_template[section][title])
        title_in_new = dict(template[section][title])
        # ignore UserData changes, it's not useful to update them and cause
        # a needless reboot
        if 'Type' in title_in_old:
            if title_in_old['Type'] == 'AWS::EC2::Instance':
                for property_name in EC2_NOT_UPDATABLE_PROPERTIES:
                    title_in_old['Properties'][property_name] = None
                    title_in_new['Properties'][property_name] = None
        return title_in_old != title_in_new
    def legacy_title(title):
        # some titles like EC2Instance1 were originally EC2Instance
        # however, no reason not to let EC2Instance2 be created?
        # (implicitly returns None for any other title)
        if title in ['EC2Instance1', 'ExtraStorage1', 'MountPoint1']:
            return title.strip('1')
    # resources/outputs present in the new template but not the old one
    delta_plus_resources = {
        title: r for (title, r) in template['Resources'].items()
        if (title not in old_template['Resources']
            and (legacy_title(title) not in old_template['Resources'])
            and (title != 'EC2Instance'))
    }
    delta_plus_outputs = {
        title: o for (title, o) in template.get('Outputs', {}).items()
        if (title not in old_template.get('Outputs', {}) and not _related_to_ec2(o))
    }
    # titles present in both templates whose contents have changed
    delta_edit_resources = {
        title: r for (title, r) in template['Resources'].items()
        if (_title_is_updatable(title) and _title_has_been_updated(title, 'Resources'))
    }
    delta_edit_outputs = {
        title: o for (title, o) in template.get('Outputs', {}).items()
        if (_title_is_updatable(title) and _title_has_been_updated(title, 'Outputs'))
    }
    # titles present in the old template but dropped from the new one
    delta_minus_resources = {r: v for r, v in old_template['Resources'].items() if r not in template['Resources'] and _title_is_removable(r)}
    delta_minus_outputs = {o: v for o, v in old_template.get('Outputs', {}).items() if o not in template.get('Outputs', {})}
    return Delta(
        {
            'Resources': delta_plus_resources,
            'Outputs': delta_plus_outputs,
        },
        {
            'Resources': delta_edit_resources,
            'Outputs': delta_edit_outputs,
        },
        {
            'Resources': delta_minus_resources,
            'Outputs': delta_minus_outputs,
        },
        TerraformDelta(old_terraform_template_file, new_terraform_template_file)
    )
def merge_delta(stackname, delta):
    """Merges the new resources in delta in the local copy of the Cloudformation template.

    writes both the updated CloudFormation template and the delta's Terraform
    template to disk, returning the merged CloudFormation template."""
    template = read_template(stackname)
    apply_delta(template, delta)
    write_cloudformation_template(stackname, json.dumps(template))
    # str() of a TerraformDelta renders the *new* terraform template contents
    terraform.write_template(stackname, str(delta.terraform))
    return template
def apply_delta(template, delta):
    """applies a Delta to a CloudFormation template data structure, in place.

    additions ('plus') and updates ('edit') are merged into the template's
    sections; removals ('minus') are deleted by title. raises (via `ensure`)
    when a delta touches a section other than Resources or Outputs."""
    def _known(component):
        # guard against deltas touching sections this code doesn't understand
        ensure(component in ["Resources", "Outputs"], "Template component %s not recognized" % component)
    # 'plus' and 'edit' are applied identically: merge the new/changed
    # entries into the (possibly missing) template section
    for changeset in (delta.plus, delta.edit):
        for component in changeset:
            _known(component)
            data = template.get(component, {})
            data.update(changeset[component])
            template[component] = data
    for component in delta.minus:
        _known(component)
        for title in delta.minus[component]:
            del template[component][title]
def _current_cloudformation_template(stackname):
    "retrieves a template from the CloudFormation API, using it as the source of truth"
    cfn = core.boto_conn(stackname, 'cloudformation', client=True)
    # NOTE(review): boto may return TemplateBody as a dict or a string depending
    # on how the template was uploaded — confirm callers handle both
    return cfn.get_template(StackName=stackname)['TemplateBody']
def download_cloudformation_template(stackname):
    "fetches the stack's current template from the CloudFormation API and writes it to the local stacks directory"
    write_cloudformation_template(stackname, json.dumps(_current_cloudformation_template(stackname)))
def regenerate_stack(stackname, **more_context):
    """rebuilds the context for an existing stack and computes the delta between
    the current template and a freshly generated one.

    returns a triple of (new context, delta, previous context)."""
    current_context = context_handler.load_context(stackname)
    # refresh the local template copy; it becomes the 'old' side of the delta
    download_cloudformation_template(stackname)
    (pname, instance_id) = core.parse_stackname(stackname)
    more_context['stackname'] = stackname # TODO: purge this crap
    # NOTE(review): assumes the instance id names an alt-config — confirm for ad-hoc instances
    more_context['alt-config'] = instance_id
    context = build_context(pname, existing_context=current_context, **more_context)
    delta = template_delta(context)
    return context, delta, current_context
Document delta classes
"""
Marshalls a collection of project information together in to a dictionary called the `context`.
When an instance of a project is launched on AWS, we need to tweak things a bit
with no manual steps in some cases, or as few as possible in other cases.
Case 1: New, standardized environment
We launch journal--ci, a testing instance for the journal project within the `ci` environment.
Case 2: Ad-hoc instances
We launch journal--testsomething, a testing instance we will use to check something works as expected.
Case 3: Stack updates
We want to add an external volume to an EC2 instance to increase available space, so we partially update the CloudFormation template to create it.
"""
import os, json, copy
import re
from collections import OrderedDict, namedtuple
import deepdiff
import netaddr
from slugify import slugify
from . import utils, cloudformation, terraform, core, project, context_handler
from .utils import ensure, lmap
from .config import STACK_DIR
import logging
LOG = logging.getLogger(__name__)
# TODO: this function needs some TLC - it's getting fat.
def build_context(pname, **more_context): # pylint: disable=too-many-locals
    """wrangles parameters into a dictionary (context) that can be given to
    whatever renders the final template.

    `more_context` must contain a 'stackname' key and may contain an
    'alt-config' key plus an 'existing_context' dictionary whose generated
    values (e.g. passwords) are preserved across regenerations."""
    supported_projects = project.project_list()
    ensure(pname in supported_projects, "Unknown project %r. Known projects: %s" % (pname, supported_projects))
    # this is the context data from the currently existing template (if any)
    # by re-using current values we can avoid making unnecessary changes when
    # regenerating templates (like random passwords)
    existing_context = more_context.pop('existing_context', {})
    # order is important. always use the alt-config in more_context (explicit) also when regenerating
    alt_config = more_context.get('alt-config')
    project_data = project.project_data(pname)
    if alt_config and project_data.get('aws-alt', {}).get(alt_config):
        project_data = project.set_project_alt(project_data, 'aws', alt_config)
    defaults = {
        'project_name': pname,
        'project': project_data,
        'author': os.environ.get("LOGNAME") or 'unknown',
        'date_rendered': utils.ymd(), # TODO: if this value is used at all, more precision might be nice
        # a stackname looks like: <pname>--<instance_id>[--<cluster-id>]
        'stackname': None, # must be provided by whatever is calling this
        'instance_id': None, # derived from the stackname
        'cluster_id': None, # derived from the stackname
        'alt-config': None,
        'branch': project_data.get('default-branch'),
        'revision': None, # may be used in future to checkout a specific revision of project
        # TODO: shift these rds_ values under the 'rds' key
        'rds_dbname': None, # generated from the instance_id when present
        'rds_username': None, # could possibly live in the project data, but really no need.
        'rds_password': None,
        'rds_instance_id': None,
        'rds': {},
        'ec2': False,
        's3': {},
        'elb': False,
        'sns': [],
        'sqs': {},
        'ext': False,
        'cloudfront': False,
        'elasticache': False,
    }
    context = copy.deepcopy(defaults)
    context.update(more_context)
    # proceed with wrangling
    # TODO: don't like this. if a stackname is required, make it a parameter.
    # stackname used to be derived inside this func from pname + id + cluster number
    ensure(context['stackname'], "a stackname wasn't provided.")
    stackname = context['stackname']
    # stackname data
    bits = core.parse_stackname(stackname, all_bits=True, idx=True)
    ensure(bits['project_name'] == pname,
           "the project name %r derived from the given `stackname` %r doesn't match" % (bits['project_name'], pname))
    context.update(bits)
    # hostname data
    context.update(core.hostname_struct(stackname))
    # rds
    context.update(build_context_rds(context, existing_context))
    if 'ext' in context['project']['aws']:
        context['ext'] = context['project']['aws']['ext']
    # ec2
    # TODO: this is a problem. using the default 'True' preserves the behaviour of
    # when 'ec2: True' meant, 'use defaults with nothing changed'
    # but now I need to store master ip info there.
    #context['ec2'] = context['project']['aws'].get('ec2', True)
    context['ec2'] = context['project']['aws']['ec2']
    if context['ec2'] == True:
        # legacy 'ec2: True' configuration: coerce to an (empty) dictionary
        context['ec2'] = {}
        context['project']['aws']['ec2'] = {}
        LOG.warn("stack needs it's context refreshed: %s", stackname)
    # we can now assume these will always be dicts
    if isinstance(context['ec2'], dict): # the other case is aws.ec2 == False
        context['ec2']['type'] = context['project']['aws']['type'] # TODO: shift aws.type to aws.ec2.type in project file
        context = set_master_address(context)
    build_context_elb(context)
    def _parameterize(string):
        # substitutes the instance id into '{instance}' placeholders
        return string.format(instance=context['instance_id'])
    for topic_template_name in context['project']['aws']['sns']:
        topic_name = _parameterize(topic_template_name)
        context['sns'].append(topic_name)
    for queue_template_name in context['project']['aws']['sqs']:
        queue_name = _parameterize(queue_template_name)
        queue_configuration = context['project']['aws']['sqs'][queue_template_name]
        subscriptions = lmap(_parameterize, queue_configuration.get('subscriptions', []))
        context['sqs'][queue_name] = subscriptions
    # future: build what is necessary for buildercore.bootstrap.setup_s3()
    default_bucket_configuration = {
        'sqs-notifications': {},
        'deletion-policy': 'delete',
        'website-configuration': None,
        'cors': None,
        'public': False,
    }
    for bucket_template_name in context['project']['aws']['s3']:
        bucket_name = _parameterize(bucket_template_name)
        configuration = context['project']['aws']['s3'][bucket_template_name]
        # project configuration overrides the bucket defaults above
        context['s3'][bucket_name] = default_bucket_configuration.copy()
        context['s3'][bucket_name].update(configuration if configuration else {})
    build_context_cloudfront(context, parameterize=_parameterize)
    build_context_fastly(context, parameterize=_parameterize)
    build_context_subdomains(context)
    build_context_elasticache(context)
    return context
def set_master_address(data, master_ip=None):
    """can update both context and buildvars data.

    resolves the master ip (an explicit argument wins, falling back to the
    existing ec2 data), stores it, and returns the updated data."""
    master_ip = master_ip or data['ec2'].get('master_ip') # or data['project']['aws']['ec2']['master_ip']
    ensure(master_ip, "a master-ip was neither explicitly given nor found in the data provided")
    data['ec2']['master_ip'] = master_ip
    if 'aws' in data['project']:
        # context (rather than buildvars)
        data['project']['aws']['ec2']['master_ip'] = master_ip
        if data['ec2'].get('masterless'):
            # this is a masterless instance, delete key
            del data['project']['aws']['ec2']['master_ip']
    return data
def build_context_rds(context, existing_context):
    """builds the RDS-related portion of the context.

    returns an empty dict when the project has no 'rds' configuration.
    `existing_context` supplies previously generated values (the password)
    so regenerating a template doesn't rotate credentials."""
    if 'rds' not in context['project']['aws']:
        return {}
    stackname = context['stackname']
    # deletion policy
    deletion_policy = utils.lookup(context, 'project.aws.rds.deletion-policy', 'Snapshot')
    # used to give mysql a range of valid ip addresses to connect from
    subnet_cidr = netaddr.IPNetwork(context['project']['aws']['subnet-cidr'])
    net = subnet_cidr.network
    mask = subnet_cidr.netmask
    networkmask = "%s/%s" % (net, mask) # ll: 10.0.2.0/255.255.255.0
    # pull password from existing context, if it exists
    generated_password = utils.random_alphanumeric(length=32)
    rds_password = existing_context.get('rds_password', generated_password)
    return {
        'netmask': networkmask,
        'rds_username': 'root',
        'rds_password': rds_password,
        # alpha-numeric only
        # TODO: investigate possibility of ambiguous RDS naming here
        'rds_dbname': context.get('rds_dbname') or slugify(stackname, separator=""), # *must* use 'or' here
        'rds_instance_id': slugify(stackname), # *completely* different to database name
        'rds_params': context['project']['aws']['rds'].get('params', []),
        'rds': {
            'deletion-policy': deletion_policy
        }
    }
def build_context_elb(context):
    """builds the 'elb' portion of the context from project data.

    when the project configures an elb, context['elb'] becomes a dictionary
    (the project's own settings when given as a dict, otherwise empty) with
    the two load-balanced subnets added. projects without an 'elb' section
    leave the context untouched (context['elb'] stays False, per the
    build_context defaults), avoiding an AttributeError on False.update()."""
    if 'elb' in context['project']['aws']:
        if isinstance(context['project']['aws']['elb'], dict):
            context['elb'] = context['project']['aws']['elb']
        else:
            context['elb'] = {}
        # subnets are always taken from the project, regardless of other elb settings
        context['elb'].update({
            'subnets': [
                context['project']['aws']['subnet-id'],
                context['project']['aws']['redundant-subnet-id']
            ],
        })
def build_context_cloudfront(context, parameterize):
    """builds the 'cloudfront' portion of the context from project data.

    `parameterize` is a callable substituting the instance id into templated
    names. sets context['cloudfront'] to False when the project has no
    cloudfront configuration."""
    def build_subdomain(x):
        # parameterized subdomains are expanded against the stack's domain
        return complete_domain(parameterize(x), context['domain'])
    if 'cloudfront' in context['project']['aws']:
        errors = None
        if context['project']['aws']['cloudfront']['errors']:
            errors = {
                'domain': parameterize(context['project']['aws']['cloudfront']['errors']['domain']),
                'pattern': context['project']['aws']['cloudfront']['errors']['pattern'],
                'codes': context['project']['aws']['cloudfront']['errors']['codes'],
                'protocol': context['project']['aws']['cloudfront']['errors']['protocol'],
            }
        context['cloudfront'] = {
            'subdomains': [build_subdomain(x) for x in context['project']['aws']['cloudfront']['subdomains']],
            'subdomains-without-dns': [build_subdomain(x) for x in context['project']['aws']['cloudfront']['subdomains-without-dns']],
            'certificate_id': context['project']['aws']['cloudfront']['certificate_id'],
            'cookies': context['project']['aws']['cloudfront']['cookies'],
            'compress': context['project']['aws']['cloudfront']['compress'],
            'headers': context['project']['aws']['cloudfront']['headers'],
            'default-ttl': context['project']['aws']['cloudfront']['default-ttl'],
            'errors': errors,
            'logging': context['project']['aws']['cloudfront'].get('logging', False),
            # preserve origin ordering from the project configuration
            'origins': OrderedDict([
                (o_id, {
                    'hostname': parameterize(o['hostname']),
                    'pattern': o.get('pattern'),
                    'headers': o.get('headers', []),
                    'cookies': o.get('cookies', []),
                })
                for o_id, o in context['project']['aws']['cloudfront']['origins'].items()
            ]),
        }
    else:
        context['cloudfront'] = False
def build_context_fastly(context, parameterize):
    """builds the 'fastly' portion of the context from project data.

    sets context['fastly'] to False when the project has no fastly configuration."""
    def build_subdomain(x):
        # parameterized subdomains are expanded against the stack's domain
        return complete_domain(parameterize(x), context['domain'])
    if 'fastly' in context['project']['aws']:
        context['fastly'] = {
            'subdomains': [build_subdomain(x) for x in context['project']['aws']['fastly']['subdomains']],
            'subdomains-without-dns': [build_subdomain(x) for x in context['project']['aws']['fastly']['subdomains-without-dns']],
            'dns': context['project']['aws']['fastly']['dns'],
            # TODO: add templating of bucket name
            'gcslogging': context['project']['aws']['fastly']['gcslogging'],
        }
    else:
        context['fastly'] = False
def complete_domain(host, default_main):
    """expands a possibly-partial hostname against the default domain.

    ''                -> default_main
    'sub'             -> 'sub.' + default_main
    'sub.example.org' -> unchanged (already fully qualified)"""
    if host == '':
        # the 'main' host: use the default domain as-is
        return default_main
    if '.' in host:
        # already a complete domain name
        return host
    return '%s.%s' % (host, default_main) # something + '.' + elifesciences.org
def build_context_subdomains(context):
    "fully qualifies each of the project's (possibly partial) subdomains"
    default_domain = context['project']['domain']
    partial_subdomains = context['project']['aws'].get('subdomains', [])
    context['subdomains'] = [complete_domain(subdomain, default_domain) for subdomain in partial_subdomains]
def build_context_elasticache(context):
    "copies any project elasticache configuration into the context, verbatim"
    aws = context['project']['aws']
    if 'elasticache' in aws:
        context['elasticache'] = aws['elasticache']
def choose_alt_config(stackname):
    """returns the name of the alt-config you think the user would want, based on given stackname.

    returns None (implicitly) when the instance id doesn't match any of the
    project's alternative configurations."""
    pname, instance_id = core.parse_stackname(stackname)
    pdata = project.project_data(pname)
    if instance_id in project.project_alt_config_names(pdata):
        # instance_id exactly matches an alternative config. use that.
        return instance_id
#
#
#
# TODO: move to cloudformation.py
def write_cloudformation_template(stackname, contents):
    """writes a json version of the python cloudformation template to the stacks directory.

    returns the path of the file written."""
    output_fname = os.path.join(STACK_DIR, stackname + ".json")
    # use a context manager so the file handle is closed promptly, even on error
    with open(output_fname, 'w') as fh:
        fh.write(contents)
    return output_fname
# TODO: prefer this single dispatch function for handling creation of template files
def write_template(stackname, contents):
    """writes any provider templates and returns a list of paths to templates.

    NOTE(review): currently an unimplemented stub that returns None; the
    commented-out lines sketch the intended single-dispatch implementation."""
    # cfn = cloudformation.write_template(stackname, contents)
    # tfm = terraform.write_template(stackname, contents)
    # return [cfn, tfm]
    pass
# TODO: move implementation to cloudformation.py
# TODO: perhaps add terraform support?
def read_template(stackname):
    """returns the contents of a cloudformation template as a python data structure."""
    output_fname = os.path.join(STACK_DIR, stackname + ".json")
    # use a context manager so the file handle isn't left open until GC
    with open(output_fname, 'r') as fh:
        return json.load(fh)
#
#
#
def more_validation(json_template_str):
    """local cloudformation template checks. complements the validation AWS does.

    returns True on success; logs and re-raises anything that goes wrong."""
    try:
        data = json.loads(json_template_str)
        # case: when "DBInstanceIdentifier" == "lax--temp2"
        # The parameter Filter: db-instance-id is not a valid identifier. Identifiers must begin with a letter;
        # must contain only ASCII letters, digits, and hyphens; and must not end with a hyphen or contain two consecutive hyphens.
        dbid = utils.lookup(data, 'Resources.AttachedDB.Properties.DBInstanceIdentifier', False)
        if dbid:
            ensure('--' not in dbid, "database instance identifier contains a double hyphen: %r" % dbid)
        return True
    except BaseException:
        # deliberately broad: log *any* validation failure for diagnosis, then re-raise
        LOG.exception("uncaught error attempting to validate cloudformation template")
        raise
#
#
#
def validate_project(pname, **extra):
    """validates all of project's possible cloudformation templates.
    only called during testing"""
    LOG.info('validating %s', pname)
    template = quick_render(pname)
    pdata = project.project_data(pname)
    altconfig = None
    cloudformation.validate_template(pname, template)
    more_validation(template)
    # validate all alternative configurations
    # NOTE(review): any **extra given by the caller is discarded and rebuilt below — confirm intended
    for altconfig in pdata.get('aws-alt', {}).keys():
        LOG.info('validating %s, %s', pname, altconfig)
        extra = {
            'alt-config': altconfig
        }
        template = quick_render(pname, **extra)
        cloudformation.validate_template(pname, template)
#
# create new template
#
def quick_render(project_name, **more_context):
    """generates a representative Cloudformation template for given project with dummy values.
    only called during testing.

    returns the rendered template (a JSON string)."""
    # set a dummy instance id if one hasn't been set.
    more_context['stackname'] = more_context.get('stackname', core.mk_stackname(project_name, 'dummy'))
    context = build_context(project_name, **more_context)
    return cloudformation.render_template(context)
def generate_stack(pname, **more_context):
    """given a project name and any context overrides, generates a Cloudformation
    stack file, writes it to file and returns a triple of
    (context, cloudformation_template_file, terraform_template_file)"""
    context = build_context(pname, **more_context)
    cloudformation_template = cloudformation.render_template(context)
    terraform_template = terraform.render(context)
    stackname = context['stackname']
    # persist the context first so the stack can be regenerated/inspected later
    context_handler.write_context(stackname, context)
    cloudformation_template_file = write_cloudformation_template(stackname, cloudformation_template)
    terraform_template_file = terraform.write_template(stackname, terraform_template)
    return context, cloudformation_template_file, terraform_template_file
#
# update existing template
#
# can't add ExtDNS: it changes dynamically when we start/stop instances and should not be touched after creation
# resource titles matching these regexes may be modified in-place during a stack update
UPDATABLE_TITLE_PATTERNS = ['^CloudFront.*', '^ElasticLoadBalancer.*', '^EC2Instance.*', '.*Bucket$', '.*BucketPolicy', '^StackSecurityGroup$', '^ELBSecurityGroup$', '^CnameDNS.+$', 'FastlyDNS\\d+$', '^AttachedDB$', '^AttachedDBSubnet$', '^ExtraStorage.+$', '^MountPoint.+$', '^IntDNS.*$', '^ElastiCache.*$']
# resource titles matching these regexes may be deleted during a stack update
REMOVABLE_TITLE_PATTERNS = ['^CloudFront.*', '^CnameDNS\\d+$', 'FastlyDNS\\d+$', '^ExtDNS$', '^ExtraStorage.+$', '^MountPoint.+$', '^.+Queue$', '^EC2Instance.+$', '^IntDNS.*$', '^ElastiCache.*$', '^.+Topic$']
# EC2 properties ignored when diffing: updating them isn't useful and can cause a needless reboot
EC2_NOT_UPDATABLE_PROPERTIES = ['ImageId', 'Tags', 'UserData']
# CloudFormation is nicely chopped up into:
# * what to add
# * what to modify
# * what to remove
# What we see here is the new Terraform generated.tf file, containing all resources (just Fastly so far).
# We can do a diff with the current one which would already be an improvement, but ultimately the source of truth
# is changing it and running a terraform plan to see proposed changes. We should however roll it back if the user
# doesn't confirm.
"represents a delta between an old and new CloudFormation generated template, showing which resources are being added, updated, or removed"
class Delta(namedtuple('Delta', ['plus', 'edit', 'minus', 'terraform'])):
    """a delta between an old and a new generated template.

    'plus', 'edit' and 'minus' are {'Resources': ..., 'Outputs': ...}
    dictionaries of CloudFormation additions, updates and removals;
    'terraform' holds the Terraform side of the delta (or None)."""
    @property
    def non_empty(self):
        "True when the delta contains any CloudFormation or Terraform change"
        cfn_sections = (self.plus, self.edit, self.minus)
        flags = [section[key] for section in cfn_sections for key in ('Resources', 'Outputs')]
        flags.append(self.terraform)
        return any(flags)
_empty_cloudformation_dictionary = {'Resources': {}, 'Outputs': {}}
Delta.__new__.__defaults__ = (_empty_cloudformation_dictionary,) * 3 + (None,)
"represents a delta between an old and new Terraform generated template, showing which resources are being added, updated, or removed"
class TerraformDelta(namedtuple('TerraformDelta', ['old_contents', 'new_contents'])):
    """pair of (old, new) Terraform template strings."""
    def __str__(self):
        # rendering the delta shows the proposed (new) template contents
        return self.new_contents
    def diff(self):
        "structured comparison of the parsed old and new templates"
        old, new = map(json.loads, (self.old_contents, self.new_contents))
        return deepdiff.DeepDiff(old, new)
def template_delta(context):
    """given an already existing template, regenerates it and produces a delta containing only the new resources.
    Some of the existing resources are treated as immutable and not put in the delta. Most that support non-destructive updates like CloudFront are instead included"""
    old_template = read_template(context['stackname'])
    template = json.loads(cloudformation.render_template(context))
    # default to empty JSON documents so TerraformDelta always has parseable contents
    old_terraform_template_file = '{}'
    new_terraform_template_file = '{}'
    if context['fastly']:
        old_terraform_template_file = terraform.read_template(context['stackname'])
        new_terraform_template_file = terraform.render(context)
    def _related_to_ec2(output):
        # an Output relates to EC2 when its Value references an EC2Instance*
        # resource, either directly (Ref) or via an attribute (Fn::GetAtt)
        if 'Value' in output:
            if 'Ref' in output['Value']:
                return 'EC2Instance' in output['Value']['Ref']
            if 'Fn::GetAtt' in output['Value']:
                return 'EC2Instance' in output['Value']['Fn::GetAtt'][0]
        return False
    def _title_is_updatable(title):
        # title matches at least one of the updatable patterns
        return len([p for p in UPDATABLE_TITLE_PATTERNS if re.match(p, title)]) > 0
    def _title_is_removable(title):
        # title matches at least one of the removable patterns
        return len([p for p in REMOVABLE_TITLE_PATTERNS if re.match(p, title)]) > 0
    # TODO: investigate if this is still necessary
    # start backward compatibility code
    # back for when EC2Instance was the title rather than EC2Instance1
    if 'EC2Instance' in old_template['Resources']:
        # rewrite references in the *new* template to point at the legacy title
        if 'ExtraStorage' in template['Resources']:
            template['Resources']['ExtraStorage']['Properties']['AvailabilityZone']['Fn::GetAtt'][0] = 'EC2Instance'
        if 'MountPoint' in template['Resources']:
            template['Resources']['MountPoint']['Properties']['InstanceId']['Ref'] = 'EC2Instance'
        if 'IntDNS' in template['Resources']:
            template['Resources']['IntDNS']['Properties']['ResourceRecords'][0]['Fn::GetAtt'][0] = 'EC2Instance'
        if 'ExtDNS' in template['Resources']:
            template['Resources']['ExtDNS']['Properties']['ResourceRecords'][0]['Fn::GetAtt'][0] = 'EC2Instance'
    # end backward compatibility code
    def _title_has_been_updated(title, section):
        # title was there before with a deprecated name, leave it alone
        # e.g. 'EC2Instance' rather than 'EC2Instance1'
        if not title in old_template[section]:
            return False
        # NOTE(review): dict() is a *shallow* copy — blanking nested Properties
        # below also mutates the underlying templates; confirm this is acceptable
        title_in_old = dict(old_template[section][title])
        title_in_new = dict(template[section][title])
        # ignore UserData changes, it's not useful to update them and cause
        # a needless reboot
        if 'Type' in title_in_old:
            if title_in_old['Type'] == 'AWS::EC2::Instance':
                for property_name in EC2_NOT_UPDATABLE_PROPERTIES:
                    title_in_old['Properties'][property_name] = None
                    title_in_new['Properties'][property_name] = None
        return title_in_old != title_in_new
    def legacy_title(title):
        # some titles like EC2Instance1 were originally EC2Instance
        # however, no reason not to let EC2Instance2 be created?
        # (implicitly returns None for any other title)
        if title in ['EC2Instance1', 'ExtraStorage1', 'MountPoint1']:
            return title.strip('1')
    # resources/outputs present in the new template but not the old one
    delta_plus_resources = {
        title: r for (title, r) in template['Resources'].items()
        if (title not in old_template['Resources']
            and (legacy_title(title) not in old_template['Resources'])
            and (title != 'EC2Instance'))
    }
    delta_plus_outputs = {
        title: o for (title, o) in template.get('Outputs', {}).items()
        if (title not in old_template.get('Outputs', {}) and not _related_to_ec2(o))
    }
    # titles present in both templates whose contents have changed
    delta_edit_resources = {
        title: r for (title, r) in template['Resources'].items()
        if (_title_is_updatable(title) and _title_has_been_updated(title, 'Resources'))
    }
    delta_edit_outputs = {
        title: o for (title, o) in template.get('Outputs', {}).items()
        if (_title_is_updatable(title) and _title_has_been_updated(title, 'Outputs'))
    }
    # titles present in the old template but dropped from the new one
    delta_minus_resources = {r: v for r, v in old_template['Resources'].items() if r not in template['Resources'] and _title_is_removable(r)}
    delta_minus_outputs = {o: v for o, v in old_template.get('Outputs', {}).items() if o not in template.get('Outputs', {})}
    return Delta(
        {
            'Resources': delta_plus_resources,
            'Outputs': delta_plus_outputs,
        },
        {
            'Resources': delta_edit_resources,
            'Outputs': delta_edit_outputs,
        },
        {
            'Resources': delta_minus_resources,
            'Outputs': delta_minus_outputs,
        },
        TerraformDelta(old_terraform_template_file, new_terraform_template_file)
    )
def merge_delta(stackname, delta):
    """Merges the new resources in delta in the local copy of the Cloudformation template.

    writes both the updated CloudFormation template and the delta's Terraform
    template to disk, returning the merged CloudFormation template."""
    template = read_template(stackname)
    apply_delta(template, delta)
    write_cloudformation_template(stackname, json.dumps(template))
    # str() of a TerraformDelta renders the *new* terraform template contents
    terraform.write_template(stackname, str(delta.terraform))
    return template
def apply_delta(template, delta):
    """applies a Delta to a CloudFormation template data structure, in place.

    additions ('plus') and updates ('edit') are merged into the template's
    sections; removals ('minus') are deleted by title. raises (via `ensure`)
    when a delta touches a section other than Resources or Outputs."""
    def _known(component):
        # guard against deltas touching sections this code doesn't understand
        ensure(component in ["Resources", "Outputs"], "Template component %s not recognized" % component)
    # 'plus' and 'edit' are applied identically: merge the new/changed
    # entries into the (possibly missing) template section
    for changeset in (delta.plus, delta.edit):
        for component in changeset:
            _known(component)
            data = template.get(component, {})
            data.update(changeset[component])
            template[component] = data
    for component in delta.minus:
        _known(component)
        for title in delta.minus[component]:
            del template[component][title]
def _current_cloudformation_template(stackname):
    "retrieves a template from the CloudFormation API, using it as the source of truth"
    cfn = core.boto_conn(stackname, 'cloudformation', client=True)
    # NOTE(review): boto may return TemplateBody as a dict or a string depending
    # on how the template was uploaded — confirm callers handle both
    return cfn.get_template(StackName=stackname)['TemplateBody']
def download_cloudformation_template(stackname):
    "fetches the stack's current template from the CloudFormation API and writes it to the local stacks directory"
    write_cloudformation_template(stackname, json.dumps(_current_cloudformation_template(stackname)))
def regenerate_stack(stackname, **more_context):
    """rebuilds the context for an existing stack and computes the delta between
    the current template and a freshly generated one.

    returns a triple of (new context, delta, previous context)."""
    current_context = context_handler.load_context(stackname)
    # refresh the local template copy; it becomes the 'old' side of the delta
    download_cloudformation_template(stackname)
    (pname, instance_id) = core.parse_stackname(stackname)
    more_context['stackname'] = stackname # TODO: purge this crap
    # NOTE(review): assumes the instance id names an alt-config — confirm for ad-hoc instances
    more_context['alt-config'] = instance_id
    context = build_context(pname, existing_context=current_context, **more_context)
    delta = template_delta(context)
    return context, delta, current_context
|
from test.test_support import verbose, have_unicode, TestFailed
import sys
from test.test_support import MAX_Py_ssize_t
maxsize = MAX_Py_ssize_t
# test string formatting operator (I am not sure if this is being tested
# elsewhere but, surely, some of the given cases are *not* tested because
# they crash python)
# test on unicode strings as well
overflowok = 1
overflowrequired = 0
def testformat(formatstr, args, output=None, limit=None):
    """Apply `formatstr % args` and check the result (Python 2 test helper).

    output: expected result string; None means only check that the
            formatting operation works at all.
    limit:  when given, only the first `limit` characters must match
            `output` (lengths must still be equal) -- used for float
            results where trailing digits vary across platforms.
    Honors the module globals `overflowok` and `overflowrequired`.
    """
    if verbose:
        if output:
            print "%s %% %s =? %s ..." %\
                (repr(formatstr), repr(args), repr(output)),
        else:
            print "%s %% %s works? ..." % (repr(formatstr), repr(args)),
    try:
        result = formatstr % args
    except OverflowError:
        # overflow is acceptable only while the `overflowok` global is set
        if not overflowok:
            raise
        if verbose:
            print 'overflow (this is fine)'
    else:
        if overflowrequired:
            # this case was expected to overflow but did not: report it
            if verbose:
                print 'no'
            print "overflow expected on %s %% %s" % \
                (repr(formatstr), repr(args))
        elif output and limit is None and result != output:
            if verbose:
                print 'no'
            print "%s %% %s == %s != %s" % \
                (repr(formatstr), repr(args), repr(result), repr(output))
        # when 'limit' is specified, it determines how many characters
        # must match exactly; lengths must always match.
        # ex: limit=5, '12345678' matches '12345___'
        # (mainly for floating point format tests for which an exact match
        # can't be guaranteed due to rounding and representation errors)
        elif output and limit is not None and (
                len(result)!=len(output) or result[:limit]!=output[:limit]):
            if verbose:
                print 'no'
            print "%s %% %s == %s != %s" % \
                (repr(formatstr), repr(args), repr(result), repr(output))
        else:
            if verbose:
                print 'yes'
def testboth(formatstr, *args):
    # exercise the same format on both str and (when built with it) unicode
    testformat(formatstr, *args)
    if have_unicode:
        testformat(unicode(formatstr), *args)
testboth("%.1d", (1,), "1")
testboth("%.*d", (sys.maxint,1)) # expect overflow
testboth("%.100d", (1,), '0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000001')
testboth("%#.117x", (1,), '0x000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000001')
testboth("%#.118x", (1,), '0x0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000001')
testboth("%f", (1.0,), "1.000000")
# these are trying to test the limits of the internal magic-number-length
# formatting buffer, if that number changes then these tests are less
# effective
testboth("%#.*g", (109, -1.e+49/3.))
testboth("%#.*g", (110, -1.e+49/3.))
testboth("%#.*g", (110, -1.e+100/3.))
# test some ridiculously large precision, expect overflow
testboth('%12.*f', (123456, 1.0))
# check for internal overflow validation on length of precision
overflowrequired = 1
testboth("%#.*g", (110, -1.e+100/3.))
testboth("%#.*G", (110, -1.e+100/3.))
testboth("%#.*f", (110, -1.e+100/3.))
testboth("%#.*F", (110, -1.e+100/3.))
overflowrequired = 0
# Formatting of long integers. Overflow is not ok
overflowok = 0
testboth("%x", 10L, "a")
testboth("%x", 100000000000L, "174876e800")
testboth("%o", 10L, "12")
testboth("%o", 100000000000L, "1351035564000")
testboth("%d", 10L, "10")
testboth("%d", 100000000000L, "100000000000")
big = 123456789012345678901234567890L
testboth("%d", big, "123456789012345678901234567890")
testboth("%d", -big, "-123456789012345678901234567890")
testboth("%5d", -big, "-123456789012345678901234567890")
testboth("%31d", -big, "-123456789012345678901234567890")
testboth("%32d", -big, " -123456789012345678901234567890")
testboth("%-32d", -big, "-123456789012345678901234567890 ")
testboth("%032d", -big, "-0123456789012345678901234567890")
testboth("%-032d", -big, "-123456789012345678901234567890 ")
testboth("%034d", -big, "-000123456789012345678901234567890")
testboth("%034d", big, "0000123456789012345678901234567890")
testboth("%0+34d", big, "+000123456789012345678901234567890")
testboth("%+34d", big, " +123456789012345678901234567890")
testboth("%34d", big, " 123456789012345678901234567890")
testboth("%.2d", big, "123456789012345678901234567890")
testboth("%.30d", big, "123456789012345678901234567890")
testboth("%.31d", big, "0123456789012345678901234567890")
testboth("%32.31d", big, " 0123456789012345678901234567890")
testboth("%d", float(big), "123456________________________", 6)
big = 0x1234567890abcdef12345L # 21 hex digits
testboth("%x", big, "1234567890abcdef12345")
testboth("%x", -big, "-1234567890abcdef12345")
testboth("%5x", -big, "-1234567890abcdef12345")
testboth("%22x", -big, "-1234567890abcdef12345")
testboth("%23x", -big, " -1234567890abcdef12345")
testboth("%-23x", -big, "-1234567890abcdef12345 ")
testboth("%023x", -big, "-01234567890abcdef12345")
testboth("%-023x", -big, "-1234567890abcdef12345 ")
testboth("%025x", -big, "-0001234567890abcdef12345")
testboth("%025x", big, "00001234567890abcdef12345")
testboth("%0+25x", big, "+0001234567890abcdef12345")
testboth("%+25x", big, " +1234567890abcdef12345")
testboth("%25x", big, " 1234567890abcdef12345")
testboth("%.2x", big, "1234567890abcdef12345")
testboth("%.21x", big, "1234567890abcdef12345")
testboth("%.22x", big, "01234567890abcdef12345")
testboth("%23.22x", big, " 01234567890abcdef12345")
testboth("%-23.22x", big, "01234567890abcdef12345 ")
testboth("%X", big, "1234567890ABCDEF12345")
testboth("%#X", big, "0X1234567890ABCDEF12345")
testboth("%#x", big, "0x1234567890abcdef12345")
testboth("%#x", -big, "-0x1234567890abcdef12345")
testboth("%#.23x", -big, "-0x001234567890abcdef12345")
testboth("%#+.23x", big, "+0x001234567890abcdef12345")
testboth("%# .23x", big, " 0x001234567890abcdef12345")
testboth("%#+.23X", big, "+0X001234567890ABCDEF12345")
testboth("%#-+.23X", big, "+0X001234567890ABCDEF12345")
testboth("%#-+26.23X", big, "+0X001234567890ABCDEF12345")
testboth("%#-+27.23X", big, "+0X001234567890ABCDEF12345 ")
testboth("%#+27.23X", big, " +0X001234567890ABCDEF12345")
# next one gets two leading zeroes from precision, and another from the
# 0 flag and the width
testboth("%#+027.23X", big, "+0X0001234567890ABCDEF12345")
# same, except no 0 flag
testboth("%#+27.23X", big, " +0X001234567890ABCDEF12345")
testboth("%x", float(big), "123456_______________", 6)
big = 012345670123456701234567012345670L # 32 octal digits
testboth("%o", big, "12345670123456701234567012345670")
testboth("%o", -big, "-12345670123456701234567012345670")
testboth("%5o", -big, "-12345670123456701234567012345670")
testboth("%33o", -big, "-12345670123456701234567012345670")
testboth("%34o", -big, " -12345670123456701234567012345670")
testboth("%-34o", -big, "-12345670123456701234567012345670 ")
testboth("%034o", -big, "-012345670123456701234567012345670")
testboth("%-034o", -big, "-12345670123456701234567012345670 ")
testboth("%036o", -big, "-00012345670123456701234567012345670")
testboth("%036o", big, "000012345670123456701234567012345670")
testboth("%0+36o", big, "+00012345670123456701234567012345670")
testboth("%+36o", big, " +12345670123456701234567012345670")
testboth("%36o", big, " 12345670123456701234567012345670")
testboth("%.2o", big, "12345670123456701234567012345670")
testboth("%.32o", big, "12345670123456701234567012345670")
testboth("%.33o", big, "012345670123456701234567012345670")
testboth("%34.33o", big, " 012345670123456701234567012345670")
testboth("%-34.33o", big, "012345670123456701234567012345670 ")
testboth("%o", big, "12345670123456701234567012345670")
testboth("%#o", big, "012345670123456701234567012345670")
testboth("%#o", -big, "-012345670123456701234567012345670")
testboth("%#.34o", -big, "-0012345670123456701234567012345670")
testboth("%#+.34o", big, "+0012345670123456701234567012345670")
testboth("%# .34o", big, " 0012345670123456701234567012345670")
testboth("%#+.34o", big, "+0012345670123456701234567012345670")
testboth("%#-+.34o", big, "+0012345670123456701234567012345670")
testboth("%#-+37.34o", big, "+0012345670123456701234567012345670 ")
testboth("%#+37.34o", big, " +0012345670123456701234567012345670")
# next one gets one leading zero from precision
testboth("%.33o", big, "012345670123456701234567012345670")
# base marker shouldn't change that, since "0" is redundant
testboth("%#.33o", big, "012345670123456701234567012345670")
# but reduce precision, and base marker should add a zero
testboth("%#.32o", big, "012345670123456701234567012345670")
# one leading zero from precision, and another from "0" flag & width
testboth("%034.33o", big, "0012345670123456701234567012345670")
# base marker shouldn't change that
testboth("%0#34.33o", big, "0012345670123456701234567012345670")
testboth("%o", float(big), "123456__________________________", 6)
# Some small ints, in both Python int and long flavors).
testboth("%d", 42, "42")
testboth("%d", -42, "-42")
testboth("%d", 42L, "42")
testboth("%d", -42L, "-42")
testboth("%d", 42.0, "42")
testboth("%#x", 1, "0x1")
testboth("%#x", 1L, "0x1")
testboth("%#X", 1, "0X1")
testboth("%#X", 1L, "0X1")
testboth("%#x", 1.0, "0x1")
testboth("%#o", 1, "01")
testboth("%#o", 1L, "01")
testboth("%#o", 0, "0")
testboth("%#o", 0L, "0")
testboth("%o", 0, "0")
testboth("%o", 0L, "0")
testboth("%d", 0, "0")
testboth("%d", 0L, "0")
testboth("%#x", 0, "0x0")
testboth("%#x", 0L, "0x0")
testboth("%#X", 0, "0X0")
testboth("%#X", 0L, "0X0")
testboth("%x", 0x42, "42")
testboth("%x", -0x42, "-42")
testboth("%x", 0x42L, "42")
testboth("%x", -0x42L, "-42")
testboth("%x", float(0x42), "42")
testboth("%o", 042, "42")
testboth("%o", -042, "-42")
testboth("%o", 042L, "42")
testboth("%o", -042L, "-42")
testboth("%o", float(042), "42")
# Test exception for unknown format characters
if verbose:
print 'Testing exceptions'
def test_exc(formatstr, args, exception, excmsg):
    """Assert that `formatstr % args` raises `exception` with message `excmsg`."""
    try:
        testformat(formatstr, args)
    except exception, exc:
        if str(exc) == excmsg:
            if verbose:
                print "yes"
        else:
            # right exception type, wrong message
            if verbose: print 'no'
            print 'Unexpected ', exception, ':', repr(str(exc))
    except:
        # wrong exception type entirely; re-raise so the failure is visible
        if verbose: print 'no'
        print 'Unexpected exception'
        raise
    else:
        raise TestFailed, 'did not get expected exception: %s' % excmsg
test_exc('abc %a', 1, ValueError,
"unsupported format character 'a' (0x61) at index 5")
if have_unicode:
test_exc(unicode('abc %\u3000','raw-unicode-escape'), 1, ValueError,
"unsupported format character '?' (0x3000) at index 5")
test_exc('%d', '1', TypeError, "%d format: a number is required, not str")
test_exc('%g', '1', TypeError, "float argument required, not str")
test_exc('no format', '1', TypeError,
"not all arguments converted during string formatting")
test_exc('no format', u'1', TypeError,
"not all arguments converted during string formatting")
test_exc(u'no format', '1', TypeError,
"not all arguments converted during string formatting")
test_exc(u'no format', u'1', TypeError,
"not all arguments converted during string formatting")
class Foobar(long):
    # long subclass whose __oct__ returns a non-string; used by test_exc
    # below to verify that %o raises TypeError instead of crashing
    def __oct__(self):
        # Returning a non-string should not blow up.
        return self + 1
test_exc('%o', Foobar(), TypeError,
"expected string or Unicode object, long found")
if maxsize == 2**31-1:
# crashes 2.2.1 and earlier:
try:
"%*d"%(maxsize, -127)
except MemoryError:
pass
else:
raise TestFailed, '"%*d"%(maxsize, -127) should fail'
Changed test so it no longer runs as a side effect of importing.
import sys
from test.test_support import verbose, have_unicode, TestFailed
import test.test_support as test_support
import unittest
maxsize = test_support.MAX_Py_ssize_t
# test string formatting operator (I am not sure if this is being tested
# elsewhere but, surely, some of the given cases are *not* tested because
# they crash python)
# test on unicode strings as well
overflowok = 1
overflowrequired = 0
def testformat(formatstr, args, output=None, limit=None):
    """Apply `formatstr % args` and check the result (Python 2 test helper).

    output: expected result string; None means only check that formatting
            works.  limit: when given, only the first `limit` characters
            of the result must match `output` (lengths must still agree).
    Honors the module globals `overflowok` and `overflowrequired`.
    """
    if verbose:
        if output:
            print "%s %% %s =? %s ..." %\
                (repr(formatstr), repr(args), repr(output)),
        else:
            print "%s %% %s works? ..." % (repr(formatstr), repr(args)),
    try:
        result = formatstr % args
    except OverflowError:
        # overflow is acceptable only while the `overflowok` global is set
        if not overflowok:
            raise
        if verbose:
            print 'overflow (this is fine)'
    else:
        if overflowrequired:
            # expected an overflow but formatting succeeded: report it
            if verbose:
                print 'no'
            print "overflow expected on %s %% %s" % \
                (repr(formatstr), repr(args))
        elif output and limit is None and result != output:
            if verbose:
                print 'no'
            print "%s %% %s == %s != %s" % \
                (repr(formatstr), repr(args), repr(result), repr(output))
        # when 'limit' is specified, it determines how many characters
        # must match exactly; lengths must always match.
        # ex: limit=5, '12345678' matches '12345___'
        # (mainly for floating point format tests for which an exact match
        # can't be guaranteed due to rounding and representation errors)
        elif output and limit is not None and (
                len(result)!=len(output) or result[:limit]!=output[:limit]):
            if verbose:
                print 'no'
            print "%s %% %s == %s != %s" % \
                (repr(formatstr), repr(args), repr(result), repr(output))
        else:
            if verbose:
                print 'yes'
def testboth(formatstr, *args):
    # run the same check against both the str and unicode flavors
    testformat(formatstr, *args)
    if have_unicode:
        testformat(unicode(formatstr), *args)
class FormatTest(unittest.TestCase):
def test_format(self):
testboth("%.1d", (1,), "1")
testboth("%.*d", (sys.maxint,1)) # expect overflow
testboth("%.100d", (1,), '0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000001')
testboth("%#.117x", (1,), '0x000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000001')
testboth("%#.118x", (1,), '0x0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000001')
testboth("%f", (1.0,), "1.000000")
# these are trying to test the limits of the internal magic-number-length
# formatting buffer, if that number changes then these tests are less
# effective
testboth("%#.*g", (109, -1.e+49/3.))
testboth("%#.*g", (110, -1.e+49/3.))
testboth("%#.*g", (110, -1.e+100/3.))
# test some ridiculously large precision, expect overflow
testboth('%12.*f', (123456, 1.0))
# check for internal overflow validation on length of precision
overflowrequired = 1
testboth("%#.*g", (110, -1.e+100/3.))
testboth("%#.*G", (110, -1.e+100/3.))
testboth("%#.*f", (110, -1.e+100/3.))
testboth("%#.*F", (110, -1.e+100/3.))
overflowrequired = 0
# Formatting of long integers. Overflow is not ok
overflowok = 0
testboth("%x", 10L, "a")
testboth("%x", 100000000000L, "174876e800")
testboth("%o", 10L, "12")
testboth("%o", 100000000000L, "1351035564000")
testboth("%d", 10L, "10")
testboth("%d", 100000000000L, "100000000000")
big = 123456789012345678901234567890L
testboth("%d", big, "123456789012345678901234567890")
testboth("%d", -big, "-123456789012345678901234567890")
testboth("%5d", -big, "-123456789012345678901234567890")
testboth("%31d", -big, "-123456789012345678901234567890")
testboth("%32d", -big, " -123456789012345678901234567890")
testboth("%-32d", -big, "-123456789012345678901234567890 ")
testboth("%032d", -big, "-0123456789012345678901234567890")
testboth("%-032d", -big, "-123456789012345678901234567890 ")
testboth("%034d", -big, "-000123456789012345678901234567890")
testboth("%034d", big, "0000123456789012345678901234567890")
testboth("%0+34d", big, "+000123456789012345678901234567890")
testboth("%+34d", big, " +123456789012345678901234567890")
testboth("%34d", big, " 123456789012345678901234567890")
testboth("%.2d", big, "123456789012345678901234567890")
testboth("%.30d", big, "123456789012345678901234567890")
testboth("%.31d", big, "0123456789012345678901234567890")
testboth("%32.31d", big, " 0123456789012345678901234567890")
testboth("%d", float(big), "123456________________________", 6)
big = 0x1234567890abcdef12345L # 21 hex digits
testboth("%x", big, "1234567890abcdef12345")
testboth("%x", -big, "-1234567890abcdef12345")
testboth("%5x", -big, "-1234567890abcdef12345")
testboth("%22x", -big, "-1234567890abcdef12345")
testboth("%23x", -big, " -1234567890abcdef12345")
testboth("%-23x", -big, "-1234567890abcdef12345 ")
testboth("%023x", -big, "-01234567890abcdef12345")
testboth("%-023x", -big, "-1234567890abcdef12345 ")
testboth("%025x", -big, "-0001234567890abcdef12345")
testboth("%025x", big, "00001234567890abcdef12345")
testboth("%0+25x", big, "+0001234567890abcdef12345")
testboth("%+25x", big, " +1234567890abcdef12345")
testboth("%25x", big, " 1234567890abcdef12345")
testboth("%.2x", big, "1234567890abcdef12345")
testboth("%.21x", big, "1234567890abcdef12345")
testboth("%.22x", big, "01234567890abcdef12345")
testboth("%23.22x", big, " 01234567890abcdef12345")
testboth("%-23.22x", big, "01234567890abcdef12345 ")
testboth("%X", big, "1234567890ABCDEF12345")
testboth("%#X", big, "0X1234567890ABCDEF12345")
testboth("%#x", big, "0x1234567890abcdef12345")
testboth("%#x", -big, "-0x1234567890abcdef12345")
testboth("%#.23x", -big, "-0x001234567890abcdef12345")
testboth("%#+.23x", big, "+0x001234567890abcdef12345")
testboth("%# .23x", big, " 0x001234567890abcdef12345")
testboth("%#+.23X", big, "+0X001234567890ABCDEF12345")
testboth("%#-+.23X", big, "+0X001234567890ABCDEF12345")
testboth("%#-+26.23X", big, "+0X001234567890ABCDEF12345")
testboth("%#-+27.23X", big, "+0X001234567890ABCDEF12345 ")
testboth("%#+27.23X", big, " +0X001234567890ABCDEF12345")
# next one gets two leading zeroes from precision, and another from the
# 0 flag and the width
testboth("%#+027.23X", big, "+0X0001234567890ABCDEF12345")
# same, except no 0 flag
testboth("%#+27.23X", big, " +0X001234567890ABCDEF12345")
testboth("%x", float(big), "123456_______________", 6)
big = 012345670123456701234567012345670L # 32 octal digits
testboth("%o", big, "12345670123456701234567012345670")
testboth("%o", -big, "-12345670123456701234567012345670")
testboth("%5o", -big, "-12345670123456701234567012345670")
testboth("%33o", -big, "-12345670123456701234567012345670")
testboth("%34o", -big, " -12345670123456701234567012345670")
testboth("%-34o", -big, "-12345670123456701234567012345670 ")
testboth("%034o", -big, "-012345670123456701234567012345670")
testboth("%-034o", -big, "-12345670123456701234567012345670 ")
testboth("%036o", -big, "-00012345670123456701234567012345670")
testboth("%036o", big, "000012345670123456701234567012345670")
testboth("%0+36o", big, "+00012345670123456701234567012345670")
testboth("%+36o", big, " +12345670123456701234567012345670")
testboth("%36o", big, " 12345670123456701234567012345670")
testboth("%.2o", big, "12345670123456701234567012345670")
testboth("%.32o", big, "12345670123456701234567012345670")
testboth("%.33o", big, "012345670123456701234567012345670")
testboth("%34.33o", big, " 012345670123456701234567012345670")
testboth("%-34.33o", big, "012345670123456701234567012345670 ")
testboth("%o", big, "12345670123456701234567012345670")
testboth("%#o", big, "012345670123456701234567012345670")
testboth("%#o", -big, "-012345670123456701234567012345670")
testboth("%#.34o", -big, "-0012345670123456701234567012345670")
testboth("%#+.34o", big, "+0012345670123456701234567012345670")
testboth("%# .34o", big, " 0012345670123456701234567012345670")
testboth("%#+.34o", big, "+0012345670123456701234567012345670")
testboth("%#-+.34o", big, "+0012345670123456701234567012345670")
testboth("%#-+37.34o", big, "+0012345670123456701234567012345670 ")
testboth("%#+37.34o", big, " +0012345670123456701234567012345670")
# next one gets one leading zero from precision
testboth("%.33o", big, "012345670123456701234567012345670")
# base marker shouldn't change that, since "0" is redundant
testboth("%#.33o", big, "012345670123456701234567012345670")
# but reduce precision, and base marker should add a zero
testboth("%#.32o", big, "012345670123456701234567012345670")
# one leading zero from precision, and another from "0" flag & width
testboth("%034.33o", big, "0012345670123456701234567012345670")
# base marker shouldn't change that
testboth("%0#34.33o", big, "0012345670123456701234567012345670")
testboth("%o", float(big), "123456__________________________", 6)
# Some small ints, in both Python int and long flavors).
testboth("%d", 42, "42")
testboth("%d", -42, "-42")
testboth("%d", 42L, "42")
testboth("%d", -42L, "-42")
testboth("%d", 42.0, "42")
testboth("%#x", 1, "0x1")
testboth("%#x", 1L, "0x1")
testboth("%#X", 1, "0X1")
testboth("%#X", 1L, "0X1")
testboth("%#x", 1.0, "0x1")
testboth("%#o", 1, "01")
testboth("%#o", 1L, "01")
testboth("%#o", 0, "0")
testboth("%#o", 0L, "0")
testboth("%o", 0, "0")
testboth("%o", 0L, "0")
testboth("%d", 0, "0")
testboth("%d", 0L, "0")
testboth("%#x", 0, "0x0")
testboth("%#x", 0L, "0x0")
testboth("%#X", 0, "0X0")
testboth("%#X", 0L, "0X0")
testboth("%x", 0x42, "42")
testboth("%x", -0x42, "-42")
testboth("%x", 0x42L, "42")
testboth("%x", -0x42L, "-42")
testboth("%x", float(0x42), "42")
testboth("%o", 042, "42")
testboth("%o", -042, "-42")
testboth("%o", 042L, "42")
testboth("%o", -042L, "-42")
testboth("%o", float(042), "42")
# Test exception for unknown format characters
if verbose:
print 'Testing exceptions'
def test_exc(formatstr, args, exception, excmsg):
try:
testformat(formatstr, args)
except exception, exc:
if str(exc) == excmsg:
if verbose:
print "yes"
else:
if verbose: print 'no'
print 'Unexpected ', exception, ':', repr(str(exc))
except:
if verbose: print 'no'
print 'Unexpected exception'
raise
else:
raise TestFailed, 'did not get expected exception: %s' % excmsg
test_exc('abc %a', 1, ValueError,
"unsupported format character 'a' (0x61) at index 5")
if have_unicode:
test_exc(unicode('abc %\u3000','raw-unicode-escape'), 1, ValueError,
"unsupported format character '?' (0x3000) at index 5")
test_exc('%d', '1', TypeError, "%d format: a number is required, not str")
test_exc('%g', '1', TypeError, "float argument required, not str")
test_exc('no format', '1', TypeError,
"not all arguments converted during string formatting")
test_exc('no format', u'1', TypeError,
"not all arguments converted during string formatting")
test_exc(u'no format', '1', TypeError,
"not all arguments converted during string formatting")
test_exc(u'no format', u'1', TypeError,
"not all arguments converted during string formatting")
class Foobar(long):
def __oct__(self):
# Returning a non-string should not blow up.
return self + 1
test_exc('%o', Foobar(), TypeError,
"expected string or Unicode object, long found")
if maxsize == 2**31-1:
# crashes 2.2.1 and earlier:
try:
"%*d"%(maxsize, -127)
except MemoryError:
pass
else:
raise TestFailed, '"%*d"%(maxsize, -127) should fail'
def test_main():
    # regrtest entry point: run FormatTest under the test_support harness
    test_support.run_unittest(FormatTest)
if __name__ == "__main__":
unittest.main()
|
"""
function for calculating the convergence of an x, y data set
main function:
test_conv(xs, ys, name, tol)
tries to fit multiple functions to the x, y data
calculates which function fits best
for tol < 0
returns the x value for which y is converged within tol of the assymtotic value
for tol > 0
returns the x_value for which dy(x)/dx < tol for all x >= x_value, conv is true is such a x_value exists
for the best fit a gnuplot line is printed plotting the data, the function and the assymthotic value
"""
from __future__ import division
__author__ = "Michiel van Setten"
__copyright__ = " "
__version__ = "0.9"
__maintainer__ = "Michiel van Setten"
__email__ = "mjvansetten@gmail.com"
__date__ = "May 2014"
import string
import random
def id_generator(size=8, chars=string.ascii_uppercase + string.digits):
    """Return a random identifier of `size` characters drawn from `chars`."""
    picked = []
    for _ in range(size):
        picked.append(random.choice(chars))
    return ''.join(picked)
class SplineInputError(Exception):
    """Raised when the input data is unsuitable for spline interpolation.

    NOTE(review): the original stored `msg` but never passed it to
    Exception.__init__, so str(exc) was always empty; it is now forwarded.
    The `msg` attribute is kept for backward compatibility.
    """
    def __init__(self, msg):
        Exception.__init__(self, msg)
        self.msg = msg  # kept for callers that read .msg directly
def get_derivatives(xs, ys, fd=False):
    """
    Return the derivatives dy/dx of y(x) at the points xs.

    If scipy is available (and fd is False) a smoothing spline is fitted
    and its first derivative is evaluated at xs.  Otherwise -- or when
    fd=True forces finite differences -- the left and right slopes are
    computed at each point and averaged where both exist.
    """
    try:
        if fd:
            # caller explicitly asked for finite differences
            raise SplineInputError('no spline wanted')
        if len(xs) < 4:
            # too few points for a meaningful smoothing spline
            raise SplineInputError('too few data points')
        from scipy.interpolate import UnivariateSpline
        spline = UnivariateSpline(xs, ys)
        d = spline.derivative(1)(xs)
    except (ImportError, SplineInputError):
        # Finite-difference fallback: average the available one-sided slopes.
        # NOTE(review): the original let ys[n-1] wrap around to the last
        # element at n == 0 (negative indexing raises no IndexError),
        # accumulated the slope count `m` over the whole loop instead of
        # per point, and computed `left + right / m` instead of
        # `(left + right) / m`; all three are fixed here.
        d = []
        last = len(xs) - 1
        for n in range(len(xs)):
            slopes = []
            if n > 0:
                slopes.append((ys[n] - ys[n-1]) / (xs[n] - xs[n-1]))
            if n < last:
                slopes.append((ys[n+1] - ys[n]) / (xs[n+1] - xs[n]))
            # a single data point has no neighbors; report a zero slope
            d.append(sum(slopes) / len(slopes) if slopes else 0.0)
    return d
"""
functions used in the fitting procedure, with initial guesses
"""
def reciprocal(x, a, b, n):
    """Reciprocal-power model a + b / x**n, exponent clamped to [1, 5].

    Lists come back as numpy arrays; scalars stay scalar.
    """
    import numpy as np
    n = min(max(n, 1), 5)
    if isinstance(x, list):
        return np.array([a + b / xv ** n for xv in x])
    return a + b / x ** n
def p0_reciprocal(xs, ys):
    """Initial [a, b, n] guess for `reciprocal`: asymptote from the last y, n = 1."""
    offset = ys[-1]
    slope = ys[0]*xs[0] - offset*xs[0]
    return [offset, slope, 1]
def exponential(x, a, b, n):
    """Exponential-decay model a + b * n**-x.

    The base n is clamped to [1.000001, 1.2] and the amplitude b to
    [-10, 10].  Lists come back as numpy arrays; scalars stay scalar.
    """
    import numpy as np
    n = min(max(n, 1.000001), 1.2)
    b = min(max(b, -10), 10)
    if isinstance(x, list):
        return np.array([a + b * n ** -xv for xv in x])
    return a + b * n ** -x
def p0_exponential(xs, ys):
    """Initial [a, b, n] guess for `exponential`, anchored on the 2nd and last points."""
    base = 1.005
    amp = (base ** -xs[-1] - base ** -xs[1]) / (ys[-1] - ys[1])
    offset = ys[1] - amp * base ** -xs[1]
    return [offset, amp, base]
def single_reciprocal(x, a, b, c):
    """Shifted reciprocal model a + b / (x - c); lists come back as numpy arrays."""
    import numpy as np
    if isinstance(x, list):
        return np.array([a + b / (xv - c) for xv in x])
    return a + b / (x - c)
def p0_single_reciprocal(xs, ys):
    """Initial [a, b, c] guess for `single_reciprocal`, pole guessed at c = 1."""
    pole = 1
    slope = (1/(xs[-1] - pole) - 1/(xs[1] - pole)) / (ys[-1] - ys[1])
    offset = ys[1] - slope / (xs[1] - pole)
    return [offset, slope, pole]
def simple_reciprocal(x, a, b):
    """First-order reciprocal model a + b / x; lists come back as numpy arrays."""
    import numpy as np
    if isinstance(x, list):
        return np.array([a + b / xv for xv in x])
    return a + b / x
def p0_simple_reciprocal(xs, ys):
    """Initial [a, b] guess for `simple_reciprocal` from the 2nd and last points."""
    slope = (1/xs[-1] - 1/xs[1]) / (ys[-1] - ys[1])
    offset = ys[1] - slope / xs[1]
    return [offset, slope]
def simple_2reciprocal(x, a, b):
    """Fixed-power reciprocal model a + b / x**2; lists come back as numpy arrays."""
    import numpy as np
    power = 2
    if isinstance(x, list):
        return np.array([a + b / xv ** power for xv in x])
    return a + b / x ** power
def p0_simple_2reciprocal(xs, ys):
    """Initial [a, b] guess for `simple_2reciprocal` from the 2nd and last points."""
    power = 2
    slope = (1/xs[-1]**power - 1/xs[1]**power) / (ys[-1] - ys[1])
    offset = ys[1] - slope / xs[1]**power
    return [offset, slope]
def simple_4reciprocal(x, a, b):
    """Fixed-power reciprocal model a + b / x**4; lists come back as numpy arrays."""
    import numpy as np
    power = 4
    if isinstance(x, list):
        return np.array([a + b / xv ** power for xv in x])
    return a + b / x ** power
def p0_simple_4reciprocal(xs, ys):
    """Initial [a, b] guess for `simple_4reciprocal` from the 2nd and last points."""
    power = 4
    slope = (1/xs[-1]**power - 1/xs[1]**power) / (ys[-1] - ys[1])
    offset = ys[1] - slope / xs[1]**power
    return [offset, slope]
def simple_5reciprocal(x, a, b):
    """Square-root reciprocal model a + b / x**0.5.

    NOTE(review): despite the '5' in the name, the fixed exponent is 0.5,
    not 5 -- kept as-is for behavioral compatibility.
    """
    import numpy as np
    power = 0.5
    if isinstance(x, list):
        return np.array([a + b / xv ** power for xv in x])
    return a + b / x ** power
def p0_simple_5reciprocal(xs, ys):
    """Initial [a, b] guess for `simple_5reciprocal` (exponent 0.5, see that model)."""
    power = 0.5
    slope = (1/xs[-1]**power - 1/xs[1]**power) / (ys[-1] - ys[1])
    offset = ys[1] - slope / xs[1]**power
    return [offset, slope]
def measure(function, xs, ys, popt, weights):
    """Sum of absolute residuals |y - function(x, *popt)| over the data set.

    Supports 2- and 3-parameter models; raises NotImplementedError for
    any other parameter count.  `weights` is accepted but currently
    unused (kept for interface compatibility).
    """
    total = 0
    for idx, x in enumerate(xs):
        if len(popt) == 2:
            total += abs(ys[idx] - function(x, popt[0], popt[1]))
        elif len(popt) == 3:
            total += abs(ys[idx] - function(x, popt[0], popt[1], popt[2]))
        else:
            raise NotImplementedError
    return total
def get_weigts(xs, ys, mode=2):
    """Build a per-point weight vector for curve fitting.

    mode 1: weight each point by the smallest absolute finite-difference
            derivative relative to its own derivative (flat regions get
            the largest weights).
    mode 2: weight proportional to x / max(x) (later points weigh more).
    any other mode: uniform weights of 1.

    NOTE(review): the original chain was `if mode == 1: ... if mode == 2:
    ... else: ...`, so mode-1 weights were always clobbered by the [1]*n
    `else` branch; it is now a proper if/elif/else.  `xs / maxxs` also
    failed for plain lists and is now an explicit per-element division.
    """
    if mode == 1:
        # derivatives only needed for this mode; computed lazily
        ds = get_derivatives(xs, ys, fd=True)
        mind = min(abs(d) for d in ds)
        weights = [abs(mind / d) for d in ds]
    elif mode == 2:
        maxxs = max(xs)
        weights = [x / maxxs for x in xs]
    else:
        weights = [1] * len(xs)
    return weights
def multy_curve_fit(xs, ys, verbose):
    """
    fit multiple functions to the x, y data, return the best fit
    """
    # candidate model -> initial-guess generator
    #functions = {exponential: p0_exponential, reciprocal: p0_reciprocal, single_reciprocal: p0_single_reciprocal}
    functions = {
        exponential: p0_exponential,
        reciprocal: p0_reciprocal,
        single_reciprocal: p0_single_reciprocal,
        simple_reciprocal: p0_simple_reciprocal,
        simple_2reciprocal: p0_simple_2reciprocal,
        simple_4reciprocal: p0_simple_4reciprocal,
        simple_5reciprocal: p0_simple_5reciprocal
    }
    import numpy as np
    from scipy.optimize import curve_fit
    fit_results = {}
    # best = [winning function, its residual measure]
    best = ['', np.inf]
    for function in functions:
        try:
            weights = get_weigts(xs, ys)
            popt, pcov = curve_fit(function, xs, ys, functions[function](xs, ys), maxfev=8000, sigma=weights)
            # total absolute residual; smaller is better
            m = measure(function, xs, ys, popt, weights)
            # worst single-parameter standard error from the covariance
            perr = max(np.sqrt(np.diag(pcov)))
            #print 'pcov:\n', pcov
            #print 'diag:\n', np.sqrt(np.diag(pcov))
            #print 'function:\n', function, perr, m
            fit_results.update({function: {'measure': m, 'perr': perr, 'popt': popt, 'pcov': pcov}})
            # NOTE(review): all fits so far are re-scanned on every
            # iteration; comparing only the new fit would suffice
            for f in fit_results:
                if fit_results[f]['measure'] <= best[1]:
                    best = f, fit_results[f]['measure']
            print function, m
        except RuntimeError:
            # curve_fit raises RuntimeError when maxfev is exhausted
            # NOTE(review): `if True:` looks like it was meant to be
            # `if verbose:` -- the `verbose` parameter is otherwise unused
            if True:
                print 'no fit found for ', function
    # NOTE(review): raises KeyError ('' not in fit_results) if every fit failed
    return fit_results[best[0]]['popt'], fit_results[best[0]]['pcov'], best
def print_plot_line(function, popt, xs, ys, name, extra=''):
    """
    print the gnuplot command line to plot the x, y data with the fitted function using the popt parameters
    """
    # the data points go to a uniquely named 'convdat.<id>' file that the
    # generated gnuplot command references
    idp = id_generator()
    f = open('convdat.'+str(idp), mode='w')
    for n in range(0, len(ys), 1):
        f.write(str(xs[n]) + ' ' + str(ys[n]) + '\n')
    f.write('\n')
    f.close()
    line = ''
    # one gnuplot expression per supported model; each plot also draws the
    # fitted asymptote popt[0] as a reference, and clamps the exponent/base
    # the same way the model function itself does
    if function is exponential:
        line = "plot %s + %s * %s ** -x, 'convdat.%s' pointsize 4 lt 0, %s" % \
            (popt[0], popt[1], min(max(1.00001, popt[2]), 1.2), idp, popt[0])
    elif function is reciprocal:
        line = "plot %s + %s / x**%s, 'convdat.%s' pointsize 4 lt 0, %s" % \
            (popt[0], popt[1], min(max(1, popt[2]), 5), idp, popt[0])
    elif function is single_reciprocal:
        line = "plot %s + %s / (x - %s), 'convdat.%s' pointsize 4 lt 0, %s" % \
            (popt[0], popt[1], popt[2], idp, popt[0])
    elif function is simple_reciprocal:
        line = "plot %s + %s / x, 'convdat.%s' pointsize 4 lt 0, %s" % \
            (popt[0], popt[1], idp, popt[0])
    elif function is simple_2reciprocal:
        line = "plot %s + %s / x**2, 'convdat.%s' pointsize 4 lt 0, %s" % \
            (popt[0], popt[1], idp, popt[0])
    elif function is simple_4reciprocal:
        line = "plot %s + %s / x**4, 'convdat.%s' pointsize 4 lt 0, %s" % \
            (popt[0], popt[1], idp, popt[0])
    elif function is simple_5reciprocal:
        line = "plot %s + %s / x**0.5, 'convdat.%s' pointsize 4 lt 0, %s" % \
            (popt[0], popt[1], idp, popt[0])
    else:
        # unknown model: nothing to plot (empty command is still appended)
        print function, ' no plot '
    # the commands accumulate in a shared 'plot-fits' script (append mode)
    f = open('plot-fits', mode='a')
    f.write('pause -1 \n')
    f.write('set title "' + name + ' - ' + extra + '"\n')
    f.write("set output '" + name + '-' + idp + ".gif'" + '\n')
    f.write(line + '\n')
    f.close()
def test_conv(xs, ys, name, tol=0.0001, extra='', verbose=False):
    """Test convergence of the x, y data set.

    For tol > 0: finds the x_value at which |dy/dx| < tol for all
    x >= x_value; conv is True if such an x_value exists.
    For tol < 0: tests |asymptote - y| < |tol| instead, using the
    asymptote popt[0] of the best fit.
    Returns [conv, x_value, y_value, n_value, asymptote, derivative].
    Side effects: appends fit data to '<name>.fitdat' and a gnuplot
    command to 'plot-fits'.
    """
    conv = False
    x_value = float('inf')
    y_value = None
    n_value = None
    popt = [None, None, None]
    if len(xs) > 2:
        # derivatives only over the prefix of xs that has y data
        ds = get_derivatives(xs[0:len(ys)], ys)
        try:
            import numpy as np
            from scipy.optimize import curve_fit
            if None not in ys:
                #popt, pcov = curve_fit(exponential, xs, ys, p0_exponential(xs, ys), maxfev=8000)
                #perr = np.sqrt(np.diag(pcov))
                #print perr
                popt, pcov, func = multy_curve_fit(xs, ys, verbose)
                # func[1] is the winning fit's total absolute residual
                if func[1] > abs(len(ys)*tol):
                    print 'warning function ', func[0], ' as the best fit but not a good fit: ', func[1]
                #print popt
                #print pcov
                #print func
                # todo print this to file via a method in helper, as dict
                f = open(name+'.fitdat', mode='a')
                f.write('{')
                f.write('"popt": ' + str(popt) + ', ')
                f.write('"pcov": ' + str(pcov) + ', ')
                f.write('"data": [')
                for n in range(0, len(ys), 1):
                    f.write('[' + str(xs[n]) + ' ' + str(ys[n]) + ']')
                f.write(']}\n')
                f.close()
                print_plot_line(func[0], popt, xs, ys, name, extra=extra)
                # print 'plot ', popt[0], ' + ', popt[1], "/x**", popt[2], ', "'+name+'.convdat"'
                # print 'plot ', popt[0], ' + ', popt[1], "/x", popt[2], '/x**2, "'+name+'.convdat"'
                # id = id_generator()
                # print 'plot ', popt[0], ' + ', popt[1], "* ", popt[2], " ** -x," "'"+'convdat.'+id+"'"
        except ImportError:
            # no scipy: fall back to the raw finite-difference test below
            popt, pcov = None, None
        for n in range(0, len(ds), 1):
            if tol < 0:
                # negative tol: converge towards the fitted asymptote popt[0]
                if popt[0] is not None:
                    test = abs(popt[0] - ys[n])
                else:
                    test = float('inf')
            else:
                # positive tol: converge on the derivative itself
                test = abs(ds[n])
            if verbose:
                print test
            if test < abs(tol):
                if verbose:
                    print 'converged'
                conv = True
                # remember the earliest x at which convergence holds
                if xs[n] < x_value:
                    x_value = xs[n]
                    y_value = ys[n]
                    n_value = n
            else:
                # any later failure resets the convergence state
                if verbose:
                    print 'not converged'
                conv = False
                x_value = float('inf')
        if n_value is None:
            return [conv, x_value, y_value, n_value, popt[0], None]
        else:
            return [conv, x_value, y_value, n_value, popt[0], ds[n_value]]
    else:
        # fewer than 3 points: cannot assess convergence
        return [conv, x_value, y_value, n_value, popt[0], None]
convergence
Former-commit-id: 176f1b44903da6faa97e079a5450f7a82fe21318 [formerly dec97a35d7f078073f2165a53ca8215e3147dbf4]
Former-commit-id: 92322e59113dfbae194662c863d19af08545fef4
"""
function for calculating the convergence of an x, y data set
main function:
test_conv(xs, ys, name, tol)
tries to fit multiple functions to the x, y data
calculates which function fits best
for tol < 0
returns the x value for which y is converged within |tol| of the asymptotic value
for tol > 0
returns the x_value for which dy(x)/dx < tol for all x >= x_value; conv is true if such an x_value exists
for the best fit a gnuplot line is printed plotting the data, the function and the asymptotic value
"""
from __future__ import division
__author__ = "Michiel van Setten"
__copyright__ = " "
__version__ = "0.9"
__maintainer__ = "Michiel van Setten"
__email__ = "mjvansetten@gmail.com"
__date__ = "May 2014"
import string
import random
def id_generator(size=8, chars=string.ascii_uppercase + string.digits):
    """Build a random identifier of `size` characters drawn from `chars`."""
    picked = [random.choice(chars) for _ in range(size)]
    return ''.join(picked)
class SplineInputError(Exception):
    """Signal that the supplied data cannot be used to build a spline."""

    def __init__(self, msg):
        # Keep the message on an attribute, matching the original contract.
        self.msg = msg
def get_derivatives(xs, ys, fd=False):
    """
    Return the derivatives of y(x) at the points x.

    If scipy is available (and there are at least 4 points) a spline is
    fitted and its first derivative is evaluated at xs; otherwise the
    left and right finite-difference slopes are averaged, using the
    single available one-sided slope at the end points.  Passing
    fd=True always returns the finite-difference slopes.

    Fixes vs. the previous version:
      * at n == 0 the "left" slope (ys[n] - ys[n-1]) silently used the
        LAST element via Python negative indexing instead of raising
        IndexError, so the first point mixed in a bogus wrap-around
        slope; the end points are now handled explicitly.
      * the average was computed as `left + right / m` (missing
        parentheses); it is now `(left + right) / m`.
    """
    if not fd and len(xs) >= 4:
        try:
            from scipy.interpolate import UnivariateSpline
            spline = UnivariateSpline(xs, ys)
            return spline.derivative(1)(xs)
        except ImportError:
            pass  # fall back to finite differences below
    d = []
    last = len(xs) - 1
    for n in range(len(xs)):
        slopes = []
        if n > 0:  # left slope exists
            slopes.append((ys[n] - ys[n - 1]) / (xs[n] - xs[n - 1]))
        if n < last:  # right slope exists
            slopes.append((ys[n + 1] - ys[n]) / (xs[n + 1] - xs[n]))
        d.append(sum(slopes) / len(slopes))
    return d
"""
functions used in the fitting procedure, with initial guesses
"""
def reciprocal(x, a, b, n):
    """
    Reciprocal function a + b / x**n, with the exponent n clamped to
    the interval [1, 5].  x may be a scalar or a list; a list yields a
    numpy array.
    """
    import numpy as np
    n = min(max(n, 1), 5)
    if isinstance(x, list):
        return np.array([a + b / x_i ** n for x_i in x])
    return a + b / x ** n
def p0_reciprocal(xs, ys):
    """
    First-guess parameters [a0, b0, n0] for fitting `reciprocal`.

    a0 is the last y value (assumed near the asymptote); b0 makes the
    curve pass through the first data point with n0 = 1.
    """
    a0 = ys[-1]
    b0 = ys[0] * xs[0] - a0 * xs[0]
    return [a0, b0, 1]
def exponential(x, a, b, n):
    """
    Exponential decay a + b * n**-x used to fit convergence data.

    The base n is clamped to [1.000001, 1.2] and the amplitude b to
    [-10, 10] to keep the curve_fit search in a sane region.  x may be
    a scalar or a list; a list yields a numpy array.
    """
    import numpy as np
    n = min(max(n, 1.000001), 1.2)
    b = min(max(b, -10), 10)
    if isinstance(x, list):
        return np.array([a + b * n ** -x_i for x_i in x])
    return a + b * n ** -x
def p0_exponential(xs, ys):
    """
    First-guess parameters [a0, b0, n0] for fitting `exponential`.

    Starts from base n0 = 1.005 and solves b0, a0 so the curve passes
    through the second and last data points.
    """
    n0 = 1.005
    b0 = (n0 ** -xs[-1] - n0 ** -xs[1]) / (ys[-1] - ys[1])
    a0 = ys[1] - b0 * n0 ** -xs[1]
    return [a0, b0, n0]
def single_reciprocal(x, a, b, c):
    """
    Shifted reciprocal a + b / (x - c) used to fit convergence data.

    x may be a scalar or a list; a list yields a numpy array.
    """
    import numpy as np
    if isinstance(x, list):
        return np.array([a + b / (x_i - c) for x_i in x])
    return a + b / (x - c)
def p0_single_reciprocal(xs, ys):
    """
    First-guess parameters [a, b, c] for fitting `single_reciprocal`.

    Fixes the shift c at 1 and solves b, a from the second and last
    data points.
    """
    c = 1
    b = (1/(xs[-1] - c)-1/(xs[1] - c)) / (ys[-1] - ys[1])
    a = ys[1] - b / (xs[1] - c)
    return [a, b, c]
def simple_reciprocal(x, a, b):
    """
    Plain reciprocal a + b / x used to fit convergence data.

    x may be a scalar or a list; a list yields a numpy array.
    """
    import numpy as np
    if isinstance(x, list):
        return np.array([a + b / x_i for x_i in x])
    return a + b / x
def p0_simple_reciprocal(xs, ys):
    """
    First-guess parameters [a, b] for fitting `simple_reciprocal`,
    solved from the second and last data points.
    """
    c = 0  # unshifted: kept for symmetry with p0_single_reciprocal
    b = (1/(xs[-1] - c) - 1/(xs[1] - c)) / (ys[-1] - ys[1])
    a = ys[1] - b / (xs[1] - c)
    return [a, b]
def simple_2reciprocal(x, a, b):
    """
    Inverse-square function a + b / x**2 used to fit convergence data.

    x may be a scalar or a list; a list yields a numpy array.
    """
    import numpy as np
    power = 2
    if isinstance(x, list):
        return np.array([a + b / x_i ** power for x_i in x])
    return a + b / x ** power
def p0_simple_2reciprocal(xs, ys):
    """
    First-guess parameters [a, b] for fitting `simple_2reciprocal`,
    solved from the second and last data points.
    """
    c = 2
    b = (1/xs[-1]**c - 1/xs[1]**c) / (ys[-1] - ys[1])
    a = ys[1] - b / xs[1]**c
    return [a, b]
def simple_4reciprocal(x, a, b):
    """
    Inverse fourth-power function a + b / x**4 used to fit convergence
    data.  x may be a scalar or a list; a list yields a numpy array.
    """
    import numpy as np
    power = 4
    if isinstance(x, list):
        return np.array([a + b / x_i ** power for x_i in x])
    return a + b / x ** power
def p0_simple_4reciprocal(xs, ys):
    """
    First-guess parameters [a, b] for fitting `simple_4reciprocal`,
    solved from the second and last data points.
    """
    c = 4
    b = (1/xs[-1]**c - 1/xs[1]**c) / (ys[-1] - ys[1])
    a = ys[1] - b / xs[1]**c
    return [a, b]
def simple_5reciprocal(x, a, b):
    """
    Inverse square-root function a + b / x**0.5 (despite the name, the
    exponent is 0.5).  x may be a scalar or a list; a list yields a
    numpy array.
    """
    import numpy as np
    power = 0.5
    if isinstance(x, list):
        return np.array([a + b / x_i ** power for x_i in x])
    return a + b / x ** power
def p0_simple_5reciprocal(xs, ys):
    """
    First-guess parameters [a, b] for fitting `simple_5reciprocal`
    (exponent 0.5), solved from the second and last data points.
    """
    c = 0.5
    b = (1/xs[-1]**c - 1/xs[1]**c) / (ys[-1] - ys[1])
    a = ys[1] - b / xs[1]**c
    return [a, b]
def measure(function, xs, ys, popt, weights):
    """
    Quality-of-fit measure: summed absolute deviation between the data
    ys and function(x, *popt) over the points xs.

    Only 2- and 3-parameter functions are supported; any other popt
    length raises NotImplementedError.  `weights` is accepted for
    interface compatibility but not used (weighting is commented out
    upstream).
    """
    total = 0
    for n, x in enumerate(xs):
        if len(popt) not in (2, 3):
            raise NotImplementedError
        total += abs(ys[n] - function(x, *popt))
    return total
def get_weigts(xs, ys, mode=2):
    """
    Return fitting weights for the data points (passed as `sigma` to
    curve_fit).

    mode 1: weights min|dy/dx| / |dy/dx| from the finite-difference
            derivatives, so flat regions weigh more;
    mode 2 (default): weights x / max(x), so later points weigh more;
    any other mode: uniform weights of 1.

    Fixes vs. the previous version:
      * the mode-1 result was immediately overwritten by the unrelated
        `if mode == 2 ... else` chain below it (missing `elif`);
      * mode 2 evaluated `xs / maxxs` on a plain list, which raises
        TypeError; the division is now done per element;
      * the derivatives are only computed when mode 1 needs them.
    """
    if mode == 1:
        ds = get_derivatives(xs, ys, fd=True)
        mind = min(abs(d) for d in ds)
        weights = [abs(mind / d) for d in ds]
    elif mode == 2:
        maxxs = max(xs)
        weights = [x / maxxs for x in xs]
    else:
        weights = [1] * len(xs)
    return weights
def multy_curve_fit(xs, ys, verbose):
    """
    Fit all candidate model functions to the x, y data and return
    (popt, pcov, best) of the best one, where best is the
    (function, measure) pair with the smallest summed absolute
    deviation (see `measure`).

    Fixes vs. the previous version:
      * `verbose` was ignored — one progress line always printed and
        the "no fit" message guarded by `if True:`; both now honor it
        (prints are single-argument and so work on Python 2 and 3);
      * the weights are loop-invariant and are computed once instead of
        once per candidate function;
      * the best-fit selection re-scanned the whole result dict inside
        the per-function loop; it now runs once afterwards.

    Raises KeyError if no candidate function could be fitted at all
    (unchanged failure behavior).
    """
    functions = {
        exponential: p0_exponential,
        reciprocal: p0_reciprocal,
        single_reciprocal: p0_single_reciprocal,
        simple_reciprocal: p0_simple_reciprocal,
        simple_2reciprocal: p0_simple_2reciprocal,
        simple_4reciprocal: p0_simple_4reciprocal,
        simple_5reciprocal: p0_simple_5reciprocal
    }
    import numpy as np
    from scipy.optimize import curve_fit
    weights = get_weigts(xs, ys)
    fit_results = {}
    for function, guess in functions.items():
        try:
            popt, pcov = curve_fit(function, xs, ys, guess(xs, ys),
                                   maxfev=8000, sigma=weights)
            m = measure(function, xs, ys, popt, weights)
            # largest single-parameter standard error, kept for diagnostics
            perr = max(np.sqrt(np.diag(pcov)))
            fit_results[function] = {'measure': m, 'perr': perr,
                                     'popt': popt, 'pcov': pcov}
            if verbose:
                print('%s %s' % (function, m))
        except RuntimeError:
            if verbose:
                print('no fit found for %s' % function)
    best = ['', np.inf]
    for f in fit_results:
        if fit_results[f]['measure'] <= best[1]:
            best = f, fit_results[f]['measure']
    return fit_results[best[0]]['popt'], fit_results[best[0]]['pcov'], best
def print_plot_line(function, popt, xs, ys, name, extra=''):
    """
    Write the x, y data to a 'convdat.<id>' file and append a gnuplot
    command plotting the data, the fitted function (parameters popt)
    and its asymptotic value popt[0] to the 'plot-fits' script.

    Fixes vs. the previous version: both files are opened with context
    managers so they are closed even if a write fails, and the
    unknown-function message uses a Python 2/3 compatible print.
    """
    idp = id_generator()
    with open('convdat.' + str(idp), mode='w') as f:
        for n in range(0, len(ys), 1):
            f.write(str(xs[n]) + ' ' + str(ys[n]) + '\n')
        f.write('\n')
    line = ''
    # The clamps mirror the ones applied inside exponential/reciprocal.
    if function is exponential:
        line = "plot %s + %s * %s ** -x, 'convdat.%s' pointsize 4 lt 0, %s" % \
               (popt[0], popt[1], min(max(1.00001, popt[2]), 1.2), idp, popt[0])
    elif function is reciprocal:
        line = "plot %s + %s / x**%s, 'convdat.%s' pointsize 4 lt 0, %s" % \
               (popt[0], popt[1], min(max(1, popt[2]), 5), idp, popt[0])
    elif function is single_reciprocal:
        line = "plot %s + %s / (x - %s), 'convdat.%s' pointsize 4 lt 0, %s" % \
               (popt[0], popt[1], popt[2], idp, popt[0])
    elif function is simple_reciprocal:
        line = "plot %s + %s / x, 'convdat.%s' pointsize 4 lt 0, %s" % \
               (popt[0], popt[1], idp, popt[0])
    elif function is simple_2reciprocal:
        line = "plot %s + %s / x**2, 'convdat.%s' pointsize 4 lt 0, %s" % \
               (popt[0], popt[1], idp, popt[0])
    elif function is simple_4reciprocal:
        line = "plot %s + %s / x**4, 'convdat.%s' pointsize 4 lt 0, %s" % \
               (popt[0], popt[1], idp, popt[0])
    elif function is simple_5reciprocal:
        line = "plot %s + %s / x**0.5, 'convdat.%s' pointsize 4 lt 0, %s" % \
               (popt[0], popt[1], idp, popt[0])
    else:
        print('%s no plot' % function)
    with open('plot-fits', mode='a') as f:
        f.write('pause -1 \n')
        f.write('set title "' + name + ' - ' + extra + '"\n')
        f.write("set output '" + name + '-' + idp + ".gif'" + '\n')
        f.write(line + '\n')
def test_conv(xs, ys, name, tol=0.0001, extra='', verbose=False):
    """
    Test the convergence of the y(x) data set.

    For tol >= 0 the criterion is |dy/dx| < tol (derivatives from
    get_derivatives); for tol < 0 it is |popt[0] - y| < |tol|, i.e. the
    distance from the fitted asymptotic value popt[0].  When scipy is
    importable, the data is fitted with multy_curve_fit, the fit is
    appended to '<name>.fitdat' and a gnuplot line is emitted via
    print_plot_line.

    Returns [conv, x_value, y_value, n_value, popt[0], derivative]:
    x_value/y_value/n_value locate the smallest converged x; conv
    reflects whether the LAST point satisfied the criterion (it is
    reset to False whenever any later point fails).
    """
    conv = False
    x_value = float('inf')
    y_value = None
    n_value = None
    popt = [None, None, None]  # fitted parameters; popt[0] is the asymptotic value
    if len(xs) > 2:
        # derivatives of the raw data (used when tol >= 0)
        ds = get_derivatives(xs[0:len(ys)], ys)
        try:
            import numpy as np
            from scipy.optimize import curve_fit
            if None not in ys:
                #popt, pcov = curve_fit(exponential, xs, ys, p0_exponential(xs, ys), maxfev=8000)
                #perr = np.sqrt(np.diag(pcov))
                #print perr
                popt, pcov, func = multy_curve_fit(xs, ys, verbose)
                # func[1] is the summed absolute deviation of the best fit
                if func[1] > abs(tol):
                    print 'warning function ', func[0], ' as the best fit but not a good fit: ', func[1]
                #print popt
                #print pcov
                #print func
                # todo print this to file via a method in helper, as dict
                f = open(name+'.fitdat', mode='a')
                f.write('{')
                f.write('"popt": ' + str(popt) + ', ')
                f.write('"pcov": ' + str(pcov) + ', ')
                f.write('"data": [')
                for n in range(0, len(ys), 1):
                    f.write('[' + str(xs[n]) + ' ' + str(ys[n]) + ']')
                f.write(']}\n')
                f.close()
                print_plot_line(func[0], popt, xs, ys, name, extra=extra)
                # print 'plot ', popt[0], ' + ', popt[1], "/x**", popt[2], ', "'+name+'.convdat"'
                # print 'plot ', popt[0], ' + ', popt[1], "/x", popt[2], '/x**2, "'+name+'.convdat"'
                # id = id_generator()
                # print 'plot ', popt[0], ' + ', popt[1], "* ", popt[2], " ** -x," "'"+'convdat.'+id+"'"
        except ImportError:
            # NOTE(review): this rebinds popt to None (not a 3-list), so the
            # popt[0] accesses below raise TypeError when scipy is missing
            # — confirm whether that path is ever exercised.
            popt, pcov = None, None
        for n in range(0, len(ds), 1):
            if tol < 0:
                # distance from the fitted asymptotic value
                if popt[0] is not None:
                    test = abs(popt[0] - ys[n])
                else:
                    test = float('inf')
            else:
                # magnitude of the local slope
                test = abs(ds[n])
            if verbose:
                print test
            if test < abs(tol):
                if verbose:
                    print 'converged'
                conv = True
                if xs[n] < x_value:
                    x_value = xs[n]
                    y_value = ys[n]
                    n_value = n
            else:
                if verbose:
                    print 'not converged'
                # a later non-converged point invalidates the result
                conv = False
                x_value = float('inf')
        if n_value is None:
            return [conv, x_value, y_value, n_value, popt[0], None]
        else:
            return [conv, x_value, y_value, n_value, popt[0], ds[n_value]]
    else:
        # fewer than 3 points: nothing can be concluded
        return [conv, x_value, y_value, n_value, popt[0], None]
|
from ctypes import *
import numpy as np
import sys
if sys.platform.startswith('linux'):
libcsound = CDLL("libcsound64.so")
elif sys.platform.startswith('win'):
libcsound = cdll.libcsound64
elif sys.platform.startswith('darwin'):
libcsound = CDLL("libcsound64.dylib")
else:
sys.exit("Don't know your system! Exiting...")
MYFLT = c_double
class CsoundParams(Structure):
    """ctypes counterpart of the Csound parameters struct read and
    written by csoundGetParams()/csoundSetParams() (see the argtypes
    declared below)."""
    _fields_ = [("debug_mode", c_int),        # debug mode, 0 or 1
                ("buffer_frames", c_int),     # number of frames in in/out buffers
                ("hardware_buffer_frames", c_int),  # ibid. hardware
                ("displays", c_int),          # graph displays, 0 or 1
                ("ascii_graphs", c_int),      # use ASCII graphs, 0 or 1
                ("postscript_graphs", c_int), # use postscript graphs, 0 or 1
                ("message_level", c_int),     # message printout control
                ("tempo", c_int),             # tempo ("sets Beatmode)
                ("ring_bell", c_int),         # bell, 0 or 1
                ("use_cscore", c_int),        # use cscore for processing
                ("terminate_on_midi", c_int), # terminate performance at the end
                                              # of midifile, 0 or 1
                ("heartbeat", c_int),         # print heart beat, 0 or 1
                ("defer_gen01_load", c_int),  # defer GEN01 load, 0 or 1
                ("midi_key", c_int),          # pfield to map midi key no
                ("midi_key_cps", c_int),      # pfield to map midi key no as cps
                ("midi_key_oct", c_int),      # pfield to map midi key no as oct
                ("midi_key_pch", c_int),      # pfield to map midi key no as pch
                ("midi_velocity", c_int),     # pfield to map midi velocity
                ("midi_velocity_amp", c_int), # pfield to map midi velocity as amplitude
                ("no_default_paths", c_int),  # disable relative paths from files, 0 or 1
                ("number_of_threads", c_int), # number of threads for multicore performance
                ("syntax_check_only", c_int), # do not compile, only check syntax
                ("csd_line_counts", c_int),   # csd line error reporting
                ("compute_weights", c_int),   # deprecated, kept for backwards comp.
                ("realtime_mode", c_int),     # use realtime priority mode, 0 or 1
                ("sample_accurate", c_int),   # use sample-level score event accuracy
                ("sample_rate_override", MYFLT),   # overriding sample rate
                ("control_rate_override", MYFLT),  # overriding control rate
                ("nchnls_override", c_int),   # overriding number of out channels
                ("nchnls_i_override", c_int), # overriding number of in channels
                ("e0dbfs_override", MYFLT),   # overriding 0dbfs
                ("daemon", c_int),            # daemon mode
                ("ksmps_override", c_int)]    # ksmps override
string64 = c_char * 64
class CsoundAudioDevice(Structure):
    """Audio device descriptor filled in by csoundGetAudioDevList() and
    passed to the audio device list callback (see argtypes below)."""
    _fields_ = [("device_name", string64),
                ("device_id", string64),
                ("rt_module", string64),
                ("max_nchnls", c_int),
                ("isOutput", c_int)]
class CsoundMidiDevice(Structure):
    """MIDI device descriptor filled in by csoundGetMIDIDevList() and
    passed to the MIDI device list callback (see argtypes below)."""
    _fields_ = [("device_name", string64),
                ("interface_name", string64),
                ("device_id", string64),
                ("midi_module", string64),
                ("isOutput", c_int)]
class CsoundRtAudioParams(Structure):
    """Real-time audio parameters handed to the playopen/recopen
    callbacks (PLAYOPENFUNC/RECORDOPENFUNC below)."""
    _fields_ = [("devName", c_char_p),   # device name (NULL/empty: default)
                ("devNum", c_int),       # device number (0-1023), 1024: default
                ("bufSamp_SW", c_uint),  # buffer fragment size (-b) in sample frames
                ("bufSamp_HW", c_int),   # total buffer size (-B) in sample frames
                ("nChannels", c_int),    # number of channels
                ("sampleFormat", c_int), # sample format (AE_SHORT etc.)
                ("sampleRate", c_float)] # sample rate in Hz
# PVSDATEXT is a variation on PVSDAT used in the pvs bus interface
class PvsdatExt(Structure):
    """PVSDATEXT: a variation on PVSDAT used in the pvs bus interface
    (csoundSetPvsChannel()/csoundGetPvsChannel() below)."""
    _fields_ = [("N", c_int32),
                ("sliding", c_int),      # Flag to indicate sliding case
                ("NB", c_int32),
                ("overlap", c_int32),
                ("winsize", c_int32),
                ("wintype", c_int),
                ("format", c_int32),
                ("framecount", c_uint32),
                ("frame", POINTER(c_float))]
# This structure holds the parameter hints for control channels
class ControlChannelHints(Structure):
    """Parameter hints for control channels, used with
    csoundSetControlChannelHints()/csoundGetControlChannelHints()."""
    _fields_ = [("behav", c_int),
                ("dflt", MYFLT),
                ("min", MYFLT),
                ("max", MYFLT),
                ("x", c_int),
                ("y", c_int),
                ("width", c_int),
                ("height", c_int),
                # This member must be set explicitly to None if not used
                ("attributes", c_char_p)]
class ControlChannelInfo(Structure):
    """Entry in the channel array returned by csoundListChannels()."""
    _fields_ = [("name", c_char_p),
                ("type", c_int),
                ("hints", ControlChannelHints)]
libcsound.csoundCreate.restype = c_void_p
libcsound.csoundCreate.argtypes = [py_object]
libcsound.csoundDestroy.argtypes = [c_void_p]
libcsound.csoundParseOrc.restype = c_void_p
libcsound.csoundParseOrc.argtypes = [c_void_p, c_char_p]
libcsound.csoundCompileTree.argtypes = [c_void_p, c_void_p]
libcsound.csoundDeleteTree.argtypes = [c_void_p, c_void_p]
libcsound.csoundCompileOrc.argtypes = [c_void_p, c_char_p]
libcsound.csoundEvalCode.restype = MYFLT
libcsound.csoundEvalCode.argtypes = [c_void_p, c_char_p]
libcsound.csoundCompileArgs.argtypes = [c_void_p, c_int, POINTER(c_char_p)]
libcsound.csoundStart.argtypes = [c_void_p]
libcsound.csoundCompile.argtypes = [c_void_p, c_int, POINTER(c_char_p)]
libcsound.csoundCompileCsd.argtypes = [c_void_p, c_char_p]
libcsound.csoundCompileCsdText.argtypes = [c_void_p, c_char_p]
libcsound.csoundPerform.argtypes = [c_void_p]
libcsound.csoundPerformKsmps.argtypes = [c_void_p]
libcsound.csoundPerformBuffer.argtypes = [c_void_p]
libcsound.csoundStop.argtypes = [c_void_p]
libcsound.csoundCleanup.argtypes = [c_void_p]
libcsound.csoundReset.argtypes = [c_void_p]
libcsound.csoundGetSr.restype = MYFLT
libcsound.csoundGetSr.argtypes = [c_void_p]
libcsound.csoundGetKr.restype = MYFLT
libcsound.csoundGetKr.argtypes = [c_void_p]
libcsound.csoundGetKsmps.restype = c_uint32
libcsound.csoundGetKsmps.argtypes = [c_void_p]
libcsound.csoundGetNchnls.restype = c_uint32
libcsound.csoundGetNchnls.argtypes = [c_void_p]
libcsound.csoundGetNchnlsInput.restype = c_uint32
libcsound.csoundGetNchnlsInput.argtypes = [c_void_p]
libcsound.csoundGet0dBFS.restype = MYFLT
libcsound.csoundGet0dBFS.argtypes = [c_void_p]
libcsound.csoundGetCurrentTimeSamples.restype = c_int64
libcsound.csoundGetCurrentTimeSamples.argtypes = [c_void_p]
libcsound.csoundGetHostData.restype = py_object
libcsound.csoundGetHostData.argtypes = [c_void_p]
libcsound.csoundSetHostData.argtypes = [c_void_p, py_object]
libcsound.csoundSetOption.argtypes = [c_void_p, c_char_p]
libcsound.csoundSetParams.argtypes = [c_void_p, POINTER(CsoundParams)]
libcsound.csoundGetParams.argtypes = [c_void_p, POINTER(CsoundParams)]
libcsound.csoundGetDebug.argtypes = [c_void_p]
libcsound.csoundSetDebug.argtypes = [c_void_p, c_int]
libcsound.csoundGetOutputName.restype = c_char_p
libcsound.csoundGetOutputName.argtypes = [c_void_p]
libcsound.csoundSetOutput.argtypes = [c_void_p, c_char_p, c_char_p, c_char_p]
libcsound.csoundSetInput.argtypes = [c_void_p, c_char_p]
libcsound.csoundSetMIDIInput.argtypes = [c_void_p, c_char_p]
libcsound.csoundSetMIDIFileInput.argtypes = [c_void_p, c_char_p]
libcsound.csoundSetMIDIOutput.argtypes = [c_void_p, c_char_p]
libcsound.csoundSetMIDIFileOutput.argtypes = [c_void_p, c_char_p]
FILEOPENFUNC = CFUNCTYPE(None, c_void_p, c_char_p, c_int, c_int, c_int)
libcsound.csoundSetFileOpenCallback.argtypes = [c_void_p, FILEOPENFUNC]
libcsound.csoundSetRTAudioModule.argtypes = [c_void_p, c_char_p]
libcsound.csoundGetModule.argtypes = [c_void_p, c_int, POINTER(c_char_p), POINTER(c_char_p)]
libcsound.csoundGetInputBufferSize.restype = c_long
libcsound.csoundGetInputBufferSize.argtypes = [c_void_p]
libcsound.csoundGetOutputBufferSize.restype = c_long
libcsound.csoundGetOutputBufferSize.argtypes = [c_void_p]
libcsound.csoundGetInputBuffer.restype = POINTER(MYFLT)
libcsound.csoundGetInputBuffer.argtypes = [c_void_p]
libcsound.csoundGetOutputBuffer.restype = POINTER(MYFLT)
libcsound.csoundGetOutputBuffer.argtypes = [c_void_p]
libcsound.csoundGetSpin.restype = POINTER(MYFLT)
libcsound.csoundGetSpin.argtypes = [c_void_p]
libcsound.csoundAddSpinSample.argtypes = [c_void_p, c_int, c_int, MYFLT]
libcsound.csoundGetSpout.restype = POINTER(MYFLT)
libcsound.csoundGetSpout.argtypes = [c_void_p]
libcsound.csoundGetSpoutSample.restype = MYFLT
libcsound.csoundGetSpoutSample.argtypes = [c_void_p, c_int, c_int]
libcsound.csoundGetRtRecordUserData.restype = POINTER(c_void_p)
libcsound.csoundGetRtRecordUserData.argtypes = [c_void_p]
libcsound.csoundGetRtPlayUserData.restype = POINTER(c_void_p)
libcsound.csoundGetRtPlayUserData.argtypes = [c_void_p]
libcsound.csoundSetHostImplementedAudioIO.argtypes = [c_void_p, c_int, c_int]
libcsound.csoundGetAudioDevList.argtypes = [c_void_p, c_void_p, c_int]
PLAYOPENFUNC = CFUNCTYPE(c_int, c_void_p, POINTER(CsoundRtAudioParams))
libcsound.csoundSetPlayopenCallback.argtypes = [c_void_p, PLAYOPENFUNC]
RTPLAYFUNC = CFUNCTYPE(None, c_void_p, POINTER(MYFLT), c_int)
libcsound.csoundSetRtplayCallback.argtypes = [c_void_p, RTPLAYFUNC]
RECORDOPENFUNC = CFUNCTYPE(c_int, c_void_p, POINTER(CsoundRtAudioParams))
libcsound.csoundSetRecopenCallback.argtypes = [c_void_p, RECORDOPENFUNC]
RTRECORDFUNC = CFUNCTYPE(c_int, c_void_p, POINTER(MYFLT), c_int)
libcsound.csoundSetRtrecordCallback.argtypes = [c_void_p, RTRECORDFUNC]
RTCLOSEFUNC = CFUNCTYPE(None, c_void_p)
libcsound.csoundSetRtcloseCallback.argtypes = [c_void_p, RTCLOSEFUNC]
AUDIODEVLISTFUNC = CFUNCTYPE(c_int, c_void_p, POINTER(CsoundAudioDevice), c_int)
libcsound.csoundSetAudioDeviceListCallback.argtypes = [c_void_p, AUDIODEVLISTFUNC]
libcsound.csoundSetMIDIModule.argtypes = [c_void_p, c_char_p]
libcsound.csoundSetHostImplementedMIDIIO.argtypes = [c_void_p, c_int]
libcsound.csoundGetMIDIDevList.argtypes = [c_void_p, c_void_p, c_int]
MIDIINOPENFUNC = CFUNCTYPE(c_int, c_void_p, POINTER(c_void_p), c_char_p)
libcsound.csoundSetExternalMidiInOpenCallback.argtypes = [c_void_p, MIDIINOPENFUNC]
MIDIREADFUNC = CFUNCTYPE(c_int, c_void_p, c_void_p, c_char_p, c_int)
libcsound.csoundSetExternalMidiReadCallback.argtypes = [c_void_p, MIDIREADFUNC]
MIDIINCLOSEFUNC = CFUNCTYPE(c_int, c_void_p, c_void_p)
libcsound.csoundSetExternalMidiInCloseCallback.argtypes = [c_void_p, MIDIINCLOSEFUNC]
MIDIOUTOPENFUNC = CFUNCTYPE(c_int, c_void_p, POINTER(c_void_p), c_char_p)
libcsound.csoundSetExternalMidiOutOpenCallback.argtypes = [c_void_p, MIDIOUTOPENFUNC]
MIDIWRITEFUNC = CFUNCTYPE(c_int, c_void_p, c_void_p, c_char_p, c_int)
libcsound.csoundSetExternalMidiWriteCallback.argtypes = [c_void_p, MIDIWRITEFUNC]
MIDIOUTCLOSEFUNC = CFUNCTYPE(c_int, c_void_p, c_void_p)
libcsound.csoundSetExternalMidiOutCloseCallback.argtypes = [c_void_p, MIDIOUTCLOSEFUNC]
MIDIERRORFUNC = CFUNCTYPE(c_char_p, c_int)
libcsound.csoundSetExternalMidiErrorStringCallback.argtypes = [c_void_p, MIDIERRORFUNC]
MIDIDEVLISTFUNC = CFUNCTYPE(c_int, c_void_p, POINTER(CsoundMidiDevice), c_int)
libcsound.csoundSetMIDIDeviceListCallback.argtypes = [c_void_p, MIDIDEVLISTFUNC]
libcsound.csoundReadScore.argtypes = [c_void_p, c_char_p]
libcsound.csoundGetScoreTime.restype = c_double
libcsound.csoundGetScoreTime.argtypes = [c_void_p]
libcsound.csoundIsScorePending.argtypes = [c_void_p]
libcsound.csoundSetScorePending.argtypes = [c_void_p, c_int]
libcsound.csoundGetScoreOffsetSeconds.restype = MYFLT
libcsound.csoundGetScoreOffsetSeconds.argtypes = [c_void_p]
libcsound.csoundSetScoreOffsetSeconds.argtypes = [c_void_p, MYFLT]
libcsound.csoundRewindScore.argtypes = [c_void_p]
CSCOREFUNC = CFUNCTYPE(None, c_void_p)
libcsound.csoundSetCscoreCallback.argtypes = [c_void_p, CSCOREFUNC]
libcsound.csoundMessage.argtypes = [c_void_p, c_char_p, c_char_p]
libcsound.csoundMessageS.argtypes = [c_void_p, c_int, c_char_p, c_char_p]
libcsound.csoundSetMessageLevel.argtypes = [c_void_p, c_int]
libcsound.csoundCreateMessageBuffer.argtypes = [c_void_p, c_int]
libcsound.csoundGetFirstMessage.restype = c_char_p
libcsound.csoundGetFirstMessage.argtypes = [c_void_p]
libcsound.csoundGetFirstMessageAttr.argtypes = [c_void_p]
libcsound.csoundPopFirstMessage.argtypes = [c_void_p]
libcsound.csoundGetMessageCnt.argtypes = [c_void_p]
libcsound.csoundDestroyMessageBuffer.argtypes = [c_void_p]
libcsound.csoundGetChannelPtr.argtypes = [c_void_p, POINTER(POINTER(MYFLT)), c_char_p, c_int]
libcsound.csoundListChannels.argtypes = [c_void_p, POINTER(POINTER(ControlChannelInfo))]
libcsound.csoundDeleteChannelList.argtypes = [c_void_p, POINTER(ControlChannelInfo)]
libcsound.csoundSetControlChannelHints.argtypes = [c_void_p, c_char_p, ControlChannelHints]
libcsound.csoundGetControlChannelHints.argtypes = [c_void_p, c_char_p, POINTER(ControlChannelHints)]
libcsound.csoundGetChannelLock.restype = POINTER(c_int)
libcsound.csoundGetChannelLock.argtypes = [c_void_p, c_char_p]
libcsound.csoundGetControlChannel.restype = MYFLT
libcsound.csoundGetControlChannel.argtypes = [c_void_p, c_char_p, POINTER(c_int)]
libcsound.csoundSetControlChannel.argtypes = [c_void_p, c_char_p, MYFLT]
libcsound.csoundGetAudioChannel.argtypes = [c_void_p, c_char_p, POINTER(c_int)]
libcsound.csoundSetAudioChannel.argtypes = [c_void_p, c_char_p, POINTER(c_int)]
libcsound.csoundGetStringChannel.argtypes = [c_void_p, c_char_p, c_char_p]
libcsound.csoundSetStringChannel.argtypes = [c_void_p, c_char_p, c_char_p]
libcsound.csoundGetChannelDatasize.argtypes = [c_void_p, c_char_p]
CHANNELFUNC = CFUNCTYPE(None, c_void_p, c_char_p, c_void_p, c_void_p)
libcsound.csoundSetInputChannelCallback.argtypes = [c_void_p, CHANNELFUNC]
libcsound.csoundSetOutputChannelCallback.argtypes = [c_void_p, CHANNELFUNC]
libcsound.csoundSetPvsChannel.argtypes = [c_void_p, POINTER(PvsdatExt), c_char_p]
libcsound.csoundGetPvsChannel.argtypes = [c_void_p, POINTER(PvsdatExt), c_char_p]
libcsound.csoundScoreEvent.argtypes = [c_void_p, c_char, POINTER(MYFLT), c_long]
libcsound.csoundScoreEventAbsolute.argtypes = [c_void_p, c_char, POINTER(MYFLT), c_long, c_double]
libcsound.csoundInputMessage.argtypes = [c_void_p, c_char_p]
libcsound.csoundKillInstance.argtypes = [c_void_p, MYFLT, c_char_p, c_int, c_int]
SENSEFUNC = CFUNCTYPE(None, c_void_p, py_object)
libcsound.csoundRegisterSenseEventCallback.argtypes = [c_void_p, SENSEFUNC, py_object]
libcsound.csoundKeyPress.argtypes = [c_void_p, c_char]
KEYBOARDFUNC = CFUNCTYPE(c_int, py_object, c_void_p, c_uint)
libcsound.csoundRegisterKeyboardCallback.argtypes = [c_void_p, KEYBOARDFUNC, py_object, c_uint]
libcsound.csoundRemoveKeyboardCallback.argtypes = [c_void_p, KEYBOARDFUNC]
libcsound.csoundTableLength.argtypes = [c_void_p, c_int]
libcsound.csoundTableGet.restype = MYFLT
libcsound.csoundTableGet.argtypes = [c_void_p, c_int, c_int]
libcsound.csoundTableSet.argtypes = [c_void_p, c_int, c_int, MYFLT]
libcsound.csoundTableCopyOut.argtypes = [c_void_p, c_int, POINTER(MYFLT)]
libcsound.csoundTableCopyIn.argtypes = [c_void_p, c_int, POINTER(MYFLT)]
libcsound.csoundGetTable.argtypes = [c_void_p, POINTER(POINTER(MYFLT)), c_int]
libcsound.csoundGetTableArgs.argtypes = [c_void_p, POINTER(POINTER(MYFLT)), c_int]
def cstring(s):
    """
    Encode a str to UTF-8 bytes for passing to the C API.

    On Python 3 any non-None value is encoded; on Python 2 (or for
    None) the value is returned unchanged.

    Fix: use `is not None` instead of `!= None` (identity comparison is
    the correct idiom for None).
    """
    if sys.version_info[0] >= 3 and s is not None:
        return bytes(s, 'utf-8')
    return s
def pstring(s):
    """
    Decode UTF-8 bytes from the C API into a str.

    On Python 3 any non-None value is decoded; on Python 2 (or for
    None) the value is returned unchanged.

    Fixes: the version test was written `>= 3and` (missing space),
    relying on a tokenizer quirk that modern Python deprecates; also
    use `is not None` instead of `!= None`.
    """
    if sys.version_info[0] >= 3 and s is not None:
        return str(s, 'utf-8')
    return s
def csoundArgList(lst):
    # Convert a Python list of strings into an (argc, argv) pair suitable
    # for the csoundCompile*/csoundCompileArgs C entry points.
    argc = len(lst)
    # Array of argc char** slots, zero-initialized by ctypes.
    argv = (POINTER(c_char_p) * argc)()
    for i in range(argc):
        v = cstring(lst[i])  # encode to bytes on Python 3
        # Copy the string into C-owned storage and store its address.
        argv[i] = cast(pointer(create_string_buffer(v)), POINTER(c_char_p))
    # argc as c_int and argv re-cast to char** for the C signature.
    return c_int(argc), cast(argv, POINTER(c_char_p))
# message types (only one can be specified)
CSOUNDMSG_DEFAULT = 0x0000 # standard message
CSOUNDMSG_ERROR = 0x1000 # error message (initerror, perferror, etc.)
CSOUNDMSG_ORCH = 0x2000 # orchestra opcodes (e.g. printks)
CSOUNDMSG_REALTIME = 0x3000 # for progress display and heartbeat characters
CSOUNDMSG_WARNING = 0x4000 # warning messages
# format attributes (colors etc.), use the bitwise OR of any of these:
CSOUNDMSG_FG_BLACK = 0x0100
CSOUNDMSG_FG_RED = 0x0101
CSOUNDMSG_FG_GREEN = 0x0102
CSOUNDMSG_FG_YELLOW = 0x0103
CSOUNDMSG_FG_BLUE = 0x0104
CSOUNDMSG_FG_MAGENTA = 0x0105
CSOUNDMSG_FG_CYAN = 0x0106
CSOUNDMSG_FG_WHITE = 0x0107
CSOUNDMSG_FG_BOLD = 0x0008
CSOUNDMSG_FG_UNDERLINE = 0x0080
CSOUNDMSG_BG_BLACK = 0x0200
CSOUNDMSG_BG_RED = 0x0210
CSOUNDMSG_BG_GREEN = 0x0220
CSOUNDMSG_BG_ORANGE = 0x0230
CSOUNDMSG_BG_BLUE = 0x0240
CSOUNDMSG_BG_MAGENTA = 0x0250
CSOUNDMSG_BG_CYAN = 0x0260
CSOUNDMSG_BG_GREY = 0x0270
CSOUNDMSG_TYPE_MASK = 0x7000
CSOUNDMSG_FG_COLOR_MASK = 0x0107
CSOUNDMSG_FG_ATTR_MASK = 0x0088
CSOUNDMSG_BG_COLOR_MASK = 0x0270
# ERROR DEFINITIONS
CSOUND_SUCCESS = 0 # Completed successfully.
CSOUND_ERROR = -1 # Unspecified failure.
CSOUND_INITIALIZATION = -2 # Failed during initialization.
CSOUND_PERFORMANCE = -3 # Failed during performance.
CSOUND_MEMORY = -4 # Failed to allocate requested memory.
CSOUND_SIGNAL = -5 # Termination requested by SIGINT or SIGTERM.
# Constants used by the bus interface (csoundGetChannelPtr() etc.).
CSOUND_CONTROL_CHANNEL = 1
CSOUND_AUDIO_CHANNEL = 2
CSOUND_STRING_CHANNEL = 3
CSOUND_PVS_CHANNEL = 4
CSOUND_VAR_CHANNEL = 5
CSOUND_CHANNEL_TYPE_MASK = 15
CSOUND_INPUT_CHANNEL = 16
CSOUND_OUTPUT_CHANNEL = 32
CSOUND_CONTROL_CHANNEL_NO_HINTS = 0
CSOUND_CONTROL_CHANNEL_INT = 1
CSOUND_CONTROL_CHANNEL_LIN = 2
CSOUND_CONTROL_CHANNEL_EXP = 3
class Csound:
# Instantiation
    def __init__(self, hostData=None):
        """Creates an instance of Csound.
        Get an opaque pointer that must be passed to most Csound API
        functions. The hostData parameter can be None, or it can be any
        sort of data; these data can be accessed from the Csound instance
        that is passed to callback routines.
        """
        # self.cs holds the opaque CSOUND* handle used by every wrapper call.
        self.cs = libcsound.csoundCreate(py_object(hostData))
    def __del__(self):
        """Destroys an instance of Csound."""
        # Invoked on garbage collection; releases the underlying CSOUND* handle.
        libcsound.csoundDestroy(self.cs)
    def version(self):
        """Returns the version number times 1000 (5.00.0 = 5000)."""
        # NOTE(review): no argtypes are declared for csoundGetVersion above,
        # and the C API's csoundGetVersion() takes no arguments — the extra
        # self.cs is presumably ignored under cdecl; confirm.
        return libcsound.csoundGetVersion(self.cs)
    def APIVersion(self):
        """Returns the API version number times 100 (1.00 = 100)."""
        # NOTE(review): as with version(), csoundGetAPIVersion() takes no
        # arguments in the C API — confirm the extra self.cs is harmless.
        return libcsound.csoundGetAPIVersion(self.cs)
#Performance
    def parseOrc(self, orc):
        """Parse the given orchestra from an ASCII string into a TREE.
        This can be called during performance to parse new code.
        """
        # Returns an opaque TREE pointer (restype c_void_p) for use with
        # compileTree()/deleteTree().
        return libcsound.csoundParseOrc(self.cs, cstring(orc))
    def compileTree(self, tree):
        """Compile the given TREE node into structs for Csound to use.
        This can be called during performance to compile a new TREE.
        """
        # tree: opaque pointer previously returned by parseOrc().
        return libcsound.csoundCompileTree(self.cs, tree)
    def deleteTree(self, tree):
        """Free the resources associated with the TREE tree.
        This function should be called whenever the TREE was
        created with parseOrc and memory can be deallocated.
        """
        # The pointer must not be used after this call.
        libcsound.csoundDeleteTree(self.cs, tree)
    def compileOrc(self, orc):
        """Parse, and compile the given orchestra from an ASCII string.
        Also evaluating any global space code (i-time only).
        This can be called during performance to compile a new orchestra.
        orc = "instr 1 \n a1 rand 0dbfs/4 \n out a1 \n"
        cs.compileOrc(orc)
        """
        # The orchestra text is UTF-8 encoded by cstring() on Python 3.
        return libcsound.csoundCompileOrc(self.cs, cstring(orc))
    def evalCode(self, code):
        """Parse and compile an orchestra given on an string.
        Evaluating any global space code (i-time only).
        On SUCCESS it returns a value passed to the
        'return' opcode in global space.
        code = "i1 = 2 + 2 \n return i1 \n"
        retval = cs.evalCode(code)
        """
        # Returns a Python float (restype MYFLT = c_double, declared above).
        return libcsound.csoundEvalCode(self.cs, cstring(code))
#def initializeCscore(insco, outsco):
    def compileArgs(self, *args):
        """Compile args.
        Read arguments, parse and compile an orchestra,
        read, process and load a score.
        """
        # The Python strings are marshalled into C argc/argv by csoundArgList().
        argc, argv = csoundArgList(args)
        return libcsound.csoundCompileArgs(self.cs, argc, argv)
    def start(self):
        """Prepares Csound for performance after compilation.
        Using one or more of the above functions.
        NB: this is called internally by compile_(), therefore
        it is only required if performance is started without
        a call to that function.
        """
        # Presumably returns a CSOUND_* status code (0 == CSOUND_SUCCESS) — confirm.
        return libcsound.csoundStart(self.cs)
    def compile_(self, *args):
        """Compile Csound input files (such as an orchestra and score).
        As directed by the supplied command-line arguments,
        but does not perform them. Returns a non-zero error code on failure.
        This function cannot be called during performance, and before a
        repeated call, reset() needs to be called.
        In this (host-driven) mode, the sequence of calls should be as follows:
        cs.compile_(args)
        while (cs.performBuffer() == 0)
            pass
        cs.cleanup()
        cs.reset()
        Calls start() internally.
        """
        # args are marshalled to C argc/argv; the trailing underscore avoids
        # shadowing the Python builtin `compile`.
        argc, argv = csoundArgList(args)
        return libcsound.csoundCompile(self.cs, argc, argv)
    def compileCsd(self, csd_filename):
        """Compile a Csound input file (.csd file).
        The input file includes command-line arguments, but does not
        perform the file. Return a non-zero error code on failure.
        In this (host-driven) mode, the sequence of calls should be
        as follows:
        cs.compileCsd(args)
        while (cs.performBuffer() == 0)
            pass
        cs.cleanup()
        cs.reset()
        NB: this function can be called during performance to
        replace or add new instruments and events.
        """
        # csd_filename: path to the .csd file, UTF-8 encoded by cstring().
        return libcsound.csoundCompileCsd(self.cs, cstring(csd_filename))
    def compileCsdText(self, csd_text):
        """Compile a Csound input file contained in a string of text.
        The string of text includes command-line arguments, orchestra, score,
        etc., but it is not performed. Returns a non-zero error code on failure.
        In this (host-driven) mode, the sequence of calls should be as follows:
        cs.compileCsdText(csd_text);
        while (cs.performBuffer() == 0)
            pass
        cs.cleanup()
        cs.reset()
        NB: a temporary file is created, the csd_text is written to the
        temporary file, and compileCsd is called with the name of the temporary
        file, which is deleted after compilation. Behavior may vary by platform.
        """
        # The whole CSD document is passed as one UTF-8 encoded string.
        return libcsound.csoundCompileCsdText(self.cs, cstring(csd_text))
    def perform(self):
        """Sense input events and performs audio output.
        This is done until the end of score is reached (positive return value),
        an error occurs (negative return value), or performance is stopped by
        calling stop() from another thread (zero return value).
        Note that compile_(), or compileOrc(), readScore(), start() must be
        called first.
        In the case of zero return value, perform() can be called again
        to continue the stopped performance. Otherwise, reset() should be
        called to clean up after the finished or failed performance.
        """
        # Blocks until the performance ends, fails, or is stopped.
        return libcsound.csoundPerform(self.cs)
def performKsmps(self):
"""Sense input events, and performs audio output.
This is done for one control sample worth (ksmps).
Note that compile_(), or compileOrc(), readScore(), start() must be
called first.
Returns False during performance, and True when performance is
finished. If called until it returns True, will perform an entire
score.
Enables external software to control the execution of Csound,
and to synchronize performance with audio input and output.
"""
return libcsound.csoundPerformKsmps(self.cs)
def performBuffer(self):
    """Perform Csound, sensing real-time and score events.

    Processing one buffer's worth (-b frames) of interleaved audio.
    Note that compile_ must be called first, then call
    outputBuffer() and inputBuffer() to get the pointer
    to csound's I/O buffers.
    Returns false during performance, and true when performance is finished.
    """
    # Bug fix: the Csound API function is csoundPerformBuffer; the
    # previous call to the nonexistent csoundBuffer raised AttributeError.
    return libcsound.csoundPerformBuffer(self.cs)
def stop(self):
    """Stop a perform() running in another thread.

    Note that it is not guaranteed that perform() has already stopped
    when this function returns.
    """
    return libcsound.csoundStop(self.cs)

def cleanup(self):
    """Print information and closes audio and MIDI devices.

    The information is about the end of a performance.
    Note: after calling cleanup(), the operation of the perform
    functions is undefined.
    """
    return libcsound.csoundCleanup(self.cs)

def reset(self):
    """Reset all internal memory and state.

    In preparation for a new performance.
    Enable external software to run successive Csound performances
    without reloading Csound. Implies cleanup(), unless already called.
    """
    return libcsound.csoundReset(self.cs)
# Attributes
def sr(self):
    """Return the number of audio sample frames per second."""
    return libcsound.csoundGetSr(self.cs)

def kr(self):
    """Return the number of control samples per second."""
    return libcsound.csoundGetKr(self.cs)

def ksmps(self):
    """Return the number of audio sample frames per control sample."""
    return libcsound.csoundGetKsmps(self.cs)

def nchnls(self):
    """Return the number of audio output channels.

    Set through the nchnls header variable in the csd file.
    """
    return libcsound.csoundGetNchnls(self.cs)

def nchnlsInput(self):
    """Return the number of audio input channels.

    Set through the nchnls_i header variable in the csd file. If this
    variable is not set, the value is taken from nchnls.
    """
    return libcsound.csoundGetNchnlsInput(self.cs)

def get0dBFS(self):
    """Return the 0dBFS level of the spin/spout buffers."""
    return libcsound.csoundGet0dBFS(self.cs)

def currentTimeSamples(self):
    """Return the current performance time in samples."""
    return libcsound.csoundGetCurrentTimeSamples(self.cs)

def sizeOfMYFLT(self):
    """Return the size of MYFLT in bytes."""
    # Library-wide property: no instance handle is passed.
    return libcsound.csoundGetSizeOfMYFLT()

def hostData(self):
    """Return host data."""
    return libcsound.csoundGetHostData(self.cs)

def setHostData(self, data):
    """Set host data."""
    # NOTE(review): py_object does not keep a Python reference alive on its
    # own; confirm the caller retains `data` for as long as Csound uses it.
    libcsound.csoundSetHostData(self.cs, py_object(data))
def setOption(self, option):
    """Set a single csound option (flag).

    Returns CSOUND_SUCCESS on success.
    NB: blank spaces are not allowed.
    """
    return libcsound.csoundSetOption(self.cs, cstring(option))

def setParams(self, params):
    """Configure Csound with a given set of parameters.

    These parameters are defined in the CsoundParams structure.
    They are the part of the OPARMS struct that are configurable through
    command line flags.
    The CsoundParams structure can be obtained using params().
    These options should only be changed before performance has started.
    """
    libcsound.csoundSetParams(self.cs, byref(params))

def params(self, params):
    """Get the current set of parameters from a CSOUND instance.

    These parameters are in a CsoundParams structure. See setParams().
        p = CsoundParams()
        cs.params(p)
    The structure passed in is filled in place.
    """
    libcsound.csoundGetParams(self.cs, byref(params))

def debug(self):
    """Return whether Csound is set to print debug messages.

    Those messages are sent through the DebugMsg() internal API function.
    """
    return libcsound.csoundGetDebug(self.cs) != 0

def setDebug(self, debug):
    """Set whether Csound prints debug messages.

    The debug argument must have value True or False.
    Those messaged come from the DebugMsg() internal API function.
    """
    libcsound.csoundSetDebug(self.cs, c_int(debug))
# General Input/Output
def outputName(self):
    """Return the output audio output name (-o)."""
    s = libcsound.csoundGetOutputName(self.cs)
    return pstring(s)

def setOutput(self, name, type_, format):
    """Set output destination, type and format.

    type_ can be one of "wav", "aiff", "au", "raw", "paf", "svx", "nist",
    "voc", "ircam", "w64", "mat4", "mat5", "pvf", "xi", "htk", "sds",
    "avr", "wavex", "sd2", "flac", "caf", "wve", "ogg", "mpc2k", "rf64",
    or NULL (use default or realtime IO).
    format can be one of "alaw", "schar", "uchar", "float", "double",
    "long", "short", "ulaw", "24bit", "vorbis", or NULL (use default or
    realtime IO).
    For RT audio, use device_id from CS_AUDIODEVICE for a given audio
    device.
    """
    n = cstring(name)
    t = cstring(type_)
    f = cstring(format)
    libcsound.csoundSetOutput(self.cs, n, t, f)

def setInput(self, name):
    """Set input source."""
    libcsound.csoundSetInput(self.cs, cstring(name))
def setMIDIInput(self, name):
    """Set MIDI input device name/number."""
    # Bug fix: the API symbol is csoundSetMIDIInput (all-caps MIDI, matching
    # csoundSetMIDIFileInput/csoundSetMIDIOutput below); the previous
    # csoundSetMidiInput spelling does not exist and raised AttributeError.
    libcsound.csoundSetMIDIInput(self.cs, cstring(name))
def setMIDIFileInput(self, name):
    """Set MIDI file input name."""
    libcsound.csoundSetMIDIFileInput(self.cs, cstring(name))

def setMIDIOutput(self, name):
    """Set MIDI output device name/number."""
    libcsound.csoundSetMIDIOutput(self.cs, cstring(name))

def setMIDIFileOutput(self, name):
    """Set MIDI file output name."""
    libcsound.csoundSetMIDIFileOutput(self.cs, cstring(name))

def setFileOpenCallback(self, function):
    """Set a callback for receiving notices whenever Csound opens a file.

    The callback is made after the file is successfully opened.
    The following information is passed to the callback:
        bytes  pathname of the file; either full or relative to current dir
        int    a file type code from the enumeration CSOUND_FILETYPES
        int    1 if Csound is writing the file, 0 if reading
        int    1 if a temporary file that Csound will delete; 0 if not
    Pass NULL to disable the callback.
    This callback is retained after a csoundReset() call.
    """
    # NOTE(review): the FILEOPENFUNC wrapper created here is not stored;
    # confirm a reference is retained somewhere so it is not garbage
    # collected while Csound may still invoke it.
    libcsound.csoundSetFileOpenCallback(self.cs, FILEOPENFUNC(function))
# Realtime Audio I/O
def setRTAudioModule(self, module):
    """Set the current RT audio module."""
    libcsound.csoundSetRTAudioModule(self.cs, cstring(module))
def module(self, number):
    """Retrieve a module name and type given a number.

    Type is "audio" or "midi". Modules are added to the list as csound
    loads them. Returns a (name, type, errcode) tuple; errcode is
    CSOUND_SUCCESS on success and CSOUND_ERROR if the module number was
    not found (in which case name and type are None).
        n = 0
        while True:
            name, type_, err = cs.module(n)
            if err == ctcsound.CSOUND_ERROR:
                break
            print("Module %d:%s (%s)\n" % (n, name, type_))
            n = n + 1
    """
    # csoundGetModule writes through these char** out-parameters.
    mod_name = pointer(c_char_p(cstring("dummy")))
    mod_type = pointer(c_char_p(cstring("dummy")))
    ret = libcsound.csoundGetModule(self.cs, number, mod_name, mod_type)
    if ret != CSOUND_ERROR:
        return (pstring(string_at(mod_name.contents)),
                pstring(string_at(mod_type.contents)),
                ret)
    return None, None, ret
def inputBufferSize(self):
    """Return the number of samples in Csound's input buffer."""
    return libcsound.csoundGetInputBufferSize(self.cs)

def outputBufferSize(self):
    """Return the number of samples in Csound's output buffer."""
    return libcsound.csoundGetOutputBufferSize(self.cs)

def inputBuffer(self):
    """Return the Csound audio input buffer as a numpy array.

    Enable external software to write audio into Csound before
    calling performBuffer.
    """
    buf = libcsound.csoundGetInputBuffer(self.cs)
    size = libcsound.csoundGetInputBufferSize(self.cs)
    arrayType = np.ctypeslib.ndpointer(MYFLT, 1, (size,), 'C_CONTIGUOUS')
    # NOTE(review): addressof(buf) takes the address of the ctypes object
    # itself; this relies on csoundGetInputBuffer's declared restype —
    # confirm against the library prototype declarations.
    p = cast(addressof(buf), arrayType)
    return np.ctypeslib.as_array(p)

def outputBuffer(self):
    """Return the Csound audio output buffer as a numpy array.

    Enable external software to read audio from Csound after
    calling performBuffer.
    """
    buf = libcsound.csoundGetOutputBuffer(self.cs)
    size = libcsound.csoundGetOutputBufferSize(self.cs)
    arrayType = np.ctypeslib.ndpointer(MYFLT, 1, (size,), 'C_CONTIGUOUS')
    p = cast(addressof(buf), arrayType)
    return np.ctypeslib.as_array(p)
def spin(self):
    """Return the Csound audio input working buffer (spin) as a numpy array.

    Enables external software to write audio into Csound before
    calling performKsmps.
    """
    buf = libcsound.csoundGetSpin(self.cs)
    # spin holds one control period of interleaved input audio.
    size = self.ksmps() * self.nchnlsInput()
    arrayType = np.ctypeslib.ndpointer(MYFLT, 1, (size,), 'C_CONTIGUOUS')
    p = cast(addressof(buf), arrayType)
    return np.ctypeslib.as_array(p)

def addSpinSample(self, frame, channel, sample):
    """Add the indicated sample into the audio input working buffer (spin).

    This only ever makes sense before calling performKsmps(). The frame
    and channel must be in bounds relative to ksmps and nchnlsInput.
    """
    libcsound.csoundAddSpinSample(self.cs, frame, channel, sample)

def spout(self):
    """Return the address of the Csound audio output working buffer (spout).

    Enable external software to read audio from Csound after
    calling performKsmps.
    """
    buf = libcsound.csoundGetSpout(self.cs)
    # spout holds one control period of interleaved output audio.
    size = self.ksmps() * self.nchnls()
    arrayType = np.ctypeslib.ndpointer(MYFLT, 1, (size,), 'C_CONTIGUOUS')
    p = cast(addressof(buf), arrayType)
    return np.ctypeslib.as_array(p)

def spoutSample(self, frame, channel):
    """Return one sample from the Csound audio output working buf (spout).

    Only ever makes sense after calling performKsmps(). The frame and
    channel must be in bounds relative to ksmps and nchnls.
    """
    return libcsound.csoundGetSpoutSample(self.cs, frame, channel)

def rtRecordUserData(self):
    """Return pointer to user data pointer for real time audio input."""
    return libcsound.csoundGetRtRecordUserData(self.cs)

def rtPlaydUserData(self):
    """Return pointer to user data pointer for real time audio output."""
    # NOTE(review): method name looks like a typo for rtPlayUserData;
    # kept as-is here since external callers may rely on it.
    return libcsound.csoundGetRtPlayUserData(self.cs)
def setHostImplementedAudioIO(self, state, bufSize):
    """Set user handling of sound I/O.

    Calling this function with a True 'state' value between creation of
    the Csound object and the start of performance will disable all default
    handling of sound I/O by the Csound library, allowing the host
    application to use the spin/spout/input/output buffers directly.
    If 'bufSize' is greater than zero, the buffer size (-b) will be
    set to the integer multiple of ksmps that is nearest to the value
    specified.
    """
    libcsound.csoundSetHostImplementedAudioIO(self.cs, c_int(state), bufSize)
def audioDevList(self, isOutput):
    """Return a list of available input or output audio devices.

    Each item in the list is a dictionnary representing a device. The
    dictionnary keys are "device_name", "device_id", "rt_module" (value
    type string), "max_nchnls" (value type int), and "isOutput" (value
    type boolean). Must be called after an orchestra has been compiled
    to get meaningful information.
    """
    # First call with a NULL list pointer just counts the devices.
    count = libcsound.csoundGetAudioDevList(self.cs, None, c_int(isOutput))
    devices = (CsoundAudioDevice * count)()
    libcsound.csoundGetAudioDevList(self.cs, byref(devices), c_int(isOutput))
    return [
        {
            "device_name": pstring(device.device_name),
            "device_id": pstring(device.device_id),
            "rt_module": pstring(device.rt_module),
            "max_nchnls": device.max_nchnls,
            "isOutput": device.isOutput == 1,
        }
        for device in devices
    ]
def setPlayOpenCallback(self, function):
    """Set a callback for opening real-time audio playback."""
    libcsound.csoundSetPlayopenCallback(self.cs, PLAYOPENFUNC(function))

def setRtPlayCallback(self, function):
    """Set a callback for performing real-time audio playback."""
    libcsound.csoundSetRtplayCallback(self.cs, RTPLAYFUNC(function))

def setRecordOpenCallback(self, function):
    """Set a callback for opening real-time audio recording."""
    libcsound.csoundSetRecopenCallback(self.cs, RECORDOPENFUNC(function))

def setRtRecordCallback(self, function):
    """Set a callback for performing real-time audio recording."""
    libcsound.csoundSetRtrecordCallback(self.cs, RTRECORDFUNC(function))

def setRtCloseCallback(self, function):
    """Set a callback for closing real-time audio playback and recording."""
    libcsound.csoundSetRtcloseCallback(self.cs, RTCLOSEFUNC(function))

def setAudioDevListCallback(self, function):
    """Set a callback for obtaining a list of audio devices.

    This should be set by rtaudio modules and should not be set by hosts.
    (See audioDevList()).
    """
    libcsound.csoundSetAudioDeviceListCallback(self.cs, AUDIODEVLISTFUNC(function))
# Realtime MIDI I/O
def setMIDIModule(self, module):
    """Sets the current MIDI IO module."""
    libcsound.csoundSetMIDIModule(self.cs, cstring(module))

def setHostImplementedMIDIIO(self, state):
    """Called with state True if the host is implementing via callbacks."""
    libcsound.csoundSetHostImplementedMIDIIO(self.cs, c_int(state))
def midiDevList(self, isOutput):
    """Return a list of available input or output midi devices.

    Each item in the list is a dictionnary representing a device. The
    dictionnary keys are "device_name", "interface_name", "device_id",
    "midi_module" (value type string), "isOutput" (value type boolean).
    Must be called after an orchestra has been compiled
    to get meaningful information.
    """
    # First call with a NULL list pointer just counts the devices.
    n = libcsound.csoundGetMIDIDevList(self.cs, None, c_int(isOutput))
    # Bug fix: struct type is CsoundMidiDevice (capitalized, matching
    # CsoundAudioDevice used in audioDevList above).
    devs = (CsoundMidiDevice * n)()
    libcsound.csoundGetMIDIDevList(self.cs, byref(devs), c_int(isOutput))
    lst = []
    for dev in devs:
        d = {}
        d["device_name"] = pstring(dev.device_name)
        # Bug fix: the field is interface_name; "max_nchnlsinterface_name"
        # was a copy-paste concatenation typo and raised AttributeError.
        d["interface_name"] = pstring(dev.interface_name)
        d["device_id"] = pstring(dev.device_id)
        d["midi_module"] = pstring(dev.midi_module)
        d["isOutput"] = (dev.isOutput == 1)
        lst.append(d)
    return lst
def setExternalMidiInOpenCallback(self, function):
    """Set a callback for opening real-time MIDI input."""
    libcsound.csoundSetExternalMidiInOpenCallback(self.cs, MIDIINOPENFUNC(function))

def setExternalMidiReadCallback(self, function):
    """Set a callback for reading from real time MIDI input."""
    libcsound.csoundSetExternalMidiReadCallback(self.cs, MIDIREADFUNC(function))

def setExternalMidiInCloseCallback(self, function):
    """Set a callback for closing real time MIDI input."""
    libcsound.csoundSetExternalMidiInCloseCallback(self.cs, MIDIINCLOSEFUNC(function))

def setExternalMidiOutOpenCallback(self, function):
    """Set a callback for opening real-time MIDI output."""
    libcsound.csoundSetExternalMidiOutOpenCallback(self.cs, MIDIOUTOPENFUNC(function))

def setExternalMidiWriteCallback(self, function):
    """Set a callback for writing to real time MIDI output."""
    libcsound.csoundSetExternalMidiWriteCallback(self.cs, MIDIWRITEFUNC(function))

def setExternalMidiOutCloseCallback(self, function):
    """Set a callback for closing real time MIDI output."""
    libcsound.csoundSetExternalMidiOutCloseCallback(self.cs, MIDIOUTCLOSEFUNC(function))

def setExternalMidiErrorStringCallback(self, function):
    """Set a callback for converting MIDI error codes to strings."""
    libcsound.csoundSetExternalMidiErrorStringCallback(self.cs, MIDIERRORFUNC(function))

def setMidiDevListCallback(self, function):
    """Set a callback for obtaining a list of MIDI devices.

    This should be set by IO plugins and should not be set by hosts.
    (See midiDevList()).
    """
    libcsound.csoundSetMIDIDeviceListCallback(self.cs, MIDIDEVLISTFUNC(function))
# Score Handling
def readScore(self, sco):
    """Read, preprocess, and load a score from an ASCII string.

    It can be called repeatedly, with the new score events
    being added to the currently scheduled ones.
    """
    return libcsound.csoundReadScore(self.cs, cstring(sco))

def scoreTime(self):
    """Returns the current score time.

    The return value is the time in seconds since the beginning of
    performance.
    """
    return libcsound.csoundGetScoreTime(self.cs)

def isScorePending(self):
    """Tell whether Csound score events are performed or not.

    Independently of real-time MIDI events (see setScorePending()).
    """
    return libcsound.csoundIsScorePending(self.cs)

def setScorePending(self, pending):
    """Set whether Csound score events are performed or not.

    Real-time events will continue to be performed. Can be used by external
    software, such as a VST host, to turn off performance of score events
    (while continuing to perform real-time events), for example to mute
    a Csound score while working on other tracks of a piece, or to play
    the Csound instruments live.
    """
    libcsound.csoundSetScorePending(self.cs, c_int(pending))

def scoreOffsetSeconds(self):
    """Return the score time beginning midway through a Csound score.

    At this time score events will actually immediately be performed
    (see setScoreOffsetSeconds()).
    """
    return libcsound.csoundGetScoreOffsetSeconds(self.cs)

def setScoreOffsetSeconds(self, time):
    """Csound score events prior to the specified time are not performed.

    Performance begins immediately at the specified time (real-time events
    will continue to be performed as they are received). Can be used by
    external software, such as a VST host, to begin score performance
    midway through a Csound score, for example to repeat a loop in a
    sequencer, or to synchronize other events with the Csound score.
    """
    libcsound.csoundSetScoreOffsetSeconds(self.cs, MYFLT(time))

def rewindScore(self):
    """Rewinds a compiled Csound score.

    It is rewinded to the time specified with setScoreOffsetSeconds().
    """
    libcsound.csoundRewindScore(self.cs)

def setCscoreCallback(self, function):
    """Set an external callback for Cscore processing.

    Pass None to reset to the internal cscore() function (which does
    nothing). This callback is retained after a reset() call.
    """
    libcsound.csoundSetCscoreCallback(self.cs, CSCOREFUNC(function))

# Not wrapped yet:
#def scoreSort(self, inFile, outFile):
#def scoreExtract(self, inFile, outFile, extractFile)

# Messages and Text
def message(self, fmt, *args):
    """Display an informational message.

    This is a workaround because ctypes does not support variadic functions.
    The arguments are formatted in a string, using the python way, either
    old style (fmt starting with anything else) or new style (fmt starting
    with '{'), and then this formatted string is passed to the csound
    display message system.
    """
    # Robustness fix: startswith() is safe on an empty format string,
    # whereas fmt[0] raised IndexError.
    if fmt.startswith('{'):
        s = fmt.format(*args)
    else:
        s = fmt % args
    # Pass a literal "%s" format so csound never interprets % in the text.
    libcsound.csoundMessage(self.cs, cstring("%s"), cstring(s))
def messageS(self, attr, fmt, *args):
    """Print message with special attributes.

    (See msg_attr for the list of available attributes). With attr=0,
    messageS() is identical to message().
    This is a workaround because ctypes does not support variadic functions.
    The arguments are formatted in a string, using the python way, either
    old style or new style, and then this formatted string is passed to
    the csound display message system.
    """
    # Robustness fix: startswith() is safe on an empty format string,
    # whereas fmt[0] raised IndexError.
    if fmt.startswith('{'):
        s = fmt.format(*args)
    else:
        s = fmt % args
    # Pass a literal "%s" format so csound never interprets % in the text.
    libcsound.csoundMessageS(self.cs, attr, cstring("%s"), cstring(s))

# Not wrapped yet:
#def setDefaultMessageCallback():
#def setMessageCallback():
def setMessageLevel(self, messageLevel):
    """Set the Csound message level (from 0 to 231)."""
    libcsound.csoundSetMessageLevel(self.cs, messageLevel)

def createMessageBuffer(self, toStdOut):
    """Create a buffer for storing messages printed by Csound.

    Should be called after creating a Csound instance and the buffer
    can be freed by calling destroyMessageBuffer() before deleting the
    Csound instance. You will generally want to call cleanup() to make
    sure the last messages are flushed to the message buffer before
    destroying Csound.
    If 'toStdOut' is True, the messages are also printed to
    stdout and stderr (depending on the type of the message),
    in addition to being stored in the buffer.
    Using the message buffer ties up the internal message callback, so
    setMessageCallback should not be called after creating the
    message buffer.
    """
    libcsound.csoundCreateMessageBuffer(self.cs, c_int(toStdOut))

def firstMessage(self):
    """Return the first message from the buffer."""
    s = libcsound.csoundGetFirstMessage(self.cs)
    return pstring(s)

def firstMessageAttr(self):
    """Return the attribute parameter of the first message in the buffer."""
    return libcsound.csoundGetFirstMessageAttr(self.cs)

def popFirstMessage(self):
    """Remove the first message from the buffer."""
    libcsound.csoundPopFirstMessage(self.cs)

def messageCnt(self):
    """Return the number of pending messages in the buffer."""
    return libcsound.csoundGetMessageCnt(self.cs)

def destroyMessageBuffer(self):
    """Release all memory used by the message buffer."""
    libcsound.csoundDestroyMessageBuffer(self.cs)
# Channels, Control and Events
def channelPtr(self, name, type_):
    """Return a pointer to the specified channel and an error message.

    If the channel is a control or an audio channel, the pointer is
    translated to an ndarray of MYFLT. If the channel is a string channel,
    the pointer is casted to c_char_p. The error message is either an empty
    string or a string describing the error that occured.
    The channel is created first if it does not exist yet.
    'type_' must be the bitwise OR of exactly one of the following values,
        CSOUND_CONTROL_CHANNEL
            control data (one MYFLT value)
        CSOUND_AUDIO_CHANNEL
            audio data (csoundGetKsmps(csound) MYFLT values)
        CSOUND_STRING_CHANNEL
            string data (MYFLT values with enough space to store
            csoundGetChannelDatasize() characters, including the
            NULL character at the end of the string)
    and at least one of these:
        CSOUND_INPUT_CHANNEL
        CSOUND_OUTPUT_CHANNEL
    If the channel already exists, it must match the data type
    (control, audio, or string), however, the input/output bits are
    OR'd with the new value. Note that audio and string channels
    can only be created after calling csoundCompile(), because the
    storage size is not known until then.
    In the C API, return value is zero on success, or a negative error code,
        CSOUND_MEMORY  there is not enough memory for allocating the channel
        CSOUND_ERROR   the specified name or type is invalid
    or, if a channel with the same name but incompatible type
    already exists, the type of the existing channel. In the case
    of any non-zero return value, *p is set to NULL.
    Note: to find out the type of a channel without actually
    creating or changing it, set 'type' to zero, so that the return
    value will be either the type of the channel, or CSOUND_ERROR
    if it does not exist.
    Operations on the pointer are not thread-safe by default. The host is
    required to take care of threadsafety by retrieving the channel lock
    with channelLock() and using spinLock() and spinUnLock() to protect
    access to the pointer.
    See Top/threadsafe.c in the Csound library sources for
    examples. Optionally, use the channel get/set functions
    provided below, which are threadsafe by default.
    """
    # A control channel holds a single MYFLT; an audio channel holds one
    # control period (ksmps) of samples.
    length = 0
    chanType = type_ & CSOUND_CHANNEL_TYPE_MASK
    if chanType == CSOUND_CONTROL_CHANNEL:
        length = 1
    elif chanType == CSOUND_AUDIO_CHANNEL:
        length = libcsound.csoundGetKsmps(self.cs)
    # csoundGetChannelPtr rewrites this pointer to the channel's storage.
    ptr = pointer(MYFLT(0.0))
    err = ''
    ret = libcsound.csoundGetChannelPtr(self.cs, byref(ptr), cstring(name), type_)
    if ret == CSOUND_SUCCESS:
        if chanType == CSOUND_STRING_CHANNEL:
            return cast(ptr, c_char_p), err
        else:
            arrayType = np.ctypeslib.ndpointer(MYFLT, 1, (length,), 'C_CONTIGUOUS')
            # NOTE(review): addressof(ptr) is the address of the pointer
            # object itself, not its target — confirm this matches the
            # declared restype/argtypes for csoundGetChannelPtr.
            p = cast(addressof(ptr), arrayType)
            return np.ctypeslib.as_array(p), err
    elif ret == CSOUND_MEMORY:
        err = 'Not enough memory for allocating channel'
    elif ret == CSOUND_ERROR:
        err = 'The specified channel name or type is not valid'
    elif ret == CSOUND_CONTROL_CHANNEL:
        err = 'A control channel named {} already exists'.format(name)
    elif ret == CSOUND_AUDIO_CHANNEL:
        err = 'An audio channel named {} already exists'.format(name)
    elif ret == CSOUND_STRING_CHANNEL:
        err = 'A string channel named {} already exists'.format(name)
    else:
        err = 'Unknown error'
    return None, err
def listChannels(self):
    """Return a pointer and an error message.

    The pointer points to a list of ControlChannelInfo objects for allocated
    channels. A ControlChannelInfo object contains the channel
    characteristics. The error message indicates if there is not enough
    memory for allocating the list or it is an empty string if there is no
    error. In the case of no channels or an error, the pointer is None.
    Notes: the caller is responsible for freeing the list returned by the
    C API with deleteChannelList(). The name pointers may become invalid
    after calling reset().
    """
    cInfos = None
    err = ''
    ptr = cast(pointer(MYFLT(0.0)), POINTER(ControlChannelInfo))
    # Returns the number of channels, or a negative error code.
    n = libcsound.csoundListChannels(self.cs, byref(ptr))
    if n == CSOUND_MEMORY :
        err = 'There is not enough memory for allocating the list'
    if n > 0:
        ptr = cast(ptr, POINTER(ControlChannelInfo * n))
        cInfos = cast(ptr, POINTER(ControlChannelInfo * n)).contents
    return cInfos, err

def deleteChannelList(self, lst):
    """Release a channel list previously returned by listChannels()."""
    ptr = cast(lst, POINTER(ControlChannelInfo))
    libcsound.csoundDeleteChannelList(self.cs, ptr)
def setControlChannelHints(self, name, hints):
    """Set parameters hints for a control channel.

    These hints have no internal function but can be used by front ends to
    construct GUIs or to constrain values. See the ControlChannelHints
    structure for details.
    Returns zero on success, or a non-zero error code on failure:
        CSOUND_ERROR:  the channel does not exist, is not a control channel,
                       or the specified parameters are invalid
        CSOUND_MEMORY: could not allocate memory
    """
    return libcsound.csoundSetControlChannelHints(self.cs, cstring(name), hints)

def controlChannelHints(self, name):
    """Return special parameters (if any) of a control channel.

    Those parameters have been previously set with setControlChannelHints()
    or the chnparams opcode.
    The return values are a ControlChannelHints structure and CSOUND_SUCCESS
    if the channel exists and is a control channel, otherwise, None and an
    error code are returned.
    """
    hints = ControlChannelHints()
    ret = libcsound.csoundGetControlChannelHints(self.cs, cstring(name), byref(hints))
    if ret != CSOUND_SUCCESS:
        hints = None
    return hints, ret
def channelLock(self, name):
    """Recover a pointer to a lock for the specified channel called 'name'.

    The returned lock can be locked/unlocked with the spinLock() and
    spinUnLock() functions.
    Return the address of the lock or NULL if the channel does not exist.
    """
    return libcsound.csoundGetChannelLock(self.cs, cstring(name))

def controlChannel(self, name):
    """Retrieve the value of control channel identified by name.

    A second value is returned, which the error (or success) code
    finding or accessing the channel. Note that the error code is a
    ctypes c_int object; read its .value attribute for the number.
    """
    err = c_int(0)
    ret = libcsound.csoundGetControlChannel(self.cs, cstring(name), byref(err))
    return ret, err

def setControlChannel(self, name, val):
    """Set the value of control channel identified by name."""
    libcsound.csoundSetControlChannel(self.cs, cstring(name), MYFLT(val))
def audioChannel(self, name, samples):
    """Copy the audio channel identified by name into ndarray samples.

    samples should contain enough memory for ksmps MYFLTs.
    """
    # The ndarray's backing memory is written in place through this pointer.
    ptr = samples.ctypes.data_as(POINTER(MYFLT))
    libcsound.csoundGetAudioChannel(self.cs, cstring(name), ptr)

def setAudioChannel(self, name, samples):
    """Set the audio channel 'name' with data from ndarray 'samples'.

    'samples' should contain at least ksmps MYFLTs.
    """
    ptr = samples.ctypes.data_as(POINTER(MYFLT))
    libcsound.csoundSetAudioChannel(self.cs, cstring(name), ptr)

def stringChannel(self, name, string):
    """Copy the string channel identified by name into string.

    string should contain enough memory for the string
    (see channelDatasize() below).
    """
    libcsound.csoundGetStringChannel(self.cs, cstring(name), cstring(string))

def setStringChannel(self, name, string):
    """Set the string channel identified by name with string."""
    libcsound.csoundSetStringChannel(self.cs, cstring(name), cstring(string))

def channelDatasize(self, name):
    """Return the size of data stored in a channel.

    For string channels this might change if the channel space gets
    reallocated. Since string variables use dynamic memory allocation in
    Csound6, this function can be called to get the space required for
    stringChannel().
    """
    return libcsound.csoundGetChannelDatasize(self.cs, cstring(name))
def setInputChannelCallback(self, function):
    """Set the function to call whenever the invalue opcode is used."""
    libcsound.csoundSetInputChannelCallback(self.cs, CHANNELFUNC(function))

def setOutputChannelCallback(self, function):
    """Set the function to call whenever the outvalue opcode is used."""
    libcsound.csoundSetOutputChannelCallback(self.cs, CHANNELFUNC(function))

def setPvsChannel(self, fin, name):
    """Send a PvsdatExt fin to the pvsin opcode (f-rate) for channel 'name'.

    Return zero on success, CSOUND_ERROR if the index is invalid or
    fsig framesizes are incompatible.
    CSOUND_MEMORY if there is not enough memory to extend the bus.
    """
    return libcsound.csoundSetPvsChannel(self.cs, byref(fin), cstring(name))

def pvsChannel(self, fout, name):
    """Receive a PvsdatExt fout from the pvsout opcode (f-rate) at channel 'name'.

    Return zero on success, CSOUND_ERROR if the index is invalid or
    if fsig framesizes are incompatible.
    CSOUND_MEMORY if there is not enough memory to extend the bus.
    """
    return libcsound.csoundGetPvsChannel(self.cs, byref(fout), cstring(name))
def scoreEvent(self, type_, pFields):
    """Send a new score event.

    'type_' is the score event type ('a', 'i', 'q', 'f', or 'e').
    'pFields' is an ndarray of MYFLTs with all the pfields for this event,
    starting with the p1 value specified in pFields[0].
    """
    ptr = pFields.ctypes.data_as(POINTER(MYFLT))
    numFields = c_long(pFields.size)
    # NOTE(review): c_char expects a bytes value on Python 3 — confirm
    # callers pass e.g. b'i' rather than 'i'.
    return libcsound.csoundScoreEvent(self.cs, c_char(type_), ptr, numFields)

def scoreEventAbsolute(self, type_, pFields, timeOffset):
    """Like scoreEvent(), this function inserts a score event.

    The event is inserted at absolute time with respect to the start of
    performance, or from an offset set with timeOffset.
    """
    ptr = pFields.ctypes.data_as(POINTER(MYFLT))
    numFields = c_long(pFields.size)
    return libcsound.csoundScoreEventAbsolute(self.cs, c_char(type_), ptr, numFields, c_double(timeOffset))

def inputMessage(self, message):
    """Input a NULL-terminated string (as if from a console).

    Used for line events.
    """
    libcsound.csoundInputMessage(self.cs, cstring(message))
def killInstance(self, instr, instrName, mode, allowRelease):
    """Kills off one or more running instances of an instrument.

    The instances are identified by instr (number) or instrName (name).
    If instrName is None, the instrument number is used.
    Mode is a sum of the following values:
        0, 1, 2: kill all instances (0), oldest only (1), or newest (2)
        4: only turnoff notes with exactly matching (fractional) instr number
        8: only turnoff notes with indefinite duration (p3 < 0 or MIDI).
    If allowRelease is True, the killed instances are allowed to release.
    """
    # NOTE(review): assumes cstring() maps None to a NULL char* — confirm
    # against the cstring helper defined earlier in this file.
    return libcsound.csoundKillInstance(self.cs, MYFLT(instr), cstring(instrName), mode, c_int(allowRelease))

def registerSenseEventCallback(self, function, userData):
    """Register a function to be called by sensevents().

    This function will be called once in every control period. Any number
    of functions may be registered, and will be called in the order of
    registration.
    The callback function takes two arguments: the Csound instance
    pointer, and the userData pointer as passed to this function.
    This facility can be used to ensure a function is called synchronously
    before every csound control buffer processing. It is important
    to make sure no blocking operations are performed in the callback.
    The callbacks are cleared on cleanup().
    Return zero on success.
    """
    return libcsound.csoundRegisterSenseEventCallback(self.cs, SENSEFUNC(function), py_object(userData))
def kerPress(self, c):
    """Set the ASCII code of the most recent key pressed.

    This value is used by the 'sensekey' opcode if a callback for
    returning keyboard events is not set (see registerKeyboardCallback()).
    NOTE: the method name is a historical typo kept for backward
    compatibility; prefer the correctly spelled keyPress() alias below.
    """
    libcsound.csoundKeyPress(self.cs, c_char(c))

# Correctly spelled alias for kerPress(); existing callers keep working.
keyPress = kerPress
def registerKeyboardCallback(self, function, userData, type_):
    """Register a general purpose callback for keyboard events.

    The callback is called on every control period by the sensekey
    opcode.  It is preserved on reset(); multiple callbacks may be set
    and are called in reverse order of registration.  If the same
    function is registered again it is only moved to the front of the
    list and its user data and type mask are updated.  'type_' can be
    the bitwise OR of the callback types for which the function should
    be called, or zero for all types.

    Returns zero on success, CSOUND_ERROR if the specified function
    pointer or type mask is invalid, and CSOUND_MEMORY if there is not
    enough memory.

    The callback function takes (userData, p, type_) where p depends on
    the callback type (more types may be added in future versions):
      CSOUND_CALLBACK_KBD_EVENT
      CSOUND_CALLBACK_KBD_TEXT
        called by the sensekey opcode to fetch key codes.  p points to
        a single 'int' receiving the key code, in the range 1 to 65535,
        or 0 if there is no keyboard event.  For KBD_EVENT, both press
        and release events should be returned (release with 65536
        (0x10000) added to the key code) as unshifted ASCII codes.
        KBD_TEXT expects key press events only, as the actual text that
        is typed.
    The callback should return zero on success, negative on error, and
    positive if it was ignored (for example because the type is not
    known).
    """
    fn = KEYBOARDFUNC(function)
    # Bug fix: keep the CFUNCTYPE wrapper (and userData) referenced so
    # they are not garbage collected while Csound may still invoke them.
    if not hasattr(self, '_keyboardCbRefs'):
        self._keyboardCbRefs = []
    self._keyboardCbRefs.append((fn, userData))
    return libcsound.csoundRegisterKeyboardCallback(self.cs, fn, py_object(userData), c_uint(type_))
def removeKeyboardCallback(self, function):
    """Remove a callback previously set with registerKeyboardCallback()."""
    # NOTE(review): KEYBOARDFUNC(function) builds a fresh C thunk whose
    # address differs from the one created at registration time, so the
    # library may fail to match and remove the original entry -- confirm
    # against csoundRemoveKeyboardCallback's matching rules.
    libcsound.csoundRemoveKeyboardCallback(self.cs, KEYBOARDFUNC(function))
#Tables
def tableLength(self, table):
    """Length of function table 'table', excluding the guard point.

    Returns -1 if the table does not exist.
    """
    length = libcsound.csoundTableLength(self.cs, table)
    return length
def tableGet(self, table, index):
    """Read one slot of a function table.

    Both the table number and the index are assumed to be valid.
    """
    value = libcsound.csoundTableGet(self.cs, table, index)
    return value
def tableSet(self, table, index, value):
    """Write one slot of a function table.

    Both the table number and the index are assumed to be valid.
    """
    v = MYFLT(value)
    libcsound.csoundTableSet(self.cs, table, index, v)
def tableCopyOut(self, table, dest):
    """Copy a function table's contents into the supplied ndarray dest.

    The table number is assumed valid; dest must be large enough to
    receive the whole table.
    """
    destPtr = dest.ctypes.data_as(POINTER(MYFLT))
    libcsound.csoundTableCopyOut(self.cs, table, destPtr)
def tableCopyIn(self, table, src):
    """Copy the contents of ndarray src into a function table.

    The table number is assumed valid; the table must be large enough
    to receive the whole array.
    """
    srcPtr = src.ctypes.data_as(POINTER(MYFLT))
    libcsound.csoundTableCopyIn(self.cs, table, srcPtr)
def table(self, tableNum):
    """Return a pointer to function table 'tableNum' as an ndarray.

    The ndarray does not include the guard point.  If the table does
    not exist, None is returned.
    """
    # csoundGetTable() overwrites this pointer with the address of the
    # table data and returns the table size (negative if absent).
    ptr = pointer(MYFLT(0.0))
    size = libcsound.csoundGetTable(self.cs, byref(ptr), tableNum)
    if size < 0:
        return None
    # Presumably: casting the storage address of 'ptr' itself lets numpy
    # pick up the data address Csound wrote into it -- verify against
    # numpy.ctypeslib semantics before touching this.
    arrayType = np.ctypeslib.ndpointer(MYFLT, 1, (size,), 'C_CONTIGUOUS')
    p = cast(addressof(ptr), arrayType)
    return np.ctypeslib.as_array(p)
def tableArgs(self, tableNum):
    """Return a pointer to the args used to generate a function table.

    The pointer is returned as an ndarray.  If the table does not
    exist, None is returned.
    NB: the argument list starts with the GEN number and is followed by
    its parameters, e.g. f 1 0 1024 10 1 0.5 yields the list
    {10.0, 1.0, 0.5}.
    """
    # csoundGetTableArgs() overwrites this pointer with the address of
    # the argument list and returns its length (negative if absent).
    ptr = pointer(MYFLT(0.0))
    size = libcsound.csoundGetTableArgs(self.cs, byref(ptr), tableNum)
    if size < 0:
        return None
    # Same pointer-reinterpretation trick as table() -- see the note there.
    arrayType = np.ctypeslib.ndpointer(MYFLT, 1, (size,), 'C_CONTIGUOUS')
    p = cast(addressof(ptr), arrayType)
    return np.ctypeslib.as_array(p)
#Function table display
# Added Function table display and Opcodes sections
from ctypes import *
import numpy as np
import sys
# Load the platform-specific Csound shared library.  The 64-bit
# (double-precision) build is required, since MYFLT below is c_double.
if sys.platform.startswith('linux'):
    libcsound = CDLL("libcsound64.so")
elif sys.platform.startswith('win'):
    libcsound = cdll.libcsound64
elif sys.platform.startswith('darwin'):
    libcsound = CDLL("libcsound64.dylib")
else:
    sys.exit("Don't know your system! Exiting...")

# Csound's floating-point sample type (MYFLT) in the csound64 build.
MYFLT = c_double
class CsoundParams(Structure):
    """ctypes mirror of Csound's CSOUND_PARAMS struct.

    Used with setParams()/params(); these are the command-line
    configurable fields of OPARMS.
    """
    _fields_ = [("debug_mode", c_int),        # debug mode, 0 or 1
                ("buffer_frames", c_int),     # number of frames in in/out buffers
                ("hardware_buffer_frames", c_int), # ibid. hardware
                ("displays", c_int),          # graph displays, 0 or 1
                ("ascii_graphs", c_int),      # use ASCII graphs, 0 or 1
                ("postscript_graphs", c_int), # use postscript graphs, 0 or 1
                ("message_level", c_int),     # message printout control
                ("tempo", c_int),             # tempo ("sets Beatmode)
                ("ring_bell", c_int),         # bell, 0 or 1
                ("use_cscore", c_int),        # use cscore for processing
                ("terminate_on_midi", c_int), # terminate performance at the end
                                              # of midifile, 0 or 1
                ("heartbeat", c_int),         # print heart beat, 0 or 1
                ("defer_gen01_load", c_int),  # defer GEN01 load, 0 or 1
                ("midi_key", c_int),          # pfield to map midi key no
                ("midi_key_cps", c_int),      # pfield to map midi key no as cps
                ("midi_key_oct", c_int),      # pfield to map midi key no as oct
                ("midi_key_pch", c_int),      # pfield to map midi key no as pch
                ("midi_velocity", c_int),     # pfield to map midi velocity
                ("midi_velocity_amp", c_int), # pfield to map midi velocity as amplitude
                ("no_default_paths", c_int),  # disable relative paths from files, 0 or 1
                ("number_of_threads", c_int), # number of threads for multicore performance
                ("syntax_check_only", c_int), # do not compile, only check syntax
                ("csd_line_counts", c_int),   # csd line error reporting
                ("compute_weights", c_int),   # deprecated, kept for backwards comp.
                ("realtime_mode", c_int),     # use realtime priority mode, 0 or 1
                ("sample_accurate", c_int),   # use sample-level score event accuracy
                ("sample_rate_override", MYFLT),  # overriding sample rate
                ("control_rate_override", MYFLT), # overriding control rate
                ("nchnls_override", c_int),   # overriding number of out channels
                ("nchnls_i_override", c_int), # overriding number of in channels
                ("e0dbfs_override", MYFLT),   # overriding 0dbfs
                ("daemon", c_int),            # daemon mode
                ("ksmps_override", c_int)]    # ksmps override
# Fixed-size character buffer used by the device-description structs below.
string64 = c_char * 64
class CsoundAudioDevice(Structure):
    """ctypes mirror of Csound's CS_AUDIODEVICE (audio device description)."""
    _fields_ = [("device_name", string64),
                ("device_id", string64),
                ("rt_module", string64),
                ("max_nchnls", c_int),
                ("isOutput", c_int)]
class CsoundMidiDevice(Structure):
    """ctypes mirror of Csound's CS_MIDIDEVICE (MIDI device description)."""
    _fields_ = [("device_name", string64),
                ("interface_name", string64),
                ("device_id", string64),
                ("midi_module", string64),
                ("isOutput", c_int)]
class CsoundRtAudioParams(Structure):
    """Real-time audio parameters passed to the play/record open callbacks."""
    _fields_ = [("devName", c_char_p),   # device name (NULL/empty: default)
                ("devNum", c_int),       # device number (0-1023), 1024: default
                ("bufSamp_SW", c_uint),  # buffer fragment size (-b) in sample frames
                ("bufSamp_HW", c_int),   # total buffer size (-B) in sample frames
                ("nChannels", c_int),    # number of channels
                ("sampleFormat", c_int), # sample format (AE_SHORT etc.)
                ("sampleRate", c_float)] # sample rate in Hz
class OpcodeListEntry(Structure):
    """One entry of the opcode list returned by csoundNewOpcodeList()."""
    _fields_ = [("opname", c_char_p),
                ("outypes", c_char_p),
                ("intypes", c_char_p),
                ("flags", c_int)]
# PVSDATEXT is a variation on PVSDAT used in the pvs bus interface
class PvsdatExt(Structure):
    """Variation on PVSDAT used in the pvs bus interface."""
    _fields_ = [("N", c_int32),
                ("sliding", c_int),      # Flag to indicate sliding case
                ("NB", c_int32),
                ("overlap", c_int32),
                ("winsize", c_int32),
                ("wintype", c_int),
                ("format", c_int32),
                ("framecount", c_uint32),
                ("frame", POINTER(c_float))]
# This structure holds the parameter hints for control channels
class ControlChannelHints(Structure):
    """Parameter hints for control channels (CSOUND_CONTROL_CHANNEL)."""
    _fields_ = [("behav", c_int),
                ("dflt", MYFLT),
                ("min", MYFLT),
                ("max", MYFLT),
                ("x", c_int),
                ("y", c_int),
                ("width", c_int),
                ("height", c_int),
                # This member must be set explicitly to None if not used
                ("attributes", c_char_p)]
class ControlChannelInfo(Structure):
    """One entry of the channel list returned by csoundListChannels()."""
    _fields_ = [("name", c_char_p),
                ("type", c_int),
                ("hints", ControlChannelHints)]
# Maximum length in bytes of a Windat caption string.
CAPSIZE = 60
class Windat(Structure):
    """ctypes mirror of Csound's WINDAT struct, used by the graph callbacks."""
    _fields_ = [("windid", POINTER(c_uint)),    # set by makeGraph()
                ("fdata", POINTER(MYFLT)),      # data passed to drawGraph()
                ("npts", c_int32),              # size of above array
                ("caption", c_char * CAPSIZE),  # caption string for graph
                ("waitflg", c_int16 ),          # set =1 to wait for ms after Draw
                ("polarity", c_int16),          # controls positioning of X axis
                ("max", MYFLT),                 # workspace .. extrema this frame
                ("min", MYFLT),
                ("absmax", MYFLT),              # workspace .. largest of above
                ("oabsmax", MYFLT),             # Y axis scaling factor
                ("danflag", c_int),             # set to 1 for extra Yaxis mid span
                ("absflag", c_int)]             # set to 1 to skip abs check
# Symbols for Windat.polarity field
NOPOL = 0
NEGPOL = 1
POSPOL = 2
BIPOL = 3
class NamedGen(Structure):
    """Linked-list node describing a named GEN routine."""
    pass
# _fields_ is assigned after the class statement because the struct is
# self-referential ('next' points to another NamedGen).
NamedGen._fields_ = [
    ("name", c_char_p),
    ("genum", c_int),
    ("next", POINTER(NamedGen))]
# Prototype declarations for the raw libcsound entry points: setting
# restype/argtypes makes ctypes convert arguments and return values
# correctly (MYFLT returns in particular would be truncated to int
# without an explicit restype).

# Instantiation and compilation
libcsound.csoundCreate.restype = c_void_p
libcsound.csoundCreate.argtypes = [py_object]
libcsound.csoundDestroy.argtypes = [c_void_p]
libcsound.csoundParseOrc.restype = c_void_p
libcsound.csoundParseOrc.argtypes = [c_void_p, c_char_p]
libcsound.csoundCompileTree.argtypes = [c_void_p, c_void_p]
libcsound.csoundDeleteTree.argtypes = [c_void_p, c_void_p]
libcsound.csoundCompileOrc.argtypes = [c_void_p, c_char_p]
libcsound.csoundEvalCode.restype = MYFLT
libcsound.csoundEvalCode.argtypes = [c_void_p, c_char_p]
libcsound.csoundCompileArgs.argtypes = [c_void_p, c_int, POINTER(c_char_p)]
libcsound.csoundStart.argtypes = [c_void_p]
libcsound.csoundCompile.argtypes = [c_void_p, c_int, POINTER(c_char_p)]
libcsound.csoundCompileCsd.argtypes = [c_void_p, c_char_p]
libcsound.csoundCompileCsdText.argtypes = [c_void_p, c_char_p]
# Performance
libcsound.csoundPerform.argtypes = [c_void_p]
libcsound.csoundPerformKsmps.argtypes = [c_void_p]
libcsound.csoundPerformBuffer.argtypes = [c_void_p]
libcsound.csoundStop.argtypes = [c_void_p]
libcsound.csoundCleanup.argtypes = [c_void_p]
libcsound.csoundReset.argtypes = [c_void_p]
# Attributes
libcsound.csoundGetSr.restype = MYFLT
libcsound.csoundGetSr.argtypes = [c_void_p]
libcsound.csoundGetKr.restype = MYFLT
libcsound.csoundGetKr.argtypes = [c_void_p]
libcsound.csoundGetKsmps.restype = c_uint32
libcsound.csoundGetKsmps.argtypes = [c_void_p]
libcsound.csoundGetNchnls.restype = c_uint32
libcsound.csoundGetNchnls.argtypes = [c_void_p]
libcsound.csoundGetNchnlsInput.restype = c_uint32
libcsound.csoundGetNchnlsInput.argtypes = [c_void_p]
libcsound.csoundGet0dBFS.restype = MYFLT
libcsound.csoundGet0dBFS.argtypes = [c_void_p]
libcsound.csoundGetCurrentTimeSamples.restype = c_int64
libcsound.csoundGetCurrentTimeSamples.argtypes = [c_void_p]
libcsound.csoundGetHostData.restype = py_object
libcsound.csoundGetHostData.argtypes = [c_void_p]
libcsound.csoundSetHostData.argtypes = [c_void_p, py_object]
libcsound.csoundSetOption.argtypes = [c_void_p, c_char_p]
libcsound.csoundSetParams.argtypes = [c_void_p, POINTER(CsoundParams)]
libcsound.csoundGetParams.argtypes = [c_void_p, POINTER(CsoundParams)]
libcsound.csoundGetDebug.argtypes = [c_void_p]
libcsound.csoundSetDebug.argtypes = [c_void_p, c_int]
# General input/output
libcsound.csoundGetOutputName.restype = c_char_p
libcsound.csoundGetOutputName.argtypes = [c_void_p]
libcsound.csoundSetOutput.argtypes = [c_void_p, c_char_p, c_char_p, c_char_p]
libcsound.csoundSetInput.argtypes = [c_void_p, c_char_p]
libcsound.csoundSetMIDIInput.argtypes = [c_void_p, c_char_p]
libcsound.csoundSetMIDIFileInput.argtypes = [c_void_p, c_char_p]
libcsound.csoundSetMIDIOutput.argtypes = [c_void_p, c_char_p]
libcsound.csoundSetMIDIFileOutput.argtypes = [c_void_p, c_char_p]
# Callback signatures and prototypes for real-time audio and MIDI.
# Each CFUNCTYPE below defines the C signature of a host callback.
FILEOPENFUNC = CFUNCTYPE(None, c_void_p, c_char_p, c_int, c_int, c_int)
libcsound.csoundSetFileOpenCallback.argtypes = [c_void_p, FILEOPENFUNC]
libcsound.csoundSetRTAudioModule.argtypes = [c_void_p, c_char_p]
libcsound.csoundGetModule.argtypes = [c_void_p, c_int, POINTER(c_char_p), POINTER(c_char_p)]
libcsound.csoundGetInputBufferSize.restype = c_long
libcsound.csoundGetInputBufferSize.argtypes = [c_void_p]
libcsound.csoundGetOutputBufferSize.restype = c_long
libcsound.csoundGetOutputBufferSize.argtypes = [c_void_p]
libcsound.csoundGetInputBuffer.restype = POINTER(MYFLT)
libcsound.csoundGetInputBuffer.argtypes = [c_void_p]
libcsound.csoundGetOutputBuffer.restype = POINTER(MYFLT)
libcsound.csoundGetOutputBuffer.argtypes = [c_void_p]
libcsound.csoundGetSpin.restype = POINTER(MYFLT)
libcsound.csoundGetSpin.argtypes = [c_void_p]
libcsound.csoundAddSpinSample.argtypes = [c_void_p, c_int, c_int, MYFLT]
libcsound.csoundGetSpout.restype = POINTER(MYFLT)
libcsound.csoundGetSpout.argtypes = [c_void_p]
libcsound.csoundGetSpoutSample.restype = MYFLT
libcsound.csoundGetSpoutSample.argtypes = [c_void_p, c_int, c_int]
libcsound.csoundGetRtRecordUserData.restype = POINTER(c_void_p)
libcsound.csoundGetRtRecordUserData.argtypes = [c_void_p]
libcsound.csoundGetRtPlayUserData.restype = POINTER(c_void_p)
libcsound.csoundGetRtPlayUserData.argtypes = [c_void_p]
libcsound.csoundSetHostImplementedAudioIO.argtypes = [c_void_p, c_int, c_int]
libcsound.csoundGetAudioDevList.argtypes = [c_void_p, c_void_p, c_int]
PLAYOPENFUNC = CFUNCTYPE(c_int, c_void_p, POINTER(CsoundRtAudioParams))
libcsound.csoundSetPlayopenCallback.argtypes = [c_void_p, PLAYOPENFUNC]
RTPLAYFUNC = CFUNCTYPE(None, c_void_p, POINTER(MYFLT), c_int)
libcsound.csoundSetRtplayCallback.argtypes = [c_void_p, RTPLAYFUNC]
RECORDOPENFUNC = CFUNCTYPE(c_int, c_void_p, POINTER(CsoundRtAudioParams))
libcsound.csoundSetRecopenCallback.argtypes = [c_void_p, RECORDOPENFUNC]
RTRECORDFUNC = CFUNCTYPE(c_int, c_void_p, POINTER(MYFLT), c_int)
libcsound.csoundSetRtrecordCallback.argtypes = [c_void_p, RTRECORDFUNC]
RTCLOSEFUNC = CFUNCTYPE(None, c_void_p)
libcsound.csoundSetRtcloseCallback.argtypes = [c_void_p, RTCLOSEFUNC]
AUDIODEVLISTFUNC = CFUNCTYPE(c_int, c_void_p, POINTER(CsoundAudioDevice), c_int)
libcsound.csoundSetAudioDeviceListCallback.argtypes = [c_void_p, AUDIODEVLISTFUNC]
# MIDI
libcsound.csoundSetMIDIModule.argtypes = [c_void_p, c_char_p]
libcsound.csoundSetHostImplementedMIDIIO.argtypes = [c_void_p, c_int]
libcsound.csoundGetMIDIDevList.argtypes = [c_void_p, c_void_p, c_int]
MIDIINOPENFUNC = CFUNCTYPE(c_int, c_void_p, POINTER(c_void_p), c_char_p)
libcsound.csoundSetExternalMidiInOpenCallback.argtypes = [c_void_p, MIDIINOPENFUNC]
MIDIREADFUNC = CFUNCTYPE(c_int, c_void_p, c_void_p, c_char_p, c_int)
libcsound.csoundSetExternalMidiReadCallback.argtypes = [c_void_p, MIDIREADFUNC]
MIDIINCLOSEFUNC = CFUNCTYPE(c_int, c_void_p, c_void_p)
libcsound.csoundSetExternalMidiInCloseCallback.argtypes = [c_void_p, MIDIINCLOSEFUNC]
MIDIOUTOPENFUNC = CFUNCTYPE(c_int, c_void_p, POINTER(c_void_p), c_char_p)
libcsound.csoundSetExternalMidiOutOpenCallback.argtypes = [c_void_p, MIDIOUTOPENFUNC]
MIDIWRITEFUNC = CFUNCTYPE(c_int, c_void_p, c_void_p, c_char_p, c_int)
libcsound.csoundSetExternalMidiWriteCallback.argtypes = [c_void_p, MIDIWRITEFUNC]
MIDIOUTCLOSEFUNC = CFUNCTYPE(c_int, c_void_p, c_void_p)
libcsound.csoundSetExternalMidiOutCloseCallback.argtypes = [c_void_p, MIDIOUTCLOSEFUNC]
MIDIERRORFUNC = CFUNCTYPE(c_char_p, c_int)
libcsound.csoundSetExternalMidiErrorStringCallback.argtypes = [c_void_p, MIDIERRORFUNC]
MIDIDEVLISTFUNC = CFUNCTYPE(c_int, c_void_p, POINTER(CsoundMidiDevice), c_int)
libcsound.csoundSetMIDIDeviceListCallback.argtypes = [c_void_p, MIDIDEVLISTFUNC]
# Score handling
libcsound.csoundReadScore.argtypes = [c_void_p, c_char_p]
libcsound.csoundGetScoreTime.restype = c_double
libcsound.csoundGetScoreTime.argtypes = [c_void_p]
libcsound.csoundIsScorePending.argtypes = [c_void_p]
libcsound.csoundSetScorePending.argtypes = [c_void_p, c_int]
libcsound.csoundGetScoreOffsetSeconds.restype = MYFLT
libcsound.csoundGetScoreOffsetSeconds.argtypes = [c_void_p]
libcsound.csoundSetScoreOffsetSeconds.argtypes = [c_void_p, MYFLT]
libcsound.csoundRewindScore.argtypes = [c_void_p]
CSCOREFUNC = CFUNCTYPE(None, c_void_p)
libcsound.csoundSetCscoreCallback.argtypes = [c_void_p, CSCOREFUNC]
# Messages and text
libcsound.csoundMessage.argtypes = [c_void_p, c_char_p, c_char_p]
libcsound.csoundMessageS.argtypes = [c_void_p, c_int, c_char_p, c_char_p]
libcsound.csoundSetMessageLevel.argtypes = [c_void_p, c_int]
libcsound.csoundCreateMessageBuffer.argtypes = [c_void_p, c_int]
libcsound.csoundGetFirstMessage.restype = c_char_p
libcsound.csoundGetFirstMessage.argtypes = [c_void_p]
libcsound.csoundGetFirstMessageAttr.argtypes = [c_void_p]
libcsound.csoundPopFirstMessage.argtypes = [c_void_p]
libcsound.csoundGetMessageCnt.argtypes = [c_void_p]
libcsound.csoundDestroyMessageBuffer.argtypes = [c_void_p]
# Channels, controls and events
libcsound.csoundGetChannelPtr.argtypes = [c_void_p, POINTER(POINTER(MYFLT)), c_char_p, c_int]
libcsound.csoundListChannels.argtypes = [c_void_p, POINTER(POINTER(ControlChannelInfo))]
libcsound.csoundDeleteChannelList.argtypes = [c_void_p, POINTER(ControlChannelInfo)]
libcsound.csoundSetControlChannelHints.argtypes = [c_void_p, c_char_p, ControlChannelHints]
libcsound.csoundGetControlChannelHints.argtypes = [c_void_p, c_char_p, POINTER(ControlChannelHints)]
libcsound.csoundGetChannelLock.restype = POINTER(c_int)
libcsound.csoundGetChannelLock.argtypes = [c_void_p, c_char_p]
libcsound.csoundGetControlChannel.restype = MYFLT
libcsound.csoundGetControlChannel.argtypes = [c_void_p, c_char_p, POINTER(c_int)]
libcsound.csoundSetControlChannel.argtypes = [c_void_p, c_char_p, MYFLT]
libcsound.csoundGetAudioChannel.argtypes = [c_void_p, c_char_p, POINTER(c_int)]
libcsound.csoundSetAudioChannel.argtypes = [c_void_p, c_char_p, POINTER(c_int)]
libcsound.csoundGetStringChannel.argtypes = [c_void_p, c_char_p, c_char_p]
libcsound.csoundSetStringChannel.argtypes = [c_void_p, c_char_p, c_char_p]
libcsound.csoundGetChannelDatasize.argtypes = [c_void_p, c_char_p]
CHANNELFUNC = CFUNCTYPE(None, c_void_p, c_char_p, c_void_p, c_void_p)
libcsound.csoundSetInputChannelCallback.argtypes = [c_void_p, CHANNELFUNC]
libcsound.csoundSetOutputChannelCallback.argtypes = [c_void_p, CHANNELFUNC]
libcsound.csoundSetPvsChannel.argtypes = [c_void_p, POINTER(PvsdatExt), c_char_p]
libcsound.csoundGetPvsChannel.argtypes = [c_void_p, POINTER(PvsdatExt), c_char_p]
libcsound.csoundScoreEvent.argtypes = [c_void_p, c_char, POINTER(MYFLT), c_long]
libcsound.csoundScoreEventAbsolute.argtypes = [c_void_p, c_char, POINTER(MYFLT), c_long, c_double]
libcsound.csoundInputMessage.argtypes = [c_void_p, c_char_p]
libcsound.csoundKillInstance.argtypes = [c_void_p, MYFLT, c_char_p, c_int, c_int]
SENSEFUNC = CFUNCTYPE(None, c_void_p, py_object)
libcsound.csoundRegisterSenseEventCallback.argtypes = [c_void_p, SENSEFUNC, py_object]
libcsound.csoundKeyPress.argtypes = [c_void_p, c_char]
KEYBOARDFUNC = CFUNCTYPE(c_int, py_object, c_void_p, c_uint)
libcsound.csoundRegisterKeyboardCallback.argtypes = [c_void_p, KEYBOARDFUNC, py_object, c_uint]
libcsound.csoundRemoveKeyboardCallback.argtypes = [c_void_p, KEYBOARDFUNC]
# Tables
libcsound.csoundTableLength.argtypes = [c_void_p, c_int]
libcsound.csoundTableGet.restype = MYFLT
libcsound.csoundTableGet.argtypes = [c_void_p, c_int, c_int]
libcsound.csoundTableSet.argtypes = [c_void_p, c_int, c_int, MYFLT]
libcsound.csoundTableCopyOut.argtypes = [c_void_p, c_int, POINTER(MYFLT)]
libcsound.csoundTableCopyIn.argtypes = [c_void_p, c_int, POINTER(MYFLT)]
libcsound.csoundGetTable.argtypes = [c_void_p, POINTER(POINTER(MYFLT)), c_int]
libcsound.csoundGetTableArgs.argtypes = [c_void_p, POINTER(POINTER(MYFLT)), c_int]
# Function table display
libcsound.csoundSetIsGraphable.argtypes = [c_void_p, c_int]
MAKEGRAPHFUNC = CFUNCTYPE(None, c_void_p, POINTER(Windat), c_char_p)
libcsound.csoundSetMakeGraphCallback.argtypes = [c_void_p, MAKEGRAPHFUNC]
DRAWGRAPHFUNC = CFUNCTYPE(None, c_void_p, POINTER(Windat))
libcsound.csoundSetDrawGraphCallback.argtypes = [c_void_p, DRAWGRAPHFUNC]
KILLGRAPHFUNC = CFUNCTYPE(None, c_void_p, POINTER(Windat))
libcsound.csoundSetKillGraphCallback.argtypes = [c_void_p, KILLGRAPHFUNC]
EXITGRAPHFUNC = CFUNCTYPE(c_int, c_void_p)
libcsound.csoundSetExitGraphCallback.argtypes = [c_void_p, EXITGRAPHFUNC]
# Opcodes
libcsound.csoundGetNamedGens.restype = c_void_p
libcsound.csoundGetNamedGens.argtypes = [c_void_p]
libcsound.csoundNewOpcodeList.argtypes = [c_void_p, POINTER(POINTER(OpcodeListEntry))]
libcsound.csoundDisposeOpcodeList.argtypes = [c_void_p, POINTER(OpcodeListEntry)]
OPCODEFUNC = CFUNCTYPE(c_int, c_void_p, c_void_p)
libcsound.csoundAppendOpcode.argtypes = [c_void_p, c_char_p, c_int, c_int, c_int, \
    c_char_p, c_char_p, OPCODEFUNC, OPCODEFUNC, OPCODEFUNC]
def cstring(s):
    """Encode a Python string to UTF-8 bytes for passing to the C API.

    None passes through unchanged; on Python 2 the value is returned
    as-is (native str is already bytes there).
    """
    # Idiom fix: identity comparison with None ('is not'), not '!='.
    if sys.version_info[0] >= 3 and s is not None:
        return bytes(s, 'utf-8')
    return s
def pstring(s):
    """Decode UTF-8 bytes returned by the C API to a Python str.

    None passes through unchanged; on Python 2 the value is returned
    as-is.
    """
    # Bug fix: the condition was written '>= 3and', which is rejected as
    # a syntax error by modern Python; also use 'is not None'.
    if sys.version_info[0] >= 3 and s is not None:
        return str(s, 'utf-8')
    return s
def csoundArgList(lst):
    """Convert a sequence of Python strings to (argc, argv) for the C API."""
    argc = len(lst)
    argv = (POINTER(c_char_p) * argc)()
    for i in range(argc):
        v = cstring(lst[i])
        # NOTE(review): nothing keeps these string buffers referenced after
        # this function returns; the C side must copy the strings promptly
        # or they may be garbage collected -- confirm.
        argv[i] = cast(pointer(create_string_buffer(v)), POINTER(c_char_p))
    return c_int(argc), cast(argv, POINTER(c_char_p))
# message types (only one can be specified)
CSOUNDMSG_DEFAULT = 0x0000 # standard message
CSOUNDMSG_ERROR = 0x1000 # error message (initerror, perferror, etc.)
CSOUNDMSG_ORCH = 0x2000 # orchestra opcodes (e.g. printks)
CSOUNDMSG_REALTIME = 0x3000 # for progress display and heartbeat characters
CSOUNDMSG_WARNING = 0x4000 # warning messages
# format attributes (colors etc.), use the bitwise OR of any of these:
CSOUNDMSG_FG_BLACK = 0x0100
CSOUNDMSG_FG_RED = 0x0101
CSOUNDMSG_FG_GREEN = 0x0102
CSOUNDMSG_FG_YELLOW = 0x0103
CSOUNDMSG_FG_BLUE = 0x0104
CSOUNDMSG_FG_MAGENTA = 0x0105
CSOUNDMSG_FG_CYAN = 0x0106
CSOUNDMSG_FG_WHITE = 0x0107
CSOUNDMSG_FG_BOLD = 0x0008
CSOUNDMSG_FG_UNDERLINE = 0x0080
CSOUNDMSG_BG_BLACK = 0x0200
CSOUNDMSG_BG_RED = 0x0210
CSOUNDMSG_BG_GREEN = 0x0220
CSOUNDMSG_BG_ORANGE = 0x0230
CSOUNDMSG_BG_BLUE = 0x0240
CSOUNDMSG_BG_MAGENTA = 0x0250
CSOUNDMSG_BG_CYAN = 0x0260
CSOUNDMSG_BG_GREY = 0x0270
# masks for extracting the message type / color / attribute bits
CSOUNDMSG_TYPE_MASK = 0x7000
CSOUNDMSG_FG_COLOR_MASK = 0x0107
CSOUNDMSG_FG_ATTR_MASK = 0x0088
CSOUNDMSG_BG_COLOR_MASK = 0x0270
# ERROR DEFINITIONS
CSOUND_SUCCESS = 0 # Completed successfully.
CSOUND_ERROR = -1 # Unspecified failure.
CSOUND_INITIALIZATION = -2 # Failed during initialization.
CSOUND_PERFORMANCE = -3 # Failed during performance.
CSOUND_MEMORY = -4 # Failed to allocate requested memory.
CSOUND_SIGNAL = -5 # Termination requested by SIGINT or SIGTERM.
# Constants used by the bus interface (csoundGetChannelPtr() etc.).
CSOUND_CONTROL_CHANNEL = 1
CSOUND_AUDIO_CHANNEL = 2
CSOUND_STRING_CHANNEL = 3
CSOUND_PVS_CHANNEL = 4
CSOUND_VAR_CHANNEL = 5
CSOUND_CHANNEL_TYPE_MASK = 15
CSOUND_INPUT_CHANNEL = 16
CSOUND_OUTPUT_CHANNEL = 32
# control channel hint behaviors (ControlChannelHints.behav)
CSOUND_CONTROL_CHANNEL_NO_HINTS = 0
CSOUND_CONTROL_CHANNEL_INT = 1
CSOUND_CONTROL_CHANNEL_LIN = 2
CSOUND_CONTROL_CHANNEL_EXP = 3
class Csound:
# Instantiation
def __init__(self, hostData=None):
    """Creates an instance of Csound.

    Get an opaque pointer that must be passed to most Csound API
    functions.  The hostData parameter can be None, or it can be any
    sort of data; these data can be accessed from the Csound instance
    that is passed to callback routines.
    """
    # self.cs holds the opaque CSOUND* handed to every libcsound call.
    self.cs = libcsound.csoundCreate(py_object(hostData))
def __del__(self):
    """Destroys an instance of Csound."""
    # NOTE(review): if __init__ raised before self.cs was assigned, this
    # would raise AttributeError during garbage collection -- confirm.
    libcsound.csoundDestroy(self.cs)
def version(self):
    """Returns the version number times 1000 (5.00.0 = 5000)."""
    # Fix: csoundGetVersion() takes no arguments in the Csound C API;
    # the instance pointer must not be passed.
    return libcsound.csoundGetVersion()
def APIVersion(self):
    """Returns the API version number times 100 (1.00 = 100)."""
    # Fix: csoundGetAPIVersion() takes no arguments in the Csound C API;
    # the instance pointer must not be passed.
    return libcsound.csoundGetAPIVersion()
#Performance
def parseOrc(self, orc):
    """Parse an orchestra given as an ASCII string into a TREE.

    May be called during performance to parse new code.
    """
    code = cstring(orc)
    return libcsound.csoundParseOrc(self.cs, code)
def compileTree(self, tree):
    """Compile a TREE node (from parseOrc()) into structs for Csound.

    May be called during performance to compile a new TREE.
    """
    result = libcsound.csoundCompileTree(self.cs, tree)
    return result
def deleteTree(self, tree):
    """Free the resources associated with a TREE.

    Call this whenever a TREE created with parseOrc() is no longer
    needed and its memory can be deallocated.
    """
    libcsound.csoundDeleteTree(self.cs, tree)
def compileOrc(self, orc):
    """Parse and compile an orchestra given as an ASCII string.

    Global-space code is also evaluated (i-time only).  May be called
    during performance to compile a new orchestra, e.g.:
        orc = "instr 1 \n a1 rand 0dbfs/4 \n out a1 \n"
        cs.compileOrc(orc)
    """
    code = cstring(orc)
    return libcsound.csoundCompileOrc(self.cs, code)
def evalCode(self, code):
    """Parse and compile an orchestra given as a string, evaluating
    global space (i-time only).

    On success, returns the value passed to the 'return' opcode in
    global space, e.g.:
        code = "i1 = 2 + 2 \n return i1 \n"
        retval = cs.evalCode(code)
    """
    src = cstring(code)
    return libcsound.csoundEvalCode(self.cs, src)
#def initializeCscore(insco, outsco):
def compileArgs(self, *args):
    """Read arguments, parse and compile an orchestra, then read,
    process and load a score.
    """
    argCnt, argVec = csoundArgList(args)
    return libcsound.csoundCompileArgs(self.cs, argCnt, argVec)
def start(self):
    """Prepare Csound for performance after compilation.

    Called internally by compile_(), so it is only required when
    performance is started without a call to that function.
    """
    result = libcsound.csoundStart(self.cs)
    return result
def compile_(self, *args):
    """Compile Csound input files (such as an orchestra and score)
    as directed by the supplied command-line arguments, without
    performing them.

    Returns a non-zero error code on failure.  Cannot be called during
    performance; call reset() before a repeated call.  In this
    (host-driven) mode the sequence of calls should be:
        cs.compile_(args)
        while cs.performBuffer() == 0:
            pass
        cs.cleanup()
        cs.reset()
    Calls start() internally.
    """
    argCnt, argVec = csoundArgList(args)
    return libcsound.csoundCompile(self.cs, argCnt, argVec)
def compileCsd(self, csd_filename):
    """Compile a Csound input file (.csd), without performing it.

    The file includes its own command-line arguments.  Returns a
    non-zero error code on failure.  Host-driven usage is the same as
    for compile_():
        cs.compileCsd(args)
        while cs.performBuffer() == 0:
            pass
        cs.cleanup()
        cs.reset()
    NB: may be called during performance to replace or add new
    instruments and events.
    """
    path = cstring(csd_filename)
    return libcsound.csoundCompileCsd(self.cs, path)
def compileCsdText(self, csd_text):
    """Compile a Csound input file contained in a string of text.

    The text includes command-line arguments, orchestra, score, etc.,
    but is not performed.  Returns a non-zero error code on failure.
    Host-driven usage is the same as for compileCsd().
    NB: the text is written to a temporary file, compileCsd is called
    with that file's name, and the file is deleted after compilation;
    behavior may vary by platform.
    """
    text = cstring(csd_text)
    return libcsound.csoundCompileCsdText(self.cs, text)
def perform(self):
    """Sense input events and perform audio output until done.

    Runs until the end of score is reached (positive return value), an
    error occurs (negative return value), or the performance is stopped
    by calling stop() from another thread (zero return value).  On a
    zero return, perform() can be called again to resume; otherwise
    call reset() to clean up.  Note that compile_(), or compileOrc(),
    readScore(), start() must be called first.
    """
    result = libcsound.csoundPerform(self.cs)
    return result
def performKsmps(self):
    """Sense input events and perform one control sample worth (ksmps)
    of audio output.

    Note that compile_(), or compileOrc(), readScore(), start() must
    be called first.  Returns a false value during performance and a
    true value once the performance is finished; calling it until it
    returns true performs an entire score.  This lets external software
    control Csound's execution and synchronize with audio I/O.
    """
    finished = libcsound.csoundPerformKsmps(self.cs)
    return finished
def performBuffer(self):
    """Perform Csound, sensing real-time and score events.

    Processes one buffer's worth (-b frames) of interleaved audio.
    Note that compile_ must be called first; then call outputBuffer()
    and inputBuffer() to get the pointers to csound's I/O buffers.
    Returns false during performance, and true when performance is
    finished.
    """
    # Bug fix: the library symbol is csoundPerformBuffer (see the
    # prototype declarations); 'csoundBuffer' does not exist and would
    # raise AttributeError at call time.
    return libcsound.csoundPerformBuffer(self.cs)
def stop(self):
    """Stop a perform() running in another thread.

    There is no guarantee that perform() has already stopped when this
    function returns.
    """
    result = libcsound.csoundStop(self.cs)
    return result
def cleanup(self):
    """Print end-of-performance information and close audio/MIDI devices.

    Note: after cleanup(), the behavior of the perform functions is
    undefined.
    """
    result = libcsound.csoundCleanup(self.cs)
    return result
def reset(self):
    """Reset all internal memory and state for a new performance.

    Lets the host run successive Csound performances without reloading
    Csound.  Implies cleanup(), unless already called.
    """
    result = libcsound.csoundReset(self.cs)
    return result
# Attributes
def sr(self):
    """Sampling rate: audio sample frames per second."""
    rate = libcsound.csoundGetSr(self.cs)
    return rate
def kr(self):
    """Control rate: control samples per second."""
    rate = libcsound.csoundGetKr(self.cs)
    return rate
def ksmps(self):
    """Number of audio sample frames per control sample."""
    count = libcsound.csoundGetKsmps(self.cs)
    return count
def nchnls(self):
    """Number of audio output channels.

    Set through the nchnls header variable in the csd file.
    """
    n = libcsound.csoundGetNchnls(self.cs)
    return n
def nchnlsInput(self):
    """Number of audio input channels.

    Set through the nchnls_i header variable in the csd file; when that
    variable is not set, the value is taken from nchnls.
    """
    n = libcsound.csoundGetNchnlsInput(self.cs)
    return n
def get0dBFS(self):
    """0dBFS level of the spin/spout buffers."""
    level = libcsound.csoundGet0dBFS(self.cs)
    return level
def currentTimeSamples(self):
    """Current performance time, in samples."""
    t = libcsound.csoundGetCurrentTimeSamples(self.cs)
    return t
def sizeOfMYFLT(self):
    """Size of MYFLT in bytes."""
    nbytes = libcsound.csoundGetSizeOfMYFLT()
    return nbytes
def hostData(self):
    """The host data object attached to this instance."""
    data = libcsound.csoundGetHostData(self.cs)
    return data
def setHostData(self, data):
    """Attach a host data object to this instance."""
    wrapped = py_object(data)
    libcsound.csoundSetHostData(self.cs, wrapped)
def setOption(self, option):
    """Set a single csound option (flag); blank spaces are not allowed.

    Returns CSOUND_SUCCESS on success.
    """
    flag = cstring(option)
    return libcsound.csoundSetOption(self.cs, flag)
def setParams(self, params):
    """Configure Csound from a CsoundParams structure.

    The fields are the part of the OPARMS struct configurable through
    command-line flags; obtain the current values with params().  Only
    change these options before performance has started.
    """
    libcsound.csoundSetParams(self.cs, byref(params))
def params(self, params):
    """Fill a CsoundParams structure with this instance's current
    parameter set.  See setParams().  Example:
        p = CsoundParams()
        cs.params(p)
    """
    libcsound.csoundGetParams(self.cs, byref(params))
def debug(self):
    """Tell whether Csound is set to print debug messages.

    Those messages are sent through the DebugMsg() internal API function.
    """
    return bool(libcsound.csoundGetDebug(self.cs))
def setDebug(self, debug):
    """Enable or disable Csound debug messages.

    'debug' must be True or False; the messages come from the DebugMsg()
    internal API function.
    """
    flag = c_int(debug)
    libcsound.csoundSetDebug(self.cs, flag)
# General Input/Output
def outputName(self):
    """Return the audio output name (-o)."""
    return pstring(libcsound.csoundGetOutputName(self.cs))
def setOutput(self, name, type_, format):
    """Set output destination, type and format.

    type_ can be one of "wav", "aiff", "au", "raw", "paf", "svx", "nist",
    "voc", "ircam", "w64", "mat4", "mat5", "pvf", "xi", "htk", "sds",
    "avr", "wavex", "sd2", "flac", "caf", "wve", "ogg", "mpc2k", "rf64",
    or NULL (use default or realtime IO).
    format can be one of "alaw", "schar", "uchar", "float", "double",
    "long", "short", "ulaw", "24bit", "vorbis", or NULL (use default or
    realtime IO).
    For RT audio, use device_id from CS_AUDIODEVICE for a given audio
    device.
    """
    libcsound.csoundSetOutput(
        self.cs, cstring(name), cstring(type_), cstring(format))
def setInput(self, name):
    """Select the audio input source."""
    src = cstring(name)
    libcsound.csoundSetInput(self.cs, src)
def setMIDIInput(self, name):
    """Select the MIDI input device by name or number."""
    dev = cstring(name)
    libcsound.csoundSetMidiInput(self.cs, dev)
def setMIDIFileInput(self, name):
    """Select the MIDI input file."""
    path = cstring(name)
    libcsound.csoundSetMIDIFileInput(self.cs, path)
def setMIDIOutput(self, name):
    """Select the MIDI output device by name or number."""
    dev = cstring(name)
    libcsound.csoundSetMIDIOutput(self.cs, dev)
def setMIDIFileOutput(self, name):
    """Select the MIDI output file."""
    path = cstring(name)
    libcsound.csoundSetMIDIFileOutput(self.cs, path)
def setFileOpenCallback(self, function):
    """Set a callback notified whenever Csound opens a file.

    The callback is made after the file is successfully opened and
    receives:
        bytes  pathname of the file, full or relative to current dir
        int    a file type code from the CSOUND_FILETYPES enumeration
        int    1 if Csound is writing the file, 0 if reading
        int    1 if a temporary file Csound will delete, 0 if not
    Pass NULL to disable the callback.
    This callback is retained after a csoundReset() call.
    """
    # NOTE(review): the FILEOPENFUNC wrapper is not stored anywhere, so
    # the host must keep 'function' alive itself — confirm against the
    # rest of the file's callback-retention convention.
    cb = FILEOPENFUNC(function)
    libcsound.csoundSetFileOpenCallback(self.cs, cb)
# Realtime Audio I/O
def setRTAudioModule(self, module):
    """Select the real-time audio module."""
    mod = cstring(module)
    libcsound.csoundSetRTAudioModule(self.cs, mod)
def module(self, number):
    """Retrieve a module name and type given a number.

    Type is "audio" or "midi". Modules are added to list as csound loads
    them. Return CSOUND_SUCCESS on success and CSOUND_ERROR if module
    number was not found.

        n = 0
        while True:
            name, type_, err = cs.module(n)
            if err == ctcsound.CSOUND_ERROR:
                break
            print("Module %d:%s (%s)\n" % (n, name, type_))
            n = n + 1
    """
    # The C API fills two char** out-parameters; seed each with a dummy
    # char* so there is valid storage for csoundGetModule to overwrite.
    name = pointer(c_char_p(cstring("dummy")))
    type_ = pointer(c_char_p(cstring("dummy")))
    err = libcsound.csoundGetModule(self.cs, number, name, type_)
    if err == CSOUND_ERROR:
        # Module number out of range: no strings to decode.
        return None, None, err
    # Decode the C strings the library wrote into the out-parameters.
    n = pstring(string_at(name.contents))
    t = pstring(string_at(type_.contents))
    return n, t, err
def inputBufferSize(self):
    """Return the number of samples in Csound's input buffer."""
    size = libcsound.csoundGetInputBufferSize(self.cs)
    return size
def outputBufferSize(self):
    """Return the number of samples in Csound's output buffer."""
    size = libcsound.csoundGetOutputBufferSize(self.cs)
    return size
def inputBuffer(self):
    """Return the Csound audio input buffer as a numpy array.

    Enable external software to write audio into Csound before
    calling performBuffer.
    The returned ndarray aliases Csound-owned memory: no copy is made,
    and the view is only valid while this Csound instance is alive.
    """
    buf = libcsound.csoundGetInputBuffer(self.cs)
    size = libcsound.csoundGetInputBufferSize(self.cs)
    # Build a 1-D MYFLT ndpointer type of exactly 'size' samples and
    # re-cast the raw buffer address into it, then wrap as an ndarray.
    arrayType = np.ctypeslib.ndpointer(MYFLT, 1, (size,), 'C_CONTIGUOUS')
    p = cast(addressof(buf), arrayType)
    return np.ctypeslib.as_array(p)
def outputBuffer(self):
    """Return the Csound audio output buffer as a numpy array.

    Enable external software to read audio from Csound after
    calling performBuffer.
    The returned ndarray aliases Csound-owned memory: no copy is made,
    and the view is only valid while this Csound instance is alive.
    """
    buf = libcsound.csoundGetOutputBuffer(self.cs)
    size = libcsound.csoundGetOutputBufferSize(self.cs)
    # Re-cast the raw buffer address as a fixed-size MYFLT array view.
    arrayType = np.ctypeslib.ndpointer(MYFLT, 1, (size,), 'C_CONTIGUOUS')
    p = cast(addressof(buf), arrayType)
    return np.ctypeslib.as_array(p)
def spin(self):
    """Return the Csound audio input working buffer (spin) as a numpy array.

    Enables external software to write audio into Csound before
    calling performKsmps.
    The buffer holds one control period of input audio:
    ksmps frames times nchnls_i channels.
    """
    buf = libcsound.csoundGetSpin(self.cs)
    # Interleaved buffer length = frames per k-cycle * input channels.
    size = self.ksmps() * self.nchnlsInput()
    arrayType = np.ctypeslib.ndpointer(MYFLT, 1, (size,), 'C_CONTIGUOUS')
    p = cast(addressof(buf), arrayType)
    return np.ctypeslib.as_array(p)
def addSpinSample(self, frame, channel, sample):
    """Accumulate one sample into the audio input working buffer (spin).

    Only meaningful before calling performKsmps(). 'frame' and 'channel'
    must be in bounds relative to ksmps and nchnlsInput.
    """
    libcsound.csoundAddSpinSample(self.cs, frame, channel, sample)
def spout(self):
    """Return the address of the Csound audio output working buffer (spout).

    Enable external software to read audio from Csound after
    calling performKsmps.
    The buffer holds one control period of output audio:
    ksmps frames times nchnls channels.
    """
    buf = libcsound.csoundGetSpout(self.cs)
    # Interleaved buffer length = frames per k-cycle * output channels.
    size = self.ksmps() * self.nchnls()
    arrayType = np.ctypeslib.ndpointer(MYFLT, 1, (size,), 'C_CONTIGUOUS')
    p = cast(addressof(buf), arrayType)
    return np.ctypeslib.as_array(p)
def spoutSample(self, frame, channel):
    """Fetch one sample from the audio output working buffer (spout).

    Only meaningful after calling performKsmps(). 'frame' and 'channel'
    must be in bounds relative to ksmps and nchnls.
    """
    return libcsound.csoundGetSpoutSample(self.cs, frame, channel)
def rtRecordUserData(self):
    """Return pointer to the user data pointer for real time audio input."""
    ptr = libcsound.csoundGetRtRecordUserData(self.cs)
    return ptr
def rtPlaydUserData(self):
    """Return pointer to the user data pointer for real time audio output.

    NOTE(review): the method name looks like a typo for rtPlayUserData,
    but it is kept as-is for backward compatibility with callers.
    """
    ptr = libcsound.csoundGetRtPlayUserData(self.cs)
    return ptr
def setHostImplementedAudioIO(self, state, bufSize):
    """Set user handling of sound I/O.

    Calling this with a True 'state' between creation of the Csound
    object and the start of performance disables all default handling of
    sound I/O by the Csound library, letting the host application use
    the spin/spout/input/output buffers directly.
    If 'bufSize' is greater than zero, the buffer size (-b) will be set
    to the integer multiple of ksmps nearest to the value specified.
    """
    flag = c_int(state)
    libcsound.csoundSetHostImplementedAudioIO(self.cs, flag, bufSize)
def audioDevList(self, isOutput):
    """Return a list of available input or output audio devices.

    Each item in the list is a dictionary representing a device. The
    dictionary keys are "device_name", "device_id", "rt_module" (value
    type string), "max_nchnls" (value type int), and "isOutput" (value
    type boolean). Must be called after an orchestra has been compiled
    to get meaningful information.
    """
    # First call with NULL only counts the devices; second call fills
    # the pre-allocated array of structures.
    n = libcsound.csoundGetAudioDevList(self.cs, None, c_int(isOutput))
    devs = (CsoundAudioDevice * n)()
    libcsound.csoundGetAudioDevList(self.cs, byref(devs), c_int(isOutput))
    lst = []
    for dev in devs:
        entry = {
            "device_name": pstring(dev.device_name),
            "device_id": pstring(dev.device_id),
            "rt_module": pstring(dev.rt_module),
            "max_nchnls": dev.max_nchnls,
            "isOutput": dev.isOutput == 1,
        }
        lst.append(entry)
    return lst
def setPlayOpenCallback(self, function):
    """Set a callback for opening real-time audio playback."""
    cb = PLAYOPENFUNC(function)
    libcsound.csoundSetPlayopenCallback(self.cs, cb)
def setRtPlayCallback(self, function):
    """Set a callback for performing real-time audio playback."""
    cb = RTPLAYFUNC(function)
    libcsound.csoundSetRtplayCallback(self.cs, cb)
def setRecordOpenCallback(self, function):
    """Set a callback for opening real-time audio recording."""
    cb = RECORDOPENFUNC(function)
    libcsound.csoundSetRecopenCallback(self.cs, cb)
def setRtRecordCallback(self, function):
    """Set a callback for performing real-time audio recording."""
    cb = RTRECORDFUNC(function)
    libcsound.csoundSetRtrecordCallback(self.cs, cb)
def setRtCloseCallback(self, function):
    """Set a callback for closing real-time audio playback and recording."""
    cb = RTCLOSEFUNC(function)
    libcsound.csoundSetRtcloseCallback(self.cs, cb)
def setAudioDevListCallback(self, function):
    """Set a callback for obtaining a list of audio devices.

    This should be set by rtaudio modules, not by hosts.
    (See audioDevList()).
    """
    cb = AUDIODEVLISTFUNC(function)
    libcsound.csoundSetAudioDeviceListCallback(self.cs, cb)
#Realtime MIDI I/O
def setMIDIModule(self, module):
    """Select the current MIDI IO module."""
    mod = cstring(module)
    libcsound.csoundSetMIDIModule(self.cs, mod)
def setHostImplementedMIDIIO(self, state):
    """Call with state True if the host implements MIDI via callbacks."""
    flag = c_int(state)
    libcsound.csoundSetHostImplementedMIDIIO(self.cs, flag)
def midiDevList(self, isOutput):
    """Return a list of available input or output midi devices.

    Each item in the list is a dictionary representing a device. The
    dictionary keys are "device_name", "interface_name", "device_id",
    "midi_module" (value type string), "isOutput" (value type boolean).
    Must be called after an orchestra has been compiled
    to get meaningful information.
    """
    # First call with NULL only counts the devices; second call fills
    # the pre-allocated array of structures.
    n = libcsound.csoundGetMIDIDevList(self.cs, None, c_int(isOutput))
    devs = (csoundMidiDevice * n)()
    libcsound.csoundGetMIDIDevList(self.cs, byref(devs), c_int(isOutput))
    lst = []
    for dev in devs:
        d = {}
        d["device_name"] = pstring(dev.device_name)
        # Bug fix: the struct field is 'interface_name'; the previous
        # 'max_nchnlsinterface_name' (copy-paste mangling from
        # audioDevList) raised AttributeError on every call.
        d["interface_name"] = pstring(dev.interface_name)
        d["device_id"] = pstring(dev.device_id)
        d["midi_module"] = pstring(dev.midi_module)
        d["isOutput"] = (dev.isOutput == 1)
        lst.append(d)
    return lst
def setExternalMidiInOpenCallback(self, function):
    """Set a callback for opening real-time MIDI input."""
    cb = MIDIINOPENFUNC(function)
    libcsound.csoundSetExternalMidiInOpenCallback(self.cs, cb)
def setExternalMidiReadCallback(self, function):
    """Set a callback for reading from real time MIDI input."""
    cb = MIDIREADFUNC(function)
    libcsound.csoundSetExternalMidiReadCallback(self.cs, cb)
def setExternalMidiInCloseCallback(self, function):
    """Set a callback for closing real time MIDI input."""
    cb = MIDIINCLOSEFUNC(function)
    libcsound.csoundSetExternalMidiInCloseCallback(self.cs, cb)
def setExternalMidiOutOpenCallback(self, function):
    """Set a callback for opening real-time MIDI output."""
    libcsound.csoundSetExternalMidiOutOpenCallback(self.cs, MIDIOUTOPENFUNC(function))
def setExternalMidiWriteCallback(self, function):
    """Set a callback for writing to real time MIDI output."""
    libcsound.csoundSetExternalMidiWriteCallback(self.cs, MIDIWRITEFUNC(function))
def setExternalMidiOutCloseCallback(self, function):
    """Set a callback for closing real time MIDI output."""
    libcsound.csoundSetExternalMidiOutCloseCallback(self.cs, MIDIOUTCLOSEFUNC(function))
def setExternalMidiErrorStringCallback(self, function):
    """Set a callback for converting MIDI error codes to strings."""
    cb = MIDIERRORFUNC(function)
    libcsound.csoundSetExternalMidiErrorStringCallback(self.cs, cb)
def setMidiDevListCallback(self, function):
    """Set a callback for obtaining a list of MIDI devices.

    This should be set by IO plugins, not by hosts.
    (See midiDevList()).
    """
    cb = MIDIDEVLISTFUNC(function)
    libcsound.csoundSetMIDIDeviceListCallback(self.cs, cb)
#Score Handling
def readScore(self, sco):
    """Read, preprocess, and load a score from an ASCII string.

    May be called repeatedly; new score events are added to the
    currently scheduled ones.
    """
    text = cstring(sco)
    return libcsound.csoundReadScore(self.cs, text)
def scoreTime(self):
    """Return the current score time.

    The value is the time in seconds since the beginning of performance.
    """
    t = libcsound.csoundGetScoreTime(self.cs)
    return t
def isScorePending(self):
    """Tell whether Csound score events are performed or not.

    Independently of real-time MIDI events (see setScorePending()).
    """
    pending = libcsound.csoundIsScorePending(self.cs)
    return pending
def setScorePending(self, pending):
    """Set whether Csound score events are performed or not.

    Real-time events will continue to be performed. Can be used by
    external software, such as a VST host, to mute a Csound score while
    working on other tracks of a piece, or to play the Csound
    instruments live, while continuing to perform real-time events.
    """
    flag = c_int(pending)
    libcsound.csoundSetScorePending(self.cs, flag)
def scoreOffsetSeconds(self):
    """Return the score time beginning midway through a Csound score.

    At this time score events will actually immediately be performed
    (see setScoreOffsetSeconds()).
    """
    offset = libcsound.csoundGetScoreOffsetSeconds(self.cs)
    return offset
def setScoreOffsetSeconds(self, time):
    """Skip Csound score events occurring before the specified time.

    Performance begins immediately at the specified time (real-time
    events are still performed as received). Can be used by external
    software, such as a VST host, to begin score performance midway
    through a Csound score, for example to repeat a loop in a sequencer,
    or to synchronize other events with the Csound score.
    """
    offset = MYFLT(time)
    libcsound.csoundSetScoreOffsetSeconds(self.cs, offset)
def rewindScore(self):
    """Rewind a compiled Csound score.

    The score is rewound to the time set with setScoreOffsetSeconds().
    """
    libcsound.csoundRewindScore(self.cs)
def setCscoreCallback(self, function):
    """Set an external callback for Cscore processing.

    Pass None to restore the internal cscore() function (which does
    nothing). This callback is retained after a reset() call.
    """
    cb = CSCOREFUNC(function)
    libcsound.csoundSetCscoreCallback(self.cs, cb)
#def scoreSort(self, inFile, outFile):
#def scoreExtract(self, inFile, outFile, extractFile)
#Messages and Text
def message(self, fmt, *args):
    """Display an informational message.

    This is a workaround because ctypes does not support variadic
    functions. The arguments are formatted into a string, Python style
    (either old '%' style or new '{}' style), and the result is passed
    to the Csound message system through a fixed "%s" C format.
    """
    # Robustness fix: fmt[0] raised IndexError on an empty format
    # string; startswith handles '' and is otherwise equivalent.
    if fmt.startswith('{'):
        s = fmt.format(*args)
    else:
        s = fmt % args
    libcsound.csoundMessage(self.cs, cstring("%s"), cstring(s))
def messageS(self, attr, fmt, *args):
    """Print message with special attributes.

    (See msg_attr for the list of available attributes). With attr=0,
    messageS() is identical to message().
    This is a workaround because ctypes does not support variadic
    functions. The arguments are formatted into a string, Python style
    (either old '%' style or new '{}' style), and the result is passed
    to the Csound message system through a fixed "%s" C format.
    """
    # Robustness fix: fmt[0] raised IndexError on an empty format
    # string; startswith handles '' and is otherwise equivalent.
    if fmt.startswith('{'):
        s = fmt.format(*args)
    else:
        s = fmt % args
    libcsound.csoundMessageS(self.cs, attr, cstring("%s"), cstring(s))
#def setDefaultMessageCallback():
#def setMessageCallback():
def setMessageLevel(self, messageLevel):
    """Set the Csound message level (from 0 to 231)."""
    libcsound.csoundSetMessageLevel(self.cs, messageLevel)
def createMessageBuffer(self, toStdOut):
    """Create a buffer for storing messages printed by Csound.

    Should be called after creating a Csound instance; the buffer can
    be freed by calling destroyMessageBuffer() before deleting the
    Csound instance. You will generally want to call cleanup() to make
    sure the last messages are flushed to the message buffer before
    destroying Csound.
    If 'toStdOut' is True, the messages are also printed to stdout and
    stderr (depending on the type of the message), in addition to being
    stored in the buffer.
    Using the message buffer ties up the internal message callback, so
    setMessageCallback should not be called after creating the
    message buffer.
    """
    flag = c_int(toStdOut)
    libcsound.csoundCreateMessageBuffer(self.cs, flag)
def firstMessage(self):
    """Return the first message from the buffer."""
    return pstring(libcsound.csoundGetFirstMessage(self.cs))
def firstMessageAttr(self):
    """Return the attribute parameter of the first message in the buffer."""
    attr = libcsound.csoundGetFirstMessageAttr(self.cs)
    return attr
def popFirstMessage(self):
    """Remove the first message from the buffer."""
    libcsound.csoundPopFirstMessage(self.cs)
def messageCnt(self):
    """Return the number of pending messages in the buffer."""
    count = libcsound.csoundGetMessageCnt(self.cs)
    return count
def destroyMessageBuffer(self):
    """Release all memory used by the message buffer."""
    libcsound.csoundDestroyMessageBuffer(self.cs)
#Channels, Control and Events
def channelPtr(self, name, type_):
    """Return a pointer to the specified channel and an error message.

    If the channel is a control or an audio channel, the pointer is
    translated to an ndarray of MYFLT. If the channel is a string channel,
    the pointer is casted to c_char_p. The error message is either an empty
    string or a string describing the error that occured.
    The channel is created first if it does not exist yet.
    'type_' must be the bitwise OR of exactly one of the following values,
      CSOUND_CONTROL_CHANNEL
        control data (one MYFLT value)
      CSOUND_AUDIO_CHANNEL
        audio data (csoundGetKsmps(csound) MYFLT values)
      CSOUND_STRING_CHANNEL
        string data (MYFLT values with enough space to store
        csoundGetChannelDatasize() characters, including the
        NULL character at the end of the string)
    and at least one of these:
      CSOUND_INPUT_CHANNEL
      CSOUND_OUTPUT_CHANNEL
    If the channel already exists, it must match the data type
    (control, audio, or string), however, the input/output bits are
    OR'd with the new value. Note that audio and string channels
    can only be created after calling csoundCompile(), because the
    storage size is not known until then.
    In the C API, return value is zero on success, or a negative error code,
      CSOUND_MEMORY there is not enough memory for allocating the channel
      CSOUND_ERROR the specified name or type is invalid
    or, if a channel with the same name but incompatible type
    already exists, the type of the existing channel. In the case
    of any non-zero return value, *p is set to NULL.
    Note: to find out the type of a channel without actually
    creating or changing it, set 'type' to zero, so that the return
    value will be either the type of the channel, or CSOUND_ERROR
    if it does not exist.
    Operations on the pointer are not thread-safe by default. The host is
    required to take care of threadsafety by retrieving the channel lock
    with channelLock() and using spinLock() and spinUnLock() to protect
    access to the pointer.
    See Top/threadsafe.c in the Csound library sources for
    examples. Optionally, use the channel get/set functions
    provided below, which are threadsafe by default.
    """
    # Work out how many MYFLTs the channel holds so the ndarray view can
    # be sized: 1 for control channels, ksmps for audio channels.
    length = 0
    chanType = type_ & CSOUND_CHANNEL_TYPE_MASK
    if chanType == CSOUND_CONTROL_CHANNEL:
        length = 1
    elif chanType == CSOUND_AUDIO_CHANNEL:
        length = libcsound.csoundGetKsmps(self.cs)
    # Out-parameter the C API fills with the channel's data pointer.
    ptr = pointer(MYFLT(0.0))
    err = ''
    ret = libcsound.csoundGetChannelPtr(self.cs, byref(ptr), cstring(name), type_)
    if ret == CSOUND_SUCCESS:
        if chanType == CSOUND_STRING_CHANNEL:
            return cast(ptr, c_char_p), err
        else:
            # Wrap the raw channel memory in a numpy view (no copy).
            arrayType = np.ctypeslib.ndpointer(MYFLT, 1, (length,), 'C_CONTIGUOUS')
            p = cast(addressof(ptr), arrayType)
            return np.ctypeslib.as_array(p), err
    # Non-success return codes are mapped to human-readable messages;
    # a positive code is the type of an existing, incompatible channel.
    elif ret == CSOUND_MEMORY:
        err = 'Not enough memory for allocating channel'
    elif ret == CSOUND_ERROR:
        err = 'The specified channel name or type is not valid'
    elif ret == CSOUND_CONTROL_CHANNEL:
        err = 'A control channel named {} already exists'.format(name)
    elif ret == CSOUND_AUDIO_CHANNEL:
        err = 'An audio channel named {} already exists'.format(name)
    elif ret == CSOUND_STRING_CHANNEL:
        err = 'A string channel named {} already exists'.format(name)
    else:
        err = 'Unknown error'
    return None, err
def listChannels(self):
    """Return a pointer and an error message.

    The pointer points to a list of ControlChannelInfo objects for allocated
    channels. A ControlChannelInfo object contains the channel
    characteristics. The error message indicates if there is not enough
    memory for allocating the list or it is an empty string if there is no
    error. In the case of no channels or an error, the pointer is None.
    Notes: the caller is responsible for freeing the list returned by the
    C API with deleteChannelList(). The name pointers may become invalid
    after calling reset().
    """
    cInfos = None
    err = ''
    # Dummy typed pointer the C API overwrites with the list address.
    ptr = cast(pointer(MYFLT(0.0)), POINTER(ControlChannelInfo))
    # Returns the channel count, or a negative error code.
    n = libcsound.csoundListChannels(self.cs, byref(ptr))
    if n == CSOUND_MEMORY :
        err = 'There is not enough memory for allocating the list'
    if n > 0:
        # Re-cast to a fixed-size array of n entries for iteration.
        cInfos = cast(ptr, POINTER(ControlChannelInfo * n)).contents
    return cInfos, err
def deleteChannelList(self, lst):
    """Release a channel list previously returned by listChannels()."""
    head = cast(lst, POINTER(ControlChannelInfo))
    libcsound.csoundDeleteChannelList(self.cs, head)
def setControlChannelHints(self, name, hints):
    """Set parameter hints for a control channel.

    These hints have no internal function but can be used by front ends
    to construct GUIs or to constrain values. See the ControlChannelHints
    structure for details.
    Returns zero on success, or a non-zero error code on failure:
      CSOUND_ERROR:  the channel does not exist, is not a control
                     channel, or the specified parameters are invalid
      CSOUND_MEMORY: could not allocate memory
    """
    chan = cstring(name)
    return libcsound.csoundSetControlChannelHints(self.cs, chan, hints)
def controlChannelHints(self, name):
    """Return special parameters (if any) of a control channel.

    Those parameters have been previously set with
    setControlChannelHints() or the chnparams opcode.
    Returns a ControlChannelHints structure and CSOUND_SUCCESS if the
    channel exists and is a control channel; otherwise None and an error
    code.
    """
    hints = ControlChannelHints()
    ret = libcsound.csoundGetControlChannelHints(self.cs, cstring(name), byref(hints))
    return (hints if ret == CSOUND_SUCCESS else None), ret
def channelLock(self, name):
    """Recover a pointer to the lock for the channel called 'name'.

    The returned lock can be locked/unlocked with the spinLock() and
    spinUnLock() functions.
    Returns the address of the lock, or NULL if the channel does not
    exist.
    """
    return libcsound.csoundGetChannelLock(self.cs, cstring(name))
def controlChannel(self, name):
    """Retrieve the value of the control channel identified by 'name'.

    A second value is returned: the error (or success) code from
    finding or accessing the channel.
    """
    status = c_int(0)
    value = libcsound.csoundGetControlChannel(self.cs, cstring(name), byref(status))
    return value, status
def setControlChannel(self, name, val):
    """Set the value of the control channel identified by 'name'."""
    libcsound.csoundSetControlChannel(self.cs, cstring(name), MYFLT(val))
def audioChannel(self, name, samples):
    """Copy the audio channel identified by 'name' into ndarray 'samples'.

    'samples' should contain enough memory for ksmps MYFLTs.
    """
    dest = samples.ctypes.data_as(POINTER(MYFLT))
    libcsound.csoundGetAudioChannel(self.cs, cstring(name), dest)
def setAudioChannel(self, name, samples):
    """Set the audio channel 'name' with data from ndarray 'samples'.

    'samples' should contain at least ksmps MYFLTs.
    """
    src = samples.ctypes.data_as(POINTER(MYFLT))
    libcsound.csoundSetAudioChannel(self.cs, cstring(name), src)
def stringChannel(self, name, string):
    """Copy the string channel identified by 'name' into 'string'.

    'string' should contain enough memory for the channel contents
    (see channelDatasize()).
    """
    libcsound.csoundGetStringChannel(self.cs, cstring(name), cstring(string))
def setStringChannel(self, name, string):
    """Set the string channel identified by 'name' with 'string'."""
    libcsound.csoundSetStringChannel(self.cs, cstring(name), cstring(string))
def channelDatasize(self, name):
    """Return the size of data stored in a channel.

    For string channels this might change if the channel space gets
    reallocated. Since string variables use dynamic memory allocation
    in Csound6, call this to get the space required by stringChannel().
    """
    chan = cstring(name)
    return libcsound.csoundGetChannelDatasize(self.cs, chan)
def setInputChannelCallback(self, function):
    """Set the function to call whenever the invalue opcode is used."""
    cb = CHANNELFUNC(function)
    libcsound.csoundSetInputChannelCallback(self.cs, cb)
def setOutputChannelCallback(self, function):
    """Set the function to call whenever the outvalue opcode is used."""
    cb = CHANNELFUNC(function)
    libcsound.csoundSetOutputChannelCallback(self.cs, cb)
def setPvsChannel(self, fin, name):
    """Send a PvsdatExt fin to the pvsin opcode (f-rate) for channel 'name'.

    Returns zero on success, CSOUND_ERROR if the index is invalid or
    fsig framesizes are incompatible, or CSOUND_MEMORY if there is not
    enough memory to extend the bus.
    """
    chan = cstring(name)
    return libcsound.csoundSetPvsChannel(self.cs, byref(fin), chan)
def pvsChannel(self, fout, name):
    """Receive a PvsdatExt fout from the pvsout opcode (f-rate) at channel 'name'.

    Returns zero on success, CSOUND_ERROR if the index is invalid or
    if fsig framesizes are incompatible, or CSOUND_MEMORY if there is
    not enough memory to extend the bus.
    """
    chan = cstring(name)
    return libcsound.csoundGetPvsChannel(self.cs, byref(fout), chan)
def scoreEvent(self, type_, pFields):
    """Send a new score event.

    'type_' is the score event type ('a', 'i', 'q', 'f', or 'e').
    'pFields' is an ndarray of MYFLTs with all the pfields for this
    event, starting with the p1 value in pFields[0].
    """
    data = pFields.ctypes.data_as(POINTER(MYFLT))
    count = c_long(pFields.size)
    return libcsound.csoundScoreEvent(self.cs, c_char(type_), data, count)
def scoreEventAbsolute(self, type_, pFields, timeOffset):
    """Like scoreEvent(), insert a score event.

    The event is inserted at absolute time with respect to the start of
    performance, or from an offset set with timeOffset.
    """
    data = pFields.ctypes.data_as(POINTER(MYFLT))
    count = c_long(pFields.size)
    return libcsound.csoundScoreEventAbsolute(
        self.cs, c_char(type_), data, count, c_double(timeOffset))
def inputMessage(self, message):
    """Input a NULL-terminated string (as if from a console).

    Used for line events.
    """
    line = cstring(message)
    libcsound.csoundInputMessage(self.cs, line)
def killInstance(self, instr, instrName, mode, allowRelease):
    """Kill one or more running instances of an instrument.

    The instances are identified by instr (number) or instrName (name);
    if instrName is None, the instrument number is used.
    'mode' is a sum of the following values:
      0, 1, 2: kill all instances (0), oldest only (1), or newest (2)
      4: only turnoff notes with exactly matching (fractional) instr number
      8: only turnoff notes with indefinite duration (p3 < 0 or MIDI).
    If allowRelease is True, the killed instances are allowed to release.
    """
    return libcsound.csoundKillInstance(
        self.cs, MYFLT(instr), cstring(instrName), mode, c_int(allowRelease))
def registerSenseEventCallback(self, function, userData):
    """Register a function to be called by sensevents().

    The function is called once in every control period. Any number of
    functions may be registered; they are called in order of
    registration. The callback takes two arguments: the Csound instance
    pointer and the userData pointer as passed here.
    This facility can be used to ensure a function is called
    synchronously before every csound control buffer processing. Make
    sure no blocking operations are performed in the callback.
    The callbacks are cleared on cleanup().
    Returns zero on success.
    """
    cb = SENSEFUNC(function)
    return libcsound.csoundRegisterSenseEventCallback(self.cs, cb, py_object(userData))
def kerPress(self, c):
    """Set the ASCII code of the most recent key pressed.

    This value is used by the 'sensekey' opcode if a callback for
    returning keyboard events is not set (see registerKeyboardCallback()).
    NOTE(review): the method name looks like a typo for keyPress (it
    wraps csoundKeyPress), but it is kept as-is so existing callers
    do not break.
    """
    libcsound.csoundKeyPress(self.cs, c_char(c))
def registerKeyboardCallback(self, function, userData, type_):
    """Register a general purpose callback function for keyboard events.

    These callbacks are called on every control period by the sensekey
    opcode.
    The callback is preserved on reset(), and multiple callbacks may be
    set and will be called in reverse order of registration. If the same
    function is set again, it is only moved in the list of callbacks so
    that it will be called first, and the user data and type mask
    parameters are updated. 'type_' can be the bitwise OR of callback
    types for which the function should be called, or zero for all types.
    Returns zero on success, CSOUND_ERROR if the specified function
    pointer or type mask is invalid, and CSOUND_MEMORY if there is not
    enough memory.
    The callback function takes the following arguments:
      userData
        the "user data" pointer, as specified when setting the callback
      p
        data pointer, depending on the callback type
      type_
        callback type, can be one of the following (more may be added in
        future versions of Csound):
          CSOUND_CALLBACK_KBD_EVENT
          CSOUND_CALLBACK_KBD_TEXT
            called by the sensekey opcode to fetch key codes. The data
            pointer is a pointer to a single value of type 'int', for
            returning the key code, which can be in the range 1 to 65535,
            or 0 if there is no keyboard event.
            For CSOUND_CALLBACK_KBD_EVENT, both key press and release
            events should be returned (with 65536 (0x10000) added to the
            key code in the latter case) as unshifted ASCII codes.
            CSOUND_CALLBACK_KBD_TEXT expects key press events only as the
            actual text that is typed.
    The return value should be zero on success, negative on error, and
    positive if the callback was ignored (for example because the type is
    not known).
    """
    cb = KEYBOARDFUNC(function)
    return libcsound.csoundRegisterKeyboardCallback(
        self.cs, cb, py_object(userData), c_uint(type_))
def removeKeyboardCallback(self, function):
    """Remove a callback previously set with registerKeyboardCallback()."""
    cb = KEYBOARDFUNC(function)
    libcsound.csoundRemoveKeyboardCallback(self.cs, cb)
#Tables
def tableLength(self, table):
    """Return the length of a function table.

    The guard point is not included. Returns -1 if the table does not
    exist.
    """
    return libcsound.csoundTableLength(self.cs, table)
def tableGet(self, table, index):
    """Return the value of a slot in a function table.

    The table number and index are assumed to be valid.
    """
    return libcsound.csoundTableGet(self.cs, table, index)
def tableSet(self, table, index, value):
    """Set the value of a slot in a function table.

    The table number and index are assumed to be valid.
    """
    libcsound.csoundTableSet(self.cs, table, index, MYFLT(value))
def tableCopyOut(self, table, dest):
    """Copy the contents of a function table into a supplied ndarray.

    The table number is assumed to be valid, and 'dest' needs enough
    space to receive all the function table contents.
    """
    buf = dest.ctypes.data_as(POINTER(MYFLT))
    libcsound.csoundTableCopyOut(self.cs, table, buf)
def tableCopyIn(self, table, src):
    """Copy the contents of an ndarray into a given function table.

    The table number is assumed to be valid, and the table needs enough
    space to receive all the array contents.
    """
    buf = src.ctypes.data_as(POINTER(MYFLT))
    libcsound.csoundTableCopyIn(self.cs, table, buf)
def table(self, tableNum):
    """Return a pointer to function table 'tableNum' as an ndarray.

    The ndarray does not include the guard point. If the table does not
    exist, None is returned.
    The ndarray aliases Csound-owned memory (no copy is made).
    """
    # Out-parameter the C API overwrites with the table data pointer;
    # the return value is the table length, or negative if not found.
    ptr = pointer(MYFLT(0.0))
    size = libcsound.csoundGetTable(self.cs, byref(ptr), tableNum)
    if size < 0:
        return None
    arrayType = np.ctypeslib.ndpointer(MYFLT, 1, (size,), 'C_CONTIGUOUS')
    p = cast(addressof(ptr), arrayType)
    return np.ctypeslib.as_array(p)
def tableArgs(self, tableNum):
    """Return a pointer to the args used to generate a function table.

    The pointer is returned as an ndarray. If the table does not exist,
    None is returned.
    NB: the argument list starts with the GEN number and is followed by
    its parameters. eg. f 1 0 1024 10 1 0.5 yields the list
    {10.0, 1.0, 0.5}
    """
    # Out-parameter the C API overwrites with the argument-list pointer;
    # the return value is the argument count, or negative if not found.
    ptr = pointer(MYFLT(0.0))
    size = libcsound.csoundGetTableArgs(self.cs, byref(ptr), tableNum)
    if size < 0:
        return None
    arrayType = np.ctypeslib.ndpointer(MYFLT, 1, (size,), 'C_CONTIGUOUS')
    p = cast(addressof(ptr), arrayType)
    return np.ctypeslib.as_array(p)
#Function table display
def setIsGraphable(self, isGraphable):
    """Tell Csound whether external graphic table display is supported.

    Returns the previously set value (initially False).
    """
    prev = libcsound.csoundSetIsGraphable(self.cs, c_int(isGraphable))
    return prev != 0
def setMakeGraphCallback(self, function):
"""Called by external software to set Csound's MakeGraph function."""
libcsound.csoundSetMakeGraphCallback(self.cs, MAKEGRAPHFUNC(function))
def setDrawGraphCallback(self, function):
"""Called by external software to set Csound's DrawGraph function."""
libcsound.csoundSetDrawGraphCallback(self.cs, DRAWGRAPHFUNC(function))
def setKillGraphCallback(self, function):
"""Called by external software to set Csound's KillGraph function."""
libcsound.csoundSetKillGraphCallback(self.cs, KILLGRAPHFUNC(function))
def setExitGraphCallback(self, function):
"""Called by external software to set Csound's ExitGraph function."""
libcsound.csoundSetExitGraphCallback(self.cs, EXITGRAPHFUNC(function))
#Opcodes
def namedGens(self):
"""Find the list of named gens."""
lst = []
ptr = libcsound.csoundGetNamedGens(self.cs)
ptr = cast(ptr, POINTER(NamedGen))
while (ptr):
ng = ptr.contents
lst.append((pstring(ng.name), int(ng.genum)))
ptr = ng.next
return lst
    def newOpcodeList(self):
        """Get an alphabetically sorted list of all opcodes.

        Should be called after externals are loaded by compile_().
        Return a pointer to the list of OpcodeListEntry structures and the
        number of opcodes, or a negative error code on failure.
        Make sure to call disposeOpcodeList() when done with the list.
        """
        opcodes = None
        # dummy non-NULL pointer; csoundNewOpcodeList() overwrites it with
        # the address of the newly allocated opcode array
        ptr = cast(pointer(MYFLT(0.0)), POINTER(OpcodeListEntry))
        n = libcsound.csoundNewOpcodeList(self.cs, byref(ptr))
        if n > 0:
            # reinterpret as a fixed-size ctypes array so entries are
            # indexable from Python
            opcodes = cast(ptr, POINTER(OpcodeListEntry * n)).contents
        return opcodes, n
def disposeOpcodeList(self, lst):
"""Release an opcode list."""
ptr = cast(lst, POINTER(OpcodeListEntry))
libcsound.csoundDisposeOpcodeList(self.cs, ptr)
def appendOpcode(self, opname, dsblksiz, flags, thread, outypes, intypes, iopfunc, kopfunc, aopfunc):
"""Appends an opcode implemented by external software.
This opcode is added to Csound's internal opcode list.
The opcode list is extended by one slot, and the parameters are copied
into the new slot.
Return zero on success.
"""
return libcsound.csoundAppendOpcode(self.cs, cstring(opname), dsblksiz, flags, thread,\
cstring(outypes), cstring(intypes),\
OPCODEFUNC(iopfunc),\
OPCODEFUNC(kopfunc),
OPCODEFUNC(aopfunc))
|
# Copyright 2019 VyOS maintainers and contributors <maintainers@vyos.io>
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library. If not, see <http://www.gnu.org/licenses/>.
import os
import re
import jinja2
import json
import glob
import time
from copy import deepcopy
import vyos.interfaces
from vyos.validate import * # should not * include
from vyos.config import Config # not used anymore
from vyos import ConfigError
from ipaddress import IPv4Network, IPv6Address
from netifaces import ifaddresses, AF_INET, AF_INET6
from time import sleep
from os.path import isfile
from tabulate import tabulate
from hurry.filesize import size,alternative
from datetime import timedelta
from vyos.ifconfig.control import Control
# common prefix for all per-interface dhclient artifacts:
# <prefix><ifname>.conf / .pid / .leases and the .v6* variants
dhclient_base = r'/var/lib/dhcp/dhclient_'

# Jinja2 template rendered into the per-interface dhclient (IPv4) config file
dhcp_cfg = """
# generated by ifconfig.py
option rfc3442-classless-static-routes code 121 = array of unsigned integer 8;
timeout 60;
retry 300;
interface "{{ intf }}" {
send host-name "{{ hostname }}";
{% if client_id -%}
send dhcp-client-identifier "{{ client_id }}";
{% endif -%}
{% if vendor_class_id -%}
send vendor-class-identifier "{{ vendor_class_id }}";
{% endif -%}
request subnet-mask, broadcast-address, routers, domain-name-servers,
rfc3442-classless-static-routes, domain-name, interface-mtu;
require subnet-mask;
}
"""

# Jinja2 template rendered into the per-interface dhclient (IPv6) config file
dhcpv6_cfg = """
# generated by ifconfig.py
interface "{{ intf }}" {
request routers, domain-name-servers, domain-name;
}
"""
class Interface(Control):
    # keyword arguments accepted (options) respectively required (required)
    # by the constructor; subclasses extend these
    options = []
    required = []
    # defaults merged into self.config on construction
    default = {
        'type': '',
    }
    # settings applied by running an external command
    _command_set = {
        'mac': {
            'validate': assert_mac,
            'shellcmd': 'ip link set dev {ifname} address {value}',
        },
        'add_vrf': {
            'shellcmd': 'ip link set dev {ifname} master {value}',
        },
        'del_vrf': {
            'shellcmd': 'ip link set dev {ifname} nomaster {value}',
        },
    }
    # values read directly from sysfs/procfs files
    _sysfs_get = {
        'mtu': {
            'location': '/sys/class/net/{ifname}/mtu',
        },
    }
    # values written directly to sysfs/procfs files, optionally validated
    # and converted first
    _sysfs_set = {
        'alias': {
            # writing NUL clears the alias; an empty write is rejected
            'convert': lambda name: name if name else '\0',
            'location': '/sys/class/net/{ifname}/ifalias',
        },
        'mtu': {
            'validate': assert_mtu,
            'location': '/sys/class/net/{ifname}/mtu',
        },
        'arp_cache_tmo': {
            # API takes seconds, kernel file is in milliseconds
            'convert': lambda tmo: (int(tmo) * 1000),
            'location': '/proc/sys/net/ipv4/neigh/{ifname}/base_reachable_time_ms',
        },
        'arp_filter': {
            'validate': assert_boolean,
            'location': '/proc/sys/net/ipv4/conf/{ifname}/arp_filter',
        },
        'arp_accept': {
            'validate': lambda arp: assert_range(arp,0,2),
            'location': '/proc/sys/net/ipv4/conf/{ifname}/arp_accept',
        },
        'arp_announce': {
            'validate': assert_boolean,
            'location': '/proc/sys/net/ipv4/conf/{ifname}/arp_announce',
        },
        'arp_ignore': {
            'validate': assert_boolean,
            'location': '/proc/sys/net/ipv4/conf/{ifname}/arp_ignore',
        },
        'proxy_arp': {
            'validate': assert_boolean,
            'location': '/proc/sys/net/ipv4/conf/{ifname}/proxy_arp',
        },
        'proxy_arp_pvlan': {
            'validate': assert_boolean,
            'location': '/proc/sys/net/ipv4/conf/{ifname}/proxy_arp_pvlan',
        },
        # link_detect vs link_filter name weirdness
        'link_detect': {
            'validate': lambda link: assert_range(link,0,3),
            'location': '/proc/sys/net/ipv4/conf/{ifname}/link_filter',
        },
    }
    def __init__(self, ifname, **kargs):
        """
        This is the base interface class which supports basic IP/MAC address
        operations as well as DHCP(v6). Other interfaces which represent e.g.
        an ethernet bridge are implemented as derived classes adding all
        additional functionality.

        For creation you will need to provide the interface type, otherwise
        the existing interface is used.

        DEBUG:
        This class has embedded debugging (print) which can be enabled by
        creating the following file:
        vyos@vyos# touch /tmp/vyos.ifconfig.debug

        Example:
        >>> from vyos.ifconfig import Interface
        >>> i = Interface('eth0')
        """
        # start from class defaults, then overlay accepted keyword arguments
        self.config = deepcopy(self.default)
        self.config['ifname'] = ifname
        for k in self.options:
            if k in kargs:
                self.config[k] = kargs[k]
        for k in self.required:
            if k not in kargs:
                raise ConfigError('missing required option {} for {}'.format(k,self.__class__))
        # create the interface only if it does not yet exist in the kernel;
        # creation requires a non-empty 'type'
        if not os.path.exists('/sys/class/net/{}'.format(self.config['ifname'])):
            if not self.config['type']:
                raise Exception('interface "{}" not found'.format(self.config['ifname']))
            self._create()
        # per interface DHCP config files
        self._dhcp_cfg_file = dhclient_base + self.config['ifname'] + '.conf'
        self._dhcp_pid_file = dhclient_base + self.config['ifname'] + '.pid'
        self._dhcp_lease_file = dhclient_base + self.config['ifname'] + '.leases'
        # per interface DHCPv6 config files
        self._dhcpv6_cfg_file = dhclient_base + self.config['ifname'] + '.v6conf'
        self._dhcpv6_pid_file = dhclient_base + self.config['ifname'] + '.v6pid'
        self._dhcpv6_lease_file = dhclient_base + self.config['ifname'] + '.v6leases'
        # DHCP options rendered into the dhclient config template
        self._dhcp_options = {
            'intf' : self.config['ifname'],
            'hostname' : '',
            'client_id' : '',
            'vendor_class_id' : ''
        }
        # DHCPv6 options rendered into the dhclient -6 config template
        self._dhcpv6_options = {
            'intf' : self.config['ifname'],
            'dhcpv6_prm_only' : False,
            'dhcpv6_temporary' : False
        }
        # list of assigned IP addresses
        self._addr = []
def _create(self):
cmd = 'ip link add dev {ifname} type {type}'.format(**self.config)
self._cmd(cmd)
    def remove(self):
        """
        Remove interface from operating system. Removing the interface
        deconfigures all assigned IP addresses and clears possible DHCP(v6)
        client processes.

        Example:
        >>> from vyos.ifconfig import Interface
        >>> i = Interface('eth0')
        >>> i.remove()
        """
        # stop DHCP(v6) clients first so they do not re-add addresses while
        # we tear the interface down
        self._del_dhcp()
        self._del_dhcpv6()
        # remove all assigned IP addresses from interface - this is a bit redundant
        # as the kernel will remove all addresses on interface deletion, but we
        # can not delete ALL interfaces, see below
        for addr in self.get_addr():
            self.del_addr(addr)
        # ---------------------------------------------------------------------
        # A code refactoring is required as this type check is present as
        # Interface implement behaviour for one of it's sub-class.
        # It is required as the current pattern for vlan is:
        # Interface('name').remove() to delete an interface
        # The code should be modified to have a class method called connect and
        # have Interface.connect('name').remove()
        # each subclass should register within Interface the pattern for that
        # interface ie: (ethX, etc.) and use this to create an instance of
        # the right class (EthernetIf, ...)
        # Ethernet interfaces can not be removed
        # Commented out as nowhere in the code do we call Interface()
        # This would also cause an import loop
        # if self.__class__ == EthernetIf:
        #     return
        # ---------------------------------------------------------------------
        self._delete()
def _delete(self):
# NOTE (Improvement):
# after interface removal no other commands should be allowed
# to be called and instead should raise an Exception:
cmd = 'ip link del dev {}'.format(self.config['ifname'])
return self._cmd(cmd)
def get_mtu(self):
"""
Get/set interface mtu in bytes.
Example:
>>> from vyos.ifconfig import Interface
>>> Interface('eth0').get_mtu()
'1500'
"""
return self.get_interface('mtu')
def set_mtu(self, mtu):
"""
Get/set interface mtu in bytes.
Example:
>>> from vyos.ifconfig import Interface
>>> Interface('eth0').set_mtu(1400)
>>> Interface('eth0').get_mtu()
'1400'
"""
return self.set_interface('mtu', mtu)
def set_mac(self, mac):
"""
Set interface MAC (Media Access Contrl) address to given value.
Example:
>>> from vyos.ifconfig import Interface
>>> Interface('eth0').set_mac('00:50:ab:cd:ef:01')
"""
self.set_interface('mac', mac)
def add_vrf(self, vrf):
"""
Add interface to given VRF instance.
Example:
>>> from vyos.ifconfig import Interface
>>> Interface('eth0').add_vrf('foo')
"""
self.set_interface('add_vrf', vrf)
def del_vrf(self, vrf):
"""
Remove interface from given VRF instance.
Example:
>>> from vyos.ifconfig import Interface
>>> Interface('eth0').del_vrf('foo')
"""
self.set_interface('del_vrf', vrf)
def set_arp_cache_tmo(self, tmo):
"""
Set ARP cache timeout value in seconds. Internal Kernel representation
is in milliseconds.
Example:
>>> from vyos.ifconfig import Interface
>>> Interface('eth0').set_arp_cache_tmo(40)
"""
return self.set_interface('arp_cache_tmo', tmo)
def set_arp_filter(self, arp_filter):
"""
Filter ARP requests
1 - Allows you to have multiple network interfaces on the same
subnet, and have the ARPs for each interface be answered
based on whether or not the kernel would route a packet from
the ARP'd IP out that interface (therefore you must use source
based routing for this to work). In other words it allows control
of which cards (usually 1) will respond to an arp request.
0 - (default) The kernel can respond to arp requests with addresses
from other interfaces. This may seem wrong but it usually makes
sense, because it increases the chance of successful communication.
IP addresses are owned by the complete host on Linux, not by
particular interfaces. Only for more complex setups like load-
balancing, does this behaviour cause problems.
"""
return self.set_interface('arp_filter', arp_filter)
def set_arp_accept(self, arp_accept):
"""
Define behavior for gratuitous ARP frames who's IP is not
already present in the ARP table:
0 - don't create new entries in the ARP table
1 - create new entries in the ARP table
Both replies and requests type gratuitous arp will trigger the
ARP table to be updated, if this setting is on.
If the ARP table already contains the IP address of the
gratuitous arp frame, the arp table will be updated regardless
if this setting is on or off.
"""
return self.set_interface('arp_accept', arp_accept)
def set_arp_announce(self, arp_announce):
"""
Define different restriction levels for announcing the local
source IP address from IP packets in ARP requests sent on
interface:
0 - (default) Use any local address, configured on any interface
1 - Try to avoid local addresses that are not in the target's
subnet for this interface. This mode is useful when target
hosts reachable via this interface require the source IP
address in ARP requests to be part of their logical network
configured on the receiving interface. When we generate the
request we will check all our subnets that include the
target IP and will preserve the source address if it is from
such subnet.
Increasing the restriction level gives more chance for
receiving answer from the resolved target while decreasing
the level announces more valid sender's information.
"""
return self.set_interface('arp_announce', arp_announce)
def set_arp_ignore(self, arp_ignore):
"""
Define different modes for sending replies in response to received ARP
requests that resolve local target IP addresses:
0 - (default): reply for any local target IP address, configured
on any interface
1 - reply only if the target IP address is local address
configured on the incoming interface
"""
return self.set_interface('arp_ignore', arp_ignore)
def set_link_detect(self, link_filter):
"""
Configure kernel response in packets received on interfaces that are 'down'
0 - Allow packets to be received for the address on this interface
even if interface is disabled or no carrier.
1 - Ignore packets received if interface associated with the incoming
address is down.
2 - Ignore packets received if interface associated with the incoming
address is down or has no carrier.
Default value is 0. Note that some distributions enable it in startup
scripts.
Example:
>>> from vyos.ifconfig import Interface
>>> Interface('eth0').set_link_detect(1)
"""
return self.set_interface('link_detect', link_filter)
    def set_alias(self, ifalias=''):
        """
        Set the interface alias name used by e.g. SNMP.

        Example:
        >>> from vyos.ifconfig import Interface
        >>> Interface('eth0').set_alias('VyOS upstream interface')

        to clear the alias (i.e. delete it) pass an empty string:
        >>> Interface('eth0').set_alias('')
        """
        self.set_interface('alias', ifalias)
def get_state(self):
"""
Enable (up) / Disable (down) an interface
Example:
>>> from vyos.ifconfig import Interface
>>> Interface('eth0').get_state()
'up'
"""
cmd = 'ip -json link show dev {}'.format(self.config['ifname'])
tmp = self._cmd(cmd)
out = json.loads(tmp)
return out[0]['operstate'].lower()
def set_state(self, state):
"""
Enable (up) / Disable (down) an interface
Example:
>>> from vyos.ifconfig import Interface
>>> Interface('eth0').set_state('down')
>>> Interface('eth0').get_state()
'down'
"""
if state not in ['up', 'down']:
raise ValueError('state must be "up" or "down"')
# Assemble command executed on system. Unfortunately there is no way
# to up/down an interface via sysfs
cmd = 'ip link set dev {} {}'.format(self.config['ifname'], state)
return self._cmd(cmd)
def set_proxy_arp(self, enable):
"""
Set per interface proxy ARP configuration
Example:
>>> from vyos.ifconfig import Interface
>>> Interface('eth0').set_proxy_arp(1)
"""
self.set_interface('proxy_arp', enable)
def set_proxy_arp_pvlan(self, enable):
"""
Private VLAN proxy arp.
Basically allow proxy arp replies back to the same interface
(from which the ARP request/solicitation was received).
This is done to support (ethernet) switch features, like RFC
3069, where the individual ports are NOT allowed to
communicate with each other, but they are allowed to talk to
the upstream router. As described in RFC 3069, it is possible
to allow these hosts to communicate through the upstream
router by proxy_arp'ing. Don't need to be used together with
proxy_arp.
This technology is known by different names:
In RFC 3069 it is called VLAN Aggregation.
Cisco and Allied Telesyn call it Private VLAN.
Hewlett-Packard call it Source-Port filtering or port-isolation.
Ericsson call it MAC-Forced Forwarding (RFC Draft).
Example:
>>> from vyos.ifconfig import Interface
>>> Interface('eth0').set_proxy_arp_pvlan(1)
"""
self.set_interface('proxy_arp_pvlan', enable)
def get_addr(self):
"""
Retrieve assigned IPv4 and IPv6 addresses from given interface.
This is done using the netifaces and ipaddress python modules.
Example:
>>> from vyos.ifconfig import Interface
>>> Interface('eth0').get_addrs()
['172.16.33.30/24', 'fe80::20c:29ff:fe11:a174/64']
"""
ipv4 = []
ipv6 = []
if AF_INET in ifaddresses(self.config['ifname']).keys():
for v4_addr in ifaddresses(self.config['ifname'])[AF_INET]:
# we need to manually assemble a list of IPv4 address/prefix
prefix = '/' + \
str(IPv4Network('0.0.0.0/' + v4_addr['netmask']).prefixlen)
ipv4.append(v4_addr['addr'] + prefix)
if AF_INET6 in ifaddresses(self.config['ifname']).keys():
for v6_addr in ifaddresses(self.config['ifname'])[AF_INET6]:
# Note that currently expanded netmasks are not supported. That means
# 2001:db00::0/24 is a valid argument while 2001:db00::0/ffff:ff00:: not.
# see https://docs.python.org/3/library/ipaddress.html
bits = bin(
int(v6_addr['netmask'].replace(':', ''), 16)).count('1')
prefix = '/' + str(bits)
# we alsoneed to remove the interface suffix on link local
# addresses
v6_addr['addr'] = v6_addr['addr'].split('%')[0]
ipv6.append(v6_addr['addr'] + prefix)
return ipv4 + ipv6
def add_addr(self, addr):
"""
Add IP(v6) address to interface. Address is only added if it is not
already assigned to that interface.
addr: can be an IPv4 address, IPv6 address, dhcp or dhcpv6!
IPv4: add IPv4 address to interface
IPv6: add IPv6 address to interface
dhcp: start dhclient (IPv4) on interface
dhcpv6: start dhclient (IPv6) on interface
Example:
>>> from vyos.ifconfig import Interface
>>> j = Interface('eth0')
>>> j.add_addr('192.0.2.1/24')
>>> j.add_addr('2001:db8::ffff/64')
>>> j.get_addr()
['192.0.2.1/24', '2001:db8::ffff/64']
"""
# cache new IP address which is assigned to interface
self._addr.append(addr)
# we can not have both DHCP and static IPv4 addresses assigned to an interface
if 'dhcp' in self._addr:
for addr in self._addr:
# do not change below 'if' ordering esle you will get an exception as:
# ValueError: 'dhcp' does not appear to be an IPv4 or IPv6 address
if addr != 'dhcp' and is_ipv4(addr):
raise ConfigError("Can't configure both static IPv4 and DHCP address on the same interface")
if addr == 'dhcp':
self._set_dhcp()
elif addr == 'dhcpv6':
self._set_dhcpv6()
else:
if not is_intf_addr_assigned(self.config['ifname'], addr):
cmd = 'ip addr add "{}" dev "{}"'.format(addr, self.config['ifname'])
return self._cmd(cmd)
def del_addr(self, addr):
"""
Delete IP(v6) address to interface. Address is only added if it is
assigned to that interface.
addr: can be an IPv4 address, IPv6 address, dhcp or dhcpv6!
IPv4: delete IPv4 address from interface
IPv6: delete IPv6 address from interface
dhcp: stop dhclient (IPv4) on interface
dhcpv6: stop dhclient (IPv6) on interface
Example:
>>> from vyos.ifconfig import Interface
>>> j = Interface('eth0')
>>> j.add_addr('2001:db8::ffff/64')
>>> j.add_addr('192.0.2.1/24')
>>> j.get_addr()
['192.0.2.1/24', '2001:db8::ffff/64']
>>> j.del_addr('192.0.2.1/24')
>>> j.get_addr()
['2001:db8::ffff/64']
"""
if addr == 'dhcp':
self._del_dhcp()
elif addr == 'dhcpv6':
self._del_dhcpv6()
else:
if is_intf_addr_assigned(self.config['ifname'], addr):
cmd = 'ip addr del "{}" dev "{}"'.format(addr, self.config['ifname'])
return self._cmd(cmd)
def get_dhcp_options(self):
"""
Return dictionary with supported DHCP options.
Dictionary should be altered and send back via set_dhcp_options()
so those options are applied when DHCP is run.
"""
return self._dhcp_options
def set_dhcp_options(self, options):
"""
Store new DHCP options used by next run of DHCP client.
"""
self._dhcp_options = options
    def get_dhcpv6_options(self):
        """
        Return dictionary with supported DHCPv6 options.

        The dictionary should be altered and sent back via
        set_dhcpv6_options() so those options are applied when the DHCPv6
        client is run.
        """
        return self._dhcpv6_options
    def set_dhcpv6_options(self, options):
        """
        Store new DHCPv6 options used by next run of the DHCPv6 client.
        """
        self._dhcpv6_options = options
# replace dhcpv4/v6 with systemd.networkd?
    def _set_dhcp(self):
        """
        Configure interface as DHCP client. The dhclient binary is automatically
        started in background!

        Example:
        >>> from vyos.ifconfig import Interface
        >>> j = Interface('eth0')
        >>> j.add_addr('dhcp')
        """
        dhcp = self.get_dhcp_options()
        if not dhcp['hostname']:
            # read configured system hostname.
            # maybe change to vyos hostd client ???
            with open('/etc/hostname', 'r') as f:
                dhcp['hostname'] = f.read().rstrip('\n')
        # render DHCP configuration
        tmpl = jinja2.Template(dhcp_cfg)
        dhcp_text = tmpl.render(dhcp)
        with open(self._dhcp_cfg_file, 'w') as f:
            f.write(dhcp_text)
        # launch dhclient through start-stop-daemon, tracked by pidfile
        # (presumably to avoid spawning a second client for the same
        # interface — TODO confirm)
        cmd = 'start-stop-daemon --start --quiet --pidfile ' + \
            self._dhcp_pid_file
        cmd += ' --exec /sbin/dhclient --'
        # now pass arguments to dhclient binary:
        # -4 IPv4 only, -nw don't wait for a lease, config/pid/lease files
        cmd += ' -4 -nw -cf {} -pf {} -lf {} {}'.format(
            self._dhcp_cfg_file, self._dhcp_pid_file, self._dhcp_lease_file, self.config['ifname'])
        return self._cmd(cmd)
def _del_dhcp(self):
"""
De-configure interface as DHCP clinet. All auto generated files like
pid, config and lease will be removed.
Example:
>>> from vyos.ifconfig import Interface
>>> j = Interface('eth0')
>>> j.del_dhcp()
"""
pid = 0
if os.path.isfile(self._dhcp_pid_file):
with open(self._dhcp_pid_file, 'r') as f:
pid = int(f.read())
else:
self._debug_msg('No DHCP client PID found')
return None
# stop dhclient, we need to call dhclient and tell it should release the
# aquired IP address. tcpdump tells me:
# 172.16.35.103.68 > 172.16.35.254.67: [bad udp cksum 0xa0cb -> 0xb943!] BOOTP/DHCP, Request from 00:50:56:9d:11:df, length 300, xid 0x620e6946, Flags [none] (0x0000)
# Client-IP 172.16.35.103
# Client-Ethernet-Address 00:50:56:9d:11:df
# Vendor-rfc1048 Extensions
# Magic Cookie 0x63825363
# DHCP-Message Option 53, length 1: Release
# Server-ID Option 54, length 4: 172.16.35.254
# Hostname Option 12, length 10: "vyos"
#
cmd = '/sbin/dhclient -cf {} -pf {} -lf {} -r {}'.format(
self._dhcp_cfg_file, self._dhcp_pid_file, self._dhcp_lease_file, self.config['ifname'])
self._cmd(cmd)
# cleanup old config file
if os.path.isfile(self._dhcp_cfg_file):
os.remove(self._dhcp_cfg_file)
# cleanup old pid file
if os.path.isfile(self._dhcp_pid_file):
os.remove(self._dhcp_pid_file)
# cleanup old lease file
if os.path.isfile(self._dhcp_lease_file):
os.remove(self._dhcp_lease_file)
    def _set_dhcpv6(self):
        """
        Configure interface as DHCPv6 client. The dhclient binary is automatically
        started in background!

        Example:
        >>> from vyos.ifconfig import Interface
        >>> j = Interface('eth0')
        >>> j.add_addr('dhcpv6')
        """
        dhcpv6 = self.get_dhcpv6_options()
        # better safe than sorry .. should be checked in interface script
        # but if you missed it we are safe!
        if dhcpv6['dhcpv6_prm_only'] and dhcpv6['dhcpv6_temporary']:
            raise Exception('DHCPv6 temporary and parameters-only options are mutually exclusive!')
        # render DHCP configuration
        tmpl = jinja2.Template(dhcpv6_cfg)
        dhcpv6_text = tmpl.render(dhcpv6)
        with open(self._dhcpv6_cfg_file, 'w') as f:
            f.write(dhcpv6_text)
        # no longer accept router announcements on this interface
        self._write_sysfs('/proc/sys/net/ipv6/conf/{}/accept_ra'
                          .format(self.config['ifname']), 0)
        # assemble command-line to start DHCPv6 client (dhclient)
        cmd = 'start-stop-daemon --start --quiet --pidfile ' + \
            self._dhcpv6_pid_file
        cmd += ' --exec /sbin/dhclient --'
        # now pass arguments to dhclient binary
        cmd += ' -6 -nw -cf {} -pf {} -lf {}'.format(
            self._dhcpv6_cfg_file, self._dhcpv6_pid_file, self._dhcpv6_lease_file)
        # add optional arguments
        if dhcpv6['dhcpv6_prm_only']:
            # -S: stateless — request configuration parameters only
            cmd += ' -S'
        if dhcpv6['dhcpv6_temporary']:
            # -T: request temporary addresses
            cmd += ' -T'
        cmd += ' {}'.format(self.config['ifname'])
        return self._cmd(cmd)
def _del_dhcpv6(self):
"""
De-configure interface as DHCPv6 clinet. All auto generated files like
pid, config and lease will be removed.
Example:
>>> from vyos.ifconfig import Interface
>>> j = Interface('eth0')
>>> j.del_dhcpv6()
"""
pid = 0
if os.path.isfile(self._dhcpv6_pid_file):
with open(self._dhcpv6_pid_file, 'r') as f:
pid = int(f.read())
else:
self._debug_msg('No DHCPv6 client PID found')
return None
# stop dhclient
cmd = 'start-stop-daemon --stop --quiet --pidfile {}'.format(self._dhcpv6_pid_file)
self._cmd(cmd)
# accept router announcements on this interface
self._write_sysfs('/proc/sys/net/ipv6/conf/{}/accept_ra'
.format(self.config['ifname']), 1)
# cleanup old config file
if os.path.isfile(self._dhcpv6_cfg_file):
os.remove(self._dhcpv6_cfg_file)
# cleanup old pid file
if os.path.isfile(self._dhcpv6_pid_file):
os.remove(self._dhcpv6_pid_file)
# cleanup old lease file
if os.path.isfile(self._dhcpv6_lease_file):
os.remove(self._dhcpv6_lease_file)
def op_show_interface_stats(self):
stats = self.get_interface_stats()
rx = [['bytes','packets','errors','dropped','overrun','mcast'],[stats['rx_bytes'],stats['rx_packets'],stats['rx_errors'],stats['rx_dropped'],stats['rx_over_errors'],stats['multicast']]]
tx = [['bytes','packets','errors','dropped','carrier','collisions'],[stats['tx_bytes'],stats['tx_packets'],stats['tx_errors'],stats['tx_dropped'],stats['tx_carrier_errors'],stats['collisions']]]
output = "RX: \n"
output += tabulate(rx,headers="firstrow",numalign="right",tablefmt="plain")
output += "\n\nTX: \n"
output += tabulate(tx,headers="firstrow",numalign="right",tablefmt="plain")
print(' '.join(('\n'+output.lstrip()).splitlines(True)))
def get_interface_stats(self):
interface_stats = dict()
devices = [f for f in glob.glob("/sys/class/net/**/statistics")]
for dev_path in devices:
metrics = [f for f in glob.glob(dev_path +"/**")]
dev = re.findall(r"/sys/class/net/(.*)/statistics",dev_path)[0]
dev_dict = dict()
for metric_path in metrics:
metric = metric_path.replace(dev_path+"/","")
if isfile(metric_path):
data = open(metric_path, 'r').read()[:-1]
dev_dict[metric] = int(data)
interface_stats[dev] = dev_dict
return interface_stats[self.config['ifname']]
ifconfig: T2057: make set_state use set_interface
# Copyright 2019 VyOS maintainers and contributors <maintainers@vyos.io>
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library. If not, see <http://www.gnu.org/licenses/>.
import os
import re
import jinja2
import json
import glob
import time
from copy import deepcopy
import vyos.interfaces
from vyos.validate import * # should not * include
from vyos.config import Config # not used anymore
from vyos import ConfigError
from ipaddress import IPv4Network, IPv6Address
from netifaces import ifaddresses, AF_INET, AF_INET6
from time import sleep
from os.path import isfile
from tabulate import tabulate
from hurry.filesize import size,alternative
from datetime import timedelta
from vyos.ifconfig.control import Control
# common prefix for all per-interface dhclient artifacts:
# <prefix><ifname>.conf / .pid / .leases and the .v6* variants
dhclient_base = r'/var/lib/dhcp/dhclient_'

# Jinja2 template rendered into the per-interface dhclient (IPv4) config file
dhcp_cfg = """
# generated by ifconfig.py
option rfc3442-classless-static-routes code 121 = array of unsigned integer 8;
timeout 60;
retry 300;
interface "{{ intf }}" {
send host-name "{{ hostname }}";
{% if client_id -%}
send dhcp-client-identifier "{{ client_id }}";
{% endif -%}
{% if vendor_class_id -%}
send vendor-class-identifier "{{ vendor_class_id }}";
{% endif -%}
request subnet-mask, broadcast-address, routers, domain-name-servers,
rfc3442-classless-static-routes, domain-name, interface-mtu;
require subnet-mask;
}
"""

# Jinja2 template rendered into the per-interface dhclient (IPv6) config file
dhcpv6_cfg = """
# generated by ifconfig.py
interface "{{ intf }}" {
request routers, domain-name-servers, domain-name;
}
"""
class Interface(Control):
    # keyword arguments accepted (options) respectively required (required)
    # by the constructor; subclasses extend these
    options = []
    required = []
    # defaults merged into self.config on construction
    default = {
        'type': '',
    }
    # settings applied by running an external command
    _command_set = {
        # 'state' routes up/down through the generic command machinery
        'state': {
            'validate': lambda v: assert_list(v, ['up', 'down']),
            'shellcmd': 'ip link set dev {ifname} {value}',
        },
        'mac': {
            'validate': assert_mac,
            'shellcmd': 'ip link set dev {ifname} address {value}',
        },
        'add_vrf': {
            'shellcmd': 'ip link set dev {ifname} master {value}',
        },
        'del_vrf': {
            'shellcmd': 'ip link set dev {ifname} nomaster {value}',
        },
    }
    # values read directly from sysfs/procfs files
    _sysfs_get = {
        'mtu': {
            'location': '/sys/class/net/{ifname}/mtu',
        },
    }
    # values written directly to sysfs/procfs files, optionally validated
    # and converted first
    _sysfs_set = {
        'alias': {
            # writing NUL clears the alias; an empty write is rejected
            'convert': lambda name: name if name else '\0',
            'location': '/sys/class/net/{ifname}/ifalias',
        },
        'mtu': {
            'validate': assert_mtu,
            'location': '/sys/class/net/{ifname}/mtu',
        },
        'arp_cache_tmo': {
            # API takes seconds, kernel file is in milliseconds
            'convert': lambda tmo: (int(tmo) * 1000),
            'location': '/proc/sys/net/ipv4/neigh/{ifname}/base_reachable_time_ms',
        },
        'arp_filter': {
            'validate': assert_boolean,
            'location': '/proc/sys/net/ipv4/conf/{ifname}/arp_filter',
        },
        'arp_accept': {
            'validate': lambda arp: assert_range(arp,0,2),
            'location': '/proc/sys/net/ipv4/conf/{ifname}/arp_accept',
        },
        'arp_announce': {
            'validate': assert_boolean,
            'location': '/proc/sys/net/ipv4/conf/{ifname}/arp_announce',
        },
        'arp_ignore': {
            'validate': assert_boolean,
            'location': '/proc/sys/net/ipv4/conf/{ifname}/arp_ignore',
        },
        'proxy_arp': {
            'validate': assert_boolean,
            'location': '/proc/sys/net/ipv4/conf/{ifname}/proxy_arp',
        },
        'proxy_arp_pvlan': {
            'validate': assert_boolean,
            'location': '/proc/sys/net/ipv4/conf/{ifname}/proxy_arp_pvlan',
        },
        # link_detect vs link_filter name weirdness
        'link_detect': {
            'validate': lambda link: assert_range(link,0,3),
            'location': '/proc/sys/net/ipv4/conf/{ifname}/link_filter',
        },
    }
    def __init__(self, ifname, **kargs):
        """
        This is the base interface class which supports basic IP/MAC address
        operations as well as DHCP(v6). Other interfaces which represent e.g.
        an ethernet bridge are implemented as derived classes adding all
        additional functionality.

        For creation you will need to provide the interface type, otherwise
        the existing interface is used.

        DEBUG:
        This class has embedded debugging (print) which can be enabled by
        creating the following file:
        vyos@vyos# touch /tmp/vyos.ifconfig.debug

        Example:
        >>> from vyos.ifconfig import Interface
        >>> i = Interface('eth0')
        """
        # start from class defaults, then overlay accepted keyword arguments
        self.config = deepcopy(self.default)
        self.config['ifname'] = ifname
        for k in self.options:
            if k in kargs:
                self.config[k] = kargs[k]
        for k in self.required:
            if k not in kargs:
                raise ConfigError('missing required option {} for {}'.format(k,self.__class__))
        # create the interface only if it does not yet exist in the kernel;
        # creation requires a non-empty 'type'
        if not os.path.exists('/sys/class/net/{}'.format(self.config['ifname'])):
            if not self.config['type']:
                raise Exception('interface "{}" not found'.format(self.config['ifname']))
            self._create()
        # per interface DHCP config files
        self._dhcp_cfg_file = dhclient_base + self.config['ifname'] + '.conf'
        self._dhcp_pid_file = dhclient_base + self.config['ifname'] + '.pid'
        self._dhcp_lease_file = dhclient_base + self.config['ifname'] + '.leases'
        # per interface DHCPv6 config files
        self._dhcpv6_cfg_file = dhclient_base + self.config['ifname'] + '.v6conf'
        self._dhcpv6_pid_file = dhclient_base + self.config['ifname'] + '.v6pid'
        self._dhcpv6_lease_file = dhclient_base + self.config['ifname'] + '.v6leases'
        # DHCP options rendered into the dhclient config template
        self._dhcp_options = {
            'intf' : self.config['ifname'],
            'hostname' : '',
            'client_id' : '',
            'vendor_class_id' : ''
        }
        # DHCPv6 options rendered into the dhclient -6 config template
        self._dhcpv6_options = {
            'intf' : self.config['ifname'],
            'dhcpv6_prm_only' : False,
            'dhcpv6_temporary' : False
        }
        # list of assigned IP addresses
        self._addr = []
def _create(self):
cmd = 'ip link add dev {ifname} type {type}'.format(**self.config)
self._cmd(cmd)
def remove(self):
    """
    Remove the interface from the operating system. All assigned IP
    addresses are deconfigured and running DHCP(v6) client processes
    are stopped first.

    Example:
    >>> from vyos.ifconfig import Interface
    >>> i = Interface('eth0')
    >>> i.remove()
    """
    # first shut down any DHCP(v6) client
    self._del_dhcp()
    self._del_dhcpv6()

    # drop every assigned address; the kernel would do this on deletion
    # anyway, but not every interface type can actually be deleted
    for addr in self.get_addr():
        self.del_addr(addr)

    # NOTE (refactoring): Interface('name').remove() is currently used
    # to delete sub-class interfaces (e.g. vlan). A factory such as
    # Interface.connect('name') dispatching to the right subclass
    # (EthernetIf, ...) registered by each subclass would be cleaner -
    # ethernet interfaces themselves can not be removed. A direct
    # EthernetIf check here is not possible (import loop) and Interface()
    # itself is never instantiated elsewhere in the code.
    self._delete()
def _delete(self):
# NOTE (Improvement):
# after interface removal no other commands should be allowed
# to be called and instead should raise an Exception:
cmd = 'ip link del dev {}'.format(self.config['ifname'])
return self._cmd(cmd)
def get_mtu(self):
    """
    Return the interface MTU in bytes (as reported by sysfs).

    Example:
    >>> from vyos.ifconfig import Interface
    >>> Interface('eth0').get_mtu()
    '1500'
    """
    return self.get_interface('mtu')
def set_mtu(self, mtu):
    """
    Set the interface MTU in bytes.

    Example:
    >>> from vyos.ifconfig import Interface
    >>> Interface('eth0').set_mtu(1400)
    >>> Interface('eth0').get_mtu()
    '1400'
    """
    return self.set_interface('mtu', mtu)
def set_mac(self, mac):
    """
    Assign a new MAC (Media Access Control) address to the interface.

    Example:
    >>> from vyos.ifconfig import Interface
    >>> Interface('eth0').set_mac('00:50:ab:cd:ef:01')
    """
    self.set_interface('mac', mac)
def add_vrf(self, vrf):
    """
    Attach the interface to the given VRF instance.

    Example:
    >>> from vyos.ifconfig import Interface
    >>> Interface('eth0').add_vrf('foo')
    """
    self.set_interface('add_vrf', vrf)
def del_vrf(self, vrf):
    """
    Detach the interface from the given VRF instance.

    Example:
    >>> from vyos.ifconfig import Interface
    >>> Interface('eth0').del_vrf('foo')
    """
    self.set_interface('del_vrf', vrf)
def set_arp_cache_tmo(self, tmo):
    """
    Set the ARP cache timeout in seconds (the kernel representation is
    in milliseconds internally).

    Example:
    >>> from vyos.ifconfig import Interface
    >>> Interface('eth0').set_arp_cache_tmo(40)
    """
    return self.set_interface('arp_cache_tmo', tmo)
def set_arp_filter(self, arp_filter):
    """
    Control which interfaces answer ARP requests.

    1 - only answer if the kernel would route a packet from the ARP'd
        IP out of this interface (requires source based routing). This
        allows multiple interfaces on the same subnet and control over
        which card (usually 1) responds to an arp request.
    0 - (default) the kernel may respond with addresses from other
        interfaces. IP addresses are owned by the complete host on
        Linux, not by particular interfaces, which increases the chance
        of successful communication; only complex setups such as
        load-balancing are troubled by this behaviour.
    """
    return self.set_interface('arp_filter', arp_filter)
def set_arp_accept(self, arp_accept):
    """
    Behaviour for gratuitous ARP frames whose IP is not yet present in
    the ARP table:
    0 - do not create new entries in the ARP table
    1 - create new entries in the ARP table

    Both reply and request type gratuitous arp trigger a table update
    when enabled; an IP already present in the table is refreshed
    regardless of this setting.
    """
    return self.set_interface('arp_accept', arp_accept)
def set_arp_announce(self, arp_announce):
    """
    Restriction level for the local source IP announced in ARP requests
    sent on this interface:
    0 - (default) use any local address, configured on any interface
    1 - try to avoid local addresses that are not in the target's
        subnet for this interface. Useful when target hosts require the
        source IP of ARP requests to be part of their logical network;
        when generating the request all our subnets including the
        target IP are checked and a matching source address preserved.

    Increasing the restriction level gives more chance of receiving an
    answer from the resolved target, decreasing it announces more valid
    sender information.
    """
    return self.set_interface('arp_announce', arp_announce)
def set_arp_ignore(self, arp_ignore):
    """
    Reply mode for received ARP requests that resolve local target IPs:
    0 - (default) reply for any local target IP address, configured on
        any interface
    1 - reply only if the target IP is a local address configured on
        the incoming interface
    """
    return self.set_interface('arp_ignore', arp_ignore)
def set_link_detect(self, link_filter):
    """
    Kernel response to packets received on an interface that is 'down':
    0 - (default) accept packets for addresses on this interface even
        if it is disabled or has no carrier
    1 - ignore packets if the associated interface is down
    2 - ignore packets if the associated interface is down or has no
        carrier

    Note that some distributions enable this in their startup scripts.

    Example:
    >>> from vyos.ifconfig import Interface
    >>> Interface('eth0').set_link_detect(1)
    """
    return self.set_interface('link_detect', link_filter)
def set_alias(self, ifalias=''):
    """
    Set the interface alias name used by e.g. SNMP.

    Example:
    >>> from vyos.ifconfig import Interface
    >>> Interface('eth0').set_alias('VyOS upstream interface')

    Pass an empty string (the default) to clear the alias again:
    >>> Interface('eth0').set_alias('')
    """
    self.set_interface('alias', ifalias)
def get_state(self):
    """
    Return the operational state of the interface, e.g. 'up' or 'down'.

    Example:
    >>> from vyos.ifconfig import Interface
    >>> Interface('eth0').get_state()
    'up'
    """
    out = self._cmd('ip -json link show dev {}'.format(self.config['ifname']))
    # iproute2 reports a JSON list with one entry for the queried device
    return json.loads(out)[0]['operstate'].lower()
def set_state(self, state):
    """
    Bring the interface administratively 'up' or 'down'.

    Example:
    >>> from vyos.ifconfig import Interface
    >>> Interface('eth0').set_state('down')
    >>> Interface('eth0').get_state()
    'down'
    """
    return self.set_interface('state', state)
def set_proxy_arp(self, enable):
    """
    Enable/disable proxy ARP on this interface.

    Example:
    >>> from vyos.ifconfig import Interface
    >>> Interface('eth0').set_proxy_arp(1)
    """
    self.set_interface('proxy_arp', enable)
def set_proxy_arp_pvlan(self, enable):
    """
    Private VLAN proxy arp: allow proxy arp replies back out of the
    same interface the ARP request/solicitation arrived on.

    This supports (ethernet) switch features like RFC 3069 where
    individual ports may not talk to each other but may talk to the
    upstream router; proxy_arp'ing on the router lets such hosts reach
    each other. Does not need to be combined with proxy_arp.

    The technology goes by several names: VLAN Aggregation (RFC 3069),
    Private VLAN (Cisco, Allied Telesyn), Source-Port filtering /
    port-isolation (Hewlett-Packard), MAC-Forced Forwarding (Ericsson,
    RFC Draft).

    Example:
    >>> from vyos.ifconfig import Interface
    >>> Interface('eth0').set_proxy_arp_pvlan(1)
    """
    self.set_interface('proxy_arp_pvlan', enable)
def get_addr(self):
    """
    Return IPv4 and IPv6 addresses assigned to the interface as a list
    of 'address/prefixlen' strings (IPv4 entries first), using the
    netifaces and ipaddress modules.

    Example:
    >>> from vyos.ifconfig import Interface
    >>> Interface('eth0').get_addr()
    ['172.16.33.30/24', 'fe80::20c:29ff:fe11:a174/64']
    """
    ipv4 = []
    ipv6 = []
    ifaces = ifaddresses(self.config['ifname'])

    for v4_addr in ifaces.get(AF_INET, []):
        # netifaces reports a dotted-quad netmask; convert it into a
        # prefix length manually
        prefix = IPv4Network('0.0.0.0/' + v4_addr['netmask']).prefixlen
        ipv4.append('{}/{}'.format(v4_addr['addr'], prefix))

    for v6_addr in ifaces.get(AF_INET6, []):
        # expanded netmasks are not supported by the ipaddress module
        # (2001:db00::0/24 is valid, 2001:db00::0/ffff:ff00:: is not,
        # see https://docs.python.org/3/library/ipaddress.html), so
        # count the set bits of the netmask ourselves
        bits = bin(int(v6_addr['netmask'].replace(':', ''), 16)).count('1')
        # also strip the %scope suffix from link local addresses
        addr = v6_addr['addr'].split('%')[0]
        ipv6.append('{}/{}'.format(addr, bits))

    return ipv4 + ipv6
def add_addr(self, addr):
    """
    Add an IP(v6) address to the interface. The address is only applied
    if it is not already assigned to the interface.

    addr: an IPv4 address, IPv6 address, or the keywords dhcp/dhcpv6
          IPv4:   add IPv4 address to interface
          IPv6:   add IPv6 address to interface
          dhcp:   start dhclient (IPv4) on interface
          dhcpv6: start dhclient (IPv6) on interface

    Raises ConfigError when a static IPv4 address is combined with DHCP.

    Example:
    >>> from vyos.ifconfig import Interface
    >>> j = Interface('eth0')
    >>> j.add_addr('192.0.2.1/24')
    >>> j.add_addr('2001:db8::ffff/64')
    >>> j.get_addr()
    ['192.0.2.1/24', '2001:db8::ffff/64']
    """
    # cache new IP address which is assigned to interface
    self._addr.append(addr)

    # we can not have both DHCP and static IPv4 addresses assigned to an
    # interface. BUGFIX: the loop variable used to be named 'addr',
    # shadowing (and clobbering) the method argument used below - it only
    # worked because the argument had just been appended last.
    if 'dhcp' in self._addr:
        for cached in self._addr:
            # do not change below 'if' ordering else you will get an exception as:
            #   ValueError: 'dhcp' does not appear to be an IPv4 or IPv6 address
            if cached != 'dhcp' and is_ipv4(cached):
                raise ConfigError("Can't configure both static IPv4 and DHCP address on the same interface")

    if addr == 'dhcp':
        self._set_dhcp()
    elif addr == 'dhcpv6':
        self._set_dhcpv6()
    elif not is_intf_addr_assigned(self.config['ifname'], addr):
        return self._cmd('ip addr add "{}" dev "{}"'.format(addr, self.config['ifname']))
def del_addr(self, addr):
    """
    Delete an IP(v6) address from the interface; only done if it is
    actually assigned.

    addr: an IPv4 address, IPv6 address, or the keywords dhcp/dhcpv6
          IPv4:   delete IPv4 address from interface
          IPv6:   delete IPv6 address from interface
          dhcp:   stop dhclient (IPv4) on interface
          dhcpv6: stop dhclient (IPv6) on interface

    Example:
    >>> from vyos.ifconfig import Interface
    >>> j = Interface('eth0')
    >>> j.add_addr('2001:db8::ffff/64')
    >>> j.add_addr('192.0.2.1/24')
    >>> j.get_addr()
    ['192.0.2.1/24', '2001:db8::ffff/64']
    >>> j.del_addr('192.0.2.1/24')
    >>> j.get_addr()
    ['2001:db8::ffff/64']
    """
    if addr == 'dhcp':
        self._del_dhcp()
    elif addr == 'dhcpv6':
        self._del_dhcpv6()
    elif is_intf_addr_assigned(self.config['ifname'], addr):
        return self._cmd('ip addr del "{}" dev "{}"'.format(addr, self.config['ifname']))
def get_dhcp_options(self):
    """
    Return the dict of supported DHCP options. Alter it and hand it
    back via set_dhcp_options() so it is applied on the next DHCP run.
    """
    return self._dhcp_options
def set_dhcp_options(self, options):
    """Store the DHCP options used by the next run of the DHCP client."""
    self._dhcp_options = options
def get_dhcpv6_options(self):
    """
    Return the dict of supported DHCPv6 options. Alter it and hand it
    back via set_dhcpv6_options() so it is applied on the next DHCPv6 run.
    """
    return self._dhcpv6_options
def set_dhcpv6_options(self, options):
    """Store the DHCPv6 options used by the next run of the DHCPv6 client."""
    self._dhcpv6_options = options
# TODO: maybe replace dhcpv4/v6 handling with systemd.networkd?
def _set_dhcp(self):
    """
    Configure the interface as a DHCP client; dhclient is automatically
    started in the background.

    Example:
    >>> from vyos.ifconfig import Interface
    >>> j = Interface('eth0')
    >>> j.set_dhcp()
    """
    dhcp = self.get_dhcp_options()
    if not dhcp['hostname']:
        # fall back to the configured system hostname
        # (maybe change to vyos hostd client ???)
        with open('/etc/hostname', 'r') as f:
            dhcp['hostname'] = f.read().rstrip('\n')

    # render the dhclient configuration file
    with open(self._dhcp_cfg_file, 'w') as f:
        f.write(jinja2.Template(dhcp_cfg).render(dhcp))

    # launch dhclient through start-stop-daemon so a pid file is kept
    cmd = ('start-stop-daemon --start --quiet --pidfile {} '
           '--exec /sbin/dhclient -- -4 -nw -cf {} -pf {} -lf {} {}'.format(
               self._dhcp_pid_file, self._dhcp_cfg_file, self._dhcp_pid_file,
               self._dhcp_lease_file, self.config['ifname']))
    return self._cmd(cmd)
def _del_dhcp(self):
"""
De-configure interface as DHCP clinet. All auto generated files like
pid, config and lease will be removed.
Example:
>>> from vyos.ifconfig import Interface
>>> j = Interface('eth0')
>>> j.del_dhcp()
"""
pid = 0
if os.path.isfile(self._dhcp_pid_file):
with open(self._dhcp_pid_file, 'r') as f:
pid = int(f.read())
else:
self._debug_msg('No DHCP client PID found')
return None
# stop dhclient, we need to call dhclient and tell it should release the
# aquired IP address. tcpdump tells me:
# 172.16.35.103.68 > 172.16.35.254.67: [bad udp cksum 0xa0cb -> 0xb943!] BOOTP/DHCP, Request from 00:50:56:9d:11:df, length 300, xid 0x620e6946, Flags [none] (0x0000)
# Client-IP 172.16.35.103
# Client-Ethernet-Address 00:50:56:9d:11:df
# Vendor-rfc1048 Extensions
# Magic Cookie 0x63825363
# DHCP-Message Option 53, length 1: Release
# Server-ID Option 54, length 4: 172.16.35.254
# Hostname Option 12, length 10: "vyos"
#
cmd = '/sbin/dhclient -cf {} -pf {} -lf {} -r {}'.format(
self._dhcp_cfg_file, self._dhcp_pid_file, self._dhcp_lease_file, self.config['ifname'])
self._cmd(cmd)
# cleanup old config file
if os.path.isfile(self._dhcp_cfg_file):
os.remove(self._dhcp_cfg_file)
# cleanup old pid file
if os.path.isfile(self._dhcp_pid_file):
os.remove(self._dhcp_pid_file)
# cleanup old lease file
if os.path.isfile(self._dhcp_lease_file):
os.remove(self._dhcp_lease_file)
def _set_dhcpv6(self):
    """
    Configure the interface as a DHCPv6 client; dhclient is automatically
    started in the background.

    Example:
    >>> from vyos.ifconfig import Interface
    >>> j = Interface('eth0')
    >>> j.set_dhcpv6()
    """
    dhcpv6 = self.get_dhcpv6_options()
    # better safe than sorry - should be checked in the interface script,
    # but if that was missed we are still safe!
    if dhcpv6['dhcpv6_prm_only'] and dhcpv6['dhcpv6_temporary']:
        raise Exception('DHCPv6 temporary and parameters-only options are mutually exclusive!')

    # render the dhclient (IPv6) configuration file
    with open(self._dhcpv6_cfg_file, 'w') as f:
        f.write(jinja2.Template(dhcpv6_cfg).render(dhcpv6))

    # no longer accept router announcements on this interface
    self._write_sysfs('/proc/sys/net/ipv6/conf/{}/accept_ra'
                      .format(self.config['ifname']), 0)

    # assemble command-line to start DHCPv6 client (dhclient)
    cmd = ('start-stop-daemon --start --quiet --pidfile {} '
           '--exec /sbin/dhclient -- -6 -nw -cf {} -pf {} -lf {}'.format(
               self._dhcpv6_pid_file, self._dhcpv6_cfg_file,
               self._dhcpv6_pid_file, self._dhcpv6_lease_file))
    # optional arguments
    if dhcpv6['dhcpv6_prm_only']:
        cmd += ' -S'
    if dhcpv6['dhcpv6_temporary']:
        cmd += ' -T'
    cmd += ' {}'.format(self.config['ifname'])
    return self._cmd(cmd)
def _del_dhcpv6(self):
"""
De-configure interface as DHCPv6 clinet. All auto generated files like
pid, config and lease will be removed.
Example:
>>> from vyos.ifconfig import Interface
>>> j = Interface('eth0')
>>> j.del_dhcpv6()
"""
pid = 0
if os.path.isfile(self._dhcpv6_pid_file):
with open(self._dhcpv6_pid_file, 'r') as f:
pid = int(f.read())
else:
self._debug_msg('No DHCPv6 client PID found')
return None
# stop dhclient
cmd = 'start-stop-daemon --stop --quiet --pidfile {}'.format(self._dhcpv6_pid_file)
self._cmd(cmd)
# accept router announcements on this interface
self._write_sysfs('/proc/sys/net/ipv6/conf/{}/accept_ra'
.format(self.config['ifname']), 1)
# cleanup old config file
if os.path.isfile(self._dhcpv6_cfg_file):
os.remove(self._dhcpv6_cfg_file)
# cleanup old pid file
if os.path.isfile(self._dhcpv6_pid_file):
os.remove(self._dhcpv6_pid_file)
# cleanup old lease file
if os.path.isfile(self._dhcpv6_lease_file):
os.remove(self._dhcpv6_lease_file)
def op_show_interface_stats(self):
    """Print RX/TX kernel counters of this interface in tabular form."""
    stats = self.get_interface_stats()
    rx = [['bytes', 'packets', 'errors', 'dropped', 'overrun', 'mcast'],
          [stats['rx_bytes'], stats['rx_packets'], stats['rx_errors'],
           stats['rx_dropped'], stats['rx_over_errors'], stats['multicast']]]
    tx = [['bytes', 'packets', 'errors', 'dropped', 'carrier', 'collisions'],
          [stats['tx_bytes'], stats['tx_packets'], stats['tx_errors'],
           stats['tx_dropped'], stats['tx_carrier_errors'], stats['collisions']]]
    output = "RX: \n"
    output += tabulate(rx, headers="firstrow", numalign="right", tablefmt="plain")
    output += "\n\nTX: \n"
    output += tabulate(tx, headers="firstrow", numalign="right", tablefmt="plain")
    # indent every line by one space for CLI display
    print(' '.join(('\n' + output.lstrip()).splitlines(True)))
def get_interface_stats(self):
    """
    Return a dict of kernel traffic counters (rx_bytes, tx_packets, ...)
    for this interface, read from /sys/class/net/<dev>/statistics.

    Raises KeyError when the configured ifname has no statistics folder.
    """
    interface_stats = dict()
    devices = [f for f in glob.glob("/sys/class/net/**/statistics")]
    for dev_path in devices:
        metrics = [f for f in glob.glob(dev_path + "/**")]
        dev = re.findall(r"/sys/class/net/(.*)/statistics", dev_path)[0]
        dev_dict = dict()
        for metric_path in metrics:
            metric = metric_path.replace(dev_path + "/", "")
            if isfile(metric_path):
                # BUGFIX: use a context manager so the file descriptor is
                # closed instead of leaked (was a bare open().read())
                with open(metric_path, 'r') as f:
                    data = f.read()[:-1]  # strip trailing newline
                dev_dict[metric] = int(data)
        interface_stats[dev] = dev_dict
    return interface_stats[self.config['ifname']]
|
from __future__ import absolute_import
from itertools import izip_longest, repeat
import logging
import time
import numbers
from threading import Lock, Thread, Event
from multiprocessing import Process, Queue as MPQueue, Event as MPEvent, Value
from Queue import Empty, Queue
import kafka
from kafka.common import (
FetchRequest,
OffsetRequest,
OffsetCommitRequest,
OffsetFetchRequest,
ConsumerFetchSizeTooSmall,
ConsumerNoMoreData,
BufferTooLargeError
)
from kafka.util import ReentrantTimer
log = logging.getLogger("kafka")

# auto-commit tuning: commit after this many consumed messages ...
AUTO_COMMIT_MSG_COUNT = 100
# ... or after this many milliseconds, whichever comes first
AUTO_COMMIT_INTERVAL = 5000

# seconds a blocking fetch waits when the caller gave no timeout
FETCH_DEFAULT_BLOCK_TIMEOUT = 1
# broker-side maximum wait (ms) for a fetch to accumulate min bytes
FETCH_MAX_WAIT_TIME = 100
# minimum number of bytes the broker should return for a fetch
FETCH_MIN_BYTES = 4096
# initial per-partition fetch buffer size (bytes); doubled on demand
FETCH_BUFFER_SIZE_BYTES = 262144
MAX_FETCH_BUFFER_SIZE_BYTES = 157286400 # 104857600(kafka socket.request.max.bytes) * 1.5

# how long the iterator waits for a message before giving up (seconds)
ITER_TIMEOUT_SECONDS = 60
# sleep between polls when no messages are available (seconds)
NO_MESSAGES_WAIT_TIME_SECONDS = 0.1
# upper bound (messages) of the internal fetch hand-off queue
MAX_QUEUE_SIZE = 10 * 1024
class FetchContext(object):
    """
    Context manager that temporarily adjusts a consumer's fetch
    parameters (max wait time / min bytes) for a single fetch call.
    """
    def __init__(self, consumer, block, timeout):
        self.consumer = consumer
        self.block = block

        if block:
            # fall back to the default block timeout, then convert s -> ms
            self.timeout = (timeout or FETCH_DEFAULT_BLOCK_TIMEOUT) * 1000

    def __enter__(self):
        """Remember the current fetch settings and apply blocking-mode values"""
        consumer = self.consumer
        self.orig_fetch_max_wait_time = consumer.fetch_max_wait_time
        self.orig_fetch_min_bytes = consumer.fetch_min_bytes

        if self.block:
            consumer.fetch_max_wait_time = self.timeout
            consumer.fetch_min_bytes = 1
        else:
            consumer.fetch_min_bytes = 0

    def __exit__(self, type, value, traceback):
        """Restore the consumer's original fetch settings"""
        self.consumer.fetch_max_wait_time = self.orig_fetch_max_wait_time
        self.consumer.fetch_min_bytes = self.orig_fetch_min_bytes
class Consumer(object):
    """
    Base class to be used by other consumers. Not to be used directly

    This base class provides logic for

    * initialization and fetching metadata of partitions
    * Auto-commit logic
    * APIs for fetching pending message count
    """
    def __init__(self, client, group, topic, partitions=None, auto_commit=True,
                 auto_commit_every_n=AUTO_COMMIT_MSG_COUNT,
                 auto_commit_every_t=AUTO_COMMIT_INTERVAL):

        self.client = client
        self.topic = topic
        self.group = group
        # make sure partition metadata for this topic is available
        self.client.load_metadata_for_topics(topic)
        # partition -> next offset to consume
        self.offsets = {}

        if not partitions:
            # default: consume every partition of the topic
            partitions = self.client.topic_partitions[topic]
        else:
            assert all(isinstance(x, numbers.Integral) for x in partitions)

        # Variables for handling offset commits
        self.commit_lock = Lock()
        self.commit_timer = None
        self.count_since_commit = 0
        self.auto_commit = auto_commit
        self.auto_commit_every_n = auto_commit_every_n
        self.auto_commit_every_t = auto_commit_every_t

        # Set up the auto-commit timer (fires every auto_commit_every_t ms)
        if auto_commit is True and auto_commit_every_t is not None:
            self.commit_timer = ReentrantTimer(auto_commit_every_t,
                                               self.commit)
            self.commit_timer.start()

        def get_or_init_offset_callback(resp):
            # treat an unknown topic/partition as offset 0 (fresh group)
            try:
                kafka.common.check_error(resp)
                return resp.offset
            except kafka.common.UnknownTopicOrPartitionError:
                return 0

        if auto_commit:
            # resume from the offsets stored for this consumer group
            for partition in partitions:
                req = OffsetFetchRequest(topic, partition)
                (offset,) = self.client.send_offset_fetch_request(group, [req],
                              callback=get_or_init_offset_callback,
                              fail_on_error=False)
                self.offsets[partition] = offset
        else:
            # no offset storage in use - start every partition at 0
            for partition in partitions:
                self.offsets[partition] = 0

    def commit(self, partitions=None):
        """
        Commit offsets for this consumer

        partitions: list of partitions to commit, default is to commit
                    all of them
        """

        # short circuit if nothing happened. This check is kept outside
        # to prevent un-necessarily acquiring a lock for checking the state
        if self.count_since_commit == 0:
            return

        with self.commit_lock:
            # Do this check again, just in case the state has changed
            # during the lock acquiring timeout
            if self.count_since_commit == 0:
                return

            reqs = []
            if not partitions:  # commit all partitions
                partitions = self.offsets.keys()

            for partition in partitions:
                offset = self.offsets[partition]
                log.info("Commit offset %d in SimpleConsumer: "
                         "group=%s, topic=%s, partition=%s" %
                         (offset, self.group, self.topic, partition))

                reqs.append(OffsetCommitRequest(self.topic, partition,
                                                offset, None))

            resps = self.client.send_offset_commit_request(self.group, reqs)
            for resp in resps:
                kafka.common.check_error(resp)

            self.count_since_commit = 0

    def _auto_commit(self):
        """
        Check if we have to commit based on number of messages and commit
        """

        # Check if we are supposed to do an auto-commit
        if not self.auto_commit or self.auto_commit_every_n is None:
            return

        if self.count_since_commit >= self.auto_commit_every_n:
            self.commit()

    def stop(self):
        # shut down the auto-commit timer and flush outstanding offsets
        if self.commit_timer is not None:
            self.commit_timer.stop()
            self.commit()

    def pending(self, partitions=None):
        """
        Gets the pending message count

        partitions: list of partitions to check for, default is to check all
        """
        if not partitions:
            partitions = self.offsets.keys()

        total = 0
        reqs = []

        for partition in partitions:
            # -1 / 1: ask the broker for the single latest (tail) offset
            reqs.append(OffsetRequest(self.topic, partition, -1, 1))

        resps = self.client.send_offset_request(reqs)
        for resp in resps:
            partition = resp.partition
            pending = resp.offsets[0]
            offset = self.offsets[partition]
            # the "- 1 if offset > 0" corrects for the tail offset pointing
            # one past the last message once something was consumed
            total += pending - offset - (1 if offset > 0 else 0)

        return total
class DefaultSimpleConsumerException(Exception):
    """Placeholder error a SimpleConsumer holds until a real fetch error is recorded."""
class SimpleConsumer(Consumer):
    """
    A simple consumer implementation that consumes all/specified partitions
    for a topic

    client: a connected KafkaClient
    group: a name for this consumer, used for offset storage and must be unique
    topic: the topic to consume
    partitions: An optional list of partitions to consume the data from
    auto_commit: default True. Whether or not to auto commit the offsets
    auto_commit_every_n: default 100. How many messages to consume
                         before a commit
    auto_commit_every_t: default 5000. How much time (in milliseconds) to
                         wait before commit
    fetch_size_bytes: number of bytes to request in a FetchRequest
    buffer_size: default FETCH_BUFFER_SIZE_BYTES (256 KiB). Initial number
                 of bytes to tell kafka we have available. This will be
                 doubled as needed, up to max_buffer_size.
    max_buffer_size: default MAX_FETCH_BUFFER_SIZE_BYTES (~150 MiB). Max
                     number of bytes to tell kafka we have available. None
                     means no limit (the module-level cap still applies).
    iter_timeout: default None. How much time (in seconds) to wait for a
                  message in the iterator before exiting. None means no
                  timeout, so it will wait forever.

    Auto commit details:
    If both auto_commit_every_n and auto_commit_every_t are set, they will
    reset one another when one is triggered. These triggers simply call the
    commit method on this class. A manual call to commit will also reset
    these triggers
    """
    def __init__(self, client, group, topic, auto_commit=True, partitions=None,
                 auto_commit_every_n=AUTO_COMMIT_MSG_COUNT,
                 auto_commit_every_t=AUTO_COMMIT_INTERVAL,
                 fetch_size_bytes=FETCH_MIN_BYTES,
                 buffer_size=FETCH_BUFFER_SIZE_BYTES,
                 max_buffer_size=MAX_FETCH_BUFFER_SIZE_BYTES,
                 iter_timeout=None):
        super(SimpleConsumer, self).__init__(
            client, group, topic,
            partitions=partitions,
            auto_commit=auto_commit,
            auto_commit_every_n=auto_commit_every_n,
            auto_commit_every_t=auto_commit_every_t)

        if max_buffer_size is not None and buffer_size > max_buffer_size:
            raise ValueError("buffer_size (%d) is greater than "
                             "max_buffer_size (%d)" %
                             (buffer_size, max_buffer_size))
        self.buffer_size = buffer_size
        self.max_buffer_size = max_buffer_size
        self.partition_info = False     # Do not return partition info in msgs
        self.fetch_max_wait_time = FETCH_MAX_WAIT_TIME
        self.fetch_min_bytes = fetch_size_bytes
        # fetch_offsets track what the background fetch thread has
        # requested; self.offsets track what was actually consumed
        self.fetch_offsets = self.offsets.copy()
        self.iter_timeout = iter_timeout
        # bounded hand-off queue between the fetch thread and consumers
        self.queue = Queue(maxsize=MAX_QUEUE_SIZE)
        self.should_fetch = Event()
        self.fetch_thread = Thread(target=self._fetch_loop)
        self.fetch_thread.daemon = True
        self.fetch_thread.start()
        # errors raised in the fetch thread are re-raised to the caller
        # from _get_message()
        self.got_error = False
        self.error = DefaultSimpleConsumerException()

    def __repr__(self):
        return '<SimpleConsumer group=%s, topic=%s, partitions=%s>' % \
            (self.group, self.topic, str(self.offsets.keys()))

    def provide_partition_info(self):
        """
        Indicates that partition info must be returned by the consumer
        """
        self.partition_info = True

    def seek(self, offset, whence, input_partition=None):
        """
        Alter the current offset in the consumer, similar to fseek

        offset: how much to modify the offset
        whence: where to modify it from
                0 is relative to the earliest available offset (head)
                1 is relative to the current offset
                2 is relative to the latest known offset (tail)
        input_partition: restrict the seek to this single partition
        """

        if whence == 1:  # relative to current position
            if input_partition is not None:
                self.offsets[input_partition] += offset
            else:
                for partition, _offset in self.offsets.items():
                    self.offsets[partition] = _offset + offset
        elif whence in (0, 2):  # relative to beginning or end
            # divide the request offset by number of partitions,
            # distribute the remainder evenly
            (delta, rem) = divmod(offset, len(self.offsets))
            deltas = {}
            if input_partition is not None:
                # we want this particular partition offset incremented
                deltas[input_partition] = offset
            else:
                for partition, r in izip_longest(self.offsets.keys(),
                                                 repeat(1, rem), fillvalue=0):
                    deltas[partition] = delta + r

            # -2 asks the broker for the earliest, -1 for the latest offset
            reqs = []
            if input_partition is not None:
                if whence == 0:
                    reqs.append(OffsetRequest(self.topic, input_partition, -2, 1))
                elif whence == 2:
                    reqs.append(OffsetRequest(self.topic, input_partition, -1, 1))
                else:
                    pass
            else:
                for partition in self.offsets.keys():
                    if whence == 0:
                        reqs.append(OffsetRequest(self.topic, partition, -2, 1))
                    elif whence == 2:
                        reqs.append(OffsetRequest(self.topic, partition, -1, 1))
                    else:
                        pass

            resps = self.client.send_offset_request(reqs)
            for resp in resps:
                self.offsets[resp.partition] = \
                    resp.offsets[0] + deltas[resp.partition]
        else:
            raise ValueError("Unexpected value for `whence`, %d" % whence)

        # Reset queue and fetch offsets since they are invalid
        self.fetch_offsets = self.offsets.copy()
        if self.auto_commit:
            self.count_since_commit += 1
            self.commit()
        self.queue = Queue(maxsize=MAX_QUEUE_SIZE)

    def get_messages(self, count=1, block=True, timeout=0.1):
        """
        Fetch the specified number of messages

        count: Indicates the maximum number of messages to be fetched
        block: If True, the API will block till some messages are fetched.
        timeout: If block is True, the function will block for the specified
                 time (in seconds) until count messages is fetched. If None,
                 it will block forever.
        """
        messages = []
        if timeout is not None:
            max_time = time.time() + timeout

        new_offsets = {}
        while count > 0 and (timeout is None or timeout > 0):
            # offsets are only applied once the whole batch was collected
            result = self._get_message(block, timeout, get_partition_info=True,
                                       update_offset=False)
            if result:
                partition, message = result
                if self.partition_info:
                    messages.append(result)
                else:
                    messages.append(message)
                new_offsets[partition] = message.offset + 1
                count -= 1
            else:
                # Ran out of messages for the last request.
                if not block:
                    # If we're not blocking, break.
                    break
                if timeout is not None:
                    # If we're blocking and have a timeout, reduce it to the
                    # appropriate value
                    timeout = max_time - time.time()

        # Update and commit offsets if necessary
        self.offsets.update(new_offsets)
        self.count_since_commit += len(messages)
        self._auto_commit()
        return messages

    def get_message(self, block=True, timeout=0.1, get_partition_info=None):
        return self._get_message(block, timeout, get_partition_info)

    def _get_message(self, block=True, timeout=0.1, get_partition_info=None,
                     update_offset=True):
        """
        If no messages can be fetched, returns None.
        If get_partition_info is None, it defaults to self.partition_info
        If get_partition_info is True, returns (partition, message)
        If get_partition_info is False, returns message
        """
        if self.got_error:
            # surface errors raised by the background fetch thread
            raise self.error

        try:
            # NOTE(review): the `block` argument is currently ignored here;
            # queue.get always blocks up to `timeout` - confirm whether
            # non-blocking behaviour is expected by callers
            partition, message = self.queue.get(timeout=timeout)

            if update_offset:
                # Update partition offset
                self.offsets[partition] = message.offset + 1

                # Count, check and commit messages if necessary
                self.count_since_commit += 1
                self._auto_commit()

            if get_partition_info is None:
                get_partition_info = self.partition_info
            if get_partition_info:
                return partition, message
            else:
                return message
        except Empty:
            return None

    def __iter__(self):
        if self.iter_timeout is None:
            timeout = ITER_TIMEOUT_SECONDS
        else:
            timeout = self.iter_timeout

        while True:
            message = self.get_message(True, timeout)
            if message:
                yield message
            elif self.iter_timeout is None:
                # We did not receive any message yet but we don't have a
                # timeout, so give up the CPU for a while before trying again
                time.sleep(NO_MESSAGES_WAIT_TIME_SECONDS)
            else:
                # Timed out waiting for a message
                break

    def stop(self):
        # flush offsets/timer first, then signal the fetch thread to exit
        super(SimpleConsumer, self).stop()
        self.should_fetch.set()

    def _fetch_loop(self):
        """Background thread body: fetch until stop() is signalled."""
        log.info("Starting fetch loop")
        while not self.should_fetch.is_set():
            try:
                self._fetch()
            except BufferTooLargeError as e:
                # this is a serious issue, bail out
                self.got_error = True
                self.error = e
                self.stop()
            except Exception as e:
                self.got_error = True
                self.error = e
                self.stop()
        log.info("Stopping fetch loop")

    def _fetch(self):
        # Create fetch request payloads for all the partitions
        requests = []
        partitions = self.fetch_offsets.keys()
        while partitions:
            for partition in partitions:
                requests.append(FetchRequest(self.topic,
                                             partition,
                                             self.fetch_offsets[partition],
                                             self.buffer_size))
            # Send request
            responses = self.client.send_fetch_request(
                requests,
                max_wait_time=int(self.fetch_max_wait_time),
                min_bytes=self.fetch_min_bytes)

            retry_partitions = set()
            for resp in responses:
                partition = resp.partition
                try:
                    for message in resp.messages:
                        # Put the message in our queue (blocks when full,
                        # providing back-pressure on the fetch thread)
                        self.queue.put((partition, message), block=True)
                        self.fetch_offsets[partition] = message.offset + 1
                except ConsumerFetchSizeTooSmall:
                    if (self.max_buffer_size is not None and
                            self.buffer_size == self.max_buffer_size):
                        log.error("Max fetch size %d too small",
                                  self.max_buffer_size)
                        raise
                    if self.max_buffer_size is None:
                        self.buffer_size *= 2
                        # although the client specified None for
                        # max_buffer_size, i.e. no limit, we still want an
                        # upper bound on how much the buffer may grow.
                        # If the buffer has exceeded the cap, bail out
                        if self.buffer_size > MAX_FETCH_BUFFER_SIZE_BYTES:
                            log.error('Message size exceeded maximum allowed of {0}'.format(MAX_FETCH_BUFFER_SIZE_BYTES))
                            log.error('Current buffer_size is: {0}'.format(self.buffer_size))
                            log.error('topic: {0}, partition: {1}, offset:{2}'.format(self.topic, partition, self.fetch_offsets[partition]))
                            old_offset = self.fetch_offsets[partition]
                            # skip the over-sized message entirely
                            self.seek(1, 1, input_partition=partition)
                            log.error('Incremented offset. New offset is: {0}'.format(self.offsets[partition]))
                            raise BufferTooLargeError(self.topic, partition, old_offset, self.offsets[partition])
                    else:
                        # BUGFIX: was max(), which jumped straight to
                        # max_buffer_size instead of doubling towards it
                        self.buffer_size = min(self.buffer_size * 2,
                                               self.max_buffer_size)
                        log.warn("Fetch size too small, increase to %d (2x) "
                                 "and retry", self.buffer_size)
                    retry_partitions.add(partition)
                except ConsumerNoMoreData as e:
                    log.debug("Iteration was ended by %r", e)
                    self.got_error = True
                    self.error = e
                    self.stop()
                except StopIteration:
                    # Stop iterating through this partition
                    log.debug("Done iterating over partition %s" % partition)
                except Exception as e:
                    self.got_error = True
                    self.error = e
                    self.stop()
            partitions = retry_partitions
def _mp_consume(client, group, topic, chunk, queue, start, exit, pause, size):
    """
    A child process worker which consumes messages based on the
    notifications given by the controller process

    NOTE: Ideally, this should have been a method inside the Consumer
    class. However, multiprocessing module has issues in windows. The
    functionality breaks unless this function is kept outside of a class

    client: KafkaClient copy; reinitialized so this child owns its own socket
    group, topic: consumer group and topic to consume from
    chunk: subset of partitions assigned to this worker
    queue: multiprocessing queue that (partition, message) pairs are put on
    start, exit, pause: controller-owned events driving the fetch lifecycle
    size: shared integer hinting how many messages the controller wants
    """
    # Make the child processes open separate socket connections
    client.reinit()

    # We will start consumers without auto-commit. Auto-commit will be
    # done by the master controller process.
    consumer = SimpleConsumer(client, group, topic,
                              partitions=chunk,
                              auto_commit=False,
                              auto_commit_every_n=None,
                              auto_commit_every_t=None)

    # Ensure that the consumer provides the partition information
    consumer.provide_partition_info()

    while True:
        # Wait till the controller indicates us to start consumption
        start.wait()

        # If we are asked to quit, do so
        if exit.is_set():
            break

        # Consume messages and add them to the queue. If the controller
        # indicates a specific number of messages, follow that advice
        count = 0

        message = consumer.get_message()
        if message:
            queue.put(message)
            count += 1

            # We have reached the required size. The controller might have
            # more than what he needs. Wait for a while.
            # Without this logic, it is possible that we run into a big
            # loop consuming all available messages before the controller
            # can reset the 'start' event
            if count == size.value:
                pause.wait()

        else:
            # In case we did not receive any message, give up the CPU for
            # a while before we try again
            time.sleep(NO_MESSAGES_WAIT_TIME_SECONDS)

    consumer.stop()
class MultiProcessConsumer(Consumer):
    """
    A consumer implementation that consumes partitions for a topic in
    parallel using multiple processes

    client: a connected KafkaClient
    group: a name for this consumer, used for offset storage and must be unique
    topic: the topic to consume

    auto_commit: default True. Whether or not to auto commit the offsets
    auto_commit_every_n: default 100. How many messages to consume
                         before a commit
    auto_commit_every_t: default 5000. How much time (in milliseconds) to
                         wait before commit
    num_procs: Number of processes to start for consuming messages.
               The available partitions will be divided among these processes
    partitions_per_proc: Number of partitions to be allocated per process
                         (overrides num_procs)

    Auto commit details:
    If both auto_commit_every_n and auto_commit_every_t are set, they will
    reset one another when one is triggered. These triggers simply call the
    commit method on this class. A manual call to commit will also reset
    these triggers
    """
    def __init__(self, client, group, topic, auto_commit=True,
                 auto_commit_every_n=AUTO_COMMIT_MSG_COUNT,
                 auto_commit_every_t=AUTO_COMMIT_INTERVAL,
                 num_procs=1, partitions_per_proc=0):

        # Initiate the base consumer class
        super(MultiProcessConsumer, self).__init__(
            client, group, topic,
            partitions=None,
            auto_commit=auto_commit,
            auto_commit_every_n=auto_commit_every_n,
            auto_commit_every_t=auto_commit_every_t)

        # Variables for managing and controlling the data flow from
        # consumer child process to master
        self.queue = MPQueue(1024)  # Child consumers dump messages into this
        self.start = MPEvent()      # Indicates the consumers to start fetch
        self.exit = MPEvent()       # Requests the consumers to shutdown
        self.pause = MPEvent()      # Requests the consumers to pause fetch
        self.size = Value('i', 0)   # Indicator of number of messages to fetch

        # NOTE(review): keys()/map()/filter() below rely on Python 2 list
        # semantics; under Python 3 these are lazy views/iterators — verify
        # before porting.
        partitions = self.offsets.keys()

        # If unspecified, start one consumer per partition
        # The logic below ensures that
        # * we do not cross the num_procs limit
        # * we have an even distribution of partitions among processes
        if not partitions_per_proc:
            partitions_per_proc = round(len(partitions) * 1.0 / num_procs)
            if partitions_per_proc < num_procs * 0.5:
                partitions_per_proc += 1

        # The final set of chunks
        chunker = lambda *x: [] + list(x)
        chunks = map(chunker, *[iter(partitions)] * int(partitions_per_proc))

        self.procs = []
        for chunk in chunks:
            # Drop the fill values the grouping idiom above may have added
            chunk = filter(lambda x: x is not None, chunk)
            args = (client.copy(),
                    group, topic, chunk,
                    self.queue, self.start, self.exit,
                    self.pause, self.size)

            proc = Process(target=_mp_consume, args=args)
            proc.daemon = True
            proc.start()
            self.procs.append(proc)

    def __repr__(self):
        return '<MultiProcessConsumer group=%s, topic=%s, consumers=%d>' % \
            (self.group, self.topic, len(self.procs))

    def stop(self):
        # Set exit and start off all waiting consumers
        self.exit.set()
        self.pause.set()
        self.start.set()

        for proc in self.procs:
            proc.join()
            proc.terminate()

        super(MultiProcessConsumer, self).stop()

    def __iter__(self):
        """
        Iterator to consume the messages available on this consumer
        """
        # Trigger the consumer procs to start off.
        # We will iterate till there are no more messages available
        self.size.value = 0
        self.pause.set()

        while True:
            self.start.set()
            try:
                # We will block for a small while so that the consumers get
                # a chance to run and put some messages in the queue
                # TODO: This is a hack and will make the consumer block for
                # at least one second. Need to find a better way of doing this
                partition, message = self.queue.get(block=True, timeout=1)
            except Empty:
                break

            # Count, check and commit messages if necessary
            self.offsets[partition] = message.offset + 1
            self.start.clear()
            self.count_since_commit += 1
            self._auto_commit()
            yield message

        self.start.clear()

    def get_messages(self, count=1, block=True, timeout=10):
        """
        Fetch the specified number of messages

        count: Indicates the maximum number of messages to be fetched
        block: If True, the API will block till some messages are fetched.
        timeout: If block is True, the function will block for the specified
                 time (in seconds) until count messages is fetched. If None,
                 it will block forever.
        """
        messages = []

        # Give a size hint to the consumers. Each consumer process will fetch
        # a maximum of "count" messages. This will fetch more messages than
        # necessary, but these will not be committed to kafka. Also, the extra
        # messages can be provided in subsequent runs
        self.size.value = count
        self.pause.clear()

        if timeout is not None:
            max_time = time.time() + timeout

        new_offsets = {}
        while count > 0 and (timeout is None or timeout > 0):
            # Trigger consumption only if the queue is empty
            # By doing this, we will ensure that consumers do not
            # go into overdrive and keep consuming thousands of
            # messages when the user might need only a few
            if self.queue.empty():
                self.start.set()

            try:
                partition, message = self.queue.get(block, timeout)
            except Empty:
                break

            messages.append(message)
            new_offsets[partition] = message.offset + 1
            count -= 1
            if timeout is not None:
                timeout = max_time - time.time()

        self.size.value = 0
        self.start.clear()
        self.pause.set()

        # Update and commit offsets if necessary
        self.offsets.update(new_offsets)
        self.count_since_commit += len(messages)
        self._auto_commit()

        return messages
# review comments
from __future__ import absolute_import
from itertools import izip_longest, repeat
import logging
import time
import numbers
from threading import Lock, Thread, Event
from multiprocessing import Process, Queue as MPQueue, Event as MPEvent, Value
from Queue import Empty, Queue
import kafka
from kafka.common import (
FetchRequest,
OffsetRequest,
OffsetCommitRequest,
OffsetFetchRequest,
ConsumerFetchSizeTooSmall,
ConsumerNoMoreData,
BufferTooLargeError
)
from kafka.util import ReentrantTimer
log = logging.getLogger("kafka")

AUTO_COMMIT_MSG_COUNT = 100   # messages consumed between auto-commits
AUTO_COMMIT_INTERVAL = 5000   # ms between timer-driven auto-commits

FETCH_DEFAULT_BLOCK_TIMEOUT = 1     # seconds; used by FetchContext when blocking
FETCH_MAX_WAIT_TIME = 100           # passed as max_wait_time to fetch requests
FETCH_MIN_BYTES = 4096              # default fetch_size_bytes for SimpleConsumer
FETCH_BUFFER_SIZE_BYTES = 262144    # initial fetch buffer (256 KiB); doubles as needed
MAX_FETCH_BUFFER_SIZE_BYTES = 157286400  # 104857600(kafka socket.request.max.bytes) * 1.5

ITER_TIMEOUT_SECONDS = 60             # default per-message wait in SimpleConsumer.__iter__
NO_MESSAGES_WAIT_TIME_SECONDS = 0.1   # CPU back-off when no messages are available
MAX_QUEUE_SIZE = 10 * 1024            # bound on SimpleConsumer's internal message queue
class FetchContext(object):
    """
    Context manager that temporarily swaps a consumer's fetch tuning
    (max wait time / min bytes) for the duration of one fetch, then
    restores the original values on exit.
    """
    def __init__(self, consumer, block, timeout):
        self.consumer = consumer
        self.block = block
        if block:
            # Fall back to the default blocking timeout, then convert
            # seconds -> milliseconds for the broker-side wait.
            self.timeout = (timeout or FETCH_DEFAULT_BLOCK_TIMEOUT) * 1000

    def __enter__(self):
        """Capture current fetch settings and install blocking-aware ones."""
        consumer = self.consumer
        self.orig_fetch_max_wait_time = consumer.fetch_max_wait_time
        self.orig_fetch_min_bytes = consumer.fetch_min_bytes
        if not self.block:
            # Non-blocking: return immediately even with zero bytes ready
            consumer.fetch_min_bytes = 0
        else:
            # Blocking: wait up to self.timeout ms for at least one byte
            consumer.fetch_max_wait_time = self.timeout
            consumer.fetch_min_bytes = 1

    def __exit__(self, type, value, traceback):
        """Restore the fetch settings captured on entry."""
        self.consumer.fetch_max_wait_time = self.orig_fetch_max_wait_time
        self.consumer.fetch_min_bytes = self.orig_fetch_min_bytes
class Consumer(object):
    """
    Base class to be used by other consumers. Not to be used directly

    This base class provides logic for

    * initialization and fetching metadata of partitions
    * Auto-commit logic
    * APIs for fetching pending message count
    """
    def __init__(self, client, group, topic, partitions=None, auto_commit=True,
                 auto_commit_every_n=AUTO_COMMIT_MSG_COUNT,
                 auto_commit_every_t=AUTO_COMMIT_INTERVAL):

        self.client = client
        self.topic = topic
        self.group = group
        # Refresh metadata so topic_partitions below is populated
        self.client.load_metadata_for_topics(topic)
        self.offsets = {}  # partition -> next offset to consume

        if not partitions:
            # Default to all known partitions for the topic
            partitions = self.client.topic_partitions[topic]
        else:
            assert all(isinstance(x, numbers.Integral) for x in partitions)

        # Variables for handling offset commits
        self.commit_lock = Lock()
        self.commit_timer = None
        self.count_since_commit = 0
        self.auto_commit = auto_commit
        self.auto_commit_every_n = auto_commit_every_n
        self.auto_commit_every_t = auto_commit_every_t

        # Set up the auto-commit timer
        if auto_commit is True and auto_commit_every_t is not None:
            self.commit_timer = ReentrantTimer(auto_commit_every_t,
                                               self.commit)
            self.commit_timer.start()

        def get_or_init_offset_callback(resp):
            # Treat an unknown topic/partition as "start from offset 0"
            try:
                kafka.common.check_error(resp)
                return resp.offset
            except kafka.common.UnknownTopicOrPartitionError:
                return 0

        if auto_commit:
            # Resume each partition from the group's last committed offset
            for partition in partitions:
                req = OffsetFetchRequest(topic, partition)
                (offset,) = self.client.send_offset_fetch_request(
                    group, [req],
                    callback=get_or_init_offset_callback,
                    fail_on_error=False)
                self.offsets[partition] = offset
        else:
            # Without auto-commit every partition starts at offset 0
            for partition in partitions:
                self.offsets[partition] = 0

    def commit(self, partitions=None):
        """
        Commit offsets for this consumer

        partitions: list of partitions to commit, default is to commit
                    all of them
        """
        # short circuit if nothing happened. This check is kept outside
        # to prevent un-necessarily acquiring a lock for checking the state
        if self.count_since_commit == 0:
            return

        with self.commit_lock:
            # Do this check again, just in case the state has changed
            # during the lock acquiring timeout
            if self.count_since_commit == 0:
                return

            reqs = []
            if not partitions:  # commit all partitions
                partitions = self.offsets.keys()

            for partition in partitions:
                offset = self.offsets[partition]
                log.info("Commit offset %d in SimpleConsumer: "
                         "group=%s, topic=%s, partition=%s" %
                         (offset, self.group, self.topic, partition))
                reqs.append(OffsetCommitRequest(self.topic, partition,
                                                offset, None))

            resps = self.client.send_offset_commit_request(self.group, reqs)
            for resp in resps:
                kafka.common.check_error(resp)

            self.count_since_commit = 0

    def _auto_commit(self):
        """
        Check if we have to commit based on number of messages and commit
        """
        # Check if we are supposed to do an auto-commit
        if not self.auto_commit or self.auto_commit_every_n is None:
            return

        if self.count_since_commit >= self.auto_commit_every_n:
            self.commit()

    def stop(self):
        # Flush any pending offsets before the commit timer goes away
        if self.commit_timer is not None:
            self.commit_timer.stop()
            self.commit()

    def pending(self, partitions=None):
        """
        Gets the pending message count

        partitions: list of partitions to check for, default is to check all
        """
        if not partitions:
            partitions = self.offsets.keys()

        total = 0
        reqs = []

        for partition in partitions:
            # (-1, 1): request the single latest offset of each partition
            reqs.append(OffsetRequest(self.topic, partition, -1, 1))

        resps = self.client.send_offset_request(reqs)
        for resp in resps:
            partition = resp.partition
            pending = resp.offsets[0]
            offset = self.offsets[partition]
            total += pending - offset - (1 if offset > 0 else 0)

        return total
class DefaultSimpleConsumerException(Exception):
    """Placeholder held in SimpleConsumer.error until a real failure is recorded."""
    pass
class SimpleConsumer(Consumer):
    """
    A simple consumer implementation that consumes all/specified partitions
    for a topic

    client: a connected KafkaClient
    group: a name for this consumer, used for offset storage and must be unique
    topic: the topic to consume
    partitions: An optional list of partitions to consume the data from
    auto_commit: default True. Whether or not to auto commit the offsets
    auto_commit_every_n: default 100. How many messages to consume
                         before a commit
    auto_commit_every_t: default 5000. How much time (in milliseconds) to
                         wait before commit
    fetch_size_bytes: number of bytes to request in a FetchRequest
    buffer_size: default 256K (FETCH_BUFFER_SIZE_BYTES). Initial number of
                 bytes to tell kafka we have available. This will double
                 as needed.
    max_buffer_size: default 150MB (MAX_FETCH_BUFFER_SIZE_BYTES). Max number
                     of bytes to tell kafka we have available. None means
                     no limit.
    iter_timeout: default None. How much time (in seconds) to wait for a
                  message in the iterator before exiting. None means no
                  timeout, so it will wait forever.
    skip_buffer_size_error: Skip over the error when the buffer size grows too large.
        i.e. BufferTooLargeError. Default: True (i.e. we will increment the
        offset by 1 when we encounter this)

    Auto commit details:
    If both auto_commit_every_n and auto_commit_every_t are set, they will
    reset one another when one is triggered. These triggers simply call the
    commit method on this class. A manual call to commit will also reset
    these triggers
    """
    def __init__(self, client, group, topic, auto_commit=True, partitions=None,
                 auto_commit_every_n=AUTO_COMMIT_MSG_COUNT,
                 auto_commit_every_t=AUTO_COMMIT_INTERVAL,
                 fetch_size_bytes=FETCH_MIN_BYTES,
                 buffer_size=FETCH_BUFFER_SIZE_BYTES,
                 max_buffer_size=MAX_FETCH_BUFFER_SIZE_BYTES,
                 iter_timeout=None,
                 skip_buffer_size_error=True):
        super(SimpleConsumer, self).__init__(
            client, group, topic,
            partitions=partitions,
            auto_commit=auto_commit,
            auto_commit_every_n=auto_commit_every_n,
            auto_commit_every_t=auto_commit_every_t)

        if max_buffer_size is not None and buffer_size > max_buffer_size:
            raise ValueError("buffer_size (%d) is greater than "
                             "max_buffer_size (%d)" %
                             (buffer_size, max_buffer_size))
        self.buffer_size = buffer_size
        self.max_buffer_size = max_buffer_size
        self.partition_info = False     # Do not return partition info in msgs
        self.fetch_max_wait_time = FETCH_MAX_WAIT_TIME
        self.fetch_min_bytes = fetch_size_bytes
        self.fetch_offsets = self.offsets.copy()
        self.iter_timeout = iter_timeout
        self.queue = Queue(maxsize=MAX_QUEUE_SIZE)

        # BUG FIX: error state must be fully initialized *before* the fetch
        # thread starts. Previously the thread was started first, so an
        # immediate failure in _fetch_loop could set got_error/error and
        # then be clobbered by these assignments.
        self.got_error = False
        self.skip_buffer_size_error = skip_buffer_size_error
        self.error = DefaultSimpleConsumerException()

        self.should_fetch = Event()
        self.fetch_thread = Thread(target=self._fetch_loop)
        self.fetch_thread.daemon = True
        self.fetch_thread.start()

    def __repr__(self):
        return '<SimpleConsumer group=%s, topic=%s, partitions=%s>' % \
            (self.group, self.topic, str(self.offsets.keys()))

    def provide_partition_info(self):
        """
        Indicates that partition info must be returned by the consumer
        """
        self.partition_info = True

    def seek(self, offset, whence, input_partition=None):
        """
        Alter the current offset in the consumer, similar to fseek

        offset: how much to modify the offset
        whence: where to modify it from
                0 is relative to the earliest available offset (head)
                1 is relative to the current offset
                2 is relative to the latest known offset (tail)
        input_partition: if given, restrict the seek to that partition
                         only; default is to move every partition
        """
        if whence == 1:  # relative to current position
            if input_partition is not None:
                self.offsets[input_partition] += offset
            else:
                for partition, _offset in self.offsets.items():
                    self.offsets[partition] = _offset + offset
        elif whence in (0, 2):  # relative to beginning or end
            # divide the request offset by number of partitions,
            # distribute the remained evenly
            (delta, rem) = divmod(offset, len(self.offsets))
            deltas = {}
            if input_partition is not None:
                # we want this particular partiton offset incremented
                deltas[input_partition] = offset
            else:
                for partition, r in izip_longest(self.offsets.keys(),
                                                 repeat(1, rem), fillvalue=0):
                    deltas[partition] = delta + r

            reqs = []
            # -2 asks for the earliest offset, -1 for the latest
            if input_partition is not None:
                if whence == 0:
                    reqs.append(OffsetRequest(self.topic, input_partition, -2, 1))
                elif whence == 2:
                    reqs.append(OffsetRequest(self.topic, input_partition, -1, 1))
                else:
                    pass
            else:
                for partition in self.offsets.keys():
                    if whence == 0:
                        reqs.append(OffsetRequest(self.topic, partition, -2, 1))
                    elif whence == 2:
                        reqs.append(OffsetRequest(self.topic, partition, -1, 1))
                    else:
                        pass

            resps = self.client.send_offset_request(reqs)
            for resp in resps:
                self.offsets[resp.partition] = \
                    resp.offsets[0] + deltas[resp.partition]
        else:
            raise ValueError("Unexpected value for `whence`, %d" % whence)

        # Reset queue and fetch offsets since they are invalid
        self.fetch_offsets = self.offsets.copy()
        if self.auto_commit:
            self.count_since_commit += 1
            self.commit()

        self.queue = Queue(maxsize=MAX_QUEUE_SIZE)

    def get_messages(self, count=1, block=True, timeout=0.1):
        """
        Fetch the specified number of messages

        count: Indicates the maximum number of messages to be fetched
        block: If True, the API will block till some messages are fetched.
        timeout: If block is True, the function will block for the specified
                 time (in seconds) until count messages is fetched. If None,
                 it will block forever.
        """
        messages = []
        if timeout is not None:
            max_time = time.time() + timeout

        # Offsets are only folded into self.offsets at the end so a
        # partially-failed run does not advance past unreturned messages.
        new_offsets = {}
        while count > 0 and (timeout is None or timeout > 0):
            result = self._get_message(block, timeout, get_partition_info=True,
                                       update_offset=False)
            if result:
                partition, message = result
                if self.partition_info:
                    messages.append(result)
                else:
                    messages.append(message)
                new_offsets[partition] = message.offset + 1
                count -= 1
            else:
                # Ran out of messages for the last request.
                if not block:
                    # If we're not blocking, break.
                    break
            if timeout is not None:
                # If we're blocking and have a timeout, reduce it to the
                # appropriate value
                timeout = max_time - time.time()

        # Update and commit offsets if necessary
        self.offsets.update(new_offsets)
        self.count_since_commit += len(messages)
        self._auto_commit()
        return messages

    def get_message(self, block=True, timeout=0.1, get_partition_info=None):
        """Fetch a single message (offset tracking enabled); see _get_message."""
        return self._get_message(block, timeout, get_partition_info)

    def _get_message(self, block=True, timeout=0.1, get_partition_info=None,
                     update_offset=True):
        """
        Pull one message off the internal queue.

        If no messages can be fetched, returns None.
        If get_partition_info is None, it defaults to self.partition_info
        If get_partition_info is True, returns (partition, message)
        If get_partition_info is False, returns message

        Raises whatever error the background fetch thread recorded.
        """
        if self.got_error:
            raise self.error

        try:
            partition, message = self.queue.get(timeout=timeout)

            if update_offset:
                # Update partition offset
                self.offsets[partition] = message.offset + 1

                # Count, check and commit messages if necessary
                self.count_since_commit += 1
                self._auto_commit()

            if get_partition_info is None:
                get_partition_info = self.partition_info
            if get_partition_info:
                return partition, message
            else:
                return message
        except Empty:
            return None

    def __iter__(self):
        if self.iter_timeout is None:
            timeout = ITER_TIMEOUT_SECONDS
        else:
            timeout = self.iter_timeout

        while True:
            message = self.get_message(True, timeout)
            if message:
                yield message
            elif self.iter_timeout is None:
                # We did not receive any message yet but we don't have a
                # timeout, so give up the CPU for a while before trying again
                time.sleep(NO_MESSAGES_WAIT_TIME_SECONDS)
            else:
                # Timed out waiting for a message
                break

    def stop(self):
        # Flush offsets/timer via the base class, then tell the fetch
        # thread to wind down.
        super(SimpleConsumer, self).stop()
        self.should_fetch.set()

    def _fetch_loop(self):
        """Background thread body: run _fetch until asked to stop."""
        log.info("Starting fetch loop")
        while not self.should_fetch.is_set():
            try:
                self._fetch()
            except BufferTooLargeError as e:
                # this is a serious issue, bail out
                self.got_error = True
                self.error = e
                self.stop()
            except Exception as e:
                self.got_error = True
                self.error = e
                self.stop()
        log.info("Stopping fetch loop")

    def _fetch(self):
        """One fetch pass over all partitions, retrying any partition whose
        buffer was too small with a doubled buffer size."""
        partitions = self.fetch_offsets.keys()
        while partitions:
            # BUG FIX: rebuild the request list on every retry pass.
            # Previously `requests` was created once outside the loop, so
            # retries re-sent the stale first-round requests as well.
            requests = []
            # Create fetch request payloads for all the partitions
            for partition in partitions:
                requests.append(FetchRequest(self.topic,
                                             partition,
                                             self.fetch_offsets[partition],
                                             self.buffer_size))
            # Send request
            responses = self.client.send_fetch_request(
                requests,
                max_wait_time=int(self.fetch_max_wait_time),
                min_bytes=self.fetch_min_bytes)

            retry_partitions = set()
            for resp in responses:
                partition = resp.partition
                try:
                    for message in resp.messages:
                        # Put the message in our queue
                        self.queue.put((partition, message), block=True)
                        self.fetch_offsets[partition] = message.offset + 1
                except ConsumerFetchSizeTooSmall:
                    if (self.max_buffer_size is not None and
                            self.buffer_size == self.max_buffer_size):
                        log.error("Max fetch size %d too small",
                                  self.max_buffer_size)
                        raise
                    if self.max_buffer_size is None:
                        self.buffer_size *= 2
                        # although the client has specifies None for max_buffer_size i.e. no limit
                        # we want to make sure we have an upper bound to how much it grows
                        # If the buffer has exceed the max, bail out
                        if self.skip_buffer_size_error:
                            if self.buffer_size > MAX_FETCH_BUFFER_SIZE_BYTES:
                                log.error('Message size exceeded maximum allowed of {0}'.format(MAX_FETCH_BUFFER_SIZE_BYTES))
                                log.error('Current buffer_size is: {0}'.format(self.buffer_size))
                                log.error('topic: {0}, partition: {1}, offset:{2}'.format(self.topic, partition, self.fetch_offsets[partition]))
                                old_offset = self.fetch_offsets[partition]
                                # Skip over the oversized message entirely
                                self.seek(1, 1, input_partition=partition)
                                log.error('Incremented offset. New offset is: {0}'.format(self.offsets[partition]))
                                raise BufferTooLargeError(self.topic, partition, old_offset, self.offsets[partition])
                    else:
                        # BUG FIX: double the buffer but never exceed the
                        # configured maximum. The previous max() jumped
                        # straight past max_buffer_size, defeating both the
                        # cap and the "== max_buffer_size" bail-out above.
                        self.buffer_size = min(self.buffer_size * 2,
                                               self.max_buffer_size)
                    log.warn("Fetch size too small, increase to %d (2x) "
                             "and retry", self.buffer_size)
                    retry_partitions.add(partition)
                except ConsumerNoMoreData as e:
                    log.debug("Iteration was ended by %r", e)
                    self.got_error = True
                    self.error = e
                    self.stop()
                except StopIteration:
                    # Stop iterating through this partition
                    log.debug("Done iterating over partition %s" % partition)
                except Exception as e:
                    self.got_error = True
                    self.error = e
                    self.stop()
            partitions = retry_partitions
def _mp_consume(client, group, topic, chunk, queue, start, exit, pause, size):
    """
    A child process worker which consumes messages based on the
    notifications given by the controller process

    NOTE: Ideally, this should have been a method inside the Consumer
    class. However, multiprocessing module has issues in windows. The
    functionality breaks unless this function is kept outside of a class

    client: KafkaClient copy; reinitialized so this child owns its own socket
    group, topic: consumer group and topic to consume from
    chunk: subset of partitions assigned to this worker
    queue: multiprocessing queue that (partition, message) pairs are put on
    start, exit, pause: controller-owned events driving the fetch lifecycle
    size: shared integer hinting how many messages the controller wants
    """
    # Make the child processes open separate socket connections
    client.reinit()

    # We will start consumers without auto-commit. Auto-commit will be
    # done by the master controller process.
    consumer = SimpleConsumer(client, group, topic,
                              partitions=chunk,
                              auto_commit=False,
                              auto_commit_every_n=None,
                              auto_commit_every_t=None)

    # Ensure that the consumer provides the partition information
    consumer.provide_partition_info()

    while True:
        # Wait till the controller indicates us to start consumption
        start.wait()

        # If we are asked to quit, do so
        if exit.is_set():
            break

        # Consume messages and add them to the queue. If the controller
        # indicates a specific number of messages, follow that advice
        count = 0

        message = consumer.get_message()
        if message:
            queue.put(message)
            count += 1

            # We have reached the required size. The controller might have
            # more than what he needs. Wait for a while.
            # Without this logic, it is possible that we run into a big
            # loop consuming all available messages before the controller
            # can reset the 'start' event
            if count == size.value:
                pause.wait()

        else:
            # In case we did not receive any message, give up the CPU for
            # a while before we try again
            time.sleep(NO_MESSAGES_WAIT_TIME_SECONDS)

    consumer.stop()
class MultiProcessConsumer(Consumer):
    """
    A consumer implementation that consumes partitions for a topic in
    parallel using multiple processes

    client: a connected KafkaClient
    group: a name for this consumer, used for offset storage and must be unique
    topic: the topic to consume

    auto_commit: default True. Whether or not to auto commit the offsets
    auto_commit_every_n: default 100. How many messages to consume
                         before a commit
    auto_commit_every_t: default 5000. How much time (in milliseconds) to
                         wait before commit
    num_procs: Number of processes to start for consuming messages.
               The available partitions will be divided among these processes
    partitions_per_proc: Number of partitions to be allocated per process
                         (overrides num_procs)

    Auto commit details:
    If both auto_commit_every_n and auto_commit_every_t are set, they will
    reset one another when one is triggered. These triggers simply call the
    commit method on this class. A manual call to commit will also reset
    these triggers
    """
    def __init__(self, client, group, topic, auto_commit=True,
                 auto_commit_every_n=AUTO_COMMIT_MSG_COUNT,
                 auto_commit_every_t=AUTO_COMMIT_INTERVAL,
                 num_procs=1, partitions_per_proc=0):

        # Initiate the base consumer class
        super(MultiProcessConsumer, self).__init__(
            client, group, topic,
            partitions=None,
            auto_commit=auto_commit,
            auto_commit_every_n=auto_commit_every_n,
            auto_commit_every_t=auto_commit_every_t)

        # Variables for managing and controlling the data flow from
        # consumer child process to master
        self.queue = MPQueue(1024)  # Child consumers dump messages into this
        self.start = MPEvent()      # Indicates the consumers to start fetch
        self.exit = MPEvent()       # Requests the consumers to shutdown
        self.pause = MPEvent()      # Requests the consumers to pause fetch
        self.size = Value('i', 0)   # Indicator of number of messages to fetch

        # NOTE(review): keys()/map()/filter() below rely on Python 2 list
        # semantics; under Python 3 these are lazy views/iterators — verify
        # before porting.
        partitions = self.offsets.keys()

        # If unspecified, start one consumer per partition
        # The logic below ensures that
        # * we do not cross the num_procs limit
        # * we have an even distribution of partitions among processes
        if not partitions_per_proc:
            partitions_per_proc = round(len(partitions) * 1.0 / num_procs)
            if partitions_per_proc < num_procs * 0.5:
                partitions_per_proc += 1

        # The final set of chunks
        chunker = lambda *x: [] + list(x)
        chunks = map(chunker, *[iter(partitions)] * int(partitions_per_proc))

        self.procs = []
        for chunk in chunks:
            # Drop the fill values the grouping idiom above may have added
            chunk = filter(lambda x: x is not None, chunk)
            args = (client.copy(),
                    group, topic, chunk,
                    self.queue, self.start, self.exit,
                    self.pause, self.size)

            proc = Process(target=_mp_consume, args=args)
            proc.daemon = True
            proc.start()
            self.procs.append(proc)

    def __repr__(self):
        return '<MultiProcessConsumer group=%s, topic=%s, consumers=%d>' % \
            (self.group, self.topic, len(self.procs))

    def stop(self):
        # Set exit and start off all waiting consumers
        self.exit.set()
        self.pause.set()
        self.start.set()

        for proc in self.procs:
            proc.join()
            proc.terminate()

        super(MultiProcessConsumer, self).stop()

    def __iter__(self):
        """
        Iterator to consume the messages available on this consumer
        """
        # Trigger the consumer procs to start off.
        # We will iterate till there are no more messages available
        self.size.value = 0
        self.pause.set()

        while True:
            self.start.set()
            try:
                # We will block for a small while so that the consumers get
                # a chance to run and put some messages in the queue
                # TODO: This is a hack and will make the consumer block for
                # at least one second. Need to find a better way of doing this
                partition, message = self.queue.get(block=True, timeout=1)
            except Empty:
                break

            # Count, check and commit messages if necessary
            self.offsets[partition] = message.offset + 1
            self.start.clear()
            self.count_since_commit += 1
            self._auto_commit()
            yield message

        self.start.clear()

    def get_messages(self, count=1, block=True, timeout=10):
        """
        Fetch the specified number of messages

        count: Indicates the maximum number of messages to be fetched
        block: If True, the API will block till some messages are fetched.
        timeout: If block is True, the function will block for the specified
                 time (in seconds) until count messages is fetched. If None,
                 it will block forever.
        """
        messages = []

        # Give a size hint to the consumers. Each consumer process will fetch
        # a maximum of "count" messages. This will fetch more messages than
        # necessary, but these will not be committed to kafka. Also, the extra
        # messages can be provided in subsequent runs
        self.size.value = count
        self.pause.clear()

        if timeout is not None:
            max_time = time.time() + timeout

        new_offsets = {}
        while count > 0 and (timeout is None or timeout > 0):
            # Trigger consumption only if the queue is empty
            # By doing this, we will ensure that consumers do not
            # go into overdrive and keep consuming thousands of
            # messages when the user might need only a few
            if self.queue.empty():
                self.start.set()

            try:
                partition, message = self.queue.get(block, timeout)
            except Empty:
                break

            messages.append(message)
            new_offsets[partition] = message.offset + 1
            count -= 1
            if timeout is not None:
                timeout = max_time - time.time()

        self.size.value = 0
        self.start.clear()
        self.pause.set()

        # Update and commit offsets if necessary
        self.offsets.update(new_offsets)
        self.count_since_commit += len(messages)
        self._auto_commit()

        return messages
# ---
from .base_settings import *

# Accept any Host header; host filtering is expected upstream of Django.
ALLOWED_HOSTS = ['*']

if os.getenv('AUTH', 'NONE') == 'SAML_MOCK':
    # Fake SAML attributes used by the mock auth backend in development.
    MOCK_SAML_ATTRIBUTES = {
        'uwnetid': ['jfaculty'],
        'affiliations': ['faculty', 'employee', 'member'],
        'eppn': ['jfacult@washington.edu'],
        'scopedAffiliations': [
            'employee@washington.edu', 'member@washington.edu'],
        'isMemberOf': ['u_test_group', 'u_test_another_group',
                       'u_acadev_panopto_support'],
    }

INSTALLED_APPS += [
    'django_prometheus',
    'compressor',
    'django.contrib.humanize',
    'userservice',
    'scheduler.apps.SchedulerConfig',
    'blti',
    'supporttools',
]

# Prometheus middleware must wrap the whole stack: Before first, After last,
# so request metrics cover every other middleware.
MIDDLEWARE = ['django_prometheus.middleware.PrometheusBeforeMiddleware'] +\
    MIDDLEWARE +\
    ['userservice.user.UserServiceMiddleware',
     'django_prometheus.middleware.PrometheusAfterMiddleware']

if os.getenv("ENV") != "localdev":
    # Use the instrumented postgres backend outside local development.
    DATABASES['default']['ENGINE'] = 'django_prometheus.db.backends.postgresql'

# django-compressor: compile/minify LESS and SCSS assets offline.
COMPRESS_ENABLED = True
COMPRESS_OFFLINE = True
COMPRESS_ROOT = '/static/'
COMPRESS_PRECOMPILERS = (
    ('text/less', 'lessc {infile} {outfile}'),
)
STATICFILES_FINDERS += (
    'compressor.finders.CompressorFinder',
    # 'django.contrib.staticfiles.finders.DefaultStorageFinder',
)
COMPRESS_PRECOMPILERS += (
    ('text/x-sass', 'pyscss {infile} > {outfile}'),
    ('text/x-scss', 'pyscss {infile} > {outfile}'),
)
COMPRESS_CSS_FILTERS = [
    'compressor.filters.css_default.CssAbsoluteFilter',
    'compressor.filters.cssmin.CSSMinFilter'
]
COMPRESS_JS_FILTERS = [
    'compressor.filters.jsmin.JSMinFilter',
]

USERSERVICE_VALIDATION_MODULE = "scheduler.userservice_validation.validate"

# All admin roles share a single UW group.
PANOPTO_ADMIN_GROUP = 'u_acadev_panopto_support'
RESTCLIENTS_ADMIN_GROUP = PANOPTO_ADMIN_GROUP
USERSERVICE_ADMIN_GROUP = PANOPTO_ADMIN_GROUP
AUTHZ_GROUP_BACKEND = 'authz_group.authz_implementation.uw_group_service.UWGroupService'

#if not os.getenv("ENV") == "localdev":
#    INSTALLED_APPS += ['rc_django',]
#    RESTCLIENTS_DAO_CACHE_CLASS = 'scheduler.cache.RestClientsCache'

RESTCLIENTS_DEFAULT_TIMEOUT = 3

SUPPORTTOOLS_PARENT_APP = 'Panopto'

#USERSERVICE_OVERRIDE_AUTH_MODULE = "scheduler.authorization.can_override_user"
#RESTCLIENTS_ADMIN_AUTH_MODULE = "scheduler.authorization.can_proxy_restclient"

DETECT_USER_AGENTS = {
    'is_tablet': False,
    'is_mobile': False,
    'is_desktop': True,
}

if os.getenv("ENV", "localdev") != "localdev":
    # Panopto API credentials come from the environment outside localdev.
    PANOPTO_API_USER = os.getenv('PANOPTO_API_USER')
    PANOPTO_API_APP_ID = os.getenv('PANOPTO_API_APP_ID')
    PANOPTO_API_TOKEN = os.getenv('PANOPTO_API_TOKEN')
    PANOPTO_SERVER = os.getenv('PANOPTO_SERVER')

# BLTI consumer key:secret pairs, e.g. LTI_CONSUMERS="key1=sec1,key2=sec2"
LTI_CONSUMERS = {k: v for k, v in [s.split('=') for s in os.getenv(
    "LTI_CONSUMERS", "").split(',') if len(s)]}

# BLTI session object encryption values
BLTI_AES_KEY = os.getenv('BLTI_AES_KEY', '').encode()
BLTI_AES_IV = os.getenv('BLTI_AES_IV', '').encode()

# DEBUG only in local development (idiom fix: direct boolean expression
# instead of `True if ... else False`).
DEBUG = os.getenv('ENV', 'localdev') == "localdev"
# settings comment
# Django settings for the Panopto scheduler app, layered on base_settings.
from .base_settings import *
ALLOWED_HOSTS = ['*']
# Mock SAML attributes for local development (AUTH=SAML_MOCK).
if os.getenv('AUTH', 'NONE') == 'SAML_MOCK':
    MOCK_SAML_ATTRIBUTES = {
        'uwnetid': ['jfaculty'],
        'affiliations': ['faculty', 'employee', 'member'],
        'eppn': ['jfacult@washington.edu'],
        'scopedAffiliations': [
            'employee@washington.edu', 'member@washington.edu'],
        'isMemberOf': ['u_test_group', 'u_test_another_group',
                       'u_acadev_panopto_support'],
    }
INSTALLED_APPS += [
    'django_prometheus',
    'compressor',
    'django.contrib.humanize',
    'userservice',
    'scheduler.apps.SchedulerConfig',
    'blti',
    'supporttools',
]
# Prometheus middleware must wrap the stack: Before first, After last.
MIDDLEWARE = ['django_prometheus.middleware.PrometheusBeforeMiddleware'] +\
    MIDDLEWARE +\
    ['userservice.user.UserServiceMiddleware',
     'django_prometheus.middleware.PrometheusAfterMiddleware']
# Use the prometheus-instrumented DB engine outside localdev.
if not os.getenv("ENV") == "localdev":
    DATABASES['default']['ENGINE'] = 'django_prometheus.db.backends.postgresql'
# --- django-compressor: compile LESS/SASS and minify CSS/JS ---------------
COMPRESS_ENABLED = True
COMPRESS_OFFLINE = True
COMPRESS_ROOT = '/static/'
COMPRESS_PRECOMPILERS = (
    ('text/less', 'lessc {infile} {outfile}'),
)
STATICFILES_FINDERS += (
    'compressor.finders.CompressorFinder',
    # 'django.contrib.staticfiles.finders.DefaultStorageFinder',
)
COMPRESS_PRECOMPILERS += (
    ('text/x-sass', 'pyscss {infile} > {outfile}'),
    ('text/x-scss', 'pyscss {infile} > {outfile}'),
)
COMPRESS_CSS_FILTERS = [
    'compressor.filters.css_default.CssAbsoluteFilter',
    'compressor.filters.cssmin.CSSMinFilter'
]
COMPRESS_JS_FILTERS = [
    'compressor.filters.jsmin.JSMinFilter',
]
# --- userservice / admin authorization ------------------------------------
USERSERVICE_VALIDATION_MODULE = "scheduler.userservice_validation.validate"
PANOPTO_ADMIN_GROUP = 'u_acadev_panopto_support'
# Admin tools share the single Panopto support group.
RESTCLIENTS_ADMIN_GROUP = PANOPTO_ADMIN_GROUP
USERSERVICE_ADMIN_GROUP = PANOPTO_ADMIN_GROUP
AUTHZ_GROUP_BACKEND = 'authz_group.authz_implementation.uw_group_service.UWGroupService'
#if not os.getenv("ENV") == "localdev":
#    INSTALLED_APPS += ['rc_django',]
#    RESTCLIENTS_DAO_CACHE_CLASS = 'scheduler.cache.RestClientsCache'
RESTCLIENTS_DEFAULT_TIMEOUT = 3
SUPPORTTOOLS_PARENT_APP = 'Panopto'
#USERSERVICE_OVERRIDE_AUTH_MODULE = "scheduler.authorization.can_override_user"
#RESTCLIENTS_ADMIN_AUTH_MODULE = "scheduler.authorization.can_proxy_restclient"
# Force desktop rendering regardless of the requesting user agent.
DETECT_USER_AGENTS = {
    'is_tablet': False,
    'is_mobile': False,
    'is_desktop': True,
}
# Panopto API credentials come from the environment outside localdev.
if not os.getenv("ENV", "localdev") == "localdev":
    PANOPTO_API_USER = os.getenv('PANOPTO_API_USER')
    PANOPTO_API_APP_ID = os.getenv('PANOPTO_API_APP_ID')
    PANOPTO_API_TOKEN = os.getenv('PANOPTO_API_TOKEN')
    PANOPTO_SERVER = os.getenv('PANOPTO_SERVER')
# BLTI consumer key:secret pairs in env as "k1=val1,k2=val2"
LTI_CONSUMERS = {k: v for k, v in [s.split('=') for s in os.getenv(
    "LTI_CONSUMERS", "").split(',') if len(s)]}
# BLTI session object encryption values
BLTI_AES_KEY = os.getenv('BLTI_AES_KEY', '').encode()
BLTI_AES_IV = os.getenv('BLTI_AES_IV', '').encode()
# Debug only in local development.
DEBUG = True if os.getenv('ENV', 'localdev') == "localdev" else False
|
#!/usr/bin/env python
# Author: Scott Haskell
# Company: Splunk Inc.
# Date: 2016-10-21
#
# The MIT License
#
# Copyright (c) 2016 Scott Haskell, Splunk Inc.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
#
from pykafka import KafkaClient
import httplib
from random import randint
from datetime import datetime
import re
def readFile(log_file):
    """Read a text file and return all of its lines.

    Args:
        log_file: path of the file to read.

    Returns:
        list of str: every line of the file, trailing newlines preserved.
    """
    # `with` guarantees the handle is closed even if readlines() raises,
    # fixing the leak in the open()/readlines()/close() sequence.
    with open(log_file, 'r') as fh:
        return fh.readlines()
def main():
    """Publish randomly-chosen NGINX access-log lines to Kafka forever.

    Reads ./access.log once, then loops: picks a random line, rewrites its
    timestamp to "now", and produces it to the 'nginx' topic.
    """
    access_log_file = './access.log'
    # strftime format matching the NGINX access-log timestamp field.
    dt_fmt = '%d/%b/%Y:%H:%M:%S'
    client = KafkaClient(hosts="172.17.0.2:9092,172.17.0.3:9093,172.17.0.4:9094")
    topic = client.topics['nginx']
    producer = topic.get_producer(use_rdkafka=True)
    access_logs = readFile(access_log_file)
    log_len = len(access_logs)
    # NOTE(review): if access.log is empty, randint(0, -1) raises on every
    # iteration and the loop just spins printing "failed" -- verify intended.
    while(True):
        try:
            rando = randint(0, log_len-1)
            rand_log = access_logs[rando]
            dtnow = datetime.now()
            new_time = dtnow.strftime(dt_fmt)
            # Replace the original '[dd/Mon/yyyy:HH:MM:SS' timestamp prefix
            # with the current time so replayed events look fresh.
            rand_log = re.sub(r'\[([\d\w:\/]+)', '[%s' % new_time, rand_log)
            producer.produce(rand_log)
        except Exception as e:
            # NOTE(review): broad except swallows every error and `e` is
            # unused, so failures are indistinguishable -- consider logging.
            print "failed"
            continue
if __name__ == '__main__':
    main()
updating with global config variables
#!/usr/bin/env python
# Author: Scott Haskell
# Company: Splunk Inc.
# Date: 2016-10-21
#
# The MIT License
#
# Copyright (c) 2016 Scott Haskell, Splunk Inc.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
#
from pykafka import KafkaClient
import httplib
from random import randint
from datetime import datetime
import re
# Update with your topic name.
TOPIC = "nginx"
# Update with your comma-separated list of Kafka brokers, each <host>:<port>.
KAFKA_HOSTS = "172.17.0.2:9092,172.17.0.3:9093,172.17.0.4:9094"
# Use rdkafka only if pykafka is built against librdkafka (faster C producer).
USE_RDKAFKA = True
def readFile(log_file):
    """Open `log_file` and return its contents as a list of lines.

    Trailing newlines are preserved on each returned line.
    """
    # `with` guarantees the handle is closed even if readlines() raises,
    # fixing the leak in the open()/readlines()/close() sequence.
    with open(log_file, 'r') as fh:
        return fh.readlines()
def main():
    """ Simple Kafka producer to publish random NGINX events from access.log for testing """
    access_log_file = './access.log'
    # strftime format matching the NGINX access-log timestamp field.
    dt_fmt = '%d/%b/%Y:%H:%M:%S'
    client = KafkaClient(hosts=KAFKA_HOSTS)
    topic = client.topics[TOPIC]
    producer = topic.get_producer(use_rdkafka=USE_RDKAFKA)
    access_logs = readFile(access_log_file)
    log_len = len(access_logs)
    # NOTE(review): if access.log is empty, randint(0, -1) raises on every
    # iteration and the loop just spins printing "failed" -- verify intended.
    while(True):
        try:
            rando = randint(0, log_len-1)
            rand_log = access_logs[rando]
            dtnow = datetime.now()
            new_time = dtnow.strftime(dt_fmt)
            # Replace the original '[dd/Mon/yyyy:HH:MM:SS' timestamp prefix
            # with the current time so replayed events look fresh.
            rand_log = re.sub(r'\[([\d\w:\/]+)', '[%s' % new_time, rand_log)
            producer.produce(rand_log)
        except Exception as e:
            # NOTE(review): broad except swallows every error and `e` is
            # unused, so failures are indistinguishable -- consider logging.
            print "failed"
            continue
if __name__ == '__main__':
    main()
|
import logging
from wordpress import WPException
from .config import WPPluginConfig
class WPPolylangConfig(WPPluginConfig):
    """Configurator for the Polylang plugin: languages, options and menus."""

    def configure(self, languages=None, default=None, **kwargs):
        """ kwargs:
        - `languages`, array: all languages to install
        - `default`, string: default language should be in `languages`
        If no default is provided, uses the first item of the array
        """
        # validate input (we keep en_GB instead of en_us to get UK flag, in admin)
        if not languages:
            languages = ["fr_FR", "en_GB"]
        if not default:
            default = languages[0]
        if default not in languages:
            raise WPException("Default language {} not found in list of supported languages {}".format(
                default, languages
            ))
        # install every requested language, flagging the default one
        for language in languages:
            is_default = int(language == default)
            added = self.run_wp_cli("polylang language add {} {}".format(language, is_default))
            if added:
                logging.info("%s - installed language %s %s", self.wp_site, language,
                             "[default]" if is_default else "")
            else:
                logging.warning("%s - could not install language %s", self.wp_site, language)
        # configure options
        logging.info("%s - setting polylang options ...", self.wp_site)
        self.run_wp_cli("pll option update media_support 0")
        # create menus
        logging.info("%s - creating polylang menu ...", self.wp_site)
        self.run_wp_cli("pll menu create Main top")
        self.run_wp_cli("pll menu create footer_nav footer_nav")
        # configure raw plugin
        super(WPPolylangConfig, self).configure()
Add taxonomy sync
import logging
from wordpress import WPException
from .config import WPPluginConfig
class WPPolylangConfig(WPPluginConfig):
    """Configurator for the Polylang plugin: languages, options, sync, menus."""

    def configure(self, languages=None, default=None, **kwargs):
        """ kwargs:
        - `languages`, array: all languages to install
        - `default`, string: default language should be in `languages`
        If no default is provided, uses the first item of the array
        """
        # validate input (we keep en_GB instead of en_us to get UK flag, in admin)
        if not languages:
            languages = ["fr_FR", "en_GB"]
        if not default:
            default = languages[0]
        if default not in languages:
            raise WPException("Default language {} not found in list of supported languages {}".format(
                default, languages
            ))
        # install every requested language, flagging the default one
        for language in languages:
            is_default = int(language == default)
            added = self.run_wp_cli("polylang language add {} {}".format(language, is_default))
            if added:
                logging.info("%s - installed language %s %s", self.wp_site, language,
                             "[default]" if is_default else "")
            else:
                logging.warning("%s - could not install language %s", self.wp_site, language)
        # configure options
        logging.info("%s - setting polylang options ...", self.wp_site)
        self.run_wp_cli("pll option update media_support 0")
        # Configure sync option
        logging.info("%s - configuring option sync ...", self.wp_site)
        self.run_wp_cli("pll option sync taxonomies")
        # create menus
        logging.info("%s - creating polylang menu ...", self.wp_site)
        self.run_wp_cli("pll menu create Main top")
        self.run_wp_cli("pll menu create footer_nav footer_nav")
        # configure raw plugin
        super(WPPolylangConfig, self).configure()
|
import os
from mimetypes import guess_type
try:
from cStringIO import StringIO
except ImportError:
from StringIO import StringIO
from django.conf import settings
from django.core.exceptions import ImproperlyConfigured
from django.core.files.storage import Storage, StorageFile
from django.utils.functional import curry
ACCESS_KEY_NAME = 'AWS_ACCESS_KEY_ID'
SECRET_KEY_NAME = 'AWS_SECRET_ACCESS_KEY'
AWS_HEADERS = 'AWS_HEADERS'
try:
from S3 import AWSAuthConnection, QueryStringAuthGenerator
except ImportError:
raise ImproperlyConfigured, "Could not load amazon's S3 bindings.\
\nSee http://developer.amazonwebservices.com/connect/entry.jspa?externalID=134"
class S3Storage(Storage):
    """Amazon Simple Storage Service

    Django storage backend that keeps files in a single S3 bucket using the
    legacy `S3` bindings. Credentials come from settings or the environment.
    """
    def __init__(self, bucket=settings.AWS_STORAGE_BUCKET_NAME,
                 access_key=None, secret_key=None, acl='public-read',
                 calling_format=settings.AWS_CALLING_FORMAT):
        # `acl` is sent as the canned x-amz-acl header on every upload.
        self.bucket = bucket
        self.acl = acl
        if not access_key and not secret_key:
            access_key, secret_key = self._get_access_keys()
        self.connection = AWSAuthConnection(access_key, secret_key,
                            calling_format=calling_format)
        # URL generator for public URLs; is_secure=False yields plain http.
        self.generator = QueryStringAuthGenerator(access_key, secret_key,
                            calling_format=calling_format, is_secure=False)
        # Extra headers applied to every upload (optional AWS_HEADERS setting).
        self.headers = getattr(settings, AWS_HEADERS, {})
    def _get_access_keys(self):
        """Return (access_key, secret_key) from settings, falling back to the
        environment; (None, None) when neither source provides both."""
        access_key = getattr(settings, ACCESS_KEY_NAME, None)
        secret_key = getattr(settings, SECRET_KEY_NAME, None)
        # If only one of the two came from settings, ignore settings entirely
        # and try the process environment instead.
        if (access_key or secret_key) and (not access_key or not secret_key):
            access_key = os.environ.get(ACCESS_KEY_NAME)
            secret_key = os.environ.get(SECRET_KEY_NAME)
        if access_key and secret_key:
            # Both were provided, so use them
            return access_key, secret_key
        return None, None
    def _get_connection(self):
        # Builds a fresh connection; self.connection is normally used instead.
        return AWSAuthConnection(*self._get_access_keys())
    def _put_file(self, name, content):
        """Upload `content` under key `name` with guessed Content-Type."""
        # NOTE(review): "application/x-octet-stream" is non-standard; the
        # registered default type is "application/octet-stream" -- confirm.
        content_type = guess_type(name)[0] or "application/x-octet-stream"
        # NOTE(review): mutates the shared self.headers dict on every call.
        self.headers.update({'x-amz-acl': self.acl, 'Content-Type': content_type})
        response = self.connection.put(self.bucket, name, content, self.headers)
    def _open(self, name, mode='rb'):
        """Fetch the object and wrap it in an in-memory S3StorageFile."""
        response = self.connection.get(self.bucket, name)
        # Curried writer lets the file flush itself back on close().
        writer = curry(self._put_file, name)
        #print response.object.data
        remote_file = S3StorageFile(response.object.data, mode, writer)
        remote_file.size = self.size(name)
        return remote_file
    def _save(self, name, content):
        # NOTE(review): stores under self.url(name) (the full URL) rather than
        # `name` itself, unlike _open/delete/exists -- verify intended.
        self._put_file(self.url(name), content.open())
    def delete(self, name):
        self.connection.delete(self.bucket, name)
    def exists(self, name):
        # HEAD request: a 200 status means the key exists.
        response = self.connection._make_request('HEAD', self.bucket, name)
        return response.status == 200
    def size(self, name):
        """Return the object size in bytes (0 when the header is absent)."""
        response = self.connection._make_request('HEAD', self.bucket, name)
        content_length = response.getheader('Content-Length')
        return content_length and int(content_length) or 0
    def url(self, name):
        """Public (non-query-string) URL for the object."""
        return self.generator.make_bare_url(self.bucket, name)
    ## UNCOMMENT BELOW IF NECESSARY
    #def get_available_name(self, name):
    #    """ Overwrite existing file with the same name. """
    #    return name
class S3StorageFile(StorageFile):
    """In-memory, file-like wrapper around the bytes of one S3 object.

    Writes replace the whole buffer and mark it dirty; close() pushes a
    dirty buffer back to storage through the writer callback.
    """
    def __init__(self, data, mode, writer):
        # `writer` is the curried S3Storage._put_file bound to this key.
        self._write_to_storage = writer
        self._mode = mode
        self._is_dirty = False
        self.file = StringIO(data)

    def read(self, num_bytes=None):
        # Always returns the entire buffer; `num_bytes` is accepted only for
        # file-API compatibility and is ignored.
        return self.file.getvalue()

    def write(self, content):
        # Writing is only legal when the file was opened in a write mode.
        if 'w' not in self._mode:
            raise AttributeError("File was opened for read-only access.")
        self.file = StringIO(content)
        self._is_dirty = True

    def close(self):
        # Flush pending writes back to storage before releasing the buffer.
        if self._is_dirty:
            self._write_to_storage(self.file.getvalue())
        self.file.close()
S3: sync imports with the merged patch in django trunk, yay!
import os
from mimetypes import guess_type
try:
from cStringIO import StringIO
except ImportError:
from StringIO import StringIO
from django.conf import settings
from django.core.exceptions import ImproperlyConfigured
from django.core.files.base import File
from django.core.files.storage import Storage
from django.utils.functional import curry
ACCESS_KEY_NAME = 'AWS_ACCESS_KEY_ID'
SECRET_KEY_NAME = 'AWS_SECRET_ACCESS_KEY'
AWS_HEADERS = 'AWS_HEADERS'
try:
from S3 import AWSAuthConnection, QueryStringAuthGenerator
except ImportError:
raise ImproperlyConfigured, "Could not load amazon's S3 bindings.\
\nSee http://developer.amazonwebservices.com/connect/entry.jspa?externalID=134"
class S3Storage(Storage):
    """Amazon Simple Storage Service

    Django storage backend that keeps files in a single S3 bucket using the
    legacy `S3` bindings. Credentials come from settings or the environment.
    """
    def __init__(self, bucket=settings.AWS_STORAGE_BUCKET_NAME,
                 access_key=None, secret_key=None, acl='public-read',
                 calling_format=settings.AWS_CALLING_FORMAT):
        # `acl` is sent as the canned x-amz-acl header on every upload.
        self.bucket = bucket
        self.acl = acl
        if not access_key and not secret_key:
            access_key, secret_key = self._get_access_keys()
        self.connection = AWSAuthConnection(access_key, secret_key,
                            calling_format=calling_format)
        # URL generator for public URLs; is_secure=False yields plain http.
        self.generator = QueryStringAuthGenerator(access_key, secret_key,
                            calling_format=calling_format, is_secure=False)
        # Extra headers applied to every upload (optional AWS_HEADERS setting).
        self.headers = getattr(settings, AWS_HEADERS, {})
    def _get_access_keys(self):
        """Return (access_key, secret_key) from settings, falling back to the
        environment; (None, None) when neither source provides both."""
        access_key = getattr(settings, ACCESS_KEY_NAME, None)
        secret_key = getattr(settings, SECRET_KEY_NAME, None)
        # If only one of the two came from settings, ignore settings entirely
        # and try the process environment instead.
        if (access_key or secret_key) and (not access_key or not secret_key):
            access_key = os.environ.get(ACCESS_KEY_NAME)
            secret_key = os.environ.get(SECRET_KEY_NAME)
        if access_key and secret_key:
            # Both were provided, so use them
            return access_key, secret_key
        return None, None
    def _get_connection(self):
        # Builds a fresh connection; self.connection is normally used instead.
        return AWSAuthConnection(*self._get_access_keys())
    def _put_file(self, name, content):
        """Upload `content` under key `name` with guessed Content-Type."""
        # NOTE(review): "application/x-octet-stream" is non-standard; the
        # registered default type is "application/octet-stream" -- confirm.
        content_type = guess_type(name)[0] or "application/x-octet-stream"
        # NOTE(review): mutates the shared self.headers dict on every call.
        self.headers.update({'x-amz-acl': self.acl, 'Content-Type': content_type})
        response = self.connection.put(self.bucket, name, content, self.headers)
    def _open(self, name, mode='rb'):
        """Fetch the object and wrap it in an in-memory S3StorageFile."""
        response = self.connection.get(self.bucket, name)
        # Curried writer lets the file flush itself back on close().
        writer = curry(self._put_file, name)
        #print response.object.data
        remote_file = S3StorageFile(response.object.data, mode, writer)
        remote_file.size = self.size(name)
        return remote_file
    def _save(self, name, content):
        # NOTE(review): stores under self.url(name) (the full URL) rather than
        # `name` itself, unlike _open/delete/exists -- verify intended.
        self._put_file(self.url(name), content.open())
    def delete(self, name):
        self.connection.delete(self.bucket, name)
    def exists(self, name):
        # HEAD request: a 200 status means the key exists.
        response = self.connection._make_request('HEAD', self.bucket, name)
        return response.status == 200
    def size(self, name):
        """Return the object size in bytes (0 when the header is absent)."""
        response = self.connection._make_request('HEAD', self.bucket, name)
        content_length = response.getheader('Content-Length')
        return content_length and int(content_length) or 0
    def url(self, name):
        """Public (non-query-string) URL for the object."""
        return self.generator.make_bare_url(self.bucket, name)
    ## UNCOMMENT BELOW IF NECESSARY
    #def get_available_name(self, name):
    #    """ Overwrite existing file with the same name. """
    #    return name
class S3StorageFile(File):
    """In-memory, file-like wrapper around the bytes of one S3 object.

    Writes replace the whole buffer and mark it dirty; close() pushes a
    dirty buffer back to storage through the writer callback.
    """
    def __init__(self, data, mode, writer):
        # `writer` is the curried S3Storage._put_file bound to this key.
        self._write_to_storage = writer
        self._mode = mode
        self._is_dirty = False
        self.file = StringIO(data)

    def read(self, num_bytes=None):
        # Always returns the entire buffer; `num_bytes` is accepted only for
        # file-API compatibility and is ignored.
        return self.file.getvalue()

    def write(self, content):
        # Writing is only legal when the file was opened in a write mode.
        if 'w' not in self._mode:
            raise AttributeError("File was opened for read-only access.")
        self.file = StringIO(content)
        self._is_dirty = True

    def close(self):
        # Flush pending writes back to storage before releasing the buffer.
        if self._is_dirty:
            self._write_to_storage(self.file.getvalue())
        self.file.close()
|
import re
import ipaddress
class VLAN(object):
    """
    Represents a 802.1Q VLAN, managed by issuing CLI commands on an HP
    switch through `switch.execute_command`.
    """

    # Matches one " ip address <addr> <netmask>" line of `show run vlan`
    # output; address and netmask are dotted quads of 1-3 digit octets.
    # Raw strings are used throughout so `\d`/`\s` are regex escapes, not
    # (invalid, SyntaxWarning-producing) string escapes; the pattern is
    # compiled once at class creation instead of per call.
    _IPV4_ADDRESS_RE = re.compile(
        r"^ ip address "
        r"(?P<address>\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3})"
        r" "
        r"(?P<netmask>\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3})"
        r"\s*$",
        re.MULTILINE)

    def __init__(self, switch, vid):
        """
        Constructs a new VLAN and retrieves its attributes from the given `switch` by using the given VID.
        """
        self.vid = vid
        self.switch = switch

    def _get_name(self):
        """
        The name configured for the VLAN.
        """
        run_output = self.switch.execute_command("show run vlan " + str(self.vid))
        # Extract the quoted VLAN name, which may contain spaces. The `name`
        # group is matched non-greedily so it does not "eat up" the trailing
        # whitespace, which is not part of the name.
        name_match = re.search(r"^ name \"(?P<name>.*?)\"\s*$", run_output, re.MULTILINE)
        return name_match.group('name')

    def _set_name(self, value):
        # Make sure that the name is legal according to the allowed VLAN names detailed in section 1-40 of the HP
        # Advanced Traffic Management Guide.
        # NOTE(review): assert is stripped under `python -O`; kept for
        # interface compatibility (callers may expect AssertionError).
        assert(all(map(lambda illegal_char: illegal_char not in value, "\"\'@#$^&*")))
        # Issue the commands on the switch to set the new name.
        self.switch.execute_command("config")
        # Pass the name to the switch wrapped in quotes because the name could contain spaces.
        self.switch.execute_command("vlan {0} name \"{1}\"".format(self.vid, value))
        self.switch.execute_command("exit")
        # Update the internally-cached attribute with the newly-set value.
        # NOTE(review): `_name` is written here but _get_name always queries
        # the switch -- this cache appears unused.
        self._name = value
    name = property(_get_name, _set_name)

    def _get_ipv4_interfaces(self):
        """
        Get the IPv4 addresses configured, together with their netmasks called "interfaces" configured on this VLAN.
        """
        run_output = self.switch.execute_command("show run vlan " + str(self.vid))
        interfaces = []
        for match in self._IPV4_ADDRESS_RE.finditer(run_output):
            # `ipaddress` accepts the "addr/netmask" notation directly.
            interfaces.append(ipaddress.IPv4Interface(match.group('address') + '/' + match.group('netmask')))
        return interfaces
    ipv4_interfaces = property(_get_ipv4_interfaces)

    def add_ipv4_interface(self, interface):
        """
        Add the given IPv4 interface to the VLAN.
        """
        self.switch.execute_command('config')
        add_output = self.switch.execute_command('vlan {0} ip address {1}'.format(self.vid, interface.with_prefixlen))
        self.switch.execute_command('exit')
        # HP switches seem to be somewhat picky about the IPv4 addresses they like to assign to interfaces. For example,
        # running `vlan 1001 ip address 192.168.1.1/32` results in the output `192.168.1.1/32: bad IP address.`.
        # Therefore, we try to catch the worst things that could happen here.
        if "bad IP address" in add_output:
            raise Exception("IPv4 address {0} deemed \"bad\" by switch.".format(interface.with_prefixlen))
        # Check if configuring the interface failed because the interface that we thought would not yet be configured on
        # this VLAN was already configured on the switch.
        if add_output == "The IP address (or subnet) {0} already exists.".format(interface.with_prefixlen):
            raise Exception("The IPv4 interface {0} could not be configured because it was already configured for " \
                            "this VLAN.".format(interface.with_prefixlen))

    def remove_ipv4_interface(self, interface):
        """
        Remove the given IPv4 interface from the VLAN.
        """
        self.switch.execute_command('config')
        remove_output = self.switch.execute_command('no vlan {0} ip address {1}'.format(self.vid, interface.with_prefixlen))
        self.switch.execute_command('exit')
        # Check if the interface that we thought would be configured on this VLAN was successfully removed or if it
        # didn't even exist and our `ipv4_interfaces` list was inconsistent.
        if remove_output == "The IP address {0} is not configured on this VLAN.".format(interface.with_prefixlen):
            raise Exception("The IPv4 interface {0} could not be removed because it is not configured on this " \
                            "VLAN.".format(interface.with_prefixlen))
Add better type checking when adding IPv4 addresses
import re
import ipaddress
class VLAN(object):
    """
    Represents a 802.1Q VLAN, managed by issuing CLI commands on an HP
    switch through `switch.execute_command`.
    """

    # Matches one " ip address <addr> <netmask>" line of `show run vlan`
    # output; address and netmask are dotted quads of 1-3 digit octets.
    # Raw strings are used throughout so `\d`/`\s` are regex escapes, not
    # (invalid, SyntaxWarning-producing) string escapes; the pattern is
    # compiled once at class creation instead of per call.
    _IPV4_ADDRESS_RE = re.compile(
        r"^ ip address "
        r"(?P<address>\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3})"
        r" "
        r"(?P<netmask>\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3})"
        r"\s*$",
        re.MULTILINE)

    def __init__(self, switch, vid):
        """
        Constructs a new VLAN and retrieves its attributes from the given `switch` by using the given VID.
        """
        self.vid = vid
        self.switch = switch

    def _get_name(self):
        """
        The name configured for the VLAN.
        """
        run_output = self.switch.execute_command("show run vlan " + str(self.vid))
        # Extract the quoted VLAN name, which may contain spaces. The `name`
        # group is matched non-greedily so it does not "eat up" the trailing
        # whitespace, which is not part of the name.
        name_match = re.search(r"^ name \"(?P<name>.*?)\"\s*$", run_output, re.MULTILINE)
        return name_match.group('name')

    def _set_name(self, value):
        # Make sure that the name is legal according to the allowed VLAN names detailed in section 1-40 of the HP
        # Advanced Traffic Management Guide.
        # NOTE(review): assert is stripped under `python -O`; kept for
        # interface compatibility (callers may expect AssertionError).
        assert(all(map(lambda illegal_char: illegal_char not in value, "\"\'@#$^&*")))
        # Issue the commands on the switch to set the new name.
        self.switch.execute_command("config")
        # Pass the name to the switch wrapped in quotes because the name could contain spaces.
        self.switch.execute_command("vlan {0} name \"{1}\"".format(self.vid, value))
        self.switch.execute_command("exit")
        # Update the internally-cached attribute with the newly-set value.
        # NOTE(review): `_name` is written here but _get_name always queries
        # the switch -- this cache appears unused.
        self._name = value
    name = property(_get_name, _set_name)

    def _get_ipv4_interfaces(self):
        """
        Get the IPv4 addresses configured, together with their netmasks called "interfaces" configured on this VLAN.
        """
        run_output = self.switch.execute_command("show run vlan " + str(self.vid))
        interfaces = []
        for match in self._IPV4_ADDRESS_RE.finditer(run_output):
            # `ipaddress` accepts the "addr/netmask" notation directly.
            interfaces.append(ipaddress.IPv4Interface(match.group('address') + '/' + match.group('netmask')))
        return interfaces
    ipv4_interfaces = property(_get_ipv4_interfaces)

    def add_ipv4_interface(self, interface):
        """
        Add the given IPv4 interface to the VLAN.

        Raises TypeError for non-IPv4Interface arguments.
        """
        # isinstance (rather than an exact `type(...) is` check) also accepts
        # IPv4Interface subclasses, per the usual Python idiom.
        if not isinstance(interface, ipaddress.IPv4Interface):
            raise TypeError("The given interface to configure is not an ipaddress.IPv4Interface.")
        self.switch.execute_command('config')
        add_output = self.switch.execute_command('vlan {0} ip address {1}'.format(self.vid, interface.with_prefixlen))
        self.switch.execute_command('exit')
        # HP switches seem to be somewhat picky about the IPv4 addresses they like to assign to interfaces. For example,
        # running `vlan 1001 ip address 192.168.1.1/32` results in the output `192.168.1.1/32: bad IP address.`.
        # Therefore, we try to catch the worst things that could happen here.
        if "bad IP address" in add_output:
            raise Exception("IPv4 address {0} deemed \"bad\" by switch.".format(interface.with_prefixlen))
        # Check if configuring the interface failed because the interface that we thought would not yet be configured on
        # this VLAN was already configured on the switch.
        if add_output == "The IP address (or subnet) {0} already exists.".format(interface.with_prefixlen):
            raise Exception("The IPv4 interface {0} could not be configured because it was already configured for " \
                            "this VLAN.".format(interface.with_prefixlen))

    def remove_ipv4_interface(self, interface):
        """
        Remove the given IPv4 interface from the VLAN.

        Raises TypeError for non-IPv4Interface arguments.
        """
        if not isinstance(interface, ipaddress.IPv4Interface):
            raise TypeError("The given interface to remove is not an ipaddress.IPv4Interface.")
        self.switch.execute_command('config')
        remove_output = self.switch.execute_command('no vlan {0} ip address {1}'.format(self.vid, interface.with_prefixlen))
        self.switch.execute_command('exit')
        # Check if the interface that we thought would be configured on this VLAN was successfully removed or if it
        # didn't even exist and our `ipv4_interfaces` list was inconsistent.
        if remove_output == "The IP address {0} is not configured on this VLAN.".format(interface.with_prefixlen):
            raise Exception("The IPv4 interface {0} could not be removed because it is not configured on this " \
                            "VLAN.".format(interface.with_prefixlen))
|
#!/usr/bin/env python
"""This file starts the game"""
import pygame
from pgu import gui, timer
class DrawingArea(gui.Widget):
    """A pgu widget that displays a cached snapshot of the game rendering."""

    def __init__(self, width, height):
        gui.Widget.__init__(self, width=width, height=height)
        # Off-screen buffer holding the most recent captured frame.
        self.imageBuffer = pygame.Surface((width, height))

    def paint(self, surf):
        """Blit the buffered snapshot onto the widget surface."""
        surf.blit(self.imageBuffer, (0, 0))

    def save_background(self):
        """Capture whatever the display currently shows over this widget."""
        screen = pygame.display.get_surface()
        self.imageBuffer.blit(screen, self.get_abs_rect())
class TestDialog(gui.Dialog):
    """A minimal modal dialog used to demonstrate pausing the game."""

    def __init__(self):
        heading = gui.Label("Some Dialog Box")
        body = gui.Label("Close self window to resume.")
        gui.Dialog.__init__(self, heading, body)
"""Describes all of the buttons and related widgets. This is
where pgu comes in."""
class MainGui(gui.Desktop):
    """Top-level pgu desktop: a game drawing area stacked over a menu area."""
    gameAreaHeight = 500
    gameArea = None
    menuArea = None
    # The game engine
    engine = None
    def __init__(self, disp):
        gui.Desktop.__init__(self)
        container = gui.Container()
        # Setup the 'game' area where the action takes place
        self.gameArea = DrawingArea(disp.get_width(),
                                    self.gameAreaHeight)
        # Setup the gui area
        self.menuArea = gui.Container(
            height=disp.get_height()-self.gameAreaHeight)
        tbl = gui.Table(height=disp.get_height())
        tbl.tr()
        tbl.td(self.gameArea)
        tbl.tr()
        tbl.td(self.menuArea)
        #self.setup_menu(container)
        # NOTE(review): call-site import; human_player.App populates the menu
        # area as a construction side effect -- confirm intended.
        import human_player
        human_player.App(self.menuArea)
        container.add(tbl,0,0)
        self.init(container, disp)
    def setup_menu(self):
        """Build the demo menu: dialog button, pause button, speed slider.

        NOTE(review): currently unused -- the call in __init__ is commented
        out (and passes an argument this signature does not accept).
        """
        tbl = gui.Table(vpadding=5, hpadding=2)
        tbl.tr()
        dlg = TestDialog()
        def dialog_cb():
            dlg.open()
        btn = gui.Button("Modal dialog", height=50)
        btn.connect(gui.CLICK, dialog_cb)
        tbl.td(btn)
        # Add a button for pausing / resuming the game clock
        def pause_cb():
            if (self.engine.clock.paused):
                self.engine.resume()
            else:
                self.engine.pause()
        btn = gui.Button("Pause/resume clock", height=50)
        btn.connect(gui.CLICK, pause_cb)
        tbl.td(btn)
        # Add a slider for adjusting the game clock speed
        tbl2 = gui.Table()
        timeLabel = gui.Label("Clock speed")
        tbl2.tr()
        tbl2.td(timeLabel)
        slider = gui.HSlider(value=23,min=0,max=100,size=20,height=16,width=120)
        def update_speed():
            # Slider value 0-100 maps to a clock speed of 0.0-10.0.
            self.engine.clock.set_speed(slider.value/10.0)
        slider.connect(gui.CHANGE, update_speed)
        tbl2.tr()
        tbl2.td(slider)
        tbl.td(tbl2)
        self.menuArea.add(tbl, 0, 0)
    def open(self, dlg, pos=None):
        """Open a modal dialog, pausing the game and graying the game area."""
        # Gray out the game area before showing the popup
        rect = self.gameArea.get_abs_rect()
        dark = pygame.Surface(rect.size).convert_alpha()
        dark.fill((0,0,0,150))
        pygame.display.get_surface().blit(dark, rect)
        # Save whatever has been rendered to the 'game area' so we can
        # render it as a static image while the dialog is open.
        self.gameArea.save_background()
        # Pause the gameplay while the dialog is visible
        running = not(self.engine.clock.paused)
        self.engine.pause()
        gui.Desktop.open(self, dlg, pos)
        # Run a nested event loop until the dialog closes.
        while (dlg.is_open()):
            for ev in pygame.event.get():
                self.event(ev)
            rects = self.update()
            if (rects):
                pygame.display.update(rects)
        if (running):
            # Resume gameplay
            self.engine.resume()
    def get_render_area(self):
        # Rectangle the engine may draw into (the game area widget).
        return self.gameArea.get_abs_rect()
"""In our main drawing area we don't want to use pgu because
it's event-driven, so you can't do any movement (since you need
events to move, you'd get odd behaviour like movement only
on mouse move). The game engine punches a hole in the pgu
interface and keeps updating that hole."""
class GameEngine(object):
    """Owns the pgu app, the bouncing-logo demo state and the main loop."""
    def __init__(self, disp):
        self.disp = disp
        self.app = MainGui(self.disp)
        self.app.engine = self
        from strings import Logo
        self.logo = pygame.transform.scale(pygame.image.load(Logo.game), (200,200))
        self.ballrect = self.logo.get_rect()
        # Per-frame velocity of the bouncing logo, in pixels (x, y).
        self.speed = [1, 2]
        from board import Renderer
        from collections import namedtuple
        Size = namedtuple('Size', ['width', 'height'])
        # construct factory, which gets used to (eventually) create the game manager
        from factory import Factory
        factory = Factory('config.json')
        pcs = factory.makePlayerControllers()
        state = factory.makeState(pcs)
        self.gameManager = factory.makeGameManager(state)
        # create board
        self.renderBoard = Renderer(Size(
            self.app.gameArea.rect.width,
            self.app.gameArea.rect.height
        )).render # a function
    # Pause the game clock
    def pause(self):
        self.clock.pause()
    # Resume the game clock
    def resume(self):
        self.clock.resume()
    def render(self, dest, rect):
        """Draw one frame (bouncing logo, clocks, board) into `dest`.

        NOTE(review): relies on self.font/self.clock, which are only created
        in run() -- calling render() before run() would fail.
        """
        size = width, height = rect.width, rect.height
        self.ballrect = self.ballrect.move(self.speed)
        # Bounce off the area edges by inverting the velocity component.
        if self.ballrect.left < 0 or self.ballrect.right > width:
            self.speed[0] = -self.speed[0]
        if self.ballrect.top < 0 or self.ballrect.bottom > height:
            self.speed[1] = -self.speed[1]
        backgroundColor = 0, 0, 255 # which is blue
        dest.fill(backgroundColor)
        # YOU JUST GOT RECT!
        dest.blit(self.logo, self.ballrect)
        import math
        def font(text, position, color=(255,255,255)):
            # Render `text` with the engine font directly onto `dest`.
            tmp = self.font.render(text, True, color)
            dest.blit(tmp, position)
        def draw_clock(name, pt, radius, col, angle):
            # Labelled circle with a single hand pointing at `angle`.
            pygame.draw.circle(dest, col, pt, radius)
            pygame.draw.line(dest, (0,0,0), pt,
                             (pt[0]+radius*math.cos(angle),
                              pt[1]+radius*math.sin(angle)), 2)
            font(name,(pt[0]-radius, pt[1]+radius+5))
        # Draw the real time clock
        angle = self.clock.get_real_time()*2*math.pi/10.0
        draw_clock("Real time", (30,30), 25, (255,200,100), angle)
        # Now draw the game clock
        angle = self.clock.get_time()*2*math.pi/10.0
        draw_clock("Game time", (90,30), 25, (255,100,255), angle)
        self.gameManager.update()
        # NOTE(review): passes the local `font` helper and the module-level
        # `disp` (not the `dest` parameter) to the board renderer -- confirm.
        self.renderBoard(font, disp)
        return (rect,)
    def run(self):
        """Main loop: pump events, render the game area, update pgu, ~30fps."""
        self.app.update()
        pygame.display.flip()
        self.font = pygame.font.SysFont("", 16)
        self.clock = timer.Clock() #pygame.time.Clock()
        done = False
        while not done:
            # Process events
            for ev in pygame.event.get():
                if (ev.type == pygame.QUIT or
                    ev.type == pygame.KEYDOWN and ev.key == pygame.K_ESCAPE):
                    done = True
                else:
                    # Pass the event off to pgu
                    self.app.event(ev)
            # Render the game
            rect = self.app.get_render_area()
            updates = []
            self.disp.set_clip(rect)
            lst = self.render(self.disp, rect)
            if (lst):
                updates += lst
            self.disp.set_clip()
            # Cap it at 30fps
            self.clock.tick(30)
            # Give pgu a chance to update the display
            lst = self.app.update()
            if (lst):
                updates += lst
            pygame.display.update(updates)
            pygame.time.wait(10)
###
# Script entry point: create the window, build the engine, run the loop.
# NOTE(review): no `if __name__ == "__main__"` guard -- importing this
# module starts the game.
disp = pygame.display.set_mode((1366, 768))
eng = GameEngine(disp)
eng.run()
Move the logo and clocks
#!/usr/bin/env python
"""This file starts the game"""
import pygame
from pgu import gui, timer
class DrawingArea(gui.Widget):
    """A pgu widget that displays a snapshot buffer of the game area."""

    def __init__(self, width, height):
        gui.Widget.__init__(self, width=width, height=height)
        # Off-screen surface holding the last captured frame.
        self.imageBuffer = pygame.Surface((width, height))

    def paint(self, surf):
        # Paint whatever has been captured in the buffer
        surf.blit(self.imageBuffer, (0, 0))

    # Call this method to take a snapshot of whatever has been rendered
    # onto the display over this widget.
    def save_background(self):
        disp = pygame.display.get_surface()
        self.imageBuffer.blit(disp, self.get_abs_rect())
class TestDialog(gui.Dialog):
    """A minimal modal dialog used to demonstrate pgu's Dialog support."""

    def __init__(self):
        title = gui.Label("Some Dialog Box")
        # Fix: the label previously read "Close self window", an artifact of
        # a bad this->self rename; restore the intended user-facing text.
        label = gui.Label("Close this window to resume.")
        gui.Dialog.__init__(self, title, label)
"""It describes all the buttons and stuff like that. This is
where pgu comes in,"""
class MainGui(gui.Desktop):
    """Top-level pgu desktop: a game area on top, a menu area below."""

    # Height (pixels) reserved for the game rendering area.
    gameAreaHeight = 500
    gameArea = None
    menuArea = None
    # The game engine
    engine = None

    def __init__(self, disp):
        gui.Desktop.__init__(self)
        container = gui.Container()
        # Setup the 'game' area where the action takes place
        self.gameArea = DrawingArea(disp.get_width(),
                                    self.gameAreaHeight)
        # Setup the gui area
        self.menuArea = gui.Container(
            height=disp.get_height()-self.gameAreaHeight)
        tbl = gui.Table(height=disp.get_height())
        tbl.tr()
        tbl.td(self.gameArea)
        tbl.tr()
        tbl.td(self.menuArea)
        #self.setup_menu(container)
        # Project-local module providing the human player's menu widgets.
        import human_player
        human_player.App(self.menuArea)
        container.add(tbl,0,0)
        self.init(container, disp)

    def setup_menu(self):
        """Build the demo menu (dialog button, pause button, speed slider).

        NOTE(review): currently unused — the call in __init__ is commented out.
        """
        tbl = gui.Table(vpadding=5, hpadding=2)
        tbl.tr()
        dlg = TestDialog()
        def dialog_cb():
            dlg.open()
        btn = gui.Button("Modal dialog", height=50)
        btn.connect(gui.CLICK, dialog_cb)
        tbl.td(btn)
        # Add a button for pausing / resuming the game clock
        def pause_cb():
            if (self.engine.clock.paused):
                self.engine.resume()
            else:
                self.engine.pause()
        btn = gui.Button("Pause/resume clock", height=50)
        btn.connect(gui.CLICK, pause_cb)
        tbl.td(btn)
        # Add a slider for adjusting the game clock speed
        tbl2 = gui.Table()
        timeLabel = gui.Label("Clock speed")
        tbl2.tr()
        tbl2.td(timeLabel)
        slider = gui.HSlider(value=23,min=0,max=100,size=20,height=16,width=120)
        def update_speed():
            # Slider value 0..100 maps to clock speed 0.0..10.0.
            self.engine.clock.set_speed(slider.value/10.0)
        slider.connect(gui.CHANGE, update_speed)
        tbl2.tr()
        tbl2.td(slider)
        tbl.td(tbl2)
        self.menuArea.add(tbl, 0, 0)

    def open(self, dlg, pos=None):
        """Show *dlg* modally, pausing the game and dimming the game area."""
        # Gray out the game area before showing the popup
        rect = self.gameArea.get_abs_rect()
        dark = pygame.Surface(rect.size).convert_alpha()
        dark.fill((0,0,0,150))
        pygame.display.get_surface().blit(dark, rect)
        # Save whatever has been rendered to the 'game area' so we can
        # render it as a static image while the dialog is open.
        self.gameArea.save_background()
        # Pause the gameplay while the dialog is visible
        running = not(self.engine.clock.paused)
        self.engine.pause()
        gui.Desktop.open(self, dlg, pos)
        # Run a nested event loop until the dialog is dismissed.
        while (dlg.is_open()):
            for ev in pygame.event.get():
                self.event(ev)
            rects = self.update()
            if (rects):
                pygame.display.update(rects)
        if (running):
            # Resume gameplay
            self.engine.resume()

    def get_render_area(self):
        # The screen rectangle the engine may draw into.
        return self.gameArea.get_abs_rect()
"""In our main drawing are we don't want to use pgu because
its event driven, so you can't do any movement (since you need
events to move, so you'll get stupid stuff like only movement
on mouse move). The game engine punches a hole in the pgu
interface and keeps updating that hole."""
class GameEngine(object):
    """Owns the display, the pgu desktop, the game clock and the board
    renderer, and drives the main loop."""

    def __init__(self, disp):
        self.disp = disp
        self.app = MainGui(self.disp)
        self.app.engine = self
        # Initial velocity of the bouncing logo, in pixels per frame.
        self.speed = [1, 2]
        # Project-local imports, deferred to construction time.
        from board import Renderer
        from collections import namedtuple
        Size = namedtuple('Size', ['width', 'height'])
        # construct factory, which gets used to (eventually) create the game manager
        from factory import Factory
        factory = Factory('config.json')
        pcs = factory.makePlayerControllers()
        state = factory.makeState(pcs)
        self.gameManager = factory.makeGameManager(state)
        # create board
        self.renderBoard = Renderer(Size(
            self.app.gameArea.rect.width,
            self.app.gameArea.rect.height
        )).render # a function

    # Pause the game clock
    def pause(self):
        self.clock.pause()

    # Resume the game clock
    def resume(self):
        self.clock.resume()

    def render(self, dest, rect):
        """Draw one frame onto *dest*; returns the dirty rect tuple."""
        # NOTE: 'size' itself is unused; only width/height are extracted.
        size = width, height = rect.width, rect.height
        backgroundColor = 0, 0, 255 # which is blue
        dest.fill(backgroundColor)
        # NOTE(review): 'math' appears unused in this version of render.
        import math
        def font(text, position, color=(255,255,255)):
            # Local helper handed to the board renderer for text output.
            tmp = self.font.render(text, True, color)
            dest.blit(tmp, position)
        self.gameManager.update()
        # NOTE(review): passes the module-level 'disp' rather than 'dest';
        # presumably equivalent since dest is the display surface — confirm.
        self.renderBoard(font, disp)
        return (rect,)

    def run(self):
        """Main loop: pump events to pgu, render, cap at 30fps."""
        self.app.update()
        pygame.display.flip()
        self.font = pygame.font.SysFont("", 16)
        self.clock = timer.Clock() #pygame.time.Clock()
        done = False
        while not done:
            # Process events
            for ev in pygame.event.get():
                if (ev.type == pygame.QUIT or
                    ev.type == pygame.KEYDOWN and ev.key == pygame.K_ESCAPE):
                    done = True
                else:
                    # Pass the event off to pgu
                    self.app.event(ev)
            # Render the game, clipped to the area pgu leaves for us.
            rect = self.app.get_render_area()
            updates = []
            self.disp.set_clip(rect)
            lst = self.render(self.disp, rect)
            if (lst):
                updates += lst
            self.disp.set_clip()
            # Cap it at 30fps
            self.clock.tick(30)
            # Give pgu a chance to update the display
            lst = self.app.update()
            if (lst):
                updates += lst
            pygame.display.update(updates)
            pygame.time.wait(10)
###
# Script entry point: create the display, build the engine, run the loop.
disp = pygame.display.set_mode((1366, 768))
eng = GameEngine(disp)
eng.run()
|
# We import importlib *ASAP* in order to test #15386
import importlib
import builtins
import imp
from importlib.test.import_ import test_suite as importlib_import_test_suite
from importlib.test.import_ import util as importlib_util
import marshal
import os
import platform
import py_compile
import random
import stat
import sys
import unittest
import textwrap
import errno
import shutil
import test.support
from test.support import (
EnvironmentVarGuard, TESTFN, check_warnings, forget, is_jython,
make_legacy_pyc, rmtree, run_unittest, swap_attr, swap_item, temp_umask,
unlink, unload, create_empty_file, cpython_only)
from test import script_helper
def remove_files(name):
    """Delete every on-disk artifact for module *name* and the local
    __pycache__ directory."""
    suffixes = (".py", ".pyc", ".pyo", ".pyw", "$py.class")
    for suffix in suffixes:
        unlink(name + suffix)
    rmtree('__pycache__')
class ImportTests(unittest.TestCase):
    """Core behaviors of the import statement and __import__."""

    def setUp(self):
        remove_files(TESTFN)
        importlib.invalidate_caches()

    def tearDown(self):
        unload(TESTFN)

    # NOTE(review): this rebinds setUp to tearDown at class-creation time,
    # so the remove_files/invalidate_caches setUp above never runs — looks
    # intentional (historical pattern) but worth confirming.
    setUp = tearDown

    def test_case_sensitivity(self):
        # Brief digression to test that import is case-sensitive: if we got
        # this far, we know for sure that "random" exists.
        with self.assertRaises(ImportError):
            import RAnDoM

    def test_double_const(self):
        # Another brief digression to test the accuracy of manifest float
        # constants.
        from test import double_const  # don't blink -- that *was* the test

    def test_import(self):
        def test_with_extension(ext):
            # The extension is normally ".py", perhaps ".pyw".
            source = TESTFN + ext
            pyo = TESTFN + ".pyo"
            if is_jython:
                pyc = TESTFN + "$py.class"
            else:
                pyc = TESTFN + ".pyc"
            with open(source, "w") as f:
                print("# This tests Python's ability to import a",
                      ext, "file.", file=f)
                a = random.randrange(1000)
                b = random.randrange(1000)
                print("a =", a, file=f)
                print("b =", b, file=f)
            if TESTFN in sys.modules:
                del sys.modules[TESTFN]
            importlib.invalidate_caches()
            try:
                try:
                    mod = __import__(TESTFN)
                except ImportError as err:
                    self.fail("import from %s failed: %s" % (ext, err))
                self.assertEqual(mod.a, a,
                    "module loaded (%s) but contents invalid" % mod)
                self.assertEqual(mod.b, b,
                    "module loaded (%s) but contents invalid" % mod)
            finally:
                forget(TESTFN)
                unlink(source)
                unlink(pyc)
                unlink(pyo)

        sys.path.insert(0, os.curdir)
        try:
            test_with_extension(".py")
            if sys.platform.startswith("win"):
                for ext in [".PY", ".Py", ".pY", ".pyw", ".PYW", ".pYw"]:
                    test_with_extension(ext)
        finally:
            del sys.path[0]

    @unittest.skipUnless(os.name == 'posix',
                         "test meaningful only on posix systems")
    def test_creation_mode(self):
        mask = 0o022
        with temp_umask(mask):
            sys.path.insert(0, os.curdir)
            try:
                fname = TESTFN + os.extsep + "py"
                create_empty_file(fname)
                fn = imp.cache_from_source(fname)
                unlink(fn)
                importlib.invalidate_caches()
                __import__(TESTFN)
                if not os.path.exists(fn):
                    self.fail("__import__ did not result in creation of "
                              "either a .pyc or .pyo file")
                s = os.stat(fn)
                # Check that the umask is respected, and the executable bits
                # aren't set.
                self.assertEqual(stat.S_IMODE(s.st_mode), 0o666 & ~mask)
            finally:
                del sys.path[0]
                remove_files(TESTFN)
                unload(TESTFN)

    def test_imp_module(self):
        # Verify that the imp module can correctly load and find .py files
        # XXX (ncoghlan): It would be nice to use support.CleanImport
        # here, but that breaks because the os module registers some
        # handlers in copy_reg on import. Since CleanImport doesn't
        # revert that registration, the module is left in a broken
        # state after reversion. Reinitialising the module contents
        # and just reverting os.environ to its previous state is an OK
        # workaround
        orig_path = os.path
        orig_getenv = os.getenv
        with EnvironmentVarGuard():
            x = imp.find_module("os")
            self.addCleanup(x[0].close)
            new_os = imp.load_module("os", *x)
            self.assertIs(os, new_os)
            self.assertIs(orig_path, new_os.path)
            self.assertIsNot(orig_getenv, new_os.getenv)

    def test_bug7732(self):
        source = TESTFN + '.py'
        os.mkdir(source)
        try:
            self.assertRaisesRegex(ImportError, '^No module',
                                   imp.find_module, TESTFN, ["."])
        finally:
            os.rmdir(source)

    def test_module_with_large_stack(self, module='longlist'):
        # Regression test for http://bugs.python.org/issue561858.
        filename = module + '.py'
        # Create a file with a list of 65000 elements.
        with open(filename, 'w') as f:
            f.write('d = [\n')
            for i in range(65000):
                f.write('"",\n')
            f.write(']')
        try:
            # Compile & remove .py file; we only need .pyc (or .pyo).
            # Bytecode must be relocated from the PEP 3147 bytecode-only location.
            py_compile.compile(filename)
        finally:
            unlink(filename)
        # Need to be able to load from current dir.
        sys.path.append('')
        importlib.invalidate_caches()
        try:
            make_legacy_pyc(filename)
            # This used to crash.
            exec('import ' + module)
        finally:
            # Cleanup.
            del sys.path[-1]
            unlink(filename + 'c')
            unlink(filename + 'o')

    def test_failing_import_sticks(self):
        source = TESTFN + ".py"
        with open(source, "w") as f:
            print("a = 1/0", file=f)
        # New in 2.4, we shouldn't be able to import that no matter how often
        # we try.
        sys.path.insert(0, os.curdir)
        importlib.invalidate_caches()
        if TESTFN in sys.modules:
            del sys.modules[TESTFN]
        try:
            for i in [1, 2, 3]:
                self.assertRaises(ZeroDivisionError, __import__, TESTFN)
                self.assertNotIn(TESTFN, sys.modules,
                                 "damaged module in sys.modules on %i try" % i)
        finally:
            del sys.path[0]
            remove_files(TESTFN)

    # Fix: a second, identical-named test_import_name_binding using
    # assertTrue used to precede test_failing_reload; it was dead code,
    # silently shadowed by the assertIs version kept below.

    def test_failing_reload(self):
        # A failing reload should leave the module object in sys.modules.
        source = TESTFN + os.extsep + "py"
        with open(source, "w") as f:
            f.write("a = 1\nb=2\n")
        sys.path.insert(0, os.curdir)
        try:
            mod = __import__(TESTFN)
            self.assertIn(TESTFN, sys.modules)
            self.assertEqual(mod.a, 1, "module has wrong attribute values")
            self.assertEqual(mod.b, 2, "module has wrong attribute values")
            # On WinXP, just replacing the .py file wasn't enough to
            # convince reload() to reparse it. Maybe the timestamp didn't
            # move enough. We force it to get reparsed by removing the
            # compiled file too.
            remove_files(TESTFN)
            # Now damage the module.
            with open(source, "w") as f:
                f.write("a = 10\nb=20//0\n")
            self.assertRaises(ZeroDivisionError, imp.reload, mod)
            # But we still expect the module to be in sys.modules.
            mod = sys.modules.get(TESTFN)
            self.assertIsNot(mod, None, "expected module to be in sys.modules")
            # We should have replaced a w/ 10, but the old b value should
            # stick.
            self.assertEqual(mod.a, 10, "module has wrong attribute values")
            self.assertEqual(mod.b, 2, "module has wrong attribute values")
        finally:
            del sys.path[0]
            remove_files(TESTFN)
            unload(TESTFN)

    def test_file_to_source(self):
        # check if __file__ points to the source file where available
        source = TESTFN + ".py"
        with open(source, "w") as f:
            f.write("test = None\n")
        sys.path.insert(0, os.curdir)
        try:
            mod = __import__(TESTFN)
            self.assertTrue(mod.__file__.endswith('.py'))
            os.remove(source)
            del sys.modules[TESTFN]
            make_legacy_pyc(source)
            importlib.invalidate_caches()
            mod = __import__(TESTFN)
            base, ext = os.path.splitext(mod.__file__)
            self.assertIn(ext, ('.pyc', '.pyo'))
        finally:
            del sys.path[0]
            remove_files(TESTFN)
            if TESTFN in sys.modules:
                del sys.modules[TESTFN]

    def test_import_name_binding(self):
        # import x.y.z binds x in the current namespace.
        import test as x
        import test.support
        self.assertIs(x, test, x.__name__)
        self.assertTrue(hasattr(test.support, "__file__"))
        # import x.y.z as w binds z as w.
        import test.support as y
        self.assertIs(y, test.support, y.__name__)

    def test_import_by_filename(self):
        path = os.path.abspath(TESTFN)
        encoding = sys.getfilesystemencoding()
        try:
            path.encode(encoding)
        except UnicodeEncodeError:
            self.skipTest('path is not encodable to {}'.format(encoding))
        with self.assertRaises(ImportError) as c:
            __import__(path)

    def test_import_in_del_does_not_crash(self):
        # Issue 4236
        testfn = script_helper.make_script('', TESTFN, textwrap.dedent("""\
            import sys
            class C:
                def __del__(self):
                    import imp
            sys.argv.insert(0, C())
            """))
        script_helper.assert_python_ok(testfn)

    def test_timestamp_overflow(self):
        # A modification timestamp larger than 2**32 should not be a problem
        # when importing a module (issue #11235).
        sys.path.insert(0, os.curdir)
        try:
            source = TESTFN + ".py"
            compiled = imp.cache_from_source(source)
            with open(source, 'w') as f:
                pass
            try:
                os.utime(source, (2 ** 33 - 5, 2 ** 33 - 5))
            except OverflowError:
                self.skipTest("cannot set modification time to large integer")
            except OSError as e:
                if e.errno != getattr(errno, 'EOVERFLOW', None):
                    raise
                self.skipTest("cannot set modification time to large integer ({})".format(e))
            __import__(TESTFN)
            # The pyc file was created.
            os.stat(compiled)
        finally:
            del sys.path[0]
            remove_files(TESTFN)
class PycRewritingTests(unittest.TestCase):
    # Test that the `co_filename` attribute on code objects always points
    # to the right file, even when various things happen (e.g. both the .py
    # and the .pyc file are renamed).

    module_name = "unlikely_module_name"
    # Source executed by the imported test module; exposes the filenames
    # seen by the frame, the module and a function's code object.
    module_source = """
import sys
code_filename = sys._getframe().f_code.co_filename
module_filename = __file__
constant = 1
def func():
    pass
func_filename = func.__code__.co_filename
"""
    dir_name = os.path.abspath(TESTFN)
    file_name = os.path.join(dir_name, module_name) + os.extsep + "py"
    compiled_name = imp.cache_from_source(file_name)

    def setUp(self):
        # Snapshot sys.path and any pre-existing module of the same name.
        self.sys_path = sys.path[:]
        self.orig_module = sys.modules.pop(self.module_name, None)
        os.mkdir(self.dir_name)
        with open(self.file_name, "w") as f:
            f.write(self.module_source)
        sys.path.insert(0, self.dir_name)
        importlib.invalidate_caches()

    def tearDown(self):
        sys.path[:] = self.sys_path
        if self.orig_module is not None:
            sys.modules[self.module_name] = self.orig_module
        else:
            unload(self.module_name)
        unlink(self.file_name)
        unlink(self.compiled_name)
        rmtree(self.dir_name)

    def import_module(self):
        # Import the helper module and return the module object.
        ns = globals()
        __import__(self.module_name, ns, ns)
        return sys.modules[self.module_name]

    def test_basics(self):
        mod = self.import_module()
        self.assertEqual(mod.module_filename, self.file_name)
        self.assertEqual(mod.code_filename, self.file_name)
        self.assertEqual(mod.func_filename, self.file_name)
        # Second import reads the cached pyc; filenames must not change.
        del sys.modules[self.module_name]
        mod = self.import_module()
        self.assertEqual(mod.module_filename, self.file_name)
        self.assertEqual(mod.code_filename, self.file_name)
        self.assertEqual(mod.func_filename, self.file_name)

    def test_incorrect_code_name(self):
        # Compile under a different display filename (dfile).
        py_compile.compile(self.file_name, dfile="another_module.py")
        mod = self.import_module()
        self.assertEqual(mod.module_filename, self.file_name)
        self.assertEqual(mod.code_filename, self.file_name)
        self.assertEqual(mod.func_filename, self.file_name)

    def test_module_without_source(self):
        # With only a legacy pyc present, code filenames keep the dfile.
        target = "another_module.py"
        py_compile.compile(self.file_name, dfile=target)
        os.remove(self.file_name)
        pyc_file = make_legacy_pyc(self.file_name)
        importlib.invalidate_caches()
        mod = self.import_module()
        self.assertEqual(mod.module_filename, pyc_file)
        self.assertEqual(mod.code_filename, target)
        self.assertEqual(mod.func_filename, target)

    def test_foreign_code(self):
        # Splice a code object from another file into the pyc's constants
        # and verify its co_filename survives the import.
        py_compile.compile(self.file_name)
        with open(self.compiled_name, "rb") as f:
            header = f.read(12)
            code = marshal.load(f)
        constants = list(code.co_consts)
        foreign_code = test_main.__code__
        pos = constants.index(1)
        constants[pos] = foreign_code
        code = type(code)(code.co_argcount, code.co_kwonlyargcount,
                          code.co_nlocals, code.co_stacksize,
                          code.co_flags, code.co_code, tuple(constants),
                          code.co_names, code.co_varnames, code.co_filename,
                          code.co_name, code.co_firstlineno, code.co_lnotab,
                          code.co_freevars, code.co_cellvars)
        with open(self.compiled_name, "wb") as f:
            f.write(header)
            marshal.dump(code, f)
        mod = self.import_module()
        self.assertEqual(mod.constant.co_filename, foreign_code.co_filename)
class PathsTests(unittest.TestCase):
    """Import behavior for unusual sys.path entries."""

    # Sample names including non-ASCII characters (currently unused here;
    # presumably kept for related path tests — verify).
    SAMPLES = ('test', 'test\u00e4\u00f6\u00fc\u00df', 'test\u00e9\u00e8',
               'test\u00b0\u00b3\u00b2')
    path = TESTFN

    def setUp(self):
        os.mkdir(self.path)
        self.syspath = sys.path[:]

    def tearDown(self):
        rmtree(self.path)
        sys.path[:] = self.syspath

    # Regression test for http://bugs.python.org/issue1293.
    def test_trailing_slash(self):
        with open(os.path.join(self.path, 'test_trailing_slash.py'), 'w') as f:
            f.write("testdata = 'test_trailing_slash'")
        sys.path.append(self.path+'/')
        mod = __import__("test_trailing_slash")
        self.assertEqual(mod.testdata, 'test_trailing_slash')
        unload("test_trailing_slash")

    # Regression test for http://bugs.python.org/issue3677.
    @unittest.skipUnless(sys.platform == 'win32', 'Windows-specific')
    def test_UNC_path(self):
        with open(os.path.join(self.path, 'test_unc_path.py'), 'w') as f:
            f.write("testdata = 'test_unc_path'")
        importlib.invalidate_caches()
        # Create the UNC path, like \\myhost\c$\foo\bar.
        path = os.path.abspath(self.path)
        import socket
        hn = socket.gethostname()
        drive = path[0]
        unc = "\\\\%s\\%s$"%(hn, drive)
        unc += path[2:]
        try:
            os.listdir(unc)
        except OSError as e:
            if e.errno in (errno.EPERM, errno.EACCES):
                # See issue #15338
                self.skipTest("cannot access administrative share %r" % (unc,))
            raise
        sys.path.insert(0, unc)
        try:
            mod = __import__("test_unc_path")
        except ImportError as e:
            self.fail("could not import 'test_unc_path' from %r: %r"
                      % (unc, e))
        self.assertEqual(mod.testdata, 'test_unc_path')
        self.assertTrue(mod.__file__.startswith(unc), mod.__file__)
        unload("test_unc_path")
class RelativeImportTests(unittest.TestCase):
    """Explicit relative imports (PEP 328) inside the test package."""

    def tearDown(self):
        unload("test.relimport")
    # Same cleanup before each test too.
    setUp = tearDown

    def test_relimport_star(self):
        # This will import * from .test_import.
        from . import relimport
        self.assertTrue(hasattr(relimport, "RelativeImportTests"))

    def test_issue3221(self):
        # Note for mergers: the 'absolute' tests from the 2.x branch
        # are missing in Py3k because implicit relative imports are
        # a thing of the past
        #
        # Regression test for http://bugs.python.org/issue3221.
        def check_relative():
            exec("from . import relimport", ns)
        # Check relative import OK with __package__ and __name__ correct
        ns = dict(__package__='test', __name__='test.notarealmodule')
        check_relative()
        # Check relative import OK with only __name__ wrong
        ns = dict(__package__='test', __name__='notarealpkg.notarealmodule')
        check_relative()
        # Check relative import fails with only __package__ wrong
        ns = dict(__package__='foo', __name__='test.notarealmodule')
        self.assertRaises(SystemError, check_relative)
        # Check relative import fails with __package__ and __name__ wrong
        ns = dict(__package__='foo', __name__='notarealpkg.notarealmodule')
        self.assertRaises(SystemError, check_relative)
        # Check relative import fails with package set to a non-string
        ns = dict(__package__=object())
        self.assertRaises(TypeError, check_relative)

    def test_absolute_import_without_future(self):
        # If explicit relative import syntax is used, then do not try
        # to perform an absolute import in the face of failure.
        # Issue #7902.
        with self.assertRaises(ImportError):
            from .os import sep
            self.fail("explicit relative import triggered an "
                      "implicit absolute import")
class OverridingImportBuiltinTests(unittest.TestCase):
    """Behavior when builtins.__import__ is replaced."""

    def test_override_builtin(self):
        # Test that overriding builtins.__import__ can bypass sys.modules.
        import os
        def foo():
            import os
            return os
        self.assertEqual(foo(), os)  # Quick sanity check.
        with swap_attr(builtins, "__import__", lambda *x: 5):
            self.assertEqual(foo(), 5)
        # Test what happens when we shadow __import__ in globals(); this
        # currently does not impact the import process, but if this changes,
        # other code will need to change, so keep this test as a tripwire.
        with swap_item(globals(), "__import__", lambda *x: 5):
            self.assertEqual(foo(), os)
class PycacheTests(unittest.TestCase):
    # Test the various PEP 3147 related behaviors.

    tag = imp.get_tag()

    def _clean(self):
        # Remove the test module, its bytecode and the cache directory.
        forget(TESTFN)
        rmtree('__pycache__')
        unlink(self.source)

    def setUp(self):
        self.source = TESTFN + '.py'
        self._clean()
        with open(self.source, 'w') as fp:
            print('# This is a test file written by test_import.py', file=fp)
        sys.path.insert(0, os.curdir)
        importlib.invalidate_caches()

    def tearDown(self):
        assert sys.path[0] == os.curdir, 'Unexpected sys.path[0]'
        del sys.path[0]
        self._clean()

    def test_import_pyc_path(self):
        # Importing creates __pycache__/<name>.<tag>.pyc (or .pyo with -O).
        self.assertFalse(os.path.exists('__pycache__'))
        __import__(TESTFN)
        self.assertTrue(os.path.exists('__pycache__'))
        self.assertTrue(os.path.exists(os.path.join(
            '__pycache__', '{}.{}.py{}'.format(
            TESTFN, self.tag, __debug__ and 'c' or 'o'))))

    @unittest.skipUnless(os.name == 'posix',
                         "test meaningful only on posix systems")
    @unittest.skipIf(hasattr(os, 'geteuid') and os.geteuid() == 0,
            "due to varying filesystem permission semantics (issue #11956)")
    def test_unwritable_directory(self):
        # When the umask causes the new __pycache__ directory to be
        # unwritable, the import still succeeds but no .pyc file is written.
        with temp_umask(0o222):
            __import__(TESTFN)
            self.assertTrue(os.path.exists('__pycache__'))
            self.assertFalse(os.path.exists(os.path.join(
                '__pycache__', '{}.{}.pyc'.format(TESTFN, self.tag))))

    def test_missing_source(self):
        # With PEP 3147 cache layout, removing the source but leaving the pyc
        # file does not satisfy the import.
        __import__(TESTFN)
        pyc_file = imp.cache_from_source(self.source)
        self.assertTrue(os.path.exists(pyc_file))
        os.remove(self.source)
        forget(TESTFN)
        self.assertRaises(ImportError, __import__, TESTFN)

    def test_missing_source_legacy(self):
        # Like test_missing_source() except that for backward compatibility,
        # when the pyc file lives where the py file would have been (and named
        # without the tag), it is importable. The __file__ of the imported
        # module is the pyc location.
        __import__(TESTFN)
        # pyc_file gets removed in _clean() via tearDown().
        pyc_file = make_legacy_pyc(self.source)
        os.remove(self.source)
        unload(TESTFN)
        importlib.invalidate_caches()
        m = __import__(TESTFN)
        self.assertEqual(m.__file__,
                         os.path.join(os.curdir, os.path.relpath(pyc_file)))

    def test___cached__(self):
        # Modules now also have an __cached__ that points to the pyc file.
        m = __import__(TESTFN)
        pyc_file = imp.cache_from_source(TESTFN + '.py')
        self.assertEqual(m.__cached__, os.path.join(os.curdir, pyc_file))

    def test___cached___legacy_pyc(self):
        # Like test___cached__() except that for backward compatibility,
        # when the pyc file lives where the py file would have been (and named
        # without the tag), it is importable. The __cached__ of the imported
        # module is the pyc location.
        __import__(TESTFN)
        # pyc_file gets removed in _clean() via tearDown().
        pyc_file = make_legacy_pyc(self.source)
        os.remove(self.source)
        unload(TESTFN)
        importlib.invalidate_caches()
        m = __import__(TESTFN)
        self.assertEqual(m.__cached__,
                         os.path.join(os.curdir, os.path.relpath(pyc_file)))

    def test_package___cached__(self):
        # Like test___cached__ but for packages.
        def cleanup():
            rmtree('pep3147')
            unload('pep3147.foo')
            unload('pep3147')
        os.mkdir('pep3147')
        self.addCleanup(cleanup)
        # Touch the __init__.py
        with open(os.path.join('pep3147', '__init__.py'), 'w'):
            pass
        with open(os.path.join('pep3147', 'foo.py'), 'w'):
            pass
        importlib.invalidate_caches()
        m = __import__('pep3147.foo')
        init_pyc = imp.cache_from_source(
            os.path.join('pep3147', '__init__.py'))
        self.assertEqual(m.__cached__, os.path.join(os.curdir, init_pyc))
        foo_pyc = imp.cache_from_source(os.path.join('pep3147', 'foo.py'))
        self.assertEqual(sys.modules['pep3147.foo'].__cached__,
                         os.path.join(os.curdir, foo_pyc))

    def test_package___cached___from_pyc(self):
        # Like test___cached__ but ensuring __cached__ when imported from a
        # PEP 3147 pyc file.
        def cleanup():
            rmtree('pep3147')
            unload('pep3147.foo')
            unload('pep3147')
        os.mkdir('pep3147')
        self.addCleanup(cleanup)
        # Touch the __init__.py
        with open(os.path.join('pep3147', '__init__.py'), 'w'):
            pass
        with open(os.path.join('pep3147', 'foo.py'), 'w'):
            pass
        importlib.invalidate_caches()
        # Import once to create the pyc, then re-import from the cache.
        m = __import__('pep3147.foo')
        unload('pep3147.foo')
        unload('pep3147')
        importlib.invalidate_caches()
        m = __import__('pep3147.foo')
        init_pyc = imp.cache_from_source(
            os.path.join('pep3147', '__init__.py'))
        self.assertEqual(m.__cached__, os.path.join(os.curdir, init_pyc))
        foo_pyc = imp.cache_from_source(os.path.join('pep3147', 'foo.py'))
        self.assertEqual(sys.modules['pep3147.foo'].__cached__,
                         os.path.join(os.curdir, foo_pyc))

    def test_recompute_pyc_same_second(self):
        # Even when the source file doesn't change timestamp, a change in
        # source size is enough to trigger recomputation of the pyc file.
        __import__(TESTFN)
        unload(TESTFN)
        with open(self.source, 'a') as fp:
            print("x = 5", file=fp)
        m = __import__(TESTFN)
        self.assertEqual(m.x, 5)
class TestSymbolicallyLinkedPackage(unittest.TestCase):
    """Importing a package through a directory symlink (issue 6727)."""

    package_name = 'sample'

    def setUp(self):
        if os.path.exists(self.tagged):
            shutil.rmtree(self.tagged)
        if os.path.exists(self.package_name):
            os.remove(self.package_name)
        self.orig_sys_path = sys.path[:]
        # create a sample package; imagine you have a package with a tag and
        # you want to symbolically link it from its untagged name.
        os.mkdir(self.tagged)
        init_file = os.path.join(self.tagged, '__init__.py')
        open(init_file, 'w').close()
        self.assertEqual(os.path.exists(init_file), True)
        # now create a symlink to the tagged package
        # sample -> sample-tagged
        os.symlink(self.tagged, self.package_name)
        # disabled because os.isdir currently fails (see issue 15093)
        # self.assertEqual(os.path.isdir(self.package_name), True)
        self.assertEqual(
            os.path.isfile(os.path.join(self.package_name, '__init__.py')),
            True,
        )

    @property
    def tagged(self):
        # Directory name the symlink points at.
        return self.package_name + '-tagged'

    # regression test for issue6727
    @unittest.skipUnless(
        not hasattr(sys, 'getwindowsversion')
        or sys.getwindowsversion() >= (6, 0),
        "Windows Vista or later required")
    @test.support.skip_unless_symlink
    @unittest.skipUnless(
        sys.platform == 'win32',
        "Test failing on Unix (see issue15091)"
    )
    def test_symlinked_dir_importable(self):
        # make sure sample can only be imported from the current directory.
        sys.path[:] = ['.']
        # and try to import the package
        __import__(self.package_name)

    def tearDown(self):
        # now cleanup
        if os.path.exists(self.package_name):
            os.remove(self.package_name)
        if os.path.exists(self.tagged):
            shutil.rmtree(self.tagged)
        sys.path[:] = self.orig_sys_path
@cpython_only
class ImportlibBootstrapTests(unittest.TestCase):
    # These tests check that importlib is bootstrapped.

    def test_frozen_importlib(self):
        mod = sys.modules['_frozen_importlib']
        self.assertTrue(mod)

    def test_frozen_importlib_is_bootstrap(self):
        # The frozen module must be the very same object as the source one.
        from importlib import _bootstrap
        mod = sys.modules['_frozen_importlib']
        self.assertIs(mod, _bootstrap)
        self.assertEqual(mod.__name__, 'importlib._bootstrap')
        self.assertEqual(mod.__package__, 'importlib')
        self.assertTrue(mod.__file__.endswith('_bootstrap.py'), mod.__file__)

    def test_there_can_be_only_one(self):
        # Issue #15386 revealed a tricky loophole in the bootstrapping
        # This test is technically redundant, since the bug caused importing
        # this test module to crash completely, but it helps prove the point
        from importlib import machinery
        mod = sys.modules['_frozen_importlib']
        self.assertIs(machinery.FileFinder, mod.FileFinder)
        self.assertIs(imp.new_module, mod.new_module)
class ImportTracebackTests(unittest.TestCase):
    """Tracebacks raised during import must show user frames, with
    importlib's internal frames stripped (except for real importlib bugs)."""

    def setUp(self):
        os.mkdir(TESTFN)
        self.old_path = sys.path[:]
        sys.path.insert(0, TESTFN)

    def tearDown(self):
        sys.path[:] = self.old_path
        rmtree(TESTFN)

    def create_module(self, mod, contents):
        # Write a throwaway module into the TESTFN directory.
        with open(os.path.join(TESTFN, mod + ".py"), "w") as f:
            f.write(contents)
        self.addCleanup(unload, mod)
        importlib.invalidate_caches()

    def assert_traceback(self, tb, files):
        # Collapse consecutive frames from the same file, then check the
        # resulting file sequence matches *files* (substring match).
        deduped_files = []
        while tb:
            code = tb.tb_frame.f_code
            fn = code.co_filename
            if not deduped_files or fn != deduped_files[-1]:
                deduped_files.append(fn)
            tb = tb.tb_next
        self.assertEqual(len(deduped_files), len(files), deduped_files)
        for fn, pat in zip(deduped_files, files):
            self.assertIn(pat, fn)

    def test_nonexistent_module(self):
        try:
            # assertRaises() clears __traceback__
            import nonexistent_xyzzy
        except ImportError as e:
            tb = e.__traceback__
        else:
            self.fail("ImportError should have been raised")
        self.assert_traceback(tb, [__file__])

    def test_nonexistent_module_nested(self):
        self.create_module("foo", "import nonexistent_xyzzy")
        try:
            import foo
        except ImportError as e:
            tb = e.__traceback__
        else:
            self.fail("ImportError should have been raised")
        self.assert_traceback(tb, [__file__, 'foo.py'])

    def test_exec_failure(self):
        self.create_module("foo", "1/0")
        try:
            import foo
        except ZeroDivisionError as e:
            tb = e.__traceback__
        else:
            self.fail("ZeroDivisionError should have been raised")
        self.assert_traceback(tb, [__file__, 'foo.py'])

    def test_exec_failure_nested(self):
        self.create_module("foo", "import bar")
        self.create_module("bar", "1/0")
        try:
            import foo
        except ZeroDivisionError as e:
            tb = e.__traceback__
        else:
            self.fail("ZeroDivisionError should have been raised")
        self.assert_traceback(tb, [__file__, 'foo.py', 'bar.py'])

    @cpython_only
    def test_import_bug(self):
        # We simulate a bug in importlib and check that it's not stripped
        # away from the traceback.
        self.create_module("foo", "")
        importlib = sys.modules['_frozen_importlib']
        old_load_module = importlib.SourceLoader.load_module
        try:
            def load_module(*args):
                1/0
            importlib.SourceLoader.load_module = load_module
            try:
                import foo
            except ZeroDivisionError as e:
                tb = e.__traceback__
            else:
                self.fail("ZeroDivisionError should have been raised")
            self.assert_traceback(tb, [__file__, '<frozen importlib', __file__])
        finally:
            importlib.SourceLoader.load_module = old_load_module
def test_main(verbose=None):
    """Run all import test cases, forcing the importlib test utilities to
    route imports through __import__ for the duration."""
    flag = importlib_util.using___import__
    try:
        importlib_util.using___import__ = True
        run_unittest(ImportTests, PycacheTests,
                     PycRewritingTests, PathsTests, RelativeImportTests,
                     OverridingImportBuiltinTests,
                     ImportlibBootstrapTests,
                     TestSymbolicallyLinkedPackage,
                     ImportTracebackTests,
                     importlib_import_test_suite())
    finally:
        # Restore the previous setting whatever happened above.
        importlib_util.using___import__ = flag
if __name__ == '__main__':
    # Test needs to be a package, so we can do relative imports.
    from test.test_import import test_main
    test_main()
Commit message: Running the importlib tests from test_import is redundant,
as there is no longer any difference between __import__ and
importlib.__import__.
# We import importlib *ASAP* in order to test #15386
import importlib
import builtins
import imp
from importlib.test.import_ import util as importlib_util
import marshal
import os
import platform
import py_compile
import random
import stat
import sys
import unittest
import textwrap
import errno
import shutil
import test.support
from test.support import (
EnvironmentVarGuard, TESTFN, check_warnings, forget, is_jython,
make_legacy_pyc, rmtree, run_unittest, swap_attr, swap_item, temp_umask,
unlink, unload, create_empty_file, cpython_only)
from test import script_helper
def remove_files(name):
    # Delete every on-disk artifact a module named *name* may have left
    # behind (source, bytecode, Windows/Jython variants), plus __pycache__.
    for f in (name + ".py",
              name + ".pyc",
              name + ".pyo",
              name + ".pyw",
              name + "$py.class"):
        unlink(f)
    rmtree('__pycache__')
class ImportTests(unittest.TestCase):
    """Core behavioral tests for the import statement and __import__.

    Two defects in the original class are fixed here:
    * ``setUp = tearDown`` after the method definitions silently rebound
      ``setUp`` and discarded the real setUp above it, so remove_files()
      and importlib.invalidate_caches() never ran before a test.  The
      rebinding is removed.
    * ``test_import_name_binding`` was defined twice; the first
      (assertTrue-based) definition was dead code, shadowed by the later
      assertIs-based one.  The duplicate is removed.
    """

    def setUp(self):
        # Start each test from a clean slate: no stale artifacts on disk
        # and no stale import-finder caches.
        remove_files(TESTFN)
        importlib.invalidate_caches()

    def tearDown(self):
        unload(TESTFN)

    def test_case_sensitivity(self):
        # Brief digression to test that import is case-sensitive:  if we got
        # this far, we know for sure that "random" exists.
        with self.assertRaises(ImportError):
            import RAnDoM

    def test_double_const(self):
        # Another brief digression to test the accuracy of manifest float
        # constants.
        from test import double_const  # don't blink -- that *was* the test

    def test_import(self):
        def test_with_extension(ext):
            # The extension is normally ".py", perhaps ".pyw".
            source = TESTFN + ext
            pyo = TESTFN + ".pyo"
            if is_jython:
                pyc = TESTFN + "$py.class"
            else:
                pyc = TESTFN + ".pyc"

            with open(source, "w") as f:
                print("# This tests Python's ability to import a",
                      ext, "file.", file=f)
                a = random.randrange(1000)
                b = random.randrange(1000)
                print("a =", a, file=f)
                print("b =", b, file=f)

            if TESTFN in sys.modules:
                del sys.modules[TESTFN]
            importlib.invalidate_caches()
            try:
                try:
                    mod = __import__(TESTFN)
                except ImportError as err:
                    self.fail("import from %s failed: %s" % (ext, err))

                self.assertEqual(mod.a, a,
                    "module loaded (%s) but contents invalid" % mod)
                self.assertEqual(mod.b, b,
                    "module loaded (%s) but contents invalid" % mod)
            finally:
                forget(TESTFN)
                unlink(source)
                unlink(pyc)
                unlink(pyo)

        sys.path.insert(0, os.curdir)
        try:
            test_with_extension(".py")
            if sys.platform.startswith("win"):
                # Windows' filesystem is case-insensitive; these variants
                # must all import too.
                for ext in [".PY", ".Py", ".pY", ".pyw", ".PYW", ".pYw"]:
                    test_with_extension(ext)
        finally:
            del sys.path[0]

    @unittest.skipUnless(os.name == 'posix',
                         "test meaningful only on posix systems")
    def test_creation_mode(self):
        mask = 0o022
        with temp_umask(mask):
            sys.path.insert(0, os.curdir)
            try:
                fname = TESTFN + os.extsep + "py"
                create_empty_file(fname)
                fn = imp.cache_from_source(fname)
                unlink(fn)
                importlib.invalidate_caches()
                __import__(TESTFN)
                if not os.path.exists(fn):
                    self.fail("__import__ did not result in creation of "
                              "either a .pyc or .pyo file")
                s = os.stat(fn)
                # Check that the umask is respected, and the executable bits
                # aren't set.
                self.assertEqual(stat.S_IMODE(s.st_mode), 0o666 & ~mask)
            finally:
                del sys.path[0]
                remove_files(TESTFN)
                unload(TESTFN)

    def test_imp_module(self):
        # Verify that the imp module can correctly load and find .py files
        # XXX (ncoghlan): It would be nice to use support.CleanImport
        # here, but that breaks because the os module registers some
        # handlers in copy_reg on import. Since CleanImport doesn't
        # revert that registration, the module is left in a broken
        # state after reversion. Reinitialising the module contents
        # and just reverting os.environ to its previous state is an OK
        # workaround
        orig_path = os.path
        orig_getenv = os.getenv
        with EnvironmentVarGuard():
            x = imp.find_module("os")
            self.addCleanup(x[0].close)
            new_os = imp.load_module("os", *x)
        self.assertIs(os, new_os)
        self.assertIs(orig_path, new_os.path)
        self.assertIsNot(orig_getenv, new_os.getenv)

    def test_bug7732(self):
        # A directory whose name looks like a module source file must not
        # satisfy find_module.
        source = TESTFN + '.py'
        os.mkdir(source)
        try:
            self.assertRaisesRegex(ImportError, '^No module',
                imp.find_module, TESTFN, ["."])
        finally:
            os.rmdir(source)

    def test_module_with_large_stack(self, module='longlist'):
        # Regression test for http://bugs.python.org/issue561858.
        filename = module + '.py'

        # Create a file with a list of 65000 elements.
        with open(filename, 'w') as f:
            f.write('d = [\n')
            for i in range(65000):
                f.write('"",\n')
            f.write(']')

        try:
            # Compile & remove .py file; we only need .pyc (or .pyo).
            # Bytecode must be relocated from the PEP 3147 bytecode-only location.
            py_compile.compile(filename)
        finally:
            unlink(filename)

        # Need to be able to load from current dir.
        sys.path.append('')
        importlib.invalidate_caches()

        try:
            make_legacy_pyc(filename)
            # This used to crash.
            exec('import ' + module)
        finally:
            # Cleanup.
            del sys.path[-1]
            unlink(filename + 'c')
            unlink(filename + 'o')

    def test_failing_import_sticks(self):
        source = TESTFN + ".py"
        with open(source, "w") as f:
            print("a = 1/0", file=f)

        # New in 2.4, we shouldn't be able to import that no matter how often
        # we try.
        sys.path.insert(0, os.curdir)
        importlib.invalidate_caches()
        if TESTFN in sys.modules:
            del sys.modules[TESTFN]
        try:
            for i in [1, 2, 3]:
                self.assertRaises(ZeroDivisionError, __import__, TESTFN)
                self.assertNotIn(TESTFN, sys.modules,
                                 "damaged module in sys.modules on %i try" % i)
        finally:
            del sys.path[0]
            remove_files(TESTFN)

    def test_failing_reload(self):
        # A failing reload should leave the module object in sys.modules.
        source = TESTFN + os.extsep + "py"
        with open(source, "w") as f:
            f.write("a = 1\nb=2\n")

        sys.path.insert(0, os.curdir)
        try:
            mod = __import__(TESTFN)
            self.assertIn(TESTFN, sys.modules)
            self.assertEqual(mod.a, 1, "module has wrong attribute values")
            self.assertEqual(mod.b, 2, "module has wrong attribute values")

            # On WinXP, just replacing the .py file wasn't enough to
            # convince reload() to reparse it.  Maybe the timestamp didn't
            # move enough.  We force it to get reparsed by removing the
            # compiled file too.
            remove_files(TESTFN)

            # Now damage the module.
            with open(source, "w") as f:
                f.write("a = 10\nb=20//0\n")

            self.assertRaises(ZeroDivisionError, imp.reload, mod)
            # But we still expect the module to be in sys.modules.
            mod = sys.modules.get(TESTFN)
            self.assertIsNot(mod, None, "expected module to be in sys.modules")

            # We should have replaced a w/ 10, but the old b value should
            # stick.
            self.assertEqual(mod.a, 10, "module has wrong attribute values")
            self.assertEqual(mod.b, 2, "module has wrong attribute values")

        finally:
            del sys.path[0]
            remove_files(TESTFN)
            unload(TESTFN)

    def test_file_to_source(self):
        # check if __file__ points to the source file where available
        source = TESTFN + ".py"
        with open(source, "w") as f:
            f.write("test = None\n")

        sys.path.insert(0, os.curdir)
        try:
            mod = __import__(TESTFN)
            self.assertTrue(mod.__file__.endswith('.py'))
            os.remove(source)
            del sys.modules[TESTFN]
            make_legacy_pyc(source)
            importlib.invalidate_caches()
            mod = __import__(TESTFN)
            base, ext = os.path.splitext(mod.__file__)
            self.assertIn(ext, ('.pyc', '.pyo'))
        finally:
            del sys.path[0]
            remove_files(TESTFN)
            if TESTFN in sys.modules:
                del sys.modules[TESTFN]

    def test_import_name_binding(self):
        # import x.y.z binds x in the current namespace.
        import test as x
        import test.support
        self.assertIs(x, test, x.__name__)
        self.assertTrue(hasattr(test.support, "__file__"))

        # import x.y.z as w binds z as w.
        import test.support as y
        self.assertIs(y, test.support, y.__name__)

    def test_import_by_filename(self):
        # Importing by a filesystem path (not a dotted name) must fail.
        path = os.path.abspath(TESTFN)
        encoding = sys.getfilesystemencoding()
        try:
            path.encode(encoding)
        except UnicodeEncodeError:
            self.skipTest('path is not encodable to {}'.format(encoding))
        with self.assertRaises(ImportError) as c:
            __import__(path)

    def test_import_in_del_does_not_crash(self):
        # Issue 4236
        testfn = script_helper.make_script('', TESTFN, textwrap.dedent("""\
            import sys
            class C:
                def __del__(self):
                    import imp
            sys.argv.insert(0, C())
            """))
        script_helper.assert_python_ok(testfn)

    def test_timestamp_overflow(self):
        # A modification timestamp larger than 2**32 should not be a problem
        # when importing a module (issue #11235).
        sys.path.insert(0, os.curdir)
        try:
            source = TESTFN + ".py"
            compiled = imp.cache_from_source(source)
            with open(source, 'w') as f:
                pass
            try:
                os.utime(source, (2 ** 33 - 5, 2 ** 33 - 5))
            except OverflowError:
                self.skipTest("cannot set modification time to large integer")
            except OSError as e:
                if e.errno != getattr(errno, 'EOVERFLOW', None):
                    raise
                self.skipTest("cannot set modification time to large integer ({})".format(e))
            __import__(TESTFN)
            # The pyc file was created.
            os.stat(compiled)
        finally:
            del sys.path[0]
            remove_files(TESTFN)
class PycRewritingTests(unittest.TestCase):
    # Test that the `co_filename` attribute on code objects always points
    # to the right file, even when various things happen (e.g. both the .py
    # and the .pyc file are renamed).

    module_name = "unlikely_module_name"
    # The contents of the throwaway module; it records where Python thinks
    # its own frame, module, and function code came from.
    module_source = """
import sys
code_filename = sys._getframe().f_code.co_filename
module_filename = __file__
constant = 1
def func():
    pass
func_filename = func.__code__.co_filename
"""
    dir_name = os.path.abspath(TESTFN)
    file_name = os.path.join(dir_name, module_name) + os.extsep + "py"
    compiled_name = imp.cache_from_source(file_name)

    def setUp(self):
        # Snapshot sys.path and any pre-existing module of the same name so
        # tearDown can restore both.
        self.sys_path = sys.path[:]
        self.orig_module = sys.modules.pop(self.module_name, None)
        os.mkdir(self.dir_name)
        with open(self.file_name, "w") as f:
            f.write(self.module_source)
        sys.path.insert(0, self.dir_name)
        importlib.invalidate_caches()

    def tearDown(self):
        sys.path[:] = self.sys_path
        if self.orig_module is not None:
            sys.modules[self.module_name] = self.orig_module
        else:
            unload(self.module_name)
        unlink(self.file_name)
        unlink(self.compiled_name)
        rmtree(self.dir_name)

    def import_module(self):
        # Import the throwaway module and hand back the module object.
        ns = globals()
        __import__(self.module_name, ns, ns)
        return sys.modules[self.module_name]

    def test_basics(self):
        mod = self.import_module()
        self.assertEqual(mod.module_filename, self.file_name)
        self.assertEqual(mod.code_filename, self.file_name)
        self.assertEqual(mod.func_filename, self.file_name)
        # A second import (after forced unload) must report the same names.
        del sys.modules[self.module_name]
        mod = self.import_module()
        self.assertEqual(mod.module_filename, self.file_name)
        self.assertEqual(mod.code_filename, self.file_name)
        self.assertEqual(mod.func_filename, self.file_name)

    def test_incorrect_code_name(self):
        # Compiling with a bogus dfile must not leak into the attributes.
        py_compile.compile(self.file_name, dfile="another_module.py")
        mod = self.import_module()
        self.assertEqual(mod.module_filename, self.file_name)
        self.assertEqual(mod.code_filename, self.file_name)
        self.assertEqual(mod.func_filename, self.file_name)

    def test_module_without_source(self):
        # With no .py on disk, __file__ is the pyc but the code objects keep
        # the dfile name baked in at compile time.
        target = "another_module.py"
        py_compile.compile(self.file_name, dfile=target)
        os.remove(self.file_name)
        pyc_file = make_legacy_pyc(self.file_name)
        importlib.invalidate_caches()
        mod = self.import_module()
        self.assertEqual(mod.module_filename, pyc_file)
        self.assertEqual(mod.code_filename, target)
        self.assertEqual(mod.func_filename, target)

    def test_foreign_code(self):
        # Splice a code object compiled elsewhere into the pyc and check that
        # its co_filename survives the import untouched.
        py_compile.compile(self.file_name)
        with open(self.compiled_name, "rb") as f:
            # NOTE(review): assumes the pyc header is 12 bytes
            # (magic + mtime + size) as in this Python version.
            header = f.read(12)
            code = marshal.load(f)
        constants = list(code.co_consts)
        foreign_code = test_main.__code__
        pos = constants.index(1)
        constants[pos] = foreign_code
        code = type(code)(code.co_argcount, code.co_kwonlyargcount,
                          code.co_nlocals, code.co_stacksize,
                          code.co_flags, code.co_code, tuple(constants),
                          code.co_names, code.co_varnames, code.co_filename,
                          code.co_name, code.co_firstlineno, code.co_lnotab,
                          code.co_freevars, code.co_cellvars)
        with open(self.compiled_name, "wb") as f:
            f.write(header)
            marshal.dump(code, f)
        mod = self.import_module()
        self.assertEqual(mod.constant.co_filename, foreign_code.co_filename)
class PathsTests(unittest.TestCase):
    # Importing from unusual sys.path entries: trailing slashes, non-ASCII
    # names, UNC shares.
    SAMPLES = ('test', 'test\u00e4\u00f6\u00fc\u00df', 'test\u00e9\u00e8',
               'test\u00b0\u00b3\u00b2')
    path = TESTFN

    def setUp(self):
        os.mkdir(self.path)
        self.syspath = sys.path[:]

    def tearDown(self):
        rmtree(self.path)
        sys.path[:] = self.syspath

    # Regression test for http://bugs.python.org/issue1293.
    def test_trailing_slash(self):
        with open(os.path.join(self.path, 'test_trailing_slash.py'), 'w') as f:
            f.write("testdata = 'test_trailing_slash'")
        # A sys.path entry with a trailing slash must still work.
        sys.path.append(self.path+'/')
        mod = __import__("test_trailing_slash")
        self.assertEqual(mod.testdata, 'test_trailing_slash')
        unload("test_trailing_slash")

    # Regression test for http://bugs.python.org/issue3677.
    @unittest.skipUnless(sys.platform == 'win32', 'Windows-specific')
    def test_UNC_path(self):
        with open(os.path.join(self.path, 'test_unc_path.py'), 'w') as f:
            f.write("testdata = 'test_unc_path'")
        importlib.invalidate_caches()
        # Create the UNC path, like \\myhost\c$\foo\bar.
        path = os.path.abspath(self.path)
        import socket
        hn = socket.gethostname()
        drive = path[0]
        unc = "\\\\%s\\%s$"%(hn, drive)
        unc += path[2:]
        try:
            os.listdir(unc)
        except OSError as e:
            if e.errno in (errno.EPERM, errno.EACCES):
                # See issue #15338
                self.skipTest("cannot access administrative share %r" % (unc,))
            raise
        sys.path.insert(0, unc)
        try:
            mod = __import__("test_unc_path")
        except ImportError as e:
            self.fail("could not import 'test_unc_path' from %r: %r"
                      % (unc, e))
        self.assertEqual(mod.testdata, 'test_unc_path')
        self.assertTrue(mod.__file__.startswith(unc), mod.__file__)
        unload("test_unc_path")
class RelativeImportTests(unittest.TestCase):
    # Explicit relative imports: success requires a usable __package__ (or
    # a package-shaped __name__), and they must never silently fall back to
    # an absolute import.

    def tearDown(self):
        unload("test.relimport")
    # Same cleanup before each test as after.
    setUp = tearDown

    def test_relimport_star(self):
        # This will import * from .test_import.
        from . import relimport
        self.assertTrue(hasattr(relimport, "RelativeImportTests"))

    def test_issue3221(self):
        # Note for mergers: the 'absolute' tests from the 2.x branch
        # are missing in Py3k because implicit relative imports are
        # a thing of the past
        #
        # Regression test for http://bugs.python.org/issue3221.
        def check_relative():
            # Executed with a hand-built namespace so __package__/__name__
            # can be faked per case.
            exec("from . import relimport", ns)

        # Check relative import OK with __package__ and __name__ correct
        ns = dict(__package__='test', __name__='test.notarealmodule')
        check_relative()

        # Check relative import OK with only __name__ wrong
        ns = dict(__package__='test', __name__='notarealpkg.notarealmodule')
        check_relative()

        # Check relative import fails with only __package__ wrong
        ns = dict(__package__='foo', __name__='test.notarealmodule')
        self.assertRaises(SystemError, check_relative)

        # Check relative import fails with __package__ and __name__ wrong
        ns = dict(__package__='foo', __name__='notarealpkg.notarealmodule')
        self.assertRaises(SystemError, check_relative)

        # Check relative import fails with package set to a non-string
        ns = dict(__package__=object())
        self.assertRaises(TypeError, check_relative)

    def test_absolute_import_without_future(self):
        # If explicit relative import syntax is used, then do not try
        # to perform an absolute import in the face of failure.
        # Issue #7902.
        with self.assertRaises(ImportError):
            from .os import sep
            self.fail("explicit relative import triggered an "
                      "implicit absolute import")
class OverridingImportBuiltinTests(unittest.TestCase):
    def test_override_builtin(self):
        # Replacing builtins.__import__ bypasses sys.modules; shadowing
        # __import__ in a module's globals() currently does not.
        import os

        def import_os():
            import os
            return os

        self.assertEqual(import_os(), os)  # Quick sanity check.

        # A replaced builtins.__import__ is honored, even for a module
        # already cached in sys.modules.
        with swap_attr(builtins, "__import__", lambda *x: 5):
            self.assertEqual(import_os(), 5)

        # A module-global __import__ shadow is ignored by the import
        # statement.  Keep this as a tripwire: if the machinery ever starts
        # consulting globals, other code will need to change too.
        with swap_item(globals(), "__import__", lambda *x: 5):
            self.assertEqual(import_os(), os)
class PycacheTests(unittest.TestCase):
    # Test the various PEP 3147 related behaviors.

    tag = imp.get_tag()

    def _clean(self):
        # Remove the module, its cache directory, and its source file.
        forget(TESTFN)
        rmtree('__pycache__')
        unlink(self.source)

    def setUp(self):
        self.source = TESTFN + '.py'
        self._clean()
        with open(self.source, 'w') as fp:
            print('# This is a test file written by test_import.py', file=fp)
        sys.path.insert(0, os.curdir)
        importlib.invalidate_caches()

    def tearDown(self):
        assert sys.path[0] == os.curdir, 'Unexpected sys.path[0]'
        del sys.path[0]
        self._clean()

    def test_import_pyc_path(self):
        # Importing a source module creates a tagged pyc/pyo under
        # __pycache__ (pyc in normal mode, pyo under -O).
        self.assertFalse(os.path.exists('__pycache__'))
        __import__(TESTFN)
        self.assertTrue(os.path.exists('__pycache__'))
        self.assertTrue(os.path.exists(os.path.join(
            '__pycache__', '{}.{}.py{}'.format(
            TESTFN, self.tag, __debug__ and 'c' or 'o'))))

    @unittest.skipUnless(os.name == 'posix',
                         "test meaningful only on posix systems")
    @unittest.skipIf(hasattr(os, 'geteuid') and os.geteuid() == 0,
            "due to varying filesystem permission semantics (issue #11956)")
    def test_unwritable_directory(self):
        # When the umask causes the new __pycache__ directory to be
        # unwritable, the import still succeeds but no .pyc file is written.
        with temp_umask(0o222):
            __import__(TESTFN)
        self.assertTrue(os.path.exists('__pycache__'))
        self.assertFalse(os.path.exists(os.path.join(
            '__pycache__', '{}.{}.pyc'.format(TESTFN, self.tag))))

    def test_missing_source(self):
        # With PEP 3147 cache layout, removing the source but leaving the pyc
        # file does not satisfy the import.
        __import__(TESTFN)
        pyc_file = imp.cache_from_source(self.source)
        self.assertTrue(os.path.exists(pyc_file))
        os.remove(self.source)
        forget(TESTFN)
        self.assertRaises(ImportError, __import__, TESTFN)

    def test_missing_source_legacy(self):
        # Like test_missing_source() except that for backward compatibility,
        # when the pyc file lives where the py file would have been (and named
        # without the tag), it is importable.  The __file__ of the imported
        # module is the pyc location.
        __import__(TESTFN)
        # pyc_file gets removed in _clean() via tearDown().
        pyc_file = make_legacy_pyc(self.source)
        os.remove(self.source)
        unload(TESTFN)
        importlib.invalidate_caches()
        m = __import__(TESTFN)
        self.assertEqual(m.__file__,
                         os.path.join(os.curdir, os.path.relpath(pyc_file)))

    def test___cached__(self):
        # Modules now also have an __cached__ that points to the pyc file.
        m = __import__(TESTFN)
        pyc_file = imp.cache_from_source(TESTFN + '.py')
        self.assertEqual(m.__cached__, os.path.join(os.curdir, pyc_file))

    def test___cached___legacy_pyc(self):
        # Like test___cached__() except that for backward compatibility,
        # when the pyc file lives where the py file would have been (and named
        # without the tag), it is importable.  The __cached__ of the imported
        # module is the pyc location.
        __import__(TESTFN)
        # pyc_file gets removed in _clean() via tearDown().
        pyc_file = make_legacy_pyc(self.source)
        os.remove(self.source)
        unload(TESTFN)
        importlib.invalidate_caches()
        m = __import__(TESTFN)
        self.assertEqual(m.__cached__,
                         os.path.join(os.curdir, os.path.relpath(pyc_file)))

    def test_package___cached__(self):
        # Like test___cached__ but for packages.
        def cleanup():
            rmtree('pep3147')
            unload('pep3147.foo')
            unload('pep3147')
        os.mkdir('pep3147')
        self.addCleanup(cleanup)
        # Touch the __init__.py
        with open(os.path.join('pep3147', '__init__.py'), 'w'):
            pass
        with open(os.path.join('pep3147', 'foo.py'), 'w'):
            pass
        importlib.invalidate_caches()
        m = __import__('pep3147.foo')
        init_pyc = imp.cache_from_source(
            os.path.join('pep3147', '__init__.py'))
        self.assertEqual(m.__cached__, os.path.join(os.curdir, init_pyc))
        foo_pyc = imp.cache_from_source(os.path.join('pep3147', 'foo.py'))
        self.assertEqual(sys.modules['pep3147.foo'].__cached__,
                         os.path.join(os.curdir, foo_pyc))

    def test_package___cached___from_pyc(self):
        # Like test___cached__ but ensuring __cached__ when imported from a
        # PEP 3147 pyc file.
        def cleanup():
            rmtree('pep3147')
            unload('pep3147.foo')
            unload('pep3147')
        os.mkdir('pep3147')
        self.addCleanup(cleanup)
        # Touch the __init__.py
        with open(os.path.join('pep3147', '__init__.py'), 'w'):
            pass
        with open(os.path.join('pep3147', 'foo.py'), 'w'):
            pass
        importlib.invalidate_caches()
        # First import writes the pyc; the second (after unload) imports
        # from the cached pyc.
        m = __import__('pep3147.foo')
        unload('pep3147.foo')
        unload('pep3147')
        importlib.invalidate_caches()
        m = __import__('pep3147.foo')
        init_pyc = imp.cache_from_source(
            os.path.join('pep3147', '__init__.py'))
        self.assertEqual(m.__cached__, os.path.join(os.curdir, init_pyc))
        foo_pyc = imp.cache_from_source(os.path.join('pep3147', 'foo.py'))
        self.assertEqual(sys.modules['pep3147.foo'].__cached__,
                         os.path.join(os.curdir, foo_pyc))

    def test_recompute_pyc_same_second(self):
        # Even when the source file doesn't change timestamp, a change in
        # source size is enough to trigger recomputation of the pyc file.
        __import__(TESTFN)
        unload(TESTFN)
        with open(self.source, 'a') as fp:
            print("x = 5", file=fp)
        m = __import__(TESTFN)
        self.assertEqual(m.x, 5)
class TestSymbolicallyLinkedPackage(unittest.TestCase):
    # A package reachable only through a symlink must still be importable.
    package_name = 'sample'

    def setUp(self):
        # Remove leftovers from a previous (possibly aborted) run first.
        if os.path.exists(self.tagged):
            shutil.rmtree(self.tagged)
        if os.path.exists(self.package_name):
            os.remove(self.package_name)
        self.orig_sys_path = sys.path[:]

        # create a sample package; imagine you have a package with a tag and
        # you want to symbolically link it from its untagged name.
        os.mkdir(self.tagged)
        init_file = os.path.join(self.tagged, '__init__.py')
        open(init_file, 'w').close()
        self.assertEqual(os.path.exists(init_file), True)

        # now create a symlink to the tagged package
        # sample -> sample-tagged
        os.symlink(self.tagged, self.package_name)

        # disabled because os.isdir currently fails (see issue 15093)
        # self.assertEqual(os.path.isdir(self.package_name), True)

        self.assertEqual(
            os.path.isfile(os.path.join(self.package_name, '__init__.py')),
            True,
        )

    @property
    def tagged(self):
        # Directory name of the real (non-symlinked) package.
        return self.package_name + '-tagged'

    # regression test for issue6727
    @unittest.skipUnless(
        not hasattr(sys, 'getwindowsversion')
        or sys.getwindowsversion() >= (6, 0),
        "Windows Vista or later required")
    @test.support.skip_unless_symlink
    @unittest.skipUnless(
        sys.platform == 'win32',
        "Test failing on Unix (see issue15091)"
    )
    def test_symlinked_dir_importable(self):
        # make sure sample can only be imported from the current directory.
        sys.path[:] = ['.']

        # and try to import the package
        __import__(self.package_name)

    def tearDown(self):
        # now cleanup
        if os.path.exists(self.package_name):
            os.remove(self.package_name)
        if os.path.exists(self.tagged):
            shutil.rmtree(self.tagged)
        sys.path[:] = self.orig_sys_path
@cpython_only
class ImportlibBootstrapTests(unittest.TestCase):
    # These tests check that importlib is bootstrapped.

    def test_frozen_importlib(self):
        mod = sys.modules['_frozen_importlib']
        self.assertTrue(mod)

    def test_frozen_importlib_is_bootstrap(self):
        # The frozen module must be the very same object as
        # importlib._bootstrap, with package metadata intact.
        from importlib import _bootstrap
        mod = sys.modules['_frozen_importlib']
        self.assertIs(mod, _bootstrap)
        self.assertEqual(mod.__name__, 'importlib._bootstrap')
        self.assertEqual(mod.__package__, 'importlib')
        self.assertTrue(mod.__file__.endswith('_bootstrap.py'), mod.__file__)

    def test_there_can_be_only_one(self):
        # Issue #15386 revealed a tricky loophole in the bootstrapping
        # This test is technically redundant, since the bug caused importing
        # this test module to crash completely, but it helps prove the point
        from importlib import machinery
        mod = sys.modules['_frozen_importlib']
        self.assertIs(machinery.FileFinder, mod.FileFinder)
        self.assertIs(imp.new_module, mod.new_module)
class ImportTracebackTests(unittest.TestCase):
    # Tracebacks from failed imports must show the user's frames (and, when
    # the failure is inside importlib itself, importlib's frames too).

    def setUp(self):
        os.mkdir(TESTFN)
        self.old_path = sys.path[:]
        sys.path.insert(0, TESTFN)

    def tearDown(self):
        sys.path[:] = self.old_path
        rmtree(TESTFN)

    def create_module(self, mod, contents):
        # Write <TESTFN>/<mod>.py, schedule its unload, and refresh the
        # finder caches so the new file is seen.
        with open(os.path.join(TESTFN, mod + ".py"), "w") as f:
            f.write(contents)
        self.addCleanup(unload, mod)
        importlib.invalidate_caches()

    def assert_traceback(self, tb, files):
        # Compare the traceback's filenames (consecutive duplicates
        # collapsed) against the expected substrings, in order.
        deduped_files = []
        while tb:
            code = tb.tb_frame.f_code
            fn = code.co_filename
            if not deduped_files or fn != deduped_files[-1]:
                deduped_files.append(fn)
            tb = tb.tb_next
        self.assertEqual(len(deduped_files), len(files), deduped_files)
        for fn, pat in zip(deduped_files, files):
            self.assertIn(pat, fn)

    def test_nonexistent_module(self):
        try:
            # assertRaises() clears __traceback__
            import nonexistent_xyzzy
        except ImportError as e:
            tb = e.__traceback__
        else:
            self.fail("ImportError should have been raised")
        self.assert_traceback(tb, [__file__])

    def test_nonexistent_module_nested(self):
        self.create_module("foo", "import nonexistent_xyzzy")
        try:
            import foo
        except ImportError as e:
            tb = e.__traceback__
        else:
            self.fail("ImportError should have been raised")
        self.assert_traceback(tb, [__file__, 'foo.py'])

    def test_exec_failure(self):
        self.create_module("foo", "1/0")
        try:
            import foo
        except ZeroDivisionError as e:
            tb = e.__traceback__
        else:
            self.fail("ZeroDivisionError should have been raised")
        self.assert_traceback(tb, [__file__, 'foo.py'])

    def test_exec_failure_nested(self):
        self.create_module("foo", "import bar")
        self.create_module("bar", "1/0")
        try:
            import foo
        except ZeroDivisionError as e:
            tb = e.__traceback__
        else:
            self.fail("ZeroDivisionError should have been raised")
        self.assert_traceback(tb, [__file__, 'foo.py', 'bar.py'])

    @cpython_only
    def test_import_bug(self):
        # We simulate a bug in importlib and check that it's not stripped
        # away from the traceback.
        self.create_module("foo", "")
        importlib = sys.modules['_frozen_importlib']
        old_load_module = importlib.SourceLoader.load_module
        try:
            def load_module(*args):
                1/0
            importlib.SourceLoader.load_module = load_module
            try:
                import foo
            except ZeroDivisionError as e:
                tb = e.__traceback__
            else:
                self.fail("ZeroDivisionError should have been raised")
            self.assert_traceback(tb, [__file__, '<frozen importlib', __file__])
        finally:
            # Always restore the real loader.
            importlib.SourceLoader.load_module = old_load_module
def test_main(verbose=None):
    """Run all import test suites in this module."""
    suites = (ImportTests, PycacheTests,
              PycRewritingTests, PathsTests, RelativeImportTests,
              OverridingImportBuiltinTests,
              ImportlibBootstrapTests,
              TestSymbolicallyLinkedPackage,
              ImportTracebackTests)
    run_unittest(*suites)
if __name__ == '__main__':
    # Test needs to be a package, so we can do relative imports.
    # Re-import ourselves through the package namespace before running.
    from test.test_import import test_main
    test_main()
|
#!/usr/bin/python
'''The MIT License (MIT)
Copyright (c) 2017 Yu Xiong Wei(try.dash.now@gmail.com)
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.'''
__author__ = 'sean yu (Yu, Xiongwei)'
__doc__ = '''
it's GUI of DasH aka Do as Human
created 2017-05-06 by Sean Yu
'''
from datetime import datetime
import wx.grid as gridlib
import traceback
import wx
from gui.MainFrame import MainFrame
import os
from lib.common import load_bench, caller_stack_info,info, get_next_in_ring_list,get_folder_item, info,debug, warn, error, parse_command_line, call_function_in_module
import re
import time
import threading
import ConfigParser
import sys
import inspect
import Queue
from SessionTab import SessionTab
import imp
import types
#from dut import dut
class RedirectText(object):
    """File-like object that tees writes to a wx text control (color-coded),
    the original stdout, and an optional on-disk log file.

    Instances are meant to replace sys.stdout/sys.stderr; the original
    streams are remembered so output still reaches the real console.
    """
    font_point_size = 10
    old_stdout = None
    old_stderr = None
    write_lock = None   # serializes writes coming from multiple threads
    log_file = None

    def __init__(self, aWxTextCtrl, log_path=None):
        # Remember the real streams before the caller redirects them to us.
        self.old_stderr, self.old_stdout = sys.stderr, sys.stdout
        self.out = aWxTextCtrl
        self.font_point_size = self.out.GetFont().PointSize
        self.write_lock = threading.Lock()
        if log_path:
            name = '{}/dash.log'.format(log_path)
            self.log_file = open(name, 'w+')
            # Expose the log file's fileno so callers that need a real fd
            # (e.g. subprocess) can use this object.
            self.fileno = self.log_file.fileno

    def write(self, string):
        # BUGFIX: the lock used to be acquired/released manually; any
        # exception raised by the wx calls below left it held forever,
        # deadlocking every subsequent write.  The ``with`` statement
        # guarantees release.
        with self.write_lock:
            self.old_stdout.write(string)
            #string = string.replace('\\033\[[0-9\;]+m', '')
            #self.old_stderr.write(string)
            # Highlight error-ish lines; everything else gets the normal
            # green-on-black console style.
            if re.search('error|\s+err\s+|fail|wrong', string.lower()):
                self.out.SetDefaultStyle(wx.TextAttr(wx.RED, wx.YELLOW, font=wx.Font(self.font_point_size + 2, family=wx.DEFAULT, style=wx.NORMAL, weight=wx.BOLD, faceName='Consolas')))
            else:
                self.out.SetDefaultStyle(wx.TextAttr(wx.GREEN, wx.BLACK, font=wx.Font(self.font_point_size, family=wx.DEFAULT, style=wx.NORMAL, weight=wx.NORMAL, faceName='Consolas')))
            # AppendText must run on the GUI thread.
            wx.CallAfter(self.out.AppendText, string)
            if self.log_file:
                self.log_file.write(string)
                self.log_file.flush()

    def close(self):
        """Flush and close the log file (idempotent)."""
        if self.log_file:
            self.log_file.flush()
            self.log_file.close()
            # BUGFIX: drop the reference so a late write() skips the closed
            # file instead of raising "I/O operation on closed file".
            self.log_file = None
class process_info(object):
    """Lightweight record describing a spawned subprocess.

    ``returncode`` is exposed as a read-only property delegating to the
    wrapped process object, so callers always see the current exit status.
    """
    process = None
    pid = None
    full_name = None

    def __init__(self, name, process):
        self.process = process
        self.pid = process.pid
        self.full_name = name
        # BUGFIX: the original also did ``self.returncode =
        # process.returncode`` here, but ``returncode`` is a setter-less
        # property on this (new-style) class, so that assignment raised
        # AttributeError and construction always failed.  The property
        # below supplies the (always current) value instead.

    @property
    def returncode(self):
        # Delegate so the value reflects the process's live state rather
        # than a snapshot taken at construction time.
        return self.process.returncode
class FileEditor(wx.Panel):
    # Panel hosting either a rich text editor (type 'text') or a wx grid
    # (any other type, default 'grid') for editing a file's contents.
    editor = None
    font_size = 10
    parent = None
    # NOTE(review): ``type`` shadows the builtin; kept for compatibility.
    type = None
    sessions_node = None
    function_node = None
    case_suite_node = None
    full_file_name = None
    file_instance = None

    def on_close(self):
        # Persist the editor contents back to the backing file, if any.
        # NOTE(review): GetValue() looks text-editor specific -- confirm
        # behavior when the editor is a grid.
        if self.full_file_name:
            data = self.editor.GetValue()
            with open(self.full_file_name, 'w') as f:
                f.write(data)
                f.flush()
    #done 2017-9-12: handle close tab in edit_area

    def __init__(self, parent, title='pageOne', type='grid', file_name=None):
        wx.Panel.__init__(self, parent)
        self.parent = parent
        self.type = type
        self.full_file_name = file_name
        #self.editor = wx.TextCtrl(self, style = wx.TE_MULTILINE|wx.TE_RICH2|wx.EXPAND|wx.ALL, size=(-1,-1))
        if type in ['text']:
            # Text mode: multi-line rich text control pre-filled from the file.
            self.editor = wx.TextCtrl(self, -1, wx.EmptyString, wx.DefaultPosition, wx.DefaultSize, wx.TE_AUTO_URL | wx.VSCROLL | wx.TE_RICH | wx.TE_MULTILINE & (~wx.TE_PROCESS_ENTER))
            #wx.richtext.RichTextCtrl( self, -1, wx.EmptyString, wx.DefaultPosition, wx.DefaultSize, 0|wx.VSCROLL|wx.HSCROLL|wx.NO_BORDER|wx.WANTS_CHARS )
            with open(self.full_file_name, 'r') as f:
                for line in f.readlines():
                    self.editor.AppendText(line)
        else:
            # Grid mode: 50x5 grid; column 0 is the function name, the rest
            # are its arguments, with distinct text colors per role.
            self.editor = gridlib.Grid(self)
            self.editor.CreateGrid(50, 5)
            col = self.editor.GetNumberCols()
            row = self.editor.GetNumberRows()
            function_color = 'black'
            arg_color = 'blue'
            for c in range(0, col):
                if c < 1:
                    self.editor.SetColLabelValue(c, 'Function Name')
                else:
                    self.editor.SetColLabelValue(c, 'arg# {}'.format(c))
                for r in range(0, row):
                    self.editor.SetCellTextColour(r, c, function_color if c < 1 else arg_color)
            for r in range(0, row):
                # Function-name column rendered in bold.
                self.editor.SetCellFont(r, 0, wx.Font(self.font_size, wx.SWISS, wx.NORMAL, wx.BOLD))
        # Ctrl+wheel zooms the editor font (see editor_OnMouseWheel).
        self.editor.Bind(wx.EVT_MOUSEWHEEL, self.editor_OnMouseWheel)
        sizer = wx.BoxSizer()
        sizer.Add(self.editor, 1, wx.EXPAND)
        self.SetSizer(sizer)

    def editor_OnMouseWheel(self, event):
        # Ctrl+wheel font zoom for both editor kinds.
        min_font_size = 5
        interval_step = 2
        if event.ControlDown():
            pass
        else:
            return
        # NOTE(review): zoom-out shrinks by interval_step (2) down to the
        # minimum, while zoom-in grows by 1 with no upper bound -- the
        # asymmetry looks intentional but is unconfirmed.
        if event.GetWheelRotation() < 0:
            if self.font_size > min_font_size:
                self.font_size -= interval_step
        else:
            self.font_size += 1
        if self.type in ['text']:
            f = self.editor.GetFont()
            f.PointSize = self.font_size
            self.editor.SetFont(f)
        else:
            # Grids have per-cell fonts; update every cell.
            col = self.editor.GetNumberCols()
            row = self.editor.GetNumberRows()
            for c in range(0, col):
                for r in range(0, row):
                    f = self.editor.GetCellFont(r, c)
                    f.PointSize = self.font_size
                    self.editor.SetCellFont(r, c, f)
        self.Refresh()
        #wx.StaticText(self, -1, "THIS IS A PAGE OBJECT", (20,20))
#wx.StaticText(self, -1, "THIS IS A PAGE OBJECT", (20,20))
#DONE: DasHFrame should handle CLOSE event when closing the app, call on_close_tab_in_edit_area for all opened sessions and files
class DasHFrame(MainFrame):#wx.Frame
ini_setting = None
#m_left_navigator =None
redir = None
edit_area=None
tabs_in_edit_area = None
src_path = None
sessions_alive=None
sequence_queue=None
history_cmd = []
history_cmd_index = -1
import_modules={'TC':'TC'}
lib_path ='./lib'
log_path = '../log'
session_path = './sessions'
suite_path = '../test_suite'
dict_test_report= None
alive =True
mail_server=None
mail_to_list=None
mail_from=None
    def __init__(self,parent=None, ini_file = './gDasH.ini'):
        """Build the whole UI from *ini_file*: read paths and mail settings,
        redirect stdout/stderr into the log window, create the menu, the three
        navigator trees and the editor notebook, then start the background
        thread that polls running test cases."""
        #wx.Frame.__init__(self, None, title="DasH")
        self.dict_test_report={}
        self.tabs_in_edit_area=[]
        self.sessions_alive={}
        MainFrame.__init__(self, parent=parent)
        self.sequence_queue= Queue.Queue()
        #self.sequence_queue.put()
        # all paths and mail settings come from the [dash] section of the ini
        self.ini_setting = ConfigParser.ConfigParser()
        self.ini_setting.read(ini_file)
        self.src_path = os.path.abspath(self.ini_setting.get('dash','src_path'))
        self.lib_path = os.path.abspath(self.ini_setting.get('dash','lib_path'))
        self.log_path = os.path.abspath(self.ini_setting.get('dash','log_path'))
        self.suite_path = os.path.abspath(self.ini_setting.get('dash', 'test_suite_path'))
        self.mail_server= self.ini_setting.get('dash', 'mail_server')
        self.mail_from =self.ini_setting.get('dash', 'mail_from')
        self.mail_to_list=self.ini_setting.get('dash', 'mail_to_list')
        from lib.common import create_case_folder, create_dir
        # presumably the lib logger reads '-l <path>' from sys.argv — confirm
        sys.argv.append('-l')
        sys.argv.append('{}'.format(self.log_path))
        self.log_path = create_case_folder(self.log_path)
        self.suite_path = create_dir(self.suite_path)
        self.lib_path = create_dir(self.lib_path)
        self.src_path = create_dir(self.src_path)
        if not os.path.exists(self.log_path):
            os.mkdir(self.log_path)
        self.add_src_path_to_python_path(self.src_path)
        # from here on stdout/stderr are mirrored into the in-app log window
        self.redir = RedirectText(self.m_log, self.log_path)
        sys.stdout = self.redir
        sys.stderr = self.redir
        self.m_log.SetBackgroundColour('Black')
        self.m_log.SetDefaultStyle(wx.TextAttr(wx.GREEN, wx.BLACK, font =wx.Font(9, family = wx.DEFAULT, style = wx.NORMAL, weight = wx.BOLD, faceName = 'Consolas')))
        #self.m_editor.WriteText('welcome to dash world')
        self.m_log.WriteText('Welcome to DasH!\n')
        self.m_command_box.WriteText('functions.static_function_in_module test_ssh 2')
        fileMenu = wx.Menu()
        open_test_suite = fileMenu.Append(wx.NewId(), "Open TestSuite", "Open a Test Suite")
        open_test_case = fileMenu.Append(wx.NewId(), "Open TestCase", "Open a Test Case")
        mail_test_report = fileMenu.Append(wx.NewId(), "Mail Test Report", "Mail Test Report")
        self.m_menubar_main.Append(fileMenu, "&Open")
        self.Bind(wx.EVT_MENU,self.on_mail_test_report ,mail_test_report)
        self.Bind(wx.EVT_CLOSE, self.on_close)
        self.m_command_box.Bind(wx.EVT_TEXT_ENTER, self.on_command_enter)
        self.m_command_box.Bind(wx.EVT_KEY_UP, self.on_key_up)
        self.m_command_box.Bind(wx.EVT_KEY_DOWN, self.on_key_down)
        from wx.aui import AuiNotebook
        bookStyle = wx.aui.AUI_NB_DEFAULT_STYLE &(~wx.aui.AUI_NB_CLOSE_ON_ACTIVE_TAB)
        self.navigator = AuiNotebook(self.m_left_navigator, style= bookStyle )
        # the three navigator trees share identical style flags
        self.case_suite_page = wx.TreeCtrl(self.navigator, wx.ID_ANY, wx.DefaultPosition, wx.Size(-1, -1), wx.TR_DEFAULT_STYLE | wx.TR_EDIT_LABELS | wx.TR_EXTENDED | wx.TR_HAS_BUTTONS | wx.TR_HAS_VARIABLE_ROW_HEIGHT | wx.HSCROLL | wx.TAB_TRAVERSAL | wx.VSCROLL | wx.WANTS_CHARS)
        self.function_page = wx.TreeCtrl(self.navigator, wx.ID_ANY, wx.DefaultPosition, wx.Size(-1, -1), wx.TR_DEFAULT_STYLE | wx.TR_EDIT_LABELS | wx.TR_EXTENDED | wx.TR_HAS_BUTTONS | wx.TR_HAS_VARIABLE_ROW_HEIGHT | wx.HSCROLL | wx.TAB_TRAVERSAL | wx.VSCROLL | wx.WANTS_CHARS)
        self.session_page = wx.TreeCtrl(self.navigator, wx.ID_ANY, wx.DefaultPosition, wx.Size(-1, -1), wx.TR_DEFAULT_STYLE | wx.TR_EDIT_LABELS | wx.TR_EXTENDED | wx.TR_HAS_BUTTONS | wx.TR_HAS_VARIABLE_ROW_HEIGHT | wx.HSCROLL | wx.TAB_TRAVERSAL | wx.VSCROLL | wx.WANTS_CHARS)
        self.navigator.AddPage(self.session_page, 'SESSION')
        self.navigator.AddPage(self.function_page, 'FUNCTION')
        self.navigator.AddPage(self.case_suite_page, 'CASE')
        self.edit_area = AuiNotebook(self.m_file_editor, style = wx.aui.AUI_NB_DEFAULT_STYLE)
        self.edit_area.Bind(wx.aui.EVT_AUINOTEBOOK_PAGE_CLOSE, self.on_close_tab_in_edit_area, self.edit_area)
        if False:
            # dead debug code kept from development
            new_page = FileEditor(self.edit_area, 'a', type= type)
            self.edit_area.AddPage(new_page, 'test')
            self.tabs_in_edit_area.append(('test'))
        self.edit_area.Enable(True)
        right_sizer = wx.BoxSizer(wx.VERTICAL)
        #right_sizer =wx.GridSizer( 3, 1, 0, 0 )
        left_sizer = wx.BoxSizer(wx.HORIZONTAL)
        left_sizer.Add(self.m_left_navigator, 1, wx.EXPAND)
        self.case_suite_page.Bind(wx.EVT_LEFT_DCLICK, self.m_case_treeOnLeftDClick)
        #self.case_suite_page.Bind(wx.EVT_MOUSEWHEEL, self.case_tree_OnMouseWheel)
        self.case_suite_page.Bind(wx.EVT_TREE_ITEM_EXPANDING, self.m_case_treeOnTreeItemExpanding)
        self.session_page.Bind(wx.EVT_LEFT_DCLICK, self.on_LeftDClick_in_Session_tab)
        self.function_page.Bind(wx.EVT_LEFT_DCLICK, self.on_LeftDClick_in_Function_tab)
        self.function_page.Bind(wx.EVT_RIGHT_DOWN, self.on_right_down_in_function_tab)
        self.case_suite_page.Bind(wx.EVT_RIGHT_DOWN, self.on_right_down_in_case_tab)
        main_sizer = wx.BoxSizer(wx.HORIZONTAL)
        #main_sizer = wx.GridSizer( 1, 2, 0, 0 )
        nav_sizer = wx.BoxSizer()
        nav_sizer.Add(self.navigator, 1, wx.EXPAND, 1)
        self.m_left_navigator.SetSizer(nav_sizer)
        #main_sizer = wx.BoxSizer(wx.HORIZONTAL)
        #main_sizer.Add(left_sizer, 3, wx.EXPAND)
        main_sizer.Add(left_sizer, 2, wx.EXPAND)
        edit_sizer = wx.BoxSizer()
        edit_sizer.Add(self.edit_area, 1, wx.EXPAND, 1)
        self.m_file_editor.SetSizer(edit_sizer)
        right_sizer.Add(self.m_file_editor, 6, wx.ALL|wx.EXPAND, 1)
        right_sizer.Add(self.m_log, 3, wx.ALL|wx.EXPAND, 2)
        right_sizer.Add(self.m_command_box, 0, wx.ALL|wx.EXPAND, 3)
        main_sizer.Add(right_sizer, 8, wx.EXPAND)
        self.SetSizer(main_sizer)
        self.build_session_tab()
        self.build_suite_tree()
        self.build_function_tab()
        ico = wx.Icon('./gui/dash.bmp', wx.BITMAP_TYPE_ICO)
        self.SetIcon(ico)
        # background watcher: re-checks case processes until self.alive is False
        th= threading.Thread(target=self.polling_running_cases)
        th.start()
    def on_close(self, event):
        """Frame CLOSE handler: stop the polling thread, dump the generated
        replay script, mail the report, close every session tab and restore
        the original stdout/stderr."""
        self.alive =False
        # give polling_running_cases a moment to observe self.alive
        time.sleep(0.01)
        self.generate_code(file_name='{}/test_script.py'.format(self.suite_path))
        self.mail_test_report("DASH TEST REPORT")
        for index in range(0,self.edit_area.GetPageCount()): #len(self.tabs_in_edit_area)):
            closing_page = self.edit_area.GetPage(index)
            if isinstance(closing_page, (SessionTab)):
                if closing_page:
                    name = closing_page.name
                    self.tabs_in_edit_area.pop(self.tabs_in_edit_area.index(name))
                    closing_page.on_close()
        self.redir.close()
        # restore the interpreter's real streams before the window goes away
        sys.stderr =self.redir.old_stderr
        sys.stdout = self.redir.old_stdout
        event.Skip()
def generate_report(self, filename):
report = '''Test Report
RESULT\tStart_Time\tEnd_Time\tPID\tDuration\tCase_Name\tLog\n'''
if len(self.dict_test_report):
with open(filename, 'a+') as f:
f.write(report)
for pi in self.dict_test_report:
for case_name in self.dict_test_report[pi]:
start_time, end_time, duration, return_code ,proc, log_path =self.dict_test_report[pi][case_name][:6]
if return_code is None:
result = 'RUNNING'
else:
result = return_code # 'FAIL' if return_code else 'PASS'
record = '\t'.join(['{}'.format(x) for x in [result,start_time,end_time,pi,duration,case_name,log_path ]])
report+=record+'\n'
f.write(record+'\n')
return report
    def on_close_tab_in_edit_area(self, event):
        """AUI page-close handler: tear down the page and, for session tabs,
        close and drop the session object that was published into globals()."""
        #self.edit_area.GetPage(self.edit_area.GetSelection()).on_close()
        closing_page = self.edit_area.GetPage(self.edit_area.GetSelection())
        closing_page.on_close()
        if isinstance(closing_page, (SessionTab)):
            ses_name = closing_page.name
            self.tabs_in_edit_area.pop(self.tabs_in_edit_area.index(ses_name))
            # the session was made a module-level global when the tab opened
            # (Python-2 dict.has_key)
            if globals().has_key(ses_name):
                #g = dict(globals())
                #globals()[ses_name]=None
                #del g[ses_name]
                globals()[ses_name].close_session()
                del globals()[ses_name]
def add_item_to_subfolder_in_tree(self,node):
subfolder_path_name = self.case_suite_page.GetPyData(node)['path_name']
items = get_folder_item(subfolder_path_name)
if items is None:
self.case_suite_page.SetItemText(node, self.m_case_tree.GetItemText(node) + ' Not Exists!!!')
self.case_suite_page.SetItemTextColour(node, wx.Colour(255, 0, 0))
return
for i in items:
path_name = '{}/{}'.format(subfolder_path_name,i)
base_name = os.path.basename(i)
item_info = wx.TreeItemData({'path_name':path_name})
new_item = self.case_suite_page.InsertItem(node, node, base_name)
self.case_suite_page.SetItemData(new_item, item_info)
if os.path.isdir(path_name):
self.case_suite_page.SetItemHasChildren(new_item)
#self.m_case_tree.ItemHasChildren()
#self.m_case_tree.InsertItem(new_item,new_item,'')
def build_suite_tree(self):
suite_path = self.suite_path #os.path.abspath(self.ini_setting.get('dash','test_suite_path'))
if not os.path.exists(suite_path):
suite_path= os.path.abspath(os.path.curdir)
base_name = os.path.basename(suite_path)
root =self.case_suite_page.AddRoot(base_name)
item_info = wx.TreeItemData({'path_name':suite_path})
self.case_suite_page.SetItemData(root, item_info)
self.add_item_to_subfolder_in_tree(root)
self.case_suite_page.Expand(root)
# def OnSelChanged(self, event):
# item = event.GetItem()
# self.display.SetLabel(self.tree.GetItemText(item))
#def case_tree_OnMouseWheel(self, event):
def m_case_treeOnLeftDClick(self, event):
ht_item =self.case_suite_page.GetSelection()
#ht_item = self.HitTest(event.GetPosition())
item_name = self.case_suite_page.GetItemText(ht_item)
item_data = self.case_suite_page.GetItemData(ht_item)
if self.case_suite_page.ItemHasChildren(ht_item):
if self.case_suite_page.IsExpanded(ht_item):
self.case_suite_page.Collapse(ht_item)
else:
self.case_suite_page.ExpandAllChildren(ht_item)
else:
if item_name.lower() in ['.csv', '.xlsx','.xls']:
type = 'grid'
file_name = item_data.Data['path_name']
else:
type = 'text'
file_name = item_data.Data['path_name']
new_page = FileEditor(self.edit_area, 'a', type= type,file_name=file_name)
self.edit_area.AddPage(new_page, item_name)
index = self.edit_area.GetPageIndex(new_page)
self.edit_area.SetSelection(index)
def m_case_treeOnTreeItemExpanding(self,event):
ht_item =self.case_suite_page.GetSelection()
try:
item_info = self.case_suite_page.GetPyData(ht_item)
if 0== self.case_suite_page.GetChildrenCount(ht_item):
if os.path.isdir(item_info['path_name']):
self.add_item_to_subfolder_in_tree(ht_item)
except Exception as e:
pass
def build_session_tab(self):
if self.session_page.RootItem:
self.session_page.DeleteAllItems()
session_path = os.path.abspath(self.ini_setting.get('dash','session_path'))
self.session_path= session_path
if not os.path.exists(session_path):
session_path= os.path.abspath(os.path.curdir)
base_name = os.path.basename(session_path)
sessions = {}
root =self.session_page.AddRoot(base_name)
item_info = wx.TreeItemData({'path_name':session_path})
self.session_page.SetItemData(root, item_info)
self.session_page.Expand(root)
item_list = get_folder_item(session_path)
session_files=[]
for item in item_list:
if os.path.isfile('{}/{}'.format(session_path,item)) and '{}'.format(item).lower().strip().endswith('.csv'):
session_files.append(item)
for csv_file in sorted(session_files):
try:
ses_in_bench = load_bench(os.path.abspath('{}/{}'.format(session_path, csv_file)))
for bench in ses_in_bench:
for ses in ses_in_bench[bench]:
if ses_in_bench[bench][ses].has_key('login_step') and ses_in_bench[bench][ses]['login_step'].strip() not in ['', None]:
ses_in_bench[bench][ses].update(
{'login_step': os.path.abspath('{}/{}'.format(session_path, ses_in_bench[bench][ses]['login_step'].strip()))}
)
sessions.update(ses_in_bench)
except Exception as e:
pass
root =self.session_page.GetRootItem()
for file_name in sorted(sessions.keys()):
item_name = os.path.basename(file_name)
item_info = wx.TreeItemData({'file_name':file_name})
new_bench = self.session_page.InsertItem(root, root, item_name)
self.case_suite_page.SetItemData(new_bench, item_info)
for ses in sorted(sessions[file_name]):
item_name = ses
item_info = wx.TreeItemData({'attribute':sessions[file_name][ses]})
new_item = self.session_page.InsertItem(new_bench, new_bench, item_name)
self.case_suite_page.SetItemData(new_item, item_info)
self.session_page.Expand(root)
first_child = self.session_page.GetFirstChild(root)
self.session_page.Expand(first_child[0])
    def on_LeftDClick_in_Session_tab(self, event):
        """Open a SessionTab for the double-clicked session and publish the
        session object into globals() under a unique name."""
        event.Skip()
        ses_name = self.session_page.GetItemText(self.session_page.GetSelection())
        self.session_page.GetItemText(self.session_page.GetSelection())
        session_attribute = self.session_page.GetItemData(self.session_page.GetSelection())
        # bench nodes carry 'file_name' only; session leaves carry 'attribute'
        if session_attribute.Data.has_key('attribute'):
            info(session_attribute.Data['attribute'])
            counter =1
            original_ses_name = ses_name
            # make the tab name unique among the currently open tabs
            while ses_name in self.tabs_in_edit_area:
                ses_name= '{}_{}'.format(original_ses_name,counter)
                counter+=1
            # avoid clobbering an existing global of the same name
            if globals().has_key(ses_name):
                if not globals().has_key('_{}'.format(ses_name)):
                    info("variable '{}' is existed in global, change the name to _{}".format(ses_name, ses_name))
                    ses_name='_{}'.format(ses_name)
                    self.session_page.SetItemText(self.session_page.GetSelection(), ses_name)
                else:
                    error(("variable '{}' is existed in global, please change the name".format(ses_name)))
                    return
            new_page = SessionTab(self.edit_area, ses_name, session_attribute.Data['attribute'], self.sequence_queue, log_path=self.log_path)
            window_id = self.edit_area.AddPage(new_page, ses_name)
            index = self.edit_area.GetPageIndex(new_page)
            self.edit_area.SetSelection(index)
            self.tabs_in_edit_area.append(ses_name)
            self.sessions_alive.update({ses_name: new_page.name})
            attribute = session_attribute.Data['attribute']
            # placeholder path: substituted with the real log_path variable in
            # the generated replay script (see add_new_session_to_globals)
            log_path='a_fake_log_path_for_auto_script'
            attribute['log_path']=log_path
            self.add_new_session_to_globals(new_page, '{}'.format(attribute))
            #globals().update({ses_name: new_page.session})
    def add_new_session_to_globals(self, new_page, args_str):
        """Publish *new_page* into globals() under its session name and queue a
        `name = dut.dut(...)` line for the generated replay script."""
        if globals().has_key(new_page.name):
            if globals()[new_page.name]==None:
                pass
            else:
                error('{} already '.format(new_page.name))
        else:
            globals().update({new_page.name: new_page})
        # rewrite the fake log path / not_call_open flag so the generated
        # script opens the session for real, using its own log_path variable
        self.add_cmd_to_sequence_queue('{} = dut.dut(name= "{}", **{})'.format(new_page.name,new_page.name,args_str.replace("'a_fake_log_path_for_auto_script'",'log_path').replace("'not_call_open': True,", "'not_call_open': False,") ), 'dut')
        #session = dut(name, **attributes)
    def on_command_enter(self, event):
        """Parse the command box as `module[.class].function arg ...`, run the
        call on a worker thread, and record it in the history and the
        generated-script queue."""
        info('called on_command_enter')
        cmd = self.m_command_box.GetValue()
        self.m_command_box.Clear()
        if cmd.strip()=='':
            return
        module,class_name, function,args = parse_command_line(cmd)
        #args[0]=self.sessions_alive['test_ssh'].session
        if module !='' or class_name!='' or function!='':
            instance_name, function_name, new_argvs, new_kwargs, str_code = call_function_in_module(module,class_name,function,args, globals())
            call_function = None
            if class_name!="":
                call_function = getattr(instance_name, function_name)
                #(*new_argvs,**new_kwargs)
            else:
                call_function = instance_name#(*new_argvs,**new_kwargs)
            # run on a worker thread so the GUI stays responsive
            th =threading.Thread(target=call_function, args=new_argvs, kwargs=new_kwargs)
            th.start()
            self.add_cmd_to_history(cmd, module, str_code)
        else:
            error('"{}" is NOT a valid call in format:\n\tmodule.class.function call or \n\tmodule.function'.format(cmd))
def add_src_path_to_python_path(self, path):
paths = path.split(';')
old_path = sys.path
for p in paths:
if p in old_path:
info('path {} already in sys.path'.format(p))
else:
abspath = os.path.abspath(p)
if os.path.exists(abspath):
sys.path.insert(0,abspath)
else:
warn('path {} is not existed, ignored to add it into sys.path'.format(p))
    def on_key_down(self, event):
        """Intercept special keys in the command box: TAB (and a Shift-modified
        key, see note) submit the current command; anything else is passed on."""
        #error(event.KeyCode)
        keycode = event.KeyCode
        if keycode ==wx.WXK_TAB:
            self.m_command_box.AppendText('\t')
            self.on_command_enter(event)
        # NOTE(review): wx.PAPER_ENV_INVITE is a paper-size constant, not a key
        # code — this branch presumably meant the '/'/'?' key.  Confirm intent.
        elif keycode == wx.PAPER_ENV_INVITE and wx.GetKeyState(wx.WXK_SHIFT):
            self.m_command_box.AppendText('?')
            self.on_command_enter(event)
        else:
            event.Skip()
def on_key_up(self, event):
keycode = event.KeyCode
increase =False
if keycode ==wx.WXK_UP:
pass
elif keycode ==wx.WXK_DOWN:
increase =True#
if keycode in [wx.WXK_UP, wx.WXK_DOWN]:
self.m_command_box.Clear()
self.history_cmd_index, new_command = get_next_in_ring_list(self.history_cmd_index,self.history_cmd,increase=increase)
self.m_command_box.AppendText(new_command)
if keycode in [wx.WXK_TAB]:
pass
else:
event.Skip()
def add_cmd_to_history(self, cmd, module_name, str_code):
if self.history_cmd==[]:
self.history_cmd.append(cmd)
elif self.history_cmd[-1]==cmd:
pass
else:
self.history_cmd.append(cmd)
self.history_cmd_index= len(self.history_cmd)
self.add_cmd_to_sequence_queue(str_code,module_name )
#self.sequence_queue.put([cmd, datetime.now()])
def build_function_tab(self):
src_path = os.path.abspath(self.src_path)
if not os.path.exists(src_path):
src_path= os.path.abspath(os.path.curdir)
base_name = os.path.basename(src_path)
root =self.function_page.AddRoot(base_name)
item_info = wx.TreeItemData({'name':src_path})
self.function_page.SetItemData(root, item_info)
modules = get_folder_item(src_path)
if modules is None:
self.function_page.SetItemText(root, self.function_page.GetItemText(root) + ' Not Exists!!!')
self.function_page.SetItemTextColour(root, wx.Colour(255, 0, 0))
return
for module_file in modules:
path_name = '{}'.format(os.path.abspath(self.src_path))
module_name = os.path.basename(module_file).split('.')[0]
new_module = self.function_page.InsertItem(root, root, module_name)
file, path_name, description = imp.find_module(module_name)
lmod = imp.load_module(module_name, file, path_name,description)
for attr in sorted(dir(lmod)):
if attr.startswith('__'):
continue
attr_obj = getattr(lmod, attr)
attr_type = type(attr_obj)
if attr_type == types.FunctionType :
new_item = self.function_page.InsertItem(new_module, new_module, '{}'.format( attr))
item_info = wx.TreeItemData({'name':'{}.{}'.format(module_name,attr)})
self.function_page.SetItemData(new_item, item_info)
elif attr_type== types.TypeType:
class_obj = getattr(lmod, attr)
new_class = self.function_page.InsertItem(new_module, new_module, attr)
item_info = wx.TreeItemData({'name':'{}.{}'.format(module_name,attr)})
self.function_page.SetItemData(new_item, item_info)
for attr_in_class in sorted(dir(class_obj)):
if attr_in_class.startswith('__'):
continue
attr_obj = getattr(class_obj,attr_in_class)
attr_type =type(attr_obj)
if attr_type == types.MethodType :
item_info = wx.TreeItemData({'name':'{}.{}.{}'.format(module_name,attr,attr_in_class)})
new_item = self.function_page.InsertItem(new_class, new_class, attr_in_class)
self.function_page.SetItemData(new_item, item_info)
self.function_page.Expand(root)
first_child = self.function_page.GetFirstChild(root)
self.function_page.Expand(first_child[0])
    def on_LeftDClick_in_Function_tab(self,event):
        """Double-click on a function/method node copies its dotted call path
        into the command box, ready for arguments to be appended."""
        event.Skip()
        select_item = self.function_page.GetSelection()
        fun_name = self.function_page.GetItemData(select_item)
        text_in_tree = self.function_page.GetItemText(select_item)
        if fun_name != None and fun_name.Data.has_key('name'):
            cmd = fun_name.Data['name']
            info('click item in Functions tab: {}'.format(fun_name.Data['name']))
            # do the UI updates on the main loop after the event is processed
            wx.CallAfter(self.m_command_box.Clear)
            wx.CallAfter(self.m_command_box.AppendText, cmd+' ')
            wx.CallAfter(self.m_command_box.SetFocus)
            wx.CallAfter(self.m_command_box.SetInsertionPointEnd)
            wx.CallAfter(self.m_command_box.Refresh)
def on_right_down_in_function_tab(self, event):
menu = wx.Menu()
item = wx.MenuItem(menu, wx.NewId(), "Refresh")
#acc = wx.AcceleratorEntry()
#acc.Set(wx.ACCEL_NORMAL, ord('O'), self.popupID1)
#item.SetAccel(acc)
menu.AppendItem(item)
self.Bind(wx.EVT_MENU, self.on_refresh_function_page,item)
self.PopupMenu(menu,event.GetPosition())
    def on_refresh_function_page(self, event):
        """Rebuild the FUNCTION tree from scratch (context-menu Refresh)."""
        self.function_page.DeleteAllItems()
        self.build_function_tab()
        info('Refresh Function tab done!')
def add_cmd_to_sequence_queue(self, cmd, module_name):
if self.import_modules.has_key(module_name):
pass
else:
self.import_modules.update({module_name:module_name})
self.sequence_queue.put([cmd,datetime.now() ])
def generate_code(self, file_name ):
str_code ="""#created by DasH
if __name__ == "__main__":
import sys, traceback
sys.path.insert(0,r'{}')
sys.path.insert(0,r'{}')
import lib.common
log_path= '../log/tmp'
log_path= lib.common.create_case_folder()
try:
""".format(self.src_path,self.lib_path )
sessions =[]
for module in self.import_modules:
str_code+=' import {mod}\n'.format(mod=module)#\n {mod}_instance = {mod}()
no_operation = True
while True:
try:
cmd, timestamp =self.sequence_queue.get(block=False)[:2]
str_code +=' {} #{}\n'.format(cmd, timestamp.isoformat( ' '))
if cmd.find('dut.dut(')!=-1:
sessions.append(cmd.split('=')[0].strip())
no_operation=False
#datetime.now().isoformat()
except Exception as e:
break
close_session=''
str_code+=''' except Exception as e:
print(traceback.format_exc())\n'''
for ses in sessions:
str_code+=''' {}.close_session()\n'''.format(ses)
str_code+=' sys.exit(-1)\n'#, sys.exit(-1)
for ses in sessions:
str_code+=''' {}.close_session()\n'''.format(ses)
info(str_code)
if not no_operation:
with open(file_name, 'a+') as f:
f.write(str_code)
def on_right_down_in_case_tab(self, event):
menu = wx.Menu()
item1 = wx.MenuItem(menu, wx.NewId(), "Run Test")
item2 = wx.MenuItem(menu, wx.NewId(), "Kill Test")
#acc = wx.AcceleratorEntry()
#acc.Set(wx.ACCEL_NORMAL, ord('O'), self.popupID1)
#item.SetAccel(acc)
menu.AppendItem(item1)
menu.AppendItem(item2)
self.Bind(wx.EVT_MENU, self.on_run_script,item1)
self.Bind(wx.EVT_MENU, self.on_kill_script,item2)
self.PopupMenu(menu,event.GetPosition())
    def on_kill_script(self,event):
        """'Kill Test' handler: terminate the selected case's process if it is
        still running, otherwise record its final PASS/FAIL status."""
        hit_item = self.case_suite_page.GetSelection()
        item_name = self.case_suite_page.GetItemText(hit_item)
        item_data = self.case_suite_page.GetItemData(hit_item).Data
        # 'PROCESS' is attached to the item by on_run_script
        if item_data.has_key('PROCESS'):
            p = item_data['PROCESS']
            name= item_data['FULL_NAME']
            info('script:{}, returncode:{}'.format(name,p.returncode))
            if p.returncode is None:
                #if p.is_alive():
                info('Terminate alive process {}:{}'.format(item_name, p.pid))
                result ='KILL'
                p.terminate()
            else:
                result ='FAIL' if p.returncode else 'PASS'
                info('{}:{} completed with returncode {}'.format(item_name, p.pid, result))
            self.update_case_status(p.pid,item_name,result)
    def on_run_script(self,event):
        """'Run Test' handler: launch the selected case as a subprocess in its
        own console, with a fresh per-case log folder, and register it in the
        test report."""
        hit_item = self.case_suite_page.GetSelection()
        item_name = self.case_suite_page.GetItemText(hit_item)
        import shlex
        # the tree label may carry quoted arguments after the script name
        lex = shlex.shlex(item_name)
        lex.quotes = '"'
        lex.whitespace_split = True
        script_args =list(lex)[1:]
        item_data = self.case_suite_page.GetItemData(hit_item).Data
        script_name = self.case_suite_page.GetItemData(hit_item).Data['path_name']
        from lib.common import run_script
        from multiprocessing import Process, Queue
        import subprocess
        # stop any previous run of the same case first
        self.on_kill_script(event)
        #queue = Queue()
        from lib.common import create_case_folder
        # create_case_folder apparently derives the folder name from sys.argv;
        # temporarily point argv at the script being launched — TODO confirm
        old_sys_argv = sys.argv
        sys.argv= [script_name]+script_args
        case_log_path = create_case_folder()
        sys.argv= old_sys_argv
        try:
            if os.path.exists('script_runner.exe'):
                execute = 'script_runner.exe'
                cmd = [execute,script_name ]+script_args + ['-l','{}'.format(case_log_path)]
                #p=subprocess.Popen(cmd, creationflags = subprocess.CREATE_NEW_CONSOLE)
            else:
                cmd = [sys.executable, script_name ]+script_args+ ['-l','{}'.format(case_log_path)]
            # CREATE_NEW_CONSOLE is Windows-only
            p=subprocess.Popen(cmd, creationflags = subprocess.CREATE_NEW_CONSOLE)
            self.case_suite_page.GetItemData(hit_item).Data['PROCESS']=p
            self.case_suite_page.GetItemData(hit_item).Data['FULL_NAME']= item_name
            info('start process {} :{}'.format(item_name, p.pid))
            self.add_newe_case_to_report(p.pid,item_name,p,case_log_path)
            #p.join() # this blocks until the process terminates
            time.sleep(1)
        except Exception as e :
            error(traceback.format_exc())
        #p = Process(target=run_script, args=[script_name, script_and_args])
        #p.start()
    def check_case_status(self):
        """Poll every recorded case process; finalize the finished ones and
        mail an updated report when anything changed.  Returns True if any
        case status changed."""
        changed=False
        for pid in self.dict_test_report:
            for case_name in self.dict_test_report[pid]:
                start_time, end_time, duration, return_code ,proc, log_path= self.dict_test_report[pid][case_name]
                if return_code is None:
                    if proc.poll() is None:
                        pass
                        # NOTE(review): debug() is called with many positional
                        # args — confirm it accepts *args like print()
                        debug('RUNNING', start_time, end_time, duration, return_code ,proc, log_path)
                    else:
                        changed=True
                        return_code = 'FAIL' if proc.returncode else 'PASS'
                        self.update_case_status(pid,case_name,return_code)
        if changed:
            #test_report = self.generate_report(filename='{}/dash_report.txt'.format(self.log_path))
            self.mail_test_report('DasH Test Report-updated')
        return changed
    def polling_running_cases(self):
        """Background-thread loop: every 10 seconds re-check the case
        processes, until self.alive is cleared by on_close."""
        while True:
            time.sleep(10)
            try:
                if not self.alive:
                    break
            except:
                # the interpreter may be tearing down during shutdown;
                # any failure here just means it is time to stop
                break
            self.check_case_status()
def add_newe_case_to_report(self, pid, case_name, proc, log_path):
start_time=datetime.now()
duration = 0
end_time = None
return_code = None
if pid in self.dict_test_report:
self.dict_test_report[pid].update({case_name:[start_time,end_time, duration, return_code, proc,log_path]})
else:
self.dict_test_report[pid]={case_name:[start_time, end_time, duration,return_code, proc, log_path ]}
def update_case_status(self, pid,case_name, return_code=None):
now = datetime.now()
start_time, end_time, duration, tmp_return_code ,proc,log_path= self.dict_test_report[pid][case_name]
if return_code is None:
duration = (now-start_time).total_seconds()
self.dict_test_report[pid][case_name]=[start_time, end_time, duration, tmp_return_code, proc, log_path]
else:
duration = (now-start_time).total_seconds()
self.dict_test_report[pid][case_name]=[start_time, now, duration, return_code, proc, log_path]
    def mail_test_report(self, subject="DASH TEST REPORT-updated"):
        """Refresh case statuses, regenerate the report file and mail it.

        Best effort: every failure is logged and swallowed so that closing
        the app or polling never crashes on mail problems.
        """
        try:
            from lib.common import send_mail_smtp_without_login
            self.check_case_status()
            test_report = self.generate_report(filename='{}/dash_report.txt'.format(self.log_path))
            #TO, SUBJECT, TEXT, SERVER, FROM
            send_mail_smtp_without_login(self.mail_to_list, subject,test_report,self.mail_server,self.mail_from)
        except Exception as e:
            error(traceback.format_exc())
    def on_mail_test_report(self,event):
        """Menu handler for 'Mail Test Report'."""
        self.mail_test_report('DasH Test Report-updated')
#p.terminate()
#done: 2017-08-22, 2017-08-19 save main log window to a file
#todo: 2017-08-19 add timestamps to log message
#done: 2017-08-22, 2017-08-19 mail to someone
#todo: 2017-08-19 run a script in DasH
#todo: 2017-08-19 generate test report
#todo: 2017-08-19 publish all test cases in a web page
#todo: 2017-08-19 trigger a test remote via web page
#todo: 2017-08-19 re-run failed cases
#todo: 2017-08-19 build executable package for DasH
#todo: 2017-08-19 a popup window to get email address/password/mail_server...
#todo: 2017-08-22 output in m_log window has a lot of empty line, need remove them
#todo: 2017-08-23 in common.call_function_in_module, should end all threads which are started in previous instance
#todo: 2017-08-23 add tips for all tree items in the left navigator
#todo: Reformat test report
#todo: send test report mail when killing an In-Progress (IP) case
#!/usr/bin/python
'''The MIT License (MIT)
Copyright (c) 2017 Yu Xiong Wei(try.dash.now@gmail.com)
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.'''
__author__ = 'sean yu (Yu, Xiongwei)'
__doc__ = '''
it's GUI of DasH aka Do as Human
created 2017-05-06 by Sean Yu
'''
from datetime import datetime
import wx.grid as gridlib
import traceback
import wx
from gui.MainFrame import MainFrame
import os
from lib.common import load_bench, caller_stack_info,info, get_next_in_ring_list,get_folder_item, info,debug, warn, error, parse_command_line, call_function_in_module
import re
import time
import threading
import ConfigParser
import sys
import inspect
import Queue
from SessionTab import SessionTab
import imp
import types
#from dut import dut
class RedirectText(object):
    """File-like object that mirrors every write to the wx log control, the
    original stdout and (optionally) a log file; error-looking lines are
    highlighted in red/yellow."""
    font_point_size = 10
    old_stdout = None
    old_stderr = None
    write_lock = None
    log_file = None
    def __init__(self,aWxTextCtrl, log_path=None):
        self.old_stderr , self.old_stdout=sys.stderr , sys.stdout
        self.out=aWxTextCtrl
        self.font_point_size = self.out.GetFont().PointSize
        self.write_lock = threading.Lock()
        if log_path:
            name = '{}/dash.log'.format(log_path)
            self.log_file = open(name, 'w+')
            # expose fileno so callers can treat this object like a real file
            self.fileno = self.log_file.fileno
    def write(self,string):
        # BUG FIX: the original paired acquire()/release() without try/finally,
        # so any exception while writing left the lock held forever and froze
        # all further logging; `with` releases it on every path.
        with self.write_lock:
            self.old_stdout.write(string)
            #string = string.replace('\\033\[[0-9\;]+m', '')
            #self.old_stderr.write(string)
            if re.search('error|\s+err\s+|fail|wrong',string.lower()):
                self.out.SetDefaultStyle(wx.TextAttr(wx.RED, wx.YELLOW, font =wx.Font(self.font_point_size+2, family = wx.DEFAULT, style = wx.NORMAL, weight = wx.BOLD, faceName = 'Consolas')))#wx.CallAfter(s
            else:
                self.out.SetDefaultStyle(wx.TextAttr(wx.GREEN, wx.BLACK,font =wx.Font(self.font_point_size, family = wx.DEFAULT, style = wx.NORMAL, weight = wx.NORMAL, faceName = 'Consolas')))#wx.CallAfter(
            # AppendText must happen on the GUI thread
            wx.CallAfter(self.out.AppendText, string)
            if self.log_file:
                self.log_file.write(string)
                self.log_file.flush()
    def close(self):
        """Flush and close the backing log file, if any."""
        if self.log_file:
            self.log_file.flush()
            self.log_file.close()
class process_info(object):
    """Lightweight view over a launched process: remembers its pid and display
    name and exposes the live return code of the underlying process."""
    process = None
    pid=None
    full_name=None
    def __init__(self,name, process):
        self.process= process
        self.pid = process.pid
        self.full_name =name
        # BUG FIX: the original also executed `self.returncode =
        # process.returncode`, which raises AttributeError because
        # `returncode` is a read-only property on this class — the property
        # below is the single source of truth.
    @property
    def returncode(self):
        # delegate to the process so callers always see the current status
        return self.process.returncode
class FileEditor(wx.Panel):
    """Editor tab for a suite/case file: 'text' files get a rich TextCtrl
    loaded from disk, anything else gets a wx grid with a Function-Name column
    plus argument columns.  Ctrl+wheel zooms the font."""
    editor =None
    font_size=10
    parent=None
    type = None  # 'text' or 'grid'
    sessions_node =None
    function_node =None
    case_suite_node =None
    full_file_name = None
    file_instance = None
    def on_close(self):
        """Persist the editor contents back to the file on tab close.

        NOTE(review): self.editor.GetValue() exists on TextCtrl; confirm the
        grid ('grid' type) case is never reached here, since wx.grid.Grid has
        no GetValue.
        """
        if self.full_file_name:
            data = self.editor.GetValue()
            with open(self.full_file_name, 'w') as f:
                f.write(data)
                f.flush()
    #done 2017-9-12: handle close tab in edit_area
    def __init__(self, parent, title='pageOne', type ='grid', file_name = None):
        """Create either a text editor (loading *file_name*) or a 50x5 grid."""
        wx.Panel.__init__(self, parent)
        self.parent = parent
        self.type = type
        self.full_file_name = file_name
        #self.editor = wx.TextCtrl(self, style = wx.TE_MULTILINE|wx.TE_RICH2|wx.EXPAND|wx.ALL, size=(-1,-1))
        if type in ['text']:
            self.editor = wx.TextCtrl( self, -1, wx.EmptyString, wx.DefaultPosition, wx.DefaultSize, wx.TE_AUTO_URL|wx.VSCROLL|wx.TE_RICH|wx.TE_MULTILINE&(~wx.TE_PROCESS_ENTER))
            #wx.richtext.RichTextCtrl( self, -1, wx.EmptyString, wx.DefaultPosition, wx.DefaultSize, 0|wx.VSCROLL|wx.HSCROLL|wx.NO_BORDER|wx.WANTS_CHARS )
            with open(self.full_file_name, 'r') as f:
                for line in f.readlines():
                    self.editor.AppendText(line)
        else:
            self.editor= gridlib.Grid(self)
            self.editor.CreateGrid(50, 5)
            col = self.editor.GetNumberCols()
            row = self.editor.GetNumberRows()
            function_color ='black'
            arg_color = 'blue'
            # first column holds function names (black/bold), rest arguments (blue)
            for c in range(0, col):
                if c < 1 :
                    self.editor.SetColLabelValue(c, 'Function Name')
                else:
                    self.editor.SetColLabelValue(c, 'arg# {}'.format(c))
                for r in range (0, row):
                    self.editor.SetCellTextColour(r,c,function_color if c <1 else arg_color)
            for r in range (0, row):
                self.editor.SetCellFont(r, 0, wx.Font(self.font_size,wx.SWISS, wx.NORMAL, wx.BOLD ))
        self.editor.Bind( wx.EVT_MOUSEWHEEL, self.editor_OnMouseWheel )
        sizer = wx.BoxSizer()
        sizer.Add(self.editor, 1, wx.EXPAND)
        self.SetSizer(sizer)
    def editor_OnMouseWheel(self,event):
        """Ctrl+mouse-wheel zoom: wheel-down shrinks the font, wheel-up grows it.

        NOTE(review): shrink uses interval_step (2) but grow uses 1 — the
        asymmetry looks unintentional; confirm.
        """
        min_font_size = 5
        interval_step = 2
        if event.ControlDown():
            pass
        else:
            # plain wheel events scroll normally
            return
        if event.GetWheelRotation() < 0:
            if self.font_size>min_font_size:
                self.font_size-=interval_step
        else:
            self.font_size+=1
        if self.type in ['text']:
            f =self.editor.GetFont()
            f.PointSize= self.font_size
            self.editor.SetFont(f)
        else:
            # grids have per-cell fonts: update every cell
            col = self.editor.GetNumberCols()
            row = self.editor.GetNumberRows()
            for c in range(0, col):
                for r in range (0, row):
                    f = self.editor.GetCellFont(r, c)
                    f.PointSize = self.font_size
                    self.editor.SetCellFont(r, c, f)
        self.Refresh()
#wx.StaticText(self, -1, "THIS IS A PAGE OBJECT", (20,20))
#DONE: DasHFrame should handle CLOSE event when closing the app, call on_close_tab_in_edit_area for all opened sessions and files
class DasHFrame(MainFrame):#wx.Frame
    """Main window of the DasH test dashboard.

    Builds the left navigator (SESSION / FUNCTION / CASE trees), a tabbed
    edit area, a log pane and a command box; reads paths and mail settings
    from an INI file; launches test scripts as subprocesses and tracks them
    in ``dict_test_report``; can mail a test report and replay the executed
    commands as a generated script.

    NOTE(review): this code targets Python 2 / classic wxPython
    (``dict.has_key``, ``Queue``/``ConfigParser`` module names,
    ``wx.TreeItemData``, ``imp``) — confirm the runtime before porting.
    """
    # Class-level defaults. The mutable ones (history_cmd, import_modules)
    # are shared by all instances until shadowed; the rest are re-assigned
    # per-instance in __init__.
    ini_setting = None
    #m_left_navigator =None
    redir = None                 # RedirectText capturing stdout/stderr into the log pane
    edit_area=None               # AuiNotebook holding file/session tabs
    tabs_in_edit_area = None     # names of tabs currently open in edit_area
    src_path = None
    sessions_alive=None          # tab name -> session page name
    sequence_queue=None          # executed commands, consumed by generate_code()
    history_cmd = []             # command-box history (class-level, shared)
    history_cmd_index = -1
    import_modules={'TC':'TC'}   # module names to import in the generated script
    lib_path ='./lib'
    log_path = '../log'
    session_path = './sessions'
    suite_path = '../test_suite'
    dict_test_report= None       # pid -> {case_name: [start, end, duration, result, proc, log_path]}
    alive =True                  # cleared in on_close() to stop the polling thread
    mail_server=None
    mail_to_list=None
    mail_from=None
    def __init__(self,parent=None, ini_file = './gDasH.ini'):
        """Build the whole UI, load settings from *ini_file*, and start the
        background thread that polls running test cases."""
        #wx.Frame.__init__(self, None, title="DasH")
        self.dict_test_report={}
        self.tabs_in_edit_area=[]
        self.sessions_alive={}
        MainFrame.__init__(self, parent=parent)
        self.sequence_queue= Queue.Queue()
        #self.sequence_queue.put()
        # All paths and mail settings come from the [dash] section.
        self.ini_setting = ConfigParser.ConfigParser()
        self.ini_setting.read(ini_file)
        self.src_path = os.path.abspath(self.ini_setting.get('dash','src_path'))
        self.lib_path = os.path.abspath(self.ini_setting.get('dash','lib_path'))
        self.log_path = os.path.abspath(self.ini_setting.get('dash','log_path'))
        self.suite_path = os.path.abspath(self.ini_setting.get('dash', 'test_suite_path'))
        self.mail_server= self.ini_setting.get('dash', 'mail_server')
        self.mail_from =self.ini_setting.get('dash', 'mail_from')
        self.mail_to_list=self.ini_setting.get('dash', 'mail_to_list')
        from lib.common import create_case_folder, create_dir
        # presumably create_case_folder() reads '-l <path>' from sys.argv — verify
        sys.argv.append('-l')
        sys.argv.append('{}'.format(self.log_path))
        self.log_path = create_case_folder(self.log_path)
        self.suite_path = create_dir(self.suite_path)
        self.lib_path = create_dir(self.lib_path)
        self.src_path = create_dir(self.src_path)
        if not os.path.exists(self.log_path):
            os.mkdir(self.log_path)
        self.add_src_path_to_python_path(self.src_path)
        # Redirect stdout/stderr into the log pane (restored in on_close()).
        self.redir = RedirectText(self.m_log, self.log_path)
        sys.stdout = self.redir
        sys.stderr = self.redir
        self.m_log.SetBackgroundColour('Black')
        self.m_log.SetDefaultStyle(wx.TextAttr(wx.GREEN, wx.BLACK, font =wx.Font(9, family = wx.DEFAULT, style = wx.NORMAL, weight = wx.BOLD, faceName = 'Consolas')))
        #self.m_editor.WriteText('welcome to dash world')
        self.m_log.WriteText('Welcome to DasH!\n')
        # Pre-fill the command box with a sample command.
        self.m_command_box.WriteText('functions.static_function_in_module test_ssh 2')
        # "Open" menu; only the mail item is bound to a handler here.
        fileMenu = wx.Menu()
        open_test_suite = fileMenu.Append(wx.NewId(), "Open TestSuite", "Open a Test Suite")
        open_test_case = fileMenu.Append(wx.NewId(), "Open TestCase", "Open a Test Case")
        mail_test_report = fileMenu.Append(wx.NewId(), "Mail Test Report", "Mail Test Report")
        self.m_menubar_main.Append(fileMenu, "&Open")
        self.Bind(wx.EVT_MENU,self.on_mail_test_report ,mail_test_report)
        self.Bind(wx.EVT_CLOSE, self.on_close)
        self.m_command_box.Bind(wx.EVT_TEXT_ENTER, self.on_command_enter)
        self.m_command_box.Bind(wx.EVT_KEY_UP, self.on_key_up)
        self.m_command_box.Bind(wx.EVT_KEY_DOWN, self.on_key_down)
        # Left navigator: three tree pages inside an AUI notebook.
        from wx.aui import AuiNotebook
        bookStyle = wx.aui.AUI_NB_DEFAULT_STYLE &(~wx.aui.AUI_NB_CLOSE_ON_ACTIVE_TAB)
        self.navigator = AuiNotebook(self.m_left_navigator, style= bookStyle )
        self.case_suite_page = wx.TreeCtrl(self.navigator, wx.ID_ANY, wx.DefaultPosition, wx.Size(-1, -1), wx.TR_DEFAULT_STYLE | wx.TR_EDIT_LABELS | wx.TR_EXTENDED | wx.TR_HAS_BUTTONS | wx.TR_HAS_VARIABLE_ROW_HEIGHT | wx.HSCROLL | wx.TAB_TRAVERSAL | wx.VSCROLL | wx.WANTS_CHARS)
        self.function_page = wx.TreeCtrl(self.navigator, wx.ID_ANY, wx.DefaultPosition, wx.Size(-1, -1), wx.TR_DEFAULT_STYLE | wx.TR_EDIT_LABELS | wx.TR_EXTENDED | wx.TR_HAS_BUTTONS | wx.TR_HAS_VARIABLE_ROW_HEIGHT | wx.HSCROLL | wx.TAB_TRAVERSAL | wx.VSCROLL | wx.WANTS_CHARS)
        self.session_page = wx.TreeCtrl(self.navigator, wx.ID_ANY, wx.DefaultPosition, wx.Size(-1, -1), wx.TR_DEFAULT_STYLE | wx.TR_EDIT_LABELS | wx.TR_EXTENDED | wx.TR_HAS_BUTTONS | wx.TR_HAS_VARIABLE_ROW_HEIGHT | wx.HSCROLL | wx.TAB_TRAVERSAL | wx.VSCROLL | wx.WANTS_CHARS)
        self.navigator.AddPage(self.session_page, 'SESSION')
        self.navigator.AddPage(self.function_page, 'FUNCTION')
        self.navigator.AddPage(self.case_suite_page, 'CASE')
        # Right side: the tabbed editor area.
        self.edit_area = AuiNotebook(self.m_file_editor, style = wx.aui.AUI_NB_DEFAULT_STYLE)
        self.edit_area.Bind(wx.aui.EVT_AUINOTEBOOK_PAGE_CLOSE, self.on_close_tab_in_edit_area, self.edit_area)
        if False:
            # disabled debug scaffolding: opens a dummy editor tab
            new_page = FileEditor(self.edit_area, 'a', type= type)
            self.edit_area.AddPage(new_page, 'test')
            self.tabs_in_edit_area.append(('test'))
        self.edit_area.Enable(True)
        # Lay out navigator (left) vs editor/log/command box (right).
        right_sizer = wx.BoxSizer(wx.VERTICAL)
        #right_sizer =wx.GridSizer( 3, 1, 0, 0 )
        left_sizer = wx.BoxSizer(wx.HORIZONTAL)
        left_sizer.Add(self.m_left_navigator, 1, wx.EXPAND)
        self.case_suite_page.Bind(wx.EVT_LEFT_DCLICK, self.m_case_treeOnLeftDClick)
        #self.case_suite_page.Bind(wx.EVT_MOUSEWHEEL, self.case_tree_OnMouseWheel)
        self.case_suite_page.Bind(wx.EVT_TREE_ITEM_EXPANDING, self.m_case_treeOnTreeItemExpanding)
        self.session_page.Bind(wx.EVT_LEFT_DCLICK, self.on_LeftDClick_in_Session_tab)
        self.function_page.Bind(wx.EVT_LEFT_DCLICK, self.on_LeftDClick_in_Function_tab)
        self.function_page.Bind(wx.EVT_RIGHT_DOWN, self.on_right_down_in_function_tab)
        self.case_suite_page.Bind(wx.EVT_RIGHT_DOWN, self.on_right_down_in_case_tab)
        main_sizer = wx.BoxSizer(wx.HORIZONTAL)
        #main_sizer = wx.GridSizer( 1, 2, 0, 0 )
        nav_sizer = wx.BoxSizer()
        nav_sizer.Add(self.navigator, 1, wx.EXPAND, 1)
        self.m_left_navigator.SetSizer(nav_sizer)
        #main_sizer = wx.BoxSizer(wx.HORIZONTAL)
        #main_sizer.Add(left_sizer, 3, wx.EXPAND)
        main_sizer.Add(left_sizer, 2, wx.EXPAND)
        edit_sizer = wx.BoxSizer()
        edit_sizer.Add(self.edit_area, 1, wx.EXPAND, 1)
        self.m_file_editor.SetSizer(edit_sizer)
        right_sizer.Add(self.m_file_editor, 6, wx.ALL|wx.EXPAND, 1)
        right_sizer.Add(self.m_log, 3, wx.ALL|wx.EXPAND, 2)
        right_sizer.Add(self.m_command_box, 0, wx.ALL|wx.EXPAND, 3)
        main_sizer.Add(right_sizer, 8, wx.EXPAND)
        self.SetSizer(main_sizer)
        # Populate the three navigator trees.
        self.build_session_tab()
        self.build_suite_tree()
        self.build_function_tab()
        ico = wx.Icon('./gui/dash.bmp', wx.BITMAP_TYPE_ICO)
        self.SetIcon(ico)
        # Background watcher for subprocess test cases (see polling_running_cases).
        th= threading.Thread(target=self.polling_running_cases)
        th.start()
    def on_close(self, event):
        """Handle frame close: stop the polling thread, dump the generated
        replay script, mail the final report, close session tabs and restore
        the original stdout/stderr."""
        self.alive =False
        time.sleep(0.01)
        self.generate_code(file_name='{}/test_script.py'.format(self.suite_path))
        self.mail_test_report("DASH TEST REPORT")
        for index in range(0,self.edit_area.GetPageCount()): #len(self.tabs_in_edit_area)):
            closing_page = self.edit_area.GetPage(index)
            if isinstance(closing_page, (SessionTab)):
                if closing_page:
                    name = closing_page.name
                    self.tabs_in_edit_area.pop(self.tabs_in_edit_area.index(name))
                    closing_page.on_close()
        self.redir.close()
        sys.stderr =self.redir.old_stderr
        sys.stdout = self.redir.old_stdout
        event.Skip()
    def generate_report(self, filename):
        """Build a tab-separated test report from dict_test_report.

        Appends the records to *filename* (only when there is at least one
        entry) and returns the report text.
        """
        report = '''Test Report
RESULT,\tStart_Time,\tEnd_Time,\tPID,\tDuration,\tCase_Name,\tLog\n'''
        if len(self.dict_test_report):
            with open(filename, 'a+') as f:
                f.write(report)
                for pi in self.dict_test_report:
                    for case_name in self.dict_test_report[pi]:
                        start_time, end_time, duration, return_code ,proc, log_path =self.dict_test_report[pi][case_name][:6]
                        if return_code is None:
                            # still running ("In Progress")
                            result = 'IP'
                        else:
                            result = return_code # 'FAIL' if return_code else 'PASS'
                        record = '\t'.join(['{},'.format(x) for x in [result,start_time,end_time,pi,duration,case_name,log_path ]])
                        report+=record+'\n'
                        f.write(record+'\n')
        return report
    def on_close_tab_in_edit_area(self, event):
        """Close the selected editor tab; for session tabs also drop the
        session object that was published into globals()."""
        #self.edit_area.GetPage(self.edit_area.GetSelection()).on_close()
        closing_page = self.edit_area.GetPage(self.edit_area.GetSelection())
        closing_page.on_close()
        if isinstance(closing_page, (SessionTab)):
            ses_name = closing_page.name
            self.tabs_in_edit_area.pop(self.tabs_in_edit_area.index(ses_name))
            if globals().has_key(ses_name):
                #g = dict(globals())
                #globals()[ses_name]=None
                #del g[ses_name]
                globals()[ses_name].close_session()
                del globals()[ses_name]
    def add_item_to_subfolder_in_tree(self,node):
        """Populate *node* of the CASE tree with the entries of the folder it
        points to; mark the node red if the folder no longer exists."""
        subfolder_path_name = self.case_suite_page.GetPyData(node)['path_name']
        items = get_folder_item(subfolder_path_name)
        if items is None:
            self.case_suite_page.SetItemText(node, self.m_case_tree.GetItemText(node) + ' Not Exists!!!')
            self.case_suite_page.SetItemTextColour(node, wx.Colour(255, 0, 0))
            return
        for i in items:
            path_name = '{}/{}'.format(subfolder_path_name,i)
            base_name = os.path.basename(i)
            item_info = wx.TreeItemData({'path_name':path_name})
            new_item = self.case_suite_page.InsertItem(node, node, base_name)
            self.case_suite_page.SetItemData(new_item, item_info)
            if os.path.isdir(path_name):
                # show the expand button; children are added lazily on expand
                self.case_suite_page.SetItemHasChildren(new_item)
        #self.m_case_tree.ItemHasChildren()
        #self.m_case_tree.InsertItem(new_item,new_item,'')
    def build_suite_tree(self):
        """Create the root of the CASE tree from suite_path and fill its
        first level."""
        suite_path = self.suite_path #os.path.abspath(self.ini_setting.get('dash','test_suite_path'))
        if not os.path.exists(suite_path):
            suite_path= os.path.abspath(os.path.curdir)
        base_name = os.path.basename(suite_path)
        root =self.case_suite_page.AddRoot(base_name)
        item_info = wx.TreeItemData({'path_name':suite_path})
        self.case_suite_page.SetItemData(root, item_info)
        self.add_item_to_subfolder_in_tree(root)
        self.case_suite_page.Expand(root)
    # def OnSelChanged(self, event):
    #     item = event.GetItem()
    #     self.display.SetLabel(self.tree.GetItemText(item))
    #def case_tree_OnMouseWheel(self, event):
    def m_case_treeOnLeftDClick(self, event):
        """Double-click in the CASE tree: toggle folders, or open a file in a
        new editor tab (grid editor for spreadsheets, text otherwise)."""
        ht_item =self.case_suite_page.GetSelection()
        #ht_item = self.HitTest(event.GetPosition())
        item_name = self.case_suite_page.GetItemText(ht_item)
        item_data = self.case_suite_page.GetItemData(ht_item)
        if self.case_suite_page.ItemHasChildren(ht_item):
            if self.case_suite_page.IsExpanded(ht_item):
                self.case_suite_page.Collapse(ht_item)
            else:
                self.case_suite_page.ExpandAllChildren(ht_item)
        else:
            # NOTE(review): this compares the whole item name against the
            # extension list, so 'grid' is chosen only when the label IS
            # literally '.csv'/'.xlsx'/'.xls' — probably meant to test the
            # file extension (os.path.splitext). Confirm before changing.
            if item_name.lower() in ['.csv', '.xlsx','.xls']:
                type = 'grid'
                file_name = item_data.Data['path_name']
            else:
                type = 'text'
                file_name = item_data.Data['path_name']
            new_page = FileEditor(self.edit_area, 'a', type= type,file_name=file_name)
            self.edit_area.AddPage(new_page, item_name)
            index = self.edit_area.GetPageIndex(new_page)
            self.edit_area.SetSelection(index)
    def m_case_treeOnTreeItemExpanding(self,event):
        """Lazily populate a CASE-tree folder the first time it is expanded."""
        ht_item =self.case_suite_page.GetSelection()
        try:
            item_info = self.case_suite_page.GetPyData(ht_item)
            if 0== self.case_suite_page.GetChildrenCount(ht_item):
                if os.path.isdir(item_info['path_name']):
                    self.add_item_to_subfolder_in_tree(ht_item)
        except Exception as e:
            # best-effort: leave the node empty if it cannot be read
            pass
    def build_session_tab(self):
        """(Re)build the SESSION tree from the *.csv bench files found in the
        configured session_path."""
        if self.session_page.RootItem:
            self.session_page.DeleteAllItems()
        session_path = os.path.abspath(self.ini_setting.get('dash','session_path'))
        self.session_path= session_path
        if not os.path.exists(session_path):
            session_path= os.path.abspath(os.path.curdir)
        base_name = os.path.basename(session_path)
        sessions = {}
        root =self.session_page.AddRoot(base_name)
        item_info = wx.TreeItemData({'path_name':session_path})
        self.session_page.SetItemData(root, item_info)
        self.session_page.Expand(root)
        item_list = get_folder_item(session_path)
        session_files=[]
        for item in item_list:
            if os.path.isfile('{}/{}'.format(session_path,item)) and '{}'.format(item).lower().strip().endswith('.csv'):
                session_files.append(item)
        for csv_file in sorted(session_files):
            try:
                ses_in_bench = load_bench(os.path.abspath('{}/{}'.format(session_path, csv_file)))
                for bench in ses_in_bench:
                    for ses in ses_in_bench[bench]:
                        # make relative login_step paths absolute w.r.t. session_path
                        if ses_in_bench[bench][ses].has_key('login_step') and ses_in_bench[bench][ses]['login_step'].strip() not in ['', None]:
                            ses_in_bench[bench][ses].update(
                                {'login_step': os.path.abspath('{}/{}'.format(session_path, ses_in_bench[bench][ses]['login_step'].strip()))}
                            )
                sessions.update(ses_in_bench)
            except Exception as e:
                # best-effort: skip bench files that fail to load
                pass
        root =self.session_page.GetRootItem()
        for file_name in sorted(sessions.keys()):
            item_name = os.path.basename(file_name)
            item_info = wx.TreeItemData({'file_name':file_name})
            new_bench = self.session_page.InsertItem(root, root, item_name)
            # NOTE(review): SetItemData is called on case_suite_page for
            # session_page items (here and below) — looks like a copy/paste
            # slip; verify which tree should own this data.
            self.case_suite_page.SetItemData(new_bench, item_info)
            for ses in sorted(sessions[file_name]):
                item_name = ses
                item_info = wx.TreeItemData({'attribute':sessions[file_name][ses]})
                new_item = self.session_page.InsertItem(new_bench, new_bench, item_name)
                self.case_suite_page.SetItemData(new_item, item_info)
        self.session_page.Expand(root)
        first_child = self.session_page.GetFirstChild(root)
        self.session_page.Expand(first_child[0])
    def on_LeftDClick_in_Session_tab(self, event):
        """Double-click on a session: open a SessionTab, give it a unique
        name, and publish the session object into globals()."""
        event.Skip()
        ses_name = self.session_page.GetItemText(self.session_page.GetSelection())
        self.session_page.GetItemText(self.session_page.GetSelection())
        session_attribute = self.session_page.GetItemData(self.session_page.GetSelection())
        if session_attribute.Data.has_key('attribute'):
            info(session_attribute.Data['attribute'])
            # disambiguate against already-open tabs: name, name_1, name_2, ...
            counter =1
            original_ses_name = ses_name
            while ses_name in self.tabs_in_edit_area:
                ses_name= '{}_{}'.format(original_ses_name,counter)
                counter+=1
            # avoid clobbering an existing global of the same name
            if globals().has_key(ses_name):
                if not globals().has_key('_{}'.format(ses_name)):
                    info("variable '{}' is existed in global, change the name to _{}".format(ses_name, ses_name))
                    ses_name='_{}'.format(ses_name)
                    self.session_page.SetItemText(self.session_page.GetSelection(), ses_name)
                else:
                    error(("variable '{}' is existed in global, please change the name".format(ses_name)))
                    return
            new_page = SessionTab(self.edit_area, ses_name, session_attribute.Data['attribute'], self.sequence_queue, log_path=self.log_path)
            window_id = self.edit_area.AddPage(new_page, ses_name)
            index = self.edit_area.GetPageIndex(new_page)
            self.edit_area.SetSelection(index)
            self.tabs_in_edit_area.append(ses_name)
            self.sessions_alive.update({ses_name: new_page.name})
            attribute = session_attribute.Data['attribute']
            # placeholder path; replaced with the real log_path variable when
            # the generated script is produced (see add_new_session_to_globals)
            log_path='a_fake_log_path_for_auto_script'
            attribute['log_path']=log_path
            self.add_new_session_to_globals(new_page, '{}'.format(attribute))
            #globals().update({ses_name: new_page.session})
    def add_new_session_to_globals(self, new_page, args_str):
        """Publish *new_page* under its name in globals() and queue the
        equivalent 'dut.dut(...)' line for the generated replay script."""
        if globals().has_key(new_page.name):
            if globals()[new_page.name]==None:
                pass
            else:
                error('{} already '.format(new_page.name))
        else:
            globals().update({new_page.name: new_page})
        self.add_cmd_to_sequence_queue('{} = dut.dut(name= "{}", **{})'.format(new_page.name,new_page.name,args_str.replace("'a_fake_log_path_for_auto_script'",'log_path').replace("'not_call_open': True,", "'not_call_open': False,") ), 'dut')
        #session = dut(name, **attributes)
    def on_command_enter(self, event):
        """Parse the command box ('module.class.function args...' or
        'module.function args...') and run the call in a worker thread."""
        info('called on_command_enter')
        cmd = self.m_command_box.GetValue()
        self.m_command_box.Clear()
        if cmd.strip()=='':
            return
        module,class_name, function,args = parse_command_line(cmd)
        #args[0]=self.sessions_alive['test_ssh'].session
        if module !='' or class_name!='' or function!='':
            instance_name, function_name, new_argvs, new_kwargs, str_code = call_function_in_module(module,class_name,function,args, globals())
            call_function = None
            if class_name!="":
                call_function = getattr(instance_name, function_name)
                #(*new_argvs,**new_kwargs)
            else:
                call_function = instance_name#(*new_argvs,**new_kwargs)
            # run in a thread so long calls don't freeze the UI
            th =threading.Thread(target=call_function, args=new_argvs, kwargs=new_kwargs)
            th.start()
            self.add_cmd_to_history(cmd, module, str_code)
        else:
            error('"{}" is NOT a valid call in format:\n\tmodule.class.function call or \n\tmodule.function'.format(cmd))
    def add_src_path_to_python_path(self, path):
        """Prepend each existing ';'-separated directory of *path* to
        sys.path, skipping duplicates."""
        paths = path.split(';')
        old_path = sys.path
        for p in paths:
            if p in old_path:
                info('path {} already in sys.path'.format(p))
            else:
                abspath = os.path.abspath(p)
                if os.path.exists(abspath):
                    sys.path.insert(0,abspath)
                else:
                    warn('path {} is not existed, ignored to add it into sys.path'.format(p))
    def on_key_down(self, event):
        """Key-down in the command box: TAB and Shift+'?' submit the command
        (with the character appended); everything else is passed through."""
        #error(event.KeyCode)
        keycode = event.KeyCode
        if keycode ==wx.WXK_TAB:
            self.m_command_box.AppendText('\t')
            self.on_command_enter(event)
        # NOTE(review): wx.PAPER_ENV_INVITE is a paper-size constant, not a
        # key code — likely meant the '/'-'?' key. Verify the intended key.
        elif keycode == wx.PAPER_ENV_INVITE and wx.GetKeyState(wx.WXK_SHIFT):
            self.m_command_box.AppendText('?')
            self.on_command_enter(event)
        else:
            event.Skip()
    def on_key_up(self, event):
        """Key-up in the command box: UP/DOWN cycle through the command
        history ring."""
        keycode = event.KeyCode
        increase =False
        if keycode ==wx.WXK_UP:
            pass
        elif keycode ==wx.WXK_DOWN:
            increase =True#
        if keycode in [wx.WXK_UP, wx.WXK_DOWN]:
            self.m_command_box.Clear()
            self.history_cmd_index, new_command = get_next_in_ring_list(self.history_cmd_index,self.history_cmd,increase=increase)
            self.m_command_box.AppendText(new_command)
        if keycode in [wx.WXK_TAB]:
            pass
        else:
            event.Skip()
    def add_cmd_to_history(self, cmd, module_name, str_code):
        """Append *cmd* to the history (skipping immediate duplicates) and
        queue *str_code* for the generated replay script."""
        if self.history_cmd==[]:
            self.history_cmd.append(cmd)
        elif self.history_cmd[-1]==cmd:
            pass
        else:
            self.history_cmd.append(cmd)
        self.history_cmd_index= len(self.history_cmd)
        self.add_cmd_to_sequence_queue(str_code,module_name )
        #self.sequence_queue.put([cmd, datetime.now()])
    def build_function_tab(self):
        """Build the FUNCTION tree: import every module found in src_path and
        list its functions, classes and class methods."""
        src_path = os.path.abspath(self.src_path)
        if not os.path.exists(src_path):
            src_path= os.path.abspath(os.path.curdir)
        base_name = os.path.basename(src_path)
        root =self.function_page.AddRoot(base_name)
        item_info = wx.TreeItemData({'name':src_path})
        self.function_page.SetItemData(root, item_info)
        modules = get_folder_item(src_path)
        if modules is None:
            self.function_page.SetItemText(root, self.function_page.GetItemText(root) + ' Not Exists!!!')
            self.function_page.SetItemTextColour(root, wx.Colour(255, 0, 0))
            return
        for module_file in modules:
            path_name = '{}'.format(os.path.abspath(self.src_path))
            module_name = os.path.basename(module_file).split('.')[0]
            new_module = self.function_page.InsertItem(root, root, module_name)
            file, path_name, description = imp.find_module(module_name)
            lmod = imp.load_module(module_name, file, path_name,description)
            for attr in sorted(dir(lmod)):
                if attr.startswith('__'):
                    continue
                attr_obj = getattr(lmod, attr)
                attr_type = type(attr_obj)
                if attr_type == types.FunctionType :
                    new_item = self.function_page.InsertItem(new_module, new_module, '{}'.format( attr))
                    item_info = wx.TreeItemData({'name':'{}.{}'.format(module_name,attr)})
                    self.function_page.SetItemData(new_item, item_info)
                elif attr_type== types.TypeType:
                    class_obj = getattr(lmod, attr)
                    new_class = self.function_page.InsertItem(new_module, new_module, attr)
                    item_info = wx.TreeItemData({'name':'{}.{}'.format(module_name,attr)})
                    # NOTE(review): attaches the class info to `new_item`
                    # (the last function item) instead of `new_class` —
                    # looks like a bug; verify before fixing.
                    self.function_page.SetItemData(new_item, item_info)
                    for attr_in_class in sorted(dir(class_obj)):
                        if attr_in_class.startswith('__'):
                            continue
                        attr_obj = getattr(class_obj,attr_in_class)
                        attr_type =type(attr_obj)
                        if attr_type == types.MethodType :
                            item_info = wx.TreeItemData({'name':'{}.{}.{}'.format(module_name,attr,attr_in_class)})
                            new_item = self.function_page.InsertItem(new_class, new_class, attr_in_class)
                            self.function_page.SetItemData(new_item, item_info)
        self.function_page.Expand(root)
        first_child = self.function_page.GetFirstChild(root)
        self.function_page.Expand(first_child[0])
    def on_LeftDClick_in_Function_tab(self,event):
        """Double-click on a function item: paste its dotted name into the
        command box ready to be completed with arguments."""
        event.Skip()
        select_item = self.function_page.GetSelection()
        fun_name = self.function_page.GetItemData(select_item)
        text_in_tree = self.function_page.GetItemText(select_item)
        if fun_name != None and fun_name.Data.has_key('name'):
            cmd = fun_name.Data['name']
            info('click item in Functions tab: {}'.format(fun_name.Data['name']))
            wx.CallAfter(self.m_command_box.Clear)
            wx.CallAfter(self.m_command_box.AppendText, cmd+' ')
            wx.CallAfter(self.m_command_box.SetFocus)
            wx.CallAfter(self.m_command_box.SetInsertionPointEnd)
            wx.CallAfter(self.m_command_box.Refresh)
    def on_right_down_in_function_tab(self, event):
        """Right-click in the FUNCTION tree: popup menu with 'Refresh'."""
        menu = wx.Menu()
        item = wx.MenuItem(menu, wx.NewId(), "Refresh")
        #acc = wx.AcceleratorEntry()
        #acc.Set(wx.ACCEL_NORMAL, ord('O'), self.popupID1)
        #item.SetAccel(acc)
        menu.AppendItem(item)
        self.Bind(wx.EVT_MENU, self.on_refresh_function_page,item)
        self.PopupMenu(menu,event.GetPosition())
    def on_refresh_function_page(self, event):
        """Rebuild the FUNCTION tree from scratch."""
        self.function_page.DeleteAllItems()
        self.build_function_tab()
        info('Refresh Function tab done!')
    def add_cmd_to_sequence_queue(self, cmd, module_name):
        """Queue an executed command for the generated script and remember
        its module so it gets imported there."""
        if self.import_modules.has_key(module_name):
            pass
        else:
            self.import_modules.update({module_name:module_name})
        self.sequence_queue.put([cmd,datetime.now() ])
    def generate_code(self, file_name ):
        """Drain sequence_queue into a standalone replay script appended to
        *file_name* (skipped when no command was recorded).

        NOTE(review): the indentation inside the template strings below
        appears lost in this copy of the file — the generated script relies
        on it; verify against the original source.
        """
        str_code ="""#created by DasH
if __name__ == "__main__":
import sys, traceback
sys.path.insert(0,r'{}')
sys.path.insert(0,r'{}')
import lib.common
log_path= '../log/tmp'
log_path= lib.common.create_case_folder()
try:
""".format(self.src_path,self.lib_path )
        sessions =[]
        for module in self.import_modules:
            str_code+=' import {mod}\n'.format(mod=module)#\n {mod}_instance = {mod}()
        no_operation = True
        while True:
            try:
                cmd, timestamp =self.sequence_queue.get(block=False)[:2]
                str_code +=' {} #{}\n'.format(cmd, timestamp.isoformat( ' '))
                # remember created sessions so the script can close them
                if cmd.find('dut.dut(')!=-1:
                    sessions.append(cmd.split('=')[0].strip())
                no_operation=False
                #datetime.now().isoformat()
            except Exception as e:
                # queue drained (Queue.Empty)
                break
        close_session=''
        str_code+=''' except Exception as e:
print(traceback.format_exc())\n'''
        for ses in sessions:
            str_code+=''' {}.close_session()\n'''.format(ses)
        str_code+=' sys.exit(-1)\n'#, sys.exit(-1)
        for ses in sessions:
            str_code+=''' {}.close_session()\n'''.format(ses)
        info(str_code)
        if not no_operation:
            with open(file_name, 'a+') as f:
                f.write(str_code)
    def on_right_down_in_case_tab(self, event):
        """Right-click in the CASE tree: popup menu with Run/Kill Test."""
        menu = wx.Menu()
        item1 = wx.MenuItem(menu, wx.NewId(), "Run Test")
        item2 = wx.MenuItem(menu, wx.NewId(), "Kill Test")
        #acc = wx.AcceleratorEntry()
        #acc.Set(wx.ACCEL_NORMAL, ord('O'), self.popupID1)
        #item.SetAccel(acc)
        menu.AppendItem(item1)
        menu.AppendItem(item2)
        self.Bind(wx.EVT_MENU, self.on_run_script,item1)
        self.Bind(wx.EVT_MENU, self.on_kill_script,item2)
        self.PopupMenu(menu,event.GetPosition())
    def on_kill_script(self,event):
        """Terminate the subprocess attached to the selected case item (if
        still running) and record its final status."""
        hit_item = self.case_suite_page.GetSelection()
        item_name = self.case_suite_page.GetItemText(hit_item)
        item_data = self.case_suite_page.GetItemData(hit_item).Data
        if item_data.has_key('PROCESS'):
            p = item_data['PROCESS']
            name= item_data['FULL_NAME']
            info('script:{}, returncode:{}'.format(name,p.returncode))
            if p.returncode is None:
                #if p.is_alive():
                info('Terminate alive process {}:{}'.format(item_name, p.pid))
                result ='KILL'
                self.mail_test_report("DASH TEST REPORT-updating")
                p.terminate()
            else:
                result ='FAIL' if p.returncode else 'PASS'
                info('{}:{} completed with returncode {}'.format(item_name, p.pid, result))
            self.update_case_status(p.pid,item_name,result)
    def on_run_script(self,event):
        """Run the selected case as a subprocess (script_runner.exe when
        available, else the current interpreter) with a fresh log folder,
        and register it in the test report."""
        hit_item = self.case_suite_page.GetSelection()
        item_name = self.case_suite_page.GetItemText(hit_item)
        import shlex
        # item text is '<script> <args...>'; split shell-style, '"' quoting
        lex = shlex.shlex(item_name)
        lex.quotes = '"'
        lex.whitespace_split = True
        script_args =list(lex)[1:]
        item_data = self.case_suite_page.GetItemData(hit_item).Data
        script_name = self.case_suite_page.GetItemData(hit_item).Data['path_name']
        from lib.common import run_script
        from multiprocessing import Process, Queue
        import subprocess
        # stop a previous run of the same item before starting a new one
        self.on_kill_script(event)
        #queue = Queue()
        from lib.common import create_case_folder
        # temporarily swap sys.argv so create_case_folder names the folder
        # after the script
        old_sys_argv = sys.argv
        sys.argv= [script_name]+script_args
        case_log_path = create_case_folder()
        sys.argv= old_sys_argv
        try:
            if os.path.exists('script_runner.exe'):
                execute = 'script_runner.exe'
                cmd = [execute,script_name ]+script_args + ['-l','{}'.format(case_log_path)]
                #p=subprocess.Popen(cmd, creationflags = subprocess.CREATE_NEW_CONSOLE)
            else:
                cmd = [sys.executable, script_name ]+script_args+ ['-l','{}'.format(case_log_path)]
            p=subprocess.Popen(cmd, creationflags = subprocess.CREATE_NEW_CONSOLE)
            self.case_suite_page.GetItemData(hit_item).Data['PROCESS']=p
            self.case_suite_page.GetItemData(hit_item).Data['FULL_NAME']= item_name
            info('start process {} :{}'.format(item_name, p.pid))
            self.add_newe_case_to_report(p.pid,item_name,p,case_log_path)
            #p.join() # this blocks until the process terminates
            time.sleep(1)
        except Exception as e :
            error(traceback.format_exc())
        #p = Process(target=run_script, args=[script_name, script_and_args])
        #p.start()
    def check_case_status(self):
        """Poll every tracked subprocess; mark finished ones PASS/FAIL and
        mail an updated report when anything changed. Returns True when at
        least one case finished since the last check."""
        changed=False
        for pid in self.dict_test_report:
            for case_name in self.dict_test_report[pid]:
                start_time, end_time, duration, return_code ,proc, log_path= self.dict_test_report[pid][case_name]
                if return_code is None:
                    if proc.poll() is None:
                        pass
                        debug('RUNNING', start_time, end_time, duration, return_code ,proc, log_path)
                    else:
                        changed=True
                        return_code = 'FAIL' if proc.returncode else 'PASS'
                        self.update_case_status(pid,case_name,return_code)
        if changed:
            #test_report = self.generate_report(filename='{}/dash_report.txt'.format(self.log_path))
            self.mail_test_report('DasH Test Report-updating')
        return changed
    def polling_running_cases(self):
        """Background loop (daemonless thread): every 10 s re-check running
        cases until `alive` is cleared by on_close()."""
        while True:
            time.sleep(10)
            try:
                if not self.alive:
                    break
            except:
                break
            self.check_case_status()
    def add_newe_case_to_report(self, pid, case_name, proc, log_path):
        """Register a freshly started case in dict_test_report.
        (Name keeps the historical 'newe' typo for caller compatibility.)"""
        start_time=datetime.now()
        duration = 0
        end_time = None
        return_code = None
        if pid in self.dict_test_report:
            self.dict_test_report[pid].update({case_name:[start_time,end_time, duration, return_code, proc,log_path]})
        else:
            self.dict_test_report[pid]={case_name:[start_time, end_time, duration,return_code, proc, log_path ]}
    def update_case_status(self, pid,case_name, return_code=None):
        """Refresh a case's duration; when *return_code* is given, also stamp
        the end time and final result."""
        now = datetime.now()
        start_time, end_time, duration, tmp_return_code ,proc,log_path= self.dict_test_report[pid][case_name]
        if return_code is None:
            duration = (now-start_time).total_seconds()
            self.dict_test_report[pid][case_name]=[start_time, end_time, duration, tmp_return_code, proc, log_path]
        else:
            duration = (now-start_time).total_seconds()
            self.dict_test_report[pid][case_name]=[start_time, now, duration, return_code, proc, log_path]
    def mail_test_report(self, subject="DASH TEST REPORT-updating"):
        """Generate the current report and mail it via the configured SMTP
        server; failures are logged, never raised."""
        try:
            from lib.common import send_mail_smtp_without_login
            self.check_case_status()
            test_report = self.generate_report(filename='{}/dash_report.txt'.format(self.log_path))
            #TO, SUBJECT, TEXT, SERVER, FROM
            send_mail_smtp_without_login(self.mail_to_list, subject,test_report,self.mail_server,self.mail_from)
        except Exception as e:
            error(traceback.format_exc())
    def on_mail_test_report(self,event):
        """Menu handler: mail the current test report."""
        self.mail_test_report('DasH Test Report-updating')
        #p.terminate()
#p.terminate()
#done: 2017-08-22, 2017-08-19 save main log window to a file
#todo: 2017-08-19 add timestamps to log message
#done: 2017-08-22, 2017-08-19 mail to someone
#todo: 2017-08-19 run a script in DasH
#todo: 2017-08-19 generate test report
#todo: 2017-08-19 publish all test cases in a web page
#todo: 2017-08-19 trigger a test remote via web page
#todo: 2017-08-19 re-run failed cases
#todo: 2017-08-19 build executable package for DasH
#todo: 2017-08-19 a popup window to get email address/password/mail_server...
#todo: 2017-08-22 output in m_log window has a lot of empty line, need remove them
#todo: 2017-08-23 in common.call_function_in_module, should end all threads which are started in previous instance
#todo: 2017-08-23 add tips for all tree items in the left navigator
import time
import asyncio
import aiohttp
import requests
import mimetypes
from requests.adapters import HTTPAdapter
from python_rucaptcha.config import app_key
from python_rucaptcha.errors import RuCaptchaError
from python_rucaptcha.result_handler import get_sync_result, get_async_result
from python_rucaptcha.decorators import api_key_check, service_check
class RotateCaptcha:
    """Synchronous client for solving a rotate-captcha through the
    RuCaptcha / 2captcha HTTP API.

    The image is downloaded (or read from disk), POSTed to the service, and
    after ``sleep_time`` seconds the result is polled via
    ``get_sync_result``.
    """

    def __init__(
        self,
        rucaptcha_key: str,
        service_type: str = "2captcha",
        sleep_time: int = 5,
        pingback: str = None,
        **kwargs,
    ):
        """Prepare the request payloads and the HTTP session.

        :param rucaptcha_key: API key from the user's account page
        :param service_type: which service the library talks to; `rucaptcha`
               or `2captcha`
        :param sleep_time: how long to wait before polling for the solution
        :param pingback: URL that will receive the callback answer from
               RuCaptcha instead of polling
        :param kwargs: extra parameters merged into the POST payload
        """
        # delay before polling for the captcha solution
        self.sleep_time = sleep_time
        # which service base URL the library will use
        self.service_type = service_type
        # POST payload used when submitting the captcha to the server
        self.post_payload = {
            "key": rucaptcha_key,
            "method": "rotatecaptcha",
            "json": 1,
            "soft_id": app_key,
        }
        # register the callback URL if one was supplied
        if pingback:
            self.post_payload.update({"pingback": pingback})
        # merge any extra keyword parameters into the POST payload
        if kwargs:
            for key in kwargs:
                self.post_payload.update({key: kwargs[key]})
        # GET payload used when polling for the captcha result
        self.get_payload = {"key": rucaptcha_key, "action": "get", "json": 1}
        # HTTP session with retry-on-connection-error behaviour
        self.session = requests.Session()
        self.session.mount("http://", HTTPAdapter(max_retries=5))
        self.session.mount("https://", HTTPAdapter(max_retries=5))

    def __enter__(self):
        return self

    def __exit__(self, exc_type, exc_value, traceback):
        # do not suppress exceptions raised inside the `with` block
        if exc_type:
            return False
        return True

    @api_key_check
    @service_check
    def captcha_handler(self, captcha_link: str, **kwargs):
        """Download the captcha image, submit it to the service and wait for
        (or delegate via pingback) the solution.

        :param captcha_link: URL of the image, or a local file path
        :param kwargs: extra parameters merged into the GET payload
        :return: JSON-like dict with the fields:
            captchaSolve - the captcha solution,
            taskId - ID of the solving task (useful for complaints etc.),
            error - False when everything is fine, True on error,
            errorBody - full error information:
                {
                    text - human-readable explanation of the error
                    id - unique error number within THIS library
                }
        """
        # result, url_request, url_response are set by the `service_check`
        # decorator after validating the supplied service name
        if kwargs:
            for key in kwargs:
                self.get_payload.update({key: kwargs[key]})
        # Fetch the image bytes. BUGFIX: the local-path branch used to pass
        # an open file object that was never closed; read and close it here.
        if "http" in captcha_link:
            content = self.session.get(captcha_link).content
        else:
            with open(captcha_link, "rb") as captcha_file:
                content = captcha_file.read()
        # BUGFIX: mimetypes.guess_type() returns a (type, encoding) tuple;
        # the third element of a `files` triple must be the type string.
        files = {"file_1": ("file_1", content, mimetypes.guess_type(captcha_link)[0])}
        # Submit the captcha; the JSON answer carries the task id (or error).
        captcha_id = self.session.post(
            self.url_request, data=self.post_payload, files=files
        ).json()
        # on error: record it and return immediately
        if captcha_id["status"] == 0:
            self.result.update(
                {"error": True, "errorBody": RuCaptchaError().errors(captcha_id["request"])}
            )
            return self.result
        # otherwise remember the task id and wait for the solution
        else:
            captcha_id = captcha_id["request"]
            self.result.update({"taskId": captcha_id})
            self.get_payload.update({"id": captcha_id})
            # with a pingback configured we don't poll — the service calls us
            if self.post_payload.get("pingback"):
                return self.get_payload
            else:
                # give the workers time to solve before polling
                time.sleep(self.sleep_time)
                return get_sync_result(
                    get_payload=self.get_payload,
                    sleep_time=self.sleep_time,
                    url_response=self.url_response,
                    result=self.result,
                )
class aioRotateCaptcha:
    """Asynchronous client for solving a rotate-captcha through the
    RuCaptcha / 2captcha HTTP API.

    Mirrors :class:`RotateCaptcha` but performs the HTTP work with aiohttp
    inside the coroutine.
    """

    def __init__(
        self,
        rucaptcha_key: str,
        service_type: str = "2captcha",
        sleep_time: int = 5,
        pingback: str = None,
        **kwargs,
    ):
        """Prepare the request payloads and the HTTP session.

        :param rucaptcha_key: API key from the user's account page
        :param service_type: which service the library talks to; `rucaptcha`
               or `2captcha`
        :param sleep_time: how long to wait before polling for the solution
        :param pingback: URL that will receive the callback answer from
               RuCaptcha instead of polling
        :param kwargs: extra parameters merged into the POST payload
        """
        # delay before polling for the captcha solution
        self.sleep_time = sleep_time
        # which service base URL the library will use
        self.service_type = service_type
        # POST payload used when submitting the captcha to the server
        self.post_payload = {
            "key": rucaptcha_key,
            "method": "rotatecaptcha",
            "json": 1,
            "soft_id": app_key,
        }
        # register the callback URL if one was supplied
        if pingback:
            self.post_payload.update({"pingback": pingback})
        # merge any extra keyword parameters into the POST payload
        if kwargs:
            for key in kwargs:
                self.post_payload.update({key: kwargs[key]})
        # GET payload used when polling for the captcha result
        self.get_payload = {"key": rucaptcha_key, "action": "get", "json": 1}
        # sync session kept for interface compatibility with RotateCaptcha
        self.session = requests.Session()
        self.session.mount("http://", HTTPAdapter(max_retries=5))
        self.session.mount("https://", HTTPAdapter(max_retries=5))

    def __enter__(self):
        return self

    def __exit__(self, exc_type, exc_value, traceback):
        # do not suppress exceptions raised inside the `with` block
        if exc_type:
            return False
        return True

    @api_key_check
    @service_check
    async def captcha_handler(self, captcha_link: str, **kwargs):
        """Download the captcha image, submit it to the service and wait for
        (or delegate via pingback) the solution.

        :param captcha_link: URL of the image
        :param kwargs: extra parameters merged into the GET payload
        :return: JSON-like dict with the fields:
            captchaSolve - the captcha solution,
            taskId - ID of the solving task (useful for complaints etc.),
            error - False when everything is fine, True on error,
            errorBody - full error information:
                {
                    text - human-readable explanation of the error
                    id - unique error number within THIS library
                }
        """
        # result, url_request, url_response are set by the `service_check`
        # decorator after validating the supplied service name
        if kwargs:
            for key in kwargs:
                self.get_payload.update({key: kwargs[key]})
        async with aiohttp.ClientSession() as session:
            # BUGFIX: the image was fetched with a blocking requests call,
            # stalling the event loop — download it asynchronously instead.
            async with session.get(captcha_link) as image_resp:
                content = await image_resp.read()
            # attach the image bytes to the submission payload
            self.post_payload.update({"file": content})
            # submit the captcha and read the task id (or error)
            async with session.post(self.url_request, data=self.post_payload) as resp:
                captcha_id = await resp.json()
        # on error: record it and return immediately
        if captcha_id["status"] == 0:
            self.result.update(
                {"error": True, "errorBody": RuCaptchaError().errors(captcha_id["request"])}
            )
            return self.result
        # otherwise remember the task id and wait for the solution
        else:
            captcha_id = captcha_id["request"]
            self.result.update({"taskId": captcha_id})
            self.get_payload.update({"id": captcha_id})
            # with a pingback configured we don't poll — the service calls us
            if self.post_payload.get("pingback"):
                return self.get_payload
            else:
                # give the workers time to solve before polling
                await asyncio.sleep(self.sleep_time)
                return await get_async_result(
                    get_payload=self.get_payload,
                    sleep_time=self.sleep_time,
                    url_response=self.url_response,
                    result=self.result,
                )
Fix for the mistake =)
import time
import asyncio
import aiohttp
import requests
import mimetypes
from requests.adapters import HTTPAdapter
from python_rucaptcha.config import app_key
from python_rucaptcha.errors import RuCaptchaError
from python_rucaptcha.result_handler import get_sync_result, get_async_result
from python_rucaptcha.decorators import api_key_check, service_check
class RotateCaptcha:
    """Synchronous client for the RuCaptcha/2captcha ``rotatecaptcha`` method."""

    def __init__(
        self,
        rucaptcha_key: str,
        service_type: str = "2captcha",
        sleep_time: int = 5,
        pingback: str = None,
        **kwargs,
    ):
        """
        Initialise the variables needed to talk to the captcha service.

        :param rucaptcha_key: API key from the user's account
        :param service_type: Service the library talks to; either `rucaptcha` or `2captcha`
        :param sleep_time: Time to wait for the captcha solution
        :param pingback: URL on which the callback answer from RuCaptcha is expected
        :param kwargs: Extra parameters, merged into the POST payload
        """
        # time to wait before polling for the solution
        self.sleep_time = sleep_time
        # which service's URLs the library will use
        self.service_type = service_type
        # POST payload used to submit the captcha to the server
        self.post_payload = {
            "key": rucaptcha_key,
            "method": "rotatecaptcha",
            "json": 1,
            "soft_id": app_key,
        }
        # register the callback URL when one was supplied
        if pingback:
            self.post_payload.update({"pingback": pingback})
        # merge any extra parameters into the POST payload
        if kwargs:
            for key in kwargs:
                self.post_payload.update({key: kwargs[key]})
        # GET payload used to poll for the captcha solution
        self.get_payload = {"key": rucaptcha_key, "action": "get", "json": 1}
        # session used for all blocking HTTP traffic
        self.session = requests.Session()
        # retry up to 5 times on connection errors
        self.session.mount("http://", HTTPAdapter(max_retries=5))
        self.session.mount("https://", HTTPAdapter(max_retries=5))

    def __enter__(self):
        # the instance itself is the context object
        return self

    def __exit__(self, exc_type, exc_value, traceback):
        # returning False lets any exception propagate to the caller
        if exc_type:
            return False
        return True

    @api_key_check
    @service_check
    def captcha_handler(self, captcha_link: str, **kwargs):
        """
        Download the image behind *captcha_link* (or open it when it is a
        local path), submit it to the RuCaptcha server, wait for the
        solution and return the result.

        :param captcha_link: URL of the captcha image, or a path to a local file
        :param kwargs: Extra parameters, merged into the GET payload
        :return: Captcha answer as a dict with the fields:
            captchaSolve - captcha solution,
            taskId - id of the solving task, usable for complaints etc.,
            error - False when everything went fine, True on error,
            errorBody - full error information:
                {
                    text - detailed explanation of the error
                    id - unique number of the error in THIS library
                }
        """
        # result, url_request, url_response are set by the `service_check`
        # decorator after validating the service name.
        # Merge any extra parameters into the GET payload.
        if kwargs:
            for key in kwargs:
                self.get_payload.update({key: kwargs[key]})
        # Download the image, or open a local file when the argument is a path.
        content = self.session.get(captcha_link).content if "http" in captcha_link else open(captcha_link, "rb")
        # Send the image as a multipart file upload.
        files = {"file_1": ("file_1", content, mimetypes.guess_type(captcha_link)[0])}
        # Submit the captcha image together with the other parameters; the
        # JSON answer carries the id of the captcha being solved.
        captcha_id = self.session.post(
            self.url_request, data=self.post_payload, files=files
        ).json()
        # On error, record it and return the result immediately.
        if captcha_id["status"] == 0:
            self.result.update(
                {"error": True, "errorBody": RuCaptchaError().errors(captcha_id["request"])}
            )
            return self.result
        # Otherwise take the key of the submitted captcha and wait for the answer.
        else:
            captcha_id = captcha_id["request"]
            # store the task id of the submitted captcha
            self.result.update({"taskId": captcha_id})
            # add the captcha id to the polling payload
            self.get_payload.update({"id": captcha_id})
            # with `pingback` configured we do not wait for the solution and
            # return the (unfilled) polling payload right away
            if self.post_payload.get("pingback"):
                return self.get_payload
            else:
                # wait for the captcha to be solved (sleep_time seconds)
                time.sleep(self.sleep_time)
                return get_sync_result(
                    get_payload=self.get_payload,
                    sleep_time=self.sleep_time,
                    url_response=self.url_response,
                    result=self.result,
                )
class aioRotateCaptcha:
    """Asynchronous client for the RuCaptcha/2captcha ``rotatecaptcha`` method."""

    def __init__(
        self,
        rucaptcha_key: str,
        service_type: str = "2captcha",
        sleep_time: int = 5,
        pingback: str = None,
        **kwargs,
    ):
        """
        Initialise the variables needed to talk to the captcha service.

        :param rucaptcha_key: API key from the user's account
        :param service_type: Service the library talks to; either `rucaptcha` or `2captcha`
        :param sleep_time: Time to wait for the captcha solution
        :param pingback: URL on which the callback answer from RuCaptcha is expected
        :param kwargs: Extra parameters, merged into the POST payload
        """
        # time to wait before polling for the solution
        self.sleep_time = sleep_time
        # which service's URLs the library will use
        self.service_type = service_type
        # POST payload used to submit the captcha to the server
        self.post_payload = {
            "key": rucaptcha_key,
            "method": "rotatecaptcha",
            "json": 1,
            "soft_id": app_key,
        }
        # register the callback URL when one was supplied
        if pingback:
            self.post_payload.update({"pingback": pingback})
        # merge any extra parameters into the POST payload
        if kwargs:
            for key in kwargs:
                self.post_payload.update({key: kwargs[key]})
        # GET payload used to poll for the captcha solution
        self.get_payload = {"key": rucaptcha_key, "action": "get", "json": 1}
        # session used for the (blocking) image download
        self.session = requests.Session()
        # retry up to 5 times on connection errors
        self.session.mount("http://", HTTPAdapter(max_retries=5))
        self.session.mount("https://", HTTPAdapter(max_retries=5))

    def __enter__(self):
        # the instance itself is the context object
        return self

    def __exit__(self, exc_type, exc_value, traceback):
        # returning False lets any exception propagate to the caller
        if exc_type:
            return False
        return True

    @api_key_check
    @service_check
    async def captcha_handler(self, captcha_link: str, **kwargs):
        """
        Download the image behind *captcha_link*, submit it to the RuCaptcha
        server, wait for the solution and return the result.

        :param captcha_link: URL of the captcha image
        :param kwargs: Extra parameters, merged into the GET payload
        :return: Captcha answer as a dict with the fields:
            captchaSolve - captcha solution,
            taskId - id of the solving task, usable for complaints etc.,
            error - False when everything went fine, True on error,
            errorBody - full error information:
                {
                    text - detailed explanation of the error
                    id - unique number of the error in THIS library
                }
        """
        # result, url_request, url_response are set by the `service_check`
        # decorator after validating the service name.
        # Merge any extra parameters into the GET payload.
        if kwargs:
            for key in kwargs:
                self.get_payload.update({key: kwargs[key]})
        # Download the captcha image.
        # NOTE(review): blocking `requests` call inside a coroutine - confirm
        # this is acceptable for the library's event loop usage.
        content = self.session.get(captcha_link).content
        # Attach the image bytes to the POST payload.
        self.post_payload.update({"file": content})
        # Submit the captcha and obtain the id of the solving task.
        async with aiohttp.ClientSession() as session:
            async with session.post(self.url_request, data=self.post_payload) as resp:
                captcha_id = await resp.json()
        # On error, record it and return the result immediately.
        if captcha_id["status"] == 0:
            self.result.update(
                {"error": True, "errorBody": RuCaptchaError().errors(captcha_id["request"])}
            )
            return self.result
        # Otherwise take the key of the submitted captcha and wait for the answer.
        else:
            captcha_id = captcha_id["request"]
            # store the task id of the submitted captcha
            self.result.update({"taskId": captcha_id})
            # add the captcha id to the polling payload
            self.get_payload.update({"id": captcha_id})
            # with `pingback` configured we do not wait for the solution and
            # return the (unfilled) polling payload right away
            if self.post_payload.get("pingback"):
                return self.get_payload
            else:
                # wait before polling for the solution
                await asyncio.sleep(self.sleep_time)
                return await get_async_result(
                    get_payload=self.get_payload,
                    sleep_time=self.sleep_time,
                    url_response=self.url_response,
                    result=self.result,
                )
|
"""
function for calculating the convergence of an x, y data set
main function:
test_conv(xs, ys, name, tol)
tries to fit multiple functions to the x, y data
calculates which function fits best
for tol < 0
returns the x value for which y is converged within tol of the assymtotic value
for tol > 0
returns the x_value for which dy(x)/dx < tol for all x >= x_value, conv is true is such a x_value exists
for the best fit a gnuplot line is printed plotting the data, the function and the assymthotic value
"""
from __future__ import division
__author__ = "Michiel van Setten"
__copyright__ = " "
__version__ = "0.9"
__maintainer__ = "Michiel van Setten"
__email__ = "mjvansetten@gmail.com"
__date__ = "May 2014"
import string
import random
def id_generator(size=8, chars=string.ascii_uppercase + string.digits):
    """Return a random identifier of *size* characters drawn from *chars*."""
    picked = [random.choice(chars) for _ in range(size)]
    return ''.join(picked)
class SplineInputError(Exception):
    """Signals that the supplied data is unsuitable for spline interpolation."""

    def __init__(self, msg):
        # keep the message on the instance, mirroring the original contract
        self.msg = msg
def get_derivatives(xs, ys):
    """
    Return the derivatives dy/dx of y(x) at the points xs.

    If scipy is available and there are at least 4 points, a smoothing
    spline is fitted and its first derivative is evaluated at xs.
    Otherwise the one-sided finite-difference slopes are computed for every
    point and, where both exist, their average is returned.
    """
    d = None
    if len(xs) >= 4:
        # a cubic smoothing spline needs at least 4 data points
        try:
            from scipy.interpolate import UnivariateSpline
            spline = UnivariateSpline(xs, ys)
            d = spline.derivative(1)(xs)
        except ImportError:
            d = None
    if d is None:
        # finite-difference fallback
        d = []
        for n in range(len(xs)):
            slopes = []
            if n > 0:
                # left (backward) slope; guarded so the first point no longer
                # silently wraps around to the last one via negative indexing
                slopes.append((ys[n] - ys[n - 1]) / (xs[n] - xs[n - 1]))
            if n < len(xs) - 1:
                # right (forward) slope
                slopes.append((ys[n + 1] - ys[n]) / (xs[n + 1] - xs[n]))
            # average of the available one-sided slopes; fixes the original
            # operator-precedence bug `left + right / m` and the counter `m`
            # never being reset between points
            d.append(sum(slopes) / len(slopes))
    return d
"""
functions used in the fitting procedure, with initial guesses
"""
def reciprocal(x, a, b, n):
    """
    Reciprocal function a + b / x**n used to fit convergence data.

    The exponent n is clamped to the interval [1, 5].
    """
    import numpy as np
    if n < 1:
        n = 1
    elif n > 5:
        n = 5
    if isinstance(x, list):
        return np.array([a + b / x_v ** n for x_v in x])
    return a + b / x ** n
def p0_reciprocal(xs, ys):
    """
    Initial-guess predictor for the parameters of ``reciprocal``.

    Uses the last y as the asymptote and scales the prefactor from the
    first data point; the exponent starts at 1.
    """
    asymptote = ys[-1]
    prefactor = ys[0] * xs[0] - asymptote * xs[0]
    return [asymptote, prefactor, 1]
def exponential(x, a, b, n):
    """
    Exponential function a + b * n**-x used to fit convergence data.

    The base n is clamped to [1.000001, 1.2] and the prefactor b to [-10, 10].
    """
    import numpy as np
    if n < 1.000001:
        n = 1.000001
    elif n > 1.2:
        n = 1.2
    if b < -10:
        b = -10
    elif b > 10:
        b = 10
    if isinstance(x, list):
        return np.array([a + b * n ** -x_v for x_v in x])
    return a + b * n ** -x
def p0_exponential(xs, ys):
    """
    Initial-guess predictor for the parameters of ``exponential``.

    Starts from a base just above 1 and derives the prefactor and offset
    from the first and last data points.
    """
    base = 1.005
    prefactor = (base ** -xs[-1] - base ** -xs[0]) / (ys[-1] - ys[0])
    offset = ys[0] - prefactor * base ** -xs[0]
    return [offset, prefactor, base]
def single_reciprocal(x, a, b, c):
    """
    Single-pole reciprocal function a + b / (x - c) used to fit convergence data.
    """
    import numpy as np
    if isinstance(x, list):
        return np.array([a + b / (x_v - c) for x_v in x])
    return a + b / (x - c)
def p0_single_reciprocal(xs, ys):
    """
    Initial-guess predictor for the parameters of ``single_reciprocal``.

    Places the pole at 1 and derives slope and offset from the endpoints.
    """
    pole = 1
    slope = (1 / (xs[-1] - pole) - 1 / (xs[0] - pole)) / (ys[-1] - ys[0])
    offset = ys[0] - slope / (xs[0] - pole)
    return [offset, slope, pole]
def measure(function, xs, ys, popt, weights):
    """
    Measure the fit quality as the weighted sum of absolute residuals.

    Only 3-parameter functions are supported; anything else raises
    NotImplementedError (but, as before, only when there is data to scan).
    """
    total = 0
    for idx, x in enumerate(xs):
        if len(popt) != 3:
            # mirror the original behaviour: complain only when iterating
            raise NotImplementedError
        total += abs(ys[idx] - function(x, popt[0], popt[1], popt[2])) * weights[idx]
    return total
def multy_curve_fit(xs, ys, verbose):
    """
    Fit multiple trial functions to the x, y data and return the best fit.

    Each candidate function is fitted with ``scipy.optimize.curve_fit``
    starting from its predictor's initial guess; the fit with the smallest
    weighted residual (see ``measure``) wins.

    :return: (popt, pcov, best) of the best fit, where best is a
        (function, measure) tuple.
    """
    functions = {exponential: p0_exponential, reciprocal: p0_reciprocal, single_reciprocal: p0_single_reciprocal}
    import numpy as np
    from scipy.optimize import curve_fit
    fit_results = {}
    best = ['', np.inf]
    # slowly varying points count more: weight = 1/|dy/dx|. np.array is
    # needed because get_derivatives may return a plain list, on which the
    # original expression `abs(1 / ...)` raised TypeError. Hoisted out of
    # the loop since it does not depend on the candidate function.
    weights = abs(1 / np.array(get_derivatives(xs, ys)))
    for function in functions:
        try:
            popt, pcov = curve_fit(function, xs, ys, functions[function](xs, ys), maxfev=8000, sigma=weights)
            m = measure(function, xs, ys, popt, weights)
            perr = max(np.sqrt(np.diag(pcov)))
            fit_results.update({function: {'measure': m, 'perr': perr, 'popt': popt, 'pcov': pcov}})
            # track the candidate with the smallest residual so far
            for f in fit_results:
                if fit_results[f]['measure'] <= best[1]:
                    best = f, fit_results[f]['measure']
        except RuntimeError:
            # curve_fit raises RuntimeError when it fails to converge
            if verbose:
                print('no fit found for %s' % function)
    return fit_results[best[0]]['popt'], fit_results[best[0]]['pcov'], best
def print_plot_line(function, popt, xs, ys, name, extra=''):
    """
    Write the x, y data to a 'convdat.<id>' file and append a gnuplot
    command plotting the data together with the fitted *function* (using the
    *popt* parameters) to the 'plot-fits' file.
    """
    idp = id_generator()
    # dump the raw data points for gnuplot; `with` guarantees the handle is
    # closed even on write errors (the original leaked it on exceptions)
    with open('convdat.' + str(idp), mode='w') as f:
        for n in range(0, len(ys), 1):
            f.write(str(xs[n]) + ' ' + str(ys[n]) + '\n')
        f.write('\n')
    line = ''
    if function is exponential:
        line = "plot %s + %s * %s ** -x, 'convdat.%s', %s" % (popt[0], popt[1], popt[2], idp, popt[0])
    elif function is reciprocal:
        line = "plot %s + %s / x**%s, 'convdat.%s', %s" % (popt[0], popt[1], popt[2], idp, popt[0])
    elif function is single_reciprocal:
        line = "plot %s + %s / (x - %s), 'convdat.%s', %s" % (popt[0], popt[1], popt[2], idp, popt[0])
    # append the gnuplot commands for this data set
    with open('plot-fits', mode='a') as f:
        f.write('set title "' + name + ' - ' + extra + '"\n')
        f.write("set output '" + name + '-' + idp + ".gif'" + '\n')
        f.write(line + '\n')
def test_conv(xs, ys, name, tol=0.0001, extra='', verbose=False):
    """
    Test convergence of the x, y data set and report at which x it is reached.

    For tol > 0: converged when |dy(x)/dx| < tol for all x >= x_value.
    For tol < 0: converged when |y - asymptote| < |tol|, the asymptote being
    popt[0] of the best fit.

    :return: [conv, x_value, y_value, n_value, popt[0], dy/dx at n_value]
    """
    conv = False
    x_value = float('inf')
    y_value = None
    n_value = None
    popt = [None, None, None]
    if len(xs) > 2:
        # numerical derivatives of the data (x truncated to the y range)
        ds = get_derivatives(xs[0:len(ys)], ys)
        try:
            import numpy as np
            from scipy.optimize import curve_fit
            if None not in ys:
                #popt, pcov = curve_fit(exponential, xs, ys, p0_exponential(xs, ys), maxfev=8000)
                #perr = np.sqrt(np.diag(pcov))
                #print perr
                # fit all candidate functions and keep the best one
                popt, pcov, func = multy_curve_fit(xs, ys, verbose)
                #print popt
                #print pcov
                #print func
                # todo print this to file via a method in helper, as dict
                # dump the fit parameters and raw data for later inspection
                f = open(name+'.fitdat', mode='a')
                f.write('{')
                f.write('"popt": ' + str(popt) + ', ')
                f.write('"pcov": ' + str(pcov) + ', ')
                f.write('"data": [')
                for n in range(0, len(ys), 1):
                    f.write('[' + str(xs[n]) + ' ' + str(ys[n]) + ']')
                f.write(']}\n')
                f.close()
                # emit the gnuplot commands for the best fit
                print_plot_line(func[0], popt, xs, ys, name, extra=extra)
                # print 'plot ', popt[0], ' + ', popt[1], "/x**", popt[2], ', "'+name+'.convdat"'
                # print 'plot ', popt[0], ' + ', popt[1], "/x", popt[2], '/x**2, "'+name+'.convdat"'
                # id = id_generator()
                # print 'plot ', popt[0], ' + ', popt[1], "* ", popt[2], " ** -x," "'"+'convdat.'+id+"'"
        except ImportError:
            # NOTE(review): this rebinds popt to None (not [None, None, None]),
            # so `popt[0]` below raises TypeError whenever numpy/scipy are
            # missing - confirm and fix upstream.
            popt, pcov = None, None
        # scan every data point against the convergence criterion
        for n in range(0, len(ds), 1):
            if tol < 0:
                # converged towards the fitted asymptote popt[0]
                if popt[0] is not None:
                    test = abs(popt[0] - ys[n])
                else:
                    test = float('inf')
            else:
                # converged when the local derivative is below tol
                test = abs(ds[n])
            if verbose:
                print test
            if test < abs(tol):
                if verbose:
                    print 'converged'
                conv = True
                # remember the earliest x at which convergence holds
                if xs[n] < x_value:
                    x_value = xs[n]
                    y_value = ys[n]
                    n_value = n
            else:
                if verbose:
                    print 'not converged'
                # any later non-converged point resets the result
                conv = False
                x_value = float('inf')
        if n_value is None:
            return [conv, x_value, y_value, n_value, popt[0], None]
        else:
            return [conv, x_value, y_value, n_value, popt[0], ds[n_value]]
    else:
        # too few points to judge convergence
        return [conv, x_value, y_value, n_value, popt[0], None]
convergence
"""
function for calculating the convergence of an x, y data set
main function:
test_conv(xs, ys, name, tol)
tries to fit multiple functions to the x, y data
calculates which function fits best
for tol < 0
returns the x value for which y is converged within tol of the assymtotic value
for tol > 0
returns the x_value for which dy(x)/dx < tol for all x >= x_value, conv is true is such a x_value exists
for the best fit a gnuplot line is printed plotting the data, the function and the assymthotic value
"""
from __future__ import division
__author__ = "Michiel van Setten"
__copyright__ = " "
__version__ = "0.9"
__maintainer__ = "Michiel van Setten"
__email__ = "mjvansetten@gmail.com"
__date__ = "May 2014"
import string
import random
def id_generator(size=8, chars=string.ascii_uppercase + string.digits):
    """Return a random identifier of *size* characters drawn from *chars*."""
    picked = [random.choice(chars) for _ in range(size)]
    return ''.join(picked)
class SplineInputError(Exception):
    """Signals that the supplied data is unsuitable for spline interpolation."""

    def __init__(self, msg):
        # keep the message on the instance, mirroring the original contract
        self.msg = msg
def get_derivatives(xs, ys):
    """
    Return the derivatives dy/dx of y(x) at the points xs.

    If scipy is available and there are at least 4 points, a smoothing
    spline is fitted and its first derivative is evaluated at xs.
    Otherwise the one-sided finite-difference slopes are computed for every
    point and, where both exist, their average is returned.
    """
    d = None
    if len(xs) >= 4:
        # a cubic smoothing spline needs at least 4 data points
        try:
            from scipy.interpolate import UnivariateSpline
            spline = UnivariateSpline(xs, ys)
            d = spline.derivative(1)(xs)
        except ImportError:
            d = None
    if d is None:
        # finite-difference fallback
        d = []
        for n in range(len(xs)):
            slopes = []
            if n > 0:
                # left (backward) slope; guarded so the first point no longer
                # silently wraps around to the last one via negative indexing
                slopes.append((ys[n] - ys[n - 1]) / (xs[n] - xs[n - 1]))
            if n < len(xs) - 1:
                # right (forward) slope
                slopes.append((ys[n + 1] - ys[n]) / (xs[n + 1] - xs[n]))
            # average of the available one-sided slopes; fixes the original
            # operator-precedence bug `left + right / m` and the counter `m`
            # never being reset between points
            d.append(sum(slopes) / len(slopes))
    return d
"""
functions used in the fitting procedure, with initial guesses
"""
def reciprocal(x, a, b, n):
    """
    Reciprocal function a + b / x**n used to fit convergence data.

    The exponent n is clamped to the interval [1, 5].
    """
    import numpy as np
    if n < 1:
        n = 1
    elif n > 5:
        n = 5
    if isinstance(x, list):
        return np.array([a + b / x_v ** n for x_v in x])
    return a + b / x ** n
def p0_reciprocal(xs, ys):
    """
    Initial-guess predictor for the parameters of ``reciprocal``.

    Uses the last y as the asymptote and scales the prefactor from the
    first data point; the exponent starts at 1.
    """
    asymptote = ys[-1]
    prefactor = ys[0] * xs[0] - asymptote * xs[0]
    return [asymptote, prefactor, 1]
def exponential(x, a, b, n):
    """
    Exponential function a + b * n**-x used to fit convergence data.

    The base n is clamped to [1.000001, 1.2] and the prefactor b to [-10, 10].
    """
    import numpy as np
    if n < 1.000001:
        n = 1.000001
    elif n > 1.2:
        n = 1.2
    if b < -10:
        b = -10
    elif b > 10:
        b = 10
    if isinstance(x, list):
        return np.array([a + b * n ** -x_v for x_v in x])
    return a + b * n ** -x
def p0_exponential(xs, ys):
    """
    Initial-guess predictor for the parameters of ``exponential``.

    Starts from a base just above 1 and derives the prefactor and offset
    from the first and last data points.
    """
    base = 1.005
    prefactor = (base ** -xs[-1] - base ** -xs[0]) / (ys[-1] - ys[0])
    offset = ys[0] - prefactor * base ** -xs[0]
    return [offset, prefactor, base]
def single_reciprocal(x, a, b, c):
    """
    Single-pole reciprocal function a + b / (x - c) used to fit convergence data.
    """
    import numpy as np
    if isinstance(x, list):
        return np.array([a + b / (x_v - c) for x_v in x])
    return a + b / (x - c)
def p0_single_reciprocal(xs, ys):
    """
    Initial-guess predictor for the parameters of ``single_reciprocal``.

    Places the pole at 1 and derives slope and offset from the endpoints.
    """
    pole = 1
    slope = (1 / (xs[-1] - pole) - 1 / (xs[0] - pole)) / (ys[-1] - ys[0])
    offset = ys[0] - slope / (xs[0] - pole)
    return [offset, slope, pole]
def measure(function, xs, ys, popt, weights):
    """
    Measure the fit quality as the weighted sum of absolute residuals.

    Only 3-parameter functions are supported; anything else raises
    NotImplementedError (but, as before, only when there is data to scan).
    """
    total = 0
    for idx, x in enumerate(xs):
        if len(popt) != 3:
            # mirror the original behaviour: complain only when iterating
            raise NotImplementedError
        total += abs(ys[idx] - function(x, popt[0], popt[1], popt[2])) * weights[idx]
    return total
def multy_curve_fit(xs, ys, verbose):
    """
    Fit multiple trial functions to the x, y data and return the best fit.

    Each candidate function is fitted with ``scipy.optimize.curve_fit``
    starting from its predictor's initial guess; the fit with the smallest
    weighted residual (see ``measure``) wins.

    :return: (popt, pcov, best) of the best fit, where best is a
        (function, measure) tuple.
    """
    functions = {exponential: p0_exponential, reciprocal: p0_reciprocal, single_reciprocal: p0_single_reciprocal}
    import numpy as np
    from scipy.optimize import curve_fit
    fit_results = {}
    best = ['', np.inf]
    # derivatives of the data; np.array so the element-wise weight expression
    # works (fixes the original `get_derivatives()` call that was missing its
    # required arguments). Hoisted out of the loop: it does not depend on the
    # candidate function.
    d = np.array(get_derivatives(xs, ys))
    # slowly varying points count more; normalised to the first derivative
    weights = abs(1 / d * d[0])
    for function in functions:
        try:
            popt, pcov = curve_fit(function, xs, ys, functions[function](xs, ys), maxfev=8000, sigma=weights)
            m = measure(function, xs, ys, popt, weights)
            perr = max(np.sqrt(np.diag(pcov)))
            fit_results.update({function: {'measure': m, 'perr': perr, 'popt': popt, 'pcov': pcov}})
            # track the candidate with the smallest residual so far
            for f in fit_results:
                if fit_results[f]['measure'] <= best[1]:
                    best = f, fit_results[f]['measure']
        except RuntimeError:
            # curve_fit raises RuntimeError when it fails to converge
            if verbose:
                print('no fit found for %s' % function)
    return fit_results[best[0]]['popt'], fit_results[best[0]]['pcov'], best
def print_plot_line(function, popt, xs, ys, name, extra=''):
    """
    Write the x, y data to a 'convdat.<id>' file and append a gnuplot
    command plotting the data together with the fitted *function* (using the
    *popt* parameters) to the 'plot-fits' file.
    """
    idp = id_generator()
    # dump the raw data points for gnuplot; `with` guarantees the handle is
    # closed even on write errors (the original leaked it on exceptions)
    with open('convdat.' + str(idp), mode='w') as f:
        for n in range(0, len(ys), 1):
            f.write(str(xs[n]) + ' ' + str(ys[n]) + '\n')
        f.write('\n')
    line = ''
    if function is exponential:
        line = "plot %s + %s * %s ** -x, 'convdat.%s', %s" % (popt[0], popt[1], popt[2], idp, popt[0])
    elif function is reciprocal:
        line = "plot %s + %s / x**%s, 'convdat.%s', %s" % (popt[0], popt[1], popt[2], idp, popt[0])
    elif function is single_reciprocal:
        line = "plot %s + %s / (x - %s), 'convdat.%s', %s" % (popt[0], popt[1], popt[2], idp, popt[0])
    # append the gnuplot commands for this data set
    with open('plot-fits', mode='a') as f:
        f.write('set title "' + name + ' - ' + extra + '"\n')
        f.write("set output '" + name + '-' + idp + ".gif'" + '\n')
        f.write(line + '\n')
def test_conv(xs, ys, name, tol=0.0001, extra='', verbose=False):
    """
    Test convergence of the x, y data set and report at which x it is reached.

    For tol > 0: converged when |dy(x)/dx| < tol for all x >= x_value.
    For tol < 0: converged when |y - asymptote| < |tol|, the asymptote being
    popt[0] of the best fit.

    :return: [conv, x_value, y_value, n_value, popt[0], dy/dx at n_value]
    """
    conv = False
    x_value = float('inf')
    y_value = None
    n_value = None
    popt = [None, None, None]
    if len(xs) > 2:
        # numerical derivatives of the data (x truncated to the y range)
        ds = get_derivatives(xs[0:len(ys)], ys)
        try:
            import numpy as np
            from scipy.optimize import curve_fit
            if None not in ys:
                #popt, pcov = curve_fit(exponential, xs, ys, p0_exponential(xs, ys), maxfev=8000)
                #perr = np.sqrt(np.diag(pcov))
                #print perr
                # fit all candidate functions and keep the best one
                popt, pcov, func = multy_curve_fit(xs, ys, verbose)
                #print popt
                #print pcov
                #print func
                # todo print this to file via a method in helper, as dict
                # dump the fit parameters and raw data for later inspection
                f = open(name+'.fitdat', mode='a')
                f.write('{')
                f.write('"popt": ' + str(popt) + ', ')
                f.write('"pcov": ' + str(pcov) + ', ')
                f.write('"data": [')
                for n in range(0, len(ys), 1):
                    f.write('[' + str(xs[n]) + ' ' + str(ys[n]) + ']')
                f.write(']}\n')
                f.close()
                # emit the gnuplot commands for the best fit
                print_plot_line(func[0], popt, xs, ys, name, extra=extra)
                # print 'plot ', popt[0], ' + ', popt[1], "/x**", popt[2], ', "'+name+'.convdat"'
                # print 'plot ', popt[0], ' + ', popt[1], "/x", popt[2], '/x**2, "'+name+'.convdat"'
                # id = id_generator()
                # print 'plot ', popt[0], ' + ', popt[1], "* ", popt[2], " ** -x," "'"+'convdat.'+id+"'"
        except ImportError:
            # NOTE(review): this rebinds popt to None (not [None, None, None]),
            # so `popt[0]` below raises TypeError whenever numpy/scipy are
            # missing - confirm and fix upstream.
            popt, pcov = None, None
        # scan every data point against the convergence criterion
        for n in range(0, len(ds), 1):
            if tol < 0:
                # converged towards the fitted asymptote popt[0]
                if popt[0] is not None:
                    test = abs(popt[0] - ys[n])
                else:
                    test = float('inf')
            else:
                # converged when the local derivative is below tol
                test = abs(ds[n])
            if verbose:
                print test
            if test < abs(tol):
                if verbose:
                    print 'converged'
                conv = True
                # remember the earliest x at which convergence holds
                if xs[n] < x_value:
                    x_value = xs[n]
                    y_value = ys[n]
                    n_value = n
            else:
                if verbose:
                    print 'not converged'
                # any later non-converged point resets the result
                conv = False
                x_value = float('inf')
        if n_value is None:
            return [conv, x_value, y_value, n_value, popt[0], None]
        else:
            return [conv, x_value, y_value, n_value, popt[0], ds[n_value]]
    else:
        # too few points to judge convergence
        return [conv, x_value, y_value, n_value, popt[0], None]
|
# -*- coding: utf-8 -*-
##
## $Id$
##
## This file is part of CDS Invenio.
## Copyright (C) 2002, 2003, 2004, 2005, 2006 CERN.
##
## CDS Invenio is free software; you can redistribute it and/or
## modify it under the terms of the GNU General Public License as
## published by the Free Software Foundation; either version 2 of the
## License, or (at your option) any later version.
##
## CDS Invenio is distributed in the hope that it will be useful, but
## WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
## General Public License for more details.
##
## You should have received a copy of the GNU General Public License
## along with CDS Invenio; if not, write to the Free Software Foundation, Inc.,
## 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
"""BibFormat element - Prints publcation information and link to ejournal
"""
__revision__ = "$Id$"
from urllib import quote
import cgi
def format(bfo):
    """
    Displays inline publication information with html link to ejournal
    (when available).
    """
    out = ''
    publication_info = bfo.field('909C4')
    if publication_info == "":
        # nothing to display for records without publication info
        return ""
    journal_source = publication_info.get('p')
    # map the journal abbreviation onto the knowledge-base ejournal name
    journal = bfo.kb('ejournals', journal_source)
    volume = publication_info.get('v')
    year = publication_info.get('y')
    number = publication_info.get('n')
    pages = publication_info.get('c')
    # Escape only the values that are present: .get() returns None for
    # missing subfields, and cgi.escape raises on None (the original escaped
    # unconditionally and crashed on incomplete records).
    if journal is not None:
        journal = cgi.escape(journal)
    if volume is not None:
        volume = cgi.escape(volume)
    if year is not None:
        year = cgi.escape(year)
    if number is not None:
        number = cgi.escape(number)
    if pages is not None:
        pages = cgi.escape(pages)
    if journal != '' and volume is not None:
        # Enough information for a direct link into the ejournal.
        # NOTE(review): year and pages may still be None here - confirm the
        # guard should also require them before concatenating.
        out += '<a href="http://weblib.cern.ch/cgi-bin/ejournals?publication='
        out += quote(journal_source)
        out += '&volume=' + volume
        out += '&year=' + year
        out += '&page='
        page = pages.split('-')  # get first page from range
        if len(page) > 0:
            out += page[0]
        out += '">%(journal)s :%(volume)s %(year)s %(page)s</a>' % {'journal': journal,
                                                                    'volume': volume,
                                                                    'year': year,
                                                                    'page': pages}
    else:
        # fall back to a plain-text citation built from whatever is present
        out += journal_source + ': '
        if volume is not None:
            out += volume
        if year is not None:
            out += ' (' + year + ') '
        if number is not None:
            out += 'no. ' + number + ', '
        if pages is not None:
            out += 'pp. ' + pages
    return out
def escape_values(bfo):
    """
    Tell BibFormat whether the output of this element must be escaped.

    Returns 0: the element takes care of escaping its own output.
    """
    return 0
Fixed bug when trying to escape 'None' values.
# -*- coding: utf-8 -*-
##
## $Id$
##
## This file is part of CDS Invenio.
## Copyright (C) 2002, 2003, 2004, 2005, 2006 CERN.
##
## CDS Invenio is free software; you can redistribute it and/or
## modify it under the terms of the GNU General Public License as
## published by the Free Software Foundation; either version 2 of the
## License, or (at your option) any later version.
##
## CDS Invenio is distributed in the hope that it will be useful, but
## WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
## General Public License for more details.
##
## You should have received a copy of the GNU General Public License
## along with CDS Invenio; if not, write to the Free Software Foundation, Inc.,
## 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
"""BibFormat element - Prints publcation information and link to ejournal
"""
__revision__ = "$Id$"
from urllib import quote
import cgi
def format(bfo):
    """
    Displays inline publication information with html link to ejournal
    (when available).
    """
    out = ''
    publication_info = bfo.field('909C4')
    if publication_info == "":
        # nothing to display for records without publication info
        return ""
    journal_source = publication_info.get('p')
    # map the journal abbreviation onto the knowledge-base ejournal name
    journal = bfo.kb('ejournals', journal_source)
    volume = publication_info.get('v')
    year = publication_info.get('y')
    number = publication_info.get('n')
    pages = publication_info.get('c')
    # escape only the values that are present; .get() returns None for
    # missing subfields and cgi.escape cannot handle None
    if journal is not None:
        journal = cgi.escape(journal)
    if volume is not None:
        volume = cgi.escape(volume)
    if year is not None:
        year = cgi.escape(year)
    if number is not None:
        number = cgi.escape(number)
    if pages is not None:
        pages = cgi.escape(pages)
    if journal != '' and volume is not None:
        # enough information for a direct link into the ejournal
        # NOTE(review): year and pages may still be None here - confirm the
        # guard should also require them before concatenating.
        out += '<a href="http://weblib.cern.ch/cgi-bin/ejournals?publication='
        out += quote(journal_source)
        out += '&volume=' + volume
        out += '&year=' + year
        out += '&page='
        page = pages.split('-')# get first page from range
        if len(page) > 0:
            out += page[0]
        out += '">%(journal)s :%(volume)s %(year)s %(page)s</a>' % {'journal': journal,
                                                                    'volume': volume,
                                                                    'year': year,
                                                                    'page': pages}
    else:
        # fall back to a plain-text citation built from whatever is present
        out += journal_source + ': '
        if volume is not None:
            out += volume
        if year is not None:
            out += ' (' + year + ') '
        if number is not None:
            out += 'no. ' + number + ', '
        if pages is not None:
            out += 'pp. ' + pages
    return out
def escape_values(bfo):
    """
    Tell BibFormat whether the output of this element must be escaped.

    Returns 0: the element takes care of escaping its own output.
    """
    return 0
|
"""Django settings for use within the docker container."""
from os import environ
import dj_database_url
from .base import *
# Disable debug mode
DEBUG = False
SECRET_KEY = environ.get('SECRET_KEY') or 'please-change-me'
PROJECT_ROOT = (
environ.get('PROJECT_ROOT') or dirname(dirname(abspath(__file__))))
SERVICE_DIRECTORY_API_BASE_URL = environ.get(
'SERVICE_DIRECTORY_API_BASE_URL', '')
SERVICE_DIRECTORY_API_USERNAME = environ.get(
'SERVICE_DIRECTORY_API_USERNAME', '')
SERVICE_DIRECTORY_API_PASSWORD = environ.get(
'SERVICE_DIRECTORY_API_PASSWORD', '')
GOOGLE_PLACES_API_SERVER_KEY = environ.get(
'GOOGLE_PLACES_API_SERVER_KEY', '')
RAVEN_DSN = environ.get('RAVEN_DSN')
RAVEN_CONFIG = {'dsn': RAVEN_DSN} if RAVEN_DSN else {}
COMPRESS_OFFLINE = True
DATABASES = {
'default': dj_database_url.config(
default='sqlite:///%s' % (join(PROJECT_ROOT, 'gemmolo.db'),))}
MEDIA_ROOT = join(PROJECT_ROOT, 'media')
STATIC_ROOT = join(PROJECT_ROOT, 'static')
LOCALE_PATHS = (
join(PROJECT_ROOT, "locale"),
)
Import production settings
"""Django settings for use within the docker container."""
from os import environ
import dj_database_url
from .production import *
# Disable debug mode
DEBUG = False
SECRET_KEY = environ.get('SECRET_KEY') or 'please-change-me'
PROJECT_ROOT = (
environ.get('PROJECT_ROOT') or dirname(dirname(abspath(__file__))))
SERVICE_DIRECTORY_API_BASE_URL = environ.get(
'SERVICE_DIRECTORY_API_BASE_URL', '')
SERVICE_DIRECTORY_API_USERNAME = environ.get(
'SERVICE_DIRECTORY_API_USERNAME', '')
SERVICE_DIRECTORY_API_PASSWORD = environ.get(
'SERVICE_DIRECTORY_API_PASSWORD', '')
GOOGLE_PLACES_API_SERVER_KEY = environ.get(
'GOOGLE_PLACES_API_SERVER_KEY', '')
RAVEN_DSN = environ.get('RAVEN_DSN')
RAVEN_CONFIG = {'dsn': RAVEN_DSN} if RAVEN_DSN else {}
COMPRESS_OFFLINE = True
DATABASES = {
'default': dj_database_url.config(
default='sqlite:///%s' % (join(PROJECT_ROOT, 'gemmolo.db'),))}
MEDIA_ROOT = join(PROJECT_ROOT, 'media')
STATIC_ROOT = join(PROJECT_ROOT, 'static')
LOCALE_PATHS = (
join(PROJECT_ROOT, "locale"),
)
|
# -*- coding: utf-8 -*-
from __future__ import (absolute_import, division, print_function,
                        unicode_literals)

# Public version identifiers: `__version__` is the short X.Y series,
# `__release__` the full release string ("dev" marks a development build).
__version__ = '0.4'
__release__ = '0.4.4dev'
Version 0.4.4
# -*- coding: utf-8 -*-
from __future__ import (absolute_import, division, print_function,
                        unicode_literals)

# Public version identifiers: `__version__` is the short X.Y series,
# `__release__` the full (final) release string.
__version__ = '0.4'
__release__ = '0.4.4'
|
import json
import operator
from collections import OrderedDict
from functools import reduce
from django.conf import settings
from django.core.exceptions import FieldDoesNotExist
from django.forms import BooleanField, CharField, ModelForm, MultipleChoiceField, ValidationError
from django.forms.widgets import HiddenInput
from django.utils.translation import ugettext_lazy as _
from shapely.geometry.geo import mapping
from c3nav.editor.models import ChangeSet, ChangeSetUpdate
class EditorFormBase(ModelForm):
    """Base class for the editor model forms built by ``create_editor_form``.

    Adds editor-specific behaviour on top of ``ModelForm``: hidden
    level/space/geometry widgets, one multiple-choice field per location
    group category (replacing the plain ``groups`` m2m field), one title
    field per configured language, and redirect-slug management.
    """
    def __init__(self, *args, request=None, **kwargs):
        # the request gives access to the current changeset for wrapped models
        self.request = request
        super().__init__(*args, **kwargs)
        creating = not self.instance.pk
        if 'level' in self.fields:
            # hide level widget
            self.fields['level'].widget = HiddenInput()
        if 'space' in self.fields:
            # hide space widget
            self.fields['space'].widget = HiddenInput()
        if 'geometry' in self.fields:
            # hide geometry widget
            self.fields['geometry'].widget = HiddenInput()
            if not creating:
                # pre-serialize the existing geometry as compact GeoJSON
                self.initial['geometry'] = json.dumps(mapping(self.instance.geometry), separators=(',', ':'))
        if 'groups' in self.fields:
            # NOTE(review): wrap_model presumably returns a changeset-aware
            # proxy of the named model — confirm in ChangeSet.wrap_model
            LocationGroupCategory = self.request.changeset.wrap_model('LocationGroupCategory')
            categories = LocationGroupCategory.objects.all().prefetch_related('groups')
            instance_groups = set(self.instance.groups.values_list('pk', flat=True)) if self.instance.pk else set()
            # replace the single m2m field with one choice field per category
            self.fields.pop('groups')
            for category in categories:
                choices = tuple((str(group.pk), group.title) for group in category.groups.all())
                # preselect the groups of this category the instance belongs to
                initial = instance_groups & set(group.pk for group in category.groups.all())
                initial = tuple(str(s) for s in initial)
                field = MultipleChoiceField(label=category.title, required=False, initial=initial, choices=choices)
                self.fields['groups_'+category.name] = field
                self.fields.move_to_end('groups_'+category.name, last=False)
        if 'category' in self.fields:
            # show category titles instead of the default str() representation
            self.fields['category'].label_from_instance = lambda obj: obj.title
        # parse titles
        self.titles = None
        if hasattr(self.instance, 'titles'):
            # one CharField per configured language, prefilled from the instance
            titles = OrderedDict((lang_code, '') for lang_code, language in settings.LANGUAGES)
            if self.instance is not None and self.instance.pk:
                titles.update(self.instance.titles)
            language_titles = dict(settings.LANGUAGES)
            # iterate in reverse so move_to_end(last=False) keeps language order
            for language in reversed(titles.keys()):
                new_title = self.data.get('title_' + language)
                if new_title is not None:
                    titles[language] = new_title
                self.fields['title_' + language] = CharField(label=language_titles.get(language, language),
                                                             required=False,
                                                             initial=titles[language].strip(), max_length=50)
                self.fields.move_to_end('title_' + language, last=False)
            self.titles = titles
        if 'name' in self.fields:
            self.fields.move_to_end('name', last=False)
        self.redirect_slugs = None
        self.add_redirect_slugs = None
        self.remove_redirect_slugs = None
        if 'slug' in self.fields:
            # extra comma-separated text field holding the redirecting slugs
            self.redirect_slugs = sorted(self.instance.redirects.values_list('slug', flat=True))
            self.fields['redirect_slugs'] = CharField(label=_('Redirecting Slugs (comma seperated)'), required=False,
                                                      initial=','.join(self.redirect_slugs))
            self.fields.move_to_end('redirect_slugs', last=False)
            self.fields.move_to_end('slug', last=False)
    def clean_redirect_slugs(self):
        """Compute slug additions/removals and validate every added slug."""
        old_redirect_slugs = set(self.redirect_slugs)
        new_redirect_slugs = set(s for s in (s.strip() for s in self.cleaned_data['redirect_slugs'].split(',')) if s)
        self.add_redirect_slugs = new_redirect_slugs - old_redirect_slugs
        self.remove_redirect_slugs = old_redirect_slugs - new_redirect_slugs
        for slug in self.add_redirect_slugs:
            # added redirect slugs must satisfy the slug field's validators
            self.fields['slug'].run_validators(slug)
        LocationSlug = self.request.changeset.wrap_model('LocationSlug')
        qs = LocationSlug.objects.filter(slug__in=self.add_redirect_slugs)
        if self.cleaned_data['slug'] in self.add_redirect_slugs:
            raise ValidationError(
                _('Can not add redirecting slug “%s”: it\'s the slug of this object.') % self.cleaned_data['slug']
            )
        else:
            qs = qs.exclude(pk=self.instance.pk)
        # [:1] — one conflicting slug is enough to report an error
        for slug in qs.values_list('slug', flat=True)[:1]:
            raise ValidationError(
                _('Can not add redirecting slug “%s”: it is already used elsewhere.') % slug
            )
    def clean(self):
        # geometry is hidden but still mandatory whenever the field exists
        if 'geometry' in self.fields:
            if not self.cleaned_data.get('geometry'):
                raise ValidationError('Missing geometry.')
        super().clean()
    def _save_m2m(self):
        """Write the per-category groups_* fields back into the groups m2m."""
        super()._save_m2m()
        try:
            field = self._meta.model._meta.get_field('groups')
        except FieldDoesNotExist:
            pass
        else:
            if field.many_to_many:
                # merge the selections of all groups_* fields into one set
                groups = reduce(operator.or_, (set(value) for name, value in self.cleaned_data.items()
                                               if name.startswith('groups_')), set())
                groups = tuple((int(val) if val.isdigit() else val) for val in groups)
                self.instance.groups.set(groups)
def create_editor_form(editor_model):
    """Build and return an ``EditorFormBase`` subclass for *editor_model*.

    Only the names from ``possible_fields`` that actually exist on the
    model become form fields, in the order given by ``possible_fields``.
    Reverse relations (one-to-many) are excluded since they are not
    editable form fields.
    """
    possible_fields = ['slug', 'name', 'altitude', 'category', 'width', 'groups', 'color', 'public',
                       'can_search', 'can_describe', 'outside', 'stuffed', 'geometry',
                       'priority', 'single', 'allow_levels', 'allow_spaces', 'allow_areas', 'allow_pois',
                       'left', 'top', 'right', 'bottom']
    # set for O(1) membership tests while preserving possible_fields order
    field_names = set(field.name for field in editor_model._meta.get_fields() if not field.one_to_many)
    existing_fields = [name for name in possible_fields if name in field_names]

    # EditorFormBase already inherits from ModelForm, so listing ModelForm
    # as an additional base was redundant; the MRO is unchanged without it.
    class EditorForm(EditorFormBase):
        class Meta:
            model = editor_model
            fields = existing_fields

    # rename the class after the model; keep __qualname__ in sync so that
    # repr() and tracebacks show the same name as __name__
    EditorForm.__name__ = editor_model.__name__ + 'EditorForm'
    EditorForm.__qualname__ = EditorForm.__name__
    return EditorForm
class ChangeSetForm(ModelForm):
    """Form for editing a changeset's title and description."""
    class Meta:
        model = ChangeSet
        fields = ('title', 'description')
class RejectForm(ModelForm):
    """Form for rejecting a changeset with a comment."""
    # extra non-model field: whether this rejection is final
    final = BooleanField(label=_('Final rejection'), required=False)
    class Meta:
        model = ChangeSetUpdate
        fields = ('comment', )
Exclude group categories according to their allow_* flags, instead of offering every category for every model.
import json
import operator
from collections import OrderedDict
from functools import reduce
from django.conf import settings
from django.core.exceptions import FieldDoesNotExist
from django.forms import BooleanField, CharField, ModelForm, MultipleChoiceField, ValidationError
from django.forms.widgets import HiddenInput
from django.utils.translation import ugettext_lazy as _
from shapely.geometry.geo import mapping
from c3nav.editor.models import ChangeSet, ChangeSetUpdate
class EditorFormBase(ModelForm):
    """Base class for the editor model forms built by ``create_editor_form``.

    Adds editor-specific behaviour on top of ``ModelForm``: hidden
    level/space/geometry widgets, one multiple-choice field per applicable
    location group category (replacing the plain ``groups`` m2m field),
    one title field per configured language, and redirect-slug management.
    """
    def __init__(self, *args, request=None, **kwargs):
        # the request gives access to the current changeset for wrapped models
        self.request = request
        super().__init__(*args, **kwargs)
        creating = not self.instance.pk
        if 'level' in self.fields:
            # hide level widget
            self.fields['level'].widget = HiddenInput()
        if 'space' in self.fields:
            # hide space widget
            self.fields['space'].widget = HiddenInput()
        if 'geometry' in self.fields:
            # hide geometry widget
            self.fields['geometry'].widget = HiddenInput()
            if not creating:
                # pre-serialize the existing geometry as compact GeoJSON
                self.initial['geometry'] = json.dumps(mapping(self.instance.geometry), separators=(',', ':'))
        if 'groups' in self.fields:
            # NOTE(review): wrap_model presumably returns a changeset-aware
            # proxy of the named model — confirm in ChangeSet.wrap_model
            LocationGroupCategory = self.request.changeset.wrap_model('LocationGroupCategory')
            # only offer categories whose allow_<related_name> flag permits
            # this model (e.g. allow_spaces for the Space model)
            kwargs = {'allow_'+self._meta.model._meta.default_related_name: True}
            categories = LocationGroupCategory.objects.filter(**kwargs).prefetch_related('groups')
            instance_groups = set(self.instance.groups.values_list('pk', flat=True)) if self.instance.pk else set()
            # replace the single m2m field with one choice field per category
            self.fields.pop('groups')
            for category in categories:
                choices = tuple((str(group.pk), group.title) for group in category.groups.all())
                # preselect the groups of this category the instance belongs to
                initial = instance_groups & set(group.pk for group in category.groups.all())
                initial = tuple(str(s) for s in initial)
                field = MultipleChoiceField(label=category.title, required=False, initial=initial, choices=choices)
                self.fields['groups_'+category.name] = field
                self.fields.move_to_end('groups_'+category.name, last=False)
        if 'category' in self.fields:
            # show category titles instead of the default str() representation
            self.fields['category'].label_from_instance = lambda obj: obj.title
        # parse titles
        self.titles = None
        if hasattr(self.instance, 'titles'):
            # one CharField per configured language, prefilled from the instance
            titles = OrderedDict((lang_code, '') for lang_code, language in settings.LANGUAGES)
            if self.instance is not None and self.instance.pk:
                titles.update(self.instance.titles)
            language_titles = dict(settings.LANGUAGES)
            # iterate in reverse so move_to_end(last=False) keeps language order
            for language in reversed(titles.keys()):
                new_title = self.data.get('title_' + language)
                if new_title is not None:
                    titles[language] = new_title
                self.fields['title_' + language] = CharField(label=language_titles.get(language, language),
                                                             required=False,
                                                             initial=titles[language].strip(), max_length=50)
                self.fields.move_to_end('title_' + language, last=False)
            self.titles = titles
        if 'name' in self.fields:
            self.fields.move_to_end('name', last=False)
        self.redirect_slugs = None
        self.add_redirect_slugs = None
        self.remove_redirect_slugs = None
        if 'slug' in self.fields:
            # extra comma-separated text field holding the redirecting slugs
            self.redirect_slugs = sorted(self.instance.redirects.values_list('slug', flat=True))
            self.fields['redirect_slugs'] = CharField(label=_('Redirecting Slugs (comma seperated)'), required=False,
                                                      initial=','.join(self.redirect_slugs))
            self.fields.move_to_end('redirect_slugs', last=False)
            self.fields.move_to_end('slug', last=False)
    def clean_redirect_slugs(self):
        """Compute slug additions/removals and validate every added slug."""
        old_redirect_slugs = set(self.redirect_slugs)
        new_redirect_slugs = set(s for s in (s.strip() for s in self.cleaned_data['redirect_slugs'].split(',')) if s)
        self.add_redirect_slugs = new_redirect_slugs - old_redirect_slugs
        self.remove_redirect_slugs = old_redirect_slugs - new_redirect_slugs
        for slug in self.add_redirect_slugs:
            # added redirect slugs must satisfy the slug field's validators
            self.fields['slug'].run_validators(slug)
        LocationSlug = self.request.changeset.wrap_model('LocationSlug')
        qs = LocationSlug.objects.filter(slug__in=self.add_redirect_slugs)
        if self.cleaned_data['slug'] in self.add_redirect_slugs:
            raise ValidationError(
                _('Can not add redirecting slug “%s”: it\'s the slug of this object.') % self.cleaned_data['slug']
            )
        else:
            qs = qs.exclude(pk=self.instance.pk)
        # [:1] — one conflicting slug is enough to report an error
        for slug in qs.values_list('slug', flat=True)[:1]:
            raise ValidationError(
                _('Can not add redirecting slug “%s”: it is already used elsewhere.') % slug
            )
    def clean(self):
        # geometry is hidden but still mandatory whenever the field exists
        if 'geometry' in self.fields:
            if not self.cleaned_data.get('geometry'):
                raise ValidationError('Missing geometry.')
        super().clean()
    def _save_m2m(self):
        """Write the per-category groups_* fields back into the groups m2m."""
        super()._save_m2m()
        try:
            field = self._meta.model._meta.get_field('groups')
        except FieldDoesNotExist:
            pass
        else:
            if field.many_to_many:
                # merge the selections of all groups_* fields into one set
                groups = reduce(operator.or_, (set(value) for name, value in self.cleaned_data.items()
                                               if name.startswith('groups_')), set())
                groups = tuple((int(val) if val.isdigit() else val) for val in groups)
                self.instance.groups.set(groups)
def create_editor_form(editor_model):
    """Build and return an ``EditorFormBase`` subclass for *editor_model*.

    Only the names from ``possible_fields`` that actually exist on the
    model become form fields, in the order given by ``possible_fields``.
    Reverse relations (one-to-many) are excluded since they are not
    editable form fields.
    """
    possible_fields = ['slug', 'name', 'altitude', 'category', 'width', 'groups', 'color', 'public',
                       'can_search', 'can_describe', 'outside', 'stuffed', 'geometry',
                       'priority', 'single', 'allow_levels', 'allow_spaces', 'allow_areas', 'allow_pois',
                       'left', 'top', 'right', 'bottom']
    # set for O(1) membership tests while preserving possible_fields order
    field_names = set(field.name for field in editor_model._meta.get_fields() if not field.one_to_many)
    existing_fields = [name for name in possible_fields if name in field_names]

    # EditorFormBase already inherits from ModelForm, so listing ModelForm
    # as an additional base was redundant; the MRO is unchanged without it.
    class EditorForm(EditorFormBase):
        class Meta:
            model = editor_model
            fields = existing_fields

    # rename the class after the model; keep __qualname__ in sync so that
    # repr() and tracebacks show the same name as __name__
    EditorForm.__name__ = editor_model.__name__ + 'EditorForm'
    EditorForm.__qualname__ = EditorForm.__name__
    return EditorForm
class ChangeSetForm(ModelForm):
    """Form for editing a changeset's title and description."""
    class Meta:
        model = ChangeSet
        fields = ('title', 'description')
class RejectForm(ModelForm):
    """Form for rejecting a changeset with a comment."""
    # extra non-model field: whether this rejection is final
    final = BooleanField(label=_('Final rejection'), required=False)
    class Meta:
        model = ChangeSetUpdate
        fields = ('comment', )
|
# $Id: ethernet.py 65 2010-03-26 02:53:51Z dugsong $
# -*- coding: utf-8 -*-
"""Ethernet II, LLC (802.3+802.2), LLC/SNAP, and Novell raw 802.3,
with automatic 802.1q, MPLS, PPPoE, and Cisco ISL decapsulation."""
from copy import copy
import dpkt
import llc
ETH_CRC_LEN = 4
ETH_HDR_LEN = 14
ETH_LEN_MIN = 64 # minimum frame length with CRC
ETH_LEN_MAX = 1518 # maximum frame length with CRC
ETH_MTU = (ETH_LEN_MAX - ETH_HDR_LEN - ETH_CRC_LEN)
ETH_MIN = (ETH_LEN_MIN - ETH_HDR_LEN - ETH_CRC_LEN)
# Ethernet payload types - http://standards.ieee.org/regauth/ethertype
ETH_TYPE_PUP = 0x0200 # PUP protocol
ETH_TYPE_IP = 0x0800 # IP protocol
ETH_TYPE_ARP = 0x0806 # address resolution protocol
ETH_TYPE_AOE = 0x88a2 # AoE protocol
ETH_TYPE_CDP = 0x2000 # Cisco Discovery Protocol
ETH_TYPE_DTP = 0x2004 # Cisco Dynamic Trunking Protocol
ETH_TYPE_REVARP = 0x8035 # reverse addr resolution protocol
ETH_TYPE_8021Q = 0x8100 # IEEE 802.1Q VLAN tagging
ETH_TYPE_IPX = 0x8137 # Internetwork Packet Exchange
ETH_TYPE_IP6 = 0x86DD # IPv6 protocol
ETH_TYPE_PPP = 0x880B # PPP
ETH_TYPE_MPLS = 0x8847 # MPLS
ETH_TYPE_MPLS_MCAST = 0x8848 # MPLS Multicast
ETH_TYPE_PPPoE_DISC = 0x8863 # PPP Over Ethernet Discovery Stage
ETH_TYPE_PPPoE = 0x8864 # PPP Over Ethernet Session Stage
ETH_TYPE_LLDP = 0x88CC # Link Layer Discovery Protocol
class Ethernet(dpkt.Packet):
    """Ethernet II frame.

    On unpack, transparently strips 802.1Q VLAN tags (up to two, QinQ),
    MPLS label stacks and Cisco ISL headers, and decodes Novell "raw"
    802.3 and LLC payloads.
    """
    __hdr__ = (
        ('dst', '6s', ''),
        ('src', '6s', ''),
        ('type', 'H', ETH_TYPE_IP)
    )
    # ethertype -> payload class dispatch table, filled by __load_types()
    _typesw = {}
    def _unpack_data(self, buf):
        # strip VLAN tags / MPLS labels from buf, then decode the payload
        if self.type == ETH_TYPE_8021Q:
            self.vlan_tags = []
            # support up to 2 tags (double tagging aka QinQ)
            for _ in range(2):
                tag = VLANtag8021Q(buf)
                buf = buf[tag.__hdr_len__:]
                self.vlan_tags.append(tag)
                self.type = tag.type
                if self.type != ETH_TYPE_8021Q:
                    break
            # backward compatibility, use the 1st tag
            self.vlanid, self.priority, self.cfi = self.vlan_tags[0].as_tuple()
        elif self.type == ETH_TYPE_MPLS or self.type == ETH_TYPE_MPLS_MCAST:
            self.labels = []  # old list containing labels as tuples
            self.mpls_labels = []  # new list containing labels as instances of MPLSlabel
            # XXX - max # of labels is undefined, just use 24
            for i in range(24):
                lbl = MPLSlabel(buf)
                buf = buf[lbl.__hdr_len__:]
                self.mpls_labels.append(lbl)
                self.labels.append(lbl.as_tuple())
                if lbl.s:  # bottom of stack
                    break
            # NOTE(review): assumes the MPLS payload is IPv4 — confirm
            self.type = ETH_TYPE_IP
        try:
            self.data = self._typesw[self.type](buf)
            # also expose the payload under its class name, e.g. eth.ip
            setattr(self, self.data.__class__.__name__.lower(), self.data)
        except (KeyError, dpkt.UnpackError):
            # unknown ethertype or truncated payload: keep the raw bytes
            self.data = buf
    def unpack(self, buf):
        dpkt.Packet.unpack(self, buf)
        if self.type > 1500:
            # Ethernet II
            self._unpack_data(self.data)
        elif (self.dst.startswith('\x01\x00\x0c\x00\x00') or
              self.dst.startswith('\x03\x00\x0c\x00\x00')):
            # Cisco ISL
            tag = VLANtagISL(buf)
            buf = buf[tag.__hdr_len__:]
            self.vlan_tags = [tag]
            self.vlan = tag.id  # backward compatibility
            # re-parse the encapsulated Ethernet frame after the ISL header
            self.unpack(buf)
        elif self.data.startswith('\xff\xff'):
            # Novell "raw" 802.3
            self.type = ETH_TYPE_IPX
            self.data = self.ipx = self._typesw[ETH_TYPE_IPX](self.data[2:])
        else:
            # type <= 1500 is a length field: IEEE 802.3 with LLC payload
            self.data = self.llc = llc.LLC(self.data)
    def pack_hdr(self):
        tags_buf = ''
        orig_type = copy(self.type)  # packing should not modify self.type
        if getattr(self, 'mpls_labels', None):
            # mark all labels with s=0, last one with s=1
            for lbl in self.mpls_labels:
                lbl.s = 0
            lbl.s = 1
            # set encapsulation type
            if not (self.type == ETH_TYPE_MPLS or self.type == ETH_TYPE_MPLS_MCAST):
                self.type = ETH_TYPE_MPLS
            tags_buf = ''.join(lbl.pack_hdr() for lbl in self.mpls_labels)
        elif getattr(self, 'vlan_tags', None):
            # set encapsulation types
            t1 = self.vlan_tags[0]
            if len(self.vlan_tags) == 1:
                if isinstance(t1, VLANtag8021Q):
                    t1.type = orig_type
                    self.type = ETH_TYPE_8021Q
                elif isinstance(t1, VLANtagISL):
                    t1.type = 0  # 0 means Ethernet
                    # the ISL header precedes the whole Ethernet header
                    return t1.pack_hdr() + dpkt.Packet.pack_hdr(self)
            elif len(self.vlan_tags) == 2:
                t2 = self.vlan_tags[1]
                if isinstance(t1, VLANtag8021Q) and isinstance(t2, VLANtag8021Q):
                    t2.type = orig_type
                    self.type = t1.type = ETH_TYPE_8021Q
            else:
                raise dpkt.PackError('maximum is 2 VLAN tags per Ethernet frame')
            tags_buf = ''.join(tag.pack_hdr() for tag in self.vlan_tags)
        # if self.data is LLC then this is IEEE 802.3 Ethernet and self.type
        # then actually encodes the length of data
        if isinstance(self.data, llc.LLC):
            self.type = len(self.data)
        buf = dpkt.Packet.pack_hdr(self) + tags_buf
        self.type = orig_type  # restore self.type after packing
        return buf
    def __len__(self):
        # include any VLAN tag / MPLS label headers in the total length
        tags = getattr(self, 'mpls_labels', []) + getattr(self, 'vlan_tags', [])
        return self.__hdr_len__ + len(self.data) + sum(t.__hdr_len__ for t in tags)
    @classmethod
    def set_type(cls, t, pktclass):
        # register pktclass as the decoder for ethertype t
        cls._typesw[t] = pktclass
    @classmethod
    def get_type(cls, t):
        return cls._typesw[t]
# XXX - auto-load Ethernet dispatch table from ETH_TYPE_* definitions
def __load_types():
    """Fill Ethernet's dispatch table from the ETH_TYPE_* constants.

    For every ETH_TYPE_FOO constant, try to import a module named ``foo``
    and register its ``FOO`` class; missing modules or classes are
    skipped silently.
    """
    g = globals()
    # NOTE: dict.iteritems() is Python 2 only
    for k, v in g.iteritems():
        if k.startswith('ETH_TYPE_'):
            name = k[9:]
            modname = name.lower()
            try:
                mod = __import__(modname, g)
                Ethernet.set_type(v, getattr(mod, name))
            except (ImportError, AttributeError):
                continue
# populate the dispatch table once, on first import
if not Ethernet._typesw:
    __load_types()
# Misc protocols
class MPLSlabel(dpkt.Packet):
    """A single entry in MPLS label stack"""
    __hdr__ = (
        ('_val_exp_s_ttl', 'I', 0),
    )
    # field names are according to RFC3032
    def unpack(self, buf):
        """Decode the packed 32-bit word into its four bitfields."""
        dpkt.Packet.unpack(self, buf)
        raw = self._val_exp_s_ttl
        self.val = raw >> 12           # label value, 20 bits
        self.exp = (raw >> 9) & 0x7    # experimental use, 3 bits
        self.s = (raw >> 8) & 0x1      # bottom of stack flag, 1 bit
        self.ttl = raw & 0xff          # time to live, 8 bits
        self.data = ''
    def pack_hdr(self):
        """Re-assemble the 32-bit word from the bitfield attributes."""
        packed = (self.val & 0xfffff) << 12
        packed |= (self.exp & 0x7) << 9
        packed |= (self.s & 0x1) << 8
        packed |= self.ttl & 0xff
        self._val_exp_s_ttl = packed
        return dpkt.Packet.pack_hdr(self)
    def as_tuple(self):
        """Backward-compatible (value, exp, ttl) representation."""
        return self.val, self.exp, self.ttl
class VLANtag8021Q(dpkt.Packet):
    """IEEE 802.1q VLAN tag"""
    __hdr__ = (
        ('_pri_cfi_id', 'H', 0),
        ('type', 'H', ETH_TYPE_IP)
    )
    def unpack(self, buf):
        """Decode the packed 16-bit TCI into its three bitfields."""
        dpkt.Packet.unpack(self, buf)
        raw = self._pri_cfi_id
        self.pri = raw >> 13           # priority, 3 bits
        self.cfi = (raw >> 12) & 0x1   # canonical format indicator, 1 bit
        self.id = raw & 0x0fff         # VLAN id, 12 bits
        self.data = ''
    def pack_hdr(self):
        """Re-assemble the 16-bit TCI from the bitfield attributes."""
        packed = (self.pri & 0x7) << 13
        packed |= (self.cfi & 0x1) << 12
        packed |= self.id & 0xfff
        self._pri_cfi_id = packed
        return dpkt.Packet.pack_hdr(self)
    def as_tuple(self):
        """Backward-compatible (id, pri, cfi) representation."""
        return self.id, self.pri, self.cfi
class VLANtagISL(dpkt.Packet):
    """Cisco Inter-Switch Link VLAN tag"""
    __hdr__ = (
        ('da', '5s', '\x01\x00\x0c\x00\x00'),
        ('_type_pri', 'B', 3),
        ('sa', '6s', ''),
        ('len', 'H', 0),
        ('snap', '3s', '\xaa\xaa\x03'),
        ('hsa', '3s', '\x00\x00\x0c'),
        ('_id_bpdu', 'H', 0),
        ('indx', 'H', 0),
        ('res', 'H', 0)
    )
    def unpack(self, buf):
        """Split the packed _type_pri and _id_bpdu bytes into attributes."""
        dpkt.Packet.unpack(self, buf)
        self.type = (self._type_pri & 0xf0) >> 4  # encapsulation type, 4 bits; 0 means Ethernet
        self.pri = self._type_pri & 0x03  # user defined bits, 2 bits are used; means priority
        self.id = self._id_bpdu >> 1  # VLAN id
        self.bpdu = self._id_bpdu & 1  # BPDU indicator, 1 bit
        self.data = ''
    def pack_hdr(self):
        """Re-pack the bitfield attributes before emitting the header."""
        self._type_pri = ((self.type & 0xf) << 4) | (self.pri & 0x3)
        self._id_bpdu = ((self.id & 0x7fff) << 1) | (self.bpdu & 1)
        return dpkt.Packet.pack_hdr(self)
# Unit tests
def test_eth(): # TODO recheck this test
import ip6
s = ('\x00\xb0\xd0\xe1\x80\x72\x00\x11\x24\x8c\x11\xde\x86\xdd\x60\x00\x00\x00'
'\x00\x28\x06\x40\xfe\x80\x00\x00\x00\x00\x00\x00\x02\x11\x24\xff\xfe\x8c'
'\x11\xde\xfe\x80\x00\x00\x00\x00\x00\x00\x02\xb0\xd0\xff\xfe\xe1\x80\x72'
'\xcd\xd3\x00\x16\xff\x50\xd7\x13\x00\x00\x00\x00\xa0\x02\xff\xff\x67\xd3'
'\x00\x00\x02\x04\x05\xa0\x01\x03\x03\x00\x01\x01\x08\x0a\x7d\x18\x3a\x61'
'\x00\x00\x00\x00')
eth = Ethernet(s)
assert eth
assert isinstance(eth.data, ip6.IP6)
assert str(eth) == s
assert len(eth) == len(s)
def test_mpls_label():
s = '\x00\x01\x0b\xff'
m = MPLSlabel(s)
assert m.val == 16
assert m.exp == 5
assert m.s == 1
assert m.ttl == 255
assert str(m) == s
assert len(m) == len(s)
def test_802dot1q_tag():
s = '\xa0\x76\x01\x65'
t = VLANtag8021Q(s)
assert t.pri == 5
assert t.cfi == 0
assert t.id == 118
assert str(t) == s
t.cfi = 1
assert str(t) == '\xb0\x76\x01\x65'
assert len(t) == len(s)
def test_isl_tag():
s = ('\x01\x00\x0c\x00\x00\x03\x00\x02\xfd\x2c\xb8\x97\x00\x00\xaa\xaa\x03\x00\x00\x00\x04\x57'
'\x00\x00\x00\x00')
t = VLANtagISL(s)
assert t.pri == 3
assert t.id == 555
assert t.bpdu == 1
assert str(t) == s
assert len(t) == len(s)
def test_eth_802dot1q():
import ip
s = ('\x00\x60\x08\x9f\xb1\xf3\x00\x40\x05\x40\xef\x24\x81\x00\x90\x20\x08'
'\x00\x45\x00\x00\x34\x3b\x64\x40\x00\x40\x06\xb7\x9b\x83\x97\x20\x81'
'\x83\x97\x20\x15\x04\x95\x17\x70\x51\xd4\xee\x9c\x51\xa5\x5b\x36\x80'
'\x10\x7c\x70\x12\xc7\x00\x00\x01\x01\x08\x0a\x00\x04\xf0\xd4\x01\x99'
'\xa3\xfd')
eth = Ethernet(s)
assert eth.cfi == 1
assert eth.vlanid == 32
assert eth.priority == 4
assert len(eth.vlan_tags) == 1
assert eth.vlan_tags[0].type == ETH_TYPE_IP
assert isinstance(eth.data, ip.IP)
# construction
assert str(eth) == s, 'pack 1'
assert str(eth) == s, 'pack 2'
assert len(eth) == len(s)
# construction w/o the tag
del eth.vlan_tags, eth.cfi, eth.vlanid, eth.priority
assert str(eth) == s[:12] + '\x08\x00' + s[18:]
def test_eth_802dot1q_stacked(): # 2 VLAN tags
import arp
import ip
s = ('\x00\x1b\xd4\x1b\xa4\xd8\x00\x13\xc3\xdf\xae\x18\x81\x00\x00\x76\x81\x00\x00\x0a\x08\x00'
'\x45\x00\x00\x64\x00\x0f\x00\x00\xff\x01\x92\x9b\x0a\x76\x0a\x01\x0a\x76\x0a\x02\x08\x00'
'\xce\xb7\x00\x03\x00\x00\x00\x00\x00\x00\x00\x1f\xaf\x70\xab\xcd\xab\xcd\xab\xcd\xab\xcd'
'\xab\xcd\xab\xcd\xab\xcd\xab\xcd\xab\xcd\xab\xcd\xab\xcd\xab\xcd\xab\xcd\xab\xcd\xab\xcd'
'\xab\xcd\xab\xcd\xab\xcd\xab\xcd\xab\xcd\xab\xcd\xab\xcd\xab\xcd\xab\xcd\xab\xcd\xab\xcd'
'\xab\xcd\xab\xcd\xab\xcd\xab\xcd\xab\xcd\xab\xcd')
eth = Ethernet(s)
assert eth.type == ETH_TYPE_IP
assert len(eth.vlan_tags) == 2
assert eth.vlan_tags[0].id == 118
assert eth.vlan_tags[1].id == 10
assert eth.vlan_tags[0].type == ETH_TYPE_8021Q
assert eth.vlan_tags[1].type == ETH_TYPE_IP
assert [t.as_tuple() for t in eth.vlan_tags] == [(118, 0, 0), (10, 0, 0)]
assert isinstance(eth.data, ip.IP)
# construction
assert str(eth) == s, 'pack 1'
assert str(eth) == s, 'pack 2'
assert len(eth) == len(s)
# construction w/o the tags
del eth.vlan_tags, eth.cfi, eth.vlanid, eth.priority
assert str(eth) == s[:12] + '\x08\x00' + s[22:]
# 2 VLAN tags + ARP
s = ('\xff\xff\xff\xff\xff\xff\xca\x03\x0d\xb4\x00\x1c\x81\x00\x00\x64\x81\x00\x00\xc8\x08\x06'
'\x00\x01\x08\x00\x06\x04\x00\x01\xca\x03\x0d\xb4\x00\x1c\xc0\xa8\x02\xc8\x00\x00\x00\x00'
'\x00\x00\xc0\xa8\x02\xfe\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00')
eth = Ethernet(s)
assert len(eth.vlan_tags) == 2
assert eth.vlan_tags[0].type == ETH_TYPE_8021Q
assert eth.vlan_tags[1].type == ETH_TYPE_ARP
assert isinstance(eth.data, arp.ARP)
def test_eth_mpls_stacked(): # 2 MPLS labels
import ip
s = ('\x00\x30\x96\xe6\xfc\x39\x00\x30\x96\x05\x28\x38\x88\x47\x00\x01\x20\xff\x00\x01\x01\xff'
'\x45\x00\x00\x64\x00\x50\x00\x00\xff\x01\xa7\x06\x0a\x1f\x00\x01\x0a\x22\x00\x01\x08\x00'
'\xbd\x11\x0f\x65\x12\xa0\x00\x00\x00\x00\x00\x53\x9e\xe0\xab\xcd\xab\xcd\xab\xcd\xab\xcd'
'\xab\xcd\xab\xcd\xab\xcd\xab\xcd\xab\xcd\xab\xcd\xab\xcd\xab\xcd\xab\xcd\xab\xcd\xab\xcd'
'\xab\xcd\xab\xcd\xab\xcd\xab\xcd\xab\xcd\xab\xcd\xab\xcd\xab\xcd\xab\xcd\xab\xcd\xab\xcd'
'\xab\xcd\xab\xcd\xab\xcd\xab\xcd\xab\xcd\xab\xcd')
eth = Ethernet(s)
assert len(eth.mpls_labels) == 2
assert eth.mpls_labels[0].val == 18
assert eth.mpls_labels[1].val == 16
assert eth.labels == [(18, 0, 255), (16, 0, 255)]
assert isinstance(eth.data, ip.IP)
# construction
assert str(eth) == s, 'pack 1'
assert str(eth) == s, 'pack 2'
assert len(eth) == len(s)
# construction w/o labels
del eth.labels, eth.mpls_labels
assert str(eth) == s[:12] + '\x08\x00' + s[22:]
def test_isl_eth_llc_stp(): # ISL VLAN - Ethernet - LLC - STP
import llc
import stp
s = ('\x01\x00\x0c\x00\x00\x03\x00\x02\xfd\x2c\xb8\x97\x00\x00\xaa\xaa\x03\x00\x00\x00\x02\x9b'
'\x00\x00\x00\x00\x01\x80\xc2\x00\x00\x00\x00\x02\xfd\x2c\xb8\x98\x00\x26\x42\x42\x03\x00'
'\x00\x00\x00\x00\x80\x00\x00\x02\xfd\x2c\xb8\x83\x00\x00\x00\x00\x80\x00\x00\x02\xfd\x2c'
'\xb8\x83\x80\x26\x00\x00\x14\x00\x02\x00\x0f\x00\x00\x00\x00\x00\x00\x00\x00\x00\x41\xc6'
'\x75\xd6')
eth = Ethernet(s)
assert eth.vlan == 333
assert len(eth.vlan_tags) == 1
assert eth.vlan_tags[0].id == 333
assert eth.vlan_tags[0].pri == 3
# stack
assert isinstance(eth.data, llc.LLC)
assert isinstance(eth.data.data, stp.STP)
assert str(eth) == s, 'pack 1'
assert str(eth) == s, 'pack 2'
assert len(eth) == len(s)
def test_eth_llc_snap_cdp(): # Ethernet - LLC/SNAP - CDP
import cdp
import llc
s = ('\x01\x00\x0c\xcc\xcc\xcc\xc4\x022k\x00\x00\x01T\xaa\xaa\x03\x00\x00\x0c \x00\x02\xb4,B'
'\x00\x01\x00\x06R2\x00\x05\x00\xffCisco IOS Software, 3700 Software (C3745-ADVENTERPRI'
'SEK9_SNA-M), Version 12.4(25d), RELEASE SOFTWARE (fc1)\nTechnical Support: http://www.'
'cisco.com/techsupport\nCopyright (c) 1986-2010 by Cisco Systems, Inc.\nCompiled Wed 18'
'-Aug-10 08:18 by prod_rel_team\x00\x06\x00\x0eCisco 3745\x00\x02\x00\x11\x00\x00\x00\x01'
'\x01\x01\xcc\x00\x04\n\x00\x00\x02\x00\x03\x00\x13FastEthernet0/0\x00\x04\x00\x08\x00'
'\x00\x00)\x00\t\x00\x04\x00\x0b\x00\x05\x00')
eth = Ethernet(s)
# stack
assert isinstance(eth.data, llc.LLC)
assert isinstance(eth.data.data, cdp.CDP)
assert len(eth.data.data.data) == 8 # number of CDP TLVs; ensures they are decoded
assert str(eth) == s, 'pack 1'
assert str(eth) == s, 'pack 2'
assert len(eth) == len(s)
def test_eth_llc_ipx(): # 802.3 Ethernet - LLC - IPX
import ipx
import llc
s = ('\xff\xff\xff\xff\xff\xff\x00\xb0\xd0\x22\xf7\xf3\x00\x54\xe0\xe0\x03\xff\xff\x00\x50\x00'
'\x14\x00\x00\x00\x00\xff\xff\xff\xff\xff\xff\x04\x55\x00\x00\x00\x00\x00\xb0\xd0\x22\xf7'
'\xf3\x04\x55\x00\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x01\x01\x02\x5f\x5f\x4d\x53\x42'
'\x52\x4f\x57\x53\x45\x5f\x5f\x02\x01\x00')
eth = Ethernet(s)
# stack
assert isinstance(eth.data, llc.LLC)
assert isinstance(eth.data.data, ipx.IPX)
assert eth.data.data.pt == 0x14
assert str(eth) == s, 'pack 1'
assert str(eth) == s, 'pack 2'
assert len(eth) == len(s)
if __name__ == '__main__':
    # run all unit tests when this module is executed directly
    test_eth()
    test_mpls_label()
    test_802dot1q_tag()
    test_isl_tag()
    test_eth_802dot1q()
    test_eth_802dot1q_stacked()
    test_eth_mpls_stacked()
    test_isl_eth_llc_stp()
    test_eth_llc_snap_cdp()
    test_eth_llc_ipx()
    print 'Tests Successful...'  # Python 2 print statement
Add a reverse ethertype mapping and unpack string data passed via constructor kwargs; also fix #83.
# $Id: ethernet.py 65 2010-03-26 02:53:51Z dugsong $
# -*- coding: utf-8 -*-
"""Ethernet II, LLC (802.3+802.2), LLC/SNAP, and Novell raw 802.3,
with automatic 802.1q, MPLS, PPPoE, and Cisco ISL decapsulation."""
from copy import copy
import dpkt
import llc
ETH_CRC_LEN = 4
ETH_HDR_LEN = 14
ETH_LEN_MIN = 64 # minimum frame length with CRC
ETH_LEN_MAX = 1518 # maximum frame length with CRC
ETH_MTU = (ETH_LEN_MAX - ETH_HDR_LEN - ETH_CRC_LEN)
ETH_MIN = (ETH_LEN_MIN - ETH_HDR_LEN - ETH_CRC_LEN)
# Ethernet payload types - http://standards.ieee.org/regauth/ethertype
ETH_TYPE_PUP = 0x0200 # PUP protocol
ETH_TYPE_IP = 0x0800 # IP protocol
ETH_TYPE_ARP = 0x0806 # address resolution protocol
ETH_TYPE_AOE = 0x88a2 # AoE protocol
ETH_TYPE_CDP = 0x2000 # Cisco Discovery Protocol
ETH_TYPE_DTP = 0x2004 # Cisco Dynamic Trunking Protocol
ETH_TYPE_REVARP = 0x8035 # reverse addr resolution protocol
ETH_TYPE_8021Q = 0x8100 # IEEE 802.1Q VLAN tagging
ETH_TYPE_IPX = 0x8137 # Internetwork Packet Exchange
ETH_TYPE_IP6 = 0x86DD # IPv6 protocol
ETH_TYPE_PPP = 0x880B # PPP
ETH_TYPE_MPLS = 0x8847 # MPLS
ETH_TYPE_MPLS_MCAST = 0x8848 # MPLS Multicast
ETH_TYPE_PPPoE_DISC = 0x8863 # PPP Over Ethernet Discovery Stage
ETH_TYPE_PPPoE = 0x8864 # PPP Over Ethernet Session Stage
ETH_TYPE_LLDP = 0x88CC # Link Layer Discovery Protocol
class Ethernet(dpkt.Packet):
    """Ethernet II frame.

    On unpack, transparently strips 802.1Q VLAN tags (up to two, QinQ),
    MPLS label stacks and Cisco ISL headers, and decodes Novell "raw"
    802.3 and LLC payloads. When constructed with keyword arguments,
    a string ``data`` value is unpacked immediately.
    """
    __hdr__ = (
        ('dst', '6s', ''),
        ('src', '6s', ''),
        ('type', 'H', ETH_TYPE_IP)
    )
    # ethertype -> payload class dispatch table, filled by __load_types()
    _typesw = {}
    _typesw_rev = {}  # reverse mapping
    def __init__(self, *args, **kwargs):
        dpkt.Packet.__init__(self, *args, **kwargs)
        # if data was given in kwargs, try to unpack it
        # NOTE: basestring is Python 2 only
        if self.data and isinstance(self.data, basestring):
            self._unpack_data(self.data)
    def _unpack_data(self, buf):
        # strip VLAN tags / MPLS labels from buf, then decode the payload
        if self.type == ETH_TYPE_8021Q:
            self.vlan_tags = []
            # support up to 2 tags (double tagging aka QinQ)
            for _ in range(2):
                tag = VLANtag8021Q(buf)
                buf = buf[tag.__hdr_len__:]
                self.vlan_tags.append(tag)
                self.type = tag.type
                if self.type != ETH_TYPE_8021Q:
                    break
            # backward compatibility, use the 1st tag
            self.vlanid, self.priority, self.cfi = self.vlan_tags[0].as_tuple()
        elif self.type == ETH_TYPE_MPLS or self.type == ETH_TYPE_MPLS_MCAST:
            self.labels = []  # old list containing labels as tuples
            self.mpls_labels = []  # new list containing labels as instances of MPLSlabel
            # XXX - max # of labels is undefined, just use 24
            for i in range(24):
                lbl = MPLSlabel(buf)
                buf = buf[lbl.__hdr_len__:]
                self.mpls_labels.append(lbl)
                self.labels.append(lbl.as_tuple())
                if lbl.s:  # bottom of stack
                    break
            # NOTE(review): assumes the MPLS payload is IPv4 — confirm
            self.type = ETH_TYPE_IP
        try:
            self.data = self._typesw[self.type](buf)
            # also expose the payload under its class name, e.g. eth.ip
            setattr(self, self.data.__class__.__name__.lower(), self.data)
        except (KeyError, dpkt.UnpackError):
            # unknown ethertype or truncated payload: keep the raw bytes
            self.data = buf
    def unpack(self, buf):
        dpkt.Packet.unpack(self, buf)
        if self.type > 1500:
            # Ethernet II
            self._unpack_data(self.data)
        elif (self.dst.startswith('\x01\x00\x0c\x00\x00') or
              self.dst.startswith('\x03\x00\x0c\x00\x00')):
            # Cisco ISL
            tag = VLANtagISL(buf)
            buf = buf[tag.__hdr_len__:]
            self.vlan_tags = [tag]
            self.vlan = tag.id  # backward compatibility
            # re-parse the encapsulated Ethernet frame after the ISL header
            self.unpack(buf)
        elif self.data.startswith('\xff\xff'):
            # Novell "raw" 802.3
            self.type = ETH_TYPE_IPX
            self.data = self.ipx = self._typesw[ETH_TYPE_IPX](self.data[2:])
        else:
            # type <= 1500 is a length field: IEEE 802.3 with LLC payload
            self.data = self.llc = llc.LLC(self.data)
    def pack_hdr(self):
        tags_buf = ''
        orig_type = copy(self.type)  # packing should not modify self.type
        # initial type is based on next layer, pointed by self.data;
        # try to find an ETH_TYPE matching the data class
        if not isinstance(self.data, basestring):
            self.type = self._typesw_rev.get(self.data.__class__, self.type)
        if getattr(self, 'mpls_labels', None):
            # mark all labels with s=0, last one with s=1
            for lbl in self.mpls_labels:
                lbl.s = 0
            lbl.s = 1
            # set encapsulation type
            if not (self.type == ETH_TYPE_MPLS or self.type == ETH_TYPE_MPLS_MCAST):
                self.type = ETH_TYPE_MPLS
            tags_buf = ''.join(lbl.pack_hdr() for lbl in self.mpls_labels)
        elif getattr(self, 'vlan_tags', None):
            # set encapsulation types
            t1 = self.vlan_tags[0]
            if len(self.vlan_tags) == 1:
                if isinstance(t1, VLANtag8021Q):
                    t1.type = orig_type
                    self.type = ETH_TYPE_8021Q
                elif isinstance(t1, VLANtagISL):
                    t1.type = 0  # 0 means Ethernet
                    # the ISL header precedes the whole Ethernet header
                    return t1.pack_hdr() + dpkt.Packet.pack_hdr(self)
            elif len(self.vlan_tags) == 2:
                t2 = self.vlan_tags[1]
                if isinstance(t1, VLANtag8021Q) and isinstance(t2, VLANtag8021Q):
                    t2.type = orig_type
                    self.type = t1.type = ETH_TYPE_8021Q
            else:
                raise dpkt.PackError('maximum is 2 VLAN tags per Ethernet frame')
            tags_buf = ''.join(tag.pack_hdr() for tag in self.vlan_tags)
        # if self.data is LLC then this is IEEE 802.3 Ethernet and self.type
        # then actually encodes the length of data
        if isinstance(self.data, llc.LLC):
            self.type = len(self.data)
        buf = dpkt.Packet.pack_hdr(self) + tags_buf
        self.type = orig_type  # restore self.type after packing
        return buf
    def __len__(self):
        # include any VLAN tag / MPLS label headers in the total length
        tags = getattr(self, 'mpls_labels', []) + getattr(self, 'vlan_tags', [])
        return self.__hdr_len__ + len(self.data) + sum(t.__hdr_len__ for t in tags)
    @classmethod
    def set_type(cls, t, pktclass):
        # register pktclass as the decoder for ethertype t, and remember the
        # reverse mapping so pack_hdr can infer the type from the payload class
        cls._typesw[t] = pktclass
        cls._typesw_rev[pktclass] = t
    @classmethod
    def get_type(cls, t):
        return cls._typesw[t]
# XXX - auto-load Ethernet dispatch table from ETH_TYPE_* definitions
def __load_types():
    """Fill Ethernet's dispatch table from the ETH_TYPE_* constants.

    For every ETH_TYPE_FOO constant, try to import a module named ``foo``
    and register its ``FOO`` class; missing modules or classes are
    skipped silently.
    """
    g = globals()
    # NOTE: dict.iteritems() is Python 2 only
    for k, v in g.iteritems():
        if k.startswith('ETH_TYPE_'):
            name = k[9:]
            modname = name.lower()
            try:
                mod = __import__(modname, g)
                Ethernet.set_type(v, getattr(mod, name))
            except (ImportError, AttributeError):
                continue
# populate the dispatch table once, on first import
if not Ethernet._typesw:
    __load_types()
# Misc protocols
class MPLSlabel(dpkt.Packet):
    """A single entry in MPLS label stack"""
    __hdr__ = (
        ('_val_exp_s_ttl', 'I', 0),
    )
    # field names are according to RFC3032
    def unpack(self, buf):
        """Decode the packed 32-bit word into its four bitfields."""
        dpkt.Packet.unpack(self, buf)
        raw = self._val_exp_s_ttl
        self.val = raw >> 12           # label value, 20 bits
        self.exp = (raw >> 9) & 0x7    # experimental use, 3 bits
        self.s = (raw >> 8) & 0x1      # bottom of stack flag, 1 bit
        self.ttl = raw & 0xff          # time to live, 8 bits
        self.data = ''
    def pack_hdr(self):
        """Re-assemble the 32-bit word from the bitfield attributes."""
        packed = (self.val & 0xfffff) << 12
        packed |= (self.exp & 0x7) << 9
        packed |= (self.s & 0x1) << 8
        packed |= self.ttl & 0xff
        self._val_exp_s_ttl = packed
        return dpkt.Packet.pack_hdr(self)
    def as_tuple(self):
        """Backward-compatible (value, exp, ttl) representation."""
        return self.val, self.exp, self.ttl
class VLANtag8021Q(dpkt.Packet):
    """IEEE 802.1q VLAN tag"""
    __hdr__ = (
        ('_pri_cfi_id', 'H', 0),
        ('type', 'H', ETH_TYPE_IP)
    )
    def unpack(self, buf):
        """Decode the packed 16-bit TCI into its three bitfields."""
        dpkt.Packet.unpack(self, buf)
        raw = self._pri_cfi_id
        self.pri = raw >> 13           # priority, 3 bits
        self.cfi = (raw >> 12) & 0x1   # canonical format indicator, 1 bit
        self.id = raw & 0x0fff         # VLAN id, 12 bits
        self.data = ''
    def pack_hdr(self):
        """Re-assemble the 16-bit TCI from the bitfield attributes."""
        packed = (self.pri & 0x7) << 13
        packed |= (self.cfi & 0x1) << 12
        packed |= self.id & 0xfff
        self._pri_cfi_id = packed
        return dpkt.Packet.pack_hdr(self)
    def as_tuple(self):
        """Backward-compatible (id, pri, cfi) representation."""
        return self.id, self.pri, self.cfi
class VLANtagISL(dpkt.Packet):
    """Cisco Inter-Switch Link VLAN tag"""
    __hdr__ = (
        ('da', '5s', '\x01\x00\x0c\x00\x00'),
        ('_type_pri', 'B', 3),
        ('sa', '6s', ''),
        ('len', 'H', 0),
        ('snap', '3s', '\xaa\xaa\x03'),
        ('hsa', '3s', '\x00\x00\x0c'),
        ('_id_bpdu', 'H', 0),
        ('indx', 'H', 0),
        ('res', 'H', 0)
    )
    def unpack(self, buf):
        """Split the packed _type_pri and _id_bpdu bytes into attributes."""
        dpkt.Packet.unpack(self, buf)
        self.type = (self._type_pri & 0xf0) >> 4  # encapsulation type, 4 bits; 0 means Ethernet
        self.pri = self._type_pri & 0x03  # user defined bits, 2 bits are used; means priority
        self.id = self._id_bpdu >> 1  # VLAN id
        self.bpdu = self._id_bpdu & 1  # BPDU indicator, 1 bit
        self.data = ''
    def pack_hdr(self):
        """Re-pack the bitfield attributes before emitting the header."""
        self._type_pri = ((self.type & 0xf) << 4) | (self.pri & 0x3)
        self._id_bpdu = ((self.id & 0x7fff) << 1) | (self.bpdu & 1)
        return dpkt.Packet.pack_hdr(self)
# Unit tests
def test_eth():  # TODO recheck this test
    """Parse an Ethernet frame carrying IPv6/TCP and round-trip it."""
    import ip6
    raw = ('\x00\xb0\xd0\xe1\x80\x72\x00\x11\x24\x8c\x11\xde\x86\xdd\x60\x00\x00\x00'
           '\x00\x28\x06\x40\xfe\x80\x00\x00\x00\x00\x00\x00\x02\x11\x24\xff\xfe\x8c'
           '\x11\xde\xfe\x80\x00\x00\x00\x00\x00\x00\x02\xb0\xd0\xff\xfe\xe1\x80\x72'
           '\xcd\xd3\x00\x16\xff\x50\xd7\x13\x00\x00\x00\x00\xa0\x02\xff\xff\x67\xd3'
           '\x00\x00\x02\x04\x05\xa0\x01\x03\x03\x00\x01\x01\x08\x0a\x7d\x18\x3a\x61'
           '\x00\x00\x00\x00')
    frame = Ethernet(raw)
    assert frame
    assert isinstance(frame.data, ip6.IP6)
    assert str(frame) == raw
    assert len(frame) == len(raw)
def test_eth_init_with_data():
    """A payload given as bytes is unpacked; a payload given as an object packs the same."""
    import arp
    # initialize with a data string, test that it gets unpacked
    from_bytes = Ethernet(
        dst='PQRSTU', src='ABCDEF', type=ETH_TYPE_ARP,
        data='\x00\x01\x08\x00\x06\x04\x00\x01123456abcd7890abwxyz')
    assert isinstance(from_bytes.data, arp.ARP)
    # now initialize with a class, test packing
    from_obj = Ethernet(
        dst='PQRSTU', src='ABCDEF',
        data=arp.ARP(sha='123456', spa='abcd', tha='7890ab', tpa='wxyz'))
    assert str(from_bytes) == str(from_obj)
def test_mpls_label():
    """Decode a single MPLS stack entry and repack it byte-for-byte."""
    raw = '\x00\x01\x0b\xff'
    label = MPLSlabel(raw)
    assert label.val == 16
    assert label.exp == 5
    assert label.s == 1
    assert label.ttl == 255
    assert str(label) == raw
    assert len(label) == len(raw)
def test_802dot1q_tag():
    """Decode an 802.1q TCI, flip the CFI bit, and repack."""
    raw = '\xa0\x76\x01\x65'
    tag = VLANtag8021Q(raw)
    assert tag.pri == 5
    assert tag.cfi == 0
    assert tag.id == 118
    assert str(tag) == raw
    tag.cfi = 1
    assert str(tag) == '\xb0\x76\x01\x65'
    assert len(tag) == len(raw)
def test_isl_tag():
    """Decode a Cisco ISL header and round-trip it."""
    raw = ('\x01\x00\x0c\x00\x00\x03\x00\x02\xfd\x2c\xb8\x97\x00\x00\xaa\xaa\x03\x00\x00\x00\x04\x57'
           '\x00\x00\x00\x00')
    tag = VLANtagISL(raw)
    assert tag.pri == 3
    assert tag.id == 555
    assert tag.bpdu == 1
    assert str(tag) == raw
    assert len(tag) == len(raw)
def test_eth_802dot1q():
    """Parse a single-tagged 802.1q frame, repack it, then strip the tag."""
    import ip
    raw = ('\x00\x60\x08\x9f\xb1\xf3\x00\x40\x05\x40\xef\x24\x81\x00\x90\x20\x08'
           '\x00\x45\x00\x00\x34\x3b\x64\x40\x00\x40\x06\xb7\x9b\x83\x97\x20\x81'
           '\x83\x97\x20\x15\x04\x95\x17\x70\x51\xd4\xee\x9c\x51\xa5\x5b\x36\x80'
           '\x10\x7c\x70\x12\xc7\x00\x00\x01\x01\x08\x0a\x00\x04\xf0\xd4\x01\x99'
           '\xa3\xfd')
    frame = Ethernet(raw)
    assert frame.cfi == 1
    assert frame.vlanid == 32
    assert frame.priority == 4
    assert len(frame.vlan_tags) == 1
    assert frame.vlan_tags[0].type == ETH_TYPE_IP
    assert isinstance(frame.data, ip.IP)
    # construction
    assert str(frame) == raw, 'pack 1'
    assert str(frame) == raw, 'pack 2'
    assert len(frame) == len(raw)
    # construction w/o the tag
    del frame.vlan_tags, frame.cfi, frame.vlanid, frame.priority
    assert str(frame) == raw[:12] + '\x08\x00' + raw[18:]
def test_eth_802dot1q_stacked():
    """Parse frames carrying two stacked 802.1q tags (IP payload, then ARP payload)."""
    import arp
    import ip
    raw = ('\x00\x1b\xd4\x1b\xa4\xd8\x00\x13\xc3\xdf\xae\x18\x81\x00\x00\x76\x81\x00\x00\x0a\x08\x00'
           '\x45\x00\x00\x64\x00\x0f\x00\x00\xff\x01\x92\x9b\x0a\x76\x0a\x01\x0a\x76\x0a\x02\x08\x00'
           '\xce\xb7\x00\x03\x00\x00\x00\x00\x00\x00\x00\x1f\xaf\x70\xab\xcd\xab\xcd\xab\xcd\xab\xcd'
           '\xab\xcd\xab\xcd\xab\xcd\xab\xcd\xab\xcd\xab\xcd\xab\xcd\xab\xcd\xab\xcd\xab\xcd\xab\xcd'
           '\xab\xcd\xab\xcd\xab\xcd\xab\xcd\xab\xcd\xab\xcd\xab\xcd\xab\xcd\xab\xcd\xab\xcd\xab\xcd'
           '\xab\xcd\xab\xcd\xab\xcd\xab\xcd\xab\xcd\xab\xcd')
    frame = Ethernet(raw)
    assert frame.type == ETH_TYPE_IP
    assert len(frame.vlan_tags) == 2
    assert frame.vlan_tags[0].id == 118
    assert frame.vlan_tags[1].id == 10
    assert frame.vlan_tags[0].type == ETH_TYPE_8021Q
    assert frame.vlan_tags[1].type == ETH_TYPE_IP
    assert [tag.as_tuple() for tag in frame.vlan_tags] == [(118, 0, 0), (10, 0, 0)]
    assert isinstance(frame.data, ip.IP)
    # construction
    assert str(frame) == raw, 'pack 1'
    assert str(frame) == raw, 'pack 2'
    assert len(frame) == len(raw)
    # construction w/o the tags
    del frame.vlan_tags, frame.cfi, frame.vlanid, frame.priority
    assert str(frame) == raw[:12] + '\x08\x00' + raw[22:]
    # 2 VLAN tags + ARP
    raw = ('\xff\xff\xff\xff\xff\xff\xca\x03\x0d\xb4\x00\x1c\x81\x00\x00\x64\x81\x00\x00\xc8\x08\x06'
           '\x00\x01\x08\x00\x06\x04\x00\x01\xca\x03\x0d\xb4\x00\x1c\xc0\xa8\x02\xc8\x00\x00\x00\x00'
           '\x00\x00\xc0\xa8\x02\xfe\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00')
    frame = Ethernet(raw)
    assert len(frame.vlan_tags) == 2
    assert frame.vlan_tags[0].type == ETH_TYPE_8021Q
    assert frame.vlan_tags[1].type == ETH_TYPE_ARP
    assert isinstance(frame.data, arp.ARP)
def test_eth_mpls_stacked():
    """Parse a frame carrying two stacked MPLS labels over IP."""
    import ip
    raw = ('\x00\x30\x96\xe6\xfc\x39\x00\x30\x96\x05\x28\x38\x88\x47\x00\x01\x20\xff\x00\x01\x01\xff'
           '\x45\x00\x00\x64\x00\x50\x00\x00\xff\x01\xa7\x06\x0a\x1f\x00\x01\x0a\x22\x00\x01\x08\x00'
           '\xbd\x11\x0f\x65\x12\xa0\x00\x00\x00\x00\x00\x53\x9e\xe0\xab\xcd\xab\xcd\xab\xcd\xab\xcd'
           '\xab\xcd\xab\xcd\xab\xcd\xab\xcd\xab\xcd\xab\xcd\xab\xcd\xab\xcd\xab\xcd\xab\xcd\xab\xcd'
           '\xab\xcd\xab\xcd\xab\xcd\xab\xcd\xab\xcd\xab\xcd\xab\xcd\xab\xcd\xab\xcd\xab\xcd\xab\xcd'
           '\xab\xcd\xab\xcd\xab\xcd\xab\xcd\xab\xcd\xab\xcd')
    frame = Ethernet(raw)
    assert len(frame.mpls_labels) == 2
    assert frame.mpls_labels[0].val == 18
    assert frame.mpls_labels[1].val == 16
    assert frame.labels == [(18, 0, 255), (16, 0, 255)]
    assert isinstance(frame.data, ip.IP)
    # construction
    assert str(frame) == raw, 'pack 1'
    assert str(frame) == raw, 'pack 2'
    assert len(frame) == len(raw)
    # construction w/o labels
    del frame.labels, frame.mpls_labels
    assert str(frame) == raw[:12] + '\x08\x00' + raw[22:]
def test_isl_eth_llc_stp():
    """Parse an ISL VLAN -> Ethernet -> LLC -> STP stack and round-trip it."""
    import llc
    import stp
    raw = ('\x01\x00\x0c\x00\x00\x03\x00\x02\xfd\x2c\xb8\x97\x00\x00\xaa\xaa\x03\x00\x00\x00\x02\x9b'
           '\x00\x00\x00\x00\x01\x80\xc2\x00\x00\x00\x00\x02\xfd\x2c\xb8\x98\x00\x26\x42\x42\x03\x00'
           '\x00\x00\x00\x00\x80\x00\x00\x02\xfd\x2c\xb8\x83\x00\x00\x00\x00\x80\x00\x00\x02\xfd\x2c'
           '\xb8\x83\x80\x26\x00\x00\x14\x00\x02\x00\x0f\x00\x00\x00\x00\x00\x00\x00\x00\x00\x41\xc6'
           '\x75\xd6')
    frame = Ethernet(raw)
    assert frame.vlan == 333
    assert len(frame.vlan_tags) == 1
    assert frame.vlan_tags[0].id == 333
    assert frame.vlan_tags[0].pri == 3
    # stack
    assert isinstance(frame.data, llc.LLC)
    assert isinstance(frame.data.data, stp.STP)
    assert str(frame) == raw, 'pack 1'
    assert str(frame) == raw, 'pack 2'
    assert len(frame) == len(raw)
def test_eth_llc_snap_cdp():  # Ethernet - LLC/SNAP - CDP
    """Parse Ethernet -> LLC/SNAP -> CDP and round-trip it.

    NOTE(review): the fixture deliberately mixes printable text with hex
    escapes exactly as captured from the wire; keep it byte-for-byte.
    """
    import cdp
    import llc
    s = ('\x01\x00\x0c\xcc\xcc\xcc\xc4\x022k\x00\x00\x01T\xaa\xaa\x03\x00\x00\x0c \x00\x02\xb4,B'
         '\x00\x01\x00\x06R2\x00\x05\x00\xffCisco IOS Software, 3700 Software (C3745-ADVENTERPRI'
         'SEK9_SNA-M), Version 12.4(25d), RELEASE SOFTWARE (fc1)\nTechnical Support: http://www.'
         'cisco.com/techsupport\nCopyright (c) 1986-2010 by Cisco Systems, Inc.\nCompiled Wed 18'
         '-Aug-10 08:18 by prod_rel_team\x00\x06\x00\x0eCisco 3745\x00\x02\x00\x11\x00\x00\x00\x01'
         '\x01\x01\xcc\x00\x04\n\x00\x00\x02\x00\x03\x00\x13FastEthernet0/0\x00\x04\x00\x08\x00'
         '\x00\x00)\x00\t\x00\x04\x00\x0b\x00\x05\x00')
    eth = Ethernet(s)
    # stack
    assert isinstance(eth.data, llc.LLC)
    assert isinstance(eth.data.data, cdp.CDP)
    assert len(eth.data.data.data) == 8  # number of CDP TLVs; ensures they are decoded
    assert str(eth) == s, 'pack 1'
    assert str(eth) == s, 'pack 2'
    assert len(eth) == len(s)
def test_eth_llc_ipx():
    """Parse an 802.3 Ethernet -> LLC -> IPX stack and round-trip it."""
    import ipx
    import llc
    raw = ('\xff\xff\xff\xff\xff\xff\x00\xb0\xd0\x22\xf7\xf3\x00\x54\xe0\xe0\x03\xff\xff\x00\x50\x00'
           '\x14\x00\x00\x00\x00\xff\xff\xff\xff\xff\xff\x04\x55\x00\x00\x00\x00\x00\xb0\xd0\x22\xf7'
           '\xf3\x04\x55\x00\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
           '\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x01\x01\x02\x5f\x5f\x4d\x53\x42'
           '\x52\x4f\x57\x53\x45\x5f\x5f\x02\x01\x00')
    frame = Ethernet(raw)
    # stack
    assert isinstance(frame.data, llc.LLC)
    assert isinstance(frame.data.data, ipx.IPX)
    assert frame.data.data.pt == 0x14
    assert str(frame) == raw, 'pack 1'
    assert str(frame) == raw, 'pack 2'
    assert len(frame) == len(raw)
if __name__ == '__main__':
test_eth()
test_eth_init_with_data()
test_mpls_label()
test_802dot1q_tag()
test_isl_tag()
test_eth_802dot1q()
test_eth_802dot1q_stacked()
test_eth_mpls_stacked()
test_isl_eth_llc_stp()
test_eth_llc_snap_cdp()
test_eth_llc_ipx()
print 'Tests Successful...'
# |
"""Define celery tasks for hs_core app."""
import os
import sys
import traceback
import zipfile
import logging
import json
from datetime import datetime, timedelta, date
from xml.etree import ElementTree
import requests
from celery import shared_task
from celery.schedules import crontab
from celery.task import periodic_task
from django.conf import settings
from django.core.mail import send_mail
from django.core.exceptions import ObjectDoesNotExist
from rest_framework import status
from hs_core.hydroshare import utils
from hs_core.hydroshare.hs_bagit import create_bag_files
from hs_core.hydroshare.resource import get_activated_doi, get_resource_doi, \
get_crossref_url, deposit_res_metadata_with_crossref
from hs_odm2.models import ODM2Variable
from django_irods.storage import IrodsStorage
from theme.models import UserQuota, QuotaMessage, UserProfile, User
from django_irods.icommands import SessionException
from hs_core.models import BaseResource
from theme.utils import get_quota_message
# Pass 'django' into getLogger instead of __name__
# for celery tasks (as this seems to be the
# only way to successfully log in code executed
# by celery, despite our catch-all handler).
logger = logging.getLogger('django')
# Currently there are two different cleanups scheduled.
# One is 20 minutes after creation, the other is nightly.
# TODO Clean up zipfiles in remote federated storage as well.
@periodic_task(ignore_result=True, run_every=crontab(minute=30, hour=23))
def nightly_zips_cleanup():
    """Delete two-day-old temporary zip folders.

    Cleans the local iRODS zone first, then every distinct non-empty
    federated prefix recorded on any resource.
    """
    # folders are named by creation date; purge the one from 2 days ago
    date_folder = (date.today() - timedelta(2)).strftime('%Y-%m-%d')
    zips_daily_date = "zips/{daily_date}".format(daily_date=date_folder)
    if __debug__:
        logger.debug("cleaning up {}".format(zips_daily_date))
    istorage = IrodsStorage()
    if istorage.exists(zips_daily_date):
        istorage.delete(zips_daily_date)
    federated_prefixes = BaseResource.objects.all().values_list(
        'resource_federation_path').distinct()
    for (prefix,) in federated_prefixes:
        if prefix == "":
            continue
        zips_daily_date = "{prefix}/zips/{daily_date}"\
            .format(prefix=prefix, daily_date=date_folder)
        if __debug__:
            logger.debug("cleaning up {}".format(zips_daily_date))
        istorage = IrodsStorage("federated")
        if istorage.exists(zips_daily_date):
            istorage.delete(zips_daily_date)
@periodic_task(ignore_result=True, run_every=crontab(minute=0, hour=0))
def sync_email_subscriptions():
    """Nightly push of subscriber lists to MailChimp.

    Profiles active within the last 60 days go to the 'active' list;
    all opted-in active accounts go to the general list.
    """
    cutoff = datetime.today() - timedelta(days=60)
    recently_active = UserProfile.objects.filter(email_opt_out=False,
                                                 user__last_login__gte=cutoff,
                                                 user__is_active=True)
    sync_mailchimp(recently_active, settings.MAILCHIMP_ACTIVE_SUBSCRIBERS)
    all_subscribed = UserProfile.objects.filter(email_opt_out=False, user__is_active=True)
    sync_mailchimp(all_subscribed, settings.MAILCHIMP_SUBSCRIBERS)
def sync_mailchimp(active_subscribed, list_id):
    """Mirror the given profiles into the MailChimp list *list_id*.

    Deletes every currently-subscribed member of the list, then re-adds
    each profile in *active_subscribed*, logging counts for both phases.
    """
    session = requests.Session()
    url = "https://us3.api.mailchimp.com/3.0/lists/{list_id}/members"
    # same credentials for every call; build the auth object once
    auth = requests.auth.HTTPBasicAuth('hs-celery', settings.MAILCHIMP_PASSWORD)
    # get total members
    response = session.get(url.format(list_id=list_id), auth=auth)
    total_items = json.loads(response.content.decode())["total_items"]
    # get list of all member ids
    response = session.get(
        (url + "?offset=0&count={total_items}").format(list_id=list_id,
                                                       total_items=total_items),
        auth=auth)
    # clear the email list
    delete_count = 0
    for member in json.loads(response.content.decode())["members"]:
        if member["status"] != "subscribed":
            continue
        member_url = (url + "/{id}").format(list_id=list_id, id=member["id"])
        session_response = session.delete(member_url, auth=auth)
        if session_response.status_code == 204:
            delete_count += 1
        else:
            logger.info("Expected 204 status code, got " + str(session_response.status_code))
            logger.debug(session_response.content)
    # add active subscribed users to mailchimp
    add_count = 0
    for subscriber in active_subscribed:
        payload = {"email_address": subscriber.user.email, "status": "subscribed",
                   "merge_fields": {"FNAME": subscriber.user.first_name,
                                    "LNAME": subscriber.user.last_name}}
        session_response = session.post(url.format(list_id=list_id), json=payload, auth=auth)
        if session_response.status_code == 200:
            add_count += 1
        else:
            logger.info("Expected 200 status code, got " + str(session_response.status_code))
            logger.debug(session_response.content)
    if delete_count == active_subscribed.count():
        logger.info("successfully cleared mailchimp for list id " + list_id)
    else:
        logger.info(
            "cleared " + str(delete_count) + " out of " + str(
                active_subscribed.count()) + " for list id " + list_id)
    if active_subscribed.count() == add_count:
        logger.info("successfully synced all subscriptions for list id " + list_id)
    else:
        logger.info("added " + str(add_count) + " out of " + str(
            active_subscribed.count()) + " for list id " + list_id)
@periodic_task(ignore_result=True, run_every=crontab(minute=0, hour=0))
def manage_task_nightly():
    """Nightly DOI bookkeeping for published resources.

    Two passes over published resources:
      1. DOIs flagged 'failure': retry CrossRef metadata deposition; on
         HTTP 200 flip the flag back to 'pending' and rebuild the bag.
      2. DOIs flagged 'pending': poll CrossRef's submissionDownload
         endpoint; when the result shows records and zero failures,
         activate the DOI and rebuild the bag.
    Anything still unresolved (or lacking a published date) is collected
    into msg_lst and emailed to support at the end.
    """
    msg_lst = []
    # retrieve all published resources with failed metadata deposition with CrossRef if any and
    # retry metadata deposition
    failed_resources = BaseResource.objects.filter(raccess__published=True, doi__contains='failure')
    for res in failed_resources:
        if res.metadata.dates.all().filter(type='published'):
            pub_date = res.metadata.dates.all().filter(type='published')[0]
            pub_date = pub_date.start_date.strftime('%m/%d/%Y')
            act_doi = get_activated_doi(res.doi)
            response = deposit_res_metadata_with_crossref(res)
            if response.status_code == status.HTTP_200_OK:
                # retry of metadata deposition succeeds, change resource flag from failure
                # to pending
                res.doi = get_resource_doi(act_doi, 'pending')
                res.save()
                # create bag and compute checksum for published resource to meet DataONE requirement
                create_bag_by_irods(res.short_id)
            else:
                # retry of metadata deposition failed again, notify admin
                msg_lst.append("Metadata deposition with CrossRef for the published resource "
                               "DOI {res_doi} failed again after retry with first metadata "
                               "deposition requested since {pub_date}.".format(res_doi=act_doi,
                                                                               pub_date=pub_date))
                logger.debug(response.content)
        else:
            msg_lst.append("{res_id} does not have published date in its metadata.".format(
                res_id=res.short_id))
    pending_resources = BaseResource.objects.filter(raccess__published=True,
                                                    doi__contains='pending')
    for res in pending_resources:
        if res.metadata.dates.all().filter(type='published'):
            pub_date = res.metadata.dates.all().filter(type='published')[0]
            pub_date = pub_date.start_date.strftime('%m/%d/%Y')
            act_doi = get_activated_doi(res.doi)
            main_url = get_crossref_url()
            # poll CrossRef for the deposition result of this resource's batch
            req_str = '{MAIN_URL}servlet/submissionDownload?usr={USERNAME}&pwd=' \
                      '{PASSWORD}&doi_batch_id={DOI_BATCH_ID}&type={TYPE}'
            response = requests.get(req_str.format(MAIN_URL=main_url,
                                                   USERNAME=settings.CROSSREF_LOGIN_ID,
                                                   PASSWORD=settings.CROSSREF_LOGIN_PWD,
                                                   DOI_BATCH_ID=res.short_id,
                                                   TYPE='result'))
            root = ElementTree.fromstring(response.content)
            rec_cnt_elem = root.find('.//record_count')
            failure_cnt_elem = root.find('.//failure_count')
            success = False
            # success means CrossRef processed at least one record with no failures
            if rec_cnt_elem is not None and failure_cnt_elem is not None:
                rec_cnt = int(rec_cnt_elem.text)
                failure_cnt = int(failure_cnt_elem.text)
                if rec_cnt > 0 and failure_cnt == 0:
                    res.doi = act_doi
                    res.save()
                    success = True
                    # create bag and compute checksum for published resource to meet DataONE requirement
                    create_bag_by_irods(res.short_id)
            if not success:
                msg_lst.append("Published resource DOI {res_doi} is not yet activated with request "
                               "data deposited since {pub_date}.".format(res_doi=act_doi,
                                                                         pub_date=pub_date))
                logger.debug(response.content)
        else:
            msg_lst.append("{res_id} does not have published date in its metadata.".format(
                res_id=res.short_id))
    if msg_lst:
        email_msg = '\n'.join(msg_lst)
        subject = 'Notification of pending DOI deposition/activation of published resources'
        # send email for people monitoring and follow-up as needed
        send_mail(subject, email_msg, settings.DEFAULT_FROM_EMAIL, [settings.DEFAULT_SUPPORT_EMAIL])
@periodic_task(ignore_result=True, run_every=crontab(minute=15, hour=0, day_of_week=1,
                                                     day_of_month='1-7'))
def send_over_quota_emails():
    """Warn users whose iRODS quota usage exceeds the soft limit.

    For each active non-superuser with a UserQuota in the internal zone:
    advance/reset the grace-period counter based on usage relative to the
    soft and hard limits, then email (or, on a DEBUG server, just log) a
    quota-warning message built from the user's quota template.
    """
    # check over quota cases and send quota warning emails as needed
    hs_internal_zone = "hydroshare"
    if not QuotaMessage.objects.exists():
        QuotaMessage.objects.create()
    qmsg = QuotaMessage.objects.first()
    users = User.objects.filter(is_active=True).filter(is_superuser=False).all()
    for u in users:
        uq = UserQuota.objects.filter(user__username=u.username, zone=hs_internal_zone).first()
        if uq:
            used_percent = uq.used_percent
            if used_percent >= qmsg.soft_limit_percent:
                # between 100% and the hard limit: run the grace-period countdown
                if used_percent >= 100 and used_percent < qmsg.hard_limit_percent:
                    if uq.remaining_grace_period < 0:
                        # triggers grace period counting
                        uq.remaining_grace_period = qmsg.grace_period
                    elif uq.remaining_grace_period > 0:
                        # reduce remaining_grace_period by one day
                        uq.remaining_grace_period -= 1
                elif used_percent >= qmsg.hard_limit_percent:
                    # set grace period to 0 when user quota exceeds hard limit
                    uq.remaining_grace_period = 0
                uq.save()
                # salutation: full name if available, falling back to username
                if u.first_name and u.last_name:
                    sal_name = '{} {}'.format(u.first_name, u.last_name)
                elif u.first_name:
                    sal_name = u.first_name
                elif u.last_name:
                    sal_name = u.last_name
                else:
                    sal_name = u.username
                msg_str = 'Dear ' + sal_name + ':\n\n'
                ori_qm = get_quota_message(u)
                # make embedded settings.DEFAULT_SUPPORT_EMAIL clickable with subject auto-filled
                replace_substr = "<a href='mailto:{0}?subject=Request more quota'>{0}</a>".format(
                    settings.DEFAULT_SUPPORT_EMAIL)
                new_qm = ori_qm.replace(settings.DEFAULT_SUPPORT_EMAIL, replace_substr)
                msg_str += new_qm
                msg_str += '\n\nHydroShare Support'
                subject = 'Quota warning'
                if settings.DEBUG:
                    logger.info("quota warning email not sent out on debug server but logged instead: "
                                "{}".format(msg_str))
                else:
                    try:
                        # send email for people monitoring and follow-up as needed
                        send_mail(subject, '', settings.DEFAULT_FROM_EMAIL,
                                  [u.email, settings.DEFAULT_SUPPORT_EMAIL],
                                  html_message=msg_str)
                    except Exception as ex:
                        # NOTE(review): ex.message is Python-2-only; would need str(ex) on py3
                        logger.debug("Failed to send quota warning email: " + ex.message)
            else:
                if uq.remaining_grace_period >= 0:
                    # turn grace period off now that the user is below quota soft limit
                    uq.remaining_grace_period = -1
                    uq.save()
        else:
            logger.debug('user ' + u.username + ' does not have UserQuota foreign key relation')
@shared_task
def add_zip_file_contents_to_resource(pk, zip_file_path):
    """Add zip file contents to an existing resource and remove the tmp zip file.

    Unpacks every file in the archive at *zip_file_path* into the resource
    identified by *pk*, updating the resource's unpack status/progress
    fields as it goes.  The temporary zip file is always removed.

    Fixes over the previous version:
    - progress message started at "Imported 0" (enumerate now starts at 1)
    - the ZipFile handle was only closed on the error path; it is now
      closed in ``finally`` (before the unlink) on every path
    - bare ``except:`` narrowed to ``except Exception:`` so SystemExit /
      KeyboardInterrupt are not swallowed

    :param pk: short key of the target resource
    :param zip_file_path: filesystem path of the temporary zip to ingest
    """
    zfile = None
    resource = None
    try:
        resource = utils.get_resource_by_shortkey(pk, or_404=False)
        zfile = zipfile.ZipFile(zip_file_path)
        num_files = len(zfile.infolist())
        zcontents = utils.ZipContents(zfile)
        files = zcontents.get_files()
        resource.file_unpack_status = 'Running'
        resource.save()
        # start at 1 so the message reflects files actually imported so far
        for i, f in enumerate(files, 1):
            logger.debug("Adding file {0} to resource {1}".format(f.name, pk))
            utils.add_file_to_resource(resource, f)
            resource.file_unpack_message = "Imported {0} of about {1} file(s) ...".format(
                i, num_files)
            resource.save()
        # This might make the resource unsuitable for public consumption
        resource.update_public_and_discoverable()
        # TODO: this is a bit of a lie because a different user requested the bag overwrite
        utils.resource_modified(resource, resource.creator, overwrite_bag=False)
        # Call success callback
        resource.file_unpack_message = None
        resource.file_unpack_status = 'Done'
        resource.save()
    except BaseResource.DoesNotExist:
        msg = "Unable to add zip file contents to non-existent resource {pk}."
        msg = msg.format(pk=pk)
        logger.error(msg)
    except Exception:
        exc_info = "".join(traceback.format_exception(*sys.exc_info()))
        if resource:
            resource.file_unpack_status = 'Error'
            resource.file_unpack_message = exc_info
            resource.save()
        logger.error(exc_info)
    finally:
        # close the archive on every path, then delete the upload file
        if zfile:
            zfile.close()
        os.unlink(zip_file_path)
@shared_task
def delete_zip(zip_path):
    """Remove *zip_path* from iRODS if it is present."""
    storage = IrodsStorage()
    if storage.exists(zip_path):
        storage.delete(zip_path)
@shared_task
def create_temp_zip(resource_id, input_path, output_path, aggregation_name=None, sf_zip=False,
                    download_path='', request_username=None):
    """Create temporary zip file from input_path and store in output_path.

    Bug fix: ``download_path`` previously had no default value but followed
    defaulted parameters, which is a SyntaxError; it now defaults to ''
    (keyword and positional callers are unaffected).

    :param resource_id: the short_id of a resource
    :param input_path: full irods path of input starting with federation path
    :param output_path: full irods path of output starting with federation path
    :param aggregation_name: The name of the aggregation to zip
    :param sf_zip: signals a single file to zip
    :param download_path: download path to return as task payload
    :param request_username: the username of the requesting user
    :return: *download_path* on success, '' when the iRODS zip step fails
    """
    from hs_core.hydroshare.utils import get_resource_by_shortkey
    res = get_resource_by_shortkey(resource_id)
    aggregation = None
    if aggregation_name:
        aggregation = res.get_aggregation_by_aggregation_name(aggregation_name)
    istorage = res.get_irods_storage()  # invoke federated storage as necessary
    # regenerate aggregation metadata XML before zipping composite resources
    if res.resource_type == "CompositeResource":
        if '/data/contents/' in input_path:
            short_path = input_path.split('/data/contents/')[1]  # strip /data/contents/
            res.create_aggregation_xml_documents(path=short_path)
        else:  # all metadata included, e.g., /data/*
            res.create_aggregation_xml_documents()
    try:
        if aggregation or sf_zip:
            # input path points to single file aggregation
            # ensure that foo.zip contains aggregation metadata
            # by copying these into a temp subdirectory foo/foo parallel to where foo.zip is stored
            temp_folder_name, ext = os.path.splitext(output_path)  # strip zip to get scratch dir
            head, tail = os.path.split(temp_folder_name)  # tail is unqualified folder name "foo"
            out_with_folder = os.path.join(temp_folder_name, tail)  # foo/foo is subdir to zip
            istorage.copyFiles(input_path, out_with_folder)
            if aggregation:
                # best-effort copies: a missing metadata file is logged, not fatal
                try:
                    istorage.copyFiles(aggregation.map_file_path, temp_folder_name)
                except SessionException:
                    logger.error("cannot copy {}".format(aggregation.map_file_path))
                try:
                    istorage.copyFiles(aggregation.metadata_file_path, temp_folder_name)
                except SessionException:
                    logger.error("cannot copy {}".format(aggregation.metadata_file_path))
                for file in aggregation.files.all():
                    try:
                        istorage.copyFiles(file.storage_path, temp_folder_name)
                    except SessionException:
                        logger.error("cannot copy {}".format(file.storage_path))
            istorage.zipup(temp_folder_name, output_path)
            istorage.delete(temp_folder_name)  # delete working directory; this isn't the zipfile
        else:  # regular folder to zip
            istorage.zipup(input_path, output_path)
    except SessionException as ex:
        logger.error(ex.stderr)
        return ''
    return download_path
@shared_task
def create_bag_by_irods(resource_id, request_username=None):
    """Create a resource bag on iRODS side by running the bagit rule and ibun zip.

    This function runs as a celery task, invoked asynchronously so that it does not
    block the main web thread when it creates bags for very large files which will
    take some time.  Metadata XML files are regenerated first when the resource's
    'metadata_dirty' AVU says so; a bagit readme.txt is added if missing.

    :param resource_id: the resource uuid used to look up the resource to bag
    :param request_username: requesting user's name (currently unused here)
    :return: bag_url if bag creation operation succeeds, or raises an exception
        if the resource does not exist or any other issue prevents bag creation.
    """
    res = utils.get_resource_by_shortkey(resource_id)
    istorage = res.get_irods_storage()
    bag_path = res.bag_path
    metadata_dirty = istorage.getAVU(res.root_path, 'metadata_dirty')
    # if metadata has been changed, then regenerate metadata xml files
    if metadata_dirty is None or metadata_dirty.lower() == "true":
        create_bag_files(res)
    irods_bagit_input_path = res.get_irods_path(resource_id, prepend_short_id=False)
    # check to see if bagit readme.txt file exists or not
    bagit_readme_file = res.get_irods_path('readme.txt')
    is_bagit_readme_exist = istorage.exists(bagit_readme_file)
    if irods_bagit_input_path.startswith(resource_id):
        # resource is in data zone, need to append the full path for iRODS bagit rule execution
        irods_dest_prefix = "/" + settings.IRODS_ZONE + "/home/" + settings.IRODS_USERNAME
        irods_bagit_input_path = os.path.join(irods_dest_prefix, resource_id)
        bagit_input_resource = "*DESTRESC='{def_res}'".format(
            def_res=settings.IRODS_DEFAULT_RESOURCE)
    else:
        # this will need to be changed with the default resource in whatever federated zone the
        # resource is stored in when we have such use cases to support
        bagit_input_resource = "*DESTRESC='{def_res}'".format(
            def_res=settings.HS_IRODS_USER_ZONE_DEF_RES)
    bagit_input_path = "*BAGITDATA='{path}'".format(path=irods_bagit_input_path)
    # files to purge if bag creation fails partway through
    bagit_files = [
        res.get_irods_path('bagit.txt'),
        res.get_irods_path('manifest-md5.txt'),
        res.get_irods_path('tagmanifest-md5.txt'),
        bag_path
    ]
    # only proceed when the resource is not deleted potentially by another request
    # when being downloaded
    is_exist = istorage.exists(irods_bagit_input_path)
    if is_exist:
        # if bagit readme.txt does not exist, add it.
        if not is_bagit_readme_exist:
            from_file_name = getattr(settings, 'HS_BAGIT_README_FILE_WITH_PATH',
                                     'docs/bagit/readme.txt')
            istorage.saveFile(from_file_name, bagit_readme_file, True)
        # call iRODS bagit rule here
        bagit_rule_file = getattr(settings, 'IRODS_BAGIT_RULE',
                                  'hydroshare/irods/ruleGenerateBagIt_HS.r')
        try:
            # call iRODS run and ibun command to create and zip the bag, ignore SessionException
            # for now as a workaround which could be raised from potential race conditions when
            # multiple ibun commands try to create the same zip file or the very same resource
            # gets deleted by another request when being downloaded
            istorage.runBagitRule(bagit_rule_file, bagit_input_path, bagit_input_resource)
            istorage.zipup(irods_bagit_input_path, bag_path)
            if res.raccess.published:
                # compute checksum to meet DataONE distribution requirement
                chksum = istorage.checksum(bag_path)
                res.bag_checksum = chksum
            istorage.setAVU(irods_bagit_input_path, 'bag_modified', "false")
            return res.bag_url
        except SessionException as ex:
            # if an exception occurs, delete incomplete files potentially being generated by
            # iRODS bagit rule and zipping operations
            for fname in bagit_files:
                if istorage.exists(fname):
                    istorage.delete(fname)
            raise SessionException(-1, '', ex.stderr)
    else:
        raise ObjectDoesNotExist('Resource {} does not exist.'.format(resource_id))
@shared_task
def update_web_services(services_url, api_token, timeout, publish_urls, res_id):
    """Update web services hosted by GeoServer and HydroServer.

    This function sends a resource id to the HydroShare web services manager
    application, which will check the current status of the resource and register
    or unregister services hosted by GeoServer and HydroServer.
    The HydroShare web services manager will return a list of endpoint URLs
    for both the resource and individual aggregations. If publish_urls is set to
    True, these endpoints will be added to the extra metadata fields of the
    resource and aggregations.

    :param services_url: base URL of the web services manager
    :param api_token: token placed in the Authorization header
    :param timeout: request timeout in seconds
    :param publish_urls: when True, write returned endpoints into extra metadata
    :param res_id: short id of the resource to (un)register
    :return: the HTTP response on success; the caught exception object on failure
        (NOTE(review): errors are returned, not raised -- callers must check)
    """
    session = requests.Session()
    session.headers.update(
        {"Authorization": " ".join(("Token", str(api_token)))}
    )
    rest_url = str(services_url) + "/" + str(res_id) + "/"
    try:
        response = session.post(rest_url, timeout=timeout)
        if publish_urls and response.status_code == status.HTTP_201_CREATED:
            try:
                resource = utils.get_resource_by_shortkey(res_id)
                response_content = json.loads(response.content.decode())
                for key, value in response_content["resource"].items():
                    resource.extra_metadata[key] = value
                    resource.save()
                for url in response_content["content"]:
                    # match each returned layer to the aggregation with that name;
                    # .index() raises ValueError (caught below) when there is no match
                    logical_files = list(resource.logical_files)
                    lf = logical_files[[i.aggregation_name for i in
                                        logical_files].index(
                        url["layer_name"].encode()
                    )]
                    lf.metadata.extra_metadata["Web Services URL"] = url["message"]
                    lf.metadata.save()
            except Exception as e:
                logger.error(e)
                return e
        return response
    except (requests.exceptions.RequestException, ValueError) as e:
        logger.error(e)
        return e
@shared_task
def resource_debug(resource_id):
    """Check a resource's files for consistency between Django and iRODS.

    (Replaces a docstring that was copy-pasted from update_web_services
    and described the wrong task.)

    :param resource_id: short id of the resource to check
    :return: result of check_irods_files(..., return_errors=True) --
        presumably the collected error report; confirm in
        hs_core.management.utils
    """
    from hs_core.hydroshare.utils import get_resource_by_shortkey
    resource = get_resource_by_shortkey(resource_id)
    from hs_core.management.utils import check_irods_files
    return check_irods_files(resource, log_errors=False, return_errors=True)
@periodic_task(ignore_result=True, run_every=crontab(minute=0, hour=12))
def daily_odm2_sync():
    """Refresh the local cache of ODM2 variables, which are maintained on an external site."""
    ODM2Variable.sync()
# fixed a bug
"""Define celery tasks for hs_core app."""
import os
import sys
import traceback
import zipfile
import logging
import json
from datetime import datetime, timedelta, date
from xml.etree import ElementTree
import requests
from celery import shared_task
from celery.schedules import crontab
from celery.task import periodic_task
from django.conf import settings
from django.core.mail import send_mail
from django.core.exceptions import ObjectDoesNotExist
from rest_framework import status
from hs_core.hydroshare import utils
from hs_core.hydroshare.hs_bagit import create_bag_files
from hs_core.hydroshare.resource import get_activated_doi, get_resource_doi, \
get_crossref_url, deposit_res_metadata_with_crossref
from hs_odm2.models import ODM2Variable
from django_irods.storage import IrodsStorage
from theme.models import UserQuota, QuotaMessage, UserProfile, User
from django_irods.icommands import SessionException
from hs_core.models import BaseResource
from theme.utils import get_quota_message
# Pass 'django' into getLogger instead of __name__
# for celery tasks (as this seems to be the
# only way to successfully log in code executed
# by celery, despite our catch-all handler).
logger = logging.getLogger('django')
# Currently there are two different cleanups scheduled.
# One is 20 minutes after creation, the other is nightly.
# TODO Clean up zipfiles in remote federated storage as well.
@periodic_task(ignore_result=True, run_every=crontab(minute=30, hour=23))
def nightly_zips_cleanup():
    """Delete two-day-old temporary zip folders from local and federated iRODS zones.

    NOTE(review): this duplicates an identical definition earlier in the file;
    at import time the later definition wins.
    """
    # delete 2 days ago
    date_folder = (date.today() - timedelta(2)).strftime('%Y-%m-%d')
    zips_daily_date = "zips/{daily_date}".format(daily_date=date_folder)
    if __debug__:
        logger.debug("cleaning up {}".format(zips_daily_date))
    istorage = IrodsStorage()
    if istorage.exists(zips_daily_date):
        istorage.delete(zips_daily_date)
    federated_prefixes = BaseResource.objects.all().values_list('resource_federation_path')\
        .distinct()
    for p in federated_prefixes:
        prefix = p[0]  # strip tuple
        if prefix != "":
            zips_daily_date = "{prefix}/zips/{daily_date}"\
                .format(prefix=prefix, daily_date=date_folder)
            if __debug__:
                logger.debug("cleaning up {}".format(zips_daily_date))
            istorage = IrodsStorage("federated")
            if istorage.exists(zips_daily_date):
                istorage.delete(zips_daily_date)
@periodic_task(ignore_result=True, run_every=crontab(minute=0, hour=0))
def sync_email_subscriptions():
    """Nightly push of subscriber lists to MailChimp.

    NOTE(review): this duplicates an identical definition earlier in the file;
    at import time the later definition wins.
    """
    sixty_days = datetime.today() - timedelta(days=60)
    active_subscribed = UserProfile.objects.filter(email_opt_out=False,
                                                   user__last_login__gte=sixty_days,
                                                   user__is_active=True)
    sync_mailchimp(active_subscribed, settings.MAILCHIMP_ACTIVE_SUBSCRIBERS)
    subscribed = UserProfile.objects.filter(email_opt_out=False, user__is_active=True)
    sync_mailchimp(subscribed, settings.MAILCHIMP_SUBSCRIBERS)
def sync_mailchimp(active_subscribed, list_id):
    """Mirror *active_subscribed* profiles into Mailchimp list *list_id*.

    Deletes every currently-subscribed member of the list, then posts each
    profile in *active_subscribed* as a fresh subscription, and logs a
    summary of how many deletions/additions succeeded.
    """
    session = requests.Session()
    url = "https://us3.api.mailchimp.com/3.0/lists/{list_id}/members"
    auth = requests.auth.HTTPBasicAuth('hs-celery', settings.MAILCHIMP_PASSWORD)
    # get total members
    response = session.get(url.format(list_id=list_id), auth=auth)
    total_items = json.loads(response.content.decode())["total_items"]
    # get list of all member ids
    response = session.get(
        (url + "?offset=0&count={total_items}").format(list_id=list_id,
                                                       total_items=total_items),
        auth=auth)
    # clear the email list
    delete_count = 0
    for member in json.loads(response.content.decode())["members"]:
        if member["status"] != "subscribed":
            continue
        session_response = session.delete(
            (url + "/{id}").format(list_id=list_id, id=member["id"]), auth=auth)
        if session_response.status_code == 204:
            delete_count += 1
        else:
            logger.info("Expected 204 status code, got " + str(session_response.status_code))
            logger.debug(session_response.content)
    # add active subscribed users to mailchimp
    add_count = 0
    for subscriber in active_subscribed:
        json_data = {"email_address": subscriber.user.email, "status": "subscribed",
                     "merge_fields": {"FNAME": subscriber.user.first_name,
                                      "LNAME": subscriber.user.last_name}}
        session_response = session.post(url.format(list_id=list_id), json=json_data, auth=auth)
        if session_response.status_code == 200:
            add_count += 1
        else:
            logger.info("Expected 200 status code, got " + str(session_response.status_code))
            logger.debug(session_response.content)
    # report how the clear/add phases went
    if delete_count == active_subscribed.count():
        logger.info("successfully cleared mailchimp for list id " + list_id)
    else:
        logger.info(
            "cleared " + str(delete_count) + " out of " + str(
                active_subscribed.count()) + " for list id " + list_id)
    if active_subscribed.count() == add_count:
        logger.info("successfully synced all subscriptions for list id " + list_id)
    else:
        logger.info("added " + str(add_count) + " out of " + str(
            active_subscribed.count()) + " for list id " + list_id)
@periodic_task(ignore_result=True, run_every=crontab(minute=0, hour=0))
def manage_task_nightly():
    """Nightly DOI management for published resources.

    Retries CrossRef metadata deposition for resources whose DOI is flagged
    'failure', polls CrossRef for the activation status of resources flagged
    'pending', and emails support a summary of anything still unresolved.
    """
    # The nightly running task do DOI activation check
    # Check DOI activation on failed and pending resources and send email.
    msg_lst = []
    # retrieve all published resources with failed metadata deposition with CrossRef if any and
    # retry metadata deposition
    failed_resources = BaseResource.objects.filter(raccess__published=True, doi__contains='failure')
    for res in failed_resources:
        if res.metadata.dates.all().filter(type='published'):
            pub_date = res.metadata.dates.all().filter(type='published')[0]
            pub_date = pub_date.start_date.strftime('%m/%d/%Y')
            act_doi = get_activated_doi(res.doi)
            response = deposit_res_metadata_with_crossref(res)
            if response.status_code == status.HTTP_200_OK:
                # retry of metadata deposition succeeds, change resource flag from failure
                # to pending
                res.doi = get_resource_doi(act_doi, 'pending')
                res.save()
                # create bag and compute checksum for published resource to meet DataONE requirement
                create_bag_by_irods(res.short_id)
            else:
                # retry of metadata deposition failed again, notify admin
                msg_lst.append("Metadata deposition with CrossRef for the published resource "
                               "DOI {res_doi} failed again after retry with first metadata "
                               "deposition requested since {pub_date}.".format(res_doi=act_doi,
                                                                               pub_date=pub_date))
                logger.debug(response.content)
        else:
            msg_lst.append("{res_id} does not have published date in its metadata.".format(
                res_id=res.short_id))
    # resources whose deposition succeeded earlier but whose DOI activation is
    # not yet confirmed by CrossRef
    pending_resources = BaseResource.objects.filter(raccess__published=True,
                                                    doi__contains='pending')
    for res in pending_resources:
        if res.metadata.dates.all().filter(type='published'):
            pub_date = res.metadata.dates.all().filter(type='published')[0]
            pub_date = pub_date.start_date.strftime('%m/%d/%Y')
            act_doi = get_activated_doi(res.doi)
            main_url = get_crossref_url()
            # query CrossRef's submissionDownload servlet for the batch result
            req_str = '{MAIN_URL}servlet/submissionDownload?usr={USERNAME}&pwd=' \
                      '{PASSWORD}&doi_batch_id={DOI_BATCH_ID}&type={TYPE}'
            response = requests.get(req_str.format(MAIN_URL=main_url,
                                                   USERNAME=settings.CROSSREF_LOGIN_ID,
                                                   PASSWORD=settings.CROSSREF_LOGIN_PWD,
                                                   DOI_BATCH_ID=res.short_id,
                                                   TYPE='result'))
            root = ElementTree.fromstring(response.content)
            rec_cnt_elem = root.find('.//record_count')
            failure_cnt_elem = root.find('.//failure_count')
            success = False
            # the DOI counts as activated when CrossRef reports at least one
            # record and zero failures
            if rec_cnt_elem is not None and failure_cnt_elem is not None:
                rec_cnt = int(rec_cnt_elem.text)
                failure_cnt = int(failure_cnt_elem.text)
                if rec_cnt > 0 and failure_cnt == 0:
                    res.doi = act_doi
                    res.save()
                    success = True
                    # create bag and compute checksum for published resource to meet DataONE requirement
                    create_bag_by_irods(res.short_id)
            if not success:
                msg_lst.append("Published resource DOI {res_doi} is not yet activated with request "
                               "data deposited since {pub_date}.".format(res_doi=act_doi,
                                                                         pub_date=pub_date))
                logger.debug(response.content)
        else:
            msg_lst.append("{res_id} does not have published date in its metadata.".format(
                res_id=res.short_id))
    if msg_lst:
        email_msg = '\n'.join(msg_lst)
        subject = 'Notification of pending DOI deposition/activation of published resources'
        # send email for people monitoring and follow-up as needed
        send_mail(subject, email_msg, settings.DEFAULT_FROM_EMAIL, [settings.DEFAULT_SUPPORT_EMAIL])
@periodic_task(ignore_result=True, run_every=crontab(minute=15, hour=0, day_of_week=1,
                                                     day_of_month='1-7'))
def send_over_quota_emails():
    """Warn users who are over their quota soft limit.

    Walks every active, non-superuser account in the internal zone, updates
    its grace-period counter according to current usage, and emails a quota
    warning (or only logs it when DEBUG) built from the user's quota message.
    """
    # check over quota cases and send quota warning emails as needed
    hs_internal_zone = "hydroshare"
    if not QuotaMessage.objects.exists():
        QuotaMessage.objects.create()
    qmsg = QuotaMessage.objects.first()
    users = User.objects.filter(is_active=True).filter(is_superuser=False).all()
    for u in users:
        uq = UserQuota.objects.filter(user__username=u.username, zone=hs_internal_zone).first()
        if uq:
            used_percent = uq.used_percent
            if used_percent >= qmsg.soft_limit_percent:
                if used_percent >= 100 and used_percent < qmsg.hard_limit_percent:
                    if uq.remaining_grace_period < 0:
                        # triggers grace period counting
                        uq.remaining_grace_period = qmsg.grace_period
                    elif uq.remaining_grace_period > 0:
                        # reduce remaining_grace_period by one day
                        uq.remaining_grace_period -= 1
                elif used_percent >= qmsg.hard_limit_percent:
                    # set grace period to 0 when user quota exceeds hard limit
                    uq.remaining_grace_period = 0
                uq.save()
                # build a personal salutation from whatever name parts exist
                if u.first_name and u.last_name:
                    sal_name = '{} {}'.format(u.first_name, u.last_name)
                elif u.first_name:
                    sal_name = u.first_name
                elif u.last_name:
                    sal_name = u.last_name
                else:
                    sal_name = u.username
                msg_str = 'Dear ' + sal_name + ':\n\n'
                ori_qm = get_quota_message(u)
                # make embedded settings.DEFAULT_SUPPORT_EMAIL clickable with subject auto-filled
                replace_substr = "<a href='mailto:{0}?subject=Request more quota'>{0}</a>".format(
                    settings.DEFAULT_SUPPORT_EMAIL)
                new_qm = ori_qm.replace(settings.DEFAULT_SUPPORT_EMAIL, replace_substr)
                msg_str += new_qm
                msg_str += '\n\nHydroShare Support'
                subject = 'Quota warning'
                if settings.DEBUG:
                    logger.info("quota warning email not sent out on debug server but logged instead: "
                                "{}".format(msg_str))
                else:
                    try:
                        # send email for people monitoring and follow-up as needed
                        send_mail(subject, '', settings.DEFAULT_FROM_EMAIL,
                                  [u.email, settings.DEFAULT_SUPPORT_EMAIL],
                                  html_message=msg_str)
                    except Exception as ex:
                        # str(ex) rather than ex.message: .message is Python-2-only
                        # and is not defined for most exception types
                        logger.debug("Failed to send quota warning email: " + str(ex))
            else:
                if uq.remaining_grace_period >= 0:
                    # turn grace period off now that the user is below quota soft limit
                    uq.remaining_grace_period = -1
                    uq.save()
        else:
            logger.debug('user ' + u.username + ' does not have UserQuota foreign key relation')
@shared_task
def add_zip_file_contents_to_resource(pk, zip_file_path):
    """Add zip file contents to an existing resource and remove the tmp zip file.

    :param pk: short id of the target resource
    :param zip_file_path: local path of the temporary zip to unpack; always
        deleted on exit, success or failure
    """
    zfile = None
    resource = None
    try:
        resource = utils.get_resource_by_shortkey(pk, or_404=False)
        zfile = zipfile.ZipFile(zip_file_path)
        num_files = len(zfile.infolist())
        zcontents = utils.ZipContents(zfile)
        files = zcontents.get_files()
        resource.file_unpack_status = 'Running'
        resource.save()
        for i, f in enumerate(files):
            logger.debug("Adding file {0} to resource {1}".format(f.name, pk))
            utils.add_file_to_resource(resource, f)
            # i is zero-based; report one-based progress
            resource.file_unpack_message = "Imported {0} of about {1} file(s) ...".format(
                i + 1, num_files)
            resource.save()
        # This might make the resource unsuitable for public consumption
        resource.update_public_and_discoverable()
        # TODO: this is a bit of a lie because a different user requested the bag overwrite
        utils.resource_modified(resource, resource.creator, overwrite_bag=False)
        # Call success callback
        resource.file_unpack_message = None
        resource.file_unpack_status = 'Done'
        resource.save()
    except BaseResource.DoesNotExist:
        msg = "Unable to add zip file contents to non-existent resource {pk}."
        msg = msg.format(pk=pk)
        logger.error(msg)
    except Exception:
        # record the full traceback on the resource so the uploader can see it
        exc_info = "".join(traceback.format_exception(*sys.exc_info()))
        if resource:
            resource.file_unpack_status = 'Error'
            resource.file_unpack_message = exc_info
            resource.save()
        logger.error(exc_info)
    finally:
        # close the archive and delete the uploaded temp file in every case;
        # previously the zip was only closed on error, leaking the handle on success
        if zfile:
            zfile.close()
        os.unlink(zip_file_path)
@shared_task
def delete_zip(zip_path):
    """Remove *zip_path* from iRODS storage when it is present."""
    storage = IrodsStorage()
    if not storage.exists(zip_path):
        return
    storage.delete(zip_path)
@shared_task
def create_temp_zip(resource_id, input_path, output_path, aggregation_name=None, sf_zip=False, download_path='',
                    request_username=None):
    """ Create temporary zip file from input_path and store in output_path
    :param resource_id: the short_id of a resource
    :param input_path: full irods path of input starting with federation path
    :param output_path: full irods path of output starting with federation path
    :param aggregation_name: The name of the aggregation to zip
    :param sf_zip: signals a single file to zip
    :param download_path: download path to return as task payload
    :param request_username: the username of the requesting user
    """
    from hs_core.hydroshare.utils import get_resource_by_shortkey
    res = get_resource_by_shortkey(resource_id)
    aggregation = None
    if aggregation_name:
        aggregation = res.get_aggregation_by_aggregation_name(aggregation_name)
    istorage = res.get_irods_storage()  # invoke federated storage as necessary
    if res.resource_type == "CompositeResource":
        if '/data/contents/' in input_path:
            short_path = input_path.split('/data/contents/')[1]  # strip /data/contents/
            res.create_aggregation_xml_documents(path=short_path)
        else:  # all metadata included, e.g., /data/*
            res.create_aggregation_xml_documents()
    if aggregation or sf_zip:
        # input path points to single file aggregation
        # ensure that foo.zip contains aggregation metadata
        # by copying these into a temp subdirectory foo/foo parallel to where foo.zip is stored
        temp_folder_name, ext = os.path.splitext(output_path)  # strip zip to get scratch dir
        head, tail = os.path.split(temp_folder_name)  # tail is unqualified folder name "foo"
        out_with_folder = os.path.join(temp_folder_name, tail)  # foo/foo is subdir to zip
        istorage.copyFiles(input_path, out_with_folder)

        def _copy_to_scratch(src_path):
            """Best-effort copy of src_path into the scratch folder; failures are only logged."""
            try:
                istorage.copyFiles(src_path, temp_folder_name)
            except SessionException:
                logger.error("cannot copy {}".format(src_path))

        if aggregation:
            # include the aggregation's map and metadata XML plus every member file
            _copy_to_scratch(aggregation.map_file_path)
            _copy_to_scratch(aggregation.metadata_file_path)
            for file in aggregation.files.all():
                _copy_to_scratch(file.storage_path)
        istorage.zipup(temp_folder_name, output_path)
        istorage.delete(temp_folder_name)  # delete working directory; this isn't the zipfile
    else:  # regular folder to zip
        istorage.zipup(input_path, output_path)
    return download_path
@shared_task
def create_bag_by_irods(resource_id, request_username=None):
    """Create a resource bag on iRODS side by running the bagit rule and ibun zip.
    This function runs as a celery task, invoked asynchronously so that it does not
    block the main web thread when it creates bags for very large files which will take some time.
    :param
    resource_id: the resource uuid that is used to look for the resource to create the bag for.
    request_username: username of the requesting user (not used by this task body).
    :return: bag_url if bag creation operation succeeds or
    raise an exception if resource does not exist or any other issues that prevent bags from being created.
    """
    res = utils.get_resource_by_shortkey(resource_id)
    istorage = res.get_irods_storage()
    bag_path = res.bag_path
    metadata_dirty = istorage.getAVU(res.root_path, 'metadata_dirty')
    # if metadata has been changed, then regenerate metadata xml files
    # (a missing AVU is treated the same as "true")
    if metadata_dirty is None or metadata_dirty.lower() == "true":
        create_bag_files(res)
    irods_bagit_input_path = res.get_irods_path(resource_id, prepend_short_id=False)
    # check to see if bagit readme.txt file exists or not
    bagit_readme_file = res.get_irods_path('readme.txt')
    is_bagit_readme_exist = istorage.exists(bagit_readme_file)
    if irods_bagit_input_path.startswith(resource_id):
        # resource is in data zone, need to append the full path for iRODS bagit rule execution
        irods_dest_prefix = "/" + settings.IRODS_ZONE + "/home/" + settings.IRODS_USERNAME
        irods_bagit_input_path = os.path.join(irods_dest_prefix, resource_id)
        bagit_input_resource = "*DESTRESC='{def_res}'".format(
            def_res=settings.IRODS_DEFAULT_RESOURCE)
    else:
        # this will need to be changed with the default resource in whatever federated zone the
        # resource is stored in when we have such use cases to support
        bagit_input_resource = "*DESTRESC='{def_res}'".format(
            def_res=settings.HS_IRODS_USER_ZONE_DEF_RES)
    bagit_input_path = "*BAGITDATA='{path}'".format(path=irods_bagit_input_path)
    # files the bagit rule / zipping may leave behind; removed below on failure
    bagit_files = [
        res.get_irods_path('bagit.txt'),
        res.get_irods_path('manifest-md5.txt'),
        res.get_irods_path('tagmanifest-md5.txt'),
        bag_path
    ]
    # only proceed when the resource is not deleted potentially by another request
    # when being downloaded
    is_exist = istorage.exists(irods_bagit_input_path)
    if is_exist:
        # if bagit readme.txt does not exist, add it.
        if not is_bagit_readme_exist:
            from_file_name = getattr(settings, 'HS_BAGIT_README_FILE_WITH_PATH',
                                     'docs/bagit/readme.txt')
            istorage.saveFile(from_file_name, bagit_readme_file, True)
        # call iRODS bagit rule here
        bagit_rule_file = getattr(settings, 'IRODS_BAGIT_RULE',
                                  'hydroshare/irods/ruleGenerateBagIt_HS.r')
        try:
            # call iRODS run and ibun command to create and zip the bag, ignore SessionException
            # for now as a workaround which could be raised from potential race conditions when
            # multiple ibun commands try to create the same zip file or the very same resource
            # gets deleted by another request when being downloaded
            istorage.runBagitRule(bagit_rule_file, bagit_input_path, bagit_input_resource)
            istorage.zipup(irods_bagit_input_path, bag_path)
            if res.raccess.published:
                # compute checksum to meet DataONE distribution requirement
                chksum = istorage.checksum(bag_path)
                res.bag_checksum = chksum
            istorage.setAVU(irods_bagit_input_path, 'bag_modified', "false")
            return res.bag_url
        except SessionException as ex:
            # if an exception occurs, delete incomplete files potentially being generated by
            # iRODS bagit rule and zipping operations
            for fname in bagit_files:
                if istorage.exists(fname):
                    istorage.delete(fname)
            raise SessionException(-1, '', ex.stderr)
    else:
        raise ObjectDoesNotExist('Resource {} does not exist.'.format(resource_id))
@shared_task
def update_web_services(services_url, api_token, timeout, publish_urls, res_id):
    """Update web services hosted by GeoServer and HydroServer.
    This function sends a resource id to the HydroShare web services manager
    application, which will check the current status of the resource and register
    or unregister services hosted by GeoServer and HydroServer.
    The HydroShare web services manager will return a list of endpoint URLs
    for both the resource and individual aggregations. If publish_urls is set to
    True, these endpoints will be added to the extra metadata fields of the
    resource and aggregations.
    """
    session = requests.Session()
    # token auth against the web services manager
    session.headers.update(
        {"Authorization": " ".join(("Token", str(api_token)))}
    )
    rest_url = str(services_url) + "/" + str(res_id) + "/"
    try:
        response = session.post(rest_url, timeout=timeout)
        if publish_urls and response.status_code == status.HTTP_201_CREATED:
            try:
                resource = utils.get_resource_by_shortkey(res_id)
                response_content = json.loads(response.content.decode())
                # copy resource-level endpoint URLs into the resource's extra metadata
                for key, value in response_content["resource"].items():
                    resource.extra_metadata[key] = value
                    resource.save()
                # match each returned layer to its aggregation and record its URL
                for url in response_content["content"]:
                    logical_files = list(resource.logical_files)
                    # NOTE(review): layer_name is encoded to bytes before comparison with
                    # aggregation_name values — presumably both sides are byte strings
                    # under Python 2; verify this still matches if run on Python 3.
                    lf = logical_files[[i.aggregation_name for i in
                                        logical_files].index(
                        url["layer_name"].encode()
                    )]
                    lf.metadata.extra_metadata["Web Services URL"] = url["message"]
                    lf.metadata.save()
            except Exception as e:
                logger.error(e)
                return e
        return response
    except (requests.exceptions.RequestException, ValueError) as e:
        logger.error(e)
        return e
@shared_task
def resource_debug(resource_id):
    """Check a resource's files in iRODS for consistency and return the errors found.

    The previous docstring ("Update web services...") was copy-pasted from
    another task and did not describe this function.
    """
    from hs_core.hydroshare.utils import get_resource_by_shortkey
    resource = get_resource_by_shortkey(resource_id)
    from hs_core.management.utils import check_irods_files
    return check_irods_files(resource, log_errors=False, return_errors=True)
@periodic_task(ignore_result=True, run_every=crontab(minute=00, hour=12))
def daily_odm2_sync():
    """
    ODM2 variables are maintained on an external site; this task synchronizes
    them into HydroShare once a day for local caching.
    """
    # delegate to the ODM2Variable model's own sync routine
    ODM2Variable.sync()
|
# -*- coding: utf-8 -*-
import pytz
import re
import time
import openerp
import openerp.service.report
import uuid
from werkzeug.exceptions import BadRequest
from datetime import datetime, timedelta
from dateutil import parser
from dateutil import rrule
from dateutil.relativedelta import relativedelta
from openerp import tools, SUPERUSER_ID
from openerp.osv import fields, osv
from openerp.tools import DEFAULT_SERVER_DATE_FORMAT, DEFAULT_SERVER_DATETIME_FORMAT
from openerp.tools.translate import _
from openerp.http import request
from operator import itemgetter
import logging
_logger = logging.getLogger(__name__)
def calendar_id2real_id(calendar_id=None, with_date=False):
    """
    Convert a "virtual/recurring event id" (type string) into a real event id (type int).
    E.g. virtual/recurring event id is 4-20091201100000, so it will return 4.
    @param calendar_id: id of calendar
    @param with_date: if a value is passed to this param it will return dates based on value of withdate + calendar_id
    @return: real event id
    """
    if calendar_id and isinstance(calendar_id, (str, unicode)):
        pieces = calendar_id.split('-')
        if len(pieces) >= 2:
            real_id = pieces[0]
            if not with_date:
                return int(real_id)
            # rebuild the occurrence start from the id suffix, then derive the
            # stop time by adding `with_date` hours
            occurrence = time.strptime(pieces[1], "%Y%m%d%H%M%S")
            real_date = time.strftime(DEFAULT_SERVER_DATETIME_FORMAT, occurrence)
            start = datetime.strptime(real_date, DEFAULT_SERVER_DATETIME_FORMAT)
            stop = start + timedelta(hours=with_date)
            return (int(real_id), real_date, stop.strftime(DEFAULT_SERVER_DATETIME_FORMAT))
    return calendar_id and int(calendar_id) or calendar_id
def get_real_ids(ids):
    """Resolve one virtual id, or a collection of them, to real event ids."""
    if isinstance(ids, (str, int, long)):
        return calendar_id2real_id(ids)
    if isinstance(ids, (list, tuple)):
        return [calendar_id2real_id(one_id) for one_id in ids]
class calendar_attendee(osv.Model):
    """
    Calendar Attendee Information
    """
    _name = 'calendar.attendee'
    _rec_name = 'cn'
    _description = 'Attendee information'

    def _compute_data(self, cr, uid, ids, name, arg, context=None):
        """
        Compute data on function fields for attendee values.
        @param ids: list of calendar attendee's IDs
        @param name: name of field
        @return: dictionary of form {id: {'field Name': value'}}
        """
        # fields.function with multi= passes a list of field names; only the
        # first entry is computed here
        name = name[0]
        result = {}
        for attdata in self.browse(cr, uid, ids, context=context):
            id = attdata.id
            result[id] = {}
            if name == 'cn':
                # common name: prefer the linked partner's name, else the email
                if attdata.partner_id:
                    result[id][name] = attdata.partner_id.name or False
                else:
                    result[id][name] = attdata.email or ''
        return result

    # possible participation states of an attendee
    STATE_SELECTION = [
        ('needsAction', 'Needs Action'),
        ('tentative', 'Uncertain'),
        ('declined', 'Declined'),
        ('accepted', 'Accepted'),
    ]

    _columns = {
        'state': fields.selection(STATE_SELECTION, 'Status', readonly=True, help="Status of the attendee's participation"),
        'cn': fields.function(_compute_data, string='Common name', type="char", multi='cn', store=True),
        'partner_id': fields.many2one('res.partner', 'Contact', readonly="True"),
        'email': fields.char('Email', help="Email of Invited Person"),
        'availability': fields.selection([('free', 'Free'), ('busy', 'Busy')], 'Free/Busy', readonly="True"),
        'access_token': fields.char('Invitation Token'),
        'event_id': fields.many2one('calendar.event', 'Meeting linked'),
    }
    _defaults = {
        'state': 'needsAction',
    }

    def copy(self, cr, uid, id, default=None, context=None):
        # attendee records must not be duplicated; always refuse
        raise osv.except_osv(_('Warning!'), _('You cannot duplicate a calendar attendee.'))

    def onchange_partner_id(self, cr, uid, ids, partner_id, context=None):
        """
        Make entry on email and availability on change of partner_id field.
        @param partner_id: changed value of partner id
        """
        if not partner_id:
            return {'value': {'email': ''}}
        partner = self.pool['res.partner'].browse(cr, uid, partner_id, context=context)
        return {'value': {'email': partner.email}}

    def get_ics_file(self, cr, uid, event_obj, context=None):
        """
        Returns iCalendar file for the event invitation.
        @param event_obj: event object (browse record)
        @return: .ics file content
        """
        res = None

        def ics_datetime(idate, allday=False):
            # convert a server-format date(time) string into a UTC-aware datetime
            if idate:
                if allday:
                    return datetime.strptime(idate.split(' ')[0], DEFAULT_SERVER_DATE_FORMAT).replace(tzinfo=pytz.timezone('UTC'))
                else:
                    return datetime.strptime(idate.split('.')[0], DEFAULT_SERVER_DATETIME_FORMAT).replace(tzinfo=pytz.timezone('UTC'))
            return False
        try:
            # FIXME: why isn't this in CalDAV?
            import vobject
        except ImportError:
            # vobject is optional; without it no .ics attachment is produced
            return res
        cal = vobject.iCalendar()
        event = cal.add('vevent')
        if not event_obj.start or not event_obj.stop:
            raise osv.except_osv(_('Warning!'), _("First you have to specify the date of the invitation."))
        event.add('created').value = ics_datetime(time.strftime(DEFAULT_SERVER_DATETIME_FORMAT))
        event.add('dtstart').value = ics_datetime(event_obj.start, event_obj.allday)
        event.add('dtend').value = ics_datetime(event_obj.stop, event_obj.allday)
        event.add('summary').value = event_obj.name
        if event_obj.description:
            event.add('description').value = event_obj.description
        if event_obj.location:
            event.add('location').value = event_obj.location
        if event_obj.rrule:
            event.add('rrule').value = event_obj.rrule
        if event_obj.alarm_ids:
            # translate each alarm into a VALARM block with a relative trigger
            for alarm in event_obj.alarm_ids:
                valarm = event.add('valarm')
                interval = alarm.interval
                duration = alarm.duration
                trigger = valarm.add('TRIGGER')
                trigger.params['related'] = ["START"]
                if interval == 'days':
                    delta = timedelta(days=duration)
                elif interval == 'hours':
                    delta = timedelta(hours=duration)
                elif interval == 'minutes':
                    delta = timedelta(minutes=duration)
                trigger.value = delta
                valarm.add('DESCRIPTION').value = alarm.name or 'OpenERP'
        for attendee in event_obj.attendee_ids:
            attendee_add = event.add('attendee')
            attendee_add.value = 'MAILTO:' + (attendee.email or '')
        res = cal.serialize()
        return res

    def _send_mail_to_attendees(self, cr, uid, ids, email_from=tools.config.get('email_from', False),
                                template_xmlid='calendar_template_meeting_invitation', context=None):
        """
        Send mail for event invitation to event attendees.
        @param email_from: email address for user sending the mail
        """
        # NOTE(review): `context` is dereferenced below (context.get / context.copy),
        # so callers appear to be expected to always pass a dict — confirm before
        # relying on the None default.
        res = False
        # a config parameter or a context flag can suppress all invitation mail
        if self.pool['ir.config_parameter'].get_param(cr, uid, 'calendar.block_mail', default=False) or context.get("no_mail_to_attendees"):
            return res
        mail_ids = []
        data_pool = self.pool['ir.model.data']
        mailmess_pool = self.pool['mail.message']
        mail_pool = self.pool['mail.mail']
        template_pool = self.pool['email.template']
        local_context = context.copy()
        # colors used by the invitation template to render attendee states
        color = {
            'needsAction': 'grey',
            'accepted': 'green',
            'tentative': '#FFFF00',
            'declined': 'red'
        }
        if not isinstance(ids, (tuple, list)):
            ids = [ids]
        dummy, template_id = data_pool.get_object_reference(cr, uid, 'calendar', template_xmlid)
        dummy, act_id = data_pool.get_object_reference(cr, uid, 'calendar', "view_calendar_event_calendar")
        local_context.update({
            'color': color,
            'action_id': self.pool['ir.actions.act_window'].search(cr, uid, [('view_id', '=', act_id)], context=context)[0],
            'dbname': cr.dbname,
            'base_url': self.pool['ir.config_parameter'].get_param(cr, uid, 'web.base.url', default='http://localhost:8069', context=context)
        })
        for attendee in self.browse(cr, uid, ids, context=context):
            # skip attendees without an address and never mail the sender themselves
            if attendee.email and email_from and attendee.email != email_from:
                ics_file = self.get_ics_file(cr, uid, attendee.event_id, context=context)
                mail_id = template_pool.send_mail(cr, uid, template_id, attendee.id, context=local_context)
                vals = {}
                if ics_file:
                    vals['attachment_ids'] = [(0, 0, {'name': 'invitation.ics',
                                                      'datas_fname': 'invitation.ics',
                                                      'datas': str(ics_file).encode('base64')})]
                vals['model'] = None  # We don't want to have the mail in the tchatter while in queue!
                the_mailmess = mail_pool.browse(cr, uid, mail_id, context=context).mail_message_id
                mailmess_pool.write(cr, uid, [the_mailmess.id], vals, context=context)
                mail_ids.append(mail_id)
        if mail_ids:
            res = mail_pool.send(cr, uid, mail_ids, context=context)
        return res

    def onchange_user_id(self, cr, uid, ids, user_id, *args, **argv):
        """
        Make entry on email and availability on change of user_id field.
        @param ids: list of attendee's IDs
        @param user_id: changed value of User id
        @return: dictionary of values which put value in email and availability fields
        """
        if not user_id:
            return {'value': {'email': ''}}
        user = self.pool['res.users'].browse(cr, uid, user_id, *args)
        return {'value': {'email': user.email, 'availability': user.availability}}

    def do_tentative(self, cr, uid, ids, context=None, *args):
        """
        Makes event invitation as Tentative.
        @param ids: list of attendee's IDs
        """
        return self.write(cr, uid, ids, {'state': 'tentative'}, context)

    def do_accept(self, cr, uid, ids, context=None, *args):
        """
        Marks event invitation as Accepted.
        @param ids: list of attendee's IDs
        """
        if context is None:
            context = {}
        meeting_obj = self.pool['calendar.event']
        res = self.write(cr, uid, ids, {'state': 'accepted'}, context)
        for attendee in self.browse(cr, uid, ids, context=context):
            # log the acceptance in the meeting's message thread
            meeting_obj.message_post(cr, uid, attendee.event_id.id, body=_(("%s has accepted invitation") % (attendee.cn)),
                                     subtype="calendar.subtype_invitation", context=context)
        return res

    def do_decline(self, cr, uid, ids, context=None, *args):
        """
        Marks event invitation as Declined.
        @param ids: list of calendar attendee's IDs
        """
        if context is None:
            context = {}
        meeting_obj = self.pool['calendar.event']
        res = self.write(cr, uid, ids, {'state': 'declined'}, context)
        for attendee in self.browse(cr, uid, ids, context=context):
            # log the refusal in the meeting's message thread
            meeting_obj.message_post(cr, uid, attendee.event_id.id, body=_(("%s has declined invitation") % (attendee.cn)), subtype="calendar.subtype_invitation", context=context)
        return res

    def create(self, cr, uid, vals, context=None):
        # derive the email from the common name ("Name:email") when no
        # explicit email is supplied
        if context is None:
            context = {}
        if not vals.get("email") and vals.get("cn"):
            cnval = vals.get("cn").split(':')
            email = filter(lambda x: x.__contains__('@'), cnval)
            vals['email'] = email and email[0] or ''
            vals['cn'] = vals.get("cn")
        res = super(calendar_attendee, self).create(cr, uid, vals, context=context)
        return res
class res_partner(osv.Model):
    """Extend partners with calendar-notification helpers."""
    _inherit = 'res.partner'

    _columns = {
        'calendar_last_notif_ack': fields.datetime('Last notification marked as read from base Calendar'),
    }

    def get_attendee_detail(self, cr, uid, ids, meeting_id, context=None):
        """
        Return a list of tuple (id, name, status)
        Used by web_calendar.js : Many2ManyAttendee
        """
        details = []
        meeting = None
        if meeting_id:
            meeting = self.pool['calendar.event'].browse(cr, uid, get_real_ids(meeting_id), context=context)
        for partner in self.browse(cr, uid, ids, context=context):
            entry = self.name_get(cr, uid, [partner.id], context)[0]
            if meeting:
                # extend the (id, name) pair with this partner's attendance state
                for attendee in meeting.attendee_ids:
                    if attendee.partner_id.id == partner.id:
                        entry = (entry[0], entry[1], attendee.state)
            details.append(entry)
        return details

    def _set_calendar_last_notif_ack(self, cr, uid, context=None):
        """Stamp the current user's partner with "now" as the last notification ack."""
        current_partner = self.pool['res.users'].browse(cr, uid, uid, context=context).partner_id
        self.write(cr, uid, current_partner.id, {'calendar_last_notif_ack': datetime.now()}, context=context)
        return
class calendar_alarm_manager(osv.AbstractModel):
_name = 'calendar.alarm_manager'
    def get_next_potential_limit_alarm(self, cr, uid, seconds, notif=True, mail=True, partner_id=None, context=None):
        """Return candidate events that may have an alarm firing within *seconds*.

        A single SQL query joins events with their alarms to compute, per
        event, the earliest (first_alarm) and latest (last_alarm) possible
        firing moments, keeping the events whose alarm window overlaps
        [now - seconds, now + seconds].

        @param seconds: width of the look-ahead/look-behind window in seconds
        @param notif: include alarms of type 'notification'
        @param mail: include alarms of type 'email'
        @param partner_id: if given, restrict to events this partner attends
        @return: dict keyed by event id with first/last alarm and meeting
                 dates, min/max alarm deltas (minutes) and the rrule string
        """
        res = {}
        base_request = """
                    SELECT
                        cal.id,
                        cal.start - interval '1' minute * calcul_delta.max_delta AS first_alarm,
                        CASE
                            WHEN cal.recurrency THEN cal.final_date - interval '1' minute * calcul_delta.min_delta
                            ELSE cal.stop - interval '1' minute * calcul_delta.min_delta
                        END as last_alarm,
                        cal.start as first_event_date,
                        CASE
                            WHEN cal.recurrency THEN cal.final_date
                            ELSE cal.stop
                        END as last_event_date,
                        calcul_delta.min_delta,
                        calcul_delta.max_delta,
                        cal.rrule AS rule
                    FROM
                        calendar_event AS cal
                        RIGHT JOIN
                            (
                                SELECT
                                    rel.calendar_event_id, max(alarm.duration_minutes) AS max_delta,min(alarm.duration_minutes) AS min_delta
                                FROM
                                    calendar_alarm_calendar_event_rel AS rel
                                        LEFT JOIN calendar_alarm AS alarm ON alarm.id = rel.calendar_alarm_id
                                WHERE alarm.type in %s
                                GROUP BY rel.calendar_event_id
                            ) AS calcul_delta ON calcul_delta.calendar_event_id = cal.id
        """
        filter_user = """
                RIGHT JOIN calendar_event_res_partner_rel AS part_rel ON part_rel.calendar_event_id = cal.id
                    AND part_rel.res_partner_id = %s
        """
        #Add filter on type
        type_to_read = ()
        if notif:
            type_to_read += ('notification',)
        if mail:
            type_to_read += ('email',)
        tuple_params = (type_to_read,)
        # ADD FILTER ON PARTNER_ID
        if partner_id:
            base_request += filter_user
            tuple_params += (partner_id, )
        #Add filter on hours
        tuple_params += (seconds, seconds,)
        # keep events whose first alarm is before (now + seconds) and whose
        # last alarm is after (now - seconds)
        cr.execute("""SELECT *
                        FROM ( %s WHERE cal.active = True ) AS ALL_EVENTS
                       WHERE ALL_EVENTS.first_alarm < (now() at time zone 'utc' + interval '%%s' second )
                         AND ALL_EVENTS.last_alarm > (now() at time zone 'utc' - interval '%%s' second )
                   """ % base_request, tuple_params)
        for event_id, first_alarm, last_alarm, first_meeting, last_meeting, min_duration, max_duration, rule in cr.fetchall():
            res[event_id] = {
                'event_id': event_id,
                'first_alarm': first_alarm,
                'last_alarm': last_alarm,
                'first_meeting': first_meeting,
                'last_meeting': last_meeting,
                'min_duration': min_duration,
                'max_duration': max_duration,
                'rrule': rule
            }
        return res
def do_check_alarm_for_one_date(self, cr, uid, one_date, event, event_maxdelta, in_the_next_X_seconds, after=False, notif=True, mail=True, context=None):
res = []
alarm_type = []
if notif:
alarm_type.append('notification')
if mail:
alarm_type.append('email')
if one_date - timedelta(minutes=event_maxdelta) < datetime.now() + timedelta(seconds=in_the_next_X_seconds): # if an alarm is possible for this date
for alarm in event.alarm_ids:
if alarm.type in alarm_type and \
one_date - timedelta(minutes=alarm.duration_minutes) < datetime.now() + timedelta(seconds=in_the_next_X_seconds) and \
(not after or one_date - timedelta(minutes=alarm.duration_minutes) > datetime.strptime(after.split('.')[0], DEFAULT_SERVER_DATETIME_FORMAT)):
alert = {
'alarm_id': alarm.id,
'event_id': event.id,
'notify_at': one_date - timedelta(minutes=alarm.duration_minutes),
}
res.append(alert)
return res
def get_next_mail(self, cr, uid, context=None):
cron = self.pool.get('ir.cron').search(cr, uid, [('model', 'ilike', self._name)], context=context)
if cron and len(cron) == 1:
cron = self.pool.get('ir.cron').browse(cr, uid, cron[0], context=context)
else:
_logger.exception("Cron for " + self._name + " can not be identified !")
if cron.interval_type == "weeks":
cron_interval = cron.interval_number * 7 * 24 * 60 * 60
elif cron.interval_type == "days":
cron_interval = cron.interval_number * 24 * 60 * 60
elif cron.interval_type == "hours":
cron_interval = cron.interval_number * 60 * 60
elif cron.interval_type == "minutes":
cron_interval = cron.interval_number * 60
elif cron.interval_type == "seconds":
cron_interval = cron.interval_number
if not cron_interval:
_logger.exception("Cron delay can not be computed !")
all_events = self.get_next_potential_limit_alarm(cr, uid, cron_interval, notif=False, context=context)
for event in all_events: # .values()
max_delta = all_events[event]['max_duration']
curEvent = self.pool.get('calendar.event').browse(cr, uid, event, context=context)
if curEvent.recurrency:
bFound = False
LastFound = False
for one_date in self.pool.get('calendar.event').get_recurrent_date_by_event(cr, uid, curEvent, context=context):
in_date_format = one_date.replace(tzinfo=None)
LastFound = self.do_check_alarm_for_one_date(cr, uid, in_date_format, curEvent, max_delta, cron_interval, notif=False, context=context)
if LastFound:
for alert in LastFound:
self.do_mail_reminder(cr, uid, alert, context=context)
if not bFound: # if it's the first alarm for this recurrent event
bFound = True
if bFound and not LastFound: # if the precedent event had an alarm but not this one, we can stop the search for this event
break
else:
in_date_format = datetime.strptime(curEvent.start, DEFAULT_SERVER_DATETIME_FORMAT)
LastFound = self.do_check_alarm_for_one_date(cr, uid, in_date_format, curEvent, max_delta, cron_interval, notif=False, context=context)
if LastFound:
for alert in LastFound:
self.do_mail_reminder(cr, uid, alert, context=context)
    def get_next_notif(self, cr, uid, context=None):
        """Return the browser notifications to display for the current user's
        upcoming event alarms.

        Polled by the web client; only 'notification'-type alarms are
        considered (``mail=False``), and alarms already acknowledged
        (``partner.calendar_last_notif_ack``) are filtered out.
        """
        ajax_check_every_seconds = 300
        partner = self.pool.get('res.users').browse(cr, uid, uid, context=context).partner_id
        all_notif = []
        if not partner:
            return []
        all_events = self.get_next_potential_limit_alarm(cr, uid, ajax_check_every_seconds, partner_id=partner.id, mail=False, context=context)
        for event in all_events:  # .values()
            max_delta = all_events[event]['max_duration']
            curEvent = self.pool.get('calendar.event').browse(cr, uid, event, context=context)
            if curEvent.recurrency:
                bFound = False
                LastFound = False
                for one_date in self.pool.get("calendar.event").get_recurrent_date_by_event(cr, uid, curEvent, context=context):
                    in_date_format = one_date.replace(tzinfo=None)
                    LastFound = self.do_check_alarm_for_one_date(cr, uid, in_date_format, curEvent, max_delta, ajax_check_every_seconds, after=partner.calendar_last_notif_ack, mail=False, context=context)
                    if LastFound:
                        for alert in LastFound:
                            all_notif.append(self.do_notif_reminder(cr, uid, alert, context=context))
                        if not bFound:  # if it's the first alarm for this recurrent event
                            bFound = True
                    if bFound and not LastFound:  # if the precedent event had an alarm but not this one, we can stop the search for this event
                        break
            else:
                in_date_format = datetime.strptime(curEvent.start, DEFAULT_SERVER_DATETIME_FORMAT)
                # NOTE(review): calendar_last_notif_ack is passed positionally as
                # the 'after' argument here (keyword form used above) — same slot,
                # but worth confirming the inconsistency is intentional.
                LastFound = self.do_check_alarm_for_one_date(cr, uid, in_date_format, curEvent, max_delta, ajax_check_every_seconds, partner.calendar_last_notif_ack, mail=False, context=context)
                if LastFound:
                    for alert in LastFound:
                        all_notif.append(self.do_notif_reminder(cr, uid, alert, context=context))
        return all_notif
def do_mail_reminder(self, cr, uid, alert, context=None):
if context is None:
context = {}
res = False
event = self.pool['calendar.event'].browse(cr, uid, alert['event_id'], context=context)
alarm = self.pool['calendar.alarm'].browse(cr, uid, alert['alarm_id'], context=context)
if alarm.type == 'email':
res = self.pool['calendar.attendee']._send_mail_to_attendees(cr, uid, [att.id for att in event.attendee_ids], template_xmlid='calendar_template_meeting_reminder', context=context)
return res
def do_notif_reminder(self, cr, uid, alert, context=None):
alarm = self.pool['calendar.alarm'].browse(cr, uid, alert['alarm_id'], context=context)
event = self.pool['calendar.event'].browse(cr, uid, alert['event_id'], context=context)
if alarm.type == 'notification':
message = event.display_time
delta = alert['notify_at'] - datetime.now()
delta = delta.seconds + delta.days * 3600 * 24
return {
'event_id': event.id,
'title': event.name,
'message': message,
'timer': delta,
'notify_at': alert['notify_at'].strftime(DEFAULT_SERVER_DATETIME_FORMAT),
}
class calendar_alarm(osv.Model):
    """Reminder definition (amount + unit + delivery channel) attachable to
    calendar events."""
    _name = 'calendar.alarm'
    _description = 'Event alarm'

    def _get_duration(self, cr, uid, ids, field_name, arg, context=None):
        """Function-field getter: the alarm delay converted to minutes."""
        # minutes per unit of the 'interval' selection; unknown units yield 0
        minutes_per_unit = {'minutes': 1, 'hours': 60, 'days': 60 * 24}
        res = {}
        for alarm in self.browse(cr, uid, ids, context=context):
            res[alarm.id] = alarm.duration * minutes_per_unit.get(alarm.interval, 0)
        return res

    _columns = {
        'name': fields.char('Name', required=True),
        'type': fields.selection([('notification', 'Notification'), ('email', 'Email')], 'Type', required=True),
        'duration': fields.integer('Amount', required=True),
        'interval': fields.selection([('minutes', 'Minutes'), ('hours', 'Hours'), ('days', 'Days')], 'Unit', required=True),
        'duration_minutes': fields.function(_get_duration, type='integer', string='duration_minutes', store=True),
    }

    _defaults = {
        'type': 'notification',
        'duration': 1,
        'interval': 'hours',
    }
class ir_values(osv.Model):
    """Override ir.values so virtual calendar ids in (model, res_id) pairs
    are mapped back to real database ids before hitting the base model."""
    _inherit = 'ir.values'

    def set(self, cr, uid, key, key2, name, models, value, replace=True, isobject=False, meta=False, preserve_user=False, company=False):
        """Store ir.values after translating virtual calendar ids."""
        real_models = [
            (item[0], calendar_id2real_id(item[1])) if type(item) in (list, tuple) else item
            for item in models
        ]
        return super(ir_values, self).set(cr, uid, key, key2, name, real_models,
                                          value, replace, isobject, meta, preserve_user, company)

    def get(self, cr, uid, key, key2, models, meta=False, context=None, res_id_req=False, without_user=True, key2_req=True):
        """Read ir.values after translating virtual calendar ids."""
        if context is None:
            context = {}
        real_models = [
            (item[0], calendar_id2real_id(item[1])) if type(item) in (list, tuple) else item
            for item in models
        ]
        return super(ir_values, self).get(cr, uid, key, key2, real_models,
                                          meta, context, res_id_req, without_user, key2_req)
class ir_model(osv.Model):
    """Override ir.model.read so virtual calendar ids in the result are
    rewritten to their real ids; accepts a single id or a list of ids."""
    _inherit = 'ir.model'

    def read(self, cr, uid, ids, fields=None, context=None, load='_classic_read'):
        if context is None:
            context = {}
        new_ids = isinstance(ids, (str, int, long)) and [ids] or ids
        data = super(ir_model, self).read(cr, uid, new_ids, fields=fields, context=context, load=load)
        for val in data or []:
            # translate "real_id-YYYYMMDDHHMMSS" virtual ids back to real ones
            val['id'] = calendar_id2real_id(val['id'])
        return isinstance(ids, (str, int, long)) and data[0] or data
original_exp_report = openerp.service.report.exp_report


def exp_report(db, uid, object, ids, data=None, context=None):
    """Wrap the report export service so that virtual calendar ids are
    translated to real ids before the report runs.

    Fixes: the 'printscreen.list' short-circuit previously *called* the
    original service without returning, so the report was executed a second
    time below with translated ids; also guard ``data`` which defaults to
    None and would crash on ``data.get``.
    """
    if object == 'printscreen.list':
        return original_exp_report(db, uid, object, ids, data, context)
    new_ids = [calendar_id2real_id(id) for id in ids]
    if data and data.get('id', False):
        data['id'] = calendar_id2real_id(data['id'])
    return original_exp_report(db, uid, object, new_ids, data, context)

# monkey-patch the report service with the id-translating wrapper
openerp.service.report.exp_report = exp_report
class calendar_event_type(osv.Model):
    """Configurable tag that can be set on calendar events (see the
    'categ_ids' many2many on calendar.event)."""
    _name = 'calendar.event.type'
    _description = 'Meeting Type'

    _columns = {
        'name': fields.char('Name', required=True, translate=True),
    }
class calendar_event(osv.Model):
    """Calendar event (meeting) with recurrence, attendees and alarms.

    Recurrent events are exposed to clients through "virtual ids" of the
    form "<real_id>-<YYYYMMDDHHMMSS>" (see get_recurrent_ids /
    calendar_id2real_id).
    """
    _name = 'calendar.event'
    _description = "Event"
    _order = "id desc"
    _inherit = ["mail.thread", "ir.needaction_mixin"]
def do_run_scheduler(self, cr, uid, id, context=None):
self.pool['calendar.alarm_manager'].get_next_mail(cr, uid, context=context)
    def get_recurrent_date_by_event(self, cr, uid, event, context=None):
        """Get recurrent dates based on Rule string and all event where recurrent_id is child

        :return: list of occurrence datetimes (tz-aware, converted to UTC)
                 generated by the event's rrule, excluding occurrences that
                 were detached into their own records (``recurrent_id``
                 children).
        """
        def todate(date):
            # Rebuild a datetime from the digits only, so any separator
            # format in the stored string is accepted.
            val = parser.parse(''.join((re.compile('\d')).findall(date)))
            ## Dates are localized to saved timezone if any, else current timezone.
            if not val.tzinfo:
                val = pytz.UTC.localize(val)
            return val.astimezone(timezone)
        timezone = pytz.timezone(context.get('tz') or 'UTC')
        startdate = pytz.UTC.localize(datetime.strptime(event.start, DEFAULT_SERVER_DATETIME_FORMAT))  # Add "+hh:mm" timezone
        if not startdate:
            # NOTE(review): dead branch — strptime either returns a datetime
            # or raises, so startdate can never be falsy here.
            startdate = datetime.now()
        ## Convert the start date to saved timezone (or context tz) as it'll
        ## define the correct hour/day asked by the user to repeat for recurrence.
        startdate = startdate.astimezone(timezone)  # transform "+hh:mm" timezone
        rset1 = rrule.rrulestr(str(event.rrule), dtstart=startdate, forceset=True)
        ids_depending = self.search(cr, uid, [('recurrent_id', '=', event.id), '|', ('active', '=', False), ('active', '=', True)], context=context)
        all_events = self.browse(cr, uid, ids_depending, context=context)
        for ev in all_events:
            # exclude detached occurrences from the generated set
            rset1._exdate.append(todate(ev.recurrent_id_date))
        return [d.astimezone(pytz.UTC) for d in rset1]
def _get_recurrency_end_date(self, cr, uid, id, context=None):
data = self.read(cr, uid, id, ['final_date', 'recurrency', 'rrule_type', 'count', 'end_type', 'stop'], context=context)
if not data.get('recurrency'):
return False
end_type = data.get('end_type')
final_date = data.get('final_date')
if end_type == 'count' and all(data.get(key) for key in ['count', 'rrule_type', 'stop']):
count = data['count'] + 1
delay, mult = {
'daily': ('days', 1),
'weekly': ('days', 7),
'monthly': ('months', 1),
'yearly': ('years', 1),
}[data['rrule_type']]
deadline = datetime.strptime(data['stop'], tools.DEFAULT_SERVER_DATETIME_FORMAT)
return deadline + relativedelta(**{delay: count * mult})
return final_date
def _find_my_attendee(self, cr, uid, meeting_ids, context=None):
"""
Return the first attendee where the user connected has been invited from all the meeting_ids in parameters
"""
user = self.pool['res.users'].browse(cr, uid, uid, context=context)
for meeting_id in meeting_ids:
for attendee in self.browse(cr, uid, meeting_id, context).attendee_ids:
if user.partner_id.id == attendee.partner_id.id:
return attendee
return False
def get_date_formats(self, cr, uid, context):
lang = context.get("lang")
res_lang = self.pool.get('res.lang')
lang_params = {}
if lang:
ids = res_lang.search(request.cr, uid, [("code", "=", lang)])
if ids:
lang_params = res_lang.read(request.cr, uid, ids[0], ["date_format", "time_format"])
format_date = lang_params.get("date_format", '%B-%d-%Y')
format_time = lang_params.get("time_format", '%I-%M %p')
return (format_date, format_time)
def get_display_time_tz(self, cr, uid, ids, tz=False, context=None):
context = dict(context or {})
if tz:
context["tz"] = tz
ev = self.browse(cr, uid, ids, context=context)[0]
return self._get_display_time(cr, uid, ev.start, ev.stop, ev.duration, ev.allday, context=context)
    def _get_display_time(self, cr, uid, start, stop, zduration, zallday, context=None):
        """
        Return date and time (from to from) based on duration with timezone in string :
        eg.
        1) if user add duration for 2 hours, return : August-23-2013 at (04-30 To 06-30) (Europe/Brussels)
        2) if event all day ,return : AllDay, July-31-2013
        """
        context = dict(context or {})
        tz = context.get('tz', False)
        if not tz:  # tz can have a value False, so dont do it in the default value of get !
            # fall back to the user's configured timezone (read as superuser)
            context['tz'] = self.pool.get('res.users').read(cr, SUPERUSER_ID, uid, ['tz'])['tz']
            tz = context['tz']
        format_date, format_time = self.get_date_formats(cr, uid, context=context)
        # convert both bounds to the context timezone before formatting
        date = fields.datetime.context_timestamp(cr, uid, datetime.strptime(start, tools.DEFAULT_SERVER_DATETIME_FORMAT), context=context)
        date_deadline = fields.datetime.context_timestamp(cr, uid, datetime.strptime(stop, tools.DEFAULT_SERVER_DATETIME_FORMAT), context=context)
        event_date = date.strftime(format_date)
        display_time = date.strftime(format_time)
        if zallday:
            time = _("AllDay , %s") % (event_date)
        elif zduration < 24:
            # same-day rendering: start date plus a time range
            duration = date + timedelta(hours=zduration)
            time = ("%s at (%s To %s) (%s)") % (event_date, display_time, duration.strftime(format_time), tz)
        else:
            # multi-day rendering: full date and time for both bounds
            time = ("%s at %s To\n %s at %s (%s)") % (event_date, display_time, date_deadline.strftime(format_date), date_deadline.strftime(format_time), tz)
        return time
def _compute(self, cr, uid, ids, fields, arg, context=None):
res = {}
for meeting_id in ids:
res[meeting_id] = {}
attendee = self._find_my_attendee(cr, uid, [meeting_id], context)
meeting = self.browse(cr, uid, [meeting_id], context=context)[0]
for field in fields:
if field == 'is_attendee':
res[meeting_id][field] = True if attendee else False
elif field == 'attendee_status':
res[meeting_id][field] = attendee.state if attendee else 'needsAction'
elif field == 'display_time':
res[meeting_id][field] = self._get_display_time(cr, uid, meeting.start, meeting.stop, meeting.duration, meeting.allday, context=context)
elif field == "display_start":
res[meeting_id][field] = meeting.start_date if meeting.allday else meeting.start_datetime
elif field == 'start':
res[meeting_id][field] = meeting.start_date if meeting.allday else meeting.start_datetime
elif field == 'stop':
res[meeting_id][field] = meeting.stop_date if meeting.allday else meeting.stop_datetime
return res
    def _get_rulestring(self, cr, uid, ids, name, arg, context=None):
        """
        Gets Recurrence rule string according to value type RECUR of iCalendar from the values given.
        @return: dictionary of rrule value (empty string for non-recurrent events).
        @raise osv.except_osv: when interval is negative or count is <= 0
        """
        result = {}
        if not isinstance(ids, list):
            ids = [ids]
        for id in ids:
            #read these fields as SUPERUSER because if the record is private a normal search could return False and raise an error
            data = self.browse(cr, SUPERUSER_ID, id, context=context)
            if data.interval and data.interval < 0:
                raise osv.except_osv(_('Warning!'), _('Interval cannot be negative.'))
            if data.count and data.count <= 0:
                raise osv.except_osv(_('Warning!'), _('Count cannot be negative or 0.'))
            # re-read with the real uid: the recurrence columns themselves
            # drive compute_rule_string
            data = self.read(cr, uid, id, ['id', 'byday', 'recurrency', 'final_date', 'rrule_type', 'month_by', 'interval', 'count', 'end_type', 'mo', 'tu', 'we', 'th', 'fr', 'sa', 'su', 'day', 'week_list'], context=context)
            event = data['id']
            if data['recurrency']:
                result[event] = self.compute_rule_string(data)
            else:
                result[event] = ""
        return result
# retro compatibility function
def _rrule_write(self, cr, uid, ids, field_name, field_value, args, context=None):
return self._set_rulestring(self, cr, uid, ids, field_name, field_value, args, context=context)
def _set_rulestring(self, cr, uid, ids, field_name, field_value, args, context=None):
if not isinstance(ids, list):
ids = [ids]
data = self._get_empty_rrule_data()
if field_value:
data['recurrency'] = True
for event in self.browse(cr, uid, ids, context=context):
rdate = event.start
update_data = self._parse_rrule(field_value, dict(data), rdate)
data.update(update_data)
self.write(cr, uid, ids, data, context=context)
return True
    def _set_date(self, cr, uid, values, id=False, context=None):
        """Normalize the start/stop values in ``values`` in place.

        Copies 'start'/'stop' into the proper companion column ('*_date' for
        all-day events, '*_datetime' otherwise), blanks the other companion,
        and recomputes 'duration' (hours) when both bounds are available.
        """
        if context is None:
            context = {}
        if values.get('start_datetime') or values.get('start_date') or values.get('start') \
                or values.get('stop_datetime') or values.get('stop_date') or values.get('stop'):
            allday = values.get("allday", None)
            if allday is None:
                # not provided: fall back to the stored value when updating
                if id:
                    allday = self.read(cr, uid, [id], ['allday'], context=context)[0].get('allday')
                else:
                    allday = False
                    _logger.warning("Calendar - All day is not specified, arbitrarily set to False")
                    #raise osv.except_osv(_('Error!'), ("Need to know if it's an allday or not..."))
            key = "date" if allday else "datetime"
            notkey = "datetime" if allday else "date"
            for fld in ('start', 'stop'):
                if values.get('%s_%s' % (fld, key)) or values.get(fld):
                    # keep the matching companion, clear the other one
                    values['%s_%s' % (fld, key)] = values.get('%s_%s' % (fld, key)) or values.get(fld)
                    values['%s_%s' % (fld, notkey)] = None
                    if fld not in values.keys():
                        values[fld] = values['%s_%s' % (fld, key)]
            diff = False
            if allday and values.get('stop_date') and values.get('start_date'):
                diff = datetime.strptime(values['stop_date'].split(' ')[0], DEFAULT_SERVER_DATE_FORMAT) - datetime.strptime(values['start_date'].split(' ')[0], DEFAULT_SERVER_DATE_FORMAT)
            elif values.get('stop_datetime') and values.get('start_datetime'):
                diff = datetime.strptime(values['stop_datetime'].split('.')[0], DEFAULT_SERVER_DATETIME_FORMAT) - datetime.strptime(values['start_datetime'].split('.')[0], DEFAULT_SERVER_DATETIME_FORMAT)
            if diff:
                # duration is stored in hours
                duration = float(diff.days) * 24 + (float(diff.seconds) / 3600)
                values['duration'] = round(duration, 2)
_track = {
'location': {
'calendar.subtype_invitation': lambda self, cr, uid, obj, ctx=None: True,
},
'start': {
'calendar.subtype_invitation': lambda self, cr, uid, obj, ctx=None: True,
},
}
_columns = {
'id': fields.integer('ID', readonly=True),
'state': fields.selection([('draft', 'Unconfirmed'), ('open', 'Confirmed')], string='Status', readonly=True, track_visibility='onchange'),
'name': fields.char('Meeting Subject', required=True, states={'done': [('readonly', True)]}),
'is_attendee': fields.function(_compute, string='Attendee', type="boolean", multi='attendee'),
'attendee_status': fields.function(_compute, string='Attendee Status', type="selection", selection=calendar_attendee.STATE_SELECTION, multi='attendee'),
'display_time': fields.function(_compute, string='Event Time', type="char", multi='attendee'),
'display_start': fields.function(_compute, string='Date', type="char", multi='display_start', store=True),
'allday': fields.boolean('All Day', states={'done': [('readonly', True)]}),
'start': fields.function(_compute, string='Calculated start', type="datetime", multi='start', store=True, required=True),
'stop': fields.function(_compute, string='Calculated stop', type="datetime", multi='stop', store=True, required=True),
'start_date': fields.date('Start Date', states={'done': [('readonly', True)]}, track_visibility='onchange'),
'start_datetime': fields.datetime('Start DateTime', states={'done': [('readonly', True)]}, track_visibility='onchange'),
'stop_date': fields.date('End Date', states={'done': [('readonly', True)]}, track_visibility='onchange'),
'stop_datetime': fields.datetime('End Datetime', states={'done': [('readonly', True)]}, track_visibility='onchange'), # old date_deadline
'duration': fields.float('Duration', states={'done': [('readonly', True)]}),
'description': fields.text('Description', states={'done': [('readonly', True)]}),
'class': fields.selection([('public', 'Public'), ('private', 'Private'), ('confidential', 'Public for Employees')], 'Privacy', states={'done': [('readonly', True)]}),
'location': fields.char('Location', help="Location of Event", track_visibility='onchange', states={'done': [('readonly', True)]}),
'show_as': fields.selection([('free', 'Free'), ('busy', 'Busy')], 'Show Time as', states={'done': [('readonly', True)]}),
# RECURRENCE FIELD
'rrule': fields.function(_get_rulestring, type='char', fnct_inv=_set_rulestring, store=True, string='Recurrent Rule'),
'rrule_type': fields.selection([('daily', 'Day(s)'), ('weekly', 'Week(s)'), ('monthly', 'Month(s)'), ('yearly', 'Year(s)')], 'Recurrency', states={'done': [('readonly', True)]}, help="Let the event automatically repeat at that interval"),
'recurrency': fields.boolean('Recurrent', help="Recurrent Meeting"),
'recurrent_id': fields.integer('Recurrent ID'),
'recurrent_id_date': fields.datetime('Recurrent ID date'),
'end_type': fields.selection([('count', 'Number of repetitions'), ('end_date', 'End date')], 'Recurrence Termination'),
'interval': fields.integer('Repeat Every', help="Repeat every (Days/Week/Month/Year)"),
'count': fields.integer('Repeat', help="Repeat x times"),
'mo': fields.boolean('Mon'),
'tu': fields.boolean('Tue'),
'we': fields.boolean('Wed'),
'th': fields.boolean('Thu'),
'fr': fields.boolean('Fri'),
'sa': fields.boolean('Sat'),
'su': fields.boolean('Sun'),
'month_by': fields.selection([('date', 'Date of month'), ('day', 'Day of month')], 'Option', oldname='select1'),
'day': fields.integer('Date of month'),
'week_list': fields.selection([('MO', 'Monday'), ('TU', 'Tuesday'), ('WE', 'Wednesday'), ('TH', 'Thursday'), ('FR', 'Friday'), ('SA', 'Saturday'), ('SU', 'Sunday')], 'Weekday'),
'byday': fields.selection([('1', 'First'), ('2', 'Second'), ('3', 'Third'), ('4', 'Fourth'), ('5', 'Fifth'), ('-1', 'Last')], 'By day'),
'final_date': fields.date('Repeat Until'), # The last event of a recurrence
'user_id': fields.many2one('res.users', 'Responsible', states={'done': [('readonly', True)]}),
'color_partner_id': fields.related('user_id', 'partner_id', 'id', type="integer", string="colorize", store=False), # Color of creator
'active': fields.boolean('Active', help="If the active field is set to true, it will allow you to hide the event alarm information without removing it."),
'categ_ids': fields.many2many('calendar.event.type', 'meeting_category_rel', 'event_id', 'type_id', 'Tags'),
'attendee_ids': fields.one2many('calendar.attendee', 'event_id', 'Attendees', ondelete='cascade'),
'partner_ids': fields.many2many('res.partner', 'calendar_event_res_partner_rel', string='Attendees', states={'done': [('readonly', True)]}),
'alarm_ids': fields.many2many('calendar.alarm', 'calendar_alarm_calendar_event_rel', string='Reminders', ondelete="restrict", copy=False),
}
_defaults = {
'end_type': 'count',
'count': 1,
'rrule_type': False,
'allday': False,
'state': 'draft',
'class': 'public',
'show_as': 'busy',
'month_by': 'date',
'interval': 1,
'active': 1,
'user_id': lambda self, cr, uid, ctx: uid,
'partner_ids': lambda self, cr, uid, ctx: [self.pool['res.users'].browse(cr, uid, [uid], context=ctx)[0].partner_id.id]
}
def _check_closing_date(self, cr, uid, ids, context=None):
for event in self.browse(cr, uid, ids, context=context):
if event.stop < event.start:
return False
return True
_constraints = [
(_check_closing_date, 'Error ! End date cannot be set before start date.', ['start', 'stop'])
]
    def onchange_allday(self, cr, uid, ids, start=False, end=False, starttime=False, endtime=False, startdatetime=False, enddatetime=False, checkallday=False, context=None):
        """Toggle between the all-day and timed representations of the event.

        Switching to all-day derives start_date/stop_date from the datetime
        values; switching back rebuilds the datetimes at 08:00 / 18:00 in the
        user's timezone from the plain dates.
        @return: {'value': {...fields to update on the form...}}
        """
        value = {}
        if not ((starttime and endtime) or (start and end)):  # At first initialization we have no datetime yet
            return value
        if checkallday:  # from datetime to date
            startdatetime = startdatetime or start
            if startdatetime:
                start = datetime.strptime(startdatetime, DEFAULT_SERVER_DATETIME_FORMAT)
                value['start_date'] = datetime.strftime(start, DEFAULT_SERVER_DATE_FORMAT)
            enddatetime = enddatetime or end
            if enddatetime:
                end = datetime.strptime(enddatetime, DEFAULT_SERVER_DATETIME_FORMAT)
                value['stop_date'] = datetime.strftime(end, DEFAULT_SERVER_DATE_FORMAT)
        else:  # from date to datetime
            user = self.pool['res.users'].browse(cr, uid, uid, context)
            tz = pytz.timezone(user.tz) if user.tz else pytz.utc
            if starttime:
                start = datetime.strptime(starttime.split(' ')[0], DEFAULT_SERVER_DATE_FORMAT)
                startdate = tz.localize(start)  # Add "+hh:mm" timezone
                startdate = startdate.replace(hour=8)  # Set 8 AM in localtime
                startdate = startdate.astimezone(pytz.utc)  # Convert to UTC
                value['start_datetime'] = datetime.strftime(startdate, DEFAULT_SERVER_DATETIME_FORMAT)
            elif start:
                value['start_datetime'] = start
            if endtime:
                # same conversion, but end of the working day (6 PM localtime)
                end = datetime.strptime(endtime.split(' ')[0], DEFAULT_SERVER_DATE_FORMAT)
                enddate = tz.localize(end).replace(hour=18).astimezone(pytz.utc)
                value['stop_datetime'] = datetime.strftime(enddate, DEFAULT_SERVER_DATETIME_FORMAT)
            elif end:
                value['stop_datetime'] = end
        return {'value': value}
def onchange_dates(self, cr, uid, ids, fromtype, start=False, end=False, checkallday=False, allday=False, context=None):
"""Returns duration and end date based on values passed
@param ids: List of calendar event's IDs.
"""
value = {}
if checkallday != allday:
return value
value['allday'] = checkallday # Force to be rewrited
if allday:
if fromtype == 'start':
start = datetime.strptime(start, DEFAULT_SERVER_DATE_FORMAT)
value['start_datetime'] = datetime.strftime(start, DEFAULT_SERVER_DATETIME_FORMAT)
value['start'] = datetime.strftime(start, DEFAULT_SERVER_DATETIME_FORMAT)
if fromtype == 'stop':
end = datetime.strptime(end, DEFAULT_SERVER_DATE_FORMAT)
value['stop_datetime'] = datetime.strftime(end, DEFAULT_SERVER_DATETIME_FORMAT)
value['stop'] = datetime.strftime(end, DEFAULT_SERVER_DATETIME_FORMAT)
else:
if fromtype == 'start':
start = datetime.strptime(start, DEFAULT_SERVER_DATETIME_FORMAT)
value['start_date'] = datetime.strftime(start, DEFAULT_SERVER_DATE_FORMAT)
value['start'] = datetime.strftime(start, DEFAULT_SERVER_DATETIME_FORMAT)
if fromtype == 'stop':
end = datetime.strptime(end, DEFAULT_SERVER_DATETIME_FORMAT)
value['stop_date'] = datetime.strftime(end, DEFAULT_SERVER_DATE_FORMAT)
value['stop'] = datetime.strftime(end, DEFAULT_SERVER_DATETIME_FORMAT)
return {'value': value}
def new_invitation_token(self, cr, uid, record, partner_id):
return uuid.uuid4().hex
    def create_attendees(self, cr, uid, ids, context):
        """Synchronize calendar.attendee records with the event's partner_ids.

        For every event: create an attendee (with a fresh access token) for
        each partner that has none yet, auto-accept the current user's own
        attendance, send invitation e-mails, subscribe the new partners as
        followers, and unlink attendees whose partner was removed.

        @return: dict event_id -> {'new_attendee_ids', 'old_attendee_ids',
                 'removed_attendee_ids'}
        """
        user_obj = self.pool['res.users']
        current_user = user_obj.browse(cr, uid, uid, context=context)
        res = {}
        for event in self.browse(cr, uid, ids, context):
            attendees = {}
            for att in event.attendee_ids:
                attendees[att.partner_id.id] = True
            new_attendees = []
            new_att_partner_ids = []
            for partner in event.partner_ids:
                if partner.id in attendees:
                    continue
                access_token = self.new_invitation_token(cr, uid, event, partner.id)
                values = {
                    'partner_id': partner.id,
                    'event_id': event.id,
                    'access_token': access_token,
                    'email': partner.email,
                }
                if partner.id == current_user.partner_id.id:
                    # the organizer does not need to confirm his own invitation
                    values['state'] = 'accepted'
                att_id = self.pool['calendar.attendee'].create(cr, uid, values, context=context)
                new_attendees.append(att_id)
                new_att_partner_ids.append(partner.id)
                if not current_user.email or current_user.email != partner.email:
                    # skip mailing the current user about his own attendance
                    mail_from = current_user.email or tools.config.get('email_from', False)
                    if self.pool['calendar.attendee']._send_mail_to_attendees(cr, uid, att_id, email_from=mail_from, context=context):
                        self.message_post(cr, uid, event.id, body=_("An invitation email has been sent to attendee %s") % (partner.name,), subtype="calendar.subtype_invitation", context=context)
            if new_attendees:
                self.write(cr, uid, [event.id], {'attendee_ids': [(4, att) for att in new_attendees]}, context=context)
            if new_att_partner_ids:
                self.message_subscribe(cr, uid, [event.id], new_att_partner_ids, context=context)
            # We remove old attendees who are not in partner_ids now.
            all_partner_ids = [part.id for part in event.partner_ids]
            all_part_attendee_ids = [att.partner_id.id for att in event.attendee_ids]
            all_attendee_ids = [att.id for att in event.attendee_ids]
            partner_ids_to_remove = map(lambda x: x, set(all_part_attendee_ids + new_att_partner_ids) - set(all_partner_ids))
            attendee_ids_to_remove = []
            if partner_ids_to_remove:
                attendee_ids_to_remove = self.pool["calendar.attendee"].search(cr, uid, [('partner_id.id', 'in', partner_ids_to_remove), ('event_id.id', '=', event.id)], context=context)
                if attendee_ids_to_remove:
                    self.pool['calendar.attendee'].unlink(cr, uid, attendee_ids_to_remove, context)
            res[event.id] = {
                'new_attendee_ids': new_attendees,
                'old_attendee_ids': all_attendee_ids,
                'removed_attendee_ids': attendee_ids_to_remove
            }
        return res
def get_search_fields(self, browse_event, order_fields, r_date=None):
sort_fields = {}
for ord in order_fields:
if ord == 'id' and r_date:
sort_fields[ord] = '%s-%s' % (browse_event[ord], r_date.strftime("%Y%m%d%H%M%S"))
else:
sort_fields[ord] = browse_event[ord]
if type(browse_event[ord]) is openerp.osv.orm.browse_record:
name_get = browse_event[ord].name_get()
if len(name_get) and len(name_get[0]) >= 2:
sort_fields[ord] = name_get[0][1]
return sort_fields
    def get_recurrent_ids(self, cr, uid, event_id, domain, order=None, context=None):
        """Gives virtual event ids for recurring events
        This method gives ids of dates that comes between start date and end date of calendar views
        @param order: The fields (comma separated, format "FIELD {DESC|ASC}") on which the events should be sorted
        """
        if not context:
            context = {}
        if isinstance(event_id, (str, int, long)):
            ids_to_browse = [event_id]  # keep select for return
        else:
            ids_to_browse = event_id
        if order:
            order_fields = [field.split()[0] for field in order.split(',')]
        else:
            # fallback on self._order defined on the model
            order_fields = [field.split()[0] for field in self._order.split(',')]
        if 'id' not in order_fields:
            order_fields.append('id')
        result_data = []
        result = []
        # NOTE(review): 'result' only collects non-recurrent ids and is never
        # read after the loop — the returned ids all come from 'result_data'.
        for ev in self.browse(cr, uid, ids_to_browse, context=context):
            if not ev.recurrency or not ev.rrule:
                result.append(ev.id)
                result_data.append(self.get_search_fields(ev, order_fields))
                continue
            rdates = self.get_recurrent_date_by_event(cr, uid, ev, context=context)
            for r_date in rdates:
                # fix domain evaluation
                # step 1: check date and replace expression by True or False, replace other expressions by True
                # step 2: evaluation of & and |
                # check if there are one False
                pile = []
                ok = True
                for arg in domain:
                    if str(arg[0]) in ('start', 'stop', 'final_date'):
                        if (arg[1] == '='):
                            ok = r_date.strftime('%Y-%m-%d') == arg[2]
                        if (arg[1] == '>'):
                            ok = r_date.strftime('%Y-%m-%d') > arg[2]
                        if (arg[1] == '<'):
                            ok = r_date.strftime('%Y-%m-%d') < arg[2]
                        if (arg[1] == '>='):
                            ok = r_date.strftime('%Y-%m-%d') >= arg[2]
                        if (arg[1] == '<='):
                            ok = r_date.strftime('%Y-%m-%d') <= arg[2]
                        pile.append(ok)
                    elif str(arg) == str('&') or str(arg) == str('|'):
                        pile.append(arg)
                    else:
                        # non-date leaves were already filtered by the base
                        # search: treat them as matching
                        pile.append(True)
                pile.reverse()
                new_pile = []
                for item in pile:
                    if not isinstance(item, basestring):
                        res = item
                    elif str(item) == str('&'):
                        first = new_pile.pop()
                        second = new_pile.pop()
                        res = first and second
                    elif str(item) == str('|'):
                        first = new_pile.pop()
                        second = new_pile.pop()
                        res = first or second
                    new_pile.append(res)
                if [True for item in new_pile if not item]:
                    # at least one leaf evaluated to False: skip this occurrence
                    continue
                result_data.append(self.get_search_fields(ev, order_fields, r_date=r_date))
        if order_fields:
            def comparer(left, right):
                # compare field by field, honoring each field's direction
                for fn, mult in comparers:
                    result = cmp(fn(left), fn(right))
                    if result:
                        return mult * result
                return 0
            sort_params = [key.split()[0] if key[-4:].lower() != 'desc' else '-%s' % key.split()[0] for key in (order or self._order).split(',')]
            comparers = [((itemgetter(col[1:]), -1) if col[0] == '-' else (itemgetter(col), 1)) for col in sort_params]
            ids = [r['id'] for r in sorted(result_data, cmp=comparer)]
        if isinstance(event_id, (str, int, long)):
            return ids and ids[0] or False
        else:
            return ids
def compute_rule_string(self, data):
"""
Compute rule string according to value type RECUR of iCalendar from the values given.
@param self: the object pointer
@param data: dictionary of freq and interval value
@return: string containing recurring rule (empty if no rule)
"""
def get_week_string(freq, data):
weekdays = ['mo', 'tu', 'we', 'th', 'fr', 'sa', 'su']
if freq == 'weekly':
byday = map(lambda x: x.upper(), filter(lambda x: data.get(x) and x in weekdays, data))
if byday:
return ';BYDAY=' + ','.join(byday)
return ''
def get_month_string(freq, data):
if freq == 'monthly':
if data.get('month_by') == 'date' and (data.get('day') < 1 or data.get('day') > 31):
raise osv.except_osv(_('Error!'), ("Please select a proper day of the month."))
if data.get('month_by') == 'day': # Eg : Second Monday of the month
return ';BYDAY=' + data.get('byday') + data.get('week_list')
elif data.get('month_by') == 'date': # Eg : 16th of the month
return ';BYMONTHDAY=' + str(data.get('day'))
return ''
def get_end_date(data):
if data.get('final_date'):
data['end_date_new'] = ''.join((re.compile('\d')).findall(data.get('final_date'))) + 'T235959Z'
return (data.get('end_type') == 'count' and (';COUNT=' + str(data.get('count'))) or '') +\
((data.get('end_date_new') and data.get('end_type') == 'end_date' and (';UNTIL=' + data.get('end_date_new'))) or '')
freq = data.get('rrule_type', False) # day/week/month/year
res = ''
if freq:
interval_srting = data.get('interval') and (';INTERVAL=' + str(data.get('interval'))) or ''
res = 'FREQ=' + freq.upper() + get_week_string(freq, data) + interval_srting + get_end_date(data) + get_month_string(freq, data)
return res
def _get_empty_rrule_data(self):
return {
'byday': False,
'recurrency': False,
'final_date': False,
'rrule_type': False,
'month_by': False,
'interval': 0,
'count': False,
'end_type': False,
'mo': False,
'tu': False,
'we': False,
'th': False,
'fr': False,
'sa': False,
'su': False,
'day': False,
'week_list': False
}
    def _parse_rrule(self, rule, data, date_start):
        """
        Fill ``data`` with the recurrence fields extracted from an RRULE string.
        @param rule: iCalendar RECUR rule string
        @param data: dict of recurrence fields to update (see _get_empty_rrule_data)
        @param date_start: event start in server datetime format, used as dtstart
        @return: the updated ``data`` dict
        """
        day_list = ['mo', 'tu', 'we', 'th', 'fr', 'sa', 'su']
        rrule_type = ['yearly', 'monthly', 'weekly', 'daily']
        # NOTE(review): relies on dateutil.rrule private attributes
        # (_freq, _count, _interval, ...) — tied to the dateutil version in use
        r = rrule.rrulestr(rule, dtstart=datetime.strptime(date_start, DEFAULT_SERVER_DATETIME_FORMAT))
        if r._freq > 0 and r._freq < 4:
            data['rrule_type'] = rrule_type[r._freq]
        data['count'] = r._count
        data['interval'] = r._interval
        data['final_date'] = r._until and r._until.strftime(DEFAULT_SERVER_DATETIME_FORMAT)
        #repeat weekly
        if r._byweekday:
            for i in xrange(0, 7):
                if i in r._byweekday:
                    data[day_list[i]] = True
            data['rrule_type'] = 'weekly'
        #repeat monthly by nweekday ((weekday, weeknumber), )
        if r._bynweekday:
            data['week_list'] = day_list[r._bynweekday[0][0]].upper()
            data['byday'] = str(r._bynweekday[0][1])
            data['month_by'] = 'day'
            data['rrule_type'] = 'monthly'
        if r._bymonthday:
            data['day'] = r._bymonthday[0]
            data['month_by'] = 'date'
            data['rrule_type'] = 'monthly'
        #repeat yearly but for openerp it's monthly, take same information as monthly but interval is 12 times
        if r._bymonth:
            data['interval'] = data['interval'] * 12
        #FIXME: handle forever case
        #end of recurrence
        #in case of repeat for ever that we do not support right now
        if not (data.get('count') or data.get('final_date')):
            data['count'] = 100
        if data.get('count'):
            data['end_type'] = 'count'
        else:
            data['end_type'] = 'end_date'
        return data
def message_get_subscription_data(self, cr, uid, ids, user_pid=None, context=None):
res = {}
for virtual_id in ids:
real_id = calendar_id2real_id(virtual_id)
result = super(calendar_event, self).message_get_subscription_data(cr, uid, [real_id], user_pid=None, context=context)
res[virtual_id] = result[real_id]
return res
def onchange_partner_ids(self, cr, uid, ids, value, context=None):
""" The basic purpose of this method is to check that destination partners
effectively have email addresses. Otherwise a warning is thrown.
:param value: value format: [[6, 0, [3, 4]]]
"""
res = {'value': {}}
if not value or not value[0] or not value[0][0] == 6:
return
res.update(self.check_partners_email(cr, uid, value[0][2], context=context))
return res
def check_partners_email(self, cr, uid, partner_ids, context=None):
""" Verify that selected partner_ids have an email_address defined.
Otherwise throw a warning. """
partner_wo_email_lst = []
for partner in self.pool['res.partner'].browse(cr, uid, partner_ids, context=context):
if not partner.email:
partner_wo_email_lst.append(partner)
if not partner_wo_email_lst:
return {}
warning_msg = _('The following contacts have no email address :')
for partner in partner_wo_email_lst:
warning_msg += '\n- %s' % (partner.name)
return {'warning': {
'title': _('Email addresses not found'),
'message': warning_msg,
}}
# shows events of the day for this user
def _needaction_domain_get(self, cr, uid, context=None):
return [
('stop', '<=', time.strftime(DEFAULT_SERVER_DATE_FORMAT + ' 23:59:59')),
('start', '>=', time.strftime(DEFAULT_SERVER_DATE_FORMAT + ' 00:00:00')),
('user_id', '=', uid),
]
def message_post(self, cr, uid, thread_id, body='', subject=None, type='notification', subtype=None, parent_id=False, attachments=None, context=None, **kwargs):
if isinstance(thread_id, str):
thread_id = get_real_ids(thread_id)
if context.get('default_date'):
del context['default_date']
return super(calendar_event, self).message_post(cr, uid, thread_id, body=body, subject=subject, type=type, subtype=subtype, parent_id=parent_id, attachments=attachments, context=context, **kwargs)
def do_sendmail(self, cr, uid, ids, context=None):
for event in self.browse(cr, uid, ids, context):
current_user = self.pool['res.users'].browse(cr, uid, uid, context=context)
if current_user.email:
if self.pool['calendar.attendee']._send_mail_to_attendees(cr, uid, [att.id for att in event.attendee_ids], email_from=current_user.email, context=context):
self.message_post(cr, uid, event.id, body=_("An invitation email has been sent to attendee(s)"), subtype="calendar.subtype_invitation", context=context)
return
def get_attendee(self, cr, uid, meeting_id, context=None):
# Used for view in controller
invitation = {'meeting': {}, 'attendee': []}
meeting = self.browse(cr, uid, int(meeting_id), context=context)
invitation['meeting'] = {
'event': meeting.name,
'where': meeting.location,
'when': meeting.display_time
}
for attendee in meeting.attendee_ids:
invitation['attendee'].append({'name': attendee.cn, 'status': attendee.state})
return invitation
def get_interval(self, cr, uid, ids, date, interval, tz=None, context=None):
#Function used only in calendar_event_data.xml for email template
date = datetime.strptime(date.split('.')[0], DEFAULT_SERVER_DATETIME_FORMAT)
if tz:
timezone = pytz.timezone(tz or 'UTC')
date = date.replace(tzinfo=pytz.timezone('UTC')).astimezone(timezone)
if interval == 'day':
res = str(date.day)
elif interval == 'month':
res = date.strftime('%B') + " " + str(date.year)
elif interval == 'dayname':
res = date.strftime('%A')
elif interval == 'time':
dummy, format_time = self.get_date_formats(cr, uid, context=context)
res = date.strftime(format_time + " %Z")
return res
    def search(self, cr, uid, args, offset=0, limit=0, order=None, context=None, count=False):
        """Search events, expanding recurrences into virtual occurrences.

        When context['virtual_id'] is truthy (the default), the real matches
        are post-processed by get_recurrent_ids and offset/limit/order/count
        are applied afterwards on the expanded result list.
        """
        if context is None:
            context = {}
        if context.get('mymeetings', False):
            # restrict to meetings the current user attends
            partner_id = self.pool['res.users'].browse(cr, uid, uid, context).partner_id.id
            args += [('partner_ids', 'in', [partner_id])]
        new_args = []
        for arg in args:
            new_arg = arg
            if arg[0] in ('start_date', 'start_datetime', 'start',) and arg[1] == ">=":
                if context.get('virtual_id', True):
                    # a recurring event may still yield occurrences after its own
                    # start: also match recurring events on their final_date
                    new_args += ['|', '&', ('recurrency', '=', 1), ('final_date', arg[1], arg[2])]
            elif arg[0] == "id":
                # strip the date part from virtual ids in 'id' leaves
                new_id = get_real_ids(arg[2])
                new_arg = (arg[0], arg[1], new_id)
            new_args.append(new_arg)
        if not context.get('virtual_id', True):
            return super(calendar_event, self).search(cr, uid, new_args, offset=offset, limit=limit, order=order, count=count, context=context)
        # offset, limit, order and count must be treated separately as we may need to deal with virtual ids
        res = super(calendar_event, self).search(cr, uid, new_args, offset=0, limit=0, order=None, context=context, count=False)
        res = self.get_recurrent_ids(cr, uid, res, args, order=order, context=context)
        if count:
            return len(res)
        elif limit:
            return res[offset: offset + limit]
        return res
def copy(self, cr, uid, id, default=None, context=None):
default = default or {}
self._set_date(cr, uid, default, id=default.get('id'), context=context)
return super(calendar_event, self).copy(cr, uid, calendar_id2real_id(id), default, context)
def _detach_one_event(self, cr, uid, id, values=dict(), context=None):
real_event_id = calendar_id2real_id(id)
data = self.read(cr, uid, id, ['allday', 'start', 'stop', 'rrule', 'duration'])
data['start_date' if data['allday'] else 'start_datetime'] = data['start']
data['stop_date' if data['allday'] else 'stop_datetime'] = data['stop']
if data.get('rrule'):
data.update(
values,
recurrent_id=real_event_id,
recurrent_id_date=data.get('start'),
rrule_type=False,
rrule='',
recurrency=False,
final_date=datetime.strptime(data.get('start'), DEFAULT_SERVER_DATETIME_FORMAT if data['allday'] else DEFAULT_SERVER_DATETIME_FORMAT) + timedelta(hours=values.get('duration', False) or data.get('duration'))
)
#do not copy the id
if data.get('id'):
del(data['id'])
new_id = self.copy(cr, uid, real_event_id, default=data, context=context)
return new_id
def open_after_detach_event(self, cr, uid, ids, context=None):
if context is None:
context = {}
new_id = self._detach_one_event(cr, uid, ids[0], context=context)
return {
'type': 'ir.actions.act_window',
'res_model': 'calendar.event',
'view_mode': 'form',
'res_id': new_id,
'target': 'current',
'flags': {'form': {'action_buttons': True, 'options': {'mode': 'edit'}}}
}
def _name_search(self, cr, user, name='', args=None, operator='ilike', context=None, limit=100, name_get_uid=None):
for arg in args:
if arg[0] == 'id':
for n, calendar_id in enumerate(arg[2]):
if isinstance(calendar_id, str):
arg[2][n] = calendar_id.split('-')[0]
return super(calendar_event, self)._name_search(cr, user, name=name, args=args, operator=operator, context=context, limit=limit, name_get_uid=name_get_uid)
    def write(self, cr, uid, ids, values, context=None):
        """Write on events, redirecting or detaching virtual recurrence ids.

        A virtual id ('id-YYYYMMDDHHMMSS') is either redirected to its real
        event (when recurrency is being disabled or only real-id-only fields
        change) or detached into a standalone event receiving ``values``.
        Afterwards final_date is recomputed when the recurrence definition
        changed, and attendees are (re)notified when the date changed.
        """
        def _only_changes_to_apply_on_real_ids(field_names):
            ''' return True if changes are only to be made on the real ids'''
            for field in field_names:
                if field in ['start', 'start_date', 'start_datetime', 'stop', 'stop_date', 'stop_datetime', 'active']:
                    return True
            return False
        if not isinstance(ids, (tuple, list)):
            ids = [ids]
        context = context or {}
        self._set_date(cr, uid, values, id=ids[0], context=context)
        # normalize a single scalar id: '12' -> [12], '12-20140101000000' kept virtual
        for one_ids in ids:
            if isinstance(one_ids, (str, int, long)):
                if len(str(one_ids).split('-')) == 1:
                    ids = [int(one_ids)]
                else:
                    ids = [one_ids]
        res = False
        new_id = False
        # Special write of complex IDS
        for event_id in ids:
            if len(str(event_id).split('-')) == 1:
                continue
            # NOTE(review): removing from `ids` while iterating it skips the
            # element that follows each removed one — kept as-is to preserve
            # the existing behavior
            ids.remove(event_id)
            real_event_id = calendar_id2real_id(event_id)
            # if we are setting the recurrency flag to False or if we are only changing fields that
            # should be only updated on the real ID and not on the virtual (like message_follower_ids):
            # then set real ids to be updated.
            if not values.get('recurrency', True) or not _only_changes_to_apply_on_real_ids(values.keys()):
                ids.append(real_event_id)
                continue
            else:
                data = self.read(cr, uid, event_id, ['start', 'stop', 'rrule', 'duration'])
                if data.get('rrule'):
                    new_id = self._detach_one_event(cr, uid, event_id, values, context=None)
        res = super(calendar_event, self).write(cr, uid, ids, values, context=context)
        # set end_date for calendar searching
        if values.get('recurrency', True) and values.get('end_type', 'count') in ('count', unicode('count')) and \
                (values.get('rrule_type') or values.get('count') or values.get('start') or values.get('stop')):
            for id in ids:
                final_date = self._get_recurrency_end_date(cr, uid, id, context=context)
                super(calendar_event, self).write(cr, uid, [id], {'final_date': final_date}, context=context)
        attendees_create = False
        if values.get('partner_ids', False):
            attendees_create = self.create_attendees(cr, uid, ids, context)
        # notify attendees when the event date changed on an active event
        if (values.get('start_date') or values.get('start_datetime', False)) and values.get('active', True):
            the_id = new_id or (ids and int(ids[0]))
            if the_id:
                if attendees_create:
                    attendees_create = attendees_create[the_id]
                    mail_to_ids = list(set(attendees_create['old_attendee_ids']) - set(attendees_create['removed_attendee_ids']))
                else:
                    mail_to_ids = [att.id for att in self.browse(cr, uid, the_id, context=context).attendee_ids]
                if mail_to_ids:
                    current_user = self.pool['res.users'].browse(cr, uid, uid, context=context)
                    if self.pool['calendar.attendee']._send_mail_to_attendees(cr, uid, mail_to_ids, template_xmlid='calendar_template_meeting_changedate', email_from=current_user.email, context=context):
                        self.message_post(cr, uid, the_id, body=_("A email has been send to specify that the date has been changed !"), subtype="calendar.subtype_invitation", context=context)
        # NOTE(review): parses as `res or (True and False)`, i.e. `res or False`;
        # most likely intended to be just `res` — kept as-is
        return res or True and False
def create(self, cr, uid, vals, context=None):
if context is None:
context = {}
self._set_date(cr, uid, vals, id=False, context=context)
if not 'user_id' in vals: # Else bug with quick_create when we are filter on an other user
vals['user_id'] = uid
res = super(calendar_event, self).create(cr, uid, vals, context=context)
final_date = self._get_recurrency_end_date(cr, uid, res, context=context)
self.write(cr, uid, [res], {'final_date': final_date}, context=context)
self.create_attendees(cr, uid, [res], context=context)
return res
def read_group(self, cr, uid, domain, fields, groupby, offset=0, limit=None, context=None, orderby=False, lazy=True):
context = dict(context or {})
if 'date' in groupby:
raise osv.except_osv(_('Warning!'), _('Group by date is not supported, use the calendar view instead.'))
virtual_id = context.get('virtual_id', True)
context.update({'virtual_id': False})
res = super(calendar_event, self).read_group(cr, uid, domain, fields, groupby, offset=offset, limit=limit, context=context, orderby=orderby, lazy=lazy)
for result in res:
#remove the count, since the value is not consistent with the result of the search when expand the group
for groupname in groupby:
if result.get(groupname + "_count"):
del result[groupname + "_count"]
result.get('__context', {}).update({'virtual_id': virtual_id})
return res
    def read(self, cr, uid, ids, fields=None, context=None, load='_classic_read'):
        """Read (possibly virtual) event ids.

        Each virtual id is mapped to its real record, the occurrence's own
        start/stop values are recomputed from the date embedded in the id,
        and private events of other users are scrubbed down to 'Busy'.
        """
        if context is None:
            context = {}
        fields2 = fields and fields[:] or None
        # fields needed internally below even when the caller did not ask for them
        EXTRAFIELDS = ('class', 'user_id', 'duration', 'allday', 'start', 'start_date', 'start_datetime', 'rrule')
        for f in EXTRAFIELDS:
            if fields and (f not in fields):
                fields2.append(f)
        if isinstance(ids, (str, int, long)):
            select = [ids]
        else:
            select = ids
        # pair each requested id with its real database id
        select = map(lambda x: (x, calendar_id2real_id(x)), select)
        result = []
        real_data = super(calendar_event, self).read(cr, uid, [real_id for calendar_id, real_id in select], fields=fields2, context=context, load=load)
        real_data = dict(zip([x['id'] for x in real_data], real_data))
        for calendar_id, real_id in select:
            res = real_data[real_id].copy()
            # recompute this occurrence's start/stop from the virtual id's date part
            ls = calendar_id2real_id(calendar_id, with_date=res and res.get('duration', 0) > 0 and res.get('duration') or 1)
            if not isinstance(ls, (str, int, long)) and len(ls) >= 2:
                res['start'] = ls[1]
                res['stop'] = ls[2]
                if res['allday']:
                    res['start_date'] = ls[1]
                    res['stop_date'] = ls[2]
                else:
                    res['start_datetime'] = ls[1]
                    res['stop_datetime'] = ls[2]
                res['display_time'] = self._get_display_time(cr, uid, ls[1], ls[2], res['duration'], res['allday'], context=context)
            res['id'] = calendar_id
            result.append(res)
        # hide the details of other users' private events
        for r in result:
            if r['user_id']:
                user_id = type(r['user_id']) in (tuple, list) and r['user_id'][0] or r['user_id']
                if user_id == uid:
                    continue
            if r['class'] == 'private':
                for f in r.keys():
                    if f not in ('id', 'allday', 'start', 'stop', 'duration', 'user_id', 'state', 'interval', 'count', 'recurrent_id_date', 'rrule'):
                        if isinstance(r[f], list):
                            r[f] = []
                        else:
                            r[f] = False
                    if f == 'name':
                        r[f] = _('Busy')
        # strip the internally-added fields before returning
        for r in result:
            for k in EXTRAFIELDS:
                if (k in r) and (fields and (k not in fields)):
                    del r[k]
        if isinstance(ids, (str, int, long)):
            return result and result[0] or False
        return result
def unlink(self, cr, uid, ids, can_be_deleted=True, context=None):
if not isinstance(ids, list):
ids = [ids]
res = False
ids_to_exclure = []
ids_to_unlink = []
for event_id in ids:
if can_be_deleted and len(str(event_id).split('-')) == 1: # if ID REAL
if self.browse(cr, uid, int(event_id), context).recurrent_id:
ids_to_exclure.append(event_id)
else:
ids_to_unlink.append(int(event_id))
else:
ids_to_exclure.append(event_id)
if ids_to_unlink:
res = super(calendar_event, self).unlink(cr, uid, ids_to_unlink, context=context)
if ids_to_exclure:
for id_to_exclure in ids_to_exclure:
res = self.write(cr, uid, id_to_exclure, {'active': False}, context=context)
return res
class mail_message(osv.Model):
    _inherit = "mail.message"

    def search(self, cr, uid, args, offset=0, limit=0, order=None, context=None, count=False):
        '''
        convert the search on real ids in the case it was asked on virtual ids, then call super()
        '''
        for leaf in args:
            if leaf[0] == "res_id" and isinstance(leaf[2], str):
                leaf[2] = get_real_ids(leaf[2])
        return super(mail_message, self).search(cr, uid, args, offset=offset, limit=limit, order=order, context=context, count=count)

    def _find_allowed_model_wise(self, cr, uid, doc_model, doc_dict, context=None):
        """For calendar events, extend the allowed-documents dict with the
        virtual recurrence ids derived from the allowed real ids."""
        if context is None:
            context = {}
        if doc_model == 'calendar.event':
            order = context.get('order', self._order)
            recurrent_ids = self.pool[doc_model].get_recurrent_ids(cr, uid, doc_dict.keys(), [], order=order, context=context)
            for virtual_id in recurrent_ids:
                doc_dict.setdefault(virtual_id, doc_dict[get_real_ids(virtual_id)])
        return super(mail_message, self)._find_allowed_model_wise(cr, uid, doc_model, doc_dict, context=context)
class ir_attachment(osv.Model):
    _inherit = "ir.attachment"

    def search(self, cr, uid, args, offset=0, limit=0, order=None, context=None, count=False):
        '''
        convert the search on real ids in the case it was asked on virtual ids, then call super()
        '''
        for leaf in args:
            if leaf[0] == "res_id" and isinstance(leaf[2], str):
                leaf[2] = get_real_ids(leaf[2])
        return super(ir_attachment, self).search(cr, uid, args, offset=offset, limit=limit, order=order, context=context, count=count)

    def write(self, cr, uid, ids, vals, context=None):
        '''
        when posting an attachment (new or not), convert the virtual ids in real ids.
        '''
        res_id = vals.get('res_id')
        if isinstance(res_id, str):
            vals['res_id'] = get_real_ids(res_id)
        return super(ir_attachment, self).write(cr, uid, ids, vals, context=context)
class ir_http(osv.AbstractModel):
    _inherit = 'ir.http'

    def _auth_method_calendar(self):
        """Authenticate a calendar invitation request through its access token.

        Raises BadRequest when the token is unknown, or when a logged-in
        (non-anonymous) user does not match the invited attendee.
        """
        token = request.params['token']
        db = request.params['db']
        registry = openerp.modules.registry.RegistryManager.get(db)
        attendee_pool = registry.get('calendar.attendee')
        error_message = False
        with registry.cursor() as cr:
            attendee_id = attendee_pool.search(cr, openerp.SUPERUSER_ID, [('access_token', '=', token)])
            if not attendee_id:
                error_message = """Invalid Invitation Token."""
            elif request.session.uid and request.session.login != 'anonymous':
                # if valid session but user is not match
                attendee = attendee_pool.browse(cr, openerp.SUPERUSER_ID, attendee_id[0])
                user = registry.get('res.users').browse(cr, openerp.SUPERUSER_ID, request.session.uid)
                if attendee.partner_id.id != user.partner_id.id:
                    error_message = """Invitation cannot be forwarded via email. This event/meeting belongs to %s and you are logged in as %s. Please ask organizer to add you.""" % (attendee.email, user.email)
            if error_message:
                raise BadRequest(error_message)
        return True
class invite_wizard(osv.osv_memory):
    _inherit = 'mail.wizard.invite'

    def default_get(self, cr, uid, fields, context=None):
        '''
        in case someone clicked on 'invite others' wizard in the followers widget, transform virtual ids in real ids
        '''
        defaults = super(invite_wizard, self).default_get(cr, uid, fields, context=context)
        if 'res_id' in defaults:
            defaults['res_id'] = get_real_ids(defaults['res_id'])
        return defaults
[FIX] calendar: invalidate cache to avoid (pre)fetching invalid virtual IDs
Because the new API browses nearly everything, the virtual IDs used by
calendar.event end up in the ORM cache, and prefetching then tries to fetch
them from the database, resulting in ID-conversion errors. These IDs must
therefore be forcibly evicted from the cache to avoid the error.
# -*- coding: utf-8 -*-
import pytz
import re
import time
import openerp
import openerp.service.report
import uuid
from werkzeug.exceptions import BadRequest
from datetime import datetime, timedelta
from dateutil import parser
from dateutil import rrule
from dateutil.relativedelta import relativedelta
from openerp import tools, SUPERUSER_ID
from openerp.osv import fields, osv
from openerp.tools import DEFAULT_SERVER_DATE_FORMAT, DEFAULT_SERVER_DATETIME_FORMAT
from openerp.tools.translate import _
from openerp.http import request
from operator import itemgetter
import logging
_logger = logging.getLogger(__name__)
def calendar_id2real_id(calendar_id=None, with_date=False):
    """
    Convert a "virtual/recurring event id" (type string) into a real event id (type int).
    E.g. virtual/recurring event id is 4-20091201100000, so it will return 4.
    @param calendar_id: id of calendar
    @param with_date: if a value is passed to this param it will return dates based on value of withdate + calendar_id
    @return: real event id
    """
    if calendar_id and isinstance(calendar_id, (str, unicode)):
        parts = calendar_id.split('-')
        if len(parts) >= 2:
            real_id = parts[0]
            if with_date:
                # decode the occurrence date embedded in the virtual id and
                # derive the stop date from the given duration (in hours)
                real_date = time.strftime(DEFAULT_SERVER_DATETIME_FORMAT, time.strptime(parts[1], "%Y%m%d%H%M%S"))
                start = datetime.strptime(real_date, DEFAULT_SERVER_DATETIME_FORMAT)
                end = start + timedelta(hours=with_date)
                return (int(real_id), real_date, end.strftime(DEFAULT_SERVER_DATETIME_FORMAT))
            return int(real_id)
    return calendar_id and int(calendar_id) or calendar_id
def get_real_ids(ids):
    """Convert a (possibly virtual) id or a collection of them to real ids.

    @param ids: a single id (string/int/long) or a list/tuple of ids
    @return: the converted id, or a list of converted ids; any other input
        type is returned unchanged (the original implicitly returned None)
    """
    if isinstance(ids, (str, int, long)):
        return calendar_id2real_id(ids)
    if isinstance(ids, (list, tuple)):
        return [calendar_id2real_id(id) for id in ids]
    return ids
class calendar_attendee(osv.Model):
    """
    Calendar Attendee Information
    """
    _name = 'calendar.attendee'
    _rec_name = 'cn'
    _description = 'Attendee information'

    def _compute_data(self, cr, uid, ids, name, arg, context=None):
        """
        Compute data on function fields for attendee values.
        @param ids: list of calendar attendee's IDs
        @param name: name of field
        @return: dictionary of form {id: {'field Name': value'}}
        """
        name = name[0]
        result = {}
        for attdata in self.browse(cr, uid, ids, context=context):
            id = attdata.id
            result[id] = {}
            if name == 'cn':
                # display name: partner name when linked, else the raw email
                if attdata.partner_id:
                    result[id][name] = attdata.partner_id.name or False
                else:
                    result[id][name] = attdata.email or ''
        return result

    STATE_SELECTION = [
        ('needsAction', 'Needs Action'),
        ('tentative', 'Uncertain'),
        ('declined', 'Declined'),
        ('accepted', 'Accepted'),
    ]

    _columns = {
        'state': fields.selection(STATE_SELECTION, 'Status', readonly=True, help="Status of the attendee's participation"),
        'cn': fields.function(_compute_data, string='Common name', type="char", multi='cn', store=True),
        'partner_id': fields.many2one('res.partner', 'Contact', readonly="True"),
        'email': fields.char('Email', help="Email of Invited Person"),
        'availability': fields.selection([('free', 'Free'), ('busy', 'Busy')], 'Free/Busy', readonly="True"),
        # secret used by ir_http._auth_method_calendar to authenticate invitation links
        'access_token': fields.char('Invitation Token'),
        'event_id': fields.many2one('calendar.event', 'Meeting linked'),
    }
    _defaults = {
        'state': 'needsAction',
    }

    def copy(self, cr, uid, id, default=None, context=None):
        # an attendee belongs to exactly one event; duplication is forbidden
        raise osv.except_osv(_('Warning!'), _('You cannot duplicate a calendar attendee.'))

    def onchange_partner_id(self, cr, uid, ids, partner_id, context=None):
        """
        Make entry on email and availability on change of partner_id field.
        @param partner_id: changed value of partner id
        """
        if not partner_id:
            return {'value': {'email': ''}}
        partner = self.pool['res.partner'].browse(cr, uid, partner_id, context=context)
        return {'value': {'email': partner.email}}

    def get_ics_file(self, cr, uid, event_obj, context=None):
        """
        Returns iCalendar file for the event invitation.
        @param event_obj: event object (browse record)
        @return: .ics file content, or None when the vobject library is missing
        """
        res = None

        def ics_datetime(idate, allday=False):
            # server-format date string -> UTC-aware datetime
            # (date-only part is used for all-day events)
            if idate:
                if allday:
                    return datetime.strptime(idate.split(' ')[0], DEFAULT_SERVER_DATE_FORMAT).replace(tzinfo=pytz.timezone('UTC'))
                else:
                    return datetime.strptime(idate.split('.')[0], DEFAULT_SERVER_DATETIME_FORMAT).replace(tzinfo=pytz.timezone('UTC'))
            return False
        try:
            # FIXME: why isn't this in CalDAV?
            import vobject
        except ImportError:
            return res
        cal = vobject.iCalendar()
        event = cal.add('vevent')
        if not event_obj.start or not event_obj.stop:
            raise osv.except_osv(_('Warning!'), _("First you have to specify the date of the invitation."))
        event.add('created').value = ics_datetime(time.strftime(DEFAULT_SERVER_DATETIME_FORMAT))
        event.add('dtstart').value = ics_datetime(event_obj.start, event_obj.allday)
        event.add('dtend').value = ics_datetime(event_obj.stop, event_obj.allday)
        event.add('summary').value = event_obj.name
        if event_obj.description:
            event.add('description').value = event_obj.description
        if event_obj.location:
            event.add('location').value = event_obj.location
        if event_obj.rrule:
            event.add('rrule').value = event_obj.rrule
        if event_obj.alarm_ids:
            for alarm in event_obj.alarm_ids:
                # one VALARM per alarm, triggered relative to the event start
                valarm = event.add('valarm')
                interval = alarm.interval
                duration = alarm.duration
                trigger = valarm.add('TRIGGER')
                trigger.params['related'] = ["START"]
                if interval == 'days':
                    delta = timedelta(days=duration)
                elif interval == 'hours':
                    delta = timedelta(hours=duration)
                elif interval == 'minutes':
                    delta = timedelta(minutes=duration)
                trigger.value = delta
                valarm.add('DESCRIPTION').value = alarm.name or 'OpenERP'
        for attendee in event_obj.attendee_ids:
            attendee_add = event.add('attendee')
            attendee_add.value = 'MAILTO:' + (attendee.email or '')
        res = cal.serialize()
        return res

    def _send_mail_to_attendees(self, cr, uid, ids, email_from=tools.config.get('email_from', False),
                                template_xmlid='calendar_template_meeting_invitation', context=None):
        """
        Send mail for event invitation to event attendees.
        @param email_from: email address for user sending the mail
        @return: result of mail.mail.send() when mails were queued, else False
        """
        if context is None:
            # guard added: the original dereferenced context unconditionally
            # (context.get / context.copy) and crashed when called without it
            context = {}
        res = False
        if self.pool['ir.config_parameter'].get_param(cr, uid, 'calendar.block_mail', default=False) or context.get("no_mail_to_attendees"):
            return res
        mail_ids = []
        data_pool = self.pool['ir.model.data']
        mailmess_pool = self.pool['mail.message']
        mail_pool = self.pool['mail.mail']
        template_pool = self.pool['email.template']
        local_context = context.copy()
        # state -> color mapping consumed by the invitation email template
        color = {
            'needsAction': 'grey',
            'accepted': 'green',
            'tentative': '#FFFF00',
            'declined': 'red'
        }
        if not isinstance(ids, (tuple, list)):
            ids = [ids]
        dummy, template_id = data_pool.get_object_reference(cr, uid, 'calendar', template_xmlid)
        dummy, act_id = data_pool.get_object_reference(cr, uid, 'calendar', "view_calendar_event_calendar")
        local_context.update({
            'color': color,
            'action_id': self.pool['ir.actions.act_window'].search(cr, uid, [('view_id', '=', act_id)], context=context)[0],
            'dbname': cr.dbname,
            'base_url': self.pool['ir.config_parameter'].get_param(cr, uid, 'web.base.url', default='http://localhost:8069', context=context)
        })
        for attendee in self.browse(cr, uid, ids, context=context):
            # never mail the organizer about their own invitation
            if attendee.email and email_from and attendee.email != email_from:
                ics_file = self.get_ics_file(cr, uid, attendee.event_id, context=context)
                mail_id = template_pool.send_mail(cr, uid, template_id, attendee.id, context=local_context)
                vals = {}
                if ics_file:
                    vals['attachment_ids'] = [(0, 0, {'name': 'invitation.ics',
                                                      'datas_fname': 'invitation.ics',
                                                      'datas': str(ics_file).encode('base64')})]
                vals['model'] = None  # We don't want to have the mail in the tchatter while in queue!
                the_mailmess = mail_pool.browse(cr, uid, mail_id, context=context).mail_message_id
                mailmess_pool.write(cr, uid, [the_mailmess.id], vals, context=context)
                mail_ids.append(mail_id)
        if mail_ids:
            res = mail_pool.send(cr, uid, mail_ids, context=context)
        return res

    def onchange_user_id(self, cr, uid, ids, user_id, *args, **argv):
        """
        Make entry on email and availability on change of user_id field.
        @param ids: list of attendee's IDs
        @param user_id: changed value of User id
        @return: dictionary of values which put value in email and availability fields
        """
        if not user_id:
            return {'value': {'email': ''}}
        user = self.pool['res.users'].browse(cr, uid, user_id, *args)
        return {'value': {'email': user.email, 'availability': user.availability}}

    def do_tentative(self, cr, uid, ids, context=None, *args):
        """
        Makes event invitation as Tentative.
        @param ids: list of attendee's IDs
        """
        return self.write(cr, uid, ids, {'state': 'tentative'}, context)

    def do_accept(self, cr, uid, ids, context=None, *args):
        """
        Marks event invitation as Accepted.
        @param ids: list of attendee's IDs
        """
        if context is None:
            context = {}
        meeting_obj = self.pool['calendar.event']
        res = self.write(cr, uid, ids, {'state': 'accepted'}, context)
        for attendee in self.browse(cr, uid, ids, context=context):
            meeting_obj.message_post(cr, uid, attendee.event_id.id, body=_(("%s has accepted invitation") % (attendee.cn)),
                                     subtype="calendar.subtype_invitation", context=context)
        return res

    def do_decline(self, cr, uid, ids, context=None, *args):
        """
        Marks event invitation as Declined.
        @param ids: list of calendar attendee's IDs
        """
        if context is None:
            context = {}
        meeting_obj = self.pool['calendar.event']
        res = self.write(cr, uid, ids, {'state': 'declined'}, context)
        for attendee in self.browse(cr, uid, ids, context=context):
            meeting_obj.message_post(cr, uid, attendee.event_id.id, body=_(("%s has declined invitation") % (attendee.cn)), subtype="calendar.subtype_invitation", context=context)
        return res

    def create(self, cr, uid, vals, context=None):
        """Create an attendee; when no email is given, derive it from the
        colon-separated 'cn' value (the part containing an '@')."""
        if context is None:
            context = {}
        if not vals.get("email") and vals.get("cn"):
            cnval = vals.get("cn").split(':')
            email = filter(lambda x: x.__contains__('@'), cnval)
            vals['email'] = email and email[0] or ''
            vals['cn'] = vals.get("cn")
        res = super(calendar_attendee, self).create(cr, uid, vals, context=context)
        return res
class res_partner(osv.Model):
    _inherit = 'res.partner'
    _columns = {
        'calendar_last_notif_ack': fields.datetime('Last notification marked as read from base Calendar'),
    }

    def get_attendee_detail(self, cr, uid, ids, meeting_id, context=None):
        """
        Return a list of tuple (id, name, status)
        Used by web_calendar.js : Many2ManyAttendee
        """
        details = []
        meeting = None
        if meeting_id:
            meeting = self.pool['calendar.event'].browse(cr, uid, get_real_ids(meeting_id), context=context)
        for partner in self.browse(cr, uid, ids, context=context):
            entry = self.name_get(cr, uid, [partner.id], context)[0]
            if meeting:
                # append the partner's attendance state when known
                for attendee in meeting.attendee_ids:
                    if attendee.partner_id.id == partner.id:
                        entry = (entry[0], entry[1], attendee.state)
            details.append(entry)
        return details

    def _set_calendar_last_notif_ack(self, cr, uid, context=None):
        """Stamp the current user's partner with 'now' as the last time
        calendar notifications were acknowledged."""
        partner = self.pool['res.users'].browse(cr, uid, uid, context=context).partner_id
        self.write(cr, uid, partner.id, {'calendar_last_notif_ack': datetime.now()}, context=context)
        return
class calendar_alarm_manager(osv.AbstractModel):
_name = 'calendar.alarm_manager'
    def get_next_potential_limit_alarm(self, cr, uid, seconds, notif=True, mail=True, partner_id=None, context=None):
        """Find events whose alarm window intersects the next ``seconds`` seconds.

        @param seconds: look-ahead window in seconds (typically the cron interval)
        @param notif: include alarms of type 'notification'
        @param mail: include alarms of type 'email'
        @param partner_id: optional filter on events this partner attends
        @return: dict {event_id: {'event_id', 'first_alarm', 'last_alarm',
                 'first_meeting', 'last_meeting', 'min_duration',
                 'max_duration', 'rrule'}}
        """
        res = {}
        # first/last alarm per event, derived from the min/max alarm delay
        # (in minutes) over all alarms of the requested type(s)
        base_request = """
                    SELECT
                        cal.id,
                        cal.start - interval '1' minute  * calcul_delta.max_delta AS first_alarm,
                        CASE
                            WHEN cal.recurrency THEN cal.final_date - interval '1' minute  * calcul_delta.min_delta
                            ELSE cal.stop - interval '1' minute  * calcul_delta.min_delta
                        END as last_alarm,
                        cal.start as first_event_date,
                        CASE
                            WHEN cal.recurrency THEN cal.final_date
                            ELSE cal.stop
                        END as last_event_date,
                        calcul_delta.min_delta,
                        calcul_delta.max_delta,
                        cal.rrule AS rule
                    FROM
                        calendar_event AS cal
                        RIGHT JOIN
                            (
                                SELECT
                                    rel.calendar_event_id, max(alarm.duration_minutes) AS max_delta,min(alarm.duration_minutes) AS min_delta
                                FROM
                                    calendar_alarm_calendar_event_rel AS rel
                                        LEFT JOIN calendar_alarm AS alarm ON alarm.id = rel.calendar_alarm_id
                                WHERE alarm.type in %s
                                GROUP BY rel.calendar_event_id
                            ) AS calcul_delta ON calcul_delta.calendar_event_id = cal.id
             """
        filter_user = """
                RIGHT JOIN calendar_event_res_partner_rel AS part_rel ON part_rel.calendar_event_id = cal.id
                    AND part_rel.res_partner_id = %s
        """
        #Add filter on type
        type_to_read = ()
        if notif:
            type_to_read += ('notification',)
        if mail:
            type_to_read += ('email',)
        tuple_params = (type_to_read,)
        # ADD FILTER ON PARTNER_ID
        if partner_id:
            base_request += filter_user
            tuple_params += (partner_id, )
        #Add filter on hours
        tuple_params += (seconds, seconds,)
        # base_request is string-interpolated but only contains the trusted SQL
        # above; all user-provided values go through query parameters
        cr.execute("""SELECT *
                        FROM ( %s WHERE cal.active = True ) AS ALL_EVENTS
                       WHERE ALL_EVENTS.first_alarm < (now() at time zone 'utc' + interval '%%s' second )
                         AND ALL_EVENTS.last_alarm > (now() at time zone 'utc' - interval '%%s' second )
                   """ % base_request, tuple_params)
        for event_id, first_alarm, last_alarm, first_meeting, last_meeting, min_duration, max_duration, rule in cr.fetchall():
            res[event_id] = {
                'event_id': event_id,
                'first_alarm': first_alarm,
                'last_alarm': last_alarm,
                'first_meeting': first_meeting,
                'last_meeting': last_meeting,
                'min_duration': min_duration,
                'max_duration': max_duration,
                'rrule': rule
            }
        return res
def do_check_alarm_for_one_date(self, cr, uid, one_date, event, event_maxdelta, in_the_next_X_seconds, after=False, notif=True, mail=True, context=None):
res = []
alarm_type = []
if notif:
alarm_type.append('notification')
if mail:
alarm_type.append('email')
if one_date - timedelta(minutes=event_maxdelta) < datetime.now() + timedelta(seconds=in_the_next_X_seconds): # if an alarm is possible for this date
for alarm in event.alarm_ids:
if alarm.type in alarm_type and \
one_date - timedelta(minutes=alarm.duration_minutes) < datetime.now() + timedelta(seconds=in_the_next_X_seconds) and \
(not after or one_date - timedelta(minutes=alarm.duration_minutes) > datetime.strptime(after.split('.')[0], DEFAULT_SERVER_DATETIME_FORMAT)):
alert = {
'alarm_id': alarm.id,
'event_id': event.id,
'notify_at': one_date - timedelta(minutes=alarm.duration_minutes),
}
res.append(alert)
return res
def get_next_mail(self, cr, uid, context=None):
cron = self.pool.get('ir.cron').search(cr, uid, [('model', 'ilike', self._name)], context=context)
if cron and len(cron) == 1:
cron = self.pool.get('ir.cron').browse(cr, uid, cron[0], context=context)
else:
_logger.exception("Cron for " + self._name + " can not be identified !")
if cron.interval_type == "weeks":
cron_interval = cron.interval_number * 7 * 24 * 60 * 60
elif cron.interval_type == "days":
cron_interval = cron.interval_number * 24 * 60 * 60
elif cron.interval_type == "hours":
cron_interval = cron.interval_number * 60 * 60
elif cron.interval_type == "minutes":
cron_interval = cron.interval_number * 60
elif cron.interval_type == "seconds":
cron_interval = cron.interval_number
if not cron_interval:
_logger.exception("Cron delay can not be computed !")
all_events = self.get_next_potential_limit_alarm(cr, uid, cron_interval, notif=False, context=context)
for event in all_events: # .values()
max_delta = all_events[event]['max_duration']
curEvent = self.pool.get('calendar.event').browse(cr, uid, event, context=context)
if curEvent.recurrency:
bFound = False
LastFound = False
for one_date in self.pool.get('calendar.event').get_recurrent_date_by_event(cr, uid, curEvent, context=context):
in_date_format = one_date.replace(tzinfo=None)
LastFound = self.do_check_alarm_for_one_date(cr, uid, in_date_format, curEvent, max_delta, cron_interval, notif=False, context=context)
if LastFound:
for alert in LastFound:
self.do_mail_reminder(cr, uid, alert, context=context)
if not bFound: # if it's the first alarm for this recurrent event
bFound = True
if bFound and not LastFound: # if the precedent event had an alarm but not this one, we can stop the search for this event
break
else:
in_date_format = datetime.strptime(curEvent.start, DEFAULT_SERVER_DATETIME_FORMAT)
LastFound = self.do_check_alarm_for_one_date(cr, uid, in_date_format, curEvent, max_delta, cron_interval, notif=False, context=context)
if LastFound:
for alert in LastFound:
self.do_mail_reminder(cr, uid, alert, context=context)
    def get_next_notif(self, cr, uid, context=None):
        """Return the browser-notification alerts due for the current user
        within the next ajax polling period.

        :return: list of payload dicts built by do_notif_reminder
        """
        # Must match the polling interval used by the web client.
        ajax_check_every_seconds = 300
        partner = self.pool.get('res.users').browse(cr, uid, uid, context=context).partner_id
        all_notif = []

        if not partner:
            return []

        all_events = self.get_next_potential_limit_alarm(cr, uid, ajax_check_every_seconds, partner_id=partner.id, mail=False, context=context)

        for event in all_events:  # .values()
            max_delta = all_events[event]['max_duration']
            curEvent = self.pool.get('calendar.event').browse(cr, uid, event, context=context)
            if curEvent.recurrency:
                bFound = False
                LastFound = False
                for one_date in self.pool.get("calendar.event").get_recurrent_date_by_event(cr, uid, curEvent, context=context):
                    in_date_format = one_date.replace(tzinfo=None)
                    # Only alarms after the user's last acknowledgement count.
                    LastFound = self.do_check_alarm_for_one_date(cr, uid, in_date_format, curEvent, max_delta, ajax_check_every_seconds, after=partner.calendar_last_notif_ack, mail=False, context=context)
                    if LastFound:
                        for alert in LastFound:
                            all_notif.append(self.do_notif_reminder(cr, uid, alert, context=context))
                        if not bFound:  # if it's the first alarm for this recurrent event
                            bFound = True
                    if bFound and not LastFound:  # if the precedent event had alarm but not this one, we can stop the search for this event
                        break
            else:
                in_date_format = datetime.strptime(curEvent.start, DEFAULT_SERVER_DATETIME_FORMAT)
                LastFound = self.do_check_alarm_for_one_date(cr, uid, in_date_format, curEvent, max_delta, ajax_check_every_seconds, partner.calendar_last_notif_ack, mail=False, context=context)
                if LastFound:
                    for alert in LastFound:
                        all_notif.append(self.do_notif_reminder(cr, uid, alert, context=context))
        return all_notif
def do_mail_reminder(self, cr, uid, alert, context=None):
if context is None:
context = {}
res = False
event = self.pool['calendar.event'].browse(cr, uid, alert['event_id'], context=context)
alarm = self.pool['calendar.alarm'].browse(cr, uid, alert['alarm_id'], context=context)
if alarm.type == 'email':
res = self.pool['calendar.attendee']._send_mail_to_attendees(cr, uid, [att.id for att in event.attendee_ids], template_xmlid='calendar_template_meeting_reminder', context=context)
return res
    def do_notif_reminder(self, cr, uid, alert, context=None):
        """Build the client-side notification payload for one alert.

        :param alert: dict with 'event_id', 'alarm_id' and 'notify_at'
        :return: dict (event_id, title, message, timer in seconds, notify_at)
                 for 'notification' alarms; implicitly None otherwise
        """
        alarm = self.pool['calendar.alarm'].browse(cr, uid, alert['alarm_id'], context=context)
        event = self.pool['calendar.event'].browse(cr, uid, alert['event_id'], context=context)

        if alarm.type == 'notification':
            message = event.display_time

            # Seconds from now until the alarm must fire (days * 86400 + seconds).
            delta = alert['notify_at'] - datetime.now()
            delta = delta.seconds + delta.days * 3600 * 24

            return {
                'event_id': event.id,
                'title': event.name,
                'message': message,
                'timer': delta,
                'notify_at': alert['notify_at'].strftime(DEFAULT_SERVER_DATETIME_FORMAT),
            }
class calendar_alarm(osv.Model):
    _name = 'calendar.alarm'
    _description = 'Event alarm'

    # How many minutes one unit of each supported interval represents.
    _INTERVAL_TO_MINUTES = {'minutes': 1, 'hours': 60, 'days': 60 * 24}

    def _get_duration(self, cr, uid, ids, field_name, arg, context=None):
        """Compute the alarm delay in minutes from (duration, interval)."""
        res = {}
        for alarm in self.browse(cr, uid, ids, context=context):
            factor = self._INTERVAL_TO_MINUTES.get(alarm.interval)
            # Unknown interval values yield 0 minutes.
            res[alarm.id] = alarm.duration * factor if factor else 0
        return res

    _columns = {
        'name': fields.char('Name', required=True),
        'type': fields.selection([('notification', 'Notification'), ('email', 'Email')], 'Type', required=True),
        'duration': fields.integer('Amount', required=True),
        'interval': fields.selection([('minutes', 'Minutes'), ('hours', 'Hours'), ('days', 'Days')], 'Unit', required=True),
        'duration_minutes': fields.function(_get_duration, type='integer', string='duration_minutes', store=True),
    }

    _defaults = {
        'type': 'notification',
        'duration': 1,
        'interval': 'hours',
    }
class ir_values(osv.Model):
    _inherit = 'ir.values'

    def _map_virtual_ids(self, models):
        """Replace each (model, virtual_id) pair by (model, real_event_id)."""
        mapped = []
        for entry in models:
            if type(entry) in (list, tuple):
                mapped.append((entry[0], calendar_id2real_id(entry[1])))
            else:
                mapped.append(entry)
        return mapped

    def set(self, cr, uid, key, key2, name, models, value, replace=True, isobject=False, meta=False, preserve_user=False, company=False):
        """Store ir.values after converting calendar virtual ids to real ids."""
        return super(ir_values, self).set(cr, uid, key, key2, name, self._map_virtual_ids(models),
                                          value, replace, isobject, meta, preserve_user, company)

    def get(self, cr, uid, key, key2, models, meta=False, context=None, res_id_req=False, without_user=True, key2_req=True):
        """Read ir.values after converting calendar virtual ids to real ids."""
        if context is None:
            context = {}
        return super(ir_values, self).get(cr, uid, key, key2, self._map_virtual_ids(models),
                                          meta, context, res_id_req, without_user, key2_req)
class ir_model(osv.Model):
    _inherit = 'ir.model'

    def read(self, cr, uid, ids, fields=None, context=None, load='_classic_read'):
        """read() that accepts calendar virtual ids and maps each result's
        'id' back to the real database id."""
        one_record = isinstance(ids, (str, int, long))
        new_ids = [ids] if one_record else ids
        if context is None:
            context = {}
        data = super(ir_model, self).read(cr, uid, new_ids, fields=fields, context=context, load=load)
        for val in (data or []):
            val['id'] = calendar_id2real_id(val['id'])
        return data[0] if one_record else data
# Monkey-patch the report service so reports launched from calendar views can
# receive virtual recurrence ids (e.g. '12-20140509123000').
original_exp_report = openerp.service.report.exp_report


def exp_report(db, uid, object, ids, data=None, context=None):
    """
    Export Report

    Translate calendar virtual ids into real database ids before delegating
    to the original service handler.
    """
    if object == 'printscreen.list':
        # Bug fix: delegate and return immediately; the code used to fall
        # through and run the report a second time with converted ids.
        return original_exp_report(db, uid, object, ids, data, context)
    new_ids = [calendar_id2real_id(id) for id in ids]
    # Bug fix: guard against data=None before looking up data['id'].
    if data and data.get('id'):
        data['id'] = calendar_id2real_id(data['id'])
    return original_exp_report(db, uid, object, new_ids, data, context)

openerp.service.report.exp_report = exp_report
class calendar_event_type(osv.Model):
    # Simple tag/category for meetings; linked from calendar.event.categ_ids.
    _name = 'calendar.event.type'
    _description = 'Meeting Type'
    _columns = {
        'name': fields.char('Name', required=True, translate=True),
    }
class calendar_event(osv.Model):
    """ Model for Calendar Event """
    _name = 'calendar.event'
    _description = "Event"
    # Newest events first by default.
    _order = "id desc"
    _inherit = ["mail.thread", "ir.needaction_mixin"]
def do_run_scheduler(self, cr, uid, id, context=None):
self.pool['calendar.alarm_manager'].get_next_mail(cr, uid, context=context)
    def get_recurrent_date_by_event(self, cr, uid, event, context=None):
        """Get recurrent dates based on Rule string and all event where recurrent_id is child

        Expands ``event.rrule`` into concrete occurrence datetimes (returned
        in UTC), excluding the dates of detached occurrences (records whose
        recurrent_id points at this event).
        """
        def todate(date):
            # Rebuild a datetime from the digits of the stored string, then
            # localize it so it can be compared with the rule set's dates.
            val = parser.parse(''.join((re.compile('\d')).findall(date)))
            ## Dates are localized to saved timezone if any, else current timezone.
            if not val.tzinfo:
                val = pytz.UTC.localize(val)
            return val.astimezone(timezone)

        # NOTE(review): context is dereferenced without a None guard here;
        # callers appear to always pass one.
        timezone = pytz.timezone(context.get('tz') or 'UTC')

        startdate = pytz.UTC.localize(datetime.strptime(event.start, DEFAULT_SERVER_DATETIME_FORMAT))  # Add "+hh:mm" timezone
        if not startdate:
            # Dead in practice: localize() never returns a falsy value.
            startdate = datetime.now()

        ## Convert the start date to saved timezone (or context tz) as it'll
        ## define the correct hour/day asked by the user to repeat for recurrence.
        startdate = startdate.astimezone(timezone)  # transform "+hh:mm" timezone
        rset1 = rrule.rrulestr(str(event.rrule), dtstart=startdate, forceset=True)
        # Occurrences that were detached/edited individually must be excluded.
        ids_depending = self.search(cr, uid, [('recurrent_id', '=', event.id), '|', ('active', '=', False), ('active', '=', True)], context=context)
        all_events = self.browse(cr, uid, ids_depending, context=context)
        for ev in all_events:
            # NOTE(review): _exdate is a private dateutil attribute.
            rset1._exdate.append(todate(ev.recurrent_id_date))
        return [d.astimezone(pytz.UTC) for d in rset1]
def _get_recurrency_end_date(self, cr, uid, id, context=None):
data = self.read(cr, uid, id, ['final_date', 'recurrency', 'rrule_type', 'count', 'end_type', 'stop'], context=context)
if not data.get('recurrency'):
return False
end_type = data.get('end_type')
final_date = data.get('final_date')
if end_type == 'count' and all(data.get(key) for key in ['count', 'rrule_type', 'stop']):
count = data['count'] + 1
delay, mult = {
'daily': ('days', 1),
'weekly': ('days', 7),
'monthly': ('months', 1),
'yearly': ('years', 1),
}[data['rrule_type']]
deadline = datetime.strptime(data['stop'], tools.DEFAULT_SERVER_DATETIME_FORMAT)
return deadline + relativedelta(**{delay: count * mult})
return final_date
def _find_my_attendee(self, cr, uid, meeting_ids, context=None):
"""
Return the first attendee where the user connected has been invited from all the meeting_ids in parameters
"""
user = self.pool['res.users'].browse(cr, uid, uid, context=context)
for meeting_id in meeting_ids:
for attendee in self.browse(cr, uid, meeting_id, context).attendee_ids:
if user.partner_id.id == attendee.partner_id.id:
return attendee
return False
def get_date_formats(self, cr, uid, context):
lang = context.get("lang")
res_lang = self.pool.get('res.lang')
lang_params = {}
if lang:
ids = res_lang.search(request.cr, uid, [("code", "=", lang)])
if ids:
lang_params = res_lang.read(request.cr, uid, ids[0], ["date_format", "time_format"])
format_date = lang_params.get("date_format", '%B-%d-%Y')
format_time = lang_params.get("time_format", '%I-%M %p')
return (format_date, format_time)
def get_display_time_tz(self, cr, uid, ids, tz=False, context=None):
context = dict(context or {})
if tz:
context["tz"] = tz
ev = self.browse(cr, uid, ids, context=context)[0]
return self._get_display_time(cr, uid, ev.start, ev.stop, ev.duration, ev.allday, context=context)
    def _get_display_time(self, cr, uid, start, stop, zduration, zallday, context=None):
        """
        Return date and time (from to from) based on duration with timezone in string :
        eg.
        1) if user add duration for 2 hours, return : August-23-2013 at (04-30 To 06-30) (Europe/Brussels)
        2) if event all day ,return : AllDay, July-31-2013
        """
        context = dict(context or {})

        tz = context.get('tz', False)
        if not tz:  # tz can have a value False, so dont do it in the default value of get !
            # Fall back to the user's configured timezone (read as superuser).
            context['tz'] = self.pool.get('res.users').read(cr, SUPERUSER_ID, uid, ['tz'])['tz']
            tz = context['tz']

        format_date, format_time = self.get_date_formats(cr, uid, context=context)
        # Convert the UTC strings into the user's timezone for display.
        date = fields.datetime.context_timestamp(cr, uid, datetime.strptime(start, tools.DEFAULT_SERVER_DATETIME_FORMAT), context=context)
        date_deadline = fields.datetime.context_timestamp(cr, uid, datetime.strptime(stop, tools.DEFAULT_SERVER_DATETIME_FORMAT), context=context)
        event_date = date.strftime(format_date)
        display_time = date.strftime(format_time)

        if zallday:
            time = _("AllDay , %s") % (event_date)
        elif zduration < 24:
            # Same-day rendering: the end time is derived from the duration,
            # not from 'stop'.
            duration = date + timedelta(hours=zduration)
            time = ("%s at (%s To %s) (%s)") % (event_date, display_time, duration.strftime(format_time), tz)
        else:
            time = ("%s at %s To\n %s at %s (%s)") % (event_date, display_time, date_deadline.strftime(format_date), date_deadline.strftime(format_time), tz)
        return time
def _compute(self, cr, uid, ids, fields, arg, context=None):
res = {}
for meeting_id in ids:
res[meeting_id] = {}
attendee = self._find_my_attendee(cr, uid, [meeting_id], context)
meeting = self.browse(cr, uid, [meeting_id], context=context)[0]
for field in fields:
if field == 'is_attendee':
res[meeting_id][field] = True if attendee else False
elif field == 'attendee_status':
res[meeting_id][field] = attendee.state if attendee else 'needsAction'
elif field == 'display_time':
res[meeting_id][field] = self._get_display_time(cr, uid, meeting.start, meeting.stop, meeting.duration, meeting.allday, context=context)
elif field == "display_start":
res[meeting_id][field] = meeting.start_date if meeting.allday else meeting.start_datetime
elif field == 'start':
res[meeting_id][field] = meeting.start_date if meeting.allday else meeting.start_datetime
elif field == 'stop':
res[meeting_id][field] = meeting.stop_date if meeting.allday else meeting.stop_datetime
return res
    def _get_rulestring(self, cr, uid, ids, name, arg, context=None):
        """
        Gets Recurrence rule string according to value type RECUR of iCalendar from the values given.
        @return: dictionary of rrule value.
        """
        result = {}
        if not isinstance(ids, list):
            ids = [ids]

        for id in ids:
            #read these fields as SUPERUSER because if the record is private a normal search could return False and raise an error
            data = self.browse(cr, SUPERUSER_ID, id, context=context)
            # Validate before computing; bad values abort the whole batch.
            if data.interval and data.interval < 0:
                raise osv.except_osv(_('Warning!'), _('Interval cannot be negative.'))
            if data.count and data.count <= 0:
                raise osv.except_osv(_('Warning!'), _('Count cannot be negative or 0.'))
            # Re-read as the requesting user to build the rule values.
            data = self.read(cr, uid, id, ['id', 'byday', 'recurrency', 'final_date', 'rrule_type', 'month_by', 'interval', 'count', 'end_type', 'mo', 'tu', 'we', 'th', 'fr', 'sa', 'su', 'day', 'week_list'], context=context)
            event = data['id']
            if data['recurrency']:
                result[event] = self.compute_rule_string(data)
            else:
                result[event] = ""
        return result
# retro compatibility function
def _rrule_write(self, cr, uid, ids, field_name, field_value, args, context=None):
return self._set_rulestring(self, cr, uid, ids, field_name, field_value, args, context=context)
    def _set_rulestring(self, cr, uid, ids, field_name, field_value, args, context=None):
        """Inverse of _get_rulestring: parse an iCalendar RECUR string and
        write the individual recurrence fields back on the events.

        An empty/false value resets every recurrence field (the empty data
        dict is written as-is).
        """
        if not isinstance(ids, list):
            ids = [ids]
        data = self._get_empty_rrule_data()

        if field_value:
            data['recurrency'] = True
            for event in self.browse(cr, uid, ids, context=context):
                # The event's start anchors relative rule components.
                rdate = event.start
                update_data = self._parse_rrule(field_value, dict(data), rdate)
                data.update(update_data)
        self.write(cr, uid, ids, data, context=context)
        return True
    def _set_date(self, cr, uid, values, id=False, context=None):
        """Normalize the start/stop values in ``values`` before create/write.

        Depending on 'allday', copies the incoming start/stop values into the
        *_date or *_datetime companion fields (clearing the other one) and
        recomputes 'duration' in hours from the resulting span.
        Mutates ``values`` in place.
        """
        if context is None:
            context = {}

        if values.get('start_datetime') or values.get('start_date') or values.get('start') \
                or values.get('stop_datetime') or values.get('stop_date') or values.get('stop'):
            allday = values.get("allday", None)

            if allday is None:  # Need to check if this is an allday or not
                if id:
                    allday = self.read(cr, uid, [id], ['allday'], context=context)[0].get('allday')
                else:
                    allday = False
                    _logger.warning("Calendar - All day is not specified, arbitrarily set to False")
                    #raise osv.except_osv(_('Error!'), ("Need to know if it's an allday or not..."))

            # 'key' is the field family that matches the allday flag, 'notkey'
            # the one that must be cleared.
            key = "date" if allday else "datetime"
            notkey = "datetime" if allday else "date"

            for fld in ('start', 'stop'):
                if values.get('%s_%s' % (fld, key)) or values.get(fld):
                    values['%s_%s' % (fld, key)] = values.get('%s_%s' % (fld, key)) or values.get(fld)
                    values['%s_%s' % (fld, notkey)] = None
                    if fld not in values.keys():
                        values[fld] = values['%s_%s' % (fld, key)]

            diff = False
            if allday and values.get('stop_date') and values.get('start_date'):
                diff = datetime.strptime(values['stop_date'].split(' ')[0], DEFAULT_SERVER_DATE_FORMAT) - datetime.strptime(values['start_date'].split(' ')[0], DEFAULT_SERVER_DATE_FORMAT)
            elif values.get('stop_datetime') and values.get('start_datetime'):
                diff = datetime.strptime(values['stop_datetime'].split('.')[0], DEFAULT_SERVER_DATETIME_FORMAT) - datetime.strptime(values['start_datetime'].split('.')[0], DEFAULT_SERVER_DATETIME_FORMAT)
            if diff:
                # Duration is stored as a float number of hours.
                duration = float(diff.days) * 24 + (float(diff.seconds) / 3600)
                values['duration'] = round(duration, 2)
_track = {
'location': {
'calendar.subtype_invitation': lambda self, cr, uid, obj, ctx=None: True,
},
'start': {
'calendar.subtype_invitation': lambda self, cr, uid, obj, ctx=None: True,
},
}
_columns = {
'id': fields.integer('ID', readonly=True),
'state': fields.selection([('draft', 'Unconfirmed'), ('open', 'Confirmed')], string='Status', readonly=True, track_visibility='onchange'),
'name': fields.char('Meeting Subject', required=True, states={'done': [('readonly', True)]}),
'is_attendee': fields.function(_compute, string='Attendee', type="boolean", multi='attendee'),
'attendee_status': fields.function(_compute, string='Attendee Status', type="selection", selection=calendar_attendee.STATE_SELECTION, multi='attendee'),
'display_time': fields.function(_compute, string='Event Time', type="char", multi='attendee'),
'display_start': fields.function(_compute, string='Date', type="char", multi='display_start', store=True),
'allday': fields.boolean('All Day', states={'done': [('readonly', True)]}),
'start': fields.function(_compute, string='Calculated start', type="datetime", multi='start', store=True, required=True),
'stop': fields.function(_compute, string='Calculated stop', type="datetime", multi='stop', store=True, required=True),
'start_date': fields.date('Start Date', states={'done': [('readonly', True)]}, track_visibility='onchange'),
'start_datetime': fields.datetime('Start DateTime', states={'done': [('readonly', True)]}, track_visibility='onchange'),
'stop_date': fields.date('End Date', states={'done': [('readonly', True)]}, track_visibility='onchange'),
'stop_datetime': fields.datetime('End Datetime', states={'done': [('readonly', True)]}, track_visibility='onchange'), # old date_deadline
'duration': fields.float('Duration', states={'done': [('readonly', True)]}),
'description': fields.text('Description', states={'done': [('readonly', True)]}),
'class': fields.selection([('public', 'Public'), ('private', 'Private'), ('confidential', 'Public for Employees')], 'Privacy', states={'done': [('readonly', True)]}),
'location': fields.char('Location', help="Location of Event", track_visibility='onchange', states={'done': [('readonly', True)]}),
'show_as': fields.selection([('free', 'Free'), ('busy', 'Busy')], 'Show Time as', states={'done': [('readonly', True)]}),
# RECURRENCE FIELD
'rrule': fields.function(_get_rulestring, type='char', fnct_inv=_set_rulestring, store=True, string='Recurrent Rule'),
'rrule_type': fields.selection([('daily', 'Day(s)'), ('weekly', 'Week(s)'), ('monthly', 'Month(s)'), ('yearly', 'Year(s)')], 'Recurrency', states={'done': [('readonly', True)]}, help="Let the event automatically repeat at that interval"),
'recurrency': fields.boolean('Recurrent', help="Recurrent Meeting"),
'recurrent_id': fields.integer('Recurrent ID'),
'recurrent_id_date': fields.datetime('Recurrent ID date'),
'end_type': fields.selection([('count', 'Number of repetitions'), ('end_date', 'End date')], 'Recurrence Termination'),
'interval': fields.integer('Repeat Every', help="Repeat every (Days/Week/Month/Year)"),
'count': fields.integer('Repeat', help="Repeat x times"),
'mo': fields.boolean('Mon'),
'tu': fields.boolean('Tue'),
'we': fields.boolean('Wed'),
'th': fields.boolean('Thu'),
'fr': fields.boolean('Fri'),
'sa': fields.boolean('Sat'),
'su': fields.boolean('Sun'),
'month_by': fields.selection([('date', 'Date of month'), ('day', 'Day of month')], 'Option', oldname='select1'),
'day': fields.integer('Date of month'),
'week_list': fields.selection([('MO', 'Monday'), ('TU', 'Tuesday'), ('WE', 'Wednesday'), ('TH', 'Thursday'), ('FR', 'Friday'), ('SA', 'Saturday'), ('SU', 'Sunday')], 'Weekday'),
'byday': fields.selection([('1', 'First'), ('2', 'Second'), ('3', 'Third'), ('4', 'Fourth'), ('5', 'Fifth'), ('-1', 'Last')], 'By day'),
'final_date': fields.date('Repeat Until'), # The last event of a recurrence
'user_id': fields.many2one('res.users', 'Responsible', states={'done': [('readonly', True)]}),
'color_partner_id': fields.related('user_id', 'partner_id', 'id', type="integer", string="colorize", store=False), # Color of creator
'active': fields.boolean('Active', help="If the active field is set to true, it will allow you to hide the event alarm information without removing it."),
'categ_ids': fields.many2many('calendar.event.type', 'meeting_category_rel', 'event_id', 'type_id', 'Tags'),
'attendee_ids': fields.one2many('calendar.attendee', 'event_id', 'Attendees', ondelete='cascade'),
'partner_ids': fields.many2many('res.partner', 'calendar_event_res_partner_rel', string='Attendees', states={'done': [('readonly', True)]}),
'alarm_ids': fields.many2many('calendar.alarm', 'calendar_alarm_calendar_event_rel', string='Reminders', ondelete="restrict", copy=False),
}
_defaults = {
'end_type': 'count',
'count': 1,
'rrule_type': False,
'allday': False,
'state': 'draft',
'class': 'public',
'show_as': 'busy',
'month_by': 'date',
'interval': 1,
'active': 1,
'user_id': lambda self, cr, uid, ctx: uid,
'partner_ids': lambda self, cr, uid, ctx: [self.pool['res.users'].browse(cr, uid, [uid], context=ctx)[0].partner_id.id]
}
def _check_closing_date(self, cr, uid, ids, context=None):
for event in self.browse(cr, uid, ids, context=context):
if event.stop < event.start:
return False
return True
_constraints = [
(_check_closing_date, 'Error ! End date cannot be set before start date.', ['start', 'stop'])
]
    def onchange_allday(self, cr, uid, ids, start=False, end=False, starttime=False, endtime=False, startdatetime=False, enddatetime=False, checkallday=False, context=None):
        """Onchange for the 'allday' checkbox: convert between the date and
        datetime representations of start/stop.

        When switching to all-day, datetimes are truncated to dates; when
        switching back, dates become 08:00-18:00 in the user's timezone,
        converted to UTC.
        """
        value = {}

        if not ((starttime and endtime) or (start and end)):  # At first intialize, we have not datetime
            return value

        if checkallday:  # from datetime to date
            startdatetime = startdatetime or start
            if startdatetime:
                start = datetime.strptime(startdatetime, DEFAULT_SERVER_DATETIME_FORMAT)
                value['start_date'] = datetime.strftime(start, DEFAULT_SERVER_DATE_FORMAT)

            enddatetime = enddatetime or end
            if enddatetime:
                end = datetime.strptime(enddatetime, DEFAULT_SERVER_DATETIME_FORMAT)
                value['stop_date'] = datetime.strftime(end, DEFAULT_SERVER_DATE_FORMAT)
        else:  # from date to datetime
            user = self.pool['res.users'].browse(cr, uid, uid, context)
            tz = pytz.timezone(user.tz) if user.tz else pytz.utc

            if starttime:
                start = datetime.strptime(starttime.split(' ')[0], DEFAULT_SERVER_DATE_FORMAT)
                startdate = tz.localize(start)  # Add "+hh:mm" timezone
                startdate = startdate.replace(hour=8)  # Set 8 AM in localtime
                startdate = startdate.astimezone(pytz.utc)  # Convert to UTC
                value['start_datetime'] = datetime.strftime(startdate, DEFAULT_SERVER_DATETIME_FORMAT)
            elif start:
                value['start_datetime'] = start

            if endtime:
                end = datetime.strptime(endtime.split(' ')[0], DEFAULT_SERVER_DATE_FORMAT)
                # Same conversion as start, but the day ends at 6 PM localtime.
                enddate = tz.localize(end).replace(hour=18).astimezone(pytz.utc)
                value['stop_datetime'] = datetime.strftime(enddate, DEFAULT_SERVER_DATETIME_FORMAT)
            elif end:
                value['stop_datetime'] = end

        return {'value': value}
    def onchange_dates(self, cr, uid, ids, fromtype, start=False, end=False, checkallday=False, allday=False, context=None):
        """Returns duration and end date based on values passed

        @param ids: List of calendar event's IDs.

        Propagates a change of one date field ('fromtype' is 'start' or
        'stop') to the matching *_date/*_datetime companions and to the
        computed start/stop values, only when the checkbox state
        (checkallday) matches the stored allday flag.
        """
        value = {}

        if checkallday != allday:
            return value

        value['allday'] = checkallday  # Force to be rewrited

        if allday:
            if fromtype == 'start':
                start = datetime.strptime(start, DEFAULT_SERVER_DATE_FORMAT)
                value['start_datetime'] = datetime.strftime(start, DEFAULT_SERVER_DATETIME_FORMAT)
                value['start'] = datetime.strftime(start, DEFAULT_SERVER_DATETIME_FORMAT)

            if fromtype == 'stop':
                end = datetime.strptime(end, DEFAULT_SERVER_DATE_FORMAT)
                value['stop_datetime'] = datetime.strftime(end, DEFAULT_SERVER_DATETIME_FORMAT)
                value['stop'] = datetime.strftime(end, DEFAULT_SERVER_DATETIME_FORMAT)

        else:
            if fromtype == 'start':
                start = datetime.strptime(start, DEFAULT_SERVER_DATETIME_FORMAT)
                value['start_date'] = datetime.strftime(start, DEFAULT_SERVER_DATE_FORMAT)
                value['start'] = datetime.strftime(start, DEFAULT_SERVER_DATETIME_FORMAT)
            if fromtype == 'stop':
                end = datetime.strptime(end, DEFAULT_SERVER_DATETIME_FORMAT)
                value['stop_date'] = datetime.strftime(end, DEFAULT_SERVER_DATE_FORMAT)
                value['stop'] = datetime.strftime(end, DEFAULT_SERVER_DATETIME_FORMAT)

        return {'value': value}
def new_invitation_token(self, cr, uid, record, partner_id):
return uuid.uuid4().hex
def create_attendees(self, cr, uid, ids, context):
user_obj = self.pool['res.users']
current_user = user_obj.browse(cr, uid, uid, context=context)
res = {}
for event in self.browse(cr, uid, ids, context):
attendees = {}
for att in event.attendee_ids:
attendees[att.partner_id.id] = True
new_attendees = []
new_att_partner_ids = []
for partner in event.partner_ids:
if partner.id in attendees:
continue
access_token = self.new_invitation_token(cr, uid, event, partner.id)
values = {
'partner_id': partner.id,
'event_id': event.id,
'access_token': access_token,
'email': partner.email,
}
if partner.id == current_user.partner_id.id:
values['state'] = 'accepted'
att_id = self.pool['calendar.attendee'].create(cr, uid, values, context=context)
new_attendees.append(att_id)
new_att_partner_ids.append(partner.id)
if not current_user.email or current_user.email != partner.email:
mail_from = current_user.email or tools.config.get('email_from', False)
if self.pool['calendar.attendee']._send_mail_to_attendees(cr, uid, att_id, email_from=mail_from, context=context):
self.message_post(cr, uid, event.id, body=_("An invitation email has been sent to attendee %s") % (partner.name,), subtype="calendar.subtype_invitation", context=context)
if new_attendees:
self.write(cr, uid, [event.id], {'attendee_ids': [(4, att) for att in new_attendees]}, context=context)
if new_att_partner_ids:
self.message_subscribe(cr, uid, [event.id], new_att_partner_ids, context=context)
# We remove old attendees who are not in partner_ids now.
all_partner_ids = [part.id for part in event.partner_ids]
all_part_attendee_ids = [att.partner_id.id for att in event.attendee_ids]
all_attendee_ids = [att.id for att in event.attendee_ids]
partner_ids_to_remove = map(lambda x: x, set(all_part_attendee_ids + new_att_partner_ids) - set(all_partner_ids))
attendee_ids_to_remove = []
if partner_ids_to_remove:
attendee_ids_to_remove = self.pool["calendar.attendee"].search(cr, uid, [('partner_id.id', 'in', partner_ids_to_remove), ('event_id.id', '=', event.id)], context=context)
if attendee_ids_to_remove:
self.pool['calendar.attendee'].unlink(cr, uid, attendee_ids_to_remove, context)
res[event.id] = {
'new_attendee_ids': new_attendees,
'old_attendee_ids': all_attendee_ids,
'removed_attendee_ids': attendee_ids_to_remove
}
return res
def get_search_fields(self, browse_event, order_fields, r_date=None):
sort_fields = {}
for ord in order_fields:
if ord == 'id' and r_date:
sort_fields[ord] = '%s-%s' % (browse_event[ord], r_date.strftime("%Y%m%d%H%M%S"))
else:
sort_fields[ord] = browse_event[ord]
if type(browse_event[ord]) is openerp.osv.orm.browse_record:
name_get = browse_event[ord].name_get()
if len(name_get) and len(name_get[0]) >= 2:
sort_fields[ord] = name_get[0][1]
return sort_fields
    def get_recurrent_ids(self, cr, uid, event_id, domain, order=None, context=None):
        """Gives virtual event ids for recurring events
        This method gives ids of dates that comes between start date and end date of calendar views

        @param order: The fields (comma separated, format "FIELD {DESC|ASC}") on which the events should be sorted
        """
        if not context:
            context = {}

        if isinstance(event_id, (str, int, long)):
            ids_to_browse = [event_id]  # keep select for return
        else:
            ids_to_browse = event_id

        if order:
            order_fields = [field.split()[0] for field in order.split(',')]
        else:
            # fallback on self._order defined on the model
            order_fields = [field.split()[0] for field in self._order.split(',')]

        if 'id' not in order_fields:
            order_fields.append('id')

        result_data = []
        result = []
        for ev in self.browse(cr, uid, ids_to_browse, context=context):
            if not ev.recurrency or not ev.rrule:
                # Non-recurrent event: keep its real id as-is.
                result.append(ev.id)
                result_data.append(self.get_search_fields(ev, order_fields))
                continue
            rdates = self.get_recurrent_date_by_event(cr, uid, ev, context=context)

            for r_date in rdates:
                # fix domain evaluation
                # step 1: check date and replace expression by True or False, replace other expressions by True
                # step 2: evaluation of & and |
                # check if there are one False
                pile = []
                ok = True
                for arg in domain:
                    if str(arg[0]) in ('start', 'stop', 'final_date'):
                        # Compare as 'YYYY-MM-DD' strings (lexicographic order
                        # matches chronological order for this format).
                        if (arg[1] == '='):
                            ok = r_date.strftime('%Y-%m-%d') == arg[2]
                        if (arg[1] == '>'):
                            ok = r_date.strftime('%Y-%m-%d') > arg[2]
                        if (arg[1] == '<'):
                            ok = r_date.strftime('%Y-%m-%d') < arg[2]
                        if (arg[1] == '>='):
                            ok = r_date.strftime('%Y-%m-%d') >= arg[2]
                        if (arg[1] == '<='):
                            ok = r_date.strftime('%Y-%m-%d') <= arg[2]
                        pile.append(ok)
                    elif str(arg) == str('&') or str(arg) == str('|'):
                        pile.append(arg)
                    else:
                        # Non-date leaves are assumed satisfied.
                        pile.append(True)
                pile.reverse()
                new_pile = []
                # Evaluate the reversed prefix-notation domain as a stack machine.
                for item in pile:
                    if not isinstance(item, basestring):
                        res = item
                    elif str(item) == str('&'):
                        first = new_pile.pop()
                        second = new_pile.pop()
                        res = first and second
                    elif str(item) == str('|'):
                        first = new_pile.pop()
                        second = new_pile.pop()
                        res = first or second
                    new_pile.append(res)

                if [True for item in new_pile if not item]:
                    # At least one term evaluated False: skip this occurrence.
                    continue
                result_data.append(self.get_search_fields(ev, order_fields, r_date=r_date))

        if order_fields:
            def comparer(left, right):
                # Multi-key comparator honouring per-key sort direction.
                for fn, mult in comparers:
                    result = cmp(fn(left), fn(right))
                    if result:
                        return mult * result
                return 0

            sort_params = [key.split()[0] if key[-4:].lower() != 'desc' else '-%s' % key.split()[0] for key in (order or self._order).split(',')]
            comparers = [((itemgetter(col[1:]), -1) if col[0] == '-' else (itemgetter(col), 1)) for col in sort_params]
            ids = [r['id'] for r in sorted(result_data, cmp=comparer)]

        if isinstance(event_id, (str, int, long)):
            return ids and ids[0] or False
        else:
            return ids
def compute_rule_string(self, data):
    """
    Compute rule string according to value type RECUR of iCalendar from the values given.
    @param self: the object pointer
    @param data: dictionary of freq and interval value
    @return: string containing recurring rule (empty if no rule)
    """
    def _week_part(freq, data):
        # ;BYDAY=MO,TU,... for weekly recurrences, from the boolean day flags
        weekdays = ['mo', 'tu', 'we', 'th', 'fr', 'sa', 'su']
        if freq != 'weekly':
            return ''
        byday = [day.upper() for day in data if data.get(day) and day in weekdays]
        return (';BYDAY=' + ','.join(byday)) if byday else ''

    def _month_part(freq, data):
        # ;BYDAY=... (nth weekday) or ;BYMONTHDAY=... for monthly recurrences
        if freq == 'monthly':
            if data.get('month_by') == 'date' and (data.get('day') < 1 or data.get('day') > 31):
                raise osv.except_osv(_('Error!'), ("Please select a proper day of the month."))
            if data.get('month_by') == 'day':  # Eg : Second Monday of the month
                return ';BYDAY=' + data.get('byday') + data.get('week_list')
            elif data.get('month_by') == 'date':  # Eg : 16th of the month
                return ';BYMONTHDAY=' + str(data.get('day'))
        return ''

    def _until_part(data):
        # ;COUNT=n or ;UNTIL=yyyymmddhhmmssT235959Z depending on end_type
        # (side effect kept from the original: stores 'end_date_new' in data)
        if data.get('final_date'):
            data['end_date_new'] = ''.join((re.compile('\d')).findall(data.get('final_date'))) + 'T235959Z'
        count_part = (data.get('end_type') == 'count' and (';COUNT=' + str(data.get('count'))) or '')
        until_part = ((data.get('end_date_new') and data.get('end_type') == 'end_date' and (';UNTIL=' + data.get('end_date_new'))) or '')
        return count_part + until_part

    freq = data.get('rrule_type', False)  # day/week/month/year
    if not freq:
        return ''
    interval_part = (';INTERVAL=' + str(data.get('interval'))) if data.get('interval') else ''
    return 'FREQ=' + freq.upper() + _week_part(freq, data) + interval_part + _until_part(data) + _month_part(freq, data)
def _get_empty_rrule_data(self):
return {
'byday': False,
'recurrency': False,
'final_date': False,
'rrule_type': False,
'month_by': False,
'interval': 0,
'count': False,
'end_type': False,
'mo': False,
'tu': False,
'we': False,
'th': False,
'fr': False,
'sa': False,
'su': False,
'day': False,
'week_list': False
}
def _parse_rrule(self, rule, data, date_start):
    """Fill ``data`` (a dict shaped like _get_empty_rrule_data()) from an
    iCalendar RRULE string parsed with dateutil.

    @param rule: RRULE string (RFC 5545 RECUR value)
    @param data: dict to populate in place (also returned)
    @param date_start: event start, formatted with DEFAULT_SERVER_DATETIME_FORMAT
    @return: the populated ``data`` dict

    NOTE(review): this reads dateutil.rrule *private* attributes
    (_freq, _count, _interval, _until, _byweekday, _bynweekday,
    _bymonthday, _bymonth) -- fragile across dateutil versions.
    """
    day_list = ['mo', 'tu', 'we', 'th', 'fr', 'sa', 'su']
    rrule_type = ['yearly', 'monthly', 'weekly', 'daily']
    r = rrule.rrulestr(rule, dtstart=datetime.strptime(date_start, DEFAULT_SERVER_DATETIME_FORMAT))
    # presumably dateutil freq constants 0..3 map onto rrule_type by index;
    # index 0 ('yearly') is deliberately excluded here -- see _bymonth below
    if r._freq > 0 and r._freq < 4:
        data['rrule_type'] = rrule_type[r._freq]
    data['count'] = r._count
    data['interval'] = r._interval
    # _until is a datetime or None; keep False/None when the rule never ends
    data['final_date'] = r._until and r._until.strftime(DEFAULT_SERVER_DATETIME_FORMAT)
    # repeat weekly: set the boolean flag of each recurring weekday
    if r._byweekday:
        for i in xrange(0, 7):
            if i in r._byweekday:
                data[day_list[i]] = True
        data['rrule_type'] = 'weekly'
    # repeat monthly by nweekday ((weekday, weeknumber), ) e.g. 2nd Monday
    if r._bynweekday:
        data['week_list'] = day_list[r._bynweekday[0][0]].upper()
        data['byday'] = str(r._bynweekday[0][1])
        data['month_by'] = 'day'
        data['rrule_type'] = 'monthly'
    # repeat monthly by day-of-month, e.g. the 16th
    if r._bymonthday:
        data['day'] = r._bymonthday[0]
        data['month_by'] = 'date'
        data['rrule_type'] = 'monthly'
    # repeat yearly but for openerp it's monthly, take same information as monthly but interval is 12 times
    if r._bymonth:
        data['interval'] = data['interval'] * 12
    # FIXME handle forever case
    # end of recurrence
    # in case of repeat for ever that we do not support right now
    if not (data.get('count') or data.get('final_date')):
        data['count'] = 100
    if data.get('count'):
        data['end_type'] = 'count'
    else:
        data['end_type'] = 'end_date'
    return data
def message_get_subscription_data(self, cr, uid, ids, user_pid=None, context=None):
    """Return subscription data per (possibly virtual) event id, delegating
    each id to its real event record.

    @param ids: list of real or virtual ("<id>-<date>") event ids
    @return: dict mapping each requested id to its real event's data
    """
    res = {}
    for virtual_id in ids:
        real_id = calendar_id2real_id(virtual_id)
        # BUG FIX: forward the caller's user_pid; it used to be hard-coded
        # to None, silently discarding the argument.
        result = super(calendar_event, self).message_get_subscription_data(cr, uid, [real_id], user_pid=user_pid, context=context)
        res[virtual_id] = result[real_id]
    return res
def onchange_partner_ids(self, cr, uid, ids, value, context=None):
    """ The basic purpose of this method is to check that destination partners
    effectively have email addresses. Otherwise a warning is thrown.
    :param value: value format: [[6, 0, [3, 4]]]
    :return: onchange dict, possibly containing a 'warning' key
    """
    res = {'value': {}}
    if not value or not value[0] or not value[0][0] == 6:
        # BUG FIX: used to `return` bare (None); always return the onchange
        # dict so callers get a consistent type on every path.
        return res
    res.update(self.check_partners_email(cr, uid, value[0][2], context=context))
    return res
def check_partners_email(self, cr, uid, partner_ids, context=None):
    """ Verify that selected partner_ids have an email_address defined.
        Otherwise throw a warning. """
    partners = self.pool['res.partner'].browse(cr, uid, partner_ids, context=context)
    partners_without_email = [partner for partner in partners if not partner.email]
    if not partners_without_email:
        return {}
    warning_msg = _('The following contacts have no email address :')
    for partner in partners_without_email:
        warning_msg += '\n- %s' % (partner.name)
    return {'warning': {
        'title': _('Email addresses not found'),
        'message': warning_msg,
    }}
def _needaction_domain_get(self, cr, uid, context=None):
    """Domain selecting today's events belonging to the current user
    (shown as 'needing action')."""
    today_start = time.strftime(DEFAULT_SERVER_DATE_FORMAT + ' 00:00:00')
    today_end = time.strftime(DEFAULT_SERVER_DATE_FORMAT + ' 23:59:59')
    return [
        ('stop', '<=', today_end),
        ('start', '>=', today_start),
        ('user_id', '=', uid),
    ]
def message_post(self, cr, uid, thread_id, body='', subject=None, type='notification', subtype=None, parent_id=False, attachments=None, context=None, **kwargs):
    """Post a message on the real event behind a possibly-virtual id."""
    if isinstance(thread_id, str):
        thread_id = get_real_ids(thread_id)
    # BUG FIX: `context` defaults to None, so the unconditional
    # context.get('default_date') used to raise AttributeError when this
    # method was called without an explicit context.
    if context and context.get('default_date'):
        del context['default_date']
    return super(calendar_event, self).message_post(cr, uid, thread_id, body=body, subject=subject, type=type, subtype=subtype, parent_id=parent_id, attachments=attachments, context=context, **kwargs)
def do_sendmail(self, cr, uid, ids, context=None):
    """Send an invitation email for each event to its attendees and log a
    note on the event; does nothing when the current user has no email."""
    # PERF: the current user is loop-invariant -- browse it once instead of
    # re-browsing it for every event as the original code did.
    current_user = self.pool['res.users'].browse(cr, uid, uid, context=context)
    if not current_user.email:
        return
    for event in self.browse(cr, uid, ids, context):
        if self.pool['calendar.attendee']._send_mail_to_attendees(cr, uid, [att.id for att in event.attendee_ids], email_from=current_user.email, context=context):
            self.message_post(cr, uid, event.id, body=_("An invitation email has been sent to attendee(s)"), subtype="calendar.subtype_invitation", context=context)
    return
def get_attendee(self, cr, uid, meeting_id, context=None):
    """Return invitation data for one meeting (used by the web controller):
    a dict with 'meeting' summary fields and an 'attendee' list."""
    meeting = self.browse(cr, uid, int(meeting_id), context=context)
    attendees = [{'name': attendee.cn, 'status': attendee.state}
                 for attendee in meeting.attendee_ids]
    return {
        'meeting': {
            'event': meeting.name,
            'where': meeting.location,
            'when': meeting.display_time,
        },
        'attendee': attendees,
    }
def get_interval(self, cr, uid, ids, date, interval, tz=None, context=None):
    """Format one component of *date* for the invitation email template
    (used only in calendar_event_data.xml).

    @param date: datetime string in DEFAULT_SERVER_DATETIME_FORMAT
    @param interval: one of 'day', 'month', 'dayname', 'time'
    @param tz: optional timezone name; when given, the (UTC) date is
        converted before formatting
    @raise ValueError: for an unknown *interval*
    """
    date = datetime.strptime(date.split('.')[0], DEFAULT_SERVER_DATETIME_FORMAT)
    if tz:
        # tz is truthy here, so the original `tz or 'UTC'` fallback was dead code
        timezone = pytz.timezone(tz)
        date = date.replace(tzinfo=pytz.timezone('UTC')).astimezone(timezone)
    if interval == 'day':
        res = str(date.day)
    elif interval == 'month':
        res = date.strftime('%B') + " " + str(date.year)
    elif interval == 'dayname':
        res = date.strftime('%A')
    elif interval == 'time':
        dummy, format_time = self.get_date_formats(cr, uid, context=context)
        res = date.strftime(format_time + " %Z")
    else:
        # BUG FIX: an unknown interval used to crash with UnboundLocalError
        # on `res`; fail with an explicit error instead.
        raise ValueError("Unknown interval %r" % (interval,))
    return res
def search(self, cr, uid, args, offset=0, limit=0, order=None, context=None, count=False):
    """Search events, expanding recurrent events into virtual occurrences
    unless context['virtual_id'] is False.

    Because virtual occurrences are generated after the SQL search,
    offset/limit/order/count are applied here in Python rather than being
    delegated to the real-id search.
    """
    if context is None:
        context = {}
    if context.get('mymeetings', False):
        # restrict to meetings the current user attends
        partner_id = self.pool['res.users'].browse(cr, uid, uid, context).partner_id.id
        args += [('partner_ids', 'in', [partner_id])]
    new_args = []
    for arg in args:
        new_arg = arg
        if arg[0] in ('start_date', 'start_datetime', 'start',) and arg[1] == ">=":
            if context.get('virtual_id', True):
                # widen the domain: also match recurrent events whose
                # final_date satisfies the same bound, so their virtual
                # occurrences can fall inside the requested window
                # (the original start leaf is still appended below)
                new_args += ['|', '&', ('recurrency', '=', 1), ('final_date', arg[1], arg[2])]
        elif arg[0] == "id":
            # map virtual ids ("<id>-<date>") back to real database ids
            new_id = get_real_ids(arg[2])
            new_arg = (arg[0], arg[1], new_id)
        new_args.append(new_arg)
    if not context.get('virtual_id', True):
        return super(calendar_event, self).search(cr, uid, new_args, offset=offset, limit=limit, order=order, count=count, context=context)
    # offset, limit, order and count must be treated separately as we may need to deal with virtual ids
    res = super(calendar_event, self).search(cr, uid, new_args, offset=0, limit=0, order=None, context=context, count=False)
    res = self.get_recurrent_ids(cr, uid, res, args, order=order, context=context)
    if count:
        return len(res)
    elif limit:
        return res[offset: offset + limit]
    return res
def copy(self, cr, uid, id, default=None, context=None):
    """Duplicate the real event behind a possibly-virtual id, after
    normalising the date fields of the copy's default values."""
    default = default or {}
    self._set_date(cr, uid, default, id=default.get('id'), context=context)
    real_id = calendar_id2real_id(id)
    return super(calendar_event, self).copy(cr, uid, real_id, default, context)
def _detach_one_event(self, cr, uid, id, values=None, context=None):
    """Detach one occurrence of a recurrent event into a standalone copy.

    @param id: real or virtual ("<id>-<date>") event id
    @param values: extra values to apply on the detached copy
    @return: id of the newly created event
    """
    # BUG FIX: `values=dict()` was a mutable default argument shared across
    # calls; use the None-sentinel idiom instead (backward compatible).
    if values is None:
        values = {}
    real_event_id = calendar_id2real_id(id)
    data = self.read(cr, uid, id, ['allday', 'start', 'stop', 'rrule', 'duration'])
    data['start_date' if data['allday'] else 'start_datetime'] = data['start']
    data['stop_date' if data['allday'] else 'stop_datetime'] = data['stop']
    if data.get('rrule'):
        data.update(
            values,
            recurrent_id=real_event_id,
            recurrent_id_date=data.get('start'),
            rrule_type=False,
            rrule='',
            recurrency=False,
            # NOTE(review): both branches of this conditional pick the same
            # format; the allday branch presumably meant a date-only format.
            # Kept as-is to preserve behavior -- confirm before changing.
            final_date=datetime.strptime(data.get('start'), DEFAULT_SERVER_DATETIME_FORMAT if data['allday'] else DEFAULT_SERVER_DATETIME_FORMAT) + timedelta(hours=values.get('duration', False) or data.get('duration'))
        )
    # do not copy the id
    if data.get('id'):
        del(data['id'])
    new_id = self.copy(cr, uid, real_event_id, default=data, context=context)
    return new_id
def open_after_detach_event(self, cr, uid, ids, context=None):
    """Detach the first given occurrence and return an action opening the
    new standalone event in an editable form view."""
    if context is None:
        context = {}
    detached_id = self._detach_one_event(cr, uid, ids[0], context=context)
    action = {
        'type': 'ir.actions.act_window',
        'res_model': 'calendar.event',
        'view_mode': 'form',
        'res_id': detached_id,
        'target': 'current',
        'flags': {'form': {'action_buttons': True, 'options': {'mode': 'edit'}}},
    }
    return action
def _name_search(self, cr, user, name='', args=None, operator='ilike', context=None, limit=100, name_get_uid=None):
    """Strip the virtual-id date suffix from any 'id' leaves in the domain
    before delegating the name search to super()."""
    for arg in args:
        if arg[0] != 'id':
            continue
        for pos, cal_id in enumerate(arg[2]):
            if isinstance(cal_id, str):
                # "<real_id>-<date>" -> "<real_id>"
                arg[2][pos] = cal_id.split('-')[0]
    return super(calendar_event, self)._name_search(cr, user, name=name, args=args, operator=operator, context=context, limit=limit, name_get_uid=name_get_uid)
def write(self, cr, uid, ids, values, context=None):
    """Write on (possibly virtual) calendar events.

    Virtual ids ("<real_id>-<date>") are either detached into standalone
    events or redirected to their real record, depending on which fields
    are written; final_date is recomputed for count-based recurrences and
    attendees are (re)notified on date changes.
    """
    def _only_changes_to_apply_on_real_ids(field_names):
        ''' return True if changes are only to be made on the real ids'''
        for field in field_names:
            if field in ['start', 'start_date', 'start_datetime', 'stop', 'stop_date', 'stop_datetime', 'active']:
                return True
        return False
    if not isinstance(ids, (tuple, list)):
        ids = [ids]
    context = context or {}
    self._set_date(cr, uid, values, id=ids[0], context=context)
    # NOTE(review): rebinding `ids` inside this loop means only the LAST
    # element's normalisation survives for multi-id writes -- looks
    # suspicious, but kept as-is to preserve behavior; confirm with callers.
    for one_ids in ids:
        if isinstance(one_ids, (str, int, long)):
            if len(str(one_ids).split('-')) == 1:
                ids = [int(one_ids)]
            else:
                ids = [one_ids]
    res = False
    new_id = False
    # Special write of complex IDS
    # BUG FIX: iterate over a copy -- the original removed items from `ids`
    # while iterating it, which silently skipped the element following each
    # removed virtual id.
    for event_id in list(ids):
        if len(str(event_id).split('-')) == 1:
            continue
        ids.remove(event_id)
        real_event_id = calendar_id2real_id(event_id)
        # if we are setting the recurrency flag to False or if we are only changing fields that
        # should be only updated on the real ID and not on the virtual (like message_follower_ids):
        # then set real ids to be updated.
        if not values.get('recurrency', True) or not _only_changes_to_apply_on_real_ids(values.keys()):
            ids.append(real_event_id)
            continue
        else:
            data = self.read(cr, uid, event_id, ['start', 'stop', 'rrule', 'duration'])
            if data.get('rrule'):
                # NOTE(review): context is deliberately not forwarded here
                # in the original; kept as-is.
                new_id = self._detach_one_event(cr, uid, event_id, values, context=None)
    res = super(calendar_event, self).write(cr, uid, ids, values, context=context)
    # set end_date for calendar searching
    if values.get('recurrency', True) and values.get('end_type', 'count') in ('count', unicode('count')) and \
            (values.get('rrule_type') or values.get('count') or values.get('start') or values.get('stop')):
        for id in ids:
            final_date = self._get_recurrency_end_date(cr, uid, id, context=context)
            super(calendar_event, self).write(cr, uid, [id], {'final_date': final_date}, context=context)
    attendees_create = False
    if values.get('partner_ids', False):
        attendees_create = self.create_attendees(cr, uid, ids, context)
    if (values.get('start_date') or values.get('start_datetime', False)) and values.get('active', True):
        the_id = new_id or (ids and int(ids[0]))
        if the_id:
            if attendees_create:
                attendees_create = attendees_create[the_id]
                # notify pre-existing attendees only (not ones just removed)
                mail_to_ids = list(set(attendees_create['old_attendee_ids']) - set(attendees_create['removed_attendee_ids']))
            else:
                mail_to_ids = [att.id for att in self.browse(cr, uid, the_id, context=context).attendee_ids]
            if mail_to_ids:
                current_user = self.pool['res.users'].browse(cr, uid, uid, context=context)
                if self.pool['calendar.attendee']._send_mail_to_attendees(cr, uid, mail_to_ids, template_xmlid='calendar_template_meeting_changedate', email_from=current_user.email, context=context):
                    self.message_post(cr, uid, the_id, body=_("A email has been send to specify that the date has been changed !"), subtype="calendar.subtype_invitation", context=context)
    # equivalent to `res or False` (operator precedence: True and False == False)
    return res or True and False
def create(self, cr, uid, vals, context=None):
    """Create an event: normalise dates, default the organizer to the
    current user, compute final_date and create the attendee records."""
    context = {} if context is None else context
    self._set_date(cr, uid, vals, id=False, context=context)
    # Else bug with quick_create when we are filter on an other user
    vals.setdefault('user_id', uid)
    new_id = super(calendar_event, self).create(cr, uid, vals, context=context)
    final_date = self._get_recurrency_end_date(cr, uid, new_id, context=context)
    self.write(cr, uid, [new_id], {'final_date': final_date}, context=context)
    self.create_attendees(cr, uid, [new_id], context=context)
    return new_id
def read_group(self, cr, uid, domain, fields, groupby, offset=0, limit=None, context=None, orderby=False, lazy=True):
    """Group events over real records only (virtual expansion is disabled
    during grouping, then restored in each group's __context)."""
    context = dict(context or {})
    if 'date' in groupby:
        raise osv.except_osv(_('Warning!'), _('Group by date is not supported, use the calendar view instead.'))
    virtual_id = context.get('virtual_id', True)
    context.update({'virtual_id': False})
    groups = super(calendar_event, self).read_group(cr, uid, domain, fields, groupby, offset=offset, limit=limit, context=context, orderby=orderby, lazy=lazy)
    for group in groups:
        # remove the count, since the value is not consistent with the
        # result of the search when expanding the group
        for group_field in groupby:
            count_key = group_field + "_count"
            if group.get(count_key):
                del group[count_key]
        group.get('__context', {}).update({'virtual_id': virtual_id})
    return groups
def read(self, cr, uid, ids, fields=None, context=None, load='_classic_read'):
    """Read events by real or virtual id.

    Virtual ids are resolved to their real record, then start/stop (and
    their date/datetime variants) are recomputed for the specific
    occurrence. 'private' events of other users are scrubbed down to
    technical/time fields, with the name replaced by 'Busy'.
    """
    if context is None:
        context = {}
    fields2 = fields and fields[:] or None
    # fields needed internally even when not requested by the caller;
    # they are stripped from the result again at the end
    EXTRAFIELDS = ('class', 'user_id', 'duration', 'allday', 'start', 'start_date', 'start_datetime', 'rrule')
    for f in EXTRAFIELDS:
        if fields and (f not in fields):
            fields2.append(f)
    if isinstance(ids, (str, int, long)):
        select = [ids]
    else:
        select = ids
    # FIXME: find a better way to not push virtual ids in the cache
    # (leading to their prefetching and ultimately a type error when
    # postgres tries to convert '14-3489274297' to an integer)
    self.invalidate_cache(cr, uid, context=context)
    # pair each requested (possibly virtual) id with its real database id
    select = map(lambda x: (x, calendar_id2real_id(x)), select)
    result = []
    real_data = super(calendar_event, self).read(cr, uid, [real_id for calendar_id, real_id in select], fields=fields2, context=context, load=load)
    real_data = dict(zip([x['id'] for x in real_data], real_data))
    for calendar_id, real_id in select:
        res = real_data[real_id].copy()
        # recompute this occurrence's start/stop from the date embedded in
        # the virtual id (duration defaults to 1 hour)
        ls = calendar_id2real_id(calendar_id, with_date=res and res.get('duration', 0) > 0 and res.get('duration') or 1)
        if not isinstance(ls, (str, int, long)) and len(ls) >= 2:
            res['start'] = ls[1]
            res['stop'] = ls[2]
            if res['allday']:
                res['start_date'] = ls[1]
                res['stop_date'] = ls[2]
            else:
                res['start_datetime'] = ls[1]
                res['stop_datetime'] = ls[2]
            res['display_time'] = self._get_display_time(cr, uid, ls[1], ls[2], res['duration'], res['allday'], context=context)
        res['id'] = calendar_id
        result.append(res)
    # privacy scrubbing: hide details of other users' private events
    for r in result:
        if r['user_id']:
            user_id = type(r['user_id']) in (tuple, list) and r['user_id'][0] or r['user_id']
            if user_id == uid:
                continue
        if r['class'] == 'private':
            for f in r.keys():
                if f not in ('id', 'allday', 'start', 'stop', 'duration', 'user_id', 'state', 'interval', 'count', 'recurrent_id_date', 'rrule'):
                    if isinstance(r[f], list):
                        r[f] = []
                    else:
                        r[f] = False
                    if f == 'name':
                        r[f] = _('Busy')
    # strip the internal-only fields the caller did not ask for
    for r in result:
        for k in EXTRAFIELDS:
            if (k in r) and (fields and (k not in fields)):
                del r[k]
    if isinstance(ids, (str, int, long)):
        return result and result[0] or False
    return result
def unlink(self, cr, uid, ids, can_be_deleted=True, context=None):
    """Delete plain real events outright; virtual occurrences and detached
    recurrent children are archived (active=False) instead."""
    if not isinstance(ids, list):
        ids = [ids]
    res = False
    ids_to_archive = []
    ids_to_delete = []
    for event_id in ids:
        is_real_id = len(str(event_id).split('-')) == 1
        if can_be_deleted and is_real_id:
            if self.browse(cr, uid, int(event_id), context).recurrent_id:
                ids_to_archive.append(event_id)
            else:
                ids_to_delete.append(int(event_id))
        else:
            ids_to_archive.append(event_id)
    if ids_to_delete:
        res = super(calendar_event, self).unlink(cr, uid, ids_to_delete, context=context)
    for archive_id in ids_to_archive:
        res = self.write(cr, uid, archive_id, {'active': False}, context=context)
    return res
class mail_message(osv.Model):
    _inherit = "mail.message"

    def search(self, cr, uid, args, offset=0, limit=0, order=None, context=None, count=False):
        '''
        convert the search on real ids in the case it was asked on virtual ids, then call super()
        '''
        for leaf in args:
            if leaf[0] == "res_id" and isinstance(leaf[2], str):
                leaf[2] = get_real_ids(leaf[2])
        return super(mail_message, self).search(cr, uid, args, offset=offset, limit=limit, order=order, context=context, count=count)

    def _find_allowed_model_wise(self, cr, uid, doc_model, doc_dict, context=None):
        """For calendar events, extend the allowed-document dict with the
        virtual occurrence ids of each allowed real event."""
        context = {} if context is None else context
        if doc_model == 'calendar.event':
            order = context.get('order', self._order)
            recurrent_ids = self.pool[doc_model].get_recurrent_ids(cr, uid, doc_dict.keys(), [], order=order, context=context)
            for virtual_id in recurrent_ids:
                doc_dict.setdefault(virtual_id, doc_dict[get_real_ids(virtual_id)])
        return super(mail_message, self)._find_allowed_model_wise(cr, uid, doc_model, doc_dict, context=context)
class ir_attachment(osv.Model):
    _inherit = "ir.attachment"

    def search(self, cr, uid, args, offset=0, limit=0, order=None, context=None, count=False):
        '''
        convert the search on real ids in the case it was asked on virtual ids, then call super()
        '''
        for leaf in args:
            if leaf[0] == "res_id" and isinstance(leaf[2], str):
                leaf[2] = get_real_ids(leaf[2])
        return super(ir_attachment, self).search(cr, uid, args, offset=offset, limit=limit, order=order, context=context, count=count)

    def write(self, cr, uid, ids, vals, context=None):
        '''
        when posting an attachment (new or not), convert the virtual ids in real ids.
        '''
        res_id = vals.get('res_id')
        if isinstance(res_id, str):
            vals['res_id'] = get_real_ids(res_id)
        return super(ir_attachment, self).write(cr, uid, ids, vals, context=context)
class ir_http(osv.AbstractModel):
    _inherit = 'ir.http'

    def _auth_method_calendar(self):
        """Authenticate a calendar invitation request from its access token;
        raise BadRequest when the token is invalid or belongs to another
        partner than the logged-in user."""
        token = request.params['token']
        db = request.params['db']
        registry = openerp.modules.registry.RegistryManager.get(db)
        attendee_pool = registry.get('calendar.attendee')
        error_message = False
        with registry.cursor() as cr:
            attendee_ids = attendee_pool.search(cr, openerp.SUPERUSER_ID, [('access_token', '=', token)])
            if not attendee_ids:
                error_message = """Invalid Invitation Token."""
            elif request.session.uid and request.session.login != 'anonymous':
                # valid session, but the logged-in user may not be the invitee
                attendee = attendee_pool.browse(cr, openerp.SUPERUSER_ID, attendee_ids[0])
                user = registry.get('res.users').browse(cr, openerp.SUPERUSER_ID, request.session.uid)
                if attendee.partner_id.id != user.partner_id.id:
                    error_message = """Invitation cannot be forwarded via email. This event/meeting belongs to %s and you are logged in as %s. Please ask organizer to add you.""" % (attendee.email, user.email)
        if error_message:
            raise BadRequest(error_message)
        return True
class invite_wizard(osv.osv_memory):
    _inherit = 'mail.wizard.invite'

    def default_get(self, cr, uid, fields, context=None):
        '''
        in case someone clicked on 'invite others' wizard in the followers widget, transform virtual ids in real ids
        '''
        defaults = super(invite_wizard, self).default_get(cr, uid, fields, context=context)
        if 'res_id' in defaults:
            defaults['res_id'] = get_real_ids(defaults['res_id'])
        return defaults
|
"""
@package mi.instrument.teledyne.workhorse_monitor_300_khz.cgsn.test.test_driver
@author Roger Unwin
@brief Test cases for InstrumentDriver
USAGE:
Make tests verbose and provide stdout
* From the IDK
$ bin/test_driver
$ bin/test_driver -u
$ bin/test_driver -i
$ bin/test_driver -q
"""
__author__ = 'Roger Unwin'
__license__ = 'Apache 2.0'
import unittest
from nose.plugins.attrib import attr
from mock import Mock
from mi.core.instrument.chunker import StringChunker
from mi.core.log import get_logger; log = get_logger()
from mi.idk.unit_test import InstrumentDriverTestCase
from mi.idk.unit_test import DriverTestMixin
from mi.idk.unit_test import ParameterTestConfigKey
from mi.idk.unit_test import DriverStartupConfigKey
from mi.instrument.teledyne.workhorse_monitor_300_khz.driver import NEWLINE
from mi.instrument.teledyne.workhorse_monitor_300_khz.cgsn.driver import Parameter
from mi.instrument.teledyne.workhorse_monitor_300_khz.cgsn.driver import Prompt
from mi.instrument.teledyne.workhorse_monitor_300_khz.cgsn.driver import ProtocolEvent
from mi.instrument.teledyne.workhorse_monitor_300_khz.cgsn.driver import ProtocolState
from mi.instrument.teledyne.workhorse_monitor_300_khz.cgsn.driver import ScheduledJob
from mi.instrument.teledyne.workhorse_monitor_300_khz.cgsn.driver import InstrumentCmds
from mi.instrument.teledyne.workhorse_monitor_300_khz.cgsn.driver import Capability
from mi.instrument.teledyne.workhorse_monitor_300_khz.cgsn.driver import InstrumentDriver
from mi.instrument.teledyne.workhorse_monitor_300_khz.cgsn.driver import Protocol
from mi.instrument.teledyne.workhorse_monitor_300_khz.driver import ADCP_PD0_PARSED_KEY
from mi.instrument.teledyne.workhorse_monitor_300_khz.driver import ADCP_PD0_PARSED_DataParticle
from mi.instrument.teledyne.workhorse_monitor_300_khz.driver import ADCP_SYSTEM_CONFIGURATION_KEY
from mi.instrument.teledyne.workhorse_monitor_300_khz.driver import ADCP_SYSTEM_CONFIGURATION_DataParticle
from mi.instrument.teledyne.workhorse_monitor_300_khz.driver import ADCP_COMPASS_CALIBRATION_KEY
from mi.instrument.teledyne.workhorse_monitor_300_khz.driver import ADCP_COMPASS_CALIBRATION_DataParticle
from mi.instrument.teledyne.workhorse_monitor_300_khz.test.test_data import SAMPLE_RAW_DATA1
from mi.instrument.teledyne.workhorse_monitor_300_khz.test.test_data import SAMPLE_RAW_DATA2
from mi.instrument.teledyne.workhorse_monitor_300_khz.test.test_data import SAMPLE_RAW_DATA3
from mi.instrument.teledyne.workhorse_monitor_300_khz.test.test_data import SAMPLE_RAW_DATA4
from mi.instrument.teledyne.workhorse_monitor_300_khz.test.test_data import CALIBRATION_RAW_DATA
from mi.instrument.teledyne.workhorse_monitor_300_khz.test.test_data import PS0_RAW_DATA
from mi.instrument.teledyne.workhorse_monitor_300_khz.test.test_driver import WorkhorseDriverUnitTest
from mi.instrument.teledyne.workhorse_monitor_300_khz.test.test_driver import WorkhorseDriverIntegrationTest
from mi.instrument.teledyne.workhorse_monitor_300_khz.test.test_driver import WorkhorseDriverQualificationTest
from mi.instrument.teledyne.workhorse_monitor_300_khz.test.test_driver import WorkhorseDriverPublicationTest
from mi.instrument.teledyne.workhorse_monitor_300_khz.test.test_driver import DataParticleType
###
# Driver parameters for tests
###
# Module-level test configuration: registers the driver module/class under
# test and the startup parameter + scheduler values the IDK test framework
# applies before running the unit/integration/qualification suites below.
InstrumentDriverTestCase.initialize(
    driver_module='mi.instrument.teledyne.workhorse_monitor_300_khz.cgsn.driver',
    driver_class="InstrumentDriver",

    instrument_agent_resource_id = 'HTWZMW',
    instrument_agent_preload_id = 'IA7',
    instrument_agent_name = 'teledyne_workhorse_monitor_300_khz_cgsn',
    instrument_agent_packet_config = DataParticleType(),

    driver_startup_config = {
        # instrument parameters written at startup
        DriverStartupConfigKey.PARAMETERS: {
            Parameter.INSTRUMENT_ID: 0,
            Parameter.SLEEP_ENABLE: 1,
            Parameter.SAVE_NVRAM_TO_RECORDER: True,
            Parameter.POLLED_MODE: False,
            Parameter.XMIT_POWER: 255,
            Parameter.HEADING_ALIGNMENT: 0,
            Parameter.SPEED_OF_SOUND: 1500,
            Parameter.TRANSDUCER_DEPTH: 0,
            Parameter.SALINITY: 35,
            Parameter.COORDINATE_TRANSFORMATION: '00111',
            Parameter.SENSOR_SOURCE: "1111101",
            Parameter.TIME_PER_BURST: '00:00:00.00',
            Parameter.ENSEMBLES_PER_BURST: 0,
            Parameter.BUFFER_OUTPUT_PERIOD: '00:00:00',
            Parameter.FALSE_TARGET_THRESHOLD: '050,001',
            Parameter.CORRELATION_THRESHOLD: 64,
            Parameter.ERROR_VELOCITY_THRESHOLD: 2000,
            Parameter.CLIP_DATA_PAST_BOTTOM: False,
            Parameter.RECEIVER_GAIN_SELECT: 1,
            Parameter.WATER_REFERENCE_LAYER: '001,005',
            Parameter.TRANSMIT_LENGTH: 0,
            Parameter.PING_WEIGHT: 0,
            Parameter.AMBIGUITY_VELOCITY: 175,
            Parameter.TIME_PER_ENSEMBLE: '01:00:00.00',
            Parameter.TIME_PER_PING: '01:20.00',
            Parameter.NUMBER_OF_DEPTH_CELLS: 30,
            Parameter.PINGS_PER_ENSEMBLE: 1,
            Parameter.DEPTH_CELL_SIZE: 800,
        },
        # scheduled jobs enabled for the test run (default job config)
        DriverStartupConfigKey.SCHEDULER: {
            ScheduledJob.GET_CALIBRATION: {},
            ScheduledJob.GET_CONFIGURATION: {},
            ScheduledJob.CLOCK_SYNC: {}
        }
    }
)
###################################################################
###
# Driver constant definitions
###
###############################################################################
# DATA PARTICLE TEST MIXIN #
# Defines a set of assert methods used for data particle verification #
# #
# In python mixin classes are classes designed such that they wouldn't be #
# able to stand on their own, but are inherited by other classes generally #
# using multiple inheritance. #
# #
# This class defines a configuration structure for testing and common assert #
# methods for validating data particles.
###############################################################################
class ADCPTMixin(DriverTestMixin):
'''
Mixin class used for storing data particle constance
and common data assertion methods.
'''
# Create some short names for the parameter test config
TYPE = ParameterTestConfigKey.TYPE
READONLY = ParameterTestConfigKey.READONLY
STARTUP = ParameterTestConfigKey.STARTUP
DA = ParameterTestConfigKey.DIRECT_ACCESS
VALUE = ParameterTestConfigKey.VALUE
REQUIRED = ParameterTestConfigKey.REQUIRED
DEFAULT = ParameterTestConfigKey.DEFAULT
STATES = ParameterTestConfigKey.STATES
OFF_VALUE = 'off_value'
###
# Parameter and Type Definitions
###
# Verified with ADCPT-B IOS
_driver_parameters = {
Parameter.SERIAL_DATA_OUT: {TYPE: str, READONLY: True, DA: False, STARTUP: True, DEFAULT: "000 000 000",VALUE: "000 000 000",OFF_VALUE: "000 000 001"},
Parameter.SERIAL_FLOW_CONTROL: {TYPE: str, READONLY: True, DA: False, STARTUP: True, DEFAULT: '11110', VALUE: '11110', OFF_VALUE: '10110'},
Parameter.BANNER: {TYPE: bool, READONLY: True, DA: False, STARTUP: True, DEFAULT: 0, VALUE: False, OFF_VALUE: True},
Parameter.INSTRUMENT_ID: {TYPE: int, READONLY: False, DA: False, STARTUP: True, DEFAULT: 0, VALUE: 0, OFF_VALUE: 1},
Parameter.SLEEP_ENABLE: {TYPE: int, READONLY: False, DA: False, STARTUP: True, DEFAULT: 1, VALUE: 1, OFF_VALUE: 0},
Parameter.SAVE_NVRAM_TO_RECORDER: {TYPE: bool, READONLY: False, DA: False, STARTUP: True, DEFAULT: True, VALUE: True, OFF_VALUE: False},
Parameter.POLLED_MODE: {TYPE: bool, READONLY: False, DA: False, STARTUP: True, DEFAULT: False, VALUE: False, OFF_VALUE: True},
Parameter.XMIT_POWER: {TYPE: int, READONLY: False, DA: False, STARTUP: True, DEFAULT: 255, VALUE: 255, OFF_VALUE: 250},
Parameter.HEADING_ALIGNMENT: {TYPE: int, READONLY: False, DA: False, STARTUP: True, DEFAULT: 0, VALUE: 0, OFF_VALUE: 1},
Parameter.SPEED_OF_SOUND: {TYPE: int, READONLY: False, DA: True, STARTUP: True, DEFAULT: 1500, VALUE: 1500, OFF_VALUE: 1480},
Parameter.TRANSDUCER_DEPTH: {TYPE: int, READONLY: False, DA: False, STARTUP: True, DEFAULT: 0, VALUE: 0, OFF_VALUE: 32767},
Parameter.SALINITY: {TYPE: int, READONLY: False, DA: False, STARTUP: True, DEFAULT: 35, VALUE: 35, OFF_VALUE: 36},
Parameter.COORDINATE_TRANSFORMATION: {TYPE: str, READONLY: False, DA: False, STARTUP: True, DEFAULT: '00111', VALUE: '00111', OFF_VALUE: '00000'},
Parameter.SENSOR_SOURCE: {TYPE: str, READONLY: False, DA: False, STARTUP: True, DEFAULT: "1111101",VALUE: "1111101", OFF_VALUE: '0000000'},
Parameter.TIME_PER_BURST: {TYPE: str, READONLY: False, DA: False, STARTUP: True, DEFAULT: '00:00:00.00',VALUE: '00:00:00.00',OFF_VALUE: '00:55:00.00'},
Parameter.ENSEMBLES_PER_BURST: {TYPE: int, READONLY: False, DA: False, STARTUP: True, DEFAULT: 0, VALUE: 0, OFF_VALUE: 999},
Parameter.TIME_OF_FIRST_PING: {TYPE: str, READONLY: True, DA: False, STARTUP: False},
Parameter.TIME: {TYPE: str, READONLY: True, DA: False, STARTUP: False},
Parameter.BUFFER_OUTPUT_PERIOD: {TYPE: str, READONLY: False, DA: False, STARTUP: True, DEFAULT: '00:00:00', VALUE: '00:00:00', OFF_VALUE: '00:55:00'},
Parameter.FALSE_TARGET_THRESHOLD: {TYPE: str, READONLY: False, DA: False, STARTUP: True, DEFAULT: '050,001', VALUE: '050,001', OFF_VALUE: '049,002'},
Parameter.CORRELATION_THRESHOLD: {TYPE: int, READONLY: False, DA: False, STARTUP: True, DEFAULT: 64, VALUE: 64, OFF_VALUE: 63},
Parameter.SERIAL_OUT_FW_SWITCHES: {TYPE: str, READONLY: True, DA: False, STARTUP: True, DEFAULT: '111100000', VALUE: '111100000', OFF_VALUE: '111100001'},
Parameter.ERROR_VELOCITY_THRESHOLD: {TYPE: int, READONLY: False, DA: False, STARTUP: True, DEFAULT: 2000, VALUE: 2000, OFF_VALUE: 1999},
Parameter.BLANK_AFTER_TRANSMIT: {TYPE: int, READONLY: True, DA: False, STARTUP: True, DEFAULT: 352, VALUE: 352, OFF_VALUE: 342},
Parameter.CLIP_DATA_PAST_BOTTOM: {TYPE: bool, READONLY: False, DA: False, STARTUP: True, DEFAULT: False, VALUE: False, OFF_VALUE: True},
Parameter.RECEIVER_GAIN_SELECT: {TYPE: int, READONLY: False, DA: False, STARTUP: True, DEFAULT: 1, VALUE: 1, OFF_VALUE: 0},
Parameter.WATER_REFERENCE_LAYER: {TYPE: str, READONLY: False, DA: False, STARTUP: True, DEFAULT: '001,005', VALUE: '001,005', OFF_VALUE: '002,006'},
Parameter.WATER_PROFILING_MODE: {TYPE: int, READONLY: True, DA: False, STARTUP: True, DEFAULT: 1, VALUE: 1, OFF_VALUE: 0},
Parameter.TRANSMIT_LENGTH: {TYPE: int, READONLY: False, DA: False, STARTUP: True, DEFAULT: 0, VALUE: 0, OFF_VALUE: 1},
Parameter.PING_WEIGHT: {TYPE: int, READONLY: False, DA: False, STARTUP: True, DEFAULT: 0, VALUE: 0, OFF_VALUE: 1},
Parameter.AMBIGUITY_VELOCITY: {TYPE: int, READONLY: False, DA: False, STARTUP: True, DEFAULT: 175, VALUE: 175, OFF_VALUE: 176},
Parameter.TIME_PER_ENSEMBLE: {TYPE: str, READONLY: False, DA: False, STARTUP: True, DEFAULT: '01:00:00.00',VALUE: '01:00:00.00',OFF_VALUE: '00:00:01.00'},
Parameter.TIME_PER_PING: {TYPE: str, READONLY: False, DA: False, STARTUP: True, DEFAULT: '01:20.00',VALUE: '01:20.00', OFF_VALUE: '00:02.00'},
Parameter.BANDWIDTH_CONTROL: {TYPE: int, READONLY: True, DA: False, STARTUP: True, DEFAULT: 1, VALUE: 1, OFF_VALUE: 0},
Parameter.NUMBER_OF_DEPTH_CELLS: {TYPE: int, READONLY: False, DA: False, STARTUP: True, DEFAULT: 30, VALUE: 30, OFF_VALUE: 90},
Parameter.PINGS_PER_ENSEMBLE: {TYPE: int, READONLY: False, DA: False, STARTUP: True, DEFAULT: 1, VALUE: 1, OFF_VALUE: 2},
Parameter.DEPTH_CELL_SIZE: {TYPE: int, READONLY: False, DA: False, STARTUP: True, DEFAULT: 800, VALUE: 800, OFF_VALUE: 790},
}
_driver_capabilities = {
# capabilities defined in the IOS
Capability.START_AUTOSAMPLE: { STATES: [ProtocolState.COMMAND, ProtocolState.AUTOSAMPLE]},
Capability.STOP_AUTOSAMPLE: { STATES: [ProtocolState.COMMAND, ProtocolState.AUTOSAMPLE]},
Capability.CLOCK_SYNC: { STATES: [ProtocolState.COMMAND]},
Capability.GET_CALIBRATION: { STATES: [ProtocolState.COMMAND]},
Capability.GET_CONFIGURATION: { STATES: [ProtocolState.COMMAND]},
Capability.SAVE_SETUP_TO_RAM: { STATES: [ProtocolState.COMMAND]},
Capability.SEND_LAST_SAMPLE: { STATES: [ProtocolState.COMMAND]},
Capability.GET_ERROR_STATUS_WORD: { STATES: [ProtocolState.COMMAND]},
Capability.CLEAR_ERROR_STATUS_WORD: { STATES: [ProtocolState.COMMAND]},
Capability.GET_FAULT_LOG: { STATES: [ProtocolState.COMMAND]},
Capability.CLEAR_FAULT_LOG: { STATES: [ProtocolState.COMMAND]},
Capability.GET_INSTRUMENT_TRANSFORM_MATRIX: { STATES: [ProtocolState.COMMAND]},
Capability.RUN_TEST_200: { STATES: [ProtocolState.COMMAND]},
Capability.POWER_DOWN: { STATES: [ProtocolState.COMMAND]},
}
#name, type done, value pending
EF_CHAR = '\xef'
_calibration_data_parameters = {
ADCP_COMPASS_CALIBRATION_KEY.FLUXGATE_CALIBRATION_TIMESTAMP: {'type': float, 'value': -1785800539.0 },
ADCP_COMPASS_CALIBRATION_KEY.S_INVERSE_BX: {'type': list, 'value': [3.8774e-01, 4.7391e-01, -2.5109e-02, -1.4835e-02] },
ADCP_COMPASS_CALIBRATION_KEY.S_INVERSE_BY: {'type': list, 'value': [-8.2932e-03, 1.8434e-02, -5.2666e-02, 5.8153e-01] },
ADCP_COMPASS_CALIBRATION_KEY.S_INVERSE_BZ: {'type': list, 'value': [2.2218e-01, -1.7820e-01, 2.9168e-01, 1.6125e-02] },
ADCP_COMPASS_CALIBRATION_KEY.S_INVERSE_ERR: {'type': list, 'value': [-5.3909e-01, 4.7951e-01, 7.0135e-01, 4.0629e-02] },
ADCP_COMPASS_CALIBRATION_KEY.COIL_OFFSET: {'type': list, 'value': [3.8310e+04, 3.4872e+04, 3.7008e+04, 3.4458e+04] },
ADCP_COMPASS_CALIBRATION_KEY.ELECTRICAL_NULL: {'type': float, 'value': 34159 },
ADCP_COMPASS_CALIBRATION_KEY.TILT_CALIBRATION_TIMESTAMP: {'type': float, 'value': 1348176909.0 },
ADCP_COMPASS_CALIBRATION_KEY.CALIBRATION_TEMP: {'type': float, 'value': 24.9 },
ADCP_COMPASS_CALIBRATION_KEY.ROLL_UP_DOWN: {'type': list, 'value': [3.5167e-07, -1.4728e-05, -3.5240e-07, 1.5687e-05] },
ADCP_COMPASS_CALIBRATION_KEY.PITCH_UP_DOWN: {'type': list, 'value': [-1.4773e-05, 2.9804e-23, -1.5654e-05, -1.2675e-07] },
ADCP_COMPASS_CALIBRATION_KEY.OFFSET_UP_DOWN: {'type': list, 'value': [3.2170e+04, 3.3840e+04, 3.4094e+04, 3.3028e+04] },
ADCP_COMPASS_CALIBRATION_KEY.TILT_NULL: {'type': float, 'value': 33296 }
}
#name, type done, value pending
_system_configuration_data_parameters = {
ADCP_SYSTEM_CONFIGURATION_KEY.SERIAL_NUMBER: {'type': unicode, 'value': "18493" },
ADCP_SYSTEM_CONFIGURATION_KEY.TRANSDUCER_FREQUENCY: {'type': int, 'value': 307200 },
ADCP_SYSTEM_CONFIGURATION_KEY.CONFIGURATION: {'type': unicode, 'value': "4 BEAM, JANUS" },
ADCP_SYSTEM_CONFIGURATION_KEY.MATCH_LAYER: {'type': unicode, 'value': "10" },
ADCP_SYSTEM_CONFIGURATION_KEY.BEAM_ANGLE: {'type': int, 'value': 20 },
ADCP_SYSTEM_CONFIGURATION_KEY.BEAM_PATTERN: {'type': unicode, 'value': "CONVEX" },
ADCP_SYSTEM_CONFIGURATION_KEY.ORIENTATION: {'type': unicode, 'value': "UP" },
ADCP_SYSTEM_CONFIGURATION_KEY.SENSORS: {'type': unicode, 'value': "HEADING TILT 1 TILT 2 TEMPERATURE" },
ADCP_SYSTEM_CONFIGURATION_KEY.TEMPERATURE_SENSOR_OFFSET: {'type': float, 'value': -0.02 },
ADCP_SYSTEM_CONFIGURATION_KEY.CPU_FIRMWARE: {'type': unicode, 'value': "50.40 [0]" },
ADCP_SYSTEM_CONFIGURATION_KEY.BOOT_CODE_REQUIRED: {'type': unicode, 'value': "1.16" },
ADCP_SYSTEM_CONFIGURATION_KEY.BOOT_CODE_ACTUAL: {'type': unicode, 'value': "1.16" },
ADCP_SYSTEM_CONFIGURATION_KEY.DEMOD_1_VERSION: {'type': unicode, 'value': "ad48" },
ADCP_SYSTEM_CONFIGURATION_KEY.DEMOD_1_TYPE: {'type': unicode, 'value': "1f" },
ADCP_SYSTEM_CONFIGURATION_KEY.DEMOD_2_VERSION: {'type': unicode, 'value': "ad48" },
ADCP_SYSTEM_CONFIGURATION_KEY.DEMOD_2_TYPE: {'type': unicode, 'value': "1f" },
ADCP_SYSTEM_CONFIGURATION_KEY.POWER_TIMING_VERSION: {'type': unicode, 'value': "85d3" },
ADCP_SYSTEM_CONFIGURATION_KEY.POWER_TIMING_TYPE: {'type': unicode, 'value': "7" },
ADCP_SYSTEM_CONFIGURATION_KEY.BOARD_SERIAL_NUMBERS: {'type': unicode, 'value': u'2F 00 00 06 FF 25 D1 09 CPU727-2011-00E\n16 00 00 06 F5 E5 D1 09 DSP727-2001-04H\n27 00 00 06 FF 29 31 09 PIO727-3000-00G\n91 00 00 06 F6 17 A7 09 REC727-1000-04E\n'}
}
    # Expected fields common to all PD0 (ensemble) particles: names and types
    # are verified; per-field value verification was still pending.
    _pd0_parameters_base = {
        ADCP_PD0_PARSED_KEY.HEADER_ID: {'type': int, 'value': 127 },
        ADCP_PD0_PARSED_KEY.DATA_SOURCE_ID: {'type': int, 'value': 127 },
        ADCP_PD0_PARSED_KEY.NUM_BYTES: {'type': int, 'value': 26632 },
        ADCP_PD0_PARSED_KEY.NUM_DATA_TYPES: {'type': int, 'value': 6 },
        ADCP_PD0_PARSED_KEY.OFFSET_DATA_TYPES: {'type': list, 'value': [18, 77, 142, 944, 1346, 1748, 2150] },
        ADCP_PD0_PARSED_KEY.FIXED_LEADER_ID: {'type': int, 'value': 0 },
        ADCP_PD0_PARSED_KEY.FIRMWARE_VERSION: {'type': int, 'value': 50 },
        ADCP_PD0_PARSED_KEY.FIRMWARE_REVISION: {'type': int, 'value': 40 },
        ADCP_PD0_PARSED_KEY.SYSCONFIG_FREQUENCY: {'type': int, 'value': 150 },
        ADCP_PD0_PARSED_KEY.SYSCONFIG_BEAM_PATTERN: {'type': int, 'value': 0 },
        ADCP_PD0_PARSED_KEY.SYSCONFIG_SENSOR_CONFIG: {'type': int, 'value': 1 },
        ADCP_PD0_PARSED_KEY.SYSCONFIG_HEAD_ATTACHED: {'type': int, 'value': 1 },
        ADCP_PD0_PARSED_KEY.SYSCONFIG_VERTICAL_ORIENTATION: {'type': int, 'value': 0 },
        ADCP_PD0_PARSED_KEY.DATA_FLAG: {'type': int, 'value': 0 },
        ADCP_PD0_PARSED_KEY.LAG_LENGTH: {'type': int, 'value': 53 },
        ADCP_PD0_PARSED_KEY.NUM_BEAMS: {'type': int, 'value': 4 },
        ADCP_PD0_PARSED_KEY.NUM_CELLS: {'type': int, 'value': 100 },
        ADCP_PD0_PARSED_KEY.PINGS_PER_ENSEMBLE: {'type': int, 'value': 256 },
        ADCP_PD0_PARSED_KEY.DEPTH_CELL_LENGTH: {'type': int, 'value': 32780 },
        ADCP_PD0_PARSED_KEY.BLANK_AFTER_TRANSMIT: {'type': int, 'value': 49154 },
        ADCP_PD0_PARSED_KEY.SIGNAL_PROCESSING_MODE: {'type': int, 'value': 1 },
        ADCP_PD0_PARSED_KEY.LOW_CORR_THRESHOLD: {'type': int, 'value': 64 },
        ADCP_PD0_PARSED_KEY.NUM_CODE_REPETITIONS: {'type': int, 'value': 17 },
        ADCP_PD0_PARSED_KEY.PERCENT_GOOD_MIN: {'type': int, 'value': 0 },
        ADCP_PD0_PARSED_KEY.ERROR_VEL_THRESHOLD: {'type': int, 'value': 53255 },
        ADCP_PD0_PARSED_KEY.TIME_PER_PING_MINUTES: {'type': int, 'value': 0 },
        ADCP_PD0_PARSED_KEY.TIME_PER_PING_SECONDS: {'type': float, 'value': 1.0 },
        ADCP_PD0_PARSED_KEY.COORD_TRANSFORM_TYPE: {'type': int, 'value': 0 },
        ADCP_PD0_PARSED_KEY.COORD_TRANSFORM_TILTS: {'type': int, 'value': 0 },
        ADCP_PD0_PARSED_KEY.COORD_TRANSFORM_BEAMS: {'type': int, 'value': 0 },
        ADCP_PD0_PARSED_KEY.COORD_TRANSFORM_MAPPING: {'type': int, 'value': 0 },
        ADCP_PD0_PARSED_KEY.HEADING_ALIGNMENT: {'type': int, 'value': 0 },
        ADCP_PD0_PARSED_KEY.HEADING_BIAS: {'type': int, 'value': 0 },
        ADCP_PD0_PARSED_KEY.SENSOR_SOURCE_SPEED: {'type': int, 'value': 1 },
        ADCP_PD0_PARSED_KEY.SENSOR_SOURCE_DEPTH: {'type': int, 'value': 1 },
        ADCP_PD0_PARSED_KEY.SENSOR_SOURCE_HEADING: {'type': int, 'value': 1 },
        ADCP_PD0_PARSED_KEY.SENSOR_SOURCE_PITCH: {'type': int, 'value': 1 },
        ADCP_PD0_PARSED_KEY.SENSOR_SOURCE_ROLL: {'type': int, 'value': 1 },
        ADCP_PD0_PARSED_KEY.SENSOR_SOURCE_CONDUCTIVITY: {'type': int, 'value': 0 },
        ADCP_PD0_PARSED_KEY.SENSOR_SOURCE_TEMPERATURE: {'type': int, 'value': 1 },
        ADCP_PD0_PARSED_KEY.SENSOR_AVAILABLE_DEPTH: {'type': int, 'value': 1 },
        ADCP_PD0_PARSED_KEY.SENSOR_AVAILABLE_HEADING: {'type': int, 'value': 1 },
        ADCP_PD0_PARSED_KEY.SENSOR_AVAILABLE_PITCH: {'type': int, 'value': 1 },
        ADCP_PD0_PARSED_KEY.SENSOR_AVAILABLE_ROLL: {'type': int, 'value': 1 },
        ADCP_PD0_PARSED_KEY.SENSOR_AVAILABLE_CONDUCTIVITY: {'type': int, 'value': 0 },
        ADCP_PD0_PARSED_KEY.SENSOR_AVAILABLE_TEMPERATURE: {'type': int, 'value': 1 },
        ADCP_PD0_PARSED_KEY.BIN_1_DISTANCE: {'type': int, 'value': 60175 },
        ADCP_PD0_PARSED_KEY.TRANSMIT_PULSE_LENGTH: {'type': int, 'value': 4109 },
        ADCP_PD0_PARSED_KEY.REFERENCE_LAYER_START: {'type': int, 'value': 1 },
        ADCP_PD0_PARSED_KEY.REFERENCE_LAYER_STOP: {'type': int, 'value': 5 },
        ADCP_PD0_PARSED_KEY.FALSE_TARGET_THRESHOLD: {'type': int, 'value': 50 },
        ADCP_PD0_PARSED_KEY.LOW_LATENCY_TRIGGER: {'type': int, 'value': 0 },
        ADCP_PD0_PARSED_KEY.TRANSMIT_LAG_DISTANCE: {'type': int, 'value': 50688 },
        # Python 2 'long' — these values exceed a 32-bit int.
        ADCP_PD0_PARSED_KEY.CPU_BOARD_SERIAL_NUMBER: {'type': long, 'value': 9367487254980977929L },
        ADCP_PD0_PARSED_KEY.SYSTEM_BANDWIDTH: {'type': int, 'value': 0 },
        ADCP_PD0_PARSED_KEY.SYSTEM_POWER: {'type': int, 'value': 255 },
        ADCP_PD0_PARSED_KEY.SERIAL_NUMBER: {'type': long, 'value': 206045184 },
        ADCP_PD0_PARSED_KEY.BEAM_ANGLE: {'type': int, 'value': 20 },
        ADCP_PD0_PARSED_KEY.VARIABLE_LEADER_ID: {'type': int, 'value': 128 },
        ADCP_PD0_PARSED_KEY.ENSEMBLE_NUMBER: {'type': int, 'value': 5 },
        # NOTE(review): INTERNAL_TIMESTAMP is keyed again further down with a
        # different value; in a dict literal the later entry silently wins, so
        # this entry is dead. Confirm which value is intended.
        ADCP_PD0_PARSED_KEY.INTERNAL_TIMESTAMP: {'type': float, 'value': 752 },
        ADCP_PD0_PARSED_KEY.ENSEMBLE_NUMBER_INCREMENT: {'type': int, 'value': 0 },
        ADCP_PD0_PARSED_KEY.BIT_RESULT_DEMOD_1: {'type': int, 'value': 0 },
        ADCP_PD0_PARSED_KEY.BIT_RESULT_DEMOD_2: {'type': int, 'value': 0 },
        ADCP_PD0_PARSED_KEY.BIT_RESULT_TIMING: {'type': int, 'value': 0 },
        ADCP_PD0_PARSED_KEY.SPEED_OF_SOUND: {'type': int, 'value': 1523 },
        ADCP_PD0_PARSED_KEY.TRANSDUCER_DEPTH: {'type': int, 'value': 0 },
        ADCP_PD0_PARSED_KEY.HEADING: {'type': int, 'value': 5221 },
        ADCP_PD0_PARSED_KEY.PITCH: {'type': int, 'value': -4657 },
        ADCP_PD0_PARSED_KEY.ROLL: {'type': int, 'value': -4561 },
        ADCP_PD0_PARSED_KEY.SALINITY: {'type': int, 'value': 35 },
        ADCP_PD0_PARSED_KEY.TEMPERATURE: {'type': int, 'value': 2050 },
        ADCP_PD0_PARSED_KEY.MPT_MINUTES: {'type': int, 'value': 0 },
        ADCP_PD0_PARSED_KEY.MPT_SECONDS: {'type': float, 'value': 0.0 },
        ADCP_PD0_PARSED_KEY.HEADING_STDEV: {'type': int, 'value': 0 },
        ADCP_PD0_PARSED_KEY.PITCH_STDEV: {'type': int, 'value': 0 },
        ADCP_PD0_PARSED_KEY.ROLL_STDEV: {'type': int, 'value': 0 },
        ADCP_PD0_PARSED_KEY.ADC_TRANSMIT_CURRENT: {'type': int, 'value': 116 },
        ADCP_PD0_PARSED_KEY.ADC_TRANSMIT_VOLTAGE: {'type': int, 'value': 169 },
        ADCP_PD0_PARSED_KEY.ADC_AMBIENT_TEMP: {'type': int, 'value': 88 },
        ADCP_PD0_PARSED_KEY.ADC_PRESSURE_PLUS: {'type': int, 'value': 79 },
        ADCP_PD0_PARSED_KEY.ADC_PRESSURE_MINUS: {'type': int, 'value': 79 },
        ADCP_PD0_PARSED_KEY.ADC_ATTITUDE_TEMP: {'type': int, 'value': 0 },
        ADCP_PD0_PARSED_KEY.ADC_ATTITUDE: {'type': int, 'value': 0 },
        ADCP_PD0_PARSED_KEY.ADC_CONTAMINATION_SENSOR: {'type': int, 'value': 0 },
        ADCP_PD0_PARSED_KEY.BUS_ERROR_EXCEPTION: {'type': int, 'value': 0 },
        ADCP_PD0_PARSED_KEY.ADDRESS_ERROR_EXCEPTION: {'type': int, 'value': 0 },
        ADCP_PD0_PARSED_KEY.ILLEGAL_INSTRUCTION_EXCEPTION: {'type': int, 'value': 0 },
        ADCP_PD0_PARSED_KEY.ZERO_DIVIDE_INSTRUCTION: {'type': int, 'value': 0 },
        ADCP_PD0_PARSED_KEY.EMULATOR_EXCEPTION: {'type': int, 'value': 0 },
        ADCP_PD0_PARSED_KEY.UNASSIGNED_EXCEPTION: {'type': int, 'value': 0 },
        ADCP_PD0_PARSED_KEY.WATCHDOG_RESTART_OCCURED: {'type': int, 'value': 0 },
        ADCP_PD0_PARSED_KEY.BATTERY_SAVER_POWER: {'type': int, 'value': 0 },
        ADCP_PD0_PARSED_KEY.PINGING: {'type': int, 'value': 0 },
        ADCP_PD0_PARSED_KEY.COLD_WAKEUP_OCCURED: {'type': int, 'value': 0 },
        ADCP_PD0_PARSED_KEY.UNKNOWN_WAKEUP_OCCURED: {'type': int, 'value': 0 },
        ADCP_PD0_PARSED_KEY.CLOCK_READ_ERROR: {'type': int, 'value': 0 },
        ADCP_PD0_PARSED_KEY.UNEXPECTED_ALARM: {'type': int, 'value': 0 },
        ADCP_PD0_PARSED_KEY.CLOCK_JUMP_FORWARD: {'type': int, 'value': 0 },
        ADCP_PD0_PARSED_KEY.CLOCK_JUMP_BACKWARD: {'type': int, 'value': 0 },
        ADCP_PD0_PARSED_KEY.POWER_FAIL: {'type': int, 'value': 0 },
        ADCP_PD0_PARSED_KEY.SPURIOUS_DSP_INTERRUPT: {'type': int, 'value': 0 },
        ADCP_PD0_PARSED_KEY.SPURIOUS_UART_INTERRUPT: {'type': int, 'value': 0 },
        ADCP_PD0_PARSED_KEY.SPURIOUS_CLOCK_INTERRUPT: {'type': int, 'value': 0 },
        ADCP_PD0_PARSED_KEY.LEVEL_7_INTERRUPT: {'type': int, 'value': 0 },
        ADCP_PD0_PARSED_KEY.ABSOLUTE_PRESSURE: {'type': int, 'value': 4294963793 },
        ADCP_PD0_PARSED_KEY.PRESSURE_VARIANCE: {'type': int, 'value': 0 },
        ADCP_PD0_PARSED_KEY.INTERNAL_TIMESTAMP: {'type': float, 'value': 1363408382.02 },
        ADCP_PD0_PARSED_KEY.VELOCITY_DATA_ID: {'type': int, 'value': 1 },
        ADCP_PD0_PARSED_KEY.CORRELATION_MAGNITUDE_ID: {'type': int, 'value': 2 },
        ADCP_PD0_PARSED_KEY.CORRELATION_MAGNITUDE_BEAM1: {'type': list, 'value': [19801, 1796, 1800, 1797, 1288, 1539, 1290, 1543, 1028, 1797, 1538, 775, 1034, 1283, 1029, 1799, 1801, 1545, 519, 772, 519, 1033, 1028, 1286, 521, 519, 1545, 1801, 522, 1286, 1030, 1032, 1542, 1035, 1283, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0] },
        ADCP_PD0_PARSED_KEY.CORRELATION_MAGNITUDE_BEAM2: {'type': list, 'value': [22365, 2057, 2825, 2825, 1801, 2058, 1545, 1286, 3079, 522, 1547, 519, 2052, 2820, 519, 1806, 1026, 1547, 1795, 1801, 2311, 1030, 781, 1796, 1037, 1802, 1035, 1798, 770, 2313, 1292, 1031, 1030, 2830, 523, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0] },
        ADCP_PD0_PARSED_KEY.CORRELATION_MAGNITUDE_BEAM3: {'type': list, 'value': [3853, 1796, 1289, 1803, 2317, 2571, 1028, 1282, 1799, 2825, 2574, 1026, 1028, 518, 1290, 1286, 1032, 1797, 1028, 2312, 1031, 775, 1549, 772, 1028, 772, 2570, 1288, 1796, 1542, 1538, 777, 1282, 773, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0] },
        ADCP_PD0_PARSED_KEY.CORRELATION_MAGNITUDE_BEAM4: {'type': list, 'value': [5386, 4100, 2822, 1286, 774, 1799, 518, 778, 3340, 1031, 1546, 1545, 1547, 2566, 3077, 3334, 1801, 1809, 2058, 1539, 1798, 1546, 3593, 1032, 2307, 1025, 1545, 2316, 2055, 1546, 1292, 2312, 1035, 2316, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0] },
        ADCP_PD0_PARSED_KEY.ECHO_INTENSITY_ID: {'type': int, 'value': 3 },
        ADCP_PD0_PARSED_KEY.ECHO_INTENSITY_BEAM1: {'type': list, 'value': [24925, 10538, 10281, 10537, 10282, 10281, 10281, 10282, 10282, 10281, 10281, 10281, 10538, 10282, 10281, 10282, 10281, 10537, 10281, 10281, 10281, 10281, 10281, 10281, 10281, 10281, 10281, 10281, 10281, 10282, 10281, 10282, 10537, 10281, 10281, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0] },
        ADCP_PD0_PARSED_KEY.ECHO_INTENSITY_BEAM2: {'type': list, 'value': [29027, 12334, 12334, 12078, 12078, 11821, 12334, 12334, 12078, 12078, 12078, 12078, 12078, 12078, 12078, 12079, 12334, 12078, 12334, 12333, 12078, 12333, 12078, 12077, 12078, 12078, 12078, 12334, 12077, 12078, 12078, 12078, 12078, 12078, 12078, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0] },
        ADCP_PD0_PARSED_KEY.ECHO_INTENSITY_BEAM3: {'type': list, 'value': [12079, 10282, 10281, 10281, 10282, 10281, 10282, 10282, 10281, 10025, 10282, 10282, 10282, 10282, 10025, 10282, 10281, 10025, 10281, 10281, 10282, 10281, 10282, 10281, 10281, 10281, 10537, 10282, 10281, 10281, 10281, 10281, 10281, 10282, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0] },
        ADCP_PD0_PARSED_KEY.ECHO_INTENSITY_BEAM4: {'type': list, 'value': [14387, 12334, 12078, 12078, 12078, 12334, 12078, 12334, 12078, 12078, 12077, 12077, 12334, 12078, 12334, 12078, 12334, 12077, 12078, 11821, 12335, 12077, 12078, 12077, 12334, 11822, 12334, 12334, 12077, 12077, 12078, 11821, 11821, 12078, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0] },
        ADCP_PD0_PARSED_KEY.PERCENT_GOOD_ID: {'type': int, 'value': 4 },
        ADCP_PD0_PARSED_KEY.CHECKSUM: {'type': int, 'value': 8239 }
    }
# red
_coordinate_transformation_earth_parameters = {
# Earth Coordinates
ADCP_PD0_PARSED_KEY.WATER_VELOCITY_EAST: {'type': list, 'value': [128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128] },
ADCP_PD0_PARSED_KEY.WATER_VELOCITY_NORTH: {'type': list, 'value': [128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128] },
ADCP_PD0_PARSED_KEY.WATER_VELOCITY_UP: {'type': list, 'value': [128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128] },
ADCP_PD0_PARSED_KEY.ERROR_VELOCITY: {'type': list, 'value': [128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128] },
ADCP_PD0_PARSED_KEY.PERCENT_GOOD_3BEAM: {'type': list, 'value': [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0] },
ADCP_PD0_PARSED_KEY.PERCENT_TRANSFORMS_REJECT: {'type': list, 'value': [25600, 25600, 25600, 25600, 25600, 25600, 25600, 25600, 25600, 25600, 25600, 25600, 25600, 25600, 25600, 25600, 25600, 25600, 25600, 25600, 25600, 25600, 25600, 25600, 25600, 25600, 25600, 25600, 25600, 25600, 25600, 25600, 25600, 25600, 25600, 25600, 25600, 25600, 25600, 25600, 25600, 25600, 25600, 25600, 25600, 25600, 25600, 25600, 25600] },
ADCP_PD0_PARSED_KEY.PERCENT_BAD_BEAMS: {'type': list, 'value': [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0] },
ADCP_PD0_PARSED_KEY.PERCENT_GOOD_4BEAM: {'type': list, 'value': [25600, 25600, 25600, 25600, 25600, 25600, 25600, 25600, 25600, 25600, 25600, 25600, 25600, 25600, 25600, 25600, 25600, 25600, 25600, 25600, 25600, 25600, 25600, 25600, 25600, 25600, 25600, 25600, 25600, 25600, 25600, 25600, 25600, 25600, 25600, 25600, 25600, 25600, 25600, 25600, 25600, 25600, 25600, 25600, 25600, 25600, 25600, 25600, 25600] },
}
# blue
_coordinate_transformation_beam_parameters = {
# Beam Coordinates
ADCP_PD0_PARSED_KEY.PERCENT_GOOD_BEAM1: {'type': list, 'value': [25700, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0] },
ADCP_PD0_PARSED_KEY.PERCENT_GOOD_BEAM2: {'type': list, 'value': [25700, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0] },
ADCP_PD0_PARSED_KEY.PERCENT_GOOD_BEAM3: {'type': list, 'value': [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0] },
ADCP_PD0_PARSED_KEY.PERCENT_GOOD_BEAM4: {'type': list, 'value': [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0] },
ADCP_PD0_PARSED_KEY.BEAM_1_VELOCITY: {'type': list, 'value': [4864, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128] },
ADCP_PD0_PARSED_KEY.BEAM_2_VELOCITY: {'type': list, 'value': [62719, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128] },
ADCP_PD0_PARSED_KEY.BEAM_3_VELOCITY: {'type': list, 'value': [45824, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128] },
ADCP_PD0_PARSED_KEY.BEAM_4_VELOCITY : {'type': list, 'value': [19712, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128] },
}
_pd0_parameters = dict(_pd0_parameters_base.items() +
_coordinate_transformation_beam_parameters.items())
    ###
    # Driver Parameter Methods
    ###
def assert_driver_parameters(self, current_parameters, verify_values = False):
"""
Verify that all driver parameters are correct and potentially verify values.
@param current_parameters: driver parameters read from the driver instance
@param verify_values: should we verify values against definition?
"""
log.debug("assert_driver_parameters current_parameters = " + str(current_parameters))
self.assert_parameters(current_parameters, self._driver_parameters, verify_values)
###
# Data Particle Parameters Methods
###
def assert_sample_data_particle(self, data_particle):
'''
Verify a particle is a know particle to this driver and verify the particle is correct
@param data_particle: Data particle of unkown type produced by the driver
'''
if (isinstance(data_particle, DataParticleType.ADCP_PD0_PARSED_BEAM)):
self.assert_particle_pd0_data(data_particle)
elif (isinstance(data_particle, DataParticleType.ADCP_SYSTEM_CONFIGURATION)):
self.assert_particle_system_configuration(data_particle)
elif (isinstance(data_particle, DataParticleType.ADCP_COMPASS_CALIBRATION)):
self.assert_particle_compass_calibration(data_particle)
else:
log.error("Unknown Particle Detected: %s" % data_particle)
self.assertFalse(True)
def assert_particle_compass_calibration(self, data_particle, verify_values = True):
'''
Verify an adcpt calibration data particle
@param data_particle: ADCPT_CalibrationDataParticle data particle
@param verify_values: bool, should we verify parameter values
'''
log.debug("in assert_particle_compass_calibration")
log.debug("data_particle = " + repr(data_particle))
self.assert_data_particle_header(data_particle, DataParticleType.ADCP_COMPASS_CALIBRATION)
self.assert_data_particle_parameters(data_particle, self._calibration_data_parameters, verify_values)
def assert_particle_system_configuration(self, data_particle, verify_values = True):
'''
Verify an adcpt fd data particle
@param data_particle: ADCPT_FDDataParticle data particle
@param verify_values: bool, should we verify parameter values
'''
self.assert_data_particle_header(data_particle, DataParticleType.ADCP_SYSTEM_CONFIGURATION)
self.assert_data_particle_parameters(data_particle, self._system_configuration_data_parameters, verify_values)
def assert_particle_pd0_data(self, data_particle, verify_values=True):
'''
Verify an adcpt ps0 data particle
@param data_particle: ADCPT_PS0DataParticle data particle
@param verify_values: bool, should we verify parameter values
'''
log.debug("IN assert_particle_pd0_data")
self.assert_data_particle_header(data_particle, DataParticleType.ADCP_PD0_PARSED_BEAM)
self.assert_data_particle_parameters(data_particle, self._pd0_parameters) # , verify_values
def setUp(self):
DriverTestMixin.setUp(self)
self._driver_parameter_defaults = {}
for label in self._driver_parameters.keys():
if self.VALUE in self._driver_parameters[label]:
self._driver_parameter_defaults[label] = self._driver_parameters[label][self.VALUE]
else:
self._driver_parameter_defaults[label] = None
###############################################################################
# UNIT TESTS #
# Unit tests test the method calls and parameters using Mock. #
###############################################################################
@attr('UNIT', group='mi')
class UnitFromIDK(WorkhorseDriverUnitTest, ADCPTMixin):
    """
    Unit tests for the Workhorse ADCP driver: exercise protocol and driver
    behavior with mocked callbacks only (no instrument connection).
    """
    def setUp(self):
        # Run both parent fixtures: the unit-test harness and the mixin that
        # builds the expected-parameter tables.
        WorkhorseDriverUnitTest.setUp(self)
        ADCPTMixin.setUp(self)
    def test_defaults(self):
        # Log the default recorded for every driver parameter (no assertions —
        # this is a diagnostic dump of the table built in ADCPTMixin.setUp).
        for label in sorted(self._driver_parameter_defaults.keys()):
            log.debug(str(label) + " = " + str(self._driver_parameter_defaults[label]))
    def test_sanity(self):
        # Smoke test: driver and protocol objects can be constructed.
        my_event_callback = Mock(spec="fake evt_callback")
        driver = InstrumentDriver(self._got_data_event_callback)
        protocol = Protocol(Prompt, NEWLINE, my_event_callback)
    def test_send_break(self):
        # Verify _send_break recognizes both known break-response formats by
        # stubbing the low-level break command with canned instrument output.
        my_event_callback = Mock(spec="fake evt_callback")
        self.protocol = Protocol(Prompt, NEWLINE, my_event_callback)
        def fake_send_break1_cmd(duration):
            # Variant with the battery-saver banner run together on one line.
            log.debug("IN fake_send_break1_cmd")
            self.protocol._linebuf = "[BREAK Wakeup A]\n" + \
                                     " Polled Mode is OFF -- Battery Saver is ONWorkHorse Broadband ADCP Version 50.40\n" + \
                                     "Teledyne RD Instruments (c) 1996-2010\n" + \
                                     "All Rights Reserved."
        def fake_send_break2_cmd(duration):
            # Variant without the battery-saver banner.
            log.debug("IN fake_send_break2_cmd")
            self.protocol._linebuf = "[BREAK Wakeup A]" + NEWLINE + \
                                     "WorkHorse Broadband ADCP Version 50.40" + NEWLINE + \
                                     "Teledyne RD Instruments (c) 1996-2010" + NEWLINE + \
                                     "All Rights Reserved."
        self.protocol._send_break_cmd = fake_send_break1_cmd
        self.assertTrue(self.protocol._send_break(500))
        self.protocol._send_break_cmd = fake_send_break2_cmd
        self.assertTrue(self.protocol._send_break(500))
    def test_driver_schema(self):
        """
        get the driver schema and verify it is configured properly
        """
        self.maxDiff = None
        driver = InstrumentDriver(self._got_data_event_callback)
        self.assert_driver_schema(driver, self._driver_parameters, self._driver_capabilities)
    def test_got_data(self):
        """
        Verify sample data passed through the got data method produces the correct data particles
        """
        # Create and initialize the instrument driver with a mock port agent
        driver = InstrumentDriver(self._got_data_event_callback)
        self.assert_initialize_driver(driver)
        self.assert_raw_particle_published(driver, True)
        # Start validating data particles
        self.assert_particle_published(driver, CALIBRATION_RAW_DATA, self.assert_particle_compass_calibration, True)
        self.assert_particle_published(driver, PS0_RAW_DATA, self.assert_particle_system_configuration, True)
        self.assert_particle_published(driver, SAMPLE_RAW_DATA1, self.assert_particle_pd0_data, True)
        self.assert_particle_published(driver, SAMPLE_RAW_DATA2, self.assert_particle_pd0_data, True)
        self.assert_particle_published(driver, SAMPLE_RAW_DATA3, self.assert_particle_pd0_data, True)
        self.assert_particle_published(driver, SAMPLE_RAW_DATA4, self.assert_particle_pd0_data, True)
    def test_driver_parameters(self):
        """
        Verify the set of parameters known by the driver
        """
        driver = InstrumentDriver(self._got_data_event_callback)
        self.assert_initialize_driver(driver, ProtocolState.COMMAND)
        expected_parameters = sorted(self._driver_parameters.keys())
        reported_parameters = sorted(driver.get_resource(Parameter.ALL))
        log.debug("*** Expected Parameters: %s" % expected_parameters)
        log.debug("*** Reported Parameters: %s" % reported_parameters)
        self.assertEqual(reported_parameters, expected_parameters)
        # Verify the parameter definitions
        self.assert_driver_parameter_definition(driver, self._driver_parameters)
    def test_capabilities(self):
        """
        Verify the FSM reports capabilities as expected. All states defined in this dict must
        also be defined in the protocol FSM.
        """
        capabilities = {
            ProtocolState.UNKNOWN: ['DRIVER_EVENT_DISCOVER'],
            ProtocolState.COMMAND: ['DRIVER_EVENT_CLOCK_SYNC',
                                    'DRIVER_EVENT_GET',
                                    'DRIVER_EVENT_INIT_PARAMS',
                                    'DRIVER_EVENT_SET',
                                    'DRIVER_EVENT_START_AUTOSAMPLE',
                                    'DRIVER_EVENT_START_DIRECT',
                                    'PROTOCOL_EVENT_CLEAR_ERROR_STATUS_WORD',
                                    'PROTOCOL_EVENT_CLEAR_FAULT_LOG',
                                    'PROTOCOL_EVENT_GET_CALIBRATION',
                                    'PROTOCOL_EVENT_GET_CONFIGURATION',
                                    'PROTOCOL_EVENT_GET_ERROR_STATUS_WORD',
                                    'PROTOCOL_EVENT_GET_FAULT_LOG',
                                    'PROTOCOL_EVENT_GET_INSTRUMENT_TRANSFORM_MATRIX',
                                    'PROTOCOL_EVENT_POWER_DOWN',
                                    'PROTOCOL_EVENT_RECOVER_AUTOSAMPLE',
                                    'PROTOCOL_EVENT_RUN_TEST_200',
                                    'PROTOCOL_EVENT_SAVE_SETUP_TO_RAM',
                                    'PROTOCOL_EVENT_SCHEDULED_CLOCK_SYNC',
                                    'PROTOCOL_EVENT_SEND_LAST_SAMPLE'],
            ProtocolState.AUTOSAMPLE: ['DRIVER_EVENT_STOP_AUTOSAMPLE',
                                       'DRIVER_EVENT_GET',
                                       'DRIVER_EVENT_INIT_PARAMS',
                                       'DRIVER_EVENT_DISCOVER',
                                       'PROTOCOL_EVENT_GET_CALIBRATION',
                                       'PROTOCOL_EVENT_GET_CONFIGURATION',
                                       'PROTOCOL_EVENT_SCHEDULED_CLOCK_SYNC'],
            ProtocolState.DIRECT_ACCESS: ['DRIVER_EVENT_STOP_DIRECT', 'EXECUTE_DIRECT']
        }
        driver = InstrumentDriver(self._got_data_event_callback)
        self.assert_capabilities(driver, capabilities)
    def test_driver_enums(self):
        """
        Verify that all driver enumeration has no duplicate values that might cause confusion. Also
        do a little extra validation for the Capabilities
        """
        self.assert_enum_has_no_duplicates(InstrumentCmds())
        self.assert_enum_has_no_duplicates(ProtocolState())
        self.assert_enum_has_no_duplicates(ProtocolEvent())
        self.assert_enum_has_no_duplicates(Parameter())
        self.assert_enum_has_no_duplicates(DataParticleType())
        self.assert_enum_has_no_duplicates(ScheduledJob())
        # Test capabilities for duplicates, then verify that capabilities is a subset of proto events
        self.assert_enum_has_no_duplicates(Capability())
        self.assert_enum_complete(Capability(), ProtocolEvent())
    def test_chunker(self):
        """
        Test the chunker and verify the particles created.
        """
        chunker = StringChunker(Protocol.sieve_function)
        # Each raw sample must chunk cleanly whole, with noise, fragmented,
        # and when combined with another sample.
        self.assert_chunker_sample(chunker, SAMPLE_RAW_DATA1)
        self.assert_chunker_sample_with_noise(chunker, SAMPLE_RAW_DATA1)
        self.assert_chunker_fragmented_sample(chunker, SAMPLE_RAW_DATA1, 32)
        self.assert_chunker_combined_sample(chunker, SAMPLE_RAW_DATA1)
        self.assert_chunker_sample(chunker, PS0_RAW_DATA)
        self.assert_chunker_sample_with_noise(chunker, PS0_RAW_DATA)
        self.assert_chunker_fragmented_sample(chunker, PS0_RAW_DATA, 32)
        self.assert_chunker_combined_sample(chunker, PS0_RAW_DATA)
        self.assert_chunker_sample(chunker, CALIBRATION_RAW_DATA)
        self.assert_chunker_sample_with_noise(chunker, CALIBRATION_RAW_DATA)
        self.assert_chunker_fragmented_sample(chunker, CALIBRATION_RAW_DATA, 32)
        self.assert_chunker_combined_sample(chunker, CALIBRATION_RAW_DATA)
    def test_protocol_filter_capabilities(self):
        """
        This tests driver filter_capabilities.
        Iterate through available capabilities, and verify that they can pass successfully through the filter.
        Test silly made up capabilities to verify they are blocked by filter.
        """
        my_event_callback = Mock(spec="my_event_callback")
        protocol = Protocol(Prompt, NEWLINE, my_event_callback)
        driver_capabilities = Capability().list()
        test_capabilities = Capability().list()
        # Add a bogus capability that will be filtered out.
        test_capabilities.append("BOGUS_CAPABILITY")
        # Verify "BOGUS_CAPABILITY was filtered out
        self.assertEquals(driver_capabilities, protocol._filter_capabilities(test_capabilities))
###############################################################################
#                            INTEGRATION TESTS                                #
#     Integration tests test the direct driver / instrument interaction       #
#     by making direct calls via zeromq.                                      #
#     - Common Integration tests test the driver through the instrument agent #
#       and are common for all drivers (minimum requirement for ION ingestion)#
###############################################################################
@attr('INT', group='mi')
class IntFromIDK(WorkhorseDriverIntegrationTest, ADCPTMixin):
    """
    Integration tests that exercise the driver directly (no instrument agent).
    """

    # dict to store if a param has been range stress tested; each
    # _test_set_* helper records itself here on success, and
    # test_set_ranges audits this map against _driver_parameters.
    _tested = {}

    def test_autosample_particle_generation(self):
        """
        Test that we can generate particles when in autosample
        """
        log.debug("IN test_autosample_particle_generation")
        self.assert_initialize_driver()

        # lets set things to do faster pinging so the test is runable in our time scales.
        self.assert_set_bulk({Parameter.PINGS_PER_ENSEMBLE: 5,
                              Parameter.TIME_PER_PING: "00:10.00",
                              Parameter.TIME_PER_ENSEMBLE: "00:01:00.00"})

        self.assert_driver_command(ProtocolEvent.START_AUTOSAMPLE, state=ProtocolState.AUTOSAMPLE, delay=1)
        # A PD0 beam particle should arrive within the (generous) timeout.
        self.assert_async_particle_generation(DataParticleType.ADCP_PD0_PARSED_BEAM, self.assert_particle_pd0_data, timeout=200)
        self.assert_driver_command(ProtocolEvent.STOP_AUTOSAMPLE, state=ProtocolState.COMMAND, delay=50)

    def test_set_ranges(self):
        """
        Range-stress every settable parameter, then audit coverage.
        """
        # Updated to match ADCPT-B
        log.debug("IN test_set_ranges")
        self.assert_initialize_driver()

        # Each helper exercises one parameter's accepted/rejected value
        # ranges and marks it in self._tested.  Most helpers live on the
        # WorkhorseDriverIntegrationTest base; a few are overridden below.
        self._test_set_serial_data_out_readonly()
        self._test_set_serial_flow_control_readonly()
        self._test_set_banner_readonly()
        self._test_set_instrument_id()
        self._test_set_sleep_enable()
        self._test_set_save_nvram_to_recorder()
        self._test_set_polled_mode()
        self._test_set_xmit_power()
        self._test_set_heading_alignment()
        self._test_set_speed_of_sound()
        self._test_set_transducer_depth()
        self._test_set_salinity()
        self._test_set_coordinate_transformation()
        self._test_set_sensor_source()
        self._test_set_time_per_burst()
        self._test_set_ensembles_per_burst()
        self._test_set_time_of_first_ping_readonly()
        self._test_set_buffer_output_period()
        self._test_set_false_target_threshold()
        self._test_set_correlation_threshold()
        self._test_set_serial_out_fw_switches_readonly()
        self._test_set_error_velocity_threshold()
        self._test_set_blank_after_transmit_readonly()
        self._test_set_clip_data_past_bottom()
        self._test_set_receiver_gain_select()
        self._test_set_water_reference_layer()
        self._test_set_water_profiling_mode_readonly()
        self._test_set_transmit_length()
        self._test_set_ping_weight()
        self._test_set_ambiguity_velocity()
        self._test_set_time_per_ensemble()
        self._test_set_time_per_ping()
        self._test_set_bandwidth_control_readonly()
        self._test_set_number_of_depth_cells()
        self._test_set_pings_per_ensemble()
        self._test_set_depth_cell_size()

        # Audit: every driver parameter (except TIME and
        # TIME_OF_FIRST_PING, which are excluded below) must have been
        # exercised; parameters tested but unknown only warn.
        fail = False
        log.error("self._tested = " + repr(self._tested))  # NOTE(review): informational content logged at error level
        for k in self._tested.keys():
            if k not in self._driver_parameters.keys():
                log.error("*WARNING* " + k + " was tested but is not in _driver_parameters")
                #fail = True
        for k in self._driver_parameters.keys():
            if k not in [Parameter.TIME_OF_FIRST_PING, Parameter.TIME] + self._tested.keys():
                log.error("*ERROR* " + k + " is in _driver_parameters but was not tested.")
                fail = True

        self.assertFalse(fail, "See above for un-exercized parameters.")

    def test_set_bulk(self):
        """
        Test all set commands. Verify all exception cases.
        """
        log.error("IN test_set_bulk")  # NOTE(review): informational content logged at error level
        self.assert_initialize_driver()

        # Collect every writable parameter that declares a VALUE.
        params = {}
        for k in self._driver_parameters.keys():
            if self.VALUE in self._driver_parameters[k]:
                if self._driver_parameters[k][self.READONLY] == False:
                    params[k] = self._driver_parameters[k][self.VALUE]

        # Set all parameters to a known ground state
        self.assert_set_bulk(params)

        ###
        # Instrument Parameteres
        ###

        # set to off_values so we get a config change
        for k in self._driver_parameters.keys():
            if self.VALUE in self._driver_parameters[k]:
                if False == self._driver_parameters[k][self.READONLY]:
                    self.assert_set(k, self._driver_parameters[k][self.OFF_VALUE])
                    log.debug("WANT PARAM CHANGE EVENT, SETTING " + k + " to " + str(self._driver_parameters[k][self.VALUE]))
                if True == self._driver_parameters[k][self.READONLY]:
                    self.assert_set_readonly(k)

        # Now flip everything back to the nominal VALUE; read-only
        # parameters must raise when a set is attempted.
        for k in self._driver_parameters.keys():
            if self.VALUE in self._driver_parameters[k]:
                if False == self._driver_parameters[k][self.READONLY]:
                    self.assert_set(k, self._driver_parameters[k][self.VALUE])
                    log.debug("WANT PARAM CHANGE EVENT, SETTING " + k + " to " + str(self._driver_parameters[k][self.VALUE]))
                if True == self._driver_parameters[k][self.READONLY]:
                    self.assert_set_exception(k, self._driver_parameters[k][self.VALUE])
                    log.debug("WANT EXCEPTION SETTING " + k + " to " + str(self._driver_parameters[k][self.VALUE]))

    def test_startup_params(self):
        """
        Verify that startup parameters are applied correctly. Generally this
        happens in the driver discovery method.

        since nose orders the tests by ascii value this should run first.
        """
        log.error("IN test_startup_params")  # NOTE(review): informational content logged at error level
        self.assert_initialize_driver()

        # Updated to reflect T-F startup params.
        # Expected values after startup; mirrors the driver_startup_config
        # passed to InstrumentDriverTestCase.initialize.
        get_values = {
            Parameter.INSTRUMENT_ID: 0,
            Parameter.SLEEP_ENABLE: 1,
            Parameter.SAVE_NVRAM_TO_RECORDER: True,
            Parameter.POLLED_MODE: False,
            Parameter.XMIT_POWER: 255,
            Parameter.HEADING_ALIGNMENT: 0,
            Parameter.SPEED_OF_SOUND: 1500,
            Parameter.TRANSDUCER_DEPTH: 0,
            Parameter.SALINITY: 35,
            Parameter.COORDINATE_TRANSFORMATION: '00111',
            Parameter.SENSOR_SOURCE: "1111101",
            Parameter.TIME_PER_BURST: '00:00:00.00',
            Parameter.ENSEMBLES_PER_BURST: 0,
            Parameter.BUFFER_OUTPUT_PERIOD: '00:00:00',
            Parameter.FALSE_TARGET_THRESHOLD: '050,001',
            Parameter.CORRELATION_THRESHOLD: 64,
            Parameter.ERROR_VELOCITY_THRESHOLD: 2000,
            Parameter.CLIP_DATA_PAST_BOTTOM: False,
            Parameter.RECEIVER_GAIN_SELECT: 1,
            Parameter.WATER_REFERENCE_LAYER: '001,005',
            Parameter.TRANSMIT_LENGTH: 0,
            Parameter.PING_WEIGHT: 0,
            Parameter.AMBIGUITY_VELOCITY: 175,
            Parameter.TIME_PER_ENSEMBLE: '01:00:00.00',
            Parameter.TIME_PER_PING: '01:20.00',
            Parameter.NUMBER_OF_DEPTH_CELLS: 30,
            Parameter.PINGS_PER_ENSEMBLE: 1,
            Parameter.DEPTH_CELL_SIZE: 800,
        }

        # Change the values of these parameters to something before the
        # driver is reinitalized.  They should be blown away on reinit.
        new_values = {}
        for k in self._driver_parameters.keys():
            if self.VALUE in self._driver_parameters[k]:
                if False == self._driver_parameters[k][self.READONLY]:
                    new_values[k] = self._driver_parameters[k][self.OFF_VALUE]

        self.assert_startup_parameters(self.assert_driver_parameters, new_values, get_values)

    def _test_set_heading_alignment(self):
        ###
        # test get set of a variety of parameter ranges
        ###
        log.debug("====== Testing ranges for HEADING_ALIGNMENT ======")

        # HEADING_ALIGNMENT: -- -17999 to 18000
        self.assert_set(Parameter.HEADING_ALIGNMENT, -17999)
        self.assert_set(Parameter.HEADING_ALIGNMENT, 0)
        self.assert_set(Parameter.HEADING_ALIGNMENT, 18000)
        # Just-outside-range values must be rejected.
        self.assert_set_exception(Parameter.HEADING_ALIGNMENT, -18000)
        self.assert_set_exception(Parameter.HEADING_ALIGNMENT, 18001)
        # Restore the nominal value and record coverage.
        self.assert_set(Parameter.HEADING_ALIGNMENT, self._driver_parameters[Parameter.HEADING_ALIGNMENT][self.VALUE])
        self._tested[Parameter.HEADING_ALIGNMENT] = True

    def _test_set_transducer_depth(self):
        ###
        # test get set of a variety of parameter ranges
        ###
        log.debug("====== Testing ranges for TRANSDUCER_DEPTH ======")

        # TRANSDUCER_DEPTH: 0 to 65535 per the asserts below
        # (original comment referenced HEADING_ALIGNMENT's range — stale copy/paste)
        self.assert_set(Parameter.TRANSDUCER_DEPTH, 0)
        self.assert_set(Parameter.TRANSDUCER_DEPTH, 32767)
        self.assert_set(Parameter.TRANSDUCER_DEPTH, 65535)
        self.assert_set_exception(Parameter.TRANSDUCER_DEPTH, -1)
        self.assert_set_exception(Parameter.TRANSDUCER_DEPTH, 65536)
        # NOTE(review): the next two asserts target TIME_PER_BURST, not
        # TRANSDUCER_DEPTH — looks like copy/paste; confirm intent.
        self.assert_set_exception(Parameter.TIME_PER_BURST, "LEROY JENKINS")
        self.assert_set_exception(Parameter.TIME_PER_BURST, 3.1415926)
        # Restore the nominal value and record coverage.
        self.assert_set(Parameter.TRANSDUCER_DEPTH, self._driver_parameters[Parameter.TRANSDUCER_DEPTH][self.VALUE])
        self._tested[Parameter.TRANSDUCER_DEPTH] = True

    def _test_set_depth_cell_size(self):
        ###
        # test get set of a variety of parameter ranges
        # * Override existing function, this instrument has a different range maxing out at 1600
        ###
        log.debug("====== Testing ranges for DEPTH_CELL_SIZE ======")

        # DEPTH_CELL_SIZE: accepted 20 - 1600 per the asserts below
        # (original comment said "80 - 3200" — appears stale for this instrument)
        self.assert_set(Parameter.DEPTH_CELL_SIZE, 20)
        self.assert_set(Parameter.DEPTH_CELL_SIZE, 1600)
        self.assert_set_exception(Parameter.DEPTH_CELL_SIZE, 1601)
        self.assert_set_exception(Parameter.DEPTH_CELL_SIZE, -1)
        self.assert_set_exception(Parameter.DEPTH_CELL_SIZE, 19)
        self.assert_set_exception(Parameter.DEPTH_CELL_SIZE, 3.1415926)
        self.assert_set_exception(Parameter.DEPTH_CELL_SIZE, "LEROY JENKINS")
        #
        # Reset to good value.
        #
        #self.assert_set(TeledyneParameter.DEPTH_CELL_SIZE, self._driver_parameter_defaults[TeledyneParameter.DEPTH_CELL_SIZE])
        self.assert_set(Parameter.DEPTH_CELL_SIZE, self._driver_parameters[Parameter.DEPTH_CELL_SIZE][self.VALUE])
        self._tested[Parameter.DEPTH_CELL_SIZE] = True
###############################################################################
# QUALIFICATION TESTS #
# Device specific qualification tests are for #
# testing device specific capabilities #
###############################################################################
@attr('QUAL', group='mi')
class QualFromIDK(WorkhorseDriverQualificationTest, ADCPTMixin):
    """
    Qualification tests run through the instrument agent.
    """

    def test_autosample(self):
        """
        Verify autosample works and data particles are created
        """
        self.assert_enter_command_mode()
        # Speed up pinging so particles arrive within the test timeouts.
        self.assert_set_parameter(Parameter.TIME_PER_ENSEMBLE, '00:01:00.00', True)
        self.assert_set_parameter(Parameter.TIME_PER_PING, '00:30.00', True)

        self.assert_start_autosample()
        self.assert_particle_async(DataParticleType.ADCP_PD0_PARSED_BEAM, self.assert_particle_pd0_data, timeout=90)
        # Polled particles should still be retrievable while streaming.
        self.assert_particle_polled(ProtocolEvent.GET_CALIBRATION, self.assert_compass_calibration, DataParticleType.ADCP_COMPASS_CALIBRATION, sample_count=1, timeout=50)
        self.assert_particle_polled(ProtocolEvent.GET_CONFIGURATION, self.assert_configuration, DataParticleType.ADCP_SYSTEM_CONFIGURATION, sample_count=1, timeout=50)

        # Stop autosample and do run a couple commands.
        self.assert_stop_autosample()
        self.assert_particle_polled(ProtocolEvent.GET_CALIBRATION, self.assert_compass_calibration, DataParticleType.ADCP_COMPASS_CALIBRATION, sample_count=1)
        self.assert_particle_polled(ProtocolEvent.GET_CONFIGURATION, self.assert_configuration, DataParticleType.ADCP_SYSTEM_CONFIGURATION, sample_count=1)

        # Restart autosample and gather a couple samples
        self.assert_sample_autosample(self.assert_particle_pd0_data, DataParticleType.ADCP_PD0_PARSED_BEAM)

    def assert_cycle(self):
        # One full start / sample / poll / stop cycle; helper for cycle tests.
        self.assert_start_autosample()
        self.assert_particle_async(DataParticleType.ADCP_PD0_PARSED_BEAM, self.assert_particle_pd0_data, timeout=200)
        self.assert_particle_polled(ProtocolEvent.GET_CALIBRATION, self.assert_compass_calibration, DataParticleType.ADCP_COMPASS_CALIBRATION, sample_count=1, timeout=60)
        self.assert_particle_polled(ProtocolEvent.GET_CONFIGURATION, self.assert_configuration, DataParticleType.ADCP_SYSTEM_CONFIGURATION, sample_count=1, timeout=60)

        # Stop autosample and do run a couple commands.
        self.assert_stop_autosample()
        self.assert_particle_polled(ProtocolEvent.GET_CALIBRATION, self.assert_compass_calibration, DataParticleType.ADCP_COMPASS_CALIBRATION, sample_count=1)
        self.assert_particle_polled(ProtocolEvent.GET_CONFIGURATION, self.assert_configuration, DataParticleType.ADCP_SYSTEM_CONFIGURATION, sample_count=1)
###############################################################################
# PUBLICATION TESTS #
# Device specific publication tests are for #
# testing device specific capabilities #
###############################################################################
@attr('PUB', group='mi')
class PubFromIDK(WorkhorseDriverPublicationTest):
    """Publication tests; all behavior is inherited from the common base."""
# removed a blank line to format for pep8 or some such pep
"""
@package mi.instrument.teledyne.workhorse_monitor_300_khz.cgsn.test.test_driver
@author Roger Unwin
@brief Test cases for InstrumentDriver
USAGE:
Make tests verbose and provide stdout
* From the IDK
$ bin/test_driver
$ bin/test_driver -u
$ bin/test_driver -i
$ bin/test_driver -q
"""
__author__ = 'Roger Unwin'
__license__ = 'Apache 2.0'
import unittest
from nose.plugins.attrib import attr
from mock import Mock
from mi.core.instrument.chunker import StringChunker
from mi.core.log import get_logger; log = get_logger()
from mi.idk.unit_test import InstrumentDriverTestCase
from mi.idk.unit_test import DriverTestMixin
from mi.idk.unit_test import ParameterTestConfigKey
from mi.idk.unit_test import DriverStartupConfigKey
from mi.instrument.teledyne.workhorse_monitor_300_khz.driver import NEWLINE
from mi.instrument.teledyne.workhorse_monitor_300_khz.cgsn.driver import Parameter
from mi.instrument.teledyne.workhorse_monitor_300_khz.cgsn.driver import Prompt
from mi.instrument.teledyne.workhorse_monitor_300_khz.cgsn.driver import ProtocolEvent
from mi.instrument.teledyne.workhorse_monitor_300_khz.cgsn.driver import ProtocolState
from mi.instrument.teledyne.workhorse_monitor_300_khz.cgsn.driver import ScheduledJob
from mi.instrument.teledyne.workhorse_monitor_300_khz.cgsn.driver import InstrumentCmds
from mi.instrument.teledyne.workhorse_monitor_300_khz.cgsn.driver import Capability
from mi.instrument.teledyne.workhorse_monitor_300_khz.cgsn.driver import InstrumentDriver
from mi.instrument.teledyne.workhorse_monitor_300_khz.cgsn.driver import Protocol
from mi.instrument.teledyne.workhorse_monitor_300_khz.driver import ADCP_PD0_PARSED_KEY
from mi.instrument.teledyne.workhorse_monitor_300_khz.driver import ADCP_PD0_PARSED_DataParticle
from mi.instrument.teledyne.workhorse_monitor_300_khz.driver import ADCP_SYSTEM_CONFIGURATION_KEY
from mi.instrument.teledyne.workhorse_monitor_300_khz.driver import ADCP_SYSTEM_CONFIGURATION_DataParticle
from mi.instrument.teledyne.workhorse_monitor_300_khz.driver import ADCP_COMPASS_CALIBRATION_KEY
from mi.instrument.teledyne.workhorse_monitor_300_khz.driver import ADCP_COMPASS_CALIBRATION_DataParticle
from mi.instrument.teledyne.workhorse_monitor_300_khz.test.test_data import SAMPLE_RAW_DATA1
from mi.instrument.teledyne.workhorse_monitor_300_khz.test.test_data import SAMPLE_RAW_DATA2
from mi.instrument.teledyne.workhorse_monitor_300_khz.test.test_data import SAMPLE_RAW_DATA3
from mi.instrument.teledyne.workhorse_monitor_300_khz.test.test_data import SAMPLE_RAW_DATA4
from mi.instrument.teledyne.workhorse_monitor_300_khz.test.test_data import CALIBRATION_RAW_DATA
from mi.instrument.teledyne.workhorse_monitor_300_khz.test.test_data import PS0_RAW_DATA
from mi.instrument.teledyne.workhorse_monitor_300_khz.test.test_driver import WorkhorseDriverUnitTest
from mi.instrument.teledyne.workhorse_monitor_300_khz.test.test_driver import WorkhorseDriverIntegrationTest
from mi.instrument.teledyne.workhorse_monitor_300_khz.test.test_driver import WorkhorseDriverQualificationTest
from mi.instrument.teledyne.workhorse_monitor_300_khz.test.test_driver import WorkhorseDriverPublicationTest
from mi.instrument.teledyne.workhorse_monitor_300_khz.test.test_driver import DataParticleType
###
# Driver parameters for tests
###
# Register this module's driver with the IDK test framework: driver
# location, agent identity, and the startup configuration applied on
# driver discovery (parameters + scheduled jobs).
InstrumentDriverTestCase.initialize(
    driver_module='mi.instrument.teledyne.workhorse_monitor_300_khz.cgsn.driver',
    driver_class="InstrumentDriver",

    instrument_agent_resource_id = 'HTWZMW',
    instrument_agent_preload_id = 'IA7',
    instrument_agent_name = 'teledyne_workhorse_monitor_300_khz_cgsn',
    instrument_agent_packet_config = DataParticleType(),

    driver_startup_config = {
        # Startup parameter values; must agree with the get_values dict in
        # IntFromIDK.test_startup_params and with ADCPTMixin._driver_parameters.
        DriverStartupConfigKey.PARAMETERS: {
            Parameter.INSTRUMENT_ID: 0,
            Parameter.SLEEP_ENABLE: 1,
            Parameter.SAVE_NVRAM_TO_RECORDER: True,
            Parameter.POLLED_MODE: False,
            Parameter.XMIT_POWER: 255,
            Parameter.HEADING_ALIGNMENT: 0,
            Parameter.SPEED_OF_SOUND: 1500,
            Parameter.TRANSDUCER_DEPTH: 0,
            Parameter.SALINITY: 35,
            Parameter.COORDINATE_TRANSFORMATION: '00111',
            Parameter.SENSOR_SOURCE: "1111101",
            Parameter.TIME_PER_BURST: '00:00:00.00',
            Parameter.ENSEMBLES_PER_BURST: 0,
            Parameter.BUFFER_OUTPUT_PERIOD: '00:00:00',
            Parameter.FALSE_TARGET_THRESHOLD: '050,001',
            Parameter.CORRELATION_THRESHOLD: 64,
            Parameter.ERROR_VELOCITY_THRESHOLD: 2000,
            Parameter.CLIP_DATA_PAST_BOTTOM: False,
            Parameter.RECEIVER_GAIN_SELECT: 1,
            Parameter.WATER_REFERENCE_LAYER: '001,005',
            Parameter.TRANSMIT_LENGTH: 0,
            Parameter.PING_WEIGHT: 0,
            Parameter.AMBIGUITY_VELOCITY: 175,
            Parameter.TIME_PER_ENSEMBLE: '01:00:00.00',
            Parameter.TIME_PER_PING: '01:20.00',
            Parameter.NUMBER_OF_DEPTH_CELLS: 30,
            Parameter.PINGS_PER_ENSEMBLE: 1,
            Parameter.DEPTH_CELL_SIZE: 800,
        },
        # Jobs scheduled by the driver (empty dict = default schedule config).
        DriverStartupConfigKey.SCHEDULER: {
            ScheduledJob.GET_CALIBRATION: {},
            ScheduledJob.GET_CONFIGURATION: {},
            ScheduledJob.CLOCK_SYNC: {}
        }
    }
)
###################################################################
###
# Driver constant definitions
###
###############################################################################
# DATA PARTICLE TEST MIXIN #
# Defines a set of assert methods used for data particle verification #
# #
# In python mixin classes are classes designed such that they wouldn't be #
# able to stand on their own, but are inherited by other classes generally #
# using multiple inheritance. #
# #
# This class defines a configuration structure for testing and common assert #
# methods for validating data particles.
###############################################################################
class ADCPTMixin(DriverTestMixin):
'''
Mixin class used for storing data particle constance
and common data assertion methods.
'''
# Create some short names for the parameter test config
TYPE = ParameterTestConfigKey.TYPE
READONLY = ParameterTestConfigKey.READONLY
STARTUP = ParameterTestConfigKey.STARTUP
DA = ParameterTestConfigKey.DIRECT_ACCESS
VALUE = ParameterTestConfigKey.VALUE
REQUIRED = ParameterTestConfigKey.REQUIRED
DEFAULT = ParameterTestConfigKey.DEFAULT
STATES = ParameterTestConfigKey.STATES
OFF_VALUE = 'off_value'
###
# Parameter and Type Definitions
###
# Verified with ADCPT-B IOS
_driver_parameters = {
Parameter.SERIAL_DATA_OUT: {TYPE: str, READONLY: True, DA: False, STARTUP: True, DEFAULT: "000 000 000",VALUE: "000 000 000",OFF_VALUE: "000 000 001"},
Parameter.SERIAL_FLOW_CONTROL: {TYPE: str, READONLY: True, DA: False, STARTUP: True, DEFAULT: '11110', VALUE: '11110', OFF_VALUE: '10110'},
Parameter.BANNER: {TYPE: bool, READONLY: True, DA: False, STARTUP: True, DEFAULT: 0, VALUE: False, OFF_VALUE: True},
Parameter.INSTRUMENT_ID: {TYPE: int, READONLY: False, DA: False, STARTUP: True, DEFAULT: 0, VALUE: 0, OFF_VALUE: 1},
Parameter.SLEEP_ENABLE: {TYPE: int, READONLY: False, DA: False, STARTUP: True, DEFAULT: 1, VALUE: 1, OFF_VALUE: 0},
Parameter.SAVE_NVRAM_TO_RECORDER: {TYPE: bool, READONLY: False, DA: False, STARTUP: True, DEFAULT: True, VALUE: True, OFF_VALUE: False},
Parameter.POLLED_MODE: {TYPE: bool, READONLY: False, DA: False, STARTUP: True, DEFAULT: False, VALUE: False, OFF_VALUE: True},
Parameter.XMIT_POWER: {TYPE: int, READONLY: False, DA: False, STARTUP: True, DEFAULT: 255, VALUE: 255, OFF_VALUE: 250},
Parameter.HEADING_ALIGNMENT: {TYPE: int, READONLY: False, DA: False, STARTUP: True, DEFAULT: 0, VALUE: 0, OFF_VALUE: 1},
Parameter.SPEED_OF_SOUND: {TYPE: int, READONLY: False, DA: True, STARTUP: True, DEFAULT: 1500, VALUE: 1500, OFF_VALUE: 1480},
Parameter.TRANSDUCER_DEPTH: {TYPE: int, READONLY: False, DA: False, STARTUP: True, DEFAULT: 0, VALUE: 0, OFF_VALUE: 32767},
Parameter.SALINITY: {TYPE: int, READONLY: False, DA: False, STARTUP: True, DEFAULT: 35, VALUE: 35, OFF_VALUE: 36},
Parameter.COORDINATE_TRANSFORMATION: {TYPE: str, READONLY: False, DA: False, STARTUP: True, DEFAULT: '00111', VALUE: '00111', OFF_VALUE: '00000'},
Parameter.SENSOR_SOURCE: {TYPE: str, READONLY: False, DA: False, STARTUP: True, DEFAULT: "1111101",VALUE: "1111101", OFF_VALUE: '0000000'},
Parameter.TIME_PER_BURST: {TYPE: str, READONLY: False, DA: False, STARTUP: True, DEFAULT: '00:00:00.00',VALUE: '00:00:00.00',OFF_VALUE: '00:55:00.00'},
Parameter.ENSEMBLES_PER_BURST: {TYPE: int, READONLY: False, DA: False, STARTUP: True, DEFAULT: 0, VALUE: 0, OFF_VALUE: 999},
Parameter.TIME_OF_FIRST_PING: {TYPE: str, READONLY: True, DA: False, STARTUP: False},
Parameter.TIME: {TYPE: str, READONLY: True, DA: False, STARTUP: False},
Parameter.BUFFER_OUTPUT_PERIOD: {TYPE: str, READONLY: False, DA: False, STARTUP: True, DEFAULT: '00:00:00', VALUE: '00:00:00', OFF_VALUE: '00:55:00'},
Parameter.FALSE_TARGET_THRESHOLD: {TYPE: str, READONLY: False, DA: False, STARTUP: True, DEFAULT: '050,001', VALUE: '050,001', OFF_VALUE: '049,002'},
Parameter.CORRELATION_THRESHOLD: {TYPE: int, READONLY: False, DA: False, STARTUP: True, DEFAULT: 64, VALUE: 64, OFF_VALUE: 63},
Parameter.SERIAL_OUT_FW_SWITCHES: {TYPE: str, READONLY: True, DA: False, STARTUP: True, DEFAULT: '111100000', VALUE: '111100000', OFF_VALUE: '111100001'},
Parameter.ERROR_VELOCITY_THRESHOLD: {TYPE: int, READONLY: False, DA: False, STARTUP: True, DEFAULT: 2000, VALUE: 2000, OFF_VALUE: 1999},
Parameter.BLANK_AFTER_TRANSMIT: {TYPE: int, READONLY: True, DA: False, STARTUP: True, DEFAULT: 352, VALUE: 352, OFF_VALUE: 342},
Parameter.CLIP_DATA_PAST_BOTTOM: {TYPE: bool, READONLY: False, DA: False, STARTUP: True, DEFAULT: False, VALUE: False, OFF_VALUE: True},
Parameter.RECEIVER_GAIN_SELECT: {TYPE: int, READONLY: False, DA: False, STARTUP: True, DEFAULT: 1, VALUE: 1, OFF_VALUE: 0},
Parameter.WATER_REFERENCE_LAYER: {TYPE: str, READONLY: False, DA: False, STARTUP: True, DEFAULT: '001,005', VALUE: '001,005', OFF_VALUE: '002,006'},
Parameter.WATER_PROFILING_MODE: {TYPE: int, READONLY: True, DA: False, STARTUP: True, DEFAULT: 1, VALUE: 1, OFF_VALUE: 0},
Parameter.TRANSMIT_LENGTH: {TYPE: int, READONLY: False, DA: False, STARTUP: True, DEFAULT: 0, VALUE: 0, OFF_VALUE: 1},
Parameter.PING_WEIGHT: {TYPE: int, READONLY: False, DA: False, STARTUP: True, DEFAULT: 0, VALUE: 0, OFF_VALUE: 1},
Parameter.AMBIGUITY_VELOCITY: {TYPE: int, READONLY: False, DA: False, STARTUP: True, DEFAULT: 175, VALUE: 175, OFF_VALUE: 176},
Parameter.TIME_PER_ENSEMBLE: {TYPE: str, READONLY: False, DA: False, STARTUP: True, DEFAULT: '01:00:00.00',VALUE: '01:00:00.00',OFF_VALUE: '00:00:01.00'},
Parameter.TIME_PER_PING: {TYPE: str, READONLY: False, DA: False, STARTUP: True, DEFAULT: '01:20.00',VALUE: '01:20.00', OFF_VALUE: '00:02.00'},
Parameter.BANDWIDTH_CONTROL: {TYPE: int, READONLY: True, DA: False, STARTUP: True, DEFAULT: 1, VALUE: 1, OFF_VALUE: 0},
Parameter.NUMBER_OF_DEPTH_CELLS: {TYPE: int, READONLY: False, DA: False, STARTUP: True, DEFAULT: 30, VALUE: 30, OFF_VALUE: 90},
Parameter.PINGS_PER_ENSEMBLE: {TYPE: int, READONLY: False, DA: False, STARTUP: True, DEFAULT: 1, VALUE: 1, OFF_VALUE: 2},
Parameter.DEPTH_CELL_SIZE: {TYPE: int, READONLY: False, DA: False, STARTUP: True, DEFAULT: 800, VALUE: 800, OFF_VALUE: 790},
}
_driver_capabilities = {
# capabilities defined in the IOS
Capability.START_AUTOSAMPLE: { STATES: [ProtocolState.COMMAND, ProtocolState.AUTOSAMPLE]},
Capability.STOP_AUTOSAMPLE: { STATES: [ProtocolState.COMMAND, ProtocolState.AUTOSAMPLE]},
Capability.CLOCK_SYNC: { STATES: [ProtocolState.COMMAND]},
Capability.GET_CALIBRATION: { STATES: [ProtocolState.COMMAND]},
Capability.GET_CONFIGURATION: { STATES: [ProtocolState.COMMAND]},
Capability.SAVE_SETUP_TO_RAM: { STATES: [ProtocolState.COMMAND]},
Capability.SEND_LAST_SAMPLE: { STATES: [ProtocolState.COMMAND]},
Capability.GET_ERROR_STATUS_WORD: { STATES: [ProtocolState.COMMAND]},
Capability.CLEAR_ERROR_STATUS_WORD: { STATES: [ProtocolState.COMMAND]},
Capability.GET_FAULT_LOG: { STATES: [ProtocolState.COMMAND]},
Capability.CLEAR_FAULT_LOG: { STATES: [ProtocolState.COMMAND]},
Capability.GET_INSTRUMENT_TRANSFORM_MATRIX: { STATES: [ProtocolState.COMMAND]},
Capability.RUN_TEST_200: { STATES: [ProtocolState.COMMAND]},
Capability.POWER_DOWN: { STATES: [ProtocolState.COMMAND]},
}
#name, type done, value pending
EF_CHAR = '\xef'
_calibration_data_parameters = {
ADCP_COMPASS_CALIBRATION_KEY.FLUXGATE_CALIBRATION_TIMESTAMP: {'type': float, 'value': -1785800539.0 },
ADCP_COMPASS_CALIBRATION_KEY.S_INVERSE_BX: {'type': list, 'value': [3.8774e-01, 4.7391e-01, -2.5109e-02, -1.4835e-02] },
ADCP_COMPASS_CALIBRATION_KEY.S_INVERSE_BY: {'type': list, 'value': [-8.2932e-03, 1.8434e-02, -5.2666e-02, 5.8153e-01] },
ADCP_COMPASS_CALIBRATION_KEY.S_INVERSE_BZ: {'type': list, 'value': [2.2218e-01, -1.7820e-01, 2.9168e-01, 1.6125e-02] },
ADCP_COMPASS_CALIBRATION_KEY.S_INVERSE_ERR: {'type': list, 'value': [-5.3909e-01, 4.7951e-01, 7.0135e-01, 4.0629e-02] },
ADCP_COMPASS_CALIBRATION_KEY.COIL_OFFSET: {'type': list, 'value': [3.8310e+04, 3.4872e+04, 3.7008e+04, 3.4458e+04] },
ADCP_COMPASS_CALIBRATION_KEY.ELECTRICAL_NULL: {'type': float, 'value': 34159 },
ADCP_COMPASS_CALIBRATION_KEY.TILT_CALIBRATION_TIMESTAMP: {'type': float, 'value': 1348176909.0 },
ADCP_COMPASS_CALIBRATION_KEY.CALIBRATION_TEMP: {'type': float, 'value': 24.9 },
ADCP_COMPASS_CALIBRATION_KEY.ROLL_UP_DOWN: {'type': list, 'value': [3.5167e-07, -1.4728e-05, -3.5240e-07, 1.5687e-05] },
ADCP_COMPASS_CALIBRATION_KEY.PITCH_UP_DOWN: {'type': list, 'value': [-1.4773e-05, 2.9804e-23, -1.5654e-05, -1.2675e-07] },
ADCP_COMPASS_CALIBRATION_KEY.OFFSET_UP_DOWN: {'type': list, 'value': [3.2170e+04, 3.3840e+04, 3.4094e+04, 3.3028e+04] },
ADCP_COMPASS_CALIBRATION_KEY.TILT_NULL: {'type': float, 'value': 33296 }
}
#name, type done, value pending
_system_configuration_data_parameters = {
ADCP_SYSTEM_CONFIGURATION_KEY.SERIAL_NUMBER: {'type': unicode, 'value': "18493" },
ADCP_SYSTEM_CONFIGURATION_KEY.TRANSDUCER_FREQUENCY: {'type': int, 'value': 307200 },
ADCP_SYSTEM_CONFIGURATION_KEY.CONFIGURATION: {'type': unicode, 'value': "4 BEAM, JANUS" },
ADCP_SYSTEM_CONFIGURATION_KEY.MATCH_LAYER: {'type': unicode, 'value': "10" },
ADCP_SYSTEM_CONFIGURATION_KEY.BEAM_ANGLE: {'type': int, 'value': 20 },
ADCP_SYSTEM_CONFIGURATION_KEY.BEAM_PATTERN: {'type': unicode, 'value': "CONVEX" },
ADCP_SYSTEM_CONFIGURATION_KEY.ORIENTATION: {'type': unicode, 'value': "UP" },
ADCP_SYSTEM_CONFIGURATION_KEY.SENSORS: {'type': unicode, 'value': "HEADING TILT 1 TILT 2 TEMPERATURE" },
ADCP_SYSTEM_CONFIGURATION_KEY.TEMPERATURE_SENSOR_OFFSET: {'type': float, 'value': -0.02 },
ADCP_SYSTEM_CONFIGURATION_KEY.CPU_FIRMWARE: {'type': unicode, 'value': "50.40 [0]" },
ADCP_SYSTEM_CONFIGURATION_KEY.BOOT_CODE_REQUIRED: {'type': unicode, 'value': "1.16" },
ADCP_SYSTEM_CONFIGURATION_KEY.BOOT_CODE_ACTUAL: {'type': unicode, 'value': "1.16" },
ADCP_SYSTEM_CONFIGURATION_KEY.DEMOD_1_VERSION: {'type': unicode, 'value': "ad48" },
ADCP_SYSTEM_CONFIGURATION_KEY.DEMOD_1_TYPE: {'type': unicode, 'value': "1f" },
ADCP_SYSTEM_CONFIGURATION_KEY.DEMOD_2_VERSION: {'type': unicode, 'value': "ad48" },
ADCP_SYSTEM_CONFIGURATION_KEY.DEMOD_2_TYPE: {'type': unicode, 'value': "1f" },
ADCP_SYSTEM_CONFIGURATION_KEY.POWER_TIMING_VERSION: {'type': unicode, 'value': "85d3" },
ADCP_SYSTEM_CONFIGURATION_KEY.POWER_TIMING_TYPE: {'type': unicode, 'value': "7" },
ADCP_SYSTEM_CONFIGURATION_KEY.BOARD_SERIAL_NUMBERS: {'type': unicode, 'value': u'2F 00 00 06 FF 25 D1 09 CPU727-2011-00E\n16 00 00 06 F5 E5 D1 09 DSP727-2001-04H\n27 00 00 06 FF 29 31 09 PIO727-3000-00G\n91 00 00 06 F6 17 A7 09 REC727-1000-04E\n'}
}
#name, type done, value pending
_pd0_parameters_base = {
ADCP_PD0_PARSED_KEY.HEADER_ID: {'type': int, 'value': 127 },
ADCP_PD0_PARSED_KEY.DATA_SOURCE_ID: {'type': int, 'value': 127 },
ADCP_PD0_PARSED_KEY.NUM_BYTES: {'type': int, 'value': 26632 },
ADCP_PD0_PARSED_KEY.NUM_DATA_TYPES: {'type': int, 'value': 6 },
ADCP_PD0_PARSED_KEY.OFFSET_DATA_TYPES: {'type': list, 'value': [18, 77, 142, 944, 1346, 1748, 2150] },
ADCP_PD0_PARSED_KEY.FIXED_LEADER_ID: {'type': int, 'value': 0 },
ADCP_PD0_PARSED_KEY.FIRMWARE_VERSION: {'type': int, 'value': 50 },
ADCP_PD0_PARSED_KEY.FIRMWARE_REVISION: {'type': int, 'value': 40 },
ADCP_PD0_PARSED_KEY.SYSCONFIG_FREQUENCY: {'type': int, 'value': 150 },
ADCP_PD0_PARSED_KEY.SYSCONFIG_BEAM_PATTERN: {'type': int, 'value': 0 },
ADCP_PD0_PARSED_KEY.SYSCONFIG_SENSOR_CONFIG: {'type': int, 'value': 1 },
ADCP_PD0_PARSED_KEY.SYSCONFIG_HEAD_ATTACHED: {'type': int, 'value': 1 },
ADCP_PD0_PARSED_KEY.SYSCONFIG_VERTICAL_ORIENTATION: {'type': int, 'value': 0 },
ADCP_PD0_PARSED_KEY.DATA_FLAG: {'type': int, 'value': 0 },
ADCP_PD0_PARSED_KEY.LAG_LENGTH: {'type': int, 'value': 53 },
ADCP_PD0_PARSED_KEY.NUM_BEAMS: {'type': int, 'value': 4 },
ADCP_PD0_PARSED_KEY.NUM_CELLS: {'type': int, 'value': 100 },
ADCP_PD0_PARSED_KEY.PINGS_PER_ENSEMBLE: {'type': int, 'value': 256 },
ADCP_PD0_PARSED_KEY.DEPTH_CELL_LENGTH: {'type': int, 'value': 32780 },
ADCP_PD0_PARSED_KEY.BLANK_AFTER_TRANSMIT: {'type': int, 'value': 49154 },
ADCP_PD0_PARSED_KEY.SIGNAL_PROCESSING_MODE: {'type': int, 'value': 1 },
ADCP_PD0_PARSED_KEY.LOW_CORR_THRESHOLD: {'type': int, 'value': 64 },
ADCP_PD0_PARSED_KEY.NUM_CODE_REPETITIONS: {'type': int, 'value': 17 },
ADCP_PD0_PARSED_KEY.PERCENT_GOOD_MIN: {'type': int, 'value': 0 },
ADCP_PD0_PARSED_KEY.ERROR_VEL_THRESHOLD: {'type': int, 'value': 53255 },
ADCP_PD0_PARSED_KEY.TIME_PER_PING_MINUTES: {'type': int, 'value': 0 },
ADCP_PD0_PARSED_KEY.TIME_PER_PING_SECONDS: {'type': float, 'value': 1.0 },
ADCP_PD0_PARSED_KEY.COORD_TRANSFORM_TYPE: {'type': int, 'value': 0 },
ADCP_PD0_PARSED_KEY.COORD_TRANSFORM_TILTS: {'type': int, 'value': 0 },
ADCP_PD0_PARSED_KEY.COORD_TRANSFORM_BEAMS: {'type': int, 'value': 0 },
ADCP_PD0_PARSED_KEY.COORD_TRANSFORM_MAPPING: {'type': int, 'value': 0 },
ADCP_PD0_PARSED_KEY.HEADING_ALIGNMENT: {'type': int, 'value': 0 },
ADCP_PD0_PARSED_KEY.HEADING_BIAS: {'type': int, 'value': 0 },
ADCP_PD0_PARSED_KEY.SENSOR_SOURCE_SPEED: {'type': int, 'value': 1 },
ADCP_PD0_PARSED_KEY.SENSOR_SOURCE_DEPTH: {'type': int, 'value': 1 },
ADCP_PD0_PARSED_KEY.SENSOR_SOURCE_HEADING: {'type': int, 'value': 1 },
ADCP_PD0_PARSED_KEY.SENSOR_SOURCE_PITCH: {'type': int, 'value': 1 },
ADCP_PD0_PARSED_KEY.SENSOR_SOURCE_ROLL: {'type': int, 'value': 1 },
ADCP_PD0_PARSED_KEY.SENSOR_SOURCE_CONDUCTIVITY: {'type': int, 'value': 0 },
ADCP_PD0_PARSED_KEY.SENSOR_SOURCE_TEMPERATURE: {'type': int, 'value': 1 },
ADCP_PD0_PARSED_KEY.SENSOR_AVAILABLE_DEPTH: {'type': int, 'value': 1 },
ADCP_PD0_PARSED_KEY.SENSOR_AVAILABLE_HEADING: {'type': int, 'value': 1 },
ADCP_PD0_PARSED_KEY.SENSOR_AVAILABLE_PITCH: {'type': int, 'value': 1 },
ADCP_PD0_PARSED_KEY.SENSOR_AVAILABLE_ROLL: {'type': int, 'value': 1 },
ADCP_PD0_PARSED_KEY.SENSOR_AVAILABLE_CONDUCTIVITY: {'type': int, 'value': 0 },
ADCP_PD0_PARSED_KEY.SENSOR_AVAILABLE_TEMPERATURE: {'type': int, 'value': 1 },
ADCP_PD0_PARSED_KEY.BIN_1_DISTANCE: {'type': int, 'value': 60175 },
ADCP_PD0_PARSED_KEY.TRANSMIT_PULSE_LENGTH: {'type': int, 'value': 4109 },
ADCP_PD0_PARSED_KEY.REFERENCE_LAYER_START: {'type': int, 'value': 1 },
ADCP_PD0_PARSED_KEY.REFERENCE_LAYER_STOP: {'type': int, 'value': 5 },
ADCP_PD0_PARSED_KEY.FALSE_TARGET_THRESHOLD: {'type': int, 'value': 50 },
ADCP_PD0_PARSED_KEY.LOW_LATENCY_TRIGGER: {'type': int, 'value': 0 },
ADCP_PD0_PARSED_KEY.TRANSMIT_LAG_DISTANCE: {'type': int, 'value': 50688 },
ADCP_PD0_PARSED_KEY.CPU_BOARD_SERIAL_NUMBER: {'type': long, 'value': 9367487254980977929L },
ADCP_PD0_PARSED_KEY.SYSTEM_BANDWIDTH: {'type': int, 'value': 0 },
ADCP_PD0_PARSED_KEY.SYSTEM_POWER: {'type': int, 'value': 255 },
ADCP_PD0_PARSED_KEY.SERIAL_NUMBER: {'type': long, 'value': 206045184 },
ADCP_PD0_PARSED_KEY.BEAM_ANGLE: {'type': int, 'value': 20 },
ADCP_PD0_PARSED_KEY.VARIABLE_LEADER_ID: {'type': int, 'value': 128 },
ADCP_PD0_PARSED_KEY.ENSEMBLE_NUMBER: {'type': int, 'value': 5 },
ADCP_PD0_PARSED_KEY.INTERNAL_TIMESTAMP: {'type': float, 'value': 752 },
ADCP_PD0_PARSED_KEY.ENSEMBLE_NUMBER_INCREMENT: {'type': int, 'value': 0 },
ADCP_PD0_PARSED_KEY.BIT_RESULT_DEMOD_1: {'type': int, 'value': 0 },
ADCP_PD0_PARSED_KEY.BIT_RESULT_DEMOD_2: {'type': int, 'value': 0 },
ADCP_PD0_PARSED_KEY.BIT_RESULT_TIMING: {'type': int, 'value': 0 },
ADCP_PD0_PARSED_KEY.SPEED_OF_SOUND: {'type': int, 'value': 1523 },
ADCP_PD0_PARSED_KEY.TRANSDUCER_DEPTH: {'type': int, 'value': 0 },
ADCP_PD0_PARSED_KEY.HEADING: {'type': int, 'value': 5221 },
ADCP_PD0_PARSED_KEY.PITCH: {'type': int, 'value': -4657 },
ADCP_PD0_PARSED_KEY.ROLL: {'type': int, 'value': -4561 },
ADCP_PD0_PARSED_KEY.SALINITY: {'type': int, 'value': 35 },
ADCP_PD0_PARSED_KEY.TEMPERATURE: {'type': int, 'value': 2050 },
ADCP_PD0_PARSED_KEY.MPT_MINUTES: {'type': int, 'value': 0 },
ADCP_PD0_PARSED_KEY.MPT_SECONDS: {'type': float, 'value': 0.0 },
ADCP_PD0_PARSED_KEY.HEADING_STDEV: {'type': int, 'value': 0 },
ADCP_PD0_PARSED_KEY.PITCH_STDEV: {'type': int, 'value': 0 },
ADCP_PD0_PARSED_KEY.ROLL_STDEV: {'type': int, 'value': 0 },
ADCP_PD0_PARSED_KEY.ADC_TRANSMIT_CURRENT: {'type': int, 'value': 116 },
ADCP_PD0_PARSED_KEY.ADC_TRANSMIT_VOLTAGE: {'type': int, 'value': 169 },
ADCP_PD0_PARSED_KEY.ADC_AMBIENT_TEMP: {'type': int, 'value': 88 },
ADCP_PD0_PARSED_KEY.ADC_PRESSURE_PLUS: {'type': int, 'value': 79 },
ADCP_PD0_PARSED_KEY.ADC_PRESSURE_MINUS: {'type': int, 'value': 79 },
ADCP_PD0_PARSED_KEY.ADC_ATTITUDE_TEMP: {'type': int, 'value': 0 },
ADCP_PD0_PARSED_KEY.ADC_ATTITUDE: {'type': int, 'value': 0 },
ADCP_PD0_PARSED_KEY.ADC_CONTAMINATION_SENSOR: {'type': int, 'value': 0 },
ADCP_PD0_PARSED_KEY.BUS_ERROR_EXCEPTION: {'type': int, 'value': 0 },
ADCP_PD0_PARSED_KEY.ADDRESS_ERROR_EXCEPTION: {'type': int, 'value': 0 },
ADCP_PD0_PARSED_KEY.ILLEGAL_INSTRUCTION_EXCEPTION: {'type': int, 'value': 0 },
ADCP_PD0_PARSED_KEY.ZERO_DIVIDE_INSTRUCTION: {'type': int, 'value': 0 },
ADCP_PD0_PARSED_KEY.EMULATOR_EXCEPTION: {'type': int, 'value': 0 },
ADCP_PD0_PARSED_KEY.UNASSIGNED_EXCEPTION: {'type': int, 'value': 0 },
ADCP_PD0_PARSED_KEY.WATCHDOG_RESTART_OCCURED: {'type': int, 'value': 0 },
ADCP_PD0_PARSED_KEY.BATTERY_SAVER_POWER: {'type': int, 'value': 0 },
ADCP_PD0_PARSED_KEY.PINGING: {'type': int, 'value': 0 },
ADCP_PD0_PARSED_KEY.COLD_WAKEUP_OCCURED: {'type': int, 'value': 0 },
ADCP_PD0_PARSED_KEY.UNKNOWN_WAKEUP_OCCURED: {'type': int, 'value': 0 },
ADCP_PD0_PARSED_KEY.CLOCK_READ_ERROR: {'type': int, 'value': 0 },
ADCP_PD0_PARSED_KEY.UNEXPECTED_ALARM: {'type': int, 'value': 0 },
ADCP_PD0_PARSED_KEY.CLOCK_JUMP_FORWARD: {'type': int, 'value': 0 },
ADCP_PD0_PARSED_KEY.CLOCK_JUMP_BACKWARD: {'type': int, 'value': 0 },
ADCP_PD0_PARSED_KEY.POWER_FAIL: {'type': int, 'value': 0 },
ADCP_PD0_PARSED_KEY.SPURIOUS_DSP_INTERRUPT: {'type': int, 'value': 0 },
ADCP_PD0_PARSED_KEY.SPURIOUS_UART_INTERRUPT: {'type': int, 'value': 0 },
ADCP_PD0_PARSED_KEY.SPURIOUS_CLOCK_INTERRUPT: {'type': int, 'value': 0 },
ADCP_PD0_PARSED_KEY.LEVEL_7_INTERRUPT: {'type': int, 'value': 0 },
ADCP_PD0_PARSED_KEY.ABSOLUTE_PRESSURE: {'type': int, 'value': 4294963793 },
ADCP_PD0_PARSED_KEY.PRESSURE_VARIANCE: {'type': int, 'value': 0 },
ADCP_PD0_PARSED_KEY.INTERNAL_TIMESTAMP: {'type': float, 'value': 1363408382.02 },
ADCP_PD0_PARSED_KEY.VELOCITY_DATA_ID: {'type': int, 'value': 1 },
ADCP_PD0_PARSED_KEY.CORRELATION_MAGNITUDE_ID: {'type': int, 'value': 2 },
ADCP_PD0_PARSED_KEY.CORRELATION_MAGNITUDE_BEAM1: {'type': list, 'value': [19801, 1796, 1800, 1797, 1288, 1539, 1290, 1543, 1028, 1797, 1538, 775, 1034, 1283, 1029, 1799, 1801, 1545, 519, 772, 519, 1033, 1028, 1286, 521, 519, 1545, 1801, 522, 1286, 1030, 1032, 1542, 1035, 1283, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0] },
ADCP_PD0_PARSED_KEY.CORRELATION_MAGNITUDE_BEAM2: {'type': list, 'value': [22365, 2057, 2825, 2825, 1801, 2058, 1545, 1286, 3079, 522, 1547, 519, 2052, 2820, 519, 1806, 1026, 1547, 1795, 1801, 2311, 1030, 781, 1796, 1037, 1802, 1035, 1798, 770, 2313, 1292, 1031, 1030, 2830, 523, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0] },
ADCP_PD0_PARSED_KEY.CORRELATION_MAGNITUDE_BEAM3: {'type': list, 'value': [3853, 1796, 1289, 1803, 2317, 2571, 1028, 1282, 1799, 2825, 2574, 1026, 1028, 518, 1290, 1286, 1032, 1797, 1028, 2312, 1031, 775, 1549, 772, 1028, 772, 2570, 1288, 1796, 1542, 1538, 777, 1282, 773, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0] },
ADCP_PD0_PARSED_KEY.CORRELATION_MAGNITUDE_BEAM4: {'type': list, 'value': [5386, 4100, 2822, 1286, 774, 1799, 518, 778, 3340, 1031, 1546, 1545, 1547, 2566, 3077, 3334, 1801, 1809, 2058, 1539, 1798, 1546, 3593, 1032, 2307, 1025, 1545, 2316, 2055, 1546, 1292, 2312, 1035, 2316, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0] },
ADCP_PD0_PARSED_KEY.ECHO_INTENSITY_ID: {'type': int, 'value': 3 },
ADCP_PD0_PARSED_KEY.ECHO_INTENSITY_BEAM1: {'type': list, 'value': [24925, 10538, 10281, 10537, 10282, 10281, 10281, 10282, 10282, 10281, 10281, 10281, 10538, 10282, 10281, 10282, 10281, 10537, 10281, 10281, 10281, 10281, 10281, 10281, 10281, 10281, 10281, 10281, 10281, 10282, 10281, 10282, 10537, 10281, 10281, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0] },
ADCP_PD0_PARSED_KEY.ECHO_INTENSITY_BEAM2: {'type': list, 'value': [29027, 12334, 12334, 12078, 12078, 11821, 12334, 12334, 12078, 12078, 12078, 12078, 12078, 12078, 12078, 12079, 12334, 12078, 12334, 12333, 12078, 12333, 12078, 12077, 12078, 12078, 12078, 12334, 12077, 12078, 12078, 12078, 12078, 12078, 12078, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0] },
ADCP_PD0_PARSED_KEY.ECHO_INTENSITY_BEAM3: {'type': list, 'value': [12079, 10282, 10281, 10281, 10282, 10281, 10282, 10282, 10281, 10025, 10282, 10282, 10282, 10282, 10025, 10282, 10281, 10025, 10281, 10281, 10282, 10281, 10282, 10281, 10281, 10281, 10537, 10282, 10281, 10281, 10281, 10281, 10281, 10282, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0] },
ADCP_PD0_PARSED_KEY.ECHO_INTENSITY_BEAM4: {'type': list, 'value': [14387, 12334, 12078, 12078, 12078, 12334, 12078, 12334, 12078, 12078, 12077, 12077, 12334, 12078, 12334, 12078, 12334, 12077, 12078, 11821, 12335, 12077, 12078, 12077, 12334, 11822, 12334, 12334, 12077, 12077, 12078, 11821, 11821, 12078, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0] },
ADCP_PD0_PARSED_KEY.PERCENT_GOOD_ID: {'type': int, 'value': 4 },
ADCP_PD0_PARSED_KEY.CHECKSUM: {'type': int, 'value': 8239 }
}
# Expected PD0 particle fields when the instrument reports velocities in
# EARTH coordinates (east/north/up/error plus the transform percent-good
# fields).  Values were captured from a real instrument sample.
_coordinate_transformation_earth_parameters = {
    # Earth Coordinates
    ADCP_PD0_PARSED_KEY.WATER_VELOCITY_EAST: {'type': list, 'value': [128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128] },
    ADCP_PD0_PARSED_KEY.WATER_VELOCITY_NORTH: {'type': list, 'value': [128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128] },
    ADCP_PD0_PARSED_KEY.WATER_VELOCITY_UP: {'type': list, 'value': [128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128] },
    ADCP_PD0_PARSED_KEY.ERROR_VELOCITY: {'type': list, 'value': [128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128] },
    ADCP_PD0_PARSED_KEY.PERCENT_GOOD_3BEAM: {'type': list, 'value': [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0] },
    ADCP_PD0_PARSED_KEY.PERCENT_TRANSFORMS_REJECT: {'type': list, 'value': [25600, 25600, 25600, 25600, 25600, 25600, 25600, 25600, 25600, 25600, 25600, 25600, 25600, 25600, 25600, 25600, 25600, 25600, 25600, 25600, 25600, 25600, 25600, 25600, 25600, 25600, 25600, 25600, 25600, 25600, 25600, 25600, 25600, 25600, 25600, 25600, 25600, 25600, 25600, 25600, 25600, 25600, 25600, 25600, 25600, 25600, 25600, 25600, 25600] },
    ADCP_PD0_PARSED_KEY.PERCENT_BAD_BEAMS: {'type': list, 'value': [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0] },
    ADCP_PD0_PARSED_KEY.PERCENT_GOOD_4BEAM: {'type': list, 'value': [25600, 25600, 25600, 25600, 25600, 25600, 25600, 25600, 25600, 25600, 25600, 25600, 25600, 25600, 25600, 25600, 25600, 25600, 25600, 25600, 25600, 25600, 25600, 25600, 25600, 25600, 25600, 25600, 25600, 25600, 25600, 25600, 25600, 25600, 25600, 25600, 25600, 25600, 25600, 25600, 25600, 25600, 25600, 25600, 25600, 25600, 25600, 25600, 25600] },
}
# Expected PD0 particle fields when the instrument reports velocities in
# BEAM coordinates (per-beam velocity and percent-good fields).
_coordinate_transformation_beam_parameters = {
    # Beam Coordinates
    ADCP_PD0_PARSED_KEY.PERCENT_GOOD_BEAM1: {'type': list, 'value': [25700, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0] },
    ADCP_PD0_PARSED_KEY.PERCENT_GOOD_BEAM2: {'type': list, 'value': [25700, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0] },
    ADCP_PD0_PARSED_KEY.PERCENT_GOOD_BEAM3: {'type': list, 'value': [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0] },
    ADCP_PD0_PARSED_KEY.PERCENT_GOOD_BEAM4: {'type': list, 'value': [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0] },
    ADCP_PD0_PARSED_KEY.BEAM_1_VELOCITY: {'type': list, 'value': [4864, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128] },
    ADCP_PD0_PARSED_KEY.BEAM_2_VELOCITY: {'type': list, 'value': [62719, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128] },
    ADCP_PD0_PARSED_KEY.BEAM_3_VELOCITY: {'type': list, 'value': [45824, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128] },
    ADCP_PD0_PARSED_KEY.BEAM_4_VELOCITY : {'type': list, 'value': [19712, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128] },
}
# Full expected parameter set for a PD0 particle in beam coordinates:
# the shared base fields plus the beam-coordinate fields.
# NOTE: the previous form, dict(a.items() + b.items()), only works on
# Python 2 — dict.items() returns a view in Python 3 and views do not
# support "+".  Copy-then-update is equivalent on both versions.
_pd0_parameters = dict(_pd0_parameters_base)
_pd0_parameters.update(_coordinate_transformation_beam_parameters)
###
# Driver Parameter Methods
###
def assert_driver_parameters(self, current_parameters, verify_values=False):
    """
    Assert that the parameters read back from a driver instance match the
    driver's parameter definitions.
    @param current_parameters: driver parameters read from the driver instance
    @param verify_values: when True, also compare values to the definitions
    """
    message = "assert_driver_parameters current_parameters = " + str(current_parameters)
    log.debug(message)
    self.assert_parameters(current_parameters, self._driver_parameters, verify_values)
###
# Data Particle Parameters Methods
###
def assert_sample_data_particle(self, data_particle):
    """
    Route a particle produced by the driver to the matching
    particle-specific assertion; fail the test for unknown particles.
    @param data_particle: Data particle of unknown type produced by the driver
    """
    # NOTE(review): isinstance() against DataParticleType members assumes
    # those members are classes; if they are plain string constants this
    # raises TypeError — confirm against the DataParticleType definition.
    handlers = (
        (DataParticleType.ADCP_PD0_PARSED_BEAM, self.assert_particle_pd0_data),
        (DataParticleType.ADCP_SYSTEM_CONFIGURATION, self.assert_particle_system_configuration),
        (DataParticleType.ADCP_COMPASS_CALIBRATION, self.assert_particle_compass_calibration),
    )
    # Checked in the same order as the original if/elif chain.
    for particle_type, assert_particle in handlers:
        if isinstance(data_particle, particle_type):
            assert_particle(data_particle)
            return
    log.error("Unknown Particle Detected: %s" % data_particle)
    self.assertFalse(True)
def assert_particle_compass_calibration(self, data_particle, verify_values=True):
    """
    Assert an ADCP compass-calibration data particle.
    @param data_particle: compass calibration data particle
    @param verify_values: bool, should we verify parameter values
    """
    log.debug("in assert_particle_compass_calibration")
    log.debug("data_particle = " + repr(data_particle))
    expected = self._calibration_data_parameters
    self.assert_data_particle_header(data_particle, DataParticleType.ADCP_COMPASS_CALIBRATION)
    self.assert_data_particle_parameters(data_particle, expected, verify_values)
def assert_particle_system_configuration(self, data_particle, verify_values=True):
    """
    Assert an ADCP system-configuration data particle.
    @param data_particle: system configuration data particle
    @param verify_values: bool, should we verify parameter values
    """
    expected = self._system_configuration_data_parameters
    self.assert_data_particle_header(data_particle, DataParticleType.ADCP_SYSTEM_CONFIGURATION)
    self.assert_data_particle_parameters(data_particle, expected, verify_values)
def assert_particle_pd0_data(self, data_particle, verify_values=True):
    """
    Assert an ADCP PD0 (beam coordinates) data particle.
    @param data_particle: PD0 data particle
    @param verify_values: bool, should we verify parameter values
    """
    log.debug("IN assert_particle_pd0_data")
    self.assert_data_particle_header(data_particle, DataParticleType.ADCP_PD0_PARSED_BEAM)
    # verify_values is deliberately not forwarded here: only the presence of
    # the expected parameters is checked, not their values.
    self.assert_data_particle_parameters(data_particle, self._pd0_parameters)
def setUp(self):
    """Build the per-parameter default-value map from the definitions."""
    DriverTestMixin.setUp(self)
    # Each parameter's default is its declared VALUE, or None when the
    # definition carries no VALUE entry (dict.get yields None either way).
    self._driver_parameter_defaults = {
        label: definition.get(self.VALUE)
        for label, definition in self._driver_parameters.items()
    }
###############################################################################
#                                UNIT TESTS                                   #
#         Unit tests test the method calls and parameters using Mock.        #
###############################################################################
@attr('UNIT', group='mi')
class UnitFromIDK(WorkhorseDriverUnitTest, ADCPTMixin):
    """Unit tests for the ADCPT Workhorse driver; no physical instrument."""

    def setUp(self):
        WorkhorseDriverUnitTest.setUp(self)
        ADCPTMixin.setUp(self)

    def test_defaults(self):
        """Log the default value of every driver parameter (smoke check)."""
        for label in sorted(self._driver_parameter_defaults.keys()):
            log.debug(str(label) + " = " + str(self._driver_parameter_defaults[label]))

    def test_sanity(self):
        """Verify the driver and protocol objects can be constructed."""
        my_event_callback = Mock(spec="fake evt_callback")
        # Construction itself is the assertion here; the objects are unused.
        driver = InstrumentDriver(self._got_data_event_callback)
        protocol = Protocol(Prompt, NEWLINE, my_event_callback)

    def test_send_break(self):
        """
        Verify _send_break() recognizes both observed wakeup banner formats.
        """
        my_event_callback = Mock(spec="fake evt_callback")
        self.protocol = Protocol(Prompt, NEWLINE, my_event_callback)

        def fake_send_break1_cmd(duration):
            # Banner variant with battery-saver text and literal newlines.
            log.debug("IN fake_send_break1_cmd")
            self.protocol._linebuf = "[BREAK Wakeup A]\n" + \
                " Polled Mode is OFF -- Battery Saver is ONWorkHorse Broadband ADCP Version 50.40\n" + \
                "Teledyne RD Instruments (c) 1996-2010\n" + \
                "All Rights Reserved."

        def fake_send_break2_cmd(duration):
            # Banner variant joined with the driver's NEWLINE separator.
            log.debug("IN fake_send_break2_cmd")
            self.protocol._linebuf = "[BREAK Wakeup A]" + NEWLINE + \
                "WorkHorse Broadband ADCP Version 50.40" + NEWLINE + \
                "Teledyne RD Instruments (c) 1996-2010" + NEWLINE + \
                "All Rights Reserved."

        self.protocol._send_break_cmd = fake_send_break1_cmd
        self.assertTrue(self.protocol._send_break(500))
        self.protocol._send_break_cmd = fake_send_break2_cmd
        self.assertTrue(self.protocol._send_break(500))

    def test_driver_schema(self):
        """
        get the driver schema and verify it is configured properly
        """
        self.maxDiff = None
        driver = InstrumentDriver(self._got_data_event_callback)
        self.assert_driver_schema(driver, self._driver_parameters, self._driver_capabilities)

    def test_got_data(self):
        """
        Verify sample data passed through the got data method produces the correct data particles
        """
        # Create and initialize the instrument driver with a mock port agent
        driver = InstrumentDriver(self._got_data_event_callback)
        self.assert_initialize_driver(driver)
        self.assert_raw_particle_published(driver, True)
        # Start validating data particles
        self.assert_particle_published(driver, CALIBRATION_RAW_DATA, self.assert_particle_compass_calibration, True)
        self.assert_particle_published(driver, PS0_RAW_DATA, self.assert_particle_system_configuration, True)
        self.assert_particle_published(driver, SAMPLE_RAW_DATA1, self.assert_particle_pd0_data, True)
        self.assert_particle_published(driver, SAMPLE_RAW_DATA2, self.assert_particle_pd0_data, True)
        self.assert_particle_published(driver, SAMPLE_RAW_DATA3, self.assert_particle_pd0_data, True)
        self.assert_particle_published(driver, SAMPLE_RAW_DATA4, self.assert_particle_pd0_data, True)

    def test_driver_parameters(self):
        """
        Verify the set of parameters known by the driver
        """
        driver = InstrumentDriver(self._got_data_event_callback)
        self.assert_initialize_driver(driver, ProtocolState.COMMAND)
        expected_parameters = sorted(self._driver_parameters.keys())
        reported_parameters = sorted(driver.get_resource(Parameter.ALL))
        log.debug("*** Expected Parameters: %s" % expected_parameters)
        log.debug("*** Reported Parameters: %s" % reported_parameters)
        self.assertEqual(reported_parameters, expected_parameters)
        # Verify the parameter definitions
        self.assert_driver_parameter_definition(driver, self._driver_parameters)

    def test_capabilities(self):
        """
        Verify the FSM reports capabilities as expected. All states defined in this dict must
        also be defined in the protocol FSM.
        """
        capabilities = {
            ProtocolState.UNKNOWN: ['DRIVER_EVENT_DISCOVER'],
            ProtocolState.COMMAND: ['DRIVER_EVENT_CLOCK_SYNC',
                                    'DRIVER_EVENT_GET',
                                    'DRIVER_EVENT_INIT_PARAMS',
                                    'DRIVER_EVENT_SET',
                                    'DRIVER_EVENT_START_AUTOSAMPLE',
                                    'DRIVER_EVENT_START_DIRECT',
                                    'PROTOCOL_EVENT_CLEAR_ERROR_STATUS_WORD',
                                    'PROTOCOL_EVENT_CLEAR_FAULT_LOG',
                                    'PROTOCOL_EVENT_GET_CALIBRATION',
                                    'PROTOCOL_EVENT_GET_CONFIGURATION',
                                    'PROTOCOL_EVENT_GET_ERROR_STATUS_WORD',
                                    'PROTOCOL_EVENT_GET_FAULT_LOG',
                                    'PROTOCOL_EVENT_GET_INSTRUMENT_TRANSFORM_MATRIX',
                                    'PROTOCOL_EVENT_POWER_DOWN',
                                    'PROTOCOL_EVENT_RECOVER_AUTOSAMPLE',
                                    'PROTOCOL_EVENT_RUN_TEST_200',
                                    'PROTOCOL_EVENT_SAVE_SETUP_TO_RAM',
                                    'PROTOCOL_EVENT_SCHEDULED_CLOCK_SYNC',
                                    'PROTOCOL_EVENT_SEND_LAST_SAMPLE'],
            ProtocolState.AUTOSAMPLE: ['DRIVER_EVENT_STOP_AUTOSAMPLE',
                                       'DRIVER_EVENT_GET',
                                       'DRIVER_EVENT_INIT_PARAMS',
                                       'DRIVER_EVENT_DISCOVER',
                                       'PROTOCOL_EVENT_GET_CALIBRATION',
                                       'PROTOCOL_EVENT_GET_CONFIGURATION',
                                       'PROTOCOL_EVENT_SCHEDULED_CLOCK_SYNC'],
            ProtocolState.DIRECT_ACCESS: ['DRIVER_EVENT_STOP_DIRECT', 'EXECUTE_DIRECT']
        }
        driver = InstrumentDriver(self._got_data_event_callback)
        self.assert_capabilities(driver, capabilities)

    def test_driver_enums(self):
        """
        Verify that all driver enumeration has no duplicate values that might cause confusion. Also
        do a little extra validation for the Capabilites
        """
        self.assert_enum_has_no_duplicates(InstrumentCmds())
        self.assert_enum_has_no_duplicates(ProtocolState())
        self.assert_enum_has_no_duplicates(ProtocolEvent())
        self.assert_enum_has_no_duplicates(Parameter())
        self.assert_enum_has_no_duplicates(DataParticleType())
        self.assert_enum_has_no_duplicates(ScheduledJob())
        # Test capabilites for duplicates, them verify that capabilities is a subset of proto events
        self.assert_enum_has_no_duplicates(Capability())
        self.assert_enum_complete(Capability(), ProtocolEvent())

    def test_chunker(self):
        """
        Test the chunker and verify the particles created.
        """
        chunker = StringChunker(Protocol.sieve_function)
        self.assert_chunker_sample(chunker, SAMPLE_RAW_DATA1)
        self.assert_chunker_sample_with_noise(chunker, SAMPLE_RAW_DATA1)
        self.assert_chunker_fragmented_sample(chunker, SAMPLE_RAW_DATA1, 32)
        self.assert_chunker_combined_sample(chunker, SAMPLE_RAW_DATA1)
        self.assert_chunker_sample(chunker, PS0_RAW_DATA)
        self.assert_chunker_sample_with_noise(chunker, PS0_RAW_DATA)
        self.assert_chunker_fragmented_sample(chunker, PS0_RAW_DATA, 32)
        self.assert_chunker_combined_sample(chunker, PS0_RAW_DATA)
        self.assert_chunker_sample(chunker, CALIBRATION_RAW_DATA)
        self.assert_chunker_sample_with_noise(chunker, CALIBRATION_RAW_DATA)
        self.assert_chunker_fragmented_sample(chunker, CALIBRATION_RAW_DATA, 32)
        self.assert_chunker_combined_sample(chunker, CALIBRATION_RAW_DATA)

    def test_protocol_filter_capabilities(self):
        """
        This tests driver filter_capabilities.
        Iterate through available capabilities, and verify that they can pass successfully through the filter.
        Test silly made up capabilities to verify they are blocked by filter.
        """
        my_event_callback = Mock(spec="my_event_callback")
        protocol = Protocol(Prompt, NEWLINE, my_event_callback)
        driver_capabilities = Capability().list()
        test_capabilities = Capability().list()
        # Add a bogus capability that will be filtered out.
        test_capabilities.append("BOGUS_CAPABILITY")
        # Verify "BOGUS_CAPABILITY was filtered out.
        # (assertEquals is a deprecated alias removed in Python 3.12; use assertEqual)
        self.assertEqual(driver_capabilities, protocol._filter_capabilities(test_capabilities))
###############################################################################
#                            INTEGRATION TESTS                                #
#     Integration test test the direct driver / instrument interaction        #
#     but making direct calls via zeromq.                                     #
#     - Common Integration tests test the driver through the instrument agent #
#       and common for all drivers (minimum requirement for ION ingestion)    #
###############################################################################
@attr('INT', group='mi')
class IntFromIDK(WorkhorseDriverIntegrationTest, ADCPTMixin):
    """Integration tests for the ADCPT Workhorse driver."""

    # Class-level dict recording which parameters have been range stress
    # tested; shared across test methods on purpose so test_set_ranges can
    # cross-check coverage at the end.
    _tested = {}

    def test_autosample_particle_generation(self):
        """
        Test that we can generate particles when in autosample
        """
        log.debug("IN test_autosample_particle_generation")
        self.assert_initialize_driver()
        # lets set things to do faster pinging so the test is runable in our time scales.
        self.assert_set_bulk({Parameter.PINGS_PER_ENSEMBLE: 5,
                              Parameter.TIME_PER_PING: "00:10.00",
                              Parameter.TIME_PER_ENSEMBLE: "00:01:00.00"})
        self.assert_driver_command(ProtocolEvent.START_AUTOSAMPLE, state=ProtocolState.AUTOSAMPLE, delay=1)
        self.assert_async_particle_generation(DataParticleType.ADCP_PD0_PARSED_BEAM, self.assert_particle_pd0_data, timeout=200)
        self.assert_driver_command(ProtocolEvent.STOP_AUTOSAMPLE, state=ProtocolState.COMMAND, delay=50)

    def test_set_ranges(self):
        """
        Run the per-parameter range tests, then verify every writable driver
        parameter was exercised.  Updated to match ADCPT-B.
        """
        log.debug("IN test_set_ranges")
        self.assert_initialize_driver()
        self._test_set_serial_data_out_readonly()
        self._test_set_serial_flow_control_readonly()
        self._test_set_banner_readonly()
        self._test_set_instrument_id()
        self._test_set_sleep_enable()
        self._test_set_save_nvram_to_recorder()
        self._test_set_polled_mode()
        self._test_set_xmit_power()
        self._test_set_heading_alignment()
        self._test_set_speed_of_sound()
        self._test_set_transducer_depth()
        self._test_set_salinity()
        self._test_set_coordinate_transformation()
        self._test_set_sensor_source()
        self._test_set_time_per_burst()
        self._test_set_ensembles_per_burst()
        self._test_set_time_of_first_ping_readonly()
        self._test_set_buffer_output_period()
        self._test_set_false_target_threshold()
        self._test_set_correlation_threshold()
        self._test_set_serial_out_fw_switches_readonly()
        self._test_set_error_velocity_threshold()
        self._test_set_blank_after_transmit_readonly()
        self._test_set_clip_data_past_bottom()
        self._test_set_receiver_gain_select()
        self._test_set_water_reference_layer()
        self._test_set_water_profiling_mode_readonly()
        self._test_set_transmit_length()
        self._test_set_ping_weight()
        self._test_set_ambiguity_velocity()
        self._test_set_time_per_ensemble()
        self._test_set_time_per_ping()
        self._test_set_bandwidth_control_readonly()
        self._test_set_number_of_depth_cells()
        self._test_set_pings_per_ensemble()
        self._test_set_depth_cell_size()
        # Cross-check coverage: warn about extra tests, fail on missing ones.
        fail = False
        log.error("self._tested = " + repr(self._tested))
        for k in self._tested.keys():
            if k not in self._driver_parameters:
                log.error("*WARNING* " + k + " was tested but is not in _driver_parameters")
                #fail = True
        # TIME_OF_FIRST_PING and TIME are exempt from range testing.
        # NOTE: the original concatenated a list with dict.keys(), which is
        # Python-2-only; separate membership tests are equivalent and portable.
        for k in self._driver_parameters.keys():
            if k not in self._tested and k not in (Parameter.TIME_OF_FIRST_PING, Parameter.TIME):
                log.error("*ERROR* " + k + " is in _driver_parameters but was not tested.")
                fail = True
        self.assertFalse(fail, "See above for un-exercized parameters.")

    def test_set_bulk(self):
        """
        Test all set commands. Verify all exception cases.
        """
        log.error("IN test_set_bulk")
        self.assert_initialize_driver()
        params = {}
        for k in self._driver_parameters.keys():
            if self.VALUE in self._driver_parameters[k]:
                if self._driver_parameters[k][self.READONLY] == False:
                    params[k] = self._driver_parameters[k][self.VALUE]
        # Set all parameters to a known ground state
        self.assert_set_bulk(params)
        ###
        # Instrument Parameteres
        ###
        # set to off_values so we get a config change
        for k in self._driver_parameters.keys():
            if self.VALUE in self._driver_parameters[k]:
                if False == self._driver_parameters[k][self.READONLY]:
                    self.assert_set(k, self._driver_parameters[k][self.OFF_VALUE])
                    # BUGFIX: log the OFF_VALUE actually being set (was VALUE).
                    log.debug("WANT PARAM CHANGE EVENT, SETTING " + k + " to " + str(self._driver_parameters[k][self.OFF_VALUE]))
                if True == self._driver_parameters[k][self.READONLY]:
                    self.assert_set_readonly(k)
        # restore the ground-state values; read-only sets must raise
        for k in self._driver_parameters.keys():
            if self.VALUE in self._driver_parameters[k]:
                if False == self._driver_parameters[k][self.READONLY]:
                    self.assert_set(k, self._driver_parameters[k][self.VALUE])
                    log.debug("WANT PARAM CHANGE EVENT, SETTING " + k + " to " + str(self._driver_parameters[k][self.VALUE]))
                if True == self._driver_parameters[k][self.READONLY]:
                    self.assert_set_exception(k, self._driver_parameters[k][self.VALUE])
                    log.debug("WANT EXCEPTION SETTING " + k + " to " + str(self._driver_parameters[k][self.VALUE]))

    def test_startup_params(self):
        """
        Verify that startup parameters are applied correctly. Generally this
        happens in the driver discovery method.

        since nose orders the tests by ascii value this should run first.
        """
        log.error("IN test_startup_params")
        self.assert_initialize_driver()
        # Updated to reflect T-F startup params.
        get_values = {
            Parameter.INSTRUMENT_ID: 0,
            Parameter.SLEEP_ENABLE: 1,
            Parameter.SAVE_NVRAM_TO_RECORDER: True,
            Parameter.POLLED_MODE: False,
            Parameter.XMIT_POWER: 255,
            Parameter.HEADING_ALIGNMENT: 0,
            Parameter.SPEED_OF_SOUND: 1500,
            Parameter.TRANSDUCER_DEPTH: 0,
            Parameter.SALINITY: 35,
            Parameter.COORDINATE_TRANSFORMATION: '00111',
            Parameter.SENSOR_SOURCE: "1111101",
            Parameter.TIME_PER_BURST: '00:00:00.00',
            Parameter.ENSEMBLES_PER_BURST: 0,
            Parameter.BUFFER_OUTPUT_PERIOD: '00:00:00',
            Parameter.FALSE_TARGET_THRESHOLD: '050,001',
            Parameter.CORRELATION_THRESHOLD: 64,
            Parameter.ERROR_VELOCITY_THRESHOLD: 2000,
            Parameter.CLIP_DATA_PAST_BOTTOM: False,
            Parameter.RECEIVER_GAIN_SELECT: 1,
            Parameter.WATER_REFERENCE_LAYER: '001,005',
            Parameter.TRANSMIT_LENGTH: 0,
            Parameter.PING_WEIGHT: 0,
            Parameter.AMBIGUITY_VELOCITY: 175,
            Parameter.TIME_PER_ENSEMBLE: '01:00:00.00',
            Parameter.TIME_PER_PING: '01:20.00',
            Parameter.NUMBER_OF_DEPTH_CELLS: 30,
            Parameter.PINGS_PER_ENSEMBLE: 1,
            Parameter.DEPTH_CELL_SIZE: 800,
        }
        # Change the values of these parameters to something before the
        # driver is reinitalized. They should be blown away on reinit.
        new_values = {}
        for k in self._driver_parameters.keys():
            if self.VALUE in self._driver_parameters[k]:
                if False == self._driver_parameters[k][self.READONLY]:
                    new_values[k] = self._driver_parameters[k][self.OFF_VALUE]
        self.assert_startup_parameters(self.assert_driver_parameters, new_values, get_values)

    def _test_set_heading_alignment(self):
        """Range test HEADING_ALIGNMENT: valid -17999 to 18000."""
        log.debug("====== Testing ranges for HEADING_ALIGNMENT ======")
        # HEADING_ALIGNMENT: -- -17999 to 18000
        self.assert_set(Parameter.HEADING_ALIGNMENT, -17999)
        self.assert_set(Parameter.HEADING_ALIGNMENT, 0)
        self.assert_set(Parameter.HEADING_ALIGNMENT, 18000)
        self.assert_set_exception(Parameter.HEADING_ALIGNMENT, -18000)
        self.assert_set_exception(Parameter.HEADING_ALIGNMENT, 18001)
        # Reset to the known good value and record coverage.
        self.assert_set(Parameter.HEADING_ALIGNMENT, self._driver_parameters[Parameter.HEADING_ALIGNMENT][self.VALUE])
        self._tested[Parameter.HEADING_ALIGNMENT] = True

    def _test_set_transducer_depth(self):
        """Range test TRANSDUCER_DEPTH: valid 0 to 65535."""
        log.debug("====== Testing ranges for TRANSDUCER_DEPTH ======")
        # TRANSDUCER_DEPTH: 0 to 65535 (the original comment wrongly said
        # HEADING_ALIGNMENT -17999 to 18000 — stale copy/paste)
        self.assert_set(Parameter.TRANSDUCER_DEPTH, 0)
        self.assert_set(Parameter.TRANSDUCER_DEPTH, 32767)
        self.assert_set(Parameter.TRANSDUCER_DEPTH, 65535)
        self.assert_set_exception(Parameter.TRANSDUCER_DEPTH, -1)
        self.assert_set_exception(Parameter.TRANSDUCER_DEPTH, 65536)
        # NOTE(review): the two TIME_PER_BURST assertions below look like a
        # copy/paste leftover in this transducer-depth test; kept to preserve
        # existing coverage — confirm intent.
        self.assert_set_exception(Parameter.TIME_PER_BURST, "LEROY JENKINS")
        self.assert_set_exception(Parameter.TIME_PER_BURST, 3.1415926)
        # Reset to the known good value and record coverage.
        self.assert_set(Parameter.TRANSDUCER_DEPTH, self._driver_parameters[Parameter.TRANSDUCER_DEPTH][self.VALUE])
        self._tested[Parameter.TRANSDUCER_DEPTH] = True

    def _test_set_depth_cell_size(self):
        """
        Range test DEPTH_CELL_SIZE.
        * Override existing function, this instrument has a different range maxing out at 1600
        """
        log.debug("====== Testing ranges for DEPTH_CELL_SIZE ======")
        # DEPTH_CELL_SIZE: int 20 - 1600 per the assertions below (the
        # original comment said 80 - 3200, which contradicts the asserts)
        self.assert_set(Parameter.DEPTH_CELL_SIZE, 20)
        self.assert_set(Parameter.DEPTH_CELL_SIZE, 1600)
        self.assert_set_exception(Parameter.DEPTH_CELL_SIZE, 1601)
        self.assert_set_exception(Parameter.DEPTH_CELL_SIZE, -1)
        self.assert_set_exception(Parameter.DEPTH_CELL_SIZE, 19)
        self.assert_set_exception(Parameter.DEPTH_CELL_SIZE, 3.1415926)
        self.assert_set_exception(Parameter.DEPTH_CELL_SIZE, "LEROY JENKINS")
        #
        # Reset to good value.
        #
        #self.assert_set(TeledyneParameter.DEPTH_CELL_SIZE, self._driver_parameter_defaults[TeledyneParameter.DEPTH_CELL_SIZE])
        self.assert_set(Parameter.DEPTH_CELL_SIZE, self._driver_parameters[Parameter.DEPTH_CELL_SIZE][self.VALUE])
        self._tested[Parameter.DEPTH_CELL_SIZE] = True
###############################################################################
#                            QUALIFICATION TESTS                              #
#  Device specific qualification tests are for                                #
#  testing device specific capabilities                                       #
###############################################################################
@attr('QUAL', group='mi')
class QualFromIDK(WorkhorseDriverQualificationTest, ADCPTMixin):
    # works
    def test_autosample(self):
        """
        Verify autosample works and data particles are created
        """
        self.assert_enter_command_mode()
        # Slow the ensemble/ping rate so particles arrive within the timeouts.
        self.assert_set_parameter(Parameter.TIME_PER_ENSEMBLE, '00:01:00.00', True)
        self.assert_set_parameter(Parameter.TIME_PER_PING, '00:30.00', True)
        self.assert_start_autosample()
        self.assert_particle_async(DataParticleType.ADCP_PD0_PARSED_BEAM, self.assert_particle_pd0_data, timeout=90)
        # Polled particles must also work while autosampling.
        self.assert_particle_polled(ProtocolEvent.GET_CALIBRATION, self.assert_compass_calibration, DataParticleType.ADCP_COMPASS_CALIBRATION, sample_count=1, timeout=50)
        self.assert_particle_polled(ProtocolEvent.GET_CONFIGURATION, self.assert_configuration, DataParticleType.ADCP_SYSTEM_CONFIGURATION, sample_count=1, timeout=50)
        # Stop autosample and do run a couple commands.
        self.assert_stop_autosample()
        self.assert_particle_polled(ProtocolEvent.GET_CALIBRATION, self.assert_compass_calibration, DataParticleType.ADCP_COMPASS_CALIBRATION, sample_count=1)
        self.assert_particle_polled(ProtocolEvent.GET_CONFIGURATION, self.assert_configuration, DataParticleType.ADCP_SYSTEM_CONFIGURATION, sample_count=1)
        # Restart autosample and gather a couple samples
        self.assert_sample_autosample(self.assert_particle_pd0_data, DataParticleType.ADCP_PD0_PARSED_BEAM)

    def assert_cycle(self):
        """
        Run one full autosample cycle: start, verify async and polled
        particles, stop, then verify polled particles still work in command.
        """
        self.assert_start_autosample()
        self.assert_particle_async(DataParticleType.ADCP_PD0_PARSED_BEAM, self.assert_particle_pd0_data, timeout=200)
        self.assert_particle_polled(ProtocolEvent.GET_CALIBRATION, self.assert_compass_calibration, DataParticleType.ADCP_COMPASS_CALIBRATION, sample_count=1, timeout=60)
        self.assert_particle_polled(ProtocolEvent.GET_CONFIGURATION, self.assert_configuration, DataParticleType.ADCP_SYSTEM_CONFIGURATION, sample_count=1, timeout=60)
        # Stop autosample and do run a couple commands.
        self.assert_stop_autosample()
        self.assert_particle_polled(ProtocolEvent.GET_CALIBRATION, self.assert_compass_calibration, DataParticleType.ADCP_COMPASS_CALIBRATION, sample_count=1)
        self.assert_particle_polled(ProtocolEvent.GET_CONFIGURATION, self.assert_configuration, DataParticleType.ADCP_SYSTEM_CONFIGURATION, sample_count=1)
###############################################################################
# PUBLICATION TESTS #
# Device specific publication tests are for #
# testing device specific capabilities #
###############################################################################
@attr('PUB', group='mi')
class PubFromIDK(WorkhorseDriverPublicationTest):
    """Device-specific publication tests.

    No additional behavior here: the inherited
    WorkhorseDriverPublicationTest suite is used unchanged.
    """
    pass
|
import unittest
from test import support
from contextlib import closing
import gc
import pickle
import select
import signal
import struct
import subprocess
import traceback
import sys, os, time, errno
try:
import threading
except ImportError:
threading = None
if sys.platform in ('os2', 'riscos'):
raise unittest.SkipTest("Can't test signal on %s" % sys.platform)
class HandlerBCalled(Exception):
    """Raised by InterProcessSignalTests.handlerB so that delivery of
    SIGUSR1 becomes observable as an exception in the interrupted code."""
    pass
def exit_subprocess():
    """Use os._exit(0) to exit the current subprocess.

    Otherwise, the test catches the SystemExit and continues executing
    in parallel with the original test, so you wind up with an
    exponential number of tests running concurrently.
    """
    # os._exit() terminates immediately without raising SystemExit or
    # running cleanup handlers, unlike sys.exit().
    os._exit(0)
def ignoring_eintr(__func, *args, **kwargs):
    """Invoke ``__func(*args, **kwargs)``, returning None on EINTR.

    An EnvironmentError whose errno is anything other than EINTR is
    propagated unchanged; a successful call's result is returned as-is.
    """
    try:
        return __func(*args, **kwargs)
    except EnvironmentError as err:
        # Swallow only interrupted-system-call failures.
        if err.errno == errno.EINTR:
            return None
        raise
@unittest.skipIf(sys.platform == "win32", "Not valid on Windows")
class InterProcessSignalTests(unittest.TestCase):
    """Verify delivery of signals sent from other processes.

    The real assertions run in a forked child (run_test) so the main
    test-running process is insulated from the signals; results are
    reported back over a pipe (see test_main).
    """
    MAX_DURATION = 20   # Entire test should last at most 20 sec.

    def setUp(self):
        # Disable cyclic garbage collection for the duration of the test
        # (restored in tearDown if it was enabled).
        self.using_gc = gc.isenabled()
        gc.disable()

    def tearDown(self):
        if self.using_gc:
            gc.enable()

    def format_frame(self, frame, limit=None):
        """Render *frame*'s stack as one string, for diagnostics."""
        return ''.join(traceback.format_stack(frame, limit=limit))

    def handlerA(self, signum, frame):
        # SIGHUP handler: just records that it ran.
        self.a_called = True

    def handlerB(self, signum, frame):
        # SIGUSR1 handler: records that it ran, then raises so the
        # interrupted syscall observes the signal as an exception.
        self.b_called = True
        raise HandlerBCalled(signum, self.format_frame(frame))

    def wait(self, child):
        """Wait for child to finish, ignoring EINTR."""
        while True:
            try:
                child.wait()
                return
            except OSError as e:
                if e.errno != errno.EINTR:
                    raise

    def run_test(self):
        # Install handlers. This function runs in a sub-process, so we
        # don't worry about re-setting the default handlers.
        signal.signal(signal.SIGHUP, self.handlerA)
        signal.signal(signal.SIGUSR1, self.handlerB)
        signal.signal(signal.SIGUSR2, signal.SIG_IGN)
        signal.signal(signal.SIGALRM, signal.default_int_handler)

        # Variables the signals will modify:
        self.a_called = False
        self.b_called = False

        # Let the sub-processes know who to send signals to.
        pid = os.getpid()

        # SIGHUP: plain handler, no exception raised.
        child = ignoring_eintr(subprocess.Popen, ['kill', '-HUP', str(pid)])
        if child:
            self.wait(child)
            if not self.a_called:
                time.sleep(1)  # Give the signal time to be delivered.
        self.assertTrue(self.a_called)
        self.assertFalse(self.b_called)
        self.a_called = False

        # Make sure the signal isn't delivered while the previous
        # Popen object is being destroyed, because __del__ swallows
        # exceptions.
        del child
        try:
            child = subprocess.Popen(['kill', '-USR1', str(pid)])
            # This wait should be interrupted by the signal's exception.
            self.wait(child)
            time.sleep(1)  # Give the signal time to be delivered.
            self.fail('HandlerBCalled exception not thrown')
        except HandlerBCalled:
            self.assertTrue(self.b_called)
            self.assertFalse(self.a_called)

        # SIGUSR2 is set to SIG_IGN above, so this must be a no-op.
        child = ignoring_eintr(subprocess.Popen, ['kill', '-USR2', str(pid)])
        if child:
            self.wait(child)  # Nothing should happen.

        try:
            signal.alarm(1)
            # The race condition in pause doesn't matter in this case,
            # since alarm is going to raise a KeyboardException, which
            # will skip the call.
            signal.pause()
            # But if another signal arrives before the alarm, pause
            # may return early.
            time.sleep(1)
        except KeyboardInterrupt:
            pass
        except:
            self.fail("Some other exception woke us from pause: %s" %
                      traceback.format_exc())
        else:
            self.fail("pause returned of its own accord, and the signal"
                      " didn't arrive after another second.")

    # Issue 3864, unknown if this affects earlier versions of freebsd also
    @unittest.skipIf(sys.platform=='freebsd6',
        'inter process signals not reliable (do not mix well with threading) '
        'on freebsd6')
    def test_main(self):
        # This function spawns a child process to insulate the main
        # test-running process from all the signals. It then
        # communicates with that child process over a pipe and
        # re-raises information about any exceptions the child
        # throws. The real work happens in self.run_test().
        os_done_r, os_done_w = os.pipe()
        with closing(os.fdopen(os_done_r, 'rb')) as done_r, \
             closing(os.fdopen(os_done_w, 'wb')) as done_w:
            child = os.fork()
            if child == 0:
                # In the child process; run the test and report results
                # through the pipe.
                try:
                    done_r.close()
                    # Have to close done_w again here because
                    # exit_subprocess() will skip the enclosing with block.
                    with closing(done_w):
                        try:
                            self.run_test()
                        except:
                            # Ship the traceback to the parent as a pickle.
                            pickle.dump(traceback.format_exc(), done_w)
                        else:
                            pickle.dump(None, done_w)
                except:
                    print('Uh oh, raised from pickle.')
                    traceback.print_exc()
                finally:
                    exit_subprocess()

            done_w.close()
            # Block for up to MAX_DURATION seconds for the test to finish.
            r, w, x = select.select([done_r], [], [], self.MAX_DURATION)
            if done_r in r:
                tb = pickle.load(done_r)
                if tb:
                    self.fail(tb)
            else:
                os.kill(child, signal.SIGKILL)
                self.fail('Test deadlocked after %d seconds.' %
                          self.MAX_DURATION)
@unittest.skipIf(sys.platform == "win32", "Not valid on Windows")
class PosixTests(unittest.TestCase):
    """Argument validation for signal.signal()/getsignal() on POSIX."""

    def trivial_signal_handler(self, *args):
        # No-op handler, used only as a syntactically valid handler value.
        pass

    def test_out_of_range_signal_number_raises_error(self):
        self.assertRaises(ValueError, signal.getsignal, 4242)

        self.assertRaises(ValueError, signal.signal, 4242,
                          self.trivial_signal_handler)

    def test_setting_signal_handler_to_none_raises_error(self):
        self.assertRaises(TypeError, signal.signal,
                          signal.SIGUSR1, None)

    def test_getsignal(self):
        # Installing a handler must be observable via getsignal(), and
        # restoring the previous handler must round-trip as well.
        hup = signal.signal(signal.SIGHUP, self.trivial_signal_handler)
        self.assertEqual(signal.getsignal(signal.SIGHUP),
                         self.trivial_signal_handler)
        signal.signal(signal.SIGHUP, hup)
        self.assertEqual(signal.getsignal(signal.SIGHUP), hup)
@unittest.skipUnless(sys.platform == "win32", "Windows specific")
class WindowsSignalTests(unittest.TestCase):
    """Windows-only checks for the restricted set of supported signals."""

    def test_issue9324(self):
        # Updated for issue #10003, adding SIGBREAK
        handler = lambda x, y: None
        for sig in (signal.SIGABRT, signal.SIGBREAK, signal.SIGFPE,
                    signal.SIGILL, signal.SIGINT, signal.SIGSEGV,
                    signal.SIGTERM):
            # Set and then reset a handler for signals that work on windows
            signal.signal(sig, signal.signal(sig, handler))

        # Signal numbers outside the supported set must be rejected.
        with self.assertRaises(ValueError):
            signal.signal(-1, handler)

        with self.assertRaises(ValueError):
            signal.signal(7, handler)
@unittest.skipIf(sys.platform == "win32", "Not valid on Windows")
class WakeupSignalTests(unittest.TestCase):
    """Tests for signal.set_wakeup_fd().

    setUp() installs the write end of a non-blocking pipe as the wakeup
    fd; each delivered signal writes its number (one byte) to that pipe,
    which check_signum() reads back and compares.
    """
    TIMEOUT_FULL = 10
    TIMEOUT_HALF = 5

    def handler(self, signum, frame):
        # No-op handler; delivery is observed through the wakeup fd.
        pass

    def check_signum(self, *signals):
        """Assert exactly *signals* were written to the wakeup fd."""
        # Read one byte more than expected to detect extra signals.
        data = os.read(self.read, len(signals)+1)
        raised = struct.unpack('%uB' % len(data), data)
        # We don't care of the signal delivery order (it's not portable or
        # reliable)
        raised = set(raised)
        signals = set(signals)
        self.assertEqual(raised, signals)

    def test_wakeup_fd_early(self):
        import select

        signal.alarm(1)
        before_time = time.time()
        # We attempt to get a signal during the sleep,
        # before select is called
        time.sleep(self.TIMEOUT_FULL)
        mid_time = time.time()
        self.assertTrue(mid_time - before_time < self.TIMEOUT_HALF)
        # The pending byte on the wakeup fd must make select return at once.
        select.select([self.read], [], [], self.TIMEOUT_FULL)
        after_time = time.time()
        self.assertTrue(after_time - mid_time < self.TIMEOUT_HALF)
        self.check_signum(signal.SIGALRM)

    def test_wakeup_fd_during(self):
        import select

        signal.alarm(1)
        before_time = time.time()
        # We attempt to get a signal during the select call
        self.assertRaises(select.error, select.select,
            [self.read], [], [], self.TIMEOUT_FULL)
        after_time = time.time()
        self.assertTrue(after_time - before_time < self.TIMEOUT_HALF)
        self.check_signum(signal.SIGALRM)

    def test_signum(self):
        old_handler = signal.signal(signal.SIGUSR1, self.handler)
        self.addCleanup(signal.signal, signal.SIGUSR1, old_handler)
        os.kill(os.getpid(), signal.SIGUSR1)
        os.kill(os.getpid(), signal.SIGALRM)
        self.check_signum(signal.SIGUSR1, signal.SIGALRM)

    @unittest.skipUnless(hasattr(signal, 'pthread_sigmask'),
                         'need signal.pthread_sigmask()')
    @unittest.skipUnless(hasattr(signal, 'pthread_kill'),
                         'need signal.pthread_kill()')
    def test_pending(self):
        signum1 = signal.SIGUSR1
        signum2 = signal.SIGUSR2
        tid = threading.current_thread().ident

        old_handler = signal.signal(signum1, self.handler)
        self.addCleanup(signal.signal, signum1, old_handler)
        old_handler = signal.signal(signum2, self.handler)
        self.addCleanup(signal.signal, signum2, old_handler)

        signal.pthread_sigmask(signal.SIG_BLOCK, (signum1, signum2))
        signal.pthread_kill(tid, signum1)
        signal.pthread_kill(tid, signum2)
        # Unblocking the 2 signals calls the C signal handler twice
        signal.pthread_sigmask(signal.SIG_UNBLOCK, (signum1, signum2))

        self.check_signum(signum1, signum2)

    def setUp(self):
        import fcntl

        self.alrm = signal.signal(signal.SIGALRM, self.handler)
        self.read, self.write = os.pipe()
        # Make the write end non-blocking so signal delivery never stalls.
        flags = fcntl.fcntl(self.write, fcntl.F_GETFL, 0)
        flags = flags | os.O_NONBLOCK
        fcntl.fcntl(self.write, fcntl.F_SETFL, flags)
        self.old_wakeup = signal.set_wakeup_fd(self.write)

    def tearDown(self):
        signal.set_wakeup_fd(self.old_wakeup)
        os.close(self.read)
        os.close(self.write)
        signal.signal(signal.SIGALRM, self.alrm)
@unittest.skipIf(sys.platform == "win32", "Not valid on Windows")
class SiginterruptTest(unittest.TestCase):
    """Tests for signal.siginterrupt(): whether a handled signal
    interrupts a blocking os.read() or lets it be restarted."""

    def setUp(self):
        """Install a no-op signal handler that can be set to allow
        interrupts or not, and arrange for the original signal handler to be
        re-installed when the test is finished.
        """
        self.signum = signal.SIGUSR1
        oldhandler = signal.signal(self.signum, lambda x,y: None)
        self.addCleanup(signal.signal, self.signum, oldhandler)

    def readpipe_interrupted(self):
        """Perform a read during which a signal will arrive.  Return True if the
        read is interrupted by the signal and raises an exception.  Return False
        if it returns normally.
        """
        # Create a pipe that can be used for the read.  Also clean it up
        # when the test is over, since nothing else will (but see below for
        # the write end).
        r, w = os.pipe()
        self.addCleanup(os.close, r)

        # Create another process which can send a signal to this one to try
        # to interrupt the read.
        ppid = os.getpid()
        pid = os.fork()

        if pid == 0:
            # Child code: sleep to give the parent enough time to enter the
            # read() call (there's a race here, but it's really tricky to
            # eliminate it); then signal the parent process.  Also, sleep
            # again to make it likely that the signal is delivered to the
            # parent process before the child exits.  If the child exits
            # first, the write end of the pipe will be closed and the test
            # is invalid.
            try:
                time.sleep(0.2)
                os.kill(ppid, self.signum)
                time.sleep(0.2)
            finally:
                # No matter what, just exit as fast as possible now.
                exit_subprocess()
        else:
            # Parent code.
            # Make sure the child is eventually reaped, else it'll be a
            # zombie for the rest of the test suite run.
            self.addCleanup(os.waitpid, pid, 0)

            # Close the write end of the pipe.  The child has a copy, so
            # it's not really closed until the child exits.  We need it to
            # close when the child exits so that in the non-interrupt case
            # the read eventually completes, otherwise we could just close
            # it *after* the test.
            os.close(w)

            # Try the read and report whether it is interrupted or not to
            # the caller.
            try:
                d = os.read(r, 1)
                return False
            except OSError as err:
                if err.errno != errno.EINTR:
                    raise
                return True

    def test_without_siginterrupt(self):
        # If a signal handler is installed and siginterrupt is not called
        # at all, when that signal arrives, it interrupts a syscall that's in
        # progress.
        i = self.readpipe_interrupted()
        self.assertTrue(i)
        # Arrival of the signal shouldn't have changed anything.
        i = self.readpipe_interrupted()
        self.assertTrue(i)

    def test_siginterrupt_on(self):
        # If a signal handler is installed and siginterrupt is called with
        # a true value for the second argument, when that signal arrives, it
        # interrupts a syscall that's in progress.
        signal.siginterrupt(self.signum, 1)
        i = self.readpipe_interrupted()
        self.assertTrue(i)
        # Arrival of the signal shouldn't have changed anything.
        i = self.readpipe_interrupted()
        self.assertTrue(i)

    def test_siginterrupt_off(self):
        # If a signal handler is installed and siginterrupt is called with
        # a false value for the second argument, when that signal arrives, it
        # does not interrupt a syscall that's in progress.
        signal.siginterrupt(self.signum, 0)
        i = self.readpipe_interrupted()
        self.assertFalse(i)
        # Arrival of the signal shouldn't have changed anything.
        i = self.readpipe_interrupted()
        self.assertFalse(i)
@unittest.skipIf(sys.platform == "win32", "Not valid on Windows")
class ItimerTest(unittest.TestCase):
    """Tests for signal.setitimer()/getitimer() with the REAL, VIRTUAL
    and PROF interval timers."""

    def setUp(self):
        self.hndl_called = False
        self.hndl_count = 0
        self.itimer = None
        self.old_alarm = signal.signal(signal.SIGALRM, self.sig_alrm)

    def tearDown(self):
        signal.signal(signal.SIGALRM, self.old_alarm)
        if self.itimer is not None: # test_itimer_exc doesn't change this attr
            # just ensure that itimer is stopped
            signal.setitimer(self.itimer, 0)

    def sig_alrm(self, *args):
        # SIGALRM handler: record the call.
        self.hndl_called = True

    def sig_vtalrm(self, *args):
        # SIGVTALRM handler: disables the timer on the 3rd call and must
        # not be invoked again after that.
        self.hndl_called = True

        if self.hndl_count > 3:
            # it shouldn't be here, because it should have been disabled.
            raise signal.ItimerError("setitimer didn't disable ITIMER_VIRTUAL "
                "timer.")
        elif self.hndl_count == 3:
            # disable ITIMER_VIRTUAL, this function shouldn't be called anymore
            signal.setitimer(signal.ITIMER_VIRTUAL, 0)

        self.hndl_count += 1

    def sig_prof(self, *args):
        # SIGPROF handler: record the call and disable the timer at once.
        self.hndl_called = True
        signal.setitimer(signal.ITIMER_PROF, 0)

    def test_itimer_exc(self):
        # XXX I'm assuming -1 is an invalid itimer, but maybe some platform
        # defines it ?
        self.assertRaises(signal.ItimerError, signal.setitimer, -1, 0)
        # Negative times are treated as zero on some platforms.
        if 0:
            self.assertRaises(signal.ItimerError,
                              signal.setitimer, signal.ITIMER_REAL, -1)

    def test_itimer_real(self):
        self.itimer = signal.ITIMER_REAL
        signal.setitimer(self.itimer, 1.0)
        signal.pause()
        self.assertEqual(self.hndl_called, True)

    # Issue 3864, unknown if this affects earlier versions of freebsd also
    @unittest.skipIf(sys.platform in ('freebsd6', 'netbsd5'),
        'itimer not reliable (does not mix well with threading) on some BSDs.')
    def test_itimer_virtual(self):
        self.itimer = signal.ITIMER_VIRTUAL
        signal.signal(signal.SIGVTALRM, self.sig_vtalrm)
        signal.setitimer(self.itimer, 0.3, 0.2)

        start_time = time.time()
        while time.time() - start_time < 60.0:
            # use up some virtual time by doing real work
            _ = pow(12345, 67890, 10000019)
            if signal.getitimer(self.itimer) == (0.0, 0.0):
                break # sig_vtalrm handler stopped this itimer
        else: # Issue 8424
            self.skipTest("timeout: likely cause: machine too slow or load too "
                          "high")

        # virtual itimer should be (0.0, 0.0) now
        self.assertEqual(signal.getitimer(self.itimer), (0.0, 0.0))
        # and the handler should have been called
        self.assertEqual(self.hndl_called, True)

    # Issue 3864, unknown if this affects earlier versions of freebsd also
    @unittest.skipIf(sys.platform=='freebsd6',
        'itimer not reliable (does not mix well with threading) on freebsd6')
    def test_itimer_prof(self):
        self.itimer = signal.ITIMER_PROF
        signal.signal(signal.SIGPROF, self.sig_prof)
        signal.setitimer(self.itimer, 0.2, 0.2)

        start_time = time.time()
        while time.time() - start_time < 60.0:
            # do some work
            _ = pow(12345, 67890, 10000019)
            if signal.getitimer(self.itimer) == (0.0, 0.0):
                break # sig_prof handler stopped this itimer
        else: # Issue 8424
            self.skipTest("timeout: likely cause: machine too slow or load too "
                          "high")

        # profiling itimer should be (0.0, 0.0) now
        self.assertEqual(signal.getitimer(self.itimer), (0.0, 0.0))
        # and the handler should have been called
        self.assertEqual(self.hndl_called, True)
class PendingSignalsTests(unittest.TestCase):
    """
    Test pthread_sigmask(), pthread_kill(), sigpending() and sigwait()
    functions.
    """
    def setUp(self):
        self.has_pthread_kill = hasattr(signal, 'pthread_kill')

    def handler(self, signum, frame):
        # Raises ZeroDivisionError so tests can detect that the handler ran.
        1/0

    def read_sigmask(self):
        """Return the current signal mask (SIG_BLOCK with an empty set)."""
        return signal.pthread_sigmask(signal.SIG_BLOCK, [])

    def can_test_blocked_signals(self, skip):
        """
        Check if a blocked signal can be raised to the main thread without
        calling its signal handler. We need pthread_kill() or exactly one
        thread (the main thread).

        Return True if it's possible. Otherwise, return False and print a
        warning if skip is False, or raise a SkipTest exception if skip is
        True.
        """
        if self.has_pthread_kill:
            return True

        # The fault handler timeout thread masks all signals. If the main
        # thread masks also SIGUSR1, all threads mask this signal. In this
        # case, if we send SIGUSR1 to the process, the signal is pending in the
        # main or the faulthandler timeout thread. Unblock SIGUSR1 in the main
        # thread calls the signal handler only if the signal is pending for the
        # main thread. Stop the faulthandler timeout thread to workaround this
        # problem.
        import faulthandler
        faulthandler.cancel_dump_tracebacks_later()

        # Issue #11998: The _tkinter module loads the Tcl library which
        # creates a thread waiting events in select(). This thread receives
        # signals blocked by all other threads. We cannot test blocked
        # signals
        if '_tkinter' in sys.modules:
            message = ("_tkinter is loaded and pthread_kill() is missing, "
                       "cannot test blocked signals (issue #11998)")
            if skip:
                self.skipTest(message)
            else:
                print("WARNING: %s" % message)
            return False
        return True

    def kill(self, signum):
        """Deliver signum to this thread (pthread_kill) or process (kill)."""
        if self.has_pthread_kill:
            tid = threading.get_ident()
            signal.pthread_kill(tid, signum)
        else:
            pid = os.getpid()
            os.kill(pid, signum)

    @unittest.skipUnless(hasattr(signal, 'sigpending'),
                         'need signal.sigpending()')
    def test_sigpending_empty(self):
        self.assertEqual(signal.sigpending(), set())

    @unittest.skipUnless(hasattr(signal, 'pthread_sigmask'),
                         'need signal.pthread_sigmask()')
    @unittest.skipUnless(hasattr(signal, 'sigpending'),
                         'need signal.sigpending()')
    def test_sigpending(self):
        self.can_test_blocked_signals(True)

        signum = signal.SIGUSR1
        old_handler = signal.signal(signum, self.handler)
        self.addCleanup(signal.signal, signum, old_handler)

        # Blocked + raised => pending; unblocking runs the handler.
        signal.pthread_sigmask(signal.SIG_BLOCK, [signum])
        self.kill(signum)
        self.assertEqual(signal.sigpending(), {signum})
        with self.assertRaises(ZeroDivisionError):
            signal.pthread_sigmask(signal.SIG_UNBLOCK, [signum])

    @unittest.skipUnless(hasattr(signal, 'pthread_kill'),
                         'need signal.pthread_kill()')
    def test_pthread_kill(self):
        signum = signal.SIGUSR1
        current = threading.get_ident()
        old_handler = signal.signal(signum, self.handler)
        self.addCleanup(signal.signal, signum, old_handler)
        with self.assertRaises(ZeroDivisionError):
            signal.pthread_kill(current, signum)

    @unittest.skipUnless(hasattr(signal, 'pthread_sigmask'),
                         'need signal.pthread_sigmask()')
    def check_sigwait(self, test, signum):
        """Run test(signum) in a forked child with signum blocked.

        The child exits 0 on success, 1 on any failure; the parent
        asserts on the exit status.
        """
        # sigwait must be called with the signal blocked: since the current
        # process might have several threads running, we fork() a child process
        # to have a single thread.
        pid = os.fork()
        if pid == 0:
            # child: block and wait the signal
            try:
                signal.signal(signum, self.handler)
                signal.pthread_sigmask(signal.SIG_BLOCK, [signum])

                # Do the tests
                test(signum)

                # The handler must not be called on unblock
                try:
                    signal.pthread_sigmask(signal.SIG_UNBLOCK, [signum])
                except ZeroDivisionError:
                    print("the signal handler has been called",
                          file=sys.stderr)
                    os._exit(1)
            except BaseException as err:
                print("error: {}".format(err), file=sys.stderr)
                os._exit(1)
            else:
                os._exit(0)
        else:
            # parent: give the child some time to wait, send it the signal,
            # and check it correctly received it
            self.assertEqual(os.waitpid(pid, 0), (pid, 0))

    @unittest.skipUnless(hasattr(signal, 'sigwait'),
                         'need signal.sigwait()')
    @unittest.skipUnless(hasattr(os, 'fork'), 'need os.fork()')
    def test_sigwait(self):
        def test(signum):
            signal.alarm(1)
            received = signal.sigwait([signum])
            if received != signum:
                print("sigwait() received %s, not %s"
                      % (received, signum),
                      file=sys.stderr)
                os._exit(1)

        self.check_sigwait(test, signal.SIGALRM)

    @unittest.skipUnless(hasattr(signal, 'sigwait'),
                         'need signal.sigwait()')
    @unittest.skipIf(threading is None, "test needs threading module")
    @unittest.skipUnless(hasattr(os, 'fork'), 'need os.fork()')
    def test_sigwait_thread(self):
        def kill_later(signum):
            # wait until the main thread is waiting in sigwait()
            time.sleep(1)
            os.kill(os.getpid(), signum)

        def test(signum):
            killer = threading.Thread(target=kill_later, args=(signum,))
            killer.start()
            received = signal.sigwait([signum])
            if received != signum:
                print("sigwait() received %s, not %s" % (received, signum),
                      file=sys.stderr)
                os._exit(1)
            killer.join()

        self.check_sigwait(test, signal.SIGUSR1)

    @unittest.skipUnless(hasattr(signal, 'pthread_sigmask'),
                         'need signal.pthread_sigmask()')
    def test_pthread_sigmask_arguments(self):
        self.assertRaises(TypeError, signal.pthread_sigmask)
        self.assertRaises(TypeError, signal.pthread_sigmask, 1)
        self.assertRaises(TypeError, signal.pthread_sigmask, 1, 2, 3)
        self.assertRaises(OSError, signal.pthread_sigmask, 1700, [])

    @unittest.skipUnless(hasattr(signal, 'pthread_sigmask'),
                         'need signal.pthread_sigmask()')
    def test_pthread_sigmask(self):
        test_blocked_signals = self.can_test_blocked_signals(False)
        signum = signal.SIGUSR1

        # Install our signal handler
        old_handler = signal.signal(signum, self.handler)
        self.addCleanup(signal.signal, signum, old_handler)

        # Unblock SIGUSR1 (and copy the old mask) to test our signal handler
        old_mask = signal.pthread_sigmask(signal.SIG_UNBLOCK, [signum])
        self.addCleanup(signal.pthread_sigmask, signal.SIG_SETMASK, old_mask)
        with self.assertRaises(ZeroDivisionError):
            self.kill(signum)

        # Block and then raise SIGUSR1. The signal is blocked: the signal
        # handler is not called, and the signal is now pending
        signal.pthread_sigmask(signal.SIG_BLOCK, [signum])
        if test_blocked_signals:
            self.kill(signum)

        # Check the new mask
        blocked = self.read_sigmask()
        self.assertIn(signum, blocked)
        self.assertEqual(old_mask ^ blocked, {signum})

        # Unblock SIGUSR1
        if test_blocked_signals:
            with self.assertRaises(ZeroDivisionError):
                # unblocking the pending signal calls the signal handler
                # immediately
                signal.pthread_sigmask(signal.SIG_UNBLOCK, [signum])
        else:
            signal.pthread_sigmask(signal.SIG_UNBLOCK, [signum])
            with self.assertRaises(ZeroDivisionError):
                self.kill(signum)

        # Check the new mask
        unblocked = self.read_sigmask()
        self.assertNotIn(signum, unblocked)
        self.assertEqual(blocked ^ unblocked, {signum})
        self.assertSequenceEqual(old_mask, unblocked)
        # Finally, restore the previous signal handler and the signal mask
# Finally, restore the previous signal handler and the signal mask
def test_main():
    """Run every test class in this module, then reap leftover children."""
    test_cases = (
        PosixTests,
        InterProcessSignalTests,
        WakeupSignalTests,
        SiginterruptTest,
        ItimerTest,
        WindowsSignalTests,
        PendingSignalsTests,
    )
    try:
        support.run_unittest(*test_cases)
    finally:
        # Several tests fork; make sure no child outlives the run.
        support.reap_children()
if __name__ == "__main__":
test_main()
Issue #12316: Fix sigwait() test using threads
Spawn a new process instead of using fork(). Patch written by Charles-François
Natali.
import unittest
from test import support
from contextlib import closing
import gc
import pickle
import select
import signal
import struct
import subprocess
import traceback
import sys, os, time, errno
from test.script_helper import assert_python_ok
try:
import threading
except ImportError:
threading = None
if sys.platform in ('os2', 'riscos'):
raise unittest.SkipTest("Can't test signal on %s" % sys.platform)
class HandlerBCalled(Exception):
    """Raised by InterProcessSignalTests.handlerB so that delivery of
    SIGUSR1 becomes observable as an exception in the interrupted code."""
    pass
def exit_subprocess():
    """Use os._exit(0) to exit the current subprocess.

    Otherwise, the test catches the SystemExit and continues executing
    in parallel with the original test, so you wind up with an
    exponential number of tests running concurrently.
    """
    # os._exit() terminates immediately without raising SystemExit or
    # running cleanup handlers, unlike sys.exit().
    os._exit(0)
def ignoring_eintr(__func, *args, **kwargs):
    """Invoke ``__func(*args, **kwargs)``, returning None on EINTR.

    An EnvironmentError whose errno is anything other than EINTR is
    propagated unchanged; a successful call's result is returned as-is.
    """
    try:
        return __func(*args, **kwargs)
    except EnvironmentError as err:
        # Swallow only interrupted-system-call failures.
        if err.errno == errno.EINTR:
            return None
        raise
@unittest.skipIf(sys.platform == "win32", "Not valid on Windows")
class InterProcessSignalTests(unittest.TestCase):
    """Verify delivery of signals sent from other processes.

    The real assertions run in a forked child (run_test) so the main
    test-running process is insulated from the signals; results are
    reported back over a pipe (see test_main).
    """
    MAX_DURATION = 20   # Entire test should last at most 20 sec.

    def setUp(self):
        # Disable cyclic garbage collection for the duration of the test
        # (restored in tearDown if it was enabled).
        self.using_gc = gc.isenabled()
        gc.disable()

    def tearDown(self):
        if self.using_gc:
            gc.enable()

    def format_frame(self, frame, limit=None):
        """Render *frame*'s stack as one string, for diagnostics."""
        return ''.join(traceback.format_stack(frame, limit=limit))

    def handlerA(self, signum, frame):
        # SIGHUP handler: just records that it ran.
        self.a_called = True

    def handlerB(self, signum, frame):
        # SIGUSR1 handler: records that it ran, then raises so the
        # interrupted syscall observes the signal as an exception.
        self.b_called = True
        raise HandlerBCalled(signum, self.format_frame(frame))

    def wait(self, child):
        """Wait for child to finish, ignoring EINTR."""
        while True:
            try:
                child.wait()
                return
            except OSError as e:
                if e.errno != errno.EINTR:
                    raise

    def run_test(self):
        # Install handlers. This function runs in a sub-process, so we
        # don't worry about re-setting the default handlers.
        signal.signal(signal.SIGHUP, self.handlerA)
        signal.signal(signal.SIGUSR1, self.handlerB)
        signal.signal(signal.SIGUSR2, signal.SIG_IGN)
        signal.signal(signal.SIGALRM, signal.default_int_handler)

        # Variables the signals will modify:
        self.a_called = False
        self.b_called = False

        # Let the sub-processes know who to send signals to.
        pid = os.getpid()

        # SIGHUP: plain handler, no exception raised.
        child = ignoring_eintr(subprocess.Popen, ['kill', '-HUP', str(pid)])
        if child:
            self.wait(child)
            if not self.a_called:
                time.sleep(1)  # Give the signal time to be delivered.
        self.assertTrue(self.a_called)
        self.assertFalse(self.b_called)
        self.a_called = False

        # Make sure the signal isn't delivered while the previous
        # Popen object is being destroyed, because __del__ swallows
        # exceptions.
        del child
        try:
            child = subprocess.Popen(['kill', '-USR1', str(pid)])
            # This wait should be interrupted by the signal's exception.
            self.wait(child)
            time.sleep(1)  # Give the signal time to be delivered.
            self.fail('HandlerBCalled exception not thrown')
        except HandlerBCalled:
            self.assertTrue(self.b_called)
            self.assertFalse(self.a_called)

        # SIGUSR2 is set to SIG_IGN above, so this must be a no-op.
        child = ignoring_eintr(subprocess.Popen, ['kill', '-USR2', str(pid)])
        if child:
            self.wait(child)  # Nothing should happen.

        try:
            signal.alarm(1)
            # The race condition in pause doesn't matter in this case,
            # since alarm is going to raise a KeyboardException, which
            # will skip the call.
            signal.pause()
            # But if another signal arrives before the alarm, pause
            # may return early.
            time.sleep(1)
        except KeyboardInterrupt:
            pass
        except:
            self.fail("Some other exception woke us from pause: %s" %
                      traceback.format_exc())
        else:
            self.fail("pause returned of its own accord, and the signal"
                      " didn't arrive after another second.")

    # Issue 3864, unknown if this affects earlier versions of freebsd also
    @unittest.skipIf(sys.platform=='freebsd6',
        'inter process signals not reliable (do not mix well with threading) '
        'on freebsd6')
    def test_main(self):
        # This function spawns a child process to insulate the main
        # test-running process from all the signals. It then
        # communicates with that child process over a pipe and
        # re-raises information about any exceptions the child
        # throws. The real work happens in self.run_test().
        os_done_r, os_done_w = os.pipe()
        with closing(os.fdopen(os_done_r, 'rb')) as done_r, \
             closing(os.fdopen(os_done_w, 'wb')) as done_w:
            child = os.fork()
            if child == 0:
                # In the child process; run the test and report results
                # through the pipe.
                try:
                    done_r.close()
                    # Have to close done_w again here because
                    # exit_subprocess() will skip the enclosing with block.
                    with closing(done_w):
                        try:
                            self.run_test()
                        except:
                            # Ship the traceback to the parent as a pickle.
                            pickle.dump(traceback.format_exc(), done_w)
                        else:
                            pickle.dump(None, done_w)
                except:
                    print('Uh oh, raised from pickle.')
                    traceback.print_exc()
                finally:
                    exit_subprocess()

            done_w.close()
            # Block for up to MAX_DURATION seconds for the test to finish.
            r, w, x = select.select([done_r], [], [], self.MAX_DURATION)
            if done_r in r:
                tb = pickle.load(done_r)
                if tb:
                    self.fail(tb)
            else:
                os.kill(child, signal.SIGKILL)
                self.fail('Test deadlocked after %d seconds.' %
                          self.MAX_DURATION)
@unittest.skipIf(sys.platform == "win32", "Not valid on Windows")
class PosixTests(unittest.TestCase):
    """Argument validation for signal.signal()/getsignal() on POSIX."""

    def trivial_signal_handler(self, *args):
        # No-op handler, used only as a syntactically valid handler value.
        pass

    def test_out_of_range_signal_number_raises_error(self):
        self.assertRaises(ValueError, signal.getsignal, 4242)

        self.assertRaises(ValueError, signal.signal, 4242,
                          self.trivial_signal_handler)

    def test_setting_signal_handler_to_none_raises_error(self):
        self.assertRaises(TypeError, signal.signal,
                          signal.SIGUSR1, None)

    def test_getsignal(self):
        # Installing a handler must be observable via getsignal(), and
        # restoring the previous handler must round-trip as well.
        hup = signal.signal(signal.SIGHUP, self.trivial_signal_handler)
        self.assertEqual(signal.getsignal(signal.SIGHUP),
                         self.trivial_signal_handler)
        signal.signal(signal.SIGHUP, hup)
        self.assertEqual(signal.getsignal(signal.SIGHUP), hup)
@unittest.skipUnless(sys.platform == "win32", "Windows specific")
class WindowsSignalTests(unittest.TestCase):
    """Windows-only checks for the restricted set of supported signals."""

    def test_issue9324(self):
        # Updated for issue #10003, adding SIGBREAK
        handler = lambda x, y: None
        for sig in (signal.SIGABRT, signal.SIGBREAK, signal.SIGFPE,
                    signal.SIGILL, signal.SIGINT, signal.SIGSEGV,
                    signal.SIGTERM):
            # Set and then reset a handler for signals that work on windows
            signal.signal(sig, signal.signal(sig, handler))

        # Signal numbers outside the supported set must be rejected.
        with self.assertRaises(ValueError):
            signal.signal(-1, handler)

        with self.assertRaises(ValueError):
            signal.signal(7, handler)
@unittest.skipIf(sys.platform == "win32", "Not valid on Windows")
class WakeupSignalTests(unittest.TestCase):
TIMEOUT_FULL = 10
TIMEOUT_HALF = 5
def handler(self, signum, frame):
pass
def check_signum(self, *signals):
data = os.read(self.read, len(signals)+1)
raised = struct.unpack('%uB' % len(data), data)
# We don't care of the signal delivery order (it's not portable or
# reliable)
raised = set(raised)
signals = set(signals)
self.assertEqual(raised, signals)
def test_wakeup_fd_early(self):
import select
signal.alarm(1)
before_time = time.time()
# We attempt to get a signal during the sleep,
# before select is called
time.sleep(self.TIMEOUT_FULL)
mid_time = time.time()
self.assertTrue(mid_time - before_time < self.TIMEOUT_HALF)
select.select([self.read], [], [], self.TIMEOUT_FULL)
after_time = time.time()
self.assertTrue(after_time - mid_time < self.TIMEOUT_HALF)
self.check_signum(signal.SIGALRM)
def test_wakeup_fd_during(self):
import select
signal.alarm(1)
before_time = time.time()
# We attempt to get a signal during the select call
self.assertRaises(select.error, select.select,
[self.read], [], [], self.TIMEOUT_FULL)
after_time = time.time()
self.assertTrue(after_time - before_time < self.TIMEOUT_HALF)
self.check_signum(signal.SIGALRM)
def test_signum(self):
old_handler = signal.signal(signal.SIGUSR1, self.handler)
self.addCleanup(signal.signal, signal.SIGUSR1, old_handler)
os.kill(os.getpid(), signal.SIGUSR1)
os.kill(os.getpid(), signal.SIGALRM)
self.check_signum(signal.SIGUSR1, signal.SIGALRM)
@unittest.skipUnless(hasattr(signal, 'pthread_sigmask'),
'need signal.pthread_sigmask()')
@unittest.skipUnless(hasattr(signal, 'pthread_kill'),
'need signal.pthread_kill()')
def test_pending(self):
signum1 = signal.SIGUSR1
signum2 = signal.SIGUSR2
tid = threading.current_thread().ident
old_handler = signal.signal(signum1, self.handler)
self.addCleanup(signal.signal, signum1, old_handler)
old_handler = signal.signal(signum2, self.handler)
self.addCleanup(signal.signal, signum2, old_handler)
signal.pthread_sigmask(signal.SIG_BLOCK, (signum1, signum2))
signal.pthread_kill(tid, signum1)
signal.pthread_kill(tid, signum2)
# Unblocking the 2 signals calls the C signal handler twice
signal.pthread_sigmask(signal.SIG_UNBLOCK, (signum1, signum2))
self.check_signum(signum1, signum2)
def setUp(self):
    """Install a SIGALRM handler and route signal wakeups into a pipe
    whose read end the tests can poll."""
    import fcntl

    self.alrm = signal.signal(signal.SIGALRM, self.handler)
    self.read, self.write = os.pipe()
    # The wakeup fd must be non-blocking so the C-level write in the
    # signal machinery can never hang.
    flags = fcntl.fcntl(self.write, fcntl.F_GETFL, 0)
    flags = flags | os.O_NONBLOCK
    fcntl.fcntl(self.write, fcntl.F_SETFL, flags)
    self.old_wakeup = signal.set_wakeup_fd(self.write)
def tearDown(self):
    """Restore the previous wakeup fd and SIGALRM handler, close the pipe."""
    signal.set_wakeup_fd(self.old_wakeup)
    os.close(self.read)
    os.close(self.write)
    signal.signal(signal.SIGALRM, self.alrm)
@unittest.skipIf(sys.platform == "win32", "Not valid on Windows")
class SiginterruptTest(unittest.TestCase):
    """Tests for signal.siginterrupt(): whether a signal arriving during a
    blocking syscall (a pipe read here) makes it fail with EINTR or lets
    it be transparently restarted."""

    def setUp(self):
        """Install a no-op signal handler that can be set to allow
        interrupts or not, and arrange for the original signal handler to be
        re-installed when the test is finished.
        """
        self.signum = signal.SIGUSR1
        oldhandler = signal.signal(self.signum, lambda x,y: None)
        self.addCleanup(signal.signal, self.signum, oldhandler)

    def readpipe_interrupted(self):
        """Perform a read during which a signal will arrive. Return True if the
        read is interrupted by the signal and raises an exception. Return False
        if it returns normally.
        """
        # Create a pipe that can be used for the read. Also clean it up
        # when the test is over, since nothing else will (but see below for
        # the write end).
        r, w = os.pipe()
        self.addCleanup(os.close, r)

        # Create another process which can send a signal to this one to try
        # to interrupt the read.
        ppid = os.getpid()
        pid = os.fork()

        if pid == 0:
            # Child code: sleep to give the parent enough time to enter the
            # read() call (there's a race here, but it's really tricky to
            # eliminate it); then signal the parent process. Also, sleep
            # again to make it likely that the signal is delivered to the
            # parent process before the child exits. If the child exits
            # first, the write end of the pipe will be closed and the test
            # is invalid.
            try:
                time.sleep(0.2)
                os.kill(ppid, self.signum)
                time.sleep(0.2)
            finally:
                # No matter what, just exit as fast as possible now.
                exit_subprocess()
        else:
            # Parent code.
            # Make sure the child is eventually reaped, else it'll be a
            # zombie for the rest of the test suite run.
            self.addCleanup(os.waitpid, pid, 0)

            # Close the write end of the pipe. The child has a copy, so
            # it's not really closed until the child exits. We need it to
            # close when the child exits so that in the non-interrupt case
            # the read eventually completes, otherwise we could just close
            # it *after* the test.
            os.close(w)

            # Try the read and report whether it is interrupted or not to
            # the caller.
            try:
                d = os.read(r, 1)
                return False
            except OSError as err:
                # Only EINTR counts as "interrupted"; anything else is a
                # genuine failure and is propagated.
                if err.errno != errno.EINTR:
                    raise
                return True

    def test_without_siginterrupt(self):
        # If a signal handler is installed and siginterrupt is not called
        # at all, when that signal arrives, it interrupts a syscall that's in
        # progress.
        i = self.readpipe_interrupted()
        self.assertTrue(i)
        # Arrival of the signal shouldn't have changed anything.
        i = self.readpipe_interrupted()
        self.assertTrue(i)

    def test_siginterrupt_on(self):
        # If a signal handler is installed and siginterrupt is called with
        # a true value for the second argument, when that signal arrives, it
        # interrupts a syscall that's in progress.
        signal.siginterrupt(self.signum, 1)
        i = self.readpipe_interrupted()
        self.assertTrue(i)
        # Arrival of the signal shouldn't have changed anything.
        i = self.readpipe_interrupted()
        self.assertTrue(i)

    def test_siginterrupt_off(self):
        # If a signal handler is installed and siginterrupt is called with
        # a false value for the second argument, when that signal arrives, it
        # does not interrupt a syscall that's in progress.
        signal.siginterrupt(self.signum, 0)
        i = self.readpipe_interrupted()
        self.assertFalse(i)
        # Arrival of the signal shouldn't have changed anything.
        i = self.readpipe_interrupted()
        self.assertFalse(i)
@unittest.skipIf(sys.platform == "win32", "Not valid on Windows")
class ItimerTest(unittest.TestCase):
    """Tests for signal.setitimer()/getitimer() with the REAL, VIRTUAL and
    PROF interval timers."""

    def setUp(self):
        self.hndl_called = False
        self.hndl_count = 0
        self.itimer = None
        self.old_alarm = signal.signal(signal.SIGALRM, self.sig_alrm)

    def tearDown(self):
        signal.signal(signal.SIGALRM, self.old_alarm)
        if self.itimer is not None: # test_itimer_exc doesn't change this attr
            # just ensure that itimer is stopped
            signal.setitimer(self.itimer, 0)

    def sig_alrm(self, *args):
        """SIGALRM handler: record that it ran."""
        self.hndl_called = True

    def sig_vtalrm(self, *args):
        """SIGVTALRM handler: disable the virtual timer after 3 calls and
        fail loudly if it keeps firing afterwards."""
        self.hndl_called = True

        if self.hndl_count > 3:
            # it shouldn't be here, because it should have been disabled.
            raise signal.ItimerError("setitimer didn't disable ITIMER_VIRTUAL "
                "timer.")
        elif self.hndl_count == 3:
            # disable ITIMER_VIRTUAL, this function shouldn't be called anymore
            signal.setitimer(signal.ITIMER_VIRTUAL, 0)

        self.hndl_count += 1

    def sig_prof(self, *args):
        """SIGPROF handler: record the call and stop the profiling timer."""
        self.hndl_called = True
        signal.setitimer(signal.ITIMER_PROF, 0)

    def test_itimer_exc(self):
        # XXX I'm assuming -1 is an invalid itimer, but maybe some platform
        # defines it ?
        self.assertRaises(signal.ItimerError, signal.setitimer, -1, 0)
        # Negative times are treated as zero on some platforms.
        if 0:
            self.assertRaises(signal.ItimerError,
                signal.setitimer, signal.ITIMER_REAL, -1)

    def test_itimer_real(self):
        self.itimer = signal.ITIMER_REAL
        signal.setitimer(self.itimer, 1.0)
        # pause() returns once the SIGALRM handler has run.
        signal.pause()
        self.assertEqual(self.hndl_called, True)

    # Issue 3864, unknown if this affects earlier versions of freebsd also
    @unittest.skipIf(sys.platform in ('freebsd6', 'netbsd5'),
        'itimer not reliable (does not mix well with threading) on some BSDs.')
    def test_itimer_virtual(self):
        self.itimer = signal.ITIMER_VIRTUAL
        signal.signal(signal.SIGVTALRM, self.sig_vtalrm)
        signal.setitimer(self.itimer, 0.3, 0.2)

        start_time = time.time()
        while time.time() - start_time < 60.0:
            # use up some virtual time by doing real work
            _ = pow(12345, 67890, 10000019)
            if signal.getitimer(self.itimer) == (0.0, 0.0):
                break # sig_vtalrm handler stopped this itimer
        else: # Issue 8424
            self.skipTest("timeout: likely cause: machine too slow or load too "
                "high")

        # virtual itimer should be (0.0, 0.0) now
        self.assertEqual(signal.getitimer(self.itimer), (0.0, 0.0))
        # and the handler should have been called
        self.assertEqual(self.hndl_called, True)

    # Issue 3864, unknown if this affects earlier versions of freebsd also
    @unittest.skipIf(sys.platform=='freebsd6',
        'itimer not reliable (does not mix well with threading) on freebsd6')
    def test_itimer_prof(self):
        self.itimer = signal.ITIMER_PROF
        signal.signal(signal.SIGPROF, self.sig_prof)
        signal.setitimer(self.itimer, 0.2, 0.2)

        start_time = time.time()
        while time.time() - start_time < 60.0:
            # do some work
            _ = pow(12345, 67890, 10000019)
            if signal.getitimer(self.itimer) == (0.0, 0.0):
                break # sig_prof handler stopped this itimer
        else: # Issue 8424
            self.skipTest("timeout: likely cause: machine too slow or load too "
                "high")

        # profiling itimer should be (0.0, 0.0) now
        self.assertEqual(signal.getitimer(self.itimer), (0.0, 0.0))
        # and the handler should have been called
        self.assertEqual(self.hndl_called, True)
class PendingSignalsTests(unittest.TestCase):
    """
    Test pthread_sigmask(), pthread_kill(), sigpending() and sigwait()
    functions.
    """
    def setUp(self):
        self.has_pthread_kill = hasattr(signal, 'pthread_kill')

    def handler(self, signum, frame):
        # Raise ZeroDivisionError so the tests can detect (via
        # assertRaises) exactly when the handler was invoked.
        1/0

    def read_sigmask(self):
        """Return the current signal mask (SIG_BLOCK with an empty set
        leaves the mask unchanged and returns the old one)."""
        return signal.pthread_sigmask(signal.SIG_BLOCK, [])

    def can_test_blocked_signals(self, skip):
        """
        Check if a blocked signal can be raised to the main thread without
        calling its signal handler. We need pthread_kill() or exactly one
        thread (the main thread).

        Return True if it's possible. Otherwise, return False and print a
        warning if skip is False, or raise a SkipTest exception if skip is
        True.
        """
        if self.has_pthread_kill:
            return True

        # The fault handler timeout thread masks all signals. If the main
        # thread masks also SIGUSR1, all threads mask this signal. In this
        # case, if we send SIGUSR1 to the process, the signal is pending in the
        # main or the faulthandler timeout thread. Unblock SIGUSR1 in the main
        # thread calls the signal handler only if the signal is pending for the
        # main thread. Stop the faulthandler timeout thread to workaround this
        # problem.
        import faulthandler
        faulthandler.cancel_dump_tracebacks_later()

        # Issue #11998: The _tkinter module loads the Tcl library which
        # creates a thread waiting events in select(). This thread receives
        # signals blocked by all other threads. We cannot test blocked
        # signals
        if '_tkinter' in sys.modules:
            message = ("_tkinter is loaded and pthread_kill() is missing, "
                       "cannot test blocked signals (issue #11998)")
            if skip:
                self.skipTest(message)
            else:
                print("WARNING: %s" % message)
            return False
        return True

    def kill(self, signum):
        """Deliver signum to the current thread if possible, else to the
        whole process."""
        if self.has_pthread_kill:
            tid = threading.get_ident()
            signal.pthread_kill(tid, signum)
        else:
            pid = os.getpid()
            os.kill(pid, signum)

    @unittest.skipUnless(hasattr(signal, 'sigpending'),
                         'need signal.sigpending()')
    def test_sigpending_empty(self):
        self.assertEqual(signal.sigpending(), set())

    @unittest.skipUnless(hasattr(signal, 'pthread_sigmask'),
                         'need signal.pthread_sigmask()')
    @unittest.skipUnless(hasattr(signal, 'sigpending'),
                         'need signal.sigpending()')
    def test_sigpending(self):
        self.can_test_blocked_signals(True)

        signum = signal.SIGUSR1
        old_handler = signal.signal(signum, self.handler)
        self.addCleanup(signal.signal, signum, old_handler)

        # While blocked, the raised signal shows up in sigpending() and
        # the handler does not run until the signal is unblocked.
        signal.pthread_sigmask(signal.SIG_BLOCK, [signum])
        self.kill(signum)
        self.assertEqual(signal.sigpending(), {signum})
        with self.assertRaises(ZeroDivisionError):
            signal.pthread_sigmask(signal.SIG_UNBLOCK, [signum])

    @unittest.skipUnless(hasattr(signal, 'pthread_kill'),
                         'need signal.pthread_kill()')
    def test_pthread_kill(self):
        signum = signal.SIGUSR1
        current = threading.get_ident()
        old_handler = signal.signal(signum, self.handler)
        self.addCleanup(signal.signal, signum, old_handler)
        with self.assertRaises(ZeroDivisionError):
            signal.pthread_kill(current, signum)

    @unittest.skipUnless(hasattr(signal, 'sigwait'),
                         'need signal.sigwait()')
    @unittest.skipUnless(hasattr(signal, 'pthread_sigmask'),
                         'need signal.pthread_sigmask()')
    @unittest.skipUnless(hasattr(os, 'fork'), 'need os.fork()')
    def test_sigwait(self):
        def test(signum):
            # Arm an alarm, then wait for it; sigwait() must return the
            # received signal number.
            signal.alarm(1)
            received = signal.sigwait([signum])
            if received != signum:
                print("sigwait() received %s, not %s"
                      % (received, signum),
                      file=sys.stderr)
                os._exit(1)

        signum = signal.SIGALRM

        # sigwait must be called with the signal blocked: since the current
        # process might have several threads running, we fork() a child process
        # to have a single thread.
        pid = os.fork()
        if pid == 0:
            # child: block and wait the signal
            try:
                signal.signal(signum, self.handler)
                signal.pthread_sigmask(signal.SIG_BLOCK, [signum])

                # Do the tests
                test(signum)

                # The handler must not be called on unblock
                try:
                    signal.pthread_sigmask(signal.SIG_UNBLOCK, [signum])
                except ZeroDivisionError:
                    print("the signal handler has been called",
                          file=sys.stderr)
                    os._exit(1)
            except BaseException as err:
                print("error: {}".format(err), file=sys.stderr)
                os._exit(1)
            else:
                os._exit(0)
        else:
            # parent: check that the child correctly received the signal
            self.assertEqual(os.waitpid(pid, 0), (pid, 0))

    @unittest.skipUnless(hasattr(signal, 'sigwait'),
                         'need signal.sigwait()')
    @unittest.skipUnless(hasattr(signal, 'pthread_sigmask'),
                         'need signal.pthread_sigmask()')
    @unittest.skipIf(threading is None, "test needs threading module")
    def test_sigwait_thread(self):
        # Check that calling sigwait() from a thread doesn't suspend the whole
        # process. A new interpreter is spawned to avoid problems when mixing
        # threads and fork(): only async-safe functions are allowed between
        # fork() and exec().
        assert_python_ok("-c", """if True:
            import os, threading, sys, time, signal

            # the default handler terminates the process
            signum = signal.SIGUSR1

            def kill_later():
                # wait until the main thread is waiting in sigwait()
                time.sleep(1)
                os.kill(os.getpid(), signum)

            # the signal must be blocked by all the threads
            signal.pthread_sigmask(signal.SIG_BLOCK, [signum])
            killer = threading.Thread(target=kill_later)
            killer.start()
            received = signal.sigwait([signum])
            if received != signum:
                print("sigwait() received %s, not %s" % (received, signum),
                      file=sys.stderr)
                sys.exit(1)
            killer.join()
            # unblock the signal, which should have been cleared by sigwait()
            signal.pthread_sigmask(signal.SIG_UNBLOCK, [signum])
        """)

    @unittest.skipUnless(hasattr(signal, 'pthread_sigmask'),
                         'need signal.pthread_sigmask()')
    def test_pthread_sigmask_arguments(self):
        self.assertRaises(TypeError, signal.pthread_sigmask)
        self.assertRaises(TypeError, signal.pthread_sigmask, 1)
        self.assertRaises(TypeError, signal.pthread_sigmask, 1, 2, 3)
        # 1700 is not a valid "how" value.
        self.assertRaises(OSError, signal.pthread_sigmask, 1700, [])

    @unittest.skipUnless(hasattr(signal, 'pthread_sigmask'),
                         'need signal.pthread_sigmask()')
    def test_pthread_sigmask(self):
        test_blocked_signals = self.can_test_blocked_signals(False)
        signum = signal.SIGUSR1

        # Install our signal handler
        old_handler = signal.signal(signum, self.handler)
        self.addCleanup(signal.signal, signum, old_handler)

        # Unblock SIGUSR1 (and copy the old mask) to test our signal handler
        old_mask = signal.pthread_sigmask(signal.SIG_UNBLOCK, [signum])
        self.addCleanup(signal.pthread_sigmask, signal.SIG_SETMASK, old_mask)
        with self.assertRaises(ZeroDivisionError):
            self.kill(signum)

        # Block and then raise SIGUSR1. The signal is blocked: the signal
        # handler is not called, and the signal is now pending
        signal.pthread_sigmask(signal.SIG_BLOCK, [signum])
        if test_blocked_signals:
            self.kill(signum)

        # Check the new mask
        blocked = self.read_sigmask()
        self.assertIn(signum, blocked)
        self.assertEqual(old_mask ^ blocked, {signum})

        # Unblock SIGUSR1
        if test_blocked_signals:
            with self.assertRaises(ZeroDivisionError):
                # unblocking the pending signal calls the signal handler
                # immediately
                signal.pthread_sigmask(signal.SIG_UNBLOCK, [signum])
        else:
            signal.pthread_sigmask(signal.SIG_UNBLOCK, [signum])
            with self.assertRaises(ZeroDivisionError):
                self.kill(signum)

        # Check the new mask
        unblocked = self.read_sigmask()
        self.assertNotIn(signum, unblocked)
        self.assertEqual(blocked ^ unblocked, {signum})
        self.assertSequenceEqual(old_mask, unblocked)
        # Finally, restore the previous signal handler and the signal mask
# Finally, restore the previous signal handler and the signal mask
def test_main():
    """Run all the signal test suites; platform-specific ones skip
    themselves via their decorators."""
    try:
        support.run_unittest(PosixTests, InterProcessSignalTests,
                             WakeupSignalTests, SiginterruptTest,
                             ItimerTest, WindowsSignalTests,
                             PendingSignalsTests)
    finally:
        # Make sure no forked children from the tests are left behind.
        support.reap_children()


if __name__ == "__main__":
    test_main()
|
# -*- encoding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2009 Tiny SPRL (<http://tiny.be>). All Rights Reserved
# $Id$
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
"""
Miscellaneous tools used by OpenERP.
"""
import os, time, sys
import inspect
from config import config
import zipfile
import release
import socket
if sys.version_info[:2] < (2, 4):
from threadinglocal import local
else:
from threading import local
from itertools import izip
# initialize a database with base/base.sql
def init_db(cr):
    """Bootstrap a fresh database on cursor *cr*: execute base/base.sql,
    then register every known addon module (with its category tree,
    state and dependencies) in ir_module_module.

    @param cr: database cursor (committed twice: after base.sql and at the end)
    """
    import addons
    f = addons.get_module_resource('base', 'base.sql')
    # base.sql is executed statement by statement (split on ';').
    for line in file_open(f).read().split(';'):
        if (len(line)>0) and (not line.isspace()):
            cr.execute(line)
    cr.commit()

    for i in addons.get_modules():
        terp_file = addons.get_module_resource(i, '__terp__.py')
        mod_path = addons.get_module_path(i)
        if not mod_path:
            continue
        info = False
        # The module descriptor may live in a directory or in a .zip addon.
        if os.path.isfile(terp_file) or os.path.isfile(mod_path+'.zip'):
            info = eval(file_open(terp_file).read())
        if info:
            categs = info.get('category', 'Uncategorized').split('/')
            p_id = None
            # Walk the 'A/B/C' category path, creating missing
            # ir_module_category rows; p_id tracks the parent at each level.
            while categs:
                if p_id is not None:
                    cr.execute('select id \
                            from ir_module_category \
                            where name=%s and parent_id=%s', (categs[0], p_id))
                else:
                    cr.execute('select id \
                            from ir_module_category \
                            where name=%s and parent_id is NULL', (categs[0],))
                c_id = cr.fetchone()
                if not c_id:
                    cr.execute('select nextval(\'ir_module_category_id_seq\')')
                    c_id = cr.fetchone()[0]
                    cr.execute('insert into ir_module_category \
                            (id, name, parent_id) \
                            values (%s, %s, %s)', (c_id, categs[0], p_id))
                else:
                    c_id = c_id[0]
                p_id = c_id
                categs = categs[1:]

            # Map the descriptor flags to the initial module state.
            active = info.get('active', False)
            installable = info.get('installable', True)
            if installable:
                if active:
                    state = 'to install'
                else:
                    state = 'uninstalled'
            else:
                state = 'uninstallable'
            cr.execute('select nextval(\'ir_module_module_id_seq\')')
            id = cr.fetchone()[0]
            cr.execute('insert into ir_module_module \
                    (id, author, website, name, shortdesc, description, \
                    category_id, state) \
                    values (%s, %s, %s, %s, %s, %s, %s, %s)', (
                id, info.get('author', ''),
                info.get('website', ''), i, info.get('name', False),
                info.get('description', ''), p_id, state))
            cr.execute('insert into ir_model_data \
                (name,model,module, res_id) values (%s,%s,%s,%s)', (
                    'module_meta_information', 'ir.module.module', i, id))
            dependencies = info.get('depends', [])
            for d in dependencies:
                cr.execute('insert into ir_module_module_dependency \
                        (module_id,name) values (%s, %s)', (id, d))
            cr.commit()
def find_in_path(name):
    """Search the directories of the PATH environment variable for an
    executable file (or symlink) named *name*.

    @param name: bare file name to look for
    @return: full path of the first match, or None if not found
    """
    # os.pathsep is ';' on Windows and ':' elsewhere, so there is no need
    # to special-case os.name by hand. Use .get() so a missing PATH simply
    # yields no match instead of raising KeyError.
    for folder in os.environ.get('PATH', '').split(os.pathsep):
        if not os.path.isdir(folder):
            continue
        candidate = os.path.join(folder, name)
        if os.path.isfile(candidate) or os.path.islink(candidate):
            return candidate
    return None
def find_pg_tool(name):
    """Locate a PostgreSQL client tool (pg_dump, pg_restore, ...).

    If the 'pg_path' configuration option is set (and not the literal
    string 'None'), the tool is taken from that directory; otherwise the
    system PATH is searched.
    """
    pg_path = config['pg_path']
    if pg_path and pg_path != 'None':
        return os.path.join(pg_path, name)
    return find_in_path(name)
def exec_pg_command(name, *args):
    """Run a PostgreSQL tool synchronously and return its exit status.

    @raise Exception: if the tool cannot be located
    """
    prog = find_pg_tool(name)
    if not prog:
        raise Exception('Couldn\'t find %s' % name)
    # spawnv expects argv[0] to be the program name.
    argv = (os.path.basename(prog),) + args
    return os.spawnv(os.P_WAIT, prog, argv)
def exec_pg_command_pipe(name, *args):
    """Run a PostgreSQL tool and return its (stdin, stdout) binary pipes.

    @raise Exception: if the tool cannot be located
    """
    prog = find_pg_tool(name)
    if not prog:
        raise Exception('Couldn\'t find %s' % name)
    joined_args = ' '.join(args)
    # On Windows the program path may contain spaces, so quote it.
    if os.name == "nt":
        cmd = '"%s" %s' % (prog, joined_args)
    else:
        cmd = '%s %s' % (prog, joined_args)
    return os.popen2(cmd, 'b')
def exec_command_pipe(name, *args):
    """Run a program found on PATH and return its (stdin, stdout) binary pipes.

    @raise Exception: if the program cannot be located
    """
    prog = find_in_path(name)
    if not prog:
        raise Exception('Couldn\'t find %s' % name)
    joined_args = ' '.join(args)
    # On Windows the program path may contain spaces, so quote it.
    if os.name == "nt":
        cmd = '"%s" %s' % (prog, joined_args)
    else:
        cmd = '%s %s' % (prog, joined_args)
    return os.popen2(cmd, 'b')
#----------------------------------------------------------
# File paths
#----------------------------------------------------------
#file_path_root = os.getcwd()
#file_path_addons = os.path.join(file_path_root, 'addons')
def file_open(name, mode="r", subdir='addons', pathinfo=False):
    """Open a file from the OpenERP root, using a subdir folder.

    >>> file_open('hr/report/timesheer.xsl')
    >>> file_open('addons/hr/report/timesheet.xsl')
    >>> file_open('../../base/report/rml_template.xsl', subdir='addons/hr/report', pathinfo=True)

    Lookup order: the addons_path first, then the root_path, and finally
    inside a zipped addon (module.zip) on the way down the path.

    @param name: name of the file
    @param mode: file open mode
    @param subdir: subdirectory
    @param pathinfo: if True returns tuple (fileobject, filepath)

    @return: fileobject if pathinfo is False else (fileobject, filepath)
    """
    adp = os.path.normcase(os.path.abspath(config['addons_path']))
    rtp = os.path.normcase(os.path.abspath(config['root_path']))

    # Normalize an explicit 'addons/...' prefix into subdir='addons'.
    if name.replace(os.path.sep, '/').startswith('addons/'):
        subdir = 'addons'
        name = name[7:]

    # First try to locate in addons_path
    if subdir:
        subdir2 = subdir
        if subdir2.replace(os.path.sep, '/').startswith('addons/'):
            subdir2 = subdir2[7:]
        # 'addons' itself is implied by adp, so drop it.
        subdir2 = (subdir2 != 'addons' or None) and subdir2

        try:
            if subdir2:
                fn = os.path.join(adp, subdir2, name)
            else:
                fn = os.path.join(adp, name)
            fn = os.path.normpath(fn)
            # Recurse with subdir=None to run the root/zip lookup below
            # against the addons_path candidate.
            fo = file_open(fn, mode=mode, subdir=None, pathinfo=pathinfo)
            if pathinfo:
                return fo, fn
            return fo
        except IOError, e:
            # Not found under addons_path; fall through to root_path.
            pass

    if subdir:
        name = os.path.join(rtp, subdir, name)
    else:
        name = os.path.join(rtp, name)

    name = os.path.normpath(name)

    # Check for a zipfile in the path: walk upwards, and for each ancestor
    # directory <head>, test whether <head>.zip exists and contains the
    # remaining relative path (zipname).
    head = name
    zipname = False
    name2 = False
    while True:
        head, tail = os.path.split(head)
        if not tail:
            break
        if zipname:
            zipname = os.path.join(tail, zipname)
        else:
            zipname = tail
        if zipfile.is_zipfile(head+'.zip'):
            from cStringIO import StringIO
            zfile = zipfile.ZipFile(head+'.zip')
            try:
                # Return the member wrapped in a StringIO so callers get a
                # seekable file-like object.
                fo = StringIO()
                fo.write(zfile.read(os.path.join(
                    os.path.basename(head), zipname).replace(
                        os.sep, '/')))
                fo.seek(0)
                if pathinfo:
                    return fo, name
                return fo
            except:
                # Member missing from the zip; remember the candidate path
                # for the plain-file fallback below.
                name2 = os.path.normpath(os.path.join(head + '.zip', zipname))
                pass

    for i in (name2, name):
        if i and os.path.isfile(i):
            fo = file(i, mode)
            if pathinfo:
                return fo, i
            return fo
    if os.path.splitext(name)[1] == '.rml':
        raise IOError, 'Report %s doesn\'t exist or deleted : ' %str(name)
    raise IOError, 'File not found : '+str(name)
#----------------------------------------------------------
# iterables
#----------------------------------------------------------
def flatten(list):
    """Flatten nested iterables into a single flat list.

    Author: Christophe Simonis (christophe@tinyerp.com)

    Examples:
    >>> flatten(['a'])
    ['a']
    >>> flatten('b')
    ['b']
    >>> flatten( [] )
    []
    >>> flatten( [[], [[]]] )
    []
    >>> flatten( [[['a','b'], 'c'], 'd', ['e', [], 'f']] )
    ['a', 'b', 'c', 'd', 'e', 'f']
    >>> t = (1,2,(3,), [4, 5, [6, [7], (8, 9), ([10, 11, (12, 13)]), [14, [], (15,)], []]])
    >>> flatten(t)
    [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]
    """
    # NOTE: the parameter is named 'list' (shadowing the builtin) for
    # backward compatibility with the original signature.
    def isiterable(x):
        # str is excluded explicitly: under Python 2 str has no __iter__,
        # but under Python 3 it does, which would recurse forever on
        # single characters.
        return hasattr(x, "__iter__") and not isinstance(x, str)

    r = []
    for e in list:
        if isiterable(e):
            # The original used map(r.append, flatten(e)) for its side
            # effect, which silently appends nothing under Python 3's
            # lazy map(); extend() is correct on both.
            r.extend(flatten(e))
        else:
            r.append(e)
    return r
def reverse_enumerate(l):
    """Like enumerate, but iterating from the last element to the first.

    >>> a = ['a', 'b', 'c']
    >>> it = reverse_enumerate(a)
    >>> it.next()
    (2, 'c')
    >>> it.next()
    (1, 'b')
    >>> it.next()
    (0, 'a')
    >>> it.next()
    Traceback (most recent call last):
      File "<stdin>", line 1, in <module>
    StopIteration
    """
    # izip/xrange: this module targets Python 2; lazy pairing of the
    # descending indices with the reversed sequence.
    return izip(xrange(len(l)-1, -1, -1), reversed(l))
#----------------------------------------------------------
# Emails
#----------------------------------------------------------
def email_send(email_from, email_to, subject, body, email_cc=None, email_bcc=None, reply_to=False,
attach=None, tinycrm=False, ssl=False, debug=False, subtype='plain', x_headers=None):
"""Send an email."""
import smtplib
from email.MIMEText import MIMEText
from email.MIMEBase import MIMEBase
from email.MIMEMultipart import MIMEMultipart
from email.Header import Header
from email.Utils import formatdate, COMMASPACE
from email.Utils import formatdate, COMMASPACE
from email import Encoders
if x_headers is None:
x_headers = {}
if not ssl:
ssl = config.get('smtp_ssl', False)
if not email_from and not config['email_from']:
raise Exception("No Email sender by default, see config file")
if not email_cc:
email_cc = []
if not email_bcc:
email_bcc = []
if not attach:
msg = MIMEText(body or '',_subtype=subtype,_charset='utf-8')
else:
msg = MIMEMultipart()
msg['Subject'] = Header(ustr(subject), 'utf-8')
msg['From'] = email_from
del msg['Reply-To']
if reply_to:
msg['Reply-To'] = reply_to
else:
msg['Reply-To'] = msg['From']
msg['To'] = COMMASPACE.join(email_to)
if email_cc:
msg['Cc'] = COMMASPACE.join(email_cc)
if email_bcc:
msg['Bcc'] = COMMASPACE.join(email_bcc)
msg['Date'] = formatdate(localtime=True)
# Add OpenERP Server information
msg['X-Generated-By'] = 'OpenERP (http://www.openerp.com)'
msg['X-OpenERP-Server-Host'] = socket.gethostname()
msg['X-OpenERP-Server-Version'] = release.version
# Add dynamic X Header
for key, value in x_headers.items():
msg['X-OpenERP-%s' % key] = str(value)
if tinycrm:
msg['Message-Id'] = "<%s-tinycrm-%s@%s>" % (time.time(), tinycrm, socket.gethostname())
if attach:
msg.attach( MIMEText(body or '', _charset='utf-8', _subtype=subtype) )
for (fname,fcontent) in attach:
part = MIMEBase('application', "octet-stream")
part.set_payload( fcontent )
Encoders.encode_base64(part)
part.add_header('Content-Disposition', 'attachment; filename="%s"' % (fname,))
msg.attach(part)
try:
s = smtplib.SMTP()
if debug:
s.debuglevel = 5
s.connect(config['smtp_server'], config['smtp_port'])
if ssl:
s.ehlo()
s.starttls()
s.ehlo()
if config['smtp_user'] or config['smtp_password']:
s.login(config['smtp_user'], config['smtp_password'])
s.sendmail(email_from,
flatten([email_to, email_cc, email_bcc]),
msg.as_string()
)
s.quit()
except Exception, e:
import netsvc
netsvc.Logger().notifyChannel('email_send', netsvc.LOG_ERROR, e)
return False
return True
#----------------------------------------------------------
# SMS
#----------------------------------------------------------
# text must be latin-1 encoded
def sms_send(user, password, api_id, text, to):
    """Send an SMS through the urlsms.com HTTP gateway.

    NOTE(review): credentials and message are sent as plain-text GET
    parameters over HTTP -- confirm this is acceptable for the gateway.

    @param user: gateway account user id
    @param password: gateway account password
    @param api_id: gateway sender id
    @param text: message text (must be latin-1 encoded, see module comment)
    @param to: recipient mobile number
    @return: always True (errors are currently not detected)
    """
    import urllib
    url = "http://api.urlsms.com/SendSMS.aspx"
    #url = "http://196.7.150.220/http/sendmsg"
    params = urllib.urlencode({'UserID': user, 'Password': password, 'SenderID': api_id, 'MsgText': text, 'RecipientMobileNo':to})
    f = urllib.urlopen(url+"?"+params)
    # FIXME: Use the logger if there is an error
    return True
#---------------------------------------------------------
# Class that stores an updateable string (used in wizards)
#---------------------------------------------------------
class UpdateableStr(local):
    """Thread-local holder for a mutable string, used by wizards.

    The wrapped value lives in the public ``string`` attribute and can be
    replaced at any time; str()/repr() always reflect the current value.
    """

    def __init__(self, string=''):
        self.string = string

    def __str__(self):
        return str(self.string)

    # repr is intentionally identical to str for this helper.
    __repr__ = __str__

    def __nonzero__(self):
        # Python 2 truth protocol: empty string means False.
        return bool(self.string)
class UpdateableDict(local):
    '''Stores an updateable dict to use in wizards

    Thread-local wrapper delegating the whole mapping protocol to the
    underlying ``dict`` attribute, which can be swapped at any time.
    '''

    def __init__(self, dict=None):
        if dict is None:
            dict = {}
        self.dict = dict

    def __str__(self):
        return str(self.dict)

    def __repr__(self):
        return str(self.dict)

    def clear(self):
        return self.dict.clear()

    def keys(self):
        return self.dict.keys()

    def __setitem__(self, i, y):
        self.dict.__setitem__(i, y)

    # NOTE: the original defined __getitem__ twice (identical delegation);
    # the duplicate has been removed.
    def __getitem__(self, i):
        return self.dict.__getitem__(i)

    def copy(self):
        return self.dict.copy()

    def iteritems(self):
        return self.dict.iteritems()

    def iterkeys(self):
        return self.dict.iterkeys()

    def itervalues(self):
        return self.dict.itervalues()

    def pop(self, k, d=None):
        return self.dict.pop(k, d)

    def popitem(self):
        return self.dict.popitem()

    def setdefault(self, k, d=None):
        return self.dict.setdefault(k, d)

    def update(self, E, **F):
        # BUGFIX: was self.dict.update(E, F), which passed the keyword dict
        # as a second positional argument and raised TypeError.
        return self.dict.update(E, **F)

    def values(self):
        return self.dict.values()

    def get(self, k, d=None):
        return self.dict.get(k, d)

    def has_key(self, k):
        return self.dict.has_key(k)

    def items(self):
        return self.dict.items()

    def __cmp__(self, y):
        return self.dict.__cmp__(y)

    def __contains__(self, k):
        return self.dict.__contains__(k)

    def __delitem__(self, y):
        return self.dict.__delitem__(y)

    def __eq__(self, y):
        return self.dict.__eq__(y)

    def __ge__(self, y):
        return self.dict.__ge__(y)

    def __gt__(self, y):
        return self.dict.__gt__(y)

    def __hash__(self):
        return self.dict.__hash__()

    def __iter__(self):
        return self.dict.__iter__()

    def __le__(self, y):
        return self.dict.__le__(y)

    def __len__(self):
        return self.dict.__len__()

    def __lt__(self, y):
        return self.dict.__lt__(y)

    def __ne__(self, y):
        return self.dict.__ne__(y)
# Don't use ! Use res.currency.round()
class currency(float):
    """Deprecated float subclass rounded to a fixed decimal accuracy."""

    def __init__(self, value, accuracy=2, rounding=None):
        # The value itself was already rounded in __new__ (floats are
        # immutable); here we only record the display parameters.
        if rounding is None:
            rounding=10**-accuracy
        self.rounding=rounding
        self.accuracy=accuracy

    def __new__(cls, value, accuracy=2, rounding=None):
        # The stored float value is round(value, accuracy).
        return float.__new__(cls, round(value, accuracy))

    #def __str__(self):
    #   display_value = int(self*(10**(-self.accuracy))/self.rounding)*self.rounding/(10**(-self.accuracy))
    #   return str(display_value)
def is_hashable(h):
    """Return True if hash(h) succeeds, False otherwise."""
    try:
        hash(h)
    except TypeError:
        # Unhashable types (lists, dicts, sets, ...) raise TypeError.
        return False
    return True
class cache(object):
    """
    Use it as a decorator of the function you plan to cache
    Timeout: 0 = no timeout, otherwise in seconds

    Cache keys are built from the database name plus the (normalized)
    call arguments; `multi` names an argument holding a list of ids so
    each id gets its own cache entry.
    """
    # Registry of every cache instance, used by clean_caches_for_db().
    __caches = []

    def __init__(self, timeout=None, skiparg=2, multi=None):
        assert skiparg >= 2 # at least self and cr
        if timeout is None:
            self.timeout = config['cache_timeout']
        else:
            self.timeout = timeout
        self.skiparg = skiparg
        self.multi = multi
        # lasttime: last expiry sweep; entries older than timeout are purged.
        self.lasttime = time.time()
        self.cache = {}
        self.fun = None
        cache.__caches.append(self)

    def _generate_keys(self, dbname, kwargs2):
        """
        Generate keys depending of the arguments and the self.multi value

        Yields (key, id) pairs; id is None when multi is not used.
        """
        def to_tuple(d):
            # Sort items so key generation is order-independent.
            i = d.items()
            i.sort(key=lambda (x,y): x)
            return tuple(i)

        if not self.multi:
            key = (('dbname', dbname),) + to_tuple(kwargs2)
            yield key, None
        else:
            multis = kwargs2[self.multi][:]
            for id in multis:
                kwargs2[self.multi] = (id,)
                key = (('dbname', dbname),) + to_tuple(kwargs2)
                yield key, id

    def _unify_args(self, *args, **kwargs):
        # Update named arguments with positional argument values (without self and cr)
        kwargs2 = self.fun_default_values.copy()
        kwargs2.update(kwargs)
        kwargs2.update(dict(zip(self.fun_arg_names, args[self.skiparg-2:])))
        # Make every value hashable so it can be part of a cache key.
        for k in kwargs2:
            if isinstance(kwargs2[k], (list, dict, set)):
                kwargs2[k] = tuple(kwargs2[k])
            elif not is_hashable(kwargs2[k]):
                kwargs2[k] = repr(kwargs2[k])
        return kwargs2

    def clear(self, dbname, *args, **kwargs):
        """clear the cache for database dbname
        if *args and **kwargs are both empty, clear all the keys related to this database
        """
        if not args and not kwargs:
            # key[0] is ('dbname', <dbname>), see _generate_keys().
            keys_to_del = [key for key in self.cache if key[0][1] == dbname]
        else:
            kwargs2 = self._unify_args(*args, **kwargs)
            keys_to_del = [key for key, _ in self._generate_keys(dbname, kwargs2) if key in self.cache]

        for key in keys_to_del:
            del self.cache[key]

    @classmethod
    def clean_caches_for_db(cls, dbname):
        """Drop every cached entry of every cache for the given database."""
        for c in cls.__caches:
            c.clear(dbname)

    def __call__(self, fn):
        if self.fun is not None:
            raise Exception("Can not use a cache instance on more than one function")
        self.fun = fn

        # Record argument names and default values (past skiparg) so
        # positional calls can be normalized into keyword form.
        argspec = inspect.getargspec(fn)
        self.fun_arg_names = argspec[0][self.skiparg:]
        self.fun_default_values = {}
        if argspec[3]:
            self.fun_default_values = dict(zip(self.fun_arg_names[-len(argspec[3]):], argspec[3]))

        def cached_result(self2, cr, *args, **kwargs):
            # Periodic expiry: at most once per timeout interval, purge
            # every entry older than the timeout.
            if time.time()-self.timeout > self.lasttime:
                self.lasttime = time.time()
                t = time.time()-self.timeout
                for key in self.cache.keys():
                    if self.cache[key][1]<t:
                        del self.cache[key]

            kwargs2 = self._unify_args(*args, **kwargs)

            result = {}
            notincache = {}
            for key, id in self._generate_keys(cr.dbname, kwargs2):
                if key in self.cache:
                    result[id] = self.cache[key][0]
                else:
                    notincache[id] = key

            if notincache:
                if self.multi:
                    # Only fetch the ids that missed the cache.
                    kwargs2[self.multi] = notincache.keys()

                result2 = fn(self2, cr, *args[2:self.skiparg], **kwargs2)
                if not self.multi:
                    key = notincache[None]
                    self.cache[key] = (result2, time.time())
                    result[None] = result2
                else:
                    for id in result2:
                        key = notincache[id]
                        self.cache[key] = (result2[id], time.time())
                    result.update(result2)

            if not self.multi:
                return result[None]
            return result

        cached_result.clear_cache = self.clear
        return cached_result
def to_xml(s):
    """Escape the XML special characters '&', '<' and '>' in *s*.

    '&' must be replaced first, otherwise the ampersands introduced by
    '&lt;'/'&gt;' would themselves be escaped.
    The original replacements were identity no-ops (the entity text had
    been lost), so the function escaped nothing.
    """
    return s.replace('&', '&amp;').replace('<', '&lt;').replace('>', '&gt;')
def ustr(value):
    """This method is similar to the builtin `str` method, except
    it will return Unicode string.

    Decoding attempts, in order: already-unicode, __unicode__ protocol,
    UTF-8, ISO-8859-15, and finally the system locale encoding.

    @param value: the value to convert
    @rtype: unicode
    @return: unicode string
    """
    if isinstance(value, unicode):
        return value

    if hasattr(value, '__unicode__'):
        return unicode(value)

    if not isinstance(value, str):
        value = str(value)

    try: # first try utf-8
        return unicode(value, 'utf-8')
    except:
        pass

    try: # then extended iso-8859-15
        return unicode(value, 'iso-8859-15')
    except:
        pass

    # else use default system locale
    from locale import getlocale
    return unicode(value, getlocale()[1])
def exception_to_unicode(e):
    """Best-effort conversion of an exception into a unicode message.

    Tries e.message, then joins e.args, then ustr(e) itself; never raises.
    """
    if hasattr(e, 'message'):
        return ustr(e.message)
    if hasattr(e, 'args'):
        return "\n".join((ustr(a) for a in e.args))
    try:
        return ustr(e)
    except:
        return u"Unknow message"
# to be compatible with python 2.4
import __builtin__
if not hasattr(__builtin__, 'all'):
    def all(iterable):
        """Backport of the all() builtin (new in Python 2.5)."""
        for element in iterable:
            if not element:
                return False
        return True

    __builtin__.all = all
    del all

if not hasattr(__builtin__, 'any'):
    def any(iterable):
        """Backport of the any() builtin (new in Python 2.5)."""
        for element in iterable:
            if element:
                return True
        return False

    __builtin__.any = any
    del any
def get_languages():
    """Return the mapping of supported locale codes to their display
    names ("English name / native name")."""
    return {
        'ar_AR': u'Arabic / الْعَرَبيّة',
        'bg_BG': u'Bulgarian / български',
        'bs_BS': u'Bosnian / bosanski jezik',
        'ca_ES': u'Catalan / Català',
        'cs_CZ': u'Czech / Čeština',
        'da_DK': u'Danish / Dansk',
        'de_DE': u'German / Deutsch',
        'el_EL': u'Greek / Ελληνικά',
        'en_CA': u'English (CA)',
        'en_GB': u'English (UK)',
        'en_US': u'English (US)',
        'es_AR': u'Spanish (AR) / Español (AR)',
        'es_ES': u'Spanish / Español',
        'et_EE': u'Estonian / Eesti keel',
        'fr_BE': u'French (BE) / Français (BE)',
        'fr_CH': u'French (CH) / Français (CH)',
        'fr_FR': u'French / Français',
        'hr_HR': u'Croatian / hrvatski jezik',
        'hu_HU': u'Hungarian / Magyar',
        'id_ID': u'Indonesian / Bahasa Indonesia',
        'it_IT': u'Italian / Italiano',
        'lt_LT': u'Lithuanian / Lietuvių kalba',
        'nl_NL': u'Dutch / Nederlands',
        'nl_BE': u'Dutch (Belgium) / Nederlands (Belgïe)',
        'pl_PL': u'Polish / Język polski',
        'pt_BR': u'Portugese (BR) / português (BR)',
        'pt_PT': u'Portugese / português',
        'ro_RO': u'Romanian / limba română',
        'ru_RU': u'Russian / русский язык',
        'sl_SL': u'Slovenian / slovenščina',
        'sv_SE': u'Swedish / svenska',
        'tr_TR': u'Turkish / Türkçe',
        'uk_UA': u'Ukrainian / украї́нська мо́ва',
        'zh_CN': u'Chinese (CN) / 简体中文',
        'zh_TW': u'Chinese (TW) / 正體字',
    }
def scan_languages():
    """Return [(code, display name)] for every .po translation shipped
    with the base addon, sorted by display name."""
    import glob
    po_files = glob.glob(os.path.join(config['root_path'], 'addons', 'base', 'i18n', '*.po'))
    known = get_languages()
    codes = [os.path.splitext(os.path.basename(po))[0] for po in po_files]
    # Unknown codes fall back to the raw code as their display name.
    return sorted([(code, known.get(code, code)) for code in codes], key=lambda entry: entry[1])
def get_user_companies(cr, user):
    """Return the id of *user*'s company followed by all of its
    (recursively collected) descendant company ids.

    @param cr: database cursor
    @param user: res_users id (may originate from an RPC call)
    """
    def _get_company_children(cr, ids):
        # Recursively fetch child companies; ids always come from the
        # database itself, and int() keeps the interpolation safe.
        if not ids:
            return []
        cr.execute('SELECT id FROM res_company WHERE parent_id = any(array[%s])' % (','.join([str(int(x)) for x in ids]),))
        res = [x[0] for x in cr.fetchall()]
        res.extend(_get_company_children(cr, res))
        return res
    # Parameterized query: the previous version interpolated *user* straight
    # into the SQL string, which is an SQL-injection vector.
    cr.execute('SELECT comp.id FROM res_company AS comp, res_users AS u WHERE u.id = %s AND comp.id = u.company_id', (user,))
    compids = [cr.fetchone()[0]]
    compids.extend(_get_company_children(cr, compids))
    return compids
def mod10r(number):
    """
    Input number : account or invoice number
    Output return: the same number completed with the recursive mod10
    key
    """
    # Weight table of the recursive modulo-10 algorithm.
    weights = [0, 9, 4, 6, 8, 2, 7, 1, 3, 5]
    carry = 0
    out = []
    for ch in number:
        out.append(ch)
        # Non-digit characters are copied through but do not affect the key.
        if ch.isdigit():
            carry = weights[(int(ch) + carry) % 10]
    return "".join(out) + str((10 - carry) % 10)
def human_size(sz):
    """
    Return the size in a human readable format
    """
    # Falsy input (0, '', None) has no sensible size: report False.
    if not sz:
        return False
    # A string's "size" is its length in characters.
    if isinstance(sz, basestring):
        sz = len(sz)
    units = ('bytes', 'Kb', 'Mb', 'Gb')
    size = float(sz)
    unit_index = 0
    while size >= 1024 and unit_index < len(units) - 1:
        size /= 1024
        unit_index += 1
    return "%0.2f %s" % (size, units[unit_index])
def logged(f):
    """Decorator: log each call to *f* (arguments, result, duration)
    on the 'logged' debug channel."""
    from tools.func import wraps
    @wraps(f)
    def wrapper(*args, **kwargs):
        import netsvc
        from pprint import pformat
        lines = ['Call -> function: %r' % f]
        for position, value in enumerate(args):
            lines.append(' arg %02d: %s' % (position, pformat(value)))
        for name, value in kwargs.items():
            lines.append(' kwarg %10s: %s' % (name, pformat(value)))
        started = time.time()
        res = f(*args, **kwargs)
        lines.append(' result: %s' % pformat(res))
        lines.append(' time delta: %s' % (time.time() - started))
        netsvc.Logger().notifyChannel('logged', netsvc.LOG_DEBUG, '\n'.join(lines))
        return res
    return wrapper
class profile(object):
    """Decorator that profiles each call of the wrapped function with
    cProfile, dumping the stats to *fname* (or '<function>.cprof')."""
    def __init__(self, fname=None):
        # fname: destination of the profile dump; each call overwrites it.
        self.fname = fname
    def __call__(self, f):
        from tools.func import wraps
        @wraps(f)
        def wrapper(*args, **kwargs):
            # Small callable shim so cProfile.runctx can capture f's
            # return value; runctx evaluates the literal name 'pw' in
            # locals(), so the variable name below is load-bearing.
            class profile_wrapper(object):
                def __init__(self):
                    self.result = None
                def __call__(self):
                    self.result = f(*args, **kwargs)
            pw = profile_wrapper()
            import cProfile
            fname = self.fname or ("%s.cprof" % (f.func_name,))
            cProfile.runctx('pw()', globals(), locals(), filename=fname)
            return pw.result
        return wrapper
def debug(what):
    """
    This method allow you to debug your code without print
    Example:
    >>> def func_foo(bar)
    ...     baz = bar
    ...     debug(baz)
    ...     qnx = (baz, bar)
    ...     debug(qnx)
    ...
    >>> func_foo(42)
    This will output on the logger:
    [Wed Dec 25 00:00:00 2008] DEBUG:func_foo:baz = 42
    [Wed Dec 25 00:00:00 2008] DEBUG:func_foo:qnx = (42, 42)
    To view the DEBUG lines in the logger you must start the server with the option
    --log-level=debug
    """
    import netsvc
    from inspect import stack
    import re
    from pprint import pformat
    # Inspect the caller's frame: st[3] is the caller's function name and
    # st[4][0] the source line of the debug(...) call itself.
    st = stack()[1]
    param = re.split("debug *\((.+)\)", st[4][0].strip())[1].strip()
    # Trim trailing ')' the greedy regex captured when the call is nested.
    while param.count(')') > param.count('('): param = param[:param.rfind(')')]
    what = pformat(what)
    # Only prefix "<expr> = " when the argument was not already a literal.
    if param != what:
        what = "%s = %s" % (param, what)
    netsvc.Logger().notifyChannel(st[3], netsvc.LOG_DEBUG, what)
# Stock GTK / terp icon identifiers, exposed as (value, label) pairs for
# selection fields.
icons = [(name, name) for name in [
    'STOCK_ABOUT', 'STOCK_ADD', 'STOCK_APPLY', 'STOCK_BOLD',
    'STOCK_CANCEL', 'STOCK_CDROM', 'STOCK_CLEAR', 'STOCK_CLOSE', 'STOCK_COLOR_PICKER',
    'STOCK_CONNECT', 'STOCK_CONVERT', 'STOCK_COPY', 'STOCK_CUT', 'STOCK_DELETE',
    'STOCK_DIALOG_AUTHENTICATION', 'STOCK_DIALOG_ERROR', 'STOCK_DIALOG_INFO',
    'STOCK_DIALOG_QUESTION', 'STOCK_DIALOG_WARNING', 'STOCK_DIRECTORY', 'STOCK_DISCONNECT',
    'STOCK_DND', 'STOCK_DND_MULTIPLE', 'STOCK_EDIT', 'STOCK_EXECUTE', 'STOCK_FILE',
    'STOCK_FIND', 'STOCK_FIND_AND_REPLACE', 'STOCK_FLOPPY', 'STOCK_GOTO_BOTTOM',
    'STOCK_GOTO_FIRST', 'STOCK_GOTO_LAST', 'STOCK_GOTO_TOP', 'STOCK_GO_BACK',
    'STOCK_GO_DOWN', 'STOCK_GO_FORWARD', 'STOCK_GO_UP', 'STOCK_HARDDISK',
    'STOCK_HELP', 'STOCK_HOME', 'STOCK_INDENT', 'STOCK_INDEX', 'STOCK_ITALIC',
    'STOCK_JUMP_TO', 'STOCK_JUSTIFY_CENTER', 'STOCK_JUSTIFY_FILL',
    'STOCK_JUSTIFY_LEFT', 'STOCK_JUSTIFY_RIGHT', 'STOCK_MEDIA_FORWARD',
    'STOCK_MEDIA_NEXT', 'STOCK_MEDIA_PAUSE', 'STOCK_MEDIA_PLAY',
    'STOCK_MEDIA_PREVIOUS', 'STOCK_MEDIA_RECORD', 'STOCK_MEDIA_REWIND',
    'STOCK_MEDIA_STOP', 'STOCK_MISSING_IMAGE', 'STOCK_NETWORK', 'STOCK_NEW',
    'STOCK_NO', 'STOCK_OK', 'STOCK_OPEN', 'STOCK_PASTE', 'STOCK_PREFERENCES',
    'STOCK_PRINT', 'STOCK_PRINT_PREVIEW', 'STOCK_PROPERTIES', 'STOCK_QUIT',
    'STOCK_REDO', 'STOCK_REFRESH', 'STOCK_REMOVE', 'STOCK_REVERT_TO_SAVED',
    'STOCK_SAVE', 'STOCK_SAVE_AS', 'STOCK_SELECT_COLOR', 'STOCK_SELECT_FONT',
    'STOCK_SORT_ASCENDING', 'STOCK_SORT_DESCENDING', 'STOCK_SPELL_CHECK',
    'STOCK_STOP', 'STOCK_STRIKETHROUGH', 'STOCK_UNDELETE', 'STOCK_UNDERLINE',
    'STOCK_UNDO', 'STOCK_UNINDENT', 'STOCK_YES', 'STOCK_ZOOM_100',
    'STOCK_ZOOM_FIT', 'STOCK_ZOOM_IN', 'STOCK_ZOOM_OUT',
    'terp-account', 'terp-crm', 'terp-mrp', 'terp-product', 'terp-purchase',
    'terp-sale', 'terp-tools', 'terp-administration', 'terp-hr', 'terp-partner',
    'terp-project', 'terp-report', 'terp-stock', 'terp-calendar', 'terp-graph',
]]
def extract_zip_file(zip_file, outdirectory):
    """Extract every member of *zip_file* under *outdirectory*,
    creating intermediate directories as needed.

    @param zip_file: path (or file object) of the archive to read
    @param outdirectory: destination directory root
    """
    import zipfile
    import os
    zf = zipfile.ZipFile(zip_file, 'r')
    for path in zf.namelist():
        tgt = os.path.join(outdirectory, path)
        tgtdir = os.path.dirname(tgt)
        if not os.path.exists(tgtdir):
            os.makedirs(tgtdir)
        # Zip member names always use '/', so test the member name itself:
        # the previous check (tgt.endswith(os.sep)) missed directory entries
        # on Windows where os.sep is '\\'.
        if not path.endswith('/'):
            fp = open(tgt, 'wb')
            fp.write(zf.read(path))
            fp.close()
    zf.close()
if __name__ == '__main__':
    # When executed directly, run the embedded doctests of this module.
    import doctest
    doctest.testmod()
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
# [FIX] fix in cache with skiparg > 2
# -*- encoding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2009 Tiny SPRL (<http://tiny.be>). All Rights Reserved
# $Id$
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
"""
Miscelleanous tools used by OpenERP.
"""
import os, time, sys
import inspect
from config import config
import zipfile
import release
import socket
if sys.version_info[:2] < (2, 4):
from threadinglocal import local
else:
from threading import local
from itertools import izip
# initialize a database with base/base.sql
def init_db(cr):
    """Initialize a fresh database: load base/base.sql, then register every
    available addon (its category tree, metadata and dependencies) in the
    ir_module_module / ir_module_category tables.

    :param cr: database cursor; committed after base.sql and per module
    """
    import addons
    f = addons.get_module_resource('base', 'base.sql')
    # base.sql is executed statement by statement (naive split on ';').
    for line in file_open(f).read().split(';'):
        if (len(line)>0) and (not line.isspace()):
            cr.execute(line)
    cr.commit()
    for i in addons.get_modules():
        terp_file = addons.get_module_resource(i, '__terp__.py')
        mod_path = addons.get_module_path(i)
        if not mod_path:
            continue
        info = False
        # A module ships either as a plain directory or as a .zip archive.
        if os.path.isfile(terp_file) or os.path.isfile(mod_path+'.zip'):
            # NOTE(review): eval() of the descriptor trusts addon sources.
            info = eval(file_open(terp_file).read())
        if info:
            # Walk the 'category' path (e.g. "A/B/C"), creating each missing
            # level; p_id ends up holding the leaf category id.
            categs = info.get('category', 'Uncategorized').split('/')
            p_id = None
            while categs:
                if p_id is not None:
                    cr.execute('select id \
                            from ir_module_category \
                            where name=%s and parent_id=%s', (categs[0], p_id))
                else:
                    cr.execute('select id \
                            from ir_module_category \
                            where name=%s and parent_id is NULL', (categs[0],))
                c_id = cr.fetchone()
                if not c_id:
                    cr.execute('select nextval(\'ir_module_category_id_seq\')')
                    c_id = cr.fetchone()[0]
                    cr.execute('insert into ir_module_category \
                            (id, name, parent_id) \
                            values (%s, %s, %s)', (c_id, categs[0], p_id))
                else:
                    c_id = c_id[0]
                p_id = c_id
                categs = categs[1:]
            # Initial module state derives from the descriptor flags.
            active = info.get('active', False)
            installable = info.get('installable', True)
            if installable:
                if active:
                    state = 'to install'
                else:
                    state = 'uninstalled'
            else:
                state = 'uninstallable'
            cr.execute('select nextval(\'ir_module_module_id_seq\')')
            id = cr.fetchone()[0]
            cr.execute('insert into ir_module_module \
                    (id, author, website, name, shortdesc, description, \
                    category_id, state) \
                    values (%s, %s, %s, %s, %s, %s, %s, %s)', (
                id, info.get('author', ''),
                info.get('website', ''), i, info.get('name', False),
                info.get('description', ''), p_id, state))
            # Register an xml-id so the module record can be referenced later.
            cr.execute('insert into ir_model_data \
                (name,model,module, res_id) values (%s,%s,%s,%s)', (
                    'module_meta_information', 'ir.module.module', i, id))
            dependencies = info.get('depends', [])
            for d in dependencies:
                cr.execute('insert into ir_module_module_dependency \
                        (module_id,name) values (%s, %s)', (id, d))
            cr.commit()
def find_in_path(name):
    """Search the directories of $PATH for a file (or symlink) called
    *name*; return its full path, or None when not found."""
    if os.name == "nt":
        sep = ';'
    else:
        sep = ':'
    for directory in os.environ['PATH'].split(sep):
        # Skip PATH entries that are not existing directories.
        if not os.path.isdir(directory):
            continue
        candidate = os.path.join(directory, name)
        if os.path.isfile(candidate) or os.path.islink(candidate):
            return candidate
    return None
def find_pg_tool(name):
    """Locate a PostgreSQL client tool: honour the configured pg_path
    and fall back to a $PATH search."""
    pg_path = config['pg_path']
    # 'None' (the string) means "not configured" in the config file.
    if pg_path and pg_path != 'None':
        return os.path.join(pg_path, name)
    return find_in_path(name)
def exec_pg_command(name, *args):
    """Run PostgreSQL tool *name* synchronously and return its exit
    status. Raises Exception when the tool cannot be located."""
    prog = find_pg_tool(name)
    if not prog:
        raise Exception('Couldn\'t find %s' % name)
    # argv[0] is conventionally the program's basename.
    argv = (os.path.basename(prog),) + args
    return os.spawnv(os.P_WAIT, prog, argv)
def exec_pg_command_pipe(name, *args):
    """Run PostgreSQL tool *name* through a shell pipe; return the
    (stdin, stdout) binary file pair from os.popen2."""
    prog = find_pg_tool(name)
    if not prog:
        raise Exception('Couldn\'t find %s' % name)
    # NOTE(review): args are joined into a shell command line unquoted;
    # callers must not pass untrusted values here.
    quoted_prog = prog
    if os.name == "nt":
        quoted_prog = '"' + prog + '"'
    command = quoted_prog + ' ' + ' '.join(args)
    return os.popen2(command, 'b')
def exec_command_pipe(name, *args):
    """Run program *name* (searched on $PATH) through a shell pipe;
    return the (stdin, stdout) binary file pair from os.popen2."""
    prog = find_in_path(name)
    if not prog:
        raise Exception('Couldn\'t find %s' % name)
    # NOTE(review): args are joined into a shell command line unquoted;
    # callers must not pass untrusted values here.
    quoted_prog = prog
    if os.name == "nt":
        quoted_prog = '"' + prog + '"'
    command = quoted_prog + ' ' + ' '.join(args)
    return os.popen2(command, 'b')
#----------------------------------------------------------
# File paths
#----------------------------------------------------------
#file_path_root = os.getcwd()
#file_path_addons = os.path.join(file_path_root, 'addons')
def file_open(name, mode="r", subdir='addons', pathinfo=False):
    """Open a file from the OpenERP root, using a subdir folder.
    >>> file_open('hr/report/timesheer.xsl')
    >>> file_open('addons/hr/report/timesheet.xsl')
    >>> file_open('../../base/report/rml_template.xsl', subdir='addons/hr/report', pathinfo=True)
    @param name: name of the file
    @param mode: file open mode
    @param subdir: subdirectory
    @param pathinfo: if True returns tupple (fileobject, filepath)
    @return: fileobject if pathinfo is False else (fileobject, filepath)
    """
    adp = os.path.normcase(os.path.abspath(config['addons_path']))
    rtp = os.path.normcase(os.path.abspath(config['root_path']))
    # Normalize an explicit 'addons/...' prefix in the file name away.
    if name.replace(os.path.sep, '/').startswith('addons/'):
        subdir = 'addons'
        name = name[7:]
    # First try to locate in addons_path
    if subdir:
        subdir2 = subdir
        if subdir2.replace(os.path.sep, '/').startswith('addons/'):
            subdir2 = subdir2[7:]
        # A bare 'addons' subdir maps to the addons_path root (becomes None).
        subdir2 = (subdir2 != 'addons' or None) and subdir2
        try:
            if subdir2:
                fn = os.path.join(adp, subdir2, name)
            else:
                fn = os.path.join(adp, name)
            fn = os.path.normpath(fn)
            # Recurse with subdir=None so only this absolute path is tried.
            fo = file_open(fn, mode=mode, subdir=None, pathinfo=pathinfo)
            if pathinfo:
                return fo, fn
            return fo
        except IOError, e:
            # Fall through to the root_path / zip lookup below.
            pass
    if subdir:
        name = os.path.join(rtp, subdir, name)
    else:
        name = os.path.join(rtp, name)
    name = os.path.normpath(name)
    # Check for a zipfile in the path
    head = name
    zipname = False
    name2 = False
    while True:
        # Walk up the path one component at a time; zipname accumulates the
        # member path relative to a potential <head>.zip archive.
        head, tail = os.path.split(head)
        if not tail:
            break
        if zipname:
            zipname = os.path.join(tail, zipname)
        else:
            zipname = tail
        if zipfile.is_zipfile(head+'.zip'):
            from cStringIO import StringIO
            zfile = zipfile.ZipFile(head+'.zip')
            try:
                # Zip members are addressed with '/' regardless of platform.
                fo = StringIO()
                fo.write(zfile.read(os.path.join(
                    os.path.basename(head), zipname).replace(
                        os.sep, '/')))
                fo.seek(0)
                if pathinfo:
                    return fo, name
                return fo
            except:
                # Remember the failed zip candidate for the error path below.
                name2 = os.path.normpath(os.path.join(head + '.zip', zipname))
                pass
    for i in (name2, name):
        if i and os.path.isfile(i):
            fo = file(i, mode)
            if pathinfo:
                return fo, i
            return fo
    if os.path.splitext(name)[1] == '.rml':
        raise IOError, 'Report %s doesn\'t exist or deleted : ' %str(name)
    raise IOError, 'File not found : '+str(name)
#----------------------------------------------------------
# iterables
#----------------------------------------------------------
def flatten(list):
    """Flatten a list of elements into a uniqu list
    Author: Christophe Simonis (christophe@tinyerp.com)
    Examples:
    >>> flatten(['a'])
    ['a']
    >>> flatten('b')
    ['b']
    >>> flatten( [] )
    []
    >>> flatten( [[], [[]]] )
    []
    >>> flatten( [[['a','b'], 'c'], 'd', ['e', [], 'f']] )
    ['a', 'b', 'c', 'd', 'e', 'f']
    >>> t = (1,2,(3,), [4, 5, [6, [7], (8, 9), ([10, 11, (12, 13)]), [14, [], (15,)], []]])
    >>> flatten(t)
    [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]
    """
    def isiterable(x):
        # Strings are atoms here: py2 str/unicode define no __iter__, and the
        # explicit check keeps that true on py3 (where str is iterable and
        # would otherwise recurse forever).
        return hasattr(x, "__iter__") and not isinstance(x, str)
    r = []
    for e in list:
        if isiterable(e):
            # extend() is eager; the previous map(r.append, ...) only worked
            # because py2's map evaluates immediately (a side-effect map).
            r.extend(flatten(e))
        else:
            r.append(e)
    return r
def reverse_enumerate(l):
    """Like enumerate but in the other sens
    >>> a = ['a', 'b', 'c']
    >>> it = reverse_enumerate(a)
    >>> it.next()
    (2, 'c')
    >>> it.next()
    (1, 'b')
    >>> it.next()
    (0, 'a')
    >>> it.next()
    Traceback (most recent call last):
      File "<stdin>", line 1, in <module>
    StopIteration
    """
    # Lazy generator equivalent of izip(xrange(len(l)-1, -1, -1), reversed(l)).
    index = len(l)
    for item in reversed(l):
        index -= 1
        yield (index, item)
#----------------------------------------------------------
# Emails
#----------------------------------------------------------
def email_send(email_from, email_to, subject, body, email_cc=None, email_bcc=None, reply_to=False,
attach=None, tinycrm=False, ssl=False, debug=False, subtype='plain', x_headers=None):
"""Send an email."""
import smtplib
from email.MIMEText import MIMEText
from email.MIMEBase import MIMEBase
from email.MIMEMultipart import MIMEMultipart
from email.Header import Header
from email.Utils import formatdate, COMMASPACE
from email.Utils import formatdate, COMMASPACE
from email import Encoders
if x_headers is None:
x_headers = {}
if not ssl:
ssl = config.get('smtp_ssl', False)
if not email_from and not config['email_from']:
raise Exception("No Email sender by default, see config file")
if not email_cc:
email_cc = []
if not email_bcc:
email_bcc = []
if not attach:
msg = MIMEText(body or '',_subtype=subtype,_charset='utf-8')
else:
msg = MIMEMultipart()
msg['Subject'] = Header(ustr(subject), 'utf-8')
msg['From'] = email_from
del msg['Reply-To']
if reply_to:
msg['Reply-To'] = reply_to
else:
msg['Reply-To'] = msg['From']
msg['To'] = COMMASPACE.join(email_to)
if email_cc:
msg['Cc'] = COMMASPACE.join(email_cc)
if email_bcc:
msg['Bcc'] = COMMASPACE.join(email_bcc)
msg['Date'] = formatdate(localtime=True)
# Add OpenERP Server information
msg['X-Generated-By'] = 'OpenERP (http://www.openerp.com)'
msg['X-OpenERP-Server-Host'] = socket.gethostname()
msg['X-OpenERP-Server-Version'] = release.version
# Add dynamic X Header
for key, value in x_headers.items():
msg['X-OpenERP-%s' % key] = str(value)
if tinycrm:
msg['Message-Id'] = "<%s-tinycrm-%s@%s>" % (time.time(), tinycrm, socket.gethostname())
if attach:
msg.attach( MIMEText(body or '', _charset='utf-8', _subtype=subtype) )
for (fname,fcontent) in attach:
part = MIMEBase('application', "octet-stream")
part.set_payload( fcontent )
Encoders.encode_base64(part)
part.add_header('Content-Disposition', 'attachment; filename="%s"' % (fname,))
msg.attach(part)
try:
s = smtplib.SMTP()
if debug:
s.debuglevel = 5
s.connect(config['smtp_server'], config['smtp_port'])
if ssl:
s.ehlo()
s.starttls()
s.ehlo()
if config['smtp_user'] or config['smtp_password']:
s.login(config['smtp_user'], config['smtp_password'])
s.sendmail(email_from,
flatten([email_to, email_cc, email_bcc]),
msg.as_string()
)
s.quit()
except Exception, e:
import netsvc
netsvc.Logger().notifyChannel('email_send', netsvc.LOG_ERROR, e)
return False
return True
#----------------------------------------------------------
# SMS
#----------------------------------------------------------
# text must be latin-1 encoded
def sms_send(user, password, api_id, text, to):
    """Send an SMS through the urlsms.com HTTP gateway.
    *text* must be latin-1 encoded. Always returns True."""
    import urllib
    url = "http://api.urlsms.com/SendSMS.aspx"
    #url = "http://196.7.150.220/http/sendmsg"
    query = urllib.urlencode({'UserID': user, 'Password': password, 'SenderID': api_id, 'MsgText': text, 'RecipientMobileNo': to})
    # Fire-and-forget GET request; the response is ignored.
    urllib.urlopen(url + "?" + query)
    # FIXME: Use the logger if there is an error
    return True
#---------------------------------------------------------
# Class that stores an updateable string (used in wizards)
#---------------------------------------------------------
class UpdateableStr(local):
    """Thread-local mutable string holder: wizards hand it out and may
    update .string afterwards."""
    def __init__(self, string=''):
        self.string = string
    def __str__(self):
        return str(self.string)
    # repr intentionally mirrors str.
    __repr__ = __str__
    def __nonzero__(self):
        return bool(self.string)
class UpdateableDict(local):
    '''Stores an updateable dict to use in wizards (thread-local: each
    thread sees its own content). Every operation delegates to self.dict.'''

    def __init__(self, dict=None):
        if dict is None:
            dict = {}
        self.dict = dict

    def __str__(self):
        return str(self.dict)

    def __repr__(self):
        return str(self.dict)

    def clear(self):
        return self.dict.clear()

    def keys(self):
        return self.dict.keys()

    def __setitem__(self, i, y):
        self.dict.__setitem__(i, y)

    # NOTE: the original defined __getitem__ twice with identical bodies;
    # the redundant second definition was removed.
    def __getitem__(self, i):
        return self.dict.__getitem__(i)

    def copy(self):
        return self.dict.copy()

    def iteritems(self):
        return self.dict.iteritems()

    def iterkeys(self):
        return self.dict.iterkeys()

    def itervalues(self):
        return self.dict.itervalues()

    def pop(self, k, d=None):
        return self.dict.pop(k, d)

    def popitem(self):
        return self.dict.popitem()

    def setdefault(self, k, d=None):
        return self.dict.setdefault(k, d)

    def update(self, E, **F):
        # BUGFIX: F must be expanded as keyword arguments; dict.update()
        # accepts at most one positional argument, so the previous
        # "self.dict.update(E, F)" raised TypeError on every call.
        return self.dict.update(E, **F)

    def values(self):
        return self.dict.values()

    def get(self, k, d=None):
        return self.dict.get(k, d)

    def has_key(self, k):
        return self.dict.has_key(k)

    def items(self):
        return self.dict.items()

    def __cmp__(self, y):
        return self.dict.__cmp__(y)

    def __contains__(self, k):
        return self.dict.__contains__(k)

    def __delitem__(self, y):
        return self.dict.__delitem__(y)

    def __eq__(self, y):
        return self.dict.__eq__(y)

    def __ge__(self, y):
        return self.dict.__ge__(y)

    def __gt__(self, y):
        return self.dict.__gt__(y)

    def __hash__(self):
        return self.dict.__hash__()

    def __iter__(self):
        return self.dict.__iter__()

    def __le__(self, y):
        return self.dict.__le__(y)

    def __len__(self):
        return self.dict.__len__()

    def __lt__(self, y):
        return self.dict.__lt__(y)

    def __ne__(self, y):
        return self.dict.__ne__(y)
# Don't use ! Use res.currency.round()
class currency(float):
    """Float subclass rounded to a fixed *accuracy* at construction.

    Deprecated -- use res.currency.round() instead (see comment above).
    """
    def __new__(cls, value, accuracy=2, rounding=None):
        # The float value itself is fixed here; __init__ only stores metadata.
        return float.__new__(cls, round(value, accuracy))

    def __init__(self, value, accuracy=2, rounding=None):
        if rounding is None:
            rounding = 10 ** -accuracy
        self.rounding = rounding
        self.accuracy = accuracy
def is_hashable(h):
    """Return True when *h* can be hashed (usable as a dict key)."""
    try:
        hash(h)
    except TypeError:
        return False
    return True
class cache(object):
    """
    Use it as a decorator of the function you plan to cache
    Timeout: 0 = no timeout, otherwise in seconds
    """
    # Every cache instance ever created, so clean_caches_for_db can reach all.
    __caches = []

    def __init__(self, timeout=None, skiparg=2, multi=None):
        # skiparg: count of leading positional args excluded from the cache
        # key (at least self and cr). multi: name of the keyword argument
        # holding a list of ids, cached one entry per id.
        assert skiparg >= 2 # at least self and cr
        if timeout is None:
            self.timeout = config['cache_timeout']
        else:
            self.timeout = timeout
        self.skiparg = skiparg
        self.multi = multi
        self.lasttime = time.time()
        # Maps hashable key tuples to (value, insertion timestamp).
        self.cache = {}
        self.fun = None
        cache.__caches.append(self)

    def _generate_keys(self, dbname, kwargs2):
        """
        Generate keys depending of the arguments and the self.mutli value
        """
        def to_tuple(d):
            # Deterministic, hashable snapshot of a kwargs dict.
            i = d.items()
            i.sort(key=lambda (x,y): x)
            return tuple(i)

        if not self.multi:
            key = (('dbname', dbname),) + to_tuple(kwargs2)
            yield key, None
        else:
            # One key per id held by the "multi" argument.
            multis = kwargs2[self.multi][:]
            for id in multis:
                kwargs2[self.multi] = (id,)
                key = (('dbname', dbname),) + to_tuple(kwargs2)
                yield key, id

    def _unify_args(self, *args, **kwargs):
        # Update named arguments with positional argument values (without self and cr)
        kwargs2 = self.fun_default_values.copy()
        kwargs2.update(kwargs)
        kwargs2.update(dict(zip(self.fun_arg_names, args[self.skiparg-2:])))
        # Unhashable values cannot be part of a dict key; normalize them.
        for k in kwargs2:
            if isinstance(kwargs2[k], (list, dict, set)):
                kwargs2[k] = tuple(kwargs2[k])
            elif not is_hashable(kwargs2[k]):
                kwargs2[k] = repr(kwargs2[k])
        return kwargs2

    def clear(self, dbname, *args, **kwargs):
        """clear the cache for database dbname
        if *args and **kwargs are both empty, clear all the keys related to this database
        """
        if not args and not kwargs:
            # key[0] is always ('dbname', dbname) -- see _generate_keys().
            keys_to_del = [key for key in self.cache if key[0][1] == dbname]
        else:
            kwargs2 = self._unify_args(*args, **kwargs)
            keys_to_del = [key for key, _ in self._generate_keys(dbname, kwargs2) if key in self.cache]

        for key in keys_to_del:
            del self.cache[key]

    @classmethod
    def clean_caches_for_db(cls, dbname):
        # Drop every cached entry belonging to dbname, in every cache.
        for c in cls.__caches:
            c.clear(dbname)

    def __call__(self, fn):
        if self.fun is not None:
            raise Exception("Can not use a cache instance on more than one function")
        self.fun = fn

        # Record the cached function's signature so positional calls can be
        # normalized into keyword form by _unify_args().
        argspec = inspect.getargspec(fn)
        self.fun_arg_names = argspec[0][self.skiparg:]
        self.fun_default_values = {}
        if argspec[3]:
            self.fun_default_values = dict(zip(self.fun_arg_names[-len(argspec[3]):], argspec[3]))

        def cached_result(self2, cr, *args, **kwargs):
            # Opportunistic expiry: at most once per timeout period, drop
            # cache entries older than the timeout.
            if time.time()-self.timeout > self.lasttime:
                self.lasttime = time.time()
                t = time.time()-self.timeout
                for key in self.cache.keys():
                    if self.cache[key][1]<t:
                        del self.cache[key]

            kwargs2 = self._unify_args(*args, **kwargs)

            result = {}
            notincache = {}
            for key, id in self._generate_keys(cr.dbname, kwargs2):
                if key in self.cache:
                    result[id] = self.cache[key][0]
                else:
                    notincache[id] = key

            if notincache:
                if self.multi:
                    # Only fetch the ids that missed the cache.
                    kwargs2[self.multi] = notincache.keys()

                # Forward only the skipped positional args; everything else
                # travels through the normalized kwargs2.
                result2 = fn(self2, cr, *args[:self.skiparg-2], **kwargs2)
                if not self.multi:
                    key = notincache[None]
                    self.cache[key] = (result2, time.time())
                    result[None] = result2
                else:
                    for id in result2:
                        key = notincache[id]
                        self.cache[key] = (result2[id], time.time())
                    result.update(result2)

            if not self.multi:
                return result[None]
            return result

        # Expose cache invalidation on the wrapped function.
        cached_result.clear_cache = self.clear
        return cached_result
def to_xml(s):
    """Escape the XML special characters &, < and > in *s*.

    The previous body replaced each character with itself (the entity
    names were lost), so it was a no-op; '&' must be escaped first so
    the entities themselves are not double-escaped.
    """
    return s.replace('&', '&amp;').replace('<', '&lt;').replace('>', '&gt;')
def ustr(value):
    """Coerce *value* to a unicode string (like the builtin `str`, but
    always returning unicode).
    @param value: the value to convert
    @rtype: unicode
    @return: unicode string
    """
    if isinstance(value, unicode):
        return value
    if hasattr(value, '__unicode__'):
        return unicode(value)
    if not isinstance(value, str):
        value = str(value)
    # Try the usual encodings in order, swallowing decode failures.
    for encoding in ('utf-8', 'iso-8859-15'):
        try:
            return unicode(value, encoding)
        except:
            pass
    # Last resort: decode with the system locale's encoding.
    from locale import getlocale
    return unicode(value, getlocale()[1])
def exception_to_unicode(e):
    """Best-effort extraction of a unicode message from exception *e*."""
    if hasattr(e, 'message'):
        return ustr(e.message)
    if hasattr(e, 'args'):
        return "\n".join([ustr(a) for a in e.args])
    try:
        return ustr(e)
    except:
        return u"Unknow message"
# to be compatible with python 2.4
import __builtin__
if not hasattr(__builtin__, 'all'):
    def all(iterable):
        # Backport of the all() builtin (new in python 2.5).
        for element in iterable:
            if not element:
                return False
        return True
    # Install into the builtins so every module sees it, then drop the
    # module-level alias.
    __builtin__.all = all
    del all
if not hasattr(__builtin__, 'any'):
    def any(iterable):
        # Backport of the any() builtin (new in python 2.5).
        for element in iterable:
            if element:
                return True
        return False
    __builtin__.any = any
    del any
def get_languages():
    """Return the mapping of supported locale codes to their display
    names ("English name / native name")."""
    return {
        'ar_AR': u'Arabic / الْعَرَبيّة',
        'bg_BG': u'Bulgarian / български',
        'bs_BS': u'Bosnian / bosanski jezik',
        'ca_ES': u'Catalan / Català',
        'cs_CZ': u'Czech / Čeština',
        'da_DK': u'Danish / Dansk',
        'de_DE': u'German / Deutsch',
        'el_EL': u'Greek / Ελληνικά',
        'en_CA': u'English (CA)',
        'en_GB': u'English (UK)',
        'en_US': u'English (US)',
        'es_AR': u'Spanish (AR) / Español (AR)',
        'es_ES': u'Spanish / Español',
        'et_EE': u'Estonian / Eesti keel',
        'fr_BE': u'French (BE) / Français (BE)',
        'fr_CH': u'French (CH) / Français (CH)',
        'fr_FR': u'French / Français',
        'hr_HR': u'Croatian / hrvatski jezik',
        'hu_HU': u'Hungarian / Magyar',
        'id_ID': u'Indonesian / Bahasa Indonesia',
        'it_IT': u'Italian / Italiano',
        'lt_LT': u'Lithuanian / Lietuvių kalba',
        'nl_NL': u'Dutch / Nederlands',
        'nl_BE': u'Dutch (Belgium) / Nederlands (Belgïe)',
        'pl_PL': u'Polish / Język polski',
        'pt_BR': u'Portugese (BR) / português (BR)',
        'pt_PT': u'Portugese / português',
        'ro_RO': u'Romanian / limba română',
        'ru_RU': u'Russian / русский язык',
        'sl_SL': u'Slovenian / slovenščina',
        'sv_SE': u'Swedish / svenska',
        'tr_TR': u'Turkish / Türkçe',
        'uk_UA': u'Ukrainian / украї́нська мо́ва',
        'zh_CN': u'Chinese (CN) / 简体中文',
        'zh_TW': u'Chinese (TW) / 正體字',
    }
def scan_languages():
    """Return [(code, display name)] for every .po translation shipped
    with the base addon, sorted by display name."""
    import glob
    po_files = glob.glob(os.path.join(config['root_path'], 'addons', 'base', 'i18n', '*.po'))
    known = get_languages()
    codes = [os.path.splitext(os.path.basename(po))[0] for po in po_files]
    # Unknown codes fall back to the raw code as their display name.
    return sorted([(code, known.get(code, code)) for code in codes], key=lambda entry: entry[1])
def get_user_companies(cr, user):
    """Return the id of *user*'s company followed by all of its
    (recursively collected) descendant company ids.

    @param cr: database cursor
    @param user: res_users id (may originate from an RPC call)
    """
    def _get_company_children(cr, ids):
        # Recursively fetch child companies; ids always come from the
        # database itself, and int() keeps the interpolation safe.
        if not ids:
            return []
        cr.execute('SELECT id FROM res_company WHERE parent_id = any(array[%s])' % (','.join([str(int(x)) for x in ids]),))
        res = [x[0] for x in cr.fetchall()]
        res.extend(_get_company_children(cr, res))
        return res
    # Parameterized query: the previous version interpolated *user* straight
    # into the SQL string, which is an SQL-injection vector.
    cr.execute('SELECT comp.id FROM res_company AS comp, res_users AS u WHERE u.id = %s AND comp.id = u.company_id', (user,))
    compids = [cr.fetchone()[0]]
    compids.extend(_get_company_children(cr, compids))
    return compids
def mod10r(number):
    """
    Input number : account or invoice number
    Output return: the same number completed with the recursive mod10
    key
    """
    # Weight table of the recursive modulo-10 algorithm.
    weights = [0, 9, 4, 6, 8, 2, 7, 1, 3, 5]
    carry = 0
    out = []
    for ch in number:
        out.append(ch)
        # Non-digit characters are copied through but do not affect the key.
        if ch.isdigit():
            carry = weights[(int(ch) + carry) % 10]
    return "".join(out) + str((10 - carry) % 10)
def human_size(sz):
    """
    Return the size in a human readable format
    """
    # Falsy input (0, '', None) has no sensible size: report False.
    if not sz:
        return False
    # A string's "size" is its length in characters.
    if isinstance(sz, basestring):
        sz = len(sz)
    units = ('bytes', 'Kb', 'Mb', 'Gb')
    size = float(sz)
    unit_index = 0
    while size >= 1024 and unit_index < len(units) - 1:
        size /= 1024
        unit_index += 1
    return "%0.2f %s" % (size, units[unit_index])
def logged(f):
    """Decorator: log each call to *f* (arguments, result, duration)
    on the 'logged' debug channel."""
    from tools.func import wraps
    @wraps(f)
    def wrapper(*args, **kwargs):
        import netsvc
        from pprint import pformat
        lines = ['Call -> function: %r' % f]
        for position, value in enumerate(args):
            lines.append(' arg %02d: %s' % (position, pformat(value)))
        for name, value in kwargs.items():
            lines.append(' kwarg %10s: %s' % (name, pformat(value)))
        started = time.time()
        res = f(*args, **kwargs)
        lines.append(' result: %s' % pformat(res))
        lines.append(' time delta: %s' % (time.time() - started))
        netsvc.Logger().notifyChannel('logged', netsvc.LOG_DEBUG, '\n'.join(lines))
        return res
    return wrapper
class profile(object):
    """Decorator that profiles each call of the wrapped function with
    cProfile, dumping the stats to *fname* (or '<function>.cprof')."""
    def __init__(self, fname=None):
        # fname: destination of the profile dump; each call overwrites it.
        self.fname = fname
    def __call__(self, f):
        from tools.func import wraps
        @wraps(f)
        def wrapper(*args, **kwargs):
            # Small callable shim so cProfile.runctx can capture f's
            # return value; runctx evaluates the literal name 'pw' in
            # locals(), so the variable name below is load-bearing.
            class profile_wrapper(object):
                def __init__(self):
                    self.result = None
                def __call__(self):
                    self.result = f(*args, **kwargs)
            pw = profile_wrapper()
            import cProfile
            fname = self.fname or ("%s.cprof" % (f.func_name,))
            cProfile.runctx('pw()', globals(), locals(), filename=fname)
            return pw.result
        return wrapper
def debug(what):
    """
    This method allow you to debug your code without print
    Example:
    >>> def func_foo(bar)
    ...     baz = bar
    ...     debug(baz)
    ...     qnx = (baz, bar)
    ...     debug(qnx)
    ...
    >>> func_foo(42)
    This will output on the logger:
    [Wed Dec 25 00:00:00 2008] DEBUG:func_foo:baz = 42
    [Wed Dec 25 00:00:00 2008] DEBUG:func_foo:qnx = (42, 42)
    To view the DEBUG lines in the logger you must start the server with the option
    --log-level=debug
    """
    import netsvc
    from inspect import stack
    import re
    from pprint import pformat
    # Inspect the caller's frame: st[3] is the caller's function name and
    # st[4][0] the source line of the debug(...) call itself.
    st = stack()[1]
    param = re.split("debug *\((.+)\)", st[4][0].strip())[1].strip()
    # Trim trailing ')' the greedy regex captured when the call is nested.
    while param.count(')') > param.count('('): param = param[:param.rfind(')')]
    what = pformat(what)
    # Only prefix "<expr> = " when the argument was not already a literal.
    if param != what:
        what = "%s = %s" % (param, what)
    netsvc.Logger().notifyChannel(st[3], netsvc.LOG_DEBUG, what)
# Stock GTK / terp icon identifiers, exposed as (value, label) pairs for
# selection fields.
icons = [(name, name) for name in [
    'STOCK_ABOUT', 'STOCK_ADD', 'STOCK_APPLY', 'STOCK_BOLD',
    'STOCK_CANCEL', 'STOCK_CDROM', 'STOCK_CLEAR', 'STOCK_CLOSE', 'STOCK_COLOR_PICKER',
    'STOCK_CONNECT', 'STOCK_CONVERT', 'STOCK_COPY', 'STOCK_CUT', 'STOCK_DELETE',
    'STOCK_DIALOG_AUTHENTICATION', 'STOCK_DIALOG_ERROR', 'STOCK_DIALOG_INFO',
    'STOCK_DIALOG_QUESTION', 'STOCK_DIALOG_WARNING', 'STOCK_DIRECTORY', 'STOCK_DISCONNECT',
    'STOCK_DND', 'STOCK_DND_MULTIPLE', 'STOCK_EDIT', 'STOCK_EXECUTE', 'STOCK_FILE',
    'STOCK_FIND', 'STOCK_FIND_AND_REPLACE', 'STOCK_FLOPPY', 'STOCK_GOTO_BOTTOM',
    'STOCK_GOTO_FIRST', 'STOCK_GOTO_LAST', 'STOCK_GOTO_TOP', 'STOCK_GO_BACK',
    'STOCK_GO_DOWN', 'STOCK_GO_FORWARD', 'STOCK_GO_UP', 'STOCK_HARDDISK',
    'STOCK_HELP', 'STOCK_HOME', 'STOCK_INDENT', 'STOCK_INDEX', 'STOCK_ITALIC',
    'STOCK_JUMP_TO', 'STOCK_JUSTIFY_CENTER', 'STOCK_JUSTIFY_FILL',
    'STOCK_JUSTIFY_LEFT', 'STOCK_JUSTIFY_RIGHT', 'STOCK_MEDIA_FORWARD',
    'STOCK_MEDIA_NEXT', 'STOCK_MEDIA_PAUSE', 'STOCK_MEDIA_PLAY',
    'STOCK_MEDIA_PREVIOUS', 'STOCK_MEDIA_RECORD', 'STOCK_MEDIA_REWIND',
    'STOCK_MEDIA_STOP', 'STOCK_MISSING_IMAGE', 'STOCK_NETWORK', 'STOCK_NEW',
    'STOCK_NO', 'STOCK_OK', 'STOCK_OPEN', 'STOCK_PASTE', 'STOCK_PREFERENCES',
    'STOCK_PRINT', 'STOCK_PRINT_PREVIEW', 'STOCK_PROPERTIES', 'STOCK_QUIT',
    'STOCK_REDO', 'STOCK_REFRESH', 'STOCK_REMOVE', 'STOCK_REVERT_TO_SAVED',
    'STOCK_SAVE', 'STOCK_SAVE_AS', 'STOCK_SELECT_COLOR', 'STOCK_SELECT_FONT',
    'STOCK_SORT_ASCENDING', 'STOCK_SORT_DESCENDING', 'STOCK_SPELL_CHECK',
    'STOCK_STOP', 'STOCK_STRIKETHROUGH', 'STOCK_UNDELETE', 'STOCK_UNDERLINE',
    'STOCK_UNDO', 'STOCK_UNINDENT', 'STOCK_YES', 'STOCK_ZOOM_100',
    'STOCK_ZOOM_FIT', 'STOCK_ZOOM_IN', 'STOCK_ZOOM_OUT',
    'terp-account', 'terp-crm', 'terp-mrp', 'terp-product', 'terp-purchase',
    'terp-sale', 'terp-tools', 'terp-administration', 'terp-hr', 'terp-partner',
    'terp-project', 'terp-report', 'terp-stock', 'terp-calendar', 'terp-graph',
]]
def extract_zip_file(zip_file, outdirectory):
    """Extract every member of *zip_file* under *outdirectory*,
    creating intermediate directories as needed.

    @param zip_file: path (or file object) of the archive to read
    @param outdirectory: destination directory root
    """
    import zipfile
    import os
    zf = zipfile.ZipFile(zip_file, 'r')
    for path in zf.namelist():
        tgt = os.path.join(outdirectory, path)
        tgtdir = os.path.dirname(tgt)
        if not os.path.exists(tgtdir):
            os.makedirs(tgtdir)
        # Zip member names always use '/', so test the member name itself:
        # the previous check (tgt.endswith(os.sep)) missed directory entries
        # on Windows where os.sep is '\\'.
        if not path.endswith('/'):
            fp = open(tgt, 'wb')
            fp.write(zf.read(path))
            fp.close()
    zf.close()
if __name__ == '__main__':
    # When executed directly, run the embedded doctests of this module.
    import doctest
    doctest.testmod()
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
# --- end of duplicated tools/misc.py content; IRC command plugin follows ---
import math
import time
import os
import re
from utils import add_cmd, commands, cmd_list, PY3, PrintError
import logging
def chunks(l, n):
    """Yield successive n-sized chunks from l."""
    start = 0
    while start < len(l):
        yield l[start:start + n]
        start += n
@add_cmd("calc", alias=["math"], minArgs=1)
def calc(bot, event, irc, args):
    """Insert help text here"""
    # Whitelist of names the eval'd expression may reference.
    safe_dict = {
        "sqrt": math.sqrt,
        "pow": math.pow,
        "sin": math.sin,
        "cos": math.cos,
        "tan": math.tan,
        "asin": math.asin,
        "acos": math.acos,
        "atan": math.atan,
        "abs": abs,
        "log": math.log,
        "fact": math.factorial,
        "factorial": math.factorial
    }
    try:
        # String values of the supported constants, substituted textually.
        constant = {
            "e": str(math.e),
            "pi": str(math.pi),
        }
        for c in constant:
            # NOTE(review): m is re-seeded from args on every iteration, so
            # substitutions made for an earlier constant are discarded when
            # the next one is processed -- confirm this is not intended.
            # ")pi" -> ") * 3.14...": insert explicit multiplication.
            m = args.replace("){0}".format(c), ") * {0}".format(constant[c]))
            # "2pi" / "0.5pi" -> "2 * 3.14..." (the [:]? also tolerates a
            # leading colon -- purpose unclear).
            p = re.compile(r'([:]?\d*\.\d+|\d+){0}'.format(c))
            subst = "\\1 * " + constant[c]
            m = re.sub(p, subst, m)
            # Bare constant name -> its numeric value.
            m = re.sub('\\b{0}\\b'.format(c), constant[c], m)
        # NOTE(review): eval with empty __builtins__ is not a real sandbox;
        # also ",d" only formats integers, so float results (e.g. sqrt)
        # raise ValueError and are reported as "Invalid Input" below.
        output = format(eval(m, {"__builtins__": None}, safe_dict), ",d")
        irc.reply(event, "The answer is: {0}".format(output))
    except ArithmeticError:
        irc.reply(event, "\x034Number undefined or too large.")
    except ValueError:
        irc.reply(event, "\x034Invalid Input")
@add_cmd("echo", minArgs=1)
def echo(bot, event, irc, args):
    """Help text"""
    # Repeat the caller's arguments back verbatim.
    irc.reply(event, args)
@add_cmd("ping", minArgs=0)
def ping(bot, event, irc, args):
    """Help text"""
    # Liveness check: always answer with PONG!.
    irc.reply(event, "PONG!")
@add_cmd("join", admin=True, minArgs=1)
def join(bot, event, irc, args):
    """Help text"""
    # Join the channel(s) named in args (admin only).
    irc.join(args)
@add_cmd("part", alias=["leave"], admin=True, minArgs=0)
def part(bot, event, irc, args):
    """Help text"""
    # Leave the named channel when one is given, otherwise the current one.
    if len(args):
        irc.part(args[0])
    else:
        irc.part(event.target)
@add_cmd("ban", admin=True, minArgs=1)
def ban(bot, event, irc, args):
    """Help text"""
    # NOTE(review): passes the raw argument sequence straight to irc.ban with
    # no channel handling -- verify against irc.ban's expected signature.
    irc.ban(args)
@add_cmd("unban", admin=True, minArgs=1)
def unban(bot, event, irc, args):
    """Help text"""
    # NOTE(review): same as ban() above -- the whole args sequence is passed,
    # with no channel handling; confirm irc.unban's signature.
    irc.unban(args)
@add_cmd("op", admin=True, minArgs=0)
def op(bot, event, irc, args):
    """Help text"""
    # Grant +o: to nicks in a named channel, to nicks in the current channel,
    # to a single nick, or to the caller when no args are given.
    if len(args):
        if len(args) > 1 and args[0].find("#") == -1:
            # NOTE(review): the "#"-free first arg is used as the channel
            # here while the "#"-containing case ignores it -- confirm the
            # two branch conditions are not swapped.
            irc.mode(args[0], chunks(args[1:], 4), "+oooo")
        elif len(args) > 1 and args[0].find("#") != -1:
            irc.mode(event.target, chunks(args, 4), "+oooo")
        elif len(args) == 1:
            irc.op(event.target, args[0])
    else:
        irc.op(event.target, event.source.nick)
@add_cmd("deop", admin=True, minArgs=0)
def deop(bot, event, irc, args):
    """Help text"""
    # Revoke +o; branch structure mirrors op() above (same caveat about the
    # "#" tests possibly being swapped).
    if len(args):
        if len(args) > 1 and args[0].find("#") == -1:
            irc.mode(args[0], chunks(args[1:], 4), "-oooo")
        elif len(args) > 1 and args[0].find("#") != -1:
            irc.mode(event.target, chunks(args, 4), "-oooo")
        elif len(args) == 1:
            irc.deop(event.target, args[0])
    else:
        irc.deop(event.target, event.source.nick)
@add_cmd("voice", admin=True, minArgs=0)
def voice(bot, event, irc, args):
    # Grant +v, mirroring op() above.
    if len(args):
        if len(args) > 1 and args[0].find("#") == -1:
            irc.mode(args[0], chunks(args[1:], 4), "+vvvv")
        elif len(args) > 1 and args[0].find("#") != -1:
            irc.mode(event.target, chunks(args, 4), "+vvvv")
        elif len(args) == 1:
            # NOTE(review): looks like a copy/paste bug -- this de-ops the
            # target instead of voicing them (expected irc.voice).
            irc.deop(event.target, args[0])
    else:
        irc.voice(event.target, event.source.nick)
@add_cmd("unvoice", admin=True, minArgs=0)
def unvoice(bot, event, irc, args):
    # Revoke +v, mirroring voice() above.
    if len(args):
        if len(args) > 1 and args[0].find("#") == -1:
            irc.mode(args[0], chunks(args[1:], 4), "-vvvv")
        elif len(args) > 1 and args[0].find("#") != -1:
            irc.mode(event.target, chunks(args, 4), "-vvvv")
        elif len(args) == 1:
            # NOTE(review): the irc.deop call looks like a stray copy/paste
            # remnant -- it also de-ops the target before unvoicing them.
            irc.deop(event.target, args[0])
            irc.unvoice(event.target, args[0])
    else:
        irc.unvoice(event.target, event.source.nick)
@add_cmd("nick", owner=True, minArgs=1)
def nick(bot, event, irc, args):
    # NOTE(review): args is presumably the argument sequence; storing and
    # sending it whole (rather than args[0]) looks wrong -- confirm.
    bot.config['nickname'] = args
    irc.nick(args)
@add_cmd("log.level", admin=True, minArgs=1)
def logLevel(bot, event, irc, args):
    # Map the first argument onto a logging level and apply it to the root
    # logger.
    if args[0] == "debug":
        level = logging.DEBUG
    elif args[0] == "info":
        level = logging.INFO
    elif args[0] == "error":
        level = logging.ERROR
    elif args[0] == "warning":
        level = logging.WARNING
    elif args[0] == "critical":
        level = logging.CRITICAL
    else:
        level = logging.INFO # Default logging level
        # NOTE(review): an unknown level name reports an error yet still
        # resets the root logger to INFO below -- confirm that is intended.
        irc.reply(event, "Invalid log level {0}".format(args))
    logging.getLogger().setLevel(level)
@add_cmd("quit", admin=True, minArgs=0)
def Quit(bot, event, irc, args):
    """(\x02quit <text>\x0F) -- Exits the bot with the QUIT message <text>."""
    # Fall back to the project URL when no quit message is supplied.
    args = "zIRC - https://github.com/itslukej/zirc" if not args else args
    # NOTE(review): when the fallback STRING is used, " ".join(args) joins
    # its individual characters with spaces -- looks like a bug.
    irc.quit(" ".join(args))
    time.sleep(1)
    # Hard exit after giving the QUIT line a second to flush.
    os._exit(0)
@add_cmd("help", minArgs=0)
def Help(bot, event, irc, args):
    """Help text"""
    """Help text"""
    # NOTE(review): the second bare string above is a no-op statement; the
    # `aliases` name below is never defined (NameError when reached); empty
    # args raises an uncaught IndexError; and the try/else shape replies a
    # second time on success -- this block needs a rework.
    try:
        irc.reply(event, "Usage: {0}".format(commands[args[0]]['function'].__doc__))
    except KeyError:
        try:
            irc.reply(event, "Usage: {0}".format(aliases[args[0]]['function'].__doc__))
        except KeyError:
            irc.reply(event, "Invalid command {0}".format(args[0]))
    else:
        irc.reply(event, "Usage: {0}".format(commands["help"]['function'].__doc__))
@add_cmd("list", minArgs=0, alias=["ls"])
def List(bot, event, irc, args):
    """Help text"""
    # Send the caller an alphabetized, comma-separated command list.
    irc.reply(event, ", ".join(sorted(cmd_list)))
@add_cmd("reload", admin=True)
def Reload(bot, event, irc, args):
    """Help text"""
    # On Python 3 the reload builtin moved to importlib.
    if PY3:
        reload = __import__("importlib").reload
    if args[0] in ['commands', 'utils']:
        # NOTE(review): args[0] raises IndexError when no argument is given
        # (no minArgs on the decorator), and reload() is handed the argument
        # sequence rather than a module object -- confirm intent.
        try:
            reload(args)
            irc.reply(event, "Reloaded {0}".format(args))
        except ImportError:
            PrintError()
    else:
        irc.reply(event, "Wrong module name")
Fix several commands: ban/unban chunk handling, nick argument use, quit message joining, and reload argument checking
import math
import time
import os
import re
from utils import add_cmd, commands, cmd_list, PY3, PrintError
import logging
def chunks(l, n):
    """Yield successive n-sized chunks from l."""
    # Walk the sequence a window at a time; the final chunk may be shorter.
    start = 0
    while start < len(l):
        yield l[start:start + n]
        start += n
@add_cmd("calc", alias=["math"], minArgs=1)
def calc(bot, event, irc, args):
    """(\x02calc <expression>\x0F) -- Evaluates a mathematical expression."""
    # Names the evaluated expression may reference; eval() below runs with an
    # empty __builtins__ so anything else is rejected.
    safe_dict = {
        "sqrt": math.sqrt,
        "pow": math.pow,
        "sin": math.sin,
        "cos": math.cos,
        "tan": math.tan,
        "asin": math.asin,
        "acos": math.acos,
        "atan": math.atan,
        "abs": abs,
        "log": math.log,
        "fact": math.factorial,
        "factorial": math.factorial
    }
    try:
        constant = {
            "e": str(math.e),
            "pi": str(math.pi),
        }
        # Textually expand constants, inserting explicit multiplication for
        # forms like ")pi" and "2pi".  Accumulate on m (bug fix: the old loop
        # reset m to args on every iteration, discarding the substitutions
        # made for earlier constants).
        m = args
        for c in constant:
            m = m.replace("){0}".format(c), ") * {0}".format(constant[c]))
            p = re.compile(r'([:]?\d*\.\d+|\d+){0}'.format(c))
            subst = "\\1 * " + constant[c]
            m = re.sub(p, subst, m)
            m = re.sub('\\b{0}\\b'.format(c), constant[c], m)
        # SECURITY NOTE: eval() with empty __builtins__ is not a real
        # sandbox; input comes from IRC users, so keep safe_dict tight.
        result = eval(m, {"__builtins__": None}, safe_dict)
        # Bug fix: ",d" only accepts integers, so float results (sqrt, sin,
        # pi, ...) used to raise ValueError and be reported as invalid input.
        if isinstance(result, int):
            output = format(result, ",d")
        else:
            output = format(result, ",g")
        irc.reply(event, "The answer is: {0}".format(output))
    except ArithmeticError:
        irc.reply(event, "\x034Number undefined or too large.")
    except (SyntaxError, NameError, TypeError, ValueError):
        # Malformed expressions and unknown names are user errors, not
        # crashes (SyntaxError/NameError were previously uncaught).
        irc.reply(event, "\x034Invalid Input")
@add_cmd("echo", minArgs=1)
def echo(bot, event, irc, args):
    """Help text"""
    # Repeat the caller's arguments back verbatim.
    irc.reply(event, args)
@add_cmd("ping", minArgs=0)
def ping(bot, event, irc, args):
    """Help text"""
    # Liveness check: always answer with PONG!.
    irc.reply(event, "PONG!")
@add_cmd("join", admin=True, minArgs=1)
def join(bot, event, irc, args):
    """Help text"""
    # Join the channel(s) named in args (admin only).
    irc.join(args)
@add_cmd("part", alias=["leave"], admin=True, minArgs=0)
def part(bot, event, irc, args):
    """Help text"""
    # Leave the named channel when one is given, otherwise the current one.
    target = args[0] if len(args) else event.target
    irc.part(target)
@add_cmd("ban", admin=True, minArgs=1)
def ban(bot, event, irc, args):
    """Help text"""
    if len(args):
        if len(args) > 1:
            # Bug fix: chunks() returns a generator, which has no len() --
            # the old code raised TypeError here.  Materialize it first.
            if args[0].find("#") == -1:
                chunked = list(chunks(args[1:], 4))
                # NOTE(review): "b" * len(chunked) scales with the number of
                # chunks, not the nicks per chunk -- confirm irc.mode's
                # expected contract.
                irc.mode(args[0], chunked, "+" + "b" * len(chunked))
            else:
                chunked = list(chunks(args, 4))
                irc.mode(event.target, chunked, "+" + "b" * len(chunked))
        else:
            irc.ban(event.target, args[0])
    else:
        irc.ban(event.target, event.source.nick)
@add_cmd("unban", admin=True, minArgs=1)
def unban(bot, event, irc, args):
    """Help text"""
    if len(args):
        if len(args) > 1:
            # Bug fix: chunks() returns a generator, which has no len() --
            # the old code raised TypeError here.  Materialize it first.
            if args[0].find("#") == -1:
                chunked = list(chunks(args[1:], 4))
                # NOTE(review): "b" * len(chunked) scales with the number of
                # chunks, not the nicks per chunk -- confirm irc.mode's
                # expected contract.
                irc.mode(args[0], chunked, "-" + "b" * len(chunked))
            else:
                chunked = list(chunks(args, 4))
                irc.mode(event.target, chunked, "-" + "b" * len(chunked))
        else:
            irc.unban(event.target, args[0])
    else:
        irc.unban(event.target, event.source.nick)
@add_cmd("op", admin=True, minArgs=0)
def op(bot, event, irc, args):
    """Help text"""
    # Guard-clause form: no args ops the caller, one arg ops that nick,
    # several args are chunked into mode commands.
    if not args:
        irc.op(event.target, event.source.nick)
    elif len(args) == 1:
        irc.op(event.target, args[0])
    elif "#" not in args[0]:
        irc.mode(args[0], chunks(args[1:], 4), "+oooo")
    else:
        irc.mode(event.target, chunks(args, 4), "+oooo")
@add_cmd("deop", admin=True, minArgs=0)
def deop(bot, event, irc, args):
    """Help text"""
    # Guard-clause form mirroring op(), but revoking +o.
    if not args:
        irc.deop(event.target, event.source.nick)
    elif len(args) == 1:
        irc.deop(event.target, args[0])
    elif "#" not in args[0]:
        irc.mode(args[0], chunks(args[1:], 4), "-oooo")
    else:
        irc.mode(event.target, chunks(args, 4), "-oooo")
@add_cmd("voice", admin=True, minArgs=0)
def voice(bot, event, irc, args):
    # Grant +v, mirroring op() above.
    if len(args):
        if len(args) > 1 and args[0].find("#") == -1:
            irc.mode(args[0], chunks(args[1:], 4), "+vvvv")
        elif len(args) > 1 and args[0].find("#") != -1:
            irc.mode(event.target, chunks(args, 4), "+vvvv")
        elif len(args) == 1:
            # Bug fix: this branch called irc.deop(), de-opping the user
            # instead of voicing them.
            irc.voice(event.target, args[0])
    else:
        irc.voice(event.target, event.source.nick)
@add_cmd("unvoice", admin=True, minArgs=0)
def unvoice(bot, event, irc, args):
    # Revoke +v, mirroring voice() above.
    if len(args):
        if len(args) > 1 and args[0].find("#") == -1:
            irc.mode(args[0], chunks(args[1:], 4), "-vvvv")
        elif len(args) > 1 and args[0].find("#") != -1:
            irc.mode(event.target, chunks(args, 4), "-vvvv")
        elif len(args) == 1:
            # Bug fix: removed a stray irc.deop() call that also de-opped
            # the target before unvoicing them.
            irc.unvoice(event.target, args[0])
    else:
        irc.unvoice(event.target, event.source.nick)
@add_cmd("nick", owner=True, minArgs=1)
def nick(bot, event, irc, args):
    # Persist the new nickname in the config, then ask the server to switch.
    new_nick = args[0]
    bot.config['nickname'] = new_nick
    irc.nick(new_nick)
@add_cmd("log.level", admin=True, minArgs=1)
def logLevel(bot, event, irc, args):
    """(\x02log.level <debug|info|warning|error|critical>\x0F) -- Sets the root logger level."""
    # Dispatch table instead of the if/elif chain.
    levels = {
        "debug": logging.DEBUG,
        "info": logging.INFO,
        "error": logging.ERROR,
        "warning": logging.WARNING,
        "critical": logging.CRITICAL,
    }
    level = levels.get(args[0])
    if level is None:
        # Bug fix: an unknown level name used to report the error but still
        # reset the root logger to INFO; now the current level is untouched.
        irc.reply(event, "Invalid log level {0}".format(args))
        return
    logging.getLogger().setLevel(level)
@add_cmd("quit", admin=True, minArgs=0)
def Quit(bot, event, irc, args):
    """(\x02quit <text>\x0F) -- Exits the bot with the QUIT message <text>."""
    # Use the supplied words as the quit message, or the project URL.
    if args:
        message = " ".join(args)
    else:
        message = "zIRC - https://github.com/itslukej/zirc"
    irc.quit(message)
    # Give the QUIT line a second to flush, then hard-exit the process.
    time.sleep(1)
    os._exit(0)
@add_cmd("help", minArgs=0)
def Help(bot, event, irc, args):
    """(\x02help [<command>]\x0F) -- Shows the usage of <command>, or of help itself."""
    # Bug fixes: the old body had a duplicate docstring statement, raised an
    # uncaught IndexError when called with no arguments, referenced an
    # undefined `aliases` name (NameError) in its fallback, and its try/else
    # shape replied a second time whenever the lookup succeeded.
    if not args:
        irc.reply(event, "Usage: {0}".format(commands["help"]['function'].__doc__))
        return
    try:
        irc.reply(event, "Usage: {0}".format(commands[args[0]]['function'].__doc__))
    except KeyError:
        irc.reply(event, "Invalid command {0}".format(args[0]))
@add_cmd("list", minArgs=0, alias=["ls"])
def List(bot, event, irc, args):
    """Help text"""
    # Send the caller an alphabetized, comma-separated command list.
    names = sorted(cmd_list)
    irc.reply(event, ", ".join(names))
@add_cmd("reload", admin=True)
def Reload(bot, event, irc, args):
    """(\x02reload <commands|utils>\x0F) -- Reloads one of the bot's own modules."""
    if len(args) >= 1:
        if PY3:
            # The reload builtin moved to importlib on Python 3.
            reload = __import__("importlib").reload
        if args[0] in ['commands', 'utils']:
            try:
                # Bug fix: reload() needs a module object, not the raw
                # argument list; look the module up by name.
                import sys
                reload(sys.modules[args[0]])
                irc.reply(event, "Reloaded {0}".format(args[0]))
            except (ImportError, KeyError):
                # KeyError: module name not yet imported into sys.modules.
                PrintError()
        else:
            irc.reply(event, "Wrong module name")
|
""" Tests for part of the numpy module. """
import unittest
import numpy
from pythran.typing import List, NDArray, Tuple
from pythran.tests import TestEnv
@TestEnv.module
class TestNumpyFunc3(TestEnv):
"""
This module includes tests for multiple numpy module function.
Tested functions are:
- numpy.dot
- numpy.digitize
- numpy.diff
- numpy.trace
- numpy.tri
- numpy.trim_zeros
- numpy.triu
- numpy.tril
- numpy.unique
- numpy.unwrap
and various combinations of +/-/** and trigonometric operations.
"""
def test_dot0(self):
self.run_test("def np_dot0(x, y): from numpy import dot; return dot(x, y)", 2, 3, np_dot0=[int, int])
def test_dot1(self):
self.run_test("def np_dot1(x): from numpy import dot ; y = [2, 3] ; return dot(x,y)", [2, 3], np_dot1=[List[int]])
def test_dot2(self):
self.run_test("def np_dot2(x): from numpy import dot ; y = [2j, 3j] ; return dot(x,y)", [2j, 3j], np_dot2=[List[complex]])
def test_dot3(self):
self.run_test("def np_dot3(x): from numpy import array ; y = array([2, 3]) ; return y.dot(x+x)", numpy.array([2, 3]), np_dot3=[NDArray[int,:]])
def test_dot4a(self):
self.run_test("def np_dot4a(x): from numpy import dot ; y = [2, 3] ; return dot(x,y)", numpy.array([2, 3]), np_dot4a=[NDArray[int,:]])
def test_dot4b(self):
self.run_test("def np_dot4b(x): from numpy import dot ; y = [2., 3.] ; return dot(x[1:],y)", numpy.array([2, 3, 4], dtype=numpy.float32), np_dot4b=[NDArray[numpy.float32,:]])
def test_dot4c(self):
self.run_test("def np_dot4c(x): from numpy import dot ; return dot(x[1:],x[:-1])", numpy.array([2, 3, 4], dtype=numpy.float64), np_dot4c=[NDArray[float,:]])
def test_dot4d(self):
self.run_test("def np_dot4d(x): from numpy import dot ; return dot(x, x)", numpy.array([2j, 3j, 4.]), np_dot4d=[NDArray[complex,:]])
def test_dot4e(self):
self.run_test("def np_dot4e(x): from numpy import dot ; y = (2.j, 3.j) ; return dot(x[:-1],y)", numpy.array([2.j, 3.j, 4.j], dtype=numpy.complex64), np_dot4e=[NDArray[numpy.complex64,:]])
def test_dot4f(self):
self.run_test("def np_dot4f(x): from numpy import dot ; y = (1., 2., 3.) ; return dot(2*x, y)", numpy.array([2., 3., 4.]), np_dot4f=[NDArray[float,:]])
def test_dot5(self):
""" Check for dgemm version of dot. """
self.run_test("""
def np_dot5(x, y):
from numpy import dot
return dot(x,y)""",
[[1., 2., 3.], [4., 5., 6.], [7., 8., 9.]],
[[10., 11., 12.], [13., 14., 15.], [16., 17., 18.]],
np_dot5=[List[List[float]], List[List[float]]])
def test_dot6(self):
""" Check for dot with "no blas type". """
self.run_test("""
def np_dot6(x, y):
from numpy import dot
return dot(x,y)""",
numpy.arange(9).reshape(3, 3),
numpy.arange(9, 18).reshape(3, 3),
np_dot6=[NDArray[int,:,:], NDArray[int,:,:]])
def test_dot7(self):
""" Check for dgemm version of dot with rectangular shape. """
self.run_test("""
def np_dot7(x, y):
from numpy import dot
return dot(x,y)""",
[[1., 2., 3.], [4., 5., 6.], [7., 8., 9.]],
[[10., 11., 12.], [13., 14., 15.], [16., 17., 18.]],
np_dot7=[List[List[float]], List[List[float]]])
def test_dot8(self):
""" Check for dot with "no blas type" with rectangulare shape. """
self.run_test("""
def np_dot8(x, y):
from numpy import dot
return dot(x,y)""",
numpy.arange(6).reshape(3, 2),
numpy.arange(6, 12).reshape(2, 3),
np_dot8=[NDArray[int,:,:], NDArray[int,:,:]])
def test_dot9(self):
""" Check for gemv version of dot. """
self.run_test("""
def np_dot9(x, y):
from numpy import dot
return dot(x,y)""",
numpy.arange(9.).reshape(3, 3).tolist(),
[float(x) for x in range(9, 12)],
np_dot9=[List[List[float]], List[float]])
def test_dot10(self):
""" Check for dot gemv with "no blas type". """
self.run_test("""
def np_dot10(x, y):
from numpy import dot
return dot(x,y)""",
numpy.arange(9).reshape(3, 3),
numpy.arange(9, 12),
np_dot10=[NDArray[int,:,:], NDArray[int,:]])
def test_dot11(self):
""" Check for gemv version of dot with rectangular shape. """
self.run_test("""
def np_dot11(x, y):
from numpy import dot
return dot(x,y)""",
numpy.arange(6.).reshape(3, 2).tolist(),
[float(x) for x in range(6, 8)],
np_dot11=[List[List[float]], List[float]])
def test_dot12(self):
""" Check for dot gemv with "no blas type" with rectangulare shape. """
self.run_test("""
def np_dot12(x, y):
from numpy import dot
return dot(x,y)""",
numpy.arange(6).reshape(3, 2),
numpy.arange(6, 8),
np_dot12=[NDArray[int,:,:], NDArray[int,:]])
def test_dot13(self):
""" Check for gevm version of dot. """
self.run_test("""
def np_dot13(x, y):
from numpy import dot
return dot(x,y)""",
[float(x) for x in range(9, 12)],
numpy.arange(9.).reshape(3, 3).tolist(),
np_dot13=[List[float], List[List[float]]])
def test_dot14(self):
""" Check for dot gevm with "no blas type". """
self.run_test("""
def np_dot14(x, y):
from numpy import dot
return dot(x,y)""",
numpy.arange(9, 12),
numpy.arange(9).reshape(3, 3),
np_dot14=[NDArray[int,:], NDArray[int,:,:]])
def test_dot15(self):
""" Check for gevm version of dot with rectangular shape. """
self.run_test("""
def np_dot15(x, y):
from numpy import dot
return dot(x,y)""",
[float(x) for x in range(6, 9)],
numpy.arange(6.).reshape(3, 2).tolist(),
np_dot15=[List[float], List[List[float]]])
def test_dot16(self):
""" Check for dot gevm with "no blas type" with rectangular shape. """
self.run_test("""
def np_dot16(x, y):
from numpy import dot
return dot(x,y)""",
numpy.arange(6.).reshape(2, 3),
numpy.arange(18.).reshape(3,6),
np_dot16=[NDArray[float,:,:], NDArray[float,:,:]])
def test_dot17(self):
""" Check for dot gevm with "no blas type" with rectangular shape,
first arg transposed."""
self.run_test("""
def np_dot17(x, y):
from numpy import dot
return dot(x.T,y)""",
numpy.arange(6.).reshape(3, 2),
numpy.arange(18.).reshape(3,6),
np_dot17=[NDArray[float,:,:], NDArray[float,:,:]])
def test_dot18(self):
""" Check for dot gevm with "no blas type" with rectangular shape,
second arg transposed"""
self.run_test("""
def np_dot18(x, y):
from numpy import dot
return dot(x,y.T)""",
numpy.arange(6.).reshape(2, 3),
numpy.arange(18.).reshape(6,3),
np_dot18=[NDArray[float,:,:], NDArray[float,:,:]])
def test_dot19(self):
""" Check for dot gevm with "no blas type" with rectangular shape,
both args transposed"""
self.run_test("""
def np_dot19(x, y):
from numpy import dot
return dot(x.T,y.T)""",
numpy.array(numpy.arange(6.).reshape(3, 2)),
numpy.array(numpy.arange(18.).reshape(6,3)),
np_dot19=[NDArray[float,:,:], NDArray[float,:,:]])
def test_dot20(self):
''' Mixed type: matrix x matrix'''
self.run_test("""
def np_dot20(x, y):
from numpy import dot
return dot(x, y)""",
numpy.array(numpy.arange(6.).reshape(2, 3),
dtype=numpy.float32),
numpy.array(numpy.arange(18.).reshape(3,6),
dtype=numpy.float64),
np_dot20=[NDArray[numpy.float32,:,:],
NDArray[numpy.float64,:,:]])
def test_dot21(self):
''' Mixed type: matrix x vector'''
self.run_test("""
def np_dot21(x, y):
from numpy import dot
return dot(x, y)""",
numpy.array(numpy.arange(6.).reshape(2, 3),
dtype=numpy.float32),
numpy.array(numpy.arange(3.).reshape(3),
dtype=numpy.float64),
np_dot21=[NDArray[numpy.float32,:,:],
NDArray[numpy.float64,:]])
def test_dot22(self):
''' Mixed type: matrix x vector'''
self.run_test("""
def np_dot22(x, y):
from numpy import dot
return dot(y, x)""",
numpy.array(numpy.arange(6.).reshape(3, 2),
dtype=numpy.float32),
numpy.array(numpy.arange(3.).reshape(3),
dtype=numpy.float64),
np_dot22=[NDArray[numpy.float32,:,:],
NDArray[numpy.float64,:]])
def test_dot23(self):
''' Nd x 1d, N > 2'''
self.run_test("""
def np_dot23(x, y):
from numpy import dot
return dot(x, y)""",
numpy.array(numpy.arange(24.).reshape(4, 3, 2),
dtype=numpy.float32),
numpy.array(numpy.arange(2.).reshape(2),
dtype=numpy.float64),
np_dot23=[NDArray[numpy.float32,:,:,:],
NDArray[numpy.float64,:]])
@unittest.skip("not implemented yet")
def test_dot24(self):
''' Nd x 1d, N > 2'''
self.run_test("""
def np_dot24(x, y):
from numpy import dot
return dot(x, y)""",
numpy.array(numpy.arange(24.).reshape(4, 3, 2),
dtype=numpy.float32),
numpy.array(numpy.arange(24.).reshape(2,3,2,2),
dtype=numpy.float64),
np_dot24=[NDArray[numpy.float32,:,:,:],
NDArray[numpy.float64,:,:,:,:]])
def test_vdot0(self):
self.run_test("""
def np_vdot0(x, y):
from numpy import vdot
return vdot(x, y)""",
numpy.array(numpy.arange(6.).reshape(3, 2),
dtype=numpy.float32),
numpy.array(numpy.arange(6.).reshape(6),
dtype=numpy.float32),
np_vdot0=[NDArray[numpy.float32,:,:],
NDArray[numpy.float32,:]])
def test_vdot1(self):
self.run_test("""
def np_vdot1(x, y):
from numpy import vdot
return vdot(x, y)""",
numpy.array(numpy.arange(6.).reshape(3, 2),
dtype=numpy.float32),
numpy.array(numpy.arange(6.).reshape(6),
dtype=numpy.float64),
np_vdot1=[NDArray[numpy.float32,:,:],
NDArray[numpy.float64,:]])
def test_vdot2(self):
self.run_test("""
def np_vdot2(x, y):
from numpy import vdot
return vdot(x, y)""",
numpy.array(numpy.arange(6.).reshape(3, 2),
dtype=numpy.complex128),
numpy.array(numpy.arange(6.).reshape(6),
dtype=numpy.complex128),
np_vdot2=[NDArray[numpy.complex128,:,:],
NDArray[numpy.complex128,:]])
def test_vdot3(self):
self.run_test("""
def np_vdot3(x, y):
from numpy import vdot
return vdot(x, y)""",
numpy.array(numpy.arange(6.),
dtype=numpy.complex128),
numpy.array(numpy.arange(6.),
dtype=numpy.complex128) * -1j,
np_vdot3=[NDArray[numpy.complex128,:],
NDArray[numpy.complex128,:]])
def test_digitize0(self):
self.run_test("def np_digitize0(x): from numpy import array, digitize ; bins = array([0.0, 1.0, 2.5, 4.0, 10.0]) ; return digitize(x, bins)", numpy.array([0.2, 6.4, 3.0, 1.6]), np_digitize0=[NDArray[float,:]])
def test_digitize1(self):
self.run_test("def np_digitize1(x): from numpy import array, digitize ; bins = array([ 10.0, 4.0, 2.5, 1.0, 0.0]) ; return digitize(x, bins)", numpy.array([0.2, 6.4, 3.0, 1.6]), np_digitize1=[NDArray[float,:]])
def test_diff0(self):
self.run_test("def np_diff0(x): from numpy import diff; return diff(x)", numpy.array([1, 2, 4, 7, 0]), np_diff0=[NDArray[int,:]])
def test_diff1(self):
self.run_test("def np_diff1(x): from numpy import diff; return diff(x,2)", numpy.array([1, 2, 4, 7, 0]), np_diff1=[NDArray[int,:]])
def test_diff2(self):
self.run_test("def np_diff2(x): from numpy import diff; return diff(x)", numpy.array([[1, 3, 6, 10], [0, 5, 6, 8]]), np_diff2=[NDArray[int,:,:]])
def test_diff3(self):
self.run_test("def np_diff3(x): from numpy import diff; return diff(x,2)", numpy.array([[1, 3, 6, 10], [0, 5, 6, 8]]), np_diff3=[NDArray[int,:,:]])
def test_diff4(self):
self.run_test("def np_diff4(x): from numpy import diff; return diff(x + x)", numpy.array([1, 2, 4, 7, 0]), np_diff4=[NDArray[int,:]])
def test_diff5(self):
self.run_test("def np_diff5(x): from numpy import diff; return diff(x + x, 2, axis=0)", numpy.arange(100).reshape(10, 10)*2, np_diff5=[NDArray[int,:,:]])
def test_diff6(self):
self.run_test("def np_diff6(x): from numpy import diff; return diff(x, axis=0)", numpy.arange(100).reshape(10, 10)*2, np_diff6=[NDArray[int,:,:]])
def test_diff7(self):
self.run_test("def np_diff7(x): from numpy import diff; return diff(x, axis=0)", numpy.arange(300).reshape(3, 10, 10)*2, np_diff7=[NDArray[int,:,:,:]])
def test_diff8(self):
self.run_test("def np_diff8(x): from numpy import diff; return diff(x, axis=1)", numpy.arange(300).reshape(3, 10, 10)*2, np_diff8=[NDArray[int,:,:,:]])
def test_diff9(self):
self.run_test("def np_diff9(x): from numpy import diff; return diff(x, axis=2)", numpy.arange(300).reshape(3, 10, 10)*2, np_diff9=[NDArray[int,:,:,:]])
def test_trace0(self):
self.run_test("def np_trace0(x): return x.trace()", numpy.arange(9).reshape(3,3), np_trace0=[NDArray[int,:,:]])
def test_trace1(self):
self.run_test("def np_trace1(x): from numpy import trace; return trace(x, 1)", numpy.arange(12).reshape(3,4), np_trace1=[NDArray[int,:,:]])
def test_trace2(self):
self.run_test("def np_trace2(x): from numpy import trace; return trace(x, 1)", numpy.arange(12).reshape(3,4), np_trace2=[NDArray[int,:,:]])
def test_tri0(self):
self.run_test("def np_tri0(a): from numpy import tri; return tri(a)", 3, np_tri0=[int])
def test_tri1(self):
self.run_test("def np_tri1(a): from numpy import tri; return tri(a, 4)", 3, np_tri1=[int])
def test_tri2(self):
self.run_test("def np_tri2(a): from numpy import tri; return tri(a, 3, -1)", 4, np_tri2=[int])
def test_tri3(self):
self.run_test("def np_tri3(a): from numpy import tri, int64; return tri(a, 5, 1, int64)", 3, np_tri3=[int])
def test_trim_zeros0(self):
self.run_test("""
def np_trim_zeros0(x):
from numpy import array, trim_zeros
return trim_zeros(x)""", numpy.array((0, 0, 0, 1, 2, 3, 0, 2, 1, 0)), np_trim_zeros0=[NDArray[int,:]])
def test_trim_zeros1(self):
self.run_test("""
def np_trim_zeros1(x):
from numpy import array, trim_zeros
return trim_zeros(x, "f")""", numpy.array((0, 0, 0, 1, 2, 3, 0, 2, 1, 0)), np_trim_zeros1=[NDArray[int,:]])
def test_trim_zeros2(self):
self.run_test("""
def np_trim_zeros2(x):
from numpy import trim_zeros
return trim_zeros(x, "b")""", numpy.array((0, 0, 0, 1, 2, 3, 0, 2, 1, 0)), np_trim_zeros2=[NDArray[int,:]])
def test_triu0(self):
self.run_test("def np_triu0(x): from numpy import triu; return triu(x)", numpy.arange(12).reshape(3,4), np_triu0=[NDArray[int,:,:]])
def test_triu1(self):
self.run_test("def np_triu1(x): from numpy import triu; return triu(x, 1)", numpy.arange(12).reshape(3,4), np_triu1=[NDArray[int,:,:]])
def test_triu2(self):
self.run_test("def np_triu2(x): from numpy import triu; return triu(x, -1)", numpy.arange(12).reshape(3,4), np_triu2=[NDArray[int,:,:]])
def test_tril0(self):
self.run_test("def np_tril0(x): from numpy import tril; return tril(x)", numpy.arange(12).reshape(3,4), np_tril0=[NDArray[int,:,:]])
def test_tril1(self):
self.run_test("def np_tril1(x): from numpy import tril; return tril(x, 1)", numpy.arange(12).reshape(3,4), np_tril1=[NDArray[int,:,:]])
def test_tril2(self):
self.run_test("def np_tril2(x): from numpy import tril; return tril(x, -1)", numpy.arange(12).reshape(3,4), np_tril2=[NDArray[int,:,:]])
def test_union1d(self):
self.run_test("def np_union1d(x): from numpy import arange, union1d ; y = arange(1,4); return union1d(x, y)", numpy.arange(-1,2), np_union1d=[NDArray[int,:]])
def test_unique0(self):
self.run_test("def np_unique0(x): from numpy import unique ; return unique(x)", numpy.array([1,1,2,2,2,1,5]), np_unique0=[NDArray[int,:]])
def test_unique1(self):
self.run_test("def np_unique1(x): from numpy import unique ; return unique(x)", numpy.array([[1,2,2],[2,1,5]]), np_unique1=[NDArray[int,:,:]])
def test_unique2(self):
self.run_test("def np_unique2(x): from numpy import unique ; return unique(x, True)", numpy.array([1,1,2,2,2,1,5]), np_unique2=[NDArray[int,:]])
def test_unique3(self):
self.run_test("def np_unique3(x): from numpy import unique ; return unique(x, True, True)", numpy.array([1,1,2,2,2,1,5]), np_unique3=[NDArray[int,:]])
def test_unique4(self):
self.run_test("def np_unique4(x): from numpy import unique ; return unique(x, True, True, True)", numpy.array([1,1,2,2,2,1,5]), np_unique4=[NDArray[int,:]])
def test_unique5(self):
self.run_test("def np_unique5(x): from numpy import unique ; return unique(x, False)", numpy.array([1,1,2,2,2,1,5]), np_unique5=[NDArray[int,:]])
def test_unique6(self):
self.run_test("def np_unique6(x): from numpy import unique ; return unique(x, False, True)", numpy.array([1,1,2,2,2,1,5]), np_unique6=[NDArray[int,:]])
def test_unique7(self):
self.run_test("def np_unique7(x): from numpy import unique ; return unique(x, False, False)", numpy.array([1,1,2,2,2,1,5]), np_unique7=[NDArray[int,:]])
def test_unique8(self):
self.run_test("def np_unique8(x): from numpy import unique ; return unique(x, return_inverse=True)", numpy.array([1,1,2,2,2,1,5]), np_unique8=[NDArray[int,:]])
def test_unique9(self):
self.run_test("def np_unique9(x): from numpy import unique ; return unique(x, True, False)", numpy.array([1,1,2,2,2,1,5]), np_unique9=[NDArray[int,:]])
def test_unique10(self):
self.run_test("def np_unique10(x): from numpy import unique ; return unique(x, True, True, False)", numpy.array([1,1,2,2,2,1,5]), np_unique10=[NDArray[int,:]])
def test_unique11(self):
self.run_test("def np_unique11(x): from numpy import unique ; return unique(x, True, False, False)", numpy.array([1,1,2,2,2,1,5]), np_unique11=[NDArray[int,:]])
def test_unique12(self):
self.run_test("def np_unique12(x): from numpy import unique ; return unique(x, True, False, True)", numpy.array([1,1,2,2,2,1,5]), np_unique12=[NDArray[int,:]])
def test_unique13(self):
self.run_test("def np_unique13(x): from numpy import unique ; return unique(x, False, True, False)", numpy.array([1,1,2,2,2,1,5]), np_unique13=[NDArray[int,:]])
def test_unique14(self):
self.run_test("def np_unique14(x): from numpy import unique ; return unique(x, False, True, True)", numpy.array([1,1,2,2,2,1,5]), np_unique14=[NDArray[int,:]])
def test_unique15(self):
self.run_test("def np_unique15(x): from numpy import unique ; return unique(x, False, False, False)", numpy.array([1,1,2,2,2,1,5]), np_unique15=[NDArray[int,:]])
def test_unique16(self):
self.run_test("def np_unique16(x): from numpy import unique ; return unique(x, False, False, True)", numpy.array([1,1,2,2,2,1,5]), np_unique16=[NDArray[int,:]])
def test_unique17(self):
self.run_test("def np_unique17(x): from numpy import unique ; return unique(x, return_counts=1)", numpy.array([1,1,2,2,2,1,5]), np_unique17=[NDArray[int,:]])
def test_unwrap0(self):
self.run_test("def np_unwrap0(x): from numpy import unwrap, pi ; x[:3] += 2.6*pi; return unwrap(x)", numpy.arange(6, dtype=float), np_unwrap0=[NDArray[float,:]])
def test_unwrap1(self):
self.run_test("def np_unwrap1(x): from numpy import unwrap, pi ; x[:3] += 2*pi; return unwrap(x, 4)", numpy.arange(6, dtype=float), np_unwrap1=[NDArray[float,:]])
def test_unwrap2(self):
self.run_test("def np_unwrap2(x): from numpy import unwrap, pi ; x[:3] -= 2*pi; return unwrap(x, 4)", numpy.arange(6, dtype=float), np_unwrap2=[NDArray[float,:]])
def test_unravel_index_0(self):
self.run_test("def np_unravel_index0(x, y): from numpy import unravel_index; return unravel_index(x, y)", 1621, (6, 7, 8, 9), np_unravel_index0=[int, Tuple[int, int, int, int]])
def test_unravel_index_1(self):
self.run_test("def np_unravel_index1(x, y): from numpy import unravel_index; return unravel_index(x, y, 'F')", 1621, (6, 7, 8, 9), np_unravel_index1=[int, Tuple[int, int, int, int]])
def test_copyto_0(self):
self.run_test("def np_copyto0(x, y): from numpy import copyto; copyto(x, y); return x",
numpy.array([1,2]), numpy.array([3,4]),
np_copyto0=[NDArray[int, :], NDArray[int, :]])
def test_copyto_1(self):
self.run_test("def np_copyto1(x, y): from numpy import copyto; copyto(x, y); return x",
numpy.array([[1,2], [7, 8]]), numpy.array([3,4]),
np_copyto1=[NDArray[int, :, :], NDArray[int, :]])
def test_numpy_pow0(self):
self.run_test('def numpy_pow0(a): return a ** 2',
numpy.arange(100).reshape((10, 10)),
numpy_pow0=[NDArray[int,:,:]])
def test_numpy_pow1(self):
self.run_test('def numpy_pow1(a): return a ** 2',
numpy.arange(100, dtype=float).reshape((10, 10)),
numpy_pow1=[NDArray[float,:,:]])
def test_numpy_pow2(self):
self.run_test('def numpy_pow2(a): return a ** 2.2',
numpy.arange(100, dtype=float).reshape((10, 10)),
numpy_pow2=[NDArray[float,:,:]])
def test_numpy_pow3(self):
self.run_test('def numpy_pow3(a): return a ** -0.2',
numpy.arange(100, dtype=int),
numpy_pow3=[NDArray[int,:]])
def test_add0(self):
self.run_test("def np_add0(a, b): return a + b", numpy.ones(10), numpy.ones(10), np_add0=[NDArray[float,:], NDArray[float,:]])
# --- Elementwise +/- combinations on 1-D float arrays.
# Each run_test compiles the kernel string with pythran and checks its output
# against the CPython/numpy result for the given arguments.
def test_add1(self):
    self.run_test("def np_add1(a, b): return a + b + a", numpy.ones(10), numpy.ones(10), np_add1=[NDArray[float,:], NDArray[float,:]])
def test_add2(self):
    self.run_test("def np_add2(a, b): return a + b + 1", numpy.ones(10), numpy.ones(10), np_add2=[NDArray[float,:], NDArray[float,:]])
def test_add3(self):
    self.run_test("def np_add3(a, b): return 1. + a + b + 1.", numpy.ones(10), numpy.ones(10), np_add3=[NDArray[float,:], NDArray[float,:]])
def test_add4(self):
    self.run_test("def np_add4(a, b): return ( a + b ) + ( a + b )", numpy.ones(10), numpy.ones(10), np_add4=[NDArray[float,:], NDArray[float,:]])
def test_add5(self):
    self.run_test("def np_add5(a, b): return (-a) + (-b)", numpy.ones(10), numpy.ones(10), np_add5=[NDArray[float,:], NDArray[float,:]])
def test_sub0(self):
    self.run_test("def np_sub0(a, b): return a - b", numpy.ones(10), numpy.ones(10), np_sub0=[NDArray[float,:], NDArray[float,:]])
def test_sub1(self):
    self.run_test("def np_sub1(a, b): return a - b - a", numpy.ones(10), numpy.ones(10), np_sub1=[NDArray[float,:], NDArray[float,:]])
def test_sub2(self):
    self.run_test("def np_sub2(a, b): return a - b - 1", numpy.ones(10), numpy.ones(10), np_sub2=[NDArray[float,:], NDArray[float,:]])
def test_sub3(self):
    self.run_test("def np_sub3(a, b): return 1. - a - b - 1.", numpy.ones(10), numpy.ones(10), np_sub3=[NDArray[float,:], NDArray[float,:]])
def test_sub4(self):
    self.run_test("def np_sub4(a, b): return ( a - b ) - ( a - b )", numpy.ones(10), numpy.ones(10), np_sub4=[NDArray[float,:], NDArray[float,:]])
def test_addsub0(self):
    self.run_test("def np_addsub0(a, b): return a - b + a", numpy.ones(10), numpy.ones(10), np_addsub0=[NDArray[float,:], NDArray[float,:]])
def test_addsub1(self):
    self.run_test("def np_addsub1(a, b): return a + b - a", numpy.ones(10), numpy.ones(10), np_addsub1=[NDArray[float,:], NDArray[float,:]])
def test_addsub2(self):
    self.run_test("def np_addsub2(a, b): return a + b - 1", numpy.ones(10), numpy.ones(10), np_addsub2=[NDArray[float,:], NDArray[float,:]])
def test_addsub3(self):
    self.run_test("def np_addsub3(a, b): return 1. + a - b + 1.", numpy.ones(10), numpy.ones(10), np_addsub3=[NDArray[float,:], NDArray[float,:]])
def test_addsub4(self):
    self.run_test("def np_addsub4(a, b): return ( a - b ) + ( a + b )", numpy.ones(10), numpy.ones(10), np_addsub4=[NDArray[float,:], NDArray[float,:]])
# --- +/- mixed with trigonometric ufuncs ---
def test_addcossub0(self):
    self.run_test("def np_addcossub0(a, b): from numpy import cos ; return a - b + cos(a)", numpy.ones(10), numpy.ones(10), np_addcossub0=[NDArray[float,:], NDArray[float,:]])
def test_addcossub1(self):
    self.run_test("def np_addcossub1(a, b): from numpy import cos ; return a + cos(b - a)", numpy.ones(10), numpy.ones(10), np_addcossub1=[NDArray[float,:], NDArray[float,:]])
def test_addcossub2(self):
    self.run_test("def np_addcossub2(a, b): from numpy import cos ; return a + cos(b - 1)", numpy.ones(10), numpy.ones(10), np_addcossub2=[NDArray[float,:], NDArray[float,:]])
def test_addcossub3(self):
    self.run_test("def np_addcossub3(a, b): from numpy import cos ; return cos(1. + a - b + cos(1.))", numpy.ones(10), numpy.ones(10), np_addcossub3=[NDArray[float,:], NDArray[float,:]])
def test_addcossub4(self):
    self.run_test("def np_addcossub4(a, b): from numpy import cos ; return cos( a - b ) + ( a + b )", numpy.ones(10), numpy.ones(10), np_addcossub4=[NDArray[float,:], NDArray[float,:]])
# --- trigonometric ufuncs applied to arrays ---
def test_sin0(self):
    self.run_test("def np_sin0(a, b): from numpy import sin ; return sin(a) + b", numpy.ones(10), numpy.ones(10), np_sin0=[NDArray[float,:], NDArray[float,:]])
def test_tan0(self):
    self.run_test("def np_tan0(a, b): from numpy import tan ; return tan(a - b)", numpy.ones(10), numpy.ones(10), np_tan0=[NDArray[float,:], NDArray[float,:]])
def test_arccos0(self):
    self.run_test("def np_arccos0(a, b): from numpy import arccos ; return arccos(a - b) + 1", numpy.ones(10), numpy.ones(10), np_arccos0=[NDArray[float,:], NDArray[float,:]])
def test_arcsin0(self):
    self.run_test("def np_arcsin0(a, b): from numpy import arcsin ; return arcsin(a + b - a + -b) + 1.", numpy.ones(10), numpy.ones(10), np_arcsin0=[NDArray[float,:], NDArray[float,:]])
def test_arctan0(self):
    self.run_test("def np_arctan0(a, b): from numpy import arctan ; return arctan(a -0.5) + a", numpy.ones(10), numpy.ones(10), np_arctan0=[NDArray[float,:], NDArray[float,:]])
def test_arctan20(self):
    self.run_test("def np_arctan20(a, b): from numpy import arctan2 ; return b - arctan2(a , b)", numpy.ones(10), numpy.ones(10), np_arctan20=[NDArray[float,:], NDArray[float,:]])
# --- trigonometric functions on scalar arguments (int and float typings) ---
def test_cos1(self):
    self.run_test("def np_cos1(a): from numpy import cos; return cos(a)", 5, np_cos1=[int])
def test_sin1(self):
    self.run_test("def np_sin1(a): from numpy import sin; return sin(a)", 0.5, np_sin1=[float])
def test_tan1(self):
    self.run_test("def np_tan1(a): from numpy import tan; return tan(a)", 0.5, np_tan1=[float])
def test_arccos1(self):
    self.run_test("def np_arccos1(a): from numpy import arccos ; return arccos(a)", 1, np_arccos1=[int])
def test_arcsin1(self):
    self.run_test("def np_arcsin1(a): from numpy import arcsin ; return arcsin(a)", 1, np_arcsin1=[int])
def test_arctan1(self):
    self.run_test("def np_arctan1(a): from numpy import arctan ; return arctan(a)", 0.5, np_arctan1=[float])
def test_arctan21(self):
    self.run_test("def np_arctan21(a): from numpy import arctan2 ; b = .5 ; return arctan2(a , b)", 1., np_arctan21=[float])
# modulo with negative operands: pythran must match Python/numpy semantics
def test_negative_mod(self):
    self.run_test("def np_negative_mod(a): return a % 5", numpy.array([-1, -5, -2, 7]), np_negative_mod=[NDArray[int,:]])
# --- ndarray combined with plain list / tuple operands (both operand orders) ---
def test_binary_op_list0(self):
    self.run_test("def np_binary_op_list0(n): return n + [1,2,3]", numpy.array([4,5,6]), np_binary_op_list0=[NDArray[int,:]])
def test_binary_op_list1(self):
    self.run_test("def np_binary_op_list1(n): return [1,2,3] + n", numpy.array([4,5,6]), np_binary_op_list1=[NDArray[int,:]])
def test_binary_op_list2(self):
    self.run_test("def np_binary_op_list2(n): return [[1],[2],[3]] + n", numpy.array([[4],[5],[6]]), np_binary_op_list2=[NDArray[int,:,:]])
def test_binary_op_array0(self):
    self.run_test("def np_binary_op_array0(n): return n + (1,2,3)", numpy.array([4,5,6]), np_binary_op_array0=[NDArray[int,:]])
def test_binary_op_array1(self):
    self.run_test("def np_binary_op_array1(n): return (1,2,3) + n", numpy.array([4,5,6]), np_binary_op_array1=[NDArray[int,:]])
def test_binary_op_array2(self):
    self.run_test("def np_binary_op_array2(n): return ((1,2,3),) + n", numpy.array([[4,5,6]]), np_binary_op_array2=[NDArray[int,:,:]])
def test_round_method(self):
    self.run_test("def np_round_method(a): return a.round()", numpy.array([[4.3,5.5,6.1]]), np_round_method=[NDArray[float,:,:]])
# --- numpy.imag / numpy.real, as read-only result and as a writable view
# (the *1 variants mutate through the view and return the mutated input) ---
def test_list_imag0(self):
    self.run_test("def list_imag0(a): import numpy ; return numpy.imag(a)", numpy.array([complex(4.3,5.5),complex(6.1,3.2)]), list_imag0=[NDArray[complex,:]])
def test_list_imag1(self):
    self.run_test("def list_imag1(a): import numpy ; numpy.imag(a)[0] = 1; return a", numpy.array([complex(4.3,5.5),complex(6.1,3.2)]), list_imag1=[NDArray[complex,:]])
def test_list_real0(self):
    self.run_test("def list_real0(a): import numpy ; return numpy.real(a)", numpy.array([complex(4.3,5.5),complex(6.1,3.2)]), list_real0=[NDArray[complex,:]])
def test_list_real1(self):
    self.run_test("def list_real1(a): import numpy ; numpy.real(a)[0] = 1; return a", numpy.array([complex(4.3,5.5),complex(6.1,3.2)]), list_real1=[NDArray[complex,:]])
# --- numpy.fill_diagonal on square, rectangular, and sliced sub-array targets ---
def test_fill_diagonal_0(self):
    self.run_test("def fill_diagonal_0(a): import numpy ; numpy.fill_diagonal(a, 0); return a", numpy.ones((4,4)), fill_diagonal_0=[NDArray[float,:,:]])
def test_fill_diagonal_1(self):
    self.run_test("def fill_diagonal_1(a): import numpy ; numpy.fill_diagonal(a, 0); return a", numpy.ones((4,6)), fill_diagonal_1=[NDArray[float,:,:]])
def test_fill_diagonal_2(self):
    self.run_test("def fill_diagonal_2(n): import numpy ; a = numpy.ones((n,n, 5));numpy.fill_diagonal(a[0], 0); return a", 4, fill_diagonal_2=[int])
def test_fill_diagonal_3(self):
    self.run_test("def fill_diagonal_3(n): import numpy ; a = numpy.ones((n, n, 2, 2));numpy.fill_diagonal(a[0,1:3], 0); return a", 4, fill_diagonal_3=[int])
# --- numpy.interp coverage: real-valued fp (interp_0..8) then complex fp
# (interp_0c..8c); variants exercise scalar vs. array x, strided (non-
# contiguous) xp/fp views, explicit left/right fill values, and period=.
# NOTE(review): inputs are unseeded numpy.random draws; xp is always passed
# through numpy.sort since interp expects increasing sample points.
def test_interp_0(self):
    self.run_test('def interp0(x,xp,fp): import numpy as np; return np.interp(x,xp,fp)',
                  numpy.random.randn(100),
                  numpy.sort(numpy.random.randn(1000)),
                  numpy.random.randn(1000),
                  interp0=[NDArray[float,:],NDArray[float,:],NDArray[float,:]])
def test_interp_1(self):
    # explicit left/right out-of-range fill values
    self.run_test('def interp1(x,xp,fp): import numpy as np; return np.interp(x,xp,fp,-10.,10.)',
                  numpy.random.randn(100),
                  numpy.sort(numpy.random.randn(1000)),
                  numpy.random.randn(1000),
                  interp1=[NDArray[float,:],NDArray[float,:],NDArray[float,:]])
def test_interp_2(self):
    # strided xp/fp views
    self.run_test('def interp2(x,xp,fp): import numpy as np; return np.interp(x,xp[::2],fp[::2],-10.,10.)',
                  numpy.random.randn(100),
                  numpy.sort(numpy.random.randn(1000)),
                  numpy.random.randn(1000),
                  interp2=[NDArray[float,:],NDArray[float,:],NDArray[float,:]])
def test_interp_3(self):
    # strided x as well; only the left fill value is given
    self.run_test('def interp3(x,xp,fp): import numpy as np; return np.interp(x[::3],xp[::2],fp[::2],-10.)',
                  numpy.random.randn(100),
                  numpy.sort(numpy.random.randn(1000)),
                  numpy.random.randn(1000),
                  interp3=[NDArray[float,:],NDArray[float,:],NDArray[float,:]])
def test_interp_4(self):
    # periodic interpolation via the period= keyword
    self.run_test('def interp4(x,xp,fp): import numpy as np; return np.interp(x,xp,fp,period=1.1)',
                  numpy.random.randn(100),
                  numpy.sort(numpy.random.randn(1000)),
                  numpy.random.randn(1000),
                  interp4=[NDArray[float,:],NDArray[float,:],NDArray[float,:]])
def test_interp_5(self):
    # all-optional-args-as-None calling convention, float arange step
    self.run_test('def interp5(x,factor): N = len(x); import numpy as np; return np.interp(np.arange(0, N - 1, factor), np.arange(N), x, None, None, None)',
                  numpy.random.randn(100),
                  10.,
                  interp5=[NDArray[float,:],float])
def test_interp_6(self):
    # scalar float x
    self.run_test('def interp6(x,xp,fp): import numpy as np; return np.interp(x,xp,fp)',
                  1.4,
                  numpy.sort(numpy.random.randn(1000)),
                  numpy.random.randn(1000),
                  interp6=[float,NDArray[float,:],NDArray[float,:]])
def test_interp_7(self):
    # scalar int x
    self.run_test('def interp7(x,xp,fp): import numpy as np; return np.interp(x,xp,fp)',
                  1,
                  numpy.sort(10*numpy.random.randn(1000)),
                  numpy.random.randn(1000),
                  interp7=[int,NDArray[float,:],NDArray[float,:]])
def test_interp_8(self):
    # scalar x with integer left/right fill values
    self.run_test('def interp8(x,xp,fp): import numpy as np; return np.interp(x,xp,fp,-10,10)',
                  1.4,
                  numpy.sort(numpy.random.randn(1000)),
                  numpy.random.randn(1000),
                  interp8=[float,NDArray[float,:],NDArray[float,:]])
# --- same matrix of cases with complex-valued fp ---
def test_interp_0c(self):
    self.run_test('def interp0c(x,xp,fp): import numpy as np; return np.interp(x,xp,fp)',
                  numpy.random.randn(100),
                  numpy.sort(numpy.random.randn(1000)),
                  numpy.random.randn(1000) + 1j*numpy.random.randn(1000),
                  interp0c=[NDArray[float,:],NDArray[float,:],NDArray[complex,:]])
def test_interp_1c(self):
    # complex left/right fill values
    self.run_test('def interp1c(x,xp,fp): import numpy as np; return np.interp(x,xp,fp,-10.j,10.j)',
                  numpy.random.randn(100),
                  numpy.sort(numpy.random.randn(1000)),
                  numpy.random.randn(1000) + 1j*numpy.random.randn(1000),
                  interp1c=[NDArray[float,:],NDArray[float,:],NDArray[complex,:]])
def test_interp_2c(self):
    # strided views with real fill values on complex fp
    self.run_test('def interp2c(x,xp,fp): import numpy as np; return np.interp(x,xp[::2],fp[::2],-10.,10.)',
                  numpy.random.randn(100),
                  numpy.sort(numpy.random.randn(1000)),
                  numpy.random.randn(1000) + 1j*numpy.random.randn(1000),
                  interp2c=[NDArray[float,:],NDArray[float,:],NDArray[complex,:]])
def test_interp_3c(self):
    self.run_test('def interp3c(x,xp,fp): import numpy as np; return np.interp(x[::3],xp[::2],fp[::2],-10.j)',
                  numpy.random.randn(100),
                  numpy.sort(numpy.random.randn(1000)),
                  numpy.random.randn(1000) + 1j*numpy.random.randn(1000),
                  interp3c=[NDArray[float,:],NDArray[float,:],NDArray[complex,:]])
def test_interp_4c(self):
    self.run_test('def interp4c(x,xp,fp): import numpy as np; return np.interp(x,xp,fp,period=1.1)',
                  numpy.random.randn(100),
                  numpy.sort(numpy.random.randn(1000)),
                  numpy.random.randn(1000) + 1j*numpy.random.randn(1000),
                  interp4c=[NDArray[float,:],NDArray[float,:],NDArray[complex,:]])
def test_interp_5c(self):
    self.run_test('def interp5c(x,factor): N = len(x); import numpy as np; return np.interp(np.arange(0, N - 1, factor), np.arange(N), x, None, None, None)',
                  numpy.random.randn(100) + 1j*numpy.random.randn(100),
                  10.,
                  interp5c=[NDArray[complex,:],float])
def test_interp_6c(self):
    self.run_test('def interp6c(x,xp,fp): import numpy as np; return np.interp(x,xp,fp)',
                  1.4,
                  numpy.sort(numpy.random.randn(1000)),
                  numpy.random.randn(1000) + 1j*numpy.random.randn(1000),
                  interp6c=[float,NDArray[float,:],NDArray[complex,:]])
def test_interp_7c(self):
    self.run_test('def interp7c(x,xp,fp): import numpy as np; return np.interp(x,xp,fp)',
                  1,
                  numpy.sort(10*numpy.random.randn(1000)),
                  numpy.random.randn(1000) + 1j*numpy.random.randn(1000),
                  interp7c=[int,NDArray[float,:],NDArray[complex,:]])
def test_interp_8c(self):
    # imaginary-literal fill values without the trailing dot (-10j/10j)
    self.run_test('def interp8c(x,xp,fp): import numpy as np; return np.interp(x,xp,fp,-10j,10j)',
                  1.4,
                  numpy.sort(numpy.random.randn(1000)),
                  numpy.random.randn(1000) + 1j*numpy.random.randn(1000),
                  interp8c=[float,NDArray[float,:],NDArray[complex,:]])
def test_setdiff1d0(self):
    # setdiff1d with the default assume_unique=False on two random vectors.
    # Argument evaluation order is kept identical so the RNG draws match.
    kernel = 'def setdiff1d0(x,y): import numpy as np; return np.setdiff1d(x,y)'
    lhs = numpy.random.randn(100)
    rhs = numpy.random.randn(1000)
    self.run_test(kernel, lhs, rhs,
                  setdiff1d0=[NDArray[float, :], NDArray[float, :]])
def test_setdiff1d1(self):
self.run_test('def setdiff1d0(x,y): import numpy as np; return np.setdiff1d(x,y, True)',
numpy.unique(numpy.random.randn(1000)),
numpy.unique(numpy.random.randn(1000)),
setdiff1d0=[NDArray[float,:],NDArray[float,:]])
def test_setdiff1d2(self):
    # setdiff1d with a 2-D int second operand against a 1-D float first one
    self.run_test('def setdiff1d2(x,y): import numpy as np; return np.setdiff1d(x,y)',
                  numpy.random.randn(100),
                  numpy.array([[1,2],[2,4]]),
                  setdiff1d2=[NDArray[float,:],NDArray[int,:,:]])
def test_setdiff1d3(self):
    # same mixed-rank case with assume_unique=True
    self.run_test('def setdiff1d3(x,y): import numpy as np; return np.setdiff1d(x,y, True)',
                  numpy.unique(numpy.random.randn(1000)),
                  numpy.array([[3,2],[5,4]]),
                  setdiff1d3=[NDArray[float,:],NDArray[int,:,:]])
# Stabilize interp test
""" Tests for part of the numpy module. """
import unittest
import numpy
from pythran.typing import List, NDArray, Tuple
from pythran.tests import TestEnv
@TestEnv.module
class TestNumpyFunc3(TestEnv):
"""
This module includes tests for multiple numpy module function.
Tested functions are:
- numpy.dot
- numpy.vdot
- numpy.digitize
- numpy.diff
- numpy.trace
- numpy.tri
- numpy.trim_zeros
- numpy.triu
- numpy.tril
- numpy.union1d
- numpy.unique
- numpy.unwrap
- numpy.unravel_index
- numpy.copyto
and various combinations of +/-/** and trigonometric operations.
"""
# --- numpy.dot across scalars, plain lists, 1-D and 2-D operands ---
def test_dot0(self):
    # dot on two scalars degenerates to multiplication
    self.run_test("def np_dot0(x, y): from numpy import dot; return dot(x, y)", 2, 3, np_dot0=[int, int])
def test_dot1(self):
    self.run_test("def np_dot1(x): from numpy import dot ; y = [2, 3] ; return dot(x,y)", [2, 3], np_dot1=[List[int]])
def test_dot2(self):
    self.run_test("def np_dot2(x): from numpy import dot ; y = [2j, 3j] ; return dot(x,y)", [2j, 3j], np_dot2=[List[complex]])
def test_dot3(self):
    # method form: ndarray.dot on an expression argument
    self.run_test("def np_dot3(x): from numpy import array ; y = array([2, 3]) ; return y.dot(x+x)", numpy.array([2, 3]), np_dot3=[NDArray[int,:]])
def test_dot4a(self):
    self.run_test("def np_dot4a(x): from numpy import dot ; y = [2, 3] ; return dot(x,y)", numpy.array([2, 3]), np_dot4a=[NDArray[int,:]])
def test_dot4b(self):
    # sliced float32 operand against a list
    self.run_test("def np_dot4b(x): from numpy import dot ; y = [2., 3.] ; return dot(x[1:],y)", numpy.array([2, 3, 4], dtype=numpy.float32), np_dot4b=[NDArray[numpy.float32,:]])
def test_dot4c(self):
    self.run_test("def np_dot4c(x): from numpy import dot ; return dot(x[1:],x[:-1])", numpy.array([2, 3, 4], dtype=numpy.float64), np_dot4c=[NDArray[float,:]])
def test_dot4d(self):
    self.run_test("def np_dot4d(x): from numpy import dot ; return dot(x, x)", numpy.array([2j, 3j, 4.]), np_dot4d=[NDArray[complex,:]])
def test_dot4e(self):
    self.run_test("def np_dot4e(x): from numpy import dot ; y = (2.j, 3.j) ; return dot(x[:-1],y)", numpy.array([2.j, 3.j, 4.j], dtype=numpy.complex64), np_dot4e=[NDArray[numpy.complex64,:]])
def test_dot4f(self):
    # expression operand (2*x) dotted with a tuple
    self.run_test("def np_dot4f(x): from numpy import dot ; y = (1., 2., 3.) ; return dot(2*x, y)", numpy.array([2., 3., 4.]), np_dot4f=[NDArray[float,:]])
def test_dot5(self):
    """ Check for dgemm version of dot. """
    self.run_test("""
def np_dot5(x, y):
    from numpy import dot
    return dot(x,y)""",
                  [[1., 2., 3.], [4., 5., 6.], [7., 8., 9.]],
                  [[10., 11., 12.], [13., 14., 15.], [16., 17., 18.]],
                  np_dot5=[List[List[float]], List[List[float]]])
def test_dot6(self):
    """ Check for dot with "no blas type". """
    self.run_test("""
def np_dot6(x, y):
    from numpy import dot
    return dot(x,y)""",
                  numpy.arange(9).reshape(3, 3),
                  numpy.arange(9, 18).reshape(3, 3),
                  np_dot6=[NDArray[int,:,:], NDArray[int,:,:]])
def test_dot7(self):
    """ Check for dgemm version of dot with rectangular shape. """
    # NOTE(review): the operands here are square 3x3, identical to
    # test_dot5; the "rectangular shape" wording looks stale -- confirm.
    self.run_test("""
def np_dot7(x, y):
    from numpy import dot
    return dot(x,y)""",
                  [[1., 2., 3.], [4., 5., 6.], [7., 8., 9.]],
                  [[10., 11., 12.], [13., 14., 15.], [16., 17., 18.]],
                  np_dot7=[List[List[float]], List[List[float]]])
def test_dot8(self):
    """ Check for dot with "no blas type" with rectangular shape. """
    self.run_test("""
def np_dot8(x, y):
    from numpy import dot
    return dot(x,y)""",
                  numpy.arange(6).reshape(3, 2),
                  numpy.arange(6, 12).reshape(2, 3),
                  np_dot8=[NDArray[int,:,:], NDArray[int,:,:]])
def test_dot9(self):
    """ Check for gemv version of dot. """
    self.run_test("""
def np_dot9(x, y):
    from numpy import dot
    return dot(x,y)""",
                  numpy.arange(9.).reshape(3, 3).tolist(),
                  [float(x) for x in range(9, 12)],
                  np_dot9=[List[List[float]], List[float]])
def test_dot10(self):
    """ Check for dot gemv with "no blas type". """
    self.run_test("""
def np_dot10(x, y):
    from numpy import dot
    return dot(x,y)""",
                  numpy.arange(9).reshape(3, 3),
                  numpy.arange(9, 12),
                  np_dot10=[NDArray[int,:,:], NDArray[int,:]])
def test_dot11(self):
    """ Check for gemv version of dot with rectangular shape. """
    self.run_test("""
def np_dot11(x, y):
    from numpy import dot
    return dot(x,y)""",
                  numpy.arange(6.).reshape(3, 2).tolist(),
                  [float(x) for x in range(6, 8)],
                  np_dot11=[List[List[float]], List[float]])
def test_dot12(self):
    """ Check for dot gemv with "no blas type" with rectangular shape. """
    self.run_test("""
def np_dot12(x, y):
    from numpy import dot
    return dot(x,y)""",
                  numpy.arange(6).reshape(3, 2),
                  numpy.arange(6, 8),
                  np_dot12=[NDArray[int,:,:], NDArray[int,:]])
# --- vector x matrix (gevm), transposed-operand and mixed-dtype dot cases ---
def test_dot13(self):
    """ Check for gevm version of dot. """
    self.run_test("""
def np_dot13(x, y):
    from numpy import dot
    return dot(x,y)""",
                  [float(x) for x in range(9, 12)],
                  numpy.arange(9.).reshape(3, 3).tolist(),
                  np_dot13=[List[float], List[List[float]]])
def test_dot14(self):
    """ Check for dot gevm with "no blas type". """
    self.run_test("""
def np_dot14(x, y):
    from numpy import dot
    return dot(x,y)""",
                  numpy.arange(9, 12),
                  numpy.arange(9).reshape(3, 3),
                  np_dot14=[NDArray[int,:], NDArray[int,:,:]])
def test_dot15(self):
    """ Check for gevm version of dot with rectangular shape. """
    self.run_test("""
def np_dot15(x, y):
    from numpy import dot
    return dot(x,y)""",
                  [float(x) for x in range(6, 9)],
                  numpy.arange(6.).reshape(3, 2).tolist(),
                  np_dot15=[List[float], List[List[float]]])
def test_dot16(self):
    """ Check for dot gevm with "no blas type" with rectangular shape. """
    # NOTE(review): test_dot16..test_dot19 multiply two 2-D float matrices
    # (a gemm-style case); the "gevm" / "no blas type" wording in these
    # docstrings looks stale -- confirm before renaming anything.
    self.run_test("""
def np_dot16(x, y):
    from numpy import dot
    return dot(x,y)""",
                  numpy.arange(6.).reshape(2, 3),
                  numpy.arange(18.).reshape(3,6),
                  np_dot16=[NDArray[float,:,:], NDArray[float,:,:]])
def test_dot17(self):
    """ Check for dot gevm with "no blas type" with rectangular shape,
    first arg transposed."""
    self.run_test("""
def np_dot17(x, y):
    from numpy import dot
    return dot(x.T,y)""",
                  numpy.arange(6.).reshape(3, 2),
                  numpy.arange(18.).reshape(3,6),
                  np_dot17=[NDArray[float,:,:], NDArray[float,:,:]])
def test_dot18(self):
    """ Check for dot gevm with "no blas type" with rectangular shape,
    second arg transposed"""
    self.run_test("""
def np_dot18(x, y):
    from numpy import dot
    return dot(x,y.T)""",
                  numpy.arange(6.).reshape(2, 3),
                  numpy.arange(18.).reshape(6,3),
                  np_dot18=[NDArray[float,:,:], NDArray[float,:,:]])
def test_dot19(self):
    """ Check for dot gevm with "no blas type" with rectangular shape,
    both args transposed"""
    self.run_test("""
def np_dot19(x, y):
    from numpy import dot
    return dot(x.T,y.T)""",
                  numpy.array(numpy.arange(6.).reshape(3, 2)),
                  numpy.array(numpy.arange(18.).reshape(6,3)),
                  np_dot19=[NDArray[float,:,:], NDArray[float,:,:]])
def test_dot20(self):
    ''' Mixed type: matrix x matrix'''
    self.run_test("""
def np_dot20(x, y):
    from numpy import dot
    return dot(x, y)""",
                  numpy.array(numpy.arange(6.).reshape(2, 3),
                              dtype=numpy.float32),
                  numpy.array(numpy.arange(18.).reshape(3,6),
                              dtype=numpy.float64),
                  np_dot20=[NDArray[numpy.float32,:,:],
                            NDArray[numpy.float64,:,:]])
def test_dot21(self):
    ''' Mixed type: matrix x vector'''
    self.run_test("""
def np_dot21(x, y):
    from numpy import dot
    return dot(x, y)""",
                  numpy.array(numpy.arange(6.).reshape(2, 3),
                              dtype=numpy.float32),
                  numpy.array(numpy.arange(3.).reshape(3),
                              dtype=numpy.float64),
                  np_dot21=[NDArray[numpy.float32,:,:],
                            NDArray[numpy.float64,:]])
def test_dot22(self):
    ''' Mixed type: matrix x vector'''
    # kernel swaps the operands: dot(y, x) is vector x matrix here
    self.run_test("""
def np_dot22(x, y):
    from numpy import dot
    return dot(y, x)""",
                  numpy.array(numpy.arange(6.).reshape(3, 2),
                              dtype=numpy.float32),
                  numpy.array(numpy.arange(3.).reshape(3),
                              dtype=numpy.float64),
                  np_dot22=[NDArray[numpy.float32,:,:],
                            NDArray[numpy.float64,:]])
def test_dot23(self):
    ''' Nd x 1d, N > 2'''
    self.run_test("""
def np_dot23(x, y):
    from numpy import dot
    return dot(x, y)""",
                  numpy.array(numpy.arange(24.).reshape(4, 3, 2),
                              dtype=numpy.float32),
                  numpy.array(numpy.arange(2.).reshape(2),
                              dtype=numpy.float64),
                  np_dot23=[NDArray[numpy.float32,:,:,:],
                            NDArray[numpy.float64,:]])
# skipped per the marker below: this Nd x Md combination is not supported yet
@unittest.skip("not implemented yet")
def test_dot24(self):
    ''' Nd x 1d, N > 2'''
    self.run_test("""
def np_dot24(x, y):
    from numpy import dot
    return dot(x, y)""",
                  numpy.array(numpy.arange(24.).reshape(4, 3, 2),
                              dtype=numpy.float32),
                  numpy.array(numpy.arange(24.).reshape(2,3,2,2),
                              dtype=numpy.float64),
                  np_dot24=[NDArray[numpy.float32,:,:,:],
                            NDArray[numpy.float64,:,:,:,:]])
# --- numpy.vdot over mismatched shapes, mixed precision, and complex input ---
def test_vdot0(self):
    # 2-D against 1-D of the same element count (vdot flattens -- see numpy docs)
    self.run_test("""
def np_vdot0(x, y):
    from numpy import vdot
    return vdot(x, y)""",
                  numpy.array(numpy.arange(6.).reshape(3, 2),
                              dtype=numpy.float32),
                  numpy.array(numpy.arange(6.).reshape(6),
                              dtype=numpy.float32),
                  np_vdot0=[NDArray[numpy.float32,:,:],
                            NDArray[numpy.float32,:]])
def test_vdot1(self):
    # mixed float32 / float64 precision
    self.run_test("""
def np_vdot1(x, y):
    from numpy import vdot
    return vdot(x, y)""",
                  numpy.array(numpy.arange(6.).reshape(3, 2),
                              dtype=numpy.float32),
                  numpy.array(numpy.arange(6.).reshape(6),
                              dtype=numpy.float64),
                  np_vdot1=[NDArray[numpy.float32,:,:],
                            NDArray[numpy.float64,:]])
def test_vdot2(self):
    # complex128 operands, 2-D against 1-D
    self.run_test("""
def np_vdot2(x, y):
    from numpy import vdot
    return vdot(x, y)""",
                  numpy.array(numpy.arange(6.).reshape(3, 2),
                              dtype=numpy.complex128),
                  numpy.array(numpy.arange(6.).reshape(6),
                              dtype=numpy.complex128),
                  np_vdot2=[NDArray[numpy.complex128,:,:],
                            NDArray[numpy.complex128,:]])
def test_vdot3(self):
    # complex 1-D operands where the second has a non-zero imaginary part,
    # so the conjugation vdot performs actually matters
    self.run_test("""
def np_vdot3(x, y):
    from numpy import vdot
    return vdot(x, y)""",
                  numpy.array(numpy.arange(6.),
                              dtype=numpy.complex128),
                  numpy.array(numpy.arange(6.),
                              dtype=numpy.complex128) * -1j,
                  np_vdot3=[NDArray[numpy.complex128,:],
                            NDArray[numpy.complex128,:]])
# --- numpy.digitize with increasing and decreasing bin edges ---
def test_digitize0(self):
    self.run_test("def np_digitize0(x): from numpy import array, digitize ; bins = array([0.0, 1.0, 2.5, 4.0, 10.0]) ; return digitize(x, bins)", numpy.array([0.2, 6.4, 3.0, 1.6]), np_digitize0=[NDArray[float,:]])
def test_digitize1(self):
    self.run_test("def np_digitize1(x): from numpy import array, digitize ; bins = array([ 10.0, 4.0, 2.5, 1.0, 0.0]) ; return digitize(x, bins)", numpy.array([0.2, 6.4, 3.0, 1.6]), np_digitize1=[NDArray[float,:]])
# --- numpy.diff: orders 1 and 2, explicit axis on 2-D and 3-D inputs ---
def test_diff0(self):
    self.run_test("def np_diff0(x): from numpy import diff; return diff(x)", numpy.array([1, 2, 4, 7, 0]), np_diff0=[NDArray[int,:]])
def test_diff1(self):
    self.run_test("def np_diff1(x): from numpy import diff; return diff(x,2)", numpy.array([1, 2, 4, 7, 0]), np_diff1=[NDArray[int,:]])
def test_diff2(self):
    self.run_test("def np_diff2(x): from numpy import diff; return diff(x)", numpy.array([[1, 3, 6, 10], [0, 5, 6, 8]]), np_diff2=[NDArray[int,:,:]])
def test_diff3(self):
    self.run_test("def np_diff3(x): from numpy import diff; return diff(x,2)", numpy.array([[1, 3, 6, 10], [0, 5, 6, 8]]), np_diff3=[NDArray[int,:,:]])
def test_diff4(self):
    # diff of an expression argument
    self.run_test("def np_diff4(x): from numpy import diff; return diff(x + x)", numpy.array([1, 2, 4, 7, 0]), np_diff4=[NDArray[int,:]])
def test_diff5(self):
    self.run_test("def np_diff5(x): from numpy import diff; return diff(x + x, 2, axis=0)", numpy.arange(100).reshape(10, 10)*2, np_diff5=[NDArray[int,:,:]])
def test_diff6(self):
    self.run_test("def np_diff6(x): from numpy import diff; return diff(x, axis=0)", numpy.arange(100).reshape(10, 10)*2, np_diff6=[NDArray[int,:,:]])
def test_diff7(self):
    self.run_test("def np_diff7(x): from numpy import diff; return diff(x, axis=0)", numpy.arange(300).reshape(3, 10, 10)*2, np_diff7=[NDArray[int,:,:,:]])
def test_diff8(self):
    self.run_test("def np_diff8(x): from numpy import diff; return diff(x, axis=1)", numpy.arange(300).reshape(3, 10, 10)*2, np_diff8=[NDArray[int,:,:,:]])
def test_diff9(self):
    self.run_test("def np_diff9(x): from numpy import diff; return diff(x, axis=2)", numpy.arange(300).reshape(3, 10, 10)*2, np_diff9=[NDArray[int,:,:,:]])
# --- numpy.trace: method form and free function with an offset ---
def test_trace0(self):
    self.run_test("def np_trace0(x): return x.trace()", numpy.arange(9).reshape(3,3), np_trace0=[NDArray[int,:,:]])
def test_trace1(self):
    self.run_test("def np_trace1(x): from numpy import trace; return trace(x, 1)", numpy.arange(12).reshape(3,4), np_trace1=[NDArray[int,:,:]])
def test_trace2(self):
    # NOTE(review): identical kernel and input to test_trace1 (same offset 1);
    # possibly a different offset (e.g. -1) was intended -- confirm.
    self.run_test("def np_trace2(x): from numpy import trace; return trace(x, 1)", numpy.arange(12).reshape(3,4), np_trace2=[NDArray[int,:,:]])
# --- numpy.tri with optional M, k and dtype arguments ---
def test_tri0(self):
    self.run_test("def np_tri0(a): from numpy import tri; return tri(a)", 3, np_tri0=[int])
def test_tri1(self):
    self.run_test("def np_tri1(a): from numpy import tri; return tri(a, 4)", 3, np_tri1=[int])
def test_tri2(self):
    self.run_test("def np_tri2(a): from numpy import tri; return tri(a, 3, -1)", 4, np_tri2=[int])
def test_tri3(self):
    self.run_test("def np_tri3(a): from numpy import tri, int64; return tri(a, 5, 1, int64)", 3, np_tri3=[int])
# --- numpy.trim_zeros: default ('fb'), front-only and back-only trimming ---
def test_trim_zeros0(self):
    self.run_test("""
def np_trim_zeros0(x):
    from numpy import array, trim_zeros
    return trim_zeros(x)""", numpy.array((0, 0, 0, 1, 2, 3, 0, 2, 1, 0)), np_trim_zeros0=[NDArray[int,:]])
def test_trim_zeros1(self):
    self.run_test("""
def np_trim_zeros1(x):
    from numpy import array, trim_zeros
    return trim_zeros(x, "f")""", numpy.array((0, 0, 0, 1, 2, 3, 0, 2, 1, 0)), np_trim_zeros1=[NDArray[int,:]])
def test_trim_zeros2(self):
    self.run_test("""
def np_trim_zeros2(x):
    from numpy import trim_zeros
    return trim_zeros(x, "b")""", numpy.array((0, 0, 0, 1, 2, 3, 0, 2, 1, 0)), np_trim_zeros2=[NDArray[int,:]])
# --- numpy.triu / numpy.tril with default, positive and negative k ---
def test_triu0(self):
    self.run_test("def np_triu0(x): from numpy import triu; return triu(x)", numpy.arange(12).reshape(3,4), np_triu0=[NDArray[int,:,:]])
def test_triu1(self):
    self.run_test("def np_triu1(x): from numpy import triu; return triu(x, 1)", numpy.arange(12).reshape(3,4), np_triu1=[NDArray[int,:,:]])
def test_triu2(self):
    self.run_test("def np_triu2(x): from numpy import triu; return triu(x, -1)", numpy.arange(12).reshape(3,4), np_triu2=[NDArray[int,:,:]])
def test_tril0(self):
    self.run_test("def np_tril0(x): from numpy import tril; return tril(x)", numpy.arange(12).reshape(3,4), np_tril0=[NDArray[int,:,:]])
def test_tril1(self):
    self.run_test("def np_tril1(x): from numpy import tril; return tril(x, 1)", numpy.arange(12).reshape(3,4), np_tril1=[NDArray[int,:,:]])
def test_tril2(self):
    self.run_test("def np_tril2(x): from numpy import tril; return tril(x, -1)", numpy.arange(12).reshape(3,4), np_tril2=[NDArray[int,:,:]])
def test_union1d(self):
    # overlapping ranges [-1,2) and [1,4)
    self.run_test("def np_union1d(x): from numpy import arange, union1d ; y = arange(1,4); return union1d(x, y)", numpy.arange(-1,2), np_union1d=[NDArray[int,:]])
# --- numpy.unique over every combination of the optional flags;
# positional args are (return_index, return_inverse, return_counts) ---
def test_unique0(self):
    self.run_test("def np_unique0(x): from numpy import unique ; return unique(x)", numpy.array([1,1,2,2,2,1,5]), np_unique0=[NDArray[int,:]])
def test_unique1(self):
    # 2-D input is flattened by unique
    self.run_test("def np_unique1(x): from numpy import unique ; return unique(x)", numpy.array([[1,2,2],[2,1,5]]), np_unique1=[NDArray[int,:,:]])
def test_unique2(self):
    self.run_test("def np_unique2(x): from numpy import unique ; return unique(x, True)", numpy.array([1,1,2,2,2,1,5]), np_unique2=[NDArray[int,:]])
def test_unique3(self):
    self.run_test("def np_unique3(x): from numpy import unique ; return unique(x, True, True)", numpy.array([1,1,2,2,2,1,5]), np_unique3=[NDArray[int,:]])
def test_unique4(self):
    self.run_test("def np_unique4(x): from numpy import unique ; return unique(x, True, True, True)", numpy.array([1,1,2,2,2,1,5]), np_unique4=[NDArray[int,:]])
def test_unique5(self):
    self.run_test("def np_unique5(x): from numpy import unique ; return unique(x, False)", numpy.array([1,1,2,2,2,1,5]), np_unique5=[NDArray[int,:]])
def test_unique6(self):
    self.run_test("def np_unique6(x): from numpy import unique ; return unique(x, False, True)", numpy.array([1,1,2,2,2,1,5]), np_unique6=[NDArray[int,:]])
def test_unique7(self):
    self.run_test("def np_unique7(x): from numpy import unique ; return unique(x, False, False)", numpy.array([1,1,2,2,2,1,5]), np_unique7=[NDArray[int,:]])
def test_unique8(self):
    self.run_test("def np_unique8(x): from numpy import unique ; return unique(x, return_inverse=True)", numpy.array([1,1,2,2,2,1,5]), np_unique8=[NDArray[int,:]])
def test_unique9(self):
    self.run_test("def np_unique9(x): from numpy import unique ; return unique(x, True, False)", numpy.array([1,1,2,2,2,1,5]), np_unique9=[NDArray[int,:]])
def test_unique10(self):
    self.run_test("def np_unique10(x): from numpy import unique ; return unique(x, True, True, False)", numpy.array([1,1,2,2,2,1,5]), np_unique10=[NDArray[int,:]])
def test_unique11(self):
    self.run_test("def np_unique11(x): from numpy import unique ; return unique(x, True, False, False)", numpy.array([1,1,2,2,2,1,5]), np_unique11=[NDArray[int,:]])
def test_unique12(self):
    self.run_test("def np_unique12(x): from numpy import unique ; return unique(x, True, False, True)", numpy.array([1,1,2,2,2,1,5]), np_unique12=[NDArray[int,:]])
def test_unique13(self):
    self.run_test("def np_unique13(x): from numpy import unique ; return unique(x, False, True, False)", numpy.array([1,1,2,2,2,1,5]), np_unique13=[NDArray[int,:]])
def test_unique14(self):
    self.run_test("def np_unique14(x): from numpy import unique ; return unique(x, False, True, True)", numpy.array([1,1,2,2,2,1,5]), np_unique14=[NDArray[int,:]])
def test_unique15(self):
    self.run_test("def np_unique15(x): from numpy import unique ; return unique(x, False, False, False)", numpy.array([1,1,2,2,2,1,5]), np_unique15=[NDArray[int,:]])
def test_unique16(self):
    self.run_test("def np_unique16(x): from numpy import unique ; return unique(x, False, False, True)", numpy.array([1,1,2,2,2,1,5]), np_unique16=[NDArray[int,:]])
def test_unique17(self):
    # truthy non-bool flag value
    self.run_test("def np_unique17(x): from numpy import unique ; return unique(x, return_counts=1)", numpy.array([1,1,2,2,2,1,5]), np_unique17=[NDArray[int,:]])
# --- numpy.unwrap with default and custom discontinuity threshold ---
def test_unwrap0(self):
    self.run_test("def np_unwrap0(x): from numpy import unwrap, pi ; x[:3] += 2.6*pi; return unwrap(x)", numpy.arange(6, dtype=float), np_unwrap0=[NDArray[float,:]])
def test_unwrap1(self):
    self.run_test("def np_unwrap1(x): from numpy import unwrap, pi ; x[:3] += 2*pi; return unwrap(x, 4)", numpy.arange(6, dtype=float), np_unwrap1=[NDArray[float,:]])
def test_unwrap2(self):
    self.run_test("def np_unwrap2(x): from numpy import unwrap, pi ; x[:3] -= 2*pi; return unwrap(x, 4)", numpy.arange(6, dtype=float), np_unwrap2=[NDArray[float,:]])
# --- numpy.unravel_index in C and Fortran order ---
def test_unravel_index_0(self):
    self.run_test("def np_unravel_index0(x, y): from numpy import unravel_index; return unravel_index(x, y)", 1621, (6, 7, 8, 9), np_unravel_index0=[int, Tuple[int, int, int, int]])
def test_unravel_index_1(self):
    self.run_test("def np_unravel_index1(x, y): from numpy import unravel_index; return unravel_index(x, y, 'F')", 1621, (6, 7, 8, 9), np_unravel_index1=[int, Tuple[int, int, int, int]])
# --- numpy.copyto, including a broadcast from 1-D source to 2-D destination ---
def test_copyto_0(self):
    self.run_test("def np_copyto0(x, y): from numpy import copyto; copyto(x, y); return x",
                  numpy.array([1,2]), numpy.array([3,4]),
                  np_copyto0=[NDArray[int, :], NDArray[int, :]])
def test_copyto_1(self):
    self.run_test("def np_copyto1(x, y): from numpy import copyto; copyto(x, y); return x",
                  numpy.array([[1,2], [7, 8]]), numpy.array([3,4]),
                  np_copyto1=[NDArray[int, :, :], NDArray[int, :]])
# --- ndarray ** scalar with int/float bases and int/float/negative exponents ---
def test_numpy_pow0(self):
    self.run_test('def numpy_pow0(a): return a ** 2',
                  numpy.arange(100).reshape((10, 10)),
                  numpy_pow0=[NDArray[int,:,:]])
def test_numpy_pow1(self):
    self.run_test('def numpy_pow1(a): return a ** 2',
                  numpy.arange(100, dtype=float).reshape((10, 10)),
                  numpy_pow1=[NDArray[float,:,:]])
def test_numpy_pow2(self):
    self.run_test('def numpy_pow2(a): return a ** 2.2',
                  numpy.arange(100, dtype=float).reshape((10, 10)),
                  numpy_pow2=[NDArray[float,:,:]])
def test_numpy_pow3(self):
    # negative fractional exponent on an int array (first element is 0 ** -0.2)
    self.run_test('def numpy_pow3(a): return a ** -0.2',
                  numpy.arange(100, dtype=int),
                  numpy_pow3=[NDArray[int,:]])
# Elementwise + / - expression trees: chained operands, scalar mixing,
# unary negation, parenthesised sub-expressions, and cos() mixed in.
def test_add0(self):
    self.run_test("def np_add0(a, b): return a + b", numpy.ones(10), numpy.ones(10), np_add0=[NDArray[float,:], NDArray[float,:]])
def test_add1(self):
    self.run_test("def np_add1(a, b): return a + b + a", numpy.ones(10), numpy.ones(10), np_add1=[NDArray[float,:], NDArray[float,:]])
def test_add2(self):
    self.run_test("def np_add2(a, b): return a + b + 1", numpy.ones(10), numpy.ones(10), np_add2=[NDArray[float,:], NDArray[float,:]])
def test_add3(self):
    self.run_test("def np_add3(a, b): return 1. + a + b + 1.", numpy.ones(10), numpy.ones(10), np_add3=[NDArray[float,:], NDArray[float,:]])
def test_add4(self):
    self.run_test("def np_add4(a, b): return ( a + b ) + ( a + b )", numpy.ones(10), numpy.ones(10), np_add4=[NDArray[float,:], NDArray[float,:]])
def test_add5(self):
    self.run_test("def np_add5(a, b): return (-a) + (-b)", numpy.ones(10), numpy.ones(10), np_add5=[NDArray[float,:], NDArray[float,:]])
def test_sub0(self):
    self.run_test("def np_sub0(a, b): return a - b", numpy.ones(10), numpy.ones(10), np_sub0=[NDArray[float,:], NDArray[float,:]])
def test_sub1(self):
    self.run_test("def np_sub1(a, b): return a - b - a", numpy.ones(10), numpy.ones(10), np_sub1=[NDArray[float,:], NDArray[float,:]])
def test_sub2(self):
    self.run_test("def np_sub2(a, b): return a - b - 1", numpy.ones(10), numpy.ones(10), np_sub2=[NDArray[float,:], NDArray[float,:]])
def test_sub3(self):
    self.run_test("def np_sub3(a, b): return 1. - a - b - 1.", numpy.ones(10), numpy.ones(10), np_sub3=[NDArray[float,:], NDArray[float,:]])
def test_sub4(self):
    self.run_test("def np_sub4(a, b): return ( a - b ) - ( a - b )", numpy.ones(10), numpy.ones(10), np_sub4=[NDArray[float,:], NDArray[float,:]])
def test_addsub0(self):
    self.run_test("def np_addsub0(a, b): return a - b + a", numpy.ones(10), numpy.ones(10), np_addsub0=[NDArray[float,:], NDArray[float,:]])
def test_addsub1(self):
    self.run_test("def np_addsub1(a, b): return a + b - a", numpy.ones(10), numpy.ones(10), np_addsub1=[NDArray[float,:], NDArray[float,:]])
def test_addsub2(self):
    self.run_test("def np_addsub2(a, b): return a + b - 1", numpy.ones(10), numpy.ones(10), np_addsub2=[NDArray[float,:], NDArray[float,:]])
def test_addsub3(self):
    self.run_test("def np_addsub3(a, b): return 1. + a - b + 1.", numpy.ones(10), numpy.ones(10), np_addsub3=[NDArray[float,:], NDArray[float,:]])
def test_addsub4(self):
    self.run_test("def np_addsub4(a, b): return ( a - b ) + ( a + b )", numpy.ones(10), numpy.ones(10), np_addsub4=[NDArray[float,:], NDArray[float,:]])
def test_addcossub0(self):
    self.run_test("def np_addcossub0(a, b): from numpy import cos ; return a - b + cos(a)", numpy.ones(10), numpy.ones(10), np_addcossub0=[NDArray[float,:], NDArray[float,:]])
def test_addcossub1(self):
    self.run_test("def np_addcossub1(a, b): from numpy import cos ; return a + cos(b - a)", numpy.ones(10), numpy.ones(10), np_addcossub1=[NDArray[float,:], NDArray[float,:]])
def test_addcossub2(self):
    self.run_test("def np_addcossub2(a, b): from numpy import cos ; return a + cos(b - 1)", numpy.ones(10), numpy.ones(10), np_addcossub2=[NDArray[float,:], NDArray[float,:]])
def test_addcossub3(self):
    self.run_test("def np_addcossub3(a, b): from numpy import cos ; return cos(1. + a - b + cos(1.))", numpy.ones(10), numpy.ones(10), np_addcossub3=[NDArray[float,:], NDArray[float,:]])
def test_addcossub4(self):
    self.run_test("def np_addcossub4(a, b): from numpy import cos ; return cos( a - b ) + ( a + b )", numpy.ones(10), numpy.ones(10), np_addcossub4=[NDArray[float,:], NDArray[float,:]])
# Trigonometric ufuncs: array arguments first, then scalar (int/float)
# arguments, then modulo with negative operands.
def test_sin0(self):
    self.run_test("def np_sin0(a, b): from numpy import sin ; return sin(a) + b", numpy.ones(10), numpy.ones(10), np_sin0=[NDArray[float,:], NDArray[float,:]])
def test_tan0(self):
    self.run_test("def np_tan0(a, b): from numpy import tan ; return tan(a - b)", numpy.ones(10), numpy.ones(10), np_tan0=[NDArray[float,:], NDArray[float,:]])
def test_arccos0(self):
    self.run_test("def np_arccos0(a, b): from numpy import arccos ; return arccos(a - b) + 1", numpy.ones(10), numpy.ones(10), np_arccos0=[NDArray[float,:], NDArray[float,:]])
def test_arcsin0(self):
    self.run_test("def np_arcsin0(a, b): from numpy import arcsin ; return arcsin(a + b - a + -b) + 1.", numpy.ones(10), numpy.ones(10), np_arcsin0=[NDArray[float,:], NDArray[float,:]])
def test_arctan0(self):
    self.run_test("def np_arctan0(a, b): from numpy import arctan ; return arctan(a -0.5) + a", numpy.ones(10), numpy.ones(10), np_arctan0=[NDArray[float,:], NDArray[float,:]])
def test_arctan20(self):
    self.run_test("def np_arctan20(a, b): from numpy import arctan2 ; return b - arctan2(a , b)", numpy.ones(10), numpy.ones(10), np_arctan20=[NDArray[float,:], NDArray[float,:]])
def test_cos1(self):
    # scalar int argument
    self.run_test("def np_cos1(a): from numpy import cos; return cos(a)", 5, np_cos1=[int])
def test_sin1(self):
    self.run_test("def np_sin1(a): from numpy import sin; return sin(a)", 0.5, np_sin1=[float])
def test_tan1(self):
    self.run_test("def np_tan1(a): from numpy import tan; return tan(a)", 0.5, np_tan1=[float])
def test_arccos1(self):
    self.run_test("def np_arccos1(a): from numpy import arccos ; return arccos(a)", 1, np_arccos1=[int])
def test_arcsin1(self):
    self.run_test("def np_arcsin1(a): from numpy import arcsin ; return arcsin(a)", 1, np_arcsin1=[int])
def test_arctan1(self):
    self.run_test("def np_arctan1(a): from numpy import arctan ; return arctan(a)", 0.5, np_arctan1=[float])
def test_arctan21(self):
    self.run_test("def np_arctan21(a): from numpy import arctan2 ; b = .5 ; return arctan2(a , b)", 1., np_arctan21=[float])
def test_negative_mod(self):
    # Python/numpy % semantics: result takes the sign of the divisor.
    self.run_test("def np_negative_mod(a): return a % 5", numpy.array([-1, -5, -2, 7]), np_negative_mod=[NDArray[int,:]])
# ndarray binary ops with plain list / tuple operands (both sides), plus
# .round(), imag/real views (including write-through), and fill_diagonal.
def test_binary_op_list0(self):
    self.run_test("def np_binary_op_list0(n): return n + [1,2,3]", numpy.array([4,5,6]), np_binary_op_list0=[NDArray[int,:]])
def test_binary_op_list1(self):
    self.run_test("def np_binary_op_list1(n): return [1,2,3] + n", numpy.array([4,5,6]), np_binary_op_list1=[NDArray[int,:]])
def test_binary_op_list2(self):
    self.run_test("def np_binary_op_list2(n): return [[1],[2],[3]] + n", numpy.array([[4],[5],[6]]), np_binary_op_list2=[NDArray[int,:,:]])
def test_binary_op_array0(self):
    self.run_test("def np_binary_op_array0(n): return n + (1,2,3)", numpy.array([4,5,6]), np_binary_op_array0=[NDArray[int,:]])
def test_binary_op_array1(self):
    self.run_test("def np_binary_op_array1(n): return (1,2,3) + n", numpy.array([4,5,6]), np_binary_op_array1=[NDArray[int,:]])
def test_binary_op_array2(self):
    self.run_test("def np_binary_op_array2(n): return ((1,2,3),) + n", numpy.array([[4,5,6]]), np_binary_op_array2=[NDArray[int,:,:]])
def test_round_method(self):
    self.run_test("def np_round_method(a): return a.round()", numpy.array([[4.3,5.5,6.1]]), np_round_method=[NDArray[float,:,:]])
def test_list_imag0(self):
    self.run_test("def list_imag0(a): import numpy ; return numpy.imag(a)", numpy.array([complex(4.3,5.5),complex(6.1,3.2)]), list_imag0=[NDArray[complex,:]])
def test_list_imag1(self):
    # numpy.imag on a complex array returns a writable view: the store must
    # be visible through the original array.
    self.run_test("def list_imag1(a): import numpy ; numpy.imag(a)[0] = 1; return a", numpy.array([complex(4.3,5.5),complex(6.1,3.2)]), list_imag1=[NDArray[complex,:]])
def test_list_real0(self):
    self.run_test("def list_real0(a): import numpy ; return numpy.real(a)", numpy.array([complex(4.3,5.5),complex(6.1,3.2)]), list_real0=[NDArray[complex,:]])
def test_list_real1(self):
    self.run_test("def list_real1(a): import numpy ; numpy.real(a)[0] = 1; return a", numpy.array([complex(4.3,5.5),complex(6.1,3.2)]), list_real1=[NDArray[complex,:]])
def test_fill_diagonal_0(self):
    self.run_test("def fill_diagonal_0(a): import numpy ; numpy.fill_diagonal(a, 0); return a", numpy.ones((4,4)), fill_diagonal_0=[NDArray[float,:,:]])
def test_fill_diagonal_1(self):
    # non-square matrix
    self.run_test("def fill_diagonal_1(a): import numpy ; numpy.fill_diagonal(a, 0); return a", numpy.ones((4,6)), fill_diagonal_1=[NDArray[float,:,:]])
def test_fill_diagonal_2(self):
    # fill_diagonal applied to a sub-array view
    self.run_test("def fill_diagonal_2(n): import numpy ; a = numpy.ones((n,n, 5));numpy.fill_diagonal(a[0], 0); return a", 4, fill_diagonal_2=[int])
def test_fill_diagonal_3(self):
    self.run_test("def fill_diagonal_3(n): import numpy ; a = numpy.ones((n, n, 2, 2));numpy.fill_diagonal(a[0,1:3], 0); return a", 4, fill_diagonal_3=[int])
# numpy.interp coverage: default / left / right / period keyword arguments,
# strided xp/fp inputs, scalar x (float and int), and the *c variants with
# complex-valued fp.
def test_interp_0(self):
    self.run_test('def interp0(x,xp,fp): import numpy as np; return np.interp(x,xp,fp)',
                  numpy.random.randn(100),
                  numpy.sort(numpy.random.randn(1000)),
                  numpy.random.randn(1000),
                  interp0=[NDArray[float,:],NDArray[float,:],NDArray[float,:]])
def test_interp_1(self):
    # explicit left/right fill values
    self.run_test('def interp1(x,xp,fp): import numpy as np; return np.interp(x,xp,fp,-10.,10.)',
                  numpy.random.randn(100),
                  numpy.sort(numpy.random.randn(1000)),
                  numpy.random.randn(1000),
                  interp1=[NDArray[float,:],NDArray[float,:],NDArray[float,:]])
def test_interp_2(self):
    # strided xp/fp views
    self.run_test('def interp2(x,xp,fp): import numpy as np; return np.interp(x,xp[::2],fp[::2],-10.,10.)',
                  numpy.random.randn(100),
                  numpy.sort(numpy.random.randn(1000)),
                  numpy.random.randn(1000),
                  interp2=[NDArray[float,:],NDArray[float,:],NDArray[float,:]])
def test_interp_3(self):
    # left only, strided x as well
    self.run_test('def interp3(x,xp,fp): import numpy as np; return np.interp(x[::3],xp[::2],fp[::2],-10.)',
                  numpy.random.randn(100),
                  numpy.sort(numpy.random.randn(1000)),
                  numpy.random.randn(1000),
                  interp3=[NDArray[float,:],NDArray[float,:],NDArray[float,:]])
def test_interp_4(self):
    # periodic interpolation via the period keyword
    self.run_test('def interp4(x,xp,fp): import numpy as np; return np.interp(x,xp,fp,period=1.1)',
                  numpy.random.randn(100),
                  numpy.sort(numpy.random.randn(1000)),
                  numpy.random.randn(1000),
                  interp4=[NDArray[float,:],NDArray[float,:],NDArray[float,:]])
def test_interp_5(self):
    # all optional arguments passed positionally as None
    self.run_test('def interp5(x,factor): N = len(x); import numpy as np; return np.interp(np.arange(0, N - 1, factor), np.arange(N), x, None, None, None)',
                  numpy.random.randn(100),
                  10.,
                  interp5=[NDArray[float,:],float])
def test_interp_6(self):
    # scalar float x
    self.run_test('def interp6(x,xp,fp): import numpy as np; return np.interp(x,xp,fp)',
                  1.4,
                  numpy.sort(numpy.random.randn(1000)),
                  numpy.random.randn(1000),
                  interp6=[float,NDArray[float,:],NDArray[float,:]])
def test_interp_7(self):
    # scalar int x
    self.run_test('def interp7(x,xp,fp): import numpy as np; return np.interp(x,xp,fp)',
                  1,
                  numpy.sort(10*numpy.random.randn(1000)),
                  numpy.random.randn(1000),
                  interp7=[int,NDArray[float,:],NDArray[float,:]])
def test_interp_8(self):
    # int left/right fill values with float data
    self.run_test('def interp8(x,xp,fp): import numpy as np; return np.interp(x,xp,fp,-10,10)',
                  1.4,
                  numpy.sort(numpy.random.randn(1000)),
                  numpy.random.randn(1000),
                  interp8=[float,NDArray[float,:],NDArray[float,:]])
# Complex-valued fp variants of the tests above.
def test_interp_0c(self):
    self.run_test('def interp0c(x,xp,fp): import numpy as np; return np.interp(x,xp,fp)',
                  numpy.random.randn(100),
                  numpy.sort(numpy.random.randn(1000)),
                  numpy.random.randn(1000) + 1j*numpy.random.randn(1000),
                  interp0c=[NDArray[float,:],NDArray[float,:],NDArray[complex,:]])
def test_interp_1c(self):
    self.run_test('def interp1c(x,xp,fp): import numpy as np; return np.interp(x,xp,fp,-10.j,10.j)',
                  numpy.random.randn(100),
                  numpy.sort(numpy.random.randn(1000)),
                  numpy.random.randn(1000) + 1j*numpy.random.randn(1000),
                  interp1c=[NDArray[float,:],NDArray[float,:],NDArray[complex,:]])
def test_interp_2c(self):
    self.run_test('def interp2c(x,xp,fp): import numpy as np; return np.interp(x,xp[::2],fp[::2],-10.,10.)',
                  numpy.random.randn(100),
                  numpy.sort(numpy.random.randn(1000)),
                  numpy.random.randn(1000) + 1j*numpy.random.randn(1000),
                  interp2c=[NDArray[float,:],NDArray[float,:],NDArray[complex,:]])
def test_interp_3c(self):
    self.run_test('def interp3c(x,xp,fp): import numpy as np; return np.interp(x[::3],xp[::2],fp[::2],-10.j)',
                  numpy.random.randn(100),
                  numpy.sort(numpy.random.randn(1000)),
                  numpy.random.randn(1000) + 1j*numpy.random.randn(1000),
                  interp3c=[NDArray[float,:],NDArray[float,:],NDArray[complex,:]])
def test_interp_4c(self):
    self.run_test('def interp4c(x,xp,fp): import numpy as np; return np.interp(x,xp,fp,period=1.1)',
                  numpy.random.randn(100),
                  numpy.sort(numpy.random.randn(1000)),
                  numpy.random.randn(1000) + 1j*numpy.random.randn(1000),
                  interp4c=[NDArray[float,:],NDArray[float,:],NDArray[complex,:]])
def test_interp_5c(self):
    self.run_test('def interp5c(x,factor): N = len(x); import numpy as np; return np.interp(np.arange(0, N - 1, factor), np.arange(N), x, None, None, None)',
                  numpy.random.randn(100) + 1j*numpy.random.randn(100),
                  10.,
                  interp5c=[NDArray[complex,:],float])
def test_interp_6c(self):
    self.run_test('def interp6c(x,xp,fp): import numpy as np; return np.interp(x,xp,fp)',
                  1.4,
                  numpy.sort(numpy.random.randn(1000)),
                  numpy.random.randn(1000) + 1j*numpy.random.randn(1000),
                  interp6c=[float,NDArray[float,:],NDArray[complex,:]])
def test_interp_7c(self):
    self.run_test('def interp7c(x,xp,fp): import numpy as np; return np.interp(x,xp,fp)',
                  1,
                  numpy.sort(10*numpy.random.randn(1000)),
                  numpy.random.randn(1000) + 1j*numpy.random.randn(1000),
                  interp7c=[int,NDArray[float,:],NDArray[complex,:]])
def test_interp_8c(self):
    self.run_test('def interp8c(x,xp,fp): import numpy as np; return np.interp(x,xp,fp,-10j,10j)',
                  1.4,
                  numpy.sort(10*numpy.random.randn(1000)),
                  numpy.random.randn(1000) + 1j*numpy.random.randn(1000),
                  interp8c=[float,NDArray[float,:],NDArray[complex,:]])
# numpy.setdiff1d coverage: default and assume_unique=True, with a 2-D
# second operand (which setdiff1d flattens).
def test_setdiff1d0(self):
    self.run_test('def setdiff1d0(x,y): import numpy as np; return np.setdiff1d(x,y)',
                  numpy.random.randn(100),
                  numpy.random.randn(1000),
                  setdiff1d0=[NDArray[float,:],NDArray[float,:]])
def test_setdiff1d1(self):
    # assume_unique=True requires pre-deduplicated inputs, hence numpy.unique.
    # Fixed: the compiled function and kwarg were both named 'setdiff1d0',
    # colliding with test_setdiff1d0's compiled artifact; renamed to match
    # this test.
    self.run_test('def setdiff1d1(x,y): import numpy as np; return np.setdiff1d(x,y, True)',
                  numpy.unique(numpy.random.randn(1000)),
                  numpy.unique(numpy.random.randn(1000)),
                  setdiff1d1=[NDArray[float,:],NDArray[float,:]])
def test_setdiff1d2(self):
    self.run_test('def setdiff1d2(x,y): import numpy as np; return np.setdiff1d(x,y)',
                  numpy.random.randn(100),
                  numpy.array([[1,2],[2,4]]),
                  setdiff1d2=[NDArray[float,:],NDArray[int,:,:]])
def test_setdiff1d3(self):
    self.run_test('def setdiff1d3(x,y): import numpy as np; return np.setdiff1d(x,y, True)',
                  numpy.unique(numpy.random.randn(1000)),
                  numpy.array([[3,2],[5,4]]),
                  setdiff1d3=[NDArray[float,:],NDArray[int,:,:]])
|
# Copyright 2012 Kevin Goodsell
#
# This software is licensed under the Eclipse Public License (EPL) V1.0.
'''
Direct wrappers for STAF functions. These aren't "Pythonized", and aren't
intended to be used directly.
'''
import ctypes
from ._errors import STAFResultError
staf = ctypes.cdll.LoadLibrary('libSTAF.so')
def check_rc(result, func, arguments):
    '''
    ctypes errcheck hook: raise STAFResultError for any non-zero STAF
    return code, otherwise let the call return None.
    '''
    if result == 0:
        return None
    raise STAFResultError(result)
# Types
Handle_t = ctypes.c_uint # From STAF.h
SyncOption_t = ctypes.c_uint # From STAF.h
RC_t = ctypes.c_uint # Return-code type, from STAFError.h

# From STAFString.h:
class StringImplementation(ctypes.Structure):
    # Incomplete (opaque) type: no fields declared, only ever used behind
    # a pointer (String_t below).
    pass
String_t = ctypes.POINTER(StringImplementation)
class Utf8(object):
    '''
    Marker type for ctypes argtypes: adapts text arguments by encoding
    them to UTF-8 bytes before the foreign call.
    '''
    @classmethod
    def from_param(cls, text):
        # ctypes invokes from_param to convert each Python argument.
        encoded = text.encode('utf-8')
        return encoded
# Functions
# Each foreign function gets argtypes/restype declared; check_rc is installed
# as the errcheck hook so non-zero STAF return codes raise STAFResultError.
RegisterUTF8 = staf.STAFRegisterUTF8
RegisterUTF8.argtypes = (Utf8, ctypes.POINTER(Handle_t))
RegisterUTF8.restype = RC_t
RegisterUTF8.errcheck = check_rc
UnRegister = staf.STAFUnRegister
UnRegister.argtypes = (Handle_t,)
UnRegister.restype = RC_t
UnRegister.errcheck = check_rc
Submit2UTF8 = staf.STAFSubmit2UTF8
Submit2UTF8.argtypes = (
    Handle_t,                                       # handle
    SyncOption_t,                                   # syncOption
    Utf8,                                           # where
    Utf8,                                           # service
    ctypes.POINTER(ctypes.c_char),                  # request
    ctypes.c_uint,                                  # requestLength
    ctypes.POINTER(ctypes.POINTER(ctypes.c_char)),  # resultPtr
    ctypes.POINTER(ctypes.c_uint),                  # resultLength
)
Submit2UTF8.restype = RC_t
# NOTE(review): Submit2UTF8 deliberately has no errcheck — presumably the
# caller must inspect the RC itself (the result buffer may need freeing
# even on failure); confirm against calling code.
Free = staf.STAFFree
Free.argtypes = (Handle_t, ctypes.POINTER(ctypes.c_char))
Free.restype = RC_t
Free.errcheck = check_rc
# STAFString APIs:
StringConstruct = staf.STAFStringConstruct
StringConstruct.argtypes = (
    ctypes.POINTER(String_t),                       # pString
    ctypes.POINTER(ctypes.c_char),                  # buffer
    ctypes.c_uint,                                  # len
    ctypes.POINTER(ctypes.c_uint),                  # osRC
)
StringConstruct.restype = RC_t
StringConstruct.errcheck = check_rc
StringGetBuffer = staf.STAFStringGetBuffer
StringGetBuffer.argtypes = (
    String_t,                                       # aString
    ctypes.POINTER(ctypes.POINTER(ctypes.c_char)),  # buffer
    ctypes.POINTER(ctypes.c_uint),                  # len
    ctypes.POINTER(ctypes.c_uint),                  # osRC
)
StringGetBuffer.restype = RC_t
StringGetBuffer.errcheck = check_rc
StringDestruct = staf.STAFStringDestruct
StringDestruct.argtypes = (ctypes.POINTER(String_t),
                           ctypes.POINTER(ctypes.c_uint))
StringDestruct.restype = RC_t
StringDestruct.errcheck = check_rc
# Private data APIs:
AddPrivacyDelimiters = staf.STAFAddPrivacyDelimiters
AddPrivacyDelimiters.argtypes = (String_t, ctypes.POINTER(String_t))
AddPrivacyDelimiters.restype = RC_t
AddPrivacyDelimiters.errcheck = check_rc
RemovePrivacyDelimiters = staf.STAFRemovePrivacyDelimiters
RemovePrivacyDelimiters.argtypes = (String_t, ctypes.c_uint,
                                    ctypes.POINTER(String_t))
RemovePrivacyDelimiters.restype = RC_t
RemovePrivacyDelimiters.errcheck = check_rc
MaskPrivateData = staf.STAFMaskPrivateData
MaskPrivateData.argtypes = (String_t, ctypes.POINTER(String_t))
MaskPrivateData.restype = RC_t
MaskPrivateData.errcheck = check_rc
EscapePrivacyDelimiters = staf.STAFEscapePrivacyDelimiters
EscapePrivacyDelimiters.argtypes = (String_t, ctypes.POINTER(String_t))
EscapePrivacyDelimiters.restype = RC_t
EscapePrivacyDelimiters.errcheck = check_rc
class String(object):
    '''
    Wrapper for String_t with context management to deallocate.

    Usable as a context manager: the underlying STAF string is destructed
    on exit. The _as_parameter_ attribute lets instances be passed
    directly to ctypes foreign functions expecting String_t.
    '''
    def __init__(self, data=None):
        # Start with a NULL String_t; only construct a STAF string when
        # initial text is supplied.
        self._as_parameter_ = String_t()
        if data is None:
            return
        try:
            utf8 = data.encode('utf-8')
            StringConstruct(ctypes.byref(self._as_parameter_), utf8, len(utf8),
                            None)
        except:
            # Construction failed part-way: release anything allocated,
            # then re-raise the original exception.
            self.destroy()
            raise
    def byref(self):
        # For ctypes parameters declared as POINTER(String_t).
        return ctypes.byref(self._as_parameter_)
    def destroy(self):
        # Safe to call on a NULL pointer (the truth test skips destruct).
        if self._as_parameter_:
            StringDestruct(ctypes.byref(self._as_parameter_), None)
    def __nonzero__(self):
        # Python 2 truth hook: False while the pointer is NULL.
        return bool(self._as_parameter_)
    def __unicode__(self):
        # Copy the raw UTF-8 buffer out of the STAF string and decode it.
        buf = ctypes.POINTER(ctypes.c_char)()
        length = ctypes.c_uint()
        StringGetBuffer(self, ctypes.byref(buf), ctypes.byref(length), None)
        result = buf[:length.value]
        return result.decode('utf-8')
    def __enter__(self):
        return self
    def __exit__(self, exc_type, exc_value, exc_tb):
        self.destroy()
        # Don't suppress an exception raised in the with-body.
        return False
Try a few different ways to find the STAF library.
# Copyright 2012 Kevin Goodsell
#
# This software is licensed under the Eclipse Public License (EPL) V1.0.
'''
Direct wrappers for STAF functions. These aren't "Pythonized", and aren't
intended to be used directly.
'''
import ctypes
import ctypes.util
from ._errors import STAFResultError
def find_staf():
    '''
    Locate and load the STAF shared library.

    Tries direct LoadLibrary calls first (cheap), then falls back to
    ctypes.util.find_library. Raises ImportError if the library cannot
    be found.
    '''
    # Try to avoid the overhead of a find_library call
    for name in ('STAF', 'libSTAF.so'):
        try:
            return ctypes.cdll.LoadLibrary(name)
        except OSError:
            # LoadLibrary raises OSError when the library can't be
            # found/loaded. A bare `except:` here would also swallow
            # KeyboardInterrupt/SystemExit.
            pass
    # find_library looks like it could have significant overhead, so only try it
    # after direct load attempts fail.
    name = ctypes.util.find_library('STAF')
    if name:
        return ctypes.cdll.LoadLibrary(name)
    raise ImportError("Couldn't find STAF library")
# Loaded STAF shared library handle; every wrapper below resolves its
# symbol from it.
staf = find_staf()

def check_rc(result, func, arguments):
    '''
    ctypes errcheck function used to convert STAF function errors to exceptions.
    '''
    # Installed as the errcheck hook on the wrappers below: any non-zero
    # RC_t becomes a STAFResultError.
    if result != 0:
        raise STAFResultError(result)
# Types
Handle_t = ctypes.c_uint # From STAF.h
SyncOption_t = ctypes.c_uint # From STAF.h
RC_t = ctypes.c_uint # Return-code type, from STAFError.h

# From STAFString.h:
class StringImplementation(ctypes.Structure):
    # Incomplete (opaque) type: no fields declared, only ever used behind
    # a pointer (String_t below).
    pass
String_t = ctypes.POINTER(StringImplementation)
class Utf8(object):
    '''
    Represents UTF-8 encoded parameters.
    '''
    @classmethod
    def from_param(cls, text):
        # ctypes calls from_param to adapt each Python argument declared
        # with this type; STAF expects UTF-8 bytes.
        return text.encode('utf-8')
# Functions
# Each foreign function gets argtypes/restype declared; check_rc is installed
# as the errcheck hook so non-zero STAF return codes raise STAFResultError.
RegisterUTF8 = staf.STAFRegisterUTF8
RegisterUTF8.argtypes = (Utf8, ctypes.POINTER(Handle_t))
RegisterUTF8.restype = RC_t
RegisterUTF8.errcheck = check_rc
UnRegister = staf.STAFUnRegister
UnRegister.argtypes = (Handle_t,)
UnRegister.restype = RC_t
UnRegister.errcheck = check_rc
Submit2UTF8 = staf.STAFSubmit2UTF8
Submit2UTF8.argtypes = (
    Handle_t,                                       # handle
    SyncOption_t,                                   # syncOption
    Utf8,                                           # where
    Utf8,                                           # service
    ctypes.POINTER(ctypes.c_char),                  # request
    ctypes.c_uint,                                  # requestLength
    ctypes.POINTER(ctypes.POINTER(ctypes.c_char)),  # resultPtr
    ctypes.POINTER(ctypes.c_uint),                  # resultLength
)
Submit2UTF8.restype = RC_t
# NOTE(review): Submit2UTF8 deliberately has no errcheck — presumably the
# caller must inspect the RC itself (the result buffer may need freeing
# even on failure); confirm against calling code.
Free = staf.STAFFree
Free.argtypes = (Handle_t, ctypes.POINTER(ctypes.c_char))
Free.restype = RC_t
Free.errcheck = check_rc
# STAFString APIs:
StringConstruct = staf.STAFStringConstruct
StringConstruct.argtypes = (
    ctypes.POINTER(String_t),                       # pString
    ctypes.POINTER(ctypes.c_char),                  # buffer
    ctypes.c_uint,                                  # len
    ctypes.POINTER(ctypes.c_uint),                  # osRC
)
StringConstruct.restype = RC_t
StringConstruct.errcheck = check_rc
StringGetBuffer = staf.STAFStringGetBuffer
StringGetBuffer.argtypes = (
    String_t,                                       # aString
    ctypes.POINTER(ctypes.POINTER(ctypes.c_char)),  # buffer
    ctypes.POINTER(ctypes.c_uint),                  # len
    ctypes.POINTER(ctypes.c_uint),                  # osRC
)
StringGetBuffer.restype = RC_t
StringGetBuffer.errcheck = check_rc
StringDestruct = staf.STAFStringDestruct
StringDestruct.argtypes = (ctypes.POINTER(String_t),
                           ctypes.POINTER(ctypes.c_uint))
StringDestruct.restype = RC_t
StringDestruct.errcheck = check_rc
# Private data APIs:
AddPrivacyDelimiters = staf.STAFAddPrivacyDelimiters
AddPrivacyDelimiters.argtypes = (String_t, ctypes.POINTER(String_t))
AddPrivacyDelimiters.restype = RC_t
AddPrivacyDelimiters.errcheck = check_rc
RemovePrivacyDelimiters = staf.STAFRemovePrivacyDelimiters
RemovePrivacyDelimiters.argtypes = (String_t, ctypes.c_uint,
                                    ctypes.POINTER(String_t))
RemovePrivacyDelimiters.restype = RC_t
RemovePrivacyDelimiters.errcheck = check_rc
MaskPrivateData = staf.STAFMaskPrivateData
MaskPrivateData.argtypes = (String_t, ctypes.POINTER(String_t))
MaskPrivateData.restype = RC_t
MaskPrivateData.errcheck = check_rc
EscapePrivacyDelimiters = staf.STAFEscapePrivacyDelimiters
EscapePrivacyDelimiters.argtypes = (String_t, ctypes.POINTER(String_t))
EscapePrivacyDelimiters.restype = RC_t
EscapePrivacyDelimiters.errcheck = check_rc
class String(object):
    '''
    Wrapper for String_t with context management to deallocate.

    Passing an instance directly to a ctypes foreign function works via
    the _as_parameter_ attribute.
    '''
    def __init__(self, data=None):
        self._as_parameter_ = String_t()
        if data is None:
            return
        try:
            encoded = data.encode('utf-8')
            StringConstruct(ctypes.byref(self._as_parameter_), encoded,
                            len(encoded), None)
        except:
            # Release anything partially allocated, then re-raise.
            self.destroy()
            raise
    def byref(self):
        # For parameters declared as POINTER(String_t).
        return ctypes.byref(self._as_parameter_)
    def destroy(self):
        # A NULL pointer is falsy, so double-destroy is harmless.
        if self._as_parameter_:
            StringDestruct(ctypes.byref(self._as_parameter_), None)
    def __nonzero__(self):
        # Python 2 truth hook: False while the pointer is NULL.
        return bool(self._as_parameter_)
    def __unicode__(self):
        # Pull the raw UTF-8 buffer out of the STAF string and decode it.
        buffer_ptr = ctypes.POINTER(ctypes.c_char)()
        buffer_len = ctypes.c_uint()
        StringGetBuffer(self, ctypes.byref(buffer_ptr),
                        ctypes.byref(buffer_len), None)
        raw = buffer_ptr[:buffer_len.value]
        return raw.decode('utf-8')
    def __enter__(self):
        return self
    def __exit__(self, exc_type, exc_value, exc_tb):
        self.destroy()
        # Don't suppress an exception raised in the with-body.
        return False
|
#!/usr/bin/env python
import urllib2,urllib
from django.conf import settings
from nodegroups import *
try:
import json
except ImportError:
import simplejson as json
class SaltAPI(object):
    '''
    Thin urllib2-based client for the salt-api HTTP interface.

    Authenticates with PAM eauth at construction time and stores the
    session token for all subsequent requests.
    '''
    # Class-level default; overwritten per instance after a successful login.
    __token_id = ''
    def __init__(self):
        self.__url = settings.SALT_API_URL
        self.__user = settings.SALT_API_USER
        self.__password = settings.SALT_API_PASSWD
        ''' user login and get token id '''
        params = {'eauth': 'pam', 'username': self.__user, 'password': self.__password}
        encode = urllib.urlencode(params)
        obj = urllib.unquote(encode)
        content = self.postRequest(obj, prefix='/login')
        try:
            self.__token_id = content['return'][0]['token']
        except KeyError:
            # NOTE(review): `raise KeyError` raises a *new*, empty KeyError,
            # discarding the original key/message; a bare `raise` would
            # preserve it.
            raise KeyError
    def postRequest(self,obj,prefix='/'):
        # POST the urlencoded payload to url+prefix and decode the JSON body.
        url = self.__url + prefix
        headers = {'X-Auth-Token' : self.__token_id}
        req = urllib2.Request(url, obj, headers)
        opener = urllib2.urlopen(req)
        content = json.loads(opener.read())
        return content
    def postRequest1(self,obj,prefix='/'):
        # Same as postRequest but returns the response headers, not the body.
        url = self.__url + prefix
        headers = {'X-Auth-Token' : self.__token_id}
        req = urllib2.Request(url, obj, headers)
        opener = urllib2.urlopen(req)
        content = opener.info()
        return content
    def list_all_key(self):
        # wheel key.list_all: returns the full key listing structure.
        params = {'client': 'wheel', 'fun': 'key.list_all'}
        obj = urllib.urlencode(params)
        content = self.postRequest(obj)
        #minions = content['return'][0]['data']['return']['minions']
        #minions_pre = content['return'][0]['data']['return']['minions_pre']
        #return minions,minions_pre
        minions = content['return'][0]['data']['return']
        return minions
    def delete_key(self,node_name):
        # wheel key.delete: returns the boolean success flag.
        params = {'client': 'wheel', 'fun': 'key.delete', 'match': node_name}
        obj = urllib.urlencode(params)
        content = self.postRequest(obj)
        ret = content['return'][0]['data']['success']
        return ret
    def accept_key(self,node_name):
        params = {'client': 'wheel', 'fun': 'key.accept', 'match': node_name}
        obj = urllib.urlencode(params)
        content = self.postRequest(obj)
        ret = content['return'][0]['data']['success']
        return ret
    def reject_key(self,node_name):
        params = {'client': 'wheel', 'fun': 'key.reject', 'match': node_name}
        obj = urllib.urlencode(params)
        content = self.postRequest(obj)
        ret = content['return'][0]['data']['success']
        return ret
    def remote_noarg_execution(self,tgt,fun):
        ''' Execute commands without parameters '''
        params = {'client': 'local', 'tgt': tgt, 'fun': fun}
        obj = urllib.urlencode(params)
        content = self.postRequest(obj)
        # NOTE(review): raises KeyError if tgt is absent from the reply.
        ret = content['return'][0][tgt]
        return ret
    def remote_execution(self,tgt,fun,arg):
        ''' Command execution with parameters '''
        params = {'client': 'local', 'tgt': tgt, 'fun': fun, 'arg': arg}
        obj = urllib.urlencode(params)
        content = self.postRequest(obj)
        ret = content['return'][0][tgt]
        return ret
    def shell_remote_execution(self,tgt,arg):
        ''' Shell command execution with parameters '''
        # tgt is a comma-separated minion list (expr_form 'list').
        params = {'client': 'local', 'tgt': tgt, 'fun': 'cmd.run', 'arg': arg, 'expr_form': 'list'}
        obj = urllib.urlencode(params)
        content = self.postRequest(obj)
        ret = content['return'][0]
        return ret
    def grains(self,tgt,arg):
        ''' Grains.item '''
        params = {'client': 'local', 'tgt': tgt, 'fun': 'grains.item', 'arg': arg}
        obj = urllib.urlencode(params)
        content = self.postRequest(obj)
        ret = content['return'][0]
        return ret
    def target_remote_execution(self,tgt,fun,arg):
        ''' Use targeting for remote execution '''
        params = {'client': 'local', 'tgt': tgt, 'fun': fun, 'arg': arg, 'expr_form': 'nodegroup'}
        obj = urllib.urlencode(params)
        content = self.postRequest(obj)
        jid = content['return'][0]['jid']
        return jid
    def deploy(self,tgt,arg):
        ''' Module deployment '''
        params = {'client': 'local', 'tgt': tgt, 'fun': 'state.sls', 'arg': arg}
        obj = urllib.urlencode(params)
        content = self.postRequest(obj)
        return content
    def async_deploy(self,tgt,arg):
        ''' Asynchronously send a command to connected minions '''
        params = {'client': 'local_async', 'tgt': tgt, 'fun': 'state.sls', 'arg': arg}
        obj = urllib.urlencode(params)
        content = self.postRequest(obj)
        jid = content['return'][0]['jid']
        return jid
    def target_deploy(self,tgt,arg):
        ''' Based on the list forms deployment '''
        params = {'client': 'local_async', 'tgt': tgt, 'fun': 'state.sls', 'arg': arg, 'expr_form': 'list'}
        obj = urllib.urlencode(params)
        content = self.postRequest(obj)
        jid = content['return'][0]['jid']
        return jid
    def jobs_list(self):
        ''' Get Cache Jobs Defaut 24h '''
        # GET request (no body) against the /jobs/ endpoint.
        url = self.__url + '/jobs/'
        headers = {'X-Auth-Token': self.__token_id}
        req = urllib2.Request(url, headers=headers)
        opener = urllib2.urlopen(req)
        content = json.loads(opener.read())
        jid = content['return'][0]
        return jid
    def runner_status(self,arg):
        ''' Return minion status '''
        # Runs the manage.<arg> runner, e.g. manage.status / manage.up.
        params = {'client': 'runner', 'fun': 'manage.' + arg }
        obj = urllib.urlencode(params)
        content = self.postRequest(obj)
        jid = content['return'][0]
        return jid
    def runner(self,arg):
        ''' Run an arbitrary named runner function '''
        params = {'client': 'runner', 'fun': arg }
        obj = urllib.urlencode(params)
        content = self.postRequest(obj)
        jid = content['return'][0]
        return jid
def main():
    # Ad-hoc smoke test (Python 2): query minion status, then summarise the
    # OS distribution of the minions that are up as
    # [{'value': count, 'name': '<osfullname><osrelease>'}, ...].
    #sapi = SaltAPI(url='http://127.0.0.1:8000',username='admin',password='admin')
    sapi = SaltAPI()
    #jid = sapi.target_deploy('echo.example.sinanode.com.cn','nginx-test')
    #jids = sapi.shell_remote_execution('echo','netstat -tnlp')
    #jids = "salt-run jobs.lookup_jid " + jid
    #print jid
    #time.sleep(100)
    #result = os.popen(jids).readlines()
    #if result == "":
    #    result = "Execute time too long, Please Click " jid + " show it"
    #    print result
    #else:
    #    print result
    status_all = sapi.runner_status('status')
    #b = NodeGroups()
    #b = sapi.runner("status")
    #print a
    # NOTE(review): this repeats the manage.status runner call made just
    # above; the first result (status_all) is otherwise unused.
    up_host = sapi.runner_status('status')['up']
    os_list = []
    os_release = []
    os_all = []
    for hostname in up_host:
        osfullname = sapi.grains(hostname,'osfullname')[hostname]['osfullname']
        osrelease = sapi.grains(hostname,'osrelease')[hostname]['osrelease']
        os = osfullname + osrelease
        os_list.append(os)
    os_uniq = set(os_list)
    for release in os_uniq:
        num = os_list.count(release)
        os_dic = {'value': num, 'name': release}
        os_all.append(os_dic)
    os_release = list(set(os_list))
    print os_release
    print os_all
if __name__ == '__main__':
    main()
KeyError exception handling.
#!/usr/bin/env python
import urllib2,urllib
from django.conf import settings
from nodegroups import *
try:
import json
except ImportError:
import simplejson as json
class SaltAPI(object):
__token_id = ''
def __init__(self):
self.__url = settings.SALT_API_URL
self.__user = settings.SALT_API_USER
self.__password = settings.SALT_API_PASSWD
''' user login and get token id '''
params = {'eauth': 'pam', 'username': self.__user, 'password': self.__password}
encode = urllib.urlencode(params)
obj = urllib.unquote(encode)
content = self.postRequest(obj, prefix='/login')
try:
self.__token_id = content['return'][0]['token']
except KeyError:
raise KeyError
def postRequest(self,obj,prefix='/'):
url = self.__url + prefix
headers = {'X-Auth-Token' : self.__token_id}
req = urllib2.Request(url, obj, headers)
opener = urllib2.urlopen(req)
content = json.loads(opener.read())
return content
def postRequest1(self,obj,prefix='/'):
url = self.__url + prefix
headers = {'X-Auth-Token' : self.__token_id}
req = urllib2.Request(url, obj, headers)
opener = urllib2.urlopen(req)
content = opener.info()
return content
def list_all_key(self):
params = {'client': 'wheel', 'fun': 'key.list_all'}
obj = urllib.urlencode(params)
content = self.postRequest(obj)
#minions = content['return'][0]['data']['return']['minions']
#minions_pre = content['return'][0]['data']['return']['minions_pre']
#return minions,minions_pre
minions = content['return'][0]['data']['return']
return minions
def delete_key(self,node_name):
params = {'client': 'wheel', 'fun': 'key.delete', 'match': node_name}
obj = urllib.urlencode(params)
content = self.postRequest(obj)
ret = content['return'][0]['data']['success']
return ret
def accept_key(self,node_name):
params = {'client': 'wheel', 'fun': 'key.accept', 'match': node_name}
obj = urllib.urlencode(params)
content = self.postRequest(obj)
ret = content['return'][0]['data']['success']
return ret
def reject_key(self,node_name):
params = {'client': 'wheel', 'fun': 'key.reject', 'match': node_name}
obj = urllib.urlencode(params)
content = self.postRequest(obj)
ret = content['return'][0]['data']['success']
return ret
def remote_noarg_execution(self,tgt,fun):
''' Execute commands without parameters '''
params = {'client': 'local', 'tgt': tgt, 'fun': fun}
obj = urllib.urlencode(params)
content = self.postRequest(obj)
try:
ret = content['return'][0][tgt]
except Exception as e:
pass
return ret
def remote_execution(self,tgt,fun,arg):
''' Command execution with parameters '''
params = {'client': 'local', 'tgt': tgt, 'fun': fun, 'arg': arg}
obj = urllib.urlencode(params)
content = self.postRequest(obj)
ret = content['return'][0][tgt]
return ret
def shell_remote_execution(self,tgt,arg):
''' Shell command execution with parameters '''
params = {'client': 'local', 'tgt': tgt, 'fun': 'cmd.run', 'arg': arg, 'expr_form': 'list'}
obj = urllib.urlencode(params)
content = self.postRequest(obj)
ret = content['return'][0]
return ret
def grains(self,tgt,arg):
''' Grains.item '''
params = {'client': 'local', 'tgt': tgt, 'fun': 'grains.item', 'arg': arg}
obj = urllib.urlencode(params)
content = self.postRequest(obj)
ret = content['return'][0]
return ret
def target_remote_execution(self,tgt,fun,arg):
''' Use targeting for remote execution '''
params = {'client': 'local', 'tgt': tgt, 'fun': fun, 'arg': arg, 'expr_form': 'nodegroup'}
obj = urllib.urlencode(params)
content = self.postRequest(obj)
jid = content['return'][0]['jid']
return jid
def deploy(self,tgt,arg):
''' Module deployment '''
params = {'client': 'local', 'tgt': tgt, 'fun': 'state.sls', 'arg': arg}
obj = urllib.urlencode(params)
content = self.postRequest(obj)
return content
def async_deploy(self, tgt, arg):
    '''Apply a salt state asynchronously; returns the job id.'''
    payload = urllib.urlencode({
        'client': 'local_async',
        'tgt': tgt,
        'fun': 'state.sls',
        'arg': arg,
    })
    response = self.postRequest(payload)
    return response['return'][0]['jid']
def target_deploy(self, tgt, arg):
    '''Apply a salt state asynchronously to a list of minions; returns the job id.'''
    payload = urllib.urlencode({
        'client': 'local_async',
        'tgt': tgt,
        'fun': 'state.sls',
        'arg': arg,
        'expr_form': 'list',
    })
    response = self.postRequest(payload)
    return response['return'][0]['jid']
def jobs_list(self):
    '''Get Cache Jobs Defaut 24h'''
    request = urllib2.Request(
        self.__url + '/jobs/',
        headers={'X-Auth-Token': self.__token_id})
    body = urllib2.urlopen(request).read()
    return json.loads(body)['return'][0]
def runner_status(self, arg):
    '''Run the manage.<arg> runner (e.g. "status") and return its result.'''
    payload = urllib.urlencode(
        {'client': 'runner', 'fun': 'manage.' + arg})
    response = self.postRequest(payload)
    return response['return'][0]
def runner(self, arg):
    '''Run an arbitrary salt runner function and return its result.'''
    payload = urllib.urlencode({'client': 'runner', 'fun': arg})
    response = self.postRequest(payload)
    return response['return'][0]
def main():
#sapi = SaltAPI(url='http://127.0.0.1:8000',username='admin',password='admin')
sapi = SaltAPI()
#jid = sapi.target_deploy('echo.example.sinanode.com.cn','nginx-test')
#jids = sapi.shell_remote_execution('echo','netstat -tnlp')
#jids = "salt-run jobs.lookup_jid " + jid
#print jid
#time.sleep(100)
#result = os.popen(jids).readlines()
#if result == "":
# result = "Execute time too long, Please Click " jid + " show it"
# print result
#else:
# print result
status_all = sapi.runner_status('status')
#b = NodeGroups()
#b = sapi.runner("status")
#print a
up_host = sapi.runner_status('status')['up']
os_list = []
os_release = []
os_all = []
for hostname in up_host:
osfullname = sapi.grains(hostname,'osfullname')[hostname]['osfullname']
osrelease = sapi.grains(hostname,'osrelease')[hostname]['osrelease']
os = osfullname + osrelease
os_list.append(os)
os_uniq = set(os_list)
for release in os_uniq:
num = os_list.count(release)
os_dic = {'value': num, 'name': release}
os_all.append(os_dic)
os_release = list(set(os_list))
print os_release
print os_all
# Script entry point: print the OS distribution summary.
if __name__ == '__main__':
    main()
# |
# -*- coding: utf-8 -*-
"""
gspread.utils
~~~~~~~~~~~~~
This module contains utility functions.
"""
import re
from xml.etree import ElementTree
from .exceptions import IncorrectCellLabel
# 64 == ord('A') - 1: offset used to convert between 1-based column digits
# and ASCII uppercase letters in A1 notation.
MAGIC_NUMBER = 64
# A1 cell label: one or more letters followed by a 1-based row number.
CELL_ADDR_RE = re.compile(r'([A-Za-z]+)([1-9]\d*)')
def finditem(func, seq):
    """Return the first item of *seq* for which ``func(item)`` is true.

    Raises StopIteration when no item matches.
    """
    for item in seq:
        if func(item):
            return item
    raise StopIteration
# http://stackoverflow.com/questions/749796/pretty-printing-xml-in-python
# http://effbot.org/zone/element-lib.htm#prettyprint
def _indent(elem, level=0):
    # Pretty-print helper: recursively rewrites .text/.tail whitespace so
    # the serialized tree comes out with one indent step per nesting level.
    # NOTE(review): the indent unit shown here is a single space -- confirm
    # against the upstream source (commonly two spaces per level).
    i = "\n" + level * " "
    if len(elem):
        if not elem.text or not elem.text.strip():
            elem.text = i + " "
        if not elem.tail or not elem.tail.strip():
            elem.tail = i
        # Rebinding ``elem`` in the loop is deliberate: after the loop it
        # refers to the LAST child, whose tail is then reset to close the
        # parent's indentation level.
        for elem in elem:
            _indent(elem, level + 1)
        if not elem.tail or not elem.tail.strip():
            elem.tail = i
    else:
        if level and (not elem.tail or not elem.tail.strip()):
            elem.tail = i
def _ds(elem):
    """ElementTree debug function.

    Indents *elem* in place, then renders the xml tree to a string.
    """
    _indent(elem)
    rendered = ElementTree.tostring(elem)
    return rendered
def numericise(value, empty2zero=False, default_blank=""):
    """Convert *value* to int or float when possible.

    - int when the string parses as an integer
    - float when it parses as a float
    - 0 when the string is empty and *empty2zero* is set
    - *default_blank* when the string is empty otherwise
    - the original value (including None) in every other case

    Executable examples:
    >>> numericise("faa")
    'faa'
    >>> numericise("3")
    3
    >>> numericise("3.1")
    3.1
    >>> numericise("", empty2zero=True)
    0
    >>> numericise("", default_blank="foo")
    'foo'
    >>> numericise("")
    ''
    >>> numericise(None)
    >>>
    """
    if value is None:
        return None
    try:
        return int(value)
    except ValueError:
        pass
    try:
        return float(value)
    except ValueError:
        pass
    if value == "":
        return 0 if empty2zero else default_blank
    return value
def numericise_all(input, empty2zero=False, default_blank=""):
    """Apply :func:`numericise` to every string in *input* and return a list."""
    return [numericise(cell, empty2zero, default_blank) for cell in input]
def rowcol_to_a1(row, col):
    """Translates a row and column cell address to A1 notation.

    :param row: The row of the cell to be converted.
        Rows start at index 1.
    :param col: The column of the cell to be converted.
        Columns start at index 1.
    :returns: a string containing the cell's coordinates in A1 notation.

    Example:
    >>> rowcol_to_a1(1, 1)
    'A1'
    """
    # Doc fix: the doctest expected output must be the quoted repr 'A1';
    # the bare A1 made ``doctest.testmod()`` fail.
    row = int(row)
    col = int(col)
    if row < 1 or col < 1:
        raise IncorrectCellLabel('(%s, %s)' % (row, col))

    # Column letters form a bijective base-26 number (A=1 ... Z=26, no
    # zero digit), built least-significant letter first.
    letters = []
    remaining = col
    while remaining:
        remaining, digit = divmod(remaining, 26)
        if digit == 0:
            digit = 26
            remaining -= 1
        letters.append(chr(ord('A') + digit - 1))
    return '%s%s' % (''.join(reversed(letters)), row)
def a1_to_rowcol(label):
    """Translates a cell's address in A1 notation to a tuple of integers.

    :param label: String with cell label in A1 notation, e.g. 'B1'.
        Letter case is ignored.
    :returns: a tuple containing `row` and `column` numbers. Both indexed
        from 1 (one).

    Example:
    >>> a1_to_rowcol('A1')
    (1, 1)
    """
    match = CELL_ADDR_RE.match(label)
    if not match:
        raise IncorrectCellLabel(label)
    letters = match.group(1).upper()
    row = int(match.group(2))
    # Interpret the letters as a bijective base-26 number (A=1 ... Z=26).
    col = 0
    for char in letters:
        col = col * 26 + (ord(char) - MAGIC_NUMBER)
    return (row, col)
# Run the docstring examples in this module as tests when executed directly.
if __name__ == '__main__':
    import doctest
    doctest.testmod()
# fix markup
# -*- coding: utf-8 -*-
"""
gspread.utils
~~~~~~~~~~~~~
This module contains utility functions.
"""
import re
from xml.etree import ElementTree
from .exceptions import IncorrectCellLabel
# 64 == ord('A') - 1: offset used to convert between 1-based column digits
# and ASCII uppercase letters in A1 notation.
MAGIC_NUMBER = 64
# A1 cell label: one or more letters followed by a 1-based row number.
CELL_ADDR_RE = re.compile(r'([A-Za-z]+)([1-9]\d*)')
def finditem(func, seq):
    """Return the first item of *seq* for which ``func(item)`` is true.

    Raises StopIteration when no item matches.
    """
    for item in seq:
        if func(item):
            return item
    raise StopIteration
# http://stackoverflow.com/questions/749796/pretty-printing-xml-in-python
# http://effbot.org/zone/element-lib.htm#prettyprint
def _indent(elem, level=0):
    # Pretty-print helper: recursively rewrites .text/.tail whitespace so
    # the serialized tree comes out with one indent step per nesting level.
    # NOTE(review): the indent unit shown here is a single space -- confirm
    # against the upstream source (commonly two spaces per level).
    i = "\n" + level * " "
    if len(elem):
        if not elem.text or not elem.text.strip():
            elem.text = i + " "
        if not elem.tail or not elem.tail.strip():
            elem.tail = i
        # Rebinding ``elem`` in the loop is deliberate: after the loop it
        # refers to the LAST child, whose tail is then reset to close the
        # parent's indentation level.
        for elem in elem:
            _indent(elem, level + 1)
        if not elem.tail or not elem.tail.strip():
            elem.tail = i
    else:
        if level and (not elem.tail or not elem.tail.strip()):
            elem.tail = i
def _ds(elem):
    """ElementTree debug function.

    Indents *elem* in place, then renders the xml tree to a string.
    """
    _indent(elem)
    rendered = ElementTree.tostring(elem)
    return rendered
def numericise(value, empty2zero=False, default_blank=""):
    """Convert *value* to int or float when possible.

    - int when the string parses as an integer
    - float when it parses as a float
    - 0 when the string is empty and *empty2zero* is set
    - *default_blank* when the string is empty otherwise
    - the original value (including None) in every other case

    Executable examples:
    >>> numericise("faa")
    'faa'
    >>> numericise("3")
    3
    >>> numericise("3.1")
    3.1
    >>> numericise("", empty2zero=True)
    0
    >>> numericise("", default_blank="foo")
    'foo'
    >>> numericise("")
    ''
    >>> numericise(None)
    >>>
    """
    if value is None:
        return None
    try:
        return int(value)
    except ValueError:
        pass
    try:
        return float(value)
    except ValueError:
        pass
    if value == "":
        return 0 if empty2zero else default_blank
    return value
def numericise_all(input, empty2zero=False, default_blank=""):
    """Apply :func:`numericise` to every string in *input* and return a list."""
    return [numericise(cell, empty2zero, default_blank) for cell in input]
def rowcol_to_a1(row, col):
    """Translates a row and column cell address to A1 notation.

    :param row: The row of the cell to be converted.
        Rows start at index 1.
    :param col: The column of the cell to be converted.
        Columns start at index 1.
    :returns: a string containing the cell's coordinates in A1 notation.

    Example:
    >>> rowcol_to_a1(1, 1)
    'A1'
    """
    # Doc fix: the doctest expected output must be the quoted repr 'A1';
    # the bare A1 made ``doctest.testmod()`` fail.
    row = int(row)
    col = int(col)
    if row < 1 or col < 1:
        raise IncorrectCellLabel('(%s, %s)' % (row, col))

    # Column letters form a bijective base-26 number (A=1 ... Z=26, no
    # zero digit), built least-significant letter first.
    letters = []
    remaining = col
    while remaining:
        remaining, digit = divmod(remaining, 26)
        if digit == 0:
            digit = 26
            remaining -= 1
        letters.append(chr(ord('A') + digit - 1))
    return '%s%s' % (''.join(reversed(letters)), row)
def a1_to_rowcol(label):
    """Translates a cell's address in A1 notation to a tuple of integers.

    :param label: String with cell label in A1 notation, e.g. 'B1'.
        Letter case is ignored.
    :returns: a tuple containing `row` and `column` numbers. Both indexed
        from 1 (one).

    Example:
    >>> a1_to_rowcol('A1')
    (1, 1)
    """
    match = CELL_ADDR_RE.match(label)
    if not match:
        raise IncorrectCellLabel(label)
    letters = match.group(1).upper()
    row = int(match.group(2))
    # Interpret the letters as a bijective base-26 number (A=1 ... Z=26).
    col = 0
    for char in letters:
        col = col * 26 + (ord(char) - MAGIC_NUMBER)
    return (row, col)
# Run the docstring examples in this module as tests when executed directly.
if __name__ == '__main__':
    import doctest
    doctest.testmod()
# |
import os
import copy
import etcd
import time
from gevent import socket
from tendrl.commons.utils import etcd_utils
from tendrl.commons.utils import log_utils as logger
from tendrl.monitoring_integration.grafana import cluster_detail
class GraphitePlugin():
    """Reads Tendrl cluster state out of etcd and exposes it to Graphite.

    Keeps one TCP socket to the carbon line receiver (endpoints come from
    ``NS.config.data``).  ``push_metrics`` writes single datapoints;
    ``get_central_store_data`` walks the etcd tree and builds a per-cluster
    summary of Cluster/Brick/Volume/Node state plus aggregate counters.
    """
    def __init__(self):
        # Connection settings come from the monitoring-integration config.
        self.host = NS.config.data["datasource_host"]
        self.port = NS.config.data["datasource_port"]
        self.carbon_port = NS.config.data["carbon_port"]
        self.prefix = 'tendrl'
        self._connect()
    def _connect(self):
        # (Re)open the TCP connection to carbon; failures are logged and
        # re-raised.
        try:
            self.graphite_sock = socket.socket()
            self.graphite_sock.connect((self.host, int(self.carbon_port)))
        except socket.error as ex:
            logger.log("error", NS.get("publisher_id", None),
                       {'message': "Cannot connect to graphite socket" + str(ex)})
            raise ex
    def _resend(self, message):
        # One reconnect-and-retry attempt after a failed send.
        try:
            self._connect()
            response = self.graphite_sock.sendall(message)
        except socket.error as ex:
            logger.log("error", NS.get("publisher_id", None),
                       {'message': "Cannot send data to graphite socket" + str(ex)})
            raise ex
    def push_metrics(self, metric_name, metric_value):
        # Emit one "<prefix>.<metric> <value> <unix-ts>" carbon line and
        # retry once through _resend() if the socket write fails.
        message = '%s%s%s %s %d\n' % (
            self.prefix,
            str("."),
            metric_name,
            str(metric_value),
            int(time.time())
        )
        try:
            response = self.graphite_sock.sendall(message)
        except socket.error as ex:
            response = self.graphite_sock is None or self._resend(message)
        return response
    def get_resource_count(self, resource_details, obj_attr):
        # Derive total/up/down/partial counters from obj_attr["count"]
        # rules: a detail is counted when its attribute value appears in
        # the rule's accepted values; "total" counts every detail once.
        total = 0
        up = 0
        down = 0
        partial = 0
        for key, value in obj_attr["count"].items():
            for resource_detail in resource_details["details"]:
                if key == "total":
                    total = total + 1
                if key == "up":
                    for attr_key, attr_values in obj_attr["count"]["up"].items():
                        if resource_detail[attr_key] in attr_values:
                            up = up + 1
                if key == "down":
                    for attr_key, attr_values in obj_attr["count"]["down"].items():
                        if resource_detail[attr_key] in attr_values:
                            down = down + 1
                if key == "partial":
                    for attr_key, attr_values in obj_attr["count"]["partial"].items():
                        if resource_detail[attr_key] in attr_values:
                            partial = partial + 1
        resource_details["total"] = total
        resource_details["up"] = up
        resource_details["down"] = down
        resource_details["partial"] = partial
        return resource_details
    def get_object_from_central_store(self, resource_key, obj_attr):
        # Read every child of resource_key from etcd, keeping only the
        # attributes named in obj_attr["attrs"], then (optionally) attach
        # the count summary.
        attr_details = etcd_utils.read(resource_key)
        resource_details = {"details" : []}
        for attr_detail in attr_details.leaves:
            resource_detail = {}
            attr_key = attr_detail.key.rsplit("/", 1)[1]
            for key, value in obj_attr["attrs"].items():
                sub_attr = etcd_utils.read(os.path.join(resource_key, attr_key, key))
                resource_detail[key] = sub_attr.value
            resource_details["details"].append(resource_detail)
        try:
            if obj_attr["count"]:
                resource_details = self.get_resource_count(resource_details, obj_attr)
        except KeyError:
            # The "count" spec is optional.
            pass
        return resource_details
    def get_resource_keys(self, key, resource_name):
        # List the child key names under key/resource_name; returns an
        # empty list when the lookup fails.
        resource_list = []
        try:
            resource_details = etcd_utils.read(key + "/" + str(resource_name))
            for resource in resource_details.leaves:
                resource_list.append(resource.key.split('/')[-1])
        except (KeyError, etcd.EtcdKeyNotFound) as ex:
            logger.log("error", NS.get("publisher_id", None),
                       {'message': "Error while fetching " +
                        str(resource_name).split('/')[0] + str(ex)})
        return resource_list
    def get_central_store_data(self, objects):
        # Walk the etcd tree for every cluster and build ClusterDetail
        # objects, then derive the aggregate counters.
        try:
            cluster_list = self.get_resource_keys("", "clusters")
            cluster_data = []
            for cluster_id in cluster_list:
                cluster_details = cluster_detail.ClusterDetail()
                cluster_details.integration_id = cluster_id
                cluster_key = objects["Cluster"]["value"].replace("$integration_id",
                                                                  cluster_details.integration_id)
                # Cluster-level objects (skip metric/value metadata keys).
                for obj in objects["Cluster"]:
                    if obj in ["metric", "value"]:
                        continue
                    resource_detail = {}
                    resource_detail[str(obj)] = {}
                    obj_details = objects["Cluster"][str(obj)]
                    obj_key = os.path.join(cluster_key, str(obj))
                    obj_attrs = obj_details["attrs"]
                    for key, value in obj_attrs.items():
                        try:
                            attr_key = os.path.join(obj_key, key)
                            attr_data = etcd_utils.read(attr_key)
                            attr_value = self.cluster_status_mapper(str(attr_data.value).lower())
                            resource_detail[str(obj)][key] = copy.deepcopy(attr_value)
                        except (KeyError, etcd.EtcdKeyNotFound) as ex:
                            logger.log("error", NS.get("publisher_id", None),
                                       {'message': "Cannot Find {0} in Cluster {1}".format(key, cluster_id) + str(ex)})
                    cluster_details.details["Cluster"].append(copy.deepcopy(resource_detail))
                # Bricks, grouped per host under Bricks/all.
                host_list = self.get_resource_keys(cluster_key, "Bricks/all")
                for host in host_list:
                    resource_detail = {}
                    attr_key = os.path.join(cluster_key, "Bricks/all", host)
                    resource_detail["host_name"] = host.replace(".", "_")
                    brick_list = self.get_resource_keys("", attr_key)
                    for brick in brick_list:
                        for key, value in objects["Brick"]["attrs"].items():
                            try:
                                brick_attr_key = os.path.join(cluster_key, "Bricks/all",
                                                              host, brick, key)
                                brick_attr_data = etcd_utils.read(brick_attr_key)
                                brick_attr_value = self.resource_status_mapper(str(brick_attr_data.value).lower())
                                resource_detail[key] = brick_attr_value
                            except (KeyError, etcd.EtcdKeyNotFound) as ex:
                                logger.log("error", NS.get("publisher_id", None),
                                           {'message': "Cannot Find {0} in brick {1}".format(key, brick) + str(ex)})
                        cluster_details.details["Brick"].append(copy.deepcopy(resource_detail))
                # Volumes; attrs with a non-None spec point at a sub-tree
                # (e.g. geo-replication sessions) fetched separately.
                volume_list = self.get_resource_keys(cluster_key, "Volumes")
                for volume in volume_list:
                    resource_detail = {}
                    volume_key = os.path.join(cluster_key, "Volumes", volume)
                    for key, value in objects["Volume"]["attrs"].items():
                        if value is None:
                            try:
                                attr_key = os.path.join(volume_key, key)
                                attr_data = etcd_utils.read(attr_key)
                                attr_value = self.resource_status_mapper(str(attr_data.value).lower())
                                resource_detail[key] = attr_value
                            except (KeyError, etcd.EtcdKeyNotFound) as ex:
                                logger.log("error", NS.get("publisher_id", None),
                                           {'message': "Cannot Find {0} in volume {1}".format(key, volume) + str(ex)})
                        else:
                            try:
                                new_key = os.path.join(volume_key, objects["Volume"]["attrs"][key]["value"].rsplit("/", 1)[1])
                                resp_data = self.get_object_from_central_store(new_key,
                                                                               objects["Volume"]["attrs"][key])
                                resource_detail[key] = resp_data
                            except (etcd.EtcdKeyNotFound, AttributeError, KeyError) as ex:
                                logger.log("error", NS.get("publisher_id", None),
                                           {'message': "Error in retreiving geo_replication data for volume" + str(volume) + str(ex)})
                                resource_detail[key] = {"total": 0, "up": 0, "down": 0, "partial": 0}
                    cluster_details.details["Volume"].append(copy.deepcopy(resource_detail))
                # Nodes.
                node_list = self.get_resource_keys(cluster_key, "nodes")
                for node in node_list:
                    resource_detail = {}
                    node_key = objects["Node"]["value"].replace("$integration_id",
                                                                cluster_details.integration_id).replace("$node_id",
                                                                                                        node)
                    for key, value in objects["Node"]["attrs"].items():
                        if value is None:
                            try:
                                attr_key = os.path.join(node_key, key)
                                attr_data = etcd_utils.read(attr_key)
                                attr_value = self.resource_status_mapper(str(attr_data.value).lower())
                                resource_detail[key] = attr_value
                            except (etcd.EtcdKeyNotFound, AttributeError, KeyError) as ex:
                                logger.log("error", NS.get("publisher_id", None),
                                           {'message': "Cannot Find {0} in Node {1}".format(key, node) + str(ex)})
                    cluster_details.details["Node"].append(copy.deepcopy(resource_detail))
                cluster_data.append(copy.deepcopy(cluster_details))
            # Derive aggregate counters; failures here are logged but do
            # not prevent returning the collected data.
            try:
                cluster_data = self.set_volume_count(cluster_data, "Volume")
                cluster_data = self.set_resource_count(cluster_data, "Node")
                cluster_data = self.set_resource_count(cluster_data, "Brick")
                cluster_data = self.set_brick_count(cluster_data)
                cluster_data = self.set_brick_path(cluster_data)
                cluster_data = self.set_geo_rep_session(cluster_data)
                cluster_data = self.set_volume_level_brick_count(cluster_data)
            except (etcd.EtcdKeyNotFound, AttributeError, KeyError) as ex:
                logger.log("error", NS.get("publisher_id", None),
                           {'message': "Failed to set resource details" + str(ex)})
            return cluster_data
        except (etcd.EtcdException, AttributeError, KeyError) as ex:
            logger.log("error", NS.get("publisher_id", None),
                       {'message': str(ex)})
            raise ex
    def set_geo_rep_session(self, cluster_data):
        # Sum geo-replication session counters across each cluster's
        # volumes into cluster.details["geo_rep"].
        # NOTE(review): geo_rep_mapper is initialised once for ALL
        # clusters, so every cluster after the first also includes the
        # preceding clusters' counts -- looks unintended; confirm.
        total = 0
        partial = 0
        up = 0
        down = 0
        geo_rep_mapper = {"total": total, "partial": partial, "up": up,
                          "down" : down}
        for cluster in cluster_data:
            for volume in cluster.details["Volume"]:
                try:
                    for key, value in volume["geo_rep_session"].items():
                        try:
                            geo_rep_mapper[key] = geo_rep_mapper[key] + value
                        except (AttributeError, KeyError) as ex:
                            logger.log("error", NS.get("publisher_id", None),
                                       {'message': "Failed to extract georep details for {0}".format(key) + str(ex)})
                except (AttributeError, KeyError) as ex:
                    logger.log("error", NS.get("publisher_id", None),
                               {'message': "Failed to extract georep details for volume" + str(ex)})
            cluster.details["geo_rep"] = {}
            cluster.details["geo_rep"]["total"] = geo_rep_mapper["total"]
            cluster.details["geo_rep"]["up"] = geo_rep_mapper["up"]
            cluster.details["geo_rep"]["down"] = geo_rep_mapper["down"]
            cluster.details["geo_rep"]["partial"] = geo_rep_mapper["partial"]
        return cluster_data
    def set_volume_level_brick_count(self,cluster_data):
        # Build a per-volume {total, up, down} brick tally (status 0 means
        # up) and attach the same mapping to every cluster.
        volume_detail = {}
        for cluster in cluster_data:
            for volume in cluster.details["Volume"]:
                try:
                    volume_detail[volume["name"]] = {"total":0, "up":0, "down":0}
                except (AttributeError,KeyError):
                    pass
        for cluster in cluster_data:
            for brick in cluster.details["Brick"]:
                try:
                    volume_detail[str(brick["vol_name"])]["total"] = volume_detail[str(brick["vol_name"])]["total"] + 1
                    if brick["status"] == 0:
                        volume_detail[str(brick["vol_name"])]["up"] = volume_detail[str(brick["vol_name"])]["up"] + 1
                    else:
                        volume_detail[str(brick["vol_name"])]["down"] = volume_detail[str(brick["vol_name"])]["down"] + 1
                except (AttributeError, KeyError) as ex:
                    logger.log("error", NS.get("publisher_id", None),
                               {'message': "Failed to set volume level brick count" + str(ex)})
            cluster.details["volume_level_brick_count"] = volume_detail
        return cluster_data
    def set_brick_count(self, cluster_data):
        # Count each node's bricks (matched on underscore-escaped fqdn);
        # brick status 0 counts as up, anything else as down.
        for cluster in cluster_data:
            for node in cluster.details["Node"]:
                try:
                    total = 0
                    up = 0
                    down = 0
                    for brick in cluster.details["Brick"]:
                        if brick["host_name"] == node["fqdn"].replace(".", "_"):
                            if brick["status"] == 0:
                                total = total + 1
                                up = up + 1
                            else:
                                total = total + 1
                                down = down + 1
                    node["brick_total_count"] = total
                    node["brick_up_count"] = up
                    node["brick_down_count"] = down
                except (AttributeError, KeyError) as ex:
                    logger.log("error", NS.get("publisher_id", None),
                               {'message': "Failed to set brick count" + str(ex)})
        return cluster_data
    def set_brick_path(self, cluster_data):
        # "host:/path" -> store "/path" under "brick_name".
        for cluster in cluster_data:
            for brick in cluster.details["Brick"]:
                try:
                    brick["brick_name"] = brick["brick_path"].split(":")[1]
                except (AttributeError, KeyError) as ex:
                    logger.log("error", NS.get("publisher_id", None),
                               {'message': "Failed to set brick path" + str(ex)})
        return cluster_data
    def set_resource_count(self, cluster_data, resource_name):
        # Attach <resource>_total/up/down_count per cluster; a resource
        # with status == 0 counts as up.
        for cluster in cluster_data:
            resources = cluster.details[str(resource_name)]
            cluster.details[str(resource_name.lower()) + "_total_count"] = len(resources)
            up = 0
            down = 0
            for resource in resources:
                try:
                    if resource["status"] == 0:
                        up = up + 1
                    else:
                        down = down + 1
                except KeyError as ex:
                    logger.log("error", NS.get("publisher_id", None),
                               {'message': "Failed to set resource count for {0}".format(resource_name) + str(ex)})
            cluster.details[str(resource_name.lower()) + "_up_count"] = up
            cluster.details[str(resource_name.lower()) + "_down_count"] = down
        return cluster_data
    def set_volume_count(self, cluster_data, resource_name):
        # Volume "state": 0 -> up, 5 -> partial, 6 -> degraded, else down.
        for cluster in cluster_data:
            resources = cluster.details[str(resource_name)]
            cluster.details[str(resource_name.lower()) + "_total_count"] = len(resources)
            up = 0
            down = 0
            partial = 0
            degraded = 0
            for resource in resources:
                try:
                    if resource["state"] == 0:
                        up = up + 1
                    elif resource["state"] == 5:
                        partial = partial + 1
                    elif resource["state"] == 6:
                        degraded = degraded + 1
                    else:
                        down = down + 1
                except KeyError as ex:
                    logger.log("error", NS.get("publisher_id", None),
                               {'message': "Failed to set resource count for {0}".format(resource_name) + str(ex)})
            cluster.details[str(resource_name.lower()) + "_up_count"] = up
            cluster.details[str(resource_name.lower()) + "_down_count"] = down
            cluster.details[str(resource_name.lower()) + "_partial_count"] = partial
            cluster.details[str(resource_name.lower()) + "_degraded_count"] = degraded
        return cluster_data
    def resource_status_mapper(self, status):
        # Map a textual resource status to its numeric code; unknown
        # strings are returned unchanged.
        status_map = {"created" : 0.5, "stopped" : 2, "started" : 0,
                      "degraded" : 8, "up" : 0, "down" : 1,
                      "completed" : 11, "not_started" : 12,
                      "in progress" : 13, "in_progress" : 13,
                      "not started" : 12, "failed" : 4, "(partial)":5, "(degraded)": 6,
                      "unknown": 15}
        try:
            return status_map[status]
        except KeyError:
            return status
    def cluster_status_mapper(self, status):
        # healthy/unhealthy -> 1/0; anything else passes through unchanged.
        status_map = {"healthy" : 1, "unhealthy" : 0}
        try:
            return status_map[status]
        except KeyError:
            return status
# Update __init__.py
import os
import copy
import etcd
import time
from gevent import socket
from tendrl.commons.utils import etcd_utils
from tendrl.commons.utils import log_utils as logger
from tendrl.monitoring_integration.grafana import cluster_detail
class GraphitePlugin():
def __init__(self):
    '''Read carbon/datasource endpoints from the config and open the socket.'''
    config = NS.config.data
    self.host = config["datasource_host"]
    self.port = config["datasource_port"]
    self.carbon_port = config["carbon_port"]
    self.prefix = 'tendrl'
    self._connect()
def _connect(self):
    '''(Re)open the TCP connection to the carbon line receiver.

    Failures are logged and re-raised so callers can decide how to recover.
    '''
    try:
        self.graphite_sock = socket.socket()
        self.graphite_sock.connect((self.host, int(self.carbon_port)))
    except socket.error as err:
        logger.log("error", NS.get("publisher_id", None),
                   {'message': "Cannot connect to graphite socket" + str(err)})
        raise err
def _resend(self, message):
    '''Reconnect once and retry sending *message*; log and re-raise on failure.'''
    try:
        self._connect()
        result = self.graphite_sock.sendall(message)
    except socket.error as err:
        logger.log("error", NS.get("publisher_id", None),
                   {'message': "Cannot send data to graphite socket" + str(err)})
        raise err
def push_metrics(self, metric_name, metric_value):
    '''Send one "<prefix>.<name> <value> <unix-ts>" line to carbon,
    reconnecting and retrying once if the socket write fails.'''
    line = '%s%s%s %s %d\n' % (
        self.prefix,
        str("."),
        metric_name,
        str(metric_value),
        int(time.time())
    )
    try:
        result = self.graphite_sock.sendall(line)
    except socket.error:
        result = self._resend(line)
    return result
def get_resource_count(self, resource_details, obj_attr):
    '''Fill total/up/down/partial counters on *resource_details*.

    ``obj_attr["count"]`` maps counter names to {attr_key: accepted_values}
    rules; a detail contributes to a counter when its attribute value is in
    the rule's accepted values.  "total" simply counts every detail.
    '''
    counters = {"total": 0, "up": 0, "down": 0, "partial": 0}
    count_spec = obj_attr["count"]
    for counter in count_spec:
        for detail in resource_details["details"]:
            if counter == "total":
                counters["total"] += 1
            elif counter in ("up", "down", "partial"):
                for rule_attr, accepted in count_spec[counter].items():
                    if detail[rule_attr] in accepted:
                        counters[counter] += 1
    resource_details["total"] = counters["total"]
    resource_details["up"] = counters["up"]
    resource_details["down"] = counters["down"]
    resource_details["partial"] = counters["partial"]
    return resource_details
def get_object_from_central_store(self, resource_key, obj_attr):
    '''Fetch the children of *resource_key* from etcd, keeping only the
    attributes named in ``obj_attr["attrs"]``; attach count summaries when
    the spec requests them.'''
    listing = etcd_utils.read(resource_key)
    collected = {"details" : []}
    for child in listing.leaves:
        child_name = child.key.rsplit("/", 1)[1]
        entry = {}
        for attr_name, _spec in obj_attr["attrs"].items():
            attr_node = etcd_utils.read(os.path.join(resource_key, child_name, attr_name))
            entry[attr_name] = attr_node.value
        collected["details"].append(entry)
    try:
        if obj_attr["count"]:
            collected = self.get_resource_count(collected, obj_attr)
    except KeyError:
        # The "count" spec is optional.
        pass
    return collected
def get_resource_keys(self, key, resource_name):
    '''Return the child key names under ``key/resource_name``.

    Lookup failures are logged and yield an empty (or partial) list.
    '''
    names = []
    try:
        listing = etcd_utils.read(key + "/" + str(resource_name))
        for leaf in listing.leaves:
            names.append(leaf.key.split('/')[-1])
    except (KeyError, etcd.EtcdKeyNotFound) as ex:
        logger.log("error", NS.get("publisher_id", None),
                   {'message': "Error while fetching " +
                    str(resource_name).split('/')[0] + str(ex)})
    return names
def get_central_store_data(self, objects):
    # Walk the etcd tree for every managed cluster and build ClusterDetail
    # objects summarising Cluster/Brick/Volume/Node state, then derive the
    # aggregate counters.
    try:
        cluster_list = self.get_resource_keys("", "clusters")
        cluster_data = []
        for cluster_id in cluster_list:
            # Skip clusters whose is_managed flag is "no" or missing.
            try:
                cluster_key = "/clusters/" + str(cluster_id) + "/is_managed"
                cluster_is_managed = etcd_utils.read(cluster_key).value
                if cluster_is_managed.lower() == "no":
                    continue
            except etcd.EtcdKeyNotFound:
                continue
            cluster_details = cluster_detail.ClusterDetail()
            cluster_details.integration_id = cluster_id
            cluster_key = objects["Cluster"]["value"].replace("$integration_id",
                                                              cluster_details.integration_id)
            # Cluster-level objects (skip metric/value metadata keys).
            for obj in objects["Cluster"]:
                if obj in ["metric", "value"]:
                    continue
                resource_detail = {}
                resource_detail[str(obj)] = {}
                obj_details = objects["Cluster"][str(obj)]
                obj_key = os.path.join(cluster_key, str(obj))
                obj_attrs = obj_details["attrs"]
                for key, value in obj_attrs.items():
                    try:
                        attr_key = os.path.join(obj_key, key)
                        attr_data = etcd_utils.read(attr_key)
                        attr_value = self.cluster_status_mapper(str(attr_data.value).lower())
                        resource_detail[str(obj)][key] = copy.deepcopy(attr_value)
                    except (KeyError, etcd.EtcdKeyNotFound) as ex:
                        logger.log("error", NS.get("publisher_id", None),
                                   {'message': "Cannot Find {0} in Cluster {1}".format(key, cluster_id) + str(ex)})
                cluster_details.details["Cluster"].append(copy.deepcopy(resource_detail))
            # Bricks, grouped per host under Bricks/all.
            host_list = self.get_resource_keys(cluster_key, "Bricks/all")
            for host in host_list:
                resource_detail = {}
                attr_key = os.path.join(cluster_key, "Bricks/all", host)
                resource_detail["host_name"] = host.replace(".", "_")
                brick_list = self.get_resource_keys("", attr_key)
                for brick in brick_list:
                    for key, value in objects["Brick"]["attrs"].items():
                        try:
                            brick_attr_key = os.path.join(cluster_key, "Bricks/all",
                                                          host, brick, key)
                            brick_attr_data = etcd_utils.read(brick_attr_key)
                            brick_attr_value = self.resource_status_mapper(str(brick_attr_data.value).lower())
                            resource_detail[key] = brick_attr_value
                        except (KeyError, etcd.EtcdKeyNotFound) as ex:
                            logger.log("error", NS.get("publisher_id", None),
                                       {'message': "Cannot Find {0} in brick {1}".format(key, brick) + str(ex)})
                    cluster_details.details["Brick"].append(copy.deepcopy(resource_detail))
            # Volumes; attrs with a non-None spec point at a sub-tree
            # (e.g. geo-replication sessions) fetched separately.
            volume_list = self.get_resource_keys(cluster_key, "Volumes")
            for volume in volume_list:
                resource_detail = {}
                volume_key = os.path.join(cluster_key, "Volumes", volume)
                for key, value in objects["Volume"]["attrs"].items():
                    if value is None:
                        try:
                            attr_key = os.path.join(volume_key, key)
                            attr_data = etcd_utils.read(attr_key)
                            attr_value = self.resource_status_mapper(str(attr_data.value).lower())
                            resource_detail[key] = attr_value
                        except (KeyError, etcd.EtcdKeyNotFound) as ex:
                            logger.log("error", NS.get("publisher_id", None),
                                       {'message': "Cannot Find {0} in volume {1}".format(key, volume) + str(ex)})
                    else:
                        try:
                            new_key = os.path.join(volume_key, objects["Volume"]["attrs"][key]["value"].rsplit("/", 1)[1])
                            resp_data = self.get_object_from_central_store(new_key,
                                                                           objects["Volume"]["attrs"][key])
                            resource_detail[key] = resp_data
                        except (etcd.EtcdKeyNotFound, AttributeError, KeyError) as ex:
                            logger.log("error", NS.get("publisher_id", None),
                                       {'message': "Error in retreiving geo_replication data for volume" + str(volume) + str(ex)})
                            resource_detail[key] = {"total": 0, "up": 0, "down": 0, "partial": 0}
                cluster_details.details["Volume"].append(copy.deepcopy(resource_detail))
            # Nodes.
            node_list = self.get_resource_keys(cluster_key, "nodes")
            for node in node_list:
                resource_detail = {}
                node_key = objects["Node"]["value"].replace("$integration_id",
                                                            cluster_details.integration_id).replace("$node_id",
                                                                                                    node)
                for key, value in objects["Node"]["attrs"].items():
                    if value is None:
                        try:
                            attr_key = os.path.join(node_key, key)
                            attr_data = etcd_utils.read(attr_key)
                            attr_value = self.resource_status_mapper(str(attr_data.value).lower())
                            resource_detail[key] = attr_value
                        except (etcd.EtcdKeyNotFound, AttributeError, KeyError) as ex:
                            # A node with no readable status is treated as
                            # down (1).
                            if key == "status":
                                resource_detail[key] = 1
                            logger.log("error", NS.get("publisher_id", None),
                                       {'message': "Cannot Find {0} in Node {1}".format(key, node) + str(ex)})
                cluster_details.details["Node"].append(copy.deepcopy(resource_detail))
            cluster_data.append(copy.deepcopy(cluster_details))
        # Derive aggregate counters; failures here are logged but do not
        # prevent returning the collected data.
        try:
            cluster_data = self.set_volume_count(cluster_data, "Volume")
            cluster_data = self.set_resource_count(cluster_data, "Node")
            cluster_data = self.set_resource_count(cluster_data, "Brick")
            cluster_data = self.set_brick_count(cluster_data)
            cluster_data = self.set_brick_path(cluster_data)
            cluster_data = self.set_geo_rep_session(cluster_data)
            cluster_data = self.set_volume_level_brick_count(cluster_data)
        except (etcd.EtcdKeyNotFound, AttributeError, KeyError) as ex:
            logger.log("error", NS.get("publisher_id", None),
                       {'message': "Failed to set resource details" + str(ex)})
        return cluster_data
    except (etcd.EtcdException, AttributeError, KeyError) as ex:
        logger.log("error", NS.get("publisher_id", None),
                   {'message': str(ex)})
        raise ex
def set_geo_rep_session(self, cluster_data):
    '''Aggregate geo-replication session counters per cluster.

    Sums the total/up/down/partial counters of every volume's
    "geo_rep_session" dict into ``cluster.details["geo_rep"]``.
    '''
    for cluster in cluster_data:
        # Bug fix: the accumulator used to be initialised once outside
        # this loop, so every cluster after the first silently included
        # all preceding clusters' session counts.  Reset it per cluster.
        geo_rep_mapper = {"total": 0, "partial": 0, "up": 0, "down": 0}
        for volume in cluster.details["Volume"]:
            try:
                for key, value in volume["geo_rep_session"].items():
                    try:
                        geo_rep_mapper[key] = geo_rep_mapper[key] + value
                    except (AttributeError, KeyError) as ex:
                        logger.log("error", NS.get("publisher_id", None),
                                   {'message': "Failed to extract georep details for {0}".format(key) + str(ex)})
            except (AttributeError, KeyError) as ex:
                logger.log("error", NS.get("publisher_id", None),
                           {'message': "Failed to extract georep details for volume" + str(ex)})
        cluster.details["geo_rep"] = {}
        cluster.details["geo_rep"]["total"] = geo_rep_mapper["total"]
        cluster.details["geo_rep"]["up"] = geo_rep_mapper["up"]
        cluster.details["geo_rep"]["down"] = geo_rep_mapper["down"]
        cluster.details["geo_rep"]["partial"] = geo_rep_mapper["partial"]
    return cluster_data
def set_volume_level_brick_count(self, cluster_data):
    '''Build a per-volume {total, up, down} brick tally (brick status 0
    counts as up) and attach the mapping to every cluster under
    "volume_level_brick_count".'''
    per_volume = {}
    for cluster in cluster_data:
        for volume in cluster.details["Volume"]:
            try:
                per_volume[volume["name"]] = {"total": 0, "up": 0, "down": 0}
            except (AttributeError, KeyError):
                pass
    for cluster in cluster_data:
        for brick in cluster.details["Brick"]:
            try:
                tally = per_volume[str(brick["vol_name"])]
                tally["total"] = tally["total"] + 1
                if brick["status"] == 0:
                    tally["up"] = tally["up"] + 1
                else:
                    tally["down"] = tally["down"] + 1
            except (AttributeError, KeyError) as ex:
                logger.log("error", NS.get("publisher_id", None),
                           {'message': "Failed to set volume level brick count" + str(ex)})
        cluster.details["volume_level_brick_count"] = per_volume
    return cluster_data
def set_brick_count(self, cluster_data):
    '''Per node, count the cluster's bricks whose host matches the node's
    underscore-escaped fqdn; brick status 0 counts as up, else down.'''
    for cluster in cluster_data:
        for node in cluster.details["Node"]:
            try:
                counts = {"total": 0, "up": 0, "down": 0}
                for brick in cluster.details["Brick"]:
                    if brick["host_name"] == node["fqdn"].replace(".", "_"):
                        counts["total"] += 1
                        if brick["status"] == 0:
                            counts["up"] += 1
                        else:
                            counts["down"] += 1
                node["brick_total_count"] = counts["total"]
                node["brick_up_count"] = counts["up"]
                node["brick_down_count"] = counts["down"]
            except (AttributeError, KeyError) as ex:
                logger.log("error", NS.get("publisher_id", None),
                           {'message': "Failed to set brick count" + str(ex)})
    return cluster_data
def set_brick_path(self, cluster_data):
for cluster in cluster_data:
for brick in cluster.details["Brick"]:
try:
brick["brick_name"] = brick["brick_path"].split(":")[1]
except (AttributeError, KeyError) as ex:
logger.log("error", NS.get("publisher_id", None),
{'message': "Failed to set brick path" + str(ex)})
return cluster_data
def set_resource_count(self, cluster_data, resource_name):
for cluster in cluster_data:
resources = cluster.details[str(resource_name)]
cluster.details[str(resource_name.lower()) + "_total_count"] = len(resources)
up = 0
down = 0
for resource in resources:
try:
if resource["status"] == 0:
up = up + 1
else:
down = down + 1
except KeyError as ex:
logger.log("error", NS.get("publisher_id", None),
{'message': "Failed to set resource count for {0}".format(resource_name) + str(ex)})
cluster.details[str(resource_name.lower()) + "_up_count"] = up
cluster.details[str(resource_name.lower()) + "_down_count"] = down
return cluster_data
def set_volume_count(self, cluster_data, resource_name):
for cluster in cluster_data:
resources = cluster.details[str(resource_name)]
cluster.details[str(resource_name.lower()) + "_total_count"] = len(resources)
up = 0
down = 0
partial = 0
degraded = 0
for resource in resources:
try:
if resource["state"] == 0:
up = up + 1
elif resource["state"] == 5:
partial = partial + 1
elif resource["state"] == 6:
degraded = degraded + 1
else:
down = down + 1
except KeyError as ex:
logger.log("error", NS.get("publisher_id", None),
{'message': "Failed to set resource count for {0}".format(resource_name) + str(ex)})
cluster.details[str(resource_name.lower()) + "_up_count"] = up
cluster.details[str(resource_name.lower()) + "_down_count"] = down
cluster.details[str(resource_name.lower()) + "_partial_count"] = partial
cluster.details[str(resource_name.lower()) + "_degraded_count"] = degraded
return cluster_data
def resource_status_mapper(self, status):
status_map = {"created" : 0.5, "stopped" : 2, "started" : 0,
"degraded" : 8, "up" : 0, "down" : 1,
"completed" : 11, "not_started" : 12,
"in progress" : 13, "in_progress" : 13,
"not started" : 12, "failed" : 4, "(partial)":5, "(degraded)": 6,
"unknown": 15}
try:
return status_map[status]
except KeyError:
return status
def cluster_status_mapper(self, status):
status_map = {"healthy" : 1, "unhealthy" : 0}
try:
return status_map[status]
except KeyError:
return status
|
"""
Freezes final grades for a course
"""
import csv
import argparse
from collections import namedtuple
from django.core.management import BaseCommand, CommandError
from grades.models import ProctoredExamGrade
class ParsingError(CommandError):
    """Raised when a row of grade-adjustment CSV data is missing or malformed."""
    pass
class GradeRowParser:
    """Parser for rows of grade adjustment information in a CSV"""

    # Parsed representation of one row: the ProctoredExamGrade pk and new score.
    RowProps = namedtuple('RowProps', ['exam_grade_id', 'score'])
    # Column names used when the caller does not override them.
    default_col_names = dict(
        exam_grade_id='proctoredexam_id',
        score='score',
    )

    def __init__(self, col_names=None):
        """
        Args:
            col_names (dict): Mapping of RowProps property name to the name of the column in the CSV
        """
        col_names = col_names or {}
        # Caller-supplied names win over the defaults.
        self.col_names = self.RowProps(**{**self.default_col_names, **col_names})

    def parse_and_validate_row(self, row):
        """Parses a row of grade adjustment info and makes sure it doesn't contain bad data

        Raises:
            ParsingError: if a required column is missing, a value fails
                int/float conversion, or the score is outside [0, 100].
        """
        try:
            parsed_row = self.RowProps(
                exam_grade_id=int(row[self.col_names.exam_grade_id]),
                score=float(row[self.col_names.score]),
            )
        except KeyError as e:
            raise ParsingError('Row is missing a required column: {}'.format(str(e)))
        except ValueError as e:
            raise ParsingError('Row has an invalid value: {}'.format(str(e)))
        if parsed_row.score < 0.0 or parsed_row.score > 100.0:
            row_identifier = '{}: {}'.format(self.col_names.exam_grade_id, parsed_row.exam_grade_id)
            raise ParsingError('[{}] "score" value must be between 0 and 100'.format(row_identifier))
        return parsed_row

    def parse_exam_grade_adjustments(self, csv_reader):
        """
        Parses all rows of grade adjustment info from a CSV and yields each ProctoredExamGrade object
        with its associated grade adjustment row from the CSV
        Args:
            csv_reader (csv.DictReader): A DictReader instance
        Returns:
            tuple(ProctoredExamGrade, RowProps):
                A tuple containing a ProctoredExamGrade and its associated parsed CSV row

        Raises:
            ParsingError: if any CSV id has no matching ProctoredExamGrade.
        """
        parsed_row_dict = {}
        for row in csv_reader:
            parsed_row = self.parse_and_validate_row(row)
            # NOTE(review): duplicate ids silently keep only the last row.
            parsed_row_dict[parsed_row.exam_grade_id] = parsed_row
        exam_grade_query = ProctoredExamGrade.objects.filter(id__in=parsed_row_dict.keys())
        # A count mismatch means at least one CSV id matched no record.
        if exam_grade_query.count() < len(parsed_row_dict):
            bad_exam_grade_ids = set(parsed_row_dict.keys()) - set(exam_grade_query.values_list('id', flat=True))
            raise ParsingError(
                'Some exam grade IDs do not match any ProctoredExamGrade records: {}'.format(bad_exam_grade_ids)
            )
        for exam_grade in exam_grade_query.all():
            yield exam_grade, parsed_row_dict[exam_grade.id]
class Command(BaseCommand):
    """Parses a csv with exam grade adjustment information and changes the appropriate grades"""
    help = "Parses a csv with exam grade adjustment information and changes the appropriate grades"

    def add_arguments(self, parser):
        """Register the input file argument and optional column-name overrides."""
        parser.add_argument('csvfile', type=argparse.FileType('r'), help='')
        parser.add_argument(
            '--grade-id-col-name',
            default=GradeRowParser.default_col_names['exam_grade_id'],
            help='Name of the column that contains the proctored exam grade id')
        parser.add_argument(
            '--score-col-name',
            default=GradeRowParser.default_col_names['score'],
            help='Name of the column that contains the score value'
        )

    def handle(self, *args, **kwargs):  # pylint: disable=unused-argument,too-many-locals
        """Apply every grade adjustment in the input file and print a summary."""
        col_names = dict(
            exam_grade_id=kwargs.get('grade_id_col_name'),
            score=kwargs.get('score_col_name'),
        )
        csvfile = kwargs.get('csvfile')
        # NOTE(review): despite the "csvfile" name, the reader is configured
        # for tab-separated input (delimiter='\t') -- confirm which format
        # callers actually supply.
        reader = csv.DictReader(csvfile.read().splitlines(), delimiter='\t')
        grade_row_parser = GradeRowParser(col_names=col_names)
        total_rows = 0
        grades_changed = 0
        grades_unchanged = 0
        for exam_grade, parsed_adjustment_row in grade_row_parser.parse_exam_grade_adjustments(reader):
            # Only write (and audit-log) grades whose score actually changed.
            if exam_grade.score != parsed_adjustment_row.score:
                exam_grade.set_score(parsed_adjustment_row.score)
                exam_grade.save_and_log(None)
                grades_changed = grades_changed + 1
            else:
                grades_unchanged = grades_unchanged + 1
            total_rows = total_rows + 1
        result_messages = ['Total rows: {}'.format(total_rows)]
        if grades_changed:
            result_messages.append('Grades changed: {}'.format(grades_changed))
        if grades_unchanged:
            result_messages.append('Grades found with no change in score: {}'.format(grades_unchanged))
        self.stdout.write(self.style.SUCCESS('\n'.join(result_messages)))
add support for csv, remove delimiter used for tsv
"""
Freezes final grades for a course
"""
import csv
import argparse
from collections import namedtuple
from django.core.management import BaseCommand, CommandError
from grades.models import ProctoredExamGrade
class ParsingError(CommandError):
    """Raised when a row of grade-adjustment CSV data is missing or malformed."""
    pass
class GradeRowParser:
    """Parser for rows of grade adjustment information in a CSV"""

    # Parsed representation of one row: the exam grade pk plus the new score.
    RowProps = namedtuple('RowProps', ['exam_grade_id', 'score'])
    # Column names used when the caller does not override them.
    default_col_names = dict(
        exam_grade_id='proctoredexam_id',
        score='score',
    )

    def __init__(self, col_names=None):
        """
        Args:
            col_names (dict): Mapping of RowProps property name to the name of the column in the CSV
        """
        merged = dict(self.default_col_names)
        merged.update(col_names or {})
        self.col_names = self.RowProps(**merged)

    def parse_and_validate_row(self, row):
        """Parses a row of grade adjustment info and makes sure it doesn't contain bad data"""
        try:
            grade_id = int(row[self.col_names.exam_grade_id])
            score_value = float(row[self.col_names.score])
        except KeyError as e:
            raise ParsingError('Row is missing a required column: {}'.format(str(e)))
        except ValueError as e:
            raise ParsingError('Row has an invalid value: {}'.format(str(e)))
        if score_value < 0.0 or score_value > 100.0:
            row_identifier = '{}: {}'.format(self.col_names.exam_grade_id, grade_id)
            raise ParsingError('[{}] "score" value must be between 0 and 100'.format(row_identifier))
        return self.RowProps(exam_grade_id=grade_id, score=score_value)

    def parse_exam_grade_adjustments(self, csv_reader):
        """
        Parses all rows of grade adjustment info from a CSV and yields each ProctoredExamGrade object
        with its associated grade adjustment row from the CSV
        Args:
            csv_reader (csv.DictReader): A DictReader instance
        Returns:
            tuple(ProctoredExamGrade, RowProps):
                A tuple containing a ProctoredExamGrade and its associated parsed CSV row
        """
        adjustments = {}
        for csv_row in csv_reader:
            parsed = self.parse_and_validate_row(csv_row)
            adjustments[parsed.exam_grade_id] = parsed
        exam_grade_query = ProctoredExamGrade.objects.filter(id__in=adjustments.keys())
        # Fewer matches than parsed ids means some ids matched no record.
        if exam_grade_query.count() < len(adjustments):
            bad_exam_grade_ids = set(adjustments.keys()) - set(exam_grade_query.values_list('id', flat=True))
            raise ParsingError(
                'Some exam grade IDs do not match any ProctoredExamGrade records: {}'.format(bad_exam_grade_ids)
            )
        for exam_grade in exam_grade_query.all():
            yield exam_grade, adjustments[exam_grade.id]
class Command(BaseCommand):
    """Parses a csv with exam grade adjustment information and changes the appropriate grades"""
    help = "Parses a csv with exam grade adjustment information and changes the appropriate grades"

    def add_arguments(self, parser):
        """Register the CSV file argument and optional column-name overrides."""
        parser.add_argument('csvfile', type=argparse.FileType('r'), help='')
        parser.add_argument(
            '--grade-id-col-name',
            default=GradeRowParser.default_col_names['exam_grade_id'],
            help='Name of the column that contains the proctored exam grade id')
        parser.add_argument(
            '--score-col-name',
            default=GradeRowParser.default_col_names['score'],
            help='Name of the column that contains the score value'
        )

    def handle(self, *args, **kwargs):  # pylint: disable=unused-argument,too-many-locals
        """Apply every grade adjustment in the CSV and print a summary."""
        col_names = dict(
            exam_grade_id=kwargs.get('grade_id_col_name'),
            score=kwargs.get('score_col_name'),
        )
        csvfile = kwargs.get('csvfile')
        # Default comma delimiter: plain CSV input.
        reader = csv.DictReader(csvfile.read().splitlines())
        grade_row_parser = GradeRowParser(col_names=col_names)
        total_rows = 0
        grades_changed = 0
        grades_unchanged = 0
        for exam_grade, parsed_adjustment_row in grade_row_parser.parse_exam_grade_adjustments(reader):
            # Only write (and audit-log) grades whose score actually changed.
            if exam_grade.score != parsed_adjustment_row.score:
                exam_grade.set_score(parsed_adjustment_row.score)
                exam_grade.save_and_log(None)
                grades_changed = grades_changed + 1
            else:
                grades_unchanged = grades_unchanged + 1
            total_rows = total_rows + 1
        result_messages = ['Total rows: {}'.format(total_rows)]
        if grades_changed:
            result_messages.append('Grades changed: {}'.format(grades_changed))
        if grades_unchanged:
            result_messages.append('Grades found with no change in score: {}'.format(grades_unchanged))
        self.stdout.write(self.style.SUCCESS('\n'.join(result_messages)))
|
# Python query script that reads all ES records on the l2b server, stores them in a dictionary and prints it.
from elasticsearch import Elasticsearch
import certifi
import datetime
import calendar
# NOTE(review): this is a fill-in template, not runnable code. Until a port
# number is written after "port=", the inline comment swallows the value and
# the file is a syntax error as committed.
es = Elasticsearch(
    '[...]',
    # name of node to be added here 'http(s)://user:password@server:port']
    port= #add the port number,
    use_ssl=True,
    verify_certs=True,
    ca_certs=certifi.where()
)
def query_result(interval, start, end):
    """Run a match-all query restricted to a hard-coded @timestamp range.

    NOTE(review): the interval/start/end parameters are currently unused --
    presumably they were meant to drive the "gte"/"lte" bounds below; confirm
    and wire them in (or drop them).
    """
    return es.search(index="", body =
    #add index between the quotation marks
    {
        "from" : 0, "size" : 10000,
        #the size can be changed but apparently the current query does not show > 10000 results.
        "query": {
            "bool": {
                "must": { "match_all": {} },
                "filter": {
                    "range": {
                        "@timestamp": {
                            "gte": 1451566081000,
                            "lte": 1451566082001,
                            #timestamps are for start/end date in epoch format. this format should be changed for other dates (current one is for 31.12.2015)
                            "format": "epoch_millis"
                            #format could be changed, but for now keeping the epoch + millisecond one
                        }
                    }
                }
            }
        }
    })
#print len(query_result(0,0,0))
# Python 2 print statement; this file needs print() to run under Python 3.
print query_result(0,0,0)
Update es_to_python.py
# This is a Python query script that reads and prints ES records on the l2b server
from elasticsearch import Elasticsearch
import certifi
import datetime
import calendar
# NOTE(review): this is a fill-in template, not runnable code. Until a port
# number is written after "port=", the inline comment swallows the value and
# the file is a syntax error as committed.
es = Elasticsearch(
    '[...]',
    # name of node to be added here 'http(s)://user:password@server:port']
    port= #add the port number,
    use_ssl=True,
    verify_certs=True,
    ca_certs=certifi.where()
)
def query_result(interval, start, end):
    """Run a match-all query restricted to a hard-coded @timestamp range.

    NOTE(review): the interval/start/end parameters are currently unused --
    presumably they were meant to drive the "gte"/"lte" bounds below; confirm
    and wire them in (or drop them).
    """
    return es.search(index="", body =
    #add index between the quotation marks
    {
        "from" : 0, "size" : 10000,
        #the size can be changed but apparently the current query does not show > 10000 results.
        "query": {
            "bool": {
                "must": { "match_all": {} },
                "filter": {
                    "range": {
                        "@timestamp": {
                            "gte": 1451566081000,
                            "lte": 1451566082001,
                            #timestamps are for start/end date in epoch format. this format should be changed for other dates (current one is for 31.12.2015)
                            "format": "epoch_millis"
                            #format could be changed, but for now keeping the epoch + millisecond one
                        }
                    }
                }
            }
        }
    })
#print len(query_result(0,0,0))
# Python 2 print statement; this file needs print() to run under Python 3.
print query_result(0,0,0)
|
#!/usr/bin/env python-sirius
"""."""
import time as _time
from epics import PV
from siriuspy.csdevice.orbitcorr import SOFBFactory
class SOFB:
    """Thin EPICS client for one accelerator's Slow Orbit Feedback IOC.

    Exposes the '<ACC>-Glob:AP-SOFB:' trajectory/orbit monitor PVs and the
    smoothing-buffer controls as simple attributes/properties.
    """

    def __init__(self, acc):
        """Connect the SOFB PVs of accelerator *acc* (the PV prefix)."""
        self.data = SOFBFactory.create(acc)
        # Rings publish multi-turn trajectories; transport lines single-pass.
        orbtp = 'MTurn' if self.data.isring else 'SPass'
        self._trajx = PV(acc+'-Glob:AP-SOFB:'+orbtp+'OrbX-Mon')
        self._trajy = PV(acc+'-Glob:AP-SOFB:'+orbtp+'OrbY-Mon')
        # auto_monitor=False: fetch a fresh value on every .get()
        self._orbx = PV(acc+'-Glob:AP-SOFB:SlowOrbX-Mon', auto_monitor=False)
        self._orby = PV(acc+'-Glob:AP-SOFB:SlowOrbY-Mon', auto_monitor=False)
        if self.data.isring:
            # Indexed multi-turn trajectories only exist for rings.
            self._trajx_idx = PV(acc+'-Glob:AP-SOFB:'+orbtp+'Idx'+'OrbX-Mon')
            self._trajy_idx = PV(acc+'-Glob:AP-SOFB:'+orbtp+'Idx'+'OrbY-Mon')
        self._rst = PV(acc+'-Glob:AP-SOFB:SmoothReset-Cmd')
        self._npts_sp = PV(acc+'-Glob:AP-SOFB:SmoothNrPts-SP')
        self._npts_rb = PV(acc+'-Glob:AP-SOFB:BufferCount-Mon')
        self._sum = PV(acc+'-Glob:AP-SOFB:'+orbtp+'Sum-Mon')
        self._trigsample_sp = PV(acc+'-Glob:AP-SOFB:TrigNrSamplesPost-SP')
        self._trigsample_rb = PV(acc+'-Glob:AP-SOFB:TrigNrSamplesPost-RB')

    @property
    def connected(self):
        """True when all checked PVs are connected.

        NOTE(review): the ring-only *_idx PVs and the TrigNrSamplesPost PVs
        are not part of this check -- confirm that is intentional.
        """
        conn = self._trajx.connected
        conn &= self._trajy.connected
        conn &= self._orbx.connected
        conn &= self._orby.connected
        conn &= self._sum.connected
        conn &= self._rst.connected
        conn &= self._npts_sp.connected
        conn &= self._npts_rb.connected
        return conn

    @property
    def trajx(self):
        """Horizontal trajectory reading (multi-turn or single-pass)."""
        return self._trajx.get()

    @property
    def trajy(self):
        """Vertical trajectory reading (multi-turn or single-pass)."""
        return self._trajy.get()

    @property
    def orbx(self):
        """Horizontal slow-orbit reading (fresh fetch, no monitor cache)."""
        return self._orbx.get()

    @property
    def orby(self):
        """Vertical slow-orbit reading (fresh fetch, no monitor cache)."""
        return self._orby.get()

    @property
    def trajx_idx(self):
        """Indexed horizontal trajectory on rings; falls back to trajx."""
        return self._trajx_idx.get() if self.data.isring \
            else self.trajx

    @property
    def trajy_idx(self):
        """Indexed vertical trajectory on rings; falls back to trajy."""
        return self._trajy_idx.get() if self.data.isring \
            else self.trajy

    @property
    def sum(self):
        """BPM sum signal reading."""
        return self._sum.get()

    @property
    def nr_points(self):
        """Smoothing-buffer count (reads BufferCount-Mon)."""
        return self._npts_rb.value

    @nr_points.setter
    def nr_points(self, value):
        # Setter writes the SmoothNrPts setpoint, not the monitor PV.
        self._npts_sp.value = int(value)

    @property
    def trigsample(self):
        """Number of post-trigger samples (readback)."""
        return self._trigsample_rb.value

    @trigsample.setter
    def trigsample(self, value):
        self._trigsample_sp.value = int(value)

    def wait(self, timeout=10):
        """Poll until the smoothing buffer reaches its setpoint or *timeout* s pass."""
        inter = 0.05
        n = int(timeout/inter)
        # Small grace period so the buffer count reflects the latest reset.
        _time.sleep(4*inter)
        for _ in range(n):
            if self._npts_rb.value >= self._npts_sp.value:
                break
            _time.sleep(inter)
        else:
            print('WARN: Timed out waiting orbit.')

    def reset(self):
        """Write 1 to the SmoothReset command PV."""
        self._rst.value = 1
PML.ENH: Improve sofb controls.
#!/usr/bin/env python-sirius
"""."""
import time as _time
from epics import PV
from siriuspy.csdevice.orbitcorr import SOFBFactory
class SOFB:
    """EPICS client for one accelerator's Slow Orbit Feedback IOC.

    Exposes trajectory/orbit/kick monitors, reference orbits, BPM/corrector
    enable lists, the smoothing-buffer controls, and the correction
    calc/apply commands of the '<ACC>-Glob:AP-SOFB:' IOC.
    """

    def __init__(self, acc):
        """Connect the SOFB PVs of accelerator *acc* (upper-cased PV prefix)."""
        self.data = SOFBFactory.create(acc)
        # Rings publish multi-turn trajectories; transport lines single-pass.
        orbtp = 'MTurn' if self.data.isring else 'SPass'
        pref = acc.upper() + '-Glob:AP-SOFB:'
        self._trajx = PV(pref+orbtp+'OrbX-Mon')
        self._trajy = PV(pref+orbtp+'OrbY-Mon')
        # auto_monitor=False: fetch a fresh value on every .get()
        self._orbx = PV(pref+'SlowOrbX-Mon', auto_monitor=False)
        self._orby = PV(pref+'SlowOrbY-Mon', auto_monitor=False)
        self._kickch = PV(pref+'KickCH-Mon', auto_monitor=False)
        self._kickcv = PV(pref+'KickCV-Mon', auto_monitor=False)
        self._deltakickch = PV(pref+'DeltaKickCH-Mon')
        self._deltakickcv = PV(pref+'DeltaKickCV-Mon')
        # Reference orbit setpoint/readback pairs.
        self._refx_sp = PV(pref+'RefOrbX-SP')
        self._refy_sp = PV(pref+'RefOrbY-SP')
        self._refx_rb = PV(pref+'RefOrbX-RB')
        self._refy_rb = PV(pref+'RefOrbY-RB')
        # BPM and corrector enable-list setpoint/readback pairs.
        self._bpmxenbl_sp = PV(pref+'BPMXEnblList-SP')
        self._bpmyenbl_sp = PV(pref+'BPMYEnblList-SP')
        self._bpmxenbl_rb = PV(pref+'BPMXEnblList-RB')
        self._bpmyenbl_rb = PV(pref+'BPMYEnblList-RB')
        self._chenbl_sp = PV(pref+'CHEnblList-SP')
        self._cvenbl_sp = PV(pref+'CVEnblList-SP')
        self._chenbl_rb = PV(pref+'CHEnblList-RB')
        self._cvenbl_rb = PV(pref+'CVEnblList-RB')
        # Correction calculation / application command PVs.
        self._calccorr = PV(pref+'CalcDelta-Cmd')
        self._applycorr = PV(pref+'ApplyDelta-Cmd')
        if self.data.isring:
            # Indexed multi-turn trajectories only exist for rings.
            self._trajx_idx = PV(pref+orbtp+'Idx'+'OrbX-Mon')
            self._trajy_idx = PV(pref+orbtp+'Idx'+'OrbY-Mon')
        self._rst = PV(pref+'SmoothReset-Cmd')
        self._npts_sp = PV(pref+'SmoothNrPts-SP')
        self._npts_rb = PV(pref+'BufferCount-Mon')
        self._sum = PV(pref+orbtp+'Sum-Mon')
        self._trigsample_sp = PV(pref+'TrigNrSamplesPost-SP')
        self._trigsample_rb = PV(pref+'TrigNrSamplesPost-RB')

    @property
    def connected(self):
        """True when all checked PVs are connected.

        NOTE(review): the ring-only *_idx PVs and the TrigNrSamplesPost PVs
        are not part of this check -- confirm that is intentional.
        """
        conn = self._trajx.connected
        conn &= self._trajy.connected
        conn &= self._orbx.connected
        conn &= self._orby.connected
        conn &= self._kickch.connected
        conn &= self._kickcv.connected
        conn &= self._deltakickch.connected
        conn &= self._deltakickcv.connected
        conn &= self._refx_sp.connected
        conn &= self._refy_sp.connected
        conn &= self._refx_rb.connected
        conn &= self._refy_rb.connected
        conn &= self._bpmxenbl_sp.connected
        conn &= self._bpmyenbl_sp.connected
        conn &= self._bpmxenbl_rb.connected
        conn &= self._bpmyenbl_rb.connected
        conn &= self._chenbl_sp.connected
        conn &= self._cvenbl_sp.connected
        conn &= self._chenbl_rb.connected
        conn &= self._cvenbl_rb.connected
        conn &= self._sum.connected
        conn &= self._rst.connected
        conn &= self._calccorr.connected
        conn &= self._applycorr.connected
        conn &= self._npts_sp.connected
        conn &= self._npts_rb.connected
        return conn

    @property
    def trajx(self):
        """Horizontal trajectory reading (multi-turn or single-pass)."""
        return self._trajx.get()

    @property
    def trajy(self):
        """Vertical trajectory reading (multi-turn or single-pass)."""
        return self._trajy.get()

    @property
    def orbx(self):
        """Horizontal slow-orbit reading (fresh fetch, no monitor cache)."""
        return self._orbx.get()

    @property
    def orby(self):
        """Vertical slow-orbit reading (fresh fetch, no monitor cache)."""
        return self._orby.get()

    @property
    def trajx_idx(self):
        """Indexed horizontal trajectory on rings; falls back to trajx."""
        return self._trajx_idx.get() if self.data.isring \
            else self.trajx

    @property
    def trajy_idx(self):
        """Indexed vertical trajectory on rings; falls back to trajy."""
        return self._trajy_idx.get() if self.data.isring \
            else self.trajy

    @property
    def sum(self):
        """BPM sum signal reading."""
        return self._sum.get()

    @property
    def kickch(self):
        """Horizontal corrector (CH) kick readings."""
        return self._kickch.get()

    @property
    def kickcv(self):
        """Vertical corrector (CV) kick readings."""
        return self._kickcv.get()

    @property
    def deltakickch(self):
        """Calculated CH kick deltas."""
        return self._deltakickch.get()

    @property
    def deltakickcv(self):
        """Calculated CV kick deltas."""
        return self._deltakickcv.get()

    @property
    def refx(self):
        """Horizontal reference orbit (readback; setter writes the SP PV)."""
        return self._refx_rb.value

    @refx.setter
    def refx(self, value):
        self._refx_sp.value = value

    @property
    def refy(self):
        """Vertical reference orbit (readback; setter writes the SP PV)."""
        return self._refy_rb.value

    @refy.setter
    def refy(self, value):
        self._refy_sp.value = value

    @property
    def bpmxenbl(self):
        """Horizontal BPM enable list (readback; setter writes the SP PV)."""
        return self._bpmxenbl_rb.value

    @bpmxenbl.setter
    def bpmxenbl(self, value):
        self._bpmxenbl_sp.value = value

    @property
    def bpmyenbl(self):
        """Vertical BPM enable list (readback; setter writes the SP PV)."""
        return self._bpmyenbl_rb.value

    @bpmyenbl.setter
    def bpmyenbl(self, value):
        self._bpmyenbl_sp.value = value

    @property
    def chenbl(self):
        """CH corrector enable list (readback; setter writes the SP PV)."""
        return self._chenbl_rb.value

    @chenbl.setter
    def chenbl(self, value):
        self._chenbl_sp.value = value

    @property
    def cvenbl(self):
        """CV corrector enable list (readback; setter writes the SP PV)."""
        return self._cvenbl_rb.value

    @cvenbl.setter
    def cvenbl(self, value):
        self._cvenbl_sp.value = value

    @property
    def nr_points(self):
        """Smoothing-buffer count (reads BufferCount-Mon)."""
        return self._npts_rb.value

    @nr_points.setter
    def nr_points(self, value):
        # Setter writes the SmoothNrPts setpoint, not the monitor PV.
        self._npts_sp.value = int(value)

    @property
    def trigsample(self):
        """Number of post-trigger samples (readback)."""
        return self._trigsample_rb.value

    @trigsample.setter
    def trigsample(self, value):
        self._trigsample_sp.value = int(value)

    def wait(self, timeout=10):
        """Poll until the smoothing buffer reaches its setpoint or *timeout* s pass."""
        inter = 0.05
        n = int(timeout/inter)
        # Small grace period so the buffer count reflects the latest reset.
        _time.sleep(4*inter)
        for _ in range(n):
            if self._npts_rb.value >= self._npts_sp.value:
                break
            _time.sleep(inter)
        else:
            print('WARN: Timed out waiting orbit.')

    def reset(self):
        """Write 1 to the SmoothReset command PV."""
        self._rst.value = 1

    def calccorr(self):
        """Write 1 to the CalcDelta command PV (compute kick deltas)."""
        self._calccorr.value = 1

    def applycorr(self):
        """Apply calculated deltas: first CH correctors, then CV after 0.3 s."""
        self._applycorr.value = self.data.ApplyDelta.CH
        _time.sleep(0.3)
        self._applycorr.value = self.data.ApplyDelta.CV
|
import re
import os.path
import glob
from . import cmake
from . import vsinfo
from .build_item import BuildItem
# -----------------------------------------------------------------------------
class Generator(BuildItem):
    """Wraps a cmake generator and knows how to build/install with it.

    Visual Studio aliases example:
    vs2013: use the bitness of the current system
    vs2013_32: use 32bit version
    vs2013_64: use 64bit version
    """

    @staticmethod
    def default_str():
        """get the default generator from cmake"""
        s = cmake.CMakeSysInfo.generator()
        return s

    def __init__(self, name, build, num_jobs):
        # Expand "vs..." aliases to the full cmake generator name.
        if name.startswith('vs'):
            name = vsinfo.to_gen(name)
        self.alias = name
        super().__init__(name)
        self.num_jobs = num_jobs
        self.is_makefile = name.endswith("Makefiles")
        self.is_ninja = name.endswith("Ninja")
        self.is_msvc = name.startswith("Visual Studio")
        self.build = build
        #
        # Include the VS toolset in the sysinfo name, since it affects output.
        self.sysinfo_name = self.name
        if self.is_msvc:
            ts = build.compiler.vs.toolset
            self.sysinfo_name += (' ' + ts) if ts is not None else ""
        # these vars would not change cmake --system-information
        # self.full_name += " ".join(self.build.flags.cmake_vars)

    def configure_args(self, for_json=False):
        """Return generator-related cmake configure args.

        A single string when *for_json* is True, an argv-style list otherwise.
        """
        if self.name != "":
            if self.is_msvc and self.build.compiler.vs.toolset is not None:
                if for_json:
                    args = '-T ' + self.build.compiler.vs.toolset
                else:
                    args = ['-G', self.name, '-T', self.build.compiler.vs.toolset]
            else:
                if for_json:
                    args = ''
                else:
                    args = ['-G', self.name]
        else:
            if for_json:
                args = ''
            else:
                args = []
        # cmake vars are explicitly set in the preload file
        # args += self.build.flags.cmake_flags
        return args

    def cmd(self, targets):
        """Return the shell command (argv list) that builds *targets*.

        Raises for multiple targets on non-makefile generators.
        """
        if self.is_makefile:
            return ['make', '-j', str(self.num_jobs)] + targets
        else:
            bt = str(self.build.build_type)
            if len(targets) > 1:
                msg = ("Building multiple targets with this generator is not "
                       "implemented. "
                       "cmake --build cannot handle multiple --target " +
                       "invokations. A generator-specific command must be "
                       "written to handle multiple targets with this "
                       "generator " + '("{}")'.format(self.name))
                raise Exception(msg)
            if not self.is_msvc:
                cmd = ['cmake', '--build', '.', '--target', targets[0], '--config', bt]
            else:
                # msbuild is invoked directly on the target's .vcxproj file.
                # if a target has a . in the name, it must be substituted for _
                targets_safe = [re.sub(r'\.', r'_', t) for t in targets]
                if len(targets_safe) != 1:
                    raise Exception("msbuild can only build one target at a time: was " + str(targets_safe))
                t = targets_safe[0]
                pat = os.path.join(self.build.builddir, t + '*.vcxproj')
                projs = glob.glob(pat)
                if len(projs) == 0:
                    msg = "could not find vcx project for this target: {} (glob={}, got={})".format(t, pat, projs)
                    raise Exception(msg)
                elif len(projs) > 1:
                    msg = "multiple vcx projects for this target: {} (glob={}, got={})".format(t, pat, projs)
                    raise Exception(msg)
                proj = projs[0]
                cmd = [self.build.compiler.vs.msbuild, proj,
                       '/property:Configuration='+bt,
                       '/maxcpucount:' + str(self.num_jobs)]
            return cmd

    def install(self):
        """Return the cmake command that installs the current build type."""
        bt = str(self.build.build_type)
        return ['cmake', '--build', '.', '--config', bt, '--target', 'install']
"""
generators: https://cmake.org/cmake/help/v3.7/manual/cmake-generators.7.html
Unix Makefiles
MSYS Makefiles
MinGW Makefiles
NMake Makefiles
Ninja
Watcom WMake
CodeBlocks - Ninja
CodeBlocks - Unix Makefiles
CodeBlocks - MinGW Makefiles
CodeBlocks - NMake Makefiles
CodeLite - Ninja
CodeLite - Unix Makefiles
CodeLite - MinGW Makefiles
CodeLite - NMake Makefiles
Eclipse CDT4 - Ninja
Eclipse CDT4 - Unix Makefiles
Eclipse CDT4 - MinGW Makefiles
Eclipse CDT4 - NMake Makefiles
KDevelop3
KDevelop3 - Unix Makefiles
Kate - Ninja
Kate - Unix Makefiles
Kate - MinGW Makefiles
Kate - NMake Makefiles
Sublime Text 2 - Ninja
Sublime Text 2 - Unix Makefiles
Sublime Text 2 - MinGW Makefiles
Sublime Text 2 - NMake Makefiles
Visual Studio 6
Visual Studio 7
Visual Studio 7 .NET 2003
Visual Studio 8 2005 [Win64|IA64]
Visual Studio 9 2008 [Win64|IA64]
Visual Studio 10 2010 [Win64|IA64]
Visual Studio 11 2012 [Win64|ARM]
Visual Studio 12 2013 [Win64|ARM]
Visual Studio 14 2015 [Win64|ARM]
Visual Studio 15 2017 [Win64|ARM]
Green Hills MULTI
Xcode
"""
visual studio: fix build command
import re
import os.path
import glob
from . import cmake
from . import vsinfo
from .build_item import BuildItem
# -----------------------------------------------------------------------------
class Generator(BuildItem):
    """Wraps a cmake generator and knows how to build/install with it.

    Visual Studio aliases example:
    vs2013: use the bitness of the current system
    vs2013_32: use 32bit version
    vs2013_64: use 64bit version
    """

    @staticmethod
    def default_str():
        """get the default generator from cmake"""
        s = cmake.CMakeSysInfo.generator()
        return s

    def __init__(self, name, build, num_jobs):
        # Expand "vs..." aliases to the full cmake generator name.
        if name.startswith('vs'):
            name = vsinfo.to_gen(name)
        self.alias = name
        super().__init__(name)
        self.num_jobs = num_jobs
        self.is_makefile = name.endswith("Makefiles")
        self.is_ninja = name.endswith("Ninja")
        self.is_msvc = name.startswith("Visual Studio")
        self.build = build
        #
        # Include the VS toolset in the sysinfo name, since it affects output.
        self.sysinfo_name = self.name
        if self.is_msvc:
            ts = build.compiler.vs.toolset
            self.sysinfo_name += (' ' + ts) if ts is not None else ""
        # these vars would not change cmake --system-information
        # self.full_name += " ".join(self.build.flags.cmake_vars)

    def configure_args(self, for_json=False):
        """Return generator-related cmake configure args.

        A single string when *for_json* is True, an argv-style list otherwise.
        """
        if self.name != "":
            if self.is_msvc and self.build.compiler.vs.toolset is not None:
                if for_json:
                    args = '-T ' + self.build.compiler.vs.toolset
                else:
                    args = ['-G', self.name, '-T', self.build.compiler.vs.toolset]
            else:
                if for_json:
                    args = ''
                else:
                    args = ['-G', self.name]
        else:
            if for_json:
                args = ''
            else:
                args = []
        # cmake vars are explicitly set in the preload file
        # args += self.build.flags.cmake_flags
        return args

    def cmd(self, targets):
        """Return the shell command (argv list) that builds *targets*.

        Raises for multiple targets on non-makefile generators.
        """
        if self.is_makefile:
            return ['make', '-j', str(self.num_jobs)] + targets
        bt = str(self.build.build_type)
        if len(targets) > 1:
            msg = ("Building multiple targets with this generator is not "
                   "implemented. "
                   "cmake --build cannot handle multiple --target " +
                   "invokations. A generator-specific command must be "
                   "written to handle multiple targets with this "
                   "generator " + '("{}")'.format(self.name))
            raise Exception(msg)
        # Both branches used cmake --build; the only msvc difference is the
        # parallelism flag forwarded to msbuild after "--".  (Removed the
        # dead, commented-out direct-msbuild implementation.)
        cmd = ['cmake', '--build', '.', '--target', targets[0], '--config', bt]
        if self.is_msvc:
            cmd += ['--', '/maxcpucount:' + str(self.num_jobs)]
        return cmd

    def install(self):
        """Return the cmake command that installs the current build type."""
        bt = str(self.build.build_type)
        return ['cmake', '--build', '.', '--config', bt, '--target', 'install']
"""
generators: https://cmake.org/cmake/help/v3.7/manual/cmake-generators.7.html
Unix Makefiles
MSYS Makefiles
MinGW Makefiles
NMake Makefiles
Ninja
Watcom WMake
CodeBlocks - Ninja
CodeBlocks - Unix Makefiles
CodeBlocks - MinGW Makefiles
CodeBlocks - NMake Makefiles
CodeLite - Ninja
CodeLite - Unix Makefiles
CodeLite - MinGW Makefiles
CodeLite - NMake Makefiles
Eclipse CDT4 - Ninja
Eclipse CDT4 - Unix Makefiles
Eclipse CDT4 - MinGW Makefiles
Eclipse CDT4 - NMake Makefiles
KDevelop3
KDevelop3 - Unix Makefiles
Kate - Ninja
Kate - Unix Makefiles
Kate - MinGW Makefiles
Kate - NMake Makefiles
Sublime Text 2 - Ninja
Sublime Text 2 - Unix Makefiles
Sublime Text 2 - MinGW Makefiles
Sublime Text 2 - NMake Makefiles
Visual Studio 6
Visual Studio 7
Visual Studio 7 .NET 2003
Visual Studio 8 2005 [Win64|IA64]
Visual Studio 9 2008 [Win64|IA64]
Visual Studio 10 2010 [Win64|IA64]
Visual Studio 11 2012 [Win64|ARM]
Visual Studio 12 2013 [Win64|ARM]
Visual Studio 14 2015 [Win64|ARM]
Visual Studio 15 2017 [Win64|ARM]
Green Hills MULTI
Xcode
"""
|
# Copyright (C) 2010-2012 Cuckoo Sandbox Developers.
# This file is part of Cuckoo Sandbox - http://www.cuckoosandbox.org
# See the file 'docs/LICENSE' for copying permission.
import pkgutil
import inspect
import logging
from collections import defaultdict
from lib.cuckoo.common.exceptions import CuckooCriticalError
from lib.cuckoo.common.abstracts import MachineManager
from lib.cuckoo.common.abstracts import Processing
from lib.cuckoo.common.abstracts import Signature
from lib.cuckoo.common.abstracts import Report
log = logging.getLogger(__name__)
# Global plugin registry: group name -> list of registered plugin classes.
# NOTE(review): the default factory is dict, but register_plugin stores
# *lists* per group -- the factory only matters for lookups of unknown
# groups (e.g. via list_plugins); confirm this asymmetry is intended.
_modules = defaultdict(dict)
def import_plugin(name):
    """Import plugin module *name* and register the plugin classes it defines.

    Raises:
        CuckooCriticalError: if the module cannot be imported.
    """
    try:
        # level=-1 (allow implicit relative imports) exists on Python 2 only.
        module = __import__(name, globals(), locals(), ["dummy"], -1)
    except ImportError as e:
        raise CuckooCriticalError("Unable to import plugin \"%s\": %s"
                                  % (name, e))
    else:
        load_plugins(module)
def import_package(package):
    """Import every plain (non-package) module found directly in *package*."""
    prefix = package.__name__ + "."
    for _loader, modname, is_pkg in pkgutil.iter_modules(package.__path__, prefix):
        # Skip nested packages; only leaf modules hold plugins.
        if not is_pkg:
            import_plugin(modname)
def load_plugins(module):
    """Register every Cuckoo plugin class defined in *module*.

    Classes are grouped by their abstract base class; the abstract bases
    themselves are skipped.
    """
    for name, value in inspect.getmembers(module):
        if inspect.isclass(value):
            if issubclass(value, MachineManager) and value is not MachineManager:
                register_plugin("machinemanagers", value)
            elif issubclass(value, Processing) and value is not Processing:
                register_plugin("processing", value)
            elif issubclass(value, Signature) and value is not Signature:
                register_plugin("signatures", value)
            elif issubclass(value, Report) and value is not Report:
                register_plugin("reporting", value)
def register_plugin(group, name):
    """Add plugin class *name* to *group*, skipping duplicate registrations."""
    # setdefault replaces the explicit membership check; the duplicate guard
    # keeps repeated imports of the same module harmless.  ("global" was
    # unnecessary: _modules is mutated, never rebound.)
    plugins = _modules.setdefault(group, [])
    if name not in plugins:
        plugins.append(name)
def list_plugins(group=None):
    """Return the plugins registered under *group*, or the whole registry."""
    return _modules[group] if group else _modules
Python magic
# Copyright (C) 2010-2012 Cuckoo Sandbox Developers.
# This file is part of Cuckoo Sandbox - http://www.cuckoosandbox.org
# See the file 'docs/LICENSE' for copying permission.
import pkgutil
import inspect
import logging
from collections import defaultdict
from lib.cuckoo.common.exceptions import CuckooCriticalError
from lib.cuckoo.common.abstracts import MachineManager
from lib.cuckoo.common.abstracts import Processing
from lib.cuckoo.common.abstracts import Signature
from lib.cuckoo.common.abstracts import Report
log = logging.getLogger(__name__)
# Global plugin registry: group name -> list of registered plugin classes.
# NOTE(review): the default factory is dict, but register_plugin stores
# *lists* per group via setdefault -- the factory only matters for lookups
# of unknown groups (e.g. via list_plugins); confirm this is intended.
_modules = defaultdict(dict)
def import_plugin(name):
    """Import plugin module *name* and register the plugin classes it defines.

    Raises:
        CuckooCriticalError: if the module cannot be imported.
    """
    try:
        # level=-1 (allow implicit relative imports) exists on Python 2 only.
        module = __import__(name, globals(), locals(), ["dummy"], -1)
    except ImportError as e:
        raise CuckooCriticalError("Unable to import plugin \"%s\": %s"
                                  % (name, e))
    else:
        load_plugins(module)
def import_package(package):
    """Import every plain (non-package) module found directly in *package*."""
    prefix = package.__name__ + "."
    for loader, name, ispkg in pkgutil.iter_modules(package.__path__, prefix):
        # Skip nested packages; only leaf modules hold plugins.
        if ispkg:
            continue
        import_plugin(name)
def load_plugins(module):
    """Register every Cuckoo plugin class defined in *module*.

    Classes are grouped by their abstract base class; the abstract bases
    themselves are skipped.
    """
    for name, value in inspect.getmembers(module):
        if inspect.isclass(value):
            if issubclass(value, MachineManager) and value is not MachineManager:
                register_plugin("machinemanagers", value)
            elif issubclass(value, Processing) and value is not Processing:
                register_plugin("processing", value)
            elif issubclass(value, Signature) and value is not Signature:
                register_plugin("signatures", value)
            elif issubclass(value, Report) and value is not Report:
                register_plugin("reporting", value)
def register_plugin(group, name):
    """Add plugin class *name* to *group*, skipping duplicate registrations."""
    global _modules
    # Bug fix: the previous version rebound *group* to the list (shadowing the
    # parameter) and appended unconditionally, so loading the same module
    # twice registered its plugins twice.  Restore the duplicate guard.
    plugins = _modules.setdefault(group, [])
    if name not in plugins:
        plugins.append(name)
def list_plugins(group=None):
    """Return the plugins registered under *group*, or the whole registry."""
    return _modules[group] if group else _modules
|
"""Define celery tasks for hs_core app."""
import os
import sys
import traceback
import zipfile
import logging
import json
from celery.signals import task_postrun
from datetime import datetime, timedelta, date
from xml.etree import ElementTree
import requests
from celery import shared_task
from celery.schedules import crontab
from celery.task import periodic_task
from django.conf import settings
from django.core.mail import send_mail
from django.core.exceptions import ObjectDoesNotExist, ValidationError
from rest_framework import status
from hs_access_control.models import GroupMembershipRequest
from hs_core.hydroshare import utils, create_empty_resource
from hs_core.hydroshare.hs_bagit import create_bag_metadata_files, create_bag, create_bagit_files_by_irods
from hs_core.hydroshare.resource import get_activated_doi, get_crossref_url, deposit_res_metadata_with_crossref
from hs_core.task_utils import get_or_create_task_notification
from hs_odm2.models import ODM2Variable
from django_irods.storage import IrodsStorage
from theme.models import UserQuota, QuotaMessage, User
from django_irods.icommands import SessionException
from celery.result import states
from hs_core.models import BaseResource, TaskNotification
from theme.utils import get_quota_message
from hs_collection_resource.models import CollectionDeletedResource
# Pass 'django' into getLogger instead of __name__
# for celery tasks (as this seems to be the
# only way to successfully log in code executed
# by celery, despite our catch-all handler).
logger = logging.getLogger('django')
# Currently there are two different cleanups scheduled.
# One is 20 minutes after creation, the other is nightly.
# TODO Clean up zipfiles in remote federated storage as well.
@periodic_task(ignore_result=True, run_every=crontab(minute=30, hour=23))
def nightly_zips_cleanup():
    """Nightly removal of the two-day-old temporary zips folder, first from
    the local iRODS zone and then from every distinct federated zone."""
    # delete 2 days ago
    target_day = (date.today() - timedelta(2)).strftime('%Y-%m-%d')
    local_path = "zips/{daily_date}".format(daily_date=target_day)
    if __debug__:
        logger.debug("cleaning up {}".format(local_path))
    local_storage = IrodsStorage()
    if local_storage.exists(local_path):
        local_storage.delete(local_path)
    prefixes = BaseResource.objects.all().values_list('resource_federation_path').distinct()
    for row in prefixes:
        fed_prefix = row[0]  # values_list returns 1-tuples
        if fed_prefix == "":
            continue
        fed_path = "{prefix}/zips/{daily_date}".format(prefix=fed_prefix,
                                                       daily_date=target_day)
        if __debug__:
            logger.debug("cleaning up {}".format(fed_path))
        fed_storage = IrodsStorage("federated")
        if fed_storage.exists(fed_path):
            fed_storage.delete(fed_path)
@periodic_task(ignore_result=True, run_every=crontab(minute=0, hour=0))
def manage_task_nightly():
    # The nightly running task do DOI activation check
    # Check DOI activation on failed and pending resources and send email.
    #
    # Two passes over published resources:
    #   1. doi contains 'failure' -> retry CrossRef metadata deposition
    #   2. doi contains 'pending' -> poll CrossRef for the deposition result
    # Anything still unresolved is summarized and emailed to support.
    msg_lst = []
    # retrieve all published resources with failed metadata deposition with CrossRef if any and
    # retry metadata deposition
    failed_resources = BaseResource.objects.filter(raccess__published=True, doi__contains='failure')
    for res in failed_resources:
        if res.metadata.dates.all().filter(type='published'):
            pub_date = res.metadata.dates.all().filter(type='published')[0]
            pub_date = pub_date.start_date.strftime('%m/%d/%Y')
            act_doi = get_activated_doi(res.doi)
            response = deposit_res_metadata_with_crossref(res)
            if response.status_code == status.HTTP_200_OK:
                # retry of metadata deposition succeeds, change resource flag from failure
                # to pending
                res.doi = act_doi
                res.save()
                # create bag and compute checksum for published resource to meet DataONE requirement
                create_bag_by_irods(res.short_id)
            else:
                # retry of metadata deposition failed again, notify admin
                msg_lst.append("Metadata deposition with CrossRef for the published resource "
                               "DOI {res_doi} failed again after retry with first metadata "
                               "deposition requested since {pub_date}.".format(res_doi=act_doi,
                                                                               pub_date=pub_date))
                logger.debug(response.content)
        else:
            msg_lst.append("{res_id} does not have published date in its metadata.".format(
                res_id=res.short_id))
    # second pass: poll CrossRef for resources whose DOI is still pending
    pending_resources = BaseResource.objects.filter(raccess__published=True,
                                                    doi__contains='pending')
    for res in pending_resources:
        if res.metadata.dates.all().filter(type='published'):
            pub_date = res.metadata.dates.all().filter(type='published')[0]
            pub_date = pub_date.start_date.strftime('%m/%d/%Y')
            act_doi = get_activated_doi(res.doi)
            main_url = get_crossref_url()
            req_str = '{MAIN_URL}servlet/submissionDownload?usr={USERNAME}&pwd=' \
                      '{PASSWORD}&doi_batch_id={DOI_BATCH_ID}&type={TYPE}'
            response = requests.get(req_str.format(MAIN_URL=main_url,
                                                   USERNAME=settings.CROSSREF_LOGIN_ID,
                                                   PASSWORD=settings.CROSSREF_LOGIN_PWD,
                                                   DOI_BATCH_ID=res.short_id,
                                                   TYPE='result'))
            # CrossRef replies with an XML document; a successful activation has
            # at least one record and zero failures
            root = ElementTree.fromstring(response.content)
            rec_cnt_elem = root.find('.//record_count')
            failure_cnt_elem = root.find('.//failure_count')
            success = False
            if rec_cnt_elem is not None and failure_cnt_elem is not None:
                rec_cnt = int(rec_cnt_elem.text)
                failure_cnt = int(failure_cnt_elem.text)
                if rec_cnt > 0 and failure_cnt == 0:
                    res.doi = act_doi
                    res.save()
                    success = True
                    # create bag and compute checksum for published resource to meet DataONE requirement
                    create_bag_by_irods(res.short_id)
            if not success:
                msg_lst.append("Published resource DOI {res_doi} is not yet activated with request "
                               "data deposited since {pub_date}.".format(res_doi=act_doi,
                                                                         pub_date=pub_date))
                logger.debug(response.content)
        else:
            msg_lst.append("{res_id} does not have published date in its metadata.".format(
                res_id=res.short_id))
    if msg_lst:
        email_msg = '\n'.join(msg_lst)
        subject = 'Notification of pending DOI deposition/activation of published resources'
        # send email for people monitoring and follow-up as needed
        send_mail(subject, email_msg, settings.DEFAULT_FROM_EMAIL, [settings.DEFAULT_SUPPORT_EMAIL])
@periodic_task(ignore_result=True, run_every=crontab(minute=15, hour=0, day_of_week=1,
                                                     day_of_month='1-7'))
def send_over_quota_emails():
    """Email a quota warning to every user over the quota soft limit.

    Runs on the first Monday of each month. For each active, non-superuser
    account it updates the grace-period counter on the user's UserQuota and,
    when usage is at or above the soft limit, sends an HTML warning built
    from the site's QuotaMessage template to the user and support.
    """
    # check over quota cases and send quota warning emails as needed
    hs_internal_zone = "hydroshare"
    if not QuotaMessage.objects.exists():
        QuotaMessage.objects.create()
    qmsg = QuotaMessage.objects.first()
    users = User.objects.filter(is_active=True).filter(is_superuser=False).all()
    for u in users:
        uq = UserQuota.objects.filter(user__username=u.username, zone=hs_internal_zone).first()
        if uq:
            used_percent = uq.used_percent
            if used_percent >= qmsg.soft_limit_percent:
                if used_percent >= 100 and used_percent < qmsg.hard_limit_percent:
                    if uq.remaining_grace_period < 0:
                        # triggers grace period counting
                        uq.remaining_grace_period = qmsg.grace_period
                    elif uq.remaining_grace_period > 0:
                        # reduce remaining_grace_period by one day
                        uq.remaining_grace_period -= 1
                elif used_percent >= qmsg.hard_limit_percent:
                    # set grace period to 0 when user quota exceeds hard limit
                    uq.remaining_grace_period = 0
                uq.save()
                if u.first_name and u.last_name:
                    sal_name = '{} {}'.format(u.first_name, u.last_name)
                elif u.first_name:
                    sal_name = u.first_name
                elif u.last_name:
                    sal_name = u.last_name
                else:
                    sal_name = u.username
                msg_str = 'Dear ' + sal_name + ':\n\n'
                ori_qm = get_quota_message(u)
                # make embedded settings.DEFAULT_SUPPORT_EMAIL clickable with subject auto-filled
                replace_substr = "<a href='mailto:{0}?subject=Request more quota'>{0}</a>".format(
                    settings.DEFAULT_SUPPORT_EMAIL)
                new_qm = ori_qm.replace(settings.DEFAULT_SUPPORT_EMAIL, replace_substr)
                msg_str += new_qm
                msg_str += '\n\nHydroShare Support'
                subject = 'Quota warning'
                if settings.DEBUG:
                    logger.info("quota warning email not sent out on debug server but logged instead: "
                                "{}".format(msg_str))
                else:
                    try:
                        # send email for people monitoring and follow-up as needed
                        send_mail(subject, '', settings.DEFAULT_FROM_EMAIL,
                                  [u.email, settings.DEFAULT_SUPPORT_EMAIL],
                                  html_message=msg_str)
                    except Exception as ex:
                        # bug fix: Exception has no .message attribute on Python 3,
                        # so the old `ex.message` raised AttributeError inside this
                        # handler; str(ex) is the portable form
                        logger.debug("Failed to send quota warning email: " + str(ex))
            else:
                if uq.remaining_grace_period >= 0:
                    # turn grace period off now that the user is below quota soft limit
                    uq.remaining_grace_period = -1
                    uq.save()
        else:
            logger.debug('user ' + u.username + ' does not have UserQuota foreign key relation')
@shared_task
def add_zip_file_contents_to_resource(pk, zip_file_path):
    """Add zip file to existing resource and remove tmp zip file.

    :param pk: short_id of the target resource
    :param zip_file_path: filesystem path of the uploaded zip; it is always
        unlinked when the task finishes, whether or not the import succeeded
    """
    zfile = None
    resource = None
    try:
        resource = utils.get_resource_by_shortkey(pk, or_404=False)
        zfile = zipfile.ZipFile(zip_file_path)
        num_files = len(zfile.infolist())
        zcontents = utils.ZipContents(zfile)
        files = zcontents.get_files()
        resource.file_unpack_status = 'Running'
        resource.save()
        # start=1 so the progress message reflects files already imported;
        # the original 0-based index under-reported progress by one
        for i, f in enumerate(files, 1):
            logger.debug("Adding file {0} to resource {1}".format(f.name, pk))
            utils.add_file_to_resource(resource, f)
            resource.file_unpack_message = "Imported {0} of about {1} file(s) ...".format(
                i, num_files)
            resource.save()
        # This might make the resource unsuitable for public consumption
        resource.update_public_and_discoverable()
        # TODO: this is a bit of a lie because a different user requested the bag overwrite
        utils.resource_modified(resource, resource.creator, overwrite_bag=False)
        # Call success callback
        resource.file_unpack_message = None
        resource.file_unpack_status = 'Done'
        resource.save()
    except BaseResource.DoesNotExist:
        msg = "Unable to add zip file contents to non-existent resource {pk}."
        msg = msg.format(pk=pk)
        logger.error(msg)
    except Exception:
        # narrowed from a bare `except:` so SystemExit/KeyboardInterrupt still
        # propagate; any import failure is recorded on the resource for the UI
        exc_info = "".join(traceback.format_exception(*sys.exc_info()))
        if resource:
            resource.file_unpack_status = 'Error'
            resource.file_unpack_message = exc_info
            resource.save()
        if zfile:
            zfile.close()
        logger.error(exc_info)
    finally:
        # Delete upload file
        os.unlink(zip_file_path)
@shared_task
def delete_zip(zip_path):
    """Remove a temporary zip from local iRODS storage if it is still there."""
    storage = IrodsStorage()
    if storage.exists(zip_path):
        storage.delete(zip_path)
@shared_task
def create_temp_zip(resource_id, input_path, output_path, aggregation_name=None, sf_zip=False, download_path='',
                    request_username=None):
    """ Create temporary zip file from input_path and store in output_path
    :param resource_id: the short_id of a resource
    :param input_path: full irods path of input starting with federation path
    :param output_path: full irods path of output starting with federation path
    :param aggregation_name: The name of the aggregation to zip
    :param sf_zip: signals a single file to zip
    :param download_path: download path to return as task payload
    :param request_username: the username of the requesting user
    :return: download_path, unchanged, as the task payload
    """
    from hs_core.hydroshare.utils import get_resource_by_shortkey
    res = get_resource_by_shortkey(resource_id)
    aggregation = None
    if aggregation_name:
        aggregation = res.get_aggregation_by_aggregation_name(aggregation_name)
    istorage = res.get_irods_storage()  # invoke federated storage as necessary
    # for composite resources, regenerate aggregation metadata files so the
    # zip contains up-to-date metadata
    if res.resource_type == "CompositeResource":
        if '/data/contents/' in input_path:
            short_path = input_path.split('/data/contents/')[1]  # strip /data/contents/
            res.create_aggregation_meta_files(path=short_path)
        else:  # all metadata included, e.g., /data/*
            res.create_aggregation_meta_files()
    if aggregation or sf_zip:
        # input path points to single file aggregation
        # ensure that foo.zip contains aggregation metadata
        # by copying these into a temp subdirectory foo/foo parallel to where foo.zip is stored
        temp_folder_name, ext = os.path.splitext(output_path)  # strip zip to get scratch dir
        head, tail = os.path.split(temp_folder_name)  # tail is unqualified folder name "foo"
        out_with_folder = os.path.join(temp_folder_name, tail)  # foo/foo is subdir to zip
        istorage.copyFiles(input_path, out_with_folder)
        if not aggregation:
            # sf_zip without an explicit aggregation: try to resolve one from the path
            if '/data/contents/' in input_path:
                short_path = input_path.split('/data/contents/')[1]  # strip /data/contents/
            else:
                short_path = input_path
            try:
                aggregation = res.get_aggregation_by_name(short_path)
            except ObjectDoesNotExist:
                pass
        if aggregation:
            # copy every metadata companion file next to the data; each copy is
            # best-effort — a failure is logged and the zip is still produced
            try:
                istorage.copyFiles(aggregation.map_file_path, temp_folder_name)
            except SessionException:
                logger.error("cannot copy {}".format(aggregation.map_file_path))
            try:
                istorage.copyFiles(aggregation.metadata_file_path, temp_folder_name)
            except SessionException:
                logger.error("cannot copy {}".format(aggregation.metadata_file_path))
            if aggregation.is_model_program or aggregation.is_model_instance:
                try:
                    istorage.copyFiles(aggregation.schema_file_path, temp_folder_name)
                except SessionException:
                    logger.error("cannot copy {}".format(aggregation.schema_file_path))
                if aggregation.is_model_instance:
                    try:
                        istorage.copyFiles(aggregation.schema_values_file_path, temp_folder_name)
                    except SessionException:
                        logger.error("cannot copy {}".format(aggregation.schema_values_file_path))
            for file in aggregation.files.all():
                try:
                    istorage.copyFiles(file.storage_path, temp_folder_name)
                except SessionException:
                    logger.error("cannot copy {}".format(file.storage_path))
        istorage.zipup(temp_folder_name, output_path)
        istorage.delete(temp_folder_name)  # delete working directory; this isn't the zipfile
    else:  # regular folder to zip
        istorage.zipup(input_path, output_path)
    return download_path
@shared_task
def create_bag_by_irods(resource_id, create_zip=True):
    """Create a resource bag on iRODS side by running the bagit rule and ibun zip.
    This function runs as a celery task, invoked asynchronously so that it does not
    block the main web thread when it creates bags for very large files which will take some time.
    :param
    resource_id: the resource uuid that is used to look for the resource to create the bag for.
    :param create_zip: defaults to True, set to false to create bagit files without zipping
    :return: bag_url if bag creation operation succeeds (None when create_zip is False) or
    raise an exception if resource does not exist or any other issues that prevent bags from being created.
    """
    res = utils.get_resource_by_shortkey(resource_id)
    istorage = res.get_irods_storage()
    bag_path = res.bag_path
    # a missing AVU is treated as dirty so first-time bags are always built
    metadata_dirty = res.getAVU('metadata_dirty')
    metadata_dirty = metadata_dirty is None or metadata_dirty
    # if metadata has been changed, then regenerate metadata xml files
    if metadata_dirty:
        create_bag_metadata_files(res)
    bag_modified = res.getAVU("bag_modified")
    bag_modified = bag_modified is None or bag_modified
    if metadata_dirty or bag_modified:
        create_bagit_files_by_irods(res, istorage)
        res.setAVU("bag_modified", False)
    if create_zip:
        irods_bagit_input_path = res.get_irods_path(resource_id, prepend_short_id=False)
        # only proceed when the resource is not deleted potentially by another request
        # when being downloaded
        is_exist = istorage.exists(irods_bagit_input_path)
        if is_exist:
            try:
                # replace any stale bag zip before rebuilding it
                if istorage.exists(bag_path):
                    istorage.delete(bag_path)
                istorage.zipup(irods_bagit_input_path, bag_path)
                if res.raccess.published:
                    # compute checksum to meet DataONE distribution requirement
                    chksum = istorage.checksum(bag_path)
                    res.bag_checksum = chksum
                return res.bag_url
            except SessionException as ex:
                # re-raise with only stderr so the caller gets the iRODS error text
                raise SessionException(-1, '', ex.stderr)
        else:
            raise ObjectDoesNotExist('Resource {} does not exist.'.format(resource_id))
@shared_task
def copy_resource_task(ori_res_id, new_res_id=None, request_username=None):
    """Copy a resource (files, AVUs and metadata) as a celery task.

    :param ori_res_id: short_id of the resource to copy
    :param new_res_id: short_id of an already-created target resource; when
        None an empty target resource is created here first
    :param request_username: username of the user requesting the copy
    :return: the absolute URL of the new resource
    :raises utils.ResourceCopyException: wrapping any failure during the copy
    """
    try:
        new_res = None
        if not new_res_id:
            new_res = create_empty_resource(ori_res_id, request_username, action='copy')
            new_res_id = new_res.short_id
        utils.copy_resource_files_and_AVUs(ori_res_id, new_res_id)
        ori_res = utils.get_resource_by_shortkey(ori_res_id)
        if not new_res:
            new_res = utils.get_resource_by_shortkey(new_res_id)
        utils.copy_and_create_metadata(ori_res, new_res)
        # create the relation element for the new_res
        new_res.metadata.create_element('relation', type='source', value=ori_res.get_citation())
        if ori_res.resource_type.lower() == "collectionresource":
            # clone contained_res list of original collection and add to new collection
            # note that new collection will not contain "deleted resources"
            new_res.resources = ori_res.resources.all()
        # create bag for the new resource
        create_bag(new_res)
        return new_res.get_absolute_url()
    except Exception as ex:
        # NOTE(review): when new_res_id was supplied by the caller, new_res is
        # the pre-existing target and is deleted here on failure — confirm
        # callers expect that cleanup
        if new_res:
            new_res.delete()
        raise utils.ResourceCopyException(str(ex))
@shared_task
def create_new_version_resource_task(ori_res_id, username, new_res_id=None):
    """
    Task for creating a new version of a resource
    Args:
        ori_res_id: the original resource id that is to be versioned.
        new_res_id: the new versioned resource id from the original resource. If None, a
        new resource will be created.
        username: the requesting user's username
    Returns:
        the new versioned resource url as the payload
    Raises:
        utils.ResourceVersioningException: wrapping any failure while versioning
    """
    new_res = None
    # resolved lazily below; tracked here so the finally block can release the
    # lock without raising NameError when an early step fails
    ori_res = None
    try:
        if not new_res_id:
            new_res = create_empty_resource(ori_res_id, username)
            new_res_id = new_res.short_id
        utils.copy_resource_files_and_AVUs(ori_res_id, new_res_id)
        # copy metadata from source resource to target new-versioned resource except three elements
        ori_res = utils.get_resource_by_shortkey(ori_res_id)
        if not new_res:
            new_res = utils.get_resource_by_shortkey(new_res_id)
        utils.copy_and_create_metadata(ori_res, new_res)
        # add or update Relation element to link source and target resources
        ori_res.metadata.create_element('relation', type='isReplacedBy', value=new_res.get_citation())
        if new_res.metadata.relations.all().filter(type='isVersionOf').exists():
            # the original resource is already a versioned resource, and its isVersionOf relation
            # element is copied over to this new version resource, needs to delete this element so
            # it can be created to link to its original resource correctly
            eid = new_res.metadata.relations.all().filter(type='isVersionOf').first().id
            new_res.metadata.delete_element('relation', eid)
        new_res.metadata.create_element('relation', type='isVersionOf', value=ori_res.get_citation())
        if ori_res.resource_type.lower() == "collectionresource":
            # clone contained_res list of original collection and add to new collection
            # note that new version collection will not contain "deleted resources"
            new_res.resources = ori_res.resources.all()
        # create bag for the new resource
        create_bag(new_res)
        # since an isReplaceBy relation element is added to original resource, needs to call
        # resource_modified() for original resource
        utils.resource_modified(ori_res, by_user=username, overwrite_bag=False)
        # if everything goes well up to this point, set original resource to be immutable so that
        # obsoleted resources cannot be modified from REST API
        ori_res.raccess.immutable = True
        ori_res.raccess.save()
        ori_res.save()
        return new_res.get_absolute_url()
    except Exception as ex:
        if new_res:
            new_res.delete()
        raise utils.ResourceVersioningException(str(ex))
    finally:
        # release the lock regardless, but only when the original resource was
        # actually resolved — the previous code raised NameError here when an
        # early step failed, masking the real exception
        if ori_res is not None:
            ori_res.locked_time = None
            ori_res.save()
@shared_task
def replicate_resource_bag_to_user_zone_task(res_id, request_username):
    """
    Task for replicating resource bag which will be created on demand if not existent already to iRODS user zone
    Args:
        res_id: the resource id with its bag to be replicated to iRODS user zone
        request_username: the requesting user's username to whose user zone space the bag is copied to
    Returns:
        None, but exceptions will be raised if there is an issue with iRODS operation
    """
    res = utils.get_resource_by_shortkey(res_id)
    res_coll = res.root_path
    istorage = res.get_irods_storage()
    if istorage.exists(res_coll):
        bag_modified = res.getAVU('bag_modified')
        if bag_modified is None or not bag_modified:
            # bag is up to date; (re)build the zip only if it is missing
            if not istorage.exists(res.bag_path):
                create_bag_by_irods(res_id)
        else:
            # bag is stale — rebuild it before replicating
            create_bag_by_irods(res_id)
        # do replication of the resource bag to irods user zone
        if not res.resource_federation_path:
            # non-federated resource: switch the session to the user zone first
            istorage.set_fed_zone_session()
        src_file = res.bag_path
        tgt_file = '/{userzone}/home/{username}/{resid}.zip'.format(
            userzone=settings.HS_USER_IRODS_ZONE, username=request_username, resid=res_id)
        fsize = istorage.size(src_file)
        # ensure the copy will not push the requesting user over quota
        utils.validate_user_quota(request_username, fsize)
        istorage.copyFiles(src_file, tgt_file)
        return None
    else:
        raise ValidationError("Resource {} does not exist in iRODS".format(res.short_id))
@shared_task
def delete_resource_task(resource_id, request_username=None):
    """
    Deletes a resource managed by HydroShare. The caller must be an owner of the resource or an
    administrator to perform this function.
    :param resource_id: The unique HydroShare identifier of the resource to be deleted
    :param request_username: username of the requesting user; when given, a
    CollectionDeletedResource record is created for each collection that contained the resource
    :return: the My Resources page URL to redirect to after deletion completes
    raise an exception if there were errors.
    """
    res = utils.get_resource_by_shortkey(resource_id)
    res_title = res.metadata.title
    res_type = res.resource_type
    resource_related_collections = [col for col in res.collections.all()]
    owners_list = [owner for owner in res.raccess.owners.all()]
    # when the most recent version of a resource in an obsolescence chain is deleted, the previous
    # version in the chain needs to be set as the "active" version by deleting "isReplacedBy"
    # relation element
    if res.metadata.relations.all().filter(type='isVersionOf').exists():
        is_version_of_res_link = \
            res.metadata.relations.all().filter(type='isVersionOf').first().value
        # bug fix: rfind returns -1 when '/' is absent; rindex raised ValueError
        # in that case, which made the idx == -1 fallback branch unreachable
        idx = is_version_of_res_link.rfind('/')
        if idx == -1:
            obsolete_res_id = is_version_of_res_link
        else:
            obsolete_res_id = is_version_of_res_link[idx + 1:]
        obsolete_res = utils.get_resource_by_shortkey(obsolete_res_id)
        if obsolete_res.metadata.relations.all().filter(type='isReplacedBy').exists():
            eid = obsolete_res.metadata.relations.all().filter(type='isReplacedBy').first().id
            obsolete_res.metadata.delete_element('relation', eid)
            # also make this obsoleted resource editable if not published now that it becomes the latest version
            if not obsolete_res.raccess.published:
                obsolete_res.raccess.immutable = False
                obsolete_res.raccess.save()
    res.delete()
    if request_username:
        # if the deleted resource is part of any collection resource, then for each of those collection
        # create a CollectionDeletedResource object which can then be used to list collection deleted
        # resources on collection resource landing page
        for collection_res in resource_related_collections:
            o = CollectionDeletedResource.objects.create(
                resource_title=res_title,
                deleted_by=User.objects.get(username=request_username),
                resource_id=resource_id,
                resource_type=res_type,
                collection=collection_res
            )
            o.resource_owners.add(*owners_list)
    # return the page URL to redirect to after resource deletion task is complete
    return '/my-resources/'
@shared_task
def update_web_services(services_url, api_token, timeout, publish_urls, res_id):
    """Update web services hosted by GeoServer and HydroServer.
    This function sends a resource id to the HydroShare web services manager
    application, which will check the current status of the resource and register
    or unregister services hosted by GeoServer and HydroServer.
    The HydroShare web services manager will return a list of endpoint URLs
    for both the resource and individual aggregations. If publish_urls is set to
    True, these endpoints will be added to the extra metadata fields of the
    resource and aggregations.
    """
    session = requests.Session()
    session.headers.update(
        {"Authorization": " ".join(("Token", str(api_token)))}
    )
    rest_url = str(services_url) + "/" + str(res_id) + "/"
    try:
        response = session.post(rest_url, timeout=timeout)
        if publish_urls and response.status_code == status.HTTP_201_CREATED:
            try:
                resource = utils.get_resource_by_shortkey(res_id)
                response_content = json.loads(response.content.decode())
                # copy the manager's resource-level endpoints into extra metadata
                for key, value in response_content["resource"].items():
                    resource.extra_metadata[key] = value
                resource.save()
                # match each returned layer to its aggregation by name
                for url in response_content["content"]:
                    logical_files = list(resource.logical_files)
                    # NOTE(review): .encode() yields bytes while aggregation_name
                    # is presumably str on Python 3, so index() may never match —
                    # verify against the services manager payload
                    lf = logical_files[[i.aggregation_name for i in
                                        logical_files].index(
                        url["layer_name"].encode()
                    )]
                    lf.metadata.extra_metadata["Web Services URL"] = url["message"]
                    lf.metadata.save()
            except Exception as e:
                logger.error(e)
                return e
        return response
    except (requests.exceptions.RequestException, ValueError) as e:
        logger.error(e)
        return e
@shared_task
def resource_debug(resource_id):
    """Run iRODS file consistency checks for a resource and return the errors found."""
    from hs_core.hydroshare.utils import get_resource_by_shortkey
    from hs_core.management.utils import check_irods_files
    res = get_resource_by_shortkey(resource_id)
    return check_irods_files(res, log_errors=False, return_errors=True)
@shared_task
def unzip_task(user_pk, res_id, zip_with_rel_path, bool_remove_original, overwrite=False, auto_aggregate=False,
               ingest_metadata=False):
    """Unzip an archive inside a resource on behalf of the given user."""
    from hs_core.views.utils import unzip_file
    requesting_user = User.objects.get(pk=user_pk)
    unzip_file(requesting_user, res_id, zip_with_rel_path, bool_remove_original, overwrite,
               auto_aggregate, ingest_metadata)
@periodic_task(ignore_result=True, run_every=crontab(minute=0, hour=12))
def daily_odm2_sync():
    """Pull the externally maintained ODM2 variable list into the local cache once a day."""
    ODM2Variable.sync()
@periodic_task(ignore_result=True, run_every=crontab(day_of_month=1))
def monthly_group_membership_requests_cleanup():
    """Purge group membership requests older than roughly two months."""
    cutoff = datetime.today() - timedelta(days=60)
    GroupMembershipRequest.objects.filter(my_date__lte=cutoff).delete()
@task_postrun.connect
def update_task_notification(sender=None, task_id=None, task=None, state=None, retval=None, **kwargs):
    """
    Updates the state of TaskNotification model when a celery task completes
    :param sender:
    :param task_id: task id
    :param task: task object
    :param state: task return state
    :param retval: task return value
    :param kwargs:
    :return:
    """
    if task.name not in settings.TASK_NAME_LIST:
        return
    if state == states.SUCCESS:
        notification_status = "completed"
    elif state in states.EXCEPTION_STATES:
        notification_status = "failed"
    elif state == states.REVOKED:
        notification_status = "aborted"
    else:
        logger.warning("Unhandled task state of {} for {}".format(state, task_id))
        return
    get_or_create_task_notification(task_id, status=notification_status, payload=retval)
@periodic_task(ignore_result=True, run_every=crontab(day_of_week=1))
def task_notification_cleanup():
    """Drop task notifications that are more than one week old."""
    cutoff = datetime.today() - timedelta(days=7)
    TaskNotification.objects.filter(created__lte=cutoff).delete()
[#3261] updating with develop
"""Define celery tasks for hs_core app."""
import os
import sys
import traceback
import zipfile
import logging
import json
from celery.signals import task_postrun
from datetime import datetime, timedelta, date
from xml.etree import ElementTree
import requests
from celery import shared_task
from celery.schedules import crontab
from celery.task import periodic_task
from django.conf import settings
from django.core.mail import send_mail
from django.core.exceptions import ObjectDoesNotExist, ValidationError
from rest_framework import status
from hs_access_control.models import GroupMembershipRequest
from hs_core.hydroshare import utils, create_empty_resource
from hs_core.hydroshare.hs_bagit import create_bag_metadata_files, create_bag, create_bagit_files_by_irods
from hs_core.hydroshare.resource import get_activated_doi, get_crossref_url, deposit_res_metadata_with_crossref
from hs_core.task_utils import get_or_create_task_notification
from hs_odm2.models import ODM2Variable
from django_irods.storage import IrodsStorage
from theme.models import UserQuota, QuotaMessage, User
from django_irods.icommands import SessionException
from celery.result import states
from hs_core.models import BaseResource, TaskNotification
from theme.utils import get_quota_message
from hs_collection_resource.models import CollectionDeletedResource
# Pass 'django' into getLogger instead of __name__
# for celery tasks (as this seems to be the
# only way to successfully log in code executed
# by celery, despite our catch-all handler).
logger = logging.getLogger('django')
# Currently there are two different cleanups scheduled.
# One is 20 minutes after creation, the other is nightly.
# TODO Clean up zipfiles in remote federated storage as well.
@periodic_task(ignore_result=True, run_every=crontab(minute=30, hour=23))
def nightly_zips_cleanup():
    """Nightly removal of the two-day-old temporary zips folder from the
    local iRODS zone and from every distinct federated zone."""
    # delete 2 days ago
    date_folder = (date.today() - timedelta(2)).strftime('%Y-%m-%d')
    zips_daily_date = "zips/{daily_date}".format(daily_date=date_folder)
    if __debug__:
        logger.debug("cleaning up {}".format(zips_daily_date))
    istorage = IrodsStorage()
    if istorage.exists(zips_daily_date):
        istorage.delete(zips_daily_date)
    # repeat the cleanup under each distinct federation prefix
    federated_prefixes = BaseResource.objects.all().values_list('resource_federation_path')\
        .distinct()
    for p in federated_prefixes:
        prefix = p[0]  # strip tuple
        if prefix != "":
            zips_daily_date = "{prefix}/zips/{daily_date}"\
                .format(prefix=prefix, daily_date=date_folder)
            if __debug__:
                logger.debug("cleaning up {}".format(zips_daily_date))
            istorage = IrodsStorage("federated")
            if istorage.exists(zips_daily_date):
                istorage.delete(zips_daily_date)
@periodic_task(ignore_result=True, run_every=crontab(minute=0, hour=0))
def manage_task_nightly():
    # The nightly running task do DOI activation check
    # Check DOI activation on failed and pending resources and send email.
    #
    # Two passes over published resources:
    #   1. doi contains 'failure' -> retry CrossRef metadata deposition
    #   2. doi contains 'pending' -> poll CrossRef for the deposition result
    # Anything still unresolved is summarized and emailed to support.
    msg_lst = []
    # retrieve all published resources with failed metadata deposition with CrossRef if any and
    # retry metadata deposition
    failed_resources = BaseResource.objects.filter(raccess__published=True, doi__contains='failure')
    for res in failed_resources:
        if res.metadata.dates.all().filter(type='published'):
            pub_date = res.metadata.dates.all().filter(type='published')[0]
            pub_date = pub_date.start_date.strftime('%m/%d/%Y')
            act_doi = get_activated_doi(res.doi)
            response = deposit_res_metadata_with_crossref(res)
            if response.status_code == status.HTTP_200_OK:
                # retry of metadata deposition succeeds, change resource flag from failure
                # to pending
                res.doi = act_doi
                res.save()
                # create bag and compute checksum for published resource to meet DataONE requirement
                create_bag_by_irods(res.short_id)
            else:
                # retry of metadata deposition failed again, notify admin
                msg_lst.append("Metadata deposition with CrossRef for the published resource "
                               "DOI {res_doi} failed again after retry with first metadata "
                               "deposition requested since {pub_date}.".format(res_doi=act_doi,
                                                                               pub_date=pub_date))
                logger.debug(response.content)
        else:
            msg_lst.append("{res_id} does not have published date in its metadata.".format(
                res_id=res.short_id))
    # second pass: poll CrossRef for resources whose DOI is still pending
    pending_resources = BaseResource.objects.filter(raccess__published=True,
                                                    doi__contains='pending')
    for res in pending_resources:
        if res.metadata.dates.all().filter(type='published'):
            pub_date = res.metadata.dates.all().filter(type='published')[0]
            pub_date = pub_date.start_date.strftime('%m/%d/%Y')
            act_doi = get_activated_doi(res.doi)
            main_url = get_crossref_url()
            req_str = '{MAIN_URL}servlet/submissionDownload?usr={USERNAME}&pwd=' \
                      '{PASSWORD}&doi_batch_id={DOI_BATCH_ID}&type={TYPE}'
            response = requests.get(req_str.format(MAIN_URL=main_url,
                                                   USERNAME=settings.CROSSREF_LOGIN_ID,
                                                   PASSWORD=settings.CROSSREF_LOGIN_PWD,
                                                   DOI_BATCH_ID=res.short_id,
                                                   TYPE='result'))
            # CrossRef replies with an XML document; a successful activation has
            # at least one record and zero failures
            root = ElementTree.fromstring(response.content)
            rec_cnt_elem = root.find('.//record_count')
            failure_cnt_elem = root.find('.//failure_count')
            success = False
            if rec_cnt_elem is not None and failure_cnt_elem is not None:
                rec_cnt = int(rec_cnt_elem.text)
                failure_cnt = int(failure_cnt_elem.text)
                if rec_cnt > 0 and failure_cnt == 0:
                    res.doi = act_doi
                    res.save()
                    success = True
                    # create bag and compute checksum for published resource to meet DataONE requirement
                    create_bag_by_irods(res.short_id)
            if not success:
                msg_lst.append("Published resource DOI {res_doi} is not yet activated with request "
                               "data deposited since {pub_date}.".format(res_doi=act_doi,
                                                                         pub_date=pub_date))
                logger.debug(response.content)
        else:
            msg_lst.append("{res_id} does not have published date in its metadata.".format(
                res_id=res.short_id))
    if msg_lst:
        email_msg = '\n'.join(msg_lst)
        subject = 'Notification of pending DOI deposition/activation of published resources'
        # send email for people monitoring and follow-up as needed
        send_mail(subject, email_msg, settings.DEFAULT_FROM_EMAIL, [settings.DEFAULT_SUPPORT_EMAIL])
@periodic_task(ignore_result=True, run_every=crontab(minute=15, hour=0, day_of_week=1,
                                                     day_of_month='1-7'))
def send_over_quota_emails():
    """Check quota usage for all active users and email warnings as needed.

    Runs on the first Monday of every month. For each user at or over the
    soft limit the grace period counter is started/decremented (or zeroed
    once over the hard limit) and a warning email is sent; the counter is
    reset once usage drops back below the soft limit.
    """
    hs_internal_zone = "hydroshare"
    if not QuotaMessage.objects.exists():
        QuotaMessage.objects.create()
    qmsg = QuotaMessage.objects.first()
    users = User.objects.filter(is_active=True).filter(is_superuser=False).all()
    for u in users:
        uq = UserQuota.objects.filter(user__username=u.username, zone=hs_internal_zone).first()
        if uq:
            used_percent = uq.used_percent
            if used_percent >= qmsg.soft_limit_percent:
                if used_percent >= 100 and used_percent < qmsg.hard_limit_percent:
                    if uq.remaining_grace_period < 0:
                        # triggers grace period counting
                        uq.remaining_grace_period = qmsg.grace_period
                    elif uq.remaining_grace_period > 0:
                        # reduce remaining_grace_period by one day
                        uq.remaining_grace_period -= 1
                elif used_percent >= qmsg.hard_limit_percent:
                    # set grace period to 0 when user quota exceeds hard limit
                    uq.remaining_grace_period = 0
                uq.save()
                # build salutation from whatever name parts the user has
                if u.first_name and u.last_name:
                    sal_name = '{} {}'.format(u.first_name, u.last_name)
                elif u.first_name:
                    sal_name = u.first_name
                elif u.last_name:
                    sal_name = u.last_name
                else:
                    sal_name = u.username
                msg_str = 'Dear ' + sal_name + ':\n\n'
                ori_qm = get_quota_message(u)
                # make embedded settings.DEFAULT_SUPPORT_EMAIL clickable with subject auto-filled
                replace_substr = "<a href='mailto:{0}?subject=Request more quota'>{0}</a>".format(
                    settings.DEFAULT_SUPPORT_EMAIL)
                new_qm = ori_qm.replace(settings.DEFAULT_SUPPORT_EMAIL, replace_substr)
                msg_str += new_qm
                msg_str += '\n\nHydroShare Support'
                subject = 'Quota warning'
                if settings.DEBUG:
                    logger.info("quota warning email not sent out on debug server but logged instead: "
                                "{}".format(msg_str))
                else:
                    try:
                        # send email for people monitoring and follow-up as needed
                        send_mail(subject, '', settings.DEFAULT_FROM_EMAIL,
                                  [u.email, settings.DEFAULT_SUPPORT_EMAIL],
                                  html_message=msg_str)
                    except Exception as ex:
                        # Exception.message no longer exists on Python 3; use str(ex)
                        logger.debug("Failed to send quota warning email: " + str(ex))
            else:
                if uq.remaining_grace_period >= 0:
                    # turn grace period off now that the user is below quota soft limit
                    uq.remaining_grace_period = -1
                    uq.save()
        else:
            logger.debug('user ' + u.username + ' does not have UserQuota foreign key relation')
@shared_task
def add_zip_file_contents_to_resource(pk, zip_file_path):
    """Add zip file contents to an existing resource and remove the tmp zip.

    :param pk: short id of the target resource
    :param zip_file_path: path of the temporary zip to unpack; the file is
        always deleted when the task finishes, success or not
    """
    zfile = None
    resource = None
    try:
        resource = utils.get_resource_by_shortkey(pk, or_404=False)
        zfile = zipfile.ZipFile(zip_file_path)
        num_files = len(zfile.infolist())
        zcontents = utils.ZipContents(zfile)
        files = zcontents.get_files()

        resource.file_unpack_status = 'Running'
        resource.save()

        for i, f in enumerate(files):
            logger.debug("Adding file {0} to resource {1}".format(f.name, pk))
            utils.add_file_to_resource(resource, f)
            # enumerate() is zero-based; report the count actually imported
            resource.file_unpack_message = "Imported {0} of about {1} file(s) ...".format(
                i + 1, num_files)
            resource.save()

        # This might make the resource unsuitable for public consumption
        resource.update_public_and_discoverable()
        # TODO: this is a bit of a lie because a different user requested the bag overwrite
        utils.resource_modified(resource, resource.creator, overwrite_bag=False)

        # Call success callback
        resource.file_unpack_message = None
        resource.file_unpack_status = 'Done'
        resource.save()
    except BaseResource.DoesNotExist:
        msg = "Unable to add zip file contents to non-existent resource {pk}."
        msg = msg.format(pk=pk)
        logger.error(msg)
    except Exception:
        # narrowed from a bare except: so SystemExit/KeyboardInterrupt pass through
        exc_info = "".join(traceback.format_exception(*sys.exc_info()))
        if resource:
            resource.file_unpack_status = 'Error'
            resource.file_unpack_message = exc_info
            resource.save()
        logger.error(exc_info)
    finally:
        # close the archive on every path (it previously leaked on success)
        if zfile:
            zfile.close()
        # Delete upload file
        os.unlink(zip_file_path)
@shared_task
def delete_zip(zip_path):
    """Remove a temporary zip from iRODS storage if it is still present."""
    storage = IrodsStorage()
    if storage.exists(zip_path):
        storage.delete(zip_path)
@shared_task
def create_temp_zip(resource_id, input_path, output_path, aggregation_name=None, sf_zip=False, download_path='',
                    request_username=None):
    """ Create temporary zip file from input_path and store in output_path

    :param resource_id: the short_id of a resource
    :param input_path: full irods path of input starting with federation path
    :param output_path: full irods path of output starting with federation path
    :param aggregation_name: The name of the aggregation to zip
    :param sf_zip: signals a single file to zip
    :param download_path: download path to return as task payload
    :param request_username: the username of the requesting user
    """
    from hs_core.hydroshare.utils import get_resource_by_shortkey
    res = get_resource_by_shortkey(resource_id)
    aggregation = None
    if aggregation_name:
        aggregation = res.get_aggregation_by_aggregation_name(aggregation_name)
    istorage = res.get_irods_storage()  # invoke federated storage as necessary

    if res.resource_type == "CompositeResource":
        if '/data/contents/' in input_path:
            short_path = input_path.split('/data/contents/')[1]  # strip /data/contents/
            res.create_aggregation_meta_files(path=short_path)
        else:  # all metadata included, e.g., /data/*
            res.create_aggregation_meta_files()

    if aggregation or sf_zip:
        # input path points to single file aggregation
        # ensure that foo.zip contains aggregation metadata
        # by copying these into a temp subdirectory foo/foo parallel to where foo.zip is stored
        temp_folder_name, ext = os.path.splitext(output_path)  # strip zip to get scratch dir
        head, tail = os.path.split(temp_folder_name)  # tail is unqualified folder name "foo"
        out_with_folder = os.path.join(temp_folder_name, tail)  # foo/foo is subdir to zip
        istorage.copyFiles(input_path, out_with_folder)
        if not aggregation:
            # no explicit aggregation was given; try to resolve one from the path
            if '/data/contents/' in input_path:
                short_path = input_path.split('/data/contents/')[1]  # strip /data/contents/
            else:
                short_path = input_path
            try:
                aggregation = res.get_aggregation_by_name(short_path)
            except ObjectDoesNotExist:
                pass
        if aggregation:
            # copy each metadata artifact of the aggregation next to the data;
            # individual copy failures are logged but do not abort the zip
            try:
                istorage.copyFiles(aggregation.map_file_path, temp_folder_name)
            except SessionException:
                logger.error("cannot copy {}".format(aggregation.map_file_path))
            try:
                istorage.copyFiles(aggregation.metadata_file_path, temp_folder_name)
            except SessionException:
                logger.error("cannot copy {}".format(aggregation.metadata_file_path))
            if aggregation.is_model_program or aggregation.is_model_instance:
                try:
                    istorage.copyFiles(aggregation.schema_file_path, temp_folder_name)
                except SessionException:
                    logger.error("cannot copy {}".format(aggregation.schema_file_path))
                if aggregation.is_model_instance:
                    try:
                        istorage.copyFiles(aggregation.schema_values_file_path, temp_folder_name)
                    except SessionException:
                        logger.error("cannot copy {}".format(aggregation.schema_values_file_path))
            for file in aggregation.files.all():
                try:
                    istorage.copyFiles(file.storage_path, temp_folder_name)
                except SessionException:
                    logger.error("cannot copy {}".format(file.storage_path))
        istorage.zipup(temp_folder_name, output_path)
        istorage.delete(temp_folder_name)  # delete working directory; this isn't the zipfile
    else:  # regular folder to zip
        istorage.zipup(input_path, output_path)
    return download_path
@shared_task
def create_bag_by_irods(resource_id, create_zip=True):
    """Create a resource bag on iRODS side by running the bagit rule and ibun zip.

    This function runs as a celery task, invoked asynchronously so that it does not
    block the main web thread when it creates bags for very large files which will take some time.
    :param
    resource_id: the resource uuid that is used to look for the resource to create the bag for.
    :param create_zip: defaults to True, set to false to create bagit files without zipping
    :return: bag_url if bag creation operation succeeds or
    raise an exception if resource does not exist or any other issues that prevent bags from being created.
    """
    res = utils.get_resource_by_shortkey(resource_id)
    istorage = res.get_irods_storage()

    bag_path = res.bag_path

    metadata_dirty = res.getAVU('metadata_dirty')
    # a missing AVU is treated as dirty so metadata files are (re)generated
    metadata_dirty = metadata_dirty is None or metadata_dirty
    # if metadata has been changed, then regenerate metadata xml files
    if metadata_dirty:
        create_bag_metadata_files(res)

    bag_modified = res.getAVU("bag_modified")
    # likewise, a missing AVU means the bagit files must be rebuilt
    bag_modified = bag_modified is None or bag_modified
    if metadata_dirty or bag_modified:
        create_bagit_files_by_irods(res, istorage)
        res.setAVU("bag_modified", False)

    if create_zip:
        irods_bagit_input_path = res.get_irods_path(resource_id, prepend_short_id=False)

        # only proceed when the resource is not deleted potentially by another request
        # when being downloaded
        is_exist = istorage.exists(irods_bagit_input_path)
        if is_exist:
            try:
                if istorage.exists(bag_path):
                    istorage.delete(bag_path)
                istorage.zipup(irods_bagit_input_path, bag_path)
                if res.raccess.published:
                    # compute checksum to meet DataONE distribution requirement
                    chksum = istorage.checksum(bag_path)
                    res.bag_checksum = chksum
                return res.bag_url
            except SessionException as ex:
                # re-raise with just the stderr from the iRODS session
                raise SessionException(-1, '', ex.stderr)
        else:
            raise ObjectDoesNotExist('Resource {} does not exist.'.format(resource_id))
@shared_task
def copy_resource_task(ori_res_id, new_res_id=None, request_username=None):
    """Copy a resource (files, AVUs and metadata) into a new resource.

    :param ori_res_id: short id of the resource to copy
    :param new_res_id: short id of a pre-created target resource; if None an
        empty target resource is created here
    :param request_username: username of the user requesting the copy
    :return: URL of the new resource
    :raises utils.ResourceCopyException: on any failure (a target resource
        created by this task is deleted first)
    """
    try:
        new_res = None
        if not new_res_id:
            new_res = create_empty_resource(ori_res_id, request_username, action='copy')
            new_res_id = new_res.short_id
        utils.copy_resource_files_and_AVUs(ori_res_id, new_res_id)
        ori_res = utils.get_resource_by_shortkey(ori_res_id)
        if not new_res:
            new_res = utils.get_resource_by_shortkey(new_res_id)
        utils.copy_and_create_metadata(ori_res, new_res)

        if new_res.metadata.relations.all().filter(type='isVersionOf').exists():
            # the resource to be copied is a versioned resource, need to delete this isVersionOf
            # relation element to maintain the single versioning obsolescence chain
            new_res.metadata.relations.all().filter(type='isVersionOf').first().delete()

        # create the relation element for the new_res
        new_res.metadata.create_element('relation', type='source', value=ori_res.get_citation())

        if ori_res.resource_type.lower() == "collectionresource":
            # clone contained_res list of original collection and add to new collection
            # note that new collection will not contain "deleted resources"
            new_res.resources = ori_res.resources.all()

        # create bag for the new resource
        create_bag(new_res)
        return new_res.get_absolute_url()
    except Exception as ex:
        # roll back: remove the partially built copy before surfacing the error
        if new_res:
            new_res.delete()
        raise utils.ResourceCopyException(str(ex))
@shared_task
def create_new_version_resource_task(ori_res_id, username, new_res_id=None):
    """
    Task for creating a new version of a resource

    Args:
        ori_res_id: the original resource id that is to be versioned.
        new_res_id: the new versioned resource id from the original resource. If None, a
            new resource will be created.
        username: the requesting user's username
    Returns:
        the new versioned resource url as the payload
    Raises:
        utils.ResourceVersioningException: on any failure (a new resource
            created by this task is deleted first)
    """
    # initialize before the try block so the finally clause below can safely
    # test it; previously a failure before get_resource_by_shortkey() raised
    # NameError in finally, masking the real exception
    ori_res = None
    try:
        new_res = None
        if not new_res_id:
            new_res = create_empty_resource(ori_res_id, username)
            new_res_id = new_res.short_id
        utils.copy_resource_files_and_AVUs(ori_res_id, new_res_id)

        # copy metadata from source resource to target new-versioned resource except three elements
        ori_res = utils.get_resource_by_shortkey(ori_res_id)
        if not new_res:
            new_res = utils.get_resource_by_shortkey(new_res_id)
        utils.copy_and_create_metadata(ori_res, new_res)

        # add or update Relation element to link source and target resources
        ori_res.metadata.create_element('relation', type='isReplacedBy', value=new_res.get_citation())

        if new_res.metadata.relations.all().filter(type='isVersionOf').exists():
            # the original resource is already a versioned resource, and its isVersionOf relation
            # element is copied over to this new version resource, needs to delete this element so
            # it can be created to link to its original resource correctly
            new_res.metadata.relations.all().filter(type='isVersionOf').first().delete()
        new_res.metadata.create_element('relation', type='isVersionOf', value=ori_res.get_citation())

        if ori_res.resource_type.lower() == "collectionresource":
            # clone contained_res list of original collection and add to new collection
            # note that new version collection will not contain "deleted resources"
            new_res.resources = ori_res.resources.all()

        # create bag for the new resource
        create_bag(new_res)

        # since an isReplaceBy relation element is added to original resource, needs to call
        # resource_modified() for original resource
        utils.resource_modified(ori_res, by_user=username, overwrite_bag=False)
        # if everything goes well up to this point, set original resource to be immutable so that
        # obsoleted resources cannot be modified from REST API
        ori_res.raccess.immutable = True
        ori_res.raccess.save()
        ori_res.save()
        return new_res.get_absolute_url()
    except Exception as ex:
        if new_res:
            new_res.delete()
        raise utils.ResourceVersioningException(str(ex))
    finally:
        # release the lock regardless, but only if the resource was fetched
        if ori_res is not None:
            ori_res.locked_time = None
            ori_res.save()
@shared_task
def replicate_resource_bag_to_user_zone_task(res_id, request_username):
    """
    Task for replicating resource bag which will be created on demand if not existent already to iRODS user zone

    Args:
        res_id: the resource id with its bag to be replicated to iRODS user zone
        request_username: the requesting user's username to whose user zone space the bag is copied to

    Returns:
        None, but exceptions will be raised if there is an issue with iRODS operation
    """
    res = utils.get_resource_by_shortkey(res_id)
    res_coll = res.root_path
    istorage = res.get_irods_storage()
    if istorage.exists(res_coll):
        bag_modified = res.getAVU('bag_modified')
        if bag_modified is None or not bag_modified:
            # bag metadata is current; only build the zip if it is missing
            if not istorage.exists(res.bag_path):
                create_bag_by_irods(res_id)
        else:
            # bag is stale; regenerate it before replication
            create_bag_by_irods(res_id)

        # do replication of the resource bag to irods user zone
        if not res.resource_federation_path:
            istorage.set_fed_zone_session()
        src_file = res.bag_path
        tgt_file = '/{userzone}/home/{username}/{resid}.zip'.format(
            userzone=settings.HS_USER_IRODS_ZONE, username=request_username, resid=res_id)
        fsize = istorage.size(src_file)
        # make sure the user has enough quota headroom for the copied bag
        utils.validate_user_quota(request_username, fsize)
        istorage.copyFiles(src_file, tgt_file)
        return None
    else:
        raise ValidationError("Resource {} does not exist in iRODS".format(res.short_id))
@shared_task
def delete_resource_task(resource_id, request_username=None):
    """
    Deletes a resource managed by HydroShare. The caller must be an owner of the resource or an
    administrator to perform this function.

    :param resource_id: The unique HydroShare identifier of the resource to be deleted
    :param request_username: username of the requester; when set, deletion records are
        created for any collections that contained this resource
    :return: the page URL to redirect to after the deletion completes
    raise an exception if there were errors.
    """
    res = utils.get_resource_by_shortkey(resource_id)
    res_title = res.metadata.title
    res_type = res.resource_type
    resource_related_collections = [col for col in res.collections.all()]
    owners_list = [owner for owner in res.raccess.owners.all()]

    # when the most recent version of a resource in an obsolescence chain is deleted, the previous
    # version in the chain needs to be set as the "active" version by deleting "isReplacedBy"
    # relation element
    if res.metadata.relations.all().filter(type='isVersionOf').exists():
        is_version_of_res_link = \
            res.metadata.relations.all().filter(type='isVersionOf').first().value
        # rfind returns -1 when '/' is absent; rindex (used previously) raised
        # ValueError instead, which made the fallback branch unreachable
        idx = is_version_of_res_link.rfind('/')
        if idx == -1:
            obsolete_res_id = is_version_of_res_link
        else:
            obsolete_res_id = is_version_of_res_link[idx + 1:]
        obsolete_res = utils.get_resource_by_shortkey(obsolete_res_id)
        if obsolete_res.metadata.relations.all().filter(type='isReplacedBy').exists():
            eid = obsolete_res.metadata.relations.all().filter(type='isReplacedBy').first().id
            obsolete_res.metadata.delete_element('relation', eid)
            # also make this obsoleted resource editable if not published now that it becomes the latest version
            if not obsolete_res.raccess.published:
                obsolete_res.raccess.immutable = False
                obsolete_res.raccess.save()

    res.delete()
    if request_username:
        # if the deleted resource is part of any collection resource, then for each of those collection
        # create a CollectionDeletedResource object which can then be used to list collection deleted
        # resources on collection resource landing page
        for collection_res in resource_related_collections:
            o = CollectionDeletedResource.objects.create(
                resource_title=res_title,
                deleted_by=User.objects.get(username=request_username),
                resource_id=resource_id,
                resource_type=res_type,
                collection=collection_res
            )
            o.resource_owners.add(*owners_list)

    # return the page URL to redirect to after resource deletion task is complete
    return '/my-resources/'
@shared_task
def update_web_services(services_url, api_token, timeout, publish_urls, res_id):
    """Update web services hosted by GeoServer and HydroServer.

    This function sends a resource id to the HydroShare web services manager
    application, which will check the current status of the resource and register
    or unregister services hosted by GeoServer and HydroServer.

    The HydroShare web services manager will return a list of endpoint URLs
    for both the resource and individual aggregations. If publish_urls is set to
    True, these endpoints will be added to the extra metadata fields of the
    resource and aggregations.
    """
    session = requests.Session()
    session.headers.update(
        {"Authorization": " ".join(("Token", str(api_token)))}
    )

    rest_url = str(services_url) + "/" + str(res_id) + "/"

    try:
        response = session.post(rest_url, timeout=timeout)

        if publish_urls and response.status_code == status.HTTP_201_CREATED:
            try:
                resource = utils.get_resource_by_shortkey(res_id)
                response_content = json.loads(response.content.decode())

                for key, value in response_content["resource"].items():
                    resource.extra_metadata[key] = value
                    resource.save()

                for url in response_content["content"]:
                    logical_files = list(resource.logical_files)
                    # match on the str aggregation name directly; the previous
                    # .encode() produced bytes that can never equal a str on
                    # Python 3, so the lookup always raised ValueError
                    lf = logical_files[[i.aggregation_name for i in
                                        logical_files].index(
                        url["layer_name"]
                    )]
                    lf.metadata.extra_metadata["Web Services URL"] = url["message"]
                    lf.metadata.save()

            except Exception as e:
                logger.error(e)
                return e

        return response

    except (requests.exceptions.RequestException, ValueError) as e:
        logger.error(e)
        return e
@shared_task
def resource_debug(resource_id):
    """Check a resource's files in iRODS and return any detected problems."""
    from hs_core.hydroshare.utils import get_resource_by_shortkey
    from hs_core.management.utils import check_irods_files

    res = get_resource_by_shortkey(resource_id)
    return check_irods_files(res, log_errors=False, return_errors=True)
@shared_task
def unzip_task(user_pk, res_id, zip_with_rel_path, bool_remove_original, overwrite=False, auto_aggregate=False,
               ingest_metadata=False):
    """Unzip an archive inside a resource on behalf of the given user."""
    from hs_core.views.utils import unzip_file

    requesting_user = User.objects.get(pk=user_pk)
    unzip_file(requesting_user, res_id, zip_with_rel_path, bool_remove_original,
               overwrite, auto_aggregate, ingest_metadata)
@periodic_task(ignore_result=True, run_every=crontab(minute=0, hour=12))
def daily_odm2_sync():
    """Refresh the local cache of ODM2 variables from their external home.

    ODM2 variables are maintained on an external site; this daily noon sync
    keeps the HydroShare copy current.
    """
    ODM2Variable.sync()
@periodic_task(ignore_result=True, run_every=crontab(day_of_month=1))
def monthly_group_membership_requests_cleanup():
    """Purge group membership requests older than 60 days (expired/redeemed)."""
    cutoff = datetime.today() - timedelta(days=60)
    GroupMembershipRequest.objects.filter(my_date__lte=cutoff).delete()
@task_postrun.connect
def update_task_notification(sender=None, task_id=None, task=None, state=None, retval=None, **kwargs):
    """
    Updates the state of TaskNotification model when a celery task completes
    :param sender:
    :param task_id: task id
    :param task: task object
    :param state: task return state
    :param retval: task return value
    :param kwargs:
    :return:
    """
    if task.name not in settings.TASK_NAME_LIST:
        return
    # translate the celery terminal state into a notification status
    if state == states.SUCCESS:
        notification_status = "completed"
    elif state in states.EXCEPTION_STATES:
        notification_status = "failed"
    elif state == states.REVOKED:
        notification_status = "aborted"
    else:
        logger.warning("Unhandled task state of {} for {}".format(state, task_id))
        return
    get_or_create_task_notification(task_id, status=notification_status, payload=retval)
@periodic_task(ignore_result=True, run_every=crontab(day_of_week=1))
def task_notification_cleanup():
    """Delete task notifications that are more than a week old (runs weekly)."""
    cutoff = datetime.today() - timedelta(days=7)
    TaskNotification.objects.filter(created__lte=cutoff).delete()
|
import random
import numpy as np
import time
import robot
# motion-noise model parameters
c = 0  # (unused here; kept for compatibility)
sigmaX = sigmaY = 1.6  # positional noise stddev (cm)
sigmaT = 0.2           # heading noise stddev (degrees)


def getRandomX(mu=0):
    """Gaussian noise for forward/x motion, centred on mu (default 0)."""
    return random.gauss(mu, sigmaX)


def getRandomY(mu=0):
    """Gaussian noise for lateral/y motion, centred on mu (default 0)."""
    return random.gauss(mu, sigmaY)


def getRandomTheta(mu=0):
    """Gaussian noise for heading, centred on mu (default 0)."""
    return random.gauss(mu, sigmaT)
numberOfParticles = 100

#line1 = (10, 10, 10, 500) # (x0, y0, x1, y1)
#line2 = (20, 20, 500, 200) # (x0, y0, x1, y1)
#print "drawLine:" + str(line1)
#print "drawLine:" + str(line2)

#initialise particles
# every particle starts at (x=100, y=100, theta=0); the simulator console
# renders anything printed as "drawParticles:..."
particles = [(100,100,0) for i in range(numberOfParticles)]
particleHistory = []
print "drawParticles:" + str(particles)
#function to move particles straight
def moveParticles(cm):
    """Advance every particle cm units along its own heading, adding
    Gaussian noise to both the distance travelled and the heading."""
    #update particle position after moving cm distance
    for i in range(numberOfParticles):
        x,y,theta = particles[i]
        e = getRandomX()      # distance noise
        f = getRandomTheta()  # heading noise
        particles[i] = ((x + (cm + e)*np.cos(np.deg2rad(theta))), (y + (cm + e)*np.sin(np.deg2rad(theta))), (theta + f))
        nx,ny,ntheta = particles[i]
        # visualise the first particle's displacement as a line segment
        if(i == 0):
            line = (x, y, nx, ny)
            print "drawLine:" + str(line)
# rotate the whole particle cloud in place
def rotateParticles(deg):
    """Rotate every particle by deg degrees, adding Gaussian heading noise."""
    particles[:] = [(px, py, ptheta + deg + getRandomTheta())
                    for (px, py, ptheta) in particles]
#function to draw a square
def drawSquare(cm):
    """Drive the robot along a cm-by-cm square, covering each side in four
    forward steps, while advancing and plotting the particle cloud.

    World coordinates are scaled by scaling_factor relative to robot cm.
    """
    scaling_factor = 10
    for i in range(4):
        for j in xrange(4):
            robot.forwards(cm/4)
            time.sleep(0.1)
            moveParticles(cm*scaling_factor/4)
            saveParticles()
            print "drawParticles:" + str(particleHistory)
        robot.left(90)       # turn the robot 90 degrees at each corner
        rotateParticles(90)  # keep the particle cloud in sync with the turn
def saveParticles():
    """Append a snapshot of the current particle set to the history."""
    particleHistory.extend(particles)
# entry point: trace a 40 cm square while updating the particle cloud
drawSquare(40)
Implemented the mean computation; drawing now works nicely.
import random
import numpy as np
import time
import robot
# motion-noise model parameters
c = 0  # (unused here; kept for compatibility)
sigmaX = sigmaY = 1.6  # positional noise stddev (cm)
sigmaT = 0.2           # heading noise stddev (degrees)


def getRandomX(mu=0):
    """Gaussian noise for forward/x motion, centred on mu (default 0)."""
    return random.gauss(mu, sigmaX)


def getRandomY(mu=0):
    """Gaussian noise for lateral/y motion, centred on mu (default 0)."""
    return random.gauss(mu, sigmaY)


def getRandomTheta(mu=0):
    """Gaussian noise for heading, centred on mu (default 0)."""
    return random.gauss(mu, sigmaT)
numberOfParticles = 100

#initialise particles
# every particle starts at (x=100, y=100, theta=0)
particles = [(100,100,0) for i in range(numberOfParticles)]
particleHistory = []
print "drawParticles:" + str(particles)
def Mean(listOfTuples):
    """Return the component-wise mean (x, y, theta) of a list of particles."""
    count = len(listOfTuples)
    xTotal = sum(t[0] for t in listOfTuples)
    yTotal = sum(t[1] for t in listOfTuples)
    thetaTotal = sum(t[2] for t in listOfTuples)
    return (xTotal/count, yTotal/count, thetaTotal/count)
#function to move particles straight
def moveParticles(cm):
    """Advance every particle cm units along its heading (with noise), then
    draw the displacement of the cloud's mean position as one line."""
    initialPosition = Mean(particles)
    #update particle position after moving cm distance
    for i in range(numberOfParticles):
        x,y,theta = particles[i]
        e = getRandomX()      # distance noise
        f = getRandomTheta()  # heading noise
        particles[i] = ((x + (cm + e)*np.cos(np.deg2rad(theta))), (y + (cm + e)*np.sin(np.deg2rad(theta))), (theta + f))
        nx,ny,ntheta = particles[i]
    newPosition = Mean(particles)
    # segment from old mean to new mean
    line = (initialPosition[0], initialPosition[1], newPosition[0], newPosition[1])
    print "drawLine:" + str(line)
# rotate the whole particle cloud in place
def rotateParticles(deg):
    """Rotate every particle by deg degrees, adding Gaussian heading noise."""
    particles[:] = [(px, py, ptheta + deg + getRandomTheta())
                    for (px, py, ptheta) in particles]
#function to draw a square
def drawSquare(cm):
    """Drive the robot along a cm-by-cm square, covering each side in four
    forward steps, while advancing and plotting the particle cloud.

    World coordinates are scaled by scaling_factor relative to robot cm.
    """
    scaling_factor = 10
    for i in range(4):
        for j in xrange(4):
            robot.forwards(cm/4)
            time.sleep(0.1)
            moveParticles(cm*scaling_factor/4)
            saveParticles()
            print "drawParticles:" + str(particleHistory)
        robot.left(90)       # turn the robot 90 degrees at each corner
        rotateParticles(90)  # keep the particle cloud in sync with the turn
def saveParticles():
    """Append a snapshot of the current particle set to the history."""
    particleHistory.extend(particles)
# entry point: trace a 40 cm square while updating the particle cloud
drawSquare(40)
|
# Copyright 2017 Mycroft AI Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import time
from adapt.context import ContextManagerFrame
from adapt.engine import IntentDeterminationEngine
from adapt.intent import IntentBuilder
from mycroft.configuration import Configuration
from mycroft.messagebus.message import Message
from mycroft.util.lang import set_active_lang
from mycroft.util.log import LOG
from mycroft.util.parse import normalize
from mycroft.metrics import report_timing, Stopwatch
from mycroft.skills.padatious_service import PadatiousService
from .intent_service_interface import open_intent_envelope
class AdaptIntent(IntentBuilder):
    """IntentBuilder wrapper that makes the intent name optional."""
    def __init__(self, name=''):
        super().__init__(name)
def workaround_one_of_context(best_intent):
    """ Handle Adapt issue with context injection combined with one_of.

    For all entries in the intent result where the value is None try to
    populate using a value from the __tags__ structure.
    """
    for key, value in best_intent.items():
        if value is not None:
            continue
        # backfill the missing value from any tag entry carrying this key
        for tag in best_intent['__tags__']:
            if key in tag:
                best_intent[key] = tag[key][0]['entities'][0]['key']
    return best_intent
class ContextManager:
    """
    ContextManager

    Use to track context throughout the course of a conversational session.
    How to manage a session's lifecycle is not captured here.
    """
    def __init__(self, timeout):
        self.frame_stack = []
        self.timeout = timeout * 60  # minutes to seconds

    def clear_context(self):
        """Drop all stored context frames."""
        self.frame_stack = []

    def remove_context(self, context_id):
        """Remove all frames that carry context_id.

        The previous comprehension kept only the matching frames, which
        inverted the meaning of "remove".
        """
        self.frame_stack = [(f, t) for (f, t) in self.frame_stack
                            if context_id not in f.entities[0].get('data', [])]

    def inject_context(self, entity, metadata=None):
        """
        Args:
            entity(object): Format example...
                               {'data': 'Entity tag as <str>',
                                'key': 'entity proper name as <str>',
                                'confidence': <float>'
                               }
            metadata(object): dict, arbitrary metadata about entity injected
        """
        metadata = metadata or {}
        try:
            if len(self.frame_stack) > 0:
                top_frame = self.frame_stack[0]
            else:
                top_frame = None
            if top_frame and top_frame[0].metadata_matches(metadata):
                # compatible metadata: merge into the existing top frame
                top_frame[0].merge_context(entity, metadata)
            else:
                # otherwise start a new frame, timestamped for timeout checks
                frame = ContextManagerFrame(entities=[entity],
                                            metadata=metadata.copy())
                self.frame_stack.insert(0, (frame, time.time()))
        except (IndexError, KeyError):
            pass

    def get_context(self, max_frames=None, missing_entities=None):
        """ Constructs a list of entities from the context.

        Args:
            max_frames(int): maximum number of frames to look back
            missing_entities(list of str): a list or set of tag names,
                as strings

        Returns:
            list: a list of entities
        """
        missing_entities = missing_entities or []
        # only frames younger than the timeout are considered
        relevant_frames = [frame[0] for frame in self.frame_stack if
                           time.time() - frame[1] < self.timeout]
        if not max_frames or max_frames > len(relevant_frames):
            max_frames = len(relevant_frames)

        missing_entities = list(missing_entities)

        context = []
        last = ''
        depth = 0
        for i in range(max_frames):
            frame_entities = [entity.copy() for entity in
                              relevant_frames[i].entities]
            for entity in frame_entities:
                # older/deeper frames contribute with reduced confidence
                entity['confidence'] = entity.get('confidence', 1.0) \
                    / (2.0 + depth)
            context += frame_entities

            # Update depth
            # NOTE(review): relies on `entity` leaking from the loop above and
            # on every entity carrying an 'origin' key — confirm with callers
            if entity['origin'] != last or entity['origin'] == '':
                depth += 1
            last = entity['origin']
        # (removed a stray debug print of `depth` here)

        result = []
        if len(missing_entities) > 0:
            for entity in context:
                if entity.get('data') in missing_entities:
                    result.append(entity)
                    # NOTE: this implies that we will only ever get one
                    # of an entity kind from context, unless specified
                    # multiple times in missing_entities. Cannot get
                    # an arbitrary number of an entity kind.
                    missing_entities.remove(entity.get('data'))
        else:
            result = context

        # Only use the latest instance of each keyword
        stripped = []
        processed = []
        for f in result:
            keyword = f['data'][0][1]
            if keyword not in processed:
                stripped.append(f)
                processed.append(keyword)
        result = stripped
        return result
class IntentService:
    def __init__(self, bus):
        """Set up intent determination and register all messagebus handlers.

        Args:
            bus: messagebus connection used for all event traffic
        """
        self.config = Configuration.get().get('context', {})
        self.engine = IntentDeterminationEngine()

        # Dictionary for translating a skill id to a name
        self.skill_names = {}
        # Context related intializations
        self.context_keywords = self.config.get('keywords', [])
        self.context_max_frames = self.config.get('max_frames', 3)
        self.context_timeout = self.config.get('timeout', 2)
        self.context_greedy = self.config.get('greedy', False)
        self.context_manager = ContextManager(self.context_timeout)

        self.bus = bus
        self.bus.on('register_vocab', self.handle_register_vocab)
        self.bus.on('register_intent', self.handle_register_intent)
        self.bus.on('recognizer_loop:utterance', self.handle_utterance)
        self.bus.on('detach_intent', self.handle_detach_intent)
        self.bus.on('detach_skill', self.handle_detach_skill)
        # Context related handlers
        self.bus.on('add_context', self.handle_add_context)
        self.bus.on('remove_context', self.handle_remove_context)
        self.bus.on('clear_context', self.handle_clear_context)
        # Converse method
        self.bus.on('skill.converse.response', self.handle_converse_response)
        self.bus.on('skill.converse.error', self.handle_converse_error)
        self.bus.on('mycroft.speech.recognition.unknown', self.reset_converse)
        self.bus.on('mycroft.skills.loaded', self.update_skill_name_dict)

        def add_active_skill_handler(message):
            # closure handler: promote the requesting skill to "active"
            self.add_active_skill(message.data['skill_id'])

        self.bus.on('active_skill_request', add_active_skill_handler)
        self.active_skills = []  # [skill_id , timestamp]
        self.converse_timeout = 5  # minutes to prune active_skills
        self.waiting_for_converse = False
        self.converse_result = False
        self.converse_skill_id = ""

        # Intents API
        self.registered_intents = []
        self.registered_vocab = []
        self.bus.on('intent.service.adapt.get', self.handle_get_adapt)
        self.bus.on('intent.service.intent.get', self.handle_get_intent)
        self.bus.on('intent.service.skills.get', self.handle_get_skills)
        self.bus.on('intent.service.active_skills.get',
                    self.handle_get_active_skills)
        self.bus.on('intent.service.adapt.manifest.get', self.handle_manifest)
        self.bus.on('intent.service.adapt.vocab.manifest.get',
                    self.handle_vocab_manifest)
def update_skill_name_dict(self, message):
"""
Messagebus handler, updates dictionary of if to skill name
conversions.
"""
self.skill_names[message.data['id']] = message.data['name']
def get_skill_name(self, skill_id):
""" Get skill name from skill ID.
Args:
skill_id: a skill id as encoded in Intent handlers.
Returns:
(str) Skill name or the skill id if the skill wasn't found
"""
return self.skill_names.get(skill_id, skill_id)
def reset_converse(self, message):
"""Let skills know there was a problem with speech recognition"""
lang = message.data.get('lang', "en-us")
set_active_lang(lang)
for skill in self.active_skills:
self.do_converse(None, skill[0], lang)
def do_converse(self, utterances, skill_id, lang, message):
self.waiting_for_converse = True
self.converse_result = False
self.converse_skill_id = skill_id
self.bus.emit(message.reply("skill.converse.request", {
"skill_id": skill_id, "utterances": utterances, "lang": lang}))
start_time = time.time()
t = 0
while self.waiting_for_converse and t < 5:
t = time.time() - start_time
time.sleep(0.1)
self.waiting_for_converse = False
self.converse_skill_id = ""
return self.converse_result
def handle_converse_error(self, message):
skill_id = message.data["skill_id"]
if message.data["error"] == "skill id does not exist":
self.remove_active_skill(skill_id)
if skill_id == self.converse_skill_id:
self.converse_result = False
self.waiting_for_converse = False
def handle_converse_response(self, message):
skill_id = message.data["skill_id"]
if skill_id == self.converse_skill_id:
self.converse_result = message.data.get("result", False)
self.waiting_for_converse = False
def remove_active_skill(self, skill_id):
for skill in self.active_skills:
if skill[0] == skill_id:
self.active_skills.remove(skill)
def add_active_skill(self, skill_id):
# search the list for an existing entry that already contains it
# and remove that reference
self.remove_active_skill(skill_id)
# add skill with timestamp to start of skill_list
self.active_skills.insert(0, [skill_id, time.time()])
def update_context(self, intent):
    """Inject keyword entities from a matched intent into the context.

    NOTE: one_of intent keywords are not handled here; their format
    differs from other intent keywords (pending upstream fix in adapt).

    Args:
        intent: Intent to scan for keywords
    """
    for tag in intent['__tags__']:
        if 'entities' not in tag:
            # Tag matched no entity; nothing to inject.
            continue
        entity = tag['entities'][0]
        # Greedy mode keeps every entity; otherwise only configured
        # context keywords are injected (short-circuit keeps the data
        # lookup lazy, exactly like the original if/elif).
        if self.context_greedy or entity['data'][0][1] in self.context_keywords:
            self.context_manager.inject_context(entity)
def send_metrics(self, intent, context, stopwatch):
    """Send timing metrics to the backend (Opt-In users only).

    Args:
        intent: matched intent structure, or None on failure
        context: message context dict, may carry an 'ident'
        stopwatch: Stopwatch holding the handling duration
    """
    ident = context.get('ident')
    if not intent:
        report_timing(ident, 'intent_service', stopwatch,
                      {'intent_type': 'intent_failure'})
        return
    # Recreate a readable name from the skill-id portion of intent_type.
    parts = intent.get('intent_type', '').split(':')
    parts[0] = self.get_skill_name(parts[0])
    report_timing(ident, 'intent_service', stopwatch,
                  {'intent_type': ':'.join(parts)})
def handle_utterance(self, message):
    """ Main entrypoint for handling user utterances with Mycroft skills

    Monitor the messagebus for 'recognizer_loop:utterance', typically
    generated by a spoken interaction but potentially also from a CLI
    or other method of injecting a 'user utterance' into the system.

    Utterances then work through this sequence to be handled:
    1) Active skills attempt to handle using converse()
    2) Padatious high match intents (conf > 0.95)
    3) Adapt intent handlers
    4) Fallbacks:
       - Padatious near match intents (conf > 0.8)
       - General fallbacks
       - Padatious loose match intents (conf > 0.5)
       - Unknown intent handler

    Args:
        message (Message): The messagebus data
    """
    try:
        # Get language of the utterance
        lang = message.data.get('lang', "en-us")
        set_active_lang(lang)
        utterances = message.data.get('utterances', [])
        # normalize() changes "it's a boy" to "it is a boy", etc.
        norm_utterances = [normalize(u.lower(), remove_articles=False)
                           for u in utterances]
        # Build list with raw utterance(s) first, then optionally a
        # normalized version following.
        combined = utterances + list(set(norm_utterances) -
                                     set(utterances))
        LOG.debug("Utterances: {}".format(combined))
        stopwatch = Stopwatch()
        intent = None
        padatious_intent = None
        with stopwatch:
            # Give active skills an opportunity to handle the utterance
            converse = self._converse(combined, lang, message)
            if not converse:
                # No conversation, use intent system to handle utterance
                intent = self._adapt_intent_match(utterances,
                                                  norm_utterances, lang)
                # Track the best Padatious confidence over all utterance
                # variants for the comparison against Adapt below.
                for utt in combined:
                    _intent = PadatiousService.instance.calc_intent(utt)
                    if _intent:
                        best = padatious_intent.conf if padatious_intent \
                            else 0.0
                        if best < _intent.conf:
                            padatious_intent = _intent
        LOG.debug("Padatious intent: {}".format(padatious_intent))
        LOG.debug("    Adapt intent: {}".format(intent))
        if converse:
            # Report that converse handled the intent and return
            LOG.debug("Handled in converse()")
            ident = None
            if message.context and 'ident' in message.context:
                ident = message.context['ident']
            report_timing(ident, 'intent_service', stopwatch,
                          {'intent_type': 'converse'})
            return
        elif (intent and intent.get('confidence', 0.0) > 0.0 and
                not (padatious_intent and padatious_intent.conf >= 0.95)):
            # Send the message to the Adapt intent's handler unless
            # Padatious is REALLY sure it was directed at it instead.
            self.update_context(intent)
            # update active skills
            skill_id = intent['intent_type'].split(":")[0]
            self.add_active_skill(skill_id)
            # Adapt doesn't handle context injection for one_of keywords
            # correctly. Workaround this issue if possible.
            try:
                intent = workaround_one_of_context(intent)
            except LookupError:
                LOG.error('Error during workaround_one_of_context')
            reply = message.reply(intent.get('intent_type'), intent)
        else:
            # Allow fallback system to handle utterance
            # NOTE: A matched padatious_intent is handled this way, too
            # TODO: Need to redefine intent_failure when STT can return
            # multiple hypothesis -- i.e. len(utterances) > 1
            reply = message.reply('intent_failure',
                                  {'utterance': utterances[0],
                                   'norm_utt': norm_utterances[0],
                                   'lang': lang})
        self.bus.emit(reply)
        self.send_metrics(intent, message.context, stopwatch)
    except Exception as e:
        # Boundary handler: never let a single bad utterance kill the
        # intent service; log with traceback instead.
        LOG.exception(e)
def _converse(self, utterances, lang, message):
    """Offer the utterances to each active skill's converse().

    Args:
        utterances (list): list of utterances
        lang (string): 4 letter ISO language code
        message (Message): message to use to generate reply

    Returns:
        bool: True if a skill handled the utterance, otherwise False
    """
    # Prune skills whose conversation window has timed out.
    cutoff = self.converse_timeout * 60
    self.active_skills = [skill for skill in self.active_skills
                          if time.time() - skill[1] <= cutoff]
    # The first active skill that accepts the utterance wins.
    for skill in self.active_skills:
        if self.do_converse(utterances, skill[0], lang, message):
            # Refresh the timestamp so an in-use conversation does not
            # expire mid-dialog.
            self.add_active_skill(skill[0])
            return True
    return False
def _adapt_intent_match(self, raw_utt, norm_utt, lang):
    """Search the Adapt engine for the best matching intent.

    Args:
        raw_utt (list): list of utterances
        norm_utt (list): same list of utterances, normalized
        lang (string): language code, e.g "en-us"

    Returns:
        Intent structure, or None if no match was found.
    """
    best_intent = None

    def consider(candidates, utt):
        """Keep the first candidate when it beats the best so far."""
        nonlocal best_intent
        candidates = list(candidates)
        if not candidates:
            return
        intent = candidates[0]
        current = best_intent.get('confidence', 0.0) if best_intent else 0.0
        if intent.get('confidence', 0.0) > current:
            best_intent = intent
            # TODO - Shouldn't Adapt do this?
            best_intent['utterance'] = utt

    for utt, norm in zip(raw_utt, norm_utt):
        try:
            consider(self.engine.determine_intent(
                utt, 100, include_tags=True,
                context_manager=self.context_manager), utt)
            # Also test the normalized version, but set the utterance to
            # the raw version so the skill sees the original STT text.
            consider(self.engine.determine_intent(
                norm, 100, include_tags=True,
                context_manager=self.context_manager), utt)
        except Exception as err:
            # A broken parser must not abort matching the remaining
            # utterances.
            LOG.exception(err)
    return best_intent
def handle_register_vocab(self, message):
    """Register a vocab keyword or regex entity with the Adapt engine."""
    data = message.data
    regex_str = data.get('regex')
    if regex_str:
        self.engine.register_regex_entity(regex_str)
    else:
        self.engine.register_entity(data.get('start'), data.get('end'),
                                    alias_of=data.get('alias_of'))
    # Keep a record for the vocab manifest.
    self.registered_vocab.append(data)

def handle_register_intent(self, message):
    """Register an intent parser received over the messagebus."""
    parser = open_intent_envelope(message)
    self.engine.register_intent_parser(parser)

def handle_detach_intent(self, message):
    """Drop a single intent parser, identified by name."""
    target = message.data.get('intent_name')
    self.engine.intent_parsers = [p for p in self.engine.intent_parsers
                                  if p.name != target]

def handle_detach_skill(self, message):
    """Drop every intent parser registered by the given skill."""
    skill_id = message.data.get('skill_id')
    self.engine.intent_parsers = [p for p in self.engine.intent_parsers
                                  if not p.name.startswith(skill_id)]
def handle_add_context(self, message):
    """Add a context entity from a messagebus request.

    Args:
        message: data contains the 'context' item to add, optionally a
                 'word' to inject as alias and an 'origin' marker.
    """
    context = message.data.get('context')
    word = message.data.get('word') or ''
    origin = message.data.get('origin') or ''
    if not isinstance(word, str):
        # Coerce non-string aliases into strings.
        word = str(word)
    entity = {
        'confidence': 1.0,
        'data': [(word, context)],
        'match': word,
        'key': word,
        'origin': origin,
    }
    self.context_manager.inject_context(entity)

def handle_remove_context(self, message):
    """Remove one specific context item named in the message data."""
    context = message.data.get('context')
    if not context:
        return
    self.context_manager.remove_context(context)

def handle_clear_context(self, message):
    """Clear every keyword from the conversational context."""
    self.context_manager.clear_context()
def handle_get_adapt(self, message):
    """Reply with the Adapt match for a single utterance (intents API)."""
    utt = message.data["utterance"]
    lang = message.data.get("lang", "en-us")
    norm = normalize(utt, lang, remove_articles=False)
    matched = self._adapt_intent_match([utt], [norm], lang)
    self.bus.emit(message.reply("intent.service.adapt.reply",
                                {"intent": matched}))
def handle_get_intent(self, message):
    """Reply with the intent that would be selected for an utterance.

    Consults both Adapt and Padatious; the Adapt intent wins unless it
    found nothing or Padatious is very confident (conf >= 0.95).

    Args:
        message: bus message with 'utterance' and optional 'lang' data.
    """
    utterance = message.data["utterance"]
    lang = message.data.get("lang", "en-us")
    norm = normalize(utterance, lang, remove_articles=False)
    intent = self._adapt_intent_match([utterance], [norm], lang)
    # Adapt intent's handler is used unless
    # Padatious is REALLY sure it was directed at it instead.
    padatious_intent = PadatiousService.instance.calc_intent(utterance)
    if not padatious_intent and norm != utterance:
        padatious_intent = PadatiousService.instance.calc_intent(norm)
    # Bug fix: only dereference padatious_intent when it exists --
    # previously a total miss (no Adapt AND no Padatious match) raised
    # AttributeError on None.__dict__ instead of replying intent=None.
    if padatious_intent and (
            intent is None or padatious_intent.conf >= 0.95):
        intent = padatious_intent.__dict__
    self.bus.emit(message.reply("intent.service.intent.reply",
                                {"intent": intent}))
def handle_get_skills(self, message):
    """Reply with the known skill-id to skill-name mapping."""
    self.bus.emit(message.reply("intent.service.skills.reply",
                                {"skills": self.skill_names}))

def handle_get_active_skills(self, message):
    """Reply with the ids of all currently active (conversing) skills."""
    active = [skill[0] for skill in self.active_skills]
    self.bus.emit(message.reply("intent.service.active_skills.reply",
                                {"skills": active}))

def handle_manifest(self, message):
    """Reply with the list of registered Adapt intents."""
    self.bus.emit(message.reply("intent.service.adapt.manifest",
                                {"intents": self.registered_intents}))

def handle_vocab_manifest(self, message):
    """Reply with all vocabulary registered with Adapt."""
    self.bus.emit(message.reply("intent.service.adapt.vocab.manifest",
                                {"vocab": self.registered_vocab}))
Fix resetting of converse state on STT failure

The handler was failing silently when STT did not return any data:
reset_converse() called do_converse() without the required message
argument, so active skills were never notified.
# Copyright 2017 Mycroft AI Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import time
from adapt.context import ContextManagerFrame
from adapt.engine import IntentDeterminationEngine
from adapt.intent import IntentBuilder
from mycroft.configuration import Configuration
from mycroft.messagebus.message import Message
from mycroft.util.lang import set_active_lang
from mycroft.util.log import LOG
from mycroft.util.parse import normalize
from mycroft.metrics import report_timing, Stopwatch
from mycroft.skills.padatious_service import PadatiousService
from .intent_service_interface import open_intent_envelope
class AdaptIntent(IntentBuilder):
    """Thin wrapper around adapt's IntentBuilder taking an optional name."""
    def __init__(self, name=''):
        super().__init__(name)
def workaround_one_of_context(best_intent):
    """Handle an Adapt issue with context injection combined with one_of.

    For every entry in the intent result whose value is None, try to fill
    it in from the matching entry in the intent's __tags__ structure.

    Returns:
        The (mutated) intent structure.
    """
    for key, value in best_intent.items():
        if value is not None:
            continue
        for tag in best_intent['__tags__']:
            if key in tag:
                best_intent[key] = tag[key][0]['entities'][0]['key']
    return best_intent
class ContextManager:
    """Tracks context throughout the course of a conversational session.

    Frames are stacked newest-first with a timestamp; entities in older
    frames get lower confidence when the context list is built. How a
    session's lifecycle is managed is not captured here.
    """
    def __init__(self, timeout):
        self.frame_stack = []
        self.timeout = timeout * 60  # minutes to seconds

    def clear_context(self):
        """Drop all stored context frames."""
        self.frame_stack = []

    def remove_context(self, context_id):
        """Remove a context entry.

        NOTE(review): this keeps only frames whose first entity 'data'
        contains context_id rather than dropping them -- the condition
        looks inverted; confirm intended behavior before changing it.
        """
        self.frame_stack = [(f, t) for (f, t) in self.frame_stack
                            if context_id in f.entities[0].get('data', [])]

    def inject_context(self, entity, metadata=None):
        """Merge an entity into the top frame or push a new frame.

        Args:
            entity(object): Format example...
                               {'data': 'Entity tag as <str>',
                                'key': 'entity proper name as <str>',
                                'confidence': <float>'
                               }
            metadata(object): dict, arbitrary metadata about entity injected
        """
        metadata = metadata or {}
        try:
            if len(self.frame_stack) > 0:
                top_frame = self.frame_stack[0]
            else:
                top_frame = None
            if top_frame and top_frame[0].metadata_matches(metadata):
                top_frame[0].merge_context(entity, metadata)
            else:
                frame = ContextManagerFrame(entities=[entity],
                                            metadata=metadata.copy())
                self.frame_stack.insert(0, (frame, time.time()))
        except (IndexError, KeyError):
            # Best-effort: malformed entities are silently ignored.
            pass

    def get_context(self, max_frames=None, missing_entities=None):
        """ Constructs a list of entities from the context.

        Args:
            max_frames(int): maximum number of frames to look back
            missing_entities(list of str): a list or set of tag names,
                                           as strings

        Returns:
            list: a list of entities
        """
        missing_entities = missing_entities or []
        relevant_frames = [frame[0] for frame in self.frame_stack if
                           time.time() - frame[1] < self.timeout]
        if not max_frames or max_frames > len(relevant_frames):
            max_frames = len(relevant_frames)

        missing_entities = list(missing_entities)

        context = []
        last = ''
        depth = 0
        for i in range(max_frames):
            frame_entities = [entity.copy() for entity in
                              relevant_frames[i].entities]
            for entity in frame_entities:
                # Older frames (greater depth) get lower confidence.
                entity['confidence'] = entity.get('confidence', 1.0) \
                    / (2.0 + depth)
            context += frame_entities

            # Update depth based on the last entity of this frame.
            # Fixes: use .get() since entities injected from intent tags
            # may lack an 'origin' key (was a KeyError), and guard
            # against an empty frame (was an unbound loop variable).
            if frame_entities:
                origin = frame_entities[-1].get('origin', '')
                if origin != last or origin == '':
                    depth += 1
                last = origin
        # (Removed a stray debug print of `depth` that polluted stdout.)

        result = []
        if len(missing_entities) > 0:
            for entity in context:
                if entity.get('data') in missing_entities:
                    result.append(entity)
                    # NOTE: this implies that we will only ever get one
                    # of an entity kind from context, unless specified
                    # multiple times in missing_entities. Cannot get
                    # an arbitrary number of an entity kind.
                    missing_entities.remove(entity.get('data'))
        else:
            result = context

        # Only use the latest instance of each keyword
        stripped = []
        processed = []
        for f in result:
            keyword = f['data'][0][1]
            if keyword not in processed:
                stripped.append(f)
                processed.append(keyword)
        result = stripped
        return result
class IntentService:
    """Mycroft intent service.

    Routes user utterances to skills: matches against registered Adapt
    intents (with Padatious consulted for comparison), manages
    conversational context and active-skill converse() negotiation.
    """
    def __init__(self, bus):
        """Wire all intent-service handlers up to the messagebus.

        Args:
            bus: messagebus connection shared with the skill loader.
        """
        self.config = Configuration.get().get('context', {})
        self.engine = IntentDeterminationEngine()
        # Dictionary for translating a skill id to a name
        self.skill_names = {}
        # Context related intializations
        self.context_keywords = self.config.get('keywords', [])
        self.context_max_frames = self.config.get('max_frames', 3)
        self.context_timeout = self.config.get('timeout', 2)
        self.context_greedy = self.config.get('greedy', False)
        self.context_manager = ContextManager(self.context_timeout)
        self.bus = bus
        self.bus.on('register_vocab', self.handle_register_vocab)
        self.bus.on('register_intent', self.handle_register_intent)
        self.bus.on('recognizer_loop:utterance', self.handle_utterance)
        self.bus.on('detach_intent', self.handle_detach_intent)
        self.bus.on('detach_skill', self.handle_detach_skill)
        # Context related handlers
        self.bus.on('add_context', self.handle_add_context)
        self.bus.on('remove_context', self.handle_remove_context)
        self.bus.on('clear_context', self.handle_clear_context)
        # Converse method
        self.bus.on('skill.converse.response', self.handle_converse_response)
        self.bus.on('skill.converse.error', self.handle_converse_error)
        self.bus.on('mycroft.speech.recognition.unknown', self.reset_converse)
        self.bus.on('mycroft.skills.loaded', self.update_skill_name_dict)

        def add_active_skill_handler(message):
            # Thin adapter unpacking the skill id from the payload.
            self.add_active_skill(message.data['skill_id'])
        self.bus.on('active_skill_request', add_active_skill_handler)

        self.active_skills = []  # [skill_id , timestamp]
        self.converse_timeout = 5  # minutes to prune active_skills
        self.waiting_for_converse = False
        self.converse_result = False
        self.converse_skill_id = ""

        # Intents API
        self.registered_intents = []
        self.registered_vocab = []
        self.bus.on('intent.service.adapt.get', self.handle_get_adapt)
        self.bus.on('intent.service.intent.get', self.handle_get_intent)
        self.bus.on('intent.service.skills.get', self.handle_get_skills)
        self.bus.on('intent.service.active_skills.get',
                    self.handle_get_active_skills)
        self.bus.on('intent.service.adapt.manifest.get', self.handle_manifest)
        self.bus.on('intent.service.adapt.vocab.manifest.get',
                    self.handle_vocab_manifest)
def update_skill_name_dict(self, message):
    """
    Messagebus handler, updates dictionary of skill id to skill name
    conversions.
    """
    self.skill_names[message.data['id']] = message.data['name']

def get_skill_name(self, skill_id):
    """ Get skill name from skill ID.

    Args:
        skill_id: a skill id as encoded in Intent handlers.

    Returns:
        (str) Skill name or the skill id if the skill wasn't found
    """
    return self.skill_names.get(skill_id, skill_id)
def reset_converse(self, message):
    """Let skills know there was a problem with speech recognition.

    Pings converse() of every active skill with no utterances so each
    one can reset its conversational state.
    """
    lang = message.data.get('lang', "en-us")
    set_active_lang(lang)
    for skill in self.active_skills:
        self.do_converse(None, skill[0], lang, message)

def do_converse(self, utterances, skill_id, lang, message):
    """Ask one skill whether it wants to handle the utterances.

    Emits skill.converse.request and busy-waits (max ~5 s) until the
    response/error handler clears self.waiting_for_converse.

    Returns:
        bool: the skill's converse() result, False on timeout.
    """
    self.waiting_for_converse = True
    self.converse_result = False
    self.converse_skill_id = skill_id
    self.bus.emit(message.reply("skill.converse.request", {
        "skill_id": skill_id, "utterances": utterances, "lang": lang}))
    start_time = time.time()
    t = 0
    while self.waiting_for_converse and t < 5:
        t = time.time() - start_time
        time.sleep(0.1)
    self.waiting_for_converse = False
    self.converse_skill_id = ""
    return self.converse_result

def handle_converse_error(self, message):
    """Messagebus handler for skill.converse.error."""
    skill_id = message.data["skill_id"]
    if message.data["error"] == "skill id does not exist":
        self.remove_active_skill(skill_id)
    if skill_id == self.converse_skill_id:
        # Unblock the do_converse() wait loop with a negative result.
        self.converse_result = False
        self.waiting_for_converse = False

def handle_converse_response(self, message):
    """Messagebus handler for skill.converse.response."""
    skill_id = message.data["skill_id"]
    if skill_id == self.converse_skill_id:
        self.converse_result = message.data.get("result", False)
        self.waiting_for_converse = False

def remove_active_skill(self, skill_id):
    """Remove a skill from the active-skill list.

    Bug fix: rebuild the list instead of calling list.remove() while
    iterating over it -- the old loop skipped the element after each
    removal, so duplicate entries could survive.
    """
    self.active_skills = [skill for skill in self.active_skills
                          if skill[0] != skill_id]

def add_active_skill(self, skill_id):
    """Move (or insert) a skill to the front of the active-skill list."""
    # search the list for an existing entry that already contains it
    # and remove that reference
    self.remove_active_skill(skill_id)
    # add skill with timestamp to start of skill_list
    self.active_skills.insert(0, [skill_id, time.time()])
def update_context(self, intent):
    """ Updates context with keyword from the intent.

    NOTE: This method currently won't handle one_of intent keywords
    since it's not using quite the same format as other intent
    keywords. This is under investigation in adapt, PR pending.

    Args:
        intent: Intent to scan for keywords
    """
    for tag in intent['__tags__']:
        if 'entities' not in tag:
            # Tag matched no entity; nothing to inject.
            continue
        context_entity = tag['entities'][0]
        if self.context_greedy:
            # Greedy mode: every matched entity becomes context.
            self.context_manager.inject_context(context_entity)
        elif context_entity['data'][0][1] in self.context_keywords:
            # Otherwise only configured context keywords are injected.
            self.context_manager.inject_context(context_entity)

def send_metrics(self, intent, context, stopwatch):
    """
    Send timing metrics to the backend.

    NOTE: This only applies to those with Opt In.
    """
    ident = context['ident'] if 'ident' in context else None
    if intent:
        # Recreate skill name from skill id
        parts = intent.get('intent_type', '').split(':')
        intent_type = self.get_skill_name(parts[0])
        if len(parts) > 1:
            intent_type = ':'.join([intent_type] + parts[1:])
        report_timing(ident, 'intent_service', stopwatch,
                      {'intent_type': intent_type})
    else:
        report_timing(ident, 'intent_service', stopwatch,
                      {'intent_type': 'intent_failure'})
def handle_utterance(self, message):
    """ Main entrypoint for handling user utterances with Mycroft skills

    Monitor the messagebus for 'recognizer_loop:utterance', typically
    generated by a spoken interaction but potentially also from a CLI
    or other method of injecting a 'user utterance' into the system.

    Utterances then work through this sequence to be handled:
    1) Active skills attempt to handle using converse()
    2) Padatious high match intents (conf > 0.95)
    3) Adapt intent handlers
    4) Fallbacks:
       - Padatious near match intents (conf > 0.8)
       - General fallbacks
       - Padatious loose match intents (conf > 0.5)
       - Unknown intent handler

    Args:
        message (Message): The messagebus data
    """
    try:
        # Get language of the utterance
        lang = message.data.get('lang', "en-us")
        set_active_lang(lang)
        utterances = message.data.get('utterances', [])
        # normalize() changes "it's a boy" to "it is a boy", etc.
        norm_utterances = [normalize(u.lower(), remove_articles=False)
                           for u in utterances]
        # Build list with raw utterance(s) first, then optionally a
        # normalized version following.
        combined = utterances + list(set(norm_utterances) -
                                     set(utterances))
        LOG.debug("Utterances: {}".format(combined))
        stopwatch = Stopwatch()
        intent = None
        padatious_intent = None
        with stopwatch:
            # Give active skills an opportunity to handle the utterance
            converse = self._converse(combined, lang, message)
            if not converse:
                # No conversation, use intent system to handle utterance
                intent = self._adapt_intent_match(utterances,
                                                  norm_utterances, lang)
                # Track the best Padatious confidence over all utterance
                # variants for the comparison against Adapt below.
                for utt in combined:
                    _intent = PadatiousService.instance.calc_intent(utt)
                    if _intent:
                        best = padatious_intent.conf if padatious_intent \
                            else 0.0
                        if best < _intent.conf:
                            padatious_intent = _intent
        LOG.debug("Padatious intent: {}".format(padatious_intent))
        LOG.debug("    Adapt intent: {}".format(intent))
        if converse:
            # Report that converse handled the intent and return
            LOG.debug("Handled in converse()")
            ident = None
            if message.context and 'ident' in message.context:
                ident = message.context['ident']
            report_timing(ident, 'intent_service', stopwatch,
                          {'intent_type': 'converse'})
            return
        elif (intent and intent.get('confidence', 0.0) > 0.0 and
                not (padatious_intent and padatious_intent.conf >= 0.95)):
            # Send the message to the Adapt intent's handler unless
            # Padatious is REALLY sure it was directed at it instead.
            self.update_context(intent)
            # update active skills
            skill_id = intent['intent_type'].split(":")[0]
            self.add_active_skill(skill_id)
            # Adapt doesn't handle context injection for one_of keywords
            # correctly. Workaround this issue if possible.
            try:
                intent = workaround_one_of_context(intent)
            except LookupError:
                LOG.error('Error during workaround_one_of_context')
            reply = message.reply(intent.get('intent_type'), intent)
        else:
            # Allow fallback system to handle utterance
            # NOTE: A matched padatious_intent is handled this way, too
            # TODO: Need to redefine intent_failure when STT can return
            # multiple hypothesis -- i.e. len(utterances) > 1
            reply = message.reply('intent_failure',
                                  {'utterance': utterances[0],
                                   'norm_utt': norm_utterances[0],
                                   'lang': lang})
        self.bus.emit(reply)
        self.send_metrics(intent, message.context, stopwatch)
    except Exception as e:
        # Boundary handler: never let a single bad utterance kill the
        # intent service; log with traceback instead.
        LOG.exception(e)
def _converse(self, utterances, lang, message):
    """ Give active skills a chance at the utterance

    Args:
        utterances (list): list of utterances
        lang (string): 4 letter ISO language code
        message (Message): message to use to generate reply

    Returns:
        bool: True if converse handled it, False if no skill processes it
    """
    # check for conversation time-out
    self.active_skills = [skill for skill in self.active_skills
                          if time.time() - skill[
                              1] <= self.converse_timeout * 60]
    # check if any skill wants to handle utterance
    for skill in self.active_skills:
        if self.do_converse(utterances, skill[0], lang, message):
            # update timestamp, or there will be a timeout where
            # intent stops conversing whether its being used or not
            self.add_active_skill(skill[0])
            return True
    return False

def _adapt_intent_match(self, raw_utt, norm_utt, lang):
    """ Run the Adapt engine to search for an matching intent

    Args:
        raw_utt (list): list of utterances
        norm_utt (list): same list of utterances, normalized
        lang (string): language code, e.g "en-us"

    Returns:
        Intent structure, or None if no match was found.
    """
    best_intent = None

    def take_best(intent, utt):
        # Keep only the highest-confidence intent seen so far.
        nonlocal best_intent
        best = best_intent.get('confidence', 0.0) if best_intent else 0.0
        conf = intent.get('confidence', 0.0)
        if conf > best:
            best_intent = intent
            # TODO - Shouldn't Adapt do this?
            best_intent['utterance'] = utt

    for idx, utt in enumerate(raw_utt):
        try:
            intents = [i for i in self.engine.determine_intent(
                utt, 100,
                include_tags=True,
                context_manager=self.context_manager)]
            if intents:
                take_best(intents[0], utt)
            # Also test the normalized version, but set the utterance to
            # the raw version so skill has access to original STT
            norm_intents = [i for i in self.engine.determine_intent(
                norm_utt[idx], 100,
                include_tags=True,
                context_manager=self.context_manager)]
            if norm_intents:
                take_best(norm_intents[0], utt)
        except Exception as e:
            # A misbehaving intent parser must not abort matching the
            # remaining utterances.
            LOG.exception(e)
    return best_intent
def handle_register_vocab(self, message):
    """Messagebus handler: register vocab (keyword or regex) with Adapt."""
    start_concept = message.data.get('start')
    end_concept = message.data.get('end')
    regex_str = message.data.get('regex')
    alias_of = message.data.get('alias_of')
    if regex_str:
        self.engine.register_regex_entity(regex_str)
    else:
        self.engine.register_entity(
            start_concept, end_concept, alias_of=alias_of)
    # Record for the vocab manifest reply.
    self.registered_vocab.append(message.data)

def handle_register_intent(self, message):
    """Messagebus handler: register an intent parser with Adapt."""
    intent = open_intent_envelope(message)
    self.engine.register_intent_parser(intent)

def handle_detach_intent(self, message):
    """Messagebus handler: remove one intent parser, by name."""
    intent_name = message.data.get('intent_name')
    new_parsers = [
        p for p in self.engine.intent_parsers if p.name != intent_name]
    self.engine.intent_parsers = new_parsers

def handle_detach_skill(self, message):
    """Messagebus handler: remove every intent parser of one skill."""
    skill_id = message.data.get('skill_id')
    new_parsers = [
        p for p in self.engine.intent_parsers if
        not p.name.startswith(skill_id)]
    self.engine.intent_parsers = new_parsers
def handle_add_context(self, message):
    """ Add context

    Args:
        message: data contains the 'context' item to add
                 optionally can include 'word' to be injected as
                 an alias for the context item.
    """
    entity = {'confidence': 1.0}
    context = message.data.get('context')
    word = message.data.get('word') or ''
    origin = message.data.get('origin') or ''
    # if not a string type try creating a string from it
    if not isinstance(word, str):
        word = str(word)
    entity['data'] = [(word, context)]
    entity['match'] = word
    entity['key'] = word
    entity['origin'] = origin
    self.context_manager.inject_context(entity)

def handle_remove_context(self, message):
    """ Remove specific context

    Args:
        message: data contains the 'context' item to remove
    """
    context = message.data.get('context')
    if context:
        self.context_manager.remove_context(context)

def handle_clear_context(self, message):
    """ Clears all keywords from context """
    self.context_manager.clear_context()
def handle_get_adapt(self, message):
    """Messagebus handler: reply with the Adapt match for an utterance.

    Args:
        message: bus message with 'utterance' and optional 'lang' data.
    """
    utterance = message.data["utterance"]
    lang = message.data.get("lang", "en-us")
    norm = normalize(utterance, lang, remove_articles=False)
    intent = self._adapt_intent_match([utterance], [norm], lang)
    self.bus.emit(message.reply("intent.service.adapt.reply",
                                {"intent": intent}))

def handle_get_intent(self, message):
    """Messagebus handler: reply with the intent that would be selected.

    Consults both Adapt and Padatious; Adapt wins unless it found
    nothing or Padatious is very confident (conf >= 0.95).
    """
    utterance = message.data["utterance"]
    lang = message.data.get("lang", "en-us")
    norm = normalize(utterance, lang, remove_articles=False)
    intent = self._adapt_intent_match([utterance], [norm], lang)
    # Adapt intent's handler is used unless
    # Padatious is REALLY sure it was directed at it instead.
    padatious_intent = PadatiousService.instance.calc_intent(utterance)
    if not padatious_intent and norm != utterance:
        padatious_intent = PadatiousService.instance.calc_intent(norm)
    # Bug fix: only dereference padatious_intent when it exists --
    # previously a total miss (no Adapt AND no Padatious match) raised
    # AttributeError on None.__dict__ instead of replying intent=None.
    if padatious_intent and (
            intent is None or padatious_intent.conf >= 0.95):
        intent = padatious_intent.__dict__
    self.bus.emit(message.reply("intent.service.intent.reply",
                                {"intent": intent}))
def handle_get_skills(self, message):
    """Messagebus handler: reply with the skill id -> name mapping."""
    self.bus.emit(message.reply("intent.service.skills.reply",
                                {"skills": self.skill_names}))

def handle_get_active_skills(self, message):
    """Messagebus handler: reply with ids of currently active skills."""
    self.bus.emit(message.reply("intent.service.active_skills.reply",
                                {"skills": [s[0] for s in
                                            self.active_skills]}))

def handle_manifest(self, message):
    """Messagebus handler: reply with all registered Adapt intents."""
    self.bus.emit(message.reply("intent.service.adapt.manifest",
                                {"intents": self.registered_intents}))

def handle_vocab_manifest(self, message):
    """Messagebus handler: reply with all registered vocabulary."""
    self.bus.emit(message.reply("intent.service.adapt.vocab.manifest",
                                {"vocab": self.registered_vocab}))
|
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Business Applications
# Copyright (c) 2011-2014 OpenERP S.A. <http://openerp.com>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import pytz
import re
import time
import openerp
import openerp.service.report
import uuid
from werkzeug.exceptions import BadRequest
from datetime import datetime, timedelta
from dateutil import parser
from dateutil import rrule
from dateutil.relativedelta import relativedelta
from openerp import tools, SUPERUSER_ID
from openerp.osv import fields, osv
from openerp.tools import DEFAULT_SERVER_DATE_FORMAT, DEFAULT_SERVER_DATETIME_FORMAT
from openerp.tools.translate import _
from openerp.http import request
from operator import itemgetter
import logging
_logger = logging.getLogger(__name__)
def calendar_id2real_id(calendar_id=None, with_date=False):
    """
    Convert a "virtual/recurring event id" (type string) into a real event id (type int).
    E.g. virtual/recurring event id is 4-20091201100000, so it will return 4.
    @param calendar_id: id of calendar
    @param with_date: if a value is passed to this param it will return dates based on value of withdate + calendar_id
    @return: real event id
    """
    # NOTE(review): Python 2 only -- relies on the `unicode` builtin.
    if calendar_id and isinstance(calendar_id, (str, unicode)):
        res = calendar_id.split('-')
        if len(res) >= 2:
            real_id = res[0]
            if with_date:
                # with_date doubles as the event duration in hours; the
                # virtual-id suffix encodes the occurrence start time.
                real_date = time.strftime("%Y-%m-%d %H:%M:%S", time.strptime(res[1], "%Y%m%d%H%M%S"))
                start = datetime.strptime(real_date, "%Y-%m-%d %H:%M:%S")
                end = start + timedelta(hours=with_date)
                return (int(real_id), real_date, end.strftime("%Y-%m-%d %H:%M:%S"))
            return int(real_id)
    # Falsy values pass through unchanged; numeric strings/ints coerce.
    return calendar_id and int(calendar_id) or calendar_id
def get_real_ids(ids):
    """Map a virtual calendar id, or a collection of them, to real ids.

    Accepts a single id (str/int/long) or a list/tuple of ids; any other
    input falls through and returns None implicitly (Python 2 code --
    relies on the `long` builtin).
    """
    if isinstance(ids, (str, int, long)):
        return calendar_id2real_id(ids)
    if isinstance(ids, (list, tuple)):
        return [calendar_id2real_id(virtual_id) for virtual_id in ids]
class calendar_attendee(osv.Model):
"""
Calendar Attendee Information
"""
_name = 'calendar.attendee'
_rec_name = 'cn'
_description = 'Attendee information'
def _compute_data(self, cr, uid, ids, name, arg, context=None):
    """
    Compute data on function fields for attendee values.
    @param ids: list of calendar attendee's IDs
    @param name: name of field
    @return: dictionary of form {id: {'field Name': value'}}
    """
    # Multi-field function: `name` arrives as a list; only its first
    # entry is computed per call.
    name = name[0]
    result = {}
    for attdata in self.browse(cr, uid, ids, context=context):
        # NOTE(review): `id` shadows the builtin of the same name.
        id = attdata.id
        result[id] = {}
        if name == 'cn':
            # Common name: prefer the linked partner's name, fall back
            # to the invitation email (empty string when absent).
            if attdata.partner_id:
                result[id][name] = attdata.partner_id.name or False
            else:
                result[id][name] = attdata.email or ''
        if name == 'event_date':
            result[id][name] = attdata.event_id.date
        if name == 'event_end_date':
            result[id][name] = attdata.event_id.date_deadline
    return result
# Possible RSVP states of an attendee's participation.
STATE_SELECTION = [
    ('needsAction', 'Needs Action'),
    ('tentative', 'Uncertain'),
    ('declined', 'Declined'),
    ('accepted', 'Accepted'),
]

# OpenERP model columns; 'cn', 'event_date' and 'event_end_date' are
# computed by _compute_data above.
_columns = {
    'state': fields.selection(STATE_SELECTION, 'Status', readonly=True, help="Status of the attendee's participation"),
    'cn': fields.function(_compute_data, string='Common name', type="char", multi='cn', store=True),
    'partner_id': fields.many2one('res.partner', 'Contact', readonly="True"),
    'email': fields.char('Email', help="Email of Invited Person"),
    'event_date': fields.function(_compute_data, string='Event Date', type="datetime", multi='event_date'),
    'event_end_date': fields.function(_compute_data, string='Event End Date', type="datetime", multi='event_end_date'),
    'availability': fields.selection([('free', 'Free'), ('busy', 'Busy')], 'Free/Busy', readonly="True"),
    'access_token': fields.char('Invitation Token'),
    'event_id': fields.many2one('calendar.event', 'Meeting linked'),
}
_defaults = {
    'state': 'needsAction',
}
def copy(self, cr, uid, id, default=None, context=None):
raise osv.except_osv(_('Warning!'), _('You cannot duplicate a calendar attendee.'))
def onchange_partner_id(self, cr, uid, ids, partner_id, context=None):
"""
Make entry on email and availability on change of partner_id field.
@param partner_id: changed value of partner id
"""
if not partner_id:
return {'value': {'email': ''}}
partner = self.pool['res.partner'].browse(cr, uid, partner_id, context=context)
return {'value': {'email': partner.email}}
def get_ics_file(self, cr, uid, event_obj, context=None):
"""
Returns iCalendar file for the event invitation.
@param event_obj: event object (browse record)
@return: .ics file content
"""
res = None
def ics_datetime(idate, short=False):
if idate:
return datetime.strptime(idate.split('.')[0], '%Y-%m-%d %H:%M:%S').replace(tzinfo=pytz.timezone('UTC'))
return False
try:
# FIXME: why isn't this in CalDAV?
import vobject
except ImportError:
return res
cal = vobject.iCalendar()
event = cal.add('vevent')
if not event_obj.date_deadline or not event_obj.date:
raise osv.except_osv(_('Warning!'), _("First you have to specify the date of the invitation."))
event.add('created').value = ics_datetime(time.strftime('%Y-%m-%d %H:%M:%S'))
event.add('dtstart').value = ics_datetime(event_obj.date)
event.add('dtend').value = ics_datetime(event_obj.date_deadline)
event.add('summary').value = event_obj.name
if event_obj.description:
event.add('description').value = event_obj.description
if event_obj.location:
event.add('location').value = event_obj.location
if event_obj.rrule:
event.add('rrule').value = event_obj.rrule
if event_obj.alarm_ids:
for alarm in event_obj.alarm_ids:
valarm = event.add('valarm')
interval = alarm.interval
duration = alarm.duration
trigger = valarm.add('TRIGGER')
trigger.params['related'] = ["START"]
if interval == 'days':
delta = timedelta(days=duration)
elif interval == 'hours':
delta = timedelta(hours=duration)
elif interval == 'minutes':
delta = timedelta(minutes=duration)
trigger.value = delta
valarm.add('DESCRIPTION').value = alarm.name or 'OpenERP'
for attendee in event_obj.attendee_ids:
attendee_add = event.add('attendee')
attendee_add.value = 'MAILTO:' + (attendee.email or '')
res = cal.serialize()
return res
def _send_mail_to_attendees(self, cr, uid, ids, email_from=tools.config.get('email_from', False), template_xmlid='calendar_template_meeting_invitation', context=None):
"""
Send mail for event invitation to event attendees.
@param email_from: email address for user sending the mail
"""
res = False
mail_ids = []
data_pool = self.pool['ir.model.data']
mailmess_pool = self.pool['mail.message']
mail_pool = self.pool['mail.mail']
template_pool = self.pool['email.template']
local_context = context.copy()
color = {
'needsAction': 'grey',
'accepted': 'green',
'tentative': '#FFFF00',
'declined': 'red'
}
if not isinstance(ids, (tuple, list)):
ids = [ids]
dummy, template_id = data_pool.get_object_reference(cr, uid, 'calendar', template_xmlid)
dummy, act_id = data_pool.get_object_reference(cr, uid, 'calendar', "view_calendar_event_calendar")
local_context.update({
'color': color,
'action_id': self.pool['ir.actions.act_window'].search(cr, uid, [('view_id', '=', act_id)], context=context)[0],
'dbname': cr.dbname,
'base_url': self.pool['ir.config_parameter'].get_param(cr, uid, 'web.base.url', default='http://localhost:8069', context=context)
})
for attendee in self.browse(cr, uid, ids, context=context):
if attendee.email and email_from and attendee.email != email_from:
ics_file = self.get_ics_file(cr, uid, attendee.event_id, context=context)
mail_id = template_pool.send_mail(cr, uid, template_id, attendee.id, context=local_context)
vals = {}
if ics_file:
vals['attachment_ids'] = [(0, 0, {'name': 'invitation.ics',
'datas_fname': 'invitation.ics',
'datas': str(ics_file).encode('base64')})]
vals['model'] = None # We don't want to have the mail in the tchatter while in queue!
the_mailmess = mail_pool.browse(cr, uid, mail_id, context=context).mail_message_id
mailmess_pool.write(cr, uid, [the_mailmess.id], vals, context=context)
mail_ids.append(mail_id)
if mail_ids:
res = mail_pool.send(cr, uid, mail_ids, context=context)
return res
def onchange_user_id(self, cr, uid, ids, user_id, *args, **argv):
"""
Make entry on email and availability on change of user_id field.
@param ids: list of attendee's IDs
@param user_id: changed value of User id
@return: dictionary of values which put value in email and availability fields
"""
if not user_id:
return {'value': {'email': ''}}
user = self.pool['res.users'].browse(cr, uid, user_id, *args)
return {'value': {'email': user.email, 'availability': user.availability}}
def do_tentative(self, cr, uid, ids, context=None, *args):
"""
Makes event invitation as Tentative.
@param ids: list of attendee's IDs
"""
return self.write(cr, uid, ids, {'state': 'tentative'}, context)
def do_accept(self, cr, uid, ids, context=None, *args):
"""
Marks event invitation as Accepted.
@param ids: list of attendee's IDs
"""
if context is None:
context = {}
meeting_obj = self.pool['calendar.event']
res = self.write(cr, uid, ids, {'state': 'accepted'}, context)
for attendee in self.browse(cr, uid, ids, context=context):
meeting_obj.message_post(cr, uid, attendee.event_id.id, body=_(("%s has accepted invitation") % (attendee.cn)), subtype="calendar.subtype_invitation", context=context)
return res
def do_decline(self, cr, uid, ids, context=None, *args):
"""
Marks event invitation as Declined.
@param ids: list of calendar attendee's IDs
"""
if context is None:
context = {}
meeting_obj = self.pool['calendar.event']
res = self.write(cr, uid, ids, {'state': 'declined'}, context)
for attendee in self.browse(cr, uid, ids, context=context):
meeting_obj.message_post(cr, uid, attendee.event_id.id, body=_(("%s has declined invitation") % (attendee.cn)), subtype="calendar.subtype_invitation", context=context)
return res
def create(self, cr, uid, vals, context=None):
if context is None:
context = {}
if not vals.get("email") and vals.get("cn"):
cnval = vals.get("cn").split(':')
email = filter(lambda x: x.__contains__('@'), cnval)
vals['email'] = email and email[0] or ''
vals['cn'] = vals.get("cn")
res = super(calendar_attendee, self).create(cr, uid, vals, context=context)
return res
class res_partner(osv.Model):
    _inherit = 'res.partner'
    _columns = {
        'calendar_last_notif_ack': fields.datetime('Last notification marked as read from base Calendar'),
    }
    def get_attendee_detail(self, cr, uid, ids, meeting_id, context=None):
        """Return name_get tuples for the given partners, extended with the
        partner's attendee state when a meeting is supplied."""
        meeting = False
        if meeting_id:
            meeting = self.pool['calendar.event'].browse(cr, uid, get_real_ids(meeting_id), context=context)
        details = []
        for partner in self.browse(cr, uid, ids, context=context):
            entry = self.name_get(cr, uid, [partner.id], context)[0]
            if meeting:
                # Pick the attendee record matching this partner (last one
                # wins if, improbably, several exist).
                states = [att.state for att in meeting.attendee_ids
                          if att.partner_id.id == partner.id]
                if states:
                    entry = (entry[0], entry[1], states[-1])
            details.append(entry)
        return details
    def calendar_last_notif_ack(self, cr, uid, context=None):
        """Stamp the current user's partner with 'now' as the moment its
        calendar notifications were last acknowledged."""
        user = self.pool['res.users'].browse(cr, uid, uid, context=context)
        self.write(cr, uid, user.partner_id.id, {'calendar_last_notif_ack': datetime.now()}, context=context)
        return
class calendar_alarm_manager(osv.AbstractModel):
    # Abstract helper model: scans upcoming events for alarms to fire,
    # either as e-mails (cron-driven) or browser notifications (ajax-polled).
    _name = 'calendar.alarm_manager'
    def get_next_potential_limit_alarm(self, cr, uid, seconds, notif=True, mail=True, partner_id=None, context=None):
        """Return, per event id, the window in which any of its alarms could
        fire within the next/previous `seconds`, using the min/max alarm
        delays aggregated in SQL.

        @param seconds: look-ahead/behind window in seconds
        @param notif/mail: which alarm types to consider
        @param partner_id: optional filter to events the partner attends
        @return: {event_id: {first_alarm, last_alarm, first_meeting,
                             last_meeting, min_duration, max_duration, rrule}}
        """
        res = {}
        # first_alarm = event start minus the largest alarm delay;
        # last_alarm uses the recurrency end date for recurring events.
        base_request = """
                    SELECT
                        cal.id,
                        cal.date - interval '1' minute  * calcul_delta.max_delta AS first_alarm,
                        CASE
                            WHEN cal.recurrency THEN cal.end_date - interval '1' minute  * calcul_delta.min_delta
                            ELSE cal.date_deadline - interval '1' minute  * calcul_delta.min_delta
                        END as last_alarm,
                        cal.date as first_event_date,
                        CASE
                            WHEN cal.recurrency THEN cal.end_date
                            ELSE cal.date_deadline
                        END as last_event_date,
                        calcul_delta.min_delta,
                        calcul_delta.max_delta,
                        cal.rrule AS rule
                    FROM
                        calendar_event AS cal
                        RIGHT JOIN
                            (
                                SELECT
                                    rel.calendar_event_id, max(alarm.duration_minutes) AS max_delta,min(alarm.duration_minutes) AS min_delta
                                FROM
                                    calendar_alarm_calendar_event_rel AS rel
                                        LEFT JOIN calendar_alarm AS alarm ON alarm.id = rel.calendar_alarm_id
                                WHERE alarm.type in %s
                                GROUP BY rel.calendar_event_id
                            ) AS calcul_delta ON calcul_delta.calendar_event_id = cal.id
             """
        filter_user = """
                RIGHT JOIN calendar_event_res_partner_rel AS part_rel ON part_rel.calendar_event_id = cal.id
                    AND part_rel.res_partner_id = %s
        """
        #Add filter on type
        type_to_read = ()
        if notif:
            type_to_read += ('notification',)
        if mail:
            type_to_read += ('email',)
        tuple_params = (type_to_read,)
        #ADD FILTER ON PARTNER_ID
        if partner_id:
            base_request += filter_user
            tuple_params += (partner_id, )
        #Add filter on hours
        tuple_params += (seconds, seconds,)
        # Outer query windows the candidate events around "now" (UTC);
        # %%s escapes survive the % base_request interpolation.
        cr.execute("""SELECT *
                        FROM ( %s ) AS ALL_EVENTS
                       WHERE ALL_EVENTS.first_alarm < (now() at time zone 'utc' + interval '%%s' second )
                         AND ALL_EVENTS.last_alarm > (now() at time zone 'utc' - interval '%%s' second )
                   """ % base_request, tuple_params)
        for event_id, first_alarm, last_alarm, first_meeting, last_meeting, min_duration, max_duration, rule in cr.fetchall():
            res[event_id] = {
                'event_id': event_id,
                'first_alarm': first_alarm,
                'last_alarm': last_alarm,
                'first_meeting': first_meeting,
                'last_meeting': last_meeting,
                'min_duration': min_duration,
                'max_duration': max_duration,
                'rrule': rule
            }
        return res
    def do_check_alarm_for_one_date(self, cr, uid, one_date, event, event_maxdelta, in_the_next_X_seconds, after=False, notif=True, mail=True, context=None):
        """List the alarms of `event` that should fire for the occurrence at
        `one_date` within the next `in_the_next_X_seconds` seconds.

        @param one_date: naive datetime of the occurrence start
        @param event_maxdelta: largest alarm delay (minutes) for the event
        @param after: server datetime string; alarms at or before it are
            skipped (already acknowledged)
        @return: list of {alarm_id, event_id, notify_at} dicts
        """
        res = []
        alarm_type = []
        if notif:
            alarm_type.append('notification')
        if mail:
            alarm_type.append('email')
        if one_date - timedelta(minutes=event_maxdelta) < datetime.now() + timedelta(seconds=in_the_next_X_seconds):  # if an alarm is possible for this date
            for alarm in event.alarm_ids:
                if alarm.type in alarm_type and \
                    one_date - timedelta(minutes=alarm.duration_minutes) < datetime.now() + timedelta(seconds=in_the_next_X_seconds) and \
                        (not after or one_date - timedelta(minutes=alarm.duration_minutes) > datetime.strptime(after.split('.')[0], "%Y-%m-%d %H:%M:%S")):
                    alert = {
                        'alarm_id': alarm.id,
                        'event_id': event.id,
                        'notify_at': one_date - timedelta(minutes=alarm.duration_minutes),
                    }
                    res.append(alert)
        return res
    def get_next_mail(self, cr, uid, context=None):
        """Cron entry point: send reminder e-mails for alarms falling within
        one cron period from now."""
        # The look-ahead window equals the cron's own period, so each run
        # covers exactly the span until the next run.
        cron = self.pool.get('ir.cron').search(cr, uid, [('model', 'ilike', self._name)], context=context)
        if cron and len(cron) == 1:
            cron = self.pool.get('ir.cron').browse(cr, uid, cron[0], context=context)
        else:
            # NOTE(review): raising a plain string is invalid in modern
            # Python (TypeError); should be `raise Exception(...)`.
            raise ("Cron for " + self._name + " not identified :( !")
        if cron.interval_type == "weeks":
            cron_interval = cron.interval_number * 7 * 24 * 60 * 60
        elif cron.interval_type == "days":
            cron_interval = cron.interval_number * 24 * 60 * 60
        elif cron.interval_type == "hours":
            cron_interval = cron.interval_number * 60 * 60
        elif cron.interval_type == "minutes":
            cron_interval = cron.interval_number * 60
        elif cron.interval_type == "seconds":
            cron_interval = cron.interval_number
        # NOTE(review): for any other interval_type (e.g. 'months'),
        # `cron_interval` is unbound and the next line raises NameError
        # before the intended error message.
        if not cron_interval:
            raise ("Cron delay for " + self._name + " can not be calculated :( !")
        all_events = self.get_next_potential_limit_alarm(cr, uid, cron_interval, notif=False, context=context)
        for event in all_events:  # .values()
            max_delta = all_events[event]['max_duration']
            curEvent = self.pool.get('calendar.event').browse(cr, uid, event, context=context)
            if curEvent.recurrency:
                bFound = False
                LastFound = False
                for one_date in self.pool.get('calendar.event').get_recurrent_date_by_event(cr, uid, curEvent, context=context):
                    in_date_format = one_date.replace(tzinfo=None)
                    LastFound = self.do_check_alarm_for_one_date(cr, uid, in_date_format, curEvent, max_delta, cron_interval, notif=False, context=context)
                    if LastFound:
                        for alert in LastFound:
                            self.do_mail_reminder(cr, uid, alert, context=context)
                        if not bFound:  # if it's the first alarm for this recurrent event
                            bFound = True
                    if bFound and not LastFound:  # if the precedent event had an alarm but not this one, we can stop the search for this event
                        break
            else:
                in_date_format = datetime.strptime(curEvent.date, '%Y-%m-%d %H:%M:%S')
                LastFound = self.do_check_alarm_for_one_date(cr, uid, in_date_format, curEvent, max_delta, cron_interval, notif=False, context=context)
                if LastFound:
                    for alert in LastFound:
                        self.do_mail_reminder(cr, uid, alert, context=context)
    def get_next_notif(self, cr, uid, context=None):
        """Ajax polling entry point: build the browser notifications due for
        the current user within the polling period."""
        ajax_check_every_seconds = 300
        partner = self.pool.get('res.users').browse(cr, uid, uid, context=context).partner_id
        all_notif = []
        if not partner:
            return []
        all_events = self.get_next_potential_limit_alarm(cr, uid, ajax_check_every_seconds, partner_id=partner.id, mail=False, context=context)
        for event in all_events:  # .values()
            max_delta = all_events[event]['max_duration']
            curEvent = self.pool.get('calendar.event').browse(cr, uid, event, context=context)
            if curEvent.recurrency:
                bFound = False
                LastFound = False
                for one_date in self.pool.get("calendar.event").get_recurrent_date_by_event(cr, uid, curEvent, context=context):
                    in_date_format = one_date.replace(tzinfo=None)
                    LastFound = self.do_check_alarm_for_one_date(cr, uid, in_date_format, curEvent, max_delta, ajax_check_every_seconds, after=partner.calendar_last_notif_ack, mail=False, context=context)
                    if LastFound:
                        for alert in LastFound:
                            all_notif.append(self.do_notif_reminder(cr, uid, alert, context=context))
                        if not bFound:  # if it's the first alarm for this recurrent event
                            bFound = True
                    if bFound and not LastFound:  # if the precedent event had alarm but not this one, we can stop the search fot this event
                        break
            else:
                in_date_format = datetime.strptime(curEvent.date, '%Y-%m-%d %H:%M:%S')
                LastFound = self.do_check_alarm_for_one_date(cr, uid, in_date_format, curEvent, max_delta, ajax_check_every_seconds, partner.calendar_last_notif_ack, mail=False, context=context)
                if LastFound:
                    for alert in LastFound:
                        all_notif.append(self.do_notif_reminder(cr, uid, alert, context=context))
        return all_notif
    def do_mail_reminder(self, cr, uid, alert, context=None):
        """Send the reminder e-mail for one alert dict (alarm_id/event_id)
        to every attendee of the event."""
        if context is None:
            context = {}
        res = False
        event = self.pool['calendar.event'].browse(cr, uid, alert['event_id'], context=context)
        alarm = self.pool['calendar.alarm'].browse(cr, uid, alert['alarm_id'], context=context)
        if alarm.type == 'email':
            res = self.pool['calendar.attendee']._send_mail_to_attendees(cr, uid, [att.id for att in event.attendee_ids], template_xmlid='calendar_template_meeting_reminder', context=context)
        return res
    def do_notif_reminder(self, cr, uid, alert, context=None):
        """Build the client-side notification payload for one alert dict.
        Returns None (implicitly) for non-'notification' alarm types."""
        alarm = self.pool['calendar.alarm'].browse(cr, uid, alert['alarm_id'], context=context)
        event = self.pool['calendar.event'].browse(cr, uid, alert['event_id'], context=context)
        if alarm.type == 'notification':
            message = event.display_time
            # Remaining seconds until notify_at; the client uses it as a timer.
            delta = alert['notify_at'] - datetime.now()
            delta = delta.seconds + delta.days * 3600 * 24
            return {
                'event_id': event.id,
                'title': event.name,
                'message': message,
                'timer': delta,
                'notify_at': alert['notify_at'].strftime("%Y-%m-%d %H:%M:%S"),
            }
class calendar_alarm(osv.Model):
    _name = 'calendar.alarm'
    _description = 'Event alarm'
    def _get_duration(self, cr, uid, ids, field_name, arg, context=None):
        """Express each alarm's delay in minutes from (duration, interval)."""
        minutes_per_unit = {'minutes': 1, 'hours': 60, 'days': 60 * 24}
        res = {}
        for alarm in self.browse(cr, uid, ids, context=context):
            # Unknown units map to 0 minutes, as before.
            res[alarm.id] = alarm.duration * minutes_per_unit.get(alarm.interval, 0)
        return res
    _columns = {
        'name': fields.char('Name', required=True),
        'type': fields.selection([('notification', 'Notification'), ('email', 'Email')], 'Type', required=True),
        'duration': fields.integer('Amount', required=True),
        'interval': fields.selection([('minutes', 'Minutes'), ('hours', 'Hours'), ('days', 'Days')], 'Unit', required=True),
        'duration_minutes': fields.function(_get_duration, type='integer', string='duration_minutes', store=True),
    }
    _defaults = {
        'type': 'notification',
        'duration': 1,
        'interval': 'hours',
    }
class ir_values(osv.Model):
    _inherit = 'ir.values'
    def _convert_model_refs(self, models):
        """Map (model, virtual_id) pairs to (model, real_id); leave plain
        model names untouched. Exact `type` check kept on purpose."""
        converted = []
        for entry in models:
            if type(entry) in (list, tuple):
                converted.append((entry[0], calendar_id2real_id(entry[1])))
            else:
                converted.append(entry)
        return converted
    def set(self, cr, uid, key, key2, name, models, value, replace=True, isobject=False, meta=False, preserve_user=False, company=False):
        """Store default/action values, translating virtual calendar ids."""
        new_model = self._convert_model_refs(models)
        return super(ir_values, self).set(cr, uid, key, key2, name, new_model,
                                          value, replace, isobject, meta, preserve_user, company)
    def get(self, cr, uid, key, key2, models, meta=False, context=None, res_id_req=False, without_user=True, key2_req=True):
        """Fetch default/action values, translating virtual calendar ids."""
        if context is None:
            context = {}
        new_model = self._convert_model_refs(models)
        return super(ir_values, self).get(cr, uid, key, key2, new_model,
                                          meta, context, res_id_req, without_user, key2_req)
class ir_model(osv.Model):
    _inherit = 'ir.model'
    def read(self, cr, uid, ids, fields=None, context=None, load='_classic_read'):
        """Read records, mapping virtual calendar ids in the result back to
        real ids; mirrors the caller's single-id vs list-of-ids shape."""
        if context is None:
            context = {}
        single = isinstance(ids, (str, int, long))
        records = super(ir_model, self).read(cr, uid, [ids] if single else ids,
                                             fields=fields, context=context, load=load)
        for record in records or []:
            record['id'] = calendar_id2real_id(record['id'])
        if single:
            return records[0] or records
        return records
original_exp_report = openerp.service.report.exp_report
def exp_report(db, uid, object, ids, data=None, context=None):
    """
    Export Report

    Wrapper around the original report service entry point that converts
    virtual calendar ids (recurrent occurrences) back to real database ids
    before generating the report.
    @param object: report service name
    @param ids: record ids (possibly virtual "realid-YYYYMMDDHHMMSS" ones)
    """
    if object == 'printscreen.list':
        # Screen dumps carry plain ids: delegate directly. The original code
        # was missing this `return`, so the report was generated twice
        # (once here and once below with converted ids).
        return original_exp_report(db, uid, object, ids, data, context)
    new_ids = []
    for id in ids:
        new_ids.append(calendar_id2real_id(id))
    # `data` defaults to None; guard before dereferencing it.
    if data and data.get('id', False):
        data['id'] = calendar_id2real_id(data['id'])
    return original_exp_report(db, uid, object, new_ids, data, context)
openerp.service.report.exp_report = exp_report
class calendar_event_type(osv.Model):
    # Simple configurable tag ("Meeting Type") linked to events through the
    # many2many `categ_ids` field of calendar.event.
    _name = 'calendar.event.type'
    _description = 'Meeting Type'
    _columns = {
        'name': fields.char('Name', required=True, translate=True),
    }
class calendar_event(osv.Model):
    """ Model for Calendar Event """
    # Inherits mail.thread (chatter/followers) and ir.needaction_mixin;
    # default listing is newest first.
    _name = 'calendar.event'
    _description = "Meeting"
    _order = "id desc"
    _inherit = ["mail.thread", "ir.needaction_mixin"]
def do_run_scheduler(self, cr, uid, id, context=None):
self.pool['calendar.alarm_manager'].do_run_scheduler(cr, uid, context=context)
    def get_recurrent_date_by_event(self, cr, uid, event, context=None):
        """Get recurrent dates based on Rule string and all event where recurrent_id is child

        @param event: browse record of the recurrent event
        @return: list of UTC-aware datetimes, one per occurrence, with the
            dates of detached child occurrences excluded
        """
        def todate(date):
            # Keep only the digits of the stored date string before parsing.
            val = parser.parse(''.join((re.compile('\d')).findall(date)))
            ## Dates are localized to saved timezone if any, else current timezone.
            if not val.tzinfo:
                val = pytz.UTC.localize(val)
            return val.astimezone(timezone)
        # NOTE(review): context.get('tz') raises AttributeError when
        # context is None — callers must pass a dict.
        timezone = pytz.timezone(event.vtimezone or context.get('tz') or 'UTC')
        startdate = pytz.UTC.localize(datetime.strptime(event.date, "%Y-%m-%d %H:%M:%S"))  # Add "+hh:mm" timezone
        # NOTE(review): dead branch — strptime either returns a datetime or
        # raises, so `startdate` is always truthy here.
        if not startdate:
            startdate = datetime.now()
        ## Convert the start date to saved timezone (or context tz) as it'll
        ## define the correct hour/day asked by the user to repeat for recurrence.
        startdate = startdate.astimezone(timezone)  # transform "+hh:mm" timezone
        rset1 = rrule.rrulestr(str(event.rrule), dtstart=startdate, forceset=True)
        # Occurrences detached into their own records (recurrent_id children,
        # active or not) are excluded from the generated set.
        ids_depending = self.search(cr, uid, [('recurrent_id', '=', event.id), '|', ('active', '=', False), ('active', '=', True)], context=context)
        all_events = self.browse(cr, uid, ids_depending, context=context)
        for ev in all_events:
            rset1._exdate.append(todate(ev.recurrent_id_date))
        return [d.astimezone(pytz.UTC) for d in rset1]
def _get_recurrency_end_date(self, data, context=None):
if not data.get('recurrency'):
return False
end_type = data.get('end_type')
end_date = data.get('end_date')
if end_type == 'count' and all(data.get(key) for key in ['count', 'rrule_type', 'date_deadline']):
count = data['count'] + 1
delay, mult = {
'daily': ('days', 1),
'weekly': ('days', 7),
'monthly': ('months', 1),
'yearly': ('years', 1),
}[data['rrule_type']]
deadline = datetime.strptime(data['date_deadline'], tools.DEFAULT_SERVER_DATETIME_FORMAT)
return deadline + relativedelta(**{delay: count * mult})
return end_date
def _find_my_attendee(self, cr, uid, meeting_ids, context=None):
"""
Return the first attendee where the user connected has been invited from all the meeting_ids in parameters
"""
user = self.pool['res.users'].browse(cr, uid, uid, context=context)
for meeting_id in meeting_ids:
for attendee in self.browse(cr, uid, meeting_id, context).attendee_ids:
if user.partner_id.id == attendee.partner_id.id:
return attendee
return False
    def _get_display_time(self, cr, uid, meeting_id, context=None):
        """
        Return date and time (from to from) based on duration with timezone in string :
        eg.
        1) if user add duration for 2 hours, return : August-23-2013 at (04-30 To 06-30) (Europe/Brussels)
        2) if event all day ,return : AllDay, July-31-2013
        """
        if context is None:
            context = {}
        tz = context.get('tz', False)
        if not tz:  # tz can have a value False, so dont do it in the default value of get !
            # NOTE(review): inconsistent type — `tz` is a string when taken
            # from the context but a pytz tzinfo object here; it is only
            # interpolated into the label via %s, so both "work".
            tz = pytz.timezone('UTC')
        meeting = self.browse(cr, uid, meeting_id, context=context)
        # context_timestamp converts the stored UTC datetimes to the user's
        # timezone for display.
        date = fields.datetime.context_timestamp(cr, uid, datetime.strptime(meeting.date, tools.DEFAULT_SERVER_DATETIME_FORMAT), context=context)
        date_deadline = fields.datetime.context_timestamp(cr, uid, datetime.strptime(meeting.date_deadline, tools.DEFAULT_SERVER_DATETIME_FORMAT), context=context)
        event_date = date.strftime('%B-%d-%Y')
        display_time = date.strftime('%H-%M')
        if meeting.allday:
            time = _("AllDay , %s") % (event_date)
        elif meeting.duration < 24:
            # Same-day event: show start/end hours computed from duration.
            duration = date + timedelta(hours=meeting.duration)
            time = ("%s at (%s To %s) (%s)") % (event_date, display_time, duration.strftime('%H-%M'), tz)
        else:
            # Multi-day event: show full start and deadline dates.
            time = ("%s at %s To\n %s at %s (%s)") % (event_date, display_time, date_deadline.strftime('%B-%d-%Y'), date_deadline.strftime('%H-%M'), tz)
        return time
def _compute(self, cr, uid, ids, fields, arg, context=None):
res = {}
for meeting_id in ids:
res[meeting_id] = {}
attendee = self._find_my_attendee(cr, uid, [meeting_id], context)
for field in fields:
if field == 'is_attendee':
res[meeting_id][field] = True if attendee else False
elif field == 'attendee_status':
res[meeting_id][field] = attendee.state if attendee else 'needsAction'
elif field == 'display_time':
res[meeting_id][field] = self._get_display_time(cr, uid, meeting_id, context=context)
return res
    def _get_rulestring(self, cr, uid, ids, name, arg, context=None):
        """
        Gets Recurrence rule string according to value type RECUR of iCalendar from the values given.
        @return: dictionary of rrule value.
        """
        result = {}
        if not isinstance(ids, list):
            ids = [ids]
        for id in ids:
            #read these fields as SUPERUSER because if the record is private a normal search could return False and raise an error
            data = self.browse(cr, SUPERUSER_ID, id, context=context)
            # Validate recurrence parameters before building the string.
            if data.interval and data.interval < 0:
                raise osv.except_osv(_('Warning!'), _('Interval cannot be negative.'))
            if data.count and data.count <= 0:
                raise osv.except_osv(_('Warning!'), _('Count cannot be negative or 0.'))
            # Re-read as the real uid; compute_rule_string works on the dict.
            data = self.read(cr, uid, id, ['id', 'byday', 'recurrency', 'month_list', 'end_date', 'rrule_type', 'month_by', 'interval', 'count', 'end_type', 'mo', 'tu', 'we', 'th', 'fr', 'sa', 'su', 'day', 'week_list'], context=context)
            event = data['id']
            if data['recurrency']:
                result[event] = self.compute_rule_string(data)
            else:
                result[event] = ""
        return result
# retro compatibility function
def _rrule_write(self, cr, uid, ids, field_name, field_value, args, context=None):
return self._set_rulestring(self, cr, uid, ids, field_name, field_value, args, context=context)
    def _set_rulestring(self, cr, uid, ids, field_name, field_value, args, context=None):
        """Inverse of the `rrule` function field: parse an iCalendar RECUR
        string and write the individual recurrence fields back.

        @param field_value: the rrule string (falsy clears the recurrency)
        """
        if not isinstance(ids, list):
            ids = [ids]
        # Start from a blank rule template and overlay the parsed values.
        data = self._get_empty_rrule_data()
        if field_value:
            data['recurrency'] = True
            for event in self.browse(cr, uid, ids, context=context):
                rdate = event.date
                update_data = self._parse_rrule(field_value, dict(data), rdate)
                data.update(update_data)
        self.write(cr, uid, ids, data, context=context)
        return True
def _tz_get(self, cr, uid, context=None):
return [(x.lower(), x) for x in pytz.all_timezones]
_track = {
'location': {
'calendar.subtype_invitation': lambda self, cr, uid, obj, ctx=None: True,
},
'date': {
'calendar.subtype_invitation': lambda self, cr, uid, obj, ctx=None: True,
},
}
_columns = {
'id': fields.integer('ID', readonly=True),
'state': fields.selection([('draft', 'Unconfirmed'), ('open', 'Confirmed')], string='Status', readonly=True, track_visibility='onchange'),
'name': fields.char('Meeting Subject', required=True, states={'done': [('readonly', True)]}),
'is_attendee': fields.function(_compute, string='Attendee', type="boolean", multi='attendee'),
'attendee_status': fields.function(_compute, string='Attendee Status', type="selection", selection=calendar_attendee.STATE_SELECTION, multi='attendee'),
'display_time': fields.function(_compute, string='Event Time', type="char", multi='attendee'),
'date': fields.datetime('Date', states={'done': [('readonly', True)]}, required=True, track_visibility='onchange'),
'date_deadline': fields.datetime('End Date', states={'done': [('readonly', True)]}, required=True,),
'duration': fields.float('Duration', states={'done': [('readonly', True)]}),
'description': fields.text('Description', states={'done': [('readonly', True)]}),
'class': fields.selection([('public', 'Public'), ('private', 'Private'), ('confidential', 'Public for Employees')], 'Privacy', states={'done': [('readonly', True)]}),
'location': fields.char('Location', help="Location of Event", track_visibility='onchange', states={'done': [('readonly', True)]}),
'show_as': fields.selection([('free', 'Free'), ('busy', 'Busy')], 'Show Time as', states={'done': [('readonly', True)]}),
'rrule': fields.function(_get_rulestring, type='char', fnct_inv=_set_rulestring, store=True, string='Recurrent Rule'),
'rrule_type': fields.selection([('daily', 'Day(s)'), ('weekly', 'Week(s)'), ('monthly', 'Month(s)'), ('yearly', 'Year(s)')], 'Recurrency', states={'done': [('readonly', True)]}, help="Let the event automatically repeat at that interval"),
'recurrency': fields.boolean('Recurrent', help="Recurrent Meeting"),
'recurrent_id': fields.integer('Recurrent ID'),
'recurrent_id_date': fields.datetime('Recurrent ID date'),
'vtimezone': fields.selection(_tz_get, string='Timezone'),
'end_type': fields.selection([('count', 'Number of repetitions'), ('end_date', 'End date')], 'Recurrence Termination'),
'interval': fields.integer('Repeat Every', help="Repeat every (Days/Week/Month/Year)"),
'count': fields.integer('Repeat', help="Repeat x times"),
'mo': fields.boolean('Mon'),
'tu': fields.boolean('Tue'),
'we': fields.boolean('Wed'),
'th': fields.boolean('Thu'),
'fr': fields.boolean('Fri'),
'sa': fields.boolean('Sat'),
'su': fields.boolean('Sun'),
'month_by': fields.selection([('date', 'Date of month'), ('day', 'Day of month')], 'Option', oldname='select1'),
'day': fields.integer('Date of month'),
'week_list': fields.selection([('MO', 'Monday'), ('TU', 'Tuesday'), ('WE', 'Wednesday'), ('TH', 'Thursday'), ('FR', 'Friday'), ('SA', 'Saturday'), ('SU', 'Sunday')], 'Weekday'),
'byday': fields.selection([('1', 'First'), ('2', 'Second'), ('3', 'Third'), ('4', 'Fourth'), ('5', 'Fifth'), ('-1', 'Last')], 'By day'),
'end_date': fields.date('Repeat Until'),
'allday': fields.boolean('All Day', states={'done': [('readonly', True)]}),
'user_id': fields.many2one('res.users', 'Responsible', states={'done': [('readonly', True)]}),
'color_partner_id': fields.related('user_id', 'partner_id', 'id', type="integer", string="colorize", store=False), # Color of creator
'active': fields.boolean('Active', help="If the active field is set to true, it will allow you to hide the event alarm information without removing it."),
'categ_ids': fields.many2many('calendar.event.type', 'meeting_category_rel', 'event_id', 'type_id', 'Tags'),
'attendee_ids': fields.one2many('calendar.attendee', 'event_id', 'Attendees', ondelete='cascade'),
'partner_ids': fields.many2many('res.partner', string='Attendees', states={'done': [('readonly', True)]}),
'alarm_ids': fields.many2many('calendar.alarm', string='Reminders', ondelete="restrict"),
}
_defaults = {
'end_type': 'count',
'count': 1,
'rrule_type': False,
'state': 'draft',
'class': 'public',
'show_as': 'busy',
'month_by': 'date',
'interval': 1,
'active': 1,
'user_id': lambda self, cr, uid, ctx: uid,
'partner_ids': lambda self, cr, uid, ctx: [self.pool['res.users'].browse(cr, uid, [uid], context=ctx)[0].partner_id.id]
}
def _check_closing_date(self, cr, uid, ids, context=None):
for event in self.browse(cr, uid, ids, context=context):
if event.date_deadline < event.date:
return False
return True
_constraints = [
(_check_closing_date, 'Error ! End date cannot be set before start date.', ['date_deadline']),
]
    def onchange_dates(self, cr, uid, ids, start_date, duration=False, end_date=False, allday=False, context=None):
        """Returns duration and/or end date based on values passed
        @param ids: List of calendar event's IDs.
        @param start_date: server-format datetime string
        @param duration: hours, as float
        @return: {'value': {...}} onchange dict with recomputed
            duration/date/date_deadline
        """
        if context is None:
            context = {}
        value = {}
        if not start_date:
            return value
        if not end_date and not duration:
            # Neither given: default to a one-hour meeting.
            duration = 1.00
            value['duration'] = duration
        if allday:  # For all day event
            # Round-trip through the user's timezone so the stored UTC value
            # corresponds to the user's midnight, then force 24h duration.
            start = datetime.strptime(start_date, "%Y-%m-%d %H:%M:%S")
            user = self.pool['res.users'].browse(cr, uid, uid)
            tz = pytz.timezone(user.tz) if user.tz else pytz.utc
            start = pytz.utc.localize(start).astimezone(tz)     # convert start in user's timezone
            start = start.astimezone(pytz.utc)                  # convert start back to utc
            value['duration'] = 24.0
            value['date'] = datetime.strftime(start, "%Y-%m-%d %H:%M:%S")
        else:
            start = datetime.strptime(start_date, "%Y-%m-%d %H:%M:%S")
            if end_date and not duration:
                # Derive duration from the given bounds.
                end = datetime.strptime(end_date, "%Y-%m-%d %H:%M:%S")
                diff = end - start
                duration = float(diff.days) * 24 + (float(diff.seconds) / 3600)
                value['duration'] = round(duration, 2)
            elif not end_date:
                # Derive the deadline from start + duration.
                end = start + timedelta(hours=duration)
                value['date_deadline'] = end.strftime("%Y-%m-%d %H:%M:%S")
            elif end_date and duration and not allday:
                # we have both, keep them synchronized:
                # set duration based on end_date (arbitrary decision: this avoid
                # getting dates like 06:31:48 instead of 06:32:00)
                end = datetime.strptime(end_date, "%Y-%m-%d %H:%M:%S")
                diff = end - start
                duration = float(diff.days) * 24 + (float(diff.seconds) / 3600)
                value['duration'] = round(duration, 2)
        return {'value': value}
def new_invitation_token(self, cr, uid, record, partner_id):
return uuid.uuid4().hex
    def create_attendees(self, cr, uid, ids, context):
        """Synchronize calendar.attendee records with each event's partner_ids.

        Creates an attendee (with invitation token and e-mail) for every
        partner not yet invited, auto-accepts the current user, mails the
        others, subscribes new partners to the chatter, and removes
        attendees whose partner left partner_ids.
        @return: {event_id: {'new_attendee_ids', 'old_attendee_ids',
                             'removed_attendee_ids'}}
        """
        user_obj = self.pool['res.users']
        current_user = user_obj.browse(cr, uid, uid, context=context)
        res = {}
        for event in self.browse(cr, uid, ids, context):
            attendees = {}
            for att in event.attendee_ids:
                attendees[att.partner_id.id] = True
            new_attendees = []
            new_att_partner_ids = []
            for partner in event.partner_ids:
                if partner.id in attendees:
                    continue
                access_token = self.new_invitation_token(cr, uid, event, partner.id)
                values = {
                    'partner_id': partner.id,
                    'event_id': event.id,
                    'access_token': access_token,
                    'email': partner.email,
                }
                # The organizer's own invitation is accepted implicitly.
                if partner.id == current_user.partner_id.id:
                    values['state'] = 'accepted'
                att_id = self.pool['calendar.attendee'].create(cr, uid, values, context=context)
                new_attendees.append(att_id)
                new_att_partner_ids.append(partner.id)
                # Don't e-mail the user their own invitation.
                if not current_user.email or current_user.email != partner.email:
                    mail_from = current_user.email or tools.config.get('email_from', False)
                    if self.pool['calendar.attendee']._send_mail_to_attendees(cr, uid, att_id, email_from=mail_from, context=context):
                        self.message_post(cr, uid, event.id, body=_("An invitation email has been sent to attendee %s") % (partner.name,), subtype="calendar.subtype_invitation", context=context)
            if new_attendees:
                self.write(cr, uid, [event.id], {'attendee_ids': [(4, att) for att in new_attendees]}, context=context)
            if new_att_partner_ids:
                self.message_subscribe(cr, uid, [event.id], new_att_partner_ids, context=context)
            # We remove old attendees who are not in partner_ids now.
            all_partner_ids = [part.id for part in event.partner_ids]
            all_part_attendee_ids = [att.partner_id.id for att in event.attendee_ids]
            all_attendee_ids = [att.id for att in event.attendee_ids]
            # NOTE(review): py2 `map` used purely to turn the set into a list.
            partner_ids_to_remove = map(lambda x: x, set(all_part_attendee_ids + new_att_partner_ids) - set(all_partner_ids))
            attendee_ids_to_remove = []
            if partner_ids_to_remove:
                attendee_ids_to_remove = self.pool["calendar.attendee"].search(cr, uid, [('partner_id.id', 'in', partner_ids_to_remove), ('event_id.id', '=', event.id)], context=context)
                if attendee_ids_to_remove:
                    self.pool['calendar.attendee'].unlink(cr, uid, attendee_ids_to_remove, context)
            res[event.id] = {
                'new_attendee_ids': new_attendees,
                'old_attendee_ids': all_attendee_ids,
                'removed_attendee_ids': attendee_ids_to_remove
            }
        return res
def get_search_fields(self, browse_event, order_fields, r_date=None):
    """Build the per-field sort keys used to order (virtual) events.

    For the 'id' field of a recurring occurrence the occurrence date is
    appended ("<id>-<YYYYMMDDHHMMSS>") so occurrences sort chronologically.
    Foreign-key values (browse_record) are replaced by their display name
    so sorting happens on the human-readable label.
    """
    sort_fields = {}
    for field_name in order_fields:
        if field_name == 'id' and r_date:
            sort_fields[field_name] = '%s-%s' % (browse_event[field_name], r_date.strftime("%Y%m%d%H%M%S"))
            continue
        value = browse_event[field_name]
        sort_fields[field_name] = value
        # If we sort on a FK we obtain a browse_record, so sort on its name_get()
        if type(value) is openerp.osv.orm.browse_record:
            name_get = value.name_get()
            if len(name_get) and len(name_get[0]) >= 2:
                sort_fields[field_name] = name_get[0][1]
    return sort_fields
def get_recurrent_ids(self, cr, uid, event_id, domain, order=None, context=None):
    """Gives virtual event ids for recurring events
    This method gives ids of dates that comes between start date and end date of calendar views

    Recurring events are expanded into virtual ids ("<real_id>-<YYYYMMDDHHMMSS>");
    the date terms of `domain` are re-evaluated by hand against each occurrence
    date and the surviving ids are sorted on `order` (or self._order).

    @param order: The fields (comma separated, format "FIELD {DESC|ASC}") on which the events should be sorted
    """
    if not context:
        context = {}
    if isinstance(event_id, (str, int, long)):
        ids_to_browse = [event_id]  # keep select for return
    else:
        ids_to_browse = event_id
    if order:
        order_fields = [field.split()[0] for field in order.split(',')]
    else:
        # fallback on self._order defined on the model
        order_fields = [field.split()[0] for field in self._order.split(',')]
    # 'id' is always needed: it carries the (virtual) id collected below
    if 'id' not in order_fields:
        order_fields.append('id')
    result_data = []
    # NOTE(review): `result` is filled for non-recurring events but never
    # returned — the return value is computed from `result_data`/`ids` only.
    result = []
    for ev in self.browse(cr, uid, ids_to_browse, context=context):
        if not ev.recurrency or not ev.rrule:
            # non-recurring event: kept under its real id
            result.append(ev.id)
            result_data.append(self.get_search_fields(ev, order_fields))
            continue
        rdates = self.get_recurrent_date_by_event(cr, uid, ev, context=context)
        for r_date in rdates:
            # fix domain evaluation
            # step 1: check date and replace expression by True or False, replace other expressions by True
            # step 2: evaluation of & and |
            # check if there are one False
            pile = []
            ok = True
            for arg in domain:
                if str(arg[0]) in (str('date'), str('date_deadline'), str('end_date')):
                    if (arg[1] == '='):
                        ok = r_date.strftime('%Y-%m-%d') == arg[2]
                    if (arg[1] == '>'):
                        ok = r_date.strftime('%Y-%m-%d') > arg[2]
                    if (arg[1] == '<'):
                        ok = r_date.strftime('%Y-%m-%d') < arg[2]
                    if (arg[1] == '>='):
                        ok = r_date.strftime('%Y-%m-%d') >= arg[2]
                    if (arg[1] == '<='):
                        ok = r_date.strftime('%Y-%m-%d') <= arg[2]
                    pile.append(ok)
                elif str(arg) == str('&') or str(arg) == str('|'):
                    pile.append(arg)
                else:
                    # non-date leaf: treated as matching (the real search()
                    # that produced these ids already filtered on it)
                    pile.append(True)
            pile.reverse()
            new_pile = []
            # evaluate the (reversed) prefix-notation boolean expression:
            # operands are pushed before their operator is seen
            for item in pile:
                if not isinstance(item, basestring):
                    res = item
                elif str(item) == str('&'):
                    first = new_pile.pop()
                    second = new_pile.pop()
                    res = first and second
                elif str(item) == str('|'):
                    first = new_pile.pop()
                    second = new_pile.pop()
                    res = first or second
                new_pile.append(res)
            # any remaining False means this occurrence is filtered out
            if [True for item in new_pile if not item]:
                continue
            result_data.append(self.get_search_fields(ev, order_fields, r_date=r_date))
    if order_fields:
        # Python 2 cmp-style multi-field comparator; a '-' prefix in
        # sort_params marks a DESC field (mult = -1)
        def comparer(left, right):
            for fn, mult in comparers:
                result = cmp(fn(left), fn(right))
                if result:
                    return mult * result
            return 0
        sort_params = [key.split()[0] if key[-4:].lower() != 'desc' else '-%s' % key.split()[0] for key in (order or self._order).split(',')]
        comparers = [((itemgetter(col[1:]), -1) if col[0] == '-' else (itemgetter(col), 1)) for col in sort_params]
        ids = [r['id'] for r in sorted(result_data, cmp=comparer)]
    if isinstance(event_id, (str, int, long)):
        # single id requested: return a single id (or False)
        return ids and ids[0] or False
    else:
        return ids
def compute_rule_string(self, data):
    """
    Compute rule string according to value type RECUR of iCalendar from the values given.
    @param self: the object pointer
    @param data: dictionary of freq and interval value
    @return: string containing recurring rule (empty if no rule)
    """
    def get_week_string(freq, data):
        weekdays = ['mo', 'tu', 'we', 'th', 'fr', 'sa', 'su']
        if freq == 'weekly':
            byday = [day.upper() for day in data if data.get(day) and day in weekdays]
            if byday:
                return ';BYDAY=' + ','.join(byday)
        return ''

    def get_month_string(freq, data):
        if freq != 'monthly':
            return ''
        if data.get('month_by') == 'date' and (data.get('day') < 1 or data.get('day') > 31):
            raise osv.except_osv(_('Error!'), ("Please select a proper day of the month."))
        if data.get('month_by') == 'day':  # e.g. second Monday of the month
            return ';BYDAY=' + data.get('byday') + data.get('week_list')
        if data.get('month_by') == 'date':  # e.g. 16th of the month
            return ';BYMONTHDAY=' + str(data.get('day'))
        return ''

    def get_end_date(data):
        if data.get('end_date'):
            # keep only the digits of the end date and pin it to end-of-day UTC
            data['end_date_new'] = ''.join((re.compile('\d')).findall(data.get('end_date'))) + 'T235959Z'
        count_part = ';COUNT=' + str(data.get('count')) if data.get('end_type') == 'count' else ''
        until_part = ''
        if data.get('end_date_new') and data.get('end_type') == 'end_date':
            until_part = ';UNTIL=' + data.get('end_date_new')
        return count_part + until_part

    freq = data.get('rrule_type', False)  # day/week/month/year
    if not freq:
        return ''
    interval_string = ';INTERVAL=' + str(data.get('interval')) if data.get('interval') else ''
    return 'FREQ=' + freq.upper() + get_week_string(freq, data) + interval_string + get_end_date(data) + get_month_string(freq, data)
def _get_empty_rrule_data(self):
return {
'byday': False,
'recurrency': False,
'end_date': False,
'rrule_type': False,
'month_by': False,
'interval': 0,
'count': False,
'end_type': False,
'mo': False,
'tu': False,
'we': False,
'th': False,
'fr': False,
'sa': False,
'su': False,
'day': False,
'week_list': False
}
def _parse_rrule(self, rule, data, date_start):
    """Decode an iCalendar RRULE string into the form fields of `data`.

    Relies on private attributes of dateutil's parsed rrule object
    (_freq, _count, _interval, _until, _byweekday, _bynweekday,
    _bymonthday, _bymonth) — tied to the dateutil version in use.

    @param rule: RRULE string (RFC 5545 RECUR value)
    @param data: dict to fill in place (typically _get_empty_rrule_data())
    @param date_start: event start, "%Y-%m-%d %H:%M:%S" string
    @return: the updated `data` dict
    """
    day_list = ['mo', 'tu', 'we', 'th', 'fr', 'sa', 'su']
    rrule_type = ['yearly', 'monthly', 'weekly', 'daily']
    r = rrule.rrulestr(rule, dtstart=datetime.strptime(date_start, "%Y-%m-%d %H:%M:%S"))
    # dateutil freq: 0=yearly .. 3=daily; map everything finer than yearly
    if r._freq > 0 and r._freq < 4:
        data['rrule_type'] = rrule_type[r._freq]
    data['count'] = r._count
    data['interval'] = r._interval
    data['end_date'] = r._until and r._until.strftime("%Y-%m-%d %H:%M:%S")
    # repeat weekly: set one boolean per weekday
    if r._byweekday:
        for i in xrange(0, 7):
            if i in r._byweekday:
                data[day_list[i]] = True
        data['rrule_type'] = 'weekly'
    # repeat monthly by nweekday ((weekday, weeknumber), ) e.g. 2nd Monday
    if r._bynweekday:
        data['week_list'] = day_list[r._bynweekday[0][0]].upper()
        data['byday'] = str(r._bynweekday[0][1])
        data['month_by'] = 'day'
        data['rrule_type'] = 'monthly'
    # repeat monthly on a fixed day of the month
    if r._bymonthday:
        data['day'] = r._bymonthday[0]
        data['month_by'] = 'date'
        data['rrule_type'] = 'monthly'
    # repeat yearly but for openerp it's monthly, take same information as monthly but interval is 12 times
    if r._bymonth:
        data['interval'] = data['interval'] * 12
    # FIXME handle forever case
    # end of recurrence
    # in case of repeat forever (not supported) force a 100-occurrence cap
    if not (data.get('count') or data.get('end_date')):
        data['count'] = 100
    if data.get('count'):
        data['end_type'] = 'count'
    else:
        data['end_type'] = 'end_date'
    return data
def message_get_subscription_data(self, cr, uid, ids, user_pid=None, context=None):
    """Map mail subscription data from real ids back onto virtual ids.

    Virtual (recurring) ids share the subscription data of their real
    event: the super() call is made on the real id and its result is
    re-keyed on each requested virtual id.

    @param user_pid: optional partner id the subscription data is
        computed for (forwarded to super)
    """
    res = {}
    for virtual_id in ids:
        real_id = calendar_id2real_id(virtual_id)
        # Forward the caller's user_pid instead of hard-coding None, so
        # per-partner subscription data is computed for the right partner.
        result = super(calendar_event, self).message_get_subscription_data(cr, uid, [real_id], user_pid=user_pid, context=context)
        res[virtual_id] = result[real_id]
    return res
def onchange_partner_ids(self, cr, uid, ids, value, context=None):
    """ The basic purpose of this method is to check that destination partners
        effectively have email addresses. Otherwise a warning is thrown.
        :param value: value format: [[6, 0, [3, 4]]]
        :return: onchange dict, possibly carrying a 'warning' entry
    """
    res = {'value': {}}
    if not value or not value[0] or not value[0][0] == 6:
        # Return the well-formed (empty) onchange dict instead of None so
        # the client always receives a dict, as the docstring promises.
        return res
    res.update(self.check_partners_email(cr, uid, value[0][2], context=context))
    return res
def onchange_rec_day(self, cr, uid, id, date, mo, tu, we, th, fr, sa, su):
    """Move the start date to the first occurrence of the weekly rrule
    built from the checked weekday flags."""
    rule_data = self._get_empty_rrule_data()
    rule_data.update(
        byday=True,
        rrule_type='weekly',
        interval=1,
        mo=mo, tu=tu, we=we, th=th, fr=fr, sa=sa, su=su,
    )
    rule_string = self.compute_rule_string(rule_data)
    start = datetime.strptime(date, "%Y-%m-%d %H:%M:%S")
    occurrences = rrule.rrulestr(rule_string + ";COUNT=1", dtstart=start, forceset=True)
    first_occurence = list(occurrences)[0]
    return {'value': {'date': first_occurence.strftime("%Y-%m-%d") + ' 00:00:00'}}
def check_partners_email(self, cr, uid, partner_ids, context=None):
    """ Verify that selected partner_ids have an email_address defined.
        Otherwise throw a warning. """
    partners_without_email = [
        partner
        for partner in self.pool['res.partner'].browse(cr, uid, partner_ids, context=context)
        if not partner.email
    ]
    if not partners_without_email:
        return {}
    warning_msg = _('The following contacts have no email address :')
    warning_msg += ''.join('\n- %s' % (partner.name) for partner in partners_without_email)
    return {'warning': {
        'title': _('Email addresses not found'),
        'message': warning_msg,
    }}
# ----------------------------------------
# OpenChatter
# ----------------------------------------
# shows events of the day for this user
def _needaction_domain_get(self, cr, uid, context=None):
    """Domain of today's events for the current user (needaction counter)."""
    end_of_today = time.strftime(DEFAULT_SERVER_DATE_FORMAT + ' 23:59:59')
    return [
        ('end_date', '>=', end_of_today),
        ('date', '>=', end_of_today),
        ('user_id', '=', uid),
    ]
def message_post(self, cr, uid, thread_id, body='', subject=None, type='notification', subtype=None, parent_id=False, attachments=None, context=None, **kwargs):
    """Post a message on the real event behind a possibly-virtual id.

    A virtual id ("4-20091201100000") is converted to its real id, and
    'default_date' is stripped from the context so the posted message is
    not created with a default meant for the event form.
    """
    # NOTE(review): only `str` ids are converted here (Py2) — unicode
    # virtual ids would pass through unconverted; confirm callers.
    if isinstance(thread_id, str):
        thread_id = get_real_ids(thread_id)
    # context may legitimately be None (it is the default): guard before
    # dereferencing it, otherwise this crashed with AttributeError.
    if context and context.get('default_date'):
        del context['default_date']
    return super(calendar_event, self).message_post(cr, uid, thread_id, body=body, subject=subject, type=type, subtype=subtype, parent_id=parent_id, attachments=attachments, context=context, **kwargs)
def do_sendmail(self, cr, uid, ids, context=None):
    """Send the invitation mail to all attendees of the given events."""
    for event in self.browse(cr, uid, ids, context):
        current_user = self.pool['res.users'].browse(cr, uid, uid, context=context)
        if not current_user.email:
            continue
        attendee_ids = [att.id for att in event.attendee_ids]
        sent = self.pool['calendar.attendee']._send_mail_to_attendees(cr, uid, attendee_ids, email_from=current_user.email, context=context)
        if sent:
            self.message_post(cr, uid, event.id, body=_("An invitation email has been sent to attendee(s)"), subtype="calendar.subtype_invitation", context=context)
    return
def get_attendee(self, cr, uid, meeting_id, context=None):
    """Return the meeting summary and attendee statuses.

    Used by the web controller that renders the invitation page.
    """
    meeting = self.browse(cr, uid, int(meeting_id), context)
    attendees = [
        {'name': attendee.cn, 'status': attendee.state}
        for attendee in meeting.attendee_ids
    ]
    return {
        'meeting': {
            'event': meeting.name,
            'where': meeting.location,
            'when': meeting.display_time,
        },
        'attendee': attendees,
    }
def get_interval(self, cr, uid, ids, date, interval, tz=None, context=None):
    """Format one facet of an event date for the invitation email template.

    Function used only in calendar_event_data.xml for the email template.

    @param date: datetime string in DEFAULT_SERVER_DATETIME_FORMAT
    @param interval: one of 'day', 'month', 'dayname', 'time'
    @param tz: optional timezone name used to localize the date
    @raise ValueError: if `interval` is not a supported value
    """
    date = datetime.strptime(date.split('.')[0], DEFAULT_SERVER_DATETIME_FORMAT)
    if tz:
        # (tz is known truthy here; the old "tz or 'UTC'" was dead code)
        timezone = pytz.timezone(tz)
        date = date.replace(tzinfo=pytz.timezone('UTC')).astimezone(timezone)
    if interval == 'day':
        res = str(date.day)
    elif interval == 'month':
        res = date.strftime('%B') + " " + str(date.year)
    elif interval == 'dayname':
        res = date.strftime('%A')
    elif interval == 'time':
        res = date.strftime('%I:%M %p')
    else:
        # Previously an unknown interval crashed with UnboundLocalError;
        # fail with an explicit, diagnosable error instead.
        raise ValueError("Unsupported interval: %r" % (interval,))
    return res
def search(self, cr, uid, args, offset=0, limit=0, order=None, context=None, count=False):
    """Search events, expanding recurring events into virtual ids.

    With context['virtual_id'] (default True) the real search is done
    without offset/limit/order, the result is expanded through
    get_recurrent_ids(), and paging/counting is applied afterwards.
    """
    if context is None:
        context = {}
    if context.get('mymeetings', False):
        # restrict to meetings the current user attends or owns
        partner_id = self.pool['res.users'].browse(cr, uid, uid, context).partner_id.id
        args += ['|', ('partner_ids', 'in', [partner_id]), ('user_id', '=', uid)]
    new_args = []
    for arg in args:
        new_arg = arg
        if arg[0] in ('date', unicode('date')) and arg[1] == ">=":
            if context.get('virtual_id', True):
                # a recurring event whose end_date passes the bound may still
                # have matching occurrences: OR it in; the original date term
                # is appended right after, completing the '|','&' prefix form
                new_args += ['|', '&', ('recurrency', '=', 1), ('end_date', arg[1], arg[2])]
        elif arg[0] == "id":
            # ids may be virtual ("4-20091201100000"): search on real ids
            new_id = get_real_ids(arg[2])
            new_arg = (arg[0], arg[1], new_id)
        new_args.append(new_arg)
    if not context.get('virtual_id', True):
        return super(calendar_event, self).search(cr, uid, new_args, offset=offset, limit=limit, order=order, context=context, count=count)
    # offset, limit, order and count must be treated separately as we may need to deal with virtual ids
    res = super(calendar_event, self).search(cr, uid, new_args, offset=0, limit=0, order=None, context=context, count=False)
    res = self.get_recurrent_ids(cr, uid, res, args, order=order, context=context)
    if count:
        return len(res)
    elif limit:
        return res[offset: offset + limit]
    return res
def copy(self, cr, uid, id, default=None, context=None):
    """Duplicate an event on its real id; attendee links are never copied."""
    if context is None:
        context = {}
    if not default:
        default = {}
    default['attendee_ids'] = False
    real_id = calendar_id2real_id(id)
    return super(calendar_event, self).copy(cr, uid, real_id, default, context)
def _detach_one_event(self, cr, uid, id, values=None, context=None):
    """Detach one occurrence of a recurring event into a standalone event.

    The occurrence (virtual id) is copied as a real event that keeps a
    link to its recurrence parent (recurrent_id / recurrent_id_date) and
    loses the recurrence rule itself.

    @param values: optional field overrides applied to the copy
    @return: id of the newly created event
    """
    # The default used to be the mutable ``values=dict()`` — shared across
    # calls; use the None-sentinel idiom instead.
    if values is None:
        values = {}
    real_event_id = calendar_id2real_id(id)
    data = self.read(cr, uid, id, ['date', 'date_deadline', 'rrule', 'duration'])
    if data.get('rrule'):
        data.update(
            values,
            recurrent_id=real_event_id,
            recurrent_id_date=data.get('date'),
            rrule_type=False,
            rrule='',
            recurrency=False,
            # end of the detached event = start + duration (overrides win)
            end_date=datetime.strptime(values.get('date', False) or data.get('date'), "%Y-%m-%d %H:%M:%S") + timedelta(hours=values.get('duration', False) or data.get('duration'))
        )
    # do not copy the id
    if data.get('id'):
        del data['id']
    new_id = self.copy(cr, uid, real_event_id, default=data, context=context)
    return new_id
def open_after_detach_event(self, cr, uid, ids, context=None):
    """Detach the first given occurrence and open it in form/edit mode."""
    if context is None:
        context = {}
    detached_id = self._detach_one_event(cr, uid, ids[0], context=context)
    action = {
        'type': 'ir.actions.act_window',
        'res_model': 'calendar.event',
        'view_mode': 'form',
        'res_id': detached_id,
        'target': 'current',
        'flags': {'form': {'action_buttons': True, 'options': {'mode': 'edit'}}},
    }
    return action
def write(self, cr, uid, ids, values, context=None):
    """Write on events, detaching recurring occurrences when needed.

    Virtual ids are either redirected to their real id (when the change
    belongs on the real record) or detached into standalone events.
    """
    def _only_changes_to_apply_on_real_ids(field_names):
        ''' return True if changes are only to be made on the real ids'''
        for field in field_names:
            if field in ['date', 'active']:
                return True
        return False
    context = context or {}
    if isinstance(ids, (str, int, long)):
        if len(str(ids).split('-')) == 1:
            ids = [int(ids)]
        else:
            ids = [ids]
    res = False
    new_id = False
    # Special write of complex IDS
    # NOTE(review): this loop calls ids.remove() while iterating over ids —
    # known to skip elements when consecutive virtual ids occur; confirm
    # whether callers ever pass more than one virtual id at a time.
    for event_id in ids:
        if len(str(event_id).split('-')) == 1:
            continue
        ids.remove(event_id)
        real_event_id = calendar_id2real_id(event_id)
        # if we are setting the recurrency flag to False or if we are only changing fields that
        # should be only updated on the real ID and not on the virtual (like message_follower_ids):
        # then set real ids to be updated.
        if not values.get('recurrency', True) or not _only_changes_to_apply_on_real_ids(values.keys()):
            ids.append(real_event_id)
            continue
        else:
            data = self.read(cr, uid, event_id, ['date', 'date_deadline', 'rrule', 'duration'])
            if data.get('rrule'):
                new_id = self._detach_one_event(cr, uid, event_id, values, context=None)
    res = super(calendar_event, self).write(cr, uid, ids, values, context=context)
    # set end_date for calendar searching
    if values.get('recurrency', True) and values.get('end_type', 'count') in ('count', unicode('count')) and \
            (values.get('rrule_type') or values.get('count') or values.get('date') or values.get('date_deadline')):
        for data in self.read(cr, uid, ids, ['end_date', 'date_deadline', 'recurrency', 'rrule_type', 'count', 'end_type'], context=context):
            end_date = self._get_recurrency_end_date(data, context=context)
            super(calendar_event, self).write(cr, uid, [data['id']], {'end_date': end_date}, context=context)
    attendees_create = False
    if values.get('partner_ids', False):
        attendees_create = self.create_attendees(cr, uid, ids, context)
    # when the date changed on an active event, notify remaining attendees
    if values.get('date', False) and values.get('active', True):
        the_id = new_id or (ids and int(ids[0]))
        if attendees_create:
            attendees_create = attendees_create[the_id]
            mail_to_ids = list(set(attendees_create['old_attendee_ids']) - set(attendees_create['removed_attendee_ids']))
        else:
            mail_to_ids = [att.id for att in self.browse(cr, uid, the_id, context=context).attendee_ids]
        if mail_to_ids:
            current_user = self.pool['res.users'].browse(cr, uid, uid, context=context)
            if self.pool['calendar.attendee']._send_mail_to_attendees(cr, uid, mail_to_ids, template_xmlid='calendar_template_meeting_changedate', email_from=current_user.email, context=context):
                self.message_post(cr, uid, the_id, body=_("A email has been send to specify that the date has been changed !"), subtype="calendar.subtype_invitation", context=context)
    # NOTE(review): 'res or True and False' evaluates to 'res or False'
    # (returns res when truthy, else False) — possibly intended
    # 'res and True or False'; confirm before changing.
    return res or True and False
def create(self, cr, uid, vals, context=None):
    """Create an event, then compute its recurrency end date and attendees."""
    if context is None:
        context = {}
    # Quick-create while filtering on another user would otherwise leave
    # user_id unset.
    vals.setdefault('user_id', uid)
    new_id = super(calendar_event, self).create(cr, uid, vals, context=context)
    data = self.read(cr, uid, [new_id], ['end_date', 'date_deadline', 'recurrency', 'rrule_type', 'count', 'end_type'], context=context)[0]
    end_date = self._get_recurrency_end_date(data, context=context)
    self.write(cr, uid, [new_id], {'end_date': end_date}, context=context)
    self.create_attendees(cr, uid, [new_id], context=context)
    return new_id
def read_group(self, cr, uid, domain, fields, groupby, offset=0, limit=None, context=None, orderby=False):
    """Group events on real ids only; grouping on 'date' is unsupported."""
    if not context:
        context = {}
    if 'date' in groupby:
        raise osv.except_osv(_('Warning!'), _('Group by date is not supported, use the calendar view instead.'))
    virtual_id = context.get('virtual_id', True)
    context.update({'virtual_id': False})
    groups = super(calendar_event, self).read_group(cr, uid, domain, fields, groupby, offset=offset, limit=limit, context=context, orderby=orderby)
    for group in groups:
        # Drop the counts: they would not match what expanding the group
        # returns through search() on virtual ids.
        for groupname in groupby:
            count_key = groupname + "_count"
            if group.get(count_key):
                del group[count_key]
        # restore the caller's virtual_id flag for the drill-down context
        group.get('__context', {}).update({'virtual_id': virtual_id})
    return groups
def read(self, cr, uid, ids, fields=None, context=None, load='_classic_read'):
    """Read events, resolving virtual ids and hiding private events.

    Virtual ids get their occurrence dates substituted into 'date' /
    'date_deadline'; private events of other users are blanked out and
    renamed 'Busy'.
    """
    if context is None:
        context = {}
    fields2 = fields and fields[:] or None
    # fields needed internally even when the caller did not ask for them;
    # they are stripped from the result at the end
    EXTRAFIELDS = ('class', 'user_id', 'duration', 'date', 'rrule', 'vtimezone')
    for f in EXTRAFIELDS:
        if fields and (f not in fields):
            fields2.append(f)
    if isinstance(ids, (str, int, long)):
        select = [ids]
    else:
        select = ids
    # pair each requested (possibly virtual) id with its real id
    select = map(lambda x: (x, calendar_id2real_id(x)), select)
    result = []
    real_data = super(calendar_event, self).read(cr, uid, [real_id for calendar_id, real_id in select], fields=fields2, context=context, load=load)
    real_data = dict(zip([x['id'] for x in real_data], real_data))
    for calendar_id, real_id in select:
        res = real_data[real_id].copy()
        # for a virtual id this returns (real_id, start, end) computed from
        # the occurrence timestamp and the event duration
        ls = calendar_id2real_id(calendar_id, with_date=res and res.get('duration', 0) or 0)
        if not isinstance(ls, (str, int, long)) and len(ls) >= 2:
            res['date'] = ls[1]
            res['date_deadline'] = ls[2]
        res['id'] = calendar_id
        result.append(res)
    for r in result:
        if r['user_id']:
            user_id = type(r['user_id']) in (tuple, list) and r['user_id'][0] or r['user_id']
            if user_id == uid:
                # the owner sees everything, including private events
                continue
        if r['class'] == 'private':
            # blank out everything but scheduling data for foreign private
            # events (Py2: r.keys() is a list, safe to mutate r while looping)
            for f in r.keys():
                if f not in ('id', 'date', 'date_deadline', 'duration', 'user_id', 'state', 'interval', 'count', 'recurrent_id_date'):
                    if isinstance(r[f], list):
                        r[f] = []
                    else:
                        r[f] = False
                if f == 'name':
                    r[f] = _('Busy')
    # strip the internally-needed extra fields the caller did not request
    for r in result:
        for k in EXTRAFIELDS:
            if (k in r) and (fields and (k not in fields)):
                del r[k]
    if isinstance(ids, (str, int, long)):
        return result and result[0] or False
    return result
def unlink(self, cr, uid, ids, unlink_level=0, context=None):
    """Delete or archive events.

    With unlink_level == 1, real non-detached events are really deleted;
    everything else (virtual ids, detached occurrences) is only archived
    (active=False) so records synchronized to Google Calendar can still
    be matched later.
    """
    if not isinstance(ids, list):
        ids = [ids]
    res = False
    ids_to_archive = []
    ids_to_delete = []
    for event_id in ids:
        is_real_id = len(str(event_id).split('-')) == 1
        if unlink_level == 1 and is_real_id:
            if self.browse(cr, uid, event_id).recurrent_id:
                ids_to_archive.append(event_id)
            else:
                ids_to_delete.append(event_id)
        else:
            ids_to_archive.append(event_id)
    if ids_to_delete:
        res = super(calendar_event, self).unlink(cr, uid, ids_to_delete, context=context)
    for archive_id in ids_to_archive:
        res = self.write(cr, uid, archive_id, {'active': False}, context=context)
    return res
class mail_message(osv.Model):
    _inherit = "mail.message"

    def search(self, cr, uid, args, offset=0, limit=0, order=None, context=None, count=False):
        '''
        convert the search on real ids in the case it was asked on virtual ids, then call super()
        '''
        for index, term in enumerate(args):
            if term[0] == "res_id" and isinstance(term[2], str):
                args[index][2] = get_real_ids(term[2])
        return super(mail_message, self).search(cr, uid, args, offset=offset, limit=limit, order=order, context=context, count=count)

    def _find_allowed_model_wise(self, cr, uid, doc_model, doc_dict, context=None):
        """Extend allowed calendar.event docs with their virtual (recurring) ids."""
        if context is None:
            context = {}
        if doc_model == 'calendar.event':
            order = context.get('order', self._order)
            for virtual_id in self.pool[doc_model].get_recurrent_ids(cr, uid, doc_dict.keys(), [], order=order, context=context):
                # each virtual id inherits the date of its real event
                doc_dict.setdefault(virtual_id, doc_dict[get_real_ids(virtual_id)])
        return super(mail_message, self)._find_allowed_model_wise(cr, uid, doc_model, doc_dict, context=context)
class ir_attachment(osv.Model):
    _inherit = "ir.attachment"

    def search(self, cr, uid, args, offset=0, limit=0, order=None, context=None, count=False):
        '''
        convert the search on real ids in the case it was asked on virtual ids, then call super()
        '''
        for index, term in enumerate(args):
            if term[0] == "res_id" and isinstance(term[2], str):
                args[index][2] = get_real_ids(term[2])
        return super(ir_attachment, self).search(cr, uid, args, offset=offset, limit=limit, order=order, context=context, count=count)

    def write(self, cr, uid, ids, vals, context=None):
        '''
        when posting an attachment (new or not), convert the virtual ids in real ids.
        '''
        res_id = vals.get('res_id')
        if isinstance(res_id, str):
            vals['res_id'] = get_real_ids(res_id)
        return super(ir_attachment, self).write(cr, uid, ids, vals, context=context)
class ir_http(osv.AbstractModel):
    _inherit = 'ir.http'

    def _auth_method_calendar(self):
        """Authenticate a public calendar invitation page by access token.

        Raises BadRequest when the token is unknown, or when a logged-in
        user is not the attendee this invitation was issued for.
        """
        token = request.params['token']
        db = request.params['db']
        registry = openerp.modules.registry.RegistryManager.get(db)
        attendee_pool = registry.get('calendar.attendee')
        error_message = False
        with registry.cursor() as cr:
            attendee_id = attendee_pool.search(cr, openerp.SUPERUSER_ID, [('access_token', '=', token)])
            if not attendee_id:
                error_message = """Invalid Invitation Token."""
            elif request.session.uid and request.session.login != 'anonymous':
                # A valid non-anonymous session exists, but it may belong to
                # someone other than the invited partner.
                attendee = attendee_pool.browse(cr, openerp.SUPERUSER_ID, attendee_id[0])
                session_user = registry.get('res.users').browse(cr, openerp.SUPERUSER_ID, request.session.uid)
                if attendee.partner_id.id != session_user.partner_id.id:
                    error_message = """Invitation cannot be forwarded via email. This event/meeting belongs to %s and you are logged in as %s. Please ask organizer to add you.""" % (attendee.email, session_user.email)
        if error_message:
            raise BadRequest(error_message)
        return True
class invite_wizard(osv.osv_memory):
    _inherit = 'mail.wizard.invite'

    def default_get(self, cr, uid, fields, context=None):
        """Resolve a virtual event id in res_id to its real id.

        Needed when 'invite others' is clicked from the follower widget
        of a virtual (recurring) event occurrence.
        """
        defaults = super(invite_wizard, self).default_get(cr, uid, fields, context=context)
        if 'res_id' in defaults:
            defaults['res_id'] = get_real_ids(defaults['res_id'])
        return defaults
[FIX] Fix the cron call that was broken by a renamed function.
[IMP] Add the parameter calendar.block_mail in ir.config.parameters to allow bypassing the sending of mails to attendees.
bzr revid: jke@openerp.com-20140414202936-kh1izog183nwenb7
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Business Applications
# Copyright (c) 2011-2014 OpenERP S.A. <http://openerp.com>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import pytz
import re
import time
import openerp
import openerp.service.report
import uuid
from werkzeug.exceptions import BadRequest
from datetime import datetime, timedelta
from dateutil import parser
from dateutil import rrule
from dateutil.relativedelta import relativedelta
from openerp import tools, SUPERUSER_ID
from openerp.osv import fields, osv
from openerp.tools import DEFAULT_SERVER_DATE_FORMAT, DEFAULT_SERVER_DATETIME_FORMAT
from openerp.tools.translate import _
from openerp.http import request
from operator import itemgetter
import logging
_logger = logging.getLogger(__name__)
def calendar_id2real_id(calendar_id=None, with_date=False):
    """
    Convert a "virtual/recurring event id" (type string) into a real event id (type int).
    E.g. virtual/recurring event id is 4-20091201100000, so it will return 4.
    @param calendar_id: id of calendar
    @param with_date: if a value is passed to this param it will return dates based on value of withdate + calendar_id
    @return: real event id
    """
    if calendar_id and isinstance(calendar_id, (str, unicode)):
        parts = calendar_id.split('-')
        if len(parts) >= 2:
            real_id = parts[0]
            if not with_date:
                return int(real_id)
            # with_date is the duration in hours: return (id, start, end)
            real_date = time.strftime("%Y-%m-%d %H:%M:%S", time.strptime(parts[1], "%Y%m%d%H%M%S"))
            start = datetime.strptime(real_date, "%Y-%m-%d %H:%M:%S")
            end = start + timedelta(hours=with_date)
            return (int(real_id), real_date, end.strftime("%Y-%m-%d %H:%M:%S"))
    return calendar_id and int(calendar_id) or calendar_id
def get_real_ids(ids):
    """Convert one virtual id, or a list/tuple of them, to real ids.

    NOTE(review): any other input type falls through and returns None
    implicitly — confirm callers never rely on that.
    """
    if isinstance(ids, (str, int, long)):
        return calendar_id2real_id(ids)
    if isinstance(ids, (list, tuple)):
        return [calendar_id2real_id(id) for id in ids]
class calendar_attendee(osv.Model):
"""
Calendar Attendee Information
"""
_name = 'calendar.attendee'
_rec_name = 'cn'
_description = 'Attendee information'
def _compute_data(self, cr, uid, ids, name, arg, context=None):
    """
    Compute data on function fields for attendee values.
    @param ids: list of calendar attendee's IDs
    @param name: name of field
    @return: dictionary of form {id: {'field Name': value'}}
    """
    # the ORM passes the multi-field name list; only one field is computed
    name = name[0]
    result = {}
    for attendee in self.browse(cr, uid, ids, context=context):
        values = {}
        if name == 'cn':
            # common name: partner name if linked, otherwise the raw email
            if attendee.partner_id:
                values[name] = attendee.partner_id.name or False
            else:
                values[name] = attendee.email or ''
        elif name == 'event_date':
            values[name] = attendee.event_id.date
        elif name == 'event_end_date':
            values[name] = attendee.event_id.date_deadline
        result[attendee.id] = values
    return result
# Possible participation statuses of an attendee (mirrors iCalendar PARTSTAT).
STATE_SELECTION = [
    ('needsAction', 'Needs Action'),
    ('tentative', 'Uncertain'),
    ('declined', 'Declined'),
    ('accepted', 'Accepted'),
]
_columns = {
    # participation status, driven by do_accept/do_decline/do_tentative
    'state': fields.selection(STATE_SELECTION, 'Status', readonly=True, help="Status of the attendee's participation"),
    # display name, computed from the partner (or the raw email) — see _compute_data
    'cn': fields.function(_compute_data, string='Common name', type="char", multi='cn', store=True),
    'partner_id': fields.many2one('res.partner', 'Contact', readonly="True"),
    'email': fields.char('Email', help="Email of Invited Person"),
    # dates mirrored from the linked event (non-stored function fields)
    'event_date': fields.function(_compute_data, string='Event Date', type="datetime", multi='event_date'),
    'event_end_date': fields.function(_compute_data, string='Event End Date', type="datetime", multi='event_end_date'),
    'availability': fields.selection([('free', 'Free'), ('busy', 'Busy')], 'Free/Busy', readonly="True"),
    # token authenticating the public invitation page (checked in ir_http)
    'access_token': fields.char('Invitation Token'),
    'event_id': fields.many2one('calendar.event', 'Meeting linked'),
}
_defaults = {
    'state': 'needsAction',
}
def copy(self, cr, uid, id, default=None, context=None):
    """Forbid duplication: an attendee is bound to one event/partner pair."""
    raise osv.except_osv(_('Warning!'), _('You cannot duplicate a calendar attendee.'))
def onchange_partner_id(self, cr, uid, ids, partner_id, context=None):
    """
    Make entry on email and availability on change of partner_id field.
    @param partner_id: changed value of partner id
    """
    values = {'email': ''}
    if partner_id:
        partner = self.pool['res.partner'].browse(cr, uid, partner_id, context=context)
        values['email'] = partner.email
    return {'value': values}
def get_ics_file(self, cr, uid, event_obj, context=None):
    """
    Returns iCalendar file for the event invitation.
    @param event_obj: event object (browse record)
    @return: .ics file content, or None when vobject is unavailable
    @raise osv.except_osv: when the event has no start or end date
    """
    res = None

    def ics_datetime(idate, short=False):
        # parse a server datetime string and tag it as UTC
        if idate:
            return datetime.strptime(idate.split('.')[0], '%Y-%m-%d %H:%M:%S').replace(tzinfo=pytz.timezone('UTC'))
        return False
    try:
        # FIXME: why isn't this in CalDAV?
        import vobject
    except ImportError:
        # vobject is optional: silently skip the .ics attachment
        return res
    cal = vobject.iCalendar()
    event = cal.add('vevent')
    if not event_obj.date_deadline or not event_obj.date:
        raise osv.except_osv(_('Warning!'), _("First you have to specify the date of the invitation."))
    event.add('created').value = ics_datetime(time.strftime('%Y-%m-%d %H:%M:%S'))
    event.add('dtstart').value = ics_datetime(event_obj.date)
    event.add('dtend').value = ics_datetime(event_obj.date_deadline)
    event.add('summary').value = event_obj.name
    if event_obj.description:
        event.add('description').value = event_obj.description
    if event_obj.location:
        event.add('location').value = event_obj.location
    if event_obj.rrule:
        event.add('rrule').value = event_obj.rrule
    # one VALARM per configured alarm, triggered relative to the start
    if event_obj.alarm_ids:
        for alarm in event_obj.alarm_ids:
            valarm = event.add('valarm')
            interval = alarm.interval
            duration = alarm.duration
            trigger = valarm.add('TRIGGER')
            trigger.params['related'] = ["START"]
            if interval == 'days':
                delta = timedelta(days=duration)
            elif interval == 'hours':
                delta = timedelta(hours=duration)
            elif interval == 'minutes':
                delta = timedelta(minutes=duration)
            trigger.value = delta
            valarm.add('DESCRIPTION').value = alarm.name or 'OpenERP'
    for attendee in event_obj.attendee_ids:
        attendee_add = event.add('attendee')
        attendee_add.value = 'MAILTO:' + (attendee.email or '')
    res = cal.serialize()
    return res
def _send_mail_to_attendees(self, cr, uid, ids, email_from=None, template_xmlid='calendar_template_meeting_invitation', context=None):
    """
    Send mail for event invitation to event attendees.
    @param email_from: email address for user sending the mail; when not
        given, the 'email_from' configuration value is used, resolved at
        call time (the old default resolved it once at import time).
    @return: result of mail.mail send(), or False when nothing was sent
        or mailing is disabled via the 'calendar.block_mail' parameter.
    """
    res = False
    if email_from is None:
        email_from = tools.config.get('email_from', False)
    # administrator kill-switch to disable all attendee mails
    if self.pool['ir.config_parameter'].get_param(cr, uid, 'calendar.block_mail', default=False):
        return res
    mail_ids = []
    data_pool = self.pool['ir.model.data']
    mailmess_pool = self.pool['mail.message']
    mail_pool = self.pool['mail.mail']
    template_pool = self.pool['email.template']
    # context may be None: copy defensively (the old code crashed here)
    local_context = dict(context or {})
    # colors used by the invitation template to render attendee statuses
    color = {
        'needsAction': 'grey',
        'accepted': 'green',
        'tentative': '#FFFF00',
        'declined': 'red'
    }
    if not isinstance(ids, (tuple, list)):
        ids = [ids]
    dummy, template_id = data_pool.get_object_reference(cr, uid, 'calendar', template_xmlid)
    dummy, act_id = data_pool.get_object_reference(cr, uid, 'calendar', "view_calendar_event_calendar")
    local_context.update({
        'color': color,
        'action_id': self.pool['ir.actions.act_window'].search(cr, uid, [('view_id', '=', act_id)], context=context)[0],
        'dbname': cr.dbname,
        'base_url': self.pool['ir.config_parameter'].get_param(cr, uid, 'web.base.url', default='http://localhost:8069', context=context)
    })
    for attendee in self.browse(cr, uid, ids, context=context):
        # never mail the sender himself, or attendees without an address
        if attendee.email and email_from and attendee.email != email_from:
            ics_file = self.get_ics_file(cr, uid, attendee.event_id, context=context)
            mail_id = template_pool.send_mail(cr, uid, template_id, attendee.id, context=local_context)
            vals = {}
            if ics_file:
                vals['attachment_ids'] = [(0, 0, {'name': 'invitation.ics',
                                                  'datas_fname': 'invitation.ics',
                                                  'datas': str(ics_file).encode('base64')})]
            # We don't want to have the mail in the tchatter while in queue!
            vals['model'] = None
            the_mailmess = mail_pool.browse(cr, uid, mail_id, context=context).mail_message_id
            mailmess_pool.write(cr, uid, [the_mailmess.id], vals, context=context)
            mail_ids.append(mail_id)
    if mail_ids:
        res = mail_pool.send(cr, uid, mail_ids, context=context)
    return res
def onchange_user_id(self, cr, uid, ids, user_id, *args, **argv):
    """Propagate the selected user's email and availability to the attendee.

    @param ids: list of attendee ids (onchange convention, unused here)
    @param user_id: new value of the ``user_id`` field
    @return: onchange dictionary filling ``email`` (and ``availability``)
    """
    values = {'email': ''}
    if user_id:
        user_record = self.pool['res.users'].browse(cr, uid, user_id, *args)
        values = {'email': user_record.email, 'availability': user_record.availability}
    return {'value': values}
def do_tentative(self, cr, uid, ids, context=None, *args):
    """Mark the invitation of the given attendees as 'Tentative'.

    @param ids: list of attendee ids
    """
    tentative_state = {'state': 'tentative'}
    return self.write(cr, uid, ids, tentative_state, context)
def do_accept(self, cr, uid, ids, context=None, *args):
    """Mark the invitation of the given attendees as 'Accepted' and log the
    acceptance in each related meeting's chatter.

    @param ids: list of attendee ids
    """
    if context is None:
        context = {}
    meeting_pool = self.pool['calendar.event']
    result = self.write(cr, uid, ids, {'state': 'accepted'}, context)
    for attendee_rec in self.browse(cr, uid, ids, context=context):
        chatter_body = _(("%s has accepted invitation") % (attendee_rec.cn))
        meeting_pool.message_post(cr, uid, attendee_rec.event_id.id, body=chatter_body,
                                  subtype="calendar.subtype_invitation", context=context)
    return result
def do_decline(self, cr, uid, ids, context=None, *args):
    """Mark the invitation of the given attendees as 'Declined' and log the
    refusal in each related meeting's chatter.

    @param ids: list of calendar attendee ids
    """
    if context is None:
        context = {}
    meeting_pool = self.pool['calendar.event']
    result = self.write(cr, uid, ids, {'state': 'declined'}, context)
    for attendee_rec in self.browse(cr, uid, ids, context=context):
        chatter_body = _(("%s has declined invitation") % (attendee_rec.cn))
        meeting_pool.message_post(cr, uid, attendee_rec.event_id.id, body=chatter_body,
                                  subtype="calendar.subtype_invitation", context=context)
    return result
def create(self, cr, uid, vals, context=None):
    """Create an attendee, deriving ``email`` from ``cn`` when missing.

    The common name may look like ``name:email``; whichever colon-separated
    part contains an ``@`` is taken as the email address.
    """
    if context is None:
        context = {}
    if not vals.get("email") and vals.get("cn"):
        cn_parts = vals.get("cn").split(':')
        candidates = [part for part in cn_parts if '@' in part]
        vals['email'] = candidates[0] if candidates else ''
        vals['cn'] = vals.get("cn")
    return super(calendar_attendee, self).create(cr, uid, vals, context=context)
class res_partner(osv.Model):
    """Extend partners with calendar notification bookkeeping."""
    _inherit = 'res.partner'
    _columns = {
        'calendar_last_notif_ack': fields.datetime('Last notification marked as read from base Calendar'),
    }

    def get_attendee_detail(self, cr, uid, ids, meeting_id, context=None):
        """Return ``(id, display_name[, state])`` tuples for the given
        partners; the attendee state is appended when the partner is invited
        to ``meeting_id``."""
        details = []
        meeting = None
        if meeting_id:
            meeting = self.pool['calendar.event'].browse(cr, uid, get_real_ids(meeting_id), context=context)
        for partner in self.browse(cr, uid, ids, context=context):
            entry = self.name_get(cr, uid, [partner.id], context)[0]
            if meeting:
                for attendee in meeting.attendee_ids:
                    if attendee.partner_id.id == partner.id:
                        entry = (entry[0], entry[1], attendee.state)
            details.append(entry)
        return details

    def calendar_last_notif_ack(self, cr, uid, context=None):
        """Record that the current user has acknowledged (read) his calendar
        notifications as of now."""
        user = self.pool['res.users'].browse(cr, uid, uid, context=context)
        self.write(cr, uid, user.partner_id.id, {'calendar_last_notif_ack': datetime.now()}, context=context)
        return
class calendar_alarm_manager(osv.AbstractModel):
    """Stateless helper that computes which event alarms (email or browser
    notification) are about to trigger and fires the matching reminders."""
    _name = 'calendar.alarm_manager'

    def get_next_potential_limit_alarm(self, cr, uid, seconds, notif=True, mail=True, partner_id=None, context=None):
        """Return, per event having at least one alarm of the requested
        type(s), the window during which its alarms can trigger.

        @param seconds: look-ahead/behind window around now (UTC), in seconds
        @param notif: include 'notification' alarms
        @param mail: include 'email' alarms
        @param partner_id: if set, restrict to events this partner attends
        @return: dict event_id -> {event_id, first_alarm, last_alarm,
                 first_meeting, last_meeting, min_duration, max_duration, rrule}
        """
        res = {}
        # first_alarm = event start minus the *largest* alarm delay;
        # last_alarm = last occurrence (recurrency end_date, else deadline)
        # minus the *smallest* delay.  Deltas come from duration_minutes.
        base_request = """
                    SELECT
                        cal.id,
                        cal.date - interval '1' minute * calcul_delta.max_delta AS first_alarm,
                        CASE
                            WHEN cal.recurrency THEN cal.end_date - interval '1' minute * calcul_delta.min_delta
                            ELSE cal.date_deadline - interval '1' minute * calcul_delta.min_delta
                        END as last_alarm,
                        cal.date as first_event_date,
                        CASE
                            WHEN cal.recurrency THEN cal.end_date
                            ELSE cal.date_deadline
                        END as last_event_date,
                        calcul_delta.min_delta,
                        calcul_delta.max_delta,
                        cal.rrule AS rule
                    FROM
                        calendar_event AS cal
                        RIGHT JOIN
                            (
                                SELECT
                                    rel.calendar_event_id, max(alarm.duration_minutes) AS max_delta,min(alarm.duration_minutes) AS min_delta
                                FROM
                                    calendar_alarm_calendar_event_rel AS rel
                                        LEFT JOIN calendar_alarm AS alarm ON alarm.id = rel.calendar_alarm_id
                                WHERE alarm.type in %s
                                GROUP BY rel.calendar_event_id
                            ) AS calcul_delta ON calcul_delta.calendar_event_id = cal.id
        """
        # Optional extra join restricting events to one attendee partner.
        filter_user = """
                RIGHT JOIN calendar_event_res_partner_rel AS part_rel ON part_rel.calendar_event_id = cal.id
                    AND part_rel.res_partner_id = %s
        """

        #Add filter on type
        type_to_read = ()
        if notif:
            type_to_read += ('notification',)
        if mail:
            type_to_read += ('email',)

        tuple_params = (type_to_read,)

        #ADD FILTER ON PARTNER_ID
        if partner_id:
            base_request += filter_user
            tuple_params += (partner_id, )

        #Add filter on hours
        tuple_params += (seconds, seconds,)

        # The interval placeholders are doubled (%%s) because the query text
        # itself is first %-formatted with base_request.
        cr.execute("""SELECT *
                        FROM ( %s ) AS ALL_EVENTS
                       WHERE ALL_EVENTS.first_alarm < (now() at time zone 'utc' + interval '%%s' second )
                         AND ALL_EVENTS.last_alarm > (now() at time zone 'utc' - interval '%%s' second )
                   """ % base_request, tuple_params)

        for event_id, first_alarm, last_alarm, first_meeting, last_meeting, min_duration, max_duration, rule in cr.fetchall():
            res[event_id] = {
                'event_id': event_id,
                'first_alarm': first_alarm,
                'last_alarm': last_alarm,
                'first_meeting': first_meeting,
                'last_meeting': last_meeting,
                'min_duration': min_duration,
                'max_duration': max_duration,
                'rrule': rule
            }

        return res

    def do_check_alarm_for_one_date(self, cr, uid, one_date, event, event_maxdelta, in_the_next_X_seconds, after=False, notif=True, mail=True, context=None):
        """Return the alerts of ``event`` that must trigger within
        ``in_the_next_X_seconds`` for the occurrence starting at ``one_date``.

        @param one_date: naive datetime of the occurrence start
        @param event_maxdelta: largest alarm delay of the event, in minutes
        @param after: datetime string; if set, skip alarms whose notify time
            is not strictly later (used to honour notification acks)
        @return: list of dicts {alarm_id, event_id, notify_at}
        """
        res = []
        alarm_type = []
        if notif:
            alarm_type.append('notification')
        if mail:
            alarm_type.append('email')
        if one_date - timedelta(minutes=event_maxdelta) < datetime.now() + timedelta(seconds=in_the_next_X_seconds):  # if an alarm is possible for this date
            for alarm in event.alarm_ids:
                if alarm.type in alarm_type and \
                    one_date - timedelta(minutes=alarm.duration_minutes) < datetime.now() + timedelta(seconds=in_the_next_X_seconds) and \
                        (not after or one_date - timedelta(minutes=alarm.duration_minutes) > datetime.strptime(after.split('.')[0], "%Y-%m-%d %H:%M:%S")):
                    alert = {
                        'alarm_id': alarm.id,
                        'event_id': event.id,
                        'notify_at': one_date - timedelta(minutes=alarm.duration_minutes),
                    }
                    res.append(alert)
        return res

    def get_next_mail(self, cr, uid, context=None):
        """Cron entry point: send the email reminders falling due before the
        next scheduled cron run."""
        # The look-ahead window is the cron's own periodicity.
        cron = self.pool.get('ir.cron').search(cr, uid, [('model', 'ilike', self._name)], context=context)
        if cron and len(cron) == 1:
            cron = self.pool.get('ir.cron').browse(cr, uid, cron[0], context=context)
        else:
            _logger.exception("Cron for " + self._name + " can not be identified !")
            # NOTE(review): execution continues with ``cron`` still being the
            # search result (a list), so the attribute accesses below would
            # fail; only the single-record path is actually supported.

        if cron.interval_type == "weeks":
            cron_interval = cron.interval_number * 7 * 24 * 60 * 60
        elif cron.interval_type == "days":
            cron_interval = cron.interval_number * 24 * 60 * 60
        elif cron.interval_type == "hours":
            cron_interval = cron.interval_number * 60 * 60
        elif cron.interval_type == "minutes":
            cron_interval = cron.interval_number * 60
        elif cron.interval_type == "seconds":
            cron_interval = cron.interval_number

        # NOTE(review): if no branch above matched, ``cron_interval`` is
        # unbound and this raises NameError before the log line runs.
        if not cron_interval:
            _logger.exception("Cron delay can not be computed !")

        all_events = self.get_next_potential_limit_alarm(cr, uid, cron_interval, notif=False, context=context)

        for event in all_events:  # .values()
            max_delta = all_events[event]['max_duration']
            curEvent = self.pool.get('calendar.event').browse(cr, uid, event, context=context)
            if curEvent.recurrency:
                bFound = False
                LastFound = False
                for one_date in self.pool.get('calendar.event').get_recurrent_date_by_event(cr, uid, curEvent, context=context):
                    # Alarm math is done on naive datetimes (see
                    # do_check_alarm_for_one_date), so drop the tzinfo.
                    in_date_format = one_date.replace(tzinfo=None)
                    LastFound = self.do_check_alarm_for_one_date(cr, uid, in_date_format, curEvent, max_delta, cron_interval, notif=False, context=context)
                    if LastFound:
                        for alert in LastFound:
                            self.do_mail_reminder(cr, uid, alert, context=context)
                        if not bFound:  # if it's the first alarm for this recurrent event
                            bFound = True
                    if bFound and not LastFound:  # if the precedent event had an alarm but not this one, we can stop the search for this event
                        break
            else:
                in_date_format = datetime.strptime(curEvent.date, '%Y-%m-%d %H:%M:%S')
                LastFound = self.do_check_alarm_for_one_date(cr, uid, in_date_format, curEvent, max_delta, cron_interval, notif=False, context=context)
                if LastFound:
                    for alert in LastFound:
                        self.do_mail_reminder(cr, uid, alert, context=context)

    def get_next_notif(self, cr, uid, context=None):
        """Return the browser notifications to show to the current user within
        the next client polling interval (called by the web client)."""
        ajax_check_every_seconds = 300
        partner = self.pool.get('res.users').browse(cr, uid, uid, context=context).partner_id
        all_notif = []

        if not partner:
            return []

        all_events = self.get_next_potential_limit_alarm(cr, uid, ajax_check_every_seconds, partner_id=partner.id, mail=False, context=context)

        for event in all_events:  # .values()
            max_delta = all_events[event]['max_duration']
            curEvent = self.pool.get('calendar.event').browse(cr, uid, event, context=context)
            if curEvent.recurrency:
                bFound = False
                LastFound = False
                for one_date in self.pool.get("calendar.event").get_recurrent_date_by_event(cr, uid, curEvent, context=context):
                    in_date_format = one_date.replace(tzinfo=None)
                    # ``after`` skips alarms already acknowledged by the user.
                    LastFound = self.do_check_alarm_for_one_date(cr, uid, in_date_format, curEvent, max_delta, ajax_check_every_seconds, after=partner.calendar_last_notif_ack, mail=False, context=context)
                    if LastFound:
                        for alert in LastFound:
                            all_notif.append(self.do_notif_reminder(cr, uid, alert, context=context))
                        if not bFound:  # if it's the first alarm for this recurrent event
                            bFound = True
                    if bFound and not LastFound:  # if the precedent event had alarm but not this one, we can stop the search fot this event
                        break
            else:
                in_date_format = datetime.strptime(curEvent.date, '%Y-%m-%d %H:%M:%S')
                LastFound = self.do_check_alarm_for_one_date(cr, uid, in_date_format, curEvent, max_delta, ajax_check_every_seconds, partner.calendar_last_notif_ack, mail=False, context=context)
                if LastFound:
                    for alert in LastFound:
                        all_notif.append(self.do_notif_reminder(cr, uid, alert, context=context))
        return all_notif

    def do_mail_reminder(self, cr, uid, alert, context=None):
        """Send the reminder email of ``alert`` to all attendees of its event.

        @param alert: dict with 'event_id' and 'alarm_id' keys
        @return: result of the attendee mailing, or False for non-email alarms
        """
        if context is None:
            context = {}
        res = False

        event = self.pool['calendar.event'].browse(cr, uid, alert['event_id'], context=context)
        alarm = self.pool['calendar.alarm'].browse(cr, uid, alert['alarm_id'], context=context)

        if alarm.type == 'email':
            res = self.pool['calendar.attendee']._send_mail_to_attendees(cr, uid, [att.id for att in event.attendee_ids], template_xmlid='calendar_template_meeting_reminder', context=context)

        return res

    def do_notif_reminder(self, cr, uid, alert, context=None):
        """Build the client-side notification payload for ``alert``.

        Returns None (implicitly) when the alarm is not of type
        'notification'.
        """
        alarm = self.pool['calendar.alarm'].browse(cr, uid, alert['alarm_id'], context=context)
        event = self.pool['calendar.event'].browse(cr, uid, alert['event_id'], context=context)

        if alarm.type == 'notification':
            message = event.display_time

            # Seconds until the notification should pop (negative parts are
            # folded in via timedelta's days/seconds decomposition).
            delta = alert['notify_at'] - datetime.now()
            delta = delta.seconds + delta.days * 3600 * 24

            return {
                'event_id': event.id,
                'title': event.name,
                'message': message,
                'timer': delta,
                'notify_at': alert['notify_at'].strftime("%Y-%m-%d %H:%M:%S"),
            }
class calendar_alarm(osv.Model):
    """Reminder definition attached to events (amount + unit + channel)."""
    _name = 'calendar.alarm'
    _description = 'Event alarm'

    def _get_duration(self, cr, uid, ids, field_name, arg, context=None):
        """Functional getter for ``duration_minutes``: convert each alarm's
        (duration, interval) pair into a number of minutes."""
        minutes_per_unit = {'minutes': 1, 'hours': 60, 'days': 60 * 24}
        res = {}
        for alarm in self.browse(cr, uid, ids, context=context):
            # Unknown units fall back to 0, like the original if/elif chain.
            res[alarm.id] = alarm.duration * minutes_per_unit.get(alarm.interval, 0)
        return res

    _columns = {
        'name': fields.char('Name', required=True),  # fields function
        'type': fields.selection([('notification', 'Notification'), ('email', 'Email')], 'Type', required=True),
        'duration': fields.integer('Amount', required=True),
        'interval': fields.selection([('minutes', 'Minutes'), ('hours', 'Hours'), ('days', 'Days')], 'Unit', required=True),
        'duration_minutes': fields.function(_get_duration, type='integer', string='duration_minutes', store=True),
    }

    _defaults = {
        'type': 'notification',
        'duration': 1,
        'interval': 'hours',
    }
class ir_values(osv.Model):
    """Override ir.values so virtual recurrent-event ids (strings such as
    '4-20140401123456') are translated to real database ids before being
    stored or looked up."""
    _inherit = 'ir.values'

    def _map_models_to_real_ids(self, models):
        # Shared by set() and get(): map (model, virtual_id) pairs to
        # (model, real_id) and leave plain model names untouched.  The two
        # methods previously duplicated this loop.
        new_model = []
        for data in models:
            if isinstance(data, (list, tuple)):
                new_model.append((data[0], calendar_id2real_id(data[1])))
            else:
                new_model.append(data)
        return new_model

    def set(self, cr, uid, key, key2, name, models, value, replace=True, isobject=False, meta=False, preserve_user=False, company=False):
        """Store a default/action, normalizing calendar virtual ids first."""
        return super(ir_values, self).set(cr, uid, key, key2, name,
                                          self._map_models_to_real_ids(models),
                                          value, replace, isobject, meta, preserve_user, company)

    def get(self, cr, uid, key, key2, models, meta=False, context=None, res_id_req=False, without_user=True, key2_req=True):
        """Read defaults/actions, normalizing calendar virtual ids first."""
        if context is None:
            context = {}
        return super(ir_values, self).get(cr, uid, key, key2,
                                          self._map_models_to_real_ids(models),
                                          meta, context, res_id_req, without_user, key2_req)
class ir_model(osv.Model):
    """Override ir.model.read so calendar virtual ids are mapped back to
    real ids in the returned records."""
    _inherit = 'ir.model'

    def read(self, cr, uid, ids, fields=None, context=None, load='_classic_read'):
        """Read records, accepting a single id or a list, and replace each
        returned 'id' by its real calendar id."""
        single = isinstance(ids, (str, int, long))
        if context is None:
            context = {}
        records = super(ir_model, self).read(cr, uid, [ids] if single else ids,
                                             fields=fields, context=context, load=load)
        for record in records or []:
            record['id'] = calendar_id2real_id(record['id'])
        # Mirror the original "x and y or z" semantics for the single case.
        return (records[0] or records) if single else records
original_exp_report = openerp.service.report.exp_report


def exp_report(db, uid, object, ids, data=None, context=None):
    """
    Export Report: wrapper around the original report service that maps
    virtual (recurrent) calendar ids to real database ids before delegating.
    """
    if object == 'printscreen.list':
        # Screen listings are not calendar-specific: delegate directly.
        # Fix: the original fell through after this call and invoked the
        # handler a second time with remapped ids.
        return original_exp_report(db, uid, object, ids, data, context)
    new_ids = []
    for id in ids:
        new_ids.append(calendar_id2real_id(id))
    # Fix: guard against data=None (the parameter's default) before .get().
    if data and data.get('id', False):
        data['id'] = calendar_id2real_id(data['id'])
    return original_exp_report(db, uid, object, new_ids, data, context)

# Monkey-patch the report service so every export goes through the wrapper.
openerp.service.report.exp_report = exp_report
class calendar_event_type(osv.Model):
    """Tag/category attachable to meetings (see calendar.event ``categ_ids``)."""
    _name = 'calendar.event.type'
    _description = 'Meeting Type'
    _columns = {
        'name': fields.char('Name', required=True, translate=True),
    }
class calendar_event(osv.Model):
    """ Model for Calendar Event (a meeting, possibly recurrent, with
    attendees, reminders and mail-thread chatter). """
    _name = 'calendar.event'
    _description = "Meeting"
    _order = "id desc"  # newest meetings first by default
    _inherit = ["mail.thread", "ir.needaction_mixin"]
def do_run_scheduler(self, cr, uid, id, context=None):
    """Manually trigger the email-reminder scheduler
    (calendar.alarm_manager.get_next_mail)."""
    alarm_manager = self.pool['calendar.alarm_manager']
    alarm_manager.get_next_mail(cr, uid, context=context)
def get_recurrent_date_by_event(self, cr, uid, event, context=None):
    """Get recurrent dates based on Rule string and all event where recurrent_id is child

    Expands ``event.rrule`` into the list of occurrence datetimes (UTC),
    excluding occurrences that have been detached into child events
    (records whose ``recurrent_id`` points at this event).
    """
    def todate(date):
        # Rebuild a datetime from only the digits of the string, then
        # normalize it to the event timezone for the exdate comparison.
        val = parser.parse(''.join((re.compile('\d')).findall(date)))
        ## Dates are localized to saved timezone if any, else current timezone.
        if not val.tzinfo:
            val = pytz.UTC.localize(val)
        return val.astimezone(timezone)

    # NOTE(review): assumes context is a dict here -- a None context would
    # crash on .get('tz'); callers appear to always pass one.
    timezone = pytz.timezone(event.vtimezone or context.get('tz') or 'UTC')
    startdate = pytz.UTC.localize(datetime.strptime(event.date, "%Y-%m-%d %H:%M:%S"))  # Add "+hh:mm" timezone
    if not startdate:
        startdate = datetime.now()

    ## Convert the start date to saved timezone (or context tz) as it'll
    ## define the correct hour/day asked by the user to repeat for recurrence.
    startdate = startdate.astimezone(timezone)  # transform "+hh:mm" timezone
    rset1 = rrule.rrulestr(str(event.rrule), dtstart=startdate, forceset=True)
    # Detached/modified occurrences (active or not) are excluded from the set.
    ids_depending = self.search(cr, uid, [('recurrent_id', '=', event.id), '|', ('active', '=', False), ('active', '=', True)], context=context)
    all_events = self.browse(cr, uid, ids_depending, context=context)
    for ev in all_events:
        # NOTE(review): ``_exdate`` is a private dateutil attribute.
        rset1._exdate.append(todate(ev.recurrent_id_date))
    return [d.astimezone(pytz.UTC) for d in rset1]
def _get_recurrency_end_date(self, data, context=None):
    """Return the date at which the recurrence stops, or False.

    With ``end_type == 'count'`` the end date is computed from the deadline
    plus ``count + 1`` recurrence periods; otherwise the explicit
    ``end_date`` value is returned unchanged.
    """
    if not data.get('recurrency'):
        return False

    if data.get('end_type') == 'count' and all(data.get(key) for key in ['count', 'rrule_type', 'date_deadline']):
        # Period unit and multiplier per recurrence type.
        periods = {
            'daily': ('days', 1),
            'weekly': ('days', 7),
            'monthly': ('months', 1),
            'yearly': ('years', 1),
        }
        unit, step = periods[data['rrule_type']]
        repetitions = data['count'] + 1
        deadline = datetime.strptime(data['date_deadline'], tools.DEFAULT_SERVER_DATETIME_FORMAT)
        return deadline + relativedelta(**{unit: repetitions * step})
    return data.get('end_date')
def _find_my_attendee(self, cr, uid, meeting_ids, context=None):
    """
    Return the first attendee where the user connected has been invited from all the meeting_ids in parameters
    """
    current_partner_id = self.pool['res.users'].browse(cr, uid, uid, context=context).partner_id.id
    for meeting_id in meeting_ids:
        meeting = self.browse(cr, uid, meeting_id, context)
        for attendee in meeting.attendee_ids:
            if attendee.partner_id.id == current_partner_id:
                return attendee
    return False
def _get_display_time(self, cr, uid, meeting_id, context=None):
    """
    Return date and time (from to from) based on duration with timezone in string :
    eg.
    1) if user add duration for 2 hours, return : August-23-2013 at (04-30 To 06-30) (Europe/Brussels)
    2) if event all day ,return : AllDay, July-31-2013
    """
    if context is None:
        context = {}
    tz = context.get('tz', False)
    if not tz:  # tz can have a value False, so dont do it in the default value of get !
        # NOTE(review): this assigns a pytz tzinfo object while the context
        # value is a plain timezone *name*; both are only %s-interpolated
        # into the result below, where each renders as e.g. 'UTC'.
        tz = pytz.timezone('UTC')
    meeting = self.browse(cr, uid, meeting_id, context=context)
    # Render start/end in the user's timezone (context_timestamp).
    date = fields.datetime.context_timestamp(cr, uid, datetime.strptime(meeting.date, tools.DEFAULT_SERVER_DATETIME_FORMAT), context=context)
    date_deadline = fields.datetime.context_timestamp(cr, uid, datetime.strptime(meeting.date_deadline, tools.DEFAULT_SERVER_DATETIME_FORMAT), context=context)
    event_date = date.strftime('%B-%d-%Y')
    display_time = date.strftime('%H-%M')
    if meeting.allday:
        time = _("AllDay , %s") % (event_date)
    elif meeting.duration < 24:
        # Same-day event: one date plus a start/end hour range.
        duration = date + timedelta(hours=meeting.duration)
        time = ("%s at (%s To %s) (%s)") % (event_date, display_time, duration.strftime('%H-%M'), tz)
    else:
        # Multi-day event: show both start and end dates.
        time = ("%s at %s To\n %s at %s (%s)") % (event_date, display_time, date_deadline.strftime('%B-%d-%Y'), date_deadline.strftime('%H-%M'), tz)
    return time
def _compute(self, cr, uid, ids, fields, arg, context=None):
    """Multi-field functional getter for ``is_attendee``,
    ``attendee_status`` and ``display_time``, all derived from the current
    user's attendee record on each meeting."""
    result = {}
    for meeting_id in ids:
        values = {}
        attendee = self._find_my_attendee(cr, uid, [meeting_id], context)
        for field in fields:
            if field == 'is_attendee':
                values[field] = bool(attendee)
            elif field == 'attendee_status':
                values[field] = attendee.state if attendee else 'needsAction'
            elif field == 'display_time':
                values[field] = self._get_display_time(cr, uid, meeting_id, context=context)
        result[meeting_id] = values
    return result
def _get_rulestring(self, cr, uid, ids, name, arg, context=None):
    """
    Gets Recurrence rule string according to value type RECUR of iCalendar from the values given.
    @return: dictionary of rrule value.

    Raises osv.except_osv when interval is negative or count is <= 0.
    """
    result = {}
    if not isinstance(ids, list):
        ids = [ids]

    for id in ids:
        #read these fields as SUPERUSER because if the record is private a normal search could return False and raise an error
        data = self.browse(cr, SUPERUSER_ID, id, context=context)
        # Validate the user-supplied recurrence parameters before computing.
        if data.interval and data.interval < 0:
            raise osv.except_osv(_('Warning!'), _('Interval cannot be negative.'))
        if data.count and data.count <= 0:
            raise osv.except_osv(_('Warning!'), _('Count cannot be negative or 0.'))
        # Re-read the raw values (as the current user) for compute_rule_string.
        data = self.read(cr, uid, id, ['id', 'byday', 'recurrency', 'month_list', 'end_date', 'rrule_type', 'month_by', 'interval', 'count', 'end_type', 'mo', 'tu', 'we', 'th', 'fr', 'sa', 'su', 'day', 'week_list'], context=context)
        event = data['id']
        if data['recurrency']:
            result[event] = self.compute_rule_string(data)
        else:
            result[event] = ""
    return result
# retro compatibility function
def _rrule_write(self, cr, uid, ids, field_name, field_value, args, context=None):
    """Retro-compatibility alias for :meth:`_set_rulestring`.

    Fix: the original called ``self._set_rulestring(self, cr, uid, ...)``,
    passing ``self`` twice and shifting every argument by one position
    (``cr`` landed in ``uid``, etc.), which made the alias unusable.
    """
    return self._set_rulestring(cr, uid, ids, field_name, field_value, args, context=context)
def _set_rulestring(self, cr, uid, ids, field_name, field_value, args, context=None):
    """Inverse of the ``rrule`` functional field: parse the RRULE string and
    write the individual recurrence fields back onto the events."""
    if not isinstance(ids, list):
        ids = [ids]
    values = self._get_empty_rrule_data()
    if field_value:
        values['recurrency'] = True
        for event in self.browse(cr, uid, ids, context=context):
            # Parse relative to each event's own start date.
            parsed = self._parse_rrule(field_value, dict(values), event.date)
            values.update(parsed)
    self.write(cr, uid, ids, values, context=context)
    return True
def _tz_get(self, cr, uid, context=None):
    """Selection list of all pytz timezones as (lowercased key, label)."""
    timezones = []
    for tz_name in pytz.all_timezones:
        timezones.append((tz_name.lower(), tz_name))
    return timezones
# Chatter tracking: notify followers (calendar.subtype_invitation) whenever
# the meeting location or start date changes.
_track = {
    'location': {
        'calendar.subtype_invitation': lambda self, cr, uid, obj, ctx=None: True,
    },
    'date': {
        'calendar.subtype_invitation': lambda self, cr, uid, obj, ctx=None: True,
    },
}
_columns = {
    'id': fields.integer('ID', readonly=True),
    'state': fields.selection([('draft', 'Unconfirmed'), ('open', 'Confirmed')], string='Status', readonly=True, track_visibility='onchange'),
    'name': fields.char('Meeting Subject', required=True, states={'done': [('readonly', True)]}),
    # The three fields below share one multi-getter (_compute) based on the
    # current user's attendee record.
    'is_attendee': fields.function(_compute, string='Attendee', type="boolean", multi='attendee'),
    'attendee_status': fields.function(_compute, string='Attendee Status', type="selection", selection=calendar_attendee.STATE_SELECTION, multi='attendee'),
    'display_time': fields.function(_compute, string='Event Time', type="char", multi='attendee'),
    'date': fields.datetime('Date', states={'done': [('readonly', True)]}, required=True, track_visibility='onchange'),
    'date_deadline': fields.datetime('End Date', states={'done': [('readonly', True)]}, required=True,),
    'duration': fields.float('Duration', states={'done': [('readonly', True)]}),
    'description': fields.text('Description', states={'done': [('readonly', True)]}),
    'class': fields.selection([('public', 'Public'), ('private', 'Private'), ('confidential', 'Public for Employees')], 'Privacy', states={'done': [('readonly', True)]}),
    'location': fields.char('Location', help="Location of Event", track_visibility='onchange', states={'done': [('readonly', True)]}),
    'show_as': fields.selection([('free', 'Free'), ('busy', 'Busy')], 'Show Time as', states={'done': [('readonly', True)]}),
    # Recurrence: 'rrule' is the computed/stored iCalendar RECUR string; the
    # individual fields below are its editable source values.
    'rrule': fields.function(_get_rulestring, type='char', fnct_inv=_set_rulestring, store=True, string='Recurrent Rule'),
    'rrule_type': fields.selection([('daily', 'Day(s)'), ('weekly', 'Week(s)'), ('monthly', 'Month(s)'), ('yearly', 'Year(s)')], 'Recurrency', states={'done': [('readonly', True)]}, help="Let the event automatically repeat at that interval"),
    'recurrency': fields.boolean('Recurrent', help="Recurrent Meeting"),
    'recurrent_id': fields.integer('Recurrent ID'),
    'recurrent_id_date': fields.datetime('Recurrent ID date'),
    'vtimezone': fields.selection(_tz_get, string='Timezone'),
    'end_type': fields.selection([('count', 'Number of repetitions'), ('end_date', 'End date')], 'Recurrence Termination'),
    'interval': fields.integer('Repeat Every', help="Repeat every (Days/Week/Month/Year)"),
    'count': fields.integer('Repeat', help="Repeat x times"),
    # Weekday flags for weekly recurrences (BYDAY).
    'mo': fields.boolean('Mon'),
    'tu': fields.boolean('Tue'),
    'we': fields.boolean('Wed'),
    'th': fields.boolean('Thu'),
    'fr': fields.boolean('Fri'),
    'sa': fields.boolean('Sat'),
    'su': fields.boolean('Sun'),
    'month_by': fields.selection([('date', 'Date of month'), ('day', 'Day of month')], 'Option', oldname='select1'),
    'day': fields.integer('Date of month'),
    'week_list': fields.selection([('MO', 'Monday'), ('TU', 'Tuesday'), ('WE', 'Wednesday'), ('TH', 'Thursday'), ('FR', 'Friday'), ('SA', 'Saturday'), ('SU', 'Sunday')], 'Weekday'),
    'byday': fields.selection([('1', 'First'), ('2', 'Second'), ('3', 'Third'), ('4', 'Fourth'), ('5', 'Fifth'), ('-1', 'Last')], 'By day'),
    'end_date': fields.date('Repeat Until'),
    'allday': fields.boolean('All Day', states={'done': [('readonly', True)]}),
    'user_id': fields.many2one('res.users', 'Responsible', states={'done': [('readonly', True)]}),
    'color_partner_id': fields.related('user_id', 'partner_id', 'id', type="integer", string="colorize", store=False),  # Color of creator
    'active': fields.boolean('Active', help="If the active field is set to true, it will allow you to hide the event alarm information without removing it."),
    'categ_ids': fields.many2many('calendar.event.type', 'meeting_category_rel', 'event_id', 'type_id', 'Tags'),
    'attendee_ids': fields.one2many('calendar.attendee', 'event_id', 'Attendees', ondelete='cascade'),
    'partner_ids': fields.many2many('res.partner', string='Attendees', states={'done': [('readonly', True)]}),
    'alarm_ids': fields.many2many('calendar.alarm', string='Reminders', ondelete="restrict"),
}
_defaults = {
    'end_type': 'count',
    'count': 1,
    'rrule_type': False,
    'state': 'draft',
    'class': 'public',
    'show_as': 'busy',
    'month_by': 'date',
    'interval': 1,
    'active': 1,
    'user_id': lambda self, cr, uid, ctx: uid,
    # Creator is an attendee by default.
    'partner_ids': lambda self, cr, uid, ctx: [self.pool['res.users'].browse(cr, uid, [uid], context=ctx)[0].partner_id.id]
}
def _check_closing_date(self, cr, uid, ids, context=None):
    """Constraint helper: the end date may never precede the start date."""
    return all(event.date_deadline >= event.date
               for event in self.browse(cr, uid, ids, context=context))
# Model-level constraints, checked on create/write.
_constraints = [
    (_check_closing_date, 'Error ! End date cannot be set before start date.', ['date_deadline']),
]
def onchange_dates(self, cr, uid, ids, start_date, duration=False, end_date=False, allday=False, context=None):
    """Returns duration and/or end date based on values passed
    @param ids: List of calendar event's IDs.

    Keeps ``duration`` and ``date_deadline`` consistent with each other and
    with the start date; all-day events are snapped to a 24h duration.
    """
    if context is None:
        context = {}

    value = {}
    if not start_date:
        return value
    if not end_date and not duration:
        duration = 1.00
        value['duration'] = duration

    if allday:  # For all day event
        start = datetime.strptime(start_date, "%Y-%m-%d %H:%M:%S")
        user = self.pool['res.users'].browse(cr, uid, uid)
        tz = pytz.timezone(user.tz) if user.tz else pytz.utc
        # Round-trip through the user's timezone before storing back as UTC.
        start = pytz.utc.localize(start).astimezone(tz)     # convert start in user's timezone
        start = start.astimezone(pytz.utc)                  # convert start back to utc
        value['duration'] = 24.0
        value['date'] = datetime.strftime(start, "%Y-%m-%d %H:%M:%S")
    else:
        start = datetime.strptime(start_date, "%Y-%m-%d %H:%M:%S")

    if end_date and not duration:
        # Derive duration (hours) from the provided end date.
        end = datetime.strptime(end_date, "%Y-%m-%d %H:%M:%S")
        diff = end - start
        duration = float(diff.days) * 24 + (float(diff.seconds) / 3600)
        value['duration'] = round(duration, 2)
    elif not end_date:
        # Derive the end date from the (possibly defaulted) duration.
        end = start + timedelta(hours=duration)
        value['date_deadline'] = end.strftime("%Y-%m-%d %H:%M:%S")
    elif end_date and duration and not allday:
        # we have both, keep them synchronized:
        # set duration based on end_date (arbitrary decision: this avoid
        # getting dates like 06:31:48 instead of 06:32:00)
        end = datetime.strptime(end_date, "%Y-%m-%d %H:%M:%S")
        diff = end - start
        duration = float(diff.days) * 24 + (float(diff.seconds) / 3600)
        value['duration'] = round(duration, 2)

    return {'value': value}
def new_invitation_token(self, cr, uid, record, partner_id):
    """Generate a random access token for an attendee's invitation link."""
    token = uuid.uuid4()
    return token.hex
def create_attendees(self, cr, uid, ids, context):
    """Synchronize calendar.attendee records with each event's partner_ids.

    Creates an attendee (with invitation mail) for every partner without
    one, auto-accepts the current user, subscribes new partners to the
    chatter and removes attendees whose partner was dropped.

    @return: dict event_id -> {'new_attendee_ids', 'old_attendee_ids',
             'removed_attendee_ids'}
    """
    user_obj = self.pool['res.users']
    current_user = user_obj.browse(cr, uid, uid, context=context)
    res = {}
    for event in self.browse(cr, uid, ids, context):
        # Partners that already have an attendee record on this event.
        attendees = {}
        for att in event.attendee_ids:
            attendees[att.partner_id.id] = True
        new_attendees = []
        new_att_partner_ids = []
        for partner in event.partner_ids:
            if partner.id in attendees:
                continue
            access_token = self.new_invitation_token(cr, uid, event, partner.id)
            values = {
                'partner_id': partner.id,
                'event_id': event.id,
                'access_token': access_token,
                'email': partner.email,
            }
            # The organizer accepts his own invitation implicitly.
            if partner.id == current_user.partner_id.id:
                values['state'] = 'accepted'

            att_id = self.pool['calendar.attendee'].create(cr, uid, values, context=context)
            new_attendees.append(att_id)
            new_att_partner_ids.append(partner.id)

            # Do not mail the organizer himself; log in the chatter when a
            # mail actually went out.
            if not current_user.email or current_user.email != partner.email:
                mail_from = current_user.email or tools.config.get('email_from', False)
                if self.pool['calendar.attendee']._send_mail_to_attendees(cr, uid, att_id, email_from=mail_from, context=context):
                    self.message_post(cr, uid, event.id, body=_("An invitation email has been sent to attendee %s") % (partner.name,), subtype="calendar.subtype_invitation", context=context)

        if new_attendees:
            self.write(cr, uid, [event.id], {'attendee_ids': [(4, att) for att in new_attendees]}, context=context)
        if new_att_partner_ids:
            self.message_subscribe(cr, uid, [event.id], new_att_partner_ids, context=context)

        # We remove old attendees who are not in partner_ids now.
        all_partner_ids = [part.id for part in event.partner_ids]
        all_part_attendee_ids = [att.partner_id.id for att in event.attendee_ids]
        all_attendee_ids = [att.id for att in event.attendee_ids]
        partner_ids_to_remove = map(lambda x: x, set(all_part_attendee_ids + new_att_partner_ids) - set(all_partner_ids))

        attendee_ids_to_remove = []

        if partner_ids_to_remove:
            attendee_ids_to_remove = self.pool["calendar.attendee"].search(cr, uid, [('partner_id.id', 'in', partner_ids_to_remove), ('event_id.id', '=', event.id)], context=context)
            if attendee_ids_to_remove:
                self.pool['calendar.attendee'].unlink(cr, uid, attendee_ids_to_remove, context)

        res[event.id] = {
            'new_attendee_ids': new_attendees,
            'old_attendee_ids': all_attendee_ids,
            'removed_attendee_ids': attendee_ids_to_remove
        }
    return res
def get_search_fields(self, browse_event, order_fields, r_date=None):
    """Extract from an event the values used to sort its (virtual) records.

    @param browse_event: browse record of the event
    @param order_fields: field names to extract
    @param r_date: occurrence date of a recurrent event; when given, the
        'id' entry becomes the virtual id string '<id>-<YYYYMMDDHHMMSS>'
    @return: dict mapping each requested field name to a sortable value
    """
    sort_fields = {}
    for field in order_fields:
        if field == 'id' and r_date:
            sort_fields[field] = '%s-%s' % (browse_event[field], r_date.strftime("%Y%m%d%H%M%S"))
        else:
            sort_fields[field] = browse_event[field]
            # If we sort on a FK we obtain a browse_record, so sort on its
            # name_get instead.  (Fix: this explanation used to be a bare
            # string literal, i.e. a no-op statement, not a comment.)
            if isinstance(browse_event[field], openerp.osv.orm.browse_record):
                name_get = browse_event[field].name_get()
                if len(name_get) and len(name_get[0]) >= 2:
                    sort_fields[field] = name_get[0][1]
    return sort_fields
def get_recurrent_ids(self, cr, uid, event_id, domain, order=None, context=None):
    """Gives virtual event ids for recurring events
    This method gives ids of dates that comes between start date and end date of calendar views
    @param order: The fields (comma separated, format "FIELD {DESC|ASC}") on which the events should be sorted
    """
    if not context:
        context = {}

    if isinstance(event_id, (str, int, long)):
        ids_to_browse = [event_id]  # keep select for return
    else:
        ids_to_browse = event_id

    if order:
        order_fields = [field.split()[0] for field in order.split(',')]
    else:
        # fallback on self._order defined on the model
        order_fields = [field.split()[0] for field in self._order.split(',')]

    if 'id' not in order_fields:
        order_fields.append('id')

    result_data = []
    result = []
    for ev in self.browse(cr, uid, ids_to_browse, context=context):
        if not ev.recurrency or not ev.rrule:
            # Non-recurrent event: keep its real id as-is.
            result.append(ev.id)
            result_data.append(self.get_search_fields(ev, order_fields))
            continue
        rdates = self.get_recurrent_date_by_event(cr, uid, ev, context=context)

        for r_date in rdates:
            # fix domain evaluation
            # step 1: check date and replace expression by True or False, replace other expressions by True
            # step 2: evaluation of & and |
            # check if there are one False
            pile = []
            ok = True
            for arg in domain:
                if str(arg[0]) in (str('date'), str('date_deadline'), str('end_date')):
                    if (arg[1] == '='):
                        ok = r_date.strftime('%Y-%m-%d') == arg[2]
                    if (arg[1] == '>'):
                        ok = r_date.strftime('%Y-%m-%d') > arg[2]
                    if (arg[1] == '<'):
                        ok = r_date.strftime('%Y-%m-%d') < arg[2]
                    if (arg[1] == '>='):
                        ok = r_date.strftime('%Y-%m-%d') >= arg[2]
                    if (arg[1] == '<='):
                        ok = r_date.strftime('%Y-%m-%d') <= arg[2]
                    pile.append(ok)
                elif str(arg) == str('&') or str(arg) == str('|'):
                    pile.append(arg)
                else:
                    # Non-date leaves are assumed satisfied (the real search
                    # already filtered on them).
                    pile.append(True)
            pile.reverse()
            # Evaluate the (reversed) polish-notation domain with a stack.
            # NOTE(review): assumes a well-formed domain; a leading operator
            # with an empty stack would pop from an empty list.
            new_pile = []
            for item in pile:
                if not isinstance(item, basestring):
                    res = item
                elif str(item) == str('&'):
                    first = new_pile.pop()
                    second = new_pile.pop()
                    res = first and second
                elif str(item) == str('|'):
                    first = new_pile.pop()
                    second = new_pile.pop()
                    res = first or second
                new_pile.append(res)

            # Skip this occurrence if any evaluated term is falsy.
            if [True for item in new_pile if not item]:
                continue
            result_data.append(self.get_search_fields(ev, order_fields, r_date=r_date))

    # order_fields always contains at least 'id' (appended above).
    if order_fields:
        def comparer(left, right):
            for fn, mult in comparers:
                result = cmp(fn(left), fn(right))
                if result:
                    return mult * result
            return 0

        # '-' prefix marks DESC fields for the comparers below.
        sort_params = [key.split()[0] if key[-4:].lower() != 'desc' else '-%s' % key.split()[0] for key in (order or self._order).split(',')]
        comparers = [((itemgetter(col[1:]), -1) if col[0] == '-' else (itemgetter(col), 1)) for col in sort_params]
        ids = [r['id'] for r in sorted(result_data, cmp=comparer)]

    if isinstance(event_id, (str, int, long)):
        return ids and ids[0] or False
    else:
        return ids
def compute_rule_string(self, data):
    """
    Compute rule string according to value type RECUR of iCalendar from the values given.
    @param self: the object pointer
    @param data: dictionary of freq and interval value
    @return: string containing recurring rule (empty if no rule)
    """
    def get_week_string(freq, data):
        # BYDAY part for weekly rules.  Iterate the fixed Monday..Sunday
        # list (not the data dict) so the output order is deterministic.
        weekdays = ['mo', 'tu', 'we', 'th', 'fr', 'sa', 'su']
        if freq == 'weekly':
            byday = [day.upper() for day in weekdays if data.get(day)]
            if byday:
                return ';BYDAY=' + ','.join(byday)
        return ''

    def get_month_string(freq, data):
        # BYDAY/BYMONTHDAY part for monthly rules.
        if freq == 'monthly':
            if data.get('month_by') == 'date' and (data.get('day') < 1 or data.get('day') > 31):
                # Consistency fix: message is now translatable like the
                # other user-facing errors in this model.
                raise osv.except_osv(_('Error!'), _("Please select a proper day of the month."))

            if data.get('month_by') == 'day':  # Eg : Second Monday of the month
                return ';BYDAY=' + data.get('byday') + data.get('week_list')
            elif data.get('month_by') == 'date':  # Eg : 16th of the month
                return ';BYMONTHDAY=' + str(data.get('day'))
        return ''

    def get_end_date(data):
        # Termination part: either COUNT=n or UNTIL=YYYYMMDDT235959Z.
        if data.get('end_date'):
            data['end_date_new'] = ''.join((re.compile('\d')).findall(data.get('end_date'))) + 'T235959Z'

        return (data.get('end_type') == 'count' and (';COUNT=' + str(data.get('count'))) or '') +\
            ((data.get('end_date_new') and data.get('end_type') == 'end_date' and (';UNTIL=' + data.get('end_date_new'))) or '')

    freq = data.get('rrule_type', False)  # daily/weekly/monthly/yearly
    res = ''
    if freq:
        # Fix: local variable was misspelled 'interval_srting'.
        interval_string = data.get('interval') and (';INTERVAL=' + str(data.get('interval'))) or ''
        res = 'FREQ=' + freq.upper() + get_week_string(freq, data) + interval_string + get_end_date(data) + get_month_string(freq, data)

    return res
def _get_empty_rrule_data(self):
return {
'byday': False,
'recurrency': False,
'end_date': False,
'rrule_type': False,
'month_by': False,
'interval': 0,
'count': False,
'end_type': False,
'mo': False,
'tu': False,
'we': False,
'th': False,
'fr': False,
'sa': False,
'su': False,
'day': False,
'week_list': False
}
def _parse_rrule(self, rule, data, date_start):
    """Map an RRULE string onto the recurrence form fields in *data*.

    :param rule: iCalendar RRULE string
    :param data: dict of form fields to fill (see _get_empty_rrule_data)
    :param date_start: event start as '%Y-%m-%d %H:%M:%S' string
    :return: the updated *data* dict

    NOTE(review): relies on private attributes of dateutil.rrule
    (_freq, _count, _until, _byweekday, ...) — fragile across dateutil
    versions; confirm against the pinned dateutil release.
    """
    day_list = ['mo', 'tu', 'we', 'th', 'fr', 'sa', 'su']
    rrule_type = ['yearly', 'monthly', 'weekly', 'daily']
    r = rrule.rrulestr(rule, dtstart=datetime.strptime(date_start, "%Y-%m-%d %H:%M:%S"))
    if r._freq > 0 and r._freq < 4:
        data['rrule_type'] = rrule_type[r._freq]
    data['count'] = r._count
    data['interval'] = r._interval
    data['end_date'] = r._until and r._until.strftime("%Y-%m-%d %H:%M:%S")
    # repeat weekly: tick one flag per weekday present in BYDAY
    if r._byweekday:
        for i in xrange(0, 7):
            if i in r._byweekday:
                data[day_list[i]] = True
        data['rrule_type'] = 'weekly'
    # repeat monthly by nweekday ((weekday, weeknumber), ) e.g. 2nd Monday
    if r._bynweekday:
        data['week_list'] = day_list[r._bynweekday[0][0]].upper()
        data['byday'] = str(r._bynweekday[0][1])
        data['month_by'] = 'day'
        data['rrule_type'] = 'monthly'
    # repeat monthly by day-of-month (BYMONTHDAY), overrides the above
    if r._bymonthday:
        data['day'] = r._bymonthday[0]
        data['month_by'] = 'date'
        data['rrule_type'] = 'monthly'
    # repeat yearly, but for openerp it's monthly: keep the monthly info and
    # multiply the interval by 12
    if r._bymonth:
        data['interval'] = data['interval'] * 12
    # FIXME: handle the "repeat forever" case properly; for now an endless
    # recurrence (no COUNT and no UNTIL) is capped at 100 occurrences
    if not (data.get('count') or data.get('end_date')):
        data['count'] = 100
    if data.get('count'):
        data['end_type'] = 'count'
    else:
        data['end_type'] = 'end_date'
    return data
def message_get_subscription_data(self, cr, uid, ids, user_pid=None, context=None):
    """Fetch subscription data on the real events behind possibly-virtual ids."""
    subscriptions = {}
    for virtual_id in ids:
        real_id = calendar_id2real_id(virtual_id)
        data = super(calendar_event, self).message_get_subscription_data(
            cr, uid, [real_id], user_pid=None, context=context)
        subscriptions[virtual_id] = data[real_id]
    return subscriptions
def onchange_partner_ids(self, cr, uid, ids, value, context=None):
    """ The basic purpose of this method is to check that destination partners
        effectively have email addresses. Otherwise a warning is thrown.
        :param value: value format: [[6, 0, [3, 4]]]
    """
    res = {'value': {}}
    # Only a "replace" command (6, 0, ids) carries the final partner list.
    if not value or not value[0] or not value[0][0] == 6:
        # Fix: return the (empty) onchange dict instead of a bare `return`
        # (None), so callers can always treat the result as a dict.
        return res
    res.update(self.check_partners_email(cr, uid, value[0][2], context=context))
    return res
def onchange_rec_day(self, cr, uid, id, date, mo, tu, we, th, fr, sa, su):
    """Set the start date to the first occurrence of the weekly rrule."""
    rule_data = self._get_empty_rrule_data()
    rule_data.update(
        byday=True,
        rrule_type='weekly',
        interval=1,
        mo=mo, tu=tu, we=we, th=th, fr=fr, sa=sa, su=su,
    )
    rule_string = self.compute_rule_string(rule_data)
    occurrences = rrule.rrulestr(
        rule_string + ";COUNT=1",
        dtstart=datetime.strptime(date, "%Y-%m-%d %H:%M:%S"),
        forceset=True)
    first_occurence = list(occurrences)[0]
    return {'value': {'date': first_occurence.strftime("%Y-%m-%d") + ' 00:00:00'}}
def check_partners_email(self, cr, uid, partner_ids, context=None):
    """Warn about selected partners that have no email address defined.

    :return: {} when every partner has an email, otherwise a 'warning' dict
    """
    partners = self.pool['res.partner'].browse(cr, uid, partner_ids, context=context)
    missing = [partner for partner in partners if not partner.email]
    if not missing:
        return {}
    warning_msg = _('The following contacts have no email address :')
    warning_msg += ''.join('\n- %s' % (partner.name) for partner in missing)
    return {'warning': {
        'title': _('Email addresses not found'),
        'message': warning_msg,
    }}
# ----------------------------------------
# OpenChatter
# ----------------------------------------
# shows events of the day for this user
def _needaction_domain_get(self, cr, uid, context=None):
    """Domain for the 'needaction' counter: today's events of the current user."""
    end_of_today = time.strftime(DEFAULT_SERVER_DATE_FORMAT + ' 23:59:59')
    return [('end_date', '>=', end_of_today),
            ('date', '>=', end_of_today),
            ('user_id', '=', uid)]
def message_post(self, cr, uid, thread_id, body='', subject=None, type='notification', subtype=None, parent_id=False, attachments=None, context=None, **kwargs):
    """Post a message on the real event behind a possibly-virtual id."""
    if isinstance(thread_id, str):
        thread_id = get_real_ids(thread_id)
    # Fix: context defaults to None — guard before looking up 'default_date'
    # (previously this raised AttributeError when no context was passed).
    if context and context.get('default_date'):
        del context['default_date']
    return super(calendar_event, self).message_post(cr, uid, thread_id, body=body, subject=subject, type=type, subtype=subtype, parent_id=parent_id, attachments=attachments, context=context, **kwargs)
def do_sendmail(self, cr, uid, ids, context=None):
    """Email the invitation to all attendees of the given events and log it."""
    attendee_pool = self.pool['calendar.attendee']
    for event in self.browse(cr, uid, ids, context):
        sender = self.pool['res.users'].browse(cr, uid, uid, context=context)
        if not sender.email:
            continue
        attendee_ids = [attendee.id for attendee in event.attendee_ids]
        if attendee_pool._send_mail_to_attendees(cr, uid, attendee_ids, email_from=sender.email, context=context):
            self.message_post(cr, uid, event.id, body=_("An invitation email has been sent to attendee(s)"), subtype="calendar.subtype_invitation", context=context)
    return
def get_attendee(self, cr, uid, meeting_id, context=None):
    """Return meeting summary and attendee statuses (used by the web controller)."""
    meeting = self.browse(cr, uid, int(meeting_id), context)
    attendees = [{'name': attendee.cn, 'status': attendee.state}
                 for attendee in meeting.attendee_ids]
    return {
        'meeting': {
            'event': meeting.name,
            'where': meeting.location,
            'when': meeting.display_time,
        },
        'attendee': attendees,
    }
def get_interval(self, cr, uid, ids, date, interval, tz=None, context=None):
    """Format a piece of *date* for the email template (calendar_event_data.xml).

    :param date: datetime string in the server datetime format
    :param interval: one of 'day', 'month', 'dayname' or 'time'
    :param tz: optional timezone name used to localize the date
    :raises ValueError: if *interval* is not a supported keyword
    """
    date = datetime.strptime(date.split('.')[0], DEFAULT_SERVER_DATETIME_FORMAT)
    if tz:
        # The stored value is UTC; shift it to the requested timezone.
        timezone = pytz.timezone(tz)
        date = date.replace(tzinfo=pytz.timezone('UTC')).astimezone(timezone)
    if interval == 'day':
        res = str(date.day)
    elif interval == 'month':
        res = date.strftime('%B') + " " + str(date.year)
    elif interval == 'dayname':
        res = date.strftime('%A')
    elif interval == 'time':
        res = date.strftime('%I:%M %p')
    else:
        # Fix: an unknown interval previously crashed with UnboundLocalError;
        # fail with an explicit, descriptive error instead.
        raise ValueError("Unknown interval %r" % (interval,))
    return res
def search(self, cr, uid, args, offset=0, limit=0, order=None, context=None, count=False):
    """Search events, expanding recurrent ones into virtual occurrences.

    With context['virtual_id'] left True (the default), the domain is
    rewritten so that recurrent events whose recurrence may still match are
    kept, and offset/limit/order/count are applied only after the virtual
    occurrences have been generated by get_recurrent_ids().
    """
    if context is None:
        context = {}
    if context.get('mymeetings', False):
        # Restrict to meetings the current user attends or organizes.
        partner_id = self.pool['res.users'].browse(cr, uid, uid, context).partner_id.id
        args += ['|', ('partner_ids', 'in', [partner_id]), ('user_id', '=', uid)]
    new_args = []
    for arg in args:
        new_arg = arg
        if arg[0] in ('date', unicode('date')) and arg[1] == ">=":
            if context.get('virtual_id', True):
                # Also keep recurrent events whose recurrence ends after the
                # requested date; their occurrences are expanded below.
                new_args += ['|', '&', ('recurrency', '=', 1), ('end_date', arg[1], arg[2])]
        elif arg[0] == "id":
            # Virtual ids in the domain are mapped back to real ids.
            new_id = get_real_ids(arg[2])
            new_arg = (arg[0], arg[1], new_id)
        new_args.append(new_arg)
    if not context.get('virtual_id', True):
        return super(calendar_event, self).search(cr, uid, new_args, offset=offset, limit=limit, order=order, context=context, count=count)
    # offset, limit, order and count must be treated separately as we may
    # need to deal with virtual ids
    res = super(calendar_event, self).search(cr, uid, new_args, offset=0, limit=0, order=None, context=context, count=False)
    res = self.get_recurrent_ids(cr, uid, res, args, order=order, context=context)
    if count:
        return len(res)
    elif limit:
        return res[offset: offset + limit]
    return res
def copy(self, cr, uid, id, default=None, context=None):
    """Duplicate the real event behind ``id``; attendees are never copied."""
    if context is None:
        context = {}
    default = default or {}
    default['attendee_ids'] = False
    return super(calendar_event, self).copy(
        cr, uid, calendar_id2real_id(id), default, context)
def _detach_one_event(self, cr, uid, id, values=None, context=None):
    """Copy one occurrence of a recurrent event as a standalone event.

    :param values: optional field overrides applied to the detached copy
    :return: id of the newly created (detached) event
    """
    # Fix: the default was a mutable `dict()` shared across calls; use None
    # and build a fresh dict per call instead.
    if values is None:
        values = {}
    real_event_id = calendar_id2real_id(id)
    data = self.read(cr, uid, id, ['date', 'date_deadline', 'rrule', 'duration'])
    if data.get('rrule'):
        # Break the recurrence link: the copy points back to its parent via
        # recurrent_id and carries no rrule of its own.
        data.update(
            values,
            recurrent_id=real_event_id,
            recurrent_id_date=data.get('date'),
            rrule_type=False,
            rrule='',
            recurrency=False,
            end_date=datetime.strptime(values.get('date', False) or data.get('date'), "%Y-%m-%d %H:%M:%S") + timedelta(hours=values.get('duration', False) or data.get('duration'))
        )
    # do not copy the id
    if data.get('id'):
        del(data['id'])
    new_id = self.copy(cr, uid, real_event_id, default=data, context=context)
    return new_id
def open_after_detach_event(self, cr, uid, ids, context=None):
    """Detach the first given occurrence and open it in an editable form view."""
    if context is None:
        context = {}
    detached_id = self._detach_one_event(cr, uid, ids[0], context=context)
    action = {
        'type': 'ir.actions.act_window',
        'res_model': 'calendar.event',
        'view_mode': 'form',
        'res_id': detached_id,
        'target': 'current',
        'flags': {'form': {'action_buttons': True, 'options': {'mode': 'edit'}}},
    }
    return action
def write(self, cr, uid, ids, values, context=None):
    """Write on real and/or virtual event ids.

    Virtual ids ('<real_id>-<date>') are either redirected to their real
    event or detached into a standalone event, depending on which fields
    are being changed.
    """
    def _only_changes_to_apply_on_real_ids(field_names):
        ''' return True if changes are only to be made on the real ids'''
        for field in field_names:
            if field in ['date', 'active']:
                return True
        return False
    context = context or {}
    if isinstance(ids, (str, int, long)):
        if len(str(ids).split('-')) == 1:
            ids = [int(ids)]
        else:
            ids = [ids]
    res = False
    new_id = False
    # Special write of complex IDS (virtual occurrence ids)
    for event_id in ids:
        if len(str(event_id).split('-')) == 1:
            continue
        # NOTE(review): removing from `ids` while iterating it skips the
        # following element; with several virtual ids some may be missed —
        # confirm whether callers ever pass more than one virtual id.
        ids.remove(event_id)
        real_event_id = calendar_id2real_id(event_id)
        # if we are setting the recurrency flag to False or if we are only
        # changing fields that should be only updated on the real ID and not
        # on the virtual (like message_follower_ids): then set real ids to be
        # updated.
        if not values.get('recurrency', True) or not _only_changes_to_apply_on_real_ids(values.keys()):
            ids.append(real_event_id)
            continue
        else:
            # Otherwise the edited occurrence is split off as its own event.
            data = self.read(cr, uid, event_id, ['date', 'date_deadline', 'rrule', 'duration'])
            if data.get('rrule'):
                new_id = self._detach_one_event(cr, uid, event_id, values, context=None)
    res = super(calendar_event, self).write(cr, uid, ids, values, context=context)
    # set end_date for calendar searching
    if values.get('recurrency', True) and values.get('end_type', 'count') in ('count', unicode('count')) and \
            (values.get('rrule_type') or values.get('count') or values.get('date') or values.get('date_deadline')):
        for data in self.read(cr, uid, ids, ['end_date', 'date_deadline', 'recurrency', 'rrule_type', 'count', 'end_type'], context=context):
            end_date = self._get_recurrency_end_date(data, context=context)
            super(calendar_event, self).write(cr, uid, [data['id']], {'end_date': end_date}, context=context)
    attendees_create = False
    if values.get('partner_ids', False):
        attendees_create = self.create_attendees(cr, uid, ids, context)
    # When the date changed on an active event, warn the existing attendees
    # (newly created ones already received an invitation above).
    if values.get('date', False) and values.get('active', True):
        the_id = new_id or (ids and int(ids[0]))
        if attendees_create:
            attendees_create = attendees_create[the_id]
            mail_to_ids = list(set(attendees_create['old_attendee_ids']) - set(attendees_create['removed_attendee_ids']))
        else:
            mail_to_ids = [att.id for att in self.browse(cr, uid, the_id, context=context).attendee_ids]
        if mail_to_ids:
            current_user = self.pool['res.users'].browse(cr, uid, uid, context=context)
            if self.pool['calendar.attendee']._send_mail_to_attendees(cr, uid, mail_to_ids, template_xmlid='calendar_template_meeting_changedate', email_from=current_user.email, context=context):
                self.message_post(cr, uid, the_id, body=_("A email has been send to specify that the date has been changed !"), subtype="calendar.subtype_invitation", context=context)
    # NOTE(review): `res or True and False` parses as `res or (True and
    # False)`, i.e. it returns `res` when truthy and False otherwise —
    # confirm whether `res or True` was intended.
    return res or True and False
def create(self, cr, uid, vals, context=None):
    """Create the event, then compute its recurrency end date and attendees."""
    if context is None:
        context = {}
    # Quick-create while filtering on another user must not steal ownership.
    if 'user_id' not in vals:
        vals['user_id'] = uid
    new_id = super(calendar_event, self).create(cr, uid, vals, context=context)
    data = self.read(cr, uid, [new_id], ['end_date', 'date_deadline', 'recurrency', 'rrule_type', 'count', 'end_type'], context=context)[0]
    end_date = self._get_recurrency_end_date(data, context=context)
    self.write(cr, uid, [new_id], {'end_date': end_date}, context=context)
    self.create_attendees(cr, uid, [new_id], context=context)
    return new_id
def read_group(self, cr, uid, domain, fields, groupby, offset=0, limit=None, context=None, orderby=False):
    """Group events on real ids; grouping by 'date' is not supported."""
    if not context:
        context = {}
    if 'date' in groupby:
        raise osv.except_osv(_('Warning!'), _('Group by date is not supported, use the calendar view instead.'))
    virtual_id = context.get('virtual_id', True)
    context.update({'virtual_id': False})
    groups = super(calendar_event, self).read_group(cr, uid, domain, fields, groupby, offset=offset, limit=limit, context=context, orderby=orderby)
    for group in groups:
        # Drop the counts: they are inconsistent with the result of the
        # search once the group is expanded into virtual occurrences.
        for group_name in groupby:
            if group.get(group_name + "_count"):
                del group[group_name + "_count"]
        group.get('__context', {}).update({'virtual_id': virtual_id})
    return groups
def read(self, cr, uid, ids, fields=None, context=None, load='_classic_read'):
    """Read real or virtual event ids.

    Virtual ids ('<real_id>-<date>') are resolved to their real record and
    'date'/'date_deadline' are overridden with the occurrence dates. Private
    events of other users are scrubbed down to their timing fields.
    """
    if context is None:
        context = {}
    # Always fetch the fields needed below; the extras are stripped again
    # before returning.
    fields2 = fields and fields[:] or None
    EXTRAFIELDS = ('class', 'user_id', 'duration', 'date', 'rrule', 'vtimezone')
    for f in EXTRAFIELDS:
        if fields and (f not in fields):
            fields2.append(f)
    if isinstance(ids, (str, int, long)):
        select = [ids]
    else:
        select = ids
    # Pair each requested (possibly virtual) id with its real id.
    select = map(lambda x: (x, calendar_id2real_id(x)), select)
    result = []
    real_data = super(calendar_event, self).read(cr, uid, [real_id for calendar_id, real_id in select], fields=fields2, context=context, load=load)
    real_data = dict(zip([x['id'] for x in real_data], real_data))
    for calendar_id, real_id in select:
        res = real_data[real_id].copy()
        # For a virtual id, ls is (real_id, occurrence_start, occurrence_stop).
        ls = calendar_id2real_id(calendar_id, with_date=res and res.get('duration', 0) or 0)
        if not isinstance(ls, (str, int, long)) and len(ls) >= 2:
            res['date'] = ls[1]
            res['date_deadline'] = ls[2]
        res['id'] = calendar_id
        result.append(res)
    # Hide the details of other users' private events: everything but the
    # timing fields is blanked and the name becomes 'Busy'.
    for r in result:
        if r['user_id']:
            user_id = type(r['user_id']) in (tuple, list) and r['user_id'][0] or r['user_id']
            if user_id == uid:
                continue
        if r['class'] == 'private':
            for f in r.keys():
                if f not in ('id', 'date', 'date_deadline', 'duration', 'user_id', 'state', 'interval', 'count', 'recurrent_id_date'):
                    if isinstance(r[f], list):
                        r[f] = []
                    else:
                        r[f] = False
                if f == 'name':
                    r[f] = _('Busy')
    # Strip the extra fields that were only read for internal use.
    for r in result:
        for k in EXTRAFIELDS:
            if (k in r) and (fields and (k not in fields)):
                del r[k]
    if isinstance(ids, (str, int, long)):
        return result and result[0] or False
    return result
def unlink(self, cr, uid, ids, unlink_level=0, context=None):
    """Delete real non-recurrent events; archive the others (active=False)."""
    if not isinstance(ids, list):
        ids = [ids]
    res = False
    to_archive = []
    to_delete = []
    # Once synced with Google Calendar we only really delete a real id when
    # asked to (unlink_level == 1) and it is not a detached occurrence.
    for event_id in ids:
        is_real_id = len(str(event_id).split('-')) == 1
        if unlink_level == 1 and is_real_id:
            if self.browse(cr, uid, event_id).recurrent_id:
                to_archive.append(event_id)
            else:
                to_delete.append(event_id)
        else:
            to_archive.append(event_id)
    if to_delete:
        res = super(calendar_event, self).unlink(cr, uid, to_delete, context=context)
    for event_id in to_archive:
        res = self.write(cr, uid, event_id, {'active': False}, context=context)
    return res
class mail_message(osv.Model):
    _inherit = "mail.message"

    def search(self, cr, uid, args, offset=0, limit=0, order=None, context=None, count=False):
        '''
        convert the search on real ids in the case it was asked on virtual ids, then call super()
        '''
        for clause in args:
            if clause[0] == "res_id" and isinstance(clause[2], str):
                clause[2] = get_real_ids(clause[2])
        return super(mail_message, self).search(cr, uid, args, offset=offset, limit=limit, order=order, context=context, count=count)

    def _find_allowed_model_wise(self, cr, uid, doc_model, doc_dict, context=None):
        '''
        extend the allowed documents with the virtual ids of recurrent calendar events
        '''
        if context is None:
            context = {}
        if doc_model == 'calendar.event':
            order = context.get('order', self._order)
            for virtual_id in self.pool[doc_model].get_recurrent_ids(cr, uid, doc_dict.keys(), [], order=order, context=context):
                doc_dict.setdefault(virtual_id, doc_dict[get_real_ids(virtual_id)])
        return super(mail_message, self)._find_allowed_model_wise(cr, uid, doc_model, doc_dict, context=context)
class ir_attachment(osv.Model):
    _inherit = "ir.attachment"

    def search(self, cr, uid, args, offset=0, limit=0, order=None, context=None, count=False):
        '''
        convert the search on real ids in the case it was asked on virtual ids, then call super()
        '''
        for clause in args:
            if clause[0] == "res_id" and isinstance(clause[2], str):
                clause[2] = get_real_ids(clause[2])
        return super(ir_attachment, self).search(cr, uid, args, offset=offset, limit=limit, order=order, context=context, count=count)

    def write(self, cr, uid, ids, vals, context=None):
        '''
        when posting an attachment (new or not), convert the virtual ids in real ids.
        '''
        if isinstance(vals.get('res_id'), str):
            vals['res_id'] = get_real_ids(vals.get('res_id'))
        return super(ir_attachment, self).write(cr, uid, ids, vals, context=context)
class ir_http(osv.AbstractModel):
    _inherit = 'ir.http'

    def _auth_method_calendar(self):
        """Authenticate an invitation page request through its access token."""
        token = request.params['token']
        db = request.params['db']
        registry = openerp.modules.registry.RegistryManager.get(db)
        attendee_pool = registry.get('calendar.attendee')
        error_message = False
        with registry.cursor() as cr:
            attendee_ids = attendee_pool.search(cr, openerp.SUPERUSER_ID, [('access_token', '=', token)])
            if not attendee_ids:
                error_message = """Invalid Invitation Token."""
            elif request.session.uid and request.session.login != 'anonymous':
                # A valid session exists: make sure it belongs to the invitee.
                attendee = attendee_pool.browse(cr, openerp.SUPERUSER_ID, attendee_ids[0])
                user = registry.get('res.users').browse(cr, openerp.SUPERUSER_ID, request.session.uid)
                if attendee.partner_id.id != user.partner_id.id:
                    error_message = """Invitation cannot be forwarded via email. This event/meeting belongs to %s and you are logged in as %s. Please ask organizer to add you.""" % (attendee.email, user.email)
        if error_message:
            raise BadRequest(error_message)
        return True
class invite_wizard(osv.osv_memory):
    _inherit = 'mail.wizard.invite'

    def default_get(self, cr, uid, fields, context=None):
        '''
        in case someone clicked on 'invite others' wizard in the followers
        widget, transform virtual ids in real ids
        '''
        defaults = super(invite_wizard, self).default_get(cr, uid, fields, context=context)
        if 'res_id' in defaults:
            defaults['res_id'] = get_real_ids(defaults['res_id'])
        return defaults
|
# -*- coding: utf8 -*-
# Copyright (c) 2014 by Ecreall under licence AGPL terms
# avalaible on http://www.gnu.org/licenses/agpl.html
# licence: AGPL
# author: Amen Souissi
"""
This module represent all of behaviors used in the
Proposal management process definition.
"""
import datetime
import pytz
from persistent.list import PersistentList
from persistent.dict import PersistentDict
from pyramid.httpexceptions import HTTPFound
from pyramid.threadlocal import get_current_request
from substanced.util import get_oid
from dace.util import (
getSite,
copy,
find_service,
get_obj)
from dace.objectofcollaboration.principal.util import (
has_role,
grant_roles,
get_current,
revoke_roles,
has_any_roles)
#from dace.objectofcollaboration import system
import html_diff_wrapper
from dace.processinstance.activity import (
InfiniteCardinality, ElementaryAction, ActionType)
from dace.processinstance.core import ActivityExecuted
from pontus.file import OBJECT_DATA
from novaideo.content.interface import (
INovaIdeoApplication,
IProposal,
Iidea,
IWorkspace)
from ..user_management.behaviors import (
global_user_processsecurity,
access_user_processsecurity)
from novaideo import _, log
from novaideo.content.proposal import Proposal
from ..comment_management.behaviors import VALIDATOR_BY_CONTEXT
from novaideo.content.correlation import CorrelationType
from novaideo.content.token import Token
from novaideo.content.working_group import WorkingGroup
from novaideo.content.processes.idea_management.behaviors import (
PresentIdea,
CommentIdea,
Associate as AssociateIdea)
from novaideo.utilities.util import (
connect, disconnect, to_localized_time)
from novaideo.event import (
ObjectPublished, CorrelableRemoved, ObjectModified)
from novaideo.content.processes.proposal_management import WORK_MODES
from novaideo.core import access_action, serialize_roles
from novaideo.content.alert import InternalAlertKind
from novaideo.views.filter import get_users_by_preferences
from novaideo.utilities.alerts_utility import alert
from . import (
FIRST_VOTE_PUBLISHING_MESSAGE,
VP_DEFAULT_DURATION,
AMENDMENTS_CYCLE_DEFAULT_DURATION,
FIRST_VOTE_DURATION_MESSAGE,
init_proposal_ballots,
add_files_to_workspace,
add_attached_files)
try:
basestring
except NameError:
basestring = str
# Help texts displayed next to the working group's "first vote" ballots.
# NOTE(review): the publishing message below is a French source string while
# the others are English — presumably intentional, confirm with translations.
VOTE_PUBLISHING_MESSAGE = _("Chaque participant du groupe de travail vote pour"
                            " ou contre l'amélioration de la proposition. Si la majorité"
                            " est \"pour\", un nouveau cycle d'amélioration commence, sinon"
                            " la proposition est soumise en l'état aux autres membres de la plateforme")

VOTE_DURATION_MESSAGE = _("Voting results may not be known until the end of"
                          " the period for voting. In the case where the"
                          " majority are for the continuation of improvements"
                          " of the proposal, your vote for the duration of the"
                          " amendment period will be useful")

VOTE_MODEWORK_MESSAGE = _("Voting results may not be known until the end of"
                          " the period for voting. In the case where the"
                          " majority are for the continuation of improvements"
                          " of the proposal, your vote for the work mode will be useful")

VOTE_REOPENING_MESSAGE = _("Voting results may not be known until the end of"
                           " the period for voting. In the case where the"
                           " majority are for the continuation of improvements"
                           " of the proposal, your vote for reopening working"
                           " group will be useful")
def publish_ideas(ideas, request):
    """Move each idea to the 'published' state, reindex it and notify."""
    for idea in ideas:
        idea.state = PersistentList(['published'])
        # Timestamp each idea individually at the moment it is published.
        idea.modified_at = datetime.datetime.now(tz=pytz.UTC)
        idea.reindex()
        request.registry.notify(ObjectPublished(object=idea))
def publish_condition(process):
    """Tell whether the proposal attached to *process* may be published."""
    proposal = process.execution_context.created_entity('proposal')
    if not proposal:
        return None
    working_group = proposal.working_group
    report = working_group.vp_ballot.report
    if not getattr(working_group, 'first_vote', True):
        # Past the first cycle: publication follows the elected outcome.
        return report.get_electeds() is not None
    report.calculate_votes()
    # First cycle: publish only when nobody voted against.
    return report.result['False'] == 0
def start_improvement_cycle(proposal):
    """Instantiate and launch the 'proposalimprovementcycle' process for *proposal*."""
    definition_container = find_service('process_definition_container')
    runtime = find_service('runtime')
    definition = definition_container.get_definition('proposalimprovementcycle')
    process = definition()
    process.__name__ = process.id
    runtime.addtoproperty('processes', process)
    process.defineGraph(definition)
    process.execution_context.add_created_entity('proposal', proposal)
    process.execute()
    return process
def close_votes(context, request, vote_processes):
    """Close every 'vote' action of the given vote processes."""
    for process in vote_processes:
        for action in process.get_actions('vote'):
            action.close_vote(context, request)
def first_vote_registration(user, working_group, appstruct):
    """Record the author's initial votes (amendment duration + publication)."""
    # Amendment-cycle duration ballot
    duration_ballot = working_group.duration_configuration_ballot
    duration_report = duration_ballot.report
    if user not in duration_report.voters:
        elected_id = appstruct['elected']
        try:
            subject_id = get_oid(elected_id[OBJECT_DATA])
        except Exception:
            # The elected value may already be a plain id.
            subject_id = elected_id
        vote = duration_report.ballottype.vote_factory(subject_id)
        vote.user_id = get_oid(user)
        duration_ballot.ballot_box.addtoproperty('votes', vote)
        duration_report.addtoproperty('voters', user)
    # Publication ballot
    vp_ballot = working_group.vp_ballot
    vp_report = vp_ballot.report
    if user not in vp_report.voters:
        vote = vp_report.ballottype.vote_factory(appstruct['vote'])
        vote.user_id = get_oid(user)
        vp_ballot.ballot_box.addtoproperty('votes', vote)
        vp_report.addtoproperty('voters', user)
def first_vote_remove(user, working_group):
    """Withdraw the user's initial votes from both first-vote ballots."""
    user_oid = get_oid(user)
    # Duration ballot first, then publication ballot (same cleanup for both).
    for ballot in (working_group.duration_configuration_ballot,
                   working_group.vp_ballot):
        user_votes = [vote for vote in ballot.ballot_box.votes
                      if getattr(vote, 'user_id', 0) == user_oid]
        if user_votes:
            ballot.ballot_box.delfromproperty('votes', user_votes[0])
            ballot.report.delfromproperty('voters', user)
def calculate_amendments_cycle_duration(process):
    """Return the end datetime of the next amendment cycle for *process*."""
    if getattr(process, 'attachedTo', None):
        process = process.attachedTo.process
    proposal = process.execution_context.created_entity('proposal')
    ballot = getattr(
        proposal.working_group, 'duration_configuration_ballot', None)
    duration_key = "One week"  # default when nobody voted on the duration
    if ballot is not None and ballot.report.voters:
        electeds = ballot.report.get_electeds()
        if electeds:
            duration_key = electeds[0]
    return AMENDMENTS_CYCLE_DEFAULT_DURATION[duration_key] + \
        datetime.datetime.now()
def createproposal_roles_validation(process, context):
    """Only administrators may create a proposal directly from the root."""
    return has_role(role=('Admin',))
def createproposal_processsecurity_validation(process, context):
    """Deny proposal creation on 'idea box' sites; otherwise defer to global checks."""
    request = get_current_request()
    if getattr(request, 'is_idea_box', False):
        return False
    return global_user_processsecurity()
def include_ideas_texts(proposal, related_ideas):
    """Append each related idea's text to the proposal body, wrapped in <div>s."""
    fragments = ['<div>' + idea.text + '</div>' for idea in related_ideas]
    proposal.text = getattr(proposal, 'text', '') + ''.join(fragments)
class CreateProposal(InfiniteCardinality):
    """Create a proposal (and its working group) from the application root."""
    submission_title = _('Save')
    context = INovaIdeoApplication
    roles_validation = createproposal_roles_validation
    processsecurity_validation = createproposal_processsecurity_validation

    def start(self, context, request, appstruct, **kw):
        root = getSite()
        author = get_current()
        related_ideas = appstruct.pop('related_ideas')
        proposal = appstruct['_object_data']
        root.merge_keywords(proposal.keywords)
        proposal.text = html_diff_wrapper.normalize_text(proposal.text)
        root.addtoproperty('proposals', proposal)
        proposal.state.append('draft')
        # The creator both owns the proposal and takes part in its working group.
        grant_roles(user=author, roles=(('Owner', proposal), ))
        grant_roles(user=author, roles=(('Participant', proposal), ))
        proposal.setproperty('author', author)
        working_group = WorkingGroup()
        root.addtoproperty('working_groups', working_group)
        working_group.init_workspace()
        working_group.setproperty('proposal', proposal)
        working_group.addtoproperty('members', author)
        working_group.state.append('deactivated')
        if related_ideas:
            connect(proposal,
                    related_ideas,
                    {'comment': _('Add related ideas'),
                     'type': _('Creation')},
                    author,
                    ['related_proposals', 'related_ideas'],
                    CorrelationType.solid)
        add_attached_files(appstruct, proposal)
        proposal.reindex()
        init_proposal_ballots(proposal)
        working_group.reindex()
        proposal.subscribe_to_channel(author)
        request.registry.notify(
            ActivityExecuted(self, [proposal, working_group], author))
        return {'newcontext': proposal}

    def redirect(self, context, request, **kw):
        return HTTPFound(request.resource_url(kw['newcontext'], "@@index"))
def pap_processsecurity_validation(process, context):
    """A member may turn an idea into a proposal once the idea is usable."""
    request = get_current_request()
    if getattr(request, 'is_idea_box', False):
        return False
    # When ideas are examined, only 'favorable' ones qualify; otherwise any
    # published idea does.
    if 'idea' in request.content_to_examine:
        idea_ready = 'favorable' in context.state
    else:
        idea_ready = 'published' in context.state
    return idea_ready and has_role(role=('Member',))
class PublishAsProposal(CreateProposal):
    """Create a proposal directly from an idea (primary button on the idea)."""
    # View metadata: rendered as the idea's primary action button.
    style = 'button' #TODO add style abstract class
    context = Iidea
    submission_title = _('Save')
    style_order = 0
    style_descriminator = 'primary-action'
    style_picto = 'novaideo-icon icon-wg'
    processsecurity_validation = pap_processsecurity_validation
    # NOTE(review): the parent restricts creation to admins; overriding
    # roles_validation with NotImplemented presumably disables that check
    # here — confirm how the process engine treats this value.
    roles_validation = NotImplemented
def del_processsecurity_validation(process, context):
    """Only the owner of a draft proposal may delete it (plus global checks)."""
    return (global_user_processsecurity()
            and has_role(role=('Owner', context))
            and 'draft' in context.state)
class DeleteProposal(InfiniteCardinality):
    """Delete a proposal; members are notified when it is a moderation act."""
    # View metadata: global ajax action with a trash icon.
    style = 'button' #TODO add style abstract class
    style_descriminator = 'global-action'
    style_interaction = 'ajax-action'
    style_picto = 'glyphicon glyphicon-trash'
    style_order = 12
    submission_title = _('Continue')
    context = IProposal
    processsecurity_validation = del_processsecurity_validation

    def start(self, context, request, appstruct, **kw):
        root = getSite()
        # Deleting a non-draft proposal, or someone else's, is treated as a
        # moderation act and triggers the notifications below.
        not_draft_owner = 'draft' not in context.state or \
            not has_role(role=('Owner', context))
        # Support tokens go back to their owners; proposal-bound tokens are
        # dropped from the owners' token references.
        tokens = [t for t in context.tokens if not t.proposal]
        proposal_tokens = [t for t in context.tokens if t.proposal]
        for token in list(tokens):
            token.owner.addtoproperty('tokens', token)
        for proposal_token in list(proposal_tokens):
            proposal_token.owner.delfromproperty('tokens_ref', proposal_token)
        # Dismantle the working group before removing the proposal itself.
        wg = context.working_group
        members = list(wg.members)
        for member in members:
            wg.delfromproperty('members', member)
        wg.delfromproperty('proposal', context)
        root.delfromproperty('working_groups', wg)
        request.registry.notify(CorrelableRemoved(object=context))
        root.delfromproperty('proposals', context)
        if not_draft_owner:
            # Alert and email the former members with the moderator's
            # explanation.
            mail_template = root.get_mail_template('delete_proposal')
            explanation = appstruct['explanation']
            subject = mail_template['subject'].format(
                subject_title=context.title)
            localizer = request.localizer
            alert('internal', [root], members,
                  internal_kind=InternalAlertKind.moderation_alert,
                  subjects=[], removed=True, subject_title=context.title)
            for member in members:
                if getattr(member, 'email', ''):
                    message = mail_template['template'].format(
                        recipient_title=localizer.translate(
                            _(getattr(member, 'user_title', ''))),
                        recipient_first_name=getattr(
                            member, 'first_name', member.name),
                        recipient_last_name=getattr(member, 'last_name', ''),
                        subject_title=context.title,
                        explanation=explanation,
                        novaideo_title=root.title
                    )
                    alert('email', [root.get_site_sender()], [member.email],
                          subject=subject, body=message)
        return {'newcontext': root}

    def redirect(self, context, request, **kw):
        return HTTPFound(request.resource_url(kw['newcontext'], ""))
def publish_roles_validation(process, context):
    """Publication is reserved to the proposal's owner."""
    return has_role(role=('Owner', context))
def publish_processsecurity_validation(process, context):
    """Check related-idea states, the user's quota and global security."""
    user = get_current()
    root = getSite()
    related = context.related_ideas.keys()
    # With idea moderation on, every related idea must already be published.
    blocking = False
    if getattr(root, 'moderate_ideas', False):
        blocking = any('published' not in idea.state for idea in related)
    # With idea examination on, every related idea must be 'favorable'.
    if not blocking and 'idea' in getattr(root, 'content_to_examine', []):
        blocking = any('favorable' not in idea.state for idea in related)
    return (not blocking
            and len(user.active_working_groups) < root.participations_maxi
            and global_user_processsecurity())
def publish_state_validation(process, context):
    """Only draft proposals may be published."""
    return "draft" in context.state
class PublishProposal(InfiniteCardinality):
    """Publish a draft proposal.

    Depending on the submitted form, the proposal either goes straight to
    the support phase (its working group is archived) or is opened to a
    working group / made amendable, in which case the improvement-cycle
    process is started.
    """
    style = 'button' #TODO add style abstract class
    style_descriminator = 'global-action'
    style_picto = 'glyphicon glyphicon-share'
    style_order = 13
    submission_title = _('Continue')
    context = IProposal
    roles_validation = publish_roles_validation
    processsecurity_validation = publish_processsecurity_validation
    state_validation = publish_state_validation
    def start(self, context, request, appstruct, **kw):
        user = get_current()
        root = getSite()
        working_group = context.working_group
        context.state.remove('draft')
        if appstruct.get('vote', False):
            # Direct submission to the support phase: the working group is
            # no longer needed and is archived.
            if 'proposal' in getattr(root, 'content_to_support', []):
                context.state = PersistentList(
                    ['submitted_support', 'published'])
            else:
                context.state = PersistentList(
                    ['published', 'submitted_support'])
            working_group.state = PersistentList(['archived'])
            context.reindex()
            working_group.reindex()
        else:
            default_mode = root.get_default_work_mode()
            participants_mini = root.participants_mini
            mode_id = appstruct.get('work_mode', default_mode.work_id)
            if mode_id:
                working_group.work_mode_id = mode_id
                participants_mini = WORK_MODES[mode_id].participants_mini
            #Only the vote of the author is considered
            first_vote_registration(user, working_group, appstruct)
            if participants_mini > 1:
                context.state = PersistentList(
                    ['open to a working group', 'published'])
                context.reindex()
            else:
                # A single participant suffices: activate the group and
                # start the improvement cycle right away.
                context.state = PersistentList(['amendable', 'published'])
                working_group.state = PersistentList(['active'])
                context.reindex()
                working_group.reindex()
                if not hasattr(working_group, 'first_improvement_cycle'):
                    working_group.first_improvement_cycle = True
                if not working_group.improvement_cycle_proc:
                    improvement_cycle_proc = start_improvement_cycle(context)
                    working_group.setproperty(
                        'improvement_cycle_proc', improvement_cycle_proc)
                working_group.improvement_cycle_proc.execute_action(
                    context, request, 'votingpublication', {})
        context.modified_at = datetime.datetime.now(tz=pytz.UTC)
        context.init_published_at()
        not_published_ideas = []
        if not getattr(root, 'moderate_ideas', False) and\
           'idea' not in getattr(root, 'content_to_examine', []):
            not_published_ideas = [i for i in context.related_ideas.keys()
                                   if 'published' not in i.state]
            publish_ideas(not_published_ideas, request)
        # Fix: use append, not extend — the proposal itself must be added as
        # a single entity to the executed-activity list; extend() would
        # iterate over the proposal object instead.
        not_published_ideas.append(context)
        request.registry.notify(ObjectPublished(object=context))
        request.registry.notify(ActivityExecuted(
            self, not_published_ideas, user))
        return {}
    def redirect(self, context, request, **kw):
        return HTTPFound(request.resource_url(context, "@@index"))
def duplicate_processsecurity_validation(process, context):
    """Any non-draft proposal may be duplicated, security permitting."""
    if 'draft' in context.state:
        return False
    return global_user_processsecurity()
class DuplicateProposal(InfiniteCardinality):
    """Create a copy of a (non-draft) proposal for the current user.

    The copy starts over as a draft with its own fresh working group;
    the original proposal is linked to the copy via ``originalentity``.
    """
    style = 'button' #TODO add style abstract class
    style_descriminator = 'global-action'
    style_picto = 'octicon octicon-git-branch'
    style_order = 7
    submission_title = _('Save')
    context = IProposal
    processsecurity_validation = duplicate_processsecurity_validation
    def start(self, context, request, appstruct, **kw):
        root = getSite()
        user = get_current()
        related_ideas = appstruct.pop('related_ideas')
        root.merge_keywords(appstruct['keywords'])
        # Copy without volatile/derived attributes; they are re-initialized
        # just below for the new draft.
        copy_of_proposal = copy(
            context, (root, 'proposals'),
            omit=('created_at', 'modified_at',
                  'examined_at', 'published_at',
                  'opinion', 'attached_files',
                  'len_selections', 'graph'))
        copy_of_proposal.opinion = PersistentDict({})
        copy_of_proposal.init_graph()
        copy_of_proposal.set_data(appstruct)
        copy_of_proposal.text = html_diff_wrapper.normalize_text(
            copy_of_proposal.text)
        copy_of_proposal.setproperty('originalentity', context)
        copy_of_proposal.state = PersistentList(['draft'])
        grant_roles(user=user, roles=(('Owner', copy_of_proposal), ))
        grant_roles(user=user, roles=(('Participant', copy_of_proposal), ))
        copy_of_proposal.setproperty('author', user)
        # Fresh working group, deactivated until the copy is published.
        wg = WorkingGroup()
        root.addtoproperty('working_groups', wg)
        wg.init_workspace()
        wg.setproperty('proposal', copy_of_proposal)
        wg.addtoproperty('members', user)
        wg.state.append('deactivated')
        if related_ideas:
            connect(copy_of_proposal,
                    related_ideas,
                    {'comment': _('Add related ideas'),
                     'type': _('Duplicate')},
                    user,
                    ['related_proposals', 'related_ideas'],
                    CorrelationType.solid)
        add_attached_files(appstruct, copy_of_proposal)
        wg.reindex()
        copy_of_proposal.reindex()
        init_proposal_ballots(copy_of_proposal)
        context.reindex()
        request.registry.notify(ActivityExecuted(
            self, [copy_of_proposal, wg], user))
        copy_of_proposal.subscribe_to_channel(user)
        return {'newcontext': copy_of_proposal}
    def redirect(self, context, request, **kw):
        return HTTPFound(request.resource_url(kw['newcontext'], "@@index"))
def edit_roles_validation(process, context):
    """Only the owner of the proposal can edit it."""
    owner_role = ('Owner', context)
    return has_role(role=owner_role)
def edit_processsecurity_validation(process, context):
    """Editing is gated only by the global user security check."""
    return global_user_processsecurity()
def edit_state_validation(process, context):
    """Only drafts can be edited."""
    return 'draft' in context.state
class EditProposal(InfiniteCardinality):
    """Edit a draft proposal owned by the current user."""
    style = 'button' #TODO add style abstract class
    style_descriminator = 'text-action'
    style_picto = 'glyphicon glyphicon-pencil'
    style_order = 1
    submission_title = _('Save')
    context = IProposal
    roles_validation = edit_roles_validation
    processsecurity_validation = edit_processsecurity_validation
    state_validation = edit_state_validation
    def start(self, context, request, appstruct, **kw):
        root = getSite()
        user = get_current()
        if 'related_ideas' in appstruct:
            context.set_related_ideas(
                appstruct['related_ideas'], user)
        add_attached_files(appstruct, context)
        # Normalize the HTML text so later diffs compare consistently.
        context.text = html_diff_wrapper.normalize_text(context.text)
        context.modified_at = datetime.datetime.now(tz=pytz.UTC)
        root.merge_keywords(context.keywords)
        context.reindex()
        request.registry.notify(ActivityExecuted(self, [context], user))
        return {}
    def redirect(self, context, request, **kw):
        return HTTPFound(request.resource_url(context, "@@index"))
def support_roles_validation(process, context):
    """Any site member may take support-related actions."""
    member_role = ('Member',)
    return has_role(role=member_role)
def support_processsecurity_validation(process, context):
    """The user must still hold tokens and not already have one here.

    Also requires the site to be configured to support proposals.
    """
    request = get_current_request()
    if 'proposal' not in request.content_to_support:
        return False
    user = get_current()
    token_owners = [t.owner for t in context.tokens]
    return getattr(user, 'tokens', []) and \
        user not in token_owners and \
        global_user_processsecurity()
def support_state_validation(process, context):
    """Support actions apply only while the proposal accepts support."""
    return 'submitted_support' in context.state
class SupportProposal(InfiniteCardinality):
    """Spend one of the current user's tokens to support the proposal."""
    # style = 'button' #TODO add style abstract class
    # style_descriminator = 'text-action'
    # style_picto = 'glyphicon glyphicon-thumbs-up'
    # style_order = 4
    context = IProposal
    roles_validation = support_roles_validation
    processsecurity_validation = support_processsecurity_validation
    state_validation = support_state_validation
    def start(self, context, request, appstruct, **kw):
        user = get_current()
        # Prefer the token already bound to this proposal; otherwise fall
        # back to the user's last free token.
        token = None
        for tok in user.tokens:
            if tok.proposal is context:
                token = tok
                # Fix: stop at the first match, consistent with the
                # identical loop in OpposeProposal.
                break
        if token is None:
            token = user.tokens[-1]
        context.addtoproperty('tokens_support', token)
        context.init_support_history()
        # History entry: (user oid, timestamp, 1) — 1 records a support.
        context._support_history.append(
            (get_oid(user), datetime.datetime.now(tz=pytz.UTC), 1))
        request.registry.notify(ActivityExecuted(self, [context], user))
        users = list(get_users_by_preferences(context))
        users.extend(context.working_group.members)
        alert('internal', [request.root], users,
              internal_kind=InternalAlertKind.support_alert,
              subjects=[context], support_kind='support')
        return {}
    def redirect(self, context, request, **kw):
        return HTTPFound(request.resource_url(context, "@@index"))
class OpposeProposal(InfiniteCardinality):
    """Spend one of the current user's tokens to oppose the proposal.

    Mirror image of ``SupportProposal``: the token goes to
    ``tokens_opposition`` and the support history records a 0.
    """
    # style = 'button' #TODO add style abstract class
    # style_descriminator = 'text-action'
    # style_picto = 'glyphicon glyphicon-thumbs-down'
    # style_order = 5
    context = IProposal
    roles_validation = support_roles_validation
    processsecurity_validation = support_processsecurity_validation
    state_validation = support_state_validation
    def start(self, context, request, appstruct, **kw):
        user = get_current()
        # Prefer the token already bound to this proposal; otherwise fall
        # back to the user's last free token.
        token = None
        for tok in user.tokens:
            if tok.proposal is context:
                token = tok
                break
        if token is None:
            token = user.tokens[-1]
        context.addtoproperty('tokens_opposition', token)
        context.init_support_history()
        # History entry: (user oid, timestamp, 0) — 0 records an opposition.
        context._support_history.append(
            (get_oid(user), datetime.datetime.now(tz=pytz.UTC), 0))
        request.registry.notify(ActivityExecuted(self, [context], user))
        users = list(get_users_by_preferences(context))
        users.extend(context.working_group.members)
        alert('internal', [request.root], users,
              internal_kind=InternalAlertKind.support_alert,
              subjects=[context], support_kind='oppose')
        return {}
    def redirect(self, context, request, **kw):
        return HTTPFound(request.resource_url(context, "@@index"))
def opinion_roles_validation(process, context):
    """Only examiners may give an official opinion."""
    examiner_role = ('Examiner',)
    return has_role(role=examiner_role)
def opinion_processsecurity_validation(process, context):
    """Opinions are allowed only when proposals are configured for examination."""
    request = get_current_request()
    if 'proposal' in request.content_to_examine:
        return global_user_processsecurity()
    return False
def opinion_state_validation(process, context):
    """An opinion applies to a supported proposal not yet examined."""
    state = context.state
    return 'submitted_support' in state and 'examined' not in state
class MakeOpinion(InfiniteCardinality):
    """Record the examiners' official opinion on a supported proposal.

    Marks the proposal as examined, returns free tokens to their owners,
    removes proposal-bound tokens, and notifies interested users and the
    working-group members.
    """
    style = 'button' #TODO add style abstract class
    style_descriminator = 'global-action'
    style_picto = 'octicon octicon-checklist'
    style_order = 10
    submission_title = _('Save')
    context = IProposal
    roles_validation = opinion_roles_validation
    processsecurity_validation = opinion_processsecurity_validation
    state_validation = opinion_state_validation
    def start(self, context, request, appstruct, **kw):
        appstruct.pop('_csrf_token_')
        context.opinion = PersistentDict(appstruct)
        # (Fix: removed the unused — and misspelled — local ``old_sate``.)
        context.state = PersistentList(
            ['examined', 'published', context.opinion['opinion']])
        context.init_examined_at()
        context.reindex()
        # Free tokens go back to their owners; tokens bound to the proposal
        # are removed from it.
        tokens = [t for t in context.tokens if not t.proposal]
        proposal_tokens = [t for t in context.tokens if t.proposal]
        for token in list(tokens):
            token.owner.addtoproperty('tokens', token)
        for token in list(proposal_tokens):
            context.__delitem__(token.__name__)
        members = context.working_group.members
        url = request.resource_url(context, "@@index")
        root = getSite()
        mail_template = root.get_mail_template('opinion_proposal')
        subject = mail_template['subject'].format(subject_title=context.title)
        localizer = request.localizer
        users = list(get_users_by_preferences(context))
        users.extend(members)
        alert('internal', [root], users,
              internal_kind=InternalAlertKind.examination_alert,
              subjects=[context])
        for member in members:
            if getattr(member, 'email', ''):
                message = mail_template['template'].format(
                    recipient_title=localizer.translate(
                        _(getattr(member, 'user_title', ''))),
                    recipient_first_name=getattr(member, 'first_name', member.name),
                    recipient_last_name=getattr(member, 'last_name', ''),
                    subject_url=url,
                    subject_title=context.title,
                    opinion=localizer.translate(_(context.opinion_value)),
                    explanation=context.opinion['explanation'],
                    novaideo_title=request.root.title
                )
                alert('email', [root.get_site_sender()], [member.email],
                      subject=subject, body=message)
        request.registry.notify(ActivityExecuted(
            self, [context], get_current()))
        return {}
    def redirect(self, context, request, **kw):
        return HTTPFound(request.resource_url(context, "@@index"))
def withdrawt_processsecurity_validation(process, context):
    """The user must own at least one token placed on this proposal."""
    user = get_current()
    owns_token = any((t.owner is user) for t in context.tokens)
    return owns_token and global_user_processsecurity()
class WithdrawToken(InfiniteCardinality):
    """Take back a token previously placed on the proposal."""
    # style = 'button' #TODO add style abstract class
    # style_descriminator = 'text-action'
    # style_picto = 'glyphicon glyphicon-share-alt'
    # style_order = 6
    context = IProposal
    roles_validation = support_roles_validation
    processsecurity_validation = withdrawt_processsecurity_validation
    state_validation = support_state_validation
    def start(self, context, request, appstruct, **kw):
        user = get_current()
        user_tokens = [t for t in context.tokens
                       if t.owner is user]
        token = user_tokens[-1]
        # Remove the token from the proposal and give it back to its owner.
        context.delfromproperty(token.__property__, token)
        user.addtoproperty('tokens', token)
        context.init_support_history()
        # History entry: (user oid, timestamp, -1) — -1 records a withdrawal.
        context._support_history.append(
            (get_oid(user), datetime.datetime.now(tz=pytz.UTC), -1))
        request.registry.notify(ActivityExecuted(self, [context], user))
        users = list(get_users_by_preferences(context))
        users.extend(context.working_group.members)
        alert('internal', [request.root], users,
              internal_kind=InternalAlertKind.support_alert,
              subjects=[context], support_kind='withdraw')
        return {}
    def redirect(self, context, request, **kw):
        return HTTPFound(request.resource_url(context, "@@index"))
def comm_relation_validation(process, context):
    """The running process must be bound to this proposal."""
    execution_context = process.execution_context
    return execution_context.has_relation(context, 'proposal')
def comm_roles_validation(process, context):
    """Any site member may comment."""
    member_role = ('Member',)
    return has_role(role=member_role)
def comm_processsecurity_validation(process, context):
    """Commenting is gated only by the global user security check."""
    return global_user_processsecurity()
def comm_state_validation(process, context):
    """Comments are allowed on any non-draft proposal."""
    return 'draft' not in context.state
class CommentProposal(CommentIdea):
    """Comment action on a proposal; reuses the idea-commenting behavior."""
    isSequential = False
    context = IProposal
    roles_validation = comm_roles_validation
    processsecurity_validation = comm_processsecurity_validation
    state_validation = comm_state_validation
def seea_roles_validation(process, context):
    """Only participants of the proposal can see its amendments."""
    participant_role = ('Participant', context)
    return has_role(role=participant_role)
def seea_processsecurity_validation(process, context):
    """Visible only when at least one amendment is not archived."""
    has_live_amendment = any('archived' not in a.state
                             for a in context.amendments)
    return has_live_amendment and global_user_processsecurity()
class SeeAmendments(InfiniteCardinality):
    """Display the non-archived amendments of a proposal."""
    isSequential = False
    context = IProposal
    roles_validation = seea_roles_validation
    processsecurity_validation = seea_processsecurity_validation
    def start(self, context, request, appstruct, **kw):
        # Pure display action: no state change.
        return {}
    def redirect(self, context, request, **kw):
        return HTTPFound(request.resource_url(context, "@@index"))
def seem_processsecurity_validation(process, context):
    """Seeing the members is gated only by the global security check."""
    return global_user_processsecurity()
class SeeMembers(InfiniteCardinality):
    """Display the members of the proposal's working group (ajax view)."""
    style_descriminator = 'wg-action'
    style_interaction = 'ajax-action'
    style_picto = 'fa fa-users'
    isSequential = False
    context = IProposal
    processsecurity_validation = seem_processsecurity_validation
    def start(self, context, request, appstruct, **kw):
        # Pure display action: no state change.
        return {}
    def redirect(self, context, request, **kw):
        return HTTPFound(request.resource_url(context, "@@index"))
def present_roles_validation(process, context):
    """Any site member may share a proposal."""
    member_role = ('Member',)
    return has_role(role=member_role)
def present_processsecurity_validation(process, context):
    """Sharing is gated only by the global user security check."""
    return global_user_processsecurity()
def present_state_validation(process, context):
    """Sharing is allowed once the proposal is out of draft."""
    return 'draft' not in context.state  # TODO: confirm the required states
class PresentProposal(PresentIdea):
    """Share a proposal; reuses the idea-presentation behavior."""
    context = IProposal
    roles_validation = present_roles_validation
    processsecurity_validation = present_processsecurity_validation
    state_validation = present_state_validation
def associate_processsecurity_validation(process, context):
    """Owners always; other members only once the proposal left draft."""
    allowed = has_role(role=('Owner', context)) or \
        (has_role(role=('Member',)) and 'draft' not in context.state)
    return allowed and global_user_processsecurity()
class Associate(AssociateIdea):
    """Associate a proposal with other contents; reuses the idea behavior."""
    context = IProposal
    processsecurity_validation = associate_processsecurity_validation
def seeideas_state_validation(process, context):
    """Related ideas are public once out of draft; drafts only for the owner."""
    if 'draft' not in context.state:
        return True
    return has_role(role=('Owner', context))
class SeeRelatedIdeas(InfiniteCardinality):
    """Display the ideas related to the proposal (ajax view)."""
    style_descriminator = 'listing-primary-action'
    style_interaction = 'ajax-action'
    style_picto = 'glyphicon glyphicon-link'
    context = IProposal
    #processsecurity_validation = seeideas_processsecurity_validation
    #roles_validation = seeideas_roles_validation
    state_validation = seeideas_state_validation
    def start(self, context, request, appstruct, **kw):
        # Pure display action: no state change.
        return {}
    def redirect(self, context, request, **kw):
        return HTTPFound(request.resource_url(context, "@@index"))
def withdraw_roles_validation(process, context):
    """Any site member may leave the waiting list."""
    member_role = ('Member',)
    return has_role(role=member_role)
def withdraw_processsecurity_validation(process, context):
    """Only users currently on the waiting list may withdraw from it."""
    user = get_current()
    group = context.working_group
    return group and \
        user in group.wating_list and \
        global_user_processsecurity()
def withdraw_state_validation(process, context):
    """Waiting-list withdrawal applies only to amendable proposals."""
    return 'amendable' in context.state
class Withdraw(InfiniteCardinality):
    """Leave the working group's waiting list and notify the user by mail."""
    style = 'button' #TODO add style abstract class
    style_descriminator = 'wg-action'
    style_order = 3
    style_css_class = 'btn-warning'
    isSequential = False
    context = IProposal
    roles_validation = withdraw_roles_validation
    processsecurity_validation = withdraw_processsecurity_validation
    state_validation = withdraw_state_validation
    def start(self, context, request, appstruct, **kw):
        user = get_current()
        working_group = context.working_group
        working_group.delfromproperty('wating_list', user)
        if getattr(user, 'email', ''):
            localizer = request.localizer
            root = getSite()
            # NOTE(review): template id 'withdeaw' looks misspelled — confirm
            # it matches the registered mail template before changing it.
            mail_template = root.get_mail_template('withdeaw')
            subject = mail_template['subject'].format(
                subject_title=context.title)
            message = mail_template['template'].format(
                recipient_title=localizer.translate(
                    _(getattr(user, 'user_title', ''))),
                recipient_first_name=getattr(user, 'first_name', user.name),
                recipient_last_name=getattr(user, 'last_name', ''),
                subject_title=context.title,
                subject_url=request.resource_url(context, "@@index"),
                novaideo_title=request.root.title
            )
            alert('email', [root.get_site_sender()], [user.email],
                  subject=subject, body=message)
        request.registry.notify(ActivityExecuted(
            self, [context, working_group], user))
        return {}
    def redirect(self, context, request, **kw):
        return HTTPFound(request.resource_url(context, "@@index"))
def resign_roles_validation(process, context):
    """Only current working-group members can resign."""
    user = get_current()
    group = context.working_group
    return group and user in group.members
def resign_processsecurity_validation(process, context):
    """Resigning is gated only by the global user security check."""
    return global_user_processsecurity()
def resign_state_validation(process, context):
    """Resignation is possible while amendable or open to a working group."""
    allowed_states = ['amendable', 'open to a working group']
    return any(s in context.state for s in allowed_states)
class Resign(InfiniteCardinality):
    """Leave the working group of a proposal.

    If someone is on the waiting list, the first eligible user is promoted
    to member. If the group then falls below the minimum number of
    participants, it is deactivated and the proposal reopens to a working
    group. The resigning user (and affected members) are notified.
    """
    style = 'button' #TODO add style abstract class
    style_descriminator = 'wg-action'
    style_order = 2
    style_picto = 'typcn typcn-user-delete'
    style_css_class = 'btn-danger'
    isSequential = False
    context = IProposal
    roles_validation = resign_roles_validation
    processsecurity_validation = resign_processsecurity_validation
    state_validation = resign_state_validation
    def _get_next_user(self, users, root):
        # First waiting user who is active and below the participation quota.
        for user in users:
            wgs = user.active_working_groups
            if 'active' in user.state and len(wgs) < root.participations_maxi:
                return user
        return None
    def start(self, context, request, appstruct, **kw):
        root = getSite()
        user = get_current()
        working_group = context.working_group
        working_group.delfromproperty('members', user)
        members = working_group.members
        mode = getattr(working_group, 'work_mode', root.get_default_work_mode())
        revoke_roles(user, (('Participant', context),))
        if members:
            alert('internal', [root], members,
                  internal_kind=InternalAlertKind.working_group_alert,
                  subjects=[context], alert_kind='resign')
        url = request.resource_url(context, "@@index")
        localizer = request.localizer
        sender = root.get_site_sender()
        if working_group.wating_list:
            # Promote the first eligible waiting user to member.
            next_user = self._get_next_user(working_group.wating_list, root)
            if next_user is not None:
                mail_template = root.get_mail_template(
                    'wg_wating_list_participation')
                working_group.delfromproperty('wating_list', next_user)
                working_group.addtoproperty('members', next_user)
                grant_roles(next_user, (('Participant', context),))
                if members:
                    alert('internal', [root], members,
                          internal_kind=InternalAlertKind.working_group_alert,
                          subjects=[context], alert_kind='wg_wating_list_participation')
                if getattr(next_user, 'email', ''):
                    subject = mail_template['subject'].format(
                        subject_title=context.title)
                    message = mail_template['template'].format(
                        recipient_title=localizer.translate(
                            _(getattr(next_user, 'user_title', ''))),
                        recipient_first_name=getattr(
                            next_user, 'first_name', next_user.name),
                        recipient_last_name=getattr(next_user, 'last_name', ''),
                        subject_title=context.title,
                        subject_url=url,
                        novaideo_title=root.title
                    )
                    alert('email', [sender], [next_user.email],
                          subject=subject, body=message)
        participants = working_group.members
        len_participants = len(participants)
        # Below the quorum: deactivate the group and reopen the proposal.
        if len_participants < mode.participants_mini and \
           'open to a working group' not in context.state:
            context.state = PersistentList(
                ['open to a working group', 'published'])
            working_group.state = PersistentList(['deactivated'])
            working_group.reindex()
            context.reindex()
            alert('internal', [root], participants,
                  internal_kind=InternalAlertKind.working_group_alert,
                  subjects=[context], alert_kind='resign_to_wg_open')
        if getattr(user, 'email', ''):
            mail_template = root.get_mail_template('wg_resign')
            subject = mail_template['subject'].format(
                subject_title=context.title)
            message = mail_template['template'].format(
                recipient_title=localizer.translate(
                    _(getattr(user, 'user_title', ''))),
                recipient_first_name=getattr(user, 'first_name', user.name),
                recipient_last_name=getattr(user, 'last_name', ''),
                subject_title=context.title,
                subject_url=url,
                novaideo_title=root.title
            )
            alert('email', [sender], [user.email],
                  subject=subject, body=message)
        request.registry.notify(ActivityExecuted(
            self, [context, working_group], user))
        return {}
    def redirect(self, context, request, **kw):
        return HTTPFound(request.resource_url(context, "@@index"))
def participate_roles_validation(process, context):
    """Members not already in the working group may ask to participate."""
    user = get_current()
    group = context.working_group
    return group and has_role(role=('Member',)) and \
        user not in group.members
def participate_processsecurity_validation(process, context):
    """Check waiting list, participation quota and global security."""
    working_group = context.working_group
    user = get_current()
    root = getSite()
    active_wgs = getattr(user, 'active_working_groups', [])
    return working_group and \
        user not in working_group.wating_list and \
        len(active_wgs) < root.participations_maxi and \
        global_user_processsecurity()
def participate_state_validation(process, context):
    """Open while the group is not closed and the proposal is joinable."""
    group = context.working_group
    if not group:
        return group
    return 'closed' not in group.state and \
        any(s in context.state for s in
            ['amendable', 'open to a working group'])
class Participate(InfiniteCardinality):
    """Join a proposal's working group (or its waiting list when full).

    When the quorum (``participants_mini``) is reached, the group becomes
    active, the proposal amendable, and the improvement-cycle process is
    started on its first cycle.
    """
    style = 'button' #TODO add style abstract class
    style_descriminator = 'wg-action'
    style_order = 1
    style_picto = 'md md-group-add'
    style_css_class = 'btn-success'
    submission_title = _('Save')
    isSequential = False
    context = IProposal
    roles_validation = participate_roles_validation
    processsecurity_validation = participate_processsecurity_validation
    state_validation = participate_state_validation
    def _send_mail_to_user(self, subject_template,
                           message_template, user,
                           context, request):
        # Format and send a single notification mail to ``user``.
        localizer = request.localizer
        subject = subject_template.format(subject_title=context.title)
        message = message_template.format(
            recipient_title=localizer.translate(
                _(getattr(user, 'user_title', ''))),
            recipient_first_name=getattr(user, 'first_name', user.name),
            recipient_last_name=getattr(user, 'last_name', ''),
            subject_title=context.title,
            subject_url=request.resource_url(context, "@@index"),
            novaideo_title=request.root.title
        )
        alert('email', [request.root.get_site_sender()], [user.email],
              subject=subject, body=message)
    def start(self, context, request, appstruct, **kw):
        root = getSite()
        user = get_current()
        working_group = context.working_group
        participants = working_group.members
        mode = getattr(working_group, 'work_mode', root.get_default_work_mode())
        len_participants = len(participants)
        if len_participants < mode.participants_maxi:
            #Alert new participant
            if participants:
                alert('internal', [root], participants,
                      internal_kind=InternalAlertKind.working_group_alert,
                      subjects=[context], alert_kind='participate')
            working_group.addtoproperty('members', user)
            grant_roles(user, (('Participant', context),))
            #alert maw working groups
            active_wgs = getattr(user, 'active_working_groups', [])
            if len(active_wgs) == root.participations_maxi:
                alert('internal', [root], [user],
                      internal_kind=InternalAlertKind.working_group_alert,
                      subjects=[user], alert_kind='participations_maxi')
            # Quorum reached: activate the group, make the proposal
            # amendable, and (first time only) start the improvement cycle.
            if (len_participants+1) == mode.participants_mini:
                working_group.state = PersistentList(['active'])
                context.state = PersistentList(['amendable', 'published'])
                working_group.reindex()
                context.reindex()
                #Only if is the first improvement cycle
                if not hasattr(working_group, 'first_improvement_cycle'):
                    working_group.first_improvement_cycle = True
                if not working_group.improvement_cycle_proc:
                    improvement_cycle_proc = start_improvement_cycle(
                        context)
                    working_group.setproperty(
                        'improvement_cycle_proc', improvement_cycle_proc)
                #Run the improvement cycle proc
                working_group.improvement_cycle_proc.execute_action(
                    context, request, 'votingpublication', {})
                #Alert start of the improvement cycle proc
                alert('internal', [root], participants,
                      internal_kind=InternalAlertKind.working_group_alert,
                      subjects=[context], alert_kind='amendable')
            #Send Mail alert to user
            if getattr(user, 'email', ''):
                mail_template = root.get_mail_template('wg_participation')
                self._send_mail_to_user(
                    mail_template['subject'], mail_template['template'],
                    user, context, request)
        else:
            # Group is full: queue the user on the waiting list.
            working_group.addtoproperty('wating_list', user)
            working_group.reindex()
            users = list(participants)
            users.append(user)
            alert('internal', [root], users,
                  internal_kind=InternalAlertKind.working_group_alert,
                  subjects=[context], alert_kind='wg_participation_max')
            if getattr(user, 'email', ''):
                mail_template = root.get_mail_template('wating_list')
                self._send_mail_to_user(
                    mail_template['subject'], mail_template['template'],
                    user, context, request)
        request.registry.notify(ActivityExecuted(
            self, [context, working_group], user))
        return {}
    def redirect(self, context, request, **kw):
        return HTTPFound(request.resource_url(context, "@@index"))
def compare_processsecurity_validation(process, context):
    """Comparison needs a previous version plus owner/member visibility."""
    if getattr(context, 'version', None) is None:
        return False
    visible = has_role(role=('Owner', context)) or \
        (has_role(role=('Member',)) and 'draft' not in context.state)
    return visible and global_user_processsecurity()
class CompareProposal(InfiniteCardinality):
    """Compare a proposal with one of its previous versions."""
    title = _('Compare')
    context = IProposal
    processsecurity_validation = compare_processsecurity_validation
    def start(self, context, request, appstruct, **kw):
        # Pure display action: no state change.
        return {}
    def redirect(self, context, request, **kw):
        return HTTPFound(request.resource_url(context, "@@index"))
def attach_roles_validation(process, context):
    """Only participants of the proposal can attach files."""
    participant_role = ('Participant', context)
    return has_role(role=participant_role)
def attach_processsecurity_validation(process, context):
    """Attaching files is gated only by the global security check."""
    return global_user_processsecurity()
def attach_state_validation(process, context):
    """Files can only be attached while an active group improves the proposal."""
    group = context.working_group
    return group and 'active' in group.state and 'amendable' in context.state
class AttachFiles(InfiniteCardinality):
    """Attach files to an amendable proposal being worked on."""
    style = 'button' #TODO add style abstract class
    style_descriminator = 'text-action'
    style_interaction = 'ajax-action'
    style_picto = 'glyphicon glyphicon-paperclip'
    style_order = 3
    submission_title = _('Save')
    context = IProposal
    roles_validation = attach_roles_validation
    processsecurity_validation = attach_processsecurity_validation
    state_validation = attach_state_validation
    def start(self, context, request, appstruct, **kw):
        add_attached_files({'add_files': appstruct}, context)
        context.reindex()
        request.registry.notify(ActivityExecuted(
            self, [context], get_current()))
        return {}
    def redirect(self, context, request, **kw):
        return HTTPFound(request.resource_url(context, "@@index"))
def get_access_key(obj):
    """Return the index keys used to resolve access rights for *obj*.

    Non-draft proposals are visible to everyone ('always'); drafts are
    restricted to their owner and admins.
    """
    if 'draft' in obj.state:
        return serialize_roles(
            (('Owner', obj), 'Admin'))
    return ['always']
def seeproposal_processsecurity_validation(process, context):
    """A draft is visible only to its owner or an admin."""
    allowed = access_user_processsecurity(process, context)
    return allowed and ('draft' not in context.state or
                        has_any_roles(roles=(('Owner', context), 'Admin')))
@access_action(access_key=get_access_key)
class SeeProposal(InfiniteCardinality):
    """Default (automatic) view action; access resolved via ``get_access_key``."""
    title = _('Details')
    context = IProposal
    actionType = ActionType.automatic
    processsecurity_validation = seeproposal_processsecurity_validation
    def start(self, context, request, appstruct, **kw):
        # Pure display action: no state change.
        return {}
    def redirect(self, context, request, **kw):
        return HTTPFound(request.resource_url(context, "@@index"))
#*************************** ProposalImprovementCycle process **********************************#
def decision_relation_validation(process, context):
    """The improvement-cycle process must be bound to this proposal."""
    execution_context = process.execution_context
    return execution_context.has_relation(context, 'proposal')
def decision_roles_validation(process, context):
    """Improvement-cycle decisions require the Admin role."""
    admin_role = ('Admin',)
    return has_role(role=admin_role)
def decision_state_validation(process, context):
    """Valid while an active working group improves an amendable proposal."""
    group = context.working_group
    return group and 'active' in group.state and \
        'amendable' in context.state
class VotingPublication(ElementaryAction):
    """Improvement-cycle step: open the vote for publishing the proposal.

    After execution, any still-open vote sub-processes are closed and the
    cycle continues with either 'submit' (publication voted) or 'work'.
    """
    style = 'button' #TODO add style abstract class
    style_descriminator = 'global-action'
    style_order = 5
    context = IProposal
    processs_relation_id = 'proposal'
    #actionType = ActionType.system
    relation_validation = decision_relation_validation
    roles_validation = decision_roles_validation
    state_validation = decision_state_validation
    def start(self, context, request, appstruct, **kw):
        # Replace the current head state with the voting state.
        context.state.remove(context.state[0])
        context.state.insert(0, 'votes for publishing')
        context.reindex()
        working_group = context.working_group
        working_group.iteration = getattr(working_group, 'iteration', 0) + 1
        # No member alert on the very first vote of the group.
        if not getattr(working_group, 'first_vote', True):
            members = working_group.members
            url = request.resource_url(context, "@@index")
            root = getSite()
            mail_template = root.get_mail_template('start_vote_publishing')
            subject = mail_template['subject'].format(
                subject_title=context.title)
            localizer = request.localizer
            alert('internal', [root], members,
                  internal_kind=InternalAlertKind.working_group_alert,
                  subjects=[context], alert_kind='end_work')
            for member in members:
                if getattr(member, 'email', ''):
                    message = mail_template['template'].format(
                        recipient_title=localizer.translate(
                            _(getattr(member, 'user_title', ''))),
                        recipient_first_name=getattr(
                            member, 'first_name', member.name),
                        recipient_last_name=getattr(
                            member, 'last_name', ''),
                        subject_title=context.title,
                        subject_url=url,
                        novaideo_title=root.title
                    )
                    alert('email', [root.get_site_sender()], [member.email],
                          subject=subject, body=message)
        request.registry.notify(ActivityExecuted(
            self, [context], get_current()))
        return {}
    def after_execution(self, context, request, **kw):
        proposal = self.process.execution_context.created_entity(
            'proposal')
        if self.sub_process:
            # Close any vote sub-processes still running.
            exec_ctx = self.sub_process.execution_context
            vote_processes = exec_ctx.get_involved_collection('vote_processes')
            opened_vote_processes = [process for process in vote_processes
                                     if not process._finished]
            if opened_vote_processes:
                close_votes(proposal, request, opened_vote_processes)
        # NOTE(review): naive datetime here, while the rest of the file uses
        # datetime.datetime.now(tz=pytz.UTC) — confirm this is intended.
        setattr(self.process, 'new_cycle_date', datetime.datetime.now())
        setattr(self.process, 'previous_alert', -1)
        super(VotingPublication, self).after_execution(proposal, request, **kw)
        is_published = publish_condition(self.process)
        if is_published:
            self.process.execute_action(proposal, request, 'submit', {})
        else:
            self.process.execute_action(proposal, request, 'work', {})
    def redirect(self, context, request, **kw):
        return HTTPFound(request.resource_url(context, "@@index"))
def work_state_validation(process, context):
    """The group is active and the publication vote is in progress."""
    group_states = getattr(context.working_group, 'state', [])
    return 'active' in group_states and \
        'votes for publishing' in context.state
class Work(ElementaryAction):
    """Improvement-cycle step: (re)open the amendment work.

    Moves the proposal from 'votes for publishing' back to 'amendable',
    possibly reopening a closed working group after a unanimous reopening
    ballot, then notifies the members by mail and loops back to the
    publication vote.
    """
    style = 'button' #TODO add style abstract class
    style_descriminator = 'global-action'
    style_order = 5
    context = IProposal
    processs_relation_id = 'proposal'
    #actionType = ActionType.system
    relation_validation = decision_relation_validation
    # Fix: the original wired ``work_state_validation`` (a state predicate)
    # as roles_validation and ``decision_state_validation`` as the state
    # validation. ``decision_state_validation`` requires 'amendable', which
    # is never set while 'votes for publishing' is, so the action could
    # never validate; the two assignments were swapped.
    roles_validation = decision_roles_validation
    state_validation = work_state_validation
    def _send_mails(self, context, request, subject_template, message_template):
        # Alert the working-group members and mail each member that has an
        # email address.
        working_group = context.working_group
        duration = to_localized_time(
            calculate_amendments_cycle_duration(self.process),
            translate=True)
        isclosed = 'closed' in working_group.state
        members = working_group.members
        url = request.resource_url(context, "@@index")
        subject = subject_template.format(subject_title=context.title)
        localizer = request.localizer
        root = request.root
        alert('internal', [root], members,
              internal_kind=InternalAlertKind.working_group_alert,
              subjects=[context], alert_kind='start_work')
        for member in [m for m in members if getattr(m, 'email', '')]:
            message = message_template.format(
                recipient_title=localizer.translate(
                    _(getattr(member, 'user_title', ''))),
                recipient_first_name=getattr(
                    member, 'first_name', member.name),
                recipient_last_name=getattr(member, 'last_name', ''),
                subject_title=context.title,
                subject_url=url,
                duration=duration,
                isclosed=localizer.translate(
                    (isclosed and _('closed')) or _('open')),
                novaideo_title=root.title
            )
            alert('email', [root.get_site_sender()], [member.email],
                  subject=subject, body=message)
    def start(self, context, request, appstruct, **kw):
        root = getSite()
        working_group = context.working_group
        context.state.remove('votes for publishing')
        #Only for amendments work mode
        reopening_ballot = getattr(
            working_group, 'reopening_configuration_ballot', None)
        if reopening_ballot is not None:
            report = reopening_ballot.report
            voters_len = len(report.voters)
            electors_len = len(report.electors)
            report.calculate_votes()
            # Reopen the group only on an unanimous (absolute majority)
            # reopening vote.
            if (voters_len == electors_len) and \
               (report.result['False'] == 0) and \
               'closed' in working_group.state:
                working_group.state.remove('closed')
        context.state.insert(0, 'amendable')
        #The first improvement cycle is started
        # Fix: the mail templates were swapped — the first cycle must use
        # 'first_start_work' and later cycles 'start_work'.
        if working_group.first_improvement_cycle:
            mail_template = root.get_mail_template('first_start_work')
            self._send_mails(
                context, request,
                mail_template['subject'], mail_template['template'])
            working_group.first_improvement_cycle = False
        else:
            mail_template = root.get_mail_template('start_work')
            self._send_mails(
                context, request,
                mail_template['subject'], mail_template['template'])
        context.reindex()
        working_group.reindex()
        request.registry.notify(ActivityExecuted(
            self, [context, working_group], get_current()))
        return {}
    def after_execution(self, context, request, **kw):
        proposal = self.process.execution_context.created_entity('proposal')
        super(Work, self).after_execution(proposal, request, **kw)
        # Loop back to the publication vote for the next cycle.
        self.process.execute_action(proposal, request, 'votingpublication', {})
    def redirect(self, context, request, **kw):
        return HTTPFound(request.resource_url(context, "@@index"))
def submit_roles_validation(process, context):
    """Only an administrator may force the submission."""
    admin_only = has_role(role=('Admin',))
    return admin_only
def submit_state_validation(process, context):
    """The group must be active and the proposal in its publication-vote state."""
    wg = context.working_group
    if not wg:
        # Preserve the original short-circuit value (e.g. None).
        return wg
    return 'active' in context.working_group.state and \
        'votes for publishing' in context.state
class SubmitProposal(ElementaryAction):
    """Publish the proposal for support and archive its working group.

    Each member gets one support token bound to this proposal and loses
    the 'Participant' role; subscribers and members are alerted.
    """
    style = 'button' #TODO add style abstract class
    style_descriminator = 'global-action'
    style_picto = 'glyphicon glyphicon-certificate'
    style_order = 2
    context = IProposal
    processs_relation_id = 'proposal'
    #actionType = ActionType.system
    relation_validation = decision_relation_validation
    roles_validation = submit_roles_validation
    state_validation = submit_state_validation

    def start(self, context, request, appstruct, **kw):
        root = getSite()
        localizer = request.localizer
        working_group = context.working_group
        # State order differs depending on whether proposals are opened
        # to support on this site (first entry is the leading state).
        if 'proposal' in getattr(root, 'content_to_support', []):
            context.state = PersistentList(['submitted_support', 'published'])
        else:
            context.state = PersistentList(['published', 'submitted_support'])
        working_group.state = PersistentList(['archived'])
        members = working_group.members
        for member in members:
            # One token per member, cross-linked with its owner.
            token = Token(title='Token_'+context.title)
            token.setproperty('proposal', context)
            member.addtoproperty('tokens_ref', token)
            member.addtoproperty('tokens', token)
            token.setproperty('owner', member)
            revoke_roles(member, (('Participant', context),))
        #Alert users
        users = list(get_users_by_preferences(context))
        users.extend(members)
        users = set(users)
        url = request.resource_url(context, "@@index")
        mail_template = root.get_mail_template('publish_proposal')
        subject = mail_template['subject'].format(
            subject_title=context.title)
        alert('internal', [root], users,
              internal_kind=InternalAlertKind.working_group_alert,
              subjects=[context], alert_kind='submit_proposal')
        for member in [m for m in users if getattr(m, 'email', '')]:
            message = mail_template['template'].format(
                recipient_title=localizer.translate(
                    _(getattr(member, 'user_title', ''))),
                recipient_first_name=getattr(
                    member, 'first_name', member.name),
                recipient_last_name=getattr(member, 'last_name', ''),
                subject_title=context.title,
                subject_url=url,
                novaideo_title=root.title
            )
            alert('email', [root.get_site_sender()], [member.email],
                  subject=subject, body=message)
        context.modified_at = datetime.datetime.now(tz=pytz.UTC)
        working_group.reindex()
        context.reindex()
        request.registry.notify(ActivityExecuted(
            self, [context, working_group], get_current()))
        return {}

    def redirect(self, context, request, **kw):
        return HTTPFound(request.resource_url(context, "@@index"))
def alert_relation_validation(process, context):
    """The alert only applies to this process's own proposal."""
    execution_context = process.execution_context
    return execution_context.has_relation(context, 'proposal')
def alert_roles_validation(process, context):
    """Only the system itself may trigger the alert."""
    is_system = has_role(role=('System',))
    return is_system
class AlertEnd(ElementaryAction):
    """System action warning members that the amendment cycle is ending."""
    style = 'button' #TODO add style abstract class
    style_descriminator = 'global-action'
    style_order = 4
    context = IProposal
    actionType = ActionType.system
    processs_relation_id = 'proposal'
    roles_validation = alert_roles_validation
    relation_validation = alert_relation_validation

    def start(self, context, request, appstruct, **kw):
        working_group = context.working_group
        # Count how many times this alert already fired on the process
        # (starts at -1 so the first execution stores 0).
        previous_alert = getattr(self.process, 'previous_alert', -1)
        setattr(self.process, 'previous_alert', previous_alert + 1)
        # Only alert while the group is active and the proposal amendable.
        if 'active' in working_group.state and 'amendable' in context.state:
            members = working_group.members
            url = request.resource_url(context, "@@index")
            root = request.root
            mail_template = root.get_mail_template('alert_end')
            subject = mail_template['subject'].format(
                subject_title=context.title)
            localizer = request.localizer
            alert('internal', [root], members,
                  internal_kind=InternalAlertKind.working_group_alert,
                  subjects=[context], alert_kind='alert_end_work')
            for member in [m for m in members if getattr(m, 'email', '')]:
                message = mail_template['template'].format(
                    recipient_title=localizer.translate(
                        _(getattr(member, 'user_title', ''))),
                    recipient_first_name=getattr(
                        member, 'first_name', member.name),
                    recipient_last_name=getattr(
                        member, 'last_name', ''),
                    subject_url=url,
                    subject_title=context.title,
                    novaideo_title=root.title
                )
                alert('email', [root.get_site_sender()], [member.email],
                      subject=subject, body=message)
        return {}

    def redirect(self, context, request, **kw):
        return HTTPFound(request.resource_url(context, "@@index"))
#**********************************************Workspace***************************************************
def get_access_key_ws(obj):
    """Access key: roles allowed to see the workspace of ``obj``'s proposal."""
    allowed_roles = (('Participant', obj.proposal), 'Admin')
    return serialize_roles(allowed_roles)
def seeworkspace_processsecurity_validation(process, context):
    """Participants of the proposal and administrators may see the workspace."""
    allowed_roles = (('Participant', context.proposal), 'Admin')
    return has_any_roles(roles=allowed_roles)
@access_action(access_key=get_access_key_ws)
class SeeWorkspace(InfiniteCardinality):
    """View action giving participants and admins access to the workspace."""
    title = _('Details')
    context = IWorkspace
    actionType = ActionType.automatic
    processsecurity_validation = seeworkspace_processsecurity_validation

    def start(self, context, request, appstruct, **kw):
        # Nothing to do: the associated view renders the workspace.
        return {}

    def redirect(self, context, request, **kw):
        return HTTPFound(request.resource_url(context, "@@index"))
class AddFiles(InfiniteCardinality):
    """Upload files into the working-group workspace."""
    style = 'button' #TODO add style abstract class
    style_descriminator = 'global-action'
    style_picto = 'glyphicon glyphicon-import'
    style_order = 4
    submission_title = _('Save')
    context = IWorkspace
    roles_validation = seeworkspace_processsecurity_validation
    processsecurity_validation = createproposal_processsecurity_validation

    def start(self, context, request, appstruct, **kw):
        # Missing 'files' key is treated as "nothing to add".
        add_files_to_workspace(appstruct.get('files', []), context)
        context.reindex()
        return {}

    def redirect(self, context, request, **kw):
        return HTTPFound(request.resource_url(context, "@@index"))
class RemoveFile(InfiniteCardinality):
    """Remove a file (looked up by OID) from the workspace."""
    context = IWorkspace
    roles_validation = seeworkspace_processsecurity_validation
    processsecurity_validation = createproposal_processsecurity_validation

    def start(self, context, request, appstruct, **kw):
        oid = appstruct.get('oid', None)
        if oid:
            try:
                file_ = get_obj(int(oid))
                # Only detach files that really belong to this workspace.
                if file_ and file_ in context.files:
                    context.delfromproperty('files', file_)
            except Exception as error:
                # Best effort: a bad or stale OID is logged, never fatal.
                log.warning(error)
        return {}

    def redirect(self, context, request, **kw):
        return HTTPFound(request.resource_url(context, "@@index"))
#TODO behaviors
# Register the action used to validate comments on proposals
# (presumably keyed by content class; see comment_management.behaviors).
VALIDATOR_BY_CONTEXT[Proposal] = CommentProposal
# fix conditions
# -*- coding: utf8 -*-
# Copyright (c) 2014 by Ecreall under licence AGPL terms
# available on http://www.gnu.org/licenses/agpl.html
# licence: AGPL
# author: Amen Souissi
"""
This module represent all of behaviors used in the
Proposal management process definition.
"""
import datetime
import pytz
from persistent.list import PersistentList
from persistent.dict import PersistentDict
from pyramid.httpexceptions import HTTPFound
from pyramid.threadlocal import get_current_request
from substanced.util import get_oid
from dace.util import (
getSite,
copy,
find_service,
get_obj)
from dace.objectofcollaboration.principal.util import (
has_role,
grant_roles,
get_current,
revoke_roles,
has_any_roles)
#from dace.objectofcollaboration import system
import html_diff_wrapper
from dace.processinstance.activity import (
InfiniteCardinality, ElementaryAction, ActionType)
from dace.processinstance.core import ActivityExecuted
from pontus.file import OBJECT_DATA
from novaideo.content.interface import (
INovaIdeoApplication,
IProposal,
Iidea,
IWorkspace)
from ..user_management.behaviors import (
global_user_processsecurity,
access_user_processsecurity)
from novaideo import _, log
from novaideo.content.proposal import Proposal
from ..comment_management.behaviors import VALIDATOR_BY_CONTEXT
from novaideo.content.correlation import CorrelationType
from novaideo.content.token import Token
from novaideo.content.working_group import WorkingGroup
from novaideo.content.processes.idea_management.behaviors import (
PresentIdea,
CommentIdea,
Associate as AssociateIdea)
from novaideo.utilities.util import (
connect, disconnect, to_localized_time)
from novaideo.event import (
ObjectPublished, CorrelableRemoved, ObjectModified)
from novaideo.content.processes.proposal_management import WORK_MODES
from novaideo.core import access_action, serialize_roles
from novaideo.content.alert import InternalAlertKind
from novaideo.views.filter import get_users_by_preferences
from novaideo.utilities.alerts_utility import alert
from . import (
FIRST_VOTE_PUBLISHING_MESSAGE,
VP_DEFAULT_DURATION,
AMENDMENTS_CYCLE_DEFAULT_DURATION,
FIRST_VOTE_DURATION_MESSAGE,
init_proposal_ballots,
add_files_to_workspace,
add_attached_files)
# Python 2/3 compatibility: on Python 3 ``basestring`` no longer exists,
# so alias it to ``str``.
try:
    basestring
except NameError:
    basestring = str
# Help text shown for the publication vote (site copy, in French).
VOTE_PUBLISHING_MESSAGE = _("Chaque participant du groupe de travail vote pour"
    " ou contre l'amélioration de la proposition. Si la majorité"
    " est \"pour\", un nouveau cycle d'amélioration commence, sinon"
    " la proposition est soumise en l'état aux autres membres de la plateforme")

# Help text for the amendment-cycle duration vote.
VOTE_DURATION_MESSAGE = _("Voting results may not be known until the end of"
    " the period for voting. In the case where the"
    " majority are for the continuation of improvements"
    " of the proposal, your vote for the duration of the"
    " amendment period will be useful")

# Help text for the work-mode vote.
VOTE_MODEWORK_MESSAGE = _("Voting results may not be known until the end of"
    " the period for voting. In the case where the"
    " majority are for the continuation of improvements"
    " of the proposal, your vote for the work mode will be useful")

# Help text for the working-group reopening vote.
VOTE_REOPENING_MESSAGE = _("Voting results may not be known until the end of"
    " the period for voting. In the case where the"
    " majority are for the continuation of improvements"
    " of the proposal, your vote for reopening working"
    " group will be useful")
def publish_ideas(ideas, request):
    """Flip each idea to the 'published' state and broadcast the event."""
    notify = request.registry.notify
    for idea in ideas:
        idea.state = PersistentList(['published'])
        idea.modified_at = datetime.datetime.now(tz=pytz.UTC)
        idea.reindex()
        notify(ObjectPublished(object=idea))
def publish_condition(process):
    """Return True when the publication ballot favours submitting the proposal.

    After the first vote, publication wins as soon as the ballot elected
    an outcome; on the very first vote it wins only when nobody voted
    against. Returns None when the process has no created proposal.
    """
    proposal = process.execution_context.created_entity('proposal')
    if not proposal:
        return None
    working_group = proposal.working_group
    report = working_group.vp_ballot.report
    if not getattr(working_group, 'first_vote', True):
        return report.get_electeds() is not None
    report.calculate_votes()
    return report.result['False'] == 0
def start_improvement_cycle(proposal):
    """Instantiate, wire and start a 'proposalimprovementcycle' process.

    Returns the running process, whose execution context is bound to
    ``proposal`` under the 'proposal' key.
    """
    def_container = find_service('process_definition_container')
    runtime = find_service('runtime')
    pd = def_container.get_definition('proposalimprovementcycle')
    proc = pd()
    proc.__name__ = proc.id
    # The runtime must own the process before it is wired and executed.
    runtime.addtoproperty('processes', proc)
    proc.defineGraph(pd)
    proc.execution_context.add_created_entity('proposal', proposal)
    proc.execute()
    return proc
def close_votes(context, request, vote_processes):
    """Close the 'vote' action of every process in ``vote_processes``."""
    # Collect every 'vote' action first, then close them all.
    all_actions = [action
                   for process in vote_processes
                   for action in process.get_actions('vote')]
    for action in all_actions:
        action.close_vote(context, request)
def first_vote_registration(user, working_group, appstruct):
    """Record the author's initial ballots (cycle duration + publication).

    Each ballot accepts at most one vote per user. ``appstruct`` carries
    the elected duration under 'elected' and the publication choice
    under 'vote'.
    """
    #duration vote
    ballot = working_group.duration_configuration_ballot
    report = ballot.report
    if user not in report.voters:
        elected_id = appstruct['elected']
        try:
            # 'elected' may wrap the object itself or already be an OID.
            subject_id = get_oid(elected_id[OBJECT_DATA])
        except Exception:
            subject_id = elected_id
        votefactory = report.ballottype.vote_factory
        vote = votefactory(subject_id)
        vote.user_id = get_oid(user)
        ballot.ballot_box.addtoproperty('votes', vote)
        report.addtoproperty('voters', user)
    #publication vote
    ballot = working_group.vp_ballot
    report = ballot.report
    if user not in report.voters:
        vote = appstruct['vote']
        votefactory = report.ballottype.vote_factory
        vote = votefactory(vote)
        vote.user_id = get_oid(user)
        ballot.ballot_box.addtoproperty('votes', vote)
        report.addtoproperty('voters', user)
def first_vote_remove(user, working_group):
    """Withdraw the user's vote from both first-vote ballots.

    Removes the user's vote (matched by OID) and voter registration from
    the duration-configuration ballot and the publication ballot.
    """
    user_oid = get_oid(user)
    ballots = (working_group.duration_configuration_ballot,
               working_group.vp_ballot)
    for ballot in ballots:
        matching = [v for v in ballot.ballot_box.votes
                    if getattr(v, 'user_id', 0) == user_oid]
        if matching:
            ballot.ballot_box.delfromproperty('votes', matching[0])
            ballot.report.delfromproperty('voters', user)
def calculate_amendments_cycle_duration(process):
    """Return the end datetime of the next amendment cycle.

    Uses the duration elected by the working group when the duration
    ballot has voters; falls back to one week otherwise.
    """
    if getattr(process, 'attachedTo', None):
        process = process.attachedTo.process
    proposal = process.execution_context.created_entity('proposal')
    working_group = proposal.working_group
    duration_ballot = getattr(
        working_group, 'duration_configuration_ballot', None)
    duration_key = "One week"
    if duration_ballot is not None and duration_ballot.report.voters:
        electeds = duration_ballot.report.get_electeds()
        if electeds:
            duration_key = electeds[0]
    return AMENDMENTS_CYCLE_DEFAULT_DURATION[duration_key] + \
        datetime.datetime.now()
def createproposal_roles_validation(process, context):
    """Proposal creation is reserved to administrators."""
    is_admin = has_role(role=('Admin',))
    return is_admin
def createproposal_processsecurity_validation(process, context):
    """Deny creation on 'idea box' sites; otherwise defer to global security."""
    request = get_current_request()
    is_idea_box = getattr(request, 'is_idea_box', False)
    return False if is_idea_box else global_user_processsecurity()
def include_ideas_texts(proposal, related_ideas):
    """Append each related idea's text, wrapped in a <div>, to the proposal."""
    fragments = ['<div>' + idea.text + '</div>' for idea in related_ideas]
    proposal.text = getattr(proposal, 'text', '') + ''.join(fragments)
class CreateProposal(InfiniteCardinality):
    """Create a proposal and its (deactivated) working group.

    The creator becomes Owner/Participant and first member of the group;
    related ideas are linked through a solid correlation.
    """
    submission_title = _('Save')
    context = INovaIdeoApplication
    roles_validation = createproposal_roles_validation
    processsecurity_validation = createproposal_processsecurity_validation

    def start(self, context, request, appstruct, **kw):
        root = getSite()
        user = get_current()
        related_ideas = appstruct.pop('related_ideas')
        # '_object_data' holds the Proposal instance built by the form.
        proposal = appstruct['_object_data']
        root.merge_keywords(proposal.keywords)
        proposal.text = html_diff_wrapper.normalize_text(proposal.text)
        root.addtoproperty('proposals', proposal)
        proposal.state.append('draft')
        grant_roles(user=user, roles=(('Owner', proposal), ))
        grant_roles(user=user, roles=(('Participant', proposal), ))
        proposal.setproperty('author', user)
        # Each proposal owns a working group with its own workspace.
        wg = WorkingGroup()
        root.addtoproperty('working_groups', wg)
        wg.init_workspace()
        wg.setproperty('proposal', proposal)
        wg.addtoproperty('members', user)
        wg.state.append('deactivated')
        if related_ideas:
            connect(proposal,
                    related_ideas,
                    {'comment': _('Add related ideas'),
                     'type': _('Creation')},
                    user,
                    ['related_proposals', 'related_ideas'],
                    CorrelationType.solid)
        add_attached_files(appstruct, proposal)
        proposal.reindex()
        init_proposal_ballots(proposal)
        wg.reindex()
        proposal.subscribe_to_channel(user)
        request.registry.notify(ActivityExecuted(self, [proposal, wg], user))
        return {'newcontext': proposal}

    def redirect(self, context, request, **kw):
        # Land on the freshly created proposal.
        return HTTPFound(request.resource_url(kw['newcontext'], "@@index"))
def pap_processsecurity_validation(process, context):
    """Members may turn an idea into a proposal once it is published
    (or 'favorable' on sites where ideas are examined); never on idea boxes."""
    request = get_current_request()
    if getattr(request, 'is_idea_box', False):
        return False
    required_state = ('favorable'
                      if 'idea' in request.content_to_examine
                      else 'published')
    return required_state in context.state and has_role(role=('Member',))
class PublishAsProposal(CreateProposal):
    """Create a proposal directly from an idea (primary action on ideas)."""
    style = 'button' #TODO add style abstract class
    context = Iidea
    submission_title = _('Save')
    style_order = 0
    style_descriminator = 'primary-action'
    style_picto = 'novaideo-icon icon-wg'
    processsecurity_validation = pap_processsecurity_validation
    # NOTE(review): disables the inherited Admin-only roles check — confirm.
    roles_validation = NotImplemented
def del_processsecurity_validation(process, context):
    """Only the owner may delete, and only while the proposal is a draft."""
    if not global_user_processsecurity():
        return False
    return has_role(role=('Owner', context)) and 'draft' in context.state
class DeleteProposal(InfiniteCardinality):
    """Delete a proposal and dismantle its working group.

    When deleted by someone other than the draft's owner (moderation),
    the group members are notified with the given explanation.
    """
    style = 'button' #TODO add style abstract class
    style_descriminator = 'global-action'
    style_interaction = 'ajax-action'
    style_picto = 'glyphicon glyphicon-trash'
    style_order = 12
    submission_title = _('Continue')
    context = IProposal
    processsecurity_validation = del_processsecurity_validation

    def start(self, context, request, appstruct, **kw):
        root = getSite()
        # True when a moderator (not the draft owner) removes the proposal.
        not_draft_owner = 'draft' not in context.state or \
            not has_role(role=('Owner', context))
        # Free tokens go back to their owners; proposal-bound tokens are
        # dropped from the owners' references.
        tokens = [t for t in context.tokens if not t.proposal]
        proposal_tokens = [t for t in context.tokens if t.proposal]
        for token in list(tokens):
            token.owner.addtoproperty('tokens', token)
        for proposal_token in list(proposal_tokens):
            proposal_token.owner.delfromproperty('tokens_ref', proposal_token)
        wg = context.working_group
        members = list(wg.members)
        for member in members:
            wg.delfromproperty('members', member)
        wg.delfromproperty('proposal', context)
        root.delfromproperty('working_groups', wg)
        request.registry.notify(CorrelableRemoved(object=context))
        root.delfromproperty('proposals', context)
        if not_draft_owner:
            mail_template = root.get_mail_template('delete_proposal')
            explanation = appstruct['explanation']
            subject = mail_template['subject'].format(
                subject_title=context.title)
            localizer = request.localizer
            alert('internal', [root], members,
                  internal_kind=InternalAlertKind.moderation_alert,
                  subjects=[], removed=True, subject_title=context.title)
            for member in members:
                if getattr(member, 'email', ''):
                    message = mail_template['template'].format(
                        recipient_title=localizer.translate(
                            _(getattr(member, 'user_title', ''))),
                        recipient_first_name=getattr(
                            member, 'first_name', member.name),
                        recipient_last_name=getattr(member, 'last_name', ''),
                        subject_title=context.title,
                        explanation=explanation,
                        novaideo_title=root.title
                    )
                    alert('email', [root.get_site_sender()], [member.email],
                          subject=subject, body=message)
        return {'newcontext': root}

    def redirect(self, context, request, **kw):
        return HTTPFound(request.resource_url(kw['newcontext'], ""))
def publish_roles_validation(process, context):
    """Only the proposal's owner may publish it."""
    is_owner = has_role(role=('Owner', context))
    return is_owner
def publish_processsecurity_validation(process, context):
    """Publishing requires acceptable related ideas and a free group slot.

    Blocks when moderated ideas are unpublished, or (on examining sites)
    when any related idea is not 'favorable'; also enforces the user's
    working-group participation quota and the global security check.
    """
    user = get_current()
    root = getSite()
    related = list(context.related_ideas.keys())
    blocking = False
    if getattr(root, 'moderate_ideas', False):
        blocking = any('published' not in idea.state for idea in related)
    if not blocking and 'idea' in getattr(root, 'content_to_examine', []):
        blocking = any('favorable' not in idea.state for idea in related)
    if blocking:
        return False
    return len(user.active_working_groups) < root.participations_maxi and \
        global_user_processsecurity()
def publish_state_validation(process, context):
    """Publishing applies only to drafts."""
    states = context.state
    return "draft" in states
class PublishProposal(InfiniteCardinality):
    """Publish a draft proposal.

    Depending on the author's choice ('vote' in the appstruct), the
    proposal is either submitted directly for support (working group
    archived) or opened to a working group / made amendable, in which
    case the improvement-cycle process is started and its publication
    vote kicked off. Related ideas that need no moderation/examination
    are published along with it.
    """
    style = 'button' #TODO add style abstract class
    style_descriminator = 'global-action'
    style_picto = 'glyphicon glyphicon-share'
    style_order = 13
    submission_title = _('Continue')
    context = IProposal
    roles_validation = publish_roles_validation
    processsecurity_validation = publish_processsecurity_validation
    state_validation = publish_state_validation

    def start(self, context, request, appstruct, **kw):
        user = get_current()
        root = getSite()
        working_group = context.working_group
        context.state.remove('draft')
        if appstruct.get('vote', False):
            # Direct submission: skip the working-group phase entirely.
            if 'proposal' in getattr(root, 'content_to_support', []):
                context.state = PersistentList(
                    ['submitted_support', 'published'])
            else:
                context.state = PersistentList(
                    ['published', 'submitted_support'])
            working_group.state = PersistentList(['archived'])
            context.reindex()
            working_group.reindex()
        else:
            default_mode = root.get_default_work_mode()
            participants_mini = root.participants_mini
            mode_id = appstruct.get('work_mode', default_mode.work_id)
            if mode_id:
                working_group.work_mode_id = mode_id
                participants_mini = WORK_MODES[mode_id].participants_mini
            #Only the vote of the author is considered
            first_vote_registration(user, working_group, appstruct)
            if participants_mini > 1:
                # Wait for more members before the group becomes active.
                context.state = PersistentList(
                    ['open to a working group', 'published'])
                context.reindex()
            else:
                context.state = PersistentList(['amendable', 'published'])
                working_group.state = PersistentList(['active'])
                context.reindex()
                working_group.reindex()
            if not hasattr(working_group, 'first_improvement_cycle'):
                working_group.first_improvement_cycle = True
            if not working_group.improvement_cycle_proc:
                improvement_cycle_proc = start_improvement_cycle(context)
                working_group.setproperty(
                    'improvement_cycle_proc', improvement_cycle_proc)
            # Kick off the publication vote of the improvement cycle.
            working_group.improvement_cycle_proc.execute_action(
                context, request, 'votingpublication', {})
        context.modified_at = datetime.datetime.now(tz=pytz.UTC)
        context.init_published_at()
        not_published_ideas = []
        if not getattr(root, 'moderate_ideas', False) and\
           'idea' not in getattr(root, 'content_to_examine', []):
            not_published_ideas = [i for i in context.related_ideas.keys()
                                   if 'published' not in i.state]
            publish_ideas(not_published_ideas, request)
        # BUG FIX: was ``not_published_ideas.extend(context)``, which
        # tries to iterate the proposal itself; the intent is to include
        # the proposal in the ActivityExecuted notification below.
        not_published_ideas.append(context)
        request.registry.notify(ObjectPublished(object=context))
        request.registry.notify(ActivityExecuted(
            self, not_published_ideas, user))
        return {}

    def redirect(self, context, request, **kw):
        return HTTPFound(request.resource_url(context, "@@index"))
def duplicate_processsecurity_validation(process, context):
    """Only non-draft proposals can be duplicated."""
    if 'draft' in context.state:
        return False
    return global_user_processsecurity()
class DuplicateProposal(InfiniteCardinality):
    """Create a draft copy of a non-draft proposal.

    The copy gets fresh dates/opinion/graph, is linked back to the
    original via 'originalentity', and gets its own working group.
    """
    style = 'button' #TODO add style abstract class
    style_descriminator = 'global-action'
    style_picto = 'octicon octicon-git-branch'
    style_order = 7
    submission_title = _('Save')
    context = IProposal
    processsecurity_validation = duplicate_processsecurity_validation

    def start(self, context, request, appstruct, **kw):
        root = getSite()
        user = get_current()
        related_ideas = appstruct.pop('related_ideas')
        root.merge_keywords(appstruct['keywords'])
        # Copy without the fields that must be reinitialized on the clone.
        copy_of_proposal = copy(
            context, (root, 'proposals'),
            omit=('created_at', 'modified_at',
                  'examined_at', 'published_at',
                  'opinion', 'attached_files',
                  'len_selections', 'graph'))
        copy_of_proposal.opinion = PersistentDict({})
        copy_of_proposal.init_graph()
        copy_of_proposal.set_data(appstruct)
        copy_of_proposal.text = html_diff_wrapper.normalize_text(
            copy_of_proposal.text)
        copy_of_proposal.setproperty('originalentity', context)
        copy_of_proposal.state = PersistentList(['draft'])
        grant_roles(user=user, roles=(('Owner', copy_of_proposal), ))
        grant_roles(user=user, roles=(('Participant', copy_of_proposal), ))
        copy_of_proposal.setproperty('author', user)
        # Fresh working group for the clone.
        wg = WorkingGroup()
        root.addtoproperty('working_groups', wg)
        wg.init_workspace()
        wg.setproperty('proposal', copy_of_proposal)
        wg.addtoproperty('members', user)
        wg.state.append('deactivated')
        if related_ideas:
            connect(copy_of_proposal,
                    related_ideas,
                    {'comment': _('Add related ideas'),
                     'type': _('Duplicate')},
                    user,
                    ['related_proposals', 'related_ideas'],
                    CorrelationType.solid)
        add_attached_files(appstruct, copy_of_proposal)
        wg.reindex()
        copy_of_proposal.reindex()
        init_proposal_ballots(copy_of_proposal)
        context.reindex()
        request.registry.notify(ActivityExecuted(
            self, [copy_of_proposal, wg], user))
        copy_of_proposal.subscribe_to_channel(user)
        return {'newcontext': copy_of_proposal}

    def redirect(self, context, request, **kw):
        return HTTPFound(request.resource_url(kw['newcontext'], "@@index"))
def edit_roles_validation(process, context):
    """Only the proposal's owner may edit it."""
    is_owner = has_role(role=('Owner', context))
    return is_owner
def edit_processsecurity_validation(process, context):
    """Defer to the platform-wide user security check."""
    allowed = global_user_processsecurity()
    return allowed
def edit_state_validation(process, context):
    """Editing is possible only while the proposal is a draft."""
    states = context.state
    return "draft" in states
class EditProposal(InfiniteCardinality):
    """Edit a draft proposal (owner only)."""
    style = 'button' #TODO add style abstract class
    style_descriminator = 'text-action'
    style_picto = 'glyphicon glyphicon-pencil'
    style_order = 1
    submission_title = _('Save')
    context = IProposal
    roles_validation = edit_roles_validation
    processsecurity_validation = edit_processsecurity_validation
    state_validation = edit_state_validation

    def start(self, context, request, appstruct, **kw):
        root = getSite()
        user = get_current()
        if 'related_ideas' in appstruct:
            context.set_related_ideas(
                appstruct['related_ideas'], user)
        add_attached_files(appstruct, context)
        # The form already updated context.text; normalize it for diffs.
        context.text = html_diff_wrapper.normalize_text(context.text)
        context.modified_at = datetime.datetime.now(tz=pytz.UTC)
        root.merge_keywords(context.keywords)
        context.reindex()
        request.registry.notify(ActivityExecuted(self, [context], user))
        return {}

    def redirect(self, context, request, **kw):
        return HTTPFound(request.resource_url(context, "@@index"))
def support_roles_validation(process, context):
    """Any member may support or oppose."""
    is_member = has_role(role=('Member',))
    return is_member
def support_processsecurity_validation(process, context):
    """The site must support proposals, and the user must still have a
    token they have not already spent on this proposal."""
    request = get_current_request()
    if 'proposal' not in request.content_to_support:
        return False
    user = get_current()
    if not getattr(user, 'tokens', []):
        return False
    if user in [t.owner for t in context.tokens]:
        return False
    return global_user_processsecurity()
def support_state_validation(process, context):
    """Tokens may be spent only while the proposal is open to support."""
    states = context.state
    return 'submitted_support' in states
class SupportProposal(InfiniteCardinality):
    """Spend one of the user's tokens to support the proposal."""
    # style = 'button' #TODO add style abstract class
    # style_descriminator = 'text-action'
    # style_picto = 'glyphicon glyphicon-thumbs-up'
    # style_order = 4
    context = IProposal
    roles_validation = support_roles_validation
    processsecurity_validation = support_processsecurity_validation
    state_validation = support_state_validation

    def start(self, context, request, appstruct, **kw):
        user = get_current()
        # Prefer the token originally bound to this proposal; fall back
        # to the user's last free token.
        token = None
        for tok in user.tokens:
            if tok.proposal is context:
                # CONSISTENCY FIX: break like OpposeProposal does, so the
                # first matching token is used rather than the last.
                token = tok
                break
        if token is None:
            token = user.tokens[-1]
        context.addtoproperty('tokens_support', token)
        context.init_support_history()
        # History entries are (user oid, timestamp, kind); 1 = support.
        context._support_history.append(
            (get_oid(user), datetime.datetime.now(tz=pytz.UTC), 1))
        request.registry.notify(ActivityExecuted(self, [context], user))
        users = list(get_users_by_preferences(context))
        users.extend(context.working_group.members)
        alert('internal', [request.root], users,
              internal_kind=InternalAlertKind.support_alert,
              subjects=[context], support_kind='support')
        return {}

    def redirect(self, context, request, **kw):
        return HTTPFound(request.resource_url(context, "@@index"))
class OpposeProposal(InfiniteCardinality):
    """Spend one of the user's tokens to oppose the proposal."""
    # style = 'button' #TODO add style abstract class
    # style_descriminator = 'text-action'
    # style_picto = 'glyphicon glyphicon-thumbs-down'
    # style_order = 5
    context = IProposal
    roles_validation = support_roles_validation
    processsecurity_validation = support_processsecurity_validation
    state_validation = support_state_validation

    def start(self, context, request, appstruct, **kw):
        user = get_current()
        # Prefer the token originally bound to this proposal; fall back
        # to the user's last free token.
        token = None
        for tok in user.tokens:
            if tok.proposal is context:
                token = tok
                break
        if token is None:
            token = user.tokens[-1]
        context.addtoproperty('tokens_opposition', token)
        context.init_support_history()
        # History entries are (user oid, timestamp, kind); 0 = opposition.
        context._support_history.append(
            (get_oid(user), datetime.datetime.now(tz=pytz.UTC), 0))
        request.registry.notify(ActivityExecuted(self, [context], user))
        users = list(get_users_by_preferences(context))
        users.extend(context.working_group.members)
        alert('internal', [request.root], users,
              internal_kind=InternalAlertKind.support_alert,
              subjects=[context], support_kind='oppose')
        return {}

    def redirect(self, context, request, **kw):
        return HTTPFound(request.resource_url(context, "@@index"))
def opinion_roles_validation(process, context):
    """Only examiners may emit an official opinion."""
    is_examiner = has_role(role=('Examiner',))
    return is_examiner
def opinion_processsecurity_validation(process, context):
    """Opinions are possible only on sites that examine proposals."""
    request = get_current_request()
    can_examine = 'proposal' in request.content_to_examine
    return global_user_processsecurity() if can_examine else False
def opinion_state_validation(process, context):
    """Opinions apply to submitted proposals that were not yet examined."""
    states = context.state
    if 'submitted_support' not in states:
        return False
    return 'examined' not in states
class MakeOpinion(InfiniteCardinality):
    """Record the examiners' official opinion on a submitted proposal.

    Moves the proposal to 'examined', returns free tokens to their
    owners, removes proposal-bound tokens and notifies group members
    and subscribers.
    """
    style = 'button' #TODO add style abstract class
    style_descriminator = 'global-action'
    style_picto = 'octicon octicon-checklist'
    style_order = 10
    submission_title = _('Save')
    context = IProposal
    roles_validation = opinion_roles_validation
    processsecurity_validation = opinion_processsecurity_validation
    state_validation = opinion_state_validation

    def start(self, context, request, appstruct, **kw):
        appstruct.pop('_csrf_token_')
        context.opinion = PersistentDict(appstruct)
        # (Removed a dead ``old_sate = context.state[0]`` local that was
        # never read.)
        context.state = PersistentList(
            ['examined', 'published', context.opinion['opinion']])
        context.init_examined_at()
        context.reindex()
        # Free tokens go back to their owners; tokens bound to the
        # proposal are removed from it.
        tokens = [t for t in context.tokens if not t.proposal]
        proposal_tokens = [t for t in context.tokens if t.proposal]
        for token in list(tokens):
            token.owner.addtoproperty('tokens', token)
        for token in list(proposal_tokens):
            context.__delitem__(token.__name__)
        members = context.working_group.members
        url = request.resource_url(context, "@@index")
        root = getSite()
        mail_template = root.get_mail_template('opinion_proposal')
        subject = mail_template['subject'].format(subject_title=context.title)
        localizer = request.localizer
        users = list(get_users_by_preferences(context))
        users.extend(members)
        alert('internal', [root], users,
              internal_kind=InternalAlertKind.examination_alert,
              subjects=[context])
        for member in members:
            if getattr(member, 'email', ''):
                message = mail_template['template'].format(
                    recipient_title=localizer.translate(
                        _(getattr(member, 'user_title', ''))),
                    recipient_first_name=getattr(member, 'first_name', member.name),
                    recipient_last_name=getattr(member, 'last_name', ''),
                    subject_url=url,
                    subject_title=context.title,
                    opinion=localizer.translate(_(context.opinion_value)),
                    explanation=context.opinion['explanation'],
                    novaideo_title=request.root.title
                )
                alert('email', [root.get_site_sender()], [member.email],
                      subject=subject, body=message)
        request.registry.notify(ActivityExecuted(
            self, [context], get_current()))
        return {}

    def redirect(self, context, request, **kw):
        return HTTPFound(request.resource_url(context, "@@index"))
def withdrawt_processsecurity_validation(process, context):
    """The user must have spent at least one token on this proposal."""
    user = get_current()
    owns_token = any((t.owner is user) for t in context.tokens)
    return owns_token and global_user_processsecurity()
class WithdrawToken(InfiniteCardinality):
    """Take back the last token the user spent on this proposal."""
    # style = 'button' #TODO add style abstract class
    # style_descriminator = 'text-action'
    # style_picto = 'glyphicon glyphicon-share-alt'
    # style_order = 6
    context = IProposal
    roles_validation = support_roles_validation
    processsecurity_validation = withdrawt_processsecurity_validation
    state_validation = support_state_validation

    def start(self, context, request, appstruct, **kw):
        user = get_current()
        user_tokens = [t for t in context.tokens
                       if t.owner is user]
        # The security validation guarantees at least one token here.
        token = user_tokens[-1]
        # __property__ names the list ('tokens_support'/'tokens_opposition')
        # the token currently sits in — presumably; confirm in Token.
        context.delfromproperty(token.__property__, token)
        user.addtoproperty('tokens', token)
        context.init_support_history()
        # History entries are (user oid, timestamp, kind); -1 = withdraw.
        context._support_history.append(
            (get_oid(user), datetime.datetime.now(tz=pytz.UTC), -1))
        request.registry.notify(ActivityExecuted(self, [context], user))
        users = list(get_users_by_preferences(context))
        users.extend(context.working_group.members)
        alert('internal', [request.root], users,
              internal_kind=InternalAlertKind.support_alert,
              subjects=[context], support_kind='withdraw')
        return {}

    def redirect(self, context, request, **kw):
        return HTTPFound(request.resource_url(context, "@@index"))
def comm_relation_validation(process, context):
    """Validate that the context is the 'proposal' related to this process."""
    execution_context = process.execution_context
    return execution_context.has_relation(context, 'proposal')
def comm_roles_validation(process, context):
    """Any site member may comment on a proposal."""
    required_role = ('Member',)
    return has_role(role=required_role)
def comm_processsecurity_validation(process, context):
    """Delegate to the site-wide user security check."""
    security_check = global_user_processsecurity
    return security_check()
def comm_state_validation(process, context):
    """Commenting is allowed once the proposal is no longer a draft."""
    current_states = context.state
    return 'draft' not in current_states
class CommentProposal(CommentIdea):
    """Behavior: comment on a proposal (reuses the idea commenting action)."""
    isSequential = False
    context = IProposal
    roles_validation = comm_roles_validation
    processsecurity_validation = comm_processsecurity_validation
    state_validation = comm_state_validation
def seea_roles_validation(process, context):
    """Seeing amendments requires the Participant role on this proposal."""
    required_role = ('Participant', context)
    return has_role(role=required_role)
def seea_processsecurity_validation(process, context):
    """Allow seeing amendments only when at least one is not archived."""
    has_visible_amendment = any(
        'archived' not in amendment.state
        for amendment in context.amendments)
    return has_visible_amendment and global_user_processsecurity()
class SeeAmendments(InfiniteCardinality):
    """Action: display the proposal's non-archived amendments."""
    isSequential = False
    context = IProposal
    roles_validation = seea_roles_validation
    processsecurity_validation = seea_processsecurity_validation

    def start(self, context, request, appstruct, **kw):
        # Pure display action: the associated view does the rendering.
        return {}

    def redirect(self, context, request, **kw):
        return HTTPFound(request.resource_url(context, "@@index"))
def seem_processsecurity_validation(process, context):
    """Delegate to the site-wide user security check."""
    security_check = global_user_processsecurity
    return security_check()
class SeeMembers(InfiniteCardinality):
    """Action: show the working group's members (ajax popup)."""
    style_descriminator = 'wg-action'
    style_interaction = 'ajax-action'
    style_picto = 'fa fa-users'
    isSequential = False
    context = IProposal
    processsecurity_validation = seem_processsecurity_validation

    def start(self, context, request, appstruct, **kw):
        # Pure display action: the associated view does the rendering.
        return {}

    def redirect(self, context, request, **kw):
        return HTTPFound(request.resource_url(context, "@@index"))
def present_roles_validation(process, context):
    """Any site member may present (share) a proposal."""
    required_role = ('Member',)
    return has_role(role=required_role)
def present_processsecurity_validation(process, context):
    """Delegate to the site-wide user security check."""
    security_check = global_user_processsecurity
    return security_check()
def present_state_validation(process, context):
    """Presenting is allowed once the proposal leaves the draft state."""
    current_states = context.state
    return 'draft' not in current_states  #TODO ?
class PresentProposal(PresentIdea):
    """Action: present (share) a proposal, reusing the idea behavior."""
    context = IProposal
    roles_validation = present_roles_validation
    processsecurity_validation = present_processsecurity_validation
    state_validation = present_state_validation
def associate_processsecurity_validation(process, context):
    """Owners may always associate; other members only once out of draft."""
    if has_role(role=('Owner', context)):
        allowed = True
    else:
        allowed = has_role(role=('Member',)) and \
            'draft' not in context.state
    return allowed and global_user_processsecurity()
class Associate(AssociateIdea):
    """Action: associate other contents with the proposal (idea behavior)."""
    context = IProposal
    processsecurity_validation = associate_processsecurity_validation
def seeideas_state_validation(process, context):
    """Related ideas are public out of draft; in draft, owner-only."""
    if 'draft' not in context.state:
        return True
    # Still in draft: only the owner may see the related ideas.
    return has_role(role=('Owner', context))
class SeeRelatedIdeas(InfiniteCardinality):
    """Action: list the ideas linked to the proposal."""
    style_descriminator = 'listing-primary-action'
    style_interaction = 'ajax-action'
    style_picto = 'glyphicon glyphicon-link'
    context = IProposal
    #processsecurity_validation = seeideas_processsecurity_validation
    #roles_validation = seeideas_roles_validation
    state_validation = seeideas_state_validation

    def start(self, context, request, appstruct, **kw):
        # Pure display action: the associated view does the rendering.
        return {}

    def redirect(self, context, request, **kw):
        return HTTPFound(request.resource_url(context, "@@index"))
def withdraw_roles_validation(process, context):
    """Any site member may withdraw from a waiting list."""
    required_role = ('Member',)
    return has_role(role=required_role)
def withdraw_processsecurity_validation(process, context):
    """Only users currently on the group's waiting list may withdraw."""
    current_user = get_current()
    wgroup = context.working_group
    # NOTE: 'wating_list' is the (misspelled) property name used project-wide.
    return wgroup and \
        current_user in wgroup.wating_list and \
        global_user_processsecurity()
def withdraw_state_validation(process, context):
    """Withdrawal only makes sense while the proposal is amendable."""
    current_states = context.state
    return 'amendable' in current_states
class Withdraw(InfiniteCardinality):
    """Behavior: a user removes themself from a working group's waiting list."""
    style = 'button' #TODO add style abstract class
    style_descriminator = 'wg-action'
    style_order = 3
    style_css_class = 'btn-warning'
    isSequential = False
    context = IProposal
    roles_validation = withdraw_roles_validation
    processsecurity_validation = withdraw_processsecurity_validation
    state_validation = withdraw_state_validation

    def start(self, context, request, appstruct, **kw):
        """Drop the current user from the waiting list and email a confirmation."""
        user = get_current()
        working_group = context.working_group
        working_group.delfromproperty('wating_list', user)
        if getattr(user, 'email', ''):
            localizer = request.localizer
            root = getSite()
            # NOTE(review): template id 'withdeaw' looks misspelled -- confirm
            # it matches the registered mail-template key before renaming.
            mail_template = root.get_mail_template('withdeaw')
            subject = mail_template['subject'].format(
                subject_title=context.title)
            message = mail_template['template'].format(
                recipient_title=localizer.translate(
                    _(getattr(user, 'user_title', ''))),
                recipient_first_name=getattr(user, 'first_name', user.name),
                recipient_last_name=getattr(user, 'last_name', ''),
                subject_title=context.title,
                subject_url=request.resource_url(context, "@@index"),
                novaideo_title=request.root.title
            )
            alert('email', [root.get_site_sender()], [user.email],
                  subject=subject, body=message)
        request.registry.notify(ActivityExecuted(
            self, [context, working_group], user))
        return {}

    def redirect(self, context, request, **kw):
        return HTTPFound(request.resource_url(context, "@@index"))
def resign_roles_validation(process, context):
    """Only current members of the proposal's working group may resign."""
    current_user = get_current()
    working_group = context.working_group
    return working_group and current_user in working_group.members
def resign_processsecurity_validation(process, context):
    """Delegate to the site-wide user security check."""
    security_check = global_user_processsecurity
    return security_check()
def resign_state_validation(process, context):
    """Resigning is possible while the proposal is amendable or still open."""
    resignable_states = ['amendable', 'open to a working group']
    return any(state in context.state for state in resignable_states)
class Resign(InfiniteCardinality):
    """Behavior: a working-group member leaves the group.

    Side effects: the first eligible user on the waiting list (if any) takes
    the freed seat, the proposal reopens to a working group when the group
    drops below the minimum size, and everyone involved is alerted.
    """
    style = 'button' #TODO add style abstract class
    style_descriminator = 'wg-action'
    style_order = 2
    style_picto = 'typcn typcn-user-delete'
    style_css_class = 'btn-danger'
    isSequential = False
    context = IProposal
    roles_validation = resign_roles_validation
    processsecurity_validation = resign_processsecurity_validation
    state_validation = resign_state_validation

    def _get_next_user(self, users, root):
        """Return the first active user still under the participation cap,
        or None when nobody on the waiting list qualifies."""
        for user in users:
            wgs = user.active_working_groups
            if 'active' in user.state and len(wgs) < root.participations_maxi:
                return user
        return None

    def start(self, context, request, appstruct, **kw):
        """Remove the user, promote the next eligible waiting-list user, and
        reopen the proposal when the group falls below the required minimum."""
        root = getSite()
        user = get_current()
        working_group = context.working_group
        working_group.delfromproperty('members', user)
        members = working_group.members
        mode = getattr(working_group, 'work_mode', root.get_default_work_mode())
        revoke_roles(user, (('Participant', context),))
        if members:
            alert('internal', [root], members,
                  internal_kind=InternalAlertKind.working_group_alert,
                  subjects=[context], alert_kind='resign')
        url = request.resource_url(context, "@@index")
        localizer = request.localizer
        sender = root.get_site_sender()
        if working_group.wating_list:
            # Promote the first eligible user from the waiting list.
            next_user = self._get_next_user(working_group.wating_list, root)
            if next_user is not None:
                mail_template = root.get_mail_template(
                    'wg_wating_list_participation')
                working_group.delfromproperty('wating_list', next_user)
                working_group.addtoproperty('members', next_user)
                grant_roles(next_user, (('Participant', context),))
                if members:
                    alert('internal', [root], members,
                          internal_kind=InternalAlertKind.working_group_alert,
                          subjects=[context],
                          alert_kind='wg_wating_list_participation')
                if getattr(next_user, 'email', ''):
                    subject = mail_template['subject'].format(
                        subject_title=context.title)
                    message = mail_template['template'].format(
                        recipient_title=localizer.translate(
                            _(getattr(next_user, 'user_title', ''))),
                        recipient_first_name=getattr(
                            next_user, 'first_name', next_user.name),
                        recipient_last_name=getattr(next_user, 'last_name', ''),
                        subject_title=context.title,
                        subject_url=url,
                        novaideo_title=root.title
                    )
                    alert('email', [sender], [next_user.email],
                          subject=subject, body=message)
        participants = working_group.members
        len_participants = len(participants)
        if len_participants < mode.participants_mini and \
           'open to a working group' not in context.state:
            # Too few members left: deactivate the group and reopen the
            # proposal to new working-group candidates.
            context.state = PersistentList(
                ['open to a working group', 'published'])
            working_group.state = PersistentList(['deactivated'])
            working_group.reindex()
            context.reindex()
            alert('internal', [root], participants,
                  internal_kind=InternalAlertKind.working_group_alert,
                  subjects=[context], alert_kind='resign_to_wg_open')
        if getattr(user, 'email', ''):
            mail_template = root.get_mail_template('wg_resign')
            subject = mail_template['subject'].format(
                subject_title=context.title)
            message = mail_template['template'].format(
                recipient_title=localizer.translate(
                    _(getattr(user, 'user_title', ''))),
                recipient_first_name=getattr(user, 'first_name', user.name),
                recipient_last_name=getattr(user, 'last_name', ''),
                subject_title=context.title,
                subject_url=url,
                novaideo_title=root.title
            )
            alert('email', [sender], [user.email],
                  subject=subject, body=message)
        request.registry.notify(ActivityExecuted(
            self, [context, working_group], user))
        return {}

    def redirect(self, context, request, **kw):
        return HTTPFound(request.resource_url(context, "@@index"))
def participate_roles_validation(process, context):
    """Members not already in the working group may ask to participate."""
    current_user = get_current()
    wgroup = context.working_group
    return wgroup and has_role(role=('Member',)) and \
        current_user not in wgroup.members
def participate_processsecurity_validation(process, context):
    """The user must not already be waiting and must be under the
    site-wide participation cap."""
    wgroup = context.working_group
    current_user = get_current()
    site_root = getSite()
    active_groups = getattr(current_user, 'active_working_groups', [])
    return wgroup and \
        current_user not in wgroup.wating_list and \
        len(active_groups) < site_root.participations_maxi and \
        global_user_processsecurity()
def participate_state_validation(process, context):
    """Joining is possible while the group is not closed and the proposal is
    amendable or still open to a working group."""
    wgroup = context.working_group
    if not wgroup:
        # Preserve the original falsy return value of the `and` chain.
        return wgroup
    open_states = ['amendable', 'open to a working group']
    return 'closed' not in wgroup.state and \
        any(state in context.state for state in open_states)
class Participate(InfiniteCardinality):
    """Behavior: join a proposal's working group (or its waiting list).

    Joining may activate the group when the minimum size is reached, which
    starts (or resumes) the improvement-cycle process.
    """
    style = 'button' #TODO add style abstract class
    style_descriminator = 'wg-action'
    style_order = 1
    style_picto = 'md md-group-add'
    style_css_class = 'btn-success'
    submission_title = _('Save')
    isSequential = False
    context = IProposal
    roles_validation = participate_roles_validation
    processsecurity_validation = participate_processsecurity_validation
    state_validation = participate_state_validation

    def _send_mail_to_user(self, subject_template,
                           message_template, user,
                           context, request):
        """Render and send one templated email about *context* to *user*."""
        localizer = request.localizer
        subject = subject_template.format(subject_title=context.title)
        message = message_template.format(
            recipient_title=localizer.translate(
                _(getattr(user, 'user_title', ''))),
            recipient_first_name=getattr(user, 'first_name', user.name),
            recipient_last_name=getattr(user, 'last_name', ''),
            subject_title=context.title,
            subject_url=request.resource_url(context, "@@index"),
            novaideo_title=request.root.title
        )
        alert('email', [request.root.get_site_sender()], [user.email],
              subject=subject, body=message)

    def start(self, context, request, appstruct, **kw):
        """Add the user as a member if a seat is free, else to the waiting list."""
        root = getSite()
        user = get_current()
        working_group = context.working_group
        participants = working_group.members
        mode = getattr(working_group, 'work_mode', root.get_default_work_mode())
        len_participants = len(participants)
        if len_participants < mode.participants_maxi:
            #Alert new participant
            if participants:
                alert('internal', [root], participants,
                      internal_kind=InternalAlertKind.working_group_alert,
                      subjects=[context], alert_kind='participate')
            working_group.addtoproperty('members', user)
            grant_roles(user, (('Participant', context),))
            #alert maw working groups
            active_wgs = getattr(user, 'active_working_groups', [])
            if len(active_wgs) == root.participations_maxi:
                # The user just reached the participation cap.
                alert('internal', [root], [user],
                      internal_kind=InternalAlertKind.working_group_alert,
                      subjects=[user], alert_kind='participations_maxi')
            if (len_participants+1) == mode.participants_mini:
                # Quorum reached: activate the group and start amending.
                working_group.state = PersistentList(['active'])
                context.state = PersistentList(['amendable', 'published'])
                working_group.reindex()
                context.reindex()
                #Only if is the first improvement cycle
                if not hasattr(working_group, 'first_improvement_cycle'):
                    working_group.first_improvement_cycle = True
                if not working_group.improvement_cycle_proc:
                    improvement_cycle_proc = start_improvement_cycle(
                        context)
                    working_group.setproperty(
                        'improvement_cycle_proc', improvement_cycle_proc)
                #Run the improvement cycle proc
                working_group.improvement_cycle_proc.execute_action(
                    context, request, 'votingpublication', {})
                #Alert start of the improvement cycle proc
                alert('internal', [root], participants,
                      internal_kind=InternalAlertKind.working_group_alert,
                      subjects=[context], alert_kind='amendable')
            #Send Mail alert to user
            if getattr(user, 'email', ''):
                mail_template = root.get_mail_template('wg_participation')
                self._send_mail_to_user(
                    mail_template['subject'], mail_template['template'],
                    user, context, request)
        else:
            # Group is full: queue the user on the waiting list.
            working_group.addtoproperty('wating_list', user)
            working_group.reindex()
            users = list(participants)
            users.append(user)
            alert('internal', [root], users,
                  internal_kind=InternalAlertKind.working_group_alert,
                  subjects=[context], alert_kind='wg_participation_max')
            if getattr(user, 'email', ''):
                mail_template = root.get_mail_template('wating_list')
                self._send_mail_to_user(
                    mail_template['subject'], mail_template['template'],
                    user, context, request)
        request.registry.notify(ActivityExecuted(
            self, [context, working_group], user))
        return {}

    def redirect(self, context, request, **kw):
        return HTTPFound(request.resource_url(context, "@@index"))
def compare_processsecurity_validation(process, context):
    """Comparing requires a saved version, plus owner rights, or member
    rights once the proposal leaves the draft state."""
    if getattr(context, 'version', None) is None:
        return False
    allowed = has_role(role=('Owner', context)) or \
        (has_role(role=('Member',)) and
         'draft' not in context.state)
    return allowed and global_user_processsecurity()
class CompareProposal(InfiniteCardinality):
    """Action: compare the proposal with one of its earlier versions."""
    title = _('Compare')
    context = IProposal
    processsecurity_validation = compare_processsecurity_validation

    def start(self, context, request, appstruct, **kw):
        # Pure display action: the associated view does the rendering.
        return {}

    def redirect(self, context, request, **kw):
        return HTTPFound(request.resource_url(context, "@@index"))
def attach_roles_validation(process, context):
    """Only participants of the proposal may attach files."""
    required_role = ('Participant', context)
    return has_role(role=required_role)
def attach_processsecurity_validation(process, context):
    """Delegate to the site-wide user security check."""
    security_check = global_user_processsecurity
    return security_check()
def attach_state_validation(process, context):
    """Files can be attached while an active group amends the proposal."""
    working_group = context.working_group
    return working_group and \
        'active' in working_group.state and \
        'amendable' in context.state
class AttachFiles(InfiniteCardinality):
    """Action: attach files to the proposal while it is being amended."""
    style = 'button' #TODO add style abstract class
    style_descriminator = 'text-action'
    style_interaction = 'ajax-action'
    style_picto = 'glyphicon glyphicon-paperclip'
    style_order = 3
    submission_title = _('Save')
    context = IProposal
    roles_validation = attach_roles_validation
    processsecurity_validation = attach_processsecurity_validation
    state_validation = attach_state_validation

    def start(self, context, request, appstruct, **kw):
        """Store the submitted files on the proposal and reindex it."""
        add_attached_files({'add_files': appstruct}, context)
        context.reindex()
        request.registry.notify(ActivityExecuted(
            self, [context], get_current()))
        return {}

    def redirect(self, context, request, **kw):
        return HTTPFound(request.resource_url(context, "@@index"))
def get_access_key(obj):
    """Index keys controlling who may see the proposal in search results."""
    if 'draft' in obj.state:
        # Drafts are only visible to their owner and admins.
        return serialize_roles(
            (('Owner', obj), 'Admin'))
    return ['always']
def seeproposal_processsecurity_validation(process, context):
    """Drafts are restricted to their owner and admins; others are open to
    anyone passing the generic access check."""
    base_access = access_user_processsecurity(process, context)
    return base_access and \
        ('draft' not in context.state or
         has_any_roles(roles=(('Owner', context), 'Admin')))
@access_action(access_key=get_access_key)
class SeeProposal(InfiniteCardinality):
    """Automatic action controlling access to a proposal's detail view."""
    title = _('Details')
    context = IProposal
    actionType = ActionType.automatic
    processsecurity_validation = seeproposal_processsecurity_validation

    def start(self, context, request, appstruct, **kw):
        # Pure display action: the associated view does the rendering.
        return {}

    def redirect(self, context, request, **kw):
        return HTTPFound(request.resource_url(context, "@@index"))
#*************************** ProposalImprovementCycle process **********************************#
def decision_relation_validation(process, context):
    """Validate that the context is the 'proposal' related to this process."""
    execution_context = process.execution_context
    return execution_context.has_relation(context, 'proposal')
def decision_roles_validation(process, context):
    """Only admins may drive the improvement-cycle decision steps."""
    required_role = ('Admin',)
    return has_role(role=required_role)
def decision_state_validation(process, context):
    """The group must be active and the proposal amendable."""
    working_group = context.working_group
    return working_group and \
        'active' in working_group.state and \
        'amendable' in context.state
class VotingPublication(ElementaryAction):
    """Workflow step: open the 'publish or keep working?' ballot for a cycle."""
    style = 'button' #TODO add style abstract class
    style_descriminator = 'global-action'
    style_order = 5
    context = IProposal
    processs_relation_id = 'proposal'
    #actionType = ActionType.system
    relation_validation = decision_relation_validation
    roles_validation = decision_roles_validation
    state_validation = decision_state_validation

    def start(self, context, request, appstruct, **kw):
        """Switch the proposal's head state to 'votes for publishing' and
        alert the group (skipped on the very first vote)."""
        # Replace the leading state (e.g. 'amendable') with the voting state.
        context.state.remove(context.state[0])
        context.state.insert(0, 'votes for publishing')
        context.reindex()
        working_group = context.working_group
        working_group.iteration = getattr(working_group, 'iteration', 0) + 1
        if not getattr(working_group, 'first_vote', True):
            # Not the first ballot: tell members the work phase has ended.
            members = working_group.members
            url = request.resource_url(context, "@@index")
            root = getSite()
            mail_template = root.get_mail_template('start_vote_publishing')
            subject = mail_template['subject'].format(
                subject_title=context.title)
            localizer = request.localizer
            alert('internal', [root], members,
                  internal_kind=InternalAlertKind.working_group_alert,
                  subjects=[context], alert_kind='end_work')
            for member in members:
                if getattr(member, 'email', ''):
                    message = mail_template['template'].format(
                        recipient_title=localizer.translate(
                            _(getattr(member, 'user_title', ''))),
                        recipient_first_name=getattr(
                            member, 'first_name', member.name),
                        recipient_last_name=getattr(
                            member, 'last_name', ''),
                        subject_title=context.title,
                        subject_url=url,
                        novaideo_title=root.title
                    )
                    alert('email', [root.get_site_sender()], [member.email],
                          subject=subject, body=message)
        request.registry.notify(ActivityExecuted(
            self, [context], get_current()))
        return {}

    def after_execution(self, context, request, **kw):
        """Close any still-open vote sub-processes, then branch to 'submit'
        (publication accepted) or 'work' (another amendment cycle)."""
        proposal = self.process.execution_context.created_entity(
            'proposal')
        if self.sub_process:
            exec_ctx = self.sub_process.execution_context
            vote_processes = exec_ctx.get_involved_collection('vote_processes')
            opened_vote_processes = [process for process in vote_processes
                                     if not process._finished]
            if opened_vote_processes:
                close_votes(proposal, request, opened_vote_processes)
        setattr(self.process, 'new_cycle_date', datetime.datetime.now())
        # Reset the end-of-cycle alert counter used by AlertEnd.
        setattr(self.process, 'previous_alert', -1)
        super(VotingPublication, self).after_execution(proposal, request, **kw)
        is_published = publish_condition(self.process)
        if is_published:
            self.process.execute_action(proposal, request, 'submit', {})
        else:
            self.process.execute_action(proposal, request, 'work', {})

    def redirect(self, context, request, **kw):
        return HTTPFound(request.resource_url(context, "@@index"))
def work_state_validation(process, context):
    """The group must be active and the publication ballot in progress."""
    wg_state = getattr(context.working_group, 'state', [])
    return 'active' in wg_state and \
        'votes for publishing' in context.state
class Work(ElementaryAction):
    """Workflow step: start (or restart) an amendment cycle after a ballot."""
    style = 'button' #TODO add style abstract class
    style_descriminator = 'global-action'
    style_order = 5
    context = IProposal
    processs_relation_id = 'proposal'
    #actionType = ActionType.system
    relation_validation = decision_relation_validation
    roles_validation = decision_roles_validation
    state_validation = work_state_validation

    def _send_mails(self, context, request, subject_template, message_template):
        """Email every member the cycle duration and whether the group is closed."""
        working_group = context.working_group
        duration = to_localized_time(
            calculate_amendments_cycle_duration(self.process),
            translate=True)
        isclosed = 'closed' in working_group.state
        members = working_group.members
        url = request.resource_url(context, "@@index")
        subject = subject_template.format(subject_title=context.title)
        localizer = request.localizer
        root = request.root
        alert('internal', [root], members,
              internal_kind=InternalAlertKind.working_group_alert,
              subjects=[context], alert_kind='start_work')
        for member in [m for m in members if getattr(m, 'email', '')]:
            message = message_template.format(
                recipient_title=localizer.translate(
                    _(getattr(member, 'user_title', ''))),
                recipient_first_name=getattr(
                    member, 'first_name', member.name),
                recipient_last_name=getattr(member, 'last_name', ''),
                subject_title=context.title,
                subject_url=url,
                duration=duration,
                isclosed=localizer.translate(
                    (isclosed and _('closed')) or _('open')),
                novaideo_title=root.title
            )
            alert('email', [root.get_site_sender()], [member.email],
                  subject=subject, body=message)

    def start(self, context, request, appstruct, **kw):
        """Leave the voting state, possibly reopen a closed group, and mark
        the proposal amendable again."""
        root = getSite()
        working_group = context.working_group
        context.state.remove('votes for publishing')
        #Only for amendments work mode
        reopening_ballot = getattr(
            working_group, 'reopening_configuration_ballot', None)
        if reopening_ballot is not None:
            report = reopening_ballot.report
            voters_len = len(report.voters)
            electors_len = len(report.electors)
            report.calculate_votes()
            #absolute majority
            if (voters_len == electors_len) and \
               (report.result['False'] == 0) and \
               'closed' in working_group.state:
                working_group.state.remove('closed')
        context.state.insert(0, 'amendable')
        #The first improvement cycle is started
        # NOTE(review): the template ids look swapped ('start_work' is sent
        # while first_improvement_cycle is True, 'first_start_work' after).
        # Confirm against the registered mail templates before changing.
        if working_group.first_improvement_cycle:
            mail_template = root.get_mail_template('start_work')
            self._send_mails(
                context, request,
                mail_template['subject'], mail_template['template'])
            working_group.first_improvement_cycle = False
        else:
            mail_template = root.get_mail_template('first_start_work')
            self._send_mails(
                context, request,
                mail_template['subject'], mail_template['template'])
        context.reindex()
        working_group.reindex()
        request.registry.notify(ActivityExecuted(
            self, [context, working_group], get_current()))
        return {}

    def after_execution(self, context, request, **kw):
        """Loop back into the next publication ballot."""
        proposal = self.process.execution_context.created_entity('proposal')
        super(Work, self).after_execution(proposal, request, **kw)
        self.process.execute_action(proposal, request, 'votingpublication', {})

    def redirect(self, context, request, **kw):
        return HTTPFound(request.resource_url(context, "@@index"))
def submit_roles_validation(process, context):
    """Only admins may trigger proposal submission."""
    required_role = ('Admin',)
    return has_role(role=required_role)
def submit_state_validation(process, context):
    """The group must be active and the publication ballot in progress."""
    working_group = context.working_group
    return working_group and \
        'active' in working_group.state and \
        'votes for publishing' in context.state
class SubmitProposal(ElementaryAction):
    """Workflow step: publish the proposal and archive its working group.

    Each member gets a support token tied to the proposal, loses the
    'Participant' role, and everyone concerned is alerted in-app and by email.
    """
    style = 'button' #TODO add style abstract class
    style_descriminator = 'global-action'
    style_picto = 'glyphicon glyphicon-certificate'
    style_order = 2
    context = IProposal
    processs_relation_id = 'proposal'
    #actionType = ActionType.system
    relation_validation = decision_relation_validation
    roles_validation = submit_roles_validation
    state_validation = submit_state_validation

    def start(self, context, request, appstruct, **kw):
        root = getSite()
        localizer = request.localizer
        working_group = context.working_group
        # NOTE(review): both branches contain the same two states, only the
        # order differs -- presumably the first entry drives display/priority.
        # Confirm this ordering is intentional.
        if 'proposal' in getattr(root, 'content_to_support', []):
            context.state = PersistentList(['submitted_support', 'published'])
        else:
            context.state = PersistentList(['published', 'submitted_support'])
        working_group.state = PersistentList(['archived'])
        members = working_group.members
        for member in members:
            # Give each member a support token bound to this proposal.
            token = Token(title='Token_'+context.title)
            token.setproperty('proposal', context)
            member.addtoproperty('tokens_ref', token)
            member.addtoproperty('tokens', token)
            token.setproperty('owner', member)
            revoke_roles(member, (('Participant', context),))
        #Alert users
        users = list(get_users_by_preferences(context))
        users.extend(members)
        users = set(users)
        url = request.resource_url(context, "@@index")
        mail_template = root.get_mail_template('publish_proposal')
        subject = mail_template['subject'].format(
            subject_title=context.title)
        alert('internal', [root], users,
              internal_kind=InternalAlertKind.working_group_alert,
              subjects=[context], alert_kind='submit_proposal')
        for member in [m for m in users if getattr(m, 'email', '')]:
            message = mail_template['template'].format(
                recipient_title=localizer.translate(
                    _(getattr(member, 'user_title', ''))),
                recipient_first_name=getattr(
                    member, 'first_name', member.name),
                recipient_last_name=getattr(member, 'last_name', ''),
                subject_title=context.title,
                subject_url=url,
                novaideo_title=root.title
            )
            alert('email', [root.get_site_sender()], [member.email],
                  subject=subject, body=message)
        context.modified_at = datetime.datetime.now(tz=pytz.UTC)
        working_group.reindex()
        context.reindex()
        request.registry.notify(ActivityExecuted(
            self, [context, working_group], get_current()))
        return {}

    def redirect(self, context, request, **kw):
        return HTTPFound(request.resource_url(context, "@@index"))
def alert_relation_validation(process, context):
    """Validate that the context is the 'proposal' related to this process."""
    execution_context = process.execution_context
    return execution_context.has_relation(context, 'proposal')
def alert_roles_validation(process, context):
    """End-of-cycle alerts are triggered by the system role only."""
    required_role = ('System',)
    return has_role(role=required_role)
class AlertEnd(ElementaryAction):
    """System step: warn members that the amendment cycle is about to end."""
    style = 'button' #TODO add style abstract class
    style_descriminator = 'global-action'
    style_order = 4
    context = IProposal
    actionType = ActionType.system
    processs_relation_id = 'proposal'
    roles_validation = alert_roles_validation
    relation_validation = alert_relation_validation

    def start(self, context, request, appstruct, **kw):
        """Bump the alert counter and notify members while work is ongoing."""
        working_group = context.working_group
        # Count how many end-of-cycle alerts were already sent this cycle
        # (reset to -1 by VotingPublication.after_execution).
        previous_alert = getattr(self.process, 'previous_alert', -1)
        setattr(self.process, 'previous_alert', previous_alert + 1)
        if 'active' in working_group.state and 'amendable' in context.state:
            members = working_group.members
            url = request.resource_url(context, "@@index")
            root = request.root
            mail_template = root.get_mail_template('alert_end')
            subject = mail_template['subject'].format(
                subject_title=context.title)
            localizer = request.localizer
            alert('internal', [root], members,
                  internal_kind=InternalAlertKind.working_group_alert,
                  subjects=[context], alert_kind='alert_end_work')
            for member in [m for m in members if getattr(m, 'email', '')]:
                message = mail_template['template'].format(
                    recipient_title=localizer.translate(
                        _(getattr(member, 'user_title', ''))),
                    recipient_first_name=getattr(
                        member, 'first_name', member.name),
                    recipient_last_name=getattr(
                        member, 'last_name', ''),
                    subject_url=url,
                    subject_title=context.title,
                    novaideo_title=root.title
                )
                alert('email', [root.get_site_sender()], [member.email],
                      subject=subject, body=message)
        return {}

    def redirect(self, context, request, **kw):
        return HTTPFound(request.resource_url(context, "@@index"))
#**********************************************Workspace***************************************************
def get_access_key_ws(obj):
    """Workspace visibility keys: proposal participants and admins only."""
    allowed_roles = (('Participant', obj.proposal), 'Admin')
    return serialize_roles(allowed_roles)
def seeworkspace_processsecurity_validation(process, context):
    """Only participants of the related proposal and admins see the workspace."""
    required_roles = (('Participant', context.proposal), 'Admin')
    return has_any_roles(roles=required_roles)
@access_action(access_key=get_access_key_ws)
class SeeWorkspace(InfiniteCardinality):
    """Automatic action controlling access to a workspace's detail view."""
    title = _('Details')
    context = IWorkspace
    actionType = ActionType.automatic
    processsecurity_validation = seeworkspace_processsecurity_validation

    def start(self, context, request, appstruct, **kw):
        # Pure display action: the associated view does the rendering.
        return {}

    def redirect(self, context, request, **kw):
        return HTTPFound(request.resource_url(context, "@@index"))
class AddFiles(InfiniteCardinality):
    """Action: add files to a proposal's workspace."""
    style = 'button' #TODO add style abstract class
    style_descriminator = 'global-action'
    style_picto = 'glyphicon glyphicon-import'
    style_order = 4
    submission_title = _('Save')
    context = IWorkspace
    # Reuses the workspace visibility check as the role check, and the
    # proposal-creation security check (defined elsewhere in this module).
    roles_validation = seeworkspace_processsecurity_validation
    processsecurity_validation = createproposal_processsecurity_validation

    def start(self, context, request, appstruct, **kw):
        """Store the submitted files in the workspace and reindex it."""
        add_files_to_workspace(appstruct.get('files', []), context)
        context.reindex()
        return {}

    def redirect(self, context, request, **kw):
        return HTTPFound(request.resource_url(context, "@@index"))
class RemoveFile(InfiniteCardinality):
    """Action: remove a file (looked up by oid) from the workspace."""
    context = IWorkspace
    roles_validation = seeworkspace_processsecurity_validation
    processsecurity_validation = createproposal_processsecurity_validation

    def start(self, context, request, appstruct, **kw):
        """Detach the file whose oid was submitted, ignoring lookup failures."""
        oid = appstruct.get('oid', None)
        if oid:
            try:
                file_ = get_obj(int(oid))
                if file_ and file_ in context.files:
                    context.delfromproperty('files', file_)
            except Exception as error:
                # Best effort: a malformed oid or failed lookup is only logged.
                log.warning(error)
        return {}

    def redirect(self, context, request, **kw):
        return HTTPFound(request.resource_url(context, "@@index"))
#TODO behaviors
# Register the comment action as the validator behavior for proposals.
VALIDATOR_BY_CONTEXT[Proposal] = CommentProposal
|
#!/usr/bin/env python
import ast
import base64
import ccd
import hashlib
import os
import numpy as np
import sys
import requests
import cw
from glob import glob
from datetime import datetime
class SparkException(Exception):
    """Base exception for errors raised by this Spark worker module."""
    pass
class Spark(object):
def __init__(self, config):
self.config = config
ubids = 'LANDSAT_7/ETM/sr_band1&ubid=LANDSAT_7/ETM/sr_band2&ubid=LANDSAT_7/ETM/sr_band4&ubid=LANDSAT_7/ETM/sr_band5&ubid=LANDSAT_7/ETM/sr_band7&ubid=LANDSAT_7/ETM/cfmask&ubid=LANDSAT_7/ETM/sr_band3&ubid=LANDSAT_7/ETM/toa_band6'
self.ubids_list = ubids.split('&ubid=')
def sort_band_data(self, band, field):
return sorted(band, key=lambda x: x[field])
def b64_to_bytearray(self, data):
return np.frombuffer(base64.b64decode(data), np.int16)
def dtstr_to_ordinal(self, dtstr):
_dt = datetime.strptime(dtstr, '%Y-%m-%dT%H:%M:%SZ')
return _dt.toordinal()
def collect_data(self, band_group, json_data):
_blist = "band2 band3 band4 band5 band6 band7 cfmask"
band_list = "band1 " + _blist if band_group is 'tm' else "band10 " + _blist
for b in band_list.split(" "):
vars()[b] = []
band_bucket = []
for item in json_data:
which_band = item['ubid'][-6:].replace("_", "")
band_bucket.append(which_band)
vars()[which_band].append(item)
valid_sources = set([i['source'] for i in vars()['band2']]) & set([i['source'] for i in vars()['cfmask']])
for bucket in band_list.split(" "):
_orig = vars()[bucket]
vars()[bucket+'_clean'] = [item for item in _orig if item['source'] in valid_sources]
for bucket in band_list.split(" "):
_sorted = vars()[bucket+'_sorted'] = self.sort_band_data(vars()[bucket+'_clean'], 'acquired')
vars()[bucket+'_bytes'] = [self.b64_to_bytearray(item['data']) for item in _sorted]
dates = [self.dtstr_to_ordinal(i['acquired']) for i in vars()['band2_sorted']]
mapping = self.config['ubid_band_dict'][band_group]
for band in "red green blue nirs swirs1 swirs2 thermals qas".split(" "):
vars()[band+'_array'] = np.array(vars()[mapping[band] + '_bytes'])
print("{}: len {}".format(band, len(vars()[band+'_array'])))
rows = len(dates) #282
cells = 10000 # per tile, 100x100
output = []
try:
for pixel in range(0, cells):
lower = pixel
upper = pixel + 1
_od = dict()
_od[pixel] = {'dates': dates,
'red': vars()['red_array'][0:rows, lower:upper],
'green': vars()['green_array'][0:rows, lower:upper],
'blue': vars()['blue_array'][0:rows, lower:upper],
'nirs': vars()['nirs_array'][0:rows, lower:upper],
'swirs1': vars()['swirs1_array'][0:rows, lower:upper],
'swirs2': vars()['swirs2_array'][0:rows, lower:upper],
'thermals': vars()['thermals_array'][0:rows, lower:upper],
'qas': vars()['qas_array'][0:rows, lower:upper]}
output.append(_od)
except IndexError as e:
output = "IndexError for returned data: {}".format(e.message)
print("returning {} output items from collect_data".format(len(output)))
return output
def run_pyccd(self, datad):
def np_to_list(_d):
_x = [i[0] for i in _d]
return np.array(_x)
data = list(datad.values())[0]
print ("data is: {}".format(type(data)))
results = ccd.detect(data['dates'],
np_to_list(data['blue']),
np_to_list(data['green']),
np_to_list(data['red']),
np_to_list(data['nirs']),
np_to_list(data['swirs1']),
np_to_list(data['swirs2']),
np_to_list(data['thermals']),
np_to_list(data['qas']))
return results
def pixel_xy(self, index, tilex, tiley, dim=100):
# if index is 565, tilex is 123, tiley is 330, xdim is 100, ydim is 100
row = index / dim
col = index - row * dim
return {'y': tiley+row, 'x': tilex+col}
def run(self, input_d):
    """Fetch tile inputs, run pyccd per pixel, and yield result dicts.

    input_d keys used: 'inputs_url', 'tile_x', 'tile_y', 'algorithm'.
    Acts as a generator so results can be sent over messaging as they
    are produced.
    """
    resp = requests.get(input_d['inputs_url'])
    if resp.status_code != 200:
        # one blind retry on a non-200 response
        resp = requests.get(input_d['inputs_url'])

    band_group = 'oli' if 'OLI_TIRS' in input_d['inputs_url'] else 'tm'
    output = self.collect_data(band_group, resp.json())
    print("Have data of type {}, ready to attempt pyccd.".format(type(output)))

    # HACK: collect_data signals failure by returning a string instead of
    # a list; without x, y, algorithm the result cannot be saved by
    # lcmap-changes, so abort here.
    if isinstance(output, str):
        msg = "Query error:{}".format(output)
        print(msg)
        # TODO: raise a more specific exception (QueryFailedException?)
        raise Exception(msg)

    print("Data is valid to run pyccd. Proceeding.")
    for item in output:
        # item is a dict keyed by pixel index: {0: {'dates': ..., 'green': ...}}
        print("item.keys:{}".format(list(item)))
        pixel_index = list(item)[0]
        # for the short term, consider multiprocessing to parallelize
        results = self.run_pyccd(item)

        outgoing = dict()
        # BUG FIX: pixel_xy returns a dict; tuple-unpacking the dict
        # assigned its KEYS ('y', 'x') instead of the coordinate values.
        xy = self.pixel_xy(pixel_index, input_d['tile_x'], input_d['tile_y'])
        outgoing['x'], outgoing['y'] = xy['x'], xy['y']
        outgoing['algorithm'] = input_d['algorithm']
        # BUG FIX: md5 requires bytes under Python 3 -- encode the
        # formatted string before hashing.
        outgoing['result_md5'] = hashlib.md5("{}".format(results).encode('utf-8')).hexdigest()
        # somehow determine if the result is ok or not; all True for now
        outgoing['result_ok'] = True
        outgoing['result_produced'] = datetime.now().strftime('%Y-%m-%dT%H:%M:%SZ')
        outgoing['inputs_md5'] = 'not implemented'
        yield outgoing
def run(config, indata):
    """Module-level entry point: build a Spark worker and run it."""
    return Spark(config).run(indata)
Encoded results to UTF-8 to support the md5 sum.
#!/usr/bin/env python
import ast
import base64
import ccd
import hashlib
import os
import numpy as np
import sys
import requests
import cw
from glob import glob
from datetime import datetime
class SparkException(Exception):
    """Raised for errors specific to the Spark tile-processing workflow."""
class Spark(object):
    """Runs PyCCD change detection over a 100x100 tile of Landsat data.

    Workflow: fetch band data from the LCMAP tile service, group and align
    the bands on common acquisition sources, then run ccd.detect() per pixel.
    """

    def __init__(self, config):
        # config must provide config['ubid_band_dict'][band_group], mapping
        # logical band names (red/green/...) to raw bucket names.
        self.config = config
        ubids = 'LANDSAT_7/ETM/sr_band1&ubid=LANDSAT_7/ETM/sr_band2&ubid=LANDSAT_7/ETM/sr_band4&ubid=LANDSAT_7/ETM/sr_band5&ubid=LANDSAT_7/ETM/sr_band7&ubid=LANDSAT_7/ETM/cfmask&ubid=LANDSAT_7/ETM/sr_band3&ubid=LANDSAT_7/ETM/toa_band6'
        self.ubids_list = ubids.split('&ubid=')

    def sort_band_data(self, band, field):
        """Sort a band's item list by the given field (e.g. 'acquired')."""
        return sorted(band, key=lambda x: x[field])

    def b64_to_bytearray(self, data):
        """Decode a base64 payload into an int16 numpy array."""
        return np.frombuffer(base64.b64decode(data), np.int16)

    def dtstr_to_ordinal(self, dtstr):
        """Convert an ISO-ish UTC timestamp string to a proleptic ordinal."""
        _dt = datetime.strptime(dtstr, '%Y-%m-%dT%H:%M:%SZ')
        return _dt.toordinal()

    def collect_data(self, band_group, json_data):
        """Group raw tile JSON by band, align on shared sources, and package
        per-pixel time series for run_pyccd.

        Returns a list of {pixel_index: {'dates': [...], band: column}} dicts,
        or (HACK, kept for callers) an error string on IndexError.
        """
        _blist = "band2 band3 band4 band5 band6 band7 cfmask"
        # BUG FIX: compare with '==', not 'is' -- identity comparison on
        # string literals is an implementation accident.
        band_list = "band1 " + _blist if band_group == 'tm' else "band10 " + _blist

        # Explicit dicts replace the previous writes into vars(), which is
        # unreliable inside a function and very hard to follow.
        buckets = {b: [] for b in band_list.split(" ")}
        for item in json_data:
            which_band = item['ubid'][-6:].replace("_", "")
            buckets[which_band].append(item)

        # only keep acquisitions present in both band2 and cfmask
        valid_sources = ({i['source'] for i in buckets['band2']} &
                         {i['source'] for i in buckets['cfmask']})

        sorted_bands = {}
        band_bytes = {}
        for bucket in band_list.split(" "):
            clean = [item for item in buckets[bucket] if item['source'] in valid_sources]
            _sorted = self.sort_band_data(clean, 'acquired')
            sorted_bands[bucket] = _sorted
            band_bytes[bucket] = [self.b64_to_bytearray(item['data']) for item in _sorted]

        dates = [self.dtstr_to_ordinal(i['acquired']) for i in sorted_bands['band2']]

        mapping = self.config['ubid_band_dict'][band_group]
        arrays = {}
        for band in "red green blue nirs swirs1 swirs2 thermals qas".split(" "):
            arrays[band] = np.array(band_bytes[mapping[band]])
            print("{}: len {}".format(band, len(arrays[band])))

        rows = len(dates)
        cells = 10000  # per tile, 100x100
        output = []
        try:
            for pixel in range(0, cells):
                lower, upper = pixel, pixel + 1
                pix_data = {'dates': dates}
                for band in arrays:
                    pix_data[band] = arrays[band][0:rows, lower:upper]
                output.append({pixel: pix_data})
        except IndexError as e:
            # BUG FIX: e.message is Python-2 only; str(e) is portable.
            output = "IndexError for returned data: {}".format(e)
        print("returning {} output items from collect_data".format(len(output)))
        return output

    def run_pyccd(self, datad):
        """Run ccd.detect() over one pixel's time series."""
        def flatten(column):
            # collapse Nx1 column slices into a flat 1-D array
            return np.array([row[0] for row in column])

        data = list(datad.values())[0]
        print ("data is: {}".format(type(data)))
        return ccd.detect(data['dates'],
                          flatten(data['blue']),
                          flatten(data['green']),
                          flatten(data['red']),
                          flatten(data['nirs']),
                          flatten(data['swirs1']),
                          flatten(data['swirs2']),
                          flatten(data['thermals']),
                          flatten(data['qas']))

    def pixel_xy(self, index, tilex, tiley, dim=100):
        """Convert a flat pixel index into absolute x/y coordinates.

        e.g. index 565 with dim 100 is row 5, col 65.
        """
        # BUG FIX: floor division; '/' yields a float under Python 3.
        row = index // dim
        col = index - row * dim
        return {'y': tiley + row, 'x': tilex + col}

    def run(self, input_d):
        """Fetch tile inputs, run pyccd per pixel, and yield result dicts.

        Generator so results can be sent over messaging as they arrive.
        """
        resp = requests.get(input_d['inputs_url'])
        if resp.status_code != 200:
            # one blind retry on a non-200 response
            resp = requests.get(input_d['inputs_url'])

        band_group = 'oli' if 'OLI_TIRS' in input_d['inputs_url'] else 'tm'
        output = self.collect_data(band_group, resp.json())
        print("Have data of type {}, ready to attempt pyccd.".format(type(output)))

        # HACK: collect_data signals failure by returning a string
        if isinstance(output, str):
            msg = "Query error:{}".format(output)
            print(msg)
            # use the module's own exception type instead of bare Exception
            raise SparkException(msg)

        print("Data is valid to run pyccd. Proceeding.")
        for item in output:
            # item is keyed by pixel index: {0: {'dates': ..., 'green': ...}}
            print("item.keys:{}".format(list(item)))
            pixel_index = list(item)[0]
            results = self.run_pyccd(item)

            outgoing = dict()
            # BUG FIX: pixel_xy returns a dict; tuple-unpacking it assigned
            # the KEYS ('y', 'x') instead of the coordinate values.
            xy = self.pixel_xy(pixel_index, input_d['tile_x'], input_d['tile_y'])
            outgoing['x'], outgoing['y'] = xy['x'], xy['y']
            outgoing['algorithm'] = input_d['algorithm']
            # BUG FIX: md5 requires bytes under Python 3.
            outgoing['result_md5'] = hashlib.md5("{}".format(results).encode('utf-8')).hexdigest()
            # somehow determine if the result is ok; all True for now
            outgoing['result_ok'] = True
            outgoing['result_produced'] = datetime.now().strftime('%Y-%m-%dT%H:%M:%SZ')
            outgoing['inputs_md5'] = 'not implemented'
            yield outgoing
def run(config, indata):
    """Module-level convenience wrapper around the Spark worker."""
    worker = Spark(config)
    return worker.run(indata)
|
#!/usr/bin/python2.5
#
# Copyright 2008 the Melange authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Functions that are useful when dealing with requests.
"""
__authors__ = [
'"Lennard de Rijk" <ljvderijk@gmail.com>',
]
import soc.logic.models as model_logic
def completeRequestForRole(role_entity, role_name):
  """Marks the request that leads to the given role_entity as completed.

  Args:
    role_entity : A datastore entity that is either a role or a subclass
        of the role model
    role_name : The name in the request that is used to describe the type
        of the role_entity
  """

  # request logic gives us datastore access for Request entities
  request_logic = model_logic.request.logic

  # the request for this role is identified by scope, link id and role name
  fields = {'scope_path' : role_entity.scope_path,
            'link_id' : role_entity.link_id,
            'role' : role_name}

  request_entity = request_logic.getForFields(fields, unique=True)

  # flag the request as completed when a matching one exists
  if request_entity:
    request_logic.updateModelProperties(request_entity,
                                        {'state' : 'completed'})
Small change to the completeRequestForRole function docstring in the soc.logic.helper.request module.
Patch by: Pawel Solyga
Review by: to-be-reviewed
--HG--
extra : convert_revision : svn%3A32761e7d-7263-4528-b7be-7235b26367ec/trunk%401539
#!/usr/bin/python2.5
#
# Copyright 2008 the Melange authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Functions that are useful when dealing with requests.
"""
__authors__ = [
'"Lennard de Rijk" <ljvderijk@gmail.com>',
]
import soc.logic.models as model_logic
def completeRequestForRole(role_entity, role_name):
  """Marks the request that leads to the given role_entity as completed.

  Args:
    role_entity : A datastore entity that is either a role or a subclass
        of the role model
    role_name : The name in the request that is used to describe the type
        of the role_entity
  """

  # obtain the request logic so the datastore can be queried
  request_logic = model_logic.request.logic

  # a request is uniquely identified by its scope, link id and role name
  query_filter = {
      'scope_path' : role_entity.scope_path,
      'link_id' : role_entity.link_id,
      'role' : role_name,
  }

  matching_request = request_logic.getForFields(query_filter, unique=True)

  if matching_request:
    # a request exists for this role; mark it as completed
    request_logic.updateModelProperties(matching_request,
                                        {'state' : 'completed'})
|
#!/bin/sh
# -*- python -*-
################################################################################
# This file is python bilingual: The next line starts a comment in Python,
# and is a no-op in shell
""":"
# Find a suitable python interpreter (adapt for your specific needs)
for cmd in python3 python python2; do
command -v > /dev/null $cmd && exec $cmd $0 "$@"
done
echo "Error: Could not find a valid python interpreter --> exiting!" >&2
exit 2
":"""
################################################################################
# Git Version: @git@
#-----------------------------------------------------------------------
# XALT: A tool that tracks users jobs and environments on a cluster.
# Copyright (C) 2013-2014 University of Texas at Austin
# Copyright (C) 2013-2014 University of Tennessee
#
# This library is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as
# published by the Free Software Foundation; either version 2.1 of
# the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free
# Software Foundation, Inc., 59 Temple Place, Suite 330,
# Boston, MA 02111-1307 USA
#-----------------------------------------------------------------------
from __future__ import print_function
from fnmatch import fnmatch
from collections import OrderedDict
import os, sys, re, argparse
class CmdLineOptions(object):
    """ Command line Options class """

    def __init__(self):
        """ Empty Ctor """
        pass

    def execute(self):
        """ Specify command line arguments and parse the command line"""
        parser = argparse.ArgumentParser()
        parser.add_argument("--lib64", dest='lib64', action="store", help="LIB64 install directory")
        parser.add_argument("--base", dest='base', action="store", help="base library")
        parser.add_argument("--real", dest='real', action="store", help="real library")
        # generalization: optional verbosity flag; defaults to False, so
        # existing callers see unchanged behavior
        parser.add_argument("--verbose", dest='verbose', action="store_true", default=False, help="Print actions")
        args = parser.parse_args()
        return args
def files_in_tree(path, pattern):
    """Return all files beneath *path* whose full path matches *pattern*.

    Returns an empty list when *path* is not a directory.
    """
    if not os.path.isdir(path):
        return []
    return [os.path.join(root, name)
            for root, dirs, files in os.walk(path)
            for name in files
            if fnmatch(os.path.join(root, name), pattern)]
def main():
    """Copy a system library (and the symlink chain that reaches it) into
    the XALT lib64 directory.

    Every file in the base library's directory matching the base name, plus
    the real library, is copied; symlinks are re-created inside lib64
    pointing at their copied targets.
    """
    args = CmdLineOptions().execute()
    lib64_dir = args.lib64
    baseLib = args.base
    realLib = args.real

    baseBn = os.path.basename(baseLib)
    dirNm = os.path.realpath(os.path.dirname(baseLib))
    pattern = os.path.join(dirNm, baseBn) + "*"

    fileA = files_in_tree(dirNm, pattern)
    fileA.append(realLib)

    # fileT[fn] stays True until fn has been copied into lib64_dir
    fileT = {}
    for fn in fileA:
        fileT[fn] = True

    for fn in fileT:
        print("Storing "+fn+":")
        if (os.path.islink(fn)):
            newFn = os.readlink(fn)
            if (newFn.find('/') == -1):
                # relative link target: resolve against the library directory
                newFn = os.path.join(dirNm, newFn)
            if (os.path.isfile(newFn)):
                if (fileT[newFn]):
                    cmd = "cp "+newFn+" "+lib64_dir
                    print(" ", cmd)
                    os.system(cmd)
                    fileT[newFn] = False
                cmd = "ln -sf "+os.path.basename(newFn)+" "+ os.path.join(lib64_dir, os.path.basename(fn))
                print(" ", cmd)
                os.system(cmd)
            else:
                print("Cannot deal w/link: ", newFn)
                # BUG FIX: os.exit() does not exist (AttributeError);
                # sys.exit() aborts with a failure status as intended.
                sys.exit(1)
        else:
            if (fileT[fn]):
                cmd = "cp "+fn+" "+lib64_dir
                print(" ", cmd)
                os.system(cmd)
                fileT[fn] = False

if ( __name__ == '__main__'): main()
add verbose option to copy_system_library_to_xalt.py
#!/bin/sh
# -*- python -*-
################################################################################
# This file is python bilingual: The next line starts a comment in Python,
# and is a no-op in shell
""":"
# Find a suitable python interpreter (adapt for your specific needs)
for cmd in python3 python python2; do
command -v > /dev/null $cmd && exec $cmd $0 "$@"
done
echo "Error: Could not find a valid python interpreter --> exiting!" >&2
exit 2
":"""
################################################################################
# Git Version: @git@
#-----------------------------------------------------------------------
# XALT: A tool that tracks users jobs and environments on a cluster.
# Copyright (C) 2013-2014 University of Texas at Austin
# Copyright (C) 2013-2014 University of Tennessee
#
# This library is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as
# published by the Free Software Foundation; either version 2.1 of
# the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free
# Software Foundation, Inc., 59 Temple Place, Suite 330,
# Boston, MA 02111-1307 USA
#-----------------------------------------------------------------------
from __future__ import print_function
from fnmatch import fnmatch
from collections import OrderedDict
import os, sys, re, argparse
class CmdLineOptions(object):
    """ Command line Options class """

    def __init__(self):
        """ Empty Ctor """
        pass

    def execute(self):
        """ Specify command line arguments and parse the command line"""
        parser = argparse.ArgumentParser()
        # declare the options in one table and register them in a loop
        option_table = (
            ("--lib64",   dict(dest='lib64',   action="store",      help="LIB64 install directory")),
            ("--base",    dict(dest='base',    action="store",      help="base library")),
            ("--real",    dict(dest='real',    action="store",      help="real library")),
            ("--verbose", dict(dest='verbose', action="store_true", default=False, help="Print actions")),
        )
        for flag, kwargs in option_table:
            parser.add_argument(flag, **kwargs)
        return parser.parse_args()
def files_in_tree(path, pattern):
    """Walk *path* and collect every file whose full path matches *pattern*.

    An empty list is returned when *path* is not a directory.
    """
    matches = []
    if not os.path.isdir(path):
        return matches
    for root, _dirs, names in os.walk(path):
        matches.extend(os.path.join(root, n) for n in names
                       if fnmatch(os.path.join(root, n), pattern))
    return matches
def main():
    """Copy a system library, and the symlink chain that reaches it, into
    the XALT lib64 directory; with --verbose, print each action."""
    args = CmdLineOptions().execute()
    lib64_dir = args.lib64
    baseLib = args.base
    realLib = args.real
    verbose = args.verbose

    baseBn = os.path.basename(baseLib)
    dirNm = os.path.realpath(os.path.dirname(baseLib))
    pattern = os.path.join(dirNm, baseBn) + "*"

    fileA = files_in_tree(dirNm, pattern)
    fileA.append(realLib)

    # fileT[fn] stays True until fn has been copied into lib64_dir
    fileT = {}
    for fn in fileA:
        fileT[fn] = True

    for fn in fileT:
        if (verbose): print("Storing "+fn+":")
        if (os.path.islink(fn)):
            newFn = os.readlink(fn)
            if (newFn.find('/') == -1):
                # relative link target: resolve against the library directory
                newFn = os.path.join(dirNm, newFn)
            if (os.path.isfile(newFn)):
                if (fileT[newFn]):
                    cmd = "cp "+newFn+" "+lib64_dir
                    if (verbose): print(" ", cmd)
                    os.system(cmd)
                    fileT[newFn] = False
                cmd = "ln -sf "+os.path.basename(newFn)+" "+ os.path.join(lib64_dir, os.path.basename(fn))
                if (verbose): print(" ", cmd)
                os.system(cmd)
            else:
                print("Cannot deal w/link: ", newFn)
                # BUG FIX: os.exit() does not exist (AttributeError);
                # sys.exit() aborts with a failure status as intended.
                sys.exit(1)
        else:
            if (fileT[fn]):
                cmd = "cp "+fn+" "+lib64_dir
                if (verbose): print(" ", cmd)
                os.system(cmd)
                fileT[fn] = False

if ( __name__ == '__main__'): main()
|
#!/usr/bin/env python
'''
Creates a fake XDCAM EX structure for testing purposes
'''
import os
import sys
import subprocess
def make_clip(clip_dir):
    '''
    Creates a dummy clip: a 1-second test video plus the sidecar metadata
    files that an XDCAM EX structure carries alongside each clip.
    '''
    clip_name = os.path.basename(clip_dir)
    mp4 = os.path.join(clip_dir, clip_name + '.MP4')
    smi = os.path.join(clip_dir, clip_name + '.SMI')
    ppn = os.path.join(clip_dir, clip_name + 'I01.PPN')
    xml = os.path.join(clip_dir, clip_name + 'M01.XML')
    bim = os.path.join(clip_dir, clip_name + 'R01.BIM')
    subprocess.call(
        ['ffmpeg',
         '-f', 'lavfi',
         '-i', 'mandelbrot',
         '-c:v', 'mpeg2video', '-t', '1',
         mp4
        ])
    # BUG FIX: the sidecar list contained xml twice; each file is also now
    # closed instead of leaking the open handle.
    for sidecar in [smi, ppn, xml, bim]:
        open(sidecar, 'w').close()
def main():
    '''
    Launches functions that makes a dummy XDCAM structure.

    sys.argv[1] is the destination root under which BPAV/CLPR/... are made.
    '''
    source = sys.argv[1]
    bpav = os.path.join(source, 'BPAV')
    clpr = os.path.join(bpav, 'CLPR')
    takr = os.path.join(clpr, 'TAKR')
    clip1 = os.path.join(clpr, '338_0011_06')
    clip2 = os.path.join(clpr, '338_0011_07')
    clip3 = os.path.join(clpr, '338_0011_08')
    for folder in [bpav, clpr, takr, clip1, clip2, clip3]:
        # print() form works under both Python 2 and Python 3
        print(folder)
        os.makedirs(folder)
    cueup = os.path.join(bpav, 'CUEUP.XML')
    mediapro = os.path.join(bpav, 'MEDIAPRO.XML')
    for clip in [clip1, clip2, clip3]:
        make_clip(clip)
    # BUG FIX: close the index-file handles instead of leaking them
    for index_file in [cueup, mediapro]:
        open(index_file, 'w').close()

if __name__ == '__main__':
    main()
fakexdcam - adds audio streams and uses MXF
#!/usr/bin/env python
'''
Creates a fake XDCAM EX structure for testing purposes
'''
import os
import sys
import subprocess
def make_clip(clip_dir):
    '''
    Creates a dummy clip: a 1-second MXF with video and audio streams plus
    the sidecar metadata files that an XDCAM EX structure expects.
    '''
    clip_name = os.path.basename(clip_dir)
    mxf = os.path.join(clip_dir, clip_name + '.MXF')
    smi = os.path.join(clip_dir, clip_name + '.SMI')
    ppn = os.path.join(clip_dir, clip_name + 'I01.PPN')
    xml = os.path.join(clip_dir, clip_name + 'M01.XML')
    bim = os.path.join(clip_dir, clip_name + 'R01.BIM')
    subprocess.call(
        ['ffmpeg',
         '-f', 'lavfi',
         '-i', 'mandelbrot',
         '-f', 'lavfi',
         '-i', 'sine=sample_rate=48000',
         '-c:v', 'mpeg2video',
         '-c:a', 'pcm_s16le', '-t', '1',
         mxf
        ])
    # BUG FIX: the sidecar list contained xml twice; each file is also now
    # closed instead of leaking the open handle.
    for sidecar in [smi, ppn, xml, bim]:
        open(sidecar, 'w').close()
def main():
    '''
    Launches functions that makes a dummy XDCAM structure.

    sys.argv[1] is the destination root under which BPAV/CLPR/... are made.
    '''
    source = sys.argv[1]
    bpav = os.path.join(source, 'BPAV')
    clpr = os.path.join(bpav, 'CLPR')
    takr = os.path.join(clpr, 'TAKR')
    clip1 = os.path.join(clpr, '338_0011_06')
    clip2 = os.path.join(clpr, '338_0011_07')
    clip3 = os.path.join(clpr, '338_0011_08')
    for folder in [bpav, clpr, takr, clip1, clip2, clip3]:
        # print() form works under both Python 2 and Python 3
        print(folder)
        os.makedirs(folder)
    cueup = os.path.join(bpav, 'CUEUP.XML')
    mediapro = os.path.join(bpav, 'MEDIAPRO.XML')
    for clip in [clip1, clip2, clip3]:
        make_clip(clip)
    # BUG FIX: close the index-file handles instead of leaking them
    for index_file in [cueup, mediapro]:
        open(index_file, 'w').close()

if __name__ == '__main__':
    main()
|
#
# Collective Knowledge (individual environment - setup)
#
# See CK LICENSE.txt for licensing details
# See CK COPYRIGHT.txt for copyright details
#
# Developer: Grigori Fursin, Grigori.Fursin@cTuning.org, http://fursin.net
#
extra_dirs=['C:\\Program Files (x86)\\Microsoft Visual Studio\\2017',
'D:\\Program Files (x86)\\Microsoft Visual Studio\\2017']
import os
##############################################################################
# internal: select extension
def get_ext(i):
    """Map a target processor name to the vcvars batch-file argument.

    Returns {'return': 0, 'ext': <arg>} on success, or a {'return': 1,
    'error': ...} dict for unsupported processors.
    """
    plat = i.get('processor', '')
    ext_by_plat = {'x86': 'x86', 'x64': 'amd64', 'arm': 'arm'}
    if plat not in ext_by_plat:
        return {'return':1, 'error':'target platform ('+plat+') is not supported by this software'}
    return {'return':0, 'ext':ext_by_plat[plat]}
##############################################################################
# customize directories to automatically find and register software
def dirs(i):
    """Append the known Visual Studio install roots that exist on this
    machine to the candidate directory list."""
    found = i.get('dirs', [])
    found.extend(d for d in extra_dirs if os.path.isdir(d))
    return {'return':0, 'dirs':found}
##############################################################################
# prepare env
def version_cmd(i):
    """Build the shell command used to query cl.exe's version.

    The command first calls the vcvars batch file (i['full_path']) with the
    processor extension, then invokes 'cl' with i['cmd'].
    """
    fp = i['full_path']
    hosd = i['host_os_dict']
    tosd = i['target_os_dict']
    cmdx = i['cmd']

    # suppress per-line echo unless output goes to the console
    quiet = '' if i.get('out', '') == 'con' else hosd.get('no_output', '')

    # wrap the batch-file path in quotes when it contains spaces
    quote = hosd.get('env_quotes_if_space_in_call', '')
    if quote != '' and fp.find(' ') >= 0 and not fp.startswith(quote):
        fp = quote + fp + quote

    rx = get_ext({'processor': tosd.get('processor', '')})
    if rx['return'] > 0:
        return rx

    cmd = ''
    if fp != '':
        cmd += quiet + 'call ' + fp + ' ' + rx['ext'] + '\n'
    cmd += quiet + 'cl ' + cmdx + '\n'
    return {'return':0, 'cmd':cmd}
##############################################################################
# parse software version
def parse_version(i):
    """Extract the version number from cl.exe banner output.

    Scans i['output'] for the first non-empty line containing ' version'
    (case-insensitive, not at line start) and returns the token that
    follows it, up to the next space.
    """
    ver = ''
    for raw in i['output']:
        line = raw.strip()
        if line == '':
            continue
        pos = line.lower().find(' version')
        if pos <= 0:
            continue
        tail = line[pos + 9:]
        end = tail.find(' ')
        if end >= 0:
            ver = tail[:end]
            break
    return {'return':0, 'version':ver}
##############################################################################
# setup environment setup
def setup(i):
    """
    Prepare the environment (bat snippet + env vars) for a detected
    Visual Studio compiler.

    Input:  {
              cfg              - meta of this soft entry
              self_cfg         - meta of module soft
              ck_kernel        - import CK kernel module (to reuse functions)
              host_os_uoa      - host OS UOA
              host_os_uid      - host OS UID
              host_os_dict     - host OS meta
              target_os_uoa    - target OS UOA
              target_os_uid    - target OS UID
              target_os_dict   - target OS meta
              target_device_id - target device ID (if via ADB)
              tags             - list of tags used to search this entry
              env              - updated environment vars from meta
              customize        - updated customize vars from meta
              deps             - resolved dependencies for this soft
              interactive      - if 'yes', can ask questions, otherwise quiet
            }

    Output: {
              return       - return code = 0, if successful
                                         > 0, if error
              (error)      - error text if return > 0
              bat          - prepared string for bat file
            }
    """

    # Get variables
    ck=i['ck_kernel']
    s=''

    iv=i.get('interactive','')

    cus=i.get('customize',{})
    fp=cus.get('full_path','')

    hos=i['host_os_uid']
    # NOTE(review): 'tos' is read from host_os_uid, not target_os_uid --
    # looks like a copy/paste slip but is kept as-is; confirm with callers.
    tos=i['host_os_uid']

    tdid=i['target_device_id']

    hosd=i['host_os_dict']
    tosd=i['target_os_dict']

    # MSVC cannot target Android
    if 'android' in tosd.get('tags',[]):
        return {'return':1, 'error':'this software is not supporting Android platform'}

    ver=i.get('version','')
    sver=i.get('version_split',[])

    # Check platform
    plat=tosd.get('processor','')
    tbits=tosd.get('bits','')

    # Check which processor (maps to the vcvars argument)
    rx=get_ext({'processor':plat})
    if rx['return']>0: return rx
    ext=rx['ext']

    env=i['env']
    ep=cus['env_prefix']

    # env prefix points at the grandparent dir of the vcvars batch file
    pi=''
    if fp!='' and ep!='':
        p1=os.path.dirname(fp)
        pi=os.path.dirname(p1)
        env[ep]=pi

    # Prepare cmake generator from the cl.exe major/minor version
    cgen=''
    if len(sver)>0:
        # BUG FIX: the 19.x guard used 'len(sver)>0' (always true here),
        # so sver==[19] raised IndexError on sver[1]; require len>1.
        if sver[0]==19 and len(sver)>1 and sver[1]>0:
            cgen='Visual Studio 15 2017'
        elif sver[0]==19:
            cgen='Visual Studio 14 2015'
        elif sver[0]==18:
            cgen='Visual Studio 12 2013'
        elif sver[0]==17:
            cgen='Visual Studio 11 2012'
        elif sver[0]==16:
            cgen='Visual Studio 10 2010'
        elif sver[0]==15:
            cgen='Visual Studio 9 2008'
        elif sver[0]==14:
            cgen='Visual Studio 8 2005'
        elif sver[0]==13:
            cgen='Visual Studio 7 .NET 2003'
        elif sver[0]==12:
            cgen='Visual Studio 7'
        elif sver[0]==11:
            cgen='Visual Studio 6'

    if cgen!='':
        if str(tbits)=='64':
            cgen+=' Win64'
        env['CK_CMAKE_GENERATOR']=cgen

    if env.get('CK_CMAKE_GENERATOR','')=='':
        ck.out('**********************************')
        ck.out('Problem: can\'t detect Visual Studio compiler version from ('+str(sver)+')')
        ck.out('')
        ck.out('Please report to the authors at https://github.com/ctuning/ck-env/issues')
        ck.out('')
        r=ck.inp({'text':'Would you like to continue installation (Y/n): '})
        if r['return']>0: return r
        rx=r['string'].strip().lower()
        if rx=='n' or rx=='no':
            return {'return':1, 'error':'can\'t detect Visual Studio compiler version'}

    ############################################################
    s+='\n'
    s+='rem Setting environment\n'
    s+='call "'+fp+'" '+ext+'\n\n'

    env['VSINSTALLDIR']=pi

    pix=pi
    if os.path.basename(pi)=='Auxiliary':
        pix=os.path.dirname(pix)

    # Try to get redistribute number VCxyz
    # BUG FIX: ck.access expects the module under 'module_uoa'; the old
    # call passed 'module':'env', querying the wrong module.
    r=ck.access({'action':'list_all_files',
                 'module_uoa':'soft',
                 'path':pix,
                 'pattern':'Microsoft.VC*.CRT',
                 'recursion_level_max':4})
    if r['return']>0: return r

    x=r['list']
    vc=''
    for q in x:
        j1=q.find('Microsoft.VC')
        if j1>=0:
            j2=q.find('.CRT',j1+1)
            if j2>0:
                vc=q[j1+12:j2]
                break

    env[ep+'_VC_MSBUILD']=vc
    env['CK_COMPILER_TOOLCHAIN_NAME']='msvc'

    # Attempt to detect path to compiler
    cmd=s+'where cl.exe'
    r=ck.access({'action':'shell',
                 'module_uoa':'os',
                 'host_os':hos,
                 'target_os':tos,
                 'device_id':tdid,
                 'cmd':cmd,
                 'split_to_list':'yes'})
    if r['return']>0: return r

    pcl=''
    for x in reversed(r['stdout_lst']):
        x=x.strip()
        if x!='':
            if os.path.isfile(x):
                pcl=x
                break

    if pcl!='':
        # Found compiler path (useful for CMAKE)
        env[ep+'_BIN']=os.path.dirname(pcl)

        ck.out('')
        ck.out(' * Found compiler in '+pcl)

        env['VCINSTALLDIR']=os.path.dirname(pcl)

    return {'return':0, 'bat':s}
Bug fix: pass 'module_uoa' (not 'module') to ck.access when listing Microsoft.VC*.CRT files.
#
# Collective Knowledge (individual environment - setup)
#
# See CK LICENSE.txt for licensing details
# See CK COPYRIGHT.txt for copyright details
#
# Developer: Grigori Fursin, Grigori.Fursin@cTuning.org, http://fursin.net
#
extra_dirs=['C:\\Program Files (x86)\\Microsoft Visual Studio\\2017',
'D:\\Program Files (x86)\\Microsoft Visual Studio\\2017']
import os
##############################################################################
# internal: select extension
def get_ext(i):
    """Translate a target processor name into the vcvars argument.

    Unsupported processors produce {'return': 1, 'error': ...}.
    """
    plat = i.get('processor', '')
    if plat == 'x86':
        return {'return':0, 'ext':'x86'}
    if plat == 'x64':
        return {'return':0, 'ext':'amd64'}
    if plat == 'arm':
        return {'return':0, 'ext':'arm'}
    return {'return':1, 'error':'target platform ('+plat+') is not supported by this software'}
##############################################################################
# customize directories to automatically find and register software
def dirs(i):
    """Add the extra Visual Studio installation roots that actually exist
    on this machine to the search-directory list."""
    candidates = i.get('dirs', [])
    for root in extra_dirs:
        if os.path.isdir(root):
            candidates.append(root)
    return {'return':0, 'dirs':candidates}
##############################################################################
# prepare env
def version_cmd(i):
    """Compose the batch command that prints the cl.exe version banner."""
    fp = i['full_path']
    hosd = i['host_os_dict']
    tosd = i['target_os_dict']
    cmdx = i['cmd']
    o = i.get('out', '')

    nout = hosd.get('no_output', '')
    xnout = '' if o == 'con' else nout

    # quote the batch-file path when it has embedded spaces
    eifsc = hosd.get('env_quotes_if_space_in_call', '')
    if eifsc and ' ' in fp and not fp.startswith(eifsc):
        fp = eifsc + fp + eifsc

    rx = get_ext({'processor': tosd.get('processor', '')})
    if rx['return'] > 0:
        return rx

    pieces = []
    if fp != '':
        pieces.append(xnout + 'call ' + fp + ' ' + rx['ext'] + '\n')
    pieces.append(xnout + 'cl ' + cmdx + '\n')
    return {'return':0, 'cmd':''.join(pieces)}
##############################################################################
# parse software version
def parse_version(i):
    """Pull the version token out of the compiler banner lines.

    The first non-empty line containing ' version' yields the token that
    follows it, up to the next space.
    """
    ver = ''
    for raw in i['output']:
        stripped = raw.strip()
        if stripped == '':
            continue
        idx = stripped.lower().find(' version')
        if idx <= 0:
            continue
        remainder = stripped[idx + 9:]
        space_at = remainder.find(' ')
        if space_at >= 0:
            ver = remainder[:space_at]
            break
    return {'return':0, 'version':ver}
##############################################################################
# setup environment setup
def setup(i):
    """
    Prepare the environment (bat snippet + env vars) for a detected
    Visual Studio compiler.

    Input:  {
              cfg              - meta of this soft entry
              self_cfg         - meta of module soft
              ck_kernel        - import CK kernel module (to reuse functions)
              host_os_uoa      - host OS UOA
              host_os_uid      - host OS UID
              host_os_dict     - host OS meta
              target_os_uoa    - target OS UOA
              target_os_uid    - target OS UID
              target_os_dict   - target OS meta
              target_device_id - target device ID (if via ADB)
              tags             - list of tags used to search this entry
              env              - updated environment vars from meta
              customize        - updated customize vars from meta
              deps             - resolved dependencies for this soft
              interactive      - if 'yes', can ask questions, otherwise quiet
            }

    Output: {
              return       - return code = 0, if successful
                                         > 0, if error
              (error)      - error text if return > 0
              bat          - prepared string for bat file
            }
    """

    # Get variables
    ck=i['ck_kernel']
    s=''

    iv=i.get('interactive','')

    cus=i.get('customize',{})
    fp=cus.get('full_path','')

    hos=i['host_os_uid']
    # NOTE(review): 'tos' is read from host_os_uid, not target_os_uid --
    # looks like a copy/paste slip; confirm against callers before changing.
    tos=i['host_os_uid']

    tdid=i['target_device_id']

    hosd=i['host_os_dict']
    tosd=i['target_os_dict']

    # MSVC cannot target Android
    if 'android' in tosd.get('tags',[]):
        return {'return':1, 'error':'this software is not supporting Android platform'}

    ver=i.get('version','')
    sver=i.get('version_split',[])

    # Check platform
    plat=tosd.get('processor','')
    tbits=tosd.get('bits','')

    # Check which processor (maps to the vcvars argument)
    rx=get_ext({'processor':plat})
    if rx['return']>0: return rx
    ext=rx['ext']

    env=i['env']
    ep=cus['env_prefix']

    # the env prefix points at the grandparent dir of the vcvars batch file
    pi=''
    if fp!='' and ep!='':
        p1=os.path.dirname(fp)
        pi=os.path.dirname(p1)
        env[ep]=pi

    # Prepare cmake generator from the cl.exe major (and minor) version
    cgen=''
    if len(sver)>0:
        # NOTE(review): 'len(sver)>0' is always true inside this branch;
        # the guard was probably meant to be len(sver)>1 -- sver==[19]
        # would raise IndexError on sver[1] here.
        if sver[0]==19 and len(sver)>0 and sver[1]>0:
            cgen='Visual Studio 15 2017'
        elif sver[0]==19:
            cgen='Visual Studio 14 2015'
        elif sver[0]==18:
            cgen='Visual Studio 12 2013'
        elif sver[0]==17:
            cgen='Visual Studio 11 2012'
        elif sver[0]==16:
            cgen='Visual Studio 10 2010'
        elif sver[0]==15:
            cgen='Visual Studio 9 2008'
        elif sver[0]==14:
            cgen='Visual Studio 8 2005'
        elif sver[0]==13:
            cgen='Visual Studio 7 .NET 2003'
        elif sver[0]==12:
            cgen='Visual Studio 7'
        elif sver[0]==11:
            cgen='Visual Studio 6'

    if cgen!='':
        if str(tbits)=='64':
            cgen+=' Win64'
        env['CK_CMAKE_GENERATOR']=cgen

    # unknown compiler version: ask the user whether to continue
    if env.get('CK_CMAKE_GENERATOR','')=='':
        ck.out('**********************************')
        ck.out('Problem: can\'t detect Visual Studio compiler version from ('+str(sver)+')')
        ck.out('')
        ck.out('Please report to the authors at https://github.com/ctuning/ck-env/issues')
        ck.out('')
        r=ck.inp({'text':'Would you like to continue installation (Y/n): '})
        if r['return']>0: return r
        rx=r['string'].strip().lower()
        if rx=='n' or rx=='no':
            return {'return':1, 'error':'can\'t detect Visual Studio compiler version'}

    ############################################################
    # build the bat snippet that calls the vcvars batch file
    s+='\n'
    s+='rem Setting environment\n'
    s+='call "'+fp+'" '+ext+'\n\n'

    env['VSINSTALLDIR']=pi

    pix=pi
    if os.path.basename(pi)=='Auxiliary':
        pix=os.path.dirname(pix)

    # Try to get redistribute number VCxyz by searching for the CRT dir
    r=ck.access({'action':'list_all_files',
                 'module_uoa':'soft',
                 'path':pix,
                 'pattern':'Microsoft.VC*.CRT',
                 'recursion_level_max':4})
    if r['return']>0: return r

    x=r['list']
    vc=''
    for q in x:
        j1=q.find('Microsoft.VC')
        if j1>=0:
            j2=q.find('.CRT',j1+1)
            if j2>0:
                # the 'xyz' part between 'Microsoft.VC' and '.CRT'
                vc=q[j1+12:j2]
                break

    env[ep+'_VC_MSBUILD']=vc
    env['CK_COMPILER_TOOLCHAIN_NAME']='msvc'

    # Attempt to detect path to compiler by running 'where cl.exe' after
    # sourcing the vcvars environment
    cmd=s+'where cl.exe'
    r=ck.access({'action':'shell',
                 'module_uoa':'os',
                 'host_os':hos,
                 'target_os':tos,
                 'device_id':tdid,
                 'cmd':cmd,
                 'split_to_list':'yes'})
    if r['return']>0: return r

    pcl=''
    # take the last existing file reported by 'where'
    for x in reversed(r['stdout_lst']):
        x=x.strip()
        if x!='':
            if os.path.isfile(x):
                pcl=x
                break

    if pcl!='':
        # Found compiler path (useful for CMAKE)
        env[ep+'_BIN']=os.path.dirname(pcl)

        ck.out('')
        ck.out(' * Found compiler in '+pcl)

        env['VCINSTALLDIR']=os.path.dirname(pcl)

    return {'return':0, 'bat':s}
|
#!/usr/bin/python
'''The MIT License (MIT)
Copyright (c) 2017 Yu Xiong Wei(try.dash.now@gmail.com)
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.'''
__author__ = 'sean yu (Yu, Xiongwei)'
__doc__ = '''
it's GUI of DasH aka Do as Human
created 2017-05-06 by Sean Yu
'''
from datetime import datetime
import wx.grid as gridlib
import traceback
import wx
from gui.MainFrame import MainFrame
import os
from lib.common import load_bench, caller_stack_info,info, get_next_in_ring_list,get_folder_item, info,debug, warn, error, parse_command_line, call_function_in_module
import re
import time
import threading
import ConfigParser
import sys
import inspect
import Queue
from SessionTab import SessionTab
import imp
import types
from lib.common import send_mail_smtp_without_login
from lib.common import run_script
from multiprocessing import Process
import subprocess
import shlex
#from dut import dut
class RedirectText(object):
    """Tee-style stream that replaces sys.stdout/sys.stderr.

    Every write goes to (a) the original stdout, (b) the wx text control
    (colored red/yellow when the line looks like an error) and (c) an
    optional ``dash.log`` file under *log_path*.
    """
    font_point_size = 10
    old_stdout = None
    old_stderr = None
    write_lock = None
    log_file = None
    def __init__(self,aWxTextCtrl, log_path=None):
        # Keep the real streams so they can be restored at shutdown.
        self.old_stderr , self.old_stdout=sys.stderr , sys.stdout
        self.out=aWxTextCtrl
        self.font_point_size = self.out.GetFont().PointSize
        self.write_lock = threading.Lock()
        if log_path:
            name = '{}/dash.log'.format(log_path)
            self.log_file = open(name, 'w+')
            # Expose fileno so callers can treat this object like a real file.
            self.fileno = self.log_file.fileno
    def write(self,string):
        self.write_lock.acquire()
        # BUG FIX: the original released the lock only on the happy path; an
        # exception from any wx/file call left the lock held forever and
        # deadlocked every later print(). try/finally guarantees release.
        try:
            self.old_stdout.write(string)
            if re.search('error|\s+err\s+|fail|wrong',string.lower()):
                # Error-looking lines: bigger bold red-on-yellow.
                self.out.SetDefaultStyle(wx.TextAttr(wx.RED, wx.YELLOW, font =wx.Font(self.font_point_size+2, family = wx.DEFAULT, style = wx.NORMAL, weight = wx.BOLD, faceName = 'Consolas')))
            else:
                self.out.SetDefaultStyle(wx.TextAttr(wx.GREEN, wx.BLACK,font =wx.Font(self.font_point_size, family = wx.DEFAULT, style = wx.NORMAL, weight = wx.NORMAL, faceName = 'Consolas')))
            # AppendText must run on the GUI thread.
            wx.CallAfter(self.out.AppendText, string)
            if self.log_file:
                self.log_file.write(string)
                self.log_file.flush()
        finally:
            self.write_lock.release()
    def close(self):
        """Flush and close the log file (the wx control is owned elsewhere)."""
        if self.log_file:
            self.log_file.flush()
            self.log_file.close()
class process_info(object):
    """Lightweight record of a launched test process.

    Wraps a ``subprocess.Popen``-like object and exposes its pid, display
    name and a *live* ``returncode`` read from the wrapped process.
    """
    process = None
    pid=None
    full_name=None
    def __init__(self,name, process):
        self.process= process
        self.pid = process.pid
        self.full_name =name
        # BUG FIX: the original also did `self.returncode = process.returncode`,
        # but `returncode` is a read-only property below, so every construction
        # raised AttributeError ("can't set attribute"). The property already
        # delegates to the process, so no snapshot is needed.
    @property
    def returncode(self):
        # Always read the current value so callers see completion promptly.
        return self.process.returncode
class FileEditor(wx.Panel):
    """Notebook page that shows a file either as plain text ('text') or as a
    function/argument grid (any other *type*). Ctrl+mouse-wheel zooms fonts."""
    editor =None
    font_size=10
    parent=None
    type = None
    sessions_node =None
    function_node =None
    case_suite_node =None
    full_file_name = None
    file_instance = None
    def on_close(self):
        """Persist the buffer back to its file when the tab closes."""
        # BUG FIX: only the text editor has GetValue(); calling it on a
        # gridlib.Grid raised AttributeError when closing a grid tab.
        if self.full_file_name and self.type in ['text']:
            data = self.editor.GetValue()
            with open(self.full_file_name, 'w') as f:
                f.write(data)
                f.flush()
        #done 2017-9-12: handle close tab in edit_area
    def __init__(self, parent, title='pageOne', type ='grid', file_name = None):
        wx.Panel.__init__(self, parent)
        self.parent = parent
        self.type = type
        self.full_file_name = file_name
        if type in ['text']:
            # Plain multi-line text view, loaded from the backing file.
            self.editor = wx.TextCtrl( self, -1, wx.EmptyString, wx.DefaultPosition, wx.DefaultSize, wx.TE_AUTO_URL|wx.VSCROLL|wx.TE_RICH|wx.TE_MULTILINE&(~wx.TE_PROCESS_ENTER))
            with open(self.full_file_name, 'r') as f:
                for line in f.readlines():
                    self.editor.AppendText(line)
        else:
            # Grid view: column 0 = function name (bold black), rest = args (blue).
            self.editor= gridlib.Grid(self)
            self.editor.CreateGrid(50, 5)
            col = self.editor.GetNumberCols()
            row = self.editor.GetNumberRows()
            function_color ='black'
            arg_color = 'blue'
            for c in range(0, col):
                if c < 1 :
                    self.editor.SetColLabelValue(c, 'Function Name')
                else:
                    self.editor.SetColLabelValue(c, 'arg# {}'.format(c))
                for r in range (0, row):
                    self.editor.SetCellTextColour(r,c,function_color if c <1 else arg_color)
            for r in range (0, row):
                self.editor.SetCellFont(r, 0, wx.Font(self.font_size,wx.SWISS, wx.NORMAL, wx.BOLD ))
        self.editor.Bind( wx.EVT_MOUSEWHEEL, self.editor_OnMouseWheel )
        sizer = wx.BoxSizer()
        sizer.Add(self.editor, 1, wx.EXPAND)
        self.SetSizer(sizer)
    def editor_OnMouseWheel(self,event):
        """Ctrl+wheel zoom: wheel-down shrinks (by 2, floored), wheel-up grows (by 1)."""
        min_font_size = 5
        interval_step = 2
        if not event.ControlDown():
            return
        if event.GetWheelRotation() < 0:
            if self.font_size>min_font_size:
                self.font_size-=interval_step
        else:
            self.font_size+=1
        if self.type in ['text']:
            f =self.editor.GetFont()
            f.PointSize= self.font_size
            self.editor.SetFont(f)
        else:
            # Grids have per-cell fonts, so walk every cell.
            col = self.editor.GetNumberCols()
            row = self.editor.GetNumberRows()
            for c in range(0, col):
                for r in range (0, row):
                    f = self.editor.GetCellFont(r, c)
                    f.PointSize = self.font_size
                    self.editor.SetCellFont(r, c, f)
        self.Refresh()
#DONE: DasHFrame should handle CLOSE event when closing the app, call on_close_tab_in_edit_area for all opened sessions and files
class DasHFrame(MainFrame):#wx.Frame
ini_setting = None
#m_left_navigator =None
redir = None
edit_area=None
tabs_in_edit_area = None
src_path = None
sessions_alive=None
sequence_queue=None
history_cmd = []
history_cmd_index = -1
import_modules={'TC':'TC'}
lib_path ='./lib'
log_path = '../log'
session_path = './sessions'
suite_path = '../test_suite'
dict_test_report= None
alive =True
mail_server=None
mail_to_list=None
mail_from=None
mail_read_url= 'outlook.office365.com'
mail_password = None
mail_usre =None
case_queue =None
check_case_running_status_lock = None
case_list=None
    def __init__(self,parent=None, ini_file = './gDasH.ini'):
        """Build the main window: read gDasH.ini, redirect stdout/stderr to the
        log pane, create the SESSION/FUNCTION/CASE navigator and the edit area,
        and start the case-polling and mail-polling background threads."""
        #wx.Frame.__init__(self, None, title="DasH")
        self.case_list= []
        self.case_queue = Queue.Queue()
        self.dict_test_report={}
        self.check_case_running_status_lock = threading.Lock()
        self.tabs_in_edit_area=[]
        self.sessions_alive={}
        MainFrame.__init__(self, parent=parent)
        self.sequence_queue= Queue.Queue()
        #self.sequence_queue.put()
        # All paths/addresses come from the [dash] section of the ini file.
        self.ini_setting = ConfigParser.ConfigParser()
        self.ini_setting.read(ini_file)
        self.src_path = os.path.abspath(self.ini_setting.get('dash','src_path'))
        self.lib_path = os.path.abspath(self.ini_setting.get('dash','lib_path'))
        self.log_path = os.path.abspath(self.ini_setting.get('dash','log_path'))
        self.suite_path = os.path.abspath(self.ini_setting.get('dash', 'test_suite_path'))
        self.mail_server = self.ini_setting.get('dash', 'mail_server')
        self.mail_from =self.ini_setting.get('dash', 'mail_from')
        self.mail_to_list =self.ini_setting.get('dash', 'mail_to_list')
        self.mail_read_url =self.ini_setting.get('dash', 'mail_read_url')
        self.mail_user = self.ini_setting.get('dash','mail_user')
        self.mail_password =self.ini_setting.get('dash', 'mail_password')
        from lib.common import create_case_folder, create_dir
        # create_case_folder reads '-l <path>' from sys.argv, hence the append.
        sys.argv.append('-l')
        sys.argv.append('{}'.format(self.log_path))
        self.log_path = create_case_folder(self.log_path)
        self.suite_path = create_dir(self.suite_path)
        self.lib_path = create_dir(self.lib_path)
        self.src_path = create_dir(self.src_path)
        if not os.path.exists(self.log_path):
            os.mkdir(self.log_path)
        self.add_src_path_to_python_path(self.src_path)
        # From here on, print/stderr go to the log pane and dash.log.
        self.redir = RedirectText(self.m_log, self.log_path)
        sys.stdout = self.redir
        sys.stderr = self.redir
        self.m_log.SetBackgroundColour('Black')
        self.m_log.SetDefaultStyle(wx.TextAttr(wx.GREEN, wx.BLACK, font =wx.Font(9, family = wx.DEFAULT, style = wx.NORMAL, weight = wx.BOLD, faceName = 'Consolas')))
        #self.m_editor.WriteText('welcome to dash world')
        self.m_log.WriteText('Welcome to DasH!\n')
        # Pre-fill the command box with an example command.
        self.m_command_box.WriteText('functions.static_function_in_module test_ssh 2')
        fileMenu = wx.Menu()
        open_test_suite = fileMenu.Append(wx.NewId(), "Open TestSuite", "Open a Test Suite")
        open_test_case = fileMenu.Append(wx.NewId(), "Open TestCase", "Open a Test Case")
        mail_test_report = fileMenu.Append(wx.NewId(), "Mail Test Report", "Mail Test Report")
        get_case_queue = fileMenu.Append(wx.NewId(), "Get Case Queue", "Get Case Queue") #done
        clear_case_queue = fileMenu.Append(wx.NewId(), "Clear Case Queue", "Clear Case Queue")
        kill_running_case = fileMenu.Append(wx.NewId(), "Kill Running Case(s)", "Kill Running Case(s)")
        self.m_menubar_main.Append(fileMenu, "&Open")
        # NOTE(review): open_test_suite / open_test_case have no EVT_MENU binding here.
        self.Bind(wx.EVT_MENU,self.on_mail_test_report ,mail_test_report)
        self.Bind(wx.EVT_MENU,self.get_case_queue ,get_case_queue)
        self.Bind(wx.EVT_MENU,self.on_clear_case_queue ,clear_case_queue)
        self.Bind(wx.EVT_MENU,self.on_kill_running_case ,kill_running_case)
        self.Bind(wx.EVT_CLOSE, self.on_close)
        self.m_command_box.Bind(wx.EVT_TEXT_ENTER, self.on_command_enter)
        self.m_command_box.Bind(wx.EVT_KEY_UP, self.on_key_up)
        self.m_command_box.Bind(wx.EVT_KEY_DOWN, self.on_key_down)
        from wx.aui import AuiNotebook
        bookStyle = wx.aui.AUI_NB_DEFAULT_STYLE &(~wx.aui.AUI_NB_CLOSE_ON_ACTIVE_TAB)
        # Left navigator: three trees in an AUI notebook.
        self.navigator = AuiNotebook(self.m_left_navigator, style= bookStyle )
        self.case_suite_page = wx.TreeCtrl(self.navigator, wx.ID_ANY, wx.DefaultPosition, wx.Size(-1, -1), wx.TR_DEFAULT_STYLE | wx.TR_EDIT_LABELS | wx.TR_EXTENDED | wx.TR_HAS_BUTTONS | wx.TR_HAS_VARIABLE_ROW_HEIGHT | wx.HSCROLL | wx.TAB_TRAVERSAL | wx.VSCROLL | wx.WANTS_CHARS)
        self.function_page = wx.TreeCtrl(self.navigator, wx.ID_ANY, wx.DefaultPosition, wx.Size(-1, -1), wx.TR_DEFAULT_STYLE | wx.TR_EDIT_LABELS | wx.TR_EXTENDED | wx.TR_HAS_BUTTONS | wx.TR_HAS_VARIABLE_ROW_HEIGHT | wx.HSCROLL | wx.TAB_TRAVERSAL | wx.VSCROLL | wx.WANTS_CHARS)
        self.session_page = wx.TreeCtrl(self.navigator, wx.ID_ANY, wx.DefaultPosition, wx.Size(-1, -1), wx.TR_DEFAULT_STYLE | wx.TR_EDIT_LABELS | wx.TR_EXTENDED | wx.TR_HAS_BUTTONS | wx.TR_HAS_VARIABLE_ROW_HEIGHT | wx.HSCROLL | wx.TAB_TRAVERSAL | wx.VSCROLL | wx.WANTS_CHARS)
        self.navigator.AddPage(self.session_page, 'SESSION')
        self.navigator.AddPage(self.function_page, 'FUNCTION')
        self.navigator.AddPage(self.case_suite_page, 'CASE')
        self.edit_area = AuiNotebook(self.m_file_editor, style = wx.aui.AUI_NB_DEFAULT_STYLE)
        self.edit_area.Bind(wx.aui.EVT_AUINOTEBOOK_PAGE_CLOSE, self.on_close_tab_in_edit_area, self.edit_area)
        if False:
            # Dead demo code kept by the author for debugging tab creation.
            new_page = FileEditor(self.edit_area, 'a', type= type)
            self.edit_area.AddPage(new_page, 'test')
            self.tabs_in_edit_area.append(('test'))
        self.edit_area.Enable(True)
        right_sizer = wx.BoxSizer(wx.VERTICAL)
        #right_sizer =wx.GridSizer( 3, 1, 0, 0 )
        left_sizer = wx.BoxSizer(wx.HORIZONTAL)
        left_sizer.Add(self.m_left_navigator, 1, wx.EXPAND)
        self.case_suite_page.Bind(wx.EVT_LEFT_DCLICK, self.m_case_treeOnLeftDClick)
        #self.case_suite_page.Bind(wx.EVT_MOUSEWHEEL, self.case_tree_OnMouseWheel)
        self.case_suite_page.Bind(wx.EVT_TREE_ITEM_EXPANDING, self.m_case_treeOnTreeItemExpanding)
        self.session_page.Bind(wx.EVT_LEFT_DCLICK, self.on_LeftDClick_in_Session_tab)
        self.function_page.Bind(wx.EVT_LEFT_DCLICK, self.on_LeftDClick_in_Function_tab)
        self.function_page.Bind(wx.EVT_RIGHT_DOWN, self.on_right_down_in_function_tab)
        self.case_suite_page.Bind(wx.EVT_RIGHT_DOWN, self.on_right_down_in_case_tab)
        self.session_page.Bind(wx.EVT_RIGHT_DOWN, self.on_right_down_in_session_tab)
        main_sizer = wx.BoxSizer(wx.HORIZONTAL)
        #main_sizer = wx.GridSizer( 1, 2, 0, 0 )
        nav_sizer = wx.BoxSizer()
        nav_sizer.Add(self.navigator, 1, wx.EXPAND, 1)
        self.m_left_navigator.SetSizer(nav_sizer)
        #main_sizer = wx.BoxSizer(wx.HORIZONTAL)
        #main_sizer.Add(left_sizer, 3, wx.EXPAND)
        main_sizer.Add(left_sizer, 2, wx.EXPAND)
        edit_sizer = wx.BoxSizer()
        edit_sizer.Add(self.edit_area, 1, wx.EXPAND, 1)
        self.m_file_editor.SetSizer(edit_sizer)
        right_sizer.Add(self.m_file_editor, 6, wx.ALL|wx.EXPAND, 1)
        right_sizer.Add(self.m_log, 3, wx.ALL|wx.EXPAND, 2)
        right_sizer.Add(self.m_command_box, 0, wx.ALL|wx.EXPAND, 3)
        main_sizer.Add(right_sizer, 8, wx.EXPAND)
        self.SetSizer(main_sizer)
        self.build_session_tab()
        self.build_suite_tree()
        self.build_function_tab()
        ico = wx.Icon('./gui/dash.bmp', wx.BITMAP_TYPE_ICO)
        self.SetIcon(ico)
        # Background workers; they exit when self.alive goes False (see on_close).
        th= threading.Thread(target=self.polling_running_cases)
        th.start()
        th = threading.Thread(target=self.polling_request_via_mail)
        th.start()
    def on_close(self, event):
        """Shut down: stop background threads, dump the recorded command script,
        mail a final report, close session tabs and restore stdout/stderr."""
        self.alive =False
        time.sleep(0.01)
        self.generate_code(file_name='{}/test_script.py'.format(self.suite_path))
        if len(self.dict_test_report):
            self.mail_test_report("DASH TEST REPORT")
        for index in range(0,self.edit_area.GetPageCount()): #len(self.tabs_in_edit_area)):
            closing_page = self.edit_area.GetPage(index)
            # NOTE(review): only SessionTab pages get on_close() here — FileEditor
            # tabs are never flushed on app exit; confirm whether that is intended.
            if isinstance(closing_page, (SessionTab)):
                if closing_page:
                    name = closing_page.name
                    self.tabs_in_edit_area.pop(self.tabs_in_edit_area.index(name))
                    closing_page.on_close()
        # Restore the real streams before the log pane is destroyed.
        self.redir.close()
        sys.stderr =self.redir.old_stderr
        sys.stdout = self.redir.old_stdout
        event.Skip()
def generate_report(self, filename):
report = '''Test Report
RESULT,\tStart_Time,\tEnd_Time,\tPID,\tDuration,\tCase_Name,\tLog\n'''
if len(self.dict_test_report):
with open(filename, 'a+') as f:
f.write(report)
for pi in sorted(self.dict_test_report, key = lambda x: self.dict_test_report[x][1]):
case_name, start_time, end_time, duration, return_code ,proc, log_path =self.dict_test_report[pi][:7]
if return_code is None:
result = 'IP'
else:
result = return_code # 'FAIL' if return_code else 'PASS'
record = '\t'.join(['{},\t'.format(x) for x in [result,start_time,end_time,pi,duration,case_name,'<{}>'.format(log_path) ]])
report+=record+'\n'
f.write(record+'\n')
return report
def on_close_tab_in_edit_area(self, event):
#self.edit_area.GetPage(self.edit_area.GetSelection()).on_close()
closing_page = self.edit_area.GetPage(self.edit_area.GetSelection())
closing_page.on_close()
if isinstance(closing_page, (SessionTab)):
ses_name = closing_page.name
self.tabs_in_edit_area.pop(self.tabs_in_edit_area.index(ses_name))
if globals().has_key(ses_name):
#g = dict(globals())
#globals()[ses_name]=None
#del g[ses_name]
globals()[ses_name].close_session()
del globals()[ses_name]
def add_item_to_subfolder_in_tree(self,node):
subfolder_path_name = self.case_suite_page.GetPyData(node)['path_name']
items = get_folder_item(subfolder_path_name)
if items is None:
self.case_suite_page.SetItemText(node, self.m_case_tree.GetItemText(node) + ' Not Exists!!!')
self.case_suite_page.SetItemTextColour(node, wx.Colour(255, 0, 0))
return
for i in items:
path_name = '{}/{}'.format(subfolder_path_name,i)
base_name = os.path.basename(i)
item_info = wx.TreeItemData({'path_name':path_name})
self.case_list.append(path_name)
new_item = self.case_suite_page.InsertItem(node, node, base_name)
self.case_suite_page.SetItemData(new_item, item_info)
if os.path.isdir(path_name):
self.case_suite_page.SetItemHasChildren(new_item)
#self.m_case_tree.ItemHasChildren()
#self.m_case_tree.InsertItem(new_item,new_item,'')
def build_suite_tree(self):
suite_path = self.suite_path #os.path.abspath(self.ini_setting.get('dash','test_suite_path'))
if not os.path.exists(suite_path):
suite_path= os.path.abspath(os.path.curdir)
base_name = os.path.basename(suite_path)
root =self.case_suite_page.AddRoot(base_name)
item_info = wx.TreeItemData({'path_name':suite_path})
self.case_suite_page.SetItemData(root, item_info)
self.add_item_to_subfolder_in_tree(root)
self.case_suite_page.Expand(root)
# def OnSelChanged(self, event):
# item = event.GetItem()
# self.display.SetLabel(self.tree.GetItemText(item))
#def case_tree_OnMouseWheel(self, event):
def m_case_treeOnLeftDClick(self, event):
ht_item =self.case_suite_page.GetSelection()
#ht_item = self.HitTest(event.GetPosition())
item_name = self.case_suite_page.GetItemText(ht_item)
item_data = self.case_suite_page.GetItemData(ht_item)
if self.case_suite_page.ItemHasChildren(ht_item):
if self.case_suite_page.IsExpanded(ht_item):
self.case_suite_page.Collapse(ht_item)
else:
self.case_suite_page.ExpandAllChildren(ht_item)
else:
if item_name.lower() in ['.csv', '.xlsx','.xls']:
type = 'grid'
file_name = item_data.Data['path_name']
else:
type = 'text'
file_name = item_data.Data['path_name']
new_page = FileEditor(self.edit_area, 'a', type= type,file_name=file_name)
self.edit_area.AddPage(new_page, item_name)
index = self.edit_area.GetPageIndex(new_page)
self.edit_area.SetSelection(index)
def m_case_treeOnTreeItemExpanding(self,event):
ht_item =self.case_suite_page.GetSelection()
try:
item_info = self.case_suite_page.GetPyData(ht_item)
if 0== self.case_suite_page.GetChildrenCount(ht_item):
if os.path.isdir(item_info['path_name']):
self.add_item_to_subfolder_in_tree(ht_item)
except Exception as e:
pass
def build_session_tab(self):
if self.session_page.RootItem:
self.session_page.DeleteAllItems()
session_path = os.path.abspath(self.ini_setting.get('dash','session_path'))
self.session_path= session_path
if not os.path.exists(session_path):
session_path= os.path.abspath(os.path.curdir)
base_name = os.path.basename(session_path)
sessions = {}
root =self.session_page.AddRoot(base_name)
item_info = wx.TreeItemData({'path_name':session_path})
self.session_page.SetItemData(root, item_info)
self.session_page.Expand(root)
item_list = get_folder_item(session_path)
session_files=[]
for item in item_list:
if os.path.isfile('{}/{}'.format(session_path,item)) and '{}'.format(item).lower().strip().endswith('.csv'):
session_files.append(item)
for csv_file in sorted(session_files):
try:
ses_in_bench = load_bench(os.path.abspath('{}/{}'.format(session_path, csv_file)))
for bench in ses_in_bench:
for ses in ses_in_bench[bench]:
if ses_in_bench[bench][ses].has_key('login_step') and ses_in_bench[bench][ses]['login_step'].strip() not in ['', None]:
ses_in_bench[bench][ses].update(
{'login_step': os.path.abspath('{}/{}'.format(session_path, ses_in_bench[bench][ses]['login_step'].strip()))}
)
sessions.update(ses_in_bench)
except Exception as e:
pass
root =self.session_page.GetRootItem()
for file_name in sorted(sessions.keys()):
item_name = os.path.basename(file_name)
item_info = wx.TreeItemData({'file_name':file_name})
new_bench = self.session_page.InsertItem(root, root, item_name)
self.case_suite_page.SetItemData(new_bench, item_info)
for ses in sorted(sessions[file_name]):
item_name = ses
item_info = wx.TreeItemData({'attribute':sessions[file_name][ses]})
new_item = self.session_page.InsertItem(new_bench, new_bench, item_name)
self.case_suite_page.SetItemData(new_item, item_info)
self.session_page.Expand(root)
first_child = self.session_page.GetFirstChild(root)
self.session_page.Expand(first_child[0])
    def on_LeftDClick_in_Session_tab(self, event):
        """Open a SessionTab for the double-clicked session, choosing a tab/global
        name that collides neither with open tabs nor with existing globals, then
        register the session object for command-line use and script recording."""
        event.Skip()
        ses_name = self.session_page.GetItemText(self.session_page.GetSelection())
        self.session_page.GetItemText(self.session_page.GetSelection())
        session_attribute = self.session_page.GetItemData(self.session_page.GetSelection())
        if session_attribute.Data.has_key('attribute'):
            info(session_attribute.Data['attribute'])
        # Deduplicate against already-open tabs: name, name_1, name_2, ...
        counter =1
        original_ses_name = ses_name
        while ses_name in self.tabs_in_edit_area:
            ses_name= '{}_{}'.format(original_ses_name,counter)
            counter+=1
        if globals().has_key(ses_name):
            # Name already taken in globals: try the '_name' variant once,
            # otherwise give up and ask the user to rename the session.
            if not globals().has_key('_{}'.format(ses_name)):
                info("variable '{}' is existed in global, change the name to _{}".format(ses_name, ses_name))
                ses_name='_{}'.format(ses_name)
                self.session_page.SetItemText(self.session_page.GetSelection(), ses_name)
            else:
                error(("variable '{}' is existed in global, please change the name".format(ses_name)))
                return
        new_page = SessionTab(self.edit_area, ses_name, session_attribute.Data['attribute'], self.sequence_queue, log_path=self.log_path)
        window_id = self.edit_area.AddPage(new_page, ses_name)
        index = self.edit_area.GetPageIndex(new_page)
        self.edit_area.SetSelection(index)
        self.tabs_in_edit_area.append(ses_name)
        self.sessions_alive.update({ses_name: new_page.name})
        attribute = session_attribute.Data['attribute']
        # Placeholder path; add_new_session_to_globals rewrites it to the real
        # log_path variable when generating the replay script.
        log_path='a_fake_log_path_for_auto_script'
        attribute['log_path']=log_path
        self.add_new_session_to_globals(new_page, '{}'.format(attribute))
        #globals().update({ses_name: new_page.session})
def add_new_session_to_globals(self, new_page, args_str):
if globals().has_key(new_page.name):
if globals()[new_page.name]==None:
pass
else:
error('{} already '.format(new_page.name))
else:
globals().update({new_page.name: new_page})
self.add_cmd_to_sequence_queue('{} = dut.dut(name= "{}", **{})'.format(new_page.name,new_page.name,args_str.replace("'a_fake_log_path_for_auto_script'",'log_path').replace("'not_call_open': True,", "'not_call_open': False,") ), 'dut')
#session = dut(name, **attributes)
    def on_command_enter(self, event):
        """Parse the text in the command box as 'module[.class].function args...'
        and invoke it on a worker thread so the GUI stays responsive."""
        info('called on_command_enter')
        cmd = self.m_command_box.GetValue()
        self.m_command_box.Clear()
        if cmd.strip()=='':
            return
        module,class_name, function,args = parse_command_line(cmd)
        #args[0]=self.sessions_alive['test_ssh'].session
        if module !='' or class_name!='' or function!='':
            # call_function_in_module resolves names against globals() (where
            # live sessions are registered) and returns the code string to record.
            instance_name, function_name, new_argvs, new_kwargs, str_code = call_function_in_module(module,class_name,function,args, globals())
            call_function = None
            if class_name!="":
                call_function = getattr(instance_name, function_name)
                #(*new_argvs,**new_kwargs)
            else:
                call_function = instance_name#(*new_argvs,**new_kwargs)
            # Run in a background thread: a long command must not freeze the GUI.
            th =threading.Thread(target=call_function, args=new_argvs, kwargs=new_kwargs)
            th.start()
            self.add_cmd_to_history(cmd, module, str_code)
        else:
            error('"{}" is NOT a valid call in format:\n\tmodule.class.function call or \n\tmodule.function'.format(cmd))
def add_src_path_to_python_path(self, path):
paths = path.split(';')
old_path = sys.path
for p in paths:
if p in old_path:
info('path {} already in sys.path'.format(p))
else:
abspath = os.path.abspath(p)
if os.path.exists(abspath):
sys.path.insert(0,abspath)
else:
warn('path {} is not existed, ignored to add it into sys.path'.format(p))
def on_key_down(self, event):
#error(event.KeyCode)
keycode = event.KeyCode
if keycode ==wx.WXK_TAB:
self.m_command_box.AppendText('\t')
self.on_command_enter(event)
elif keycode == wx.PAPER_ENV_INVITE and wx.GetKeyState(wx.WXK_SHIFT):
self.m_command_box.AppendText('?')
self.on_command_enter(event)
else:
event.Skip()
def on_key_up(self, event):
keycode = event.KeyCode
increase =False
if keycode ==wx.WXK_UP:
pass
elif keycode ==wx.WXK_DOWN:
increase =True#
if keycode in [wx.WXK_UP, wx.WXK_DOWN]:
self.m_command_box.Clear()
self.history_cmd_index, new_command = get_next_in_ring_list(self.history_cmd_index,self.history_cmd,increase=increase)
self.m_command_box.AppendText(new_command)
if keycode in [wx.WXK_TAB]:
pass
else:
event.Skip()
def add_cmd_to_history(self, cmd, module_name, str_code):
if self.history_cmd==[]:
self.history_cmd.append(cmd)
elif self.history_cmd[-1]==cmd:
pass
else:
self.history_cmd.append(cmd)
self.history_cmd_index= len(self.history_cmd)
self.add_cmd_to_sequence_queue(str_code,module_name )
#self.sequence_queue.put([cmd, datetime.now()])
def build_function_tab(self):
src_path = os.path.abspath(self.src_path)
if not os.path.exists(src_path):
src_path= os.path.abspath(os.path.curdir)
base_name = os.path.basename(src_path)
root =self.function_page.AddRoot(base_name)
item_info = wx.TreeItemData({'name':src_path})
self.function_page.SetItemData(root, item_info)
modules = get_folder_item(src_path)
if modules is None:
self.function_page.SetItemText(root, self.function_page.GetItemText(root) + ' Not Exists!!!')
self.function_page.SetItemTextColour(root, wx.Colour(255, 0, 0))
return
for module_file in modules:
path_name = '{}'.format(os.path.abspath(self.src_path))
module_name = os.path.basename(module_file).split('.')[0]
new_module = self.function_page.InsertItem(root, root, module_name)
file, path_name, description = imp.find_module(module_name)
lmod = imp.load_module(module_name, file, path_name,description)
for attr in sorted(dir(lmod)):
if attr.startswith('__'):
continue
attr_obj = getattr(lmod, attr)
attr_type = type(attr_obj)
if attr_type == types.FunctionType :
new_item = self.function_page.InsertItem(new_module, new_module, '{}'.format( attr))
item_info = wx.TreeItemData({'name':'{}.{}'.format(module_name,attr)})
self.function_page.SetItemData(new_item, item_info)
elif attr_type== types.TypeType:
class_obj = getattr(lmod, attr)
new_class = self.function_page.InsertItem(new_module, new_module, attr)
item_info = wx.TreeItemData({'name':'{}.{}'.format(module_name,attr)})
self.function_page.SetItemData(new_item, item_info)
for attr_in_class in sorted(dir(class_obj)):
if attr_in_class.startswith('__'):
continue
attr_obj = getattr(class_obj,attr_in_class)
attr_type =type(attr_obj)
if attr_type == types.MethodType :
item_info = wx.TreeItemData({'name':'{}.{}.{}'.format(module_name,attr,attr_in_class)})
new_item = self.function_page.InsertItem(new_class, new_class, attr_in_class)
self.function_page.SetItemData(new_item, item_info)
self.function_page.Expand(root)
first_child = self.function_page.GetFirstChild(root)
self.function_page.Expand(first_child[0])
def on_LeftDClick_in_Function_tab(self,event):
event.Skip()
select_item = self.function_page.GetSelection()
fun_name = self.function_page.GetItemData(select_item)
text_in_tree = self.function_page.GetItemText(select_item)
if fun_name != None and fun_name.Data.has_key('name'):
cmd = fun_name.Data['name']
info('click item in Functions tab: {}'.format(fun_name.Data['name']))
wx.CallAfter(self.m_command_box.Clear)
wx.CallAfter(self.m_command_box.AppendText, cmd+' ')
wx.CallAfter(self.m_command_box.SetFocus)
wx.CallAfter(self.m_command_box.SetInsertionPointEnd)
wx.CallAfter(self.m_command_box.Refresh)
def on_refresh_case_page(self, event):
self.case_suite_page.DeleteAllItems()
self.build_suite_tree()
info('Refresh Case tab done!')
def on_right_down_in_session_tab(self, event):
menu = wx.Menu()
item = wx.MenuItem(menu, wx.NewId(), "Refresh")
#acc = wx.AcceleratorEntry()
#acc.Set(wx.ACCEL_NORMAL, ord('O'), self.popupID1)
#item.SetAccel(acc)
menu.AppendItem(item)
self.Bind(wx.EVT_MENU, self.on_refresh_session_page,item)
self.PopupMenu(menu,event.GetPosition())
def on_refresh_session_page(self, event):
self.session_page.DeleteAllItems()
self.build_session_tab()
info('Refresh Session tab done!')
def on_right_down_in_function_tab(self, event):
menu = wx.Menu()
item = wx.MenuItem(menu, wx.NewId(), "Refresh")
#acc = wx.AcceleratorEntry()
#acc.Set(wx.ACCEL_NORMAL, ord('O'), self.popupID1)
#item.SetAccel(acc)
menu.AppendItem(item)
self.Bind(wx.EVT_MENU, self.on_refresh_function_page,item)
self.PopupMenu(menu,event.GetPosition())
def on_refresh_function_page(self, event):
self.function_page.DeleteAllItems()
self.build_function_tab()
info('Refresh Function tab done!')
def add_cmd_to_sequence_queue(self, cmd, module_name):
if self.import_modules.has_key(module_name):
pass
else:
self.import_modules.update({module_name:module_name})
self.sequence_queue.put([cmd,datetime.now() ])
    def generate_code(self, file_name ):
        """Replay the queued command history into a standalone Python script
        appended to *file_name*: imports, each recorded command inside a
        try block, and close_session() calls for every recorded dut session
        on both the success and failure paths."""
        str_code ="""#created by DasH
if __name__ == "__main__":
    import sys, traceback
    sys.path.insert(0,r'{}')
    sys.path.insert(0,r'{}')
    import lib.common
    log_path= '../log/tmp'
    log_path= lib.common.create_case_folder()
    try:
""".format(self.src_path,self.lib_path )
        sessions =[]
        for module in self.import_modules:
            str_code+='        import {mod}\n'.format(mod=module)#\n        {mod}_instance = {mod}()
        no_operation = True
        # Drain the queue; Queue.get(block=False) raises Empty when done.
        while True:
            try:
                cmd, timestamp =self.sequence_queue.get(block=False)[:2]
                str_code +='        {} #{}\n'.format(cmd, timestamp.isoformat( ' '))
                # A 'x = dut.dut(...)' line opened a session: remember the
                # variable name so we can emit x.close_session() later.
                if cmd.find('dut.dut(')!=-1:
                    sessions.append(cmd.split('=')[0].strip())
                no_operation=False
                #datetime.now().isoformat()
            except Exception as e:
                break
        close_session=''
        str_code+='''    except Exception as e:
        print(traceback.format_exc())\n'''
        for ses in sessions:
            str_code+='''        {}.close_session()\n'''.format(ses)
        str_code+='        sys.exit(-1)\n'#, sys.exit(-1)
        for ses in sessions:
            str_code+='''    {}.close_session()\n'''.format(ses)
        info(str_code)
        # Only write a script when at least one command was recorded.
        if not no_operation:
            with open(file_name, 'a+') as f:
                f.write(str_code)
def on_right_down_in_case_tab(self, event):
menu = wx.Menu()
item1 = wx.MenuItem(menu, wx.NewId(), "Run Test")
item2 = wx.MenuItem(menu, wx.NewId(), "Kill Test")
item3 = wx.MenuItem(menu, wx.NewId(), "Refresh")
#acc = wx.AcceleratorEntry()
#acc.Set(wx.ACCEL_NORMAL, ord('O'), self.popupID1)
#item.SetAccel(acc)
menu.AppendItem(item1)
menu.AppendItem(item2)
menu.AppendItem(item3)
self.Bind(wx.EVT_MENU, self.on_run_script,item1)
self.Bind(wx.EVT_MENU, self.on_kill_script,item2)
self.Bind(wx.EVT_MENU, self.on_refresh_case_page,item3)
self.PopupMenu(menu,event.GetPosition())
    def on_kill_script(self,event):
        """Kill (or just record the result of) the process attached to the
        selected CASE node; also used by on_run_script to settle a previous run."""
        hit_item = self.case_suite_page.GetSelection()
        item_name = self.case_suite_page.GetItemText(hit_item)
        item_data = self.case_suite_page.GetItemData(hit_item).Data
        if item_data.has_key('PROCESS'):
            p = item_data['PROCESS']
            name= item_data['FULL_NAME']
            info('script:{}, returncode:{}'.format(name,p.returncode))
            if p.returncode is None:
                # Still running: record KILL, mail the updated report, then terminate.
                #if p.is_alive():
                info('Terminate alive process {}:{}'.format(item_name, p.pid))
                result ='KILL'
                self.update_case_status(p.pid, result)
                self.mail_test_report("DASH TEST REPORT-updating")
                p.terminate()
            else:
                # Already finished: just record PASS/FAIL from the exit code.
                result ='FAIL' if p.returncode else 'PASS'
                info('{}:{} completed with returncode {}'.format(item_name, p.pid, result))
                self.update_case_status(p.pid, result)
def run_script(self, script_name):
old_script_name = script_name
lex = shlex.shlex(script_name)
lex.quotes = '"'
lex.whitespace_split = True
script_name_and_args = list(lex)
script_args = script_name_and_args[1:]
script_name = script_name_and_args[0]
if script_name.find(os.path.sep)!=-1:
pass
else:
script_name= '{}/{}'.format(self.suite_path,script_name)
from lib.common import create_case_folder
old_sys_argv = sys.argv
sys.argv= [script_name]+script_args
case_log_path = self.log_path #create_case_folder()
sys.argv= old_sys_argv
try:
if os.path.exists('script_runner.exe'):
execute = 'script_runner.exe'
cmd = [execute,script_name ]+script_args + ['-l','{}'.format(case_log_path)]
#p=subprocess.Popen(cmd, creationflags = subprocess.CREATE_NEW_CONSOLE)
else:
cmd = [sys.executable,'./script_runner.py', script_name ]+script_args+ ['-l','{}'.format(case_log_path)]
p=subprocess.Popen(cmd, creationflags = subprocess.CREATE_NEW_CONSOLE)#, stdin=pipe_input, stdout=pipe_output,stderr=pipe_output)
self.add_new_case_to_report(p.pid, old_script_name, p, case_log_path)
except:
error(traceback.format_exc())
return p, case_log_path
    def on_run_script(self,event):
        """Run the selected CASE node: .txt/.csv files are treated as test
        suites (their cases are queued), anything else runs as a single script."""
        hit_item = self.case_suite_page.GetSelection()
        item_name = self.case_suite_page.GetItemText(hit_item)
        item_data = self.case_suite_page.GetItemData(hit_item).Data
        script_name = self.case_suite_page.GetItemData(hit_item).Data['path_name']
        if script_name.lower().split('.')[-1] in ['txt','csv']:#test suite file, not a single script
            self.run_a_test_suite(script_name)
        else:#a single test case
            # Settle/kill any previous run attached to this node first.
            self.on_kill_script(event)
            try:
                p, case_log_path = self.run_script('{} {}'.format(script_name, item_name))
                # Remember process and name on the node so Kill Test can find them.
                self.case_suite_page.GetItemData(hit_item).Data['PROCESS']=p
                self.case_suite_page.GetItemData(hit_item).Data['FULL_NAME']= item_name
                info('start process {} :{}'.format(item_name, p.pid))
                #p.join() # this blocks until the process terminates
                time.sleep(1)
            except Exception as e :
                error(traceback.format_exc())
        #p = Process(target=run_script, args=[script_name, script_and_args])
        #p.start()
def check_case_status(self):
self.check_case_running_status_lock.acquire()
changed=False
running_case = 0
for pid in self.dict_test_report.keys():
case_name, start_time, end_time, duration, return_code ,proc, log_path= self.dict_test_report[pid]
if return_code is None:
if proc.poll() is None:
running_case+=1
debug('RUNNING', start_time, end_time, duration, return_code ,proc, log_path)
else:
changed=True
return_code = 'FAIL' if proc.returncode else 'PASS'
self.update_case_status(pid,return_code)
if running_case:
pass
elif not self.case_queue.empty():#self.case_queue.qsize():
case_name_with_args = self.case_queue.get()
p, case_log_path = self.run_script(case_name_with_args)
self.check_case_running_status_lock.release()
if changed:
#test_report = self.generate_report(filename='{}/dash_report.txt'.format(self.log_path))
self.mail_test_report('DasH Test Report-updating')
return changed
def polling_running_cases(self):
while True:
time.sleep(10)
try:
if not self.alive:
break
except:
break
#self.check_case_running_status_lock.acquire()
self.check_case_status()
#self.check_case_running_status_lock.release()
def add_new_case_to_report(self, pid, case_name, proc, log_path):
    """Register a freshly started case in the report table.

    The record layout is [case_name, start_time, end_time, duration,
    return_code, proc, log_path]; return_code None marks it as running.

    Args:
        pid: OS process id used as the report key.
        case_name: script name (with args) being executed.
        proc: the Popen-like process handle.
        log_path: folder holding this case's log output.
    """
    start_time = datetime.now()
    record = [case_name, start_time, None, 0, None, proc, log_path]
    # BUGFIX: the record is a plain list, which has no .update() method --
    # the old "existing pid" branch raised AttributeError.  A pid reuse
    # simply starts a fresh record, which is what the old code intended.
    self.dict_test_report[pid] = record
def update_case_status(self, pid, return_code=None):
    """Finalize a case record: stamp its duration and result.

    A record whose return_code is already set is left untouched, so a
    case result is never overwritten once recorded.
    """
    case_name, start_time, end_time, duration, existing_code, proc, log_path = self.dict_test_report[pid]
    if existing_code is not None:
        return  # don't update one case result twice
    elapsed = (datetime.now() - start_time).total_seconds()
    self.dict_test_report[pid] = [case_name, start_time, end_time, elapsed, return_code, proc, log_path]
def mail_test_report(self, subject="DASH TEST REPORT-updating"):
    """Generate the current report and mail it to the configured list.

    Any failure (report generation or SMTP) is logged and swallowed so a
    broken mail setup never kills the caller (polling threads, close).
    """
    try:
        report_body = self.generate_report(filename='{}/dash_report.txt'.format(self.log_path))
        # signature: TO, SUBJECT, TEXT, SERVER, FROM
        send_mail_smtp_without_login(self.mail_to_list, subject, report_body, self.mail_server, self.mail_from)
    except Exception as e:
        error(traceback.format_exc())
def on_mail_test_report(self, event):
    """Menu handler: mail the current (still-updating) test report."""
    self.mail_test_report('DasH Test Report-updating')
def on_handle_request_via_mail(self):
    """Poll the IMAP inbox once and execute DasH commands sent by mail.

    Commands are encoded in the mail *subject* (see support_list below);
    only senders listed in self.mail_to_list are honored.  Handled mail is
    re-fetched with RFC822 (which marks it seen); unmatched mail has its
    SEEN flag cleared so it is picked up again by a human.
    """
    import imaplib
    from email.parser import Parser
    def process_multipart_message(message):
        # Flatten a (possibly nested) multipart message into one string.
        # NOTE: basestring exists only on Python 2.
        if isinstance(message, basestring) or isinstance(message , list):
            return message
        rtn = ''
        try:
            if message.is_multipart():
                for m in message.get_payload():
                    rtn += process_multipart_message(m)
            else:
                rtn += message.get_payload()
        except Exception as e:
            pass
        return rtn
    url, user, password = self.mail_read_url,self.mail_user, self.mail_password
    conn = imaplib.IMAP4_SSL(url,993)
    conn.login(user,password)
    conn.select('INBOX')#, readonly=True)
    try:
        # the recipient list doubles as the authorized-senders whitelist
        authorized_mail_address = self.mail_to_list.replace(',',';').split(';')
    except Exception as e:
        return
    for mail_address in authorized_mail_address:
        results,data = conn.search(None,'(UNSEEN)', '(FROM "{}")'.format(mail_address)) # #'ALL')
        msg_ids = data[0]
        msg_id_list = msg_ids.split()
        MAX_UNREAD_MAIL = 50
        # newest first, bounded so a flooded inbox cannot stall the poller
        for unread_mail_id in msg_id_list[::-1][:MAX_UNREAD_MAIL]:
            # PEEK at the header only, so merely inspecting does not mark it seen
            result,data = conn.fetch(unread_mail_id,'(BODY.PEEK[HEADER])')#"(RFC822)")#
            raw_email = data[0][1]
            p = Parser()
            msg = p.parsestr(raw_email)
            #msg = process_multipart_message(msg )
            from1 = msg.get('From')
            sub = '{}'.format(msg.get('Subject'))
            sub = sub.strip().lower()
            support_list='''
###############################
mail subject below is supported:
dash-request-case-queue : request the cases in queue which to be executed
dash-request-case : request cases which are under suite_path
dash-request-report : request a test report by now
dash-request-kill-running : to kill all running test cases
dash-request-clear-queue : to clear/remove all cases which are in case queue
dash-request-run : to run script(s), each line is a script with arguments if it has
--------------------------------
***non-case-sensitive***
###############################
'''
            handled =False
            if sub in ['dash']:
                send_mail_smtp_without_login(self.mail_to_list, 'DONE-DasH Support List',support_list,self.mail_server,self.mail_from)
                handled = True
                #conn.uid('STORE', unread_mail_id, '+FLAGS', '\SEEN')
            elif sub in ['dash-request-case-queue']:
                case_in_queue =self.get_case_queue(None)
                send_mail_smtp_without_login(self.mail_to_list, 'DONE-DasH:Case In Queue',case_in_queue+support_list,self.mail_server,self.mail_from)
                #conn.uid('STORE', unread_mail_id, '+FLAGS', '\SEEN')
                handled = True
            elif sub in ['dash-request-case']:
                cases_string = '\n\t'.join(self.case_list)
                send_mail_smtp_without_login(self.mail_to_list, 'DONE-DasH:Case List',cases_string+support_list,self.mail_server,self.mail_from)
                handled = True
                #conn.uid('STORE', unread_mail_id, '+FLAGS', '\SEEN')
            elif sub in ['dash-request-report']:
                self.mail_test_report('DasH Test Report-requested')
                #conn.uid('STORE', unread_mail_id, '+FLAGS', '\SEEN')
                handled = True
            elif sub in ['dash-request-kill-running']:
                killed= self.on_kill_running_case()
                send_mail_smtp_without_login(self.mail_to_list, 'DONE-[DasH]:Killed Running Case(s)',killed+support_list,self.mail_server,self.mail_from)
                handled = True
                #conn.uid('STORE', unread_mail_id, '+FLAGS', '\SEEN')
            elif sub in ['dash-request-clear-queue']:
                case_in_queue = self.on_clear_case_queue()
                send_mail_smtp_without_login(self.mail_to_list, 'DONE-DasH:Clear Case Queue',case_in_queue+support_list,self.mail_server,self.mail_from)
                handled = True
                #conn.uid('STORE', unread_mail_id, '+FLAGS', '\SEEN')
            elif sub in ['dash-request-run']:
                #if from1 in ['dash@calix.com', 'yu_silence@163.com',self.mail_to_list]:
                conn.uid('STORE', unread_mail_id, '+FLAGS', r'(\SEEN)')
                handled = True
                #conn.uid('STORE', '-FLAGS', '(\Seen)')
                # NOTE(review): only the header was fetched above, so this
                # payload may be empty for body-only run requests -- confirm.
                payload = msg.get_payload()
                payload = process_multipart_message(payload )
                from lib.html2text import html2text
                txt = html2text(payload)
                cases = txt.replace('\r\n','\n').split('\n')
                # each non-comment, non-empty line is one case to queue/run
                for line in cases:
                    line = line.strip()
                    if line.strip().startswith('#') or len(line)==0:
                        pass
                    else:
                        type_case, case_name, args = self.check_case_type(line)
                        if type_case in ['txt','csv']:
                            self.run_a_test_suite(line)
                        else:
                            self.case_queue.put(line)
                            info('adding case to queue: {}'.format(line))
                result,data = conn.fetch(unread_mail_id,'(RFC822)')#"(RFC822)")#
            else:
                # unknown subject: make sure the mail stays unread
                conn.uid('STORE', unread_mail_id, '-FLAGS', r"(\SEEN)")
            #fixed : 2017-09-25 failed to set unmatched mail to unread, to fetch it again with RFC822
            if handled:
                # full RFC822 fetch marks the handled message as seen
                result,data = conn.fetch(unread_mail_id,'(RFC822)')#"(RFC822)")#
def check_case_type(self, str_line):
    """Split a command line into (extension, script_name, args).

    Double-quoted tokens are kept together; the extension is the lowered
    text after the script name's last dot (e.g. 'py', 'csv', 'txt').
    """
    tokenizer = shlex.shlex(str_line)
    tokenizer.quotes = '"'
    tokenizer.whitespace_split = True
    tokens = list(tokenizer)
    script = tokens[0]
    extension = script.lower().split('.')[-1]
    return extension, script, tokens[1:]
def polling_request_via_mail(self):
    """Background thread body: every 10s process mailbox commands until
    the frame reports it is no longer alive."""
    while True:
        time.sleep(10)
        try:
            if not self.alive:
                break
        except:
            # frame already torn down -- stop polling
            break
        try:
            self.on_handle_request_via_mail()
        except Exception as e:
            # mail server hiccups are logged, never fatal
            error(traceback.format_exc())
def get_case_queue(self, item=None):
    """Return (and log) a printable summary of the pending case queue."""
    queued = list(self.case_queue.queue)
    count = len(queued)
    if count:
        summary = '\ntotal {} case(s) in Queue\n'.format(count) + '\n'.join('{}'.format(c) for c in queued)
    else:
        summary = '\nNo Case in Queue'
    info('Case(s) in Queue', summary)
    return summary
def on_clear_case_queue(self, event=None):
    """Empty the pending-case queue; returns the pre-clear summary."""
    summary_before = self.get_case_queue(None)
    self.case_queue.queue.clear()
    self.get_case_queue(None)  # log the now-empty queue
    return summary_before
def on_kill_running_case(self, event=None):
    """Terminate every still-running test process.

    Each killed case gets its report record finalized with result 'KILL'.

    Returns:
        str: one 'name:pid' line per killed case (empty if none ran).
    """
    killed_case = ''
    for case in self.dict_test_report:
        # BUGFIX: index the record for this case -- the original sliced the
        # dict itself (self.dict_test_report[:7]), a TypeError on any call.
        case_name, start_time, end_time, duration, return_code, proc, log_path = self.dict_test_report[case][:7]
        if return_code is None:
            if proc.poll() is None:
                killed_case += '{}:{}\n'.format(case_name, proc.pid)
                info('Terminate alive process {}:{}'.format(case_name, proc.pid))
                result = 'KILL'
                # record the forced result before terminating the process
                self.update_case_status(proc.pid, result)
                proc.terminate()
    info('Killed All Running cases', killed_case)
    return killed_case
def run_a_test_suite(self, csv_file_name, clear_queue=False, kill_running=False):
    """Queue every case listed in a suite file.

    Args:
        csv_file_name: suite file name, optionally followed by arguments
            ('suite.csv argA argB').  Occurrences of {1}, {2}, ... in a
            case row are replaced by those arguments.
        clear_queue: drop any already-queued cases first.
        kill_running: terminate running cases first.
    """
    try:
        case_type, suite_file_name, args = self.check_case_type(csv_file_name)
        if clear_queue:
            self.on_clear_case_queue()
        if kill_running:
            self.on_kill_running_case()
        import csv
        if suite_file_name.find(os.path.sep) == -1:
            # bare file name: resolve against the configured suite folder
            suite_file_name = '{}/{}'.format(self.suite_path, suite_file_name)
        # BUGFIX: pad once, outside the row loop, so args[1] maps to {1};
        # the old per-row insert shifted the mapping on every later row.
        args.insert(0, 0)
        with open(suite_file_name) as bench:
            reader = csv.reader(bench, delimiter=',')
            for row in reader:
                if len(row) < 1:
                    continue
                name = row[0]
                for index in range(1, len(args)):
                    # BUGFIX: build the placeholder '{1}', '{2}', ... ;
                    # '{{index}}'.format(index=index) always produced the
                    # literal '{index}', so arguments were never substituted.
                    name = name.replace('{{{}}}'.format(index), '{}'.format(args[index]))
                self.case_queue.put(name)
                info('adding case to queue: {}'.format(name))
    except Exception as e:
        error(traceback.format_exc())
#done: 2017-08-22, 2017-08-19 save main log window to a file
#todo: 2017-08-19 add timestamps to log message
#done: 2017-08-22, 2017-08-19 mail to someone
#todo: 2017-08-19 run a script in DasH
#todo: 2017-08-19 generate test report
#todo: 2017-08-19 publish all test cases in a web page
#todo: 2017-08-19 trigger a test remote via web page
#todo: 2017-08-19 re-run failed cases
#todo: 2017-08-19 build executable packege for DasH
#todo: 2017-08-19 a popup window to get email address/password/mail_server...
#todo: 2017-08-22 output in m_log window has a lot of empty line, need remove them
#todo: 2017-08-23 in common.call_function_in_module, should end all threads which are started in previous instance
#todo: 2017-08-23 add tips for all tree item in teh left
#todo: add duration in form of DD:HH:MM:SS in test report
#!/usr/bin/python
'''The MIT License (MIT)
Copyright (c) 2017 Yu Xiong Wei(try.dash.now@gmail.com)
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.'''
__author__ = 'sean yu (Yu, Xiongwei)'
__doc__ = '''
it's GUI of DasH aka Do as Human
created 2017-05-06 by Sean Yu
'''
from datetime import datetime
import wx.grid as gridlib
import traceback
import wx
from gui.MainFrame import MainFrame
import os
from lib.common import load_bench, caller_stack_info,info, get_next_in_ring_list,get_folder_item, info,debug, warn, error, parse_command_line, call_function_in_module
import re
import time
import threading
import ConfigParser
import sys
import inspect
import Queue
from SessionTab import SessionTab
import imp
import types
from lib.common import send_mail_smtp_without_login
from lib.common import run_script
from multiprocessing import Process
import subprocess
import shlex
#from dut import dut
class RedirectText(object):
    """Tee-style stdout/stderr replacement.

    Mirrors every write to the real stdout, a wx text control (with
    red/yellow highlighting for failure-ish lines) and an optional
    dash.log file.  Writes are serialized with a lock because multiple
    worker threads print concurrently.
    """
    font_point_size = 10
    old_stdout = None   # real sys.stdout captured at construction
    old_stderr = None   # real sys.stderr captured at construction
    write_lock = None   # serializes write() across threads
    log_file = None     # optional persistent log file

    def __init__(self, aWxTextCtrl, log_path=None):
        # remember the real streams so on_close can restore them
        self.old_stderr, self.old_stdout = sys.stderr, sys.stdout
        self.out = aWxTextCtrl
        self.font_point_size = self.out.GetFont().PointSize
        self.write_lock = threading.Lock()
        if log_path:
            name = '{}/dash.log'.format(log_path)
            self.log_file = open(name, 'w+')
            # expose fileno so code expecting a real file object still works
            self.fileno = self.log_file.fileno

    def write(self, string):
        self.write_lock.acquire()
        try:
            self.old_stdout.write(string)
            #string = string.replace('\\033\[[0-9\;]+m', '')
            #self.old_stderr.write(string)
            # failure-ish lines stand out in red on yellow, slightly larger
            if re.search('error|\s+err\s+|fail|wrong', string.lower()):
                self.out.SetDefaultStyle(wx.TextAttr(wx.RED, wx.YELLOW, font=wx.Font(self.font_point_size + 2, family=wx.DEFAULT, style=wx.NORMAL, weight=wx.BOLD, faceName='Consolas')))
            else:
                self.out.SetDefaultStyle(wx.TextAttr(wx.GREEN, wx.BLACK, font=wx.Font(self.font_point_size, family=wx.DEFAULT, style=wx.NORMAL, weight=wx.NORMAL, faceName='Consolas')))
            # AppendText must run on the GUI thread
            wx.CallAfter(self.out.AppendText, string)
            if self.log_file:
                self.log_file.write(string)
                self.log_file.flush()
        finally:
            # BUGFIX: release in finally -- an exception while writing used
            # to leave the lock held and deadlock every later write().
            self.write_lock.release()

    def close(self):
        if self.log_file:
            self.log_file.flush()
            self.log_file.close()
class process_info(object):
    """Lightweight record tying a launched process to its display name.

    The return code is always read live from the underlying process via
    the ``returncode`` property, so callers see status changes as soon as
    the process finishes.
    """
    process = None    # the Popen-like object being tracked
    pid = None        # cached OS process id
    full_name = None  # human-readable script/case name

    def __init__(self, name, process):
        self.process = process
        self.pid = process.pid
        self.full_name = name
        # BUGFIX: the original also did ``self.returncode = process.returncode``
        # here, which raised AttributeError because ``returncode`` is a
        # read-only property -- the property below already provides it.

    @property
    def returncode(self):
        return self.process.returncode
class FileEditor(wx.Panel):
    """Editor page hosted in the AUI notebook.

    type 'text' wraps a wx.TextCtrl loaded from full_file_name; any other
    type creates a 50x5 grid ('Function Name' + arg columns).  Ctrl+wheel
    zooms the font in either mode.
    """
    editor = None          # wx.TextCtrl or wx.grid.Grid, set in __init__
    font_size = 10         # current zoom level (points)
    parent = None
    type = None            # 'text' or 'grid'
    sessions_node = None
    function_node = None
    case_suite_node = None
    full_file_name = None  # backing file; saved on close when set
    file_instance = None
    def on_close(self):
        # Persist the edited text back to its file.
        # NOTE(review): GetValue() only exists on the TextCtrl flavor; a
        # grid-backed page with a full_file_name would fail here -- confirm
        # grid pages are never given a file to save.
        if self.full_file_name:
            data = self.editor.GetValue()
            with open(self.full_file_name, 'w') as f:
                f.write(data)
                f.flush()
    #done 2017-9-12: handle close tab in edit_area
    def __init__(self, parent, title='pageOne', type='grid', file_name=None):
        wx.Panel.__init__(self, parent)
        self.parent = parent
        self.type = type
        self.full_file_name = file_name
        #self.editor = wx.TextCtrl(self, style = wx.TE_MULTILINE|wx.TE_RICH2|wx.EXPAND|wx.ALL, size=(-1,-1))
        if type in ['text']:
            self.editor = wx.TextCtrl(self, -1, wx.EmptyString, wx.DefaultPosition, wx.DefaultSize, wx.TE_AUTO_URL | wx.VSCROLL | wx.TE_RICH | wx.TE_MULTILINE & (~wx.TE_PROCESS_ENTER))
            #wx.richtext.RichTextCtrl( self, -1, wx.EmptyString, wx.DefaultPosition, wx.DefaultSize, 0|wx.VSCROLL|wx.HSCROLL|wx.NO_BORDER|wx.WANTS_CHARS )
            # load the file's content into the text control line by line
            with open(self.full_file_name, 'r') as f:
                for line in f.readlines():
                    self.editor.AppendText(line)
        else:
            # grid mode: column 0 is the function name, the rest are args
            self.editor = gridlib.Grid(self)
            self.editor.CreateGrid(50, 5)
            col = self.editor.GetNumberCols()
            row = self.editor.GetNumberRows()
            function_color = 'black'
            arg_color = 'blue'
            for c in range(0, col):
                if c < 1:
                    self.editor.SetColLabelValue(c, 'Function Name')
                else:
                    self.editor.SetColLabelValue(c, 'arg# {}'.format(c))
                for r in range(0, row):
                    self.editor.SetCellTextColour(r, c, function_color if c < 1 else arg_color)
            # bold the function-name column
            for r in range(0, row):
                self.editor.SetCellFont(r, 0, wx.Font(self.font_size, wx.SWISS, wx.NORMAL, wx.BOLD))
        self.editor.Bind(wx.EVT_MOUSEWHEEL, self.editor_OnMouseWheel)
        sizer = wx.BoxSizer()
        sizer.Add(self.editor, 1, wx.EXPAND)
        self.SetSizer(sizer)
    def editor_OnMouseWheel(self, event):
        # Ctrl+wheel zoom: wheel-down shrinks (by 2, floored at 5),
        # wheel-up grows (by 1).
        min_font_size = 5
        interval_step = 2
        if event.ControlDown():
            pass
        else:
            return
        if event.GetWheelRotation() < 0:
            if self.font_size > min_font_size:
                self.font_size -= interval_step
        else:
            self.font_size += 1
        if self.type in ['text']:
            f = self.editor.GetFont()
            f.PointSize = self.font_size
            self.editor.SetFont(f)
        else:
            # grids have per-cell fonts, so resize every cell
            col = self.editor.GetNumberCols()
            row = self.editor.GetNumberRows()
            for c in range(0, col):
                for r in range(0, row):
                    f = self.editor.GetCellFont(r, c)
                    f.PointSize = self.font_size
                    self.editor.SetCellFont(r, c, f)
        self.Refresh()
        #wx.StaticText(self, -1, "THIS IS A PAGE OBJECT", (20,20))
#wx.StaticText(self, -1, "THIS IS A PAGE OBJECT", (20,20))
#DONE: DasHFrame should handle CLOSE event when closing the app, call on_close_tab_in_edit_area for all opened sessions and files
class DasHFrame(MainFrame):#wx.Frame
    """Main DasH window: navigation trees (sessions/functions/cases) on the
    left, a tabbed editor area, a colored log console and a command box."""
    ini_setting = None               # ConfigParser loaded from gDasH.ini
    #m_left_navigator =None
    redir = None                     # RedirectText capturing stdout/stderr
    edit_area=None                   # AuiNotebook hosting editor/session tabs
    tabs_in_edit_area = None         # names of currently open pages
    src_path = None
    sessions_alive=None              # ses_name -> page name for open sessions
    sequence_queue=None              # recorded commands for script generation
    history_cmd = []                 # NOTE(review): class-level mutable list, shared by all instances
    history_cmd_index = -1
    import_modules={'TC':'TC'}
    lib_path ='./lib'
    log_path = '../log'
    session_path = './sessions'
    suite_path = '../test_suite'
    dict_test_report= None           # pid -> [case, start, end, duration, rc, proc, log]
    alive =True                      # cleared on close; stops polling threads
    mail_server=None
    mail_to_list=None
    mail_from=None
    mail_read_url= 'outlook.office365.com'
    mail_password = None
    mail_usre =None                  # NOTE(review): likely a typo of mail_user (which __init__ sets)
    case_queue =None                 # Queue of case command lines waiting to run
    check_case_running_status_lock = None
    case_list=None                   # every case path discovered in the tree
def __init__(self, parent=None, ini_file='./gDasH.ini'):
    """Build the UI, load settings from *ini_file*, redirect stdio into the
    log pane and start the polling threads (case status + mail requests)."""
    #wx.Frame.__init__(self, None, title="DasH")
    self.case_list = []
    self.case_queue = Queue.Queue()
    self.dict_test_report = {}
    self.check_case_running_status_lock = threading.Lock()
    self.tabs_in_edit_area = []
    self.sessions_alive = {}
    MainFrame.__init__(self, parent=parent)
    self.sequence_queue = Queue.Queue()
    #self.sequence_queue.put()
    # --- configuration ---
    self.ini_setting = ConfigParser.ConfigParser()
    self.ini_setting.read(ini_file)
    self.src_path = os.path.abspath(self.ini_setting.get('dash', 'src_path'))
    self.lib_path = os.path.abspath(self.ini_setting.get('dash', 'lib_path'))
    self.log_path = os.path.abspath(self.ini_setting.get('dash', 'log_path'))
    self.suite_path = os.path.abspath(self.ini_setting.get('dash', 'test_suite_path'))
    self.mail_server = self.ini_setting.get('dash', 'mail_server')
    self.mail_from = self.ini_setting.get('dash', 'mail_from')
    self.mail_to_list = self.ini_setting.get('dash', 'mail_to_list')
    self.mail_read_url = self.ini_setting.get('dash', 'mail_read_url')
    self.mail_user = self.ini_setting.get('dash', 'mail_user')
    self.mail_password = self.ini_setting.get('dash', 'mail_password')
    from lib.common import create_case_folder, create_dir
    # '-l <log_path>' mimics the command-line flag other tools parse
    sys.argv.append('-l')
    sys.argv.append('{}'.format(self.log_path))
    self.log_path = create_case_folder(self.log_path)
    self.suite_path = create_dir(self.suite_path)
    self.lib_path = create_dir(self.lib_path)
    self.src_path = create_dir(self.src_path)
    if not os.path.exists(self.log_path):
        os.mkdir(self.log_path)
    self.add_src_path_to_python_path(self.src_path)
    # --- redirect stdout/stderr into the colored log pane ---
    self.redir = RedirectText(self.m_log, self.log_path)
    sys.stdout = self.redir
    sys.stderr = self.redir
    self.m_log.SetBackgroundColour('Black')
    self.m_log.SetDefaultStyle(wx.TextAttr(wx.GREEN, wx.BLACK, font=wx.Font(9, family=wx.DEFAULT, style=wx.NORMAL, weight=wx.BOLD, faceName='Consolas')))
    #self.m_editor.WriteText('welcome to dash world')
    self.m_log.WriteText('Welcome to DasH!\n')
    # pre-fill the command box with a sample command
    self.m_command_box.WriteText('functions.static_function_in_module test_ssh 2')
    # --- menu ---
    fileMenu = wx.Menu()
    open_test_suite = fileMenu.Append(wx.NewId(), "Open TestSuite", "Open a Test Suite")
    open_test_case = fileMenu.Append(wx.NewId(), "Open TestCase", "Open a Test Case")
    mail_test_report = fileMenu.Append(wx.NewId(), "Mail Test Report", "Mail Test Report")
    get_case_queue = fileMenu.Append(wx.NewId(), "Get Case Queue", "Get Case Queue")  # done
    clear_case_queue = fileMenu.Append(wx.NewId(), "Clear Case Queue", "Clear Case Queue")
    kill_running_case = fileMenu.Append(wx.NewId(), "Kill Running Case(s)", "Kill Running Case(s)")
    self.m_menubar_main.Append(fileMenu, "&Open")
    self.Bind(wx.EVT_MENU, self.on_mail_test_report, mail_test_report)
    self.Bind(wx.EVT_MENU, self.get_case_queue, get_case_queue)
    self.Bind(wx.EVT_MENU, self.on_clear_case_queue, clear_case_queue)
    self.Bind(wx.EVT_MENU, self.on_kill_running_case, kill_running_case)
    self.Bind(wx.EVT_CLOSE, self.on_close)
    self.m_command_box.Bind(wx.EVT_TEXT_ENTER, self.on_command_enter)
    self.m_command_box.Bind(wx.EVT_KEY_UP, self.on_key_up)
    self.m_command_box.Bind(wx.EVT_KEY_DOWN, self.on_key_down)
    # --- left navigator: three trees in an AUI notebook ---
    from wx.aui import AuiNotebook
    bookStyle = wx.aui.AUI_NB_DEFAULT_STYLE & (~wx.aui.AUI_NB_CLOSE_ON_ACTIVE_TAB)
    self.navigator = AuiNotebook(self.m_left_navigator, style=bookStyle)
    self.case_suite_page = wx.TreeCtrl(self.navigator, wx.ID_ANY, wx.DefaultPosition, wx.Size(-1, -1), wx.TR_DEFAULT_STYLE | wx.TR_EDIT_LABELS | wx.TR_EXTENDED | wx.TR_HAS_BUTTONS | wx.TR_HAS_VARIABLE_ROW_HEIGHT | wx.HSCROLL | wx.TAB_TRAVERSAL | wx.VSCROLL | wx.WANTS_CHARS)
    self.function_page = wx.TreeCtrl(self.navigator, wx.ID_ANY, wx.DefaultPosition, wx.Size(-1, -1), wx.TR_DEFAULT_STYLE | wx.TR_EDIT_LABELS | wx.TR_EXTENDED | wx.TR_HAS_BUTTONS | wx.TR_HAS_VARIABLE_ROW_HEIGHT | wx.HSCROLL | wx.TAB_TRAVERSAL | wx.VSCROLL | wx.WANTS_CHARS)
    self.session_page = wx.TreeCtrl(self.navigator, wx.ID_ANY, wx.DefaultPosition, wx.Size(-1, -1), wx.TR_DEFAULT_STYLE | wx.TR_EDIT_LABELS | wx.TR_EXTENDED | wx.TR_HAS_BUTTONS | wx.TR_HAS_VARIABLE_ROW_HEIGHT | wx.HSCROLL | wx.TAB_TRAVERSAL | wx.VSCROLL | wx.WANTS_CHARS)
    self.navigator.AddPage(self.session_page, 'SESSION')
    self.navigator.AddPage(self.function_page, 'FUNCTION')
    self.navigator.AddPage(self.case_suite_page, 'CASE')
    self.edit_area = AuiNotebook(self.m_file_editor, style=wx.aui.AUI_NB_DEFAULT_STYLE)
    if False:  # NOTE(review): deliberately disabled demo page
        new_page = FileEditor(self.edit_area, 'a', type=type)
        self.edit_area.AddPage(new_page, 'test')
        self.tabs_in_edit_area.append(('test'))
    self.edit_area.Enable(True)
    # --- sizers and event wiring ---
    right_sizer = wx.BoxSizer(wx.VERTICAL)
    #right_sizer =wx.GridSizer( 3, 1, 0, 0 )
    left_sizer = wx.BoxSizer(wx.HORIZONTAL)
    left_sizer.Add(self.m_left_navigator, 1, wx.EXPAND)
    self.case_suite_page.Bind(wx.EVT_LEFT_DCLICK, self.m_case_treeOnLeftDClick)
    #self.case_suite_page.Bind(wx.EVT_MOUSEWHEEL, self.case_tree_OnMouseWheel)
    self.case_suite_page.Bind(wx.EVT_TREE_ITEM_EXPANDING, self.m_case_treeOnTreeItemExpanding)
    self.session_page.Bind(wx.EVT_LEFT_DCLICK, self.on_LeftDClick_in_Session_tab)
    self.function_page.Bind(wx.EVT_LEFT_DCLICK, self.on_LeftDClick_in_Function_tab)
    self.function_page.Bind(wx.EVT_RIGHT_DOWN, self.on_right_down_in_function_tab)
    self.case_suite_page.Bind(wx.EVT_RIGHT_DOWN, self.on_right_down_in_case_tab)
    self.session_page.Bind(wx.EVT_RIGHT_DOWN, self.on_right_down_in_session_tab)
    main_sizer = wx.BoxSizer(wx.HORIZONTAL)
    #main_sizer = wx.GridSizer( 1, 2, 0, 0 )
    nav_sizer = wx.BoxSizer()
    nav_sizer.Add(self.navigator, 1, wx.EXPAND, 1)
    self.m_left_navigator.SetSizer(nav_sizer)
    #main_sizer = wx.BoxSizer(wx.HORIZONTAL)
    #main_sizer.Add(left_sizer, 3, wx.EXPAND)
    main_sizer.Add(left_sizer, 2, wx.EXPAND)
    edit_sizer = wx.BoxSizer()
    edit_sizer.Add(self.edit_area, 1, wx.EXPAND, 1)
    self.m_file_editor.SetSizer(edit_sizer)
    right_sizer.Add(self.m_file_editor, 6, wx.ALL | wx.EXPAND, 1)
    right_sizer.Add(self.m_log, 3, wx.ALL | wx.EXPAND, 2)
    right_sizer.Add(self.m_command_box, 0, wx.ALL | wx.EXPAND, 3)
    main_sizer.Add(right_sizer, 8, wx.EXPAND)
    self.SetSizer(main_sizer)
    # --- populate the three navigator trees ---
    self.build_session_tab()
    self.build_suite_tree()
    self.build_function_tab()
    ico = wx.Icon('./gui/dash.bmp', wx.BITMAP_TYPE_ICO)
    self.SetIcon(ico)
    # background workers: case-status polling and mail-command polling
    th = threading.Thread(target=self.polling_running_cases)
    th.start()
    th = threading.Thread(target=self.polling_request_via_mail)
    th.start()
def on_close(self, event):
    """Frame close handler: stop threads, save work, restore stdio.

    Sets self.alive so the polling threads exit, dumps the recorded
    command sequence as a script, mails a final report if any case ran,
    closes every open tab, then restores the real stdout/stderr.
    """
    self.alive = False
    time.sleep(0.01)  # give the polling threads a moment to observe alive=False
    self.generate_code(file_name='{}/test_script.py'.format(self.suite_path))
    if len(self.dict_test_report):
        self.mail_test_report("DASH TEST REPORT")
    for index in range(0, self.edit_area.GetPageCount()):  # len(self.tabs_in_edit_area)):
        closing_page = self.edit_area.GetPage(index)
        if isinstance(closing_page, (SessionTab)):
            if closing_page:
                name = closing_page.name
                self.tabs_in_edit_area.pop(self.tabs_in_edit_area.index(name))
        # every page type persists its state via on_close()
        closing_page.on_close()
    self.redir.close()
    sys.stderr = self.redir.old_stderr
    sys.stdout = self.redir.old_stdout
    event.Skip()
def generate_report(self, filename):
    """Build the test report and append it to *filename*.

    One line per tracked case, sorted by start time; cases without a
    result yet show 'IP' (in progress).

    Returns:
        str: the full report text (also written to the file).
    """
    def GetTime(duration):
        # BUGFIX: plain divmod arithmetic.  The old datetime(1,1,1)+timedelta
        # trick broke for durations longer than a month (month rollover) and
        # printed a stray "DAYS:HOURS:MIN:SEC" line on every record.
        days, rem = divmod(int(duration), 86400)
        hours, rem = divmod(rem, 3600)
        minutes, seconds = divmod(rem, 60)
        return "%d:%d:%d:%d" % (days, hours, minutes, seconds)
    report = '''Test Report
RESULT,\tStart_Time,\tEnd_Time,\tPID,\tDuration(s),\tDuration(D:H:M:S)\tCase_Name,\tLog\n'''
    if len(self.dict_test_report):
        with open(filename, 'a+') as f:
            f.write(report)
            # sort by start_time (index 1 of each record)
            for pi in sorted(self.dict_test_report, key=lambda x: self.dict_test_report[x][1]):
                case_name, start_time, end_time, duration, return_code, proc, log_path = self.dict_test_report[pi][:7]
                result = 'IP' if return_code is None else return_code  # IP = in progress
                record = '\t'.join(['{},\t'.format(x) for x in [result, start_time, end_time, pi, duration, GetTime(duration), case_name, '<{}>'.format(log_path)]])
                report += record + '\n'
                f.write(record + '\n')
    return report
def on_close_tab_in_edit_area(self, event):
    """Close the selected editor tab; for session tabs, also tear down the
    session object that was published into globals()."""
    #self.edit_area.GetPage(self.edit_area.GetSelection()).on_close()
    closing_page = self.edit_area.GetPage(self.edit_area.GetSelection())
    closing_page.on_close()
    if isinstance(closing_page, (SessionTab)):
        ses_name = closing_page.name
        self.tabs_in_edit_area.pop(self.tabs_in_edit_area.index(ses_name))
        # the session was exposed as a global for the command box;
        # close and unpublish it (has_key is Python 2 only)
        if globals().has_key(ses_name):
            #g = dict(globals())
            #globals()[ses_name]=None
            #del g[ses_name]
            globals()[ses_name].close_session()
            del globals()[ses_name]
def add_item_to_subfolder_in_tree(self, node):
    """Populate *node* with one child per entry of its backing folder.

    Directory children are flagged as expandable so they can be filled
    lazily on expand; every discovered path is appended to case_list.
    """
    subfolder_path_name = self.case_suite_page.GetPyData(node)['path_name']
    items = get_folder_item(subfolder_path_name)
    if items is None:
        # BUGFIX: this tree is self.case_suite_page everywhere else in the
        # class; the original referenced a nonexistent self.m_case_tree here.
        self.case_suite_page.SetItemText(node, self.case_suite_page.GetItemText(node) + ' Not Exists!!!')
        self.case_suite_page.SetItemTextColour(node, wx.Colour(255, 0, 0))
        return
    for i in items:
        path_name = '{}/{}'.format(subfolder_path_name, i)
        base_name = os.path.basename(i)
        item_info = wx.TreeItemData({'path_name': path_name})
        self.case_list.append(path_name)
        new_item = self.case_suite_page.InsertItem(node, node, base_name)
        self.case_suite_page.SetItemData(new_item, item_info)
        if os.path.isdir(path_name):
            # mark expandable; children are added lazily on expand
            self.case_suite_page.SetItemHasChildren(new_item)
def build_suite_tree(self):
    """Create the root of the CASE tree from the configured suite path
    (falling back to the current directory) and fill its first level."""
    suite_path = self.suite_path
    if not os.path.exists(suite_path):
        suite_path = os.path.abspath(os.path.curdir)
    root = self.case_suite_page.AddRoot(os.path.basename(suite_path))
    self.case_suite_page.SetItemData(root, wx.TreeItemData({'path_name': suite_path}))
    self.add_item_to_subfolder_in_tree(root)
    self.case_suite_page.Expand(root)
# def OnSelChanged(self, event):
# item = event.GetItem()
# self.display.SetLabel(self.tree.GetItemText(item))
#def case_tree_OnMouseWheel(self, event):
def m_case_treeOnLeftDClick(self, event):
    """Double-click in the case tree: toggle folders, open files.

    Spreadsheet-like files (.csv/.xlsx/.xls) open in a grid editor,
    everything else in a text editor.
    """
    ht_item = self.case_suite_page.GetSelection()
    item_name = self.case_suite_page.GetItemText(ht_item)
    item_data = self.case_suite_page.GetItemData(ht_item)
    if self.case_suite_page.ItemHasChildren(ht_item):
        if self.case_suite_page.IsExpanded(ht_item):
            self.case_suite_page.Collapse(ht_item)
        else:
            self.case_suite_page.ExpandAllChildren(ht_item)
    else:
        file_name = item_data.Data['path_name']
        # BUGFIX: test the file's extension -- the original compared the
        # whole item name to '.csv'/'.xlsx'/'.xls', so spreadsheet files
        # never actually opened in the grid editor.
        if item_name.lower().endswith(('.csv', '.xlsx', '.xls')):
            type = 'grid'
        else:
            type = 'text'
        new_page = FileEditor(self.edit_area, 'a', type=type, file_name=file_name)
        self.edit_area.AddPage(new_page, item_name)
        index = self.edit_area.GetPageIndex(new_page)
        self.edit_area.SetSelection(index)
def m_case_treeOnTreeItemExpanding(self, event):
    """Lazily fill a directory node the first time it is expanded."""
    selected = self.case_suite_page.GetSelection()
    try:
        node_info = self.case_suite_page.GetPyData(selected)
        if self.case_suite_page.GetChildrenCount(selected) == 0:
            if os.path.isdir(node_info['path_name']):
                self.add_item_to_subfolder_in_tree(selected)
    except Exception as e:
        pass  # nodes without path data (or stale selections) are ignored
def build_session_tab(self):
    """(Re)build the SESSION tree from the *.csv bench files in session_path.

    Each bench file becomes a branch; each session inside it a leaf whose
    item data carries the session's attribute dict.  Relative login_step
    scripts are resolved against session_path.
    """
    if self.session_page.RootItem:
        self.session_page.DeleteAllItems()
    session_path = os.path.abspath(self.ini_setting.get('dash', 'session_path'))
    self.session_path = session_path
    if not os.path.exists(session_path):
        session_path = os.path.abspath(os.path.curdir)
    base_name = os.path.basename(session_path)
    sessions = {}
    root = self.session_page.AddRoot(base_name)
    item_info = wx.TreeItemData({'path_name': session_path})
    self.session_page.SetItemData(root, item_info)
    self.session_page.Expand(root)
    item_list = get_folder_item(session_path)
    session_files = []
    for item in item_list:
        if os.path.isfile('{}/{}'.format(session_path, item)) and '{}'.format(item).lower().strip().endswith('.csv'):
            session_files.append(item)
    for csv_file in sorted(session_files):
        try:
            ses_in_bench = load_bench(os.path.abspath('{}/{}'.format(session_path, csv_file)))
            for bench in ses_in_bench:
                for ses in ses_in_bench[bench]:
                    # resolve a relative login_step script against session_path
                    if ses_in_bench[bench][ses].has_key('login_step') and ses_in_bench[bench][ses]['login_step'].strip() not in ['', None]:
                        ses_in_bench[bench][ses].update(
                            {'login_step': os.path.abspath('{}/{}'.format(session_path, ses_in_bench[bench][ses]['login_step'].strip()))}
                        )
            sessions.update(ses_in_bench)
        except Exception as e:
            pass  # skip malformed bench files
    root = self.session_page.GetRootItem()
    for file_name in sorted(sessions.keys()):
        item_name = os.path.basename(file_name)
        item_info = wx.TreeItemData({'file_name': file_name})
        new_bench = self.session_page.InsertItem(root, root, item_name)
        # BUGFIX: these items belong to session_page; the original attached
        # their data via case_suite_page.SetItemData (the wrong tree), so
        # on_LeftDClick_in_Session_tab's GetItemData lookups found nothing.
        self.session_page.SetItemData(new_bench, item_info)
        for ses in sorted(sessions[file_name]):
            item_name = ses
            item_info = wx.TreeItemData({'attribute': sessions[file_name][ses]})
            new_item = self.session_page.InsertItem(new_bench, new_bench, item_name)
            self.session_page.SetItemData(new_item, item_info)
    self.session_page.Expand(root)
    first_child = self.session_page.GetFirstChild(root)
    self.session_page.Expand(first_child[0])
def on_LeftDClick_in_Session_tab(self, event):
    """Double-click on a session leaf: open a SessionTab for it.

    The chosen tab name is de-duplicated against open tabs and against
    globals() (a '_' prefix is tried once), because the session object is
    also published as a global for the command box.
    """
    event.Skip()
    ses_name = self.session_page.GetItemText(self.session_page.GetSelection())
    self.session_page.GetItemText(self.session_page.GetSelection())
    session_attribute = self.session_page.GetItemData(self.session_page.GetSelection())
    # only leaf items carry an 'attribute' dict; bench nodes are ignored
    if session_attribute.Data.has_key('attribute'):
        info(session_attribute.Data['attribute'])
        counter = 1
        original_ses_name = ses_name
        # suffix _1, _2, ... until the tab name is unique
        while ses_name in self.tabs_in_edit_area:
            ses_name = '{}_{}'.format(original_ses_name, counter)
            counter += 1
        if globals().has_key(ses_name):
            if not globals().has_key('_{}'.format(ses_name)):
                info("variable '{}' is existed in global, change the name to _{}".format(ses_name, ses_name))
                ses_name = '_{}'.format(ses_name)
                self.session_page.SetItemText(self.session_page.GetSelection(), ses_name)
            else:
                error(("variable '{}' is existed in global, please change the name".format(ses_name)))
                return
        new_page = SessionTab(self.edit_area, ses_name, session_attribute.Data['attribute'], self.sequence_queue, log_path=self.log_path)
        window_id = self.edit_area.AddPage(new_page, ses_name)
        index = self.edit_area.GetPageIndex(new_page)
        self.edit_area.SetSelection(index)
        self.tabs_in_edit_area.append(ses_name)
        self.sessions_alive.update({ses_name: new_page.name})
        attribute = session_attribute.Data['attribute']
        # placeholder path; add_new_session_to_globals swaps it for the
        # real log_path variable in the generated script
        log_path = 'a_fake_log_path_for_auto_script'
        attribute['log_path'] = log_path
        self.add_new_session_to_globals(new_page, '{}'.format(attribute))
        #globals().update({ses_name: new_page.session})
def add_new_session_to_globals(self, new_page, args_str):
    """Publish *new_page* under its name in globals() and record the
    equivalent ``dut.dut(...)`` construction for script generation."""
    if globals().has_key(new_page.name):
        if globals()[new_page.name] == None:
            pass
        else:
            error('{} already '.format(new_page.name))
    else:
        globals().update({new_page.name: new_page})
    # Record the session construction into the auto-script queue; the fake
    # log path placeholder becomes the script's log_path variable and
    # not_call_open is flipped so the generated script really opens it.
    # NOTE(review): recorded even when the name collided above -- confirm
    # that is intended.
    self.add_cmd_to_sequence_queue('{} = dut.dut(name= "{}", **{})'.format(new_page.name, new_page.name, args_str.replace("'a_fake_log_path_for_auto_script'", 'log_path').replace("'not_call_open': True,", "'not_call_open': False,")), 'dut')
    #session = dut(name, **attributes)
def on_command_enter(self, event):
    """Run the command-box content as 'module[.class].function args...'.

    The call is resolved against globals() (so open sessions are usable
    as instances) and executed on a worker thread so the GUI stays
    responsive; successful commands land in the recall history.
    """
    info('called on_command_enter')
    cmd = self.m_command_box.GetValue()
    self.m_command_box.Clear()
    if cmd.strip() == '':
        return
    module, class_name, function, args = parse_command_line(cmd)
    #args[0]=self.sessions_alive['test_ssh'].session
    if module != '' or class_name != '' or function != '':
        instance_name, function_name, new_argvs, new_kwargs, str_code = call_function_in_module(module, class_name, function, args, globals())
        call_function = None
        if class_name != "":
            # bound method on the resolved instance
            call_function = getattr(instance_name, function_name)
            #(*new_argvs,**new_kwargs)
        else:
            # plain function: instance_name is already the callable
            call_function = instance_name  #(*new_argvs,**new_kwargs)
        th = threading.Thread(target=call_function, args=new_argvs, kwargs=new_kwargs)
        th.start()
        self.add_cmd_to_history(cmd, module, str_code)
    else:
        error('"{}" is NOT a valid call in format:\n\tmodule.class.function call or \n\tmodule.function'.format(cmd))
def add_src_path_to_python_path(self, path):
    """Prepend each ';'-separated entry of *path* to sys.path.

    Entries already present are reported; entries that do not exist on
    disk are warned about and skipped.
    """
    existing = sys.path
    for entry in path.split(';'):
        if entry in existing:
            info('path {} already in sys.path'.format(entry))
            continue
        abspath = os.path.abspath(entry)
        if os.path.exists(abspath):
            sys.path.insert(0, abspath)
        else:
            warn('path {} is not existed, ignored to add it into sys.path'.format(entry))
def on_key_down(self, event):
    """Intercept special keys in the command box before normal handling."""
    #error(event.KeyCode)
    keycode = event.KeyCode
    if keycode == wx.WXK_TAB:
        # TAB: append a literal tab and submit the command immediately
        self.m_command_box.AppendText('\t')
        self.on_command_enter(event)
    elif keycode == wx.PAPER_ENV_INVITE and wx.GetKeyState(wx.WXK_SHIFT):
        # NOTE(review): wx.PAPER_ENV_INVITE is a paper-size enum, not a key
        # code -- this Shift+'?' submit branch probably matches an
        # unintended key; confirm the keycode that was meant here.
        self.m_command_box.AppendText('?')
        self.on_command_enter(event)
    else:
        event.Skip()
def on_key_up(self, event):
    """Arrow keys cycle through the command history in the command box."""
    keycode = event.KeyCode
    if keycode in (wx.WXK_UP, wx.WXK_DOWN):
        go_forward = (keycode == wx.WXK_DOWN)  # DOWN walks toward newer entries
        self.m_command_box.Clear()
        self.history_cmd_index, recalled = get_next_in_ring_list(
            self.history_cmd_index, self.history_cmd, increase=go_forward)
        self.m_command_box.AppendText(recalled)
    # everything except TAB is passed on for default handling
    if keycode not in [wx.WXK_TAB]:
        event.Skip()
def add_cmd_to_history(self, cmd, module_name, str_code):
    """Append *cmd* to the recall history (skipping consecutive duplicates)
    and record its generated code line into the sequence queue."""
    if not self.history_cmd or self.history_cmd[-1] != cmd:
        self.history_cmd.append(cmd)
    # index one past the end == "fresh entry" for the up/down recall
    self.history_cmd_index = len(self.history_cmd)
    self.add_cmd_to_sequence_queue(str_code, module_name)
    #self.sequence_queue.put([cmd, datetime.now()])
def build_function_tab(self):
src_path = os.path.abspath(self.src_path)
if not os.path.exists(src_path):
src_path= os.path.abspath(os.path.curdir)
base_name = os.path.basename(src_path)
root =self.function_page.AddRoot(base_name)
item_info = wx.TreeItemData({'name':src_path})
self.function_page.SetItemData(root, item_info)
modules = get_folder_item(src_path)
if modules is None:
self.function_page.SetItemText(root, self.function_page.GetItemText(root) + ' Not Exists!!!')
self.function_page.SetItemTextColour(root, wx.Colour(255, 0, 0))
return
for module_file in modules:
path_name = '{}'.format(os.path.abspath(self.src_path))
module_name = os.path.basename(module_file).split('.')[0]
new_module = self.function_page.InsertItem(root, root, module_name)
file, path_name, description = imp.find_module(module_name)
lmod = imp.load_module(module_name, file, path_name,description)
for attr in sorted(dir(lmod)):
if attr.startswith('__'):
continue
attr_obj = getattr(lmod, attr)
attr_type = type(attr_obj)
if attr_type == types.FunctionType :
new_item = self.function_page.InsertItem(new_module, new_module, '{}'.format( attr))
item_info = wx.TreeItemData({'name':'{}.{}'.format(module_name,attr)})
self.function_page.SetItemData(new_item, item_info)
elif attr_type== types.TypeType:
class_obj = getattr(lmod, attr)
new_class = self.function_page.InsertItem(new_module, new_module, attr)
item_info = wx.TreeItemData({'name':'{}.{}'.format(module_name,attr)})
self.function_page.SetItemData(new_item, item_info)
for attr_in_class in sorted(dir(class_obj)):
if attr_in_class.startswith('__'):
continue
attr_obj = getattr(class_obj,attr_in_class)
attr_type =type(attr_obj)
if attr_type == types.MethodType :
item_info = wx.TreeItemData({'name':'{}.{}.{}'.format(module_name,attr,attr_in_class)})
new_item = self.function_page.InsertItem(new_class, new_class, attr_in_class)
self.function_page.SetItemData(new_item, item_info)
self.function_page.Expand(root)
first_child = self.function_page.GetFirstChild(root)
self.function_page.Expand(first_child[0])
def on_LeftDClick_in_Function_tab(self,event):
event.Skip()
select_item = self.function_page.GetSelection()
fun_name = self.function_page.GetItemData(select_item)
text_in_tree = self.function_page.GetItemText(select_item)
if fun_name != None and fun_name.Data.has_key('name'):
cmd = fun_name.Data['name']
info('click item in Functions tab: {}'.format(fun_name.Data['name']))
wx.CallAfter(self.m_command_box.Clear)
wx.CallAfter(self.m_command_box.AppendText, cmd+' ')
wx.CallAfter(self.m_command_box.SetFocus)
wx.CallAfter(self.m_command_box.SetInsertionPointEnd)
wx.CallAfter(self.m_command_box.Refresh)
def on_refresh_case_page(self, event):
self.case_suite_page.DeleteAllItems()
self.build_suite_tree()
info('Refresh Case tab done!')
def on_right_down_in_session_tab(self, event):
menu = wx.Menu()
item = wx.MenuItem(menu, wx.NewId(), "Refresh")
#acc = wx.AcceleratorEntry()
#acc.Set(wx.ACCEL_NORMAL, ord('O'), self.popupID1)
#item.SetAccel(acc)
menu.AppendItem(item)
self.Bind(wx.EVT_MENU, self.on_refresh_session_page,item)
self.PopupMenu(menu,event.GetPosition())
def on_refresh_session_page(self, event):
self.session_page.DeleteAllItems()
self.build_session_tab()
info('Refresh Session tab done!')
def on_right_down_in_function_tab(self, event):
menu = wx.Menu()
item = wx.MenuItem(menu, wx.NewId(), "Refresh")
#acc = wx.AcceleratorEntry()
#acc.Set(wx.ACCEL_NORMAL, ord('O'), self.popupID1)
#item.SetAccel(acc)
menu.AppendItem(item)
self.Bind(wx.EVT_MENU, self.on_refresh_function_page,item)
self.PopupMenu(menu,event.GetPosition())
def on_refresh_function_page(self, event):
self.function_page.DeleteAllItems()
self.build_function_tab()
info('Refresh Function tab done!')
def add_cmd_to_sequence_queue(self, cmd, module_name):
if self.import_modules.has_key(module_name):
pass
else:
self.import_modules.update({module_name:module_name})
self.sequence_queue.put([cmd,datetime.now() ])
    def generate_code(self, file_name ):
        """Turn the recorded command queue into a standalone replay script.

        Builds a script that imports every module used so far, replays each
        queued command (with a timestamp comment), closes any dut sessions
        on both the failure and success paths, and appends the result to
        `file_name`.  Nothing is written if no command was recorded.
        """
        # Header/prologue of the generated script; {} slots are src/lib paths.
        str_code ="""#created by DasH
if __name__ == "__main__":
    import sys, traceback
    sys.path.insert(0,r'{}')
    sys.path.insert(0,r'{}')
    import lib.common
    log_path= '../log/tmp'
    log_path= lib.common.create_case_folder()
    try:
""".format(self.src_path,self.lib_path )
        sessions =[]
        # One import line per module that contributed a command.
        for module in self.import_modules:
            str_code+='        import {mod}\n'.format(mod=module)#\n        {mod}_instance = {mod}()
        no_operation = True
        # Drain the queue without blocking; the bare `break` on exception
        # covers the queue.Empty raised when it runs dry.
        while True:
            try:
                cmd, timestamp =self.sequence_queue.get(block=False)[:2]
                str_code +='        {} #{}\n'.format(cmd, timestamp.isoformat( ' '))
                # Remember session variables (e.g. `s = dut.dut(...)`) so the
                # generated script can close them at the end.
                if cmd.find('dut.dut(')!=-1:
                    sessions.append(cmd.split('=')[0].strip())
                no_operation=False
                #datetime.now().isoformat()
            except Exception as e:
                break
        close_session=''
        # Failure path: print the traceback, close sessions, exit non-zero.
        str_code+='''    except Exception as e:
        print(traceback.format_exc())\n'''
        for ses in sessions:
            str_code+='''        {}.close_session()\n'''.format(ses)
        str_code+='        sys.exit(-1)\n'#, sys.exit(-1)
        # Success path: close sessions as well.
        for ses in sessions:
            str_code+='''    {}.close_session()\n'''.format(ses)
        info(str_code)
        # Append (not overwrite) so repeated generations accumulate.
        if not no_operation:
            with open(file_name, 'a+') as f:
                f.write(str_code)
def on_right_down_in_case_tab(self, event):
menu = wx.Menu()
item1 = wx.MenuItem(menu, wx.NewId(), "Run Test")
item2 = wx.MenuItem(menu, wx.NewId(), "Kill Test")
item3 = wx.MenuItem(menu, wx.NewId(), "Refresh")
#acc = wx.AcceleratorEntry()
#acc.Set(wx.ACCEL_NORMAL, ord('O'), self.popupID1)
#item.SetAccel(acc)
menu.AppendItem(item1)
menu.AppendItem(item2)
menu.AppendItem(item3)
self.Bind(wx.EVT_MENU, self.on_run_script,item1)
self.Bind(wx.EVT_MENU, self.on_kill_script,item2)
self.Bind(wx.EVT_MENU, self.on_refresh_case_page,item3)
self.PopupMenu(menu,event.GetPosition())
def on_kill_script(self,event):
hit_item = self.case_suite_page.GetSelection()
item_name = self.case_suite_page.GetItemText(hit_item)
item_data = self.case_suite_page.GetItemData(hit_item).Data
if item_data.has_key('PROCESS'):
p = item_data['PROCESS']
name= item_data['FULL_NAME']
info('script:{}, returncode:{}'.format(name,p.returncode))
if p.returncode is None:
#if p.is_alive():
info('Terminate alive process {}:{}'.format(item_name, p.pid))
result ='KILL'
self.update_case_status(p.pid, result)
self.mail_test_report("DASH TEST REPORT-updating")
p.terminate()
else:
result ='FAIL' if p.returncode else 'PASS'
info('{}:{} completed with returncode {}'.format(item_name, p.pid, result))
self.update_case_status(p.pid, result)
    def run_script(self, script_name):
        """Launch one test script in a new console through script_runner.

        `script_name` may include arguments; double quotes group words into
        one argument.  Returns (process, case_log_path).
        """
        old_script_name = script_name
        # Tokenize so quoted arguments survive as single tokens.
        lex = shlex.shlex(script_name)
        lex.quotes = '"'
        lex.whitespace_split = True
        script_name_and_args = list(lex)
        script_args = script_name_and_args[1:]
        script_name = script_name_and_args[0]
        # Bare names (no path separator) are resolved under the suite folder.
        if script_name.find(os.path.sep)!=-1:
            pass
        else:
            script_name= '{}/{}'.format(self.suite_path,script_name)
        from lib.common import create_case_folder
        # NOTE(review): sys.argv is swapped and restored but create_case_folder()
        # is commented out, so the swap currently has no effect — confirm.
        old_sys_argv = sys.argv
        sys.argv= [script_name]+script_args
        case_log_path = self.log_path #create_case_folder()
        sys.argv= old_sys_argv
        try:
            # Prefer the packaged runner when present; otherwise fall back to
            # running script_runner.py with the current interpreter.
            if os.path.exists('script_runner.exe'):
                execute = 'script_runner.exe'
                cmd = [execute,script_name ]+script_args + ['-l','{}'.format(case_log_path)]
                #p=subprocess.Popen(cmd, creationflags = subprocess.CREATE_NEW_CONSOLE)
            else:
                cmd = [sys.executable,'./script_runner.py', script_name ]+script_args+ ['-l','{}'.format(case_log_path)]
            # Shared by both branches: spawn in a new console (Windows-only flag).
            p=subprocess.Popen(cmd, creationflags = subprocess.CREATE_NEW_CONSOLE)#, stdin=pipe_input, stdout=pipe_output,stderr=pipe_output)
            self.add_new_case_to_report(p.pid, old_script_name, p, case_log_path)
        except:
            # NOTE(review): if Popen itself failed, `p` is unbound and the
            # return below raises NameError — confirm intended behavior.
            error(traceback.format_exc())
        return p, case_log_path
def on_run_script(self,event):
hit_item = self.case_suite_page.GetSelection()
item_name = self.case_suite_page.GetItemText(hit_item)
item_data = self.case_suite_page.GetItemData(hit_item).Data
script_name = self.case_suite_page.GetItemData(hit_item).Data['path_name']
if script_name.lower().split('.')[-1] in ['txt','csv']:#test suite file, not a single script
self.run_a_test_suite(script_name)
else:#a single test case
self.on_kill_script(event)
try:
p, case_log_path = self.run_script('{} {}'.format(script_name, item_name))
self.case_suite_page.GetItemData(hit_item).Data['PROCESS']=p
self.case_suite_page.GetItemData(hit_item).Data['FULL_NAME']= item_name
info('start process {} :{}'.format(item_name, p.pid))
#p.join() # this blocks until the process terminates
time.sleep(1)
except Exception as e :
error(traceback.format_exc())
#p = Process(target=run_script, args=[script_name, script_and_args])
#p.start()
def check_case_status(self):
self.check_case_running_status_lock.acquire()
changed=False
running_case = 0
for pid in self.dict_test_report.keys():
case_name, start_time, end_time, duration, return_code ,proc, log_path= self.dict_test_report[pid]
if return_code is None:
if proc.poll() is None:
running_case+=1
debug('RUNNING', start_time, end_time, duration, return_code ,proc, log_path)
else:
changed=True
return_code = 'FAIL' if proc.returncode else 'PASS'
self.update_case_status(pid,return_code)
if running_case:
pass
elif not self.case_queue.empty():#self.case_queue.qsize():
case_name_with_args = self.case_queue.get()
p, case_log_path = self.run_script(case_name_with_args)
self.check_case_running_status_lock.release()
if changed:
#test_report = self.generate_report(filename='{}/dash_report.txt'.format(self.log_path))
self.mail_test_report('DasH Test Report-updating')
return changed
def polling_running_cases(self):
while True:
time.sleep(10)
try:
if not self.alive:
break
except:
break
#self.check_case_running_status_lock.acquire()
self.check_case_status()
#self.check_case_running_status_lock.release()
def add_new_case_to_report(self, pid, case_name, proc, log_path):
start_time=datetime.now()
duration = 0
end_time = None
return_code = None
#self.check_case_running_status_lock.acquire()
if pid in self.dict_test_report:
self.dict_test_report[pid].update([case_name, start_time, end_time, duration, return_code, proc, log_path])
else:
self.dict_test_report[pid]= [case_name, start_time, end_time, duration, return_code, proc, log_path ]
#self.check_case_running_status_lock.release()
def update_case_status(self, pid,return_code=None):
now = datetime.now()
case_name, start_time, end_time, duration, tmp_return_code ,proc,log_path= self.dict_test_report[pid]
if tmp_return_code is None:
duration = (now-start_time).total_seconds()
self.dict_test_report[pid]=[case_name,start_time, end_time, duration, return_code, proc, log_path]
else:
pass# don't update one case result twice
def mail_test_report(self, subject="DASH TEST REPORT-updating"):
try:
#self.check_case_status()
test_report = self.generate_report(filename='{}/dash_report.txt'.format(self.log_path))
#TO, SUBJECT, TEXT, SERVER, FROM
send_mail_smtp_without_login(self.mail_to_list, subject,test_report,self.mail_server,self.mail_from)
except Exception as e:
error(traceback.format_exc())
def on_mail_test_report(self,event):
self.mail_test_report('DasH Test Report-updating')
#p.terminate()
    def on_handle_request_via_mail(self):
        """Poll the IMAP inbox and execute bot requests sent by mail.

        Unseen mail from each authorized address is read (headers only,
        BODY.PEEK so it stays unread), the subject is matched against the
        dash-request-* commands, and handled messages are marked seen by
        fetching them with RFC822.  Unmatched mail is explicitly re-flagged
        unseen.
        """
        import imaplib
        from email.parser import Parser
        def process_multipart_message(message):
            # Recursively flatten a (possibly multipart) message to text.
            # NOTE: `basestring` makes this Python 2 only.
            if isinstance(message, basestring) or isinstance(message , list):
                return message
            rtn = ''
            try:
                if message.is_multipart():
                    for m in message.get_payload():
                        rtn += process_multipart_message(m)
                else:
                    rtn += message.get_payload()
            except Exception as e:
                pass
            return rtn
        url, user, password = self.mail_read_url,self.mail_user, self.mail_password
        conn = imaplib.IMAP4_SSL(url,993)
        conn.login(user,password)
        conn.select('INBOX')#, readonly=True)
        # Only addresses on the bot's own mailing list may issue requests.
        try:
            authorized_mail_address = self.mail_to_list.replace(',',';').split(';')
        except Exception as e:
            return
        for mail_address in authorized_mail_address:
            results,data = conn.search(None,'(UNSEEN)', '(FROM "{}")'.format(mail_address)) # #'ALL')
            msg_ids = data[0]
            msg_id_list = msg_ids.split()
            MAX_UNREAD_MAIL = 50
            # Newest first, capped so a flooded inbox can't stall the loop.
            for unread_mail_id in msg_id_list[::-1][:MAX_UNREAD_MAIL]:
                # BODY.PEEK reads headers without setting the \Seen flag.
                result,data = conn.fetch(unread_mail_id,'(BODY.PEEK[HEADER])')#"(RFC822)")#
                raw_email = data[0][1]
                p = Parser()
                msg = p.parsestr(raw_email)
                #msg = process_multipart_message(msg )
                from1 = msg.get('From')
                sub = '{}'.format(msg.get('Subject'))
                sub = sub.strip().lower()
                support_list='''
                ###############################
                mail subject below is supported:
                dash-request-case-queue : request the cases in queue which to be executed
                dash-request-case : request cases which are under suite_path
                dash-request-report : request a test report by now
                dash-request-kill-running : to kill all running test cases
                dash-request-clear-queue : to clear/remove all cases which are in case queue
                dash-request-run : to run script(s), each line is a script with arguments if it has
                --------------------------------
                ***non-case-sensitive***
                ###############################
                '''
                handled =False
                # Dispatch on the (lowercased) subject line.
                if sub in ['dash']:
                    send_mail_smtp_without_login(self.mail_to_list, 'DONE-DasH Support List',support_list,self.mail_server,self.mail_from)
                    handled = True
                    #conn.uid('STORE', unread_mail_id, '+FLAGS', '\SEEN')
                elif sub in ['dash-request-case-queue']:
                    case_in_queue =self.get_case_queue(None)
                    send_mail_smtp_without_login(self.mail_to_list, 'DONE-DasH:Case In Queue',case_in_queue+support_list,self.mail_server,self.mail_from)
                    #conn.uid('STORE', unread_mail_id, '+FLAGS', '\SEEN')
                    handled = True
                elif sub in ['dash-request-case']:
                    cases_string = '\n\t'.join(self.case_list)
                    send_mail_smtp_without_login(self.mail_to_list, 'DONE-DasH:Case List',cases_string+support_list,self.mail_server,self.mail_from)
                    handled = True
                    #conn.uid('STORE', unread_mail_id, '+FLAGS', '\SEEN')
                elif sub in ['dash-request-report']:
                    self.mail_test_report('DasH Test Report-requested')
                    #conn.uid('STORE', unread_mail_id, '+FLAGS', '\SEEN')
                    handled = True
                elif sub in ['dash-request-kill-running']:
                    killed= self.on_kill_running_case()
                    send_mail_smtp_without_login(self.mail_to_list, 'DONE-[DasH]:Killed Running Case(s)',killed+support_list,self.mail_server,self.mail_from)
                    handled = True
                    #conn.uid('STORE', unread_mail_id, '+FLAGS', '\SEEN')
                elif sub in ['dash-request-clear-queue']:
                    case_in_queue = self.on_clear_case_queue()
                    send_mail_smtp_without_login(self.mail_to_list, 'DONE-DasH:Clear Case Queue',case_in_queue+support_list,self.mail_server,self.mail_from)
                    handled = True
                    #conn.uid('STORE', unread_mail_id, '+FLAGS', '\SEEN')
                elif sub in ['dash-request-run']:
                    #if from1 in ['dash@calix.com', 'yu_silence@163.com',self.mail_to_list]:
                    conn.uid('STORE', unread_mail_id, '+FLAGS', r'(\SEEN)')
                    handled = True
                    #conn.uid('STORE', '-FLAGS', '(\Seen)')
                    # The body lists one script (with arguments) per line;
                    # HTML mail is converted to text first.
                    payload = msg.get_payload()
                    payload = process_multipart_message(payload )
                    from lib.html2text import html2text
                    txt = html2text(payload)
                    cases = txt.replace('\r\n','\n').split('\n')
                    for line in cases:
                        line = line.strip()
                        if line.strip().startswith('#') or len(line)==0:
                            pass
                        else:
                            # Suite files (.txt/.csv) expand to many cases.
                            type_case, case_name, args = self.check_case_type(line)
                            if type_case in ['txt','csv']:
                                self.run_a_test_suite(line)
                            else:
                                self.case_queue.put(line)
                                info('adding case to queue: {}'.format(line))
                    result,data = conn.fetch(unread_mail_id,'(RFC822)')#"(RFC822)")#
                else:
                    # Not a recognized request: put the \Seen flag back off.
                    conn.uid('STORE', unread_mail_id, '-FLAGS', r"(\SEEN)")
                #fixed : 2017-09-25 failed to set unmatched mail to unread, to fetch it again with RFC822
                if handled:
                    # A full RFC822 fetch marks the message as seen.
                    result,data = conn.fetch(unread_mail_id,'(RFC822)')#"(RFC822)")#
def check_case_type(self, str_line):
lex = shlex.shlex(str_line)
lex.quotes = '"'
lex.whitespace_split = True
script_name_and_args = list(lex)
script_name = script_name_and_args[0]
return script_name.lower().split('.')[-1],script_name_and_args[0] ,script_name_and_args[1:]
def polling_request_via_mail(self):
while True:
time.sleep(10)
try:
if not self.alive:
break
except:
break
try:
self.on_handle_request_via_mail()
except Exception as e:
error(traceback.format_exc())
pass
def get_case_queue(self, item=None):
case_in_queue = list(self.case_queue.queue)
number_in_queue= len(case_in_queue)
if number_in_queue:
str_case_in_queue='\ntotal {} case(s) in Queue\n'.format(number_in_queue)+'\n'.join('{}'.format(x) for x in case_in_queue)
else:
str_case_in_queue='\nNo Case in Queue'
info('Case(s) in Queue', str_case_in_queue)
return str_case_in_queue
def on_clear_case_queue(self, event=None):
case_in_queue = self.get_case_queue(None)
self.case_queue.queue.clear()
self.get_case_queue(None)
return case_in_queue
def on_kill_running_case(self,event=None):
killed_case= ''
for case in self.dict_test_report:
case_name,start_time, end_time, duration, return_code, proc, log_path = self.dict_test_report[:7]
if return_code is None:
if proc.poll() is None:
killed_case+='{}:{}\n'.format(case_name, proc.pid)
info('Terminate alive process {}:{}'.format(case_name, proc.pid))
result ='KILL'
self.update_case_status(proc.pid, result)
proc.terminate()
info('Killed All Running cases', killed_case)
return killed_case
def run_a_test_suite(self, csv_file_name, clear_queue=False, kill_running =False):
try:
case_type, suite_file_name, args =self.check_case_type(csv_file_name)
if clear_queue:
self.on_clear_case_queue()
if kill_running:
self.on_kill_running_case()
import csv
if suite_file_name.find(os.path.sep)!=-1:
pass
else:
suite_file_name= '{}/{}'.format(self.suite_path,suite_file_name)
with open(suite_file_name) as bench:
reader = csv.reader(bench,delimiter=',')
for row in reader:
if len(row)<1:
continue
else:
name = row[0]
args.insert(0,0)
for index in range(1,len(args)):
name =name.replace('{{index}}'.format(index =index), '{}'.format(args[index]))
self.case_queue.put(name)
info('adding case to queue: {}'.format(name))
except Exception as e:
error(traceback.format_exc())
#done: 2017-08-22, 2017-08-19 save main log window to a file
#todo: 2017-08-19 add timestamps to log message
#done: 2017-08-22, 2017-08-19 mail to someone
#todo: 2017-08-19 run a script in DasH
#todo: 2017-08-19 generate test report
#todo: 2017-08-19 publish all test cases in a web page
#todo: 2017-08-19 trigger a test remote via web page
#todo: 2017-08-19 re-run failed cases
#todo: 2017-08-19 build executable package for DasH
#todo: 2017-08-19 a popup window to get email address/password/mail_server...
#todo: 2017-08-22 output in m_log window has a lot of empty line, need remove them
#todo: 2017-08-23 in common.call_function_in_module, should end all threads which are started in previous instance
#todo: 2017-08-23 add tips for all tree items in the left pane
# The command file for every external command not specifically for running
# the bot. Even more relevant commands like broadcast options and whitelists
# are treated as such.
#
# Every command in here should follow the basic structure of:
# elif cmd == 'commandHere':
# doYourThings(lots, of, variables)
# return 'Your Response', True/False
#
# True: Allows that the command in question can, if gotten from a room,
# be returned to the same room rather than a PM.
# False: This will ALWAYS return the reply as a PM, no matter where it came from
from random import randint, sample
import re
import yaml
import json
import math # For funsies
from data.tiers import tiers, formats
from data.teams import Teams
from data.links import Links, YoutubeLinks
from data.pokedex import Pokedex
from data.types import Types
from data.replies import Lines
from plugins.games import Hangman, Anagram
from plugins.workshop import Workshop
from plugins.trivia.trivia import Trivia
from plugins.moderation import addBan, removeBan
usageLink = r'http://www.smogon.com/stats/2015-09/'
GameCommands = ['hangman', 'hg', 'anagram', 'a', 'trivia', 'ta']
CanPmReplyCommands = ['usage', 'help']
Scoreboard = {}
# 'a+' creates the scoreboard file on first run; seek(0) so we read from the top.
with open('plugins/scoreboard.yaml', 'a+') as yf:
    yf.seek(0, 0)
    # BUGFIX/SECURITY: plain yaml.load can construct arbitrary Python objects;
    # the scoreboard only ever holds plain names and counts, so safe_load is right.
    Scoreboard = yaml.safe_load(yf)
if not Scoreboard: # Empty yaml file set Scoreboard to None, but an empty dict is better
    Scoreboard = {}
def Command(self, cmd, room, msg, user):
    ''' Returns the reply if the command exists, and False if it doesn't

    Every branch returns a (reply, inRoomOk) tuple: the second element says
    whether the reply may be posted in the room it came from instead of a PM.
    '''
    # Debug commands and program info
    if cmd == 'echo':
        return msg, True
    elif cmd in ['source', 'git']:
        return 'Source code can be found at: {url}'.format(url = URL()), False
    elif cmd == 'credits':
        return 'Credits can be found: {url}'.format(url = URL()), True
    elif cmd in ['commands', 'help']:
        return 'Read about commands here: {url}blob/master/COMMANDS.md'.format(url = URL()), True
    elif cmd == 'explain':
        return "BB-8 is the name of a robot in the seventh Star Wars movie :)", True
    elif cmd == 'leave':
        msg = removeSpaces(msg)
        if not msg: msg = room
        if self.leaveRoom(msg):
            return 'Leaving room {r} succeeded'.format(r = msg), False
        else:
            return 'Could not leave room: {r}'.format(r = msg), False
    # THIS COMMAND SHOULDN'T BE DOCUMENTED!
    elif cmd == 'get':
        if isMaster(self, user):
            # SECURITY: deliberate eval(), reachable only by the bot owner.
            return str(eval(msg)), True
        else:
            return 'You do not have permisson to use this command. (Only for owner)', False
    # Save current self.details to details.yaml (moves rooms to joinRooms)
    # Please note that this command will remove every comment from details.yaml, if those exist.
    elif cmd == 'savedetails':
        if canChange(self, user):
            saveDetails(self)
            return 'Details saved.', False
        else:
            return "You don't have permission to save settings. (Requires #)", False
    # Permissions
    elif cmd == 'broadcast':
        return 'Rank required to broadcast: {rank}'.format(rank = self.details['broadcastrank']), True
    elif cmd == 'setbroadcast':
        msg = removeSpaces(msg)
        if msg in self.Groups or msg in ['off', 'no', 'false']:
            if canChange(self, user):
                if msg in ['off', 'no', 'false']: msg = ' '
                if self.details['broadcastrank'] == msg:
                    return 'Broadcast rank is already {rank}'.format(rank = msg), True
                else:
                    self.details['broadcastrank'] = msg
                    return 'Broadcast rank set to {rank}. (This is not saved on reboot)'.format(rank = msg), True
            else:
                return 'You are not allowed to set broadcast rank. (Requires #)', False
        else:
            return '{rank} is not a valid rank'.format(rank = msg), False
    elif cmd == 'whitelist':
        if canSee(self, user):
            if self.details['whitelist']:
                return ', '.join(self.details['whitelist']), False
            else:
                return 'Whitelist is empty :(', False
        else:
            return 'You are not allowed to see the whitelist :l (Requires %)', False
    elif cmd in ['whitelistuser', 'wluser']:
        if canAddUser(self, user):
            self.details['whitelist'].append(msg)
            return 'User {usr} added to whitelist.'.format(usr = msg), True
        else:
            # BUGFIX: without this else the function fell through and
            # returned None instead of the expected (reply, bool) tuple.
            return 'You do not have permission to do this. (Requires #)', False
    elif cmd == 'removewl':
        if canAddUser(self, user):
            self.details['whitelist'].remove(msg)
            return 'User {usr} removed from the whitelist.'.format(usr = msg), True
        else:
            # BUGFIX: same implicit-None fall-through as 'whitelistuser'.
            return 'You do not have permission to do this. (Requires #)', False
    elif cmd == 'moderate':
        if not msg:
            return 'No parameters given. Command is ~moderate [room],True/False', False
        else:
            if canChange(self, user):
                things = removeSpaces(msg).split(',')
                if not len(things) == 2:
                    return 'Too few/many parameters given. Command is ~moderate [room],True/False', False
                if things[0] in self.details['rooms']:
                    if things[1] in ['True', 'true']:
                        self.details['rooms'][things[0]].moderate = True
                        return '{room} will now be moderated'.format(room = things[0]), False
                    elif things[1] in ['False', 'false']:
                        self.details['rooms'][things[0]].moderate = False
                        return '{room} will not be moderated anymore'.format(room = things[0]), False
                    else:
                        # BUGFIX: an unrecognized True/False value used to fall
                        # through and return None instead of a tuple.
                        return '{param} is not a supported parameter'.format(param = things[1]), False
                else:
                    return 'You cannot set moderation in a room without me in it.', False
            else:
                return 'You do not have permission to set this. (Requires #)', False
    # Autobans
    elif cmd in ['banuser', 'banphrase']:
        if canAddUser(self, user):
            error = addBan(cmd[3:], room, msg)
            if not error:
                return 'Added {thing} to the banlist'.format(thing = msg), True
            else:
                return error, True
        else:
            return 'You do not have permission to do this. (Requires #)', False
    elif cmd in ['unbanuser', 'unbanphrase']:
        if canAddUser(self, user):
            error = removeBan(cmd[5:], room, msg)
            if not error:
                return 'Removed {thing} from banlist'.format(thing = msg), True
            else:
                return error, True
        else:
            return 'You do not have permission to do this. (Requires #)', False
    elif cmd == 'allowgames':
        if canChange(self, user):
            msg = removeSpaces(msg)
            things = msg.split(',')
            if len(things) == 2:
                if things[0] in self.details['rooms']:
                    if things[1] in ['true','yes','y','True']:
                        if not self.details['rooms'][things[0]].allowGames:
                            self.details['rooms'][things[0]].allowGames = True
                            return 'Chatgames are now allowed in {room}'.format(room = things[0]), True
                        else:
                            return 'Chatgames are already allowed in {room}'.format(room = things[0]), True
                    elif things[1] in ['false', 'no', 'n',' False']:
                        self.details['rooms'][things[0]].allowGames = False
                        return 'Chatgames are now not allowed in {room}'.format(room = things[0]), True
                    else:
                        return '{param} is not a supported parameter'.format(param = things[1]), True
                else:
                    return 'Cannot allow chatgames without being in the room', True
            else:
                return 'Too few/many parameters. Command is ~allowgames [room],True/False', False
        else:
            return 'You do not have permission to change this. (Requires #)', False
    # Informational commands
    elif cmd in Links:
        if msg in Links[cmd]:
            return Links[cmd][msg], True
        else:
            return '{tier} is not a supported format for {command}'.format(tier = msg, command = cmd), True
    elif cmd == 'team':
        if msg not in Teams:
            return 'Unsupported format', True
        return Teams[msg][randint(0, len(Teams[msg])-1)], True
    elif cmd == 'usage':
        return usageLink, True
    # Offline messages
    elif cmd == 'tell':
        if not isWhitelisted(self, user): return "You don't have the permission to use this feature.", False
        if not msg: return 'You need to specify a user and a message to send in the format: [user], [message]', False
        msg = msg.split(',')
        # BUGFIX: the character class was [^a-zA-z0-9]; the A-z span also
        # matches [ \ ] ^ _ ` so those slipped through name normalization.
        to = re.sub(r'[^a-zA-Z0-9]', '', msg[0]).lower()
        if self.usernotes.alreadySentMessage(to, user['unform']):
            return 'You already have a message to this user waiting', False
        if len(msg[1].lstrip()) > 150:
            return 'Message is too long. Max limit is 150 characters', False
        self.usernotes.addMessage(to, user['unform'], msg[1].lstrip())
        return "I'll be sure to tell them that.", True
    # Fun stuff
    elif cmd == 'pick':
        options = msg.split(',')
        return options[randint(0,(len(options)-1))], True
    elif cmd == 'ask':
        return Lines[randint(0, len(Lines)-1)], True
    elif cmd in YoutubeLinks:
        return YoutubeLinks[cmd], True
    elif cmd == 'squid':
        if msg:
            if msg.isdecimal():
                nr = float(msg)
                if 0 < nr <= 10:
                    return '\u304f\u30b3\u003a\u5f61' * int(nr), True
                else:
                    return 'Can only use whole numbers between 1 and 10', True
            else:
                return 'Invalid parameter given. Accepting whole numbers between 1 and 10.', True
        else:
            return '\u304f\u30b3\u003a\u5f61', True
    elif cmd == 'joke':
        if randint(0, 1) and self.Groups[user['group']] >= self.Groups['+']:
            return user['unform'], True
        else:
            return getJoke(), True
    elif cmd in tiers:
        pick = list(tiers[cmd])[randint(0,len(tiers[cmd])-1)]
        pNoForm = re.sub('-(?:Mega(?:-(X|Y))?|Primal)','', pick).lower()
        return '{poke} was chosen: http://www.smogon.com/dex/xy/pokemon/{mon}/'.format(poke = pick, mon = pNoForm), True
    elif cmd in [t.replace('poke','team') for t in tiers]:
        team = set()
        attempts = 0
        while len(team) < 6 or not acceptableWeakness(team):
            poke = list(tiers[cmd.replace('team','poke')])[randint(0,len(tiers[cmd.replace('team','poke')])-1)]
            # Test if share dex number with anything in the team
            if [p for p in team if Pokedex[poke]['dex'] == Pokedex[p]['dex']]:
                continue
            if [p for p in team if '-Mega' in p] and '-Mega' in poke:
                continue
            team |= {poke}
            if not acceptableWeakness(team):
                team -= {poke}
            if len(team) >= 6:
                break
            attempts += 1
            if attempts >= 100:
                # Prevents locking up if a pokemon turns the team to an impossible generation
                # Since the team is probably bad anyway, just finish it and exit
                while len(team) < 6:
                    team |= {list(tiers[cmd.replace('team','poke')])[randint(0,len(tiers[cmd.replace('team','poke')])-1)]}
                break
        return ' / '.join(list(team)), True
    # Workshop is not a hangman game, but uses the allowed slot for a game anyway
    # Workshops also doesn't follow the chatgames rule, as they're not chat games
    elif cmd == 'workshop':
        if not isGameType(self.details['rooms'][room].game, Workshop):
            if msg.startswith('new') and canStartGame(self, user):
                # BUGFIX: [^a-zA-z0-9] -> [^a-zA-Z0-9] (A-z spans punctuation).
                self.details['rooms'][room].game = Workshop(re.sub(r'[^a-zA-Z0-9]', '', msg[len('new '):] if msg[len('new '):] else user['name']).lower())
                return 'A new workshop session was created', True
            else:
                return 'No active workshop right now', True
        workshop = self.details['rooms'][room].game
        if msg.startswith('add'):
            if not user['name'] == workshop.host and not canStartGame(self, user):
                return 'Only the workshop host or a Room Moderator can add Pokemon', True
            return workshop.addPokemon(msg[len('add '):]), True
        elif msg.startswith('remove'):
            if not user['name'] == workshop.host and not canStartGame(self, user):
                return 'Only the workshop host or a Room Moderator can remove Pokemon', True
            return workshop.removePokemon(msg[len('remove '):]), True
        elif msg == 'clear':
            if not user['name'] == workshop.host and not canStartGame(self, user):
                return 'Only the workshop host or a Room Moderator can clear the team', True
            return workshop.clearTeam(), True
        elif msg == 'team':
            return workshop.getTeam(), True
        elif msg == 'end':
            if not user['name'] == workshop.host and not canStartGame(self, user):
                return 'Only the workshop host or a Room Moderator can end the workshop', True
            self.details['rooms'][room].game = None
            return 'Workshop session ended', True
    # Chat games go here
    # Hangman
    elif cmd == 'hangman':
        msg = msg.strip().split(',')
        if 'end' in msg[0] and canStartGame(self, user) and isGameType(self.details['rooms'][room].game, Hangman):
            phrase = self.details['rooms'][room].game.getSolution()
            self.details['rooms'][room].game = None
            return 'The hangman game was forcefully ended by {baduser}. (Killjoy)\nThe solution was: **{solved}**'.format(baduser = user['unform'], solved = phrase), True
        elif 'new' in msg[0]: # ~hangman new,room,[phrase]
            if canStartGame(self, user):
                if self.details['rooms'][room].game:
                    return 'A game is already running in this room', False
                phrase = re.sub(r'[^a-zA-Z0-9 ]', '', re.sub(r'\s{2,}', ' ', msg[2].strip()))
                if not phrase.strip():
                    return 'You can only have letters, numbers or spaces in the phrase', False
                if len(removeSpaces(phrase)) <= 1:
                    return 'The phrase must be at least two characters long', False
                self.details['rooms'][room].game = Hangman(phrase)
                return 'A new game of hangman has begun:\n' + self.details['rooms'][room].game.printCurGame(), True
            else:
                return 'You do not have permission to start a game in this room. (Requires %)', False
        else:
            return 'To start a new hangman game: ~hangman new,[room],[phrase]', True
    elif cmd == 'hg':
        if isGameType(self.details['rooms'][room].game, Hangman):
            if len(removeSpaces(msg)) == 1:
                return self.details['rooms'][room].game.guessLetter(msg.replace(' ','').lower()), True
            else:
                if not msg.lstrip():
                    return "You can't guess nothing", True
                if self.details['rooms'][room].game.guessPhrase(msg.lstrip()):
                    solved = self.details['rooms'][room].game.getFormatedPhrase()
                    self.details['rooms'][room].game = None
                    return 'Congratulations {name}. You won!\nThe phrase was: {phrase}'.format(name = user['unform'], phrase = solved), True
                else:
                    return '{test} is wrong!'.format(test = msg.lstrip()), True
        else:
            return 'There is no hangman game in progress right now', True
    # Anagrams of Pokemon names
    elif cmd == 'anagram':
        if msg == 'new':
            if canStartGame(self, user):
                if self.details['rooms'][room].game:
                    return 'A game is already running somewhere', False
                else:
                    self.details['rooms'][room].game = Anagram()
                    return 'A new anagram has been created:\n' + self.details['rooms'][room].game.getWord(), True
            else:
                return 'You do not have permission to start a game in this room. (Requires %)', False
        elif msg == 'hint':
            if self.details['rooms'][room].game:
                return 'The hint is: ' + self.details['rooms'][room].game.getHint(), True
            else:
                return 'There is no active anagram right now', False
        elif msg == 'end':
            if canStartGame(self, user):
                if isGameType(self.details['rooms'][room].game, Anagram):
                    solved = self.details['rooms'][room].game.getSolvedWord()
                    self.details['rooms'][room].game = None
                    return 'The anagram was forcefully ended by {baduser}. (Killjoy)\nThe solution was: **{solved}**'.format(baduser = user['unform'], solved = solved), True
                else:
                    return 'There is no active anagram or a different game is active.', False
            else:
                return 'You do not have permission to end the anagram. (Requires %)', True
        elif msg.startswith('score'):
            if msg.strip() == 'score':
                return 'No name was given', True
            # BUGFIX: [^a-zA-z0-9] -> [^a-zA-Z0-9] (A-z spans punctuation).
            name = re.sub(r'[^a-zA-Z0-9]', '', msg[len('score '):]).lower()
            if name not in Scoreboard:
                return "This user never won any anagrams", True
            return 'This user has won {number} anagram{plural}'.format(number = Scoreboard[name], plural = '' if not type(Scoreboard[name]) == str and Scoreboard[name] < 2 else 's'), True
        else:
            if not msg:
                if isGameType(self.details['rooms'][room].game, Anagram):
                    return 'Current anagram: {word}'.format(word = self.details['rooms'][room].game.getWord()), True
                else:
                    return 'There is no active anagram right now', False
            return '{param} is not a valid parameter for ~anagram. Make guesses with ~a'.format(param = msg), False
    elif cmd == 'a':
        game = self.details['rooms'][room].game
        if isGameType(game, Anagram):
            if game.isCorrect(re.sub(r'[ -]', '', msg).lower()):
                solved = game.getSolvedWord()
                timeTaken = game.getSolveTimeStr()
                self.details['rooms'][room].game = None
                # Save score
                Scoreboard[user['name']] = 1 if user['name'] not in Scoreboard else Scoreboard[user['name']] + 1
                with open('plugins/scoreboard.yaml', 'w') as ym:
                    yaml.dump(Scoreboard, ym)
                return 'Congratulations, {name} got it{time}\nThe solution was: {solution}'.format(name = user['unform'], time = timeTaken, solution = solved), True
            else:
                return '{test} is wrong!'.format(test = msg.lstrip()), True
        else:
            return 'There is no anagram active right now', True
    # Trivia
    elif cmd == 'trivia':
        if msg:
            params = removeSpaces(msg).split(',')
            if params[0] in ['start', 'begin']:
                kind = 'first'
                if len(params) > 1:
                    kind = params[1]
                if canStartTrivia(self, user):
                    self.details['rooms'][room].game = Trivia(self.ws, room, kind)
                    return 'A new trivia session has started.', True
                else:
                    return 'You do not have permission to set up a trivia session', False
            elif params[0] in ['stop', 'end']:
                # BUGFIX: this used to set .endSession on whatever was in the
                # game slot -- AttributeError when no trivia session existed.
                if isGameType(self.details['rooms'][room].game, Trivia):
                    # The trivia class will solve everything after doing this.
                    self.details['rooms'][room].game.endSession = True
                    self.details['rooms'][room].game = None
                    return 'The trivia session has been ended', True
                else:
                    return 'There is no trivia session to end', False
            # BUGFIX: the {msg} placeholder was never filled in.
            return '{msg} is not an valid parameter for trivia'.format(msg = msg), False
    elif cmd == 'ta':
        game = self.details['rooms'][room].game
        if isGameType(game, Trivia):
            # Don't give information if wrong or right here, let Trivia deal with that
            if game.tryAnswer(msg):
                if not game.solver:
                    game.wasSolved(user['unform'])
                else:
                    game.multiple = True
            return 'NoAnswer', False
        else:
            return 'There is no ongoing trivia session.', True
    # Commands with awful conditions last
    elif cmd in formats:
        return 'Format: http://www.smogon.com/dex/xy/formats/{tier}/'.format(tier = cmd), True
    # This command is here because it's an awful condition, so try it last :/
    elif [p for p in Pokedex if re.sub('-(?:mega(?:-(x|y))?|primal|xl|l)','', cmd, flags=re.I) in p.replace(' ','').lower()]:
        cmd = re.sub('-(?:mega(?:-(x|y))?|primal)','', cmd)
        substitutes = {'gourgeist-s':'gourgeist-small', # This doesn't break Arceus-Steel like adding |S to the regex would
                       'gourgeist-l':'gourgeist-large', # and gourgeist-s /pumpkaboo-s still get found, because it matches the
                       'gourgeist-xl':'gourgeist-super', # entry for gougeist/pumpkaboo-super
                       'pumpkaboo-s':'pumpkaboo-small',
                       'pumpkaboo-l':'pumpkaboo-large',
                       'pumpkaboo-xl':'pumpkaboo-super',
                       'giratina-o':'giratina-origin',
                       'mr.mime':'mr_mime',
                       'mimejr.':'mime_jr'}
        if cmd.lower() not in (removeSpaces(p).lower() for p in Pokedex):
            return '{cmd} is not a valid command'.format(cmd = cmd),True
        if cmd in substitutes:
            cmd = substitutes[cmd]
        return 'Analysis: http://www.smogon.com/dex/xy/pokemon/{mon}/'.format(mon = cmd), True
    else:
        return False, False
def URL():
    """Return the bot's GitHub repository URL."""
    return 'https://github.com/QuiteQuiet/PokemonShowdownBot/'
def removeSpaces(text):
    """Return *text* with every space character removed."""
    return ''.join(text.split(' '))
# Permission settings for different things
# These can't be changed during operation, compared to the general permission
def isMaster(self, user):
    """True only for the configured bot owner."""
    return self.details['master'] == user['name']
def isWhitelisted(self, user):
    """True for the owner or anyone on the whitelist."""
    name = user['name']
    if name == self.details['master']:
        return True
    return name in self.details['whitelist']
def canSee(self, user):
    """True for the owner, or users ranked % (driver) and above."""
    if user['name'] == self.details['master']:
        return True
    return self.Groups[user['group']] >= self.Groups['%']
def canChange(self, user):
    """True for the owner, or users ranked # (room owner) and above."""
    if user['name'] == self.details['master']:
        return True
    return self.Groups[user['group']] >= self.Groups['#']
def canAddUser(self, user):
    """True for the owner, or users ranked # (room owner) and above."""
    if user['name'] == self.details['master']:
        return True
    return self.Groups[user['group']] >= self.Groups['#']
def canStartGame(self, user):
    """True for the owner, or users ranked % (driver) and above."""
    if user['name'] == self.details['master']:
        return True
    return self.Groups[user['group']] >= self.Groups['%']
def canStartTrivia(self, user):
    """True for the owner, or users ranked @ (moderator) and above."""
    if user['name'] == self.details['master']:
        return True
    return self.Groups[user['group']] >= self.Groups['@']
def isGameType(running, gameType):
    """True when the active game object is exactly of class *gameType*.

    Deliberately an exact-type check (not isinstance), so distinct game
    classes never match each other.
    """
    return type(running) is gameType
def acceptableWeakness(team):
    """Check that *team* has no type weakness shared by too many members.

    A team is rejected when any attacking type hits three or more members
    super-effectively, or hits two members super-effectively while at most
    one member resists it.  An empty/falsy team is never acceptable.
    """
    if not team:
        return False
    tally = {attack: {'weak': 0, 'res': 0} for attack in Types}
    for member in team:
        member_types = Pokedex[member]['types']
        for attack in Types:
            # Combined effectiveness against this member is the product
            # over its type(s); a single type is just its own multiplier.
            eff = 1
            for t in member_types:
                eff *= Types[t][attack]
            if eff > 1:
                tally[attack]['weak'] += 1
            elif eff < 1:
                tally[attack]['res'] += 1
    for attack in tally:
        counts = tally[attack]
        if counts['weak'] >= 3:
            return False
        if counts['weak'] >= 2 and counts['res'] <= 1:
            return False
    return True
def saveDetails(self):
    """Persist the bot configuration to details.yaml.

    Live room objects cannot be serialized, so each joined room is
    flattened into a 'joinRooms' entry that keeps only its persistent
    settings, and 'rooms' is written out empty.
    """
    snapshot = {key: value for key, value in self.details.items()
                if key not in ('rooms', 'joinRooms')}
    snapshot['joinRooms'] = [
        {name: {'moderate': self.details['rooms'][name].moderate,
                'allow games': self.details['rooms'][name].allowGames}}
        for name in self.details['rooms']
    ]
    snapshot['rooms'] = {}
    with open('details.yaml', 'w') as yf:
        yaml.dump(snapshot, yf, default_flow_style = False)
def getJoke():
    """Return a random name from a hard-coded list of community members."""
    people = ['Disjunction','Aladyyn','boltsandbombers','Can-Eh-Dian','Deej Dy','innovamania','Kiyo','Marilli','Montsegur','Pokedots','Punchshroom','Queen of Luvdiscs','rw','Scorpdestroyer','silver Aurum','Sir Kay','tennis','Blast Chance','HJAD','shaneghoul','Soulgazer','Allstar124','Blaziken1337','Dentricos','Finchinator','flcl','GyRro','hootie','Jarii','Less Than Three Man','Marikeinen','Metaphysical','Not Nova','Nozzle','orphic','Raptures Finest','rozes','Sweet Jesus','Syncrasy','Vileman',"Winter's Howl"]
    # Same randint indexing as elsewhere in this file, so seeded runs
    # consume randomness identically.
    return people[randint(0, len(people)-1)]
# TODO: test for whitelisted-ness before doing anything with it
# The command file for every external command not specifically for running
# the bot. Even more relevant commands like broadcast options and whitelists
# are treated as such.
#
# Every command in here should follow the basic structure of:
# elif cmd == 'commandHere':
# doYourThings(lots, of, variables)
# return 'Your Response', True/False
#
# True: Allows that the command in question can, if gotten from a room,
# be returned to the same room rather than a PM.
# False: This will ALWAYS return the reply as a PM, no matter where it came from
from random import randint, sample
import re
import yaml
import json
import math # For funsies
from data.tiers import tiers, formats
from data.teams import Teams
from data.links import Links, YoutubeLinks
from data.pokedex import Pokedex
from data.types import Types
from data.replies import Lines
from plugins.games import Hangman, Anagram
from plugins.workshop import Workshop
from plugins.trivia.trivia import Trivia
from plugins.moderation import addBan, removeBan
# Usage stats snapshot linked by the ~usage command.
usageLink = r'http://www.smogon.com/stats/2015-09/'
# Commands that count as chat games (subject to per-room 'allow games').
GameCommands = ['hangman', 'hg', 'anagram', 'a', 'trivia', 'ta']
# Commands whose reply may be PM'd even without broadcast rights.
CanPmReplyCommands = ['usage', 'help']
# Anagram win counts keyed by user id; persisted in plugins/scoreboard.yaml.
Scoreboard = {}
# 'a+' creates the file when missing; seek(0) rewinds so it can be read.
with open('plugins/scoreboard.yaml', 'a+') as yf:
    yf.seek(0, 0)
    # safe_load: the scoreboard holds only plain str/int data, and
    # yaml.load() without an explicit Loader is unsafe on untrusted input
    # (and a TypeError under PyYAML >= 6).
    Scoreboard = yaml.safe_load(yf)
if not Scoreboard: # Empty yaml file set Scoreboard to None, but an empty dict is better
    Scoreboard = {}
def Command(self, cmd, room, msg, user):
    '''Dispatch one bot command and build the reply.

    Args:
        self: bot instance (carries details, Groups, ws, usernotes, rooms).
        cmd: lower-case command word (without the command character).
        room: name of the room the message came from.
        msg: everything after the command word.
        user: dict describing the sender ('name', 'unform', 'group').

    Returns:
        (reply, allowBroadcast): reply is the text to send, or False when
        the command is unknown; allowBroadcast is True when the reply may
        be posted in the room instead of being PM'd.
    '''
    # Debug commands and program info
    if cmd == 'echo':
        return msg, True
    elif cmd in ['source', 'git']:
        return 'Source code can be found at: {url}'.format(url = URL()), False
    elif cmd == 'credits':
        return 'Credits can be found: {url}'.format(url = URL()), True
    elif cmd in ['commands', 'help']:
        return 'Read about commands here: {url}blob/master/COMMANDS.md'.format(url = URL()), True
    elif cmd == 'explain':
        return "BB-8 is the name of a robot in the seventh Star Wars movie :)", True
    elif cmd == 'leave':
        msg = removeSpaces(msg)
        if not msg: msg = room
        if self.leaveRoom(msg):
            return 'Leaving room {r} succeeded'.format(r = msg), False
        else:
            return 'Could not leave room: {r}'.format(r = msg), False
    # THIS COMMAND SHOULDN'T BE DOCUMENTED!
    elif cmd == 'get':
        # SECURITY: eval() of arbitrary input. Only reachable by the bot
        # owner, but it still grants full code execution by design.
        if isMaster(self, user):
            return str(eval(msg)), True
        else:
            # NOTE(review): 'permisson' typo is in the user-facing string.
            return 'You do not have permisson to use this command. (Only for owner)', False
    # Save current self.details to details.yaml (moves rooms to joinRooms)
    # Please note that this command will remove every comment from details.yaml, if those exist.
    elif cmd == 'savedetails':
        if canChange(self, user):
            saveDetails(self)
            return 'Details saved.', False
        else:
            return "You don't have permission to save settings. (Requires #)", False
    # Permissions
    elif cmd == 'broadcast':
        return 'Rank required to broadcast: {rank}'.format(rank = self.details['broadcastrank']), True
    elif cmd == 'setbroadcast':
        msg = removeSpaces(msg)
        if msg in self.Groups or msg in ['off', 'no', 'false']:
            if canChange(self, user):
                # 'off'/'no'/'false' map to rank ' ' (everyone may broadcast).
                if msg in ['off', 'no', 'false']: msg = ' '
                if self.details['broadcastrank'] == msg:
                    return 'Broadcast rank is already {rank}'.format(rank = msg), True
                else:
                    self.details['broadcastrank'] = msg
                    return 'Broadcast rank set to {rank}. (This is not saved on reboot)'.format(rank = msg), True
            else:
                return 'You are not allowed to set broadcast rank. (Requires #)', False
        else:
            return '{rank} is not a valid rank'.format(rank = msg), False
    elif cmd == 'whitelist':
        if canSee(self, user):
            if self.details['whitelist']:
                return ', '.join(self.details['whitelist']), False
            else:
                return 'Whitelist is empty :(', False
        else:
            return 'You are not allowed to see the whitelist :l (Requires %)', False
    elif cmd in ['whitelistuser', 'wluser']:
        # NOTE(review): no else branch; unauthorized users get None back
        # instead of a (reply, bool) tuple.
        if canAddUser(self, user):
            # NOTE(review): [^a-zA-z0-9] has the A-z range bug; it also
            # lets [ \ ] ^ _ ` through.
            msg = re.sub(r'[^a-zA-z0-9]', '', msg)
            if msg in self.details['whitelist']:
                return '{user} is already whitelisted'.format(user = msg), True
            self.details['whitelist'].append(msg)
            return 'User {usr} added to whitelist.'.format(usr = msg), True
    elif cmd == 'removewl':
        # NOTE(review): no else branch; unauthorized users get None back.
        if canAddUser(self, user):
            msg = re.sub(r'[^a-zA-z0-9]', '', msg)
            if msg not in self.details['whitelist']:
                return '{user} is not whitelisted'.format(user = msg), True
            self.details['whitelist'].remove(msg)
            return 'User {usr} removed from the whitelist.'.format(usr = msg), True
    elif cmd == 'moderate':
        if not msg:
            return 'No parameters given. Command is ~moderate [room],True/False', False
        else:
            if canChange(self, user):
                things = removeSpaces(msg).split(',')
                if not len(things) == 2:
                    return 'Too few/many parameters given. Command is ~moderate [room],True/False', False
                if things[0] in self.details['rooms']:
                    # NOTE(review): a second parameter that is neither
                    # True/true nor False/false falls through and returns None.
                    if things[1] in ['True', 'true']:
                        self.details['rooms'][things[0]].moderate = True
                        return '{room} will now be moderated'.format(room = things[0]), False
                    elif things[1] in ['False', 'false']:
                        self.details['rooms'][things[0]].moderate = False
                        return '{room} will not be moderated anymore'.format(room = things[0]), False
                else:
                    return 'You cannot set moderation in a room without me in it.', False
            else:
                return 'You do not have permission to set this. (Requires #)', False
    # Autobans
    elif cmd in ['banuser', 'banphrase']:
        if canAddUser(self, user):
            # cmd[3:] -> 'user' or 'phrase', telling addBan what kind to ban.
            error = addBan(cmd[3:], room, msg)
            if not error:
                return 'Added {thing} to the banlist'.format(thing = msg), True
            else:
                return error, True
        else:
            return 'You do not have permission to do this. (Requires #)', False
    elif cmd in ['unbanuser', 'unbanphrase']:
        if canAddUser(self, user):
            # cmd[5:] -> 'user' or 'phrase', mirroring the ban command.
            error = removeBan(cmd[5:], room, msg)
            if not error:
                return 'Removed {thing} from banlist'.format(thing = msg), True
            else:
                return error, True
        else:
            return 'You do not have permission to do this. (Requires #)', False
    elif cmd == 'allowgames':
        if canChange(self, user):
            msg = removeSpaces(msg)
            things = msg.split(',')
            if len(things) == 2:
                if things[0] in self.details['rooms']:
                    if things[1] in ['true','yes','y','True']:
                        if not self.details['rooms'][things[0]].allowGames:
                            self.details['rooms'][things[0]].allowGames = True
                            return 'Chatgames are now allowed in {room}'.format(room = things[0]), True
                        else:
                            return 'Chatgames are already allowed in {room}'.format(room = things[0]), True
                    # NOTE(review): ' False' (leading space) is unreachable
                    # because removeSpaces() already stripped all spaces.
                    elif things[1] in ['false', 'no', 'n',' False']:
                        self.details['rooms'][things[0]].allowGames = False
                        return 'Chatgames are now not allowed in {room}'.format(room = things[0]), True
                    else:
                        return '{param} is not a supported parameter'.format(param = things[1]), True
                else:
                    return 'Cannot allow chatgames without being in the room', True
            else:
                return 'Too few/many parameters. Command is ~allowgames [room],True/False', False
        else:
            return 'You do not have permission to change this. (Requires #)', False
    # Informational commands
    elif cmd in Links:
        if msg in Links[cmd]:
            return Links[cmd][msg], True
        else:
            return '{tier} is not a supported format for {command}'.format(tier = msg, command = cmd), True
    elif cmd == 'team':
        if msg not in Teams:
            return 'Unsupported format', True
        return Teams[msg][randint(0, len(Teams[msg])-1)], True
    elif cmd == 'usage':
        return usageLink, True
    # Offline messages
    elif cmd == 'tell':
        if not isWhitelisted(self, user): return "You don't have the permission to use this feature.", False
        if not msg: return 'You need to specify a user and a message to send in the format: [user], [message]', False
        msg = msg.split(',')
        # NOTE(review): IndexError if the message contains no comma (msg[1]).
        to = re.sub(r'[^a-zA-z0-9]', '', msg[0]).lower()
        if self.usernotes.alreadySentMessage(to, user['unform']):
            return 'You already have a message to this user waiting', False
        if len(msg[1].lstrip()) > 150:
            return 'Message is too long. Max limit is 150 characters', False
        self.usernotes.addMessage(to, user['unform'], msg[1].lstrip())
        return "I'll be sure to tell them that.", True
    # Fun stuff
    elif cmd == 'pick':
        options = msg.split(',')
        return options[randint(0,(len(options)-1))], True
    elif cmd == 'ask':
        return Lines[randint(0, len(Lines)-1)], True
    elif cmd in YoutubeLinks:
        return YoutubeLinks[cmd], True
    elif cmd == 'squid':
        if msg:
            if msg.isdecimal():
                nr = float(msg)
                if 0 < nr <= 10:
                    # '\u304f\u30b3\u003a\u5f61' renders as a squid emoticon.
                    return '\u304f\u30b3\u003a\u5f61' * int(nr), True
                else:
                    return 'Can only use whole numbers between 1 and 10', True
            else:
                return 'Invalid parameter given. Accepting whole numbers between 1 and 10.', True
        else:
            return '\u304f\u30b3\u003a\u5f61', True
    elif cmd == 'joke':
        # Half the time, voiced (+) and above get their own name back.
        if randint(0, 1) and self.Groups[user['group']] >= self.Groups['+']:
            return user['unform'], True
        else:
            return getJoke(), True
    elif cmd in tiers:
        pick = list(tiers[cmd])[randint(0,len(tiers[cmd])-1)]
        # Strip mega/primal forme suffixes so the dex URL resolves.
        pNoForm = re.sub('-(?:Mega(?:-(X|Y))?|Primal)','', pick).lower()
        return '{poke} was chosen: http://www.smogon.com/dex/xy/pokemon/{mon}/'.format(poke = pick, mon = pNoForm), True
    elif cmd in [t.replace('poke','team) for t in tiers]:
        team = set()
        attempts = 0
        while len(team) < 6 or not acceptableWeakness(team):
            poke = list(tiers[cmd.replace('team','poke')])[randint(0,len(tiers[cmd.replace('team','poke')])-1)]
            # Test if share dex number with anything in the team
            if [p for p in team if Pokedex[poke]['dex'] == Pokedex[p]['dex']]:
                continue
            if [p for p in team if '-Mega' in p] and '-Mega' in poke:
                continue
            team |= {poke}
            if not acceptableWeakness(team):
                team -= {poke}
            if len(team) >= 6:
                break
            attempts += 1
            if attempts >= 100:
                # Prevents locking up if a pokemon turns the team to an impossible generation
                # Since the team is probably bad anyway, just finish it and exit
                while len(team) < 6:
                    team |= {list(tiers[cmd.replace('team','poke')])[randint(0,len(tiers[cmd.replace('team','poke')])-1)]}
                break
        return ' / '.join(list(team)), True
    # Workshop is not a hangman game, but uses the allowed slot for a game anyway
    # Workshops also doesn't follow the chatgames rule, as they're not chat games
    elif cmd == 'workshop':
        if not isGameType(self.details['rooms'][room].game, Workshop):
            if msg.startswith('new') and canStartGame(self, user):
                self.details['rooms'][room].game = Workshop(re.sub(r'[^a-zA-z0-9]', '', msg[len('new '):] if msg[len('new '):] else user['name']).lower())
                return 'A new workshop session was created', True
            else:
                return 'No active workshop right now', True
        workshop = self.details['rooms'][room].game
        if msg.startswith('add'):
            if not user['name'] == workshop.host and not canStartGame(self, user):
                return 'Only the workshop host or a Room Moderator can add Pokemon', True
            return workshop.addPokemon(msg[len('add '):]), True
        elif msg.startswith('remove'):
            if not user['name'] == workshop.host and not canStartGame(self, user):
                return 'Only the workshop host or a Room Moderator can remove Pokemon', True
            return workshop.removePokemon(msg[len('remove '):]), True
        elif msg == 'clear':
            if not user['name'] == workshop.host and not canStartGame(self, user):
                return 'Only the workshop host or a Room Moderator can clear the team', True
            return workshop.clearTeam(), True
        elif msg == 'team':
            return workshop.getTeam(), True
        elif msg == 'end':
            if not user['name'] == workshop.host and not canStartGame(self, user):
                return 'Only the workshop host or a Room Moderator can end the workshop', True
            self.details['rooms'][room].game = None
            return 'Workshop session ended', True
    # Chat games go here
    # Hangman
    elif cmd == 'hangman':
        msg = msg.strip().split(',')
        if 'end' in msg[0] and canStartGame(self, user) and isGameType(self.details['rooms'][room].game, Hangman):
            phrase = self.details['rooms'][room].game.getSolution()
            self.details['rooms'][room].game = None
            return 'The hangman game was forcefully ended by {baduser}. (Killjoy)\nThe solution was: **{solved}**'.format(baduser = user['unform'], solved = phrase), True
        elif 'new' in msg[0]: # ~hangman new,room,[phrase]
            if canStartGame(self, user):
                if self.details['rooms'][room].game:
                    return 'A game is already running in this room', False
                # Collapse runs of whitespace, then drop anything that is
                # not a letter, digit or single space.
                phrase = re.sub(r'[^a-zA-Z0-9 ]', '', re.sub(r'\s{2,}', ' ', msg[2].strip()))
                if not phrase.strip():
                    return 'You can only have letters, numbers or spaces in the phrase', False
                if len(removeSpaces(phrase)) <= 1:
                    return 'The phrase must be at least two characters long', False
                self.details['rooms'][room].game = Hangman(phrase)
                return 'A new game of hangman has begun:\n' + self.details['rooms'][room].game.printCurGame(), True
            else:
                return 'You do not have permission to start a game in this room. (Requires %)', False
        else:
            return 'To start a new hangman game: ~hangman new,[room],[phrase]', True
    elif cmd == 'hg':
        if isGameType(self.details['rooms'][room].game, Hangman):
            # Single character -> letter guess; anything longer -> phrase guess.
            if len(removeSpaces(msg)) == 1:
                return self.details['rooms'][room].game.guessLetter(msg.replace(' ','').lower()), True
            else:
                if not msg.lstrip():
                    return "You can't guess nothing", True
                if self.details['rooms'][room].game.guessPhrase(msg.lstrip()):
                    solved = self.details['rooms'][room].game.getFormatedPhrase()
                    self.details['rooms'][room].game = None
                    return 'Congratulations {name}. You won!\nThe phrase was: {phrase}'.format(name = user['unform'], phrase = solved), True
                else:
                    return '{test} is wrong!'.format(test = msg.lstrip()), True
        else:
            return 'There is no hangman game in progress right now', True
    # Anagrams of Pokemon names
    elif cmd == 'anagram':
        if msg == 'new':
            if canStartGame(self, user):
                if self.details['rooms'][room].game:
                    return 'A game is already running somewhere', False
                else:
                    self.details['rooms'][room].game = Anagram()
                    return 'A new anagram has been created:\n' + self.details['rooms'][room].game.getWord(), True
            else:
                return 'You do not have permission to start a game in this room. (Requires %)', False
        elif msg == 'hint':
            if self.details['rooms'][room].game:
                return 'The hint is: ' + self.details['rooms'][room].game.getHint(), True
            else:
                return 'There is no active anagram right now', False
        elif msg == 'end':
            if canStartGame(self, user):
                if isGameType(self.details['rooms'][room].game, Anagram):
                    solved = self.details['rooms'][room].game.getSolvedWord()
                    self.details['rooms'][room].game = None
                    return 'The anagram was forcefully ended by {baduser}. (Killjoy)\nThe solution was: **{solved}**'.format(baduser = user['unform'], solved = solved), True
                else:
                    return 'There is no active anagram or a different game is active.', False
            else:
                return 'You do not have permission to end the anagram. (Requires %)', True
        elif msg.startswith('score'):
            if msg.strip() == 'score':
                return 'No name was given', True
            name = re.sub(r'[^a-zA-z0-9]', '', msg[len('score '):]).lower()
            if name not in Scoreboard:
                return "This user never won any anagrams", True
            return 'This user has won {number} anagram{plural}'.format(number = Scoreboard[name], plural = '' if not type(Scoreboard[name]) == str and Scoreboard[name] < 2 else 's'), True
        else:
            if not msg:
                if isGameType(self.details['rooms'][room].game, Anagram):
                    return 'Current anagram: {word}'.format(word = self.details['rooms'][room].game.getWord()), True
                else:
                    return 'There is no active anagram right now', False
            return '{param} is not a valid parameter for ~anagram. Make guesses with ~a'.format(param = msg), False
    elif cmd == 'a':
        game = self.details['rooms'][room].game
        if isGameType(game, Anagram):
            if game.isCorrect(re.sub(r'[ -]', '', msg).lower()):
                solved = game.getSolvedWord()
                timeTaken = game.getSolveTimeStr()
                self.details['rooms'][room].game = None
                # Save score
                Scoreboard[user['name']] = 1 if user['name'] not in Scoreboard else Scoreboard[user['name']] + 1
                with open('plugins/scoreboard.yaml', 'w') as ym:
                    yaml.dump(Scoreboard, ym)
                return 'Congratulations, {name} got it{time}\nThe solution was: {solution}'.format(name = user['unform'], time = timeTaken, solution = solved), True
            else:
                return '{test} is wrong!'.format(test = msg.lstrip()), True
        else:
            return 'There is no anagram active right now', True
    # Trivia
    elif cmd == 'trivia':
        if msg:
            params = removeSpaces(msg).split(',')
            if params[0] in ['start', 'begin']:
                kind = 'first'
                if len(params) > 1:
                    kind = params[1]
                if canStartTrivia(self, user):
                    self.details['rooms'][room].game = Trivia(self.ws, room, kind)
                    return 'A new trivia session has started.', True
                else:
                    return 'You do not have permission to set up a trivia session', False
            elif params[0] in ['stop', 'end']:
                # The trivia class will solve everything after doing this.
                # NOTE(review): no check that a Trivia game exists; raises
                # AttributeError when game is None.
                self.details['rooms'][room].game.endSession = True
                self.details['rooms'][room].game = None
                return 'The trivia session has been ended', True
        # NOTE(review): missing .format(msg = msg) -- the literal text
        # '{msg}' is sent to the user; also reached when msg is empty.
        return '{msg} is not an valid parameter for trivia', False
    elif cmd == 'ta':
        game = self.details['rooms'][room].game
        if isGameType(game, Trivia):
            # Don't give information if wrong or right here, let Trivia deal with that
            if game.tryAnswer(msg):
                if not game.solver:
                    game.wasSolved(user['unform'])
                else:
                    game.multiple = True
            return 'NoAnswer', False
        else:
            return 'There is no ongoing trivia session.', True
    # Commands with awful conditions last
    elif cmd in formats:
        return 'Format: http://www.smogon.com/dex/xy/formats/{tier}/'.format(tier = cmd), True
    # This command is here because it's an awful condition, so try it last :/
    elif [p for p in Pokedex if re.sub('-(?:mega(?:-(x|y))?|primal|xl|l)','', cmd, flags=re.I) in p.replace(' ','').lower()]:
        cmd = re.sub('-(?:mega(?:-(x|y))?|primal)','', cmd)
        substitutes = {'gourgeist-s':'gourgeist-small', # This doesn't break Arceus-Steel like adding |S to the regex would
                       'gourgeist-l':'gourgeist-large', # and gourgeist-s /pumpkaboo-s still get found, because it matches the
                       'gourgeist-xl':'gourgeist-super', # entry for gourgeist/pumpkaboo-super
                       'pumpkaboo-s':'pumpkaboo-small',
                       'pumpkaboo-l':'pumpkaboo-large',
                       'pumpkaboo-xl':'pumpkaboo-super',
                       'giratina-o':'giratina-origin',
                       'mr.mime':'mr_mime',
                       'mimejr.':'mime_jr'}
        if cmd.lower() not in (removeSpaces(p).lower() for p in Pokedex):
            return '{cmd} is not a valid command'.format(cmd = cmd),True
        if cmd in substitutes:
            cmd = substitutes[cmd]
        return 'Analysis: http://www.smogon.com/dex/xy/pokemon/{mon}/'.format(mon = cmd), True
    else:
        return False, False
def URL():
    """Return the bot's GitHub repository URL."""
    return 'https://github.com/QuiteQuiet/PokemonShowdownBot/'
def removeSpaces(text):
    """Return *text* with every space character removed."""
    return ''.join(text.split(' '))
# Permission settings for different things
# These can't be changed during operation, compared to the general permission
def isMaster(self, user):
    """True only for the configured bot owner."""
    return self.details['master'] == user['name']
def isWhitelisted(self, user):
    """True for the owner or anyone on the whitelist."""
    name = user['name']
    if name == self.details['master']:
        return True
    return name in self.details['whitelist']
def canSee(self, user):
    """True for the owner, or users ranked % (driver) and above."""
    if user['name'] == self.details['master']:
        return True
    return self.Groups[user['group']] >= self.Groups['%']
def canChange(self, user):
    """True for the owner, or users ranked # (room owner) and above."""
    if user['name'] == self.details['master']:
        return True
    return self.Groups[user['group']] >= self.Groups['#']
def canAddUser(self, user):
    """True for the owner, or users ranked # (room owner) and above."""
    if user['name'] == self.details['master']:
        return True
    return self.Groups[user['group']] >= self.Groups['#']
def canStartGame(self, user):
    """True for the owner, or users ranked % (driver) and above."""
    if user['name'] == self.details['master']:
        return True
    return self.Groups[user['group']] >= self.Groups['%']
def canStartTrivia(self, user):
    """True for the owner, or users ranked @ (moderator) and above."""
    if user['name'] == self.details['master']:
        return True
    return self.Groups[user['group']] >= self.Groups['@']
def isGameType(running, gameType):
    """True when the active game object is exactly of class *gameType*.

    Deliberately an exact-type check (not isinstance), so distinct game
    classes never match each other.
    """
    return type(running) is gameType
def acceptableWeakness(team):
    """Check that *team* has no type weakness shared by too many members.

    A team is rejected when any attacking type hits three or more members
    super-effectively, or hits two members super-effectively while at most
    one member resists it.  An empty/falsy team is never acceptable.
    """
    if not team:
        return False
    tally = {attack: {'weak': 0, 'res': 0} for attack in Types}
    for member in team:
        member_types = Pokedex[member]['types']
        for attack in Types:
            # Combined effectiveness against this member is the product
            # over its type(s); a single type is just its own multiplier.
            eff = 1
            for t in member_types:
                eff *= Types[t][attack]
            if eff > 1:
                tally[attack]['weak'] += 1
            elif eff < 1:
                tally[attack]['res'] += 1
    for attack in tally:
        counts = tally[attack]
        if counts['weak'] >= 3:
            return False
        if counts['weak'] >= 2 and counts['res'] <= 1:
            return False
    return True
def saveDetails(self):
    """Persist the bot configuration to details.yaml.

    Live room objects cannot be serialized, so each joined room is
    flattened into a 'joinRooms' entry that keeps only its persistent
    settings, and 'rooms' is written out empty.
    """
    snapshot = {key: value for key, value in self.details.items()
                if key not in ('rooms', 'joinRooms')}
    snapshot['joinRooms'] = [
        {name: {'moderate': self.details['rooms'][name].moderate,
                'allow games': self.details['rooms'][name].allowGames}}
        for name in self.details['rooms']
    ]
    snapshot['rooms'] = {}
    with open('details.yaml', 'w') as yf:
        yaml.dump(snapshot, yf, default_flow_style = False)
def getJoke():
    """Return a random name from a hard-coded list of community members."""
    people = ['Disjunction','Aladyyn','boltsandbombers','Can-Eh-Dian','Deej Dy','innovamania','Kiyo','Marilli','Montsegur','Pokedots','Punchshroom','Queen of Luvdiscs','rw','Scorpdestroyer','silver Aurum','Sir Kay','tennis','Blast Chance','HJAD','shaneghoul','Soulgazer','Allstar124','Blaziken1337','Dentricos','Finchinator','flcl','GyRro','hootie','Jarii','Less Than Three Man','Marikeinen','Metaphysical','Not Nova','Nozzle','orphic','Raptures Finest','rozes','Sweet Jesus','Syncrasy','Vileman',"Winter's Howl"]
    # Same randint indexing as elsewhere in this file, so seeded runs
    # consume randomness identically.
    return people[randint(0, len(people)-1)]
|
# -*- mode: python; coding: utf-8 -*-
# Copyright (c) 2020 Radio Astronomy Software Group
# Licensed under the 2-clause BSD License
"""Tests for Mir class.
Performs a series of test for the Mir class, which inherits from UVData. Note that
there is a separate test module for the MirParser class (mir_parser.py), which is
what is used to read the raw binary data into something that the Mir class can
manipulate into a UVData object.
"""
import os
import pytest
import numpy as np
from ... import UVData
from ...data import DATA_PATH
from ...uvdata.mir import mir_parser
@pytest.fixture
def mir_data_object():
    """Yield a MirParser for the SMA test file with vis/raw/auto loaded."""
    filepath = os.path.join(DATA_PATH, "sma_test.mir")
    parser = mir_parser.MirParser(
        filepath, load_vis=True, load_raw=True, load_auto=True,
    )

    yield parser

    # cleanup
    del parser
@pytest.fixture
def uv_in_uvfits(tmp_path):
    """Yield (MIR-loaded UVData, empty UVData, uvfits output path)."""
    mir_file = os.path.join(DATA_PATH, "sma_test.mir")
    out_file = os.path.join(tmp_path, "outtest_mir.uvfits")

    uv_in = UVData()
    # Currently only one source is supported.
    uv_in.read(mir_file, pseudo_cont=True)
    uv_out = UVData()

    yield uv_in, uv_out, out_file

    # cleanup
    del uv_in, uv_out
@pytest.fixture
def uv_in_uvh5(tmp_path):
    """Yield (MIR-loaded UVData, empty UVData, uvh5 output path)."""
    mir_file = os.path.join(DATA_PATH, "sma_test.mir")
    out_file = os.path.join(tmp_path, "outtest_mir.uvh5")

    uv_in = UVData()
    # Currently only one source is supported.
    uv_in.read(mir_file)
    uv_out = UVData()

    yield uv_in, uv_out, out_file

    # cleanup
    del uv_in, uv_out
@pytest.mark.filterwarnings("ignore:LST values stored in this file are not ")
@pytest.mark.parametrize("future_shapes", [True, False])
def test_read_mir_write_uvfits(uv_in_uvfits, future_shapes):
    """
    Mir to uvfits loopback test.

    Read in Mir files, write out as uvfits, read back in and check for
    object equality (run with both current and future array shapes).
    """
    mir_uv, uvfits_uv, testfile = uv_in_uvfits
    if future_shapes:
        mir_uv.use_future_array_shapes()
    # Round-trip through the UVFITS format on disk.
    mir_uv.write_uvfits(testfile, spoof_nonessential=True)
    uvfits_uv.read_uvfits(testfile)
    if future_shapes:
        uvfits_uv.use_future_array_shapes()
    # UVFITS doesn't allow for numbering of spectral windows like MIR does, so
    # we need an extra bit of handling here
    assert len(np.unique(mir_uv.spw_array)) == len(np.unique(uvfits_uv.spw_array))
    # Map each UVFITS window number back to its MIR window number.
    spw_dict = {idx: jdx for idx, jdx in zip(uvfits_uv.spw_array, mir_uv.spw_array)}
    assert np.all(
        [
            idx == spw_dict[jdx]
            for idx, jdx in zip(mir_uv.flex_spw_id_array, uvfits_uv.flex_spw_id_array,)
        ]
    )
    # Now that we've checked, set this things as equivalent
    uvfits_uv.spw_array = mir_uv.spw_array
    uvfits_uv.flex_spw_id_array = mir_uv.flex_spw_id_array
    # Check the history first via find
    assert 0 == uvfits_uv.history.find(
        mir_uv.history + " Read/written with pyuvdata version:"
    )
    # Equalize history (the writer appends to it) before full comparison.
    mir_uv.history = uvfits_uv.history
    assert mir_uv == uvfits_uv
@pytest.mark.filterwarnings("ignore:LST values stored ")
def test_read_mir_write_uvh5(uv_in_uvh5):
    """
    Mir to uvh5 loopback test.

    Read in Mir files, write out as uvh5, read back in and check for
    object equality.
    """
    mir_uv, uvh5_uv, testfile = uv_in_uvh5
    # Round-trip through the UVH5 format on disk.
    mir_uv.write_uvh5(testfile)
    uvh5_uv.read_uvh5(testfile)
    # Check the history first via find
    assert 0 == uvh5_uv.history.find(
        mir_uv.history + " Read/written with pyuvdata version:"
    )
    # test fails because of updated history, so this is our workaround for now.
    mir_uv.history = uvh5_uv.history
    assert mir_uv == uvh5_uv
def test_write_mir(uv_in_uvfits, err_type=NotImplementedError):
    """
    Mir writer test

    Writing MIR data is unsupported, so the writer is expected to raise
    a 'not implemented' error.
    """
    mir_uv, _, _ = uv_in_uvfits

    # Check and see if the correct error is raised
    with pytest.raises(err_type):
        mir_uv.write_mir("dummy.mir")
@pytest.mark.filterwarnings("ignore:LST values stored in this file are not ")
@pytest.mark.parametrize("future_shapes", [True, False])
def test_flex_spw_read(tmp_path, future_shapes):
    """
    Mir test for flexible spws.

    Read in Mir files using flexible spectral windows, all of the same nchan;
    drive the flex-spw validation and write paths through their error
    branches, then check that frequency-axis concat/add round-trips.
    """
    testfile = os.path.join(DATA_PATH, "sma_test.mir")
    uv_in = UVData()
    uv_in.read_mir(testfile)
    dummyfile = os.path.join(tmp_path, "dummy.mirtest.uvfits")
    if future_shapes:
        uv_in.use_future_array_shapes()
    uv_in2 = uv_in.copy()
    # frequency_average is not implemented for flex-spw objects.
    with pytest.raises(NotImplementedError):
        uv_in2.frequency_average(2)
    # Corrupt the spw id map to trigger the contiguity check.
    uv_in2.flex_spw_id_array[0] = 1
    with pytest.raises(ValueError):
        uv_in2._check_flex_spw_contiguous()
    uv_in2 = uv_in.copy()
    # A single zeroed channel width makes the spacing uneven.
    uv_in2.channel_width[0] = 0
    with pytest.raises(ValueError, match="The frequencies are not evenly spaced"):
        uv_in2._check_freq_spacing()
    uv_in2 = uv_in.copy()
    # All-zero channel widths trip the separation check instead.
    uv_in2.channel_width[:] = 0
    with pytest.raises(ValueError, match="The frequencies are separated by more"):
        uv_in2._check_freq_spacing()
    uv_in2 = uv_in.copy()
    # Negative frequencies are rejected by the UVFITS writer.
    uv_in2.freq_array *= -1
    with pytest.raises(ValueError, match="Frequency values must be > 0 for UVFITS!"):
        uv_in2.write_uvfits(dummyfile, spoof_nonessential=True)
    uv_in2 = uv_in.copy()
    uv_in2.freq_array[:] = 1
    uv_in2.channel_width *= 0
    with pytest.raises(ValueError, match="Something is wrong, frequency values not"):
        uv_in2.write_uvfits(dummyfile, spoof_nonessential=True)
    # Move on to testing fast-concat and add methods
    uv_in2 = uv_in.copy()
    uv_in2.select(freq_chans=np.where(uv_in2.flex_spw_id_array < 0))
    uv_in3 = uv_in.copy()
    uv_in3.select(freq_chans=np.where(uv_in3.flex_spw_id_array > 0))
    uv_in4 = uv_in2.fast_concat(uv_in3, axis="freq")
    # Check the history first via find
    assert 0 == uv_in4.history.find(
        uv_in.history + " Downselected to specific frequencies using pyuvdata. "
        "Combined data along frequency axis using pyuvdata."
    )
    uv_in4.history = uv_in.history
    assert uv_in == uv_in4
    # Flipped order is intentional here, since the operation should work
    # irrespective of add order, following the sort below
    uv_in4 = uv_in3 + uv_in2
    assert 0 == uv_in4.history.find(
        uv_in.history + " Downselected to specific frequencies using pyuvdata. "
        "Combined data along frequency axis using pyuvdata."
    )
    uv_in4.history = uv_in.history
    # Need to perform a sort here, since the default ordering on add isn't identical
    # to the read order for MIR files.
    uv_in4.reorder_freqs(spw_order=uv_in.spw_array)
    assert uv_in == uv_in4
def test_multi_nchan_spw_read(tmp_path):
    """
    Mir to uvfits error test for spws of different sizes.

    Windows with mixed channel counts cannot be represented in uvfits,
    so writing the object is expected to raise.
    """
    mir_file = os.path.join(DATA_PATH, "sma_test.mir")
    uv_obj = UVData()
    uv_obj.read_mir(mir_file, corrchunk=[0, 1, 2, 3, 4])

    out_file = os.path.join(tmp_path, "dummy.mirtest.uvfits")
    with pytest.raises(IndexError):
        uv_obj.write_uvfits(out_file, spoof_nonessential=True)
def test_read_mir_no_records():
    """
    Mir no-records check

    Each selection below matches nothing in the file; read_mir should
    raise IndexError with a specific message rather than returning an
    empty object.
    """
    mir_file = os.path.join(DATA_PATH, "sma_test.mir")
    uv_obj = UVData()

    bad_selections = [
        ({"isource": -1}, "No valid records matching those selections!"),
        ({"isb": []}, "No valid sidebands selected!"),
        ({"isb": [-156]}, "isb values contain invalid entries"),
    ]
    for kwargs, message in bad_selections:
        with pytest.raises(IndexError, match=message):
            uv_obj.read_mir(mir_file, **kwargs)
def test_mir_auto_read(
    err_type=IndexError, err_msg="Could not determine auto-correlation record size!"
):
    """
    Mir read tester

    Make sure that Mir autocorrelations are read correctly, and that an
    impossible chunk count raises *err_type* matching *err_msg*.
    """
    testfile = os.path.join(DATA_PATH, "sma_test.mir")
    mir_data = mir_parser.MirParser(testfile)
    # nchunks=999 cannot match the file layout, so scanning must fail.
    with pytest.raises(err_type, match=err_msg):
        ac_data = mir_data.scan_auto_data(testfile, nchunks=999)
    ac_data = mir_data.scan_auto_data(testfile)
    assert np.all(ac_data["nchunks"] == 8)
    mir_data.load_data(load_vis=False, load_auto=True)
    # Select the relevant auto records, which should be for spwin 0-3
    auto_data = mir_data.read_auto_data(testfile, ac_data)[:, 0:4, :, :]
    # Values must match elementwise; NaNs count as equal when both sides
    # are NaN (NaN != NaN under plain comparison).
    assert np.all(
        np.logical_or(
            auto_data == mir_data.auto_data,
            np.logical_and(np.isnan(auto_data), np.isnan(mir_data.auto_data)),
        )
    )
    mir_data.unload_data()
# Below are a series of checks that are designed to check to make sure that the
# MirParser class is able to produce consistent values from an engineering data
# set (originally stored in /data/engineering/mir_data/200724_16:35:14), to make
# sure that we haven't broken the ability of the reader to handle the data. Since
# this file is the basis for the above checks, we've put this here rather than in
# test_mir_parser.py
def test_mir_remember_me_record_lengths(mir_data_object):
    """
    Mir record length checker

    Make sure the test file contains the right number of records.
    """
    mir_data = mir_data_object

    # Check to make sure we've got the right number of records everywhere
    expected_counts = {
        "ac_read": 2,
        "bl_read": 4,
        "codes_read": 99,
        "eng_read": 2,
        "in_read": 1,
        "raw_data": 20,
        "raw_scale_fac": 20,
        "sp_read": 20,
        "vis_data": 20,
        "we_read": 1,
    }
    for attr, count in expected_counts.items():
        assert len(getattr(mir_data, attr)) == count
def test_mir_remember_me_codes_read(mir_data_object):
    """
    Mir codes_read checker.

    Make sure that certain values in the codes_read file of the test data set match
    what we know to be 'true' at the time of observations.
    """
    mir_data = mir_data_object
    # Each entry: (record index, field index, expected value).
    expected_codes = [
        (0, 0, b"filever"),
        (0, 2, b"3"),
        (90, 0, b"ref_time"),
        (90, 1, 0),
        (90, 2, b"Jul 24, 2020"),
        (90, 3, 0),
        (91, 0, b"ut"),
        (91, 1, 1),
        (91, 2, b"Jul 24 2020 4:34:39.00PM"),
        (91, 3, 0),
        (93, 0, b"source"),
        (93, 2, b"3c84"),
        (97, 0, b"ra"),
        (97, 2, b"03:19:48.15"),
        (98, 0, b"dec"),
        (98, 2, b"+41:30:42.1"),
    ]
    for rec_idx, field_idx, value in expected_codes:
        assert mir_data.codes_read[rec_idx][field_idx] == value
def test_mir_remember_me_in_read(mir_data_object):
    """
    Mir in_read checker.

    Make sure that certain values in the in_read file of the test data set match what
    we know to be 'true' at the time of observations, including that spare values are
    stored as zero.
    """
    mir_data = mir_data_object
    # Check to make sure that things seem right in in_read
    expected_values = {
        "traid": 484,
        "proid": 484,
        "inhid": 1,
        "ints": 1,
        "souid": 1,
        "isource": 1,
        "ivrad": 1,
        "ira": 1,
        "idec": 1,
        "epoch": 2000.0,
        "tile": 0,
        "obsflag": 0,
        "obsmode": 0,
        "spareshort": 0,
        "spareint6": 0,
    }
    for field, value in expected_values.items():
        assert np.all(mir_data.in_read[field] == value)
    # mjd is only checked to the nearest whole day.
    assert np.all(np.round(mir_data.in_read["mjd"]) == 59055)
def test_mir_remember_me_bl_read(mir_data_object):
    """
    Mir bl_read checker.

    Make sure that certain values in the bl_read file of the test data set match what
    we know to be 'true' at the time of observations, including that spare values are
    stored as zero.
    """
    mir_data = mir_data_object
    # Now check bl_read
    expected_values = {
        "blhid": np.arange(1, 5),
        "isb": [0, 0, 1, 1],
        "ipol": [0, 0, 0, 0],
        "ant1rx": [0, 1, 0, 1],
        "ant2rx": [0, 1, 0, 1],
        "pointing": 0,
        "irec": [0, 3, 0, 3],
        "iant1": 1,
        "iant2": 4,
        "iblcd": 2,
        "spareint1": 0,
        "spareint2": 0,
        "spareint3": 0,
        "spareint4": 0,
        "spareint5": 0,
        "spareint6": 0,
        "sparedbl3": 0.0,
        "sparedbl4": 0.0,
        "sparedbl5": 0.0,
        "sparedbl6": 0.0,
    }
    for field, value in expected_values.items():
        assert np.all(mir_data.bl_read[field] == value)
def test_mir_remember_me_eng_read(mir_data_object):
    """
    Mir eng_read checker.

    Make sure that certain values in the eng_read file of the test data set match what
    we know to be 'true' at the time of observations.
    """
    mir_data = mir_data_object
    # Now check eng_read
    expected_values = {
        "antennaNumber": [1, 4],
        "padNumber": [5, 8],
        "trackStatus": 1,
        "commStatus": 1,
        "inhid": 1,
    }
    for field, value in expected_values.items():
        assert np.all(mir_data.eng_read[field] == value)
def test_mir_remember_me_ac_read(mir_data_object):
    """
    Mir ac_read checker.

    Make sure that certain values in the autoCorrelations file of the test data set
    match what we know to be 'true' at the time of observations.
    """
    mir_data = mir_data_object
    # Now check ac_read
    expected_ac = {
        "inhid": 1,
        "achid": np.arange(1, 3),
        "antenna": [1, 4],
        "nchunks": 8,
        "datasize": 1048596,
    }
    for field, value in expected_ac.items():
        assert np.all(mir_data.ac_read[field] == value)
    # A couple of weather (we_read) records are spot-checked here as well.
    assert np.all(mir_data.we_read["scanNumber"] == 1)
    assert np.all(mir_data.we_read["flags"] == 0)
def test_mir_remember_me_sp_read(mir_data_object):
    """
    Mir sp_read checker.

    Make sure that certain values in the sp_read file of the test data set match what
    we know to be 'true' at the time of observations, including that spare values are
    stored as zero.
    """
    mir_data = mir_data_object
    # Now check sp_read. The original version of this test asserted "sphid"
    # and "igq" twice each; the redundant duplicates have been removed so
    # every field is checked exactly once.
    assert np.all(mir_data.sp_read["sphid"] == np.arange(1, 21))
    # Expected value (scalar or per-record pattern) for each remaining field.
    expected_values = {
        "igq": 0,
        "ipq": 1,
        "iband": [0, 1, 2, 3, 4] * 4,
        "ipstate": 0,
        "tau0": 0.0,
        "cabinLO": 0.0,
        "corrLO1": 0.0,
        "vradcat": 0.0,
        "nch": [4, 16384, 16384, 16384, 16384] * 4,
        "corrblock": [0, 1, 1, 1, 1] * 4,
        "corrchunk": [0, 1, 2, 3, 4] * 4,
        "correlator": 1,
        "spareint2": 0,
        "spareint3": 0,
        "spareint4": 0,
        "spareint5": 0,
        "spareint6": 0,
        "sparedbl1": 0.0,
        "sparedbl2": 0.0,
        "sparedbl3": 0.0,
        "sparedbl4": 0.0,
        "sparedbl5": 0.0,
        "sparedbl6": 0.0,
    }
    for field, value in expected_values.items():
        assert np.all(mir_data.sp_read[field] == value)
def test_mir_remember_me_sch_read(mir_data_object):
    """
    Mir sch_read checker.

    Make sure that certain values in the sch_read file of the test data set match what
    we know to be 'true' at the time of observations.
    """
    mir_data = mir_data_object
    # Now check sch_read related values. Thanks to a glitch in the data recorder,
    # all of the pseudo-cont values (every 5th record) are the same.
    assert np.all(mir_data.raw_scale_fac[0::5] == [-26] * 4)
    pseudo_cont = np.array(mir_data.raw_data[0::5]).flatten().tolist()
    expected = [-4302, -20291, -5261, -21128, -4192, -19634, -4999, -16346] * 4
    assert pseudo_cont == expected
# Adding further test coverage for flex_spw with add operations
# -*- mode: python; coding: utf-8 -*-
# Copyright (c) 2020 Radio Astronomy Software Group
# Licensed under the 2-clause BSD License
"""Tests for Mir class.
Performs a series of test for the Mir class, which inherits from UVData. Note that
there is a separate test module for the MirParser class (mir_parser.py), which is
what is used to read the raw binary data into something that the Mir class can
manipulate into a UVData object.
"""
import os
import pytest
import numpy as np
from ... import UVData
from ...data import DATA_PATH
from ...uvdata.mir import mir_parser
@pytest.fixture
def mir_data_object():
    # Load every data product (vis, raw, and auto) from the shared test file.
    mir_data = mir_parser.MirParser(
        os.path.join(DATA_PATH, "sma_test.mir"),
        load_vis=True,
        load_raw=True,
        load_auto=True,
    )
    yield mir_data

    # cleanup
    del mir_data
@pytest.fixture
def uv_in_uvfits(tmp_path):
    uv_in = UVData()
    uv_out = UVData()
    # Currently only one source is supported.
    uv_in.read(os.path.join(DATA_PATH, "sma_test.mir"), pseudo_cont=True)
    write_file = os.path.join(tmp_path, "outtest_mir.uvfits")

    yield uv_in, uv_out, write_file

    # cleanup
    del uv_in, uv_out
@pytest.fixture
def uv_in_uvh5(tmp_path):
    uv_in = UVData()
    uv_out = UVData()
    # Currently only one source is supported.
    uv_in.read(os.path.join(DATA_PATH, "sma_test.mir"))
    write_file = os.path.join(tmp_path, "outtest_mir.uvh5")

    yield uv_in, uv_out, write_file

    # cleanup
    del uv_in, uv_out
@pytest.mark.filterwarnings("ignore:LST values stored in this file are not ")
@pytest.mark.parametrize("future_shapes", [True, False])
def test_read_mir_write_uvfits(uv_in_uvfits, future_shapes):
    """
    Mir to uvfits loopback test.

    Read in Mir files, write out as uvfits, read back in and check for
    object equality.
    """
    mir_uv, uvfits_uv, testfile = uv_in_uvfits
    if future_shapes:
        mir_uv.use_future_array_shapes()
    mir_uv.write_uvfits(testfile, spoof_nonessential=True)
    uvfits_uv.read_uvfits(testfile)
    if future_shapes:
        uvfits_uv.use_future_array_shapes()
    # UVFITS doesn't allow for numbering of spectral windows like MIR does, so
    # we need an extra bit of handling here
    assert len(np.unique(mir_uv.spw_array)) == len(np.unique(uvfits_uv.spw_array))
    # Map each UVFITS spw number onto its MIR counterpart by position
    # (assumes the two spw_array orderings correspond -- TODO confirm).
    spw_dict = {idx: jdx for idx, jdx in zip(uvfits_uv.spw_array, mir_uv.spw_array)}
    # Every per-channel spw ID must agree once run through that mapping.
    assert np.all(
        [
            idx == spw_dict[jdx]
            for idx, jdx in zip(mir_uv.flex_spw_id_array, uvfits_uv.flex_spw_id_array,)
        ]
    )
    # Now that we've checked, set this things as equivalent
    uvfits_uv.spw_array = mir_uv.spw_array
    uvfits_uv.flex_spw_id_array = mir_uv.flex_spw_id_array
    # Check the history first via find
    assert 0 == uvfits_uv.history.find(
        mir_uv.history + " Read/written with pyuvdata version:"
    )
    # Histories legitimately differ (write appends a version stamp), so
    # equalize them before the full object comparison.
    mir_uv.history = uvfits_uv.history
    assert mir_uv == uvfits_uv
@pytest.mark.filterwarnings("ignore:LST values stored ")
def test_read_mir_write_uvh5(uv_in_uvh5):
    """
    Mir to uvh5 loopback test.

    Read in Mir files, write out as uvh5, read back in and check for
    object equality.
    """
    mir_uv, uvh5_uv, testfile = uv_in_uvh5
    mir_uv.write_uvh5(testfile)
    uvh5_uv.read_uvh5(testfile)

    # Check the history first via find
    expected_prefix = mir_uv.history + " Read/written with pyuvdata version:"
    assert uvh5_uv.history.find(expected_prefix) == 0

    # test fails because of updated history, so this is our workaround for now.
    mir_uv.history = uvh5_uv.history
    assert mir_uv == uvh5_uv
def test_write_mir(uv_in_uvfits, err_type=NotImplementedError):
    """
    Mir writer test

    Check and make sure that attempts to use the writer return a
    'not implemented' error.
    """
    mir_uv, _, _ = uv_in_uvfits

    # Writing MIR data is not supported, so expect the designated error.
    with pytest.raises(err_type):
        mir_uv.write_mir("dummy.mir")
@pytest.mark.filterwarnings("ignore:LST values stored in this file are not ")
@pytest.mark.parametrize("future_shapes", [True, False])
def test_flex_spw_read(tmp_path, future_shapes):
    """
    Mir test for flexible spws.

    Read in Mir files using flexible spectral windows, all of the same nchan,
    exercising the flex_spw error paths plus the fast_concat and add methods.
    """
    testfile = os.path.join(DATA_PATH, "sma_test.mir")
    uv_in = UVData()
    uv_in.read_mir(testfile)
    dummyfile = os.path.join(tmp_path, "dummy.mirtest.uvfits")
    if future_shapes:
        uv_in.use_future_array_shapes()
    uv_in2 = uv_in.copy()
    # frequency_average is not implemented for flex_spw data sets
    with pytest.raises(NotImplementedError):
        uv_in2.frequency_average(2)
    # Corrupt the spw ID map to trigger the contiguity check
    uv_in2.flex_spw_id_array[0] = 1
    with pytest.raises(ValueError):
        uv_in2._check_flex_spw_contiguous()
    uv_in2 = uv_in.copy()
    uv_in2.channel_width[0] = 0
    with pytest.raises(ValueError, match="The frequencies are not evenly spaced"):
        uv_in2._check_freq_spacing()
    uv_in2 = uv_in.copy()
    uv_in2.channel_width[:] = 0
    with pytest.raises(ValueError, match="The frequencies are separated by more"):
        uv_in2._check_freq_spacing()
    uv_in2 = uv_in.copy()
    uv_in2.freq_array *= -1
    with pytest.raises(ValueError, match="Frequency values must be > 0 for UVFITS!"):
        uv_in2.write_uvfits(dummyfile, spoof_nonessential=True)
    uv_in2 = uv_in.copy()
    uv_in2.freq_array[:] = 1
    uv_in2.channel_width *= 0
    with pytest.raises(ValueError, match="Something is wrong, frequency values not"):
        uv_in2.write_uvfits(dummyfile, spoof_nonessential=True)
    # Move on to testing fast-concat and add methods
    uv_in2 = uv_in.copy()
    uv_in3 = uv_in.copy()
    uv_in2.select(freq_chans=np.where(uv_in2.flex_spw_id_array < 0))
    uv_in3.select(freq_chans=np.where(uv_in3.flex_spw_id_array > 0))
    uv_in4 = uv_in2.fast_concat(uv_in3, axis="freq")
    # Check the history first via find
    assert 0 == uv_in4.history.find(
        uv_in.history + " Downselected to specific frequencies using pyuvdata. "
        "Combined data along frequency axis using pyuvdata."
    )
    uv_in4.history = uv_in.history
    assert uv_in == uv_in4
    # Flipped order is intentional here, since the operation should work
    # irrespective of add order, following the sort below
    uv_in4 = uv_in3 + uv_in2
    assert 0 == uv_in4.history.find(
        uv_in.history + " Downselected to specific frequencies using pyuvdata. "
        "Combined data along frequency axis using pyuvdata."
    )
    uv_in4.history = uv_in.history
    # Need to perform a sort here, since the default ordering on add isn't identical
    # to the read order for MIR files.
    uv_in4.reorder_freqs(spw_order=uv_in.spw_array)
    assert uv_in == uv_in4
    uv_in2 = uv_in.copy()
    uv_in3 = uv_in.copy()
    # Test what happens when one window is flagged
    uv_in2.select(freq_chans=np.where(uv_in2.flex_spw_id_array < 0))
    uv_in3.select(freq_chans=np.where(uv_in3.flex_spw_id_array > -2))
    if future_shapes:
        uv_in3.data_array[:, uv_in3.flex_spw_id_array == -1] = 0.0
        uv_in3.flag_array[:, uv_in3.flex_spw_id_array == -1] = True
    else:
        uv_in3.data_array[:, :, uv_in3.flex_spw_id_array == -1] = 0.0
        uv_in3.flag_array[:, :, uv_in3.flex_spw_id_array == -1] = True
    uv_in4 = uv_in3 + uv_in2
    # NOTE(review): this expected history string runs "...using pyuvdata."
    # straight into "frequency axis using pyuvdata." with no separator --
    # confirm against the history actually emitted by UVData.__add__.
    assert 0 == uv_in4.history.find(
        uv_in.history + " Downselected to specific frequencies using pyuvdata. "
        "Overwrote invalid data using pyuvdata. Overwrote invalid data using pyuvdata."
        "frequency axis using pyuvdata."
    )
    uv_in4.history = uv_in.history
    uv_in4.reorder_freqs(spw_order=uv_in.spw_array)
    assert uv_in == uv_in4
    # Test what happens when we go down to one window, split in half
    uv_in2 = uv_in.copy()
    uv_in2.select(freq_chans=np.where(uv_in2.flex_spw_id_array == 1))
    uv_in3 = uv_in2.copy()
    uv_in4 = uv_in2.copy()
    uv_in3.select(freq_chans=np.arange(uv_in2.Nfreqs / 2, dtype=int))
    uv_in4.select(freq_chans=np.arange(uv_in2.Nfreqs / 2, uv_in2.Nfreqs, dtype=int))
    uv_in5 = uv_in4 + uv_in3
    # Bug fix: this history check previously inspected uv_in4 (one of the add
    # inputs, which the + operator does not modify) rather than uv_in5, the
    # object produced by the add on the line above.
    assert 0 == uv_in5.history.find(
        uv_in.history + " Downselected to specific frequencies using pyuvdata."
        " Downselected to specific frequencies using pyuvdata."
    )
    uv_in5.history = uv_in2.history
    assert uv_in2 == uv_in5
    # Test to make sure that flex and non-flex data sets cant be added
    uv_in3.flex_spw = False
    with pytest.raises(
        ValueError, match="To combine these data, flex_spw must be set to the same "
    ):
        uv_in5 = uv_in4 + uv_in3
def test_multi_nchan_spw_read(tmp_path):
    """
    Mir to uvfits error test for spws of different sizes.

    Read in Mir files with windows of mixed channel counts and verify that
    writing them to uvfits raises the expected error.
    """
    uv_in = UVData()
    uv_in.read_mir(
        os.path.join(DATA_PATH, "sma_test.mir"), corrchunk=[0, 1, 2, 3, 4]
    )

    outfile = os.path.join(tmp_path, "dummy.mirtest.uvfits")
    with pytest.raises(IndexError):
        uv_in.write_uvfits(outfile, spoof_nonessential=True)
def test_read_mir_no_records():
    """
    Mir no-records check

    Make sure that mir correctly handles the case where no matching records are found
    """
    testfile = os.path.join(DATA_PATH, "sma_test.mir")
    uv_in = UVData()

    # A source selection that matches nothing at all.
    with pytest.raises(IndexError, match="No valid records matching those selections!"):
        uv_in.read_mir(testfile, isource=-1)
    # An empty sideband selection.
    with pytest.raises(IndexError, match="No valid sidebands selected!"):
        uv_in.read_mir(testfile, isb=[])
    # Sideband values that do not exist in the data.
    with pytest.raises(IndexError, match="isb values contain invalid entries"):
        uv_in.read_mir(testfile, isb=[-156])
def test_mir_auto_read(
    err_type=IndexError, err_msg="Could not determine auto-correlation record size!"
):
    """
    Mir read tester

    Make sure that Mir autocorrelations are read correctly.
    """
    filepath = os.path.join(DATA_PATH, "sma_test.mir")
    parser = mir_parser.MirParser(filepath)

    # A bogus chunk count should make the auto-correlation scan fail outright.
    with pytest.raises(err_type, match=err_msg):
        auto_index = parser.scan_auto_data(filepath, nchunks=999)

    auto_index = parser.scan_auto_data(filepath)
    assert np.all(auto_index["nchunks"] == 8)

    parser.load_data(load_vis=False, load_auto=True)
    # Select the relevant auto records, which should be for spwin 0-3
    raw_autos = parser.read_auto_data(filepath, auto_index)[:, 0:4, :, :]
    # Entries must agree element-wise, with NaN == NaN counted as a match.
    same_value = raw_autos == parser.auto_data
    both_nan = np.logical_and(np.isnan(raw_autos), np.isnan(parser.auto_data))
    assert np.all(np.logical_or(same_value, both_nan))
    parser.unload_data()
# Below are a series of checks that are designed to check to make sure that the
# MirParser class is able to produce consistent values from an engineering data
# set (originally stored in /data/engineering/mir_data/200724_16:35:14), to make
# sure that we haven't broken the ability of the reader to handle the data. Since
# this file is the basis for the above checks, we've put this here rather than in
# test_mir_parser.py
def test_mir_remember_me_record_lengths(mir_data_object):
    """
    Mir record length checker

    Make sure the test file contains the right number of records.
    """
    mir_data = mir_data_object
    # Expected record count for every loaded data product of the test file.
    expected_counts = {
        "ac_read": 2,
        "bl_read": 4,
        "codes_read": 99,
        "eng_read": 2,
        "in_read": 1,
        "raw_data": 20,
        "raw_scale_fac": 20,
        "sp_read": 20,
        "vis_data": 20,
        "we_read": 1,
    }
    for attr, count in expected_counts.items():
        assert len(getattr(mir_data, attr)) == count
def test_mir_remember_me_codes_read(mir_data_object):
    """
    Mir codes_read checker.

    Make sure that certain values in the codes_read file of the test data set match
    what we know to be 'true' at the time of observations.
    """
    mir_data = mir_data_object
    # Each entry: (record index, field index, expected value).
    expected_codes = [
        (0, 0, b"filever"),
        (0, 2, b"3"),
        (90, 0, b"ref_time"),
        (90, 1, 0),
        (90, 2, b"Jul 24, 2020"),
        (90, 3, 0),
        (91, 0, b"ut"),
        (91, 1, 1),
        (91, 2, b"Jul 24 2020 4:34:39.00PM"),
        (91, 3, 0),
        (93, 0, b"source"),
        (93, 2, b"3c84"),
        (97, 0, b"ra"),
        (97, 2, b"03:19:48.15"),
        (98, 0, b"dec"),
        (98, 2, b"+41:30:42.1"),
    ]
    for rec_idx, field_idx, value in expected_codes:
        assert mir_data.codes_read[rec_idx][field_idx] == value
def test_mir_remember_me_in_read(mir_data_object):
    """
    Mir in_read checker.

    Make sure that certain values in the in_read file of the test data set match what
    we know to be 'true' at the time of observations, including that spare values are
    stored as zero.
    """
    mir_data = mir_data_object
    # Check to make sure that things seem right in in_read
    expected_values = {
        "traid": 484,
        "proid": 484,
        "inhid": 1,
        "ints": 1,
        "souid": 1,
        "isource": 1,
        "ivrad": 1,
        "ira": 1,
        "idec": 1,
        "epoch": 2000.0,
        "tile": 0,
        "obsflag": 0,
        "obsmode": 0,
        "spareshort": 0,
        "spareint6": 0,
    }
    for field, value in expected_values.items():
        assert np.all(mir_data.in_read[field] == value)
    # mjd is only checked to the nearest whole day.
    assert np.all(np.round(mir_data.in_read["mjd"]) == 59055)
def test_mir_remember_me_bl_read(mir_data_object):
    """
    Mir bl_read checker.

    Make sure that certain values in the bl_read file of the test data set match what
    we know to be 'true' at the time of observations, including that spare values are
    stored as zero.
    """
    mir_data = mir_data_object
    # Now check bl_read
    expected_values = {
        "blhid": np.arange(1, 5),
        "isb": [0, 0, 1, 1],
        "ipol": [0, 0, 0, 0],
        "ant1rx": [0, 1, 0, 1],
        "ant2rx": [0, 1, 0, 1],
        "pointing": 0,
        "irec": [0, 3, 0, 3],
        "iant1": 1,
        "iant2": 4,
        "iblcd": 2,
        "spareint1": 0,
        "spareint2": 0,
        "spareint3": 0,
        "spareint4": 0,
        "spareint5": 0,
        "spareint6": 0,
        "sparedbl3": 0.0,
        "sparedbl4": 0.0,
        "sparedbl5": 0.0,
        "sparedbl6": 0.0,
    }
    for field, value in expected_values.items():
        assert np.all(mir_data.bl_read[field] == value)
def test_mir_remember_me_eng_read(mir_data_object):
    """
    Mir eng_read checker.

    Make sure that certain values in the eng_read file of the test data set match what
    we know to be 'true' at the time of observations.
    """
    mir_data = mir_data_object
    # Now check eng_read
    expected_values = {
        "antennaNumber": [1, 4],
        "padNumber": [5, 8],
        "trackStatus": 1,
        "commStatus": 1,
        "inhid": 1,
    }
    for field, value in expected_values.items():
        assert np.all(mir_data.eng_read[field] == value)
def test_mir_remember_me_ac_read(mir_data_object):
    """
    Mir ac_read checker.

    Make sure that certain values in the autoCorrelations file of the test data set
    match what we know to be 'true' at the time of observations.
    """
    mir_data = mir_data_object
    # Now check ac_read
    expected_ac = {
        "inhid": 1,
        "achid": np.arange(1, 3),
        "antenna": [1, 4],
        "nchunks": 8,
        "datasize": 1048596,
    }
    for field, value in expected_ac.items():
        assert np.all(mir_data.ac_read[field] == value)
    # A couple of weather (we_read) records are spot-checked here as well.
    assert np.all(mir_data.we_read["scanNumber"] == 1)
    assert np.all(mir_data.we_read["flags"] == 0)
def test_mir_remember_me_sp_read(mir_data_object):
    """
    Mir sp_read checker.

    Make sure that certain values in the sp_read file of the test data set match what
    we know to be 'true' at the time of observations, including that spare values are
    stored as zero.
    """
    mir_data = mir_data_object
    # Now check sp_read. The original version of this test asserted "sphid"
    # and "igq" twice each; the redundant duplicates have been removed so
    # every field is checked exactly once.
    assert np.all(mir_data.sp_read["sphid"] == np.arange(1, 21))
    # Expected value (scalar or per-record pattern) for each remaining field.
    expected_values = {
        "igq": 0,
        "ipq": 1,
        "iband": [0, 1, 2, 3, 4] * 4,
        "ipstate": 0,
        "tau0": 0.0,
        "cabinLO": 0.0,
        "corrLO1": 0.0,
        "vradcat": 0.0,
        "nch": [4, 16384, 16384, 16384, 16384] * 4,
        "corrblock": [0, 1, 1, 1, 1] * 4,
        "corrchunk": [0, 1, 2, 3, 4] * 4,
        "correlator": 1,
        "spareint2": 0,
        "spareint3": 0,
        "spareint4": 0,
        "spareint5": 0,
        "spareint6": 0,
        "sparedbl1": 0.0,
        "sparedbl2": 0.0,
        "sparedbl3": 0.0,
        "sparedbl4": 0.0,
        "sparedbl5": 0.0,
        "sparedbl6": 0.0,
    }
    for field, value in expected_values.items():
        assert np.all(mir_data.sp_read[field] == value)
def test_mir_remember_me_sch_read(mir_data_object):
    """
    Mir sch_read checker.

    Make sure that certain values in the sch_read file of the test data set match what
    we know to be 'true' at the time of observations.
    """
    mir_data = mir_data_object
    # Now check sch_read related values. Thanks to a glitch in the data recorder,
    # all of the pseudo-cont values (every 5th record) are the same.
    assert np.all(mir_data.raw_scale_fac[0::5] == [-26] * 4)
    pseudo_cont = np.array(mir_data.raw_data[0::5]).flatten().tolist()
    expected = [-4302, -20291, -5261, -21128, -4192, -19634, -4999, -16346] * 4
    assert pseudo_cont == expected
|
""" Summary plots of SHAP values across a whole dataset.
"""
from __future__ import division
import warnings
import numpy as np
from scipy.stats import gaussian_kde
try:
import matplotlib.pyplot as pl
except ImportError:
warnings.warn("matplotlib could not be loaded!")
pass
from . import labels
from . import colors
# TODO: remove unused title argument / use title argument
def summary_plot(shap_values, features=None, feature_names=None, max_display=None, plot_type=None,
color=None, axis_color="#333333", title=None, alpha=1, show=True, sort=True,
color_bar=True, plot_size="auto", layered_violin_max_num_bins=20, class_names=None,
color_bar_label=labels["FEATURE_VALUE"],
# depreciated
auto_size_plot=None):
"""Create a SHAP summary plot, colored by feature values when they are provided.
Parameters
----------
shap_values : numpy.array
For single output explanations this is a matrix of SHAP values (# samples x # features).
For multi-output explanations this is a list of such matrices of SHAP values.
features : numpy.array or pandas.DataFrame or list
Matrix of feature values (# samples x # features) or a feature_names list as shorthand
feature_names : list
Names of the features (length # features)
max_display : int
How many top features to include in the plot (default is 20, or 7 for interaction plots)
plot_type : "dot" (default for single output), "bar" (default for multi-output), "violin",
or "compact_dot".
What type of summary plot to produce. Note that "compact_dot" is only used for
SHAP interaction values.
plot_size : "auto" (default), float, (float, float), or None
What size to make the plot. By default the size is auto-scaled based on the number of
features that are being displayed. Passing a single float will cause each row to be that
many inches high. Passing a pair of floats will scale the plot by that
number of inches. If None is passed then the size of the current figure will be left
unchanged.
"""
# deprication warnings
if auto_size_plot is not None:
warnings.warn("auto_size_plot=False is depricated and is now ignored! Use plot_size=None instead.")
multi_class = False
if isinstance(shap_values, list):
multi_class = True
if plot_type is None:
plot_type = "bar" # default for multi-output explanations
assert plot_type == "bar", "Only plot_type = 'bar' is supported for multi-output explanations!"
else:
if plot_type is None:
plot_type = "dot" # default for single output explanations
assert len(shap_values.shape) != 1, "Summary plots need a matrix of shap_values, not a vector."
# default color:
if color is None:
if plot_type == 'layered_violin':
color = "coolwarm"
elif multi_class:
color = lambda i: colors.red_blue_circle(i/len(shap_values))
else:
color = colors.blue_rgb
# convert from a DataFrame or other types
if str(type(features)) == "<class 'pandas.core.frame.DataFrame'>":
if feature_names is None:
feature_names = features.columns
features = features.values
elif isinstance(features, list):
if feature_names is None:
feature_names = features
features = None
elif (features is not None) and len(features.shape) == 1 and feature_names is None:
feature_names = features
features = None
num_features = (shap_values[0].shape[1] if multi_class else shap_values.shape[1])
if features is not None:
shape_msg = "The shape of the shap_values matrix does not match the shape of the " \
"provided data matrix."
if num_features - 1 == features.shape[1]:
assert False, shape_msg + " Perhaps the extra column in the shap_values matrix is the " \
"constant offset? Of so just pass shap_values[:,:-1]."
else:
assert num_features == features.shape[1], shape_msg
if feature_names is None:
feature_names = np.array([labels['FEATURE'] % str(i) for i in range(num_features)])
# plotting SHAP interaction values
if not multi_class and len(shap_values.shape) == 3:
if plot_type == "compact_dot":
new_shap_values = shap_values.reshape(shap_values.shape[0], -1)
new_features = np.tile(features, (1, 1, features.shape[1])).reshape(features.shape[0], -1)
new_feature_names = []
for c1 in feature_names:
for c2 in feature_names:
if c1 == c2:
new_feature_names.append(c1)
else:
new_feature_names.append(c1 + "* - " + c2)
return summary_plot(
new_shap_values, new_features, new_feature_names,
max_display=max_display, plot_type="dot", color=color, axis_color=axis_color,
title=title, alpha=alpha, show=show, sort=sort,
color_bar=color_bar, plot_size=plot_size, class_names=class_names,
color_bar_label="*" + color_bar_label
)
if max_display is None:
max_display = 7
else:
max_display = min(len(feature_names), max_display)
sort_inds = np.argsort(-np.abs(shap_values.sum(1)).sum(0))
# get plotting limits
delta = 1.0 / (shap_values.shape[1] ** 2)
slow = np.nanpercentile(shap_values, delta)
shigh = np.nanpercentile(shap_values, 100 - delta)
v = max(abs(slow), abs(shigh))
slow = -v
shigh = v
pl.figure(figsize=(1.5 * max_display + 1, 0.8 * max_display + 1))
pl.subplot(1, max_display, 1)
proj_shap_values = shap_values[:, sort_inds[0], sort_inds]
proj_shap_values[:, 1:] *= 2 # because off diag effects are split in half
summary_plot(
proj_shap_values, features[:, sort_inds] if features is not None else None,
feature_names=feature_names[sort_inds],
sort=False, show=False, color_bar=False,
plot_size=None,
max_display=max_display
)
pl.xlim((slow, shigh))
pl.xlabel("")
title_length_limit = 11
pl.title(shorten_text(feature_names[sort_inds[0]], title_length_limit))
for i in range(1, min(len(sort_inds), max_display)):
ind = sort_inds[i]
pl.subplot(1, max_display, i + 1)
proj_shap_values = shap_values[:, ind, sort_inds]
proj_shap_values *= 2
proj_shap_values[:, i] /= 2 # because only off diag effects are split in half
summary_plot(
proj_shap_values, features[:, sort_inds] if features is not None else None,
sort=False,
feature_names=["" for i in range(len(feature_names))],
show=False,
color_bar=False,
plot_size=None,
max_display=max_display
)
pl.xlim((slow, shigh))
pl.xlabel("")
if i == min(len(sort_inds), max_display) // 2:
pl.xlabel(labels['INTERACTION_VALUE'])
pl.title(shorten_text(feature_names[ind], title_length_limit))
pl.tight_layout(pad=0, w_pad=0, h_pad=0.0)
pl.subplots_adjust(hspace=0, wspace=0.1)
if show:
pl.show()
return
if max_display is None:
max_display = 20
if sort:
# order features by the sum of their effect magnitudes
if multi_class:
feature_order = np.argsort(np.sum(np.mean(np.abs(shap_values), axis=0), axis=0))
else:
feature_order = np.argsort(np.sum(np.abs(shap_values), axis=0))
feature_order = feature_order[-min(max_display, len(feature_order)):]
else:
feature_order = np.flip(np.arange(min(max_display, num_features)), 0)
row_height = 0.4
if plot_size == "auto":
pl.gcf().set_size_inches(8, len(feature_order) * row_height + 1.5)
elif type(plot_size) in (list, tuple):
pl.gcf().set_size_inches(plot_size[0], plot_size[1])
elif plot_size is not None:
pl.gcf().set_size_inches(8, len(feature_order) * plot_size + 1.5)
pl.axvline(x=0, color="#999999", zorder=-1)
if plot_type == "dot":
for pos, i in enumerate(feature_order):
pl.axhline(y=pos, color="#cccccc", lw=0.5, dashes=(1, 5), zorder=-1)
shaps = shap_values[:, i]
values = None if features is None else features[:, i]
inds = np.arange(len(shaps))
np.random.shuffle(inds)
if values is not None:
values = values[inds]
shaps = shaps[inds]
colored_feature = True
try:
values = np.array(values, dtype=np.float64) # make sure this can be numeric
except:
colored_feature = False
N = len(shaps)
# hspacing = (np.max(shaps) - np.min(shaps)) / 200
# curr_bin = []
nbins = 100
quant = np.round(nbins * (shaps - np.min(shaps)) / (np.max(shaps) - np.min(shaps) + 1e-8))
inds = np.argsort(quant + np.random.randn(N) * 1e-6)
layer = 0
last_bin = -1
ys = np.zeros(N)
for ind in inds:
if quant[ind] != last_bin:
layer = 0
ys[ind] = np.ceil(layer / 2) * ((layer % 2) * 2 - 1)
layer += 1
last_bin = quant[ind]
ys *= 0.9 * (row_height / np.max(ys + 1))
if features is not None and colored_feature:
# trim the color range, but prevent the color range from collapsing
vmin = np.nanpercentile(values, 5)
vmax = np.nanpercentile(values, 95)
if vmin == vmax:
vmin = np.nanpercentile(values, 1)
vmax = np.nanpercentile(values, 99)
if vmin == vmax:
vmin = np.min(values)
vmax = np.max(values)
assert features.shape[0] == len(shaps), "Feature and SHAP matrices must have the same number of rows!"
# plot the nan values in the interaction feature as grey
nan_mask = np.isnan(values)
pl.scatter(shaps[nan_mask], pos + ys[nan_mask], color="#777777", vmin=vmin,
vmax=vmax, s=16, alpha=alpha, linewidth=0,
zorder=3, rasterized=len(shaps) > 500)
# plot the non-nan values colored by the trimmed feature value
cvals = values[np.invert(nan_mask)].astype(np.float64)
cvals_imp = cvals.copy()
cvals_imp[np.isnan(cvals)] = (vmin + vmax) / 2.0
cvals[cvals_imp > vmax] = vmax
cvals[cvals_imp < vmin] = vmin
pl.scatter(shaps[np.invert(nan_mask)], pos + ys[np.invert(nan_mask)],
cmap=colors.red_blue, vmin=vmin, vmax=vmax, s=16,
c=cvals, alpha=alpha, linewidth=0,
zorder=3, rasterized=len(shaps) > 500)
else:
pl.scatter(shaps, pos + ys, s=16, alpha=alpha, linewidth=0, zorder=3,
color=color if colored_feature else "#777777", rasterized=len(shaps) > 500)
elif plot_type == "violin":
for pos, i in enumerate(feature_order):
pl.axhline(y=pos, color="#cccccc", lw=0.5, dashes=(1, 5), zorder=-1)
if features is not None:
global_low = np.nanpercentile(shap_values[:, :len(feature_names)].flatten(), 1)
global_high = np.nanpercentile(shap_values[:, :len(feature_names)].flatten(), 99)
for pos, i in enumerate(feature_order):
shaps = shap_values[:, i]
shap_min, shap_max = np.min(shaps), np.max(shaps)
rng = shap_max - shap_min
xs = np.linspace(np.min(shaps) - rng * 0.2, np.max(shaps) + rng * 0.2, 100)
if np.std(shaps) < (global_high - global_low) / 100:
ds = gaussian_kde(shaps + np.random.randn(len(shaps)) * (global_high - global_low) / 100)(xs)
else:
ds = gaussian_kde(shaps)(xs)
ds /= np.max(ds) * 3
values = features[:, i]
window_size = max(10, len(values) // 20)
smooth_values = np.zeros(len(xs) - 1)
sort_inds = np.argsort(shaps)
trailing_pos = 0
leading_pos = 0
running_sum = 0
back_fill = 0
for j in range(len(xs) - 1):
while leading_pos < len(shaps) and xs[j] >= shaps[sort_inds[leading_pos]]:
running_sum += values[sort_inds[leading_pos]]
leading_pos += 1
if leading_pos - trailing_pos > 20:
running_sum -= values[sort_inds[trailing_pos]]
trailing_pos += 1
if leading_pos - trailing_pos > 0:
smooth_values[j] = running_sum / (leading_pos - trailing_pos)
for k in range(back_fill):
smooth_values[j - k - 1] = smooth_values[j]
else:
back_fill += 1
vmin = np.nanpercentile(values, 5)
vmax = np.nanpercentile(values, 95)
if vmin == vmax:
vmin = np.nanpercentile(values, 1)
vmax = np.nanpercentile(values, 99)
if vmin == vmax:
vmin = np.min(values)
vmax = np.max(values)
# plot the nan values in the interaction feature as grey
nan_mask = np.isnan(values)
pl.scatter(shaps[nan_mask], np.ones(shap_values[nan_mask].shape[0]) * pos,
color="#777777", vmin=vmin, vmax=vmax, s=9,
alpha=alpha, linewidth=0, zorder=1)
# plot the non-nan values colored by the trimmed feature value
cvals = values[np.invert(nan_mask)].astype(np.float64)
cvals_imp = cvals.copy()
cvals_imp[np.isnan(cvals)] = (vmin + vmax) / 2.0
cvals[cvals_imp > vmax] = vmax
cvals[cvals_imp < vmin] = vmin
pl.scatter(shaps[np.invert(nan_mask)], np.ones(shap_values[np.invert(nan_mask)].shape[0]) * pos,
cmap=colors.red_blue, vmin=vmin, vmax=vmax, s=9,
c=cvals, alpha=alpha, linewidth=0, zorder=1)
# smooth_values -= nxp.nanpercentile(smooth_values, 5)
# smooth_values /= np.nanpercentile(smooth_values, 95)
smooth_values -= vmin
if vmax - vmin > 0:
smooth_values /= vmax - vmin
for i in range(len(xs) - 1):
if ds[i] > 0.05 or ds[i + 1] > 0.05:
pl.fill_between([xs[i], xs[i + 1]], [pos + ds[i], pos + ds[i + 1]],
[pos - ds[i], pos - ds[i + 1]], color=colors.red_blue(smooth_values[i]),
zorder=2)
else:
parts = pl.violinplot(shap_values[:, feature_order], range(len(feature_order)), points=200, vert=False,
widths=0.7,
showmeans=False, showextrema=False, showmedians=False)
for pc in parts['bodies']:
pc.set_facecolor(color)
pc.set_edgecolor('none')
pc.set_alpha(alpha)
elif plot_type == "layered_violin": # courtesy of @kodonnell
num_x_points = 200
bins = np.linspace(0, features.shape[0], layered_violin_max_num_bins + 1).round(0).astype(
'int') # the indices of the feature data corresponding to each bin
shap_min, shap_max = np.min(shap_values), np.max(shap_values)
x_points = np.linspace(shap_min, shap_max, num_x_points)
# loop through each feature and plot:
for pos, ind in enumerate(feature_order):
# decide how to handle: if #unique < layered_violin_max_num_bins then split by unique value, otherwise use bins/percentiles.
# to keep simpler code, in the case of uniques, we just adjust the bins to align with the unique counts.
feature = features[:, ind]
unique, counts = np.unique(feature, return_counts=True)
if unique.shape[0] <= layered_violin_max_num_bins:
order = np.argsort(unique)
thesebins = np.cumsum(counts[order])
thesebins = np.insert(thesebins, 0, 0)
else:
thesebins = bins
nbins = thesebins.shape[0] - 1
# order the feature data so we can apply percentiling
order = np.argsort(feature)
# x axis is located at y0 = pos, with pos being there for offset
y0 = np.ones(num_x_points) * pos
# calculate kdes:
ys = np.zeros((nbins, num_x_points))
for i in range(nbins):
# get shap values in this bin:
shaps = shap_values[order[thesebins[i]:thesebins[i + 1]], ind]
# if there's only one element, then we can't
if shaps.shape[0] == 1:
warnings.warn(
"not enough data in bin #%d for feature %s, so it'll be ignored. Try increasing the number of records to plot."
% (i, feature_names[ind]))
# to ignore it, just set it to the previous y-values (so the area between them will be zero). Not ys is already 0, so there's
# nothing to do if i == 0
if i > 0:
ys[i, :] = ys[i - 1, :]
continue
# save kde of them: note that we add a tiny bit of gaussian noise to avoid singular matrix errors
ys[i, :] = gaussian_kde(shaps + np.random.normal(loc=0, scale=0.001, size=shaps.shape[0]))(x_points)
# scale it up so that the 'size' of each y represents the size of the bin. For continuous data this will
# do nothing, but when we've gone with the unqique option, this will matter - e.g. if 99% are male and 1%
# female, we want the 1% to appear a lot smaller.
size = thesebins[i + 1] - thesebins[i]
bin_size_if_even = features.shape[0] / nbins
relative_bin_size = size / bin_size_if_even
ys[i, :] *= relative_bin_size
# now plot 'em. We don't plot the individual strips, as this can leave whitespace between them.
# instead, we plot the full kde, then remove outer strip and plot over it, etc., to ensure no
# whitespace
ys = np.cumsum(ys, axis=0)
width = 0.8
scale = ys.max() * 2 / width # 2 is here as we plot both sides of x axis
for i in range(nbins - 1, -1, -1):
y = ys[i, :] / scale
c = pl.get_cmap(color)(i / (
nbins - 1)) if color in pl.cm.datad else color # if color is a cmap, use it, otherwise use a color
pl.fill_between(x_points, pos - y, pos + y, facecolor=c)
pl.xlim(shap_min, shap_max)
elif not multi_class and plot_type == "bar":
feature_inds = feature_order[:max_display]
y_pos = np.arange(len(feature_inds))
global_shap_values = np.abs(shap_values).mean(0)
pl.barh(y_pos, global_shap_values[feature_inds], 0.7, align='center', color=color)
pl.yticks(y_pos, fontsize=13)
pl.gca().set_yticklabels([feature_names[i] for i in feature_inds])
elif multi_class and plot_type == "bar":
if class_names is None:
class_names = ["Class "+str(i) for i in range(len(shap_values))]
feature_inds = feature_order[:max_display]
y_pos = np.arange(len(feature_inds))
left_pos = np.zeros(len(feature_inds))
class_inds = np.argsort([-np.abs(shap_values[i]).mean() for i in range(len(shap_values))])
for i,ind in enumerate(class_inds):
global_shap_values = np.abs(shap_values[ind]).mean(0)
pl.barh(
y_pos, global_shap_values[feature_inds], 0.7, left=left_pos, align='center',
color=color(i), label=class_names[ind]
)
left_pos += global_shap_values[feature_inds]
pl.yticks(y_pos, fontsize=13)
pl.gca().set_yticklabels([feature_names[i] for i in feature_inds])
pl.legend(frameon=False, fontsize=12)
# draw the color bar
if color_bar and features is not None and plot_type != "bar" and \
(plot_type != "layered_violin" or color in pl.cm.datad):
import matplotlib.cm as cm
m = cm.ScalarMappable(cmap=colors.red_blue if plot_type != "layered_violin" else pl.get_cmap(color))
m.set_array([0, 1])
cb = pl.colorbar(m, ticks=[0, 1], aspect=1000)
cb.set_ticklabels([labels['FEATURE_VALUE_LOW'], labels['FEATURE_VALUE_HIGH']])
cb.set_label(color_bar_label, size=12, labelpad=0)
cb.ax.tick_params(labelsize=11, length=0)
cb.set_alpha(1)
cb.outline.set_visible(False)
bbox = cb.ax.get_window_extent().transformed(pl.gcf().dpi_scale_trans.inverted())
cb.ax.set_aspect((bbox.height - 0.9) * 20)
# cb.draw_all()
pl.gca().xaxis.set_ticks_position('bottom')
pl.gca().yaxis.set_ticks_position('none')
pl.gca().spines['right'].set_visible(False)
pl.gca().spines['top'].set_visible(False)
pl.gca().spines['left'].set_visible(False)
pl.gca().tick_params(color=axis_color, labelcolor=axis_color)
pl.yticks(range(len(feature_order)), [feature_names[i] for i in feature_order], fontsize=13)
if plot_type != "bar":
pl.gca().tick_params('y', length=20, width=0.5, which='major')
pl.gca().tick_params('x', labelsize=11)
pl.ylim(-1, len(feature_order))
if plot_type == "bar":
pl.xlabel(labels['GLOBAL_VALUE'], fontsize=13)
else:
pl.xlabel(labels['VALUE'], fontsize=13)
if show:
pl.show()
def shorten_text(text, length_limit):
    """Truncate *text* to at most *length_limit* characters, appending "..." when cut."""
    needs_cut = len(text) > length_limit
    return text[:length_limit - 3] + "..." if needs_cut else text
Update summary.py: fix the grey regions of the violin plot.
""" Summary plots of SHAP values across a whole dataset.
"""
from __future__ import division
import warnings
import numpy as np
from scipy.stats import gaussian_kde
try:
import matplotlib.pyplot as pl
except ImportError:
warnings.warn("matplotlib could not be loaded!")
pass
from . import labels
from . import colors
# TODO: remove unused title argument / use title argument
def summary_plot(shap_values, features=None, feature_names=None, max_display=None, plot_type=None,
                 color=None, axis_color="#333333", title=None, alpha=1, show=True, sort=True,
                 color_bar=True, plot_size="auto", layered_violin_max_num_bins=20, class_names=None,
                 color_bar_label=labels["FEATURE_VALUE"],
                 # depreciated
                 auto_size_plot=None):
    """Create a SHAP summary plot, colored by feature values when they are provided.

    Parameters
    ----------
    shap_values : numpy.array
        For single output explanations this is a matrix of SHAP values (# samples x # features).
        For multi-output explanations this is a list of such matrices of SHAP values.

    features : numpy.array or pandas.DataFrame or list
        Matrix of feature values (# samples x # features) or a feature_names list as shorthand

    feature_names : list
        Names of the features (length # features)

    max_display : int
        How many top features to include in the plot (default is 20, or 7 for interaction plots)

    plot_type : "dot" (default for single output), "bar" (default for multi-output), "violin",
        or "compact_dot".
        What type of summary plot to produce. Note that "compact_dot" is only used for
        SHAP interaction values.

    plot_size : "auto" (default), float, (float, float), or None
        What size to make the plot. By default the size is auto-scaled based on the number of
        features that are being displayed. Passing a single float will cause each row to be that
        many inches high. Passing a pair of floats will scale the plot by that
        number of inches. If None is passed then the size of the current figure will be left
        unchanged.
    """

    # deprication warnings
    if auto_size_plot is not None:
        warnings.warn("auto_size_plot=False is depricated and is now ignored! Use plot_size=None instead.")

    # A list of SHAP matrices means one matrix per output class (multi-output model).
    multi_class = False
    if isinstance(shap_values, list):
        multi_class = True
        if plot_type is None:
            plot_type = "bar"  # default for multi-output explanations
        assert plot_type == "bar", "Only plot_type = 'bar' is supported for multi-output explanations!"
    else:
        if plot_type is None:
            plot_type = "dot"  # default for single output explanations
        assert len(shap_values.shape) != 1, "Summary plots need a matrix of shap_values, not a vector."

    # default color:
    if color is None:
        if plot_type == 'layered_violin':
            color = "coolwarm"
        elif multi_class:
            color = lambda i: colors.red_blue_circle(i/len(shap_values))
        else:
            color = colors.blue_rgb

    # convert from a DataFrame or other types
    if str(type(features)) == "<class 'pandas.core.frame.DataFrame'>":
        if feature_names is None:
            feature_names = features.columns
        features = features.values
    elif isinstance(features, list):
        # A plain list is treated as feature names only, not data.
        if feature_names is None:
            feature_names = features
        features = None
    elif (features is not None) and len(features.shape) == 1 and feature_names is None:
        feature_names = features
        features = None

    num_features = (shap_values[0].shape[1] if multi_class else shap_values.shape[1])

    if features is not None:
        shape_msg = "The shape of the shap_values matrix does not match the shape of the " \
                    "provided data matrix."
        if num_features - 1 == features.shape[1]:
            assert False, shape_msg + " Perhaps the extra column in the shap_values matrix is the " \
                          "constant offset? Of so just pass shap_values[:,:-1]."
        else:
            assert num_features == features.shape[1], shape_msg

    if feature_names is None:
        feature_names = np.array([labels['FEATURE'] % str(i) for i in range(num_features)])

    # plotting SHAP interaction values
    # (a rank-3 shap_values array is (# samples, # features, # features))
    if not multi_class and len(shap_values.shape) == 3:
        if plot_type == "compact_dot":
            # Flatten the interaction tensor into a wide 2-D matrix and recurse
            # into the ordinary "dot" plot with combined feature names.
            new_shap_values = shap_values.reshape(shap_values.shape[0], -1)
            new_features = np.tile(features, (1, 1, features.shape[1])).reshape(features.shape[0], -1)

            new_feature_names = []
            for c1 in feature_names:
                for c2 in feature_names:
                    if c1 == c2:
                        new_feature_names.append(c1)
                    else:
                        new_feature_names.append(c1 + "* - " + c2)

            return summary_plot(
                new_shap_values, new_features, new_feature_names,
                max_display=max_display, plot_type="dot", color=color, axis_color=axis_color,
                title=title, alpha=alpha, show=show, sort=sort,
                color_bar=color_bar, plot_size=plot_size, class_names=class_names,
                color_bar_label="*" + color_bar_label
            )
        if max_display is None:
            max_display = 7
        else:
            max_display = min(len(feature_names), max_display)

        # Rank features by total absolute interaction effect.
        sort_inds = np.argsort(-np.abs(shap_values.sum(1)).sum(0))

        # get plotting limits
        delta = 1.0 / (shap_values.shape[1] ** 2)
        slow = np.nanpercentile(shap_values, delta)
        shigh = np.nanpercentile(shap_values, 100 - delta)
        v = max(abs(slow), abs(shigh))
        slow = -v
        shigh = v

        # One subplot column per displayed feature; the first column is the
        # top-ranked feature's row of the interaction matrix.
        pl.figure(figsize=(1.5 * max_display + 1, 0.8 * max_display + 1))
        pl.subplot(1, max_display, 1)
        proj_shap_values = shap_values[:, sort_inds[0], sort_inds]
        proj_shap_values[:, 1:] *= 2  # because off diag effects are split in half
        summary_plot(
            proj_shap_values, features[:, sort_inds] if features is not None else None,
            feature_names=feature_names[sort_inds],
            sort=False, show=False, color_bar=False,
            plot_size=None,
            max_display=max_display
        )
        pl.xlim((slow, shigh))
        pl.xlabel("")
        title_length_limit = 11
        pl.title(shorten_text(feature_names[sort_inds[0]], title_length_limit))
        for i in range(1, min(len(sort_inds), max_display)):
            ind = sort_inds[i]
            pl.subplot(1, max_display, i + 1)
            proj_shap_values = shap_values[:, ind, sort_inds]
            proj_shap_values *= 2
            proj_shap_values[:, i] /= 2  # because only off diag effects are split in half
            summary_plot(
                proj_shap_values, features[:, sort_inds] if features is not None else None,
                sort=False,
                feature_names=["" for i in range(len(feature_names))],
                show=False,
                color_bar=False,
                plot_size=None,
                max_display=max_display
            )
            pl.xlim((slow, shigh))
            pl.xlabel("")
            if i == min(len(sort_inds), max_display) // 2:
                pl.xlabel(labels['INTERACTION_VALUE'])
            pl.title(shorten_text(feature_names[ind], title_length_limit))
        pl.tight_layout(pad=0, w_pad=0, h_pad=0.0)
        pl.subplots_adjust(hspace=0, wspace=0.1)
        if show:
            pl.show()
        return

    if max_display is None:
        max_display = 20

    if sort:
        # order features by the sum of their effect magnitudes
        if multi_class:
            feature_order = np.argsort(np.sum(np.mean(np.abs(shap_values), axis=0), axis=0))
        else:
            feature_order = np.argsort(np.sum(np.abs(shap_values), axis=0))
        feature_order = feature_order[-min(max_display, len(feature_order)):]
    else:
        feature_order = np.flip(np.arange(min(max_display, num_features)), 0)

    row_height = 0.4
    if plot_size == "auto":
        pl.gcf().set_size_inches(8, len(feature_order) * row_height + 1.5)
    elif type(plot_size) in (list, tuple):
        pl.gcf().set_size_inches(plot_size[0], plot_size[1])
    elif plot_size is not None:
        pl.gcf().set_size_inches(8, len(feature_order) * plot_size + 1.5)
    pl.axvline(x=0, color="#999999", zorder=-1)

    if plot_type == "dot":
        for pos, i in enumerate(feature_order):
            pl.axhline(y=pos, color="#cccccc", lw=0.5, dashes=(1, 5), zorder=-1)
            shaps = shap_values[:, i]
            values = None if features is None else features[:, i]
            # Shuffle so overlapping points are drawn in random order.
            inds = np.arange(len(shaps))
            np.random.shuffle(inds)
            if values is not None:
                values = values[inds]
            shaps = shaps[inds]
            colored_feature = True
            try:
                values = np.array(values, dtype=np.float64)  # make sure this can be numeric
            except:
                colored_feature = False
            N = len(shaps)
            # hspacing = (np.max(shaps) - np.min(shaps)) / 200
            # curr_bin = []
            # Beeswarm layout: bucket points into x-quantile bins, then stack
            # points within a bin at alternating offsets above/below the row.
            nbins = 100
            quant = np.round(nbins * (shaps - np.min(shaps)) / (np.max(shaps) - np.min(shaps) + 1e-8))
            inds = np.argsort(quant + np.random.randn(N) * 1e-6)
            layer = 0
            last_bin = -1
            ys = np.zeros(N)
            for ind in inds:
                if quant[ind] != last_bin:
                    layer = 0
                ys[ind] = np.ceil(layer / 2) * ((layer % 2) * 2 - 1)
                layer += 1
                last_bin = quant[ind]
            ys *= 0.9 * (row_height / np.max(ys + 1))

            if features is not None and colored_feature:
                # trim the color range, but prevent the color range from collapsing
                vmin = np.nanpercentile(values, 5)
                vmax = np.nanpercentile(values, 95)
                if vmin == vmax:
                    vmin = np.nanpercentile(values, 1)
                    vmax = np.nanpercentile(values, 99)
                    if vmin == vmax:
                        vmin = np.min(values)
                        vmax = np.max(values)

                assert features.shape[0] == len(shaps), "Feature and SHAP matrices must have the same number of rows!"

                # plot the nan values in the interaction feature as grey
                nan_mask = np.isnan(values)
                pl.scatter(shaps[nan_mask], pos + ys[nan_mask], color="#777777", vmin=vmin,
                           vmax=vmax, s=16, alpha=alpha, linewidth=0,
                           zorder=3, rasterized=len(shaps) > 500)

                # plot the non-nan values colored by the trimmed feature value
                cvals = values[np.invert(nan_mask)].astype(np.float64)
                cvals_imp = cvals.copy()
                cvals_imp[np.isnan(cvals)] = (vmin + vmax) / 2.0
                cvals[cvals_imp > vmax] = vmax
                cvals[cvals_imp < vmin] = vmin
                pl.scatter(shaps[np.invert(nan_mask)], pos + ys[np.invert(nan_mask)],
                           cmap=colors.red_blue, vmin=vmin, vmax=vmax, s=16,
                           c=cvals, alpha=alpha, linewidth=0,
                           zorder=3, rasterized=len(shaps) > 500)
            else:
                pl.scatter(shaps, pos + ys, s=16, alpha=alpha, linewidth=0, zorder=3,
                           color=color if colored_feature else "#777777", rasterized=len(shaps) > 500)

    elif plot_type == "violin":
        for pos, i in enumerate(feature_order):
            pl.axhline(y=pos, color="#cccccc", lw=0.5, dashes=(1, 5), zorder=-1)

        if features is not None:
            global_low = np.nanpercentile(shap_values[:, :len(feature_names)].flatten(), 1)
            global_high = np.nanpercentile(shap_values[:, :len(feature_names)].flatten(), 99)
            for pos, i in enumerate(feature_order):
                shaps = shap_values[:, i]
                shap_min, shap_max = np.min(shaps), np.max(shaps)
                rng = shap_max - shap_min
                xs = np.linspace(np.min(shaps) - rng * 0.2, np.max(shaps) + rng * 0.2, 100)
                # Jitter near-constant columns so the KDE does not degenerate.
                if np.std(shaps) < (global_high - global_low) / 100:
                    ds = gaussian_kde(shaps + np.random.randn(len(shaps)) * (global_high - global_low) / 100)(xs)
                else:
                    ds = gaussian_kde(shaps)(xs)
                ds /= np.max(ds) * 3

                values = features[:, i]
                # NOTE(review): window_size is computed but never used below.
                window_size = max(10, len(values) // 20)
                # Sliding-window mean of the feature value along the sorted SHAP
                # axis; back_fill patches leading bins that had no points yet.
                smooth_values = np.zeros(len(xs) - 1)
                sort_inds = np.argsort(shaps)
                trailing_pos = 0
                leading_pos = 0
                running_sum = 0
                back_fill = 0
                for j in range(len(xs) - 1):
                    while leading_pos < len(shaps) and xs[j] >= shaps[sort_inds[leading_pos]]:
                        running_sum += values[sort_inds[leading_pos]]
                        leading_pos += 1
                        if leading_pos - trailing_pos > 20:
                            running_sum -= values[sort_inds[trailing_pos]]
                            trailing_pos += 1
                    if leading_pos - trailing_pos > 0:
                        smooth_values[j] = running_sum / (leading_pos - trailing_pos)
                        for k in range(back_fill):
                            smooth_values[j - k - 1] = smooth_values[j]
                    else:
                        back_fill += 1

                # trim the color range, but prevent the color range from collapsing
                vmin = np.nanpercentile(values, 5)
                vmax = np.nanpercentile(values, 95)
                if vmin == vmax:
                    vmin = np.nanpercentile(values, 1)
                    vmax = np.nanpercentile(values, 99)
                    if vmin == vmax:
                        vmin = np.min(values)
                        vmax = np.max(values)

                # plot the nan values in the interaction feature as grey
                nan_mask = np.isnan(values)
                pl.scatter(shaps[nan_mask], np.ones(shap_values[nan_mask].shape[0]) * pos,
                           color="#777777", vmin=vmin, vmax=vmax, s=9,
                           alpha=alpha, linewidth=0, zorder=1)
                # plot the non-nan values colored by the trimmed feature value
                cvals = values[np.invert(nan_mask)].astype(np.float64)
                cvals_imp = cvals.copy()
                cvals_imp[np.isnan(cvals)] = (vmin + vmax) / 2.0
                cvals[cvals_imp > vmax] = vmax
                cvals[cvals_imp < vmin] = vmin
                pl.scatter(shaps[np.invert(nan_mask)], np.ones(shap_values[np.invert(nan_mask)].shape[0]) * pos,
                           cmap=colors.red_blue, vmin=vmin, vmax=vmax, s=9,
                           c=cvals, alpha=alpha, linewidth=0, zorder=1)
                # smooth_values -= nxp.nanpercentile(smooth_values, 5)
                # smooth_values /= np.nanpercentile(smooth_values, 95)
                smooth_values -= vmin
                if vmax - vmin > 0:
                    smooth_values /= vmax - vmin
                for i in range(len(xs) - 1):
                    if ds[i] > 0.05 or ds[i + 1] > 0.05:
                        pl.fill_between([xs[i], xs[i + 1]], [pos + ds[i], pos + ds[i + 1]],
                                        [pos - ds[i], pos - ds[i + 1]], color=colors.red_blue_no_bounds(smooth_values[i]),
                                        zorder=2)

        else:
            # No feature values: draw plain single-color violins.
            parts = pl.violinplot(shap_values[:, feature_order], range(len(feature_order)), points=200, vert=False,
                                  widths=0.7,
                                  showmeans=False, showextrema=False, showmedians=False)

            for pc in parts['bodies']:
                pc.set_facecolor(color)
                pc.set_edgecolor('none')
                pc.set_alpha(alpha)

    elif plot_type == "layered_violin":  # courtesy of @kodonnell
        num_x_points = 200
        bins = np.linspace(0, features.shape[0], layered_violin_max_num_bins + 1).round(0).astype(
            'int')  # the indices of the feature data corresponding to each bin
        shap_min, shap_max = np.min(shap_values), np.max(shap_values)
        x_points = np.linspace(shap_min, shap_max, num_x_points)

        # loop through each feature and plot:
        for pos, ind in enumerate(feature_order):
            # decide how to handle: if #unique < layered_violin_max_num_bins then split by unique value, otherwise use bins/percentiles.
            # to keep simpler code, in the case of uniques, we just adjust the bins to align with the unique counts.
            feature = features[:, ind]
            unique, counts = np.unique(feature, return_counts=True)
            if unique.shape[0] <= layered_violin_max_num_bins:
                order = np.argsort(unique)
                thesebins = np.cumsum(counts[order])
                thesebins = np.insert(thesebins, 0, 0)
            else:
                thesebins = bins
            nbins = thesebins.shape[0] - 1
            # order the feature data so we can apply percentiling
            order = np.argsort(feature)
            # x axis is located at y0 = pos, with pos being there for offset
            # NOTE(review): y0 is computed but never used below.
            y0 = np.ones(num_x_points) * pos
            # calculate kdes:
            ys = np.zeros((nbins, num_x_points))
            for i in range(nbins):
                # get shap values in this bin:
                shaps = shap_values[order[thesebins[i]:thesebins[i + 1]], ind]
                # if there's only one element, then we can't
                if shaps.shape[0] == 1:
                    warnings.warn(
                        "not enough data in bin #%d for feature %s, so it'll be ignored. Try increasing the number of records to plot."
                        % (i, feature_names[ind]))
                    # to ignore it, just set it to the previous y-values (so the area between them will be zero). Not ys is already 0, so there's
                    # nothing to do if i == 0
                    if i > 0:
                        ys[i, :] = ys[i - 1, :]
                    continue
                # save kde of them: note that we add a tiny bit of gaussian noise to avoid singular matrix errors
                ys[i, :] = gaussian_kde(shaps + np.random.normal(loc=0, scale=0.001, size=shaps.shape[0]))(x_points)
                # scale it up so that the 'size' of each y represents the size of the bin. For continuous data this will
                # do nothing, but when we've gone with the unqique option, this will matter - e.g. if 99% are male and 1%
                # female, we want the 1% to appear a lot smaller.
                size = thesebins[i + 1] - thesebins[i]
                bin_size_if_even = features.shape[0] / nbins
                relative_bin_size = size / bin_size_if_even
                ys[i, :] *= relative_bin_size
            # now plot 'em. We don't plot the individual strips, as this can leave whitespace between them.
            # instead, we plot the full kde, then remove outer strip and plot over it, etc., to ensure no
            # whitespace
            ys = np.cumsum(ys, axis=0)
            width = 0.8
            scale = ys.max() * 2 / width  # 2 is here as we plot both sides of x axis
            for i in range(nbins - 1, -1, -1):
                y = ys[i, :] / scale
                c = pl.get_cmap(color)(i / (
                    nbins - 1)) if color in pl.cm.datad else color  # if color is a cmap, use it, otherwise use a color
                pl.fill_between(x_points, pos - y, pos + y, facecolor=c)
        pl.xlim(shap_min, shap_max)

    elif not multi_class and plot_type == "bar":
        feature_inds = feature_order[:max_display]
        y_pos = np.arange(len(feature_inds))
        global_shap_values = np.abs(shap_values).mean(0)
        pl.barh(y_pos, global_shap_values[feature_inds], 0.7, align='center', color=color)
        pl.yticks(y_pos, fontsize=13)
        pl.gca().set_yticklabels([feature_names[i] for i in feature_inds])

    elif multi_class and plot_type == "bar":
        if class_names is None:
            class_names = ["Class "+str(i) for i in range(len(shap_values))]
        feature_inds = feature_order[:max_display]
        y_pos = np.arange(len(feature_inds))
        left_pos = np.zeros(len(feature_inds))

        # Stack one bar segment per class, largest mean |SHAP| first.
        class_inds = np.argsort([-np.abs(shap_values[i]).mean() for i in range(len(shap_values))])
        for i, ind in enumerate(class_inds):
            global_shap_values = np.abs(shap_values[ind]).mean(0)
            pl.barh(
                y_pos, global_shap_values[feature_inds], 0.7, left=left_pos, align='center',
                color=color(i), label=class_names[ind]
            )
            left_pos += global_shap_values[feature_inds]
        pl.yticks(y_pos, fontsize=13)
        pl.gca().set_yticklabels([feature_names[i] for i in feature_inds])
        pl.legend(frameon=False, fontsize=12)

    # draw the color bar
    if color_bar and features is not None and plot_type != "bar" and \
            (plot_type != "layered_violin" or color in pl.cm.datad):
        import matplotlib.cm as cm
        m = cm.ScalarMappable(cmap=colors.red_blue if plot_type != "layered_violin" else pl.get_cmap(color))
        m.set_array([0, 1])
        cb = pl.colorbar(m, ticks=[0, 1], aspect=1000)
        cb.set_ticklabels([labels['FEATURE_VALUE_LOW'], labels['FEATURE_VALUE_HIGH']])
        cb.set_label(color_bar_label, size=12, labelpad=0)
        cb.ax.tick_params(labelsize=11, length=0)
        cb.set_alpha(1)
        cb.outline.set_visible(False)
        bbox = cb.ax.get_window_extent().transformed(pl.gcf().dpi_scale_trans.inverted())
        cb.ax.set_aspect((bbox.height - 0.9) * 20)
        # cb.draw_all()

    # Shared axis cosmetics for all plot types.
    pl.gca().xaxis.set_ticks_position('bottom')
    pl.gca().yaxis.set_ticks_position('none')
    pl.gca().spines['right'].set_visible(False)
    pl.gca().spines['top'].set_visible(False)
    pl.gca().spines['left'].set_visible(False)
    pl.gca().tick_params(color=axis_color, labelcolor=axis_color)
    pl.yticks(range(len(feature_order)), [feature_names[i] for i in feature_order], fontsize=13)
    if plot_type != "bar":
        pl.gca().tick_params('y', length=20, width=0.5, which='major')
    pl.gca().tick_params('x', labelsize=11)
    pl.ylim(-1, len(feature_order))
    if plot_type == "bar":
        pl.xlabel(labels['GLOBAL_VALUE'], fontsize=13)
    else:
        pl.xlabel(labels['VALUE'], fontsize=13)
    if show:
        pl.show()
def shorten_text(text, length_limit):
    """Return *text* unchanged when it fits, otherwise a truncated copy ending in "...".

    A truncated result is exactly ``length_limit`` characters long:
    ``length_limit - 3`` original characters plus the ellipsis.
    """
    if len(text) <= length_limit:
        return text
    return "{}...".format(text[:length_limit - 3])
|
# -*- coding: utf-8 -*-
"""
Created on Fri Nov 28 18:56:43 2014
@author: micha
"""
from pyNN.models import BaseCellType
# These are defined in $GeNNPATH/lib/utils.h and extra_neurons.h
class RulkovMapNeuron(BaseCellType):
    """
    Rulkov map-based neurons.

    No parameters or state variables are exposed to pyNN yet; the model
    itself lives on the GeNN side (see the module-level comment).
    """
    default_parameters = {}
    default_initial_values = {}
    parameter_checks = {}
class PoissonNeurons(BaseCellType):
    """
    Poisson process spike train generator.

    The numeric trailing comments give each value's position in the
    GeNN-side parameter/state arrays (see the module-level comment).
    """
    recordable = ['spikes']
    default_parameters = {'rate': 0.1,     # 0 - rate: firing rate
                          't_refrac': 2.5, # 1 - t_refrac
                          'V_spike': 20.,  # 2 - V_spike
                          'V_rest': -60.}  # 3 - V_rest
    default_initial_values = {'V': 0.,           # 0 - V
                              'seed': 1234567,   # 1 - seed
                              'SpikeTime': -10.} # 2 - SpikeTime
    parameter_checks = {}
class TraubMiles(BaseCellType):
    """
    Traub and Miles Hodgkin-Huxley neurons.

    The numeric trailing comments give each value's position in the
    GeNN-side parameter/state arrays.
    """
    default_parameters = {'gNa': 7.15,    # 0 - gNa: Na conductance in 1/(mOhms * cm^2)
                          'ENa': 50.,     # 1 - ENa: Na equi potential in mV
                          'gK': 1.43,     # 2 - gK: K conductance in 1/(mOhms * cm^2)
                          'EK': -95.,     # 3 - EK: K equi potential in mV
                          'gl': 0.02672,  # 4 - gl: leak conductance in 1/(mOhms * cm^2)
                          'El': -63.563,  # 5 - El: leak equi potential in mV
                          'Cmem': 0.143}  # 6 - Cmem: membr. capacity density in muF/cm^2
    default_initial_values = {'E': 0.,             # 0 - membrane potential E
                              'p_Na_m': 0.529324,  # 1 - prob. for Na channel activation m
                              'p_Na_h': 0.3176767, # 2 - prob. for not Na channel blocking h
                              'p_K_n': 0.5961207}  # 3 - prob. for K channel activation n
    parameter_checks = {}
class Izhikevich(BaseCellType):
    """
    Izhikevich neurons.

    Parameter and state-variable defaults are not filled in yet.
    """
    default_parameters = {}
    default_initial_values = {}
    parameter_checks = {}
class IzhikevichVar(BaseCellType):
    """
    Izhikevich neurons with variable parameters.

    Parameter and state-variable defaults are not filled in yet.
    """
    default_parameters = {}
    default_initial_values = {}
    parameter_checks = {}
# from extra_neurons.h
class LeakyIF(BaseCellType):
    """
    Leaky Integrate-And-Fire neurons.

    Parameter and state-variable defaults are not filled in yet
    (model defined in extra_neurons.h on the GeNN side).
    """
    default_parameters = {}
    default_initial_values = {}
    parameter_checks = {}
class RegularSpiking(BaseCellType):
    """
    A regularly spiking neuron.

    Parameter and state-variable defaults are not filled in yet.
    """
    default_parameters = {}
    default_initial_values = {}
    parameter_checks = {}
class LeakyIntegrate(BaseCellType):
    """
    Leaky Integrate and Fire number two.

    Parameter and state-variable defaults are not filled in yet.
    """
    default_parameters = {}
    default_initial_values = {}
    parameter_checks = {}
Added parameter-sequence attributes (param_seq, ini_seq, c_type) to PoissonNeurons.
# -*- coding: utf-8 -*-
"""
Created on Fri Nov 28 18:56:43 2014
@author: micha
"""
from pyNN.models import BaseCellType
# These are defined in $GeNNPATH/lib/utils.h and extra_neurons.h
class RulkovMapNeuron(BaseCellType):
    """
    Rulkov map-based neurons.

    No parameters or state variables are exposed to pyNN yet; the model
    itself lives on the GeNN side (see the module-level comment).
    """
    default_parameters = {}
    default_initial_values = {}
    parameter_checks = {}
class PoissonNeurons(BaseCellType):
    """
    Poisson process spike train generator.

    The numeric trailing comments give each value's position in the
    GeNN-side parameter/state arrays; ``param_seq`` and ``ini_seq`` record
    that ordering explicitly so values can be emitted positionally.
    """
    recordable = ['spikes']
    default_parameters = {'rate': 0.1,     # 0 - rate: firing rate
                          't_refrac': 2.5, # 1 - t_refrac
                          'V_spike': 20.,  # 2 - V_spike
                          'V_rest': -60.}  # 3 - V_rest
    # Positional order of the parameters as expected on the GeNN side.
    param_seq = ['rate', 't_refrac', 'V_spike', 'V_rest']
    default_initial_values = {'V': 0.,           # 0 - V
                              'seed': 1234567,   # 1 - seed
                              'SpikeTime': -10.} # 2 - SpikeTime
    # Positional order of the state variables as expected on the GeNN side.
    ini_seq = ['V', 'seed', 'SpikeTime']
    # Scalar type for the values — presumably the C-side type used when
    # generating GeNN code; confirm against the code generator.
    c_type = 'double'
    parameter_checks = {}
class TraubMiles(BaseCellType):
    """
    Traub and Miles Hodgkin-Huxley neurons.

    The numeric trailing comments give each value's position in the
    GeNN-side parameter/state arrays.
    """
    default_parameters = {'gNa': 7.15,    # 0 - gNa: Na conductance in 1/(mOhms * cm^2)
                          'ENa': 50.,     # 1 - ENa: Na equi potential in mV
                          'gK': 1.43,     # 2 - gK: K conductance in 1/(mOhms * cm^2)
                          'EK': -95.,     # 3 - EK: K equi potential in mV
                          'gl': 0.02672,  # 4 - gl: leak conductance in 1/(mOhms * cm^2)
                          'El': -63.563,  # 5 - El: leak equi potential in mV
                          'Cmem': 0.143}  # 6 - Cmem: membr. capacity density in muF/cm^2
    default_initial_values = {'E': 0.,             # 0 - membrane potential E
                              'p_Na_m': 0.529324,  # 1 - prob. for Na channel activation m
                              'p_Na_h': 0.3176767, # 2 - prob. for not Na channel blocking h
                              'p_K_n': 0.5961207}  # 3 - prob. for K channel activation n
    parameter_checks = {}
class Izhikevich(BaseCellType):
    """
    Izhikevich neurons.

    Parameter and state-variable defaults are not filled in yet.
    """
    default_parameters = {}
    default_initial_values = {}
    parameter_checks = {}
class IzhikevichVar(BaseCellType):
    """
    Izhikevich neurons with variable parameters.

    Parameter and state-variable defaults are not filled in yet.
    """
    default_parameters = {}
    default_initial_values = {}
    parameter_checks = {}
# from extra_neurons.h
class LeakyIF(BaseCellType):
    """
    Leaky Integrate-And-Fire neurons.

    Parameter and state-variable defaults are not filled in yet
    (model defined in extra_neurons.h on the GeNN side).
    """
    default_parameters = {}
    default_initial_values = {}
    parameter_checks = {}
class RegularSpiking(BaseCellType):
    """
    A regularly spiking neuron.

    Parameter and state-variable defaults are not filled in yet.
    """
    default_parameters = {}
    default_initial_values = {}
    parameter_checks = {}
class LeakyIntegrate(BaseCellType):
"""
Leaky Integrate and Fire number two.
"""
default_parameters = {}
default_initial_values = {}
parameter_checks = {}
|
#!/usr/bin/env python
__all__ = ['twitter_download']
from ..common import *
from .vine import vine_download
def twitter_download(url, output_dir='.', merge=True, info_only=False, **kwargs):
    """Download media from a Twitter status page.

    Tries video extraction first (player card, embedded Vine, or inline
    <source>); if that fails for any reason, falls back to downloading the
    og:image pictures at :orig size. Interface follows the you-get
    extractor convention.
    """
    html = get_html(url)
    screen_name = r1(r'data-screen-name="([^"]*)"', html)
    item_id = r1(r'data-item-id="([^"]*)"', html)
    page_title = "{} [{}]".format(screen_name, item_id)
    try:  # extract video
        icards = r1(r'data-src="([^"]*)"', html)
        if icards:
            card = get_html("https://twitter.com" + icards)
            data_player_config = r1(r'data-player-config="([^"]*)"', card)
            if data_player_config is None:
                # No player config on the card: assume an embedded Vine.
                vine_src = r1(r'<iframe src="([^"]*)"', card)
                vine_download(vine_src, output_dir=output_dir, merge=merge, info_only=info_only)
                return
            data = json.loads(unescape_html(data_player_config))
            source = data['playlist'][0]['source']
        else:
            source = r1(r'<source video-src="([^"]*)"', html)
        mime, ext, size = url_info(source)
        print_info(site_info, page_title, mime, size)
        if not info_only:
            download_urls([source], page_title, ext, size, output_dir, merge=merge)
    except Exception:  # no video found -- fall back to image extraction
        # Fix: was a bare `except:`, which also swallowed KeyboardInterrupt
        # and SystemExit.
        urls = re.findall(r'property="og:image"\s*content="([^"]+)"', html)
        images = []
        for img_url in urls:  # renamed: the original shadowed the `url` parameter
            # Swap the size suffix (e.g. ":large") for ":orig" (full size).
            img_url = ':'.join(img_url.split(':')[:-1]) + ':orig'
            filename = parse.unquote(img_url.split('/')[-1])
            title = '.'.join(filename.split('.')[:-1])
            ext = img_url.split(':')[-2].split('.')[-1]
            size = int(get_head(img_url)['Content-Length'])
            images.append({'title': title,
                           'url': img_url,
                           'ext': ext,
                           'size': size})
        size = sum(image['size'] for image in images)
        print_info(site_info, page_title, images[0]['ext'], size)
        if not info_only:
            for image in images:
                print_info(site_info, image['title'], image['ext'], image['size'])
                download_urls([image['url']], image['title'], image['ext'],
                              image['size'], output_dir=output_dir)
# you-get registration hooks for this extractor module.
site_info = "Twitter.com"
download = twitter_download
download_playlist = playlist_not_supported('twitter')
[twitter] prioritize (main) images over videos
#!/usr/bin/env python
__all__ = ['twitter_download']
from ..common import *
from .vine import vine_download
def twitter_download(url, output_dir='.', merge=True, info_only=False, **kwargs):
    """Download media from a Twitter status page.

    Prefers the og:image pictures (fetched at :orig size); when none are
    present, falls back to video extraction (player card, embedded Vine,
    or inline <source>). Interface follows the you-get extractor
    convention.
    """
    html = get_html(url)
    screen_name = r1(r'data-screen-name="([^"]*)"', html)
    item_id = r1(r'data-item-id="([^"]*)"', html)
    page_title = "{} [{}]".format(screen_name, item_id)
    try:  # extract images
        urls = re.findall(r'property="og:image"\s*content="([^"]+:large)"', html)
        if not urls:
            # Fix: was `assert urls`, which is stripped under `python -O`
            # and would then run the image branch on an empty list.
            raise ValueError('no images found')
        images = []
        for img_url in urls:  # renamed: the original shadowed the `url` parameter
            # Swap the ":large" suffix for ":orig" (full size).
            img_url = ':'.join(img_url.split(':')[:-1]) + ':orig'
            filename = parse.unquote(img_url.split('/')[-1])
            title = '.'.join(filename.split('.')[:-1])
            ext = img_url.split(':')[-2].split('.')[-1]
            size = int(get_head(img_url)['Content-Length'])
            images.append({'title': title,
                           'url': img_url,
                           'ext': ext,
                           'size': size})
        size = sum(image['size'] for image in images)
        print_info(site_info, page_title, images[0]['ext'], size)
        if not info_only:
            for image in images:
                print_info(site_info, image['title'], image['ext'], image['size'])
                download_urls([image['url']], image['title'], image['ext'],
                              image['size'], output_dir=output_dir)
    except Exception:  # no images -- fall back to video extraction
        # Fix: was a bare `except:`, which also swallowed KeyboardInterrupt
        # and SystemExit.
        icards = r1(r'data-src="([^"]*)"', html)
        if icards:
            card = get_html("https://twitter.com" + icards)
            data_player_config = r1(r'data-player-config="([^"]*)"', card)
            if data_player_config is None:
                # No player config on the card: assume an embedded Vine.
                vine_src = r1(r'<iframe src="([^"]*)"', card)
                vine_download(vine_src, output_dir=output_dir, merge=merge, info_only=info_only)
                return
            data = json.loads(unescape_html(data_player_config))
            source = data['playlist'][0]['source']
        else:
            source = r1(r'<source video-src="([^"]*)"', html)
        mime, ext, size = url_info(source)
        print_info(site_info, page_title, mime, size)
        if not info_only:
            download_urls([source], page_title, ext, size, output_dir, merge=merge)
# you-get registration hooks for this extractor module.
site_info = "Twitter.com"
download = twitter_download
download_playlist = playlist_not_supported('twitter')
|
import cherrypy
import os
# static dir defined here for static content
STATIC_DIR = os.path.abspath("%s/../client" % os.path.dirname(__file__))


def CORS():
    """CherryPy tool hook: allow cross-origin requests from any origin."""
    cherrypy.response.headers["Access-Control-Allow-Origin"] = "*"


# this class just returns the fixed-chart html file
class GetChart(object):
    """Root handler serving the static chart page (index.html)."""
    @cherrypy.expose
    def index(self):
        # NOTE(review): returns an open file object for CherryPy to serve;
        # the handle is not closed explicitly here — confirm CherryPy
        # manages its lifetime.
        return open(os.path.join(STATIC_DIR, 'index.html'))
if __name__ == "__main__":
    # Local imports so importing this module has no server-start side effects.
    from REST_Api import RESTServer
    from d3_api import D3Server
    # Register CORS() (defined above) as a tool; enabled per-mount below.
    cherrypy.tools.CORS = cherrypy.Tool('before_finalize', CORS)
    api_conf = {
        '/': {'request.dispatch': cherrypy.dispatch.MethodDispatcher(), 'tools.CORS.on': True}
    }
    # REST API plus a D3-shaped variant of the same amortization endpoint.
    cherrypy.tree.mount(RESTServer(), '/api/amort', config=api_conf)
    cherrypy.tree.mount(D3Server(), '/api/d3/amort', config=api_conf)
    static_conf = {
        '/': {
            'tools.staticdir.on': True,
            'tools.staticdir.dir': STATIC_DIR,
        }
    }
    cherrypy.tree.mount(GetChart(), '/', config=static_conf)
    # Listen on all interfaces, port 4001.
    cherrypy.server.bind_addr = ('0.0.0.0', 4001)
    cherrypy.engine.start()
    cherrypy.engine.block()
Removed the `if __name__ == "__main__"` guard: this module is meant to be run as a script, not imported, so the guard served no purpose — unclear why it was there in the first place.
import cherrypy
import os
# static dir defined here for static content
STATIC_DIR = os.path.abspath("%s/../client" % os.path.dirname(__file__))


def CORS():
    """CherryPy tool hook: allow cross-origin requests from any origin."""
    cherrypy.response.headers["Access-Control-Allow-Origin"] = "*"


# this class just returns the fixed-chart html file
class GetChart(object):
    """Root handler serving the static chart page (index.html)."""
    @cherrypy.expose
    def index(self):
        # NOTE(review): returns an open file object for CherryPy to serve;
        # the handle is not closed explicitly here — confirm CherryPy
        # manages its lifetime.
        return open(os.path.join(STATIC_DIR, 'index.html'))
# Module-level startup: this file is a script entry point, so mounting the
# apps and starting the engine happen on import/run deliberately (no
# __main__ guard).
from REST_Api import RESTServer
from d3_api import D3Server
# Register CORS() (defined above) as a tool; enabled per-mount below.
cherrypy.tools.CORS = cherrypy.Tool('before_finalize', CORS)
api_conf = {
    '/': {'request.dispatch': cherrypy.dispatch.MethodDispatcher(), 'tools.CORS.on': True}
}
# REST API plus a D3-shaped variant of the same amortization endpoint.
cherrypy.tree.mount(RESTServer(), '/api/amort', config=api_conf)
cherrypy.tree.mount(D3Server(), '/api/d3/amort', config=api_conf)
static_conf = {
    '/': {
        'tools.staticdir.on': True,
        'tools.staticdir.dir': STATIC_DIR,
    }
}
cherrypy.tree.mount(GetChart(), '/', config=static_conf)
# Listen on all interfaces, port 4001.
cherrypy.server.bind_addr = ('0.0.0.0', 4001)
cherrypy.engine.start()
cherrypy.engine.block()
|
"""
CLI for kalite
"""
from __future__ import print_function
from __future__ import unicode_literals
import getpass
import logging
import os
import re
import json
import pwd
import shlex
import subprocess
from distutils.spawn import find_executable
from .exceptions import ValidationError
from . import validators
logger = logging.getLogger(__name__)
# Per-user settings file for the ka-lite GTK frontend.
KALITE_GTK_SETTINGS_FILE = os.path.expanduser(os.path.join('~', '.kalite', 'ka-lite-gtk.json'))
DEFAULT_USER = getpass.getuser()
DEFAULT_PORT = 8008

# A validator callback will raise an exception ValidationError
validate = {
    'user': validators.username,
    'port': validators.port
}

# Constants from the ka-lite .deb package conventions
DEBIAN_INIT_SCRIPT = '/etc/init.d/ka-lite'
DEBIAN_USERNAME_FILE = '/etc/ka-lite/username'
DEBIAN_OPTIONS_FILE = '/etc/ka-lite/server_options'

# Prefer pkexec (PolicyKit) for privilege escalation; fall back to gksudo.
if find_executable('pkexec'):
    SU_COMMAND = 'pkexec --user {username}'
    SUDO_COMMAND = 'pkexec'
else:
    SU_COMMAND = 'gksudo -u {username}'
    SUDO_COMMAND = 'gksudo'

# KA Lite Debian convention
# Set new default values from debian system files
if os.path.isfile(DEBIAN_USERNAME_FILE):
    debian_username = open(DEBIAN_USERNAME_FILE, 'r').read()
    debian_username = debian_username.split('\n')[0]  # first line only
    if debian_username:
        try:
            debian_username = validate['user'](debian_username)
            DEFAULT_USER = debian_username
            # Okay there's a default debian user. If that user is the same as
            # the one selected in the user settings, we should use the --port
            # option set for the debian service.
            if os.path.isfile(DEBIAN_OPTIONS_FILE):
                debian_options = open(DEBIAN_OPTIONS_FILE, 'r').read()
                port = re.compile(r'--port\s+(\d+)').search(debian_options)
                # NOTE(review): the regex *match object* (or None) is passed to
                # the port validator, and DEFAULT_PORT becomes the *string*
                # port.group(1) — confirm validators.port accepts both.
                port = validate['port'](port, none_if_invalid=True)
                DEFAULT_PORT = port.group(1) if port else DEFAULT_PORT
        except ValidationError:
            logger.error('Non-existing username in {}'.format(DEBIAN_USERNAME_FILE))
def get_kalite_home(user):
    """Return the .kalite data directory inside *user*'s home directory."""
    home_dir = pwd.getpwnam(user).pw_dir
    return os.path.join(home_dir, '.kalite')
DEFAULT_HOME = get_kalite_home(DEFAULT_USER)

# These are the settings. They are subject to change at load time by
# reading in settings files
settings = {
    'user': DEFAULT_USER,                                   # system user running kalite
    'command': find_executable('kalite'),                   # path to the kalite executable (or None)
    'content_root': os.path.join(DEFAULT_HOME, 'content'),  # downloaded-content location
    'port': DEFAULT_PORT,                                   # int default, or str when parsed from .deb options
    'home': DEFAULT_HOME,                                   # becomes KALITE_HOME for subprocesses
}

# Read settings from settings file
if os.path.isfile(KALITE_GTK_SETTINGS_FILE):
    try:
        # Fix: close the settings file deterministically (was a bare open()).
        with open(KALITE_GTK_SETTINGS_FILE, 'r') as settings_file:
            loaded_settings = json.load(settings_file)
        for (k, v) in loaded_settings.items():
            try:
                settings[k] = validate[k](v) if k in validate else v
            except ValidationError:
                logger.error("Illegal value in {} for {}".format(KALITE_GTK_SETTINGS_FILE, k))
        # Derive home/content_root when they weren't explicitly saved.
        if 'home' not in loaded_settings:
            settings['home'] = get_kalite_home(settings['user'])
        if 'content_root' not in loaded_settings:
            # Fix: stray debug print("SETTING CONTENT_ROOT") replaced with
            # a logger.debug() call so the UI doesn't write to stdout.
            logger.debug("Deriving content_root from home directory")
            settings['content_root'] = os.path.join(settings['home'], 'content')
    except ValueError:
        logger.error("Parsing error in {}".format(KALITE_GTK_SETTINGS_FILE))
def get_command(kalite_command):
    """Build an argv list: the configured kalite executable followed by
    the space-separated arguments in *kalite_command*."""
    args = kalite_command.split(" ")
    return [settings['command']] + args
def conditional_sudo(cmd, no_su=False):
    """Prefix *cmd* (an argv list) with a switch-user command
    (pkexec/gksudo) when the configured ka-lite user differs from the
    current user; otherwise return *cmd* unchanged.

    NOTE(review): despite the original docstring this is not a decorator,
    and ``no_su`` is unused — confirm before removing the parameter.
    """
    if settings['user'] != getpass.getuser():
        return shlex.split(SU_COMMAND.format(username=settings['user'])) + cmd
    return cmd


def sudo(cmd, no_su=False):
    """Prefix *cmd* (an argv list) with the privilege-escalation command
    (pkexec/gksudo) so it runs as root.

    NOTE(review): not a decorator; ``no_su`` is unused — confirm before
    removing the parameter.
    """
    return shlex.split(SUDO_COMMAND) + cmd
def run_kalite_command(cmd, shell=False):
    """
    Blocking: run *cmd* with KALITE_HOME taken from the UI settings.

    Returns a 3-item list: [stdout, stderr, returncode].
    (The original docstring's "stdin, stdout" was wrong — see the return
    statement below.)

    Example:
        run_kalite_command(get_command("start --port=7007"))
    """
    env = os.environ.copy()
    env['KALITE_HOME'] = settings['home']
    logger.debug("Running command: {}, KALITE_HOME={}".format(cmd, str(settings['home'])))
    p = subprocess.Popen(
        cmd,
        stdout=subprocess.PIPE,
        stderr=subprocess.PIPE,
        env=env,
        shell=shell
    )
    # decode() necessary to convert streams from byte to str
    return list(map(lambda x: x.decode(), p.communicate())) + [p.returncode]
def stream_kalite_command(cmd, shell=False):
    """
    Generator: run *cmd* with KALITE_HOME taken from the UI settings and
    yield a (stdout_line, None, None) tuple per line of output, followed
    by a final (None, stderr, returncode) tuple after the process exits.

    Example:
        for stdout, stderr, retcode in stream_kalite_command(get_command('start')):
            ...
    """
    env = os.environ.copy()
    env['KALITE_HOME'] = settings['home']
    logger.debug("Streaming command: {}, KALITE_HOME={}".format(cmd, str(settings['home'])))
    p = subprocess.Popen(
        cmd,
        stdout=subprocess.PIPE,
        stderr=subprocess.PIPE,
        env=env,
        shell=shell
    )
    for line in iter(lambda: p.stdout.readline().decode(), ''):
        yield line, None, None
    # Drain stderr *before* waiting so a full stderr pipe cannot deadlock us.
    stderr = p.stderr.read().decode() if p.stderr is not None else None
    # Fix: p.returncode stays None until the child is reaped; the original
    # yielded it without wait(), so callers usually received None.
    p.wait()
    yield (None, stderr, p.returncode)
def has_init_d():
    """Return True if the ka-lite SysV init script is present."""
    return os.path.isfile(DEBIAN_INIT_SCRIPT)


def is_installed():
    """Return True if ka-lite is registered as a runlevel-3 service.

    Fix: guarded against systems without /etc/rc3.d (non-SysV distros),
    where the original os.listdir() raised FileNotFoundError.
    """
    if not os.path.isdir('/etc/rc3.d'):
        return False
    return any('ka-lite' in x for x in os.listdir('/etc/rc3.d'))


def install():
    """Blocking: register ka-lite as a service (root via pkexec/gksudo).

    Writes the configured username to /etc/ka-lite/username and enables
    the init script with update-rc.d.
    """
    return run_kalite_command(
        sudo([
            "bash".encode('ascii'),
            "-c".encode('ascii'),
            "echo {username} > /etc/ka-lite/username && update-rc.d ka-lite defaults".format(username=settings['user']).encode('ascii')
        ])
    )


def remove():
    """Blocking: unregister the ka-lite service (root via pkexec/gksudo)."""
    return run_kalite_command(
        sudo(shlex.split("update-rc.d -f ka-lite remove"))
    )
def start():
    """Streaming: launch the kalite server, yielding output tuples as they arrive."""
    command = conditional_sudo(get_command('start'))
    for output in stream_kalite_command(command):
        yield output


def stop():
    """Streaming: stop the kalite server, yielding output tuples as they arrive."""
    command = conditional_sudo(get_command('stop'))
    for output in stream_kalite_command(command):
        yield output


def diagnose():
    """Blocking: run `kalite diagnose`; returns [stdout, stderr, returncode]."""
    command = get_command('diagnose')
    return run_kalite_command(command)


def status():
    """Blocking: return the server's current status text.

    NOTE(review): the text is taken from the stderr slot of the result —
    presumably `kalite status` writes there; confirm.
    """
    result = run_kalite_command(get_command('status'))
    return result[1]
def save_settings():
    """Persist the current UI settings to the ka-lite-gtk JSON file.

    Fix: use a context manager so the handle is flushed and closed
    deterministically (the original left it to garbage collection).
    """
    with open(KALITE_GTK_SETTINGS_FILE, 'w') as settings_file:
        json.dump(settings, settings_file)
    # Write to debian settings if applicable
    pass
Ensure we're always in sync with /etc/ka-lite/username
"""
CLI for kalite
"""
from __future__ import print_function
from __future__ import unicode_literals
import getpass
import logging
import os
import re
import json
import pwd
import shlex
import subprocess
from distutils.spawn import find_executable
from .exceptions import ValidationError
from . import validators
logger = logging.getLogger(__name__)
# Per-user settings file for the ka-lite GTK frontend.
KALITE_GTK_SETTINGS_FILE = os.path.expanduser(os.path.join('~', '.kalite', 'ka-lite-gtk.json'))
DEFAULT_USER = getpass.getuser()
DEFAULT_PORT = 8008

# A validator callback will raise an exception ValidationError
validate = {
    'user': validators.username,
    'port': validators.port
}

# Constants from the ka-lite .deb package conventions
DEBIAN_INIT_SCRIPT = '/etc/init.d/ka-lite'
DEBIAN_USERNAME_FILE = '/etc/ka-lite/username'
DEBIAN_OPTIONS_FILE = '/etc/ka-lite/server_options'

# Prefer pkexec (PolicyKit) for privilege escalation; fall back to gksudo.
if find_executable('pkexec'):
    SU_COMMAND = 'pkexec --user {username}'
    SUDO_COMMAND = 'pkexec'
else:
    SU_COMMAND = 'gksudo -u {username}'
    SUDO_COMMAND = 'gksudo'

# KA Lite Debian convention
# Set new default values from debian system files
# Remains None when /etc/ka-lite/username is absent; the settings loader
# below uses it to decide whether the .deb username takes precedence.
debian_username = None
if os.path.isfile(DEBIAN_USERNAME_FILE):
    debian_username = open(DEBIAN_USERNAME_FILE, 'r').read()
    debian_username = debian_username.split('\n')[0]  # first line only
    if debian_username:
        try:
            debian_username = validate['user'](debian_username)
            DEFAULT_USER = debian_username
            # Okay there's a default debian user. If that user is the same as
            # the one selected in the user settings, we should use the --port
            # option set for the debian service.
            if os.path.isfile(DEBIAN_OPTIONS_FILE):
                debian_options = open(DEBIAN_OPTIONS_FILE, 'r').read()
                port = re.compile(r'--port\s+(\d+)').search(debian_options)
                # NOTE(review): the regex *match object* (or None) is passed to
                # the port validator, and DEFAULT_PORT becomes the *string*
                # port.group(1) — confirm validators.port accepts both.
                port = validate['port'](port, none_if_invalid=True)
                DEFAULT_PORT = port.group(1) if port else DEFAULT_PORT
        except ValidationError:
            logger.error('Non-existing username in {}'.format(DEBIAN_USERNAME_FILE))
def get_kalite_home(user):
    """Return the .kalite data directory inside *user*'s home directory."""
    home_dir = pwd.getpwnam(user).pw_dir
    return os.path.join(home_dir, '.kalite')
DEFAULT_HOME = get_kalite_home(DEFAULT_USER)

# These are the settings. They are subject to change at load time by
# reading in settings files
settings = {
    'user': DEFAULT_USER,                                   # system user running kalite
    'command': find_executable('kalite'),                   # path to the kalite executable (or None)
    'content_root': os.path.join(DEFAULT_HOME, 'content'),  # downloaded-content location
    'port': DEFAULT_PORT,                                   # int default, or str when parsed from .deb options
    'home': DEFAULT_HOME,                                   # becomes KALITE_HOME for subprocesses
}
# Read settings from settings file
if os.path.isfile(KALITE_GTK_SETTINGS_FILE):
    try:
        # Fix: close the settings file deterministically (was a bare open()).
        with open(KALITE_GTK_SETTINGS_FILE, 'r') as settings_file:
            loaded_settings = json.load(settings_file)
        if debian_username:
            # Do NOT load the username from the settings file if we are
            # using /etc/ka-lite/username -- they can get out of sync.
            # Fix: pop() with a default — the original `del` raised
            # KeyError when the settings file had no 'user' key.
            loaded_settings.pop('user', None)
        for (k, v) in loaded_settings.items():
            try:
                settings[k] = validate[k](v) if k in validate else v
            except ValidationError:
                logger.error("Illegal value in {} for {}".format(KALITE_GTK_SETTINGS_FILE, k))
        # Derive home/content_root when they weren't explicitly saved.
        if 'home' not in loaded_settings:
            settings['home'] = get_kalite_home(settings['user'])
        if 'content_root' not in loaded_settings:
            # Fix: stray debug print("SETTING CONTENT_ROOT") replaced with
            # a logger.debug() call so the UI doesn't write to stdout.
            logger.debug("Deriving content_root from home directory")
            settings['content_root'] = os.path.join(settings['home'], 'content')
    except ValueError:
        logger.error("Parsing error in {}".format(KALITE_GTK_SETTINGS_FILE))
def get_command(kalite_command):
    """Build an argv list: the configured kalite executable followed by
    the space-separated arguments in *kalite_command*."""
    args = kalite_command.split(" ")
    return [settings['command']] + args
def conditional_sudo(cmd, no_su=False):
    """Prefix *cmd* (an argv list) with a switch-user command
    (pkexec/gksudo) when the configured ka-lite user differs from the
    current user; otherwise return *cmd* unchanged.

    NOTE(review): despite the original docstring this is not a decorator,
    and ``no_su`` is unused — confirm before removing the parameter.
    """
    if settings['user'] != getpass.getuser():
        return shlex.split(SU_COMMAND.format(username=settings['user'])) + cmd
    return cmd


def sudo(cmd, no_su=False):
    """Prefix *cmd* (an argv list) with the privilege-escalation command
    (pkexec/gksudo) so it runs as root.

    NOTE(review): not a decorator; ``no_su`` is unused — confirm before
    removing the parameter.
    """
    return shlex.split(SUDO_COMMAND) + cmd
def run_kalite_command(cmd, shell=False):
    """
    Blocking: run *cmd* with KALITE_HOME taken from the UI settings.

    Returns a 3-item list: [stdout, stderr, returncode].
    (The original docstring's "stdin, stdout" was wrong — see the return
    statement below.)

    Example:
        run_kalite_command(get_command("start --port=7007"))
    """
    env = os.environ.copy()
    env['KALITE_HOME'] = settings['home']
    logger.debug("Running command: {}, KALITE_HOME={}".format(cmd, str(settings['home'])))
    p = subprocess.Popen(
        cmd,
        stdout=subprocess.PIPE,
        stderr=subprocess.PIPE,
        env=env,
        shell=shell
    )
    # decode() necessary to convert streams from byte to str
    return list(map(lambda x: x.decode(), p.communicate())) + [p.returncode]
def stream_kalite_command(cmd, shell=False):
    """
    Generator: run *cmd* with KALITE_HOME taken from the UI settings and
    yield a (stdout_line, None, None) tuple per line of output, followed
    by a final (None, stderr, returncode) tuple after the process exits.

    Example:
        for stdout, stderr, retcode in stream_kalite_command(get_command('start')):
            ...
    """
    env = os.environ.copy()
    env['KALITE_HOME'] = settings['home']
    logger.debug("Streaming command: {}, KALITE_HOME={}".format(cmd, str(settings['home'])))
    p = subprocess.Popen(
        cmd,
        stdout=subprocess.PIPE,
        stderr=subprocess.PIPE,
        env=env,
        shell=shell
    )
    for line in iter(lambda: p.stdout.readline().decode(), ''):
        yield line, None, None
    # Drain stderr *before* waiting so a full stderr pipe cannot deadlock us.
    stderr = p.stderr.read().decode() if p.stderr is not None else None
    # Fix: p.returncode stays None until the child is reaped; the original
    # yielded it without wait(), so callers usually received None.
    p.wait()
    yield (None, stderr, p.returncode)
def has_init_d():
    """Return True if the ka-lite SysV init script is present."""
    return os.path.isfile(DEBIAN_INIT_SCRIPT)


def is_installed():
    """Return True if ka-lite is registered as a runlevel-3 service.

    Fix: guarded against systems without /etc/rc3.d (non-SysV distros),
    where the original os.listdir() raised FileNotFoundError.
    """
    if not os.path.isdir('/etc/rc3.d'):
        return False
    return any('ka-lite' in x for x in os.listdir('/etc/rc3.d'))


def install():
    """Blocking: register ka-lite as a service (root via pkexec/gksudo).

    Writes the configured username to /etc/ka-lite/username and enables
    the init script with update-rc.d.
    """
    return run_kalite_command(
        sudo([
            "bash".encode('ascii'),
            "-c".encode('ascii'),
            "echo {username} > /etc/ka-lite/username && update-rc.d ka-lite defaults".format(username=settings['user']).encode('ascii')
        ])
    )


def remove():
    """Blocking: unregister the ka-lite service (root via pkexec/gksudo)."""
    return run_kalite_command(
        sudo(shlex.split("update-rc.d -f ka-lite remove"))
    )
def start():
    """Streaming: launch the kalite server, yielding output tuples as they arrive."""
    command = conditional_sudo(get_command('start'))
    for output in stream_kalite_command(command):
        yield output


def stop():
    """Streaming: stop the kalite server, yielding output tuples as they arrive."""
    command = conditional_sudo(get_command('stop'))
    for output in stream_kalite_command(command):
        yield output


def diagnose():
    """Blocking: run `kalite diagnose`; returns [stdout, stderr, returncode]."""
    command = get_command('diagnose')
    return run_kalite_command(command)


def status():
    """Blocking: return the server's current status text.

    NOTE(review): the text is taken from the stderr slot of the result —
    presumably `kalite status` writes there; confirm.
    """
    result = run_kalite_command(get_command('status'))
    return result[1]
def save_settings():
    """Persist the current UI settings to the ka-lite-gtk JSON file.

    Fix: use a context manager so the handle is flushed and closed
    deterministically (the original left it to garbage collection).
    """
    with open(KALITE_GTK_SETTINGS_FILE, 'w') as settings_file:
        json.dump(settings, settings_file)
    # Write to debian settings if applicable
    pass
|
from .market_calendar import MarketCalendar
def get_calendar(name, open_time=None, close_time=None):
    """
    Retrieves an instance of an MarketCalendar whose name is given.

    :param name: The name of the MarketCalendar to be retrieved.
    :param open_time: Market open time override as datetime.time object. If None then default is used.
    :param close_time: Market close time override as datetime.time object. If None then default is used.
    :return: MarketCalendar of the desired calendar.
    """
    overrides = {'open_time': open_time, 'close_time': close_time}
    return MarketCalendar.factory(name, **overrides)


def get_calendar_names():
    """All Market Calendar names and aliases that can be used in "factory"

    :return: list(str)
    """
    names = MarketCalendar.calendar_names()
    return names
Update calendar_registry.py
from .market_calendar import MarketCalendar
from .exchange_calendar_cfe import CFEExchangeCalendar
from .exchange_calendar_ice import ICEExchangeCalendar
from .exchange_calendar_nyse import NYSEExchangeCalendar
from .exchange_calendar_cme import CMEExchangeCalendar
from .exchange_calendar_bmf import BMFExchangeCalendar
from .exchange_calendar_lse import LSEExchangeCalendar
from .exchange_calendar_tsx import TSXExchangeCalendar
from .exchange_calendar_eurex import EUREXExchangeCalendar
from .exchange_calendar_six import SIXExchangeCalendar
from .exchange_calendar_jpx import JPXExchangeCalendar
from .exchange_calendar_ose import OSEExchangeCalendar
def get_calendar(name, open_time=None, close_time=None):
    """
    Retrieves an instance of an MarketCalendar whose name is given.

    :param name: The name of the MarketCalendar to be retrieved.
    :param open_time: Market open time override as datetime.time object. If None then default is used.
    :param close_time: Market close time override as datetime.time object. If None then default is used.
    :return: MarketCalendar of the desired calendar.
    """
    overrides = {'open_time': open_time, 'close_time': close_time}
    return MarketCalendar.factory(name, **overrides)


def get_calendar_names():
    """All Market Calendar names and aliases that can be used in "factory"

    :return: list(str)
    """
    names = MarketCalendar.calendar_names()
    return names
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.